Compare commits

...

20 Commits

Author SHA1 Message Date
Ludovic Ortega
880fbc902d chore: update contributing guide regarding Automated AI Agent (#2518)
Signed-off-by: Ludovic Ortega <ludovic.ortega@adminafk.fr>
2026-02-20 04:43:00 +05:00
Ludovic Ortega
fba20c1b39 ci: remove discord notification from release (#2501)
Signed-off-by: Ludovic Ortega <ludovic.ortega@adminafk.fr>
2026-02-19 22:47:26 +01:00
nova-api
fa905be002 docs: remove double quotes (") from DB_HOST environment variable (#2514) 2026-02-19 08:56:25 +00:00
fallenbagel
9da8bb6dea fix: preserve blocklist on media deletion & optimise watchlist-sync (#2478) 2026-02-18 21:23:50 +01:00
Michael Reid
0e636a3f99 docs(synology): add installation guide via SynoCommunity (#2503)
Co-authored-by: Ludovic Ortega <github@mail.adminafk.fr>
2026-02-18 16:49:51 +01:00
fallenbagel
e0e4b6f512 fix(watchlist-sync): correct permission typo for TV auto requests (#2488) 2026-02-18 15:23:20 +01:00
Ludovic Ortega
dc1734d41f docs(truenas): update install/migration guide (#2491)
Signed-off-by: Ludovic Ortega <ludovic.ortega@adminafk.fr>
2026-02-18 17:30:25 +05:00
Ludovic Ortega
06e5eb0704 ci: add create-tag workflow to streamline release process (#2493)
Signed-off-by: Ludovic Ortega <ludovic.ortega@adminafk.fr>
2026-02-18 12:22:16 +00:00
DataBitz
88afcc113d docs: media server support clarification [skip ci] (#2498) 2026-02-18 14:08:48 +05:00
Joe Harrison
4939f13dbe ci: update concurrency logic (#2481) 2026-02-17 14:47:16 +01:00
Dallas Gordon
5e57fdcf66 ci: add semantic-pr workflow to enforce conventional commits (#2472) 2026-02-17 13:33:41 +00:00
fallenbagel
cf4883a55e chore(github): add docs and maintenance issue templates (#2467) 2026-02-17 11:47:58 +01:00
blassley
5e64d49c32 docs(unraid): improve unraid migration guide (#2470) 2026-02-17 05:12:47 +05:00
fallenbagel
c6bcfe0ae4 perf: add missing indexes on all foreign key columns (#2461) 2026-02-17 00:36:55 +08:00
Jan Kleine
6076878f76 docs(notifications): revise web push notification docs [skip ci] (#2451)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-02-16 17:19:43 +05:00
Sandesh Koirala
8f0c904928 feat(helm): use an existing PVC as config volume (#2447) 2026-02-16 09:09:11 +01:00
Ludovic Ortega
04b9d87174 fix(helm): add "v" as prefix for appVersion tag (#2445)
Signed-off-by: Ludovic Ortega <ludovic.ortega@adminafk.fr>
2026-02-15 15:40:34 +01:00
fallenbagel
b499976902 fix(servarr): increase default API timeout from 5000ms to 10000ms (#2442) 2026-02-15 17:19:27 +05:00
Ludovic Ortega
87fb0dfd6c chore(helm): update ghcr.io/seerr-team/seerr ( 3.0.0 → 3.0.1 ) [skip-ci] (#2441) 2026-02-15 11:59:01 +00:00
mjonkus
b6a913211a docs: add Unraid installation and migration guide (#2440)
Co-authored-by: Mindaugas Jonkus <mjonkusgmail.com>

[skip ci]
2026-02-15 16:56:37 +05:00
49 changed files with 1029 additions and 103 deletions

View File

@@ -1,6 +1,7 @@
name: 🐛 Bug Report
description: Report a problem
labels: ['bug', 'awaiting triage']
labels: ['awaiting triage']
type: bug
body:
- type: markdown
attributes:

View File

@@ -0,0 +1,63 @@
name: 📚 Documentation
description: Report a docs problem or suggest a docs improvement
title: "[Docs]: "
labels: ["documentation", "awaiting triage"]
type: task
body:
- type: markdown
attributes:
value: |
Thanks for helping improve the docs!
Use this template for documentation issues (typos, unclear steps, missing info, outdated screenshots).
For app bugs or feature ideas, please use the other templates.
- type: input
id: doc-location
attributes:
label: Page / Location
description: Link to the docs page or the file/path (e.g. https://docs.seerr.dev/... or README.md)
placeholder: "https://docs.seerr.dev/..."
validations:
required: true
- type: dropdown
id: doc-area
attributes:
label: Docs Area
options:
- docs site
- migration guide
- README / repo docs
- API / integrations
- other
validations:
required: true
- type: textarea
id: problem
attributes:
label: What's wrong / missing?
description: Describe the issue in the docs.
validations:
required: true
- type: textarea
id: suggested-fix
attributes:
label: Suggested change
description: If you know what should be changed, describe it (or paste proposed wording).
validations:
required: false
- type: checkboxes
id: search-existing
attributes:
label: Search Existing Issues
description: Have you searched existing issues to see if this has already been reported?
options:
- label: Yes, I have searched existing issues.
required: true
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our Code of Conduct.
options:
- label: I agree to follow Seerr's [Code of Conduct](https://github.com/seerr-team/seerr/blob/develop/CODE_OF_CONDUCT.md).
required: true

View File

@@ -1,6 +1,7 @@
name: ✨ Feature Request
description: Suggest an idea
labels: ['enhancement', 'awaiting triage']
labels: ['awaiting triage']
type: feature
body:
- type: markdown
attributes:

.github/ISSUE_TEMPLATE/maintenance.yml (new file)
View File

@@ -0,0 +1,65 @@
name: 🧰 Maintenance / Chore
description: CI, GitHub Actions, build, dependencies, refactors (non-feature work)
title: "[Chore]: "
labels: ["maintenance", "awaiting triage"]
type: task
body:
- type: markdown
attributes:
value: |
Maintainers / contributors: use this for internal tasks (CI, workflows, tooling, refactors).
If you're reporting a user-facing bug or requesting a feature, use the other templates.
- type: dropdown
id: area
attributes:
label: Area
options:
- CI / GitHub Actions
- build / packaging
- dependencies
- release process
- refactor / tech debt
- tooling / scripts
- other
validations:
required: true
- type: textarea
id: summary
attributes:
label: Summary
description: What needs doing and why?
validations:
required: true
- type: textarea
id: acceptance
attributes:
label: Acceptance criteria
description: What does "done" look like?
placeholder: |
- [ ] ...
- [ ] ...
validations:
required: false
- type: input
id: related
attributes:
label: Related links
description: PRs, failing workflow runs, logs, or relevant issues.
validations:
required: false
- type: checkboxes
id: search-existing
attributes:
label: Search Existing Issues
description: Have you searched existing issues to see if this has already been reported?
options:
- label: Yes, I have searched existing issues.
required: true
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our Code of Conduct.
options:
- label: I agree to follow Seerr's [Code of Conduct](https://github.com/seerr-team/seerr/blob/develop/CODE_OF_CONDUCT.md).
required: true

View File

@@ -16,7 +16,7 @@
description: 'Update appVersion in Chart.yaml to match Docker image',
fileMatch: ['(^|/)Chart\\.yaml$'],
matchStrings: [
'#\\s+renovate:\\s+image=(?<depName>\\S*)\nappVersion:\\s+"(?<currentValue>\\S*)"',
"#\\s+renovate:\\s+image=(?<depName>\\S*)\nappVersion:\\s+'(?<currentValue>\\S*)'",
],
datasourceTemplate: 'docker',
},

View File

@@ -18,7 +18,7 @@ env:
DOCKER_HUB: seerr/seerr
concurrency:
group: ci-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
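Restored to YAML indentation, the updated stanza reads as below; for pull request events the group keys on the PR number, so a new push to the same PR cancels the in-flight run, while other events fall back to the branch ref. The same grouping is applied to the other workflows further down:

```yaml
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
```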
@@ -129,7 +129,7 @@ jobs:
build:
name: Build (per-arch, native runners)
if: github.ref == 'refs/heads/develop' && !contains(github.event.head_commit.message, '[skip ci]')
if: github.ref == 'refs/heads/develop'
strategy:
matrix:
include:
@@ -237,7 +237,7 @@ jobs:
discord:
name: Send Discord Notification
needs: publish
if: always() && github.event_name != 'pull_request' && !contains(github.event.head_commit.message, '[skip ci]')
if: always() && github.event_name != 'pull_request'
runs-on: ubuntu-24.04
steps:
- name: Determine Workflow Status

View File

@@ -20,7 +20,7 @@ permissions:
contents: read
concurrency:
group: codeql-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:

View File

@@ -14,7 +14,7 @@ permissions:
contents: read
concurrency:
group: merge-conflict-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:

.github/workflows/create-tag.yml (new file)
View File

@@ -0,0 +1,87 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
name: Create tag
on:
workflow_dispatch:
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
determine-tag-version:
name: Determine tag version
if: github.ref == 'refs/heads/main'
runs-on: ubuntu-24.04
permissions:
contents: read
outputs:
tag_version: ${{ steps.git-cliff.outputs.tag_version }}
steps:
- name: Checkout
uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1
with:
fetch-depth: 0
persist-credentials: false
- name: Install git-cliff
uses: taiki-e/install-action@cede0bb282aae847dfa8aacca3a41c86d973d4d7 # v2.68.1
with:
tool: git-cliff
- name: Get tag version
id: git-cliff
run: |
tag_version=$(git-cliff -c .github/cliff.toml --bumped-version --unreleased)
echo "Next tag version is ${tag_version}"
echo "tag_version=${tag_version}" >> "$GITHUB_OUTPUT"
create-tag:
name: Create tag
if: github.ref == 'refs/heads/main'
runs-on: ubuntu-24.04
permissions:
contents: write
needs: determine-tag-version
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
TAG_VERSION: ${{ needs.determine-tag-version.outputs.tag_version }}
steps:
- name: Checkout
uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1
with:
ssh-key: '${{ secrets.COMMIT_KEY }}'
- name: Pnpm Setup
uses: pnpm/action-setup@41ff72655975bd51cab0327fa583b6e92b6d3061 # v4.2.0
- name: Set up Node.js
uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
with:
node-version-file: 'package.json'
# For workflows with elevated privileges we recommend disabling automatic caching.
# https://github.com/actions/setup-node
package-manager-cache: false
- name: Configure git
run: |
git config --global user.name "${{ github.actor }}"
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
- name: Bump package.json
run: npm version ${TAG_VERSION} --no-commit-hooks --no-git-tag-version
- name: Commit updated files
run: |
git add package.json
git commit -m 'chore(release): prepare ${TAG_VERSION}'
git push
- name: Create git tag
run: |
git tag ${TAG_VERSION}
git push origin ${TAG_VERSION}
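To preview the tag this workflow would produce, the same git-cliff invocation can be run locally; this is a sketch that assumes git-cliff is installed and the full history and tags are available (the workflow uses `fetch-depth: 0` for the same reason):

```bash
# From the repository root, with full history available
git-cliff -c .github/cliff.toml --bumped-version --unreleased
```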

View File

@@ -28,7 +28,7 @@ permissions:
contents: read
concurrency:
group: cypress-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:

View File

@@ -15,7 +15,7 @@ permissions:
contents: read
concurrency:
group: pages
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:

View File

@@ -25,7 +25,7 @@ permissions:
contents: read
concurrency:
group: docs-link-check-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:

View File

@@ -14,7 +14,7 @@ permissions:
contents: read
concurrency:
group: helm-charts
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:

View File

@@ -18,7 +18,7 @@ permissions:
contents: read
concurrency:
group: charts-lint-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:

View File

@@ -15,7 +15,7 @@ env:
DOCKER_HUB: seerr/seerr
concurrency:
group: preview-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:

View File

@@ -11,7 +11,7 @@ permissions:
contents: read
concurrency:
group: release-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
@@ -304,42 +304,3 @@ jobs:
run: gh release edit "${{ env.VERSION }}" --draft=false --repo "${{ github.repository }}"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
discord:
name: Send Discord Notification
needs: publish-release
if: always()
runs-on: ubuntu-24.04
steps:
- name: Determine status
id: status
run: |
case "${{ needs.publish-release.result }}" in
success) echo "status=Success" >> $GITHUB_OUTPUT; echo "colour=3066993" >> $GITHUB_OUTPUT ;;
failure) echo "status=Failure" >> $GITHUB_OUTPUT; echo "colour=15158332" >> $GITHUB_OUTPUT ;;
cancelled) echo "status=Cancelled" >> $GITHUB_OUTPUT; echo "colour=10181046" >> $GITHUB_OUTPUT ;;
*) echo "status=Skipped" >> $GITHUB_OUTPUT; echo "colour=9807270" >> $GITHUB_OUTPUT ;;
esac
- name: Send notification
run: |
WEBHOOK="${{ secrets.DISCORD_WEBHOOK }}"
PAYLOAD=$(cat <<EOF
{
"embeds": [{
"title": "${{ steps.status.outputs.status }}: ${{ github.workflow }}",
"color": ${{ steps.status.outputs.colour }},
"fields": [
{ "name": "Repository", "value": "[${{ github.repository }}](${{ github.server_url }}/${{ github.repository }})", "inline": true },
{ "name": "Ref", "value": "${{ github.ref }}", "inline": true },
{ "name": "Event", "value": "${{ github.event_name }}", "inline": true },
{ "name": "Triggered by", "value": "${{ github.actor }}", "inline": true },
{ "name": "Workflow", "value": "[${{ github.workflow }}](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})", "inline": true }
]
}]
}
EOF
)
curl -sS -H "Content-Type: application/json" -X POST -d "$PAYLOAD" "$WEBHOOK" || true

View File

@@ -12,7 +12,7 @@ on:
permissions: {}
concurrency:
group: renovate-helm-hooks-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:

.github/workflows/semantic-pr.yml (new file)
View File

@@ -0,0 +1,28 @@
name: "Semantic PR"
on:
pull_request_target:
types:
- opened
- reopened
- edited
- synchronize
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
main:
name: Validate PR Title
runs-on: ubuntu-slim
permissions:
contents: read
pull-requests: read
checks: write
steps:
- uses: amannn/action-semantic-pull-request@48f256284bd46cdaab1048c3721360e808335d50 # v6.1.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -9,7 +9,7 @@ on:
permissions: {}
concurrency:
group: close-stale-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:

View File

@@ -14,7 +14,7 @@ permissions:
contents: read
concurrency:
group: docs-pr-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:

View File

@@ -16,7 +16,7 @@ permissions:
contents: read
concurrency:
group: trivy-scan-${{ github.ref }}
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:

View File

@@ -6,6 +6,12 @@ All help is welcome and greatly appreciated! If you would like to contribute to
> [!IMPORTANT]
>
> Automated AI-generated contributions without human review are not allowed and will be rejected.
> This is an open-source project maintained by volunteers.
> We do not have the resources to review pull requests that could have been avoided with proper human oversight.
> While we have no issue with contributors using AI tools as an aid, it is your responsibility as a contributor to ensure that all submissions are carefully reviewed and meet our quality standards.
> Submissions that appear to be unreviewed AI output will be considered low-effort and may result in a ban.
>
> If you are using **any kind of AI assistance** to contribute to Seerr,
> it must be disclosed in the pull request.
@@ -122,7 +128,7 @@ Steps:
- If you are taking on an existing bug or feature ticket, please comment on the [issue](/../../issues) to avoid multiple people working on the same thing.
- All commits **must** follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- Pull requests with commits not following this standard will **not** be merged.
- Pull requests with titles or commits not following this standard will **not** be merged. PR titles are automatically checked for compliance.
- Please make meaningful commits, or squash them prior to opening a pull request.
- Do not squash commits once people have begun reviewing your changes.
- Always rebase your commit to the latest `develop` branch. Do **not** merge `develop` into your branch.
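For example, commit messages and PR titles that pass this check follow the `type(scope): description` pattern from the Conventional Commits specification (illustrative examples):

```
feat(api): add blocklist endpoint
fix: correct typo in setup wizard
```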

View File

@@ -3,9 +3,9 @@ kubeVersion: '>=1.23.0-0'
name: seerr-chart
description: Seerr helm chart for Kubernetes
type: application
version: 3.0.0
version: 3.1.0
# renovate: image=ghcr.io/seerr-team/seerr
appVersion: '3.0.0'
appVersion: 'v3.0.1'
maintainers:
- name: Seerr Team
url: https://github.com/orgs/seerr-team/people

View File

@@ -1,6 +1,6 @@
# seerr-chart
![Version: 3.0.0](https://img.shields.io/badge/Version-3.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square)
![Version: 3.1.0](https://img.shields.io/badge/Version-3.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v3.0.1](https://img.shields.io/badge/AppVersion-v3.0.1-informational?style=flat-square)
Seerr helm chart for Kubernetes
@@ -44,9 +44,10 @@ If `replicaCount` value was used - remove it. Helm update should work fine after
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| config | object | `{"persistence":{"accessModes":["ReadWriteOnce"],"annotations":{},"name":"","size":"5Gi","volumeName":""}}` | Creating PVC to store configuration |
| config | object | `{"persistence":{"accessModes":["ReadWriteOnce"],"annotations":{},"existingClaim":"","name":"","size":"5Gi","volumeName":""}}` | Creating PVC to store configuration |
| config.persistence.accessModes | list | `["ReadWriteOnce"]` | Access modes of persistent disk |
| config.persistence.annotations | object | `{}` | Annotations for PVCs |
| config.persistence.existingClaim | string | `""` | Specify an existing `PersistentVolumeClaim` to use. If this value is provided, the default PVC will not be created |
| config.persistence.name | string | `""` | Config name |
| config.persistence.size | string | `"5Gi"` | Size of persistent disk |
| config.persistence.volumeName | string | `""` | Name of the permanent volume to reference in the claim. Can be used to bind to existing volumes. |
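A minimal values override using the new key might look like the following, assuming a PVC named `seerr-config` already exists in the release namespace (hypothetical name):

```yaml
config:
  persistence:
    # Reuse a pre-created PersistentVolumeClaim; the chart's default PVC is then not created
    existingClaim: seerr-config
```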

View File

@@ -1,3 +1,4 @@
{{- if not .Values.config.persistence.existingClaim -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@@ -22,3 +23,4 @@ spec:
resources:
requests:
storage: "{{ .Values.config.persistence.size }}"
{{- end -}}

View File

@@ -103,7 +103,7 @@ spec:
volumes:
- name: config
persistentVolumeClaim:
claimName: {{ include "seerr.configPersistenceName" . }}
claimName: {{ if .Values.config.persistence.existingClaim }}{{ .Values.config.persistence.existingClaim }}{{- else }}{{ include "seerr.configPersistenceName" . }}{{- end }}
{{- with .Values.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -86,6 +86,8 @@ config:
# -- Name of the permanent volume to reference in the claim.
# Can be used to bind to existing volumes.
volumeName: ''
# -- Specify an existing `PersistentVolumeClaim` to use. If this value is provided, the default PVC will not be created
existingClaim: ''
ingress:
enabled: false

View File

@@ -30,7 +30,7 @@ If your PostgreSQL server is configured to accept TCP connections, you can speci
```dotenv
DB_TYPE=postgres # Which DB engine to use, either sqlite or postgres. The default is sqlite.
DB_HOST="localhost" # (optional) The host (URL) of the database. The default is "localhost".
DB_HOST=localhost # (optional) The host (URL) of the database. The default is "localhost".
DB_PORT="5432" # (optional) The port to connect to. The default is "5432".
DB_USER= # (required) Username used to connect to the database.
DB_PASS= # (required) Password of the user used to connect to the database.

View File

@@ -0,0 +1,111 @@
---
title: Synology (Advanced)
description: Install Seerr on Synology NAS using SynoCommunity
sidebar_position: 5
---
# Synology
:::warning
Third-party installation methods are maintained by the community. The Seerr team is not responsible for these packages.
:::
:::warning
This method is not recommended for most users. It is intended for advanced users who are using Synology NAS.
:::
## Prerequisites
- Synology NAS running **DSM 7.2** or later
- 64-bit architecture (x86_64 or ARMv8)
- [SynoCommunity package source](https://synocommunity.com/) added to Package Center
## Adding the SynoCommunity Package Source
If you haven't already added SynoCommunity to your Package Center:
1. Open **Package Center** in DSM
2. Click **Settings** in the top-right corner
3. Go to the **Package Sources** tab
4. Click **Add**
5. Enter the following:
- **Name**: `SynoCommunity`
- **Location**: `https://packages.synocommunity.com`
6. Click **OK**
## Installation
1. In **Package Center**, search for **Seerr**
2. Click **Install**
3. Follow the installation wizard prompts
4. Package Center will automatically install any required dependencies (Node.js v22)
### Access Seerr
Once installed, access Seerr at:
```
http://<your-synology-ip>:5055
```
You can also click the **Open** button in Package Center or find Seerr in the DSM main menu.
## Configuration
Seerr's configuration files are stored at:
```
/var/packages/seerr/var/config
```
:::info
The Seerr package runs as a dedicated service user managed by DSM. No manual permission configuration is required.
:::
## Managing the Service
You can start, stop, and restart Seerr from **Package Center** → Find Seerr → Use the action buttons.
## Updating
When a new version is available:
1. Open **Package Center**
2. Go to **Installed** packages
3. Find **Seerr** and click **Update** if available
:::tip
Enable automatic updates in Package Center settings to keep Seerr up to date.
:::
## Troubleshooting
### Viewing Logs
Seerr logs are located at `/var/packages/seerr/var/config/logs` and can be accessed using:
- **File Browser** package (recommended for most users)
- SSH (advanced users)
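Over SSH, for instance, recent log output could be inspected with something like the following (exact log file names may vary):

```bash
ls /var/packages/seerr/var/config/logs
tail -f /var/packages/seerr/var/config/logs/*.log
```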
### Port Conflicts
Seerr uses port 5055. If this port is already in use:
- **Docker containers**: Remap the conflicting container to a different port
- **Other packages**: The conflicting package will need to be uninstalled as Seerr's port cannot be changed
SynoCommunity ensures there are no port conflicts with other SynoCommunity packages or official Synology packages.
### Package Won't Start
Ensure Node.js v22 is installed and running by checking its status in **Package Center**.
## Uninstallation
1. Open **Package Center**
2. Find **Seerr** in your installed packages
3. Click **Uninstall**
:::caution
Uninstalling will remove the application but preserve your configuration data by default. Select "Remove data" during uninstallation if you want a complete removal.
:::

View File

@@ -4,12 +4,6 @@ description: Install Seerr using TrueNAS
sidebar_position: 4
---
# TrueNAS
:::danger
This method has not yet been updated for Seerr and is currently a work in progress.
You can follow the ongoing work on this issue https://github.com/truenas/apps/issues/3374.
:::
<!--
:::warning
Third-party installation methods are maintained by the community. The Seerr team is not responsible for these packages.
:::
@@ -17,4 +11,7 @@ Third-party installation methods are maintained by the community. The Seerr team
:::warning
This method is not recommended for most users. It is intended for advanced users who are using TrueNAS distribution.
:::
-->
## Installation
Go to the 'Apps' menu, click the 'Discover Apps' button in the top right, search for 'Seerr' in the search bar, and install the app.

View File

@@ -5,12 +5,7 @@ sidebar_position: 3
---
# Unraid
:::danger
This method has not yet been updated for Seerr and is awaiting a community contribution.
Feel free to open a pull request on GitHub to update this installation method.
:::
<!--
:::warning
Third-party installation methods are maintained by the community. The Seerr team is not responsible for these packages.
:::
@@ -19,9 +14,76 @@ Third-party installation methods are maintained by the community. The Seerr team
This method is not recommended for most users. It is intended for advanced users who are using Unraid.
:::
1. Ensure you have the **Community Applications** plugin installed.
2. Inside the **Community Applications** app store, search for **Seerr**.
3. Click the **Install Button**.
4. On the following **Add Container** screen, make changes to the **Host Port** and **Host Path 1** \(Appdata\) as needed.
5. Click apply and access "Seerr" at your `<ServerIP:HostPort>` in a web browser.
-->
If an official Unraid Community Applications template for Seerr isn't available in your catalog, you can install Seerr manually using Unraid's Docker UI.
## Fresh Installation
### 1. Create the config directory
:::note
Seerr is now rootless. Unraid typically runs Docker containers as `nobody:users` (UID 99, GID 100), but Seerr now runs internally as UID 1000, GID 1000. This creates a permission mismatch.
:::
:::info
**If migrating**: Copy your existing Jellyseerr/Overseerr config files (e.g., from `/mnt/user/appdata/overseerr/` or `/mnt/user/appdata/jellyseerr/`) to `/mnt/user/appdata/seerr`, then apply the permissions below.
:::
Open the Unraid terminal and run:
```bash
mkdir -p /mnt/user/appdata/seerr
chown -R 1000:1000 /mnt/user/appdata/seerr
```
### 2. Add the Docker container
Navigate to the **Docker** tab in Unraid and click **Add Container**. Fill in the following:
| Field | Value |
|---|---|
| **Name** | `seerr` |
| **Repository** | `ghcr.io/seerr-team/seerr:latest` |
| **Registry URL** (optional) | `https://ghcr.io` |
| **Icon URL** | `https://raw.githubusercontent.com/seerr-team/seerr/develop/public/android-chrome-512x512.png` |
| **WebUI** | `http://[IP]:[PORT:5055]` |
| **Extra Parameters** | `--init` |
| **Network Type** | `bridge` |
| **Privileged** | `Off` |
Then click **Add another Path, Port, Variable** to add:
**Port:**
| Field | Value |
|---|---|
| Container Port | `5055` |
| Host Port | `5055` |
| Connection Type | `TCP` |
**Path:**
| Field | Value |
|---|---|
| Container Path | `/app/config` |
| Host Path | `/mnt/user/appdata/seerr` |
**Variable:**
| Field | Value |
|---|---|
| Key | `TZ` |
| Value | Your [TZ database name](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) (e.g., `America/New_York`) |
**Variable (optional):**
| Field | Value |
|---|---|
| Key | `LOG_LEVEL` |
| Value | `info` |
Click **Apply** to create and start the container.
### 3. Access Seerr
Open the WebUI at `http://<your-unraid-ip>:5055` and follow the setup wizard.
:::info
The `--init` flag in **Extra Parameters** is required. Seerr does not include its own init process, so `--init` ensures proper signal handling and clean container shutdowns.
:::
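For reference, the same container expressed as a single docker run command might look roughly like this (values mirror the table above; adjust the host path and timezone to your setup):

```bash
docker run -d \
  --name seerr \
  --init \
  -p 5055:5055 \
  -e TZ=America/New_York \
  -e LOG_LEVEL=info \
  -v /mnt/user/appdata/seerr:/app/config \
  ghcr.io/seerr-team/seerr:latest
```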

View File

@@ -23,7 +23,6 @@ Installation methods are now divided into two categories: official and third-par
The Seerr team is only responsible for official installation methods, while third-party methods are maintained by the community.
Some methods are currently not maintained, but this does not mean they are permanently discontinued. The community may restore and support them if they choose to do so.
- **Unraid app:** Not maintained
- **Snap package:** Not maintained
:::
@@ -211,4 +210,106 @@ See https://aur.archlinux.org/packages/seerr
### TrueNAS
Waiting for https://github.com/truenas/apps/issues/3374
Refer to the [Seerr TrueNAS Documentation](/getting-started/third-parties/truenas); all of our examples have been updated to reflect the change below.
<Tabs groupId="truenas-migration" queryString>
<TabItem value="hostpath" label="Host Path">
**This guide describes how to migrate from Host Path storage (not ixVolume).**
1. Stop Jellyseerr/Overseerr
2. Install Seerr and use the same Host Path storage that was used by Jellyseerr/Overseerr
3. Start Seerr app
4. Delete Jellyseerr/Overseerr app
</TabItem>
<TabItem value="ixvolume" label="ixVolume">
**This guide describes how to migrate from ixVolume storage (not Host Path).**
1. Stop Jellyseerr/Overseerr
2. Create a dataset for Seerr
If your apps normally store data under something like:
```
/mnt/storage/<app-name>
```
then create a dataset named:
```
storage/seerr
```
resulting in:
```
/mnt/storage/seerr
```
3. Copy ixVolume Data
Open System Settings → Shell, or SSH into your TrueNAS server as root, and run:
```bash
rsync -av /mnt/.ix-apps/app_mounts/jellyseerr/ /mnt/storage/seerr/
```
4. Install Seerr and use the same Host Path storage that was created before (`/mnt/storage/seerr/config` in our example)
5. Start Seerr app
6. Delete Jellyseerr/Overseerr app
</TabItem>
</Tabs>
### Unraid
Refer to the [Seerr Unraid Documentation](/getting-started/third-parties/unraid); all of our examples have been updated to reflect the change below.
Seerr will automatically migrate your existing Overseerr or Jellyseerr data on first startup. No manual database migration is needed.
1. Stop and remove the old Overseerr (or Jellyseerr) container from the Unraid **Docker** tab. Click the container icon, then **Stop**, then **Remove**. **⚠️ Do not delete the appdata folder ⚠️**
2. Back up your existing appdata folder:
```bash
cp -a /mnt/user/appdata/overseerr /mnt/user/appdata/overseerr-backup
```
3. Fix config folder permissions — Seerr runs as the `node` user (UID 1000) instead of root:
```bash
chown -R 1000:1000 /mnt/user/appdata/overseerr
```
For Jellyseerr users, replace `overseerr` with `jellyseerr` in the path above.
4. Add a new container in the Unraid **Docker** tab. Click **Add Container** and fill in the following:
| Field | Value |
|---|---|
| **Name** | `seerr` |
| **Repository** | `ghcr.io/seerr-team/seerr:latest` |
| **Registry URL** (optional) | `https://ghcr.io` |
| **Icon URL** | `https://raw.githubusercontent.com/seerr-team/seerr/develop/public/android-chrome-512x512.png` |
| **WebUI** | `http://[IP]:[PORT:5055]` |
| **Extra Parameters** | `--init` |
| **Network Type** | `bridge` |
| **Privileged** | `Off` |
Then click **Add another Path, Port, Variable** to add:
**Port:**
| Field | Value |
|---|---|
| Container Port | `5055` |
| Host Port | `5055` |
| Connection Type | `TCP` |
**Path** — point this to your existing config folder:
| Field | Value |
|---|---|
| Container Path | `/app/config` |
| Host Path | `/mnt/user/appdata/overseerr` |
For Jellyseerr users, use `/mnt/user/appdata/jellyseerr`.
**Variable:**
| Field | Value |
|---|---|
| Key | `TZ` |
| Value | Your [TZ database name](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) (e.g., `America/New_York`) |
**Variable (optional):**
| Field | Value |
|---|---|
| Key | `LOG_LEVEL` |
| Value | `info` |
5. Click **Apply** to start the container. Check the container logs to confirm the automatic migration completed successfully.
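From the Unraid terminal, one way to follow those logs (assuming the container is named `seerr` as above):

```bash
docker logs -f seerr
```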
:::tip
If you are using a reverse proxy (such as SWAG or Nginx Proxy Manager), update your proxy configuration to point to the new container name `seerr`. The default port remains `5055`.
:::

View File

@@ -6,18 +6,22 @@ sidebar_position: 2
# Web Push
The web push notification agent enables you and your users to receive Seerr notifications in a supported browser.
This notification agent does not require any configuration, but is not enabled in Seerr
:::warning
Web push notifications require a secure connection to your Seerr instance. Refer to the [Reverse Proxy](/extending-seerr/reverse-proxy) documentation for more information.
:::
To set up web push notifications, simply enable the agent in **Settings → Notifications → Web Push**. You and your users will then be prompted to allow notifications in your web browser.
The web push notification agent enables you and your users to receive Seerr notifications in a supported browser. This offers a native notification experience without the need to install an app.
Users can opt out of these notifications, or customize the notification types they would like to subscribe to, in their user settings.
This notification agent does not require any configuration, but is not enabled by default in Seerr.
:::info
Web push notifications offer a native notification experience without the need to install an app.
To set up web push notifications, simply enable the agent in **Settings → Notifications → Web Push**.
You and your users have the option to enable web push notifications by going to your **User Profile → Edit Settings → Notifications → Web Push → Enable web push**. Here you can also customize the notifications you'd like to receive.
:::info[Mobile Users]
For Web Push notifications to work on mobile, you need to add Seerr to your home screen as a progressive web app (PWA).
:::
:::info[iOS Users]
On iOS you may need to enable the Safari notifications feature flag by going to **Settings → Safari → Advanced → Feature Flags** and enabling "Notifications".
:::

View File

@@ -19,7 +19,7 @@ Please check how to migrate to Seerr in our [migration guide](https://docs.seerr
Seerr brings several features that were previously available in Jellyseerr but missing from Overseerr. These additions improve flexibility, performance, and overall control for admins and power users:
* **Alternative media solution:** Added support for Jellyfin and Emby in addition to the existing Plex integration.
* **Alternative media solution:** Added support for Jellyfin and Emby as alternatives to Plex. Only one integration can be used at a time.
* **PostgreSQL support**: In addition to SQLite, you can now opt in to using a PostgreSQL database.
* **Blocklist for movies, series, and tags**: Allows permitted users to hide movies, series, or tags from regular users.
* **Override rules**: Adjust default request settings based on conditions such as user, tag, or other criteria.

View File

@@ -16,7 +16,12 @@ const config: Config = {
deploymentBranch: 'gh-pages',
onBrokenLinks: 'throw',
onBrokenMarkdownLinks: 'warn',
markdown: {
hooks: {
onBrokenMarkdownLinks: 'warn',
},
},
i18n: {
defaultLocale: 'en',

View File

@@ -92,7 +92,7 @@ class ServarrBase<QueueItemAppendT> extends ExternalAPI {
apiKey,
cacheName,
apiName,
timeout = 5000,
timeout = 10000,
}: {
url: string;
apiKey: string;

View File

@@ -36,6 +36,7 @@ export class Blocklist implements BlocklistItem {
@ManyToOne(() => User, (user) => user.id, {
eager: true,
})
@Index()
user?: User;
@OneToOne(() => Media, (media) => media.blocklist, {

View File

@@ -5,6 +5,7 @@ import {
AfterLoad,
Column,
Entity,
Index,
ManyToOne,
OneToMany,
PrimaryGeneratedColumn,
@@ -19,6 +20,7 @@ class Issue {
public id: number;
@Column({ type: 'int' })
@Index()
public issueType: IssueType;
@Column({ type: 'int', default: IssueStatus.OPEN })
@@ -34,12 +36,14 @@ class Issue {
eager: true,
onDelete: 'CASCADE',
})
@Index()
public media: Media;
@ManyToOne(() => User, (user) => user.createdIssues, {
eager: true,
onDelete: 'CASCADE',
})
@Index()
public createdBy: User;
@ManyToOne(() => User, {
@@ -47,6 +51,7 @@ class Issue {
onDelete: 'CASCADE',
nullable: true,
})
@Index()
public modifiedBy?: User;
@OneToMany(() => IssueComment, (comment) => comment.issue, {

View File

@@ -1,5 +1,11 @@
import { DbAwareColumn } from '@server/utils/DbColumnHelper';
import { Column, Entity, ManyToOne, PrimaryGeneratedColumn } from 'typeorm';
import {
Column,
Entity,
Index,
ManyToOne,
PrimaryGeneratedColumn,
} from 'typeorm';
import Issue from './Issue';
import { User } from './User';
@@ -12,11 +18,13 @@ class IssueComment {
eager: true,
onDelete: 'CASCADE',
})
@Index()
public user: User;
@ManyToOne(() => Issue, (issue) => issue.comments, {
onDelete: 'CASCADE',
})
@Index()
public issue: Issue;
@Column({ type: 'text' })

View File

@@ -206,6 +206,19 @@ class Media {
Object.assign(this, init);
}
public resetServiceData(): void {
this.serviceId = null;
this.serviceId4k = null;
this.externalServiceId = null;
this.externalServiceId4k = null;
this.externalServiceSlug = null;
this.externalServiceSlug4k = null;
this.ratingKey = null;
this.ratingKey4k = null;
this.jellyfinMediaId = null;
this.jellyfinMediaId4k = null;
}
@AfterLoad()
public setPlexUrls(): void {
const { machineId, webAppUrl } = getSettings().plex;

View File

@@ -521,12 +521,14 @@ export class MediaRequest {
eager: true,
onDelete: 'CASCADE',
})
@Index()
public media: Media;
@ManyToOne(() => User, (user) => user.requests, {
eager: true,
onDelete: 'CASCADE',
})
@Index()
public requestedBy: User;
@ManyToOne(() => User, {
@@ -535,6 +537,7 @@ export class MediaRequest {
eager: true,
onDelete: 'SET NULL',
})
@Index()
public modifiedBy?: User;
@DbAwareColumn({ type: 'datetime', default: () => 'CURRENT_TIMESTAMP' })

View File

@@ -1,6 +1,12 @@
import { MediaStatus } from '@server/constants/media';
import { DbAwareColumn } from '@server/utils/DbColumnHelper';
import { Column, Entity, ManyToOne, PrimaryGeneratedColumn } from 'typeorm';
import {
Column,
Entity,
Index,
ManyToOne,
PrimaryGeneratedColumn,
} from 'typeorm';
import Media from './Media';
@Entity()
@@ -20,6 +26,7 @@ class Season {
@ManyToOne(() => Media, (media) => media.seasons, {
onDelete: 'CASCADE',
})
@Index()
public media: Promise<Media>;
@DbAwareColumn({ type: 'datetime', default: () => 'CURRENT_TIMESTAMP' })

View File

@@ -1,6 +1,12 @@
import { MediaRequestStatus } from '@server/constants/media';
import { DbAwareColumn } from '@server/utils/DbColumnHelper';
import { Column, Entity, ManyToOne, PrimaryGeneratedColumn } from 'typeorm';
import {
Column,
Entity,
Index,
ManyToOne,
PrimaryGeneratedColumn,
} from 'typeorm';
import { MediaRequest } from './MediaRequest';
@Entity()
@@ -17,6 +23,7 @@ class SeasonRequest {
@ManyToOne(() => MediaRequest, (request) => request.seasons, {
onDelete: 'CASCADE',
})
@Index()
public request: MediaRequest;
@DbAwareColumn({ type: 'datetime', default: () => 'CURRENT_TIMESTAMP' })

View File

@@ -2,6 +2,7 @@ import { DbAwareColumn } from '@server/utils/DbColumnHelper';
import {
Column,
Entity,
Index,
ManyToOne,
PrimaryGeneratedColumn,
Unique,
@@ -18,6 +19,7 @@ export class UserPushSubscription {
eager: true,
onDelete: 'CASCADE',
})
@Index()
public user: User;
@Column()

View File

@@ -47,12 +47,14 @@ export class Watchlist implements WatchlistItem {
eager: true,
onDelete: 'CASCADE',
})
@Index()
public requestedBy: User;
@ManyToOne(() => Media, (media) => media.watchlists, {
eager: true,
onDelete: 'CASCADE',
})
@Index()
public media: Media;
@DbAwareColumn({ type: 'datetime', default: () => 'CURRENT_TIMESTAMP' })
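The same pattern is applied across all of these entities. As a consolidated sketch (illustrative entity, not from the codebase), decorating a relation with `@Index()` makes TypeORM index the generated foreign key column, which the migrations below also add explicitly for existing databases:

```typescript
import { Entity, Index, ManyToOne, PrimaryGeneratedColumn } from 'typeorm';
import { User } from './User';

@Entity()
class ExampleItem {
  @PrimaryGeneratedColumn()
  public id: number;

  // Indexing the relation creates an index on the foreign key column ("userId"),
  // speeding up joins and cascade-delete lookups.
  @ManyToOne(() => User, { eager: true, onDelete: 'CASCADE' })
  @Index()
  public user: User;
}
```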

View File

@@ -45,7 +45,7 @@ class WatchlistSync {
[
Permission.AUTO_REQUEST,
Permission.AUTO_REQUEST_MOVIE,
Permission.AUTO_APPROVE_TV,
Permission.AUTO_REQUEST_TV,
],
{ type: 'or' }
)
@@ -70,13 +70,33 @@ class WatchlistSync {
response.items.map((i) => i.tmdbId)
);
const watchlistTmdbIds = response.items.map((i) => i.tmdbId);
const requestRepository = getRepository(MediaRequest);
const existingAutoRequests = await requestRepository
.createQueryBuilder('request')
.leftJoinAndSelect('request.media', 'media')
.where('request.requestedBy = :userId', { userId: user.id })
.andWhere('request.isAutoRequest = true')
.andWhere('media.tmdbId IN (:...tmdbIds)', { tmdbIds: watchlistTmdbIds })
.getMany();
const autoRequestedTmdbIds = new Set(
existingAutoRequests
.filter((r) => r.media != null)
.map((r) => `${r.media.mediaType}:${r.media.tmdbId}`)
);
const unavailableItems = response.items.filter(
// If we can find watchlist items in our database that are also available, we should exclude them
(i) =>
!autoRequestedTmdbIds.has(
`${i.type === 'show' ? MediaType.TV : MediaType.MOVIE}:${i.tmdbId}`
) &&
!mediaItems.find(
(m) =>
m.tmdbId === i.tmdbId &&
((m.status !== MediaStatus.UNKNOWN && m.mediaType === 'movie') ||
(m.status === MediaStatus.BLOCKLISTED ||
(m.status !== MediaStatus.UNKNOWN && m.mediaType === 'movie') ||
(m.mediaType === 'tv' && m.status === MediaStatus.AVAILABLE))
)
);

View File

@@ -0,0 +1,153 @@
import type { MigrationInterface, QueryRunner } from 'typeorm';
export class AddForeignKeyIndexes1771259406751 implements MigrationInterface {
name = 'AddForeignKeyIndexes1771259406751';
public async up(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(
`ALTER TABLE "blocklist" DROP CONSTRAINT "FK_53c1ab62c3e5875bc3ac474823e"`
);
await queryRunner.query(
`ALTER TABLE "blocklist" DROP CONSTRAINT "FK_62b7ade94540f9f8d8bede54b99"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_6bbafa28411e6046421991ea21"`
);
await queryRunner.query(
`CREATE SEQUENCE IF NOT EXISTS "blocklist_id_seq" OWNED BY "blocklist"."id"`
);
await queryRunner.query(
`ALTER TABLE "blocklist" ALTER COLUMN "id" SET DEFAULT nextval('"blocklist_id_seq"')`
);
await queryRunner.query(
`ALTER TABLE "blocklist" ALTER COLUMN "id" DROP DEFAULT`
);
await queryRunner.query(
`CREATE INDEX "IDX_ae34e6b153a90672eb9dc4857d" ON "watchlist" ("requestedById") `
);
await queryRunner.query(
`CREATE INDEX "IDX_6641da8d831b93dfcb429f8b8b" ON "watchlist" ("mediaId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_707b033c2d0653f75213614789" ON "issue_comment" ("userId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_180710fead1c94ca499c57a7d4" ON "issue_comment" ("issueId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_53d04c07c3f4f54eae372ed665" ON "issue" ("issueType") `
);
await queryRunner.query(
`CREATE INDEX "IDX_276e20d053f3cff1645803c95d" ON "issue" ("mediaId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_10b17b49d1ee77e7184216001e" ON "issue" ("createdById") `
);
await queryRunner.query(
`CREATE INDEX "IDX_da88a1019c850d1a7b143ca02e" ON "issue" ("modifiedById") `
);
await queryRunner.query(
`CREATE INDEX "IDX_6f14737e346d6b27d8e50d2157" ON "season_request" ("requestId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_a1aa713f41c99e9d10c48da75a" ON "media_request" ("mediaId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_6997bee94720f1ecb7f3113709" ON "media_request" ("requestedById") `
);
await queryRunner.query(
`CREATE INDEX "IDX_f4fc4efa14c3ba2b29c4525fa1" ON "media_request" ("modifiedById") `
);
await queryRunner.query(
`CREATE INDEX "IDX_03f7958328e311761b0de675fb" ON "user_push_subscription" ("userId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_09b94c932e84635c5461f3c0a9" ON "blocklist" ("tmdbId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_356721a49f145aa439c16e6b99" ON "blocklist" ("userId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_087099b39600be695591da9a49" ON "season" ("mediaId") `
);
await queryRunner.query(
`ALTER TABLE "blocklist" ADD CONSTRAINT "FK_356721a49f145aa439c16e6b999" FOREIGN KEY ("userId") REFERENCES "user"("id") ON DELETE NO ACTION ON UPDATE NO ACTION`
);
await queryRunner.query(
`ALTER TABLE "blocklist" ADD CONSTRAINT "FK_5c8af2d0e83b3be6d250eccc19d" FOREIGN KEY ("mediaId") REFERENCES "media"("id") ON DELETE CASCADE ON UPDATE NO ACTION`
);
}
public async down(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(
`ALTER TABLE "blocklist" DROP CONSTRAINT "FK_5c8af2d0e83b3be6d250eccc19d"`
);
await queryRunner.query(
`ALTER TABLE "blocklist" DROP CONSTRAINT "FK_356721a49f145aa439c16e6b999"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_087099b39600be695591da9a49"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_356721a49f145aa439c16e6b99"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_09b94c932e84635c5461f3c0a9"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_03f7958328e311761b0de675fb"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_f4fc4efa14c3ba2b29c4525fa1"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_6997bee94720f1ecb7f3113709"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_a1aa713f41c99e9d10c48da75a"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_6f14737e346d6b27d8e50d2157"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_da88a1019c850d1a7b143ca02e"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_10b17b49d1ee77e7184216001e"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_276e20d053f3cff1645803c95d"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_53d04c07c3f4f54eae372ed665"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_180710fead1c94ca499c57a7d4"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_707b033c2d0653f75213614789"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_6641da8d831b93dfcb429f8b8b"`
);
await queryRunner.query(
`DROP INDEX "public"."IDX_ae34e6b153a90672eb9dc4857d"`
);
await queryRunner.query(
`ALTER TABLE "blocklist" ALTER COLUMN "id" SET DEFAULT nextval('blacklist_id_seq')`
);
await queryRunner.query(
`ALTER TABLE "blocklist" ALTER COLUMN "id" DROP DEFAULT`
);
await queryRunner.query(`DROP SEQUENCE "blocklist_id_seq"`);
await queryRunner.query(
`CREATE INDEX "IDX_6bbafa28411e6046421991ea21" ON "blocklist" ("tmdbId") `
);
await queryRunner.query(
`ALTER TABLE "blocklist" ADD CONSTRAINT "FK_62b7ade94540f9f8d8bede54b99" FOREIGN KEY ("mediaId") REFERENCES "media"("id") ON DELETE CASCADE ON UPDATE NO ACTION`
);
await queryRunner.query(
`ALTER TABLE "blocklist" ADD CONSTRAINT "FK_53c1ab62c3e5875bc3ac474823e" FOREIGN KEY ("userId") REFERENCES "user"("id") ON DELETE NO ACTION ON UPDATE NO ACTION`
);
}
}

View File

@@ -0,0 +1,203 @@
import type { MigrationInterface, QueryRunner } from 'typeorm';
export class AddForeignKeyIndexes1771259394105 implements MigrationInterface {
name = 'AddForeignKeyIndexes1771259394105';
public async up(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`DROP INDEX "IDX_6bbafa28411e6046421991ea21"`);
await queryRunner.query(
`CREATE TABLE "temporary_blocklist" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "mediaType" varchar NOT NULL, "title" varchar, "tmdbId" integer NOT NULL, "blocklistedTags" varchar, "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "userId" integer, "mediaId" integer, CONSTRAINT "REL_62b7ade94540f9f8d8bede54b9" UNIQUE ("mediaId"), CONSTRAINT "UQ_6bbafa28411e6046421991ea21c" UNIQUE ("tmdbId"))`
);
await queryRunner.query(
`INSERT INTO "temporary_blocklist"("id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId") SELECT "id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId" FROM "blocklist"`
);
await queryRunner.query(`DROP TABLE "blocklist"`);
await queryRunner.query(
`ALTER TABLE "temporary_blocklist" RENAME TO "blocklist"`
);
await queryRunner.query(
`CREATE INDEX "IDX_6bbafa28411e6046421991ea21" ON "blocklist" ("tmdbId") `
);
await queryRunner.query(`DROP INDEX "IDX_6bbafa28411e6046421991ea21"`);
await queryRunner.query(
`CREATE TABLE "temporary_user_push_subscription" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "endpoint" varchar NOT NULL, "p256dh" varchar NOT NULL, "auth" varchar NOT NULL, "userId" integer, "userAgent" varchar, "createdAt" datetime DEFAULT (CURRENT_TIMESTAMP), CONSTRAINT "UQ_6427d07d9a171a3a1ab87480005" UNIQUE ("endpoint", "userId"), CONSTRAINT "UQ_f90ab5a4ed54905a4bb51a7148b" UNIQUE ("auth"), CONSTRAINT "FK_03f7958328e311761b0de675fbe" FOREIGN KEY ("userId") REFERENCES "user" ("id") ON DELETE CASCADE ON UPDATE NO ACTION)`
);
await queryRunner.query(
`INSERT INTO "temporary_user_push_subscription"("id", "endpoint", "p256dh", "auth", "userId", "userAgent", "createdAt") SELECT "id", "endpoint", "p256dh", "auth", "userId", "userAgent", "createdAt" FROM "user_push_subscription"`
);
await queryRunner.query(`DROP TABLE "user_push_subscription"`);
await queryRunner.query(
`ALTER TABLE "temporary_user_push_subscription" RENAME TO "user_push_subscription"`
);
await queryRunner.query(
`CREATE TABLE "temporary_user_push_subscription" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "endpoint" varchar NOT NULL, "p256dh" varchar NOT NULL, "auth" varchar NOT NULL, "userId" integer, "userAgent" varchar, "createdAt" datetime DEFAULT (CURRENT_TIMESTAMP), CONSTRAINT "UQ_6427d07d9a171a3a1ab87480005" UNIQUE ("endpoint", "userId"), CONSTRAINT "UQ_f90ab5a4ed54905a4bb51a7148b" UNIQUE ("auth"), CONSTRAINT "FK_03f7958328e311761b0de675fbe" FOREIGN KEY ("userId") REFERENCES "user" ("id") ON DELETE CASCADE ON UPDATE NO ACTION)`
);
await queryRunner.query(
`INSERT INTO "temporary_user_push_subscription"("id", "endpoint", "p256dh", "auth", "userId", "userAgent", "createdAt") SELECT "id", "endpoint", "p256dh", "auth", "userId", "userAgent", "createdAt" FROM "user_push_subscription"`
);
await queryRunner.query(`DROP TABLE "user_push_subscription"`);
await queryRunner.query(
`ALTER TABLE "temporary_user_push_subscription" RENAME TO "user_push_subscription"`
);
await queryRunner.query(
`CREATE TABLE "temporary_blocklist" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "mediaType" varchar NOT NULL, "title" varchar, "tmdbId" integer NOT NULL, "blocklistedTags" varchar, "createdAt" datetime NOT NULL DEFAULT (CURRENT_TIMESTAMP), "userId" integer, "mediaId" integer, CONSTRAINT "REL_62b7ade94540f9f8d8bede54b9" UNIQUE ("mediaId"), CONSTRAINT "UQ_6bbafa28411e6046421991ea21c" UNIQUE ("tmdbId"))`
);
await queryRunner.query(
`INSERT INTO "temporary_blocklist"("id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId") SELECT "id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId" FROM "blocklist"`
);
await queryRunner.query(`DROP TABLE "blocklist"`);
await queryRunner.query(
`ALTER TABLE "temporary_blocklist" RENAME TO "blocklist"`
);
await queryRunner.query(
`CREATE INDEX "IDX_ae34e6b153a90672eb9dc4857d" ON "watchlist" ("requestedById") `
);
await queryRunner.query(
`CREATE INDEX "IDX_6641da8d831b93dfcb429f8b8b" ON "watchlist" ("mediaId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_707b033c2d0653f75213614789" ON "issue_comment" ("userId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_180710fead1c94ca499c57a7d4" ON "issue_comment" ("issueId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_53d04c07c3f4f54eae372ed665" ON "issue" ("issueType") `
);
await queryRunner.query(
`CREATE INDEX "IDX_276e20d053f3cff1645803c95d" ON "issue" ("mediaId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_10b17b49d1ee77e7184216001e" ON "issue" ("createdById") `
);
await queryRunner.query(
`CREATE INDEX "IDX_da88a1019c850d1a7b143ca02e" ON "issue" ("modifiedById") `
);
await queryRunner.query(
`CREATE INDEX "IDX_6f14737e346d6b27d8e50d2157" ON "season_request" ("requestId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_a1aa713f41c99e9d10c48da75a" ON "media_request" ("mediaId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_6997bee94720f1ecb7f3113709" ON "media_request" ("requestedById") `
);
await queryRunner.query(
`CREATE INDEX "IDX_f4fc4efa14c3ba2b29c4525fa1" ON "media_request" ("modifiedById") `
);
await queryRunner.query(
`CREATE INDEX "IDX_03f7958328e311761b0de675fb" ON "user_push_subscription" ("userId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_09b94c932e84635c5461f3c0a9" ON "blocklist" ("tmdbId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_356721a49f145aa439c16e6b99" ON "blocklist" ("userId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_087099b39600be695591da9a49" ON "season" ("mediaId") `
);
await queryRunner.query(`DROP INDEX "IDX_09b94c932e84635c5461f3c0a9"`);
await queryRunner.query(`DROP INDEX "IDX_356721a49f145aa439c16e6b99"`);
await queryRunner.query(
`CREATE TABLE "temporary_blocklist" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "mediaType" varchar NOT NULL, "title" varchar, "tmdbId" integer NOT NULL, "blocklistedTags" varchar, "createdAt" datetime NOT NULL DEFAULT (CURRENT_TIMESTAMP), "userId" integer, "mediaId" integer, CONSTRAINT "REL_62b7ade94540f9f8d8bede54b9" UNIQUE ("mediaId"), CONSTRAINT "UQ_6bbafa28411e6046421991ea21c" UNIQUE ("tmdbId"), CONSTRAINT "FK_356721a49f145aa439c16e6b999" FOREIGN KEY ("userId") REFERENCES "user" ("id") ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT "FK_5c8af2d0e83b3be6d250eccc19d" FOREIGN KEY ("mediaId") REFERENCES "media" ("id") ON DELETE CASCADE ON UPDATE NO ACTION)`
);
await queryRunner.query(
`INSERT INTO "temporary_blocklist"("id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId") SELECT "id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId" FROM "blocklist"`
);
await queryRunner.query(`DROP TABLE "blocklist"`);
await queryRunner.query(
`ALTER TABLE "temporary_blocklist" RENAME TO "blocklist"`
);
await queryRunner.query(
`CREATE INDEX "IDX_09b94c932e84635c5461f3c0a9" ON "blocklist" ("tmdbId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_356721a49f145aa439c16e6b99" ON "blocklist" ("userId") `
);
}
public async down(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`DROP INDEX "IDX_356721a49f145aa439c16e6b99"`);
await queryRunner.query(`DROP INDEX "IDX_09b94c932e84635c5461f3c0a9"`);
await queryRunner.query(
`ALTER TABLE "blocklist" RENAME TO "temporary_blocklist"`
);
await queryRunner.query(
`CREATE TABLE "blocklist" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "mediaType" varchar NOT NULL, "title" varchar, "tmdbId" integer NOT NULL, "blocklistedTags" varchar, "createdAt" datetime NOT NULL DEFAULT (CURRENT_TIMESTAMP), "userId" integer, "mediaId" integer, CONSTRAINT "REL_62b7ade94540f9f8d8bede54b9" UNIQUE ("mediaId"), CONSTRAINT "UQ_6bbafa28411e6046421991ea21c" UNIQUE ("tmdbId"))`
);
await queryRunner.query(
`INSERT INTO "blocklist"("id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId") SELECT "id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId" FROM "temporary_blocklist"`
);
await queryRunner.query(`DROP TABLE "temporary_blocklist"`);
await queryRunner.query(
`CREATE INDEX "IDX_356721a49f145aa439c16e6b99" ON "blocklist" ("userId") `
);
await queryRunner.query(
`CREATE INDEX "IDX_09b94c932e84635c5461f3c0a9" ON "blocklist" ("tmdbId") `
);
await queryRunner.query(`DROP INDEX "IDX_087099b39600be695591da9a49"`);
await queryRunner.query(`DROP INDEX "IDX_356721a49f145aa439c16e6b99"`);
await queryRunner.query(`DROP INDEX "IDX_09b94c932e84635c5461f3c0a9"`);
await queryRunner.query(`DROP INDEX "IDX_03f7958328e311761b0de675fb"`);
await queryRunner.query(`DROP INDEX "IDX_f4fc4efa14c3ba2b29c4525fa1"`);
await queryRunner.query(`DROP INDEX "IDX_6997bee94720f1ecb7f3113709"`);
await queryRunner.query(`DROP INDEX "IDX_a1aa713f41c99e9d10c48da75a"`);
await queryRunner.query(`DROP INDEX "IDX_6f14737e346d6b27d8e50d2157"`);
await queryRunner.query(`DROP INDEX "IDX_da88a1019c850d1a7b143ca02e"`);
await queryRunner.query(`DROP INDEX "IDX_10b17b49d1ee77e7184216001e"`);
await queryRunner.query(`DROP INDEX "IDX_276e20d053f3cff1645803c95d"`);
await queryRunner.query(`DROP INDEX "IDX_53d04c07c3f4f54eae372ed665"`);
await queryRunner.query(`DROP INDEX "IDX_180710fead1c94ca499c57a7d4"`);
await queryRunner.query(`DROP INDEX "IDX_707b033c2d0653f75213614789"`);
await queryRunner.query(`DROP INDEX "IDX_6641da8d831b93dfcb429f8b8b"`);
await queryRunner.query(`DROP INDEX "IDX_ae34e6b153a90672eb9dc4857d"`);
await queryRunner.query(
`ALTER TABLE "blocklist" RENAME TO "temporary_blocklist"`
);
await queryRunner.query(
`CREATE TABLE "blocklist" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "mediaType" varchar NOT NULL, "title" varchar, "tmdbId" integer NOT NULL, "blocklistedTags" varchar, "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "userId" integer, "mediaId" integer, CONSTRAINT "REL_62b7ade94540f9f8d8bede54b9" UNIQUE ("mediaId"), CONSTRAINT "UQ_6bbafa28411e6046421991ea21c" UNIQUE ("tmdbId"))`
);
await queryRunner.query(
`INSERT INTO "blocklist"("id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId") SELECT "id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId" FROM "temporary_blocklist"`
);
await queryRunner.query(`DROP TABLE "temporary_blocklist"`);
await queryRunner.query(
`ALTER TABLE "user_push_subscription" RENAME TO "temporary_user_push_subscription"`
);
await queryRunner.query(
`CREATE TABLE "user_push_subscription" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "endpoint" varchar NOT NULL, "p256dh" varchar NOT NULL, "auth" varchar NOT NULL, "userId" integer, "userAgent" varchar, "createdAt" datetime DEFAULT (CURRENT_TIMESTAMP), CONSTRAINT "UQ_6427d07d9a171a3a1ab87480005" UNIQUE ("endpoint", "userId"), CONSTRAINT "UQ_f90ab5a4ed54905a4bb51a7148b" UNIQUE ("auth"), CONSTRAINT "FK_03f7958328e311761b0de675fbe" FOREIGN KEY ("userId") REFERENCES "user" ("id") ON DELETE CASCADE ON UPDATE NO ACTION)`
);
await queryRunner.query(
`INSERT INTO "user_push_subscription"("id", "endpoint", "p256dh", "auth", "userId", "userAgent", "createdAt") SELECT "id", "endpoint", "p256dh", "auth", "userId", "userAgent", "createdAt" FROM "temporary_user_push_subscription"`
);
await queryRunner.query(`DROP TABLE "temporary_user_push_subscription"`);
await queryRunner.query(
`ALTER TABLE "user_push_subscription" RENAME TO "temporary_user_push_subscription"`
);
await queryRunner.query(
`CREATE TABLE "user_push_subscription" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "endpoint" varchar NOT NULL, "p256dh" varchar NOT NULL, "auth" varchar NOT NULL, "userId" integer, "userAgent" varchar, "createdAt" datetime DEFAULT (CURRENT_TIMESTAMP), CONSTRAINT "UQ_6427d07d9a171a3a1ab87480005" UNIQUE ("endpoint", "userId"), CONSTRAINT "UQ_f90ab5a4ed54905a4bb51a7148b" UNIQUE ("auth"), CONSTRAINT "FK_03f7958328e311761b0de675fbe" FOREIGN KEY ("userId") REFERENCES "user" ("id") ON DELETE CASCADE ON UPDATE NO ACTION)`
);
await queryRunner.query(
`INSERT INTO "user_push_subscription"("id", "endpoint", "p256dh", "auth", "userId", "userAgent", "createdAt") SELECT "id", "endpoint", "p256dh", "auth", "userId", "userAgent", "createdAt" FROM "temporary_user_push_subscription"`
);
await queryRunner.query(`DROP TABLE "temporary_user_push_subscription"`);
await queryRunner.query(
`CREATE INDEX "IDX_6bbafa28411e6046421991ea21" ON "blocklist" ("tmdbId") `
);
await queryRunner.query(`DROP INDEX "IDX_6bbafa28411e6046421991ea21"`);
await queryRunner.query(
`ALTER TABLE "blocklist" RENAME TO "temporary_blocklist"`
);
await queryRunner.query(
`CREATE TABLE "blocklist" ("id" integer PRIMARY KEY AUTOINCREMENT NOT NULL, "mediaType" varchar NOT NULL, "title" varchar, "tmdbId" integer NOT NULL, "blocklistedTags" varchar, "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "userId" integer, "mediaId" integer, CONSTRAINT "REL_62b7ade94540f9f8d8bede54b9" UNIQUE ("mediaId"), CONSTRAINT "UQ_6bbafa28411e6046421991ea21c" UNIQUE ("tmdbId"), CONSTRAINT "FK_62b7ade94540f9f8d8bede54b99" FOREIGN KEY ("mediaId") REFERENCES "media" ("id") ON DELETE CASCADE ON UPDATE NO ACTION, CONSTRAINT "FK_53c1ab62c3e5875bc3ac474823e" FOREIGN KEY ("userId") REFERENCES "user" ("id") ON DELETE NO ACTION ON UPDATE NO ACTION)`
);
await queryRunner.query(
`INSERT INTO "blocklist"("id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId") SELECT "id", "mediaType", "title", "tmdbId", "blocklistedTags", "createdAt", "userId", "mediaId" FROM "temporary_blocklist"`
);
await queryRunner.query(`DROP TABLE "temporary_blocklist"`);
await queryRunner.query(
`CREATE INDEX "IDX_6bbafa28411e6046421991ea21" ON "blocklist" ("tmdbId") `
);
}
}

View File

@@ -174,7 +174,12 @@ mediaRoutes.delete(
where: { id: Number(req.params.id) },
});
await mediaRepository.remove(media);
if (media.status === MediaStatus.BLOCKLISTED) {
media.resetServiceData();
await mediaRepository.save(media);
} else {
await mediaRepository.remove(media);
}
return res.status(204).send();
} catch (e) {