From a3a3e8e5cb6252db82d294828b47db2d39879045 Mon Sep 17 00:00:00 2001 From: Benjamin Harder Date: Mon, 19 May 2025 19:50:56 +0200 Subject: [PATCH] Added recovery message when queue is empty readme update --- README.md | 8 +++++--- src/jobs/removal_job.py | 9 ++++----- src/jobs/strikes_handler.py | 7 +++++++ 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 03edcd8..cbffc08 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,7 @@ _Like this app? Thanks for giving it a_ ⭐️ - [TIMER](#timer) - [SSL_VERIFICATION](#ssl_verification) - [IGNORE_DOWNLOAD_CLIENTS](#ignore_download_clients) + - [PRIVATE_TRACKER_HANDLING / PUBLIC_TRACKER_HANDLING](#private_tracker_handling--public_tracker_handling) - [OBSOLETE_TAG](#obsolete_tag) - [PROTECTED_TAGS](#protected_tag) - [Job Defaults](#job-defaults) @@ -320,7 +321,7 @@ Configures the general behavior of the application (across all features) - Note that this only works for qbittorrent currently (if you set up qbittorrent in your config) - "remove" means that torrents are removed (default behavior) - "skip" means they are disregarded (which some users might find handy to protect their private trackers prematurely, ie., before their seed targets are met) - - "obsolete_tag" means that rather than being removed, the torrents are tagged. This allows other applications (such as [qbit_manage](https://github.com/StuffAnThings/qbit_manage) to monitor them and remove them once seed targets are fulfilled + - "obsolete_tag" means that rather than being removed, the torrents are tagged. 
This allows other applications (such as [qbit_manage](https://github.com/StuffAnThings/qbit_manage) to monitor them and remove them once seed targets are fulfilled) - Type: String - Permissible Values: remove, skip, obsolete_tag - Is Mandatory: No (Defaults to remove) @@ -353,9 +354,10 @@ If a job has the same settings configured on job-level, the job-level settings w #### MAX_STRIKES - Certain jobs wait before removing a download, until the jobs have caught the same download a given number of times. This is defined by max_strikes -- max_strikes defines the total permissible counts a job can catch a download; catching it once more, and it will remove the ownload. +- max_strikes defines the number of consecutive times a download can fail before it is removed. +- If a download temporarily recovers, the count is reset (for instance, a download caught twice for being slow that then picks up speed again has its count reset, even if it later becomes slow again). - Type: Integer -- Unit: Number of times the job catches a download +- Unit: Number of consecutive misses - Is Mandatory: No (Defaults to 3) #### MIN_DAYS_BETWEEN_SEARCHES diff --git a/src/jobs/removal_job.py index 353d5cc..fcafb72 100644 --- a/src/jobs/removal_job.py +++ b/src/jobs/removal_job.py @@ -21,12 +21,15 @@ class RemovalJob(ABC): self.job_name = job_name self.job = getattr(self.settings.jobs, self.job_name) self.queue_manager = QueueManager(self.arr, self.settings) + self.strikes_handler = StrikesHandler( job_name=self.job_name, arr=self.arr, max_strikes=getattr(self.job, "max_strikes", None), ) async def run(self): if not self.job.enabled: return 0 if await self.is_queue_empty(self.job_name, self.queue_scope): + if getattr(self.job, "max_strikes", None): + self.strikes_handler.all_recovered() return 0 self.affected_items = await self._find_affected_items() self.affected_downloads = self.queue_manager.group_by_download_id(self.affected_items) @@ -36,11 +39,7 @@ class RemovalJob(ABC): self.max_strikes = getattr(self.job, "max_strikes", None) if 
self.max_strikes: - self.affected_downloads = StrikesHandler( - job_name=self.job_name, - arr=self.arr, - max_strikes=self.max_strikes, - ).check_permitted_strikes(self.affected_downloads) + self.affected_downloads = self.strikes_handler.check_permitted_strikes(self.affected_downloads) # -- Removal -- await RemovalHandler( diff --git a/src/jobs/strikes_handler.py b/src/jobs/strikes_handler.py index 106d5b2..53089fb 100644 --- a/src/jobs/strikes_handler.py +++ b/src/jobs/strikes_handler.py @@ -14,6 +14,13 @@ class StrikesHandler: return self._apply_strikes_and_filter(affected_downloads) + def all_recovered(self): + self.tracker.defective[self.job_name] = {} + logger.info( + ">>> No downloads any longer marked as %s (None in queue)", + self.job_name, + ) + def _recover_downloads(self, affected_downloads): recovered = [ d_id for d_id in self.tracker.defective[self.job_name]