Added recovery message when queue is empty

readme update
This commit is contained in:
Benjamin Harder
2025-05-19 19:50:56 +02:00
parent 1bee35c029
commit a3a3e8e5cb
3 changed files with 16 additions and 8 deletions

View File

@@ -17,6 +17,7 @@ _Like this app? Thanks for giving it a_ ⭐️
- [TIMER](#timer)
- [SSL_VERIFICATION](#ssl_verification)
- [IGNORE_DOWNLOAD_CLIENTS](#ignore_download_clients)
- [PRIVATE_TRACKER_HANDLING / PUBLIC_TRACKER_HANDLING](#private_tracker_handling--public_tracker_handling)
- [OBSOLETE_TAG](#obsolete_tag)
- [PROTECTED_TAGS](#protected_tag)
- [Job Defaults](#job-defaults)
@@ -320,7 +321,7 @@ Configures the general behavior of the application (across all features)
- Note that this only works for qbittorrent currently (if you set up qbittorrent in your config)
- "remove" means that torrents are removed (default behavior)
- "skip" means they are disregarded (which some users might find handy to protect their private trackers prematurely, i.e., before their seed targets are met)
- "obsolete_tag" means that rather than being removed, the torrents are tagged. This allows other applications (such as [qbit_manage](https://github.com/StuffAnThings/qbit_manage) to monitor them and remove them once seed targets are fulfilled
- "obsolete_tag" means that rather than being removed, the torrents are tagged. This allows other applications (such as [qbit_manage](https://github.com/StuffAnThings/qbit_manage) to monitor them and remove them once seed targets are fulfilled)
- Type: String
- Permissible Values: remove, skip, obsolete_tag
- Is Mandatory: No (Defaults to remove)
@@ -353,9 +354,10 @@ If a job has the same settings configured on job-level, the job-level settings w
#### MAX_STRIKES
- Certain jobs wait before removing a download, until the jobs have caught the same download a given number of times. This is defined by max_strikes
- max_strikes defines the total permissible counts a job can catch a download; catching it once more, and it will remove the download.
- max_strikes defines the number of consecutive times a download can fail before it is removed.
- If a download temporarily recovers, the count is reset (for instance, being caught twice for being slow and then picking up speed again before becoming slow again)
- Type: Integer
- Unit: Number of times the job catches a download
- Unit: Number of consecutive misses
- Is Mandatory: No (Defaults to 3)
#### MIN_DAYS_BETWEEN_SEARCHES

View File

@@ -21,12 +21,15 @@ class RemovalJob(ABC):
self.job_name = job_name
self.job = getattr(self.settings.jobs, self.job_name)
self.queue_manager = QueueManager(self.arr, self.settings)
self.strikes_handler = StrikesHandler( job_name=self.job_name, arr=self.arr, max_strikes=self.max_strikes, )
async def run(self):
if not self.job.enabled:
return 0
if await self.is_queue_empty(self.job_name, self.queue_scope):
if self.max_strikes:
self.strikes_handler.all_recovered()
return 0
self.affected_items = await self._find_affected_items()
self.affected_downloads = self.queue_manager.group_by_download_id(self.affected_items)
@@ -36,11 +39,7 @@ class RemovalJob(ABC):
self.max_strikes = getattr(self.job, "max_strikes", None)
if self.max_strikes:
self.affected_downloads = StrikesHandler(
job_name=self.job_name,
arr=self.arr,
max_strikes=self.max_strikes,
).check_permitted_strikes(self.affected_downloads)
self.affected_downloads = self.strikes_handler.check_permitted_strikes(self.affected_downloads)
# -- Removal --
await RemovalHandler(

View File

@@ -14,6 +14,13 @@ class StrikesHandler:
return self._apply_strikes_and_filter(affected_downloads)
def all_recovered(self):
self.tracker.defective[self.job_name] = {}
logger.info(
">>> No downloads any longer marked as %s (None in queue)",
self.job_name,
)
def _recover_downloads(self, affected_downloads):
recovered = [
d_id for d_id in self.tracker.defective[self.job_name]