Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 4 additions & 5 deletions bazarr.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,11 +106,10 @@ def check_status():
os.remove(restart_file)
except Exception:
print('Unable to delete restart file.')
finally:
terminate_child()
print("Bazarr is restarting...")
child_process = start_bazarr()
return
terminate_child()
print("Bazarr is restarting...")
child_process = start_bazarr()
return

if not is_process_running(child_process):
print("Bazarr child process has stopped unexpectedly. Shutting down...")
Expand Down
4 changes: 2 additions & 2 deletions bazarr/app/get_providers.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,13 +45,13 @@ def time_until_midnight(timezone) -> datetime.timedelta:
# Titulky resets its download limits at the start of a new day from its perspective - the Europe/Prague timezone
# Needs to convert to offset-naive dt
def titulky_limit_reset_timedelta():
return time_until_midnight(timezone=datetime.datetime.now(ZoneInfo('Europe/Prague')))
return time_until_midnight(timezone=ZoneInfo('Europe/Prague'))


# LegendasDivx resets its searches limit at approximately midnight, Lisbon time, every day. We wait 1 more hour just
# to be sure.
def legendasdivx_limit_reset_timedelta():
return time_until_midnight(timezone=datetime.datetime.now(ZoneInfo('Europe/Lisbon'))) + datetime.timedelta(minutes=60)
return time_until_midnight(timezone=ZoneInfo('Europe/Lisbon')) + datetime.timedelta(minutes=60)


VALID_THROTTLE_EXCEPTIONS = (TooManyRequests, DownloadLimitExceeded, ServiceUnavailable, APIThrottled,
Expand Down
15 changes: 15 additions & 0 deletions bazarr/subtitles/download.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,21 @@ def generate_subtitles(path, languages, audio_language, sceneName, title, media_
saved_any = False

if providers:
if job_id:
from app.jobs_queue import jobs_queue as _jq
active_providers = [p for p in providers if p not in pool.discarded_providers]
_provider_count = len(active_providers)

def _on_provider(provider_name):
try:
idx = active_providers.index(provider_name) + 1
except ValueError:
idx = 0
_jq.update_job_progress(job_id=job_id,
progress_message=f"Searching {provider_name} ({idx}/{_provider_count})")

pool.provider_progress_callback = _on_provider

if forced_minimum_score:
min_score = int(forced_minimum_score) + 1
for language in language_set:
Expand Down
7 changes: 6 additions & 1 deletion bazarr/subtitles/mass_download/movies.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ def movies_download_subtitles(no, job_id=None, job_sub_function=False):

def movie_download_specific_subtitles(radarr_id, language, hi, forced, job_id=None):
if not job_id:
return jobs_queue.add_job_from_function("Searching subtitles", progress_max=1, is_progress=False)
return jobs_queue.add_job_from_function("Searching subtitles", is_progress=True)

movieInfo = database.execute(
select(
Expand Down Expand Up @@ -153,6 +153,7 @@ def movie_download_specific_subtitles(radarr_id, language, hi, forced, job_id=No
language_str = language

jobs_queue.update_job_name(job_id=job_id, new_job_name=f"Searching {language_str.upper()} for {title}")
jobs_queue.update_job_progress(job_id=job_id, progress_message="Preparing search...")

audio_language_list = get_audio_profile_languages(movieInfo.audio_language)
if len(audio_language_list) > 0:
Expand All @@ -171,7 +172,11 @@ def movie_download_specific_subtitles(radarr_id, language, hi, forced, job_id=No
history_log_movie(1, radarr_id, result)
send_notifications_movie(radarr_id, result.message)
store_subtitles_movie(result.path, moviePath)
jobs_queue.update_job_progress(job_id=job_id, progress_value='max',
progress_message="Subtitle downloaded")
else:
jobs_queue.update_job_progress(job_id=job_id, progress_value='max',
progress_message="No subtitles found")
event_stream(type='movie', payload=radarr_id)
return '', 204
except OSError:
Expand Down
7 changes: 6 additions & 1 deletion bazarr/subtitles/mass_download/series.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ def episode_download_subtitles(no, job_id=None, job_sub_function=False, provider

def episode_download_specific_subtitles(sonarr_series_id, sonarr_episode_id, language, hi, forced, job_id=None):
if not job_id:
return jobs_queue.add_job_from_function("Searching subtitles", progress_max=1, is_progress=False)
return jobs_queue.add_job_from_function("Searching subtitles", is_progress=True)

episodeInfo = database.execute(
select(TableEpisodes.path,
Expand Down Expand Up @@ -221,6 +221,7 @@ def episode_download_specific_subtitles(sonarr_series_id, sonarr_episode_id, lan

jobs_queue.update_job_name(job_id=job_id,
new_job_name=f"Searching {language_str.upper()} for {episode_long_title}")
jobs_queue.update_job_progress(job_id=job_id, progress_message="Preparing search...")

audio_language_list = get_audio_profile_languages(episodeInfo.audio_language)
if len(audio_language_list) > 0:
Expand All @@ -239,7 +240,11 @@ def episode_download_specific_subtitles(sonarr_series_id, sonarr_episode_id, lan
history_log(1, sonarr_series_id, sonarr_episode_id, result)
send_notifications(sonarr_series_id, sonarr_episode_id, result.message)
store_subtitles(result.path, episodePath)
jobs_queue.update_job_progress(job_id=job_id, progress_value='max',
progress_message="Subtitle downloaded")
else:
jobs_queue.update_job_progress(job_id=job_id, progress_value='max',
progress_message="No subtitles found")
event_stream(type='episode', payload=sonarr_episode_id)
return '', 204
except OSError:
Expand Down
96 changes: 75 additions & 21 deletions bazarr/subtitles/upgrade.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,14 @@ def upgrade_episodes_subtitles(job_id=None, sonarr_series_ids=None, wait_for_com
for item in episodes_data:
# do not consider subtitles that do not exist on disk anymore
if item['subtitles_path'] not in item['external_subtitles']:
continue
current_sub = _find_current_subtitle_for_language(item['language'], item['external_subtitles'])
if current_sub:
logging.debug(f"Upgrade candidate {item['id']} ({item['seriesTitle']} S{item['season']:02d}E"
f"{item['episode']:02d}): history path no longer on disk, using current subtitle "
f"for same language ({current_sub})")
item['subtitles_path'] = current_sub
else:
continue

# Mark upgradable and get original_id
item.update({'original_id': episodes_to_upgrade.get(item['id'])})
Expand Down Expand Up @@ -140,7 +147,7 @@ def upgrade_episodes_subtitles(job_id=None, sonarr_series_ids=None, wait_for_com
episode['seriesTitle'],
'series',
episode['profileId'],
forced_minimum_score=int(episode['score']) + 1,
forced_minimum_score=int(episode['score'] or 0) + 1,
is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace(
episode['subtitles_path']),
Expand Down Expand Up @@ -185,28 +192,48 @@ def upgrade_movies_subtitles(job_id=None, radarr_ids=None, wait_for_completion=F
if radarr_ids:
query = query.where(TableHistoryMovie.radarrId.in_(radarr_ids))

movies_data = [{
'id': x.id,
'title': x.title,
'language': x.language,
'audio_language': x.audio_language,
'video_path': x.video_path,
'sceneName': x.sceneName,
'score': x.score,
'radarrId': x.radarrId,
'path': x.path,
'profileId': x.profileId,
'subtitles_path': x.subtitles_path,
'external_subtitles': [y[1] for y in ast.literal_eval(x.external_subtitles) if y[1]],
} for x in database.execute(query)
.all() if _language_still_desired(x.language, x.profileId) and
x.video_path == x.path
]
all_rows = database.execute(query).all()
movies_data = []
for x in all_rows:
if not _language_still_desired(x.language, x.profileId):
if x.id in movies_to_upgrade:
logging.debug(f"Upgrade candidate {x.id} ({x.title}) dropped: language {x.language} no longer desired "
f"in profile {x.profileId}")
continue
if x.video_path != x.path:
if x.id in movies_to_upgrade:
logging.debug(f"Upgrade candidate {x.id} ({x.title}) dropped: video_path mismatch "
f"(history={x.video_path} != current={x.path})")
continue
movies_data.append({
'id': x.id,
'title': x.title,
'language': x.language,
'audio_language': x.audio_language,
'video_path': x.video_path,
'sceneName': x.sceneName,
'score': x.score,
'radarrId': x.radarrId,
'path': x.path,
'profileId': x.profileId,
'subtitles_path': x.subtitles_path,
'external_subtitles': [y[1] for y in ast.literal_eval(x.external_subtitles) if y[1]],
})

for item in movies_data:
# do not consider subtitles that do not exist on disk anymore
if item['subtitles_path'] not in item['external_subtitles']:
continue
# try to find a current subtitle for the same language (file may have been renamed/re-downloaded)
current_sub = _find_current_subtitle_for_language(item['language'], item['external_subtitles'])
if current_sub:
logging.debug(f"Upgrade candidate {item['id']} ({item['title']}): history path no longer on disk, "
f"using current subtitle for same language ({current_sub})")
item['subtitles_path'] = current_sub
else:
if item['id'] in movies_to_upgrade:
logging.debug(f"Upgrade candidate {item['id']} ({item['title']}) dropped: no subtitle for language "
f"{item['language']} found on disk")
continue

# Mark upgradable and get original_id
item.update({'original_id': movies_to_upgrade.get(item['id'])})
Expand Down Expand Up @@ -251,7 +278,7 @@ def upgrade_movies_subtitles(job_id=None, radarr_ids=None, wait_for_completion=F
movie['title'],
'movie',
movie['profileId'],
forced_minimum_score=int(movie['score']) + 1,
forced_minimum_score=int(movie['score'] or 0) + 1,
is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace_movie(
movie['subtitles_path']),
Expand Down Expand Up @@ -281,6 +308,33 @@ def get_queries_condition_parameters():
return [minimum_timestamp, query_actions]


def _find_current_subtitle_for_language(language_string, external_subtitles):
"""Find a current subtitle file on disk that matches the language from history.

When a subtitle was re-downloaded or renamed (e.g. .hu.hi.srt -> .hu.srt),
the history still references the old path. This finds the current file for
the same language so upgrades can still proceed.
"""
lang_code = language_string.split(':')[0]
is_hi = language_string.endswith(':hi')
is_forced = language_string.endswith(':forced')

for sub_path in external_subtitles:
sub_lower = sub_path.lower()
# Check if language code is in the filename
if f'.{lang_code.lower()}.' not in sub_lower and not sub_lower.endswith(f'.{lang_code.lower()}'):
continue
# Match HI/forced flags
has_hi = '.hi.' in sub_lower or sub_lower.endswith('.hi.srt')
has_forced = '.forced.' in sub_lower or sub_lower.endswith('.forced.srt')
if is_hi == has_hi and is_forced == has_forced:
return sub_path
# If we wanted HI but only non-HI exists (or vice versa), still a candidate
if not is_forced and not has_forced and lang_code.lower() in sub_lower:
return sub_path
return None


def parse_language_string(language_string):
if language_string.endswith('forced'):
language = language_string.split(':')[0]
Expand Down
5 changes: 5 additions & 0 deletions custom_libs/subliminal_patch/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -238,6 +238,8 @@ def __init__(self, providers=None, provider_configs=None, blacklist=None, ban_li

self._born = time.time()

self.provider_progress_callback = None

if not self.throttle_callback:
self.throttle_callback = lambda x, y, ids=None, language=None: x

Expand Down Expand Up @@ -376,6 +378,9 @@ def list_subtitles_provider(self, provider, video, languages):

logger.info('Listing subtitles with provider %r and languages %r', provider, to_request)

if self.provider_progress_callback:
self.provider_progress_callback(provider)

try:
results = self[provider].list_subtitles(video, to_request)
seen = []
Expand Down
14 changes: 12 additions & 2 deletions custom_libs/subliminal_patch/providers/opensubtitles.py
Original file line number Diff line number Diff line change
Expand Up @@ -403,8 +403,18 @@ def use_token_or_login(self, func):

def initialize(self):
if self.use_web_scraper:
# Skip authentication for scraper mode
logger.debug("Web scraper mode - skipping authentication")
# Verify scraper service is reachable before searching
try:
base_url = self.scraper_service_url.rstrip('/')
if not base_url.startswith(('http://', 'https://')):
base_url = f'http://{base_url}'
resp = requests.get(f'{base_url}/health', timeout=5)
resp.raise_for_status()
logger.info("Scraper service at %s is healthy", self.scraper_service_url)
except Exception as e:
raise ServiceUnavailable(
f'OpenSubtitles scraper at {self.scraper_service_url} is not reachable: {e}'
)
self.server = None
self.token = None
return
Expand Down
Loading