-
Notifications
You must be signed in to change notification settings - Fork 8
Expand file tree
/
Copy pathapp_logic.py
More file actions
3187 lines (2633 loc) · 163 KB
/
app_logic.py
File metadata and controls
3187 lines (2633 loc) · 163 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# --- Configuration ---
# Master switch for process-level CPU limiting: when True, startup pins the
# process to cores [0, 1, 2] via psutil (if the system has 3+ cores) and sets
# OMP_NUM_THREADS=3 to cap Tesseract's OpenMP threads; when False that whole
# code path is skipped.
ENABLE_PROCESS_CPU_AFFINITY = False # Set to False to disable process-level CPU core limiting
import tkinter as tk
from tkinter import ttk, messagebox
import numpy as np
import cv2
import pytesseract
import threading
import time
import queue
import sys
from PIL import Image, ImageTk
import os
import re
import gc
import traceback
import io
import base64
import concurrent.futures
import webbrowser
from logger import log_debug, set_debug_logging_enabled, is_debug_logging_enabled
from resource_handler import get_resource_path
from marian_mt_translator import MarianMTTranslator, MARIANMT_AVAILABLE as MARIANMT_LIB_AVAILABLE
from config_manager import load_app_config, save_app_config, load_ocr_preview_geometry, save_ocr_preview_geometry
from gui_builder import create_main_tab, create_settings_tab, create_custom_prompt_tab, create_api_usage_tab, create_debug_tab
from ui_elements import create_scrollable_tab
from overlay_manager import (
select_source_area_om, select_target_area_om,
create_source_overlay_om, create_target_overlay_om,
toggle_source_visibility_om, toggle_target_visibility_om, load_areas_from_config_om
)
from worker_threads import run_capture_thread, run_ocr_thread, run_translation_thread
from language_manager import LanguageManager
from language_ui import UILanguageManager
from constants import APP_VERSION, APP_RELEASE_DATE, APP_RELEASE_DATE_POLISH
from update_checker import UpdateChecker
from handlers import (
CacheManager,
ConfigurationHandler,
DisplayManager,
HotkeyHandler,
StatisticsHandler,
TranslationHandler,
UIInteractionHandler
)
from handlers.gemini_models_manager import GeminiModelsManager
from handlers.openai_models_manager import OpenAIModelsManager
# Optional third-party dependencies.
# Each probe attempts the import and records the outcome in a module-level
# *_AVAILABLE flag so the rest of the application can degrade gracefully when
# a library is missing. On success the imported module name (keyboard,
# google_translate, deepl, genai, openai) is bound at module level exactly as
# before; on ImportError the flag simply stays False.
KEYBOARD_AVAILABLE = False
try:
    import keyboard
except ImportError:
    pass
else:
    KEYBOARD_AVAILABLE = True

GOOGLE_TRANSLATE_API_AVAILABLE = False
try:
    from google.cloud import translate_v2 as google_translate
except ImportError:
    pass
else:
    GOOGLE_TRANSLATE_API_AVAILABLE = True

DEEPL_API_AVAILABLE = False
try:
    import deepl
except ImportError:
    pass
else:
    DEEPL_API_AVAILABLE = True

GEMINI_API_AVAILABLE = False
try:
    import google.generativeai as genai
except ImportError:
    pass
else:
    GEMINI_API_AVAILABLE = True

OPENAI_API_AVAILABLE = False
try:
    import openai
except ImportError:
    pass
else:
    OPENAI_API_AVAILABLE = True
MARIANMT_AVAILABLE = MARIANMT_LIB_AVAILABLE
class GameChangingTranslator:
def __init__(self, root):
self.root = root
self.root.title("Game-Changing Translator")
self.root.geometry("750x480")
self.root.minsize(650, 430)
self.root.resizable(True, True)
self._fully_initialized = False # Flag for settings save callback
self.toggle_in_progress = False
self.KEYBOARD_AVAILABLE = KEYBOARD_AVAILABLE
self.GOOGLE_TRANSLATE_API_AVAILABLE = GOOGLE_TRANSLATE_API_AVAILABLE
self.DEEPL_API_AVAILABLE = DEEPL_API_AVAILABLE
self.GEMINI_API_AVAILABLE = GEMINI_API_AVAILABLE
self.OPENAI_API_AVAILABLE = OPENAI_API_AVAILABLE
self.MARIANMT_AVAILABLE = MARIANMT_AVAILABLE
# Debug: Log execution environment information
import sys
is_compiled = getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS')
log_debug(f"Application execution environment:")
log_debug(f" Compiled/Frozen: {is_compiled}")
if is_compiled:
log_debug(f" Executable path: {sys.executable}")
log_debug(f" Bundle dir: {getattr(sys, '_MEIPASS', 'Unknown')}")
else:
log_debug(f" Python script mode")
log_debug(f" Script path: {__file__}")
log_debug(f"Library availability check:")
if not KEYBOARD_AVAILABLE: log_debug(" Keyboard library not available. Hotkeys disabled.")
else: log_debug(" Keyboard library: available")
if not GOOGLE_TRANSLATE_API_AVAILABLE: log_debug(" Google Translate API libraries not available.")
else: log_debug(" Google Translate API libraries: available")
if not DEEPL_API_AVAILABLE: log_debug(" DeepL API libraries not available.")
else: log_debug(" DeepL API libraries: available")
if not GEMINI_API_AVAILABLE: log_debug(" Gemini API libraries not available.")
else: log_debug(" Gemini API libraries: available")
if not OPENAI_API_AVAILABLE: log_debug(" OpenAI API libraries not available.")
else: log_debug(" OpenAI API libraries: available")
if not MARIANMT_AVAILABLE: log_debug(" MarianMT libraries not available.")
else: log_debug(" MarianMT libraries: available")
# Process-Level CPU Affinity: Limit application to exactly 3 cores
if ENABLE_PROCESS_CPU_AFFINITY:
try:
import psutil
cpu_count = os.cpu_count() or 2
if cpu_count >= 3: # Only limit if system has 3+ cores
# Use exactly 3 cores: [0, 1, 2]
available_cores = [0, 1, 2]
psutil.Process().cpu_affinity(available_cores)
# Also set environment variable for OpenMP (Tesseract) thread limiting
os.environ['OMP_NUM_THREADS'] = '3'
log_debug(f"Limited application to exactly 3 CPU cores: {available_cores} (out of {cpu_count} total)")
log_debug(f"Set OMP_NUM_THREADS=3 for Tesseract thread limiting")
else:
log_debug(f"System has {cpu_count} cores - no CPU limiting applied (need 3+ cores)")
except ImportError:
log_debug("psutil not available - CPU affinity not set. Install psutil for CPU core limiting.")
except Exception as e_cpu:
log_debug(f"Error setting CPU affinity: {e_cpu}")
else:
log_debug("Process-level CPU affinity DISABLED via configuration flag")
self.source_area = None
self.target_area = None
self.is_running = False
self.threads = []
self.last_image_hash = None
self.source_overlay = None
self.target_overlay = None
self.translation_text = None
self.text_stability_counter = 0
self.previous_text = ""
self.last_screenshot = None
self.last_processed_image = None
self.raw_image_for_gemini = None # WebP bytes ready for Gemini API
# Gemini OCR Batch Infrastructure (Phase 1)
self.last_processed_subtitle = None # Store last processed subtitle for successive comparison
self.batch_sequence_counter = 0 # Track batch sequence numbers
self.clear_timeout_timer_start = None # Timer for clear translation timeout
self.active_ocr_calls = set() # Track active async OCR calls
self.max_concurrent_ocr_calls = 8 # Limit concurrent OCR API calls (8 for Gemini)
# Gemini OCR Simple Management (No Queue for Gemini)
self.last_displayed_batch_sequence = 0 # Track chronological order
# Translation Async Processing Infrastructure (Phase 2)
self.translation_sequence_counter = 0 # Track translation sequence numbers
self.last_displayed_translation_sequence = 0 # Track chronological order for translations
self.active_translation_calls = set() # Track active async translation calls
self.max_concurrent_translation_calls = 6 # Limit concurrent translation API calls
# Initialize thread pools for optimized performance (especially for compiled version)
self.ocr_thread_pool = concurrent.futures.ThreadPoolExecutor(
max_workers=8,
thread_name_prefix="ApiOCR"
)
self.translation_thread_pool = concurrent.futures.ThreadPoolExecutor(
max_workers=6,
thread_name_prefix="Translation"
)
log_debug("Initialized thread pools for OCR and translation processing")
# Adaptive Scan Interval Infrastructure
self.base_scan_interval = 500 # User's preferred setting (will be updated from config)
self.current_scan_interval = 500 # Dynamic value used by capture thread
self.load_check_timer = 0
self.overload_detected = False
log_debug("Initialized adaptive scan interval infrastructure")
# OCR Preview window
self.ocr_preview_window = None
self.config = load_app_config()
self.language_manager = LanguageManager()
# Initialize UI language manager with the saved language if available
saved_language_display = self.config['Settings'].get('gui_language', 'English')
self.ui_lang = UILanguageManager()
if saved_language_display != 'English':
lang_code = self.ui_lang.get_language_code_from_name(saved_language_display)
if lang_code:
self.ui_lang.load_language(lang_code)
log_debug(f"Loaded UI language from config: {lang_code}")
self.root.bind('<Configure>', self.on_window_configure)
self._save_timer = None
self._save_settings_timer = None
# Initialize Tkinter Variables FIRST ---
self.source_colour_var = tk.StringVar(value=self.config['Settings'].get('source_area_colour', '#FFFF99'))
self.target_colour_var = tk.StringVar(value=self.config['Settings'].get('target_area_colour', '#663399'))
self.target_text_colour_var = tk.StringVar(value=self.config['Settings'].get('target_text_colour', '#FFFFFF'))
self.remove_trailing_garbage_var = tk.BooleanVar(value=self.config.getboolean('Settings', 'remove_trailing_garbage', fallback=False))
self.debug_logging_enabled_var = tk.BooleanVar(value=self.config.getboolean('Settings', 'debug_logging_enabled', fallback=True))
self.gui_language_var = tk.StringVar(value=self.config['Settings'].get('gui_language', 'English'))
self.check_for_updates_on_startup_var = tk.BooleanVar(value=self.config['Settings'].get('check_for_updates_on_startup', 'yes') == 'yes')
self.keep_linebreaks_var = tk.BooleanVar(value=self.config.getboolean('Settings', 'keep_linebreaks', fallback=False))
# OCR Model Selection (Phase 1 - Gemini OCR)
self.ocr_model_var = tk.StringVar(value=self.config['Settings'].get('ocr_model', 'tesseract'))
self.google_api_key_var = tk.StringVar(value=self.config['Settings'].get('google_translate_api_key', ''))
self.deepl_api_key_var = tk.StringVar(value=self.config['Settings'].get('deepl_api_key', ''))
self.gemini_api_key_var = tk.StringVar(value=self.config['Settings'].get('gemini_api_key', ''))
self.deepl_model_type_var = tk.StringVar(value=self.config['Settings'].get('deepl_model_type', 'latency_optimized'))
self.deepl_usage_var = tk.StringVar(value="Loading...")
translation_model_val = self.config['Settings'].get('translation_model', 'gemini_api')
# Fallback logic if configured model's library is not available
if translation_model_val == 'gemini_api' and not self.GEMINI_API_AVAILABLE:
log_debug("Configured Gemini API but library not available. Falling back...")
if self.GOOGLE_TRANSLATE_API_AVAILABLE: translation_model_val = 'google_api'
elif self.DEEPL_API_AVAILABLE: translation_model_val = 'deepl_api'
elif self.MARIANMT_AVAILABLE: translation_model_val = 'marianmt'
else: log_debug("No other translation libraries available for Gemini API fallback.")
elif translation_model_val == 'marianmt' and not self.MARIANMT_AVAILABLE:
log_debug("Configured MarianMT but library not available. Falling back...")
if self.GEMINI_API_AVAILABLE: translation_model_val = 'gemini_api'
elif self.GOOGLE_TRANSLATE_API_AVAILABLE: translation_model_val = 'google_api'
elif self.DEEPL_API_AVAILABLE: translation_model_val = 'deepl_api'
else: log_debug("No other translation libraries available, MarianMT will show error if selected.")
elif translation_model_val == 'google_api' and not self.GOOGLE_TRANSLATE_API_AVAILABLE:
log_debug("Configured Google API but library not available. Falling back...")
if self.GEMINI_API_AVAILABLE: translation_model_val = 'gemini_api'
elif self.DEEPL_API_AVAILABLE: translation_model_val = 'deepl_api'
elif self.MARIANMT_AVAILABLE: translation_model_val = 'marianmt'
else: log_debug("No other translation libraries available for Google API fallback.")
elif translation_model_val == 'deepl_api' and not self.DEEPL_API_AVAILABLE:
log_debug("Configured DeepL API but library not available. Falling back...")
if self.GEMINI_API_AVAILABLE: translation_model_val = 'gemini_api'
elif self.GOOGLE_TRANSLATE_API_AVAILABLE: translation_model_val = 'google_api'
elif self.MARIANMT_AVAILABLE: translation_model_val = 'marianmt'
else: log_debug("No other translation libraries available for DeepL API fallback.")
self.translation_model_var = tk.StringVar(value=translation_model_val)
# Define translation model names and values earlier
# Initialize with default values, will be updated with localized versions
self.translation_model_names = {
'gemini_api': 'Gemini 2.5 Flash-Lite',
'google_api': 'Google Translate API',
'deepl_api': 'DeepL API',
'marianmt': 'MarianMT (offline and free)'
}
# Initialize Gemini Models Manager before updating model names
self.gemini_models_manager = GeminiModelsManager()
# Initialize OpenAI Models Manager
self.openai_models_manager = OpenAIModelsManager()
# Update with localized names after UI language is loaded
self.update_translation_model_names()
self.translation_model_values = {v: k for k, v in self.translation_model_names.items()}
self.models_file_var = tk.StringVar(value=self.config['Settings'].get('marian_models_file'))
self.num_beams_var = tk.IntVar(value=int(self.config['Settings'].get('num_beams', '2')))
self.marian_model_var = tk.StringVar(value=self.config['Settings'].get('marian_model', '')) # Stores path
self.google_file_cache_var = tk.BooleanVar(value=self.config.getboolean('Settings', 'google_file_cache', fallback=True))
self.deepl_file_cache_var = tk.BooleanVar(value=self.config.getboolean('Settings', 'deepl_file_cache', fallback=True))
self.deepl_context_window_var = tk.IntVar(value=int(self.config['Settings'].get('deepl_context_window', '2')))
self.gemini_file_cache_var = tk.BooleanVar(value=self.config.getboolean('Settings', 'gemini_file_cache', fallback=True))
self.gemini_context_window_var = tk.IntVar(value=int(self.config['Settings'].get('gemini_context_window', '1')))
self.gemini_api_log_enabled_var = tk.BooleanVar(value=self.config.getboolean('Settings', 'gemini_api_log_enabled', fallback=True))
# OpenAI API variables
self.openai_file_cache_var = tk.BooleanVar(value=self.config.getboolean('Settings', 'openai_file_cache', fallback=True))
self.openai_context_window_var = tk.IntVar(value=int(self.config['Settings'].get('openai_context_window', '2')))
self.openai_api_log_enabled_var = tk.BooleanVar(value=self.config.getboolean('Settings', 'openai_api_log_enabled', fallback=True))
self.openai_api_key_var = tk.StringVar(value=self.config['Settings'].get('openai_api_key', ''))
# Separate Gemini model selection for OCR and Translation
self.gemini_translation_model_var = tk.StringVar(value=self.config['Settings'].get('gemini_translation_model', 'Gemini 2.5 Flash-Lite'))
self.gemini_ocr_model_var = tk.StringVar(value=self.config['Settings'].get('gemini_ocr_model', 'Gemini 2.5 Flash-Lite'))
# OpenAI model selection for OCR and Translation
self.openai_translation_model_var = tk.StringVar(value=self.config['Settings'].get('openai_translation_model', 'GPT-4o Mini'))
self.openai_ocr_model_var = tk.StringVar(value=self.config['Settings'].get('openai_ocr_model', 'GPT-4o'))
# Gemini statistics variables (initialized by GUI builder)
self.gemini_total_words_var = None
self.gemini_total_cost_var = None
# OpenAI statistics variables (initialized by GUI builder)
self.openai_total_words_var = None
self.openai_total_cost_var = None
tesseract_path_from_config = self.config['Settings'].get('tesseract_path', r'C:\Program Files\Tesseract-OCR\tesseract.exe')
self.tesseract_path_var = tk.StringVar(value=tesseract_path_from_config)
self.scan_interval_var = tk.IntVar(value=int(self.config['Settings'].get('scan_interval', '100')))
# Initialize adaptive scan interval values from user configuration
initial_scan_interval = self.scan_interval_var.get()
self.base_scan_interval = initial_scan_interval # Update with user's actual setting
self.current_scan_interval = initial_scan_interval # Start with user's setting
log_debug(f"Initialized adaptive scan interval: base={self.base_scan_interval}ms, current={self.current_scan_interval}ms")
self.clear_translation_timeout_var = tk.IntVar(value=int(self.config['Settings'].get('clear_translation_timeout', '3')))
self.stability_var = tk.IntVar(value=int(self.config['Settings'].get('stability_threshold', '2')))
self.confidence_var = tk.IntVar(value=int(self.config['Settings'].get('confidence_threshold', '60')))
self.preprocessing_mode_var = tk.StringVar(value=self.config['Settings'].get('image_preprocessing_mode', 'none'))
# Adaptive thresholding parameters
self.adaptive_block_size_var = tk.IntVar(value=int(self.config['Settings'].get('adaptive_block_size', '41')))
self.adaptive_c_var = tk.IntVar(value=int(self.config['Settings'].get('adaptive_c', '-60')))
# Create a translated display variable for preprocessing mode
self.preprocessing_display_var = tk.StringVar()
self.ocr_debugging_var = tk.BooleanVar(value=self.config.getboolean('Settings', 'ocr_debugging', fallback=False))
self.target_font_size_var = tk.IntVar(value=int(self.config['Settings'].get('target_font_size', '12')))
self.target_font_type_var = tk.StringVar(value=self.config['Settings'].get('target_font_type', 'Arial'))
self.target_opacity_var = tk.DoubleVar(value=float(self.config['Settings'].get('target_opacity', '0.15')))
self.target_text_opacity_var = tk.DoubleVar(value=float(self.config['Settings'].get('target_text_opacity', '1.0')))
# Initialize OCR model display variable here to ensure it persists across UI rebuilds
self.ocr_model_display_var = tk.StringVar()
initial_ocr_model_code = self.ocr_model_var.get()
initial_ocr_display_name = ""
if initial_ocr_model_code == 'tesseract':
initial_ocr_display_name = self.ui_lang.get_label("ocr_model_tesseract", "Tesseract (offline)")
elif self.is_gemini_model(initial_ocr_model_code):
saved_gemini_ocr_model = self.config['Settings'].get('gemini_ocr_model', '')
if saved_gemini_ocr_model and self.GEMINI_API_AVAILABLE and saved_gemini_ocr_model in self.gemini_models_manager.get_ocr_model_names():
initial_ocr_display_name = saved_gemini_ocr_model
elif self.is_openai_model(initial_ocr_model_code):
saved_openai_ocr_model = self.config['Settings'].get('openai_ocr_model', '')
if saved_openai_ocr_model and self.OPENAI_API_AVAILABLE and saved_openai_ocr_model in self.openai_models_manager.get_ocr_model_names():
initial_ocr_display_name = saved_openai_ocr_model
# Fallback if no specific display name was found
if not initial_ocr_display_name:
if self.GEMINI_API_AVAILABLE and self.gemini_models_manager.get_ocr_model_names():
initial_ocr_display_name = self.gemini_models_manager.get_ocr_model_names()[0]
elif self.OPENAI_API_AVAILABLE and self.openai_models_manager.get_ocr_model_names():
initial_ocr_display_name = self.openai_models_manager.get_ocr_model_names()[0]
else:
initial_ocr_display_name = self.ui_lang.get_label("ocr_model_tesseract", "Tesseract (offline)")
self.ocr_model_display_var.set(initial_ocr_display_name)
# Initialize Handlers
# self.cache_manager = CacheManager(self)
self.configuration_handler = ConfigurationHandler(self)
self.display_manager = DisplayManager(self)
self.hotkey_handler = HotkeyHandler(self)
self.statistics_handler = StatisticsHandler(self)
self.translation_handler = TranslationHandler(self)
self.ui_interaction_handler = UIInteractionHandler(self) # Needs self.translation_model_names
# Pre-initialize Gemini model for optimal performance (especially for compiled version)
self._pre_initialize_gemini_model()
# Initialize trace suppression mechanism and UI update detection
self._suppress_traces = False
self._ui_update_in_progress = False
def _settings_changed_callback_internal(*args, **kwargs):
if self._fully_initialized and not self._suppress_traces and not self._ui_update_in_progress:
self.save_settings()
elif self._suppress_traces:
log_debug("StringVar trace suppressed during UI update")
elif self._ui_update_in_progress:
log_debug("StringVar trace suppressed during UI update operation")
self.settings_changed_callback = _settings_changed_callback_internal
# Scan interval validation callback for Gemini OCR minimum
def _scan_interval_changed_callback(*args, **kwargs):
if self._fully_initialized and not self._suppress_traces and not self._ui_update_in_progress:
# Validate minimum scan interval for Gemini OCR
if self.get_ocr_model_setting() == 'gemini':
current_value = self.scan_interval_var.get()
if current_value < 500:
log_debug(f"Scan interval {current_value}ms too low for Gemini OCR, setting to 500ms minimum")
self.scan_interval_var.set(500)
return # Skip save_settings since we just changed the value
# Update adaptive scan interval when user changes scan interval
new_scan_interval = self.scan_interval_var.get()
if hasattr(self, 'base_scan_interval') and new_scan_interval != self.base_scan_interval:
self.base_scan_interval = new_scan_interval
# Reset to new base if not currently overloaded, or update overloaded value
if not self.overload_detected:
self.current_scan_interval = new_scan_interval
log_debug(f"Adaptive scan interval updated: base={self.base_scan_interval}ms, current={self.current_scan_interval}ms")
else:
self.current_scan_interval = int(new_scan_interval * 1.5) # Maintain 150% overload ratio
log_debug(f"Adaptive scan interval updated during overload: base={self.base_scan_interval}ms, current={self.current_scan_interval}ms")
self.save_settings()
elif self._suppress_traces:
log_debug("Scan interval trace suppressed during UI update")
elif self._ui_update_in_progress:
log_debug("Scan interval trace suppressed during UI update operation")
self.scan_interval_changed_callback = _scan_interval_changed_callback
# Add traces
self.source_colour_var.trace_add("write", self.settings_changed_callback)
self.target_colour_var.trace_add("write", self.settings_changed_callback)
self.target_text_colour_var.trace_add("write", self.settings_changed_callback)
self.remove_trailing_garbage_var.trace_add("write", self.settings_changed_callback)
self.debug_logging_enabled_var.trace_add("write", self.settings_changed_callback)
self.check_for_updates_on_startup_var.trace_add("write", self.settings_changed_callback)
self.keep_linebreaks_var.trace_add("write", self.settings_changed_callback)
self.google_api_key_var.trace_add("write", self.settings_changed_callback)
self.deepl_api_key_var.trace_add("write", self.settings_changed_callback)
self.deepl_model_type_var.trace_add("write", self.settings_changed_callback)
self.models_file_var.trace_add("write", self.settings_changed_callback)
self.google_file_cache_var.trace_add("write", self.settings_changed_callback)
self.deepl_file_cache_var.trace_add("write", self.settings_changed_callback)
self.deepl_context_window_var.trace_add("write", self.settings_changed_callback)
self.preprocessing_mode_var.trace_add("write", self.settings_changed_callback)
self.preprocessing_mode_var.trace_add("write", self.on_ocr_parameter_change)
self.adaptive_block_size_var.trace_add("write", self.settings_changed_callback)
self.adaptive_block_size_var.trace_add("write", self.on_ocr_parameter_change)
self.adaptive_c_var.trace_add("write", self.settings_changed_callback)
self.adaptive_c_var.trace_add("write", self.on_ocr_parameter_change)
self.ocr_debugging_var.trace_add("write", self.settings_changed_callback)
self.tesseract_path_var.trace_add("write", self.settings_changed_callback)
self.scan_interval_var.trace_add("write", self.scan_interval_changed_callback) # Special validation callback
self.clear_translation_timeout_var.trace_add("write", self.settings_changed_callback)
self.stability_var.trace_add("write", self.settings_changed_callback)
self.confidence_var.trace_add("write", self.settings_changed_callback)
self.target_font_size_var.trace_add("write", self.settings_changed_callback)
self.target_font_type_var.trace_add("write", self.settings_changed_callback)
self.target_opacity_var.trace_add("write", self.settings_changed_callback)
self.target_text_opacity_var.trace_add("write", self.settings_changed_callback)
self.num_beams_var.trace_add("write", self.settings_changed_callback)
self.marian_model_var.trace_add("write", self.settings_changed_callback)
self.gui_language_var.trace_add("write", self.settings_changed_callback)
self.ocr_model_var.trace_add("write", self.settings_changed_callback)
self.ocr_model_var.trace_add("write", self.on_ocr_model_change)
# Other instance variables
# Increased queue sizes from 4/3 to 8/6 to reduce queue management overhead
self.ocr_queue = queue.Queue(maxsize=8) # Increased from 4 for better buffering
self.translation_queue = queue.Queue(maxsize=6) # Increased from 3 for better buffering
self.last_successful_translation_time = 0.0
self.min_translation_interval = 0.3
self.last_translation_time = time.monotonic()
self.google_api_client = None
self.deepl_api_client = None
self.google_api_key_visible = False
self.deepl_api_key_visible = False
self.gemini_api_key_visible = False
self.openai_api_key_visible = False
self.marian_translator = None
self.marian_source_lang = None
self.marian_target_lang = None
self.google_source_lang = self.config['Settings'].get('google_source_lang', 'auto')
self.google_target_lang = self.config['Settings'].get('google_target_lang', 'en')
self.deepl_source_lang = self.config['Settings'].get('deepl_source_lang', 'auto')
self.deepl_target_lang = self.config['Settings'].get('deepl_target_lang', 'EN-GB')
self.gemini_source_lang = self.config['Settings'].get('gemini_source_lang', 'en')
self.gemini_target_lang = self.config['Settings'].get('gemini_target_lang', 'pl')
# OpenAI language settings
self.openai_source_lang = self.config['Settings'].get('openai_source_lang', 'en')
self.openai_target_lang = self.config['Settings'].get('openai_target_lang', 'pl')
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
base_dir = os.path.dirname(sys.executable)
else:
base_dir = os.path.dirname(os.path.abspath(__file__))
self.google_cache_file = os.path.join(base_dir, "googletrans_cache.txt")
self.deepl_cache_file = os.path.join(base_dir, "deepl_cache.txt")
self.gemini_cache_file = os.path.join(base_dir, "gemini_cache.txt")
self.openai_cache_file = os.path.join(base_dir, "openai_cache.txt")
self.custom_prompt_file = os.path.join(base_dir, "custom_prompt.txt")
log_debug(f"Cache file paths: Google: {self.google_cache_file}, DeepL: {self.deepl_cache_file}, Gemini: {self.gemini_cache_file}, OpenAI: {self.openai_cache_file}")
self.custom_prompt_text = ""
self.load_custom_prompt()
self.google_file_cache = {}
self.deepl_file_cache = {}
self.gemini_file_cache = {}
self.openai_file_cache = {}
self.translation_cache = {}
self.cache_manager = CacheManager(self)
# Initialize Update Checker
self.update_checker = UpdateChecker()
# Only set Tesseract path when actually using Tesseract OCR
if self.ocr_model_var.get() == 'tesseract':
pytesseract.pytesseract.tesseract_cmd = self.tesseract_path_var.get()
log_debug(f"Tesseract path set to: {self.tesseract_path_var.get()}")
else:
log_debug(f"Skipping Tesseract path initialization - using OCR model: {self.ocr_model_var.get()}")
self.stable_threshold = self.stability_var.get()
self.confidence_threshold = self.confidence_var.get()
self.clear_translation_timeout = self.clear_translation_timeout_var.get()
if not self.google_source_lang: self.google_source_lang = 'auto'
if not self.google_target_lang: self.google_target_lang = 'en'
if not self.deepl_source_lang: self.deepl_source_lang = 'auto'
if not self.deepl_target_lang: self.deepl_target_lang = 'EN-GB'
self.cache_manager.load_file_caches()
# Initialize debug logging state
set_debug_logging_enabled(self.debug_logging_enabled_var.get())
self.marian_models_dict, self.marian_models_list = self.configuration_handler.load_marian_models(localize_names=True)
self.configuration_handler.load_window_geometry()
# Initialize UI display StringVars here so they exist before create_settings_tab
self.source_display_var = tk.StringVar()
self.target_display_var = tk.StringVar()
configured_marian_path = self.marian_model_var.get()
initial_marian_display_name = ""
if configured_marian_path:
for display_name_iter, path_iter in self.marian_models_dict.items():
if path_iter == configured_marian_path:
initial_marian_display_name = display_name_iter
break
if not initial_marian_display_name and self.marian_models_list:
initial_marian_display_name = self.marian_models_list[0]
fallback_path = self.marian_models_dict.get(initial_marian_display_name, "")
if self.marian_model_var.get() != fallback_path :
self.marian_model_var.set(fallback_path)
self.marian_model_display_var = tk.StringVar(value=initial_marian_display_name)
# This uses self.translation_model_names, so it must be after its definition
initial_model_code_for_display = self.translation_model_var.get()
initial_display_name_for_model_combo = self.translation_model_names.get(initial_model_code_for_display, list(self.translation_model_names.values())[0])
self.translation_model_display_var = tk.StringVar(value=initial_display_name_for_model_combo)
self.tab_control = ttk.Notebook(root)
self.tab_control.pack(expand=True, fill="both", padx=5, pady=5)
# The tab frames will be created and assigned in the create_*_tab functions
# We'll temporarily set them to None
self.tab_main = None
self.tab_settings = None
self.tab_custom_prompt = None
self.tab_debug = None
self.tab_about = None
active_model_for_init = self.translation_model_var.get()
initial_source_val, initial_target_val = 'auto', 'en'
if active_model_for_init == 'google_api':
initial_source_val = self.google_source_lang
initial_target_val = self.google_target_lang
elif active_model_for_init == 'deepl_api':
initial_source_val = self.deepl_source_lang
initial_target_val = self.deepl_target_lang
elif active_model_for_init == 'gemini_api':
initial_source_val = self.gemini_source_lang
initial_target_val = self.gemini_target_lang
elif self.is_openai_model(active_model_for_init):
initial_source_val = self.openai_source_lang
initial_target_val = self.openai_target_lang
elif active_model_for_init == 'marianmt':
if self.marian_model_display_var.get():
# ui_interaction_handler is now defined
parsed_marian_langs_init = self.ui_interaction_handler.parse_marian_model_for_langs(self.marian_model_display_var.get()) or \
self.ui_interaction_handler.parse_marian_model_for_langs(self.marian_model_var.get())
if parsed_marian_langs_init:
initial_source_val = parsed_marian_langs_init[0]
initial_target_val = parsed_marian_langs_init[1]
self.marian_source_lang = initial_source_val
self.marian_target_lang = initial_target_val
else:
initial_source_val, initial_target_val = '', ''
else:
initial_source_val, initial_target_val = '', ''
self.source_lang_var = tk.StringVar(value=initial_source_val)
self.target_lang_var = tk.StringVar(value=initial_target_val)
self.lang_code_to_name = self.language_manager
# Create the main tabs
create_main_tab(self)
create_settings_tab(self)
create_custom_prompt_tab(self)
create_api_usage_tab(self)
create_debug_tab(self)
# Create About tab using the centralized function
self.create_about_tab()
# Handle tab change events to set focus appropriately
def on_tab_changed(event):
selected_tab_index = self.tab_control.index(self.tab_control.select())
if selected_tab_index == 0 and hasattr(self, 'main_tab_start_button') and self.main_tab_start_button.winfo_exists():
self.main_tab_start_button.focus_set()
elif selected_tab_index == 1 and hasattr(self, 'settings_tab_save_button') and self.settings_tab_save_button.winfo_exists():
self.settings_tab_save_button.focus_set()
elif selected_tab_index == 2 and hasattr(self, 'refresh_api_statistics'):
# API Usage tab - refresh statistics when accessed
self.root.after(100, self.refresh_api_statistics)
self.tab_control.bind("<<NotebookTabChanged>>", on_tab_changed)
self.ui_interaction_handler.on_translation_model_selection_changed(initial_setup=True)
# Initialize localized dropdowns after everything is set up
self.root.after(50, self.ui_interaction_handler.update_all_dropdowns_for_language_change)
self.root.after(100, self.load_initial_overlay_areas)
self.root.after(200, self.ensure_window_visible)
self.hotkey_handler.setup_hotkeys()
# Add periodic network cleanup
self.setup_network_cleanup()
log_debug(f"Application initialized. Stability: {self.stable_threshold}, Confidence: {self.confidence_threshold}")
self.root.protocol("WM_DELETE_WINDOW", self.on_closing)
self._fully_initialized = True
log_debug("GameChangingTranslator fully initialized.")
# Automatic update check for compiled version
import sys
is_compiled = getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS')
if is_compiled and self.check_for_updates_on_startup_var.get():
log_debug("Compiled version detected with automatic update check enabled - scheduling automatic update check")
# Schedule the automatic update check to run after UI is fully loaded
self.root.after(1000, lambda: self.check_for_updates(auto_check=True))
elif is_compiled:
log_debug("Compiled version detected but automatic update check is disabled")
else:
log_debug("Source code version detected - no automatic update check")
# Ensure OCR model UI is correctly set up on initial load
if hasattr(self, 'ui_interaction_handler'):
self.ui_interaction_handler.update_ocr_model_ui()
# Update usage statistics for selected models - use after_idle to ensure GUI is ready
if hasattr(self, 'translation_model_var'):
selected_model = self.translation_model_var.get()
if selected_model == 'gemini_api':
self.root.after_idle(lambda: self._delayed_gemini_stats_update())
elif selected_model == 'deepl_api':
self.root.after_idle(lambda: self._delayed_deepl_usage_update())
# Always update DeepL usage since it's now always visible in API Usage tab
self.root.after_idle(lambda: self._delayed_deepl_usage_update())
# Refresh API statistics for the new API Usage tab
self.root.after_idle(lambda: self._delayed_api_stats_refresh())
def _delayed_api_stats_refresh(self):
"""Delayed API statistics refresh to ensure GUI is fully ready."""
try:
self.refresh_api_statistics()
except Exception as e:
log_debug(f"Error in delayed API statistics refresh: {e}")
def _relabel_stat_widgets(self, widget_map, entries):
    """Apply localized text to each statistic label widget that still exists.

    Args:
        widget_map: dict mapping label_key -> label widget.
        entries: iterable of (label_key, fallback_text) pairs; label_key is
            both the widget_map key and the ui_lang lookup key.
    """
    for label_key, fallback_text in entries:
        widget = widget_map.get(label_key)
        if widget is not None and widget.winfo_exists():
            widget.config(text=self.ui_lang.get_label(label_key, fallback_text))

def update_api_usage_tab_for_language(self):
    """Update API Usage tab labels when language changes.

    Re-localizes button captions and the OCR / translation / combined
    statistic labels in place. Section headers are refreshed lazily the
    next time the tab is opened, since they are hard to update individually.
    """
    try:
        # Section labels: just note the request — the tab rebuilds its
        # headers on next access.
        if hasattr(self, 'tab_api_usage'):
            log_debug("API Usage tab language update requested - will update on next tab access")
        # Button captions (only those that were created and still exist).
        button_specs = (
            ('refresh_stats_button', "api_usage_refresh_btn", "Refresh Statistics"),
            ('export_csv_button', "api_usage_export_csv_btn", "Export to CSV"),
            ('export_text_button', "api_usage_export_text_btn", "Export to Text"),
            ('copy_stats_button', "api_usage_copy_btn", "Copy"),
        )
        for attr_name, label_key, fallback_text in button_specs:
            button = getattr(self, attr_name, None)
            if button is not None and button.winfo_exists():
                button.config(text=self.ui_lang.get_label(label_key, fallback_text))
        # Statistic labels, grouped exactly as they appear in the tab.
        if hasattr(self, 'ocr_stat_labels'):
            self._relabel_stat_widgets(self.ocr_stat_labels, [
                ("api_usage_total_ocr_calls", "Total OCR Calls:"),
                ("api_usage_avg_cost_per_call", "Average Cost per Call:"),
                ("api_usage_avg_cost_per_minute", "Average Cost per Minute:"),
                ("api_usage_avg_cost_per_hour", "Average Cost per Hour:"),
                ("api_usage_total_ocr_cost", "Total OCR Cost:"),
            ])
        if hasattr(self, 'translation_stat_labels'):
            self._relabel_stat_widgets(self.translation_stat_labels, [
                ("api_usage_total_translation_calls", "Total Translation Calls:"),
                ("api_usage_total_words_translated", "Total Words Translated:"),
                ("api_usage_words_per_minute", "Average Words per Minute:"),
                ("api_usage_avg_cost_per_word", "Average Cost per Word:"),
                ("api_usage_avg_cost_per_call", "Average Cost per Call:"),
                ("api_usage_avg_cost_per_minute", "Average Cost per Minute:"),
                ("api_usage_avg_cost_per_hour", "Average Cost per Hour:"),
                ("api_usage_total_translation_cost", "Total Translation Cost:"),
            ])
        if hasattr(self, 'combined_stat_labels'):
            self._relabel_stat_widgets(self.combined_stat_labels, [
                ("api_usage_combined_cost_per_minute", "Combined Cost per Minute:"),
                ("api_usage_combined_cost_per_hour", "Combined Cost per Hour:"),
                ("api_usage_total_api_cost", "Total API Cost:"),
            ])
        # API usage info label (present only when the helper exists).
        if hasattr(self, 'update_api_usage_info_for_language'):
            self.update_api_usage_info_for_language()
        log_debug("Updated API Usage tab labels for language change")
    except Exception as e:
        log_debug(f"Error updating API Usage tab for language change: {e}")
def ensure_window_visible(self):
    """Bring the main window back on screen after initialization finishes.

    De-iconifies and raises the root window; errors are logged, never raised.
    """
    try:
        if not self.root.winfo_exists():
            return
        self.root.deiconify()
        self.root.lift()
        log_debug("Main window visibility ensured after initialization")
    except Exception as e:
        log_debug(f"Error ensuring window visibility: {e}")
def on_ocr_parameter_change(self, *args):
    """Called when OCR parameters change to refresh preview if it's open.

    Debounces the refresh (200 ms) and clears a stale preview-window
    reference when the window has been destroyed.
    """
    preview = self.ocr_preview_window
    if preview is None:
        return
    try:
        if not preview.winfo_exists():
            # Window was destroyed but the reference wasn't cleared.
            self.ocr_preview_window = None
            return
        # Debounce: cancel any pending refresh and reschedule.
        if hasattr(self, '_preview_refresh_timer'):
            self.root.after_cancel(self._preview_refresh_timer)
        self._preview_refresh_timer = self.root.after(200, self.refresh_ocr_preview)
    except tk.TclError:
        # Window was destroyed between the None check and the call.
        self.ocr_preview_window = None
def on_ocr_model_change(self, *args):
    """Called when OCR model selection changes to update UI visibility.

    Also keeps the Gemini OCR session in sync with a running translation,
    enforces the 500 ms minimum scan interval for Gemini OCR, and debounces
    a refresh of the OCR preview window if one is open.
    """
    try:
        # Hoist the repeated lookups: the selected model cannot change
        # while this callback runs.
        current_ocr_model = self.get_ocr_model_setting()
        # Start/end the Gemini OCR session when translation is running.
        # The two transitions are mutually exclusive, so use if/else.
        if hasattr(self, 'translation_handler') and self.is_running:
            if current_ocr_model == 'gemini':
                self.translation_handler.start_ocr_session()
            else:
                self.translation_handler.request_end_ocr_session()
        # Update UI to show/hide Tesseract-specific fields
        if hasattr(self, 'ui_interaction_handler'):
            self.ui_interaction_handler.update_ocr_model_ui()
        # Update adaptive fields visibility based on new OCR model
        if hasattr(self, 'update_adaptive_fields_visibility'):
            self.update_adaptive_fields_visibility()
        # Validate scan interval when switching to Gemini OCR
        if current_ocr_model == 'gemini':
            current_value = self.scan_interval_var.get()
            if current_value < 500:
                log_debug(f"OCR model changed to Gemini: updating scan interval from {current_value}ms to 500ms minimum")
                self.scan_interval_var.set(500)
        # Refresh OCR preview if it's open to use the new OCR model
        if self.ocr_preview_window is not None:
            try:
                if self.ocr_preview_window.winfo_exists():
                    if hasattr(self, '_preview_refresh_timer'):
                        self.root.after_cancel(self._preview_refresh_timer)
                    self._preview_refresh_timer = self.root.after(200, self.refresh_ocr_preview)
                else:
                    # Stale reference: window already destroyed.
                    self.ocr_preview_window = None
            except tk.TclError:
                self.ocr_preview_window = None
        log_debug(f"OCR model changed to: {self.ocr_model_var.get()}")
    except Exception as e:
        log_debug(f"Error in OCR model change callback: {e}")
def save_settings(self):
    """Persist settings via the UI handler once startup has completed.

    Returns False (and logs) when called before full initialization;
    otherwise returns whatever the UI interaction handler reports.
    """
    if not self._fully_initialized:
        log_debug("Attempted to save settings before full initialization.")
        return False
    return self.ui_interaction_handler.save_settings()
def suppress_traces(self) -> None:
    """Suppress StringVar traces during UI updates to prevent cascading saves"""
    # Flag is presumably consulted by the StringVar trace callbacks (defined
    # elsewhere) so they can skip their save logic — confirm against handlers.
    self._suppress_traces = True
    log_debug("StringVar traces suppressed")
def restore_traces(self) -> None:
    """Restore StringVar traces after UI updates complete"""
    # Clearing the flag re-enables whatever save behavior the trace
    # callbacks gate on it.
    self._suppress_traces = False
    log_debug("StringVar traces restored")
def start_ui_update(self) -> None:
    """Mark the start of a UI update operation to suppress all saves"""
    # Sets the coarse operation flag, then also suppresses the
    # finer-grained StringVar traces via suppress_traces().
    self._ui_update_in_progress = True
    self.suppress_traces()
    log_debug("UI update operation started - all saves suppressed")
def end_ui_update(self) -> None:
    """Mark the end of a UI update operation and restore normal save behavior"""
    # Mirrors start_ui_update: clear the operation flag, then restore traces.
    self._ui_update_in_progress = False
    self.restore_traces()
    log_debug("UI update operation ended - saves restored")
def on_window_configure(self, event) -> None:
    """Forward window <Configure> events to the configuration handler."""
    self.configuration_handler.on_window_configure(event)
def save_current_window_geometry(self) -> None:
    """Delegate saving of the current window geometry to the configuration handler."""
    self.configuration_handler.save_current_window_geometry()
def get_tesseract_lang_code(self):
    """Resolve the Tesseract language code for the active source language.

    When the MarianMT model is active and a Marian source language is set,
    that language overrides the generic source-language selection before
    the code is mapped through the language manager.
    """
    source_code = self.source_lang_var.get()
    model = self.translation_model_var.get()
    if model == 'marianmt' and self.marian_source_lang:
        source_code = self.marian_source_lang
    return self.language_manager.get_tesseract_code(source_code, model)
def browse_tesseract(self) -> None:
    """Delegate Tesseract path browsing to the configuration handler."""
    self.configuration_handler.browse_tesseract()
def browse_marian_models_file(self) -> None:
    """Delegate MarianMT models file browsing to the configuration handler."""
    self.configuration_handler.browse_marian_models_file()
def update_translation_text(self, text_to_display) -> None:
    """Delegate rendering of translated text to the display manager."""
    self.display_manager.update_translation_text(text_to_display)
def update_debug_display(self, original_img_pil, processed_img_cv, ocr_text_content) -> None:
    """Delegate debug-view updates (source/processed images and OCR text) to the display manager."""
    self.display_manager.update_debug_display(original_img_pil, processed_img_cv, ocr_text_content)
def _widget_exists_safely(self, widget):
"""Safely check if a widget exists - works with both tkinter and PySide widgets"""
if not widget:
return False
try:
# Try tkinter method first
if hasattr(widget, 'winfo_exists'):
return widget.winfo_exists()
# For PySide widgets, check if they're accessible
elif hasattr(widget, 'isVisible'):
return True # PySide widgets exist until destroyed
else:
return True # Assume widget exists if we can't check
except Exception as e:
log_debug(f"Error checking widget existence: {e}")
return False
def convert_to_webp_for_api(self, pil_image):
    """Convert PIL image to lossless WebP bytes for API calls.

    Alpha-carrying modes (RGBA/LA) are flattened onto a white RGB
    background first. Returns the encoded bytes, or None on failure.
    """
    try:
        if pil_image.mode in ('RGBA', 'LA'):
            # Flatten transparency onto white before encoding.
            flattened = Image.new('RGB', pil_image.size, (255, 255, 255))
            if pil_image.mode == 'RGBA':
                flattened.paste(pil_image, mask=pil_image.split()[-1])
            else:
                flattened.paste(pil_image)
            pil_image = flattened
        # Encode into an in-memory buffer as lossless WebP (method=0 is
        # the fastest encoding effort; exact=True preserves RGB values).
        output = io.BytesIO()
        pil_image.save(output, format='WebP', lossless=True, method=0, exact=True)
        webp_bytes = output.getvalue()
        log_debug(f"Converted PIL image to WebP for API: {len(webp_bytes)} bytes")
        return webp_bytes
    except Exception as e:
        log_debug(f"Error converting image to WebP for API: {e}")
        return None
def _pre_initialize_gemini_model(self):
"""Pre-configure Gemini API at startup to avoid thread initialization delays."""
try:
if not self.GEMINI_API_AVAILABLE:
return
gemini_api_key = self.gemini_api_key_var.get().strip()
if not gemini_api_key:
return
# This is the only part that's still useful - it sets the global API key.
# The client itself will be created by the provider when needed.
import google.generativeai as genai
genai.configure(api_key=gemini_api_key)
log_debug("Gemini API pre-configured")
except Exception as e:
log_debug(f"Error in Gemini model pre-configuration: {e}")
# Gemini OCR Batch Processing Methods (Phase 1)
def get_ocr_model_setting(self):
    """Return the OCR engine identifier currently selected in the UI."""
    return self.ocr_model_var.get()
def update_adaptive_scan_interval(self):
"""Adjust scan interval based on current OCR API load to prevent bottlenecks."""
now = time.monotonic()
# Check load every 2 seconds
if now - self.load_check_timer < 2.0:
return
self.load_check_timer = now
# Measure current OCR load
active_ocr_count = len(self.active_ocr_calls)
max_ocr_calls = self.max_concurrent_ocr_calls
# DEBUG: Always log the current state
log_debug(f"ADAPTIVE: Checking OCR load - Active calls: {active_ocr_count}/{max_ocr_calls}, Current interval: {self.current_scan_interval}ms, Overload detected: {self.overload_detected}")
# Get user's preferred base interval
base_interval = self.scan_interval_var.get() # User's setting in milliseconds
# Update base_scan_interval to track user changes
self.base_scan_interval = base_interval
# Apply the user's specific requirements:
# If active OCR API calls > 5, increase scan interval to 150% of current value
# If active OCR API calls fall below 5, restore original scan interval
if active_ocr_count > 5:
if not self.overload_detected:
# First detection of overload
self.current_scan_interval = int(base_interval * 1.5) # 150%
self.overload_detected = True
log_debug(f"ADAPTIVE: OCR overload detected ({active_ocr_count} active calls), increasing scan interval to {self.current_scan_interval}ms")
else:
# Already in overload state, maintain increased interval
log_debug(f"ADAPTIVE: OCR still overloaded ({active_ocr_count} active calls), maintaining scan interval at {self.current_scan_interval}ms")
# Stay at increased interval while overloaded
elif active_ocr_count < 5:
if self.overload_detected: