Untitled
unknown
plain_text
5 months ago
6.5 kB
3
Indexable
def classify_image(self, image_path):
    """Classify the image at *image_path* into a bin category.

    Strategy: if an internet connection and an OpenAI client are available,
    try the OpenAI vision API first (with its own timeout/cancellation
    handling); on failure or timeout — or when offline — fall back to the
    local TFLite-style model via ``classify_with_local_model``.

    A tkinter ``LoadingScreen`` is shown during each phase; temp files are
    always cleaned up in ``finally``.

    Returns the classification result string.
    Raises if the local model is needed but not initialized, or on an
    unrecoverable classification error (re-raised after logging).
    """
    try:
        print("\n=== Starting Image Classification ===")

        # Decide up front whether the cloud path is even possible.
        has_internet = self.check_internet_connection()

        # Recover the root window through any live CircularProgress instance
        # so we can parent loading screens and read UI settings.
        # NOTE(review): assumes at least one CircularProgress exists — confirm.
        root = next(iter(CircularProgress._instances)).winfo_toplevel()
        language = getattr(root, 'LANGUAGE', 'EN')

        def attempt_openai_classification(loading_screen):
            """Try the OpenAI API; return (result, None) or (None, error_tag)."""
            try:
                base64_image = self.encode_image(image_path)
                prompt = self.bin_config.get_ai_prompt(language)

                # Events coordinating the worker thread: one signals a
                # response (or error) arrived, one tells the worker that the
                # caller gave up and any late result must be discarded.
                response_received = threading.Event()
                cancel_thread = threading.Event()
                api_response = {'result': None, 'error': None}

                def api_call():
                    try:
                        # Bail out early if the caller already cancelled.
                        if cancel_thread.is_set():
                            return
                        response = self.client.chat.completions.create(
                            # BUGFIX: was "gpt-4o-2024s-08-06" (stray 's'),
                            # which is not a valid model id.
                            model="gpt-4o-2024-08-06",
                            messages=[
                                {
                                    "role": "user",
                                    "content": [
                                        {"type": "text", "text": prompt},
                                        {
                                            "type": "image_url",
                                            "image_url": {
                                                "url": f"data:image/jpeg;base64,{base64_image}"
                                            }
                                        }
                                    ]
                                }
                            ],
                            timeout=10  # 10 second timeout for the API call
                        )
                        # Discard the response if we were cancelled while waiting.
                        if cancel_thread.is_set():
                            return
                        api_response['result'] = response.choices[0].message.content.strip()
                        response_received.set()
                    except Exception as e:
                        # Only surface the error if we weren't cancelled.
                        if not cancel_thread.is_set():
                            api_response['error'] = str(e)
                            response_received.set()

                # Run the API call off the UI thread; daemon so it can't
                # keep the process alive.
                api_thread = threading.Thread(target=api_call)
                api_thread.daemon = True
                api_thread.start()

                # 15 second overall budget (covers encode + network + API).
                if not response_received.wait(15):
                    print("\nAPI timeout occurred - switching to local model")
                    # Tell the worker to drop any late response.
                    cancel_thread.set()
                    loading_screen.destroy()  # Destroy the old loading screen
                    # Show a transient "switching" notice for 2 seconds.
                    timeout_screen = LoadingScreen(
                        root,
                        message=TRANSLATIONS[language].get('api_timeout', 'API taking too long. Switching to local model...'),
                        dark_mode=getattr(root, 'DARK_MODE', False),
                        language=language
                    )
                    time.sleep(2)  # Give user time to read the message
                    # BUGFIX: this screen was previously leaked — it was
                    # rebound to a function-local name and never destroyed,
                    # leaving a stale window under the caller's next screen.
                    timeout_screen.destroy()
                    return None, "timeout"

                if cancel_thread.is_set():
                    return None, "cancelled"

                if api_response['error']:
                    print(f"\nAPI error occurred: {api_response['error']}")
                    raise Exception(api_response['error'])

                return api_response['result'], None
            except Exception as e:
                print(f"\nError in OpenAI classification: {str(e)}")
                return None, str(e)

        if has_internet and self.client:
            print("Using OpenAI API classification")
            # Create initial loading screen
            loading_screen = LoadingScreen(
                root,
                message=TRANSLATIONS[language]['classifying_image'],
                dark_mode=getattr(root, 'DARK_MODE', False),
                language=language
            )
            # Attempt OpenAI classification
            result, error = attempt_openai_classification(loading_screen)
            if result:
                loading_screen.destroy()
                print(f"\nClassification Result: {result}")
                return result
            else:
                # Cloud path failed/timed out: fall back to the local model.
                # (Tcl's `destroy` is a no-op on an already-destroyed window,
                # so this is safe after the timeout path above.)
                loading_screen.destroy()
                loading_screen = LoadingScreen(
                    root,
                    message=TRANSLATIONS[language].get('using_local_model', 'Using local model for classification...'),
                    dark_mode=getattr(root, 'DARK_MODE', False),
                    language=language
                )
                result = self.classify_with_local_model(image_path)
                loading_screen.destroy()
                print(f"\nLocal Model Classification Result: {result}")
                return result
        else:
            # Offline (or no client configured): local model only.
            print("Using local model classification")
            if not self.interpreter:
                raise Exception("Local model not initialized")
            loading_screen = LoadingScreen(
                root,
                message=TRANSLATIONS[language].get('using_local_model', 'Using local model for classification...'),
                dark_mode=getattr(root, 'DARK_MODE', False),
                language=language
            )
            result = self.classify_with_local_model(image_path)
            loading_screen.destroy()
            print(f"\nLocal Model Classification Result: {result}")
            return result

        # NOTE(review): unreachable — both branches above return. Kept
        # verbatim to preserve behavior; consider removing or moving it
        # before the returns if the completion banner is wanted.
        print("=== Classification Complete ===\n")
    except Exception as e:
        print(f"\n!!! Error during classification: {str(e)}")
        raise
    finally:
        # Always purge temp artifacts (e.g. the encoded image) regardless
        # of which path ran or whether it raised.
        self.cleanup_temp_files()
Editor is loading...
Leave a Comment