From 14cc8650efcff67aaf370b15433b5c1b9dc76663 Mon Sep 17 00:00:00 2001
From: Viet-Anh Nguyen
Date: Sun, 8 Dec 2024 13:09:15 +0700
Subject: [PATCH] Fix crashing when missing cl100k_base + missing document icon

---
 LlamaAssistant.spec                    |  2 +-
 llama_assistant/llama_assistant_app.py | 19 +++++-----------
 pyproject.toml                         | 31 +++++++++++++-------------
 3 files changed, 23 insertions(+), 29 deletions(-)

diff --git a/LlamaAssistant.spec b/LlamaAssistant.spec
index b25891a..00de570 100644
--- a/LlamaAssistant.spec
+++ b/LlamaAssistant.spec
@@ -32,7 +32,7 @@ a = Analysis(
     pathex=['llama_assistant'],
     binaries=[],
     datas=datas,
-    hiddenimports=["ffmpeg", "PyQt5"],
+    hiddenimports=["ffmpeg", "PyQt5", "tiktoken_ext.openai_public", "tiktoken_ext"],
     hookspath=[],
     runtime_hooks=[],
     excludes=[],
diff --git a/llama_assistant/llama_assistant_app.py b/llama_assistant/llama_assistant_app.py
index 2d72cc5..f6e4d5e 100644
--- a/llama_assistant/llama_assistant_app.py
+++ b/llama_assistant/llama_assistant_app.py
@@ -40,6 +40,7 @@
 from llama_assistant.ui_manager import UIManager
 from llama_assistant.tray_manager import TrayManager
 from llama_assistant.setting_validator import validate_numeric_field
+from llama_assistant.utils import load_image
 
 
 class LlamaAssistant(QMainWindow):
@@ -91,17 +92,18 @@ def recursively_update_setting(self, setting, default_setting, validator):
                 continue
 
             valid, message = validate_numeric_field(key, setting[key], validator[key])
-            
+
             if not valid:
                 setting[key] = value
                 warnings.warn(message + f". Using default value {value} instead.")
-
 
     def load_settings(self):
         if config.settings_file.exists():
             with open(config.settings_file, "r") as f:
                 self.settings = json.load(f)
-            self.recursively_update_setting(self.settings, config.DEFAULT_SETTINGS, config.VALIDATOR)
+            self.recursively_update_setting(
+                self.settings, config.DEFAULT_SETTINGS, config.VALIDATOR
+            )
             self.save_settings()
         else:
             self.settings = copy.deepcopy(config.DEFAULT_SETTINGS)
@@ -494,16 +496,7 @@ def show_file_thumbnail(self, file_path):
         remove_button.clicked.connect(lambda: self.remove_file_thumbnail(container, file_path))
 
         # Load and set the pixmap
-        import os
-
-        print("Icon path:", str(config.document_icon), os.path.exists(str(config.document_icon)))
-        pixmap = QPixmap(str(config.document_icon))
-        scaled_pixmap = pixmap.scaled(
-            80,
-            80,
-            Qt.AspectRatioMode.KeepAspectRatio,
-            Qt.TransformationMode.SmoothTransformation,
-        )
+        scaled_pixmap = load_image(config.document_icon, size=(80, 80))
         pixmap_label.setPixmap(scaled_pixmap)
 
         # Add the container to the layout
diff --git a/pyproject.toml b/pyproject.toml
index 114ef57..8f06937 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -25,21 +25,22 @@ classifiers = [
     "Programming Language :: Python :: 3.12",
 ]
 dependencies = [
-    "numpy",
-    "ffmpeg-python",
-    "PyQt5",
-    "markdown",
-    "llama-cpp-python",
-    "pynput",
-    "SpeechRecognition",
-    "huggingface_hub",
-    "openwakeword",
-    "whispercpp",
-    "llama-index-core",
-    "llama-index-readers-file",
-    "llama-index-embeddings-huggingface",
-    "docx2txt",
-    "mistune"
+    "numpy==1.26.4",
+    "PyQt5==5.15.6",
+    "SpeechRecognition==3.10.4",
+    "markdown==3.7",
+    "pynput==1.7.7",
+    "llama-cpp-python==0.3.1",
+    "huggingface_hub==0.25.1",
+    "openwakeword==0.6.0",
+    "pyinstaller==6.10.0",
+    "ffmpeg-python==0.2.0",
+    "llama-index-core==0.12.0",
+    "llama-index-readers-file==0.4.0",
+    "llama-index-embeddings-huggingface==0.4.0",
+    "docx2txt==0.8",
+    "mistune==3.0.2",
+    "whispercpp @ git+https://github.com/stlukey/whispercpp.py"
 ]
 dynamic = []
 