1 change: 1 addition & 0 deletions BabbleApp/Locale/English/locale.json
@@ -37,6 +37,7 @@
"error.size": "Size of frames to display are of unequal sizes",
"error.capture": "Frame capture issue detected",
"error.winmm": "Failed to load winmm",
"error.modelLoad": "Failed to load model:",
"warn.frameDrop": "Frame drop. Corrupted JPEG",
"warn.captureProblem": "Capture source problem, assuming camera disconnected, waiting for reconnect",
"warn.serialCapture": "Serial capture source problem, assuming camera disconnected, waiting for reconnect",
1 change: 1 addition & 0 deletions BabbleApp/Locale/Español/locale.json
@@ -37,6 +37,7 @@
"error.size": "Los tamaños de los fotogramas para mostrar son de dimensiones desiguales",
"error.capture": "Detectado problema en la captura de fotogramas",
"error.winmm": "Error al cargar winmm",
"error.modelLoad": "Error al cargar Modelo:",
"warn.frameDrop": "Pérdida de fotogramas. JPEG dañado",
"warn.captureProblem": "Problema con la fuente de captura, se asume que la cámara está desconectada, esperando reconexión",
"warn.serialCapture": "Problema con la fuente de captura en serie, se asume que la cámara está desconectada, esperando reconexión",
1 change: 1 addition & 0 deletions BabbleApp/Locale/OwO/locale.json
@@ -37,6 +37,7 @@
"error.size": "Size of frames to display are of unequal sizes",
"error.capture": "Frame capture issue detected",
"error.winmmDll": "Failed to load winmm",
"error.modelLoad": "Failed to load model:",
"warn.frameDrop": "Frame drop. Corrupted JPEG",
"warn.captureProblem": "Capture source problem, assuming camera disconnected, waiting for reconnect",
"warn.serialCapture": "Serial capture source problem, assuming camera disconnected, waiting for reconnect",
1 change: 1 addition & 0 deletions BabbleApp/Locale/Pirate Speak/locale.json
@@ -37,6 +37,7 @@
"error.size": "Size of frames to display are of unequal sizes",
"error.capture": "Frame capture issue detected",
"error.winmmDll": "Failed to load winmm",
"error.modelLoad": "Failed to load mdoel:",
"warn.frameDrop": "Frame drop. Corrupted JPEG",
"warn.captureProblem": "Capture source problem, assuming camera disconnected, waiting for reconnect",
"warn.serialCapture": "Serial capture source problem, assuming camera disconnected, waiting for reconnect",
1 change: 1 addition & 0 deletions BabbleApp/algo_settings_widget.py
@@ -41,6 +41,7 @@ def __init__(
size=(32),
tooltip=f'{lang._instance.get_string("algorithm.modelFileTooptip")}.',
),
+sg.FolderBrowse(),
sg.Text(
f'{lang._instance.get_string("algorithm.inferenceThreads")}:',
background_color=bg_color_highlight,
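For reference, a minimal sketch of how an sg.FolderBrowse() button pairs with a text input in a PySimpleGUI-style layout. The import, keys, and window title are illustrative assumptions, not the actual BabbleApp layout:

import PySimpleGUI as sg  # assumption: the project's "sg" module exposes this API

layout = [
    [sg.Text("Model folder:"),
     sg.Input(key="-MODEL_PATH-"),  # holds the chosen directory
     sg.FolderBrowse()],            # by default fills the element directly to its left
    [sg.OK()],
]

window = sg.Window("Model picker sketch", layout)
event, values = window.read()
window.close()
print(values["-MODEL_PATH-"])  # e.g. "Models/3MEFFB0E7MSE"

Because FolderBrowse targets the preceding element, clicking it writes the selected directory into the input field that feeds gui_model_file.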
26 changes: 20 additions & 6 deletions BabbleApp/babble_processor.py
@@ -86,6 +86,8 @@ def __init__(
self.runtime = self.settings.gui_runtime
self.use_gpu = self.settings.gui_use_gpu
self.gpu_index = self.settings.gui_gpu_index
+config_default: BabbleConfig = BabbleConfig()
+self.default_model = config_default.settings.gui_model_file
self.output = []
self.val_list = []
self.calibrate_config = np.empty((1, 45))
@@ -103,12 +105,24 @@
provider = "DmlExecutionProvider"
else:
provider = "CPUExecutionProvider" # Build onnxruntime to get both DML and OpenVINO
-self.sess = ort.InferenceSession(
-f"{self.model}onnx/model.onnx",
-self.opts,
-providers=[provider],
-provider_options=[{"device_id": self.gpu_index}],
-)
+try:
+self.sess = ort.InferenceSession(
+f"{self.model}/onnx/model.onnx",
+self.opts,
+providers=[provider],
+provider_options=[{"device_id": self.gpu_index}],
+)
+except: # Load default model if we can't find the specified model
+print(
+f'\033[91m[{lang._instance.get_string("log.error")}] {lang._instance.get_string("error.modelLoad")} {self.model}\033[0m'
+)
+print(f'\033[91mLoading Default model: {self.default_model}.\033[0m')
+self.sess = ort.InferenceSession(
+f"{self.default_model}/onnx/model.onnx",
+self.opts,
+providers=[provider],
+provider_options=[{"device_id": self.gpu_index}],
+)
self.input_name = self.sess.get_inputs()[0].name
self.output_name = self.sess.get_outputs()[0].name
try:
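The change above wraps model loading in a try/except, so a bad gui_model_file no longer aborts startup; the stock model path from a fresh BabbleConfig() is used instead. A minimal standalone sketch of the same fallback pattern, with placeholder paths rather than the repository's actual layout:

import onnxruntime as ort  # assumption: the same runtime the processor uses

def load_session(model_dir: str, fallback_dir: str, provider: str = "CPUExecutionProvider"):
    opts = ort.SessionOptions()
    for candidate in (model_dir, fallback_dir):
        try:
            # Model files are expected under <dir>/onnx/model.onnx, as in the diff above.
            return ort.InferenceSession(
                f"{candidate}/onnx/model.onnx",
                opts,
                providers=[provider],
            )
        except Exception as exc:  # missing folder, unreadable file, invalid model, ...
            print(f"Failed to load model from {candidate}: {exc}")
    raise RuntimeError("Neither the configured nor the default model could be loaded")

# Example: load_session("Models/UserPickedModel", "Models/3MEFFB0E7MSE")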
2 changes: 1 addition & 1 deletion BabbleApp/babbleapp.py
@@ -58,7 +58,7 @@
CALIB_SETTINGS_RADIO_NAME = "-CALIBSETTINGSRADIO-"

page_url = "https://github.com/SummerSigh/ProjectBabble/releases/latest"
appversion = "Babble v2.0.6 Alpha"
appversion = "Babble v2.1.0 Beta"

def timerResolution(toggle):
if winmm != None:
2 changes: 1 addition & 1 deletion BabbleApp/config.py
@@ -35,7 +35,7 @@ class BabbleSettingsConfig(BaseModel):
gui_ROSC: bool = False
gui_osc_location: str = ""
gui_multiply: float = 1
gui_model_file: str = "Models/3MEFFB0E7MSE/"
gui_model_file: str = "Models/3MEFFB0E7MSE"
gui_runtime: str = "ONNX"
gui_use_gpu: bool = False
gui_gpu_index: int = 0
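The default loses its trailing slash because babble_processor.py now inserts the separator itself when building the model path, and a folder picked via the new browse button also arrives without one. A quick check of the composition, using the defaults shown in the diff for illustration:

model = "Models/3MEFFB0E7MSE"        # new default, no trailing slash
print(f"{model}/onnx/model.onnx")    # Models/3MEFFB0E7MSE/onnx/model.onnx

old_model = "Models/3MEFFB0E7MSE/"   # old default, trailing slash
print(f"{old_model}onnx/model.onnx") # same result; the old code omitted the "/" instead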