Compare commits

`4e4389a03f...main`

4 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 79f4156ffa | |
| | 7f4b559644 | |
| | 98b5ab1f1c | |
| | e628816ea8 | |
pyproject.toml:

```diff
@@ -1,6 +1,6 @@
 [project]
 name = "pyvtt"
-version = "0.4.5"
+version = "0.6.0"
 description = "Python Voice to Text + LLMA"
 authors = [{ name = "Max P.", email = "Mail@MPassarello.de" }]
 license = { text = "MIT" }
```
Example configuration (JSON):

```diff
@@ -2,23 +2,26 @@
     "audio_file": "/tmp/pyvtt_recording.wav",
     "output_file": "/tmp/pyvtt_transcript.txt",
     "whisper_path": "/path/to/whisper-cli",
-    "language": "en",
     "socket_path": "/tmp/pyvtt.sock",
     "ollama_url": "http://localhost",
+    "ollama_path": "/api/chat",
     "ollama_port": 12345,
     "presets": [
         {
             "name": "Default",
+            "language": "en",
             "whisper_model": "/path/to/default-whisper-model.bin",
             "ollama_model": "default-model",
-            "ollama_prompt": "Provide a detailed response to the following text:\n\n"
+            "ollama": "disable"
         },
         {
             "name": "Quick English",
             "whisper_model": "/path/to/quick-whisper-model.bin",
-            "ollama_model": "quick-model",
-            "ollama_prompt": "Quickly correct the following English text for grammar and punctuation:\n\n"
+            "ollama_model": "gemma3:4b",
+            "ollama_context": 131072,
+            "ollama_prompt": [
+                "Quickly correct the following English text for grammar and punctuation:\n",
+                "\n"
+            ]
         },
         {
             "name": "German Correction",
```
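The new list form of `ollama_prompt` exists purely for readability in the JSON file; before a request goes out, the client joins the entries with newlines into a single system prompt (see the `send_chat` change below). A minimal standalone sketch of that normalization, not code from this compare:

```python
from typing import List, Union

def normalize_prompt(prompt: Union[str, List[str]]) -> str:
    """Join a list-valued prompt with newlines; pass plain strings through."""
    if isinstance(prompt, list):
        return "\n".join(prompt)
    return str(prompt)

# The "Quick English" preset's list form collapses into one prompt string.
prompt = normalize_prompt([
    "Quickly correct the following English text for grammar and punctuation:\n",
    "\n",
])
print(repr(prompt))
```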
Ollama client:

```diff
@@ -1,3 +1,4 @@
+import json
 import requests
 from typing import Union, List, Optional
 
@@ -8,60 +9,95 @@ from pyvtt.models.config import AppConfig, PresetConfig
 class OllamaClient:
     def __init__(self, config: AppConfig):
         """
-        Initializes the Ollama client with the base configuration from the global app configuration.
-
-        :param config: AppConfig instance with host and port for the Ollama server.
+        Initializes the API client (Ollama or llama-swap) with the base configuration.
         """
-        self.base_url = config.ollama_url
+        self.base_url = config.ollama_url.rstrip("/")
         self.port = config.ollama_port
+        self.path = config.ollama_path or "/api/chat"
+        # If llama-swap (OpenAI-API compatible), use the OpenAI path
+        if "v1" in self.path or "completions" in self.path:
+            self.is_llama_swap = True
+        else:
+            self.is_llama_swap = False
 
-    def send_chat(
-        self,
-        user_message: str,
-        config: PresetConfig,
-    ) -> str:
+    def send_chat(self, user_message: str, config: PresetConfig) -> str:
         """
-        Sends a chat request to the Ollama server based on the specific preset configuration.
-
-        :param user_message: The user-generated input text (e.g. a transcript).
-        :param config: PresetConfig instance with model-, prompt-, and context-related parameters.
-        :return: The formatted response text returned by Ollama; the user_message
-                 unchanged if Ollama is disabled, or none on error.
+        Sends a chat request to Ollama or llama-swap.
         """
         if config.ollama and config.ollama.lower() == "disable":
             print("[OllamaClient] Ollama is disabled in this preset.")
             print("[OllamaClient] Returning the input unchanged.")
             return user_message
 
-        # Build the prompt as a string – a list is joined into lines
-        if isinstance(config.ollama_prompt, list):
-            prompt_str = "\n".join(config.ollama_prompt)
-        else:
-            prompt_str = config.ollama_prompt
-
-        # Prepare the payload for the API request
-        payload = {
-            "model": config.ollama_model,
-            "messages": [
-                {"role": "system", "content": prompt_str},
-                {"role": "user", "content": user_message}
-            ],
-            "options": {
-                "num_ctx": config.ollama_context,
-            } if config.ollama_context else {},
-            "stream": False
-        }
-
-        endpoint = f"{self.base_url}:{self.port}/api/chat"
+        # Build the prompt
+        prompt_str = (
+            "\n".join(config.ollama_prompt)
+            if isinstance(config.ollama_prompt, list)
+            else str(config.ollama_prompt)
+        )
+
+        # === Prepare the payload ===
+        if self.is_llama_swap:
+            # OpenAI-/llama-swap-compatible format
+            payload = {
+                "model": config.ollama_model,
+                "messages": [
+                    {"role": "system", "content": prompt_str},
+                    {"role": "user", "content": user_message},
+                ],
+                "stream": False,
+            }
+            # Optionally add the context size
+            if config.ollama_context:
+                payload["num_ctx"] = config.ollama_context
+        else:
+            # Classic Ollama format
+            payload = {
+                "model": config.ollama_model,
+                "messages": [
+                    {"role": "system", "content": prompt_str},
+                    {"role": "user", "content": user_message},
+                ],
+                "options": (
+                    {"num_ctx": config.ollama_context}
+                    if config.ollama_context
+                    else {}
+                ),
+                "stream": False,
+            }
+
+        endpoint = f"{self.base_url}:{self.port}{self.path}"
 
-        # Send the request to Ollama and extract the response
+        # === Send the request ===
         try:
-            response = requests.post(endpoint, json=payload)
+            headers = {"Content-Type": "application/json"}
+            if self.is_llama_swap:
+                headers["Authorization"] = "Bearer no-key"
+
+            response = requests.post(endpoint, headers=headers, data=json.dumps(payload))
             response.raise_for_status()
+
             json_response = response.json()
-            content = json_response.get("message", {}).get("content", "").strip()
+
+            # === Extract the response ===
+            if self.is_llama_swap:
+                # OpenAI-compatible structure
+                content = (
+                    json_response.get("choices", [{}])[0]
+                    .get("message", {})
+                    .get("content", "")
+                    .strip()
+                )
+            else:
+                # Ollama's own structure
+                content = (
+                    json_response.get("message", {})
+                    .get("content", "")
+                    .strip()
+                )
+
             return "\n".join(line.strip() for line in content.splitlines())
 
         except requests.exceptions.RequestException as e:
             print(f"[OllamaClient] HTTP error: {e}")
-            notify("Error", "An error occurred while communicating with 'Ollama'!")
+            notify("Error", "Communication error with Ollama / llama-swap!")
             return ""
```
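The llama-swap switch hinges entirely on the configured `ollama_path`: any path containing `v1` or `completions` puts the client into OpenAI-compatible mode, which changes the payload shape, the auth header, and the response parsing. A small standalone sketch of just that detection rule; the sample paths are illustrative, not taken from this compare:

```python
def is_llama_swap(path: str) -> bool:
    # Same heuristic as OllamaClient.__init__: OpenAI-style paths
    # (containing "v1" or "completions") signal a llama-swap endpoint.
    return "v1" in path or "completions" in path

# Illustrative paths only:
for path in ("/api/chat", "/v1/chat/completions", "/upstream/completions"):
    mode = "llama-swap (OpenAI format)" if is_llama_swap(path) else "classic Ollama"
    print(f"{path} -> {mode}")
```

Note that the substring check is deliberately loose: it also catches custom reverse-proxy paths, at the cost of false positives for any path that happens to contain "v1".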
pyvtt.models.config:

```diff
@@ -20,6 +20,7 @@ class AppConfig(BaseModel):
     whisper_path: str
     socket_path: str
     ollama_url: str
+    ollama_path: str
     ollama_port: int
     journal_path: str
     presets: List[PresetConfig]
```
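For completeness, here is how the new `ollama_path` field ties the pieces together: it is a required field on `AppConfig`, and the client concatenates it onto the base URL and port. A sketch under stated assumptions — the field names come from the hunks above, but the trimmed `PresetConfig` stub and the sample values (e.g. `journal_path`) are invented for illustration:

```python
from typing import List, Optional, Union
from pydantic import BaseModel

class PresetConfig(BaseModel):
    # Trimmed stub: only the fields visible in this compare.
    name: str
    language: Optional[str] = None
    whisper_model: str
    ollama_model: Optional[str] = None
    ollama_context: Optional[int] = None
    ollama_prompt: Optional[Union[str, List[str]]] = None
    ollama: Optional[str] = None

class AppConfig(BaseModel):
    # Mirrors the hunk above; ollama_path is the newly added field.
    whisper_path: str
    socket_path: str
    ollama_url: str
    ollama_path: str
    ollama_port: int
    journal_path: str
    presets: List[PresetConfig]

cfg = AppConfig(
    whisper_path="/path/to/whisper-cli",
    socket_path="/tmp/pyvtt.sock",
    ollama_url="http://localhost",
    ollama_path="/api/chat",
    ollama_port=12345,
    journal_path="/tmp/pyvtt.journal",  # sample value, not from the diff
    presets=[
        PresetConfig(
            name="Default",
            whisper_model="/path/to/default-whisper-model.bin",
            ollama="disable",
        )
    ],
)

# Endpoint assembly, mirroring OllamaClient (which also rstrips a trailing "/"):
endpoint = f"{cfg.ollama_url}:{cfg.ollama_port}{cfg.ollama_path}"
print(endpoint)  # http://localhost:12345/api/chat
```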