mirror of https://github.com/ijaric/voice_assistant.git (synced 2025-12-17 11:46:20 +00:00)
feat: stt from OpenAI
@@ -11,6 +11,7 @@ import lib.app.errors as app_errors
 import lib.app.settings as app_settings
 import lib.app.split_settings as app_split_settings
 import lib.clients as clients
+import lib.stt as stt

 logger = logging.getLogger(__name__)

@@ -68,6 +69,7 @@ class Application:
         # Services

         logger.info("Initializing services")
+        stt_service: stt.STTProtocol = stt.OpenaiSpeech(settings=settings)  # type: ignore

         # Handlers

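
Downstream, a handler only needs the protocol, not the concrete OpenAI class. A rough sketch of that wiring (the VoiceHandler name and the FastAPI upload type are assumptions, not part of this commit; only STTProtocol.recognize comes from the diff):

    import fastapi

    import lib.stt as stt


    class VoiceHandler:
        """Hypothetical handler that forwards uploaded audio to the injected STT service."""

        def __init__(self, stt_service: stt.STTProtocol) -> None:
            self.stt_service = stt_service

        async def recognize(self, file: fastapi.UploadFile) -> dict[str, str]:
            audio = await file.read()  # raw bytes, exactly what STTProtocol.recognize expects
            return {"text": await self.stt_service.recognize(audio)}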
@@ -13,6 +13,10 @@ class Settings(pydantic_settings.BaseSettings):
     logger: app_split_settings.LoggingSettings = pydantic.Field(
         default_factory=lambda: app_split_settings.LoggingSettings()
     )
+    openai: app_split_settings.OpenaiSettings = pydantic.Field(
+        default_factory=lambda: app_split_settings.OpenaiSettings()
+    )
     project: app_split_settings.ProjectSettings = pydantic.Field(
         default_factory=lambda: app_split_settings.ProjectSettings()
     )
+    voice: app_split_settings.VoiceSettings = pydantic.Field(default_factory=lambda: app_split_settings.VoiceSettings())

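
Once assembled, a single Settings() call pulls every group from the shared .env file. A minimal sketch (assumes the OpenAI key is present in .env, otherwise validation fails):

    import lib.app.settings as app_settings

    settings = app_settings.Settings()
    print(settings.openai.api_key)        # masked SecretStr; real value via .get_secret_value()
    print(settings.voice.max_input_size)  # 5120 by default, treated as KB by SttVoice below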
@@ -1,14 +1,18 @@
 from .api import *
 from .app import *
 from .logger import *
+from .openai import *
 from .postgres import *
 from .project import *
+from .voice import *

 __all__ = [
     "ApiSettings",
     "AppSettings",
     "LoggingSettings",
+    "OpenaiSettings",
     "PostgresSettings",
     "ProjectSettings",
+    "VoiceSettings",
     "get_logging_config",
 ]

@@ -5,7 +5,9 @@ import lib.app.split_settings.utils as app_split_settings_utils

 class LoggingSettings(pydantic_settings.BaseSettings):
     model_config = pydantic_settings.SettingsConfigDict(
-        env_file=app_split_settings_utils.ENV_PATH, env_file_encoding="utf-8", extra="ignore"
+        env_file=app_split_settings_utils.ENV_PATH,
+        env_file_encoding="utf-8",
+        extra="ignore",
     )

     log_format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

src/assistant/lib/app/split_settings/openai.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+import pydantic
+import pydantic_settings
+
+import lib.app.split_settings.utils as app_split_settings_utils
+
+
+class OpenaiSettings(pydantic_settings.BaseSettings):
+    model_config = pydantic_settings.SettingsConfigDict(
+        env_file=app_split_settings_utils.ENV_PATH,
+        env_prefix="OPENAI_",
+        env_file_encoding="utf-8",
+        extra="ignore",
+    )
+
+    api_key: pydantic.SecretStr = pydantic.Field(
+        default=..., validation_alias=pydantic.AliasChoices("api_key", "openai_api_key")
+    )
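
If pydantic-settings resolves the explicit validation_alias ahead of the OPENAI_ prefix (which is how v2 documents alias handling, with case-insensitive matching by default), either API_KEY or OPENAI_API_KEY should satisfy this field. A quick check with a made-up key:

    import os

    import lib.app.split_settings as app_split_settings

    os.environ["OPENAI_API_KEY"] = "sk-test"  # placeholder, not a real key
    s = app_split_settings.OpenaiSettings()
    print(s.api_key.get_secret_value())  # sk-test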

src/assistant/lib/app/split_settings/voice.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+import pydantic
+import pydantic_settings
+
+import lib.app.split_settings.utils as app_split_settings_utils
+
+
+class VoiceSettings(pydantic_settings.BaseSettings):
+    model_config = pydantic_settings.SettingsConfigDict(
+        env_file=app_split_settings_utils.ENV_PATH,
+        env_prefix="VOICE_",
+        env_file_encoding="utf-8",
+        extra="ignore",
+    )
+
+    max_input_seconds: int = 30
+    max_input_size: int = 5120  # 5MB
+    available_formats: str = "wav,mp3,ogg"
+
+    @pydantic.field_validator("available_formats")
+    def validate_available_formats(cls, v: str) -> list[str]:
+        return v.split(",")
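
The validator replaces the comma-separated string with a list at load time, even though the annotation stays str; assuming pydantic v2's after-validator semantics (the returned value is not re-checked against the annotation), the defaults load as:

    import lib.app.split_settings as app_split_settings

    v = app_split_settings.VoiceSettings()
    print(v.available_formats)  # ['wav', 'mp3', 'ogg'] -- already split by validate_available_formats
    print(v.max_input_size)     # 5120, i.e. 5 MB expressed in KB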

@@ -1,4 +1,5 @@
 from .orm import Base, IdCreatedUpdatedBaseMixin
+from .stt_voice import SttVoice
 from .token import Token

-__all__ = ["Base", "IdCreatedUpdatedBaseMixin", "Token"]
+__all__ = ["Base", "IdCreatedUpdatedBaseMixin", "SttVoice", "Token"]

src/assistant/lib/models/stt_voice.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+import typing
+
+import pydantic
+
+import lib.app.split_settings as app_split_settings
+
+
+class SttVoice(pydantic.BaseModel):
+    audio_size: int
+    audio_format: str
+    audio_name: str = "123"
+    audio_data: bytes
+    voice_settings: app_split_settings.VoiceSettings
+
+    @pydantic.model_validator(mode="before")
+    @classmethod
+    def validate_audio(cls, v: dict[str, typing.Any]) -> dict[str, typing.Any]:
+        settings: app_split_settings.VoiceSettings = v["voice_settings"]
+        if v["audio_size"] > settings.max_input_size:
+            raise ValueError(f"Audio size is too big: {v['audio_size']}")
+        if v["audio_format"] not in settings.available_formats:
+            raise ValueError(f"Audio format is not supported: {v['audio_format']}")
+        if "audio_name" not in v or not v["audio_name"]:
+            v["audio_name"] = f"audio.{v['audio_format']}"
+        return v
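
The before-validator rejects oversized or unsupported audio and backfills a file name when none is given. A rough example with invented values:

    import lib.app.split_settings as app_split_settings
    import lib.models as models

    voice = models.SttVoice(
        audio_size=1024,    # KB, below the 5120 KB limit
        audio_format="mp3",
        audio_data=b"...",  # placeholder bytes
        voice_settings=app_split_settings.VoiceSettings(),
    )
    print(voice.audio_name)  # "audio.mp3" -- filled in because no audio_name was passed

    # An unsupported container fails before the model is built, e.g. audio_format="flac"
    # raises "Audio format is not supported: flac" wrapped in a pydantic ValidationError.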

src/assistant/lib/stt/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
+from .openai_speech import *
+from .stt_protocol import *
+
+__all__ = [
+    "OpenaiSpeech",
+    "stt_protocol",
+]

src/assistant/lib/stt/openai_speech.py (new file, 47 lines)
@@ -0,0 +1,47 @@
+import mimetypes
+import tempfile
+
+import magic
+import openai
+
+import lib.app.settings as app_settings
+import lib.models as models
+
+
+class OpenaiSpeech:
+    def __init__(self, settings: app_settings.Settings):
+        self.settings = settings
+        openai.api_key = self.settings.openai.api_key.get_secret_value()
+
+    @staticmethod
+    def __get_file_extension_from_bytes(audio: bytes) -> str | None:
+        mime: magic.Magic = magic.Magic(mime=True)
+        mime_type: str = mime.from_buffer(audio)
+        extension: str | None = mimetypes.guess_extension(mime_type)
+        if extension:
+            extension = extension.replace(".", "")
+        return extension
+
+    async def recognize(self, audio: bytes) -> str:
+        file_extension: str | None = self.__get_file_extension_from_bytes(audio)
+        if not file_extension:
+            raise ValueError("File extension is not supported")
+
+        voice: models.SttVoice = models.SttVoice(
+            audio_size=int(len(audio) / 1024),
+            audio_format=file_extension,
+            audio_data=audio,
+            voice_settings=self.settings.voice,
+        )
+
+        try:
+            with tempfile.NamedTemporaryFile(suffix=f".{file_extension}") as temp_file:
+                temp_file.write(voice.audio_data)
+                temp_file.seek(0)
+                transcript = openai.Audio.transcribe("whisper-1", temp_file)  # type: ignore
+        except openai.error.InvalidRequestError as e:  # type: ignore
+            raise ValueError(f"OpenAI API error: {e}")
+        except openai.error.OpenAIError as e:  # type: ignore
+            raise ValueError(f"OpenAI API error: {e}")
+
+        return transcript.text  # type: ignore
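
End to end, the service only needs raw bytes. A rough driver under the same assumptions as above (the sample path is invented, a populated .env is assumed, and this targets the pre-1.0 openai SDK interface used in the commit):

    import asyncio

    import lib.app.settings as app_settings
    import lib.stt as stt


    async def main() -> None:
        settings = app_settings.Settings()
        service: stt.STTProtocol = stt.OpenaiSpeech(settings=settings)
        with open("sample.ogg", "rb") as f:  # hypothetical input file
            audio = f.read()
        print(await service.recognize(audio))


    asyncio.run(main())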

src/assistant/lib/stt/stt_protocol.py (new file, 6 lines)
@@ -0,0 +1,6 @@
+from typing import Protocol
+
+
+class STTProtocol(Protocol):
+    async def recognize(self, audio: bytes) -> str:
+        ...
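
Because STTProtocol is a structural Protocol, any object exposing a matching async recognize satisfies it, which keeps the Application wiring swappable. A hypothetical test double (not part of the commit):

    import lib.stt as stt


    class FakeSpeech:
        """Made-up stand-in for tests; never talks to OpenAI."""

        async def recognize(self, audio: bytes) -> str:
            return "stub transcript"


    service: stt.STTProtocol = FakeSpeech()  # accepted: structural typing, no inheritance needed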