mirror of https://github.com/ijaric/voice_assistant.git synced 2025-12-17 01:06:18 +00:00

Merge branch 'features/#45_agent' into tasks/#45_agent

This commit is contained in:
Григорич
2023-10-14 23:56:38 +03:00
committed by GitHub
109 changed files with 3958 additions and 130 deletions

View File

@@ -15,14 +15,27 @@ NGINX_PORT=80
API_HOST=0.0.0.0
API_PORT=8000
TEST_API_PROTOCOL=http
TEST_API_HOST=api
TEST_API_PORT=8000
JWT_SECRET_KEY=v9LctjUWwol4XbvczPiLFMDtZ8aal7mm
JWT_ALGORITHM=HS256
APP_RELOAD=True
VOICE_AVAILABLE_FORMATS=mp3,ogg,wav
VOICE_AVAILABLE_FORMATS=mp3,ogg,wav,oga
VOICE_MAX_INPUT_SIZE=5120 # 5MB
VOICE_MAX_INPUT_SECONDS=30
OPENAI_API_KEY=sk-1234567890
OPENAI_STT_MODEL=whisper-1
TTS_YANDEX_API_KEY=
TTS_YANDEX_AUDIO_FORMAT=oggopus
TTS_YANDEX_SAMPLE_RATE_HERTZ=48000
TTS_YANDEX_TIMEOUT_SECONDS=30
TTS_ELEVEN_LABS_API_KEY=
TTS_ELEVEN_LABS_DEFAULT_VOICE_ID=EXAVITQu4vr4xnSDxMaL
TTS_ELEVEN_LABS_TIMEOUT_SECONDS=30

View File

@@ -13,7 +13,7 @@ COPY poetry.toml /opt/app/poetry.toml
WORKDIR /opt/app
RUN pip install poetry \
RUN pip install poetry \
&& poetry install --no-dev
COPY bin /opt/app/bin

View File

@@ -0,0 +1,18 @@
FROM python:3.11
RUN apt-get update
WORKDIR /opt/app
COPY pyproject.toml ./
COPY poetry.lock ./
RUN apt-get update \
&& pip install poetry \
&& poetry config virtualenvs.create false \
&& poetry install --no-dev
COPY tests tests
COPY lib lib
CMD ["pytest"]

View File

@@ -1,3 +1,3 @@
include ../../common_makefile.mk
PROJECT_FOLDERS = bin lib tests
PROJECT_FOLDERS = bin lib tests

View File

@@ -0,0 +1,56 @@
version: "3"
services:
postgres:
image: postgres:15.2
restart: always
environment:
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_NAME}
env_file:
- .env
expose:
- "${POSTGRES_PORT}"
volumes:
- postgres_data:/var/lib/postgresql/data/
networks:
- backend_network
api:
build:
context: .
container_name: api
image: fastapi_app
restart: always
entrypoint: ["/opt/app/entrypoint.sh"]
env_file:
- .env
expose:
- "${API_PORT}"
depends_on:
- postgres
networks:
- backend_network
- api_network
tests:
build:
context: .
dockerfile: "Dockerfile.tests"
env_file:
- .env
depends_on:
- postgres
- api
networks:
- api_network
volumes:
postgres_data:
networks:
api_network:
driver: bridge
backend_network:
driver: bridge

View File

@@ -1,4 +1,10 @@
from .agent import AgentHandler
from .health import basic_router
from .voice_responce_handler import VoiceResponseHandler
__all__ = ["AgentHandler", "basic_router"]
__all__ = [
"AgentHandler",
"VoiceResponseHandler",
"basic_router",
]

View File

@@ -0,0 +1,45 @@
import http
import io
import fastapi
import lib.stt.services as stt_services
# import lib.tts.services as tts_service
# import lib.models as models
class VoiceResponseHandler:
def __init__(
self,
stt: stt_services.SpeechService,
# tts: tts_service.TTSService,
):
self.stt = stt
# self.tts = tts
self.router = fastapi.APIRouter()
self.router.add_api_route(
"/",
self.voice_response,
methods=["POST"],
summary="Ответ голосового помощника",
description="Маршрут возвращает потоковый ответ аудио",
)
async def voice_response(
self,
voice: bytes = fastapi.File(...),
) -> fastapi.responses.StreamingResponse:
voice_text: str = await self.stt.recognize(voice)
if voice_text == "":
raise fastapi.HTTPException(status_code=http.HTTPStatus.BAD_REQUEST, detail="Speech recognition failed")
# TODO: Add text processing via the openai client
# TODO: Add speech synthesis via the tts client
# TODO: Replace the stub with a real response
# response = await self.tts.get_audio_as_bytes(
# models.TTSCreateRequestModel(
# text=voice_text,
# )
# )
# return fastapi.responses.StreamingResponse(io.BytesIO(response.audio_content), media_type="audio/ogg")
return fastapi.responses.StreamingResponse(io.BytesIO(voice), media_type="audio/ogg")
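A minimal sketch (not part of this commit) of how the TODOs above might resolve once the TTS service is injected; it follows the commented-out lines and the names tts_service.TTSService / models.TTSCreateRequestModel, so treat it as an assumption rather than the final wiring:

# Hypothetical completion of voice_response, assuming `self.tts` is injected as in the commented code.
async def voice_response(
    self,
    voice: bytes = fastapi.File(...),
) -> fastapi.responses.StreamingResponse:
    voice_text: str = await self.stt.recognize(voice)
    if voice_text == "":
        raise fastapi.HTTPException(status_code=http.HTTPStatus.BAD_REQUEST, detail="Speech recognition failed")
    # An agent/openai step would go here to turn voice_text into a reply before synthesis.
    response = await self.tts.get_audio_as_bytes(
        models.TTSCreateRequestModel(text=voice_text),
    )
    return fastapi.responses.StreamingResponse(io.BytesIO(response.audio_content), media_type="audio/ogg")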

View File

@@ -1,3 +1,5 @@
from .base import HealthResponse
__all__ = ["HealthResponse"]
__all__ = [
"HealthResponse",
]

View File

@@ -12,7 +12,9 @@ import lib.app.errors as app_errors
import lib.app.settings as app_settings
import lib.app.split_settings as app_split_settings
import lib.clients as clients
import lib.models as models
import lib.stt as stt
import lib.tts as tts
logger = logging.getLogger(__name__)
@@ -60,15 +62,29 @@ class Application:
logger.info("Initializing clients")
http_yandex_tts_client = clients.AsyncHttpClient(
base_url="yandex", # todo add yandex api url from settings
proxy_settings=settings.proxy,
base_url=settings.tts_yandex.base_url,
headers=settings.tts_yandex.base_headers,
timeout=settings.tts_yandex.timeout_seconds,
)
http_eleven_labs_tts_client = clients.AsyncHttpClient(
base_url=settings.tts_eleven_labs.base_url,
headers=settings.tts_eleven_labs.base_headers,
timeout=settings.tts_eleven_labs.timeout_seconds,
)
disposable_resources.append(
DisposableResource(
name="http_client yandex",
dispose_callback=http_yandex_tts_client.close(),
)
)
disposable_resources.append(
DisposableResource(
name="http_client eleven labs",
dispose_callback=http_eleven_labs_tts_client.close(),
)
)
# Repositories
@@ -76,6 +92,16 @@ class Application:
stt_repository: stt.STTProtocol = stt.OpenaiSpeechRepository(settings=settings)
chat_history_repository = agent.ChatHistoryRepository(pg_async_session=postgres_client.get_async_session())
tts_yandex_repository = tts.TTSYandexRepository(
tts_settings=app_split_settings.TTSYandexSettings(),
client=http_yandex_tts_client,
)
tts_eleven_labs_repository = tts.TTSElevenLabsRepository(
tts_settings=app_split_settings.TTSElevenLabsSettings(),
client=http_eleven_labs_tts_client,
is_models_from_api=True,
)
# Caches
logger.info("Initializing caches")
@@ -85,12 +111,25 @@ class Application:
logger.info("Initializing services")
stt_service: stt.SpeechService = stt.SpeechService(repository=stt_repository) # type: ignore
tts_service: tts.TTSService = tts.TTSService( # type: ignore
repositories={
models.VoiceModelProvidersEnum.YANDEX: tts_yandex_repository,
models.VoiceModelProvidersEnum.ELEVEN_LABS: tts_eleven_labs_repository,
},
)
# Handlers
logger.info("Initializing handlers")
liveness_probe_handler = api_v1_handlers.basic_router
agent_handler = api_v1_handlers.AgentHandler(chat_history_repository=chat_history_repository).router
# TODO: declare the tts and openai services and add them to voice_response_handler
voice_response_handler = api_v1_handlers.VoiceResponseHandler(
stt=stt_service,
# tts=tts_service, # TODO
).router
logger.info("Creating application")
fastapi_app = fastapi.FastAPI(
@@ -104,6 +143,7 @@ class Application:
# Routes
fastapi_app.include_router(liveness_probe_handler, prefix="/api/v1/health", tags=["health"])
fastapi_app.include_router(agent_handler, prefix="/api/v1/agent", tags=["testing"])
fastapi_app.include_router(voice_response_handler, prefix="/api/v1/voice", tags=["voice"])
application = Application(
settings=settings,

View File

@@ -1,24 +1,16 @@
import pydantic
import pydantic_settings
import lib.app.split_settings as app_split_settings
class Settings(pydantic_settings.BaseSettings):
api: app_split_settings.ApiSettings = pydantic.Field(default_factory=lambda: app_split_settings.ApiSettings())
app: app_split_settings.AppSettings = pydantic.Field(default_factory=lambda: app_split_settings.AppSettings())
postgres: app_split_settings.PostgresSettings = pydantic.Field(
default_factory=lambda: app_split_settings.PostgresSettings()
)
logger: app_split_settings.LoggingSettings = pydantic.Field(
default_factory=lambda: app_split_settings.LoggingSettings()
)
openai: app_split_settings.OpenaiSettings = pydantic.Field(
default_factory=lambda: app_split_settings.OpenaiSettings()
)
project: app_split_settings.ProjectSettings = pydantic.Field(
default_factory=lambda: app_split_settings.ProjectSettings()
)
proxy: app_split_settings.ProxySettings = pydantic.Field(default_factory=lambda: app_split_settings.ProxySettings())
voice: app_split_settings.VoiceSettings = pydantic.Field(default_factory=lambda: app_split_settings.VoiceSettings())
api: app_split_settings.ApiSettings = app_split_settings.ApiSettings()
app: app_split_settings.AppSettings = app_split_settings.AppSettings()
postgres: app_split_settings.PostgresSettings = app_split_settings.PostgresSettings()
logger: app_split_settings.LoggingSettings = app_split_settings.LoggingSettings()
openai: app_split_settings.OpenaiSettings = app_split_settings.OpenaiSettings()
project: app_split_settings.ProjectSettings = app_split_settings.ProjectSettings()
proxy: app_split_settings.ProxySettings = app_split_settings.ProxySettings()
voice: app_split_settings.VoiceSettings = app_split_settings.VoiceSettings()
tts_yandex: app_split_settings.TTSYandexSettings = app_split_settings.TTSYandexSettings()
tts_eleven_labs: app_split_settings.TTSElevenLabsSettings = app_split_settings.TTSElevenLabsSettings()

View File

@@ -5,6 +5,7 @@ from .openai import *
from .postgres import *
from .project import *
from .proxy import *
from .tts import *
from .voice import *
__all__ = [
@@ -15,6 +16,8 @@ __all__ = [
"PostgresSettings",
"ProjectSettings",
"ProxySettings",
"TTSElevenLabsSettings",
"TTSYandexSettings",
"VoiceSettings",
"get_logging_config",
]

View File

@@ -0,0 +1,7 @@
from .eleven_labs import *
from .yandex import *
__all__ = [
"TTSElevenLabsSettings",
"TTSYandexSettings",
]

View File

@@ -0,0 +1,26 @@
import pydantic
import pydantic_settings
import lib.app.split_settings.utils as app_split_settings_utils
class TTSElevenLabsSettings(pydantic_settings.BaseSettings):
model_config = pydantic_settings.SettingsConfigDict(
env_file=app_split_settings_utils.ENV_PATH,
env_prefix="TTS_ELEVEN_LABS_",
env_file_encoding="utf-8",
extra="ignore",
)
api_key: pydantic.SecretStr = pydantic.Field(default=...)
default_voice_id: str = "EXAVITQu4vr4xnSDxMaL"
base_url: str = "https://api.elevenlabs.io/v1/"
timeout_seconds: int = 30
@property
def base_headers(self) -> dict[str, str]:
return {
"Accept": "audio/mpeg",
"Content-Type": "application/json",
"xi-api-key": self.api_key.get_secret_value(),
}

View File

@@ -0,0 +1,28 @@
import typing
import pydantic
import pydantic_settings
import lib.app.split_settings.utils as app_split_settings_utils
class TTSYandexSettings(pydantic_settings.BaseSettings):
model_config = pydantic_settings.SettingsConfigDict(
env_file=app_split_settings_utils.ENV_PATH,
env_prefix="TTS_YANDEX_",
env_file_encoding="utf-8",
extra="ignore",
)
audio_format: typing.Literal["oggopus", "mp3", "lpcm"] = "oggopus"
sample_rate_hertz: int = 48000
api_key: pydantic.SecretStr = pydantic.Field(default=...)
base_url: str = "https://tts.api.cloud.yandex.net/speech/v1/"
timeout_seconds: int = 30
@property
def base_headers(self) -> dict[str, str]:
return {
"Authorization": f"Api-Key {self.api_key.get_secret_value()}",
"Content-Type": "application/x-www-form-urlencoded",
}
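A short orientation sketch (an assumption layered on pydantic-settings prefix handling, not code from this commit): with the TTS_YANDEX_* keys from the .env excerpt at the top of this diff, the settings above resolve roughly like this:

# Illustrative only; relies on env_prefix="TTS_YANDEX_" and the .env keys shown earlier.
import lib.app.split_settings as app_split_settings

settings = app_split_settings.TTSYandexSettings()  # reads ENV_PATH, unrelated keys ignored
# With the .env excerpt above: audio_format == "oggopus", sample_rate_hertz == 48000,
# and base_headers carries "Authorization: Api-Key <secret>" plus the form-encoded content type.
headers = settings.base_headers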

View File

@@ -8,7 +8,7 @@ import lib.app.split_settings as app_split_settings
class AsyncHttpClient(httpx.AsyncClient):
def __init__(
self,
proxy_settings: app_split_settings.ProxySettings,
proxy_settings: app_split_settings.ProxySettings | None = None,
base_url: str | None = None,
**client_params: typing.Any,
) -> None:
@@ -17,10 +17,10 @@ class AsyncHttpClient(httpx.AsyncClient):
self.proxies = self.__get_proxies_from_settings()
self.client_params = client_params
super().__init__(base_url=self.base_url, proxies=self.proxies, **client_params)
super().__init__(base_url=self.base_url, proxies=self.proxies, **client_params) # type: ignore[reportGeneralTypeIssues]
def __get_proxies_from_settings(self) -> dict[str, str] | None:
if not self.proxy_settings.enable:
if not self.proxy_settings or not self.proxy_settings.enable:
return None
proxies = {"all://": self.proxy_settings.dsn}
return proxies

View File

@@ -2,5 +2,27 @@ from .chat_history import Message, RequestChatHistory, RequestChatMessage, Reque
from .embedding import Embedding
from .movies import Movie
from .token import Token
from .tts import *
__all__ = ["Embedding", "Message", "Movie", "RequestChatHistory", "RequestChatMessage", "RequestLastSessionId", "Token"]
__all__ = [
"AVAILABLE_MODELS_TYPE",
"Base",
"BaseLanguageCodesEnum",
"BaseVoiceModel",
"ElevenLabsLanguageCodesEnum",
"ElevenLabsListVoiceModelsModel",
"ElevenLabsVoiceModel",
"IdCreatedUpdatedBaseMixin",
"LANGUAGE_CODES_ENUM_TYPE",
"LIST_VOICE_MODELS_TYPE",
"TTSCreateRequestModel",
"TTSCreateResponseModel",
"TTSSearchVoiceRequestModel",
"Token",
"VoiceModelProvidersEnum",
"YandexLanguageCodesEnum",
"YandexListVoiceModelsModel",
"YandexVoiceModel",
]

View File

@@ -0,0 +1,20 @@
from .models import *
from .voice import *
__all__ = [
"AVAILABLE_MODELS_TYPE",
"BaseLanguageCodesEnum",
"BaseVoiceModel",
"ElevenLabsLanguageCodesEnum",
"ElevenLabsListVoiceModelsModel",
"ElevenLabsVoiceModel",
"LANGUAGE_CODES_ENUM_TYPE",
"LIST_VOICE_MODELS_TYPE",
"TTSCreateRequestModel",
"TTSCreateResponseModel",
"TTSSearchVoiceRequestModel",
"VoiceModelProvidersEnum",
"YandexLanguageCodesEnum",
"YandexListVoiceModelsModel",
"YandexVoiceModel",
]

View File

@@ -0,0 +1,64 @@
import pydantic
import lib.models.tts.voice as models_tts_voice
import lib.models.tts.voice.languages as models_tts_languages
AVAILABLE_MODELS_TYPE = models_tts_voice.YandexVoiceModel | models_tts_voice.ElevenLabsVoiceModel
LIST_VOICE_MODELS_TYPE = models_tts_voice.YandexListVoiceModelsModel | models_tts_voice.ElevenLabsListVoiceModelsModel
DEFAULT_MODEL = models_tts_voice.ElevenLabsVoiceModel(
voice_id="eleven_multilingual_v2",
languages=[
models_tts_languages.ElevenLabsLanguageCodesEnum.ENGLISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.JAPANESE,
models_tts_languages.ElevenLabsLanguageCodesEnum.CHINESE,
models_tts_languages.ElevenLabsLanguageCodesEnum.GERMAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.HINDI,
models_tts_languages.ElevenLabsLanguageCodesEnum.FRENCH,
models_tts_languages.ElevenLabsLanguageCodesEnum.KOREAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.PORTUGUESE,
models_tts_languages.ElevenLabsLanguageCodesEnum.ITALIAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.SPANISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.INDONESIAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.DUTCH,
models_tts_languages.ElevenLabsLanguageCodesEnum.TURKISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.FILIPINO,
models_tts_languages.ElevenLabsLanguageCodesEnum.POLISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.SWEDISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.BULGARIAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.ROMANIAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.ARABIC,
models_tts_languages.ElevenLabsLanguageCodesEnum.CZECH,
models_tts_languages.ElevenLabsLanguageCodesEnum.GREEK,
models_tts_languages.ElevenLabsLanguageCodesEnum.FINNISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.CROATIAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.MALAY,
models_tts_languages.ElevenLabsLanguageCodesEnum.SLOVAK,
models_tts_languages.ElevenLabsLanguageCodesEnum.DANISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.TAMIL,
models_tts_languages.ElevenLabsLanguageCodesEnum.UKRAINIAN,
],
)
class TTSCreateRequestModel(pydantic.BaseModel):
model_config = pydantic.ConfigDict(use_enum_values=True)
voice_model: AVAILABLE_MODELS_TYPE = DEFAULT_MODEL
text: str
class TTSCreateResponseModel(pydantic.BaseModel):
audio_content: bytes
class TTSSearchVoiceRequestModel(pydantic.BaseModel):
voice_id: str | None = None
voice_name: str | None = None
languages: list[models_tts_languages.LANGUAGE_CODES_ENUM_TYPE] | None = None
company_name: str | None = None
@pydantic.model_validator(mode="after")
def check_at_least_one_field(self):
if not any((self.voice_name, self.languages, self.company_name)):
raise ValueError("At least one field required")
return self
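A hypothetical usage sketch for the request models above (not part of the diff); it assumes the lib.models re-exports that appear later in this commit:

# Hypothetical usage; names come from the lib.models __all__ shown in this diff.
import pydantic

import lib.models as models

request = models.TTSCreateRequestModel(text="Hello!")   # voice_model falls back to DEFAULT_MODEL
search = models.TTSSearchVoiceRequestModel(voice_name="alena")

try:
    models.TTSSearchVoiceRequestModel()  # no search criteria at all
except pydantic.ValidationError:
    pass  # check_at_least_one_field raises "At least one field required"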

View File

@@ -0,0 +1,17 @@
from .base import *
from .eleven_labs import *
from .languages import *
from .yandex import *
__all__ = [
"BaseLanguageCodesEnum",
"BaseVoiceModel",
"ElevenLabsLanguageCodesEnum",
"ElevenLabsListVoiceModelsModel",
"ElevenLabsVoiceModel",
"LANGUAGE_CODES_ENUM_TYPE",
"VoiceModelProvidersEnum",
"YandexLanguageCodesEnum",
"YandexListVoiceModelsModel",
"YandexVoiceModel",
]

View File

@@ -0,0 +1,29 @@
import enum
import typing
import pydantic
import lib.models.tts.voice.languages as models_tts_languages
class VoiceModelProvidersEnum(enum.Enum):
YANDEX = "yandex"
ELEVEN_LABS = "eleven_labs"
class BaseVoiceModel(pydantic.BaseModel):
voice_id: str
voice_name: str | None = None
languages: list[models_tts_languages.LANGUAGE_CODES_ENUM_TYPE]
provider: VoiceModelProvidersEnum
@pydantic.model_validator(mode="before")
@classmethod
def check_voice_name_exists(cls, data: typing.Any) -> typing.Any:
if not data:
return data
voice_id = data.get("voice_id")
voice_name = data.get("voice_name")
if not voice_name and voice_id:
data["voice_name"] = voice_id
return data

View File

@@ -0,0 +1,83 @@
import typing
import pydantic
import lib.models.tts.voice.base as models_tts_base
import lib.models.tts.voice.languages as models_tts_languages
class ElevenLabsVoiceModel(models_tts_base.BaseVoiceModel):
model_config = pydantic.ConfigDict(use_enum_values=True)
voice_id: str
voice_name: str | None = None
languages: list[models_tts_languages.LANGUAGE_CODES_ENUM_TYPE]
provider: models_tts_base.VoiceModelProvidersEnum = models_tts_base.VoiceModelProvidersEnum.ELEVEN_LABS
class ElevenLabsListVoiceModelsModel(pydantic.BaseModel):
models: list[ElevenLabsVoiceModel] = [
ElevenLabsVoiceModel(
voice_id="eleven_multilingual_v1",
languages=[
models_tts_languages.ElevenLabsLanguageCodesEnum.ENGLISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.GERMAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.POLISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.SPANISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.ITALIAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.FRENCH,
models_tts_languages.ElevenLabsLanguageCodesEnum.PORTUGUESE,
models_tts_languages.ElevenLabsLanguageCodesEnum.HINDI,
models_tts_languages.ElevenLabsLanguageCodesEnum.ARABIC,
],
),
ElevenLabsVoiceModel(
voice_id="eleven_multilingual_v2",
languages=[
models_tts_languages.ElevenLabsLanguageCodesEnum.ENGLISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.JAPANESE,
models_tts_languages.ElevenLabsLanguageCodesEnum.CHINESE,
models_tts_languages.ElevenLabsLanguageCodesEnum.GERMAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.HINDI,
models_tts_languages.ElevenLabsLanguageCodesEnum.FRENCH,
models_tts_languages.ElevenLabsLanguageCodesEnum.KOREAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.PORTUGUESE,
models_tts_languages.ElevenLabsLanguageCodesEnum.ITALIAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.SPANISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.INDONESIAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.DUTCH,
models_tts_languages.ElevenLabsLanguageCodesEnum.TURKISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.FILIPINO,
models_tts_languages.ElevenLabsLanguageCodesEnum.POLISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.SWEDISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.BULGARIAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.ROMANIAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.ARABIC,
models_tts_languages.ElevenLabsLanguageCodesEnum.CZECH,
models_tts_languages.ElevenLabsLanguageCodesEnum.GREEK,
models_tts_languages.ElevenLabsLanguageCodesEnum.FINNISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.CROATIAN,
models_tts_languages.ElevenLabsLanguageCodesEnum.MALAY,
models_tts_languages.ElevenLabsLanguageCodesEnum.SLOVAK,
models_tts_languages.ElevenLabsLanguageCodesEnum.DANISH,
models_tts_languages.ElevenLabsLanguageCodesEnum.TAMIL,
models_tts_languages.ElevenLabsLanguageCodesEnum.UKRAINIAN,
],
),
ElevenLabsVoiceModel(
voice_id="eleven_multilingual_v2",
languages=[models_tts_languages.ElevenLabsLanguageCodesEnum.ENGLISH],
),
]
@classmethod
def from_api(cls, voice_models_from_api: list[dict[str, typing.Any]]) -> typing.Self:
voice_models = []
for voice_model in voice_models_from_api:
voice_model["voice_id"] = voice_model.pop("model_id")
voice_model["voice_name"] = voice_model.pop("name")
voice_model["languages"] = [
models_tts_languages.ElevenLabsLanguageCodesEnum(item.get("language_id"))
for item in voice_model.pop("languages")
]
voice_models.append(ElevenLabsVoiceModel.model_validate(voice_model))
return ElevenLabsListVoiceModelsModel(models=voice_models)
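The payload shape from_api expects is inferred from the key renames above; the dict below is a made-up illustration, not a documented ElevenLabs response:

# Assumed input shape for from_api: each entry needs "model_id", "name"
# and a list of {"language_id": ...} items.
raw_models = [
    {
        "model_id": "eleven_multilingual_v1",
        "name": "Eleven Multilingual v1",
        "languages": [{"language_id": "en"}, {"language_id": "de"}],
    },
]
voices = ElevenLabsListVoiceModelsModel.from_api(raw_models)
assert voices.models[0].voice_id == "eleven_multilingual_v1"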

View File

@@ -0,0 +1,83 @@
import enum
class BaseLanguageCodesEnum(enum.Enum):
RUSSIAN = "ru"
ENGLISH = "en"
KAZAKH = "kk"
GERMAN = "de"
HEBREW = "he"
UZBEK = "uz"
JAPANESE = "ja"
CHINESE = "zh"
HINDI = "hi"
FRENCH = "fr"
KOREAN = "ko"
PORTUGUESE = "pt"
ITALIAN = "it"
SPANISH = "es"
INDONESIAN = "id"
DUTCH = "nl"
TURKISH = "tr"
FILIPINO = "fil"
POLISH = "pl"
SWEDISH = "sv"
BULGARIAN = "bg"
ROMANIAN = "ro"
ARABIC = "ar"
CZECH = "cs"
GREEK = "el"
FINNISH = "fi"
CROATIAN = "hr"
MALAY = "ms"
SLOVAK = "sk"
DANISH = "da"
TAMIL = "ta"
UKRAINIAN = "uk"
class ElevenLabsLanguageCodesEnum(enum.Enum):
RUSSIAN = "ru"
ENGLISH = "en"
KAZAKH = "kk"
GERMAN = "de"
HEBREW = "he"
UZBEK = "uz"
JAPANESE = "ja"
CHINESE = "zh"
HINDI = "hi"
FRENCH = "fr"
KOREAN = "ko"
PORTUGUESE = "pt"
ITALIAN = "it"
SPANISH = "es"
INDONESIAN = "id"
DUTCH = "nl"
TURKISH = "tr"
FILIPINO = "fil"
POLISH = "pl"
SWEDISH = "sv"
BULGARIAN = "bg"
ROMANIAN = "ro"
ARABIC = "ar"
CZECH = "cs"
GREEK = "el"
FINNISH = "fi"
CROATIAN = "hr"
MALAY = "ms"
SLOVAK = "sk"
DANISH = "da"
TAMIL = "ta"
UKRAINIAN = "uk"
class YandexLanguageCodesEnum(enum.Enum):
RUSSIAN = "ru-RU"
ENGLISH = "en-US"
KAZAKH = "kk-KK"
GERMAN = "de-DE"
HEBREW = "he-IL"
UZBEK = "uz-UZ"
LANGUAGE_CODES_ENUM_TYPE = BaseLanguageCodesEnum | ElevenLabsLanguageCodesEnum | YandexLanguageCodesEnum

View File

@@ -0,0 +1,99 @@
import typing
import pydantic
import lib.models.tts.voice.base as models_tts_base
import lib.models.tts.voice.languages as models_tts_languages
class YandexVoiceModel(models_tts_base.BaseVoiceModel):
voice_id: str
voice_name: str | None = None
languages: list[models_tts_languages.LANGUAGE_CODES_ENUM_TYPE]
provider: models_tts_base.VoiceModelProvidersEnum = models_tts_base.VoiceModelProvidersEnum.YANDEX
role: str | None = None
@pydantic.model_validator(mode="before")
@classmethod
def check_voice_name_exists(cls, data: typing.Any) -> typing.Any:
if not data:
return data
voice_id = data.get("voice_id")
voice_name = data.get("voice_name")
role = data.get("role")
if not voice_name and voice_id:
data["voice_name"] = f"{voice_id} {role}" if role else voice_id
return data
class YandexListVoiceModelsModel(pydantic.BaseModel):
models: list[YandexVoiceModel] = [
YandexVoiceModel(
voice_id="ermil", role="neutral", languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="ermil", role="good", languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="alena", role="neutral", languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="alena", role="good", languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="jane", role="neutral", languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="jane", role="good", languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="jane", role="evil", languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="omazh", role="neutral", languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="omazh", role="evil", languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="zahar", role="neutral", languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="zahar", role="good", languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="filipp", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="madirus", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(voice_id="dasha", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]),
YandexVoiceModel(voice_id="julia", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]),
YandexVoiceModel(voice_id="lera", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]),
YandexVoiceModel(
voice_id="marina", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="alexander", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(
voice_id="kirill", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]
),
YandexVoiceModel(voice_id="anton", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.RUSSIAN]),
YandexVoiceModel(voice_id="john", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.ENGLISH]),
YandexVoiceModel(voice_id="amira", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.KAZAKH]),
YandexVoiceModel(voice_id="madi", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.KAZAKH]),
YandexVoiceModel(voice_id="lea", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.GERMAN]),
YandexVoiceModel(
voice_id="naomi", role="modern", languages=[models_tts_languages.YandexLanguageCodesEnum.HEBREW]
),
YandexVoiceModel(
voice_id="naomi", role="classic", languages=[models_tts_languages.YandexLanguageCodesEnum.HEBREW]
),
YandexVoiceModel(voice_id="nigora", role=None, languages=[models_tts_languages.YandexLanguageCodesEnum.UZBEK]),
]
@classmethod
def from_api(cls, voice_models_from_api: list[dict[str, typing.Any]]) -> typing.Self:
voice_models = [YandexVoiceModel.model_validate(voice_model) for voice_model in voice_models_from_api]
return YandexListVoiceModelsModel(models=voice_models)

View File

@@ -1,8 +1,11 @@
import http
import mimetypes
import tempfile
import fastapi
import magic
import openai
import pydantic
import lib.app.settings as app_settings
import lib.stt as stt
@@ -24,15 +27,24 @@ class OpenaiSpeechRepository:
async def speech_to_text(self, audio: bytes) -> str:
file_extension = self.__get_file_extension_from_bytes(audio)
if not file_extension:
raise ValueError("File extension is not supported")
voice: stt.models.SttVoice = stt.models.SttVoice(
audio_size=len(audio) // 1024, # audio size in MB,
audio_format=file_extension,
audio_data=audio,
voice_settings=self.settings.voice,
)
if not file_extension or file_extension not in self.settings.voice.available_formats:
raise fastapi.HTTPException(
status_code=http.HTTPStatus.UNSUPPORTED_MEDIA_TYPE,
detail=f"File extension is not supported. "
f"Available extensions: {self.settings.voice.available_formats}",
)
try:
voice: stt.models.SttVoice = stt.models.SttVoice(
audio_size=len(audio) // 1024, # audio size in KB
audio_format=file_extension,
audio_data=audio,
voice_settings=self.settings.voice,
)
except (pydantic.ValidationError, ValueError) as e:
raise fastapi.HTTPException(
status_code=http.HTTPStatus.BAD_REQUEST,
detail=f"Voice validation error: {e}",
)
try:
with tempfile.NamedTemporaryFile(suffix=f".{file_extension}") as temp_file:
@@ -40,8 +52,14 @@ class OpenaiSpeechRepository:
temp_file.seek(0)
transcript = openai.Audio.transcribe(self.settings.openai.stt_model, temp_file) # type: ignore
except openai.error.InvalidRequestError as e: # type: ignore[reportGeneralTypeIssues]
raise ValueError(f"OpenAI API error: {e}")
raise fastapi.HTTPException(
status_code=http.HTTPStatus.BAD_REQUEST,
detail=f"OpenAI request error: {e}",
)
except openai.error.OpenAIError as e: # type: ignore[reportGeneralTypeIssues]
raise ValueError(f"OpenAI API error: {e}")
raise fastapi.HTTPException(
status_code=http.HTTPStatus.BAD_REQUEST,
detail=f"OpenAI API error: {e}",
)
return transcript.text # type: ignore[reportUnknownVariableType]

View File

@@ -0,0 +1,9 @@
from .repositories import *
from .services import *
__all__ = [
"TTSBaseRepository",
"TTSElevenLabsRepository",
"TTSService",
"TTSYandexRepository",
]

View File

@@ -0,0 +1,5 @@
from .protocols import *
__all__ = [
"TTSRepositoryProtocol",
]

View File

@@ -0,0 +1,16 @@
import typing
import lib.models as models
class TTSRepositoryProtocol(typing.Protocol):
async def get_audio_as_bytes(self, request: models.TTSCreateRequestModel) -> models.TTSCreateResponseModel:
...
async def get_voice_model_by_name(self, voice_model_name: str) -> models.BaseVoiceModel | None:
...
async def get_voice_models_by_fields(
self, fields: models.TTSSearchVoiceRequestModel
) -> models.LIST_VOICE_MODELS_TYPE:
...

View File

@@ -0,0 +1,9 @@
from .base import *
from .eleven_labs import *
from .yandex import *
__all__ = [
"TTSBaseRepository",
"TTSElevenLabsRepository",
"TTSYandexRepository",
]

View File

@@ -0,0 +1,56 @@
import abc
import lib.clients as clients
import lib.models as models
class TTSBaseRepository(abc.ABC):
def __init__(self, client: clients.AsyncHttpClient, is_models_from_api: bool = False):
self.http_client = client
self.is_models_from_api = is_models_from_api
@property
@abc.abstractmethod
async def voice_models(self) -> models.LIST_VOICE_MODELS_TYPE:
raise NotImplementedError
@abc.abstractmethod
async def get_audio_as_bytes(self, request: models.TTSCreateRequestModel) -> models.TTSCreateResponseModel:
raise NotImplementedError
async def get_voice_model_by_name(self, voice_model_name: str) -> models.BaseVoiceModel | None:
"""
Search voice model by name
:param voice_model_name: String name
:return: Voice model that matches the name
"""
voice_models = await self.voice_models
for voice_model in voice_models.models:
if voice_model.voice_name == voice_model_name:
return voice_model
async def get_list_voice_models_by_fields(
self, fields: models.TTSSearchVoiceRequestModel
) -> list[models.AVAILABLE_MODELS_TYPE]:
"""
Search voice models by fields
:param fields: Any fields from TTSSearchVoiceRequestModel
:return: All voice models that match the fields
"""
fields_dump = fields.model_dump(exclude_none=True)
voice_models_response = []
voice_models = await self.voice_models
for voice_model in voice_models.models:
for field, field_value in fields_dump.items():
if field == "languages": # language is a list
language_names: set[str] = {item.name for item in field_value}
voice_model_language_names: set[str] = {item.name for item in voice_model.languages}
if language_names.issubset(voice_model_language_names):
continue
break
voice_model_dump = voice_model.model_dump()
if voice_model_dump[field] != field_value.name:
break
else:
voice_models_response.append(voice_model)
return voice_models_response # type: ignore[reportUnknownVariableType]
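To make the for/else matching concrete, a hedged example of the intended semantics, using the Yandex voice defaults that appear later in this diff:

# Hypothetical search; `yandex_repository` is assumed to be a concrete subclass instance.
fields = models.TTSSearchVoiceRequestModel(
    languages=[models.YandexLanguageCodesEnum.KAZAKH],
)
# Inside an async context:
# matches = await yandex_repository.get_list_voice_models_by_fields(fields)
# Only the voices whose language set includes KAZAKH ("amira", "madi") are returned.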

View File

@@ -0,0 +1,42 @@
import typing
import lib.app.split_settings as app_split_settings
import lib.clients as clients
import lib.models as models
import lib.tts.repositories.base as tts_repositories_base
class TTSElevenLabsRepository(tts_repositories_base.TTSBaseRepository):
def __init__(
self,
tts_settings: app_split_settings.TTSElevenLabsSettings,
client: clients.AsyncHttpClient,
is_models_from_api: bool = False,
):
self.tts_settings = tts_settings
super().__init__(client, is_models_from_api)
@property
async def voice_models(self) -> models.ElevenLabsListVoiceModelsModel:
if self.is_models_from_api:
return models.ElevenLabsListVoiceModelsModel.from_api(await self.get_all_models_dict_from_api())
return models.ElevenLabsListVoiceModelsModel()
async def get_all_models_dict_from_api(self) -> list[dict[str, typing.Any]]:
response = await self.http_client.get("/models")
return response.json()
async def get_audio_as_bytes(self, request: models.TTSCreateRequestModel) -> models.TTSCreateResponseModel:
if not isinstance(request.voice_model, models.ElevenLabsVoiceModel):
raise ValueError("ElevenLabs TTS support only ElevenLabsVoiceModel")
response = await self.http_client.post(
f"/text-to-speech/{self.tts_settings.default_voice_id}",
json={"text": request.text, "model_id": request.voice_model.voice_id},
)
return models.TTSCreateResponseModel(audio_content=response.content)
async def get_voice_models_by_fields(
self, fields: models.TTSSearchVoiceRequestModel
) -> models.ElevenLabsListVoiceModelsModel:
list_voice_models = await self.get_list_voice_models_by_fields(fields)
return models.ElevenLabsListVoiceModelsModel(models=list_voice_models) # type: ignore

View File

@@ -0,0 +1,48 @@
import logging
import lib.app.split_settings as app_split_settings
import lib.clients as clients
import lib.models as models
import lib.tts.repositories.base as tts_repositories_base
logger = logging.getLogger(__name__)
class TTSYandexRepository(tts_repositories_base.TTSBaseRepository):
def __init__(
self,
tts_settings: app_split_settings.TTSYandexSettings,
client: clients.AsyncHttpClient,
is_models_from_api: bool = False,
):
self.tts_settings = tts_settings
if is_models_from_api:
logger.warning("Yandex TTS doesn't support getting models from API")
super().__init__(client, is_models_from_api=False)
@property
async def voice_models(self) -> models.YandexListVoiceModelsModel:
return models.YandexListVoiceModelsModel()
async def get_audio_as_bytes(self, request: models.TTSCreateRequestModel) -> models.TTSCreateResponseModel:
if not isinstance(request.voice_model, models.YandexVoiceModel):
raise ValueError("Yandex TTS support only YandexVoiceModel")
data = {
"text": request.text,
"lang": request.voice_model.languages[0].value,
"voice": request.voice_model.voice_id,
"emotion": request.voice_model.role,
"format": self.tts_settings.audio_format,
"sampleRateHertz": self.tts_settings.sample_rate_hertz,
}
response = await self.http_client.post(
"/tts:synthesize",
data=data,
)
return models.TTSCreateResponseModel(audio_content=response.content)
async def get_voice_models_by_fields(
self, fields: models.TTSSearchVoiceRequestModel
) -> models.YandexListVoiceModelsModel:
list_voice_models = await self.get_list_voice_models_by_fields(fields)
return models.YandexListVoiceModelsModel(models=list_voice_models) # type: ignore

View File

@@ -0,0 +1,33 @@
import lib.models as _models
import lib.tts.models as tts_models
class TTSService:
def __init__(
self,
repositories: dict[_models.VoiceModelProvidersEnum, tts_models.TTSRepositoryProtocol],
):
self.repositories = repositories
async def get_audio_as_bytes(self, request: _models.TTSCreateRequestModel) -> _models.TTSCreateResponseModel:
model = request.voice_model
repository = self.repositories[model.provider]
audio_response = await repository.get_audio_as_bytes(request)
return audio_response
async def get_voice_model_by_name(self, voice_model_name: str) -> _models.BaseVoiceModel | None:
for repository in self.repositories.values():
voice_model = await repository.get_voice_model_by_name(voice_model_name)
if voice_model:
return voice_model
raise ValueError("Voice model not found")
async def get_list_voice_models_by_fields(
self, fields: _models.TTSSearchVoiceRequestModel
) -> list[_models.AVAILABLE_MODELS_TYPE]:
response_models: list[_models.AVAILABLE_MODELS_TYPE] = []
for repository in self.repositories.values():
voice_models = await repository.get_voice_models_by_fields(fields)
if voice_models.models:
response_models.extend(voice_models.models)
return response_models
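A hedged sketch of how the service above is wired and used; it mirrors the Application setup earlier in this diff, with the two repository instances assumed to exist as shown there:

# Hypothetical usage; repository construction is elided (see the Application diff above).
import lib.models as models
import lib.tts as tts

async def synthesize(text: str, yandex_repo, eleven_labs_repo) -> bytes:
    tts_service = tts.TTSService(
        repositories={
            models.VoiceModelProvidersEnum.YANDEX: yandex_repo,
            models.VoiceModelProvidersEnum.ELEVEN_LABS: eleven_labs_repo,
        },
    )
    # DEFAULT_MODEL is an ElevenLabs voice, so this request dispatches to eleven_labs_repo.
    response = await tts_service.get_audio_as_bytes(models.TTSCreateRequestModel(text=text))
    return response.audio_content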

View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
[[package]]
name = "aiohttp"
@@ -645,7 +645,6 @@ files = [
{file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"},
{file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"},
{file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"},
{file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d967650d3f56af314b72df7089d96cda1083a7fc2da05b375d2bc48c82ab3f3c"},
{file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"},
{file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"},
{file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"},
@@ -654,7 +653,6 @@ files = [
{file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"},
{file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"},
{file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"},
{file = "greenlet-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d4606a527e30548153be1a9f155f4e283d109ffba663a15856089fb55f933e47"},
{file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"},
{file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"},
{file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"},
@@ -684,7 +682,6 @@ files = [
{file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"},
{file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"},
{file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"},
{file = "greenlet-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1087300cf9700bbf455b1b97e24db18f2f77b55302a68272c56209d5587c12d1"},
{file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"},
{file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"},
{file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"},
@@ -693,7 +690,6 @@ files = [
{file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"},
{file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"},
{file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"},
{file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8512a0c38cfd4e66a858ddd1b17705587900dd760c6003998e9472b77b56d417"},
{file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"},
{file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"},
{file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"},
@@ -1203,25 +1199,71 @@ wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1
[[package]]
name = "orjson"
version = "3.9.8"
version = "3.9.7"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.7"
files = [
{file = "orjson-3.9.8-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:823525bfb27b804b492acc59a45dc0973ea629d97557eac81dde7b34b5267611"},
{file = "orjson-3.9.8-cp310-none-win32.whl", hash = "sha256:2bcc9dc53f9e1d679515349bf299ed5e75310146c755d2ba227a7e37851ab3fb"},
{file = "orjson-3.9.8-cp310-none-win_amd64.whl", hash = "sha256:423774c85e73054acfef10fc3328f35c8d3e0193a7247d47308ebfccde70695d"},
{file = "orjson-3.9.8-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8a1c92f467f5fd0f8fb79273006b563364b1e45667b3760423498348dc2e22fa"},
{file = "orjson-3.9.8-cp311-none-win32.whl", hash = "sha256:a119c73520192c2882d0549151b9cdd65e0bb5396bedf8951ba5f70d6a873879"},
{file = "orjson-3.9.8-cp311-none-win_amd64.whl", hash = "sha256:764306f6370e6c76cbbf3139dd9b05be9c4481ee0b15966bd1907827a5777216"},
{file = "orjson-3.9.8-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:af8e6185516ce0c93d6ce1f4105918504da629c631fd969686f32a1be3ed3c9b"},
{file = "orjson-3.9.8-cp312-none-win_amd64.whl", hash = "sha256:5c818f19315251d68954c529f5d8322053f1c35b500b47d008e968bf2d32ed97"},
{file = "orjson-3.9.8-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e6a267c0fc64fc4d0b8fb146e1a060a40f570441a9390ec4bc6de0b5fda148cd"},
{file = "orjson-3.9.8-cp38-none-win32.whl", hash = "sha256:9df23493a72f073b2ab1005e628a963248dc577a2816e9c82caf09ff74908414"},
{file = "orjson-3.9.8-cp38-none-win_amd64.whl", hash = "sha256:34eec476141a043d478651d1efbf218162cdd57add24dfa659ac89e1a001477a"},
{file = "orjson-3.9.8-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c9ae634b8a55539c3d5a53813552325733ab3da3601feef8e99f91cef634f3c4"},
{file = "orjson-3.9.8-cp39-none-win32.whl", hash = "sha256:ca4f3e15517bdcdb573dfe6c97d4171247ce50ec82e3a7b708941b53d5f4bc29"},
{file = "orjson-3.9.8-cp39-none-win_amd64.whl", hash = "sha256:52c0480d5be12697b10b4d748b86acd4999f47e1d8e44e49486d0a550f30fcba"},
{file = "orjson-3.9.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b6df858e37c321cefbf27fe7ece30a950bcc3a75618a804a0dcef7ed9dd9c92d"},
{file = "orjson-3.9.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5198633137780d78b86bb54dafaaa9baea698b4f059456cd4554ab7009619221"},
{file = "orjson-3.9.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e736815b30f7e3c9044ec06a98ee59e217a833227e10eb157f44071faddd7c5"},
{file = "orjson-3.9.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a19e4074bc98793458b4b3ba35a9a1d132179345e60e152a1bb48c538ab863c4"},
{file = "orjson-3.9.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80acafe396ab689a326ab0d80f8cc61dec0dd2c5dca5b4b3825e7b1e0132c101"},
{file = "orjson-3.9.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:355efdbbf0cecc3bd9b12589b8f8e9f03c813a115efa53f8dc2a523bfdb01334"},
{file = "orjson-3.9.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3aab72d2cef7f1dd6104c89b0b4d6b416b0db5ca87cc2fac5f79c5601f549cc2"},
{file = "orjson-3.9.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:36b1df2e4095368ee388190687cb1b8557c67bc38400a942a1a77713580b50ae"},
{file = "orjson-3.9.7-cp310-none-win32.whl", hash = "sha256:e94b7b31aa0d65f5b7c72dd8f8227dbd3e30354b99e7a9af096d967a77f2a580"},
{file = "orjson-3.9.7-cp310-none-win_amd64.whl", hash = "sha256:82720ab0cf5bb436bbd97a319ac529aee06077ff7e61cab57cee04a596c4f9b4"},
{file = "orjson-3.9.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1f8b47650f90e298b78ecf4df003f66f54acdba6a0f763cc4df1eab048fe3738"},
{file = "orjson-3.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f738fee63eb263530efd4d2e9c76316c1f47b3bbf38c1bf45ae9625feed0395e"},
{file = "orjson-3.9.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38e34c3a21ed41a7dbd5349e24c3725be5416641fdeedf8f56fcbab6d981c900"},
{file = "orjson-3.9.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:21a3344163be3b2c7e22cef14fa5abe957a892b2ea0525ee86ad8186921b6cf0"},
{file = "orjson-3.9.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23be6b22aab83f440b62a6f5975bcabeecb672bc627face6a83bc7aeb495dc7e"},
{file = "orjson-3.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5205ec0dfab1887dd383597012199f5175035e782cdb013c542187d280ca443"},
{file = "orjson-3.9.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8769806ea0b45d7bf75cad253fba9ac6700b7050ebb19337ff6b4e9060f963fa"},
{file = "orjson-3.9.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f9e01239abea2f52a429fe9d95c96df95f078f0172489d691b4a848ace54a476"},
{file = "orjson-3.9.7-cp311-none-win32.whl", hash = "sha256:8bdb6c911dae5fbf110fe4f5cba578437526334df381b3554b6ab7f626e5eeca"},
{file = "orjson-3.9.7-cp311-none-win_amd64.whl", hash = "sha256:9d62c583b5110e6a5cf5169ab616aa4ec71f2c0c30f833306f9e378cf51b6c86"},
{file = "orjson-3.9.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1c3cee5c23979deb8d1b82dc4cc49be59cccc0547999dbe9adb434bb7af11cf7"},
{file = "orjson-3.9.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a347d7b43cb609e780ff8d7b3107d4bcb5b6fd09c2702aa7bdf52f15ed09fa09"},
{file = "orjson-3.9.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:154fd67216c2ca38a2edb4089584504fbb6c0694b518b9020ad35ecc97252bb9"},
{file = "orjson-3.9.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ea3e63e61b4b0beeb08508458bdff2daca7a321468d3c4b320a758a2f554d31"},
{file = "orjson-3.9.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1eb0b0b2476f357eb2975ff040ef23978137aa674cd86204cfd15d2d17318588"},
{file = "orjson-3.9.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b9a20a03576c6b7022926f614ac5a6b0914486825eac89196adf3267c6489d"},
{file = "orjson-3.9.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:915e22c93e7b7b636240c5a79da5f6e4e84988d699656c8e27f2ac4c95b8dcc0"},
{file = "orjson-3.9.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f26fb3e8e3e2ee405c947ff44a3e384e8fa1843bc35830fe6f3d9a95a1147b6e"},
{file = "orjson-3.9.7-cp312-none-win_amd64.whl", hash = "sha256:d8692948cada6ee21f33db5e23460f71c8010d6dfcfe293c9b96737600a7df78"},
{file = "orjson-3.9.7-cp37-cp37m-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7bab596678d29ad969a524823c4e828929a90c09e91cc438e0ad79b37ce41166"},
{file = "orjson-3.9.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63ef3d371ea0b7239ace284cab9cd00d9c92b73119a7c274b437adb09bda35e6"},
{file = "orjson-3.9.7-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f8fcf696bbbc584c0c7ed4adb92fd2ad7d153a50258842787bc1524e50d7081"},
{file = "orjson-3.9.7-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90fe73a1f0321265126cbba13677dcceb367d926c7a65807bd80916af4c17047"},
{file = "orjson-3.9.7-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45a47f41b6c3beeb31ac5cf0ff7524987cfcce0a10c43156eb3ee8d92d92bf22"},
{file = "orjson-3.9.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a2937f528c84e64be20cb80e70cea76a6dfb74b628a04dab130679d4454395c"},
{file = "orjson-3.9.7-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b4fb306c96e04c5863d52ba8d65137917a3d999059c11e659eba7b75a69167bd"},
{file = "orjson-3.9.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:410aa9d34ad1089898f3db461b7b744d0efcf9252a9415bbdf23540d4f67589f"},
{file = "orjson-3.9.7-cp37-none-win32.whl", hash = "sha256:26ffb398de58247ff7bde895fe30817a036f967b0ad0e1cf2b54bda5f8dcfdd9"},
{file = "orjson-3.9.7-cp37-none-win_amd64.whl", hash = "sha256:bcb9a60ed2101af2af450318cd89c6b8313e9f8df4e8fb12b657b2e97227cf08"},
{file = "orjson-3.9.7-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5da9032dac184b2ae2da4bce423edff7db34bfd936ebd7d4207ea45840f03905"},
{file = "orjson-3.9.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7951af8f2998045c656ba8062e8edf5e83fd82b912534ab1de1345de08a41d2b"},
{file = "orjson-3.9.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b8e59650292aa3a8ea78073fc84184538783966528e442a1b9ed653aa282edcf"},
{file = "orjson-3.9.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9274ba499e7dfb8a651ee876d80386b481336d3868cba29af839370514e4dce0"},
{file = "orjson-3.9.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca1706e8b8b565e934c142db6a9592e6401dc430e4b067a97781a997070c5378"},
{file = "orjson-3.9.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83cc275cf6dcb1a248e1876cdefd3f9b5f01063854acdfd687ec360cd3c9712a"},
{file = "orjson-3.9.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:11c10f31f2c2056585f89d8229a56013bc2fe5de51e095ebc71868d070a8dd81"},
{file = "orjson-3.9.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cf334ce1d2fadd1bf3e5e9bf15e58e0c42b26eb6590875ce65bd877d917a58aa"},
{file = "orjson-3.9.7-cp38-none-win32.whl", hash = "sha256:76a0fc023910d8a8ab64daed8d31d608446d2d77c6474b616b34537aa7b79c7f"},
{file = "orjson-3.9.7-cp38-none-win_amd64.whl", hash = "sha256:7a34a199d89d82d1897fd4a47820eb50947eec9cda5fd73f4578ff692a912f89"},
{file = "orjson-3.9.7-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e7e7f44e091b93eb39db88bb0cb765db09b7a7f64aea2f35e7d86cbf47046c65"},
{file = "orjson-3.9.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01d647b2a9c45a23a84c3e70e19d120011cba5f56131d185c1b78685457320bb"},
{file = "orjson-3.9.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0eb850a87e900a9c484150c414e21af53a6125a13f6e378cf4cc11ae86c8f9c5"},
{file = "orjson-3.9.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f4b0042d8388ac85b8330b65406c84c3229420a05068445c13ca28cc222f1f7"},
{file = "orjson-3.9.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd3e7aae977c723cc1dbb82f97babdb5e5fbce109630fbabb2ea5053523c89d3"},
{file = "orjson-3.9.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c616b796358a70b1f675a24628e4823b67d9e376df2703e893da58247458956"},
{file = "orjson-3.9.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c3ba725cf5cf87d2d2d988d39c6a2a8b6fc983d78ff71bc728b0be54c869c884"},
{file = "orjson-3.9.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4891d4c934f88b6c29b56395dfc7014ebf7e10b9e22ffd9877784e16c6b2064f"},
{file = "orjson-3.9.7-cp39-none-win32.whl", hash = "sha256:14d3fb6cd1040a4a4a530b28e8085131ed94ebc90d72793c59a713de34b60838"},
{file = "orjson-3.9.7-cp39-none-win_amd64.whl", hash = "sha256:9ef82157bbcecd75d6296d5d8b2d792242afcd064eb1ac573f8847b52e58f677"},
{file = "orjson-3.9.7.tar.gz", hash = "sha256:85e39198f78e2f7e054d296395f6c96f5e02892337746ef5b6a1bf3ed5910142"},
]
[[package]]
@@ -1600,13 +1642,13 @@ pytest = ">=4.6"
[[package]]
name = "pyright"
version = "1.1.330.post0"
version = "1.1.331"
description = "Command line wrapper for pyright"
optional = false
python-versions = ">=3.7"
files = [
{file = "pyright-1.1.330.post0-py3-none-any.whl", hash = "sha256:2e9e0878298685b66485b340a0aaa16342129eb03ff9ed0e3c1ab66b8bfbe914"},
{file = "pyright-1.1.330.post0.tar.gz", hash = "sha256:8e5b09cc5d1cfa0bcbf8824b0316d21c43fe229da7cef0a09cd12fcf6cb3eedd"},
{file = "pyright-1.1.331-py3-none-any.whl", hash = "sha256:d200a01794e7f2a04d5042a6c3abee36ce92780287d3037edfc3604d45488f0e"},
{file = "pyright-1.1.331.tar.gz", hash = "sha256:c3e7b86154cac86c3bd61ea0f963143d001c201e246825aaabdddfcce5d04293"},
]
[package.dependencies]
@@ -1649,6 +1691,22 @@ files = [
[package.dependencies]
six = ">=1.5"
name = "pytest-asyncio"
version = "0.21.1"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"},
{file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"},
]
[package.dependencies]
pytest = ">=7.0.0"
[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]
[[package]]
name = "python-dotenv"
@@ -1696,6 +1754,20 @@ files = [
{file = "python_magic-0.4.27-py2.py3-none-any.whl", hash = "sha256:c212960ad306f700aa0d01e5d7a325d20548ff97eb9920dcd29513174f0294d3"},
]
[[package]]
name = "python-multipart"
version = "0.0.6"
description = "A streaming multipart parser for Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "python_multipart-0.0.6-py3-none-any.whl", hash = "sha256:ee698bab5ef148b0a760751c261902cd096e57e10558e11aca17646b74ee1c18"},
{file = "python_multipart-0.0.6.tar.gz", hash = "sha256:e9925a80bb668529f1b67c7fdb0a5dacdd7cbfc6fb0bff3ea443fe22bdd62132"},
]
[package.extras]
dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatch", "invoke (==1.7.3)", "more-itertools (==4.3.0)", "pbr (==4.3.0)", "pluggy (==1.0.0)", "py (==1.11.0)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-timeout (==2.1.0)", "pyyaml (==5.1)"]
[[package]]
name = "pyupgrade"
version = "3.15.0"
@@ -1874,56 +1946,64 @@ tokenize-rt = ">=3.0.1"
[[package]]
name = "sqlalchemy"
version = "2.0.21"
version = "2.0.22"
description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
files = [
{file = "SQLAlchemy-2.0.21-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1e7dc99b23e33c71d720c4ae37ebb095bebebbd31a24b7d99dfc4753d2803ede"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7f0c4ee579acfe6c994637527c386d1c22eb60bc1c1d36d940d8477e482095d4"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f7d57a7e140efe69ce2d7b057c3f9a595f98d0bbdfc23fd055efdfbaa46e3a5"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca38746eac23dd7c20bec9278d2058c7ad662b2f1576e4c3dbfcd7c00cc48fa"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3cf229704074bce31f7f47d12883afee3b0a02bb233a0ba45ddbfe542939cca4"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fb87f763b5d04a82ae84ccff25554ffd903baafba6698e18ebaf32561f2fe4aa"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-win32.whl", hash = "sha256:89e274604abb1a7fd5c14867a412c9d49c08ccf6ce3e1e04fffc068b5b6499d4"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-win_amd64.whl", hash = "sha256:e36339a68126ffb708dc6d1948161cea2a9e85d7d7b0c54f6999853d70d44430"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bf8eebccc66829010f06fbd2b80095d7872991bfe8415098b9fe47deaaa58063"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b977bfce15afa53d9cf6a632482d7968477625f030d86a109f7bdfe8ce3c064a"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ff3dc2f60dbf82c9e599c2915db1526d65415be323464f84de8db3e361ba5b9"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44ac5c89b6896f4740e7091f4a0ff2e62881da80c239dd9408f84f75a293dae9"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:87bf91ebf15258c4701d71dcdd9c4ba39521fb6a37379ea68088ce8cd869b446"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b69f1f754d92eb1cc6b50938359dead36b96a1dcf11a8670bff65fd9b21a4b09"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-win32.whl", hash = "sha256:af520a730d523eab77d754f5cf44cc7dd7ad2d54907adeb3233177eeb22f271b"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-win_amd64.whl", hash = "sha256:141675dae56522126986fa4ca713739d00ed3a6f08f3c2eb92c39c6dfec463ce"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7614f1eab4336df7dd6bee05bc974f2b02c38d3d0c78060c5faa4cd1ca2af3b8"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d59cb9e20d79686aa473e0302e4a82882d7118744d30bb1dfb62d3c47141b3ec"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a95aa0672e3065d43c8aa80080cdd5cc40fe92dc873749e6c1cf23914c4b83af"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8c323813963b2503e54d0944813cd479c10c636e3ee223bcbd7bd478bf53c178"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:419b1276b55925b5ac9b4c7044e999f1787c69761a3c9756dec6e5c225ceca01"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-win32.whl", hash = "sha256:4615623a490e46be85fbaa6335f35cf80e61df0783240afe7d4f544778c315a9"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-win_amd64.whl", hash = "sha256:cca720d05389ab1a5877ff05af96551e58ba65e8dc65582d849ac83ddde3e231"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b4eae01faee9f2b17f08885e3f047153ae0416648f8e8c8bd9bc677c5ce64be9"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3eb7c03fe1cd3255811cd4e74db1ab8dca22074d50cd8937edf4ef62d758cdf4"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2d494b6a2a2d05fb99f01b84cc9af9f5f93bf3e1e5dbdafe4bed0c2823584c1"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b19ae41ef26c01a987e49e37c77b9ad060c59f94d3b3efdfdbf4f3daaca7b5fe"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:fc6b15465fabccc94bf7e38777d665b6a4f95efd1725049d6184b3a39fd54880"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:014794b60d2021cc8ae0f91d4d0331fe92691ae5467a00841f7130fe877b678e"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-win32.whl", hash = "sha256:0268256a34806e5d1c8f7ee93277d7ea8cc8ae391f487213139018b6805aeaf6"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-win_amd64.whl", hash = "sha256:73c079e21d10ff2be54a4699f55865d4b275fd6c8bd5d90c5b1ef78ae0197301"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:785e2f2c1cb50d0a44e2cdeea5fd36b5bf2d79c481c10f3a88a8be4cfa2c4615"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c111cd40910ffcb615b33605fc8f8e22146aeb7933d06569ac90f219818345ef"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9cba4e7369de663611ce7460a34be48e999e0bbb1feb9130070f0685e9a6b66"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50a69067af86ec7f11a8e50ba85544657b1477aabf64fa447fd3736b5a0a4f67"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ccb99c3138c9bde118b51a289d90096a3791658da9aea1754667302ed6564f6e"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:513fd5b6513d37e985eb5b7ed89da5fd9e72354e3523980ef00d439bc549c9e9"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-win32.whl", hash = "sha256:f9fefd6298433b6e9188252f3bff53b9ff0443c8fde27298b8a2b19f6617eeb9"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-win_amd64.whl", hash = "sha256:2e617727fe4091cedb3e4409b39368f424934c7faa78171749f704b49b4bb4ce"},
{file = "SQLAlchemy-2.0.21-py3-none-any.whl", hash = "sha256:ea7da25ee458d8f404b93eb073116156fd7d8c2a776d8311534851f28277b4ce"},
{file = "SQLAlchemy-2.0.21.tar.gz", hash = "sha256:05b971ab1ac2994a14c56b35eaaa91f86ba080e9ad481b20d99d77f381bb6258"},
{file = "SQLAlchemy-2.0.22-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f146c61ae128ab43ea3a0955de1af7e1633942c2b2b4985ac51cc292daf33222"},
{file = "SQLAlchemy-2.0.22-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:875de9414393e778b655a3d97d60465eb3fae7c919e88b70cc10b40b9f56042d"},
{file = "SQLAlchemy-2.0.22-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13790cb42f917c45c9c850b39b9941539ca8ee7917dacf099cc0b569f3d40da7"},
{file = "SQLAlchemy-2.0.22-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e04ab55cf49daf1aeb8c622c54d23fa4bec91cb051a43cc24351ba97e1dd09f5"},
{file = "SQLAlchemy-2.0.22-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a42c9fa3abcda0dcfad053e49c4f752eef71ecd8c155221e18b99d4224621176"},
{file = "SQLAlchemy-2.0.22-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:14cd3bcbb853379fef2cd01e7c64a5d6f1d005406d877ed9509afb7a05ff40a5"},
{file = "SQLAlchemy-2.0.22-cp310-cp310-win32.whl", hash = "sha256:d143c5a9dada696bcfdb96ba2de4a47d5a89168e71d05a076e88a01386872f97"},
{file = "SQLAlchemy-2.0.22-cp310-cp310-win_amd64.whl", hash = "sha256:ccd87c25e4c8559e1b918d46b4fa90b37f459c9b4566f1dfbce0eb8122571547"},
{file = "SQLAlchemy-2.0.22-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4f6ff392b27a743c1ad346d215655503cec64405d3b694228b3454878bf21590"},
{file = "SQLAlchemy-2.0.22-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f776c2c30f0e5f4db45c3ee11a5f2a8d9de68e81eb73ec4237de1e32e04ae81c"},
{file = "SQLAlchemy-2.0.22-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8f1792d20d2f4e875ce7a113f43c3561ad12b34ff796b84002a256f37ce9437"},
{file = "SQLAlchemy-2.0.22-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d80eeb5189d7d4b1af519fc3f148fe7521b9dfce8f4d6a0820e8f5769b005051"},
{file = "SQLAlchemy-2.0.22-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:69fd9e41cf9368afa034e1c81f3570afb96f30fcd2eb1ef29cb4d9371c6eece2"},
{file = "SQLAlchemy-2.0.22-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54bcceaf4eebef07dadfde424f5c26b491e4a64e61761dea9459103ecd6ccc95"},
{file = "SQLAlchemy-2.0.22-cp311-cp311-win32.whl", hash = "sha256:7ee7ccf47aa503033b6afd57efbac6b9e05180f492aeed9fcf70752556f95624"},
{file = "SQLAlchemy-2.0.22-cp311-cp311-win_amd64.whl", hash = "sha256:b560f075c151900587ade06706b0c51d04b3277c111151997ea0813455378ae0"},
{file = "SQLAlchemy-2.0.22-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2c9bac865ee06d27a1533471405ad240a6f5d83195eca481f9fc4a71d8b87df8"},
{file = "SQLAlchemy-2.0.22-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:625b72d77ac8ac23da3b1622e2da88c4aedaee14df47c8432bf8f6495e655de2"},
{file = "SQLAlchemy-2.0.22-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b39a6e21110204a8c08d40ff56a73ba542ec60bab701c36ce721e7990df49fb9"},
{file = "SQLAlchemy-2.0.22-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53a766cb0b468223cafdf63e2d37f14a4757476157927b09300c8c5832d88560"},
{file = "SQLAlchemy-2.0.22-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0e1ce8ebd2e040357dde01a3fb7d30d9b5736b3e54a94002641dfd0aa12ae6ce"},
{file = "SQLAlchemy-2.0.22-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:505f503763a767556fa4deae5194b2be056b64ecca72ac65224381a0acab7ebe"},
{file = "SQLAlchemy-2.0.22-cp312-cp312-win32.whl", hash = "sha256:154a32f3c7b00de3d090bc60ec8006a78149e221f1182e3edcf0376016be9396"},
{file = "SQLAlchemy-2.0.22-cp312-cp312-win_amd64.whl", hash = "sha256:129415f89744b05741c6f0b04a84525f37fbabe5dc3774f7edf100e7458c48cd"},
{file = "SQLAlchemy-2.0.22-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3940677d341f2b685a999bffe7078697b5848a40b5f6952794ffcf3af150c301"},
{file = "SQLAlchemy-2.0.22-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55914d45a631b81a8a2cb1a54f03eea265cf1783241ac55396ec6d735be14883"},
{file = "SQLAlchemy-2.0.22-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2096d6b018d242a2bcc9e451618166f860bb0304f590d205173d317b69986c95"},
{file = "SQLAlchemy-2.0.22-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:19c6986cf2fb4bc8e0e846f97f4135a8e753b57d2aaaa87c50f9acbe606bd1db"},
{file = "SQLAlchemy-2.0.22-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6ac28bd6888fe3c81fbe97584eb0b96804bd7032d6100b9701255d9441373ec1"},
{file = "SQLAlchemy-2.0.22-cp37-cp37m-win32.whl", hash = "sha256:cb9a758ad973e795267da334a92dd82bb7555cb36a0960dcabcf724d26299db8"},
{file = "SQLAlchemy-2.0.22-cp37-cp37m-win_amd64.whl", hash = "sha256:40b1206a0d923e73aa54f0a6bd61419a96b914f1cd19900b6c8226899d9742ad"},
{file = "SQLAlchemy-2.0.22-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3aa1472bf44f61dd27987cd051f1c893b7d3b17238bff8c23fceaef4f1133868"},
{file = "SQLAlchemy-2.0.22-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:56a7e2bb639df9263bf6418231bc2a92a773f57886d371ddb7a869a24919face"},
{file = "SQLAlchemy-2.0.22-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ccca778c0737a773a1ad86b68bda52a71ad5950b25e120b6eb1330f0df54c3d0"},
{file = "SQLAlchemy-2.0.22-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c6c3e9350f9fb16de5b5e5fbf17b578811a52d71bb784cc5ff71acb7de2a7f9"},
{file = "SQLAlchemy-2.0.22-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:564e9f9e4e6466273dbfab0e0a2e5fe819eec480c57b53a2cdee8e4fdae3ad5f"},
{file = "SQLAlchemy-2.0.22-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:af66001d7b76a3fab0d5e4c1ec9339ac45748bc4a399cbc2baa48c1980d3c1f4"},
{file = "SQLAlchemy-2.0.22-cp38-cp38-win32.whl", hash = "sha256:9e55dff5ec115316dd7a083cdc1a52de63693695aecf72bc53a8e1468ce429e5"},
{file = "SQLAlchemy-2.0.22-cp38-cp38-win_amd64.whl", hash = "sha256:4e869a8ff7ee7a833b74868a0887e8462445ec462432d8cbeff5e85f475186da"},
{file = "SQLAlchemy-2.0.22-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9886a72c8e6371280cb247c5d32c9c8fa141dc560124348762db8a8b236f8692"},
{file = "SQLAlchemy-2.0.22-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a571bc8ac092a3175a1d994794a8e7a1f2f651e7c744de24a19b4f740fe95034"},
{file = "SQLAlchemy-2.0.22-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8db5ba8b7da759b727faebc4289a9e6a51edadc7fc32207a30f7c6203a181592"},
{file = "SQLAlchemy-2.0.22-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0b3f2686c3f162123adba3cb8b626ed7e9b8433ab528e36ed270b4f70d1cdb"},
{file = "SQLAlchemy-2.0.22-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0c1fea8c0abcb070ffe15311853abfda4e55bf7dc1d4889497b3403629f3bf00"},
{file = "SQLAlchemy-2.0.22-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4bb062784f37b2d75fd9b074c8ec360ad5df71f933f927e9e95c50eb8e05323c"},
{file = "SQLAlchemy-2.0.22-cp39-cp39-win32.whl", hash = "sha256:58a3aba1bfb32ae7af68da3f277ed91d9f57620cf7ce651db96636790a78b736"},
{file = "SQLAlchemy-2.0.22-cp39-cp39-win_amd64.whl", hash = "sha256:92e512a6af769e4725fa5b25981ba790335d42c5977e94ded07db7d641490a85"},
{file = "SQLAlchemy-2.0.22-py3-none-any.whl", hash = "sha256:3076740335e4aaadd7deb3fe6dcb96b3015f1613bd190a4e1634e1b99b02ec86"},
{file = "SQLAlchemy-2.0.22.tar.gz", hash = "sha256:5434cc601aa17570d79e5377f5fd45ff92f9379e2abed0be5e8c2fba8d353d2b"},
]
[package.dependencies]
greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""}
greenlet = {version = "!=0.4.17", markers = "platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\""}
typing-extensions = ">=4.2.0"
[package.extras]
@@ -2272,4 +2352,4 @@ multidict = ">=4.0"
[metadata]
lock-version = "2.0"
python-versions = "^3.11"
content-hash = "5212b83adf6d4f20bc25f36c0f79039516153c470fce1975ed8a30596746a113"
content-hash = "cf7c2e88dd377d6929d87da3553dabdc48acaa30d58f7de2d8303159180b0c09"

View File

@@ -29,14 +29,19 @@ greenlet = "^2.0.2"
httpx = "^0.25.0"
langchain = "^0.0.312"
openai = "^0.28.1"
orjson = "^3.9.7"
pgvector = "^0.2.3"
multidict = "^6.0.4"
openai = "^0.28.1"
orjson = "3.9.7"
psycopg2-binary = "^2.9.9"
pydantic = {extras = ["email"], version = "^2.3.0"}
pydantic-settings = "^2.0.3"
pytest = "^7.4.2"
pytest-asyncio = "^0.21.1"
python = "^3.11"
python-jose = "^3.3.0"
python-magic = "^0.4.27"
python-multipart = "^0.0.6"
sqlalchemy = "^2.0.20"
uvicorn = "^0.23.2"
wrapt = "^1.15.0"
@@ -95,6 +100,7 @@ variable-rgx = "^_{0,2}[a-z][a-z0-9_]*$"
exclude = [
".venv",
"alembic",
".pytest_cache",
]
pythonPlatform = "All"
pythonVersion = "3.11"

View File

@@ -0,0 +1,70 @@
import asyncio
import typing
import fastapi
import httpx
import pytest_asyncio
import lib.app as lib_app
import tests.core.settings as tests_core_settings
import tests.functional.models as functional_models
@pytest_asyncio.fixture # type: ignore[reportUntypedFunctionDecorator]
async def http_client(
base_url: str = tests_core_settings.tests_settings.api.get_api_url,
) -> typing.AsyncGenerator[httpx.AsyncClient, typing.Any]:
session = httpx.AsyncClient(base_url=base_url)
yield session
await session.aclose()
@pytest_asyncio.fixture # type: ignore[reportUntypedFunctionDecorator]
async def make_request(http_client: httpx.AsyncClient):
async def inner(
api_method: str = "",
method: functional_models.MethodsEnum = functional_models.MethodsEnum.GET,
headers: dict[str, str] = tests_core_settings.tests_settings.api.headers,
body: dict[str, typing.Any] | None = None,
jwt_token: str | None = None,
) -> functional_models.HTTPResponse:
if jwt_token is not None:
headers["Authorization"] = f"Bearer {jwt_token}"
client_params = {"json": body, "headers": headers}
if method == functional_models.MethodsEnum.GET:
del client_params["json"]
response = await getattr(http_client, method.value)(api_method, **client_params)
return functional_models.HTTPResponse(
body=response.json(),
headers=response.headers,
status_code=response.status_code,
)
return inner
@pytest_asyncio.fixture(scope="session") # type: ignore[reportUntypedFunctionDecorator]
def app() -> fastapi.FastAPI:
settings = lib_app.Settings()
application = lib_app.Application.from_settings(settings)
fastapi_app = application._fastapi_app # type: ignore[reportPrivateUsage]
return fastapi_app
@pytest_asyncio.fixture # type: ignore[reportUntypedFunctionDecorator]
async def app_http_client(
app: fastapi.FastAPI,
base_url: str = tests_core_settings.tests_settings.api.get_api_url,
) -> typing.AsyncGenerator[httpx.AsyncClient, typing.Any]:
session = httpx.AsyncClient(app=app, base_url=base_url)
yield session
await session.aclose()
@pytest_asyncio.fixture(scope="session") # type: ignore[reportUntypedFunctionDecorator]
def event_loop():
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()

View File

@@ -0,0 +1,5 @@
from .settings import *
__all__ = [
"tests_settings",
]

View File

@@ -0,0 +1,17 @@
import pydantic
import pydantic_settings
import tests.core.split_settings as app_split_settings
class TestsSettings(pydantic_settings.BaseSettings):
api: app_split_settings.ApiSettings = pydantic.Field(default_factory=lambda: app_split_settings.ApiSettings())
postgres: app_split_settings.PostgresSettings = pydantic.Field(
default_factory=lambda: app_split_settings.PostgresSettings()
)
project: app_split_settings.ProjectSettings = pydantic.Field(
default_factory=lambda: app_split_settings.ProjectSettings()
)
tests_settings = TestsSettings()

View File

@@ -0,0 +1,9 @@
from .api import *
from .postgres import *
from .project import *
__all__ = [
"ApiSettings",
"PostgresSettings",
"ProjectSettings",
]

View File

@@ -0,0 +1,23 @@
import pydantic
import pydantic_settings
import lib.app.split_settings.utils as app_split_settings_utils
class ApiSettings(pydantic_settings.BaseSettings):
model_config = pydantic_settings.SettingsConfigDict(
env_file=app_split_settings_utils.ENV_PATH,
env_prefix="TEST_API_",
env_file_encoding="utf-8",
extra="ignore",
)
protocol: str = "http"
host: str = "0.0.0.0"
port: int = 8000
headers: dict[str, str] = {"Content-Type": "application/json"}
@pydantic.computed_field
@property
def get_api_url(self) -> str:
return f"{self.protocol}://{self.host}:{self.port}/api/v1"
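# --- Editor's sketch, not part of this change: how ApiSettings above resolves in practice,
# --- assuming the package layout implied by the imports in tests/core/settings.py.
# --- With env_prefix="TEST_API_", an .env entry such as TEST_API_PORT=8000 fills `port`;
# --- explicit keyword arguments (as below) take priority over the env file.
import tests.core.split_settings as split_settings
api_settings = split_settings.ApiSettings(protocol="http", host="localhost", port=8000)
assert api_settings.get_api_url == "http://localhost:8000/api/v1"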

View File

@@ -0,0 +1,42 @@
import pydantic
import pydantic_settings
import lib.app.split_settings.utils as app_split_settings_utils
class PostgresSettings(pydantic_settings.BaseSettings):
model_config = pydantic_settings.SettingsConfigDict(
env_file=app_split_settings_utils.ENV_PATH,
env_prefix="POSTGRES_",
env_file_encoding="utf-8",
extra="ignore",
)
name: str = "test_database_name"
host: str = "localhost"
port: int = 5432
user: str = "app"
password: pydantic.SecretStr = pydantic.Field(
default=...,
validation_alias=pydantic.AliasChoices("password", "postgres_password"),
)
@property
def db_uri_async(self) -> str:
db_uri: str = "postgresql+asyncpg://{pg_user}:{pg_pass}@{pg_host}/{pg_dbname}".format(
pg_user=self.user,
pg_pass=self.password.get_secret_value(),
pg_host=self.host,
pg_dbname=self.name,
)
return db_uri
@property
def db_uri_sync(self) -> str:
db_uri: str = "postgresql://{pg_user}:{pg_pass}@{pg_host}/{pg_dbname}".format(
pg_user=self.user,
pg_pass=self.password.get_secret_value(),
pg_host=self.host,
pg_dbname=self.name,
)
return db_uri
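# --- Editor's sketch, not part of this change: consuming the DSNs built above.
# --- Assumes asyncpg is installed for the "+asyncpg" URI and psycopg2 for the sync one;
# --- the password value here is purely illustrative (the field has no default).
import sqlalchemy
import sqlalchemy.ext.asyncio as sa_asyncio
import tests.core.split_settings as split_settings
pg = split_settings.PostgresSettings(password="example")
async_engine = sa_asyncio.create_async_engine(pg.db_uri_async)  # postgresql+asyncpg://...
sync_engine = sqlalchemy.create_engine(pg.db_uri_sync)  # postgresql://...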

View File

@@ -0,0 +1,15 @@
import pydantic
import pydantic_settings
import lib.app.split_settings.utils as app_split_settings_utils
class ProjectSettings(pydantic_settings.BaseSettings):
model_config = pydantic_settings.SettingsConfigDict(
env_file=app_split_settings_utils.ENV_PATH,
env_file_encoding="utf-8",
extra="ignore",
)
debug: bool = False
jwt_secret_key: pydantic.SecretStr = pydantic.Field(default=..., validation_alias="jwt_secret_key")

View File

@@ -0,0 +1,4 @@
import pathlib
BASE_PATH = pathlib.Path(__file__).parent.parent.parent.parent.parent.resolve()
ENV_PATH = BASE_PATH / ".env"

View File

@@ -0,0 +1,7 @@
from .http import *
__all__ = [
"HTTPResponse",
"MakeResponseCallableType",
"MethodsEnum",
]

View File

@@ -0,0 +1,35 @@
import dataclasses
import enum
import typing
import multidict
import tests.core.settings as functional_settings
class MethodsEnum(enum.Enum):
GET = "get"
POST = "post"
PUT = "put"
DELETE = "delete"
PATCH = "patch"
@dataclasses.dataclass
class HTTPResponse:
body: dict[str, typing.Any] | str
headers: multidict.CIMultiDictProxy[str]
status_code: int
class MakeResponseCallableType(typing.Protocol):
async def __call__(
self,
api_method: str = "",
method: MethodsEnum = MethodsEnum.GET,
headers: dict[str, str] = functional_settings.tests_settings.api.headers,
body: dict[str, typing.Any] | None = None,
jwt_token: str | None = None,
) -> HTTPResponse:
...

View File

@@ -0,0 +1,17 @@
# import http
# import pytest
# import tests.functional.models as tests_functional_models
# pytestmark = [pytest.mark.asyncio]
# async def test_health(
# make_request: tests_functional_models.MakeResponseCallableType,
# ):
# response = await make_request(
# method=tests_functional_models.MethodsEnum.GET,
# api_method=f"/health/",
# )
# assert response.status_code == http.HTTPStatus.OK

View File

View File

@@ -0,0 +1,3 @@
[pytest]
log_format = %(asctime)s %(levelname)s %(message)s
log_date_format = %Y-%m-%d %H:%M:%S

View File

View File

View File

@@ -0,0 +1,11 @@
import http
import httpx
import pytest
pytestmark = [pytest.mark.asyncio]
async def test_health(app_http_client: httpx.AsyncClient) -> None:
response = await app_http_client.get("/health/")
assert response.status_code == http.HTTPStatus.OK