1
0
mirror of https://github.com/ijaric/voice_assistant.git synced 2025-05-24 14:33:26 +00:00

fix: start error & linting

This commit is contained in:
Artem Litvinov 2023-10-15 08:28:09 +01:00
parent 288af57ab5
commit 2344b576d8
10 changed files with 61 additions and 151 deletions

View File

@@ -1,5 +1,5 @@
from .services import AgentService # from .services import AgentService
__all__ = [ # __all__ = [
"AgentService", # "AgentService",
] # ]

View File

@@ -1,7 +1,6 @@
import logging import logging
import typing import typing
import langchain.chat_models
import openai import openai
import openai.error import openai.error
@@ -40,25 +39,3 @@ class EmbeddingRepository: @@ -40,25 +39,3 @@ class EmbeddingRepository:
except openai.error.OpenAIError: except openai.error.OpenAIError:
self.logger.exception("Failed to get async embedding for: %s", text) self.logger.exception("Failed to get async embedding for: %s", text)
class LlmRepository:
    """Repository wrapping a LangChain ``ChatOpenAI`` model for chat completions.

    NOTE(review): the original docstring said "embeddings", but this class
    performs chat completions, not embedding lookups.
    """

    def __init__(self, settings: app_settings.Settings) -> None:
        """Initialize the chat model from application settings.

        Bug fix: the original read ``self.settings.openai...`` before
        ``self.settings`` was ever assigned, so construction raised
        AttributeError. The settings object is now stored and the local
        parameter is used directly.
        """
        self.settings = settings
        self.llm = langchain.chat_models.ChatOpenAI(
            # NOTE(review): temperature is hard-coded; consider reading it
            # from settings (cf. settings.openai.agent_temperature elsewhere).
            temperature=0.7,
            openai_api_key=settings.openai.api_key.get_secret_value(),
        )

    async def get_chat_response(self, request: str, prompt: str) -> str:
        """Return the model's reply to *request*, with *prompt* as the system message.

        :param request: the user input passed to the chain as ``{"input": ...}``.
        :param prompt: system-message template text.
        :return: the text content of the model's response.
        """
        # Use a distinct name instead of rebinding the ``prompt`` parameter,
        # which the original shadowed with the template object.
        prompt_template = langchain.prompts.ChatPromptTemplate.from_messages(
            [
                ("system", prompt),
            ]
        )
        chain = prompt_template | self.llm
        response = await chain.ainvoke({"input": request})
        return response.content

View File

@@ -5,38 +5,33 @@ import langchain.agents @@ -5,38 +5,33 @@ import langchain.agents
import langchain.agents.format_scratchpad import langchain.agents.format_scratchpad
import langchain.agents.output_parsers import langchain.agents.output_parsers
import langchain.chat_models import langchain.chat_models
import langchain.memory
import langchain.memory.chat_memory
import langchain.prompts import langchain.prompts
import langchain.schema import langchain.schema
import langchain.tools.render import langchain.tools.render
import langchain.memory
import langchain.memory.chat_memory
import lib.models as models
import lib.agent.repositories as lib_agent_repositories import lib.agent.repositories as lib_agent_repositories
import lib.agent.repositories.chat_repository as chat_repositories
import lib.app.settings as app_settings import lib.app.settings as app_settings
import lib.agent.repositories.chat_repository as _chat_repository import lib.models as models
class AgentService: class AgentService:
def __init__( def __init__(
self, self,
settings: app_settings.Settings, settings: app_settings.Settings,
chat_repository: _chat_repository.ChatHistoryRepository, chat_repository: chat_repositories.ChatHistoryRepository,
tools: lib_agent_repositories.OpenAIFunctions | None = None, tools: lib_agent_repositories.OpenAIFunctions,
) -> None: ) -> None:
self.settings = settings self.settings = settings
self.tools = tools self.tools = tools
self.chat_repository = chat_repository self.chat_repository = chat_repository
self.logger = logging.getLogger(__name__) self.logger = logging.getLogger(__name__)
async def process_request(self, request: models.AgentCreateRequestModel) -> models.AgentCreateResponseModel: async def process_request(self, request: models.AgentCreateRequestModel) -> models.AgentCreateResponseModel:
# Get session ID # Get session ID
session_request = models.RequestLastSessionId( session_request = models.RequestLastSessionId(channel=request.channel, user_id=request.user_id, minutes_ago=3)
channel=request.channel,
user_id=request.user_id,
minutes_ago=3
)
session_id = await self.chat_repository.get_last_session_id(session_request) session_id = await self.chat_repository.get_last_session_id(session_request)
if not session_id: if not session_id:
session_id = uuid.uuid4() session_id = uuid.uuid4()
@@ -47,24 +42,27 @@ class AgentService: @@ -47,24 +42,27 @@ class AgentService:
name="GetMovieByDescription", name="GetMovieByDescription",
func=self.tools.get_movie_by_description, func=self.tools.get_movie_by_description,
coroutine=self.tools.get_movie_by_description, coroutine=self.tools.get_movie_by_description,
description="Get a movie by description" description="Get a movie by description",
), ),
] ]
llm = langchain.chat_models.ChatOpenAI(temperature=self.settings.openai.agent_temperature, openai_api_key=self.settings.openai.api_key.get_secret_value()) llm = langchain.chat_models.ChatOpenAI(
temperature=self.settings.openai.agent_temperature,
openai_api_key=self.settings.openai.api_key.get_secret_value(),
)
chat_history = [] chat_history = []
chat_history_name = f"{chat_history=}".partition("=")[0] chat_history_name = f"{chat_history=}".partition("=")[0]
request_chat_history = models.RequestChatHistory(session_id=session_id) request_chat_history = models.RequestChatHistory(session_id=session_id)
chat_history_source = await self.chat_repository.get_messages_by_sid(request_chat_history) chat_history_source = await self.chat_repository.get_messages_by_sid(request_chat_history)
if not chat_history_source:
for entry in chat_history_source: for entry in chat_history_source:
if entry.role == "user": if entry.role == "user":
chat_history.append(langchain.schema.HumanMessage(content=entry.content)) chat_history.append(langchain.schema.HumanMessage(content=entry.content))
elif entry.role == "agent": elif entry.role == "agent":
chat_history.append(langchain.schema.AIMessage(content=entry.content)) chat_history.append(langchain.schema.AIMessage(content=entry.content))
prompt = langchain.prompts.ChatPromptTemplate.from_messages( prompt = langchain.prompts.ChatPromptTemplate.from_messages(
[ [
( (
@@ -94,7 +92,6 @@ class AgentService: @@ -94,7 +92,6 @@ class AgentService:
| langchain.agents.output_parsers.OpenAIFunctionsAgentOutputParser() | langchain.agents.output_parsers.OpenAIFunctionsAgentOutputParser()
) )
agent_executor = langchain.agents.AgentExecutor(agent=agent, tools=tools, verbose=False) agent_executor = langchain.agents.AgentExecutor(agent=agent, tools=tools, verbose=False)
chat_history = [] # temporary disable chat_history chat_history = [] # temporary disable chat_history
response = await agent_executor.ainvoke({"input": request.text, "chat_history": chat_history}) response = await agent_executor.ainvoke({"input": request.text, "chat_history": chat_history})
@@ -103,16 +100,18 @@ class AgentService: @@ -103,16 +100,18 @@ class AgentService:
session_id=session_id, session_id=session_id,
user_id=request.user_id, user_id=request.user_id,
channel=request.channel, channel=request.channel,
message={"role": "user", "content": request.text} message={"role": "user", "content": request.text},
) )
ai_response = models.RequestChatMessage( ai_response = models.RequestChatMessage(
session_id=session_id, session_id=session_id,
user_id=request.user_id, user_id=request.user_id,
channel=request.channel, channel=request.channel,
message={"role": "assistant", "content": response["output"]} message={"role": "assistant", "content": response["output"]},
) )
await self.chat_repository.add_message(user_request) await self.chat_repository.add_message(user_request)
await self.chat_repository.add_message(ai_response) await self.chat_repository.add_message(ai_response)
return response print("RES:", response)
return models.AgentCreateResponseModel(text="response")

View File

@@ -1,10 +1,7 @@
from .agent import AgentHandler
from .health import basic_router from .health import basic_router
from .voice_responce_handler import VoiceResponseHandler from .voice_responce_handler import VoiceResponseHandler
__all__ = [ __all__ = [
"AgentHandler",
"VoiceResponseHandler", "VoiceResponseHandler",
"basic_router", "basic_router",
] ]

View File

@@ -1,59 +0,0 @@
import uuid
import fastapi
import lib.agent.repositories as agent_repositories
import lib.models as models
class AgentHandler:
    """Debug/testing HTTP handler exposing the chat-history repository.

    NOTE(review): the hard-coded session UUID, the in-method ``faker``
    import, and the ``print`` calls indicate this is throwaway test
    wiring, not production API surface.
    """

    def __init__(self, chat_history_repository: agent_repositories.ChatHistoryRepository):
        # Repository used by all three routes below.
        self.chat_history_repository = chat_history_repository
        self.router = fastapi.APIRouter()
        # NOTE(review): summary/description on all three routes are copied
        # from the health-check endpoint and do not describe these routes.
        self.router.add_api_route(
            "/",
            self.get_agent,
            methods=["GET"],
            summary="Статус работоспособности",
            description="Проверяет доступность сервиса FastAPI.",
        )
        self.router.add_api_route(
            "/add",
            self.add_message,
            methods=["GET"],
            summary="Статус работоспособности",
            description="Проверяет доступность сервиса FastAPI.",
        )
        self.router.add_api_route(
            "/messages",
            self.get_messages,
            methods=["GET"],
            summary="Статус работоспособности",
            description="Проверяет доступность сервиса FastAPI.",
        )

    async def get_agent(self):
        """Return the most recent session id for a fixed test user/channel."""
        request = models.RequestLastSessionId(channel="test", user_id="user_id_1", minutes_ago=3)
        response = await self.chat_history_repository.get_last_session_id(request=request)
        # Debug output left in place; this endpoint is test-only.
        print("RESPONSE: ", response)
        return {"response": response}

    async def add_message(self):
        """Append a fake system message to a hard-coded test session."""
        sid: uuid.UUID = uuid.UUID("0cd3c882-affd-4929-aff1-e1724f5b54f2")
        # HACK: function-level third-party import; acceptable only because
        # this is a throwaway debug endpoint.
        import faker

        fake = faker.Faker()
        message = models.RequestChatMessage(
            session_id=sid, user_id="user_id_1", channel="test", message={"role": "system", "content": fake.sentence()}
        )
        await self.chat_history_repository.add_message(request=message)
        return {"response": "ok"}

    async def get_messages(self):
        """Return all messages stored under the hard-coded test session id."""
        sid: uuid.UUID = uuid.UUID("0cd3c882-affd-4929-aff1-e1724f5b54f2")
        request = models.RequestChatHistory(session_id=sid)
        response = await self.chat_history_repository.get_messages_by_sid(request=request)
        # Debug output left in place; this endpoint is test-only.
        print("RESPONSE: ", response)
        return {"response": response}

View File

@@ -3,10 +3,10 @@ import io @@ -3,10 +3,10 @@ import io
import fastapi import fastapi
import lib.stt.services as stt_services
import lib.agent.services as agent_service import lib.agent.services as agent_service
import lib.tts.services as tts_service
import lib.models as models import lib.models as models
import lib.stt.services as stt_services
import lib.tts.services as tts_service
class VoiceResponseHandler: class VoiceResponseHandler:
@@ -14,11 +14,11 @@ class VoiceResponseHandler: @@ -14,11 +14,11 @@ class VoiceResponseHandler:
self, self,
stt: stt_services.SpeechService, stt: stt_services.SpeechService,
tts: tts_service.TTSService, tts: tts_service.TTSService,
# agent: agent_service.AgentService, agent: agent_service.AgentService,
): ):
self.stt = stt self.stt = stt
self.tts = tts self.tts = tts
# self.agent = agent self.agent = agent
self.router = fastapi.APIRouter() self.router = fastapi.APIRouter()
self.router.add_api_route( self.router.add_api_route(
"/", "/",
@@ -38,12 +38,12 @@ class VoiceResponseHandler: @@ -38,12 +38,12 @@ class VoiceResponseHandler:
if voice_text == "": if voice_text == "":
raise fastapi.HTTPException(status_code=http.HTTPStatus.BAD_REQUEST, detail="Speech recognition failed") raise fastapi.HTTPException(status_code=http.HTTPStatus.BAD_REQUEST, detail="Speech recognition failed")
# agent_request = models.AgentCreateRequestModel(channel=channel, user_id=user_id, text=voice_text) agent_request = models.AgentCreateRequestModel(channel=channel, user_id=user_id, text=voice_text)
# reply_text = await self.agent.process_request(agent_request) reply_text = await self.agent.process_request(agent_request)
reply_text = "hi there"
response = await self.tts.get_audio_as_bytes( response = await self.tts.get_audio_as_bytes(
models.TTSCreateRequestModel( models.TTSCreateRequestModel(
text=reply_text, text=reply_text.text,
) )
) )
return fastapi.responses.StreamingResponse(io.BytesIO(response.audio_content), media_type="audio/ogg") return fastapi.responses.StreamingResponse(io.BytesIO(response.audio_content), media_type="audio/ogg")

View File

@@ -6,6 +6,9 @@ import typing @@ -6,6 +6,9 @@ import typing
import fastapi import fastapi
import uvicorn import uvicorn
import lib.agent.repositories as agent_repositories
import lib.agent.repositories.openai_functions as agent_functions
import lib.agent.services as agent_services
import lib.api.v1.handlers as api_v1_handlers import lib.api.v1.handlers as api_v1_handlers
import lib.app.errors as app_errors import lib.app.errors as app_errors
import lib.app.settings as app_settings import lib.app.settings as app_settings
@@ -14,9 +17,6 @@ import lib.clients as clients @@ -14,9 +17,6 @@ import lib.clients as clients
import lib.models as models import lib.models as models
import lib.stt as stt import lib.stt as stt
import lib.tts as tts import lib.tts as tts
import lib.agent.repositories as agent_repositories
import lib.agent.repositories.openai_functions as agent_functions
import lib.agent.services as agent_services
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -92,9 +92,14 @@ class Application: @@ -92,9 +92,14 @@ class Application:
logger.info("Initializing repositories") logger.info("Initializing repositories")
stt_repository: stt.STTProtocol = stt.OpenaiSpeechRepository(settings=settings) stt_repository: stt.STTProtocol = stt.OpenaiSpeechRepository(settings=settings)
chat_history_repository = agent_repositories.ChatHistoryRepository(pg_async_session=postgres_client.get_async_session()) chat_history_repository = agent_repositories.ChatHistoryRepository(
embedding_repository = agent_repositories.EmbeddingRepository(settings) pg_async_session=postgres_client.get_async_session()
)
embedding_repository = agent_repositories.EmbeddingRepository(settings=settings)
agent_tools = agent_functions.OpenAIFunctions(
repository=embedding_repository, pg_async_session=postgres_client.get_async_session()
)
agent_tools = None
tts_yandex_repository = tts.TTSYandexRepository( tts_yandex_repository = tts.TTSYandexRepository(
tts_settings=app_split_settings.TTSYandexSettings(), tts_settings=app_split_settings.TTSYandexSettings(),
client=http_yandex_tts_client, client=http_yandex_tts_client,
@@ -105,43 +110,40 @@ class Application: @@ -105,43 +110,40 @@ class Application:
is_models_from_api=True, is_models_from_api=True,
) )
# Caches # Caches
logger.info("Initializing caches") logger.info("Initializing caches")
# Tools # Tools
agent_tools = agent_functions.OpenAIFunctions(repository=embedding_repository, pg_async_session=postgres_client.get_async_session())
# Services # Services
logger.info("Initializing services") logger.info("Initializing services")
stt_service: stt.SpeechService = stt.SpeechService(repository=stt_repository)
stt_service: stt.SpeechService = stt.SpeechService(repository=stt_repository) # type: ignore tts_service: tts.TTSService = tts.TTSService(
tts_service: tts.TTSService = tts.TTSService( # type: ignore
repositories={ repositories={
models.VoiceModelProvidersEnum.YANDEX: tts_yandex_repository, models.VoiceModelProvidersEnum.YANDEX: tts_yandex_repository,
models.VoiceModelProvidersEnum.ELEVEN_LABS: tts_eleven_labs_repository, models.VoiceModelProvidersEnum.ELEVEN_LABS: tts_eleven_labs_repository,
}, },
) )
agent_service: agent_services.AgentService(settings=settings, chat_repository=chat_history_repository) agent_service = agent_services.AgentService(
# agent_service: agent_services.AgentService(settings=settings, chat_repository=chat_history_repository, tools=agent_tools) settings=settings, chat_repository=chat_history_repository, tools=agent_tools
)
# Handlers # Handlers
logger.info("Initializing handlers") logger.info("Initializing handlers")
liveness_probe_handler = api_v1_handlers.basic_router liveness_probe_handler = api_v1_handlers.basic_router
agent_handler = api_v1_handlers.AgentHandler(chat_history_repository=chat_history_repository).router
# TODO: объявить сервисы tts и openai и добавить их в voice_response_handler # TODO: объявить сервисы tts и openai и добавить их в voice_response_handler
voice_response_handler = api_v1_handlers.VoiceResponseHandler( voice_response_handler = api_v1_handlers.VoiceResponseHandler(
stt=stt_service, stt=stt_service,
tts=tts_service, tts=tts_service,
agent=agent_services, agent=agent_service,
).router ).router
logger.info("Creating application") logger.info("Creating application")
@@ -156,7 +158,6 @@ class Application: @@ -156,7 +158,6 @@ class Application:
# Routes # Routes
fastapi_app.include_router(liveness_probe_handler, prefix="/api/v1/health", tags=["health"]) fastapi_app.include_router(liveness_probe_handler, prefix="/api/v1/health", tags=["health"])
fastapi_app.include_router(agent_handler, prefix="/api/v1/agent", tags=["testing"])
fastapi_app.include_router(voice_response_handler, prefix="/api/v1/voice", tags=["voice"]) fastapi_app.include_router(voice_response_handler, prefix="/api/v1/voice", tags=["voice"])
application = Application( application = Application(

View File

@@ -1,21 +1,19 @@
from .agent import *
from .chat_history import Message, RequestChatHistory, RequestChatMessage, RequestLastSessionId from .chat_history import Message, RequestChatHistory, RequestChatMessage, RequestLastSessionId
from .embedding import Embedding from .embedding import Embedding
from .movies import Movie from .movies import Movie
from .token import Token from .token import Token
from .tts import * from .tts import *
from .agent import *
# __all__ = ["Embedding", "Message", "Movie", "RequestChatHistory", "RequestChatMessage", "RequestLastSessionId", "Token"] # __all__ = ["Embedding", "Message", "Movie", "RequestChatHistory", "RequestChatMessage", "RequestLastSessionId", "Token"]
__all__ = [ __all__ = [
"AVAILABLE_MODELS_TYPE", "AVAILABLE_MODELS_TYPE",
# "Base", "AgentCreateRequestModel",
"BaseLanguageCodesEnum", "BaseLanguageCodesEnum",
"BaseVoiceModel", "BaseVoiceModel",
"ElevenLabsLanguageCodesEnum", "ElevenLabsLanguageCodesEnum",
"ElevenLabsListVoiceModelsModel", "ElevenLabsListVoiceModelsModel",
"ElevenLabsVoiceModel", "ElevenLabsVoiceModel",
# "IdCreatedUpdatedBaseMixin",
"LANGUAGE_CODES_ENUM_TYPE", "LANGUAGE_CODES_ENUM_TYPE",
"LIST_VOICE_MODELS_TYPE", "LIST_VOICE_MODELS_TYPE",
"TTSCreateRequestModel", "TTSCreateRequestModel",
@@ -26,5 +24,4 @@ __all__ = [ @@ -26,5 +24,4 @@ __all__ = [
"YandexLanguageCodesEnum", "YandexLanguageCodesEnum",
"YandexListVoiceModelsModel", "YandexListVoiceModelsModel",
"YandexVoiceModel", "YandexVoiceModel",
"AgentCreateRequestModel",
] ]

View File

@@ -1,5 +1,3 @@
import uuid
import pydantic import pydantic

View File

@@ -25,21 +25,21 @@ asyncpg = "^0.28.0" @@ -25,21 +25,21 @@ asyncpg = "^0.28.0"
fastapi = "0.103.1" fastapi = "0.103.1"
greenlet = "^2.0.2" greenlet = "^2.0.2"
httpx = "^0.25.0" httpx = "^0.25.0"
langchain = "^0.0.314"
multidict = "^6.0.4" multidict = "^6.0.4"
openai = "^0.28.1"
orjson = "^3.9.7" orjson = "^3.9.7"
pgvector = "^0.2.3"
psycopg2-binary = "^2.9.9" psycopg2-binary = "^2.9.9"
pydantic = {extras = ["email"], version = "^2.3.0"} pydantic = {extras = ["email"], version = "^2.3.0"}
pydantic-settings = "^2.0.3" pydantic-settings = "^2.0.3"
pytest-asyncio = "^0.21.1" pytest-asyncio = "^0.21.1"
python = "^3.11" python = "^3.11"
python-jose = "^3.3.0" python-jose = "^3.3.0"
python-magic = "^0.4.27"
python-multipart = "^0.0.6"
sqlalchemy = "^2.0.20" sqlalchemy = "^2.0.20"
uvicorn = "^0.23.2" uvicorn = "^0.23.2"
pgvector = "^0.2.3"
python-magic = "^0.4.27"
openai = "^0.28.1"
python-multipart = "^0.0.6"
langchain = "^0.0.314"
[tool.poetry.dev-dependencies] [tool.poetry.dev-dependencies]
black = "^23.7.0" black = "^23.7.0"