From 38a8f8f51894436f2dce1f0dc051a0cc0e632078 Mon Sep 17 00:00:00 2001
From: NotBioWaste905
Date: Tue, 30 Jul 2024 18:49:28 +0300
Subject: [PATCH] Figured out how to implement DeepEval functions

---
 chatsky/llm/conditions.py                   | 14 ++++++++++++++
 chatsky/llm/{llm_response.py => wrapper.py} | 19 +++++++------------
 2 files changed, 21 insertions(+), 12 deletions(-)
 create mode 100644 chatsky/llm/conditions.py
 rename chatsky/llm/{llm_response.py => wrapper.py} (89%)

diff --git a/chatsky/llm/conditions.py b/chatsky/llm/conditions.py
new file mode 100644
index 000000000..c393d7a6c
--- /dev/null
+++ b/chatsky/llm/conditions.py
@@ -0,0 +1,14 @@
+"""
+LLM conditions.
+---------
+This file stores unified functions for some basic condition cases,
+including regex search, semantic (cosine) distance, etc.
+"""
+
+from chatsky.script.core.message import Message
+
+def regex_search(pattern: str) -> bool:
+    pass
+
+def semantic_distance(target: str | Message, threshold: float) -> bool:
+    pass
\ No newline at end of file
diff --git a/chatsky/llm/llm_response.py b/chatsky/llm/wrapper.py
similarity index 89%
rename from chatsky/llm/llm_response.py
rename to chatsky/llm/wrapper.py
index 6aa2f8127..8e1c12a68 100644
--- a/chatsky/llm/llm_response.py
+++ b/chatsky/llm/wrapper.py
@@ -11,13 +11,12 @@
     from langchain_cohere import ChatCohere
     from langchain_mistralai import ChatMistralAI
     from langchain.output_parsers import ResponseSchema, StructuredOutputParser
+    from langchain_core.messages import HumanMessage, SystemMessage
+    from langchain_core.output_parsers import StrOutputParser
     langchain_available = True
 except ImportError:
     langchain_available = False
 
-from langchain_core.messages import HumanMessage, SystemMessage
-from langchain_core.output_parsers import StrOutputParser
-
 import base64
 import httpx
 import re
@@ -78,15 +77,11 @@ def condition(self, prompt, request):
         return result
 
     # Helper functions for DeepEval custom LLM usage
-    def generate(self, prompt: str, schema: BaseModel):
-        # TODO: Remake this
-        schema_parser = StructuredOutputParser.from_response_schemas([ResponseSchema(base_model=schema)])
-        chain = prompt | self.model | schema_parser
-        return chain.invoke({"input": prompt})
+    def generate(self, prompt: str):
+        return self.model.invoke(prompt).content
 
-    async def a_generate(self, prompt: str, schema: BaseModel):
-        # TODO: Remake this
-        return self.generate(HumanMessage(prompt), schema)
+    async def a_generate(self, prompt: str):
+        return self.generate(prompt)
 
     def load_model(self):
         return self.model
@@ -174,4 +169,4 @@ def message_to_langchain(message: Message):
             content.append(
                 {"type": "image_url", "image_url": {"url": __attachment_to_content(image)}}
             )
-    return HumanMessage(content=content)
\ No newline at end of file
+    return HumanMessage(content=content)
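
Notes (not part of the patch):

conditions.py lands with both condition functions stubbed out (annotated as
returning bool, although chatsky conditions are typically built as factories
that return a callable). A minimal sketch of how the two stubs might be
filled in -- the closure-style factories, the (ctx, pipeline) condition
signature, and the sentence-transformers encoder are all assumptions of this
note, not anything the patch commits to:

    import re

    import numpy as np
    from sentence_transformers import SentenceTransformer

    from chatsky.script.core.message import Message

    # Hypothetical encoder choice for the cosine-distance condition.
    _encoder = SentenceTransformer("all-MiniLM-L6-v2")

    def regex_search(pattern: str):
        """Build a condition that is True when the last request matches `pattern`."""
        compiled = re.compile(pattern)

        def condition(ctx, pipeline) -> bool:
            request = ctx.last_request
            return bool(request is not None and request.text and compiled.search(request.text))

        return condition

    def semantic_distance(target: str | Message, threshold: float):
        """Build a condition that is True when the cosine similarity between
        the last request and `target` is at least `threshold`."""
        target_text = target.text if isinstance(target, Message) else target
        target_vec = _encoder.encode(target_text)

        def condition(ctx, pipeline) -> bool:
            request = ctx.last_request
            if request is None or not request.text:
                return False
            vec = _encoder.encode(request.text)
            similarity = float(np.dot(target_vec, vec) / (np.linalg.norm(target_vec) * np.linalg.norm(vec)))
            return similarity >= threshold

        return condition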
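The simplified generate()/a_generate() pair matches DeepEval's documented
custom-model contract: a DeepEvalBaseLLM subclass must provide load_model(),
get_model_name(), and generate()/a_generate() methods that take a plain
string prompt and return a string, which is why the schema-parser chain could
be dropped. A self-contained sketch of that contract around a langchain chat
model (the class name and the ChatOpenAI choice are illustrative, not this
patch's code):

    from deepeval.models.base_model import DeepEvalBaseLLM
    from langchain_openai import ChatOpenAI

    class LangchainEvalModel(DeepEvalBaseLLM):
        def __init__(self, model):
            self.model = model  # any langchain chat model

        def load_model(self):
            return self.model

        def generate(self, prompt: str) -> str:
            # Langchain chat models accept a plain string via .invoke().
            return self.model.invoke(prompt).content

        async def a_generate(self, prompt: str) -> str:
            # ainvoke() keeps the async path non-blocking; the patch currently
            # delegates to the sync generate() instead, which also satisfies
            # the interface but blocks the event loop.
            return (await self.model.ainvoke(prompt)).content

        def get_model_name(self) -> str:
            return "langchain-wrapped model"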
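Once the wrapper satisfies that contract, it can be handed to DeepEval
metrics that accept a custom model. A usage sketch (the metric choice and the
constructor arguments are illustrative):

    from deepeval.metrics import AnswerRelevancyMetric
    from deepeval.test_case import LLMTestCase

    custom_model = LangchainEvalModel(ChatOpenAI(model="gpt-4o-mini"))
    metric = AnswerRelevancyMetric(model=custom_model)
    test_case = LLMTestCase(
        input="What do LLM conditions do in chatsky?",
        actual_output="They gate script transitions on model output.",
    )
    metric.measure(test_case)
    print(metric.score, metric.reason)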