From e7ef46e48817f45b61727859c380a20aad00489c Mon Sep 17 00:00:00 2001
From: Jordan Maynor
Date: Wed, 11 Sep 2024 21:54:23 -0500
Subject: [PATCH] Additional logging

---
 src/dolores.py            |  1 +
 src/modules/generation.py | 31 ++++++++++++++++++-------------
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/src/dolores.py b/src/dolores.py
index 1e4eacb..d8aa1fb 100644
--- a/src/dolores.py
+++ b/src/dolores.py
@@ -58,6 +58,7 @@ async def handle_mention(message):
         clean_message = message.clean_content.replace("@Dolores", "Dolores")
         clean_message = clean_message.replace("@everyone", "everyone")
         clean_message = clean_message.replace("@Testie", "Testie")
+        logger.info(f"Generating reply to following message: {clean_message}")
         reply = text_instance.generate_reply(clean_message)
     else:
         reply = "Hi"
diff --git a/src/modules/generation.py b/src/modules/generation.py
index 011ccb5..e933f89 100644
--- a/src/modules/generation.py
+++ b/src/modules/generation.py
@@ -48,19 +48,24 @@ def generate_reply(self, message):
             # Add the user's message to the message history
             message_history.append({"role": "user", "content": message})
 
-            # Generate a reply using the OpenAI API
-            response = openai.chat.completions.create(
-                model=os.environ["OPENAI_MODEL"],
-                messages=system_messages + list(message_history),
-                max_tokens=int(os.environ.get("MAX_TOKENS", 150)),
-                temperature=float(os.environ.get("TEMPERATURE", 0.9)),
-                top_p=float(os.environ.get("TOP_P", 1.0)),
-                frequency_penalty=float(os.environ.get("FREQUENCY_PENALTY", 0.0)),
-                presence_penalty=float(os.environ.get("PRESENCE_PENALTY", 0.6)),
-            )
-            reply = response.choices[0].message.content
-            # Add the reply to the message history
-            message_history.append({"role": "assistant", "content": reply})
+            try:
+                # Generate a reply using the OpenAI API
+                response = openai.chat.completions.create(
+                    model=os.environ["OPENAI_MODEL"],
+                    messages=system_messages + list(message_history),
+                    max_tokens=int(os.environ.get("MAX_TOKENS", 150)),
+                    temperature=float(os.environ.get("TEMPERATURE", 0.9)),
+                    top_p=float(os.environ.get("TOP_P", 1.0)),
+                    frequency_penalty=float(os.environ.get("FREQUENCY_PENALTY", 0.0)),
+                    presence_penalty=float(os.environ.get("PRESENCE_PENALTY", 0.6)),
+                )
+                reply = response.choices[0].message.content
+                logger.info(f"Reply generated: {reply}")
+                # Add the reply to the message history
+                message_history.append({"role": "assistant", "content": reply})
+            except Exception as e:
+                logger.error(f"Error generating reply: {e}")
+                logger.error(f"Messages: {system_messages + list(message_history)}")
 
         # Use a self-hosted LLM to generate a reply
         elif reply_method == "self":
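
A note on the new error handling in generate_reply: if chat.completions.create raises, the
except branch logs the failure but leaves reply unbound, so whatever follows the try block
(which presumably uses or returns reply, as the dolores.py hunk suggests) would hit a
NameError. A minimal sketch of a fallback, not part of this patch; the fallback wording is
hypothetical and any sentinel the caller understands would do:

            try:
                # Same OpenAI call as in the patch above
                response = openai.chat.completions.create(
                    model=os.environ["OPENAI_MODEL"],
                    messages=system_messages + list(message_history),
                    max_tokens=int(os.environ.get("MAX_TOKENS", 150)),
                    temperature=float(os.environ.get("TEMPERATURE", 0.9)),
                )
                reply = response.choices[0].message.content
                logger.info(f"Reply generated: {reply}")
                message_history.append({"role": "assistant", "content": reply})
            except Exception as e:
                logger.error(f"Error generating reply: {e}")
                logger.error(f"Messages: {system_messages + list(message_history)}")
                # Hypothetical fallback so reply is always bound for the caller
                reply = "Sorry, I couldn't come up with a reply."

This keeps the logging added by the commit while guaranteeing the function still produces a
reply on API failure.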