
Commit

Additional logging
JMaynor authored Sep 12, 2024
1 parent 644b2e4 commit e7ef46e
Showing 2 changed files with 19 additions and 13 deletions.
1 change: 1 addition & 0 deletions src/dolores.py
@@ -58,6 +58,7 @@ async def handle_mention(message):
         clean_message = message.clean_content.replace("@Dolores", "Dolores")
         clean_message = clean_message.replace("@everyone", "everyone")
         clean_message = clean_message.replace("@Testie", "Testie")
+        logger.info(f"Generating reply to following message: {clean_message}")
         reply = text_instance.generate_reply(clean_message)
     else:
         reply = "Hi"
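The logger referenced by the added line is not defined within this hunk. A minimal sketch of how a module-level logger could be set up with the standard library; the level and format string are illustrative assumptions, not taken from the repository, which may configure logging elsewhere:

```python
# Hypothetical setup for the logger referenced in the diff; the actual
# repository may configure logging in a shared module instead.
import logging

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)
logger = logging.getLogger(__name__)
```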
31 changes: 18 additions & 13 deletions src/modules/generation.py
@@ -48,19 +48,24 @@ def generate_reply(self, message):
             # Add the user's message to the message history
             message_history.append({"role": "user", "content": message})
 
-            # Generate a reply using the OpenAI API
-            response = openai.chat.completions.create(
-                model=os.environ["OPENAI_MODEL"],
-                messages=system_messages + list(message_history),
-                max_tokens=int(os.environ.get("MAX_TOKENS", 150)),
-                temperature=float(os.environ.get("TEMPERATURE", 0.9)),
-                top_p=float(os.environ.get("TOP_P", 1.0)),
-                frequency_penalty=float(os.environ.get("FREQUENCY_PENALTY", 0.0)),
-                presence_penalty=float(os.environ.get("PRESENCE_PENALTY", 0.6)),
-            )
-            reply = response.choices[0].message.content
-            # Add the reply to the message history
-            message_history.append({"role": "assistant", "content": reply})
+            try:
+                # Generate a reply using the OpenAI API
+                response = openai.chat.completions.create(
+                    model=os.environ["OPENAI_MODEL"],
+                    messages=system_messages + list(message_history),
+                    max_tokens=int(os.environ.get("MAX_TOKENS", 150)),
+                    temperature=float(os.environ.get("TEMPERATURE", 0.9)),
+                    top_p=float(os.environ.get("TOP_P", 1.0)),
+                    frequency_penalty=float(os.environ.get("FREQUENCY_PENALTY", 0.0)),
+                    presence_penalty=float(os.environ.get("PRESENCE_PENALTY", 0.6)),
+                )
+                reply = response.choices[0].message.content
+                logger.info(f"Reply generated: {reply}")
+                # Add the reply to the message history
+                message_history.append({"role": "assistant", "content": reply})
+            except Exception as e:
+                logger.error(f"Error generating reply: {e}")
+                logger.error(f"Messages: {system_messages + list(message_history)}")
 
         # Use a self-hosted LLM to generate a reply
         elif reply_method == "self":
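The call to list(message_history) before concatenation suggests message_history is not a plain list, most likely a bounded collections.deque that caps how much conversation is resent to the model. A minimal standalone sketch of that pattern; the maxlen value, system prompt, and sample messages are assumptions for illustration:

```python
from collections import deque

# Hypothetical bounded history; the capacity actually used by the bot is
# not shown in this diff.
message_history = deque(maxlen=10)
message_history.append({"role": "user", "content": "Hello, Dolores"})
message_history.append({"role": "assistant", "content": "Hi there"})

system_messages = [{"role": "system", "content": "You are Dolores."}]

# A deque cannot be concatenated to a list directly, hence list(message_history).
messages = system_messages + list(message_history)
print(messages)
```

Keeping the system messages outside the bounded deque means the persona prompt is never evicted as the conversation grows, while old turns age out automatically.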
