fix: adding header and query to oai - Python #104

Open · wants to merge 1 commit into base: main
7 changes: 7 additions & 0 deletions .vscode/settings.json
@@ -0,0 +1,7 @@
{
    "python.testing.pytestArgs": [
        "py"
    ],
    "python.testing.unittestEnabled": false,
    "python.testing.pytestEnabled": true
}
46 changes: 46 additions & 0 deletions py/autoevals/oai.py
@@ -1,4 +1,5 @@
import asyncio
import json
import os
import sys
import textwrap
@@ -145,6 +146,51 @@ def prepare_openai(client: Optional[LLMClient] = None, is_async=False, api_key=N
        # This is the new v1 API
        is_v1 = True

        default_headers = {}
        default_query = {}

        # Get headers from environment variables
        if os.environ.get("OPENAI_DEFAULT_HEADERS"):
            try:
                default_headers = json.loads(os.environ.get("OPENAI_DEFAULT_HEADERS"))
            except json.JSONDecodeError as e:
                print(f"Error parsing OPENAI_DEFAULT_HEADERS: {e}")
                default_headers = {}

        # Get query params from environment variables
        if os.environ.get("OPENAI_DEFAULT_QUERY"):
            try:
                default_query = json.loads(os.environ.get("OPENAI_DEFAULT_QUERY"))
            except json.JSONDecodeError as e:
                print(f"Error parsing OPENAI_DEFAULT_QUERY: {e}")
                default_query = {}

        # Add request source tracking header
        default_headers["X-Request-Source"] = "autoevals"

        print(f"default_headers: {default_headers}")
        print(f"default_query: {default_query}")

        if is_async:
            openai_obj = openai.AsyncOpenAI(
                api_key=api_key,
                base_url=base_url,
                default_headers=default_headers,
                default_query=default_query
            )
        else:
            openai_obj = openai.OpenAI(
                api_key=api_key,
                base_url=base_url,
                default_headers=default_headers,
                default_query=default_query
            )
    else:
        if api_key:
            openai.api_key = api_key
        openai.api_base = base_url
        # For v0 API, headers and query params need to be set per-request

    if client is None:
        # prepare the default openai sdk, if not provided
        if api_key is None:
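For context, a minimal usage sketch of the new configuration path (not part of this PR; the header and query values are hypothetical examples): the defaults are read from the OPENAI_DEFAULT_HEADERS and OPENAI_DEFAULT_QUERY environment variables before the v1 client is constructed, and the X-Request-Source tracking header is always appended.

import json
import os

# Hypothetical values; any JSON object should work here.
os.environ["OPENAI_DEFAULT_HEADERS"] = json.dumps({"X-Custom-Header": "test"})
os.environ["OPENAI_DEFAULT_QUERY"] = json.dumps({"api-version": "2024-02-01"})

from autoevals.oai import prepare_openai

# prepare_openai returns (LLMClient, wrapped), as exercised by the tests below.
client, wrapped = prepare_openai()
# client.openai is built with default_headers/default_query from the environment,
# plus the always-added {"X-Request-Source": "autoevals"} tracking header.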
115 changes: 115 additions & 0 deletions py/autoevals/test_oai.py
@@ -0,0 +1,115 @@
import pytest
from unittest.mock import Mock, patch
import json
import os

from . import oai
from .oai import LLMClient, prepare_openai, post_process_response, run_cached_request, arun_cached_request

class MockOpenAIResponse:
    def dict(self):
        return {"response": "test"}

class MockRateLimitError(Exception):
    pass

class MockCompletions:
    def create(self, **kwargs):
        return MockOpenAIResponse()

class MockChat:
    def __init__(self):
        self.completions = MockCompletions()

class MockEmbeddings:
    def create(self, **kwargs):
        return MockOpenAIResponse()

class MockModerations:
    def create(self, **kwargs):
        return MockOpenAIResponse()

class MockOpenAI:
    def __init__(self, **kwargs):
        self.default_headers = kwargs.get('default_headers', {})
        self.default_query = kwargs.get('default_query', {})
        self.chat = MockChat()
        self.embeddings = MockEmbeddings()
        self.moderations = MockModerations()
        self.RateLimitError = MockRateLimitError

def test_openai_sync():
    """Test basic OpenAI client functionality with a simple completion request"""
    mock_openai = MockOpenAI()
    client = LLMClient(
        openai=mock_openai,
        complete=mock_openai.chat.completions.create,
        embed=mock_openai.embeddings.create,
        moderation=mock_openai.moderations.create,
        RateLimitError=MockRateLimitError
    )

    response = run_cached_request(
        client=client,
        request_type="complete",
        messages=[
            {
                "role": "system",
                "content": "You are a helpful assistant."
            },
            {
                "role": "user",
                "content": "What is 2+2?"
            }
        ],
        model="gpt-3.5-turbo",
        max_tokens=50
    )

    assert response == {"response": "test"}

@patch('openai.OpenAI')
@patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'})
def test_openai_headers(mock_openai):
    """Test OpenAI client with custom headers"""
    mock_instance = MockOpenAI(default_headers={"X-Custom-Header": "test", "X-Request-Source": "autoevals"})
    mock_openai.return_value = mock_instance
    with patch.dict(os.environ, {'OPENAI_DEFAULT_HEADERS': json.dumps({"X-Custom-Header": "test"})}):
        client, wrapped = prepare_openai()
        assert isinstance(client, LLMClient)
        assert mock_instance.default_headers["X-Custom-Header"] == "test"
        assert mock_instance.default_headers["X-Request-Source"] == "autoevals"

@patch('openai.OpenAI')
@patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'})
def test_openai_query_params(mock_openai):
    """Test OpenAI client with custom query parameters"""
    mock_instance = MockOpenAI(default_query={"custom_param": "test"})
    mock_openai.return_value = mock_instance
    with patch.dict(os.environ, {'OPENAI_DEFAULT_QUERY': json.dumps({"custom_param": "test"})}):
        client, wrapped = prepare_openai()
        assert isinstance(client, LLMClient)
        assert mock_instance.default_query["custom_param"] == "test"

@patch('openai.OpenAI')
@patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'})
def test_invalid_header_json(mock_openai):
    """Test handling of invalid header JSON"""
    mock_instance = MockOpenAI(default_headers={"X-Request-Source": "autoevals"})
    mock_openai.return_value = mock_instance
    with patch.dict(os.environ, {'OPENAI_DEFAULT_HEADERS': 'invalid json'}):
        client, wrapped = prepare_openai()
        assert isinstance(client, LLMClient)
        assert mock_instance.default_headers["X-Request-Source"] == "autoevals"
        assert len(mock_instance.default_headers) == 1

@patch('openai.OpenAI')
@patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'})
def test_invalid_query_json(mock_openai):
    """Test handling of invalid query JSON"""
    mock_instance = MockOpenAI()
    mock_openai.return_value = mock_instance
    with patch.dict(os.environ, {'OPENAI_DEFAULT_QUERY': 'invalid json'}):
        client, wrapped = prepare_openai()
        assert isinstance(client, LLMClient)
        assert len(mock_instance.default_query) == 0
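An async counterpart could be exercised along the same lines; the following is only a sketch, assuming arun_cached_request mirrors run_cached_request's keyword interface and awaits the client's complete callable, and that pytest-asyncio is available to run it.

class MockAsyncCompletions:
    # Assumption: the async path awaits client.complete, so the mock must be awaitable.
    async def create(self, **kwargs):
        return MockOpenAIResponse()

@pytest.mark.asyncio
async def test_openai_async_sketch():
    """Sketch: exercise arun_cached_request with an async mock client"""
    completions = MockAsyncCompletions()
    mock_openai = MockOpenAI()
    client = LLMClient(
        openai=mock_openai,
        complete=completions.create,
        embed=mock_openai.embeddings.create,
        moderation=mock_openai.moderations.create,
        RateLimitError=MockRateLimitError
    )

    response = await arun_cached_request(
        client=client,
        request_type="complete",
        messages=[{"role": "user", "content": "What is 2+2?"}],
        model="gpt-3.5-turbo",
        max_tokens=50
    )

    assert response == {"response": "test"}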