Merge pull request #917 from PrefectHQ/logfire
add logfire example
zzstoatzz authored May 1, 2024
2 parents d825790 + d847f00 commit a35a7ba
Showing 11 changed files with 231 additions and 7 deletions.
41 changes: 41 additions & 0 deletions cookbook/logfire/README.md
@@ -0,0 +1,41 @@
## using logfire with marvin

[logfire](https://github.com/pydantic/logfire?tab=readme-ov-file#pydantic-logfire--uncomplicated-observability) is a brand-new (Apr 2024) observability tool for python applications: [otel](https://opentelemetry.io/docs/what-is-opentelemetry/)-based tracing, metrics, and logging. it's pretty [awesome](https://docs.pydantic.dev/logfire/#pydantic-logfire-the-observability-platform-you-deserve).

they also happen to wrap OpenAI pretty well out of the box! see `hello.py` for a simple example.
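
the core of it is just a couple of lines — a minimal sketch (`logfire.configure()` reads the credentials created by `logfire auth` below):

```python
import logfire
import openai

client = openai.AsyncClient()

logfire.configure()
logfire.instrument_openai(client)  # from here on, calls made with this client are traced
```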

### setup
```console
pip install marvin logfire
```
> [!NOTE]
> optionally, if you want to try out the fastapi integration
> ```console
> pip install 'logfire[fastapi]' uvicorn
> ```
then, log in to logfire
```console
logfire auth
```
### usage
using marvin should be no different from using any other library. check out [logfire's documentation](https://docs.pydantic.dev/logfire/#pydantic-logfire-the-observability-platform-you-deserve) for more information.
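
for example, you can wrap marvin calls in your own spans and structured logs — a sketch of the pattern `demo_app.py` uses below (assumes `OPENAI_API_KEY` is set; `@fn` here uses marvin's default client):

```python
import logfire
from marvin import fn

logfire.configure()


@fn
def haiku(topic: str) -> str:
    """Write a haiku about the topic"""


with logfire.span("writing a haiku"):
    logfire.info("topic: {topic}", topic="observability")
    print(haiku("observability"))
```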


### examples
```console
gh repo clone prefecthq/marvin && cd marvin
uvicorn cookbook.logfire.demo_app:app
```

in another terminal
```console
python cookbook/logfire/send_demo_request.py
```
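
or hit the endpoint directly — the payload below is hypothetical, but has the same shape `send_demo_request.py` builds:

```console
curl -s http://localhost:8000/interview \
  -H 'Content-Type: application/json' \
  -d '{
    "candidates": [
      {"name": "Alice", "self_identified_seniority": 3, "bio": "10 years with postgres, 5 with python."},
      {"name": "Eve", "self_identified_seniority": 4, "bio": "i have seen things you people would not believe"}
    ],
    "role": {"title": "Senior Software Engineer", "desired_seniority": 3, "description": "Build and maintain a large-scale web application."}
  }'
```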

check out the api docs at http://localhost:8000/docs, or open your logfire dashboard to see traces and logs like:

<p align="center">
<img src="/docs/assets/images/docs/examples/logfire-span.jpeg" alt="logfire span"/>
</p>
7 changes: 7 additions & 0 deletions cookbook/logfire/auto_trace.py
@@ -0,0 +1,7 @@
import logfire

# auto-tracing must be installed *before* the traced module is imported,
# since logfire instruments "hello" at import time
logfire.install_auto_tracing(modules=["hello"])

from hello import main  # noqa: E402

main()
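
to run it from the repo root (python prepends the script's directory to `sys.path`, so `from hello import main` resolves):

```console
python cookbook/logfire/auto_trace.py
```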
79 changes: 79 additions & 0 deletions cookbook/logfire/demo_app.py
@@ -0,0 +1,79 @@
from enum import Enum

import logfire
import openai
from fastapi import Body, FastAPI
from marvin import fn
from marvin.client import AsyncMarvinClient
from pydantic import BaseModel

app = FastAPI()
client = openai.AsyncClient()

logfire.configure(pydantic_plugin=logfire.PydanticPlugin(record="all"))  # record all pydantic validations
logfire.instrument_openai(client)  # trace every OpenAI call made with this client
logfire.instrument_fastapi(app)  # trace every request this app handles


class Seniority(Enum):
"""ranked seniority levels for candidates"""

JUNIOR = 1
MID = 2
SENIOR = 3
STAFF = 4


class Candidate(BaseModel):
name: str
self_identified_seniority: Seniority
bio: str


class Role(BaseModel):
title: str
desired_seniority: Seniority
description: str


@fn(client=AsyncMarvinClient(client=client))
def choose_among_candidates(cohort: list[Candidate], role: Role) -> Candidate:
    # marvin uses the returned string as prompt context and has the LLM
    # choose one of `cohort`, parsed into the annotated return type
    return (
f"We need a {role.desired_seniority.name} (at least) {role.title} that can "
f"most likely fulfill a job of this description:\n{role.description}\n"
)


@logfire.instrument("Dystopian Interview Process", extract_args=True)
def dystopian_interview_process(candidates: list[Candidate], role: Role) -> Candidate:
senior_enough_candidates = [
candidate
for candidate in candidates
if candidate.self_identified_seniority.value >= role.desired_seniority.value
]
logfire.info(
"Candidates at or above {seniority} level: {cohort}",
cohort=[c.name for c in senior_enough_candidates],
seniority=role.desired_seniority,
)
if len(senior_enough_candidates) == 1:
return senior_enough_candidates[0]

with logfire.span("Choosing among candidates"):
return choose_among_candidates(senior_enough_candidates, role)


@app.post("/interview")
async def interview(
candidates: list[Candidate] = Body(..., description="List of candidates"),
role: Role = Body(..., description="Role to fill"),
) -> Candidate:
best_candidate = dystopian_interview_process(candidates, role)
logfire.info("Best candidate: {best_candidate}", best_candidate=best_candidate)
return best_candidate


if __name__ == "__main__":
import uvicorn

uvicorn.run(app, host="localhost", port=8000)
40 changes: 40 additions & 0 deletions cookbook/logfire/hello.py
@@ -0,0 +1,40 @@
"""
Example of using logfire to instrument OpenAI API calls
see https://x.com/Nathan_Nowack/status/1785413529232708087
"""

import logfire
import openai
from marvin import fn
from marvin.client import AsyncMarvinClient
from pydantic import BaseModel, Field

client = openai.AsyncClient()

logfire.instrument_openai(client)


class Ingredient(BaseModel):
name: str
approximate_price: float = Field(..., gt=0, description="Price in USD")
quantity: int


class Recipe(BaseModel):
    ingredients: list[Ingredient]
steps: list[str]


@fn(client=AsyncMarvinClient(client=client))
def make_recipe(vibe: str) -> Recipe:
    """Generate a recipe based on a vibe"""
    # marvin's @fn never executes this body; it prompts the LLM with the
    # signature + docstring and parses the response into a `Recipe`


def main():
recipe = make_recipe("italian, for 4 people")
assert isinstance(recipe, Recipe)


if __name__ == "__main__":
main()
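
to run it (assumes `OPENAI_API_KEY` is set and you've done `logfire auth`):

```console
python cookbook/logfire/hello.py
```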
47 changes: 47 additions & 0 deletions cookbook/logfire/send_demo_request.py
@@ -0,0 +1,47 @@
import asyncio
import json

import httpx


async def main():
candidates = [
{
"name": "Alice",
"self_identified_seniority": 3,
"bio": "10 years with postgres, 5 years with python, 3 years with django.",
},
{
"name": "Bob",
"self_identified_seniority": 1,
"bio": "I just graduated from a coding bootcamp and I'm ready to take on the world!",
},
{
"name": "Charlie",
"self_identified_seniority": 2,
"bio": "graduated 2 years ago and i can make you a react app in no time",
},
{
"name": "David",
"self_identified_seniority": 3,
"bio": "i just been riding that SCRUM wave for 10 years fam",
},
]

role = {
"title": "Senior Software Engineer",
"desired_seniority": 3,
"description": "Build and maintain a large-scale web application with a team of 10+ engineers.",
}

async with httpx.AsyncClient() as client:
response = await client.post(
"http://localhost:8000/interview",
json={"candidates": candidates, "role": role},
)
result = response.json()
print(json.dumps(result, indent=2))


if __name__ == "__main__":
asyncio.run(main())
1 change: 1 addition & 0 deletions pyproject.toml
@@ -66,6 +66,7 @@ audio = [
"simpleaudio>=1.0",
]
video = ["opencv-python >= 4.5"]

slackbot = ["marvin[prefect]", "numpy", "raggy", "turbopuffer"]

[project.urls]
6 changes: 5 additions & 1 deletion src/marvin/beta/assistants/assistants.py
@@ -223,7 +223,11 @@ async def chat_async(
):
"""Async method to start a chat session with the assistant."""
history = Path(assistant_dir) / "chat_history.txt" if assistant_dir else None
session = PromptSession(history=FileHistory(str(history)) if history else None)
if history and not history.exists():
history.parent.mkdir(parents=True, exist_ok=True)
session = PromptSession(
history=FileHistory(str(history.absolute().resolve())) if history else None
)
# send an initial message, if provided
if initial_message is not None:
await self.say_async(initial_message, **kwargs)
8 changes: 5 additions & 3 deletions src/marvin/settings.py
@@ -36,9 +36,7 @@ class ChatCompletionSettings(MarvinSettings):
model_config = SettingsConfigDict(
env_prefix="marvin_chat_completions_", extra="ignore"
)
model: str = Field(
description="The default chat model to use.", default="gpt-4-1106-preview"
)
model: str = Field(description="The default chat model to use.", default="gpt-4")

temperature: float = Field(description="The default temperature to use.", default=1)

@@ -292,6 +290,10 @@ class Settings(MarvinSettings):
"Whether to log verbose messages, such as full API requests and responses."
),
)
max_tool_output_length: int = Field(
150,
description="The maximum length of output from a tool before it is truncated.",
)

@field_validator("log_level", mode="after")
@classmethod
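as with other marvin settings, the new field should be overridable via environment variable — an assumption based on `MarvinSettings`' env prefix, not shown in this diff:

```console
export MARVIN_MAX_TOOL_OUTPUT_LENGTH=500
```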
7 changes: 5 additions & 2 deletions src/marvin/utilities/tools.py
@@ -17,6 +17,7 @@
from pydantic.fields import FieldInfo
from pydantic.json_schema import GenerateJsonSchema, JsonSchemaMode

import marvin
from marvin.types import Function, FunctionTool
from marvin.utilities.asyncio import run_sync
from marvin.utilities.logging import get_logger
@@ -168,14 +169,16 @@ def call_function_tool(

arguments = json.loads(function_arguments_json)
logger.debug_kv(
f"{tool.function.name}", f"called with arguments: {arguments}", "green"
f"{tool.function.name}",
f"called with arguments: {json.dumps(arguments, indent=2)}",
"green",
)
output = tool.function._python_fn(**arguments)
if inspect.isawaitable(output):
output = run_sync(output)
if isinstance(output, BaseModel):
output = output.model_dump(mode="json")
truncated_output = str(output)[:100]
truncated_output = str(output)[: marvin.settings.max_tool_output_length]
if len(truncated_output) < len(str(output)):
truncated_output += "..."
logger.debug_kv(f"{tool.function.name}", f"returned: {truncated_output}", "green")
2 changes: 1 addition & 1 deletion tests/ai/test_classifier.py
@@ -27,7 +27,7 @@ def test_is_enum():
class TestClassifier:
class TestSimple:
def test_color_red(self):
result = Color("rose")
result = Color("burgundy")
assert result == Color.RED

def test_color_green(self):
