diff --git a/opentrons-ai-client/src/resources/utils/createProtocolUtils.tsx b/opentrons-ai-client/src/resources/utils/createProtocolUtils.tsx
index 06dd83061cb..b1510aaf89c 100644
--- a/opentrons-ai-client/src/resources/utils/createProtocolUtils.tsx
+++ b/opentrons-ai-client/src/resources/utils/createProtocolUtils.tsx
@@ -182,7 +182,8 @@ export function generateChatPrompt(
.join('\n')
: `- ${t(values.instruments.pipettes)}`
const flexGripper =
- values.instruments.flexGripper === FLEX_GRIPPER
+ values.instruments.flexGripper === FLEX_GRIPPER &&
+ values.instruments.robot === OPENTRONS_FLEX
? `\n- ${t('with_flex_gripper')}`
: ''
const modules = values.modules
diff --git a/opentrons-ai-server/api/domain/anthropic_predict.py b/opentrons-ai-server/api/domain/anthropic_predict.py
index abd94b631ba..4ab71c99488 100644
--- a/opentrons-ai-server/api/domain/anthropic_predict.py
+++ b/opentrons-ai-server/api/domain/anthropic_predict.py
@@ -1,6 +1,6 @@
import uuid
from pathlib import Path
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Literal
import requests
import structlog
@@ -23,7 +23,7 @@ def __init__(self, settings: Settings) -> None:
self.model_name: str = settings.anthropic_model_name
self.system_prompt: str = SYSTEM_PROMPT
self.path_docs: Path = ROOT_PATH / "api" / "storage" / "docs"
- self._messages: List[MessageParam] = [
+ self.cached_docs: List[MessageParam] = [
{
"role": "user",
"content": [
@@ -77,19 +77,26 @@ def get_docs(self) -> str:
return "\n".join(xml_output)
@tracer.wrap()
- def generate_message(self, max_tokens: int = 4096) -> Message:
+ def _process_message(
+ self, user_id: str, messages: List[MessageParam], message_type: Literal["create", "update"], max_tokens: int = 4096
+ ) -> Message:
+ """
+ Internal method to handle message processing with different system prompts.
+        For now, the system prompt is the same for both message types.
+ """
- response = self.client.messages.create(
+ response: Message = self.client.messages.create(
model=self.model_name,
system=self.system_prompt,
max_tokens=max_tokens,
- messages=self._messages,
+ messages=messages,
tools=self.tools, # type: ignore
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"},
+ metadata={"user_id": user_id},
)
logger.info(
- "Token usage",
+ f"Token usage: {message_type.capitalize()}",
extra={
"input_tokens": response.usage.input_tokens,
"output_tokens": response.usage.output_tokens,
@@ -100,15 +107,23 @@ def generate_message(self, max_tokens: int = 4096) -> Message:
return response
@tracer.wrap()
- def predict(self, prompt: str) -> str | None:
+ def process_message(
+ self, user_id: str, prompt: str, history: List[MessageParam] | None = None, message_type: Literal["create", "update"] = "create"
+ ) -> str | None:
+ """Unified method for creating and updating messages"""
try:
- self._messages.append({"role": "user", "content": PROMPT.format(USER_PROMPT=prompt)})
- response = self.generate_message()
+ messages: List[MessageParam] = self.cached_docs.copy()
+ if history:
+ messages += history
+
+ messages.append({"role": "user", "content": PROMPT.format(USER_PROMPT=prompt)})
+ response = self._process_message(user_id=user_id, messages=messages, message_type=message_type)
+
if response.content[-1].type == "tool_use":
tool_use = response.content[-1]
- self._messages.append({"role": "assistant", "content": response.content})
+ messages.append({"role": "assistant", "content": response.content})
result = self.handle_tool_use(tool_use.name, tool_use.input) # type: ignore
- self._messages.append(
+ messages.append(
{
"role": "user",
"content": [
@@ -120,25 +135,26 @@ def predict(self, prompt: str) -> str | None:
],
}
)
- follow_up = self.generate_message()
- response_text = follow_up.content[0].text # type: ignore
- self._messages.append({"role": "assistant", "content": response_text})
- return response_text
+ follow_up = self._process_message(user_id=user_id, messages=messages, message_type=message_type)
+ return follow_up.content[0].text # type: ignore
elif response.content[0].type == "text":
- response_text = response.content[0].text
- self._messages.append({"role": "assistant", "content": response_text})
- return response_text
+ return response.content[0].text
logger.error("Unexpected response type")
return None
- except IndexError as e:
- logger.error("Invalid response format", extra={"error": str(e)})
- return None
except Exception as e:
- logger.error("Error in predict method", extra={"error": str(e)})
+ logger.error(f"Error in {message_type} method", extra={"error": str(e)})
return None
+ @tracer.wrap()
+ def create(self, user_id: str, prompt: str, history: List[MessageParam] | None = None) -> str | None:
+ return self.process_message(user_id, prompt, history, "create")
+
+ @tracer.wrap()
+ def update(self, user_id: str, prompt: str, history: List[MessageParam] | None = None) -> str | None:
+ return self.process_message(user_id, prompt, history, "update")
+
@tracer.wrap()
def handle_tool_use(self, func_name: str, func_params: Dict[str, Any]) -> str:
if func_name == "simulate_protocol":
@@ -148,17 +164,6 @@ def handle_tool_use(self, func_name: str, func_params: Dict[str, Any]) -> str:
logger.error("Unknown tool", extra={"tool": func_name})
raise ValueError(f"Unknown tool: {func_name}")
- @tracer.wrap()
- def reset(self) -> None:
- self._messages = [
- {
- "role": "user",
- "content": [
- {"type": "text", "text": DOCUMENTS.format(doc_content=self.get_docs()), "cache_control": {"type": "ephemeral"}} # type: ignore
- ],
- }
- ]
-
@tracer.wrap()
def simulate_protocol(self, protocol: str) -> str:
url = "https://Opentrons-simulator.hf.space/protocol"
@@ -197,8 +202,9 @@ def main() -> None:
settings = Settings()
llm = AnthropicPredict(settings)
- prompt = Prompt.ask("Type a prompt to send to the Anthropic API:")
- completion = llm.predict(prompt)
+    prompt = Prompt.ask("Type a prompt to send to the Anthropic API:")
+
+    completion = llm.create(user_id="1", prompt=prompt, history=None)
print(completion)
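
A minimal usage sketch of the new create/update entry points defined in the hunks above (not part of the diff; the Settings import path, user id, and prompt strings are assumptions for illustration):

# Illustrative only: exercises AnthropicPredict.create/update as added above.
from api.settings import Settings                      # assumed import path
from api.domain.anthropic_predict import AnthropicPredict

llm = AnthropicPredict(Settings())

# First turn: no history, so the prompt is appended directly after the cached docs.
draft = llm.create(user_id="user-123", prompt="Write a protocol using the Flex to transfer 50 uL.")

# Follow-up turn: prior MessageParam entries are threaded through as history.
history = [{"role": "assistant", "content": draft or ""}]
revised = llm.update(user_id="user-123", prompt="Change all transfer volumes to 20 uL.", history=history)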
diff --git a/opentrons-ai-server/api/domain/config_anthropic.py b/opentrons-ai-server/api/domain/config_anthropic.py
index 9d511012592..beebc16d5ec 100644
--- a/opentrons-ai-server/api/domain/config_anthropic.py
+++ b/opentrons-ai-server/api/domain/config_anthropic.py
@@ -4,14 +4,11 @@
Your key responsibilities:
1. Welcome scientists warmly and understand their protocol needs
-2. Generate accurate Python protocols using standard Opentrons labware
+2. Generate accurate Python protocols using standard Opentrons labware (see the provided labware documents)
3. Provide clear explanations and documentation
4. Flag potential safety or compatibility issues
5. Suggest protocol optimizations when appropriate
-Call protocol simulation tool to validate the code - only when it is called explicitly by the user.
-For all other queries, provide direct responses.
-
Important guidelines:
- Always verify labware compatibility before generating protocols
- Include appropriate error handling in generated code
@@ -28,26 +25,25 @@
"""
PROMPT = """
-Here are the inputs you will work with:
-
-
-{USER_PROMPT}
-
-
Follow these instructions to handle the user's prompt:
-1. Analyze the user's prompt to determine if it's:
+1. Analyze the user's prompt to determine its category:
a) A request to generate a protocol
- b) A question about the Opentrons Python API v2
+   b) A question about the Opentrons Python API v2 or about details of a protocol
c) A common task (e.g., value changes, OT-2 to Flex conversion, slot correction)
d) An unrelated or unclear request
+   e) A tool call. If the user explicitly asks to simulate the protocol, call the simulation tool.
+ f) A greeting. Respond kindly.
-2. If the prompt is unrelated or unclear, ask the user for clarification. For example:
- I apologize, but your prompt seems unclear. Could you please provide more details?
+   Note: when you respond, you don't need to mention the category or the type.
+2. If the prompt is unrelated or unclear, ask the user for clarification.
+ I'm sorry, but your prompt seems unclear. Could you please provide more details?
+   You don't need to mention the category.
-3. If the prompt is a question about the API, answer it using only the information
+
+3. If the prompt is a question about the API or protocol details, answer it using only the information
provided in the section. Provide references and place them under the tag.
Format your response like this:
API answer:
@@ -86,8 +82,8 @@
}}
requirements = {{
- 'robotType': '[Robot type based on user prompt, OT-2 or Flex, default is OT-2]',
- 'apiLevel': '[apiLevel, default is 2.19 ]'
+ 'robotType': '[Robot type: OT-2(default) for Opentrons OT-2, Flex for Opentrons Flex]',
+ 'apiLevel': '[apiLevel, default: 2.19]'
}}
def run(protocol: protocol_api.ProtocolContext):
@@ -214,4 +210,10 @@ def run(protocol: protocol_api.ProtocolContext):
as a reference to generate a basic protocol.
Remember to use only the information provided in the . Do not introduce any external information or assumptions.
+
+Here are the inputs you will work with:
+
+
+{USER_PROMPT}
+
"""
diff --git a/opentrons-ai-server/api/handler/fast.py b/opentrons-ai-server/api/handler/fast.py
index b93eb6580ce..a167693dc2c 100644
--- a/opentrons-ai-server/api/handler/fast.py
+++ b/opentrons-ai-server/api/handler/fast.py
@@ -199,10 +199,19 @@ async def create_chat_completion(
return ChatResponse(reply="Default fake response. ", fake=body.fake)
response: Optional[str] = None
+
+ if body.history and body.history[0].get("content") and "Write a protocol using" in body.history[0]["content"]: # type: ignore
+ protocol_option = "create"
+ else:
+ protocol_option = "update"
+
if "openai" in settings.model.lower():
response = openai.predict(prompt=body.message, chat_completion_message_params=body.history)
else:
- response = claude.predict(prompt=body.message)
+ if protocol_option == "create":
+ response = claude.create(user_id=str(user.sub), prompt=body.message, history=body.history) # type: ignore
+ else:
+ response = claude.update(user_id=str(user.sub), prompt=body.message, history=body.history) # type: ignore
if response is None or response == "":
return ChatResponse(reply="No response was generated", fake=bool(body.fake))
@@ -218,35 +227,36 @@ async def create_chat_completion(
@tracer.wrap()
@app.post(
- "/api/chat/updateProtocol",
+ "/api/chat/createProtocol",
response_model=Union[ChatResponse, ErrorResponse],
- summary="Updates protocol",
- description="Generate a chat response based on the provided prompt that will update an existing protocol with the required changes.",
+ summary="Creates protocol",
+    description="Generate a chat response based on the provided prompt that will create a new protocol.",
)
-async def update_protocol(
- body: UpdateProtocol, user: Annotated[User, Security(auth.verify)]
+async def create_protocol(
+ body: CreateProtocol, user: Annotated[User, Security(auth.verify)]
) -> Union[ChatResponse, ErrorResponse]: # noqa: B008
"""
Generate an updated protocol using LLM.
- - **request**: The HTTP request containing the existing protocol and other relevant parameters.
+ - **request**: The HTTP request containing the chat message.
- **returns**: A chat response or an error message.
"""
- logger.info("POST /api/chat/updateProtocol", extra={"body": body.model_dump(), "user": user})
+ logger.info("POST /api/chat/createProtocol", extra={"body": body.model_dump(), "user": user})
try:
- if not body.protocol_text or body.protocol_text == "":
+
+ if not body.prompt or body.prompt == "":
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST, detail=EmptyRequestError(message="Request body is empty").model_dump()
)
if body.fake:
- return ChatResponse(reply="Fake response", fake=bool(body.fake))
+ return ChatResponse(reply="Fake response", fake=body.fake)
response: Optional[str] = None
if "openai" in settings.model.lower():
- response = openai.predict(prompt=body.prompt, chat_completion_message_params=None)
+ response = openai.predict(prompt=str(body.model_dump()), chat_completion_message_params=None)
else:
- response = claude.predict(prompt=body.prompt)
+ response = claude.create(user_id=str(user.sub), prompt=body.prompt, history=None)
if response is None or response == "":
return ChatResponse(reply="No response was generated", fake=bool(body.fake))
@@ -254,7 +264,7 @@ async def update_protocol(
return ChatResponse(reply=response, fake=bool(body.fake))
except Exception as e:
- logger.exception("Error processing protocol update")
+ logger.exception("Error processing protocol creation")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=InternalServerError(exception_object=e).model_dump()
) from e
@@ -262,36 +272,35 @@ async def update_protocol(
@tracer.wrap()
@app.post(
- "/api/chat/createProtocol",
+ "/api/chat/updateProtocol",
response_model=Union[ChatResponse, ErrorResponse],
- summary="Creates protocol",
- description="Generate a chat response based on the provided prompt that will create a new protocol with the required changes.",
+ summary="Updates protocol",
+ description="Generate a chat response based on the provided prompt that will update an existing protocol with the required changes.",
)
-async def create_protocol(
- body: CreateProtocol, user: Annotated[User, Security(auth.verify)]
+async def update_protocol(
+ body: UpdateProtocol, user: Annotated[User, Security(auth.verify)]
) -> Union[ChatResponse, ErrorResponse]: # noqa: B008
"""
Generate an updated protocol using LLM.
- - **request**: The HTTP request containing the chat message.
+ - **request**: The HTTP request containing the existing protocol and other relevant parameters.
- **returns**: A chat response or an error message.
"""
- logger.info("POST /api/chat/createProtocol", extra={"body": body.model_dump(), "user": user})
+ logger.info("POST /api/chat/updateProtocol", extra={"body": body.model_dump(), "user": user})
try:
-
- if not body.prompt or body.prompt == "":
+ if not body.protocol_text or body.protocol_text == "":
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST, detail=EmptyRequestError(message="Request body is empty").model_dump()
)
if body.fake:
- return ChatResponse(reply="Fake response", fake=body.fake)
+ return ChatResponse(reply="Fake response", fake=bool(body.fake))
response: Optional[str] = None
if "openai" in settings.model.lower():
- response = openai.predict(prompt=str(body.model_dump()), chat_completion_message_params=None)
+ response = openai.predict(prompt=body.prompt, chat_completion_message_params=None)
else:
- response = claude.predict(prompt=str(body.model_dump()))
+ response = claude.update(user_id=str(user.sub), prompt=body.prompt, history=None)
if response is None or response == "":
return ChatResponse(reply="No response was generated", fake=bool(body.fake))
@@ -299,7 +308,7 @@ async def create_protocol(
return ChatResponse(reply=response, fake=bool(body.fake))
except Exception as e:
- logger.exception("Error processing protocol creation")
+ logger.exception("Error processing protocol update")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=InternalServerError(exception_object=e).model_dump()
) from e
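
A hedged sketch (not part of the diff) of the routing heuristic added to create_chat_completion above; the history payload is a made-up example:

# The first history entry decides whether the request is routed to claude.create() or claude.update().
history = [{"role": "user", "content": "Write a protocol using the OT-2 to serially dilute a plate."}]

first_content = history[0].get("content") if history else None
protocol_option = "create" if first_content and "Write a protocol using" in first_content else "update"
assert protocol_option == "create"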
diff --git a/opentrons-ai-server/api/models/chat_request.py b/opentrons-ai-server/api/models/chat_request.py
index fb8c0942c9d..6135ba0618c 100644
--- a/opentrons-ai-server/api/models/chat_request.py
+++ b/opentrons-ai-server/api/models/chat_request.py
@@ -24,9 +24,13 @@ class Chat(BaseModel):
Field(None, description="Chat history in the form of a list of messages. Type is from OpenAI's ChatCompletionMessageParam"),
]
+ChatOptions = Literal["update", "create"]
+ChatOptionType = Annotated[Optional[ChatOptions], Field("create", description="Which chat pathway the user entered: create or update")]
+
class ChatRequest(BaseModel):
message: str = Field(..., description="The latest message to be processed.")
history: HistoryType
fake: bool = Field(True, description="When set to true, the response will be a fake. OpenAI API is not used.")
fake_key: FakeKeyType
+ chat_options: ChatOptionType
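
A minimal sketch (not part of the diff) of constructing a ChatRequest with the new chat_options field; the message text and flag values are illustrative:

from api.models.chat_request import ChatRequest

request = ChatRequest(
    message="Add a mix step after the transfer.",
    history=None,            # optional list of prior ChatCompletionMessageParam entries
    fake=True,               # keep the call offline; no LLM API is used
    fake_key=None,
    chat_options="update",   # Literal["update", "create"]; defaults to "create"
)
payload = request.model_dump()  # JSON-serializable body for POST /chat/completion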
diff --git a/opentrons-ai-server/api/storage/docs/standard-api-v0.0.1.md b/opentrons-ai-server/api/storage/docs/standard-api-v0.0.1.md
deleted file mode 100644
index f4b54d4308a..00000000000
--- a/opentrons-ai-server/api/storage/docs/standard-api-v0.0.1.md
+++ /dev/null
@@ -1,157 +0,0 @@
-Standard API
-
-### Approved Pipette Loadnames
-
-Note that the labware names are hard to differentiate sometimes,
-since there are cases that they differ in terms of last digits only.
-
-#### OT-2 Approved Loadnames
-
-For OT-2 robots, use the following approved loadnames:
-
-- p20_single_gen2
-- p300_single_gen2
-- p1000_single_gen2
-- p300_multi_gen2
-- p20_multi_gen2
-
-#### Flex Approved Loadnames
-
-For Flex robots, use these approved loadnames:
-
-- flex_1channel_50
-- flex_1channel_1000
-- flex_8channel_50
-- flex_8channel_1000
-- flex_96channel_1000
-
-### Agilent Labware
-
-- Agilent 1 Well Reservoir 290 mL: agilent_1_reservoir_290ml
-
-### Applied Biosystems Labware
-
-- Applied Biosystems MicroAmp 384 Well Plate 40 uL: appliedbiosystemsmicroamp_384_wellplate_40ul
-
-### Axygen Labware
-
-- Axygen 1 Well Reservoir 90 mL: axygen_1_reservoir_90ml
-
-### Bio-Rad Labware
-
-- Bio-Rad 384 Well Plate 50 uL: biorad_384_wellplate_50ul
-- Bio-Rad 96 Well Plate 200 uL PCR: biorad_96_wellplate_200ul_pcr
-
-### Corning Labware
-
-- Corning 12 Well Plate 6.9 mL Flat: corning_12_wellplate_6.9ml_flat
-- Corning 24 Well Plate 3.4 mL Flat: corning_24_wellplate_3.4ml_flat
-- Corning 384 Well Plate 112 uL Flat: corning_384_wellplate_112ul_flat
-- Corning 48 Well Plate 1.6 mL Flat: corning_48_wellplate_1.6ml_flat
-- Corning 6 Well Plate 16.8 mL Flat: corning_6_wellplate_16.8ml_flat
-- Corning 96 Well Plate 360 uL Flat: corning_96_wellplate_360ul_flat
-
-### GEB Labware
-
-- GEB 96 Tip Rack 1000 uL: geb_96_tiprack_1000ul
-- GEB 96 Tip Rack 10 uL: geb_96_tiprack_10ul
-
-### NEST Labware
-
-- NEST 12 Well Reservoir 15 mL: nest_12_reservoir_15ml
-- NEST 1 Well Reservoir 195 mL: nest_1_reservoir_195ml
-- NEST 1 Well Reservoir 290 mL: nest_1_reservoir_290ml
-- NEST 96 Well Plate 100 uL PCR Full Skirt: nest_96_wellplate_100ul_pcr_full_skirt
-- NEST 96 Well Plate 200 uL Flat: nest_96_wellplate_200ul_flat
-- NEST 96 Deep Well Plate 2mL: nest_96_wellplate_2ml_deep
-
-### Opentrons Labware
-
-- Opentrons 10 Tube Rack with Falcon 4x50 mL, 6x15 mL Conical: opentrons_10_tuberack_falcon_4x50ml_6x15ml_conical
-- Opentrons 10 Tube Rack with NEST 4x50 mL, 6x15 mL Conical: opentrons_10_tuberack_nest_4x50ml_6x15ml_conical
-- Opentrons 15 Tube Rack with Falcon 15 mL Conical: opentrons_15_tuberack_falcon_15ml_conical
-- Opentrons 15 Tube Rack with NEST 15 mL Conical: opentrons_15_tuberack_nest_15ml_conical
-- Opentrons 24 Well Aluminum Block with Generic 2 mL Screwcap: opentrons_24_aluminumblock_generic_2ml_screwcap
-- Opentrons 24 Well Aluminum Block with NEST 0.5 mL Screwcap: opentrons_24_aluminumblock_nest_0.5ml_screwcap
-- Opentrons 24 Well Aluminum Block with NEST 1.5 mL Screwcap: opentrons_24_aluminumblock_nest_1.5ml_screwcap
-- Opentrons 24 Well Aluminum Block with NEST 1.5 mL Snapcap: opentrons_24_aluminumblock_nest_1.5ml_snapcap
-- Opentrons 24 Well Aluminum Block with NEST 2 mL Screwcap: opentrons_24_aluminumblock_nest_2ml_screwcap
-- Opentrons 24 Well Aluminum Block with NEST 2 mL Snapcap: opentrons_24_aluminumblock_nest_2ml_snapcap
-- Opentrons 24 Tube Rack with Eppendorf 1.5 mL Safe-Lock Snapcap: opentrons_24_tuberack_eppendorf_1.5ml_safelock_snapcap
-- Opentrons 24 Tube Rack with Eppendorf 2 mL Safe-Lock Snapcap: opentrons_24_tuberack_eppendorf_2ml_safelock_snapcap
-- Opentrons 24 Tube Rack with Generic 2 mL Screwcap: opentrons_24_tuberack_generic_2ml_screwcap
-- Opentrons 24 Tube Rack with NEST 0.5 mL Screwcap: opentrons_24_tuberack_nest_0.5ml_screwcap # not opentrons_24_tuberack_nest_0_5ml_screwcap
-- Opentrons 24 Tube Rack with NEST 1.5 mL Screwcap: opentrons_24_tuberack_nest_1.5ml_screwcap # not opentrons_24_tuberack_nest_1_5ml_screwcap
-- Opentrons 24 Tube Rack with NEST 1.5 mL Snapcap: opentrons_24_tuberack_nest_1.5ml_snapcap # note the use of dot. (`.`); opentrons_24_tuberack_nest_1_5ml_snapcap is incorrect
-- Opentrons 24 Tube Rack with NEST 2 mL Screwcap: opentrons_24_tuberack_nest_2ml_screwcap
-- Opentrons 24 Tube Rack with NEST 2 mL Snapcap: opentrons_24_tuberack_nest_2ml_snapcap
-- Opentrons 6 Tube Rack with Falcon 50 mL Conical: opentrons_6_tuberack_falcon_50ml_conical
-- Opentrons 6 Tube Rack with NEST 50 mL Conical: opentrons_6_tuberack_nest_50ml_conical
-- Opentrons 96 Well Aluminum Block with Bio-Rad Well Plate 200 uL: opentrons_96_aluminumblock_biorad_wellplate_200ul
-- Opentrons 96 Well Aluminum Block with Generic PCR Strip 200 uL: opentrons_96_aluminumblock_generic_pcr_strip_200ul
-- Opentrons 96 Well Aluminum Block with NEST Well Plate 100 uL: opentrons_96_aluminumblock_nest_wellplate_100ul
-- Opentrons 96 Deep Well Heater-Shaker Adapter: opentrons_96_deep_well_adapter
-- Opentrons 96 Deep Well Heater-Shaker Adapter with NEST Deep Well Plate 2 mL: opentrons_96_deep_well_adapter_nest_wellplate_2ml_deep
-- Opentrons OT-2 96 Filter Tip Rack 1000 uL: opentrons_96_filtertiprack_1000ul
-- Opentrons OT-2 96 Filter Tip Rack 10 uL: opentrons_96_filtertiprack_10ul
-- Opentrons OT-2 96 Filter Tip Rack 200 uL: opentrons_96_filtertiprack_200ul
-- Opentrons OT-2 96 Filter Tip Rack 20 uL: opentrons_96_filtertiprack_20ul
-- Opentrons 96 Flat Bottom Heater-Shaker Adapter: opentrons_96_flat_bottom_adapter
-- Opentrons 96 Flat Bottom Heater-Shaker Adapter with NEST 96 Well Plate 200 uL Flat: opentrons_96_flat_bottom_adapter_nest_wellplate_200ul_flat
-- Opentrons 96 PCR Heater-Shaker Adapter: opentrons_96_pcr_adapter
-- Opentrons 96 PCR Heater-Shaker Adapter with NEST Well Plate 100 ul: opentrons_96_pcr_adapter_nest_wellplate_100ul_pcr_full_skirt
-- Opentrons OT-2 96 Tip Rack 1000 uL: opentrons_96_tiprack_1000ul
-- Opentrons OT-2 96 Tip Rack 10 uL: opentrons_96_tiprack_10ul
-- Opentrons OT-2 96 Tip Rack 20 uL: opentrons_96_tiprack_20ul
-- Opentrons OT-2 96 Tip Rack 300 uL: opentrons_96_tiprack_300ul
-- Opentrons 96 Well Aluminum Block: opentrons_96_well_aluminum_block
-- Opentrons 96 Well Aluminum Block adapter: opentrons_96_well_aluminum_block
-- Opentrons Tough 96 Well Plate 200 uL PCR Full Skirt: opentrons_96_wellplate_200ul_pcr_full_skirt
-- Opentrons Aluminum Flat Bottom Plate: opentrons_aluminum_flat_bottom_plate
-- Opentrons Flex 96 Filter Tip Rack 1000 uL: opentrons_flex_96_filtertiprack_1000ul # note that 1000ul not 200ul
-- Opentrons Flex 96 Filter Tip Rack 200 uL: opentrons_flex_96_filtertiprack_200ul # note that 200ul not 1000ul
-- Opentrons Flex 96 Filter Tip Rack 50 uL: opentrons_flex_96_filtertiprack_50ul
-- Opentrons Flex 96 Tip Rack 1000 uL: opentrons_flex_96_tiprack_1000ul
-- Opentrons Flex 96 Tip Rack 200 uL: opentrons_flex_96_tiprack_200ul
-- Opentrons Flex 96 Tip Rack 50 uL: opentrons_flex_96_tiprack_50ul
-- Opentrons Flex 96 Tip Rack Adapter: opentrons_flex_96_tiprack_adapter
-- Opentrons Universal Flat Heater-Shaker Adapter: opentrons_universal_flat_adapter
-- Opentrons Universal Flat Heater-Shaker Adapter with Corning 384 Well Plate 112 ul Flat: opentrons_universal_flat_adapter_corning_384_wellplate_112ul_flat
-
-### Other Labware Brands
-
-- Thermo Scientific Nunc 96 Well Plate 1300 uL: thermoscientificnunc_96_wellplate_1300ul
-- Thermo Scientific Nunc 96 Well Plate 2000 uL: thermoscientificnunc_96_wellplate_2000ul
-- USA Scientific 12 Well Reservoir 22 mL: usascientific_12_reservoir_22ml
-- USA Scientific 96 Deep Well Plate 2.4 mL: usascientific_96_wellplate_2.4ml_deep
-
-### Additional Opentrons Tube Racks
-
-- 4-in-1 Tube Rack Set 15: opentrons_15_tuberack_nest_15ml_conical
-- 4-in-1 Tube Rack Set 50: opentrons_6_tuberack_nest_50ml_conical
-
-### Flex Pipettes
-
-- Flex 1-Channel 50 uL Pipette (single channel): flex_1channel_50
-- Flex 1-Channel 1000 uL Pipette (single channel): flex_1channel_1000
-- Flex 8-Channel 50 uL Pipette (multi-channel): flex_8channel_50
-- Flex 8-Channel 1000 uL Pipette (multi-channel): flex_8channel_1000
-- Flex 96-Channel 1000 uL Pipette (multi-channel): flex_96channel_1000
-
-### Modules
-
-- temperature module: temperature module gen2
-- thermocycler module: thermocycler module
-- thermocycler module gen2: thermocyclerModuleV2
-
-### Single channel pipettes:
-
-- Flex 1-Channel 50 uL Pipette
-- Flex 1-Channel 1000 uL Pipette
-- flex_1channel_1000
-
-### Multi channel pipettes:
-
-- Flex 8-Channel 50 uL Pipette
-- Flex 8-Channel 1000 uL Pipette
-- Flex 96-Channel 1000 uL Pipette
diff --git a/opentrons-ai-server/api/storage/docs/standard-loadname-info.md b/opentrons-ai-server/api/storage/docs/standard-loadname-info.md
new file mode 100644
index 00000000000..5ca402ec2f3
--- /dev/null
+++ b/opentrons-ai-server/api/storage/docs/standard-loadname-info.md
@@ -0,0 +1,599 @@
+
+Total number of labware: 73
+
+
+
+- Loadname: agilent_1_reservoir_290ml
+- Dimensions: 1 row × 1 column
+- Well count: 1
+- Max volume: 290 mL
+- Well shape: V-bottom
+
+
+
+- Loadname: appliedbiosystemsmicroamp_384_wellplate_40ul
+- Dimensions: 16 rows × 24 columns
+- Well count: 384
+- Max volume: 40 µL
+- Well shape: V-bottom
+- Note: Requires Opentrons software v5.0 or later
+
+
+
+- Loadname: axygen_1_reservoir_90ml
+- Dimensions: 1 row × 1 column
+- Well count: 1
+- Max volume: 90 mL
+- Well shape: Flat-bottom
+
+
+
+- Loadname: biorad_384_wellplate_50ul
+- Dimensions: 16 rows × 24 columns
+- Well count: 384
+- Max volume: 50 µL
+- Well shape: V-bottom
+- Note: Requires Opentrons software v5.0 or later
+
+
+
+- Loadname: biorad_96_wellplate_200ul_pcr
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 200 µL
+- Well shape: V-bottom
+
+
+
+- Loadname: corning_12_wellplate_6.9ml_flat
+- Dimensions: 3 rows × 4 columns
+- Well count: 12
+- Max volume: 6.9 mL
+- Well shape: Flat-bottom
+
+
+
+- Loadname: corning_24_wellplate_3.4ml_flat
+- Dimensions: 4 rows × 6 columns
+- Well count: 24
+- Max volume: 3.4 mL
+- Well shape: Flat-bottom
+
+
+
+- Loadname: corning_384_wellplate_112ul_flat
+- Dimensions: 16 rows × 24 columns
+- Well count: 384
+- Max volume: 112 µL
+- Well shape: Flat-bottom
+
+
+
+- Loadname: corning_48_wellplate_1.6ml_flat
+- Dimensions: 6 rows × 8 columns
+- Well count: 48
+- Max volume: 1.6 mL
+- Well shape: Flat-bottom
+
+
+
+- Loadname: corning_6_wellplate_16.8ml_flat
+- Dimensions: 2 rows × 3 columns
+- Well count: 6
+- Max volume: 16.8 mL
+- Well shape: Flat-bottom
+
+
+
+- Loadname: corning_96_wellplate_360ul_flat
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 360 µL
+- Well shape: Flat-bottom
+
+
+
+- Loadname: geb_96_tiprack_1000ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 1000 µL
+
+
+
+- Loadname: geb_96_tiprack_10ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 10 µL
+
+
+
+- Loadname: nest_12_reservoir_15ml
+- Dimensions: 1 row × 12 columns
+- Well count: 12
+- Max volume: 15 mL
+- Well shape: V-bottom
+
+
+
+- Loadname: nest_1_reservoir_195ml
+- Dimensions: 1 row × 1 column
+- Well count: 1
+- Max volume: 195 mL
+- Well shape: V-bottom
+
+
+
+- Loadname: nest_1_reservoir_290ml
+- Dimensions: 1 row × 1 column
+- Well count: 1
+- Max volume: 290 mL
+- Well shape: V-bottom
+
+
+
+- Loadname: nest_96_wellplate_100ul_pcr_full_skirt
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 100 µL
+- Well shape: V-bottom
+
+
+
+- Loadname: nest_96_wellplate_200ul_flat
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 200 µL
+- Well shape: Flat-bottom
+
+
+
+- Loadname: nest_96_wellplate_2ml_deep
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 2000 µL
+- Well shape: V-bottom
+
+
+
+- Loadname: opentrons_10_tuberack_falcon_4x50ml_6x15ml_conical
+- Tube count: 10
+- Configuration:
+ - 6 × 15 mL tubes (V-bottom)
+ - 4 × 50 mL tubes (V-bottom)
+
+
+
+- Loadname: opentrons_10_tuberack_nest_4x50ml_6x15ml_conical
+- Tube count: 10
+- Configuration:
+ - 6 × 15 mL tubes (V-bottom)
+ - 4 × 50 mL tubes (V-bottom)
+
+
+
+- Loadname: opentrons_15_tuberack_falcon_15ml_conical
+- Dimensions: 3 rows × 5 columns
+- Tube count: 15
+- Max volume: 15 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_15_tuberack_nest_15ml_conical
+- Dimensions: 3 rows × 5 columns
+- Tube count: 15
+- Max volume: 15 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_aluminumblock_generic_2ml_screwcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 2 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_aluminumblock_nest_0.5ml_screwcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 0.5 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_aluminumblock_nest_1.5ml_screwcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 1.5 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_aluminumblock_nest_1.5ml_snapcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 1.5 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_aluminumblock_nest_2ml_screwcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 2 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_aluminumblock_nest_2ml_snapcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 2 mL per tube
+- Tube shape: U-bottom
+
+
+
+- Loadname: opentrons_24_tuberack_eppendorf_1.5ml_safelock_snapcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 1.5 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_tuberack_eppendorf_2ml_safelock_snapcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 2 mL per tube
+- Tube shape: U-bottom
+
+
+
+- Loadname: opentrons_24_tuberack_generic_2ml_screwcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 2 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_tuberack_nest_0.5ml_screwcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 0.5 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_tuberack_nest_1.5ml_screwcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 1.5 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_tuberack_nest_1.5ml_snapcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 1.5 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_tuberack_nest_2ml_screwcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 2 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_24_tuberack_nest_2ml_snapcap
+- Dimensions: 4 rows × 6 columns
+- Tube count: 24
+- Max volume: 2 mL per tube
+- Tube shape: U-bottom
+
+
+
+- Loadname: opentrons_6_tuberack_falcon_50ml_conical
+- Dimensions: 2 rows × 3 columns
+- Tube count: 6
+- Max volume: 50 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_6_tuberack_nest_50ml_conical
+- Dimensions: 2 rows × 3 columns
+- Tube count: 6
+- Max volume: 50 mL per tube
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_96_deep_well_temp_mod_adapter
+- Dimensions: 8 rows × 12 columns
+- Well count: 0
+- Max volume: Various
+- Note: Adapter only
+
+
+
+- Loadname: opentrons_96_aluminumblock_biorad_wellplate_200ul
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 200 µL
+- Well shape: V-bottom
+
+
+
+- Loadname: opentrons_96_aluminumblock_generic_pcr_strip_200ul
+- Dimensions: 8 rows × 12 columns
+- Tube count: 96
+- Max volume: 200 µL
+- Tube shape: V-bottom
+
+
+
+- Loadname: opentrons_96_aluminumblock_nest_wellplate_100ul
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 100 µL
+- Well shape: V-bottom
+
+
+
+- Loadname: opentrons_96_deep_well_adapter
+- Dimensions: 8 rows × 12 columns
+- Well count: 0
+- Max volume: Various
+- Note: Adapter only
+
+
+
+- Loadname: opentrons_96_deep_well_adapter_nest_wellplate_2ml_deep
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 2000 µL
+- Well shape: V-bottom
+
+
+
+- Loadname: opentrons_96_filtertiprack_1000ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 1000 µL
+
+
+
+- Loadname: opentrons_96_filtertiprack_10ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 10 µL
+
+
+
+- Loadname: opentrons_96_filtertiprack_200ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 200 µL
+
+
+
+- Loadname: opentrons_96_filtertiprack_20ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 20 µL
+
+
+
+- Loadname: opentrons_96_flat_bottom_adapter
+- Dimensions: 8 rows × 12 columns
+- Well count: 0
+- Max volume: Various
+- Note: Adapter only
+
+
+
+- Loadname: opentrons_96_flat_bottom_adapter_nest_wellplate_200ul_flat
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 200 µL
+- Well shape: Flat-bottom
+
+
+
+- Loadname: opentrons_96_pcr_adapter
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: Various
+- Well shape: V-bottom
+
+
+
+- Loadname: opentrons_96_pcr_adapter_nest_wellplate_100ul_pcr_full_skirt
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 100 µL
+- Well shape: V-bottom
+
+
+
+- Loadname: opentrons_96_tiprack_1000ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 1000 µL
+
+
+
+- Loadname: opentrons_96_tiprack_10ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 10 µL
+
+
+
+- Loadname: opentrons_96_tiprack_20ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 20 µL
+
+
+
+- Loadname: opentrons_96_tiprack_300ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 300 µL
+
+
+
+- Loadname: opentrons_96_well_aluminum_block
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: Various
+- Well shape: V-bottom
+
+
+
+- Loadname: opentrons_96_wellplate_200ul_pcr_full_skirt
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 200 µL
+- Well shape: V-bottom
+
+
+- Loadname: opentrons_aluminum_flat_bottom_plate
+- Dimensions: N/A (base plate)
+- Well count: 0
+- Max volume: Various
+- Note: Base plate only
+
+
+
+- Loadname: opentrons_flex_96_filtertiprack_1000ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 1000 µL
+
+
+
+- Loadname: opentrons_flex_96_filtertiprack_200ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 200 µL
+
+
+
+- Loadname: opentrons_flex_96_filtertiprack_50ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 50 µL
+
+
+
+- Loadname: opentrons_flex_96_tiprack_1000ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 1000 µL
+
+
+
+- Loadname: opentrons_flex_96_tiprack_200ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 200 µL
+
+
+
+- Loadname: opentrons_flex_96_tiprack_50ul
+- Dimensions: 8 rows × 12 columns
+- Tip count: 96
+- Max volume: 50 µL
+
+
+
+- Loadname: opentrons_flex_96_tiprack_adapter
+- Dimensions: 8 rows × 12 columns
+- Well count: 0
+- Max volume: Various
+- Note: Adapter only
+
+
+- Loadname: opentrons_universal_flat_adapter
+- Dimensions: N/A (universal adapter)
+- Well count: 0
+- Max volume: Various
+- Note: Adapter only
+
+
+
+- Loadname: opentrons_universal_flat_adapter_corning_384_wellplate_112ul_flat
+- Dimensions: 16 rows × 24 columns
+- Well count: 384
+- Max volume: 112 µL
+- Well shape: Flat-bottom
+
+
+
+- Loadname: thermoscientificnunc_96_wellplate_1300ul
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 1300 µL
+- Well shape: U-bottom
+- Note: Requires Opentrons software v5.0 or later
+
+
+
+- Loadname: thermoscientificnunc_96_wellplate_2000ul
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 2000 µL
+- Well shape: U-bottom
+- Note: Requires Opentrons software v5.0 or later
+
+
+
+- Loadname: usascientific_12_reservoir_22ml
+- Dimensions: 1 row × 12 columns
+- Well count: 12
+- Max volume: 22 mL
+- Well shape: V-bottom
+
+
+
+- Loadname: usascientific_96_wellplate_2.4ml_deep
+- Dimensions: 8 rows × 12 columns
+- Well count: 96
+- Max volume: 2.4 mL
+- Well shape: U-bottom
+
+
+
+ - p20_single_gen2
+ - volume: 1-20 µL
+ - p20_multi_gen2
+ - volume: 1-20 µL
+ - p300_single_gen2
+ - volume: 20-300 µL
+ - p300_multi_gen2
+ - volume: 20-200 µL
+ - p1000_single_gen2
+ - volume: 100-1000 µL
+
+
+ - flex_1channel_50
+ - volume: 1–50 µL
+ - flex_1channel_1000
+ - volume: 5–1000 µL
+ - flex_8channel_50
+ - volume: 1–50 µL
+ - flex_8channel_1000
+ - volume: 5–1000 µL
+ - flex_96channel_1000
+ - volume: 5–1000 µL
+
+
diff --git a/opentrons-ai-server/tests/helpers/client.py b/opentrons-ai-server/tests/helpers/client.py
index bf5a7febb3c..3b3dcfa7511 100644
--- a/opentrons-ai-server/tests/helpers/client.py
+++ b/opentrons-ai-server/tests/helpers/client.py
@@ -65,7 +65,7 @@ def get_health(self) -> Response:
@timeit
def get_chat_completion(self, message: str, fake: bool = True, fake_key: Optional[FakeKeys] = None, bad_auth: bool = False) -> Response:
"""Call the /chat/completion endpoint and return the response."""
- request = ChatRequest(message=message, fake=fake, fake_key=fake_key, history=None)
+ request = ChatRequest(message=message, fake=fake, fake_key=fake_key, history=None, chat_options=None)
headers = self.standard_headers if not bad_auth else self.invalid_auth_headers
return self.httpx.post("/chat/completion", headers=headers, json=request.model_dump())