Merge pull request road-core#58 from tisnik/minor-fixes
Minor fixes in service source code
tisnik authored Oct 17, 2024
2 parents 3db4968 + 688e56f commit 78c8d79
Showing 5 changed files with 8 additions and 8 deletions.
6 changes: 3 additions & 3 deletions ols/app/endpoints/health.py
@@ -16,7 +16,7 @@

 router = APIRouter(tags=["health"])
 logger = logging.getLogger(__name__)
-llm_is_ready_persistent_state = False
+llm_is_ready_persistent_state: bool = False


 def llm_is_ready() -> bool:
@@ -25,7 +25,7 @@ def llm_is_ready() -> bool:
     If so, store the success to `llm_is_ready_persistent_state` to cache
     the result for future calls.
     """
-    global llm_is_ready_persistent_state
+    global llm_is_ready_persistent_state  # pylint: disable=global-statement
     if llm_is_ready_persistent_state is True:
         return True
     try:
@@ -40,7 +40,7 @@ def llm_is_ready() -> bool:
             return True
         raise ValueError(f"Unexpected response from LLM: {response}")
     except Exception as e:
-        logger.error(f"LLM connection check failed with - {e}")
+        logger.error("LLM connection check failed with - %s", e)
         return False
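Both health.py changes follow common patterns: a module-level global caches the first successful readiness probe (hence the pylint global-statement disable), and the logger call moves from eager f-string interpolation to lazy %-style arguments, which are only rendered if the record is actually emitted. A minimal sketch of the combined pattern, with a hypothetical check_llm_connection() standing in for the real LLM probe:

```python
import logging

logger = logging.getLogger(__name__)

llm_is_ready_persistent_state: bool = False  # module-level cache


def check_llm_connection() -> bool:
    """Hypothetical stand-in for the real LLM connectivity probe."""
    return True


def llm_is_ready() -> bool:
    """Return True once any probe has succeeded, caching the result."""
    global llm_is_ready_persistent_state  # pylint: disable=global-statement
    if llm_is_ready_persistent_state is True:
        return True
    try:
        llm_is_ready_persistent_state = check_llm_connection()
    except Exception as e:  # pylint: disable=broad-exception-caught
        # Lazy formatting: 'e' is stringified only if this record is emitted.
        logger.error("LLM connection check failed with - %s", e)
        return False
    return llm_is_ready_persistent_state
```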
4 changes: 3 additions & 1 deletion ols/app/main.py
@@ -119,7 +119,9 @@ async def stream_response_body(
         )
         yield chunk

-    if isinstance(response, StreamingResponse):
+    # The current version of Starlette passes an instance of the private
+    # _StreamingResponse class, so we check whether body_iterator exists.
+    if hasattr(response, "body_iterator"):
         # The response is already a streaming response
         response.body_iterator = stream_response_body(response.body_iterator)
     else:
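The switch from isinstance to hasattr is duck typing: Starlette's BaseHTTPMiddleware hands back a private _StreamingResponse rather than the public StreamingResponse, but both expose body_iterator. A sketch of the wrapping pattern under assumed names (passthrough and wrap_streaming_body are illustrative, not from this repo):

```python
from collections.abc import AsyncIterator

from fastapi import FastAPI, Request

app = FastAPI()


async def passthrough(body: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
    """Re-yield each chunk unchanged; inspection could happen here."""
    async for chunk in body:
        yield chunk


@app.middleware("http")
async def wrap_streaming_body(request: Request, call_next):
    response = await call_next(request)
    # Duck typing instead of isinstance(response, StreamingResponse):
    # the middleware may receive Starlette's private _StreamingResponse,
    # but any streaming response exposes a body_iterator attribute.
    if hasattr(response, "body_iterator"):
        response.body_iterator = passthrough(response.body_iterator)
    return response
```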
2 changes: 1 addition & 1 deletion ols/app/models/config.py
@@ -540,7 +540,7 @@ def __init__(self, **data: Any) -> None:
         super().__init__(**data)
         # password should be read from file
         if self.password_path is not None:
-            with open(self.password_path) as f:
+            with open(self.password_path, "r") as f:
                 self.password = f.read().rstrip()

     @model_validator(mode="after")
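Spelling out the mode makes the intent explicit; "r" is already the default, and a stricter version would also pin the encoding, since open() otherwise falls back to the locale default. A small sketch of the same read-a-secret-from-file pattern (read_secret is an illustrative name, not from this repo):

```python
from pathlib import Path


def read_secret(path: str) -> str:
    """Read a credential from a file, dropping the trailing newline."""
    with open(path, "r", encoding="utf-8") as f:
        return f.read().rstrip()


# Equivalent with pathlib:
def read_secret_pathlib(path: str) -> str:
    return Path(path).read_text(encoding="utf-8").rstrip()
```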
2 changes: 1 addition & 1 deletion ols/src/llms/llm_loader.py
@@ -89,7 +89,7 @@ def load_llm(
             f"Unsupported LLM provider type '{provider_config.type}'."
         )

-    logger.debug(f"loading LLM '{model}' from '{provider}'")
+    logger.debug("loading LLM model '%s' from provider '%s'", model, provider)

     llm_provider = llm_providers_reg.llm_providers[provider_config.type]
     return llm_provider(model, provider_config, generic_llm_params or {}).load()
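The final two lines are a registry dispatch: a dict maps provider type names to provider classes, and unsupported types raise before the lookup. A self-contained sketch of that pattern under hypothetical names (FakeOpenAIProvider and LLM_PROVIDERS are illustrative, not the repo's actual registry):

```python
from typing import Any, Callable


class FakeOpenAIProvider:
    """Illustrative provider; the real registry holds LangChain wrappers."""

    def __init__(self, model: str, params: dict[str, Any]):
        self.model, self.params = model, params

    def load(self) -> str:
        return f"openai:{self.model}"


# Registry: provider type name -> provider class.
LLM_PROVIDERS: dict[str, Callable[..., Any]] = {
    "openai": FakeOpenAIProvider,
}


def load_llm(provider_type: str, model: str, params: dict[str, Any] | None = None):
    if provider_type not in LLM_PROVIDERS:
        raise ValueError(f"Unsupported LLM provider type '{provider_type}'.")
    provider_cls = LLM_PROVIDERS[provider_type]
    return provider_cls(model, params or {}).load()


print(load_llm("openai", "gpt-4"))  # -> openai:gpt-4
```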
2 changes: 0 additions & 2 deletions ols/src/prompts/prompts.py
@@ -5,8 +5,6 @@

 from ols.constants import SUBJECT_ALLOWED, SUBJECT_REJECTED

-# TODO: OLS-503 Fine tune system prompt
-
 # Note::
 # Right now templates are somewhat alligned to make granite work better.
 # GPT still works well with this. Ideally we should have model specific tags.
