Commit

Merge pull request stitionai#160 from varshney-yash/patch-1
add exception handling to ollama client inference
mufeedvh authored Mar 28, 2024
2 parents de007c6 + 523cbcf commit c717e15
Showing 1 changed file with 10 additions and 8 deletions.
18 changes: 10 additions & 8 deletions src/llm/ollama_client.py
@@ -4,6 +4,8 @@
 
 from src.logger import Logger
 
+logger = Logger()
+
 client = Client(host=Config().get_ollama_api_endpoint())
 
 
@@ -13,15 +15,15 @@ def list_models():
         try:
             return client.list()["models"]
         except httpx.ConnectError:
-            Logger().warning(
-                "Ollama server not running, please start the server to use models from Ollama."
-            )
+            logger.warning("Ollama server not running, please start the server to use models from Ollama.")
         except Exception as e:
-            Logger().error(f"Failed to list Ollama models: {e}")
-
+            logger.error(f"Failed to list Ollama models: {e}")
         return []
 
     def inference(self, model_id: str, prompt: str) -> str:
-        response = client.generate(model=model_id, prompt=prompt.strip())
-
-        return response["response"]
+        try:
+            response = ollama.generate(model=model_id, prompt=prompt.strip())
+            return response['response']
+        except Exception as e:
+            logger.error(f"Error during model inference: {e}")
+            return ""
