add exception handling to ollama client inference
varshney-yash authored Mar 26, 2024
1 parent 086567c commit ac22917
Showing 1 changed file with 10 additions and 9 deletions.

src/llm/ollama_client.py
@@ -3,23 +3,24 @@
 
 from src.logger import Logger
 
+logger = Logger()
+
 class Ollama:
     @staticmethod
     def list_models():
         try:
             return ollama.list()["models"]
         except httpx.ConnectError:
-            Logger().warning("Ollama server not running, please start the server to use models from Ollama.")
+            logger.warning("Ollama server not running, please start the server to use models from Ollama.")
         except Exception as e:
-            Logger().error(f"Failed to list Ollama models: {e}")
-
+            logger.error(f"Failed to list Ollama models: {e}")
         return []
 
     def inference(self, model_id: str, prompt: str) -> str:
-        response = ollama.generate(
-            model = model_id,
-            prompt = prompt.strip()
-        )
-
-        return response['response']
+        try:
+            response = ollama.generate(model=model_id, prompt=prompt.strip())
+            return response['response']
+        except Exception as e:
+            logger.error(f"Error during model inference: {e}")
+            return ""
