-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathollama_send.py
38 lines (36 loc) · 1.83 KB
/
ollama_send.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import asyncio

import aiohttp
class OllamaSender:
    """Sends a pull-request diff to a local Ollama server for an automated code review."""

    def __init__(self, model="llama3"):
        """Configure the sender.

        Args:
            model: Name of the Ollama model to query (default "llama3").
        """
        self.system_prompt = """Review the pull request diff. Assess code quality, readability, performance, and security. Check the documentation and test coverage to ensure they are thorough. Provide constructive feedback on alignment with project goals, suggesting improvements and optimizations where needed, all in a supportive and positive tone. be short and concise."""
        self.model = model

    async def send_to_ollama(self, diff):
        """POST the diff to Ollama's /api/generate endpoint and return the review text.

        Retries up to 3 times on HTTP or connection errors, with a short
        exponential backoff between attempts.

        Args:
            diff: Pull-request diff text appended to the system prompt.

        Returns:
            The model's response string on success, or None if all attempts failed.
        """
        headers = {
            "Content-Type": "application/json"
        }
        # Named `payload` (not `data`) so the response JSON below can't clobber it.
        payload = {
            "model": self.model,
            "prompt": self.system_prompt + diff,
            "stream": False,
            # Ollama ignores top-level sampling params; they must be nested
            # under "options" to take effect.
            "options": {"temperature": 0.5}
        }
        max_retries = 3
        for attempt in range(max_retries):
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.post("http://localhost:8081/api/generate", headers=headers, json=payload) as response:
                        if response.status == 200:
                            body = await response.json()
                            actual_response = body["response"]
                            print(actual_response)
                            return actual_response
                        else:
                            text = await response.text()
                            print("Error:", text)
                            raise Exception('Failed to get a valid response from the model.')
            except Exception as e:
                print(e)
                if attempt < max_retries - 1:  # If this wasn't the last attempt
                    print(f"Attempt {attempt + 1} failed, retrying...")
                    # Exponential backoff: wait 1s, then 2s, before retrying.
                    await asyncio.sleep(2 ** attempt)
                else:
                    print("All attempts failed")
                    return None