-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path llm.py
37 lines (24 loc) · 990 Bytes
/
llm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
# Module-level setup: configure the environment, then build the Gemini LLM
# and PaLM embedding model used by get_response_from_llm below.
import os
import sys
from llama_index.llms import Gemini
from llama_index.embeddings import GooglePaLMEmbedding
from dotenv import load_dotenv
# Load variables from a local .env file into os.environ — presumably the
# Google API key the Gemini/PaLM clients read; verify which keys are required.
load_dotenv()
import nest_asyncio
# Patch asyncio to allow nested event loops — typically needed when this
# module runs inside a Jupyter notebook; TODO confirm that context.
nest_asyncio.apply()
import warnings
# NOTE(review): suppresses ALL warnings globally, including deprecation
# warnings from other libraries — consider narrowing the filter.
warnings.filterwarnings('ignore')
# Initializing Google Gemini LLM model as the LLM
llm = Gemini()
# Initializing Google gecko embedding
model_name = "models/embedding-gecko-001"
# Embedding model built here at import time; not used in this chunk —
# presumably consumed by an index/service-context elsewhere in the project.
embed_model = GooglePaLMEmbedding(model_name=model_name)
def get_response_from_llm(user_query, sql_query, sql_response, llm=llm):
    """Turn an executed SQL query and its result into a natural-language answer.

    Args:
        user_query: The user's original question.
        sql_query: The SQL statement that was run to answer the question.
        sql_response: The raw result produced by executing ``sql_query``.
        llm: Any client exposing ``complete(prompt)``; defaults to the
            module-level Gemini instance (bound at import time).

    Returns:
        The completion object returned by ``llm.complete`` — the
        natural-language answer to ``user_query``.
    """
    # The prompt grounds the model in the actual SQL result and explicitly
    # tells it to admit ignorance rather than fabricate an answer.
    # Fixed typo: "languge" -> "language".
    prompt = f"""User Query: {user_query}
SQL Query: {sql_query}
SQL Response: {sql_response}
Using the query and response, write a natural language response to the user query.
Please make sure not to make up answers. If you don't know, just say you don't know.
"""
    llm_response = llm.complete(prompt)
    return llm_response