Commit d885576
update: set default search engine to SearXNG, default llm to OpenAI
yokingma committed Apr 16, 2024
1 parent 5c27436 commit d885576
Showing 5 changed files with 19 additions and 16 deletions.

.env (4 changes: 3 additions & 1 deletion)

@@ -27,4 +27,6 @@ LEPTON_KEY=
 # Local llm: Ollama hostname, could modify if you need.
 OLLAMA_HOST=http://host.docker.internal:11434
 # Searxng hostname, could modify if you need.
-SEARXNG_HOSTNAME=http://searxng:8080
+SEARXNG_HOSTNAME=http://searxng:8080
+# The count of resources referenced
+REFERENCE_COUNT = 8
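
The new REFERENCE_COUNT entry replaces the constant this commit removes from src/constant.ts below. One point worth noting: values read from process.env are strings (or undefined), which is why the rag.ts change applies a unary + before using the value. A minimal sketch of that read-and-coerce pattern, mirroring the diff (the NaN guard at the end is an added assumption, not part of the commit):

// TypeScript sketch: consume the env var the way rag.ts now does.
const raw = process.env.REFERENCE_COUNT || 8; // string when set, number 8 otherwise
const referenceCount = +raw;                  // unary + coerces "8" -> 8
// Hypothetical guard for a malformed value such as REFERENCE_COUNT=abc:
const safeCount = Number.isNaN(referenceCount) ? 8 : referenceCount;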

src/constant.ts (2 changes: 0 additions & 2 deletions)

@@ -9,8 +9,6 @@ export const BING_MKT = 'en-US';
 // default timeout ms
 export const DEFAULT_SEARCH_ENGINE_TIMEOUT = 20000;
 
-export const REFERENCE_COUNT = 8;
-
 // default search keywords
 export const DefaultQuery = 'Who said \'live long and prosper';
 export const DefaultSystem = 'You are a helpful assistant.';

src/rag.ts (25 changes: 12 additions & 13 deletions)

@@ -2,10 +2,8 @@ import { EBackend, IChatInputMessage, IStreamHandler, SearchFunc } from './inter
 import { searchWithBing, searchWithGoogle, searchWithSogou, searchWithSearXNG } from './service';
 import { MoreQuestionsPrompt, RagQueryPrompt } from './prompt';
 import { aliyun, baidu, openai, google, tencent, yi, moonshot, lepton, local } from './platform';
-// import { memoryCache } from './utils';
 import util from 'util';
-import { REFERENCE_COUNT } from './constant';
-import { AliyunModels, AllModels, BaiduModels, OpenAIModels, GoogleModels, TencentModels, YiModels, MoonshotModels, LeptonModels } from './constant';
+import { AliyunModels, BaiduModels, OpenAIModels, GoogleModels, TencentModels, YiModels, MoonshotModels, LeptonModels } from './constant';
 
 interface RagOptions {
   backend?: EBackend
@@ -25,11 +23,11 @@ export class Rag {
   private stream: boolean;
 
   constructor(params?: RagOptions) {
-    const { backend = EBackend.BING, stream = true, model = AllModels.QWEN_MAX, locally } = params || {};
+    const { backend = EBackend.SEARXNG, stream = true, model = OpenAIModels.GPT35TURBO, locally } = params || {};
     if (locally) {
       this.chat = local.chatStream.bind(local);
     } else {
-      this.chat = processModel(model, stream);
+      this.chat = processModel(model);
     }
     this.model = model;
     this.stream = stream;
@@ -49,13 +47,14 @@
         this.search = searchWithSearXNG;
         break;
       default:
-        this.search = searchWithBing;
+        this.search = searchWithSearXNG;
     }
   }
 
   public async query(query: string, onMessage?: (...args: any[]) => void) {
     const contexts = await this.search(query);
-    const limitContexts = contexts.slice(0, REFERENCE_COUNT);
+    const REFERENCE_COUNT = process.env.REFERENCE_COUNT || 8;
+    const limitContexts = contexts.slice(0, +REFERENCE_COUNT);
     if (!this.stream) {
       const relatedPromise = this.getRelatedQuestions(query, limitContexts);
       const answerPromise = this.getAiAnswer(query, contexts);
@@ -128,18 +127,18 @@
   // private saveResult(contexts: any[], llmResponse: string, relatedQuestionsFuture: any[], searchUUID: string) {}
 }
 
-function processModel(model = OpenAIModels.GPT35TURBO, stream = true) {
+function processModel(model = OpenAIModels.GPT35TURBO) {
   if (Object.values(AliyunModels).includes(model)) {
-    return stream ? aliyun.chatStream.bind(aliyun) : aliyun.chat.bind(aliyun);
+    return aliyun.chatStream.bind(aliyun);
   }
   if (Object.values(OpenAIModels).includes(model)) {
-    return stream ? openai.chatStream.bind(openai) : openai.chat.bind(openai);
+    return openai.chatStream.bind(openai);
   }
   if (Object.values(BaiduModels).includes(model)) {
-    return stream ? baidu.chatStream.bind(baidu) : baidu.chat.bind(baidu);
+    return baidu.chatStream.bind(baidu);
   }
   if (Object.values(GoogleModels).includes(model)) {
-    return stream ? google.chatStream.bind(google) : google.chat.bind(google);
+    return google.chatStream.bind(google);
   }
   if (Object.values(TencentModels).includes(model)) {
     return tencent.chatStream.bind(tencent);
@@ -153,5 +152,5 @@ function processModel(model = OpenAIModels.GPT35TURBO, stream = true) {
   if (Object.values(LeptonModels).includes(model)) {
     return lepton.chatStream.bind(lepton);
   }
-  return stream ? openai.chatStream.bind(openai) : openai.chat.bind(openai);
+  return openai.chatStream.bind(openai);
 }
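
Taken together, the rag.ts changes set the default search backend to SearXNG (both in the constructor and in the switch fallback), set the default model to OpenAI's GPT-3.5 TurbO, move the reference count from a compile-time constant to an environment variable, and make processModel always return the streaming chat handler (the non-streaming chat path is gone). A hypothetical usage sketch under the new defaults; the constructor and query signatures are taken from this diff, while the onMessage payload shape is assumed:

// TypeScript sketch: Rag with the defaults introduced by this commit.
import { Rag } from './rag';

(async () => {
  const rag = new Rag(); // now defaults to EBackend.SEARXNG + OpenAIModels.GPT35TURBO
  await rag.query('what is SearXNG?', (chunk) => {
    // onMessage receives streamed output; its exact shape isn't shown in this diff
    console.log(chunk);
  });
})();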

web/src/components/localModels.vue (2 changes: 2 additions & 0 deletions)

@@ -19,6 +19,8 @@ onMounted(async () => {
   await listModels();
   if (appStore.localModel) {
     model.value = appStore.localModel;
+  } else {
+    model.value = models.value[0];
   }
 });

web/src/components/models.vue (2 changes: 2 additions & 0 deletions)

@@ -19,6 +19,8 @@ onMounted(async () => {
   await listModels();
   if (appStore.model) {
     model.value = appStore.model;
+  } else {
+    model.value = models.value[0];
   }
 });
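
Both Vue components get the same fix: previously, when the app store held no saved model, model.value was presumably left unset after the list loaded; now it falls back to the first fetched entry. Condensed, the new onMounted logic is equivalent to this one-liner (a sketch; models is the ref that listModels() populates, per the surrounding code):

// TypeScript sketch of the truthiness-based fallback added above (models.vue variant).
model.value = appStore.model || models.value[0];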
