// debug.ts
import type { AgentStrategy, ModelModeType, RETRIEVE_TYPE, ToolItem } from '@/types/app'
export type Inputs = Record<string, string | number | object>
export enum PromptMode {
simple = 'simple',
advanced = 'advanced',
}
export type PromptItem = {
role?: PromptRole
text: string
}
export type ChatPromptConfig = {
prompt: PromptItem[]
}
export type ConversationHistoriesRole = {
user_prefix: string
assistant_prefix: string
}
export type CompletionPromptConfig = {
prompt: PromptItem
conversation_histories_role: ConversationHistoriesRole
}
export type BlockStatus = {
context: boolean
history: boolean
query: boolean
}
export enum PromptRole {
system = 'system',
user = 'user',
assistant = 'assistant',
}
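// Illustrative sketch (not part of the original file): how the two prompt-config
// shapes above differ. The texts, prefixes and the '{{query}}' placeholder syntax
// are assumptions for the example, not values taken from the application.
export const exampleChatPromptConfig: ChatPromptConfig = {
  prompt: [
    { role: PromptRole.system, text: 'You are a helpful assistant.' },
    { role: PromptRole.user, text: '{{query}}' },
  ],
}
export const exampleCompletionPromptConfig: CompletionPromptConfig = {
  prompt: { text: 'Answer the question below.\n{{query}}' },
  conversation_histories_role: {
    user_prefix: 'Human',
    assistant_prefix: 'Assistant',
  },
}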
export type PromptVariable = {
key: string
name: string
type: string // "string" | "number" | "select"
default?: string | number
required?: boolean
options?: string[]
max_length?: number
is_context_var?: boolean
enabled?: boolean
config?: Record<string, any>
icon?: string
icon_background?: string
}
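// Illustrative sketch: a select-type prompt variable. Key, name and options are
// made-up placeholder values.
export const examplePromptVariable: PromptVariable = {
  key: 'language',
  name: 'Language',
  type: 'select',
  default: 'English',
  required: true,
  options: ['English', 'Chinese'],
}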
export type CompletionParams = {
max_tokens: number
temperature: number
top_p: number
presence_penalty: number
frequency_penalty: number
stop?: string[]
}
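// Illustrative sketch: a typical set of completion parameters. The numbers are
// placeholder defaults, not values taken from the application.
export const exampleCompletionParams: CompletionParams = {
  max_tokens: 512,
  temperature: 0.7,
  top_p: 1,
  presence_penalty: 0,
  frequency_penalty: 0,
  stop: ['\n\nHuman:'],
}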
export type ModelId = 'gpt-3.5-turbo' | 'text-davinci-003'
export type PromptConfig = {
prompt_template: string
prompt_variables: PromptVariable[]
}
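// Illustrative sketch: a prompt template paired with the variables it references.
// The '{{...}}' placeholder syntax and the variables themselves are assumptions.
export const examplePromptConfig: PromptConfig = {
  prompt_template: 'Summarize the following text in {{language}}: {{text}}',
  prompt_variables: [
    { key: 'language', name: 'Language', type: 'select', options: ['English', 'Chinese'] },
    { key: 'text', name: 'Text', type: 'string', max_length: 2000, required: true },
  ],
}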
export type MoreLikeThisConfig = {
enabled: boolean
}
export type SuggestedQuestionsAfterAnswerConfig = MoreLikeThisConfig
export type SpeechToTextConfig = MoreLikeThisConfig
export type TextToSpeechConfig = {
enabled: boolean
voice?: string
language?: string
}
export type CitationConfig = MoreLikeThisConfig
export type AnnotationReplyConfig = {
id: string
enabled: boolean
score_threshold: number
embedding_model: {
embedding_provider_name: string
embedding_model_name: string
}
}
export type ModerationContentConfig = {
enabled: boolean
preset_response?: string
}
export type ModerationConfig = MoreLikeThisConfig & {
type?: string
config?: {
keywords?: string
api_based_extension_id?: string
inputs_config?: ModerationContentConfig
outputs_config?: ModerationContentConfig
} & Partial<Record<string, any>>
}
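// Illustrative sketch: a keyword-based moderation config. The 'keywords' type value
// and the newline-separated keyword list are assumptions about how the fields are used.
export const exampleModerationConfig: ModerationConfig = {
  enabled: true,
  type: 'keywords', // assumed moderation type identifier
  config: {
    keywords: 'forbidden_word_1\nforbidden_word_2',
    inputs_config: { enabled: true, preset_response: 'Your input contains restricted content.' },
    outputs_config: { enabled: false },
  },
}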
export type RetrieverResourceConfig = MoreLikeThisConfig
export type AgentConfig = {
enabled: boolean
strategy: AgentStrategy
max_iteration: number
tools: ToolItem[]
}
// For frontend use; not the same shape as the backend model config.
export type ModelConfig = {
provider: string // LLM Provider: for example "OPENAI"
model_id: string
mode: ModelModeType
configs: PromptConfig
opening_statement: string | null
more_like_this: MoreLikeThisConfig | null
suggested_questions_after_answer: SuggestedQuestionsAfterAnswerConfig | null
speech_to_text: SpeechToTextConfig | null
text_to_speech: TextToSpeechConfig | null
retriever_resource: RetrieverResourceConfig | null
sensitive_word_avoidance: ModerationConfig | null
dataSets: any[]
agentConfig: AgentConfig
}
export type DatasetConfigItem = {
enable: boolean
value: number
}
export type DatasetConfigs = {
retrieval_model: RETRIEVE_TYPE
reranking_model: {
reranking_provider_name: string
reranking_model_name: string
}
top_k: number
score_threshold_enabled: boolean
score_threshold?: number | null
datasets: {
datasets: {
enabled: boolean
id: string
}[]
}
}
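// Illustrative sketch: a dataset retrieval configuration showing the nested
// datasets shape. RETRIEVE_TYPE is defined in '@/types/app' and its members are
// not shown here, so the retrieval-model value is a cast placeholder; the other
// strings and numbers are placeholders as well.
export const exampleDatasetConfigs: DatasetConfigs = {
  retrieval_model: 'multiple' as unknown as RETRIEVE_TYPE, // placeholder member name
  reranking_model: {
    reranking_provider_name: 'reranking-provider-name',
    reranking_model_name: 'reranking-model-name',
  },
  top_k: 4,
  score_threshold_enabled: true,
  score_threshold: 0.5,
  datasets: {
    datasets: [
      { enabled: true, id: 'dataset-id-1' },
    ],
  },
}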
export type DebugRequestBody = {
inputs: Inputs
query: string
completion_params: CompletionParams
model_config: ModelConfig
}
export type DebugResponse = {
id: string
answer: string
created_at: string
}
export type DebugResponseStream = {
id: string
data: string
created_at: string
}
export type FeedBackRequestBody = {
message_id: string
rating: 'like' | 'dislike'
content?: string
from_source: 'api' | 'log'
}
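// Illustrative sketch: a 'like' feedback payload for a message. The id and content
// are placeholders.
export const exampleFeedback: FeedBackRequestBody = {
  message_id: 'message-id',
  rating: 'like',
  content: 'Accurate and well formatted answer.',
  from_source: 'log',
}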
export type FeedBackResponse = {
message_id: string
rating: 'like' | 'dislike'
}
// Log session list
export type LogSessionListQuery = {
keyword?: string
start?: string // datetime string, format: YYYY-MM-DD HH:mm
end?: string // datetime string, format: YYYY-MM-DD HH:mm
page: number
limit: number // default 20; range 1-100
}
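// Illustrative sketch: query the first page of log sessions within a time window.
// The keyword and datetime values are placeholders in the YYYY-MM-DD HH:mm format noted above.
export const exampleLogSessionListQuery: LogSessionListQuery = {
  keyword: 'refund',
  start: '2024-01-01 00:00',
  end: '2024-01-31 23:59',
  page: 1,
  limit: 20,
}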
export type LogSessionListResponse = {
data: {
id: string
conversation_id: string
query: string // the user's query
message: string // the prompt sent to the LLM
answer: string
created_at: string
}[]
total: number
page: number
}
// log session detail and debug
export type LogSessionDetailResponse = {
id: string
conversation_id: string
model_provider: string
query: string
inputs: Inputs[]
message: string
message_tokens: number // number of tokens in message
answer: string
answer_tokens: number // number of tokens in answer
provider_response_latency: number // latency in ms
from_source: 'api' | 'log'
}
export type SavedMessage = {
id: string
answer: string
}