-
Notifications
You must be signed in to change notification settings - Fork 13
/
Copy pathindex.js
396 lines (312 loc) · 13.5 KB
/
index.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
// Project deps: fs for reading the fine-tuning dataset, the official
// OpenAI client, and the locally built GPTTokens implementation under test.
const fs = require('fs')
const OpenAI = require('openai')
const { GPTTokens } = require('../dist/index')
// CLI arguments take precedence over environment variables:
//   node index.js <apiKey> <fineTuneModel>
const [
    apiKey = process.env.OPENAI_API_KEY,
    fineTuneModel = process.env.FINE_TUNE_MODEL,
] = process.argv.slice(2)
// These tests hit the live API; with no key they are skipped (exit 0), not failed.
if (!apiKey) {
    console.error('No API key provided. Ignoring test.')
    process.exit(0)
}
const openai = new OpenAI({ apiKey })
/**
 * Verifies GPTTokens' token counts against the usage reported by the
 * OpenAI chat-completions API for every supported (non fine-tuned) model.
 *
 * @param {string} prompt - User prompt sent to each model.
 * @throws {Error} When any model's prompt/completion/total token count
 *   computed by GPTTokens disagrees with OpenAI's reported usage.
 */
async function testGPTTokens(prompt) {
    const messages = [
        { role: 'user', content: prompt },
    ]
    // Fine-tuned models (ft:*) require a real fine-tune job; skip them here.
    const supportModels = GPTTokens.supportModels
        .filter(model => !model.startsWith('ft:'))
    const { length: modelsNum } = supportModels
    for (let i = 0; i < modelsNum; i += 1) {
        const model = supportModels[i]
        console.info(`[${i + 1}/${modelsNum}]: Testing ${model}...`)
        let ignoreModel = false
        const chatCompletion = await openai.chat.completions.create({
            model,
            messages,
        })
            .catch(err => {
                // Model unavailable for this account/region: note it and move on.
                ignoreModel = true
                console.info(`Ignore model ${model}:`)
                console.info(err.message)
            })
        // BUGFIX: skip BEFORE building GPTTokens. On the error path
        // chatCompletion is undefined, and constructing GPTTokens with an
        // undefined assistant message could throw before the old guard ran.
        if (ignoreModel) continue
        const openaiUsage = chatCompletion?.usage
        if (!openaiUsage) {
            console.error(`Test ${model} failed (openai return usage is null)`)
            continue
        }
        const gptTokens = new GPTTokens({
            model,
            messages: [
                ...messages,
                chatCompletion.choices[0].message,
            ],
        })
        if (gptTokens.promptUsedTokens !== openaiUsage.prompt_tokens)
            throw new Error(`Test ${model} promptUsedTokens failed (openai: ${openaiUsage.prompt_tokens}/ gpt-tokens: ${gptTokens.promptUsedTokens})`)
        if (gptTokens.completionUsedTokens !== openaiUsage.completion_tokens)
            throw new Error(`Test ${model} completionUsedTokens failed (openai: ${openaiUsage.completion_tokens}/ gpt-tokens: ${gptTokens.completionUsedTokens})`)
        if (gptTokens.usedTokens !== openaiUsage.total_tokens)
            throw new Error(`Test ${model} usedTokens failed (openai: ${openaiUsage.total_tokens}/ gpt-tokens: ${gptTokens.usedTokens})`)
        console.info('Pass!')
    }
    console.info('Test success!')
}
/**
 * Runs the basic token-count comparison across all supported models.
 * @param {string} prompt - User prompt forwarded to testGPTTokens.
 */
async function testBasic(prompt) {
    console.info('Testing GPT...')
    return testGPTTokens(prompt)
}
/**
 * Checks GPTTokens' fine-tune training token estimate against a known
 * token count previously reported by OpenAI for the same JSONL dataset.
 *
 * @param {string} filepath - Path to the fine-tuning JSONL file.
 * @throws {Error} When the estimated token count does not match.
 */
function testTraining(filepath) {
    console.info('Testing Create a fine-tuned model...')
    // Reference value observed from OpenAI for this exact dataset / epochs.
    const openaiUsedTokens = 4445
    // JSONL: one JSON document per non-empty line.
    const trainingData = fs
        .readFileSync(filepath, 'utf-8')
        .split('\n')
        .filter(Boolean)
        .map(line => JSON.parse(line))
    const gptTokens = new GPTTokens({
        model   : 'gpt-3.5-turbo-1106',
        training: {
            data  : trainingData,
            epochs: 7,
        },
    })
    if (gptTokens.usedTokens !== openaiUsedTokens)
        throw new Error(`Test training usedTokens failed (openai: ${openaiUsedTokens}/ gpt-tokens: ${gptTokens.usedTokens})`)
    console.info('Pass!')
}
/**
 * Times GPTTokens construction plus all usage getters over ten rounds,
 * printing each round's duration via console.time/timeEnd.
 *
 * @param {Array<{role: string, content: string}>} messages - Chat messages to tokenize.
 */
function testPerformance(messages) {
    console.info('Testing performance...')
    console.info('Messages:', JSON.stringify(messages))
    let round = 0
    while (round < 10) {
        console.time('GPTTokens')
        const usageInfo = new GPTTokens({
            model: 'gpt-3.5-turbo-0613',
            messages,
        })
        // Touch every getter so its computation cost lands inside the timer;
        // the values themselves are deliberately discarded.
        void usageInfo.usedTokens
        void usageInfo.promptUsedTokens
        void usageInfo.completionUsedTokens
        void usageInfo.usedUSD
        console.timeEnd('GPTTokens')
        round += 1
    }
}
/**
 * Verifies GPTTokens' prompt-token counting for chat completions that
 * declare tools (function calling), by running two independent tool-calling
 * conversations and comparing against the usage reported by the OpenAI API.
 *
 * @throws {Error} When GPTTokens' count disagrees with OpenAI's
 *   prompt_tokens for either conversation.
 */
async function testFunctionCalling() {
    console.info('Testing function calling...')
    // The two conversations are independent, so run them concurrently.
    await Promise.all([
        functionCalling1(),
        functionCalling2(),
    ])
    console.info('Pass!')

    // Scenario 1: weather lookup with a single-tool schema (object params,
    // one required field, one enum field).
    async function functionCalling1() {
        // https://platform.openai.com/docs/guides/function-calling
        // Example dummy function hard coded to return the same weather
        // In production, this could be your backend API or an external API
        function getCurrentWeather(location) {
            if (location.toLowerCase().includes('tokyo')) {
                return JSON.stringify({ location: 'Tokyo', temperature: '10', unit: 'celsius' })
            } else if (location.toLowerCase().includes('san francisco')) {
                return JSON.stringify({ location: 'San Francisco', temperature: '72', unit: 'fahrenheit' })
            } else if (location.toLowerCase().includes('paris')) {
                return JSON.stringify({ location: 'Paris', temperature: '22', unit: 'fahrenheit' })
            } else {
                return JSON.stringify({ location, temperature: 'unknown' })
            }
        }

        async function runConversation() {
            // Step 1: send the conversation and available functions to the model
            const model = 'gpt-3.5-turbo-1106'
            const messages = [
                { role: 'user', content: 'What\'s the weather like in San Francisco and Paris?' },
            ]
            const tools = [
                {
                    type    : 'function',
                    function: {
                        name       : 'get_current_weather',
                        description: 'Get the current weather in a given location',
                        parameters : {
                            type      : 'object',
                            properties: {
                                location: {
                                    type       : 'string',
                                    description: 'The city and state, e.g. San Francisco, CA',
                                },
                                unit    : {
                                    type: 'string',
                                    enum: ['celsius', 'fahrenheit'],
                                },
                            },
                            required  : ['location'],
                        },
                    },
                },
            ]
            const response = await openai.chat.completions.create({
                model,
                messages,
                tools,
                tool_choice: 'auto', // auto is default, but we'll be explicit
            })
            const { usage: openaiUsage } = response
            // The actual assertion: GPTTokens' count for messages+tools must
            // equal OpenAI's reported prompt_tokens for the same request.
            const gptTokens = new GPTTokens({
                model,
                messages,
                tools,
            })
            if (gptTokens.usedTokens !== openaiUsage.prompt_tokens)
                throw new Error(`Test function calling promptUsedTokens failed (openai: ${openaiUsage.prompt_tokens}/ gpt-tokens: ${gptTokens.usedTokens})`)
            const responseMessage = response.choices[0].message
            // Step 2: check if the model wanted to call a function
            const toolCalls = responseMessage.tool_calls
            if (responseMessage.tool_calls) {
                // Step 3: call the function
                // Note: the JSON response may not always be valid; be sure to handle errors
                const availableFunctions = {
                    get_current_weather: getCurrentWeather,
                } // only one function in this example, but you can have multiple
                messages.push(responseMessage) // extend conversation with assistant's reply
                for (const toolCall of toolCalls) {
                    const functionName = toolCall.function.name
                    const functionToCall = availableFunctions[functionName]
                    const functionArgs = JSON.parse(toolCall.function.arguments)
                    // NOTE: getCurrentWeather only declares `location`; the
                    // extra `unit` argument is harmlessly ignored.
                    const functionResponse = functionToCall(
                        functionArgs.location,
                        functionArgs.unit,
                    )
                    messages.push({
                        tool_call_id: toolCall.id,
                        role        : 'tool',
                        name        : functionName,
                        content     : functionResponse,
                    }) // extend conversation with function response
                }
                const secondResponse = await openai.chat.completions.create({
                    model   : 'gpt-3.5-turbo-1106',
                    messages: messages,
                }) // get a new response from the model where it can see the function response
                return secondResponse.choices
            }
        }

        await runConversation()
    }

    // Scenario 2: product-price lookup; schema differs from scenario 1
    // (enum on a non-required field, different required key).
    async function functionCalling2() {
        // https://platform.openai.com/docs/guides/function-calling
        // Example dummy function hard coded to return the same weather
        // In production, this could be your backend API or an external API
        function getProductPrice(store, product) {
            return JSON.stringify({
                store,
                product,
                price: (Math.random() * 1000).toFixed(0),
                unit : '$',
            })
        }

        async function runConversation() {
            // Step 1: send the conversation and available functions to the model
            const model = 'gpt-3.5-turbo-1106'
            const messages = [
                { role: 'user', content: 'ps5 price in all stores' },
            ]
            const tools = [
                {
                    type    : 'function',
                    function: {
                        name       : 'get_product_price',
                        description: 'Get the price of an item in a specified store',
                        parameters : {
                            type      : 'object',
                            properties: {
                                store  : {
                                    type       : 'string',
                                    description: 'The store name',
                                    enum       : ['Amazon', 'Ebay', 'TaoBao'],
                                },
                                product: {
                                    type       : 'string',
                                    description: 'The product name e.g. MacbookPro',
                                },
                            },
                            required  : ['product'],
                        },
                    },
                },
            ]
            const response = await openai.chat.completions.create({
                model,
                messages,
                tools,
                tool_choice: 'auto', // auto is default, but we'll be explicit
            })
            const { usage: openaiUsage } = response
            // Same assertion as scenario 1, over a different tool schema.
            const gptTokens = new GPTTokens({
                model,
                messages,
                tools,
            })
            if (gptTokens.usedTokens !== openaiUsage.prompt_tokens)
                throw new Error(`Test function calling promptUsedTokens failed (openai: ${openaiUsage.prompt_tokens}/ gpt-tokens: ${gptTokens.usedTokens})`)
            const responseMessage = response.choices[0].message
            // Step 2: check if the model wanted to call a function
            const toolCalls = responseMessage.tool_calls
            if (responseMessage.tool_calls) {
                // Step 3: call the function
                // Note: the JSON response may not always be valid; be sure to handle errors
                const availableFunctions = {
                    get_product_price: getProductPrice,
                } // only one function in this example, but you can have multiple
                messages.push(responseMessage) // extend conversation with assistant's reply
                for (const toolCall of toolCalls) {
                    const functionName = toolCall.function.name
                    const functionToCall = availableFunctions[functionName]
                    const functionArgs = JSON.parse(toolCall.function.arguments)
                    // NOTE: getProductPrice only declares (store, product);
                    // the extra `unit` argument is harmlessly ignored.
                    const functionResponse = functionToCall(
                        functionArgs.store,
                        functionArgs.product,
                        functionArgs.unit,
                    )
                    messages.push({
                        tool_call_id: toolCall.id,
                        role        : 'tool',
                        name        : functionName,
                        content     : functionResponse,
                    }) // extend conversation with function response
                }
                const secondResponse = await openai.chat.completions.create({
                    model   : 'gpt-3.5-turbo-1106',
                    messages: messages,
                }) // get a new response from the model where it can see the function response
                return secondResponse.choices
            }
        }

        await runConversation()
    }
}
/**
 * Compares GPTTokens' token count for a fine-tuned model against the
 * prompt_tokens reported by the OpenAI API.
 * Skips cleanly when no fine-tuned model id was supplied, mirroring the
 * apiKey guard at startup.
 *
 * @throws {Error} When the prompt token counts disagree.
 */
async function testFineTune() {
    console.info('Testing fine-tune...')
    // Without a fine-tuned model id the API call is guaranteed to fail;
    // treat the test as skipped rather than crashing the whole run.
    if (!fineTuneModel) {
        console.info('No fine-tune model provided. Ignoring test.')
        return
    }
    const messages = [{ role: 'system', content: 'You are a helpful assistant.' }]
    const completion = await openai.chat.completions.create({
        messages,
        model: fineTuneModel,
    })
    const { usage: openaiUsage } = completion
    const gptTokens = new GPTTokens({
        fineTuneModel,
        messages,
    })
    if (gptTokens.usedTokens !== openaiUsage.prompt_tokens)
        throw new Error(`Test fine-tune promptUsedTokens failed (openai: ${openaiUsage.prompt_tokens}/ gpt-tokens: ${gptTokens.usedTokens})`)
    console.info('Pass!')
}
/** Entry point: runs every test suite in sequence. */
async function start() {
    await testBasic('How are u')
    await testFunctionCalling()
    await testFineTune()
    testTraining('./fine-tuning-data.jsonl')
    testPerformance([
        {
            role   : 'user',
            content: 'Hello world',
        },
    ])
}

// BUGFIX: the original `start().then()` left the promise's rejection
// unhandled; log the failure and exit non-zero so CI can detect it.
start().catch(err => {
    console.error(err)
    process.exit(1)
})