Update gpt-3.5-turbo
lvwzhen committed Apr 21, 2023
1 parent 438dfe9 commit abaf556
Showing 3 changed files with 753 additions and 715 deletions.
6 changes: 3 additions & 3 deletions components/SearchDialog.tsx
```diff
@@ -12,7 +12,7 @@ import {
 } from '@/components/ui/dialog'
 import { Input } from '@/components/ui/input'
 import { SSE } from 'sse.js'
-import type { CreateCompletionResponse } from 'openai'
+import type { CreateChatCompletionResponse } from 'openai'
 import { X, Loader, User, Frown, CornerDownLeft, Search, Wand } from 'lucide-react'
 
 function promptDataReducer(
@@ -133,8 +133,8 @@ export function SearchDialog() {
         return
       }
 
-      const completionResponse: CreateCompletionResponse = JSON.parse(e.data)
-      const text = completionResponse.choices[0].text
+      const completionResponse: CreateChatCompletionResponse = JSON.parse(e.data)
+      const text = completionResponse.choices[0].message
 
       setAnswer((answer) => {
        const currentAnswer = answer ?? ''
```
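
The new type annotation comes from the chat completions API, but this handler receives streamed SSE events (the request in `pages/api/vector-search.ts` sets `stream: true`). In OpenAI's streaming format, each chunk carries its incremental text under `choices[0].delta.content` rather than a complete `message` object, so a minimal sketch of chunk handling under that assumption might look like the following (the chunk type is hand-written here for illustration and is not taken from the diff):

```ts
// Minimal sketch of handling one streamed chat completion chunk.
// Assumes OpenAI's streaming format, where incremental text arrives
// under choices[0].delta.content. The chunk type below is hand-rolled
// for illustration.
type ChatCompletionChunk = {
  choices: { delta: { content?: string } }[]
}

function extractChunkText(data: string): string {
  if (data === '[DONE]') return ''
  const chunk: ChatCompletionChunk = JSON.parse(data)
  return chunk.choices[0]?.delta?.content ?? ''
}
```

The extracted text would then be appended to the running answer via `setAnswer`, just as the component already does with `text`.
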
8 changes: 4 additions & 4 deletions pages/api/vector-search.ts
```diff
@@ -2,7 +2,7 @@ import type { NextRequest } from 'next/server'
 import { createClient } from '@supabase/supabase-js'
 import { codeBlock, oneLine } from 'common-tags'
 import GPT3Tokenizer from 'gpt3-tokenizer'
-import { CreateCompletionRequest } from 'openai'
+import { CreateChatCompletionRequest } from 'openai'
 import { ApplicationError, UserError } from '@/lib/errors'
 
 // OpenAIApi does currently not work in Vercel Edge Functions as it uses Axios under the hood.
@@ -133,9 +133,9 @@ export default async function handler(req: NextRequest) {
       Answer:
     `
 
-    const completionOptions: CreateCompletionRequest = {
-      model: 'text-davinci-003',
-      prompt,
+    const completionOptions: CreateChatCompletionRequest = {
+      model: "gpt-3.5-turbo",
+      messages: [{ role: "user", content: prompt }],
       max_tokens: 256,
       temperature: 0,
       stream: true,
```
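
Because the file's own comment notes that `OpenAIApi` (Axios-based) does not work in Vercel Edge Functions, the options object is presumably sent to OpenAI's chat completions endpoint with `fetch`. A sketch under that assumption (the endpoint and headers follow OpenAI's HTTP API; the helper name and `apiKey` parameter are illustrative, not from the diff):

```ts
// Sketch: POSTing the chat completion options with fetch, which works in
// the Edge runtime. Endpoint and headers follow OpenAI's HTTP API; the
// helper name and apiKey parameter are illustrative.
import type { CreateChatCompletionRequest } from 'openai'

async function createChatCompletion(
  completionOptions: CreateChatCompletionRequest,
  apiKey: string
): Promise<Response> {
  return fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(completionOptions),
  })
}
```

With `stream: true` set, the returned `Response` body is a server-sent-event stream that can be forwarded to the client, which is what the `SSE` client in `SearchDialog.tsx` consumes.
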