[pull] master from sashabaranov:master #40

Open

wants to merge 39 commits into master from sashabaranov:master

Changes from 1 commit (39 commits total)
dd7f582
fix: fullURL endpoint generation (#817)
eiixy Aug 16, 2024
d86425a
Allow structured outputs via function calling (#828)
greysteil Aug 16, 2024
6d02119
feat: Support Delete Message API (#799)
kappa-lab Aug 22, 2024
5162adb
Support HTTP client middleware (#830)
michurin Aug 23, 2024
a3bd256
Improve handling of JSON Schema in OpenAI API Response Context (#819)
eiixy Aug 24, 2024
030b7cb
fix integration tests (#834)
sashabaranov Aug 24, 2024
c37cf9a
Dynamic model (#838)
mathisen99 Sep 1, 2024
643da8d
deprecated model GPT3Ada changed to GPT3Babbage002 (#843)
Arundas666 Sep 4, 2024
194a03e
Add refusal (#844)
qhenkart Sep 11, 2024
a5fb553
Support OpenAI reasoning models (#850)
abatilo Sep 17, 2024
1ec8c24
fix: jsonschema integer validation (#852)
WeiAnAn Sep 20, 2024
9add1c3
add max_completions_tokens for o1 series models (#857)
chococola Sep 20, 2024
9a4f3a7
feat: add ParallelToolCalls to RunRequest (#847)
kenshin54 Sep 20, 2024
e095df5
run_id string Optional (#855)
floodwm Sep 20, 2024
38bdc81
Optimize Client Error Return (#856)
eiixy Sep 26, 2024
7f80303
Fix max_completion_tokens (#860)
alexsacr Sep 26, 2024
e9d8485
fix: ParallelToolCalls should be added to RunRequest (#861)
kenshin54 Sep 26, 2024
fdd59d9
feat: usage struct add CompletionTokensDetails (#863)
liushuangls Sep 26, 2024
bac7d59
fix MaxCompletionTokens typo (#862)
l-winston Oct 3, 2024
7c145eb
add jailbreak filter result, add ContentFilterResults on output (#864)
juliomartinsdev Oct 3, 2024
9913264
Completion API: add new params (#870)
isaacseymour Oct 9, 2024
cfe15ff
return response body as byte slice for RequestError type (#873)
AyushSawant18588 Oct 14, 2024
21f7134
Adding new moderation model constants (#875)
Mhjacobs Oct 14, 2024
b162541
Cleanup (#879)
sashabaranov Oct 15, 2024
9fe2c6c
Completion API: add Store and Metadata parameters (#878)
smackcrane Oct 15, 2024
fb15ff9
Handling for non-json response (#881)
AyushSawant18588 Oct 21, 2024
3672c0d
fix: Updated Assistant struct with latest fields based on OpenAI docs…
ecoralic Oct 21, 2024
6e08732
Updated checkPromptType function to handle prompt list in completions…
AyushSawant18588 Oct 25, 2024
d10f1b8
add chatcompletion stream delta refusal and logprobs (#882)
Yu0u Oct 29, 2024
f5e6e0e
Added Vector Store File List properties that allow for pagination (#891)
MattDavisRV Nov 8, 2024
6d066bb
Support Attachments in MessageRequest (#890)
kodernubie Nov 8, 2024
b3ece4d
Updated client_test to solve lint error (#900)
AyushSawant18588 Nov 19, 2024
1687616
o1 model support stream (#904)
ldnvnbl Nov 20, 2024
74ed75f
Make user field optional in embedding request (#899)
nagar-ajay Nov 20, 2024
21fa42c
feat: add gpt-4o-2024-11-20 model (#905)
liushuangls Nov 30, 2024
c203ca0
feat: add RecvRaw (#896)
WqyJh Nov 30, 2024
af5355f
Fix ID field to be optional (#911)
TimMisiak Dec 8, 2024
56a9acf
Ignore test.mp3 (#913)
sashabaranov Dec 8, 2024
2a0ff5a
Added additional_messages (#914)
sabuhigr Dec 27, 2024
Updated checkPromptType function to handle prompt list in completions (sashabaranov#885)

* updated checkPromptType function to handle prompt list in completions

* removed generated test file

* added corresponding unit test cases

* updated to use less nesting with early returns
AyushSawant18588 authored Oct 25, 2024
commit 6e087322b77693e6e9227d9950a0c8d8a10a8d1a
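Background for the change (an illustrative sketch, not code from the PR): in Go, a value held in an any only matches a []string type assertion if it was stored as exactly that type. A prompt built as []interface{}{"a", "b"}, which is what JSON decoding and mixed literals produce, fails the .([]string) assertion even when every element is a string, so the elements have to be checked one by one:

package main

import "fmt"

func main() {
	// The dynamic type here is []interface{}, not []string,
	// even though every element happens to be a string.
	var prompt any = []interface{}{"Lorem ipsum", "dolor sit amet"}

	// A direct assertion to []string fails.
	_, ok := prompt.([]string)
	fmt.Println(ok) // false

	// Each element must be checked individually instead.
	allStrings := true
	for _, item := range prompt.([]interface{}) {
		if _, isString := item.(string); !isString {
			allStrings = false
			break
		}
	}
	fmt.Println(allStrings) // true
}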
18 changes: 17 additions & 1 deletion completion.go
@@ -161,7 +161,23 @@ func checkEndpointSupportsModel(endpoint, model string) bool {
 func checkPromptType(prompt any) bool {
 	_, isString := prompt.(string)
 	_, isStringSlice := prompt.([]string)
-	return isString || isStringSlice
+	if isString || isStringSlice {
+		return true
+	}
+
+	// check whether the prompt is a []string hidden under []any
+	slice, isSlice := prompt.([]any)
+	if !isSlice {
+		return false
+	}
+
+	for _, item := range slice {
+		_, itemIsString := item.(string)
+		if !itemIsString {
+			return false
+		}
+	}
+	return true // all items in the slice are strings, so it is effectively []string
 }
 
 var unsupportedToolsForO1Models = map[ToolType]struct{}{
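To make the new validation behavior concrete, here is a small standalone program, with the function body adapted from the diff above so the example compiles on its own, showing which prompt shapes now pass:

package main

import "fmt"

// checkPromptType is adapted from the diff above.
func checkPromptType(prompt any) bool {
	_, isString := prompt.(string)
	_, isStringSlice := prompt.([]string)
	if isString || isStringSlice {
		return true
	}
	// check whether the prompt is a []string hidden under []any
	slice, isSlice := prompt.([]any)
	if !isSlice {
		return false
	}
	for _, item := range slice {
		if _, itemIsString := item.(string); !itemIsString {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(checkPromptType("Lorem ipsum"))                 // true
	fmt.Println(checkPromptType([]string{"a", "b"}))            // true
	fmt.Println(checkPromptType([]any{"Lorem ipsum", "dolor"})) // true, newly supported
	fmt.Println(checkPromptType([]any{"Lorem ipsum", 9}))       // false, mixed types rejected
	fmt.Println(checkPromptType(42))                            // false
}

The guard-clause shape of the function reflects the "less nesting with early returns" point from the commit message: each non-matching case exits immediately instead of accumulating nested conditionals.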
78 changes: 68 additions & 10 deletions completion_test.go
@@ -59,6 +59,38 @@ func TestCompletions(t *testing.T) {
 	checks.NoError(t, err, "CreateCompletion error")
 }
 
+// TestMultiplePromptsCompletionsWrong tests the completions endpoint of the API using the mocked server
+// where the completion request has a list of prompts with the wrong type.
+func TestMultiplePromptsCompletionsWrong(t *testing.T) {
+	client, server, teardown := setupOpenAITestServer()
+	defer teardown()
+	server.RegisterHandler("/v1/completions", handleCompletionEndpoint)
+	req := openai.CompletionRequest{
+		MaxTokens: 5,
+		Model:     "ada",
+		Prompt:    []interface{}{"Lorem ipsum", 9},
+	}
+	_, err := client.CreateCompletion(context.Background(), req)
+	if !errors.Is(err, openai.ErrCompletionRequestPromptTypeNotSupported) {
+		t.Fatalf("CreateCompletion should return ErrCompletionRequestPromptTypeNotSupported, but returned: %v", err)
+	}
+}
+
+// TestMultiplePromptsCompletions tests the completions endpoint of the API using the mocked server
+// where the completion request has a list of prompts.
+func TestMultiplePromptsCompletions(t *testing.T) {
+	client, server, teardown := setupOpenAITestServer()
+	defer teardown()
+	server.RegisterHandler("/v1/completions", handleCompletionEndpoint)
+	req := openai.CompletionRequest{
+		MaxTokens: 5,
+		Model:     "ada",
+		Prompt:    []interface{}{"Lorem ipsum", "Lorem ipsum"},
+	}
+	_, err := client.CreateCompletion(context.Background(), req)
+	checks.NoError(t, err, "CreateCompletion error")
+}
+
 // handleCompletionEndpoint Handles the completion endpoint by the test server.
 func handleCompletionEndpoint(w http.ResponseWriter, r *http.Request) {
 	var err error
@@ -87,24 +119,50 @@ func handleCompletionEndpoint(w http.ResponseWriter, r *http.Request) {
 	if n == 0 {
 		n = 1
 	}
+	// Handle different types of prompts: single string or list of strings
+	prompts := []string{}
+	switch v := completionReq.Prompt.(type) {
+	case string:
+		prompts = append(prompts, v)
+	case []interface{}:
+		for _, item := range v {
+			if str, ok := item.(string); ok {
+				prompts = append(prompts, str)
+			}
+		}
+	default:
+		http.Error(w, "Invalid prompt type", http.StatusBadRequest)
+		return
+	}
+
 	for i := 0; i < n; i++ {
-		// generate a random string of length completionReq.Length
-		completionStr := strings.Repeat("a", completionReq.MaxTokens)
-		if completionReq.Echo {
-			completionStr = completionReq.Prompt.(string) + completionStr
+		for _, prompt := range prompts {
+			// Generate a random string of length completionReq.MaxTokens
+			completionStr := strings.Repeat("a", completionReq.MaxTokens)
+			if completionReq.Echo {
+				completionStr = prompt + completionStr
+			}
+
+			res.Choices = append(res.Choices, openai.CompletionChoice{
+				Text:  completionStr,
+				Index: len(res.Choices),
+			})
 		}
-		res.Choices = append(res.Choices, openai.CompletionChoice{
-			Text:  completionStr,
-			Index: i,
-		})
 	}
-	inputTokens := numTokens(completionReq.Prompt.(string)) * n
-	completionTokens := completionReq.MaxTokens * n
+
+	inputTokens := 0
+	for _, prompt := range prompts {
+		inputTokens += numTokens(prompt)
+	}
+	inputTokens *= n
+	completionTokens := completionReq.MaxTokens * len(prompts) * n
 	res.Usage = openai.Usage{
 		PromptTokens:     inputTokens,
 		CompletionTokens: completionTokens,
 		TotalTokens:      inputTokens + completionTokens,
 	}
+
+	// Serialize the response and send it back
 	resBytes, _ = json.Marshal(res)
 	fmt.Fprintln(w, string(resBytes))
 }
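With this commit, a list of prompts flows through the client end to end. A usage sketch against the public client follows; it assumes an OPENAI_API_KEY environment variable and a standard client setup, and the model constant GPT3Babbage002 comes from commit 643da8d in the list above:

package main

import (
	"context"
	"fmt"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

	// A slice of prompts is accepted in addition to a single string;
	// after this commit, a []interface{} of strings also passes validation.
	req := openai.CompletionRequest{
		Model:     openai.GPT3Babbage002,
		MaxTokens: 5,
		Prompt:    []string{"Lorem ipsum", "dolor sit amet"},
	}

	resp, err := client.CreateCompletion(context.Background(), req)
	if err != nil {
		fmt.Println("CreateCompletion error:", err)
		return
	}
	for _, choice := range resp.Choices {
		fmt.Printf("choice %d: %s\n", choice.Index, choice.Text)
	}
}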