Skip to content

Commit

Permalink
🐛 fix: fix o1 series calling issue (lobehub#5714)
Browse files Browse the repository at this point in the history
  • Loading branch information
hezhijie0327 authored Feb 4, 2025
1 parent cc08844 commit d74653e
Show file tree
Hide file tree
Showing 2 changed files with 33 additions and 25 deletions.
4 changes: 2 additions & 2 deletions src/libs/agent-runtime/github/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ import { LOBE_DEFAULT_MODEL_LIST } from '@/config/modelProviders';
import type { ChatModelCard } from '@/types/llm';

import { AgentRuntimeErrorType } from '../error';
import { pruneReasoningPayload, reasoningModels } from '../openai';
import { pruneReasoningPayload } from '../openai';
import { ModelProvider } from '../types';
import {
CHAT_MODELS_BLOCK_LIST,
Expand Down Expand Up @@ -37,7 +37,7 @@ export const LobeGithubAI = LobeOpenAICompatibleFactory({
handlePayload: (payload) => {
const { model } = payload;

if (reasoningModels.has(model)) {
if (model.startsWith('o1') || model.startsWith('o3')) {
return { ...pruneReasoningPayload(payload), stream: false } as any;
}

Expand Down
54 changes: 31 additions & 23 deletions src/libs/agent-runtime/openai/index.ts
Original file line number Diff line number Diff line change
@@ -1,37 +1,45 @@
import { ChatStreamPayload, ModelProvider, OpenAIChatMessage } from '../types';
import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';

// TODO: temporary approach — refactor into model-card display config later.
/**
 * Rewrites a chat payload so it is accepted by OpenAI reasoning models
 * (o1 / o3 series).
 *
 * - Forces the default sampling values (temperature 1, top_p 1, zero
 *   frequency/presence penalties), since reasoning models reject others.
 * - Maps the `system` role to `developer`, except for the older o1
 *   preview/mini snapshots, which only accept `user` for system prompts.
 * - Disables streaming for the models that do not support it.
 *
 * @param payload - the original chat-completion payload; not mutated.
 * @returns a new payload object safe to send to a reasoning model.
 */
export const pruneReasoningPayload = (payload: ChatStreamPayload) => {
  // These snapshots reject streamed responses, so force `stream: false`.
  const disableStreamModels = new Set(['o1', 'o1-2024-12-17']);

  // These older snapshots do not accept the `developer` role; their system
  // prompt must be downgraded to a `user` message instead.
  const systemToUserModels = new Set([
    'o1-preview',
    'o1-preview-2024-09-12',
    'o1-mini',
    'o1-mini-2024-09-12',
  ]);

  return {
    ...payload,
    frequency_penalty: 0,
    messages: payload.messages.map((message: OpenAIChatMessage) => ({
      ...message,
      role:
        message.role === 'system'
          ? systemToUserModels.has(payload.model)
            ? 'user'
            : 'developer'
          : message.role,
    })),
    presence_penalty: 0,
    stream: !disableStreamModels.has(payload.model),
    temperature: 1,
    top_p: 1,
  };
};

export const LobeOpenAI = LobeOpenAICompatibleFactory({
baseURL: 'https://api.openai.com/v1',
chatCompletion: {
handlePayload: (payload) => {
const { model } = payload;

if (reasoningModels.has(model)) {
if (model.startsWith('o1') || model.startsWith('o3')) {
return pruneReasoningPayload(payload) as any;
}

Expand Down

0 comments on commit d74653e

Please sign in to comment.