Include call options in invocation params (sent to tracer) (langchain-ai#1746)

* Include call options in invocation params (sent to tracer)

* Delete example
nfcampos authored Jun 26, 2023
1 parent 7faa623 commit 827f8e4
Showing 6 changed files with 38 additions and 40 deletions.
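
The change below moves per-call options (such as stop, functions, and function_call) into invocationParams(options) itself, so the invocation_params recorded by the tracer reflect the request that will actually be sent, rather than constructor defaults that _generate later patches. A minimal standalone sketch of the pattern (no langchain imports; the class and option names are illustrative, not the library's):

// Illustrative sketch only — not code from this commit. It shows the pattern
// the diff adopts: call-time options are merged inside invocationParams(), so
// whatever is reported to the tracer matches the actual request parameters.
interface CallOptions {
  stop?: string[];
}

class ExampleModel {
  temperature = 0.7;

  stop?: string[];

  invocationParams(options?: CallOptions) {
    return {
      model: "example-model",
      temperature: this.temperature,
      // Call-time stop sequences take precedence over instance defaults.
      stop: options?.stop ?? this.stop,
    };
  }

  generate(prompt: string, options?: CallOptions) {
    const params = this.invocationParams(options);
    // A tracer callback would receive `params`, which now includes the
    // per-call options instead of only the constructor defaults.
    console.log("invocation_params sent to tracer:", params, "for", prompt);
  }
}

new ExampleModel().generate("Hello", { stop: ["\n\n"] });
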
15 changes: 8 additions & 7 deletions langchain/src/chat_models/anthropic.ts
@@ -178,13 +178,18 @@ export class ChatAnthropic extends BaseChatModel implements AnthropicInput {
/**
* Get the parameters used to invoke the model
*/
- invocationParams(): Omit<SamplingParameters, "prompt"> & Kwargs {
+ invocationParams(
+   options?: this["ParsedCallOptions"]
+ ): Omit<SamplingParameters, "prompt"> & Kwargs {
return {
model: this.modelName,
temperature: this.temperature,
top_k: this.topK,
top_p: this.topP,
- stop_sequences: this.stopSequences ?? DEFAULT_STOP_SEQUENCES,
+ stop_sequences:
+   options?.stop?.concat(DEFAULT_STOP_SEQUENCES) ??
+   this.stopSequences ??
+   DEFAULT_STOP_SEQUENCES,
max_tokens_to_sample: this.maxTokensToSample,
stream: this.streaming,
...this.invocationKwargs,
@@ -234,11 +239,7 @@ export class ChatAnthropic extends BaseChatModel implements AnthropicInput {
);
}

- const params = this.invocationParams();
- params.stop_sequences = options.stop
-   ? options.stop.concat(DEFAULT_STOP_SEQUENCES)
-   : params.stop_sequences;
-
+ const params = this.invocationParams(options);
const response = await this.completionWithRetry(
{
...params,
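
For ChatAnthropic, stop-sequence precedence is now resolved inside invocationParams: call-time stops (with the defaults appended) win over instance-level stopSequences, which in turn win over the bare defaults. A small sketch of that precedence, assuming a placeholder value for DEFAULT_STOP_SEQUENCES (the real constant lives in anthropic.ts):

// Assumed placeholder; the actual DEFAULT_STOP_SEQUENCES is defined in
// langchain/src/chat_models/anthropic.ts.
const DEFAULT_STOP_SEQUENCES = ["\n\nHuman:"];

function resolveStopSequences(
  callStop: string[] | undefined,
  instanceStop: string[] | undefined
): string[] {
  return (
    // 1. Per-call stops, always extended with the defaults.
    callStop?.concat(DEFAULT_STOP_SEQUENCES) ??
    // 2. Otherwise, stop sequences configured on the model instance.
    instanceStop ??
    // 3. Otherwise, just the defaults.
    DEFAULT_STOP_SEQUENCES
  );
}

console.log(resolveStopSequences(["END"], undefined)); // ["END", "\n\nHuman:"]
console.log(resolveStopSequences(undefined, ["STOP"])); // ["STOP"]
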
4 changes: 2 additions & 2 deletions langchain/src/chat_models/base.ts
@@ -78,7 +78,7 @@ export abstract class BaseChatModel extends BaseLanguageModel {
);
const extra = {
options: parsedOptions,
- invocation_params: this?.invocationParams(),
+ invocation_params: this?.invocationParams(parsedOptions),
};
const runManager = await callbackManager_?.handleChatModelStart(
this.toJSON(),
@@ -122,7 +122,7 @@ export abstract class BaseChatModel extends BaseLanguageModel {
* Get the parameters used to invoke the model
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
- invocationParams(): any {
+ invocationParams(_options?: this["ParsedCallOptions"]): any {
return {};
}

25 changes: 12 additions & 13 deletions langchain/src/chat_models/openai.ts
@@ -249,7 +249,9 @@ export class ChatOpenAI
/**
* Get the parameters used to invoke the model
*/
- invocationParams(): Omit<CreateChatCompletionRequest, "messages"> {
+ invocationParams(
+   options?: this["ParsedCallOptions"]
+ ): Omit<CreateChatCompletionRequest, "messages"> {
return {
model: this.modelName,
temperature: this.temperature,
@@ -259,8 +261,14 @@
max_tokens: this.maxTokens === -1 ? undefined : this.maxTokens,
n: this.n,
logit_bias: this.logitBias,
- stop: this.stop,
+ stop: options?.stop ?? this.stop,
stream: this.streaming,
+ functions:
+   options?.functions ??
+   (options?.tools
+     ? options?.tools.map(formatToOpenAIFunction)
+     : undefined),
+ function_call: options?.function_call,
...this.modelKwargs,
};
}
@@ -284,20 +292,11 @@
/** @ignore */
async _generate(
messages: BaseChatMessage[],
- options?: this["ParsedCallOptions"],
+ options: this["ParsedCallOptions"],
runManager?: CallbackManagerForLLMRun
): Promise<ChatResult> {
const tokenUsage: TokenUsage = {};
- if (this.stop && options?.stop) {
-   throw new Error("Stop found in input and default params");
- }
-
- const params = this.invocationParams();
- params.stop = options?.stop ?? params.stop;
- params.functions =
-   options?.functions ??
-   (options?.tools ? options?.tools.map(formatToOpenAIFunction) : undefined);
- params.function_call = options?.function_call;
+ const params = this.invocationParams(options);
const messagesMapped: ChatCompletionRequestMessage[] = messages.map(
(message) => ({
role: messageTypeToOpenAIRole(message._getType()),
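
In ChatOpenAI, explicitly supplied functions now take priority; otherwise any tools in the call options are converted with formatToOpenAIFunction; otherwise the field is left undefined. A simplified sketch of that fallback, using stand-in types and a stand-in formatToOpenAIFunction (langchain's real helper builds the function schema from the tool's definition):

// Stand-in types — simplified, not langchain's actual Tool or OpenAI types.
interface ToolLike {
  name: string;
  description: string;
}

interface OpenAIFunction {
  name: string;
  description: string;
  parameters: Record<string, unknown>;
}

// Stand-in for langchain's formatToOpenAIFunction helper.
function formatToOpenAIFunction(tool: ToolLike): OpenAIFunction {
  return { name: tool.name, description: tool.description, parameters: {} };
}

function resolveFunctions(options?: {
  functions?: OpenAIFunction[];
  tools?: ToolLike[];
}): OpenAIFunction[] | undefined {
  // Explicit functions win; otherwise tools are converted; otherwise the
  // `functions` field is omitted from the request entirely.
  return (
    options?.functions ??
    (options?.tools ? options.tools.map(formatToOpenAIFunction) : undefined)
  );
}

console.log(resolveFunctions({ tools: [{ name: "search", description: "Web search" }] }));
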
9 changes: 6 additions & 3 deletions langchain/src/llms/base.ts
@@ -83,14 +83,14 @@ export abstract class BaseLLM extends BaseLanguageModel {
* Get the parameters used to invoke the model
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
- invocationParams(): any {
+ invocationParams(_options?: this["ParsedCallOptions"]): any {
return {};
}

/** @ignore */
async _generateUncached(
prompts: string[],
options: this["CallOptions"],
options: this["ParsedCallOptions"],
callbacks?: Callbacks
): Promise<LLMResult> {
const callbackManager_ = await CallbackManager.configure(
@@ -100,7 +100,10 @@
this.tags,
{ verbose: this.verbose }
);
- const extra = { options, invocation_params: this?.invocationParams() };
+ const extra = {
+   options,
+   invocation_params: this?.invocationParams(options),
+ };
const runManager = await callbackManager_?.handleLLMStart(
this.toJSON(),
prompts,
11 changes: 5 additions & 6 deletions langchain/src/llms/openai-chat.ts
@@ -192,7 +192,9 @@ export class OpenAIChat
/**
* Get the parameters used to invoke the model
*/
- invocationParams(): Omit<CreateChatCompletionRequest, "messages"> {
+ invocationParams(
+   options?: this["ParsedCallOptions"]
+ ): Omit<CreateChatCompletionRequest, "messages"> {
return {
model: this.modelName,
temperature: this.temperature,
@@ -202,7 +204,7 @@
n: this.n,
logit_bias: this.logitBias,
max_tokens: this.maxTokens === -1 ? undefined : this.maxTokens,
- stop: this.stop,
+ stop: options?.stop ?? this.stop,
stream: this.streaming,
...this.modelKwargs,
};
@@ -242,10 +244,7 @@
options: this["ParsedCallOptions"],
runManager?: CallbackManagerForLLMRun
): Promise<string> {
- const { stop } = options;
-
- const params = this.invocationParams();
- params.stop = stop ?? params.stop;
+ const params = this.invocationParams(options);

const data = params.stream
? await new Promise<CreateChatCompletionResponse>((resolve, reject) => {
14 changes: 5 additions & 9 deletions langchain/src/llms/openai.ts
@@ -209,7 +209,9 @@ export class OpenAI extends BaseLLM implements OpenAIInput, AzureOpenAIInput {
/**
* Get the parameters used to invoke the model
*/
- invocationParams(): CreateCompletionRequest {
+ invocationParams(
+   options?: this["ParsedCallOptions"]
+ ): CreateCompletionRequest {
return {
model: this.modelName,
temperature: this.temperature,
@@ -220,7 +222,7 @@
n: this.n,
best_of: this.bestOf,
logit_bias: this.logitBias,
- stop: this.stop,
+ stop: options?.stop ?? this.stop,
stream: this.streaming,
...this.modelKwargs,
};
@@ -262,17 +264,11 @@
options: this["ParsedCallOptions"],
runManager?: CallbackManagerForLLMRun
): Promise<LLMResult> {
- const { stop } = options;
const subPrompts = chunkArray(prompts, this.batchSize);
const choices: CreateCompletionResponseChoicesInner[] = [];
const tokenUsage: TokenUsage = {};

- if (this.stop && stop) {
-   throw new Error("Stop found in input and default params");
- }
-
- const params = this.invocationParams();
- params.stop = stop ?? params.stop;
+ const params = this.invocationParams(options);

if (params.max_tokens === -1) {
if (prompts.length !== 1) {
