Move OpenAI tests and add one for JSON mode caching (langchain-ai#3754)
jacoblee93 authored Dec 22, 2023
1 parent b05bba3 commit e37a3df
Showing 8 changed files with 128 additions and 24 deletions.
1 change: 1 addition & 0 deletions libs/langchain-openai/package.json
@@ -28,6 +28,7 @@
"test": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
"test:watch": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
"test:single": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000",
"test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%",
"format": "prettier --write \"src\"",
"format:check": "prettier --check \"src\""
},
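
The new `test:int` script is the counterpart of `test`: where `test` ignores files matching `.int.test.ts`, `test:int` selects only those files via `--testPathPattern`, raises the per-test timeout to 100 seconds, and caps workers at 50%, so the live-API integration tests touched in this commit can be run on their own.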
@@ -1,6 +1,7 @@
import { test, expect } from "@jest/globals";
import { ChatOpenAI } from "../openai.js";
import { HumanMessage, ToolMessage } from "../../schema/index.js";
import { test, expect, jest } from "@jest/globals";
import { HumanMessage, ToolMessage } from "@langchain/core/messages";
import { InMemoryCache } from "@langchain/core/caches";
import { ChatOpenAI } from "../chat_models.js";

test("Test ChatOpenAI JSON mode", async () => {
const chat = new ChatOpenAI({
@@ -174,3 +175,34 @@ test("Test ChatOpenAI tool calling with streaming", async () => {
console.log(finalChunk?.additional_kwargs.tool_calls);
expect(finalChunk?.additional_kwargs.tool_calls?.length).toBeGreaterThan(1);
});

test("ChatOpenAI in JSON mode can cache generations", async () => {
const memoryCache = new InMemoryCache();
const lookupSpy = jest.spyOn(memoryCache, "lookup");
const updateSpy = jest.spyOn(memoryCache, "update");
const chat = new ChatOpenAI({
modelName: "gpt-3.5-turbo-1106",
temperature: 1,
cache: memoryCache,
}).bind({
response_format: {
type: "json_object",
},
});
const message = new HumanMessage(
"Respond with a JSON object containing arbitrary fields."
);
const res = await chat.invoke([message]);
console.log(res);

const res2 = await chat.invoke([message]);
console.log(res2);

expect(res).toEqual(res2);

expect(lookupSpy).toHaveBeenCalledTimes(2);
expect(updateSpy).toHaveBeenCalledTimes(1);

lookupSpy.mockRestore();
updateSpy.mockRestore();
});
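
For readers outside the test suite, here is a minimal sketch of the JSON-mode caching pattern the new test exercises, assuming the package's public entrypoint is `@langchain/openai` (the model name, prompt, and `response_format` are copied from the test; the `main` wrapper is illustrative):

```typescript
import { ChatOpenAI } from "@langchain/openai";
import { InMemoryCache } from "@langchain/core/caches";
import { HumanMessage } from "@langchain/core/messages";

async function main() {
  // Shared in-memory cache: the first generation populates it, the second reuses it.
  const cache = new InMemoryCache();

  const model = new ChatOpenAI({
    modelName: "gpt-3.5-turbo-1106",
    temperature: 1,
    cache,
  }).bind({
    // Constrain the model to emit a JSON object.
    response_format: { type: "json_object" },
  });

  const message = new HumanMessage(
    "Respond with a JSON object containing arbitrary fields."
  );

  const first = await model.invoke([message]); // cache miss: calls the API, then updates the cache
  const second = await model.invoke([message]); // cache hit: served from InMemoryCache

  console.log(first.content === second.content); // true when the second call was cached
}

main().catch(console.error);
```

The spy assertions follow from this flow: `lookup` is consulted on every `invoke` (2 calls), while `update` only runs after the first, uncached generation (1 call); the second `invoke` is served from the cache, which is why `res` and `res2` are expected to be deep-equal.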
@@ -1,9 +1,9 @@
import { test } from "@jest/globals";
import { HumanMessage } from "@langchain/core/messages";
import * as fs from "node:fs/promises";
import { fileURLToPath } from "node:url";
import * as path from "node:path";
import { ChatOpenAI } from "../openai.js";
import { HumanMessage } from "../../schema/index.js";
import { ChatOpenAI } from "../chat_models.js";

test("Test ChatOpenAI with a file", async () => {
const __filename = fileURLToPath(import.meta.url);
@@ -1,23 +1,22 @@
import { test, jest, expect } from "@jest/globals";
import { ChatOpenAI } from "../openai.js";
import {
BaseMessage,
ChatMessage,
ChatGeneration,
HumanMessage,
LLMResult,
SystemMessage,
} from "../../schema/index.js";
import { ChatPromptValue } from "../../prompts/chat.js";
} from "@langchain/core/messages";
import { ChatGeneration, LLMResult } from "@langchain/core/outputs";
import { ChatPromptValue } from "@langchain/core/prompt_values";
import {
PromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
} from "../../prompts/index.js";
import { CallbackManager } from "../../callbacks/index.js";
import { NewTokenIndices } from "../../callbacks/base.js";
import { InMemoryCache } from "../../cache/index.js";
} from "@langchain/core/prompts";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { NewTokenIndices } from "@langchain/core/callbacks/base";
import { InMemoryCache } from "@langchain/core/caches";
import { ChatOpenAI } from "../chat_models.js";

test("Test ChatOpenAI", async () => {
const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10 });
@@ -360,11 +359,14 @@ test("Test ChatOpenAI stream method", async () => {

test("Test ChatOpenAI stream method with abort", async () => {
await expect(async () => {
const model = new ChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo" });
const model = new ChatOpenAI({
maxTokens: 100,
modelName: "gpt-3.5-turbo",
});
const stream = await model.stream(
"How is your day going? Be extremely verbose.",
{
signal: AbortSignal.timeout(1000),
signal: AbortSignal.timeout(500),
}
);
for await (const chunk of stream) {
Binary file added libs/langchain-openai/src/tests/data/hotdog.jpg
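This fixture is the image used by the "Test ChatOpenAI with a file" test above, which builds the path to its test data relative to the test module via `fileURLToPath` and `node:path`.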
69 changes: 69 additions & 0 deletions libs/langchain-openai/src/tests/embeddings.int.test
@@ -0,0 +1,69 @@
import { test, expect } from "@jest/globals";
import { OpenAIEmbeddings } from "../embeddings.js";

test("Test OpenAIEmbeddings.embedQuery", async () => {
const embeddings = new OpenAIEmbeddings();
const res = await embeddings.embedQuery("Hello world");
expect(typeof res[0]).toBe("number");
});

test("Test OpenAIEmbeddings.embedDocuments", async () => {
const embeddings = new OpenAIEmbeddings();
const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]);
expect(res).toHaveLength(2);
expect(typeof res[0][0]).toBe("number");
expect(typeof res[1][0]).toBe("number");
});

test("Test OpenAIEmbeddings concurrency", async () => {
const embeddings = new OpenAIEmbeddings({
batchSize: 1,
maxConcurrency: 2,
});
const res = await embeddings.embedDocuments([
"Hello world",
"Bye bye",
"Hello world",
"Bye bye",
"Hello world",
"Bye bye",
]);
expect(res).toHaveLength(6);
expect(res.find((embedding) => typeof embedding[0] !== "number")).toBe(
undefined
);
});

test("Test timeout error thrown from SDK", async () => {
await expect(async () => {
const model = new OpenAIEmbeddings({
timeout: 1,
});
await model.embedDocuments([
"Hello world",
"Bye bye",
"Hello world",
"Bye bye",
"Hello world",
"Bye bye",
]);
}).rejects.toThrow();
});

test("Test OpenAI embeddings with an invalid org throws", async () => {
await expect(async () => {
const model = new OpenAIEmbeddings({
configuration: {
organization: "NOT_REAL",
},
});
await model.embedDocuments([
"Hello world",
"Bye bye",
"Hello world",
"Bye bye",
"Hello world",
"Bye bye",
]);
}).rejects.toThrow();
});
@@ -1,6 +1,6 @@
import { expect, test } from "@jest/globals";
import { OpenAIChat } from "../openai-chat.js";
import { CallbackManager } from "../../callbacks/index.js";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { OpenAIChat } from "../legacy.js";

test("Test OpenAI", async () => {
const model = new OpenAIChat({ modelName: "gpt-3.5-turbo", maxTokens: 10 });
@@ -1,10 +1,10 @@
import { test, expect } from "@jest/globals";
import { LLMResult } from "../../schema/index.js";
import { OpenAIChat } from "../openai-chat.js";
import { OpenAI } from "../openai.js";
import { StringPromptValue } from "../../prompts/index.js";
import { CallbackManager } from "../../callbacks/index.js";
import { NewTokenIndices } from "../../callbacks/base.js";
import { LLMResult } from "@langchain/core/outputs";
import { StringPromptValue } from "@langchain/core/prompt_values";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { NewTokenIndices } from "@langchain/core/callbacks/base";
import { OpenAIChat } from "../legacy.js";
import { OpenAI } from "../llms.js";

test("Test OpenAI", async () => {
const model = new OpenAI({