diff --git a/libs/langchain-openai/package.json b/libs/langchain-openai/package.json index 5bd7b8b1bd17..986b8f13212d 100644 --- a/libs/langchain-openai/package.json +++ b/libs/langchain-openai/package.json @@ -28,6 +28,7 @@ "test": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", "test:watch": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", "test:single": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", + "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", "format": "prettier --write \"src\"", "format:check": "prettier --check \"src\"" }, diff --git a/langchain/src/chat_models/tests/chatopenai-extended.int.test.ts b/libs/langchain-openai/src/tests/chat_models-extended.int.test.ts similarity index 82% rename from langchain/src/chat_models/tests/chatopenai-extended.int.test.ts rename to libs/langchain-openai/src/tests/chat_models-extended.int.test.ts index 995da52a22f1..78c9f807b622 100644 --- a/langchain/src/chat_models/tests/chatopenai-extended.int.test.ts +++ b/libs/langchain-openai/src/tests/chat_models-extended.int.test.ts @@ -1,6 +1,7 @@ -import { test, expect } from "@jest/globals"; -import { ChatOpenAI } from "../openai.js"; -import { HumanMessage, ToolMessage } from "../../schema/index.js"; +import { test, expect, jest } from "@jest/globals"; +import { HumanMessage, ToolMessage } from "@langchain/core/messages"; +import { InMemoryCache } from "@langchain/core/caches"; +import { ChatOpenAI } from "../chat_models.js"; test("Test ChatOpenAI JSON mode", async () => { const chat = new ChatOpenAI({ @@ -174,3 +175,34 @@ test("Test ChatOpenAI tool calling with streaming", async () => { console.log(finalChunk?.additional_kwargs.tool_calls); 
expect(finalChunk?.additional_kwargs.tool_calls?.length).toBeGreaterThan(1); }); + +test("ChatOpenAI in JSON mode can cache generations", async () => { + const memoryCache = new InMemoryCache(); + const lookupSpy = jest.spyOn(memoryCache, "lookup"); + const updateSpy = jest.spyOn(memoryCache, "update"); + const chat = new ChatOpenAI({ + modelName: "gpt-3.5-turbo-1106", + temperature: 1, + cache: memoryCache, + }).bind({ + response_format: { + type: "json_object", + }, + }); + const message = new HumanMessage( + "Respond with a JSON object containing arbitrary fields." + ); + const res = await chat.invoke([message]); + console.log(res); + + const res2 = await chat.invoke([message]); + console.log(res2); + + expect(res).toEqual(res2); + + expect(lookupSpy).toHaveBeenCalledTimes(2); + expect(updateSpy).toHaveBeenCalledTimes(1); + + lookupSpy.mockRestore(); + updateSpy.mockRestore(); +}); diff --git a/langchain/src/chat_models/tests/chatopenai-vision.int.test.ts b/libs/langchain-openai/src/tests/chat_models-vision.int.test.ts similarity index 92% rename from langchain/src/chat_models/tests/chatopenai-vision.int.test.ts rename to libs/langchain-openai/src/tests/chat_models-vision.int.test.ts index 56dc7c381d25..94fa4c1cc998 100644 --- a/langchain/src/chat_models/tests/chatopenai-vision.int.test.ts +++ b/libs/langchain-openai/src/tests/chat_models-vision.int.test.ts @@ -1,9 +1,9 @@ import { test } from "@jest/globals"; +import { HumanMessage } from "@langchain/core/messages"; import * as fs from "node:fs/promises"; import { fileURLToPath } from "node:url"; import * as path from "node:path"; -import { ChatOpenAI } from "../openai.js"; -import { HumanMessage } from "../../schema/index.js"; +import { ChatOpenAI } from "../chat_models.js"; test("Test ChatOpenAI with a file", async () => { const __filename = fileURLToPath(import.meta.url); diff --git a/langchain/src/chat_models/tests/chatopenai.int.test.ts b/libs/langchain-openai/src/tests/chat_models.int.test.ts similarity 
index 97% rename from langchain/src/chat_models/tests/chatopenai.int.test.ts rename to libs/langchain-openai/src/tests/chat_models.int.test.ts index 5d712f2b3bb2..0ce5b095ae63 100644 --- a/langchain/src/chat_models/tests/chatopenai.int.test.ts +++ b/libs/langchain-openai/src/tests/chat_models.int.test.ts @@ -1,23 +1,22 @@ import { test, jest, expect } from "@jest/globals"; -import { ChatOpenAI } from "../openai.js"; import { BaseMessage, ChatMessage, - ChatGeneration, HumanMessage, - LLMResult, SystemMessage, -} from "../../schema/index.js"; -import { ChatPromptValue } from "../../prompts/chat.js"; +} from "@langchain/core/messages"; +import { ChatGeneration, LLMResult } from "@langchain/core/outputs"; +import { ChatPromptValue } from "@langchain/core/prompt_values"; import { PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, -} from "../../prompts/index.js"; -import { CallbackManager } from "../../callbacks/index.js"; -import { NewTokenIndices } from "../../callbacks/base.js"; -import { InMemoryCache } from "../../cache/index.js"; +} from "@langchain/core/prompts"; +import { CallbackManager } from "@langchain/core/callbacks/manager"; +import { NewTokenIndices } from "@langchain/core/callbacks/base"; +import { InMemoryCache } from "@langchain/core/caches"; +import { ChatOpenAI } from "../chat_models.js"; test("Test ChatOpenAI", async () => { const chat = new ChatOpenAI({ modelName: "gpt-3.5-turbo", maxTokens: 10 }); @@ -360,11 +359,14 @@ test("Test ChatOpenAI stream method", async () => { test("Test ChatOpenAI stream method with abort", async () => { await expect(async () => { - const model = new ChatOpenAI({ maxTokens: 50, modelName: "gpt-3.5-turbo" }); + const model = new ChatOpenAI({ + maxTokens: 100, + modelName: "gpt-3.5-turbo", + }); const stream = await model.stream( "How is your day going? 
Be extremely verbose.", { - signal: AbortSignal.timeout(1000), + signal: AbortSignal.timeout(500), } ); for await (const chunk of stream) { diff --git a/libs/langchain-openai/src/tests/data/hotdog.jpg b/libs/langchain-openai/src/tests/data/hotdog.jpg new file mode 100644 index 000000000000..dfab265903be Binary files /dev/null and b/libs/langchain-openai/src/tests/data/hotdog.jpg differ diff --git a/libs/langchain-openai/src/tests/embeddings.int.test.ts b/libs/langchain-openai/src/tests/embeddings.int.test.ts new file mode 100644 index 000000000000..101fb6a4dd83 --- /dev/null +++ b/libs/langchain-openai/src/tests/embeddings.int.test.ts @@ -0,0 +1,69 @@ +import { test, expect } from "@jest/globals"; +import { OpenAIEmbeddings } from "../embeddings.js"; + +test("Test OpenAIEmbeddings.embedQuery", async () => { + const embeddings = new OpenAIEmbeddings(); + const res = await embeddings.embedQuery("Hello world"); + expect(typeof res[0]).toBe("number"); +}); + +test("Test OpenAIEmbeddings.embedDocuments", async () => { + const embeddings = new OpenAIEmbeddings(); + const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]); + expect(res).toHaveLength(2); + expect(typeof res[0][0]).toBe("number"); + expect(typeof res[1][0]).toBe("number"); +}); + +test("Test OpenAIEmbeddings concurrency", async () => { + const embeddings = new OpenAIEmbeddings({ + batchSize: 1, + maxConcurrency: 2, + }); + const res = await embeddings.embedDocuments([ + "Hello world", + "Bye bye", + "Hello world", + "Bye bye", + "Hello world", + "Bye bye", + ]); + expect(res).toHaveLength(6); + expect(res.find((embedding) => typeof embedding[0] !== "number")).toBe( + undefined + ); +}); + +test("Test timeout error thrown from SDK", async () => { + await expect(async () => { + const model = new OpenAIEmbeddings({ + timeout: 1, + }); + await model.embedDocuments([ + "Hello world", + "Bye bye", + "Hello world", + "Bye bye", + "Hello world", + "Bye bye", + ]); + }).rejects.toThrow(); +}); + +test("Test 
OpenAI embeddings with an invalid org throws", async () => { + await expect(async () => { + const model = new OpenAIEmbeddings({ + configuration: { + organization: "NOT_REAL", + }, + }); + await model.embedDocuments([ + "Hello world", + "Bye bye", + "Hello world", + "Bye bye", + "Hello world", + "Bye bye", + ]); + }).rejects.toThrow(); +}); diff --git a/langchain/src/llms/tests/openai-chat.int.test.ts b/libs/langchain-openai/src/tests/legacy.int.test.ts similarity index 97% rename from langchain/src/llms/tests/openai-chat.int.test.ts rename to libs/langchain-openai/src/tests/legacy.int.test.ts index 43f737c552b8..3d896fdce495 100644 --- a/langchain/src/llms/tests/openai-chat.int.test.ts +++ b/libs/langchain-openai/src/tests/legacy.int.test.ts @@ -1,6 +1,6 @@ import { expect, test } from "@jest/globals"; -import { OpenAIChat } from "../openai-chat.js"; -import { CallbackManager } from "../../callbacks/index.js"; +import { CallbackManager } from "@langchain/core/callbacks/manager"; +import { OpenAIChat } from "../legacy.js"; test("Test OpenAI", async () => { const model = new OpenAIChat({ modelName: "gpt-3.5-turbo", maxTokens: 10 }); diff --git a/langchain/src/llms/tests/openai.int.test.ts b/libs/langchain-openai/src/tests/llms.int.test.ts similarity index 95% rename from langchain/src/llms/tests/openai.int.test.ts rename to libs/langchain-openai/src/tests/llms.int.test.ts index 53421b1ff326..37b3ff135af5 100644 --- a/langchain/src/llms/tests/openai.int.test.ts +++ b/libs/langchain-openai/src/tests/llms.int.test.ts @@ -1,10 +1,10 @@ import { test, expect } from "@jest/globals"; -import { LLMResult } from "../../schema/index.js"; -import { OpenAIChat } from "../openai-chat.js"; -import { OpenAI } from "../openai.js"; -import { StringPromptValue } from "../../prompts/index.js"; -import { CallbackManager } from "../../callbacks/index.js"; -import { NewTokenIndices } from "../../callbacks/base.js"; +import { LLMResult } from "@langchain/core/outputs"; +import { 
StringPromptValue } from "@langchain/core/prompt_values"; +import { CallbackManager } from "@langchain/core/callbacks/manager"; +import { NewTokenIndices } from "@langchain/core/callbacks/base"; +import { OpenAIChat } from "../legacy.js"; +import { OpenAI } from "../llms.js"; test("Test OpenAI", async () => { const model = new OpenAI({