forked from langchain-ai/langchainjs
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Update streaming examples to use request callbacks (langchain-ai#1033)
* Update streaming examples to use request callbacks
* Add clearer docs for the various ways of using callbacks
* Improve docs for tools
* Fix broken links
- Loading branch information
Showing 19 changed files with 190 additions and 154 deletions.
There are no files selected for viewing
This file was deleted.
Oops, something went wrong.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,30 +1,28 @@ | ||
import { OpenAI } from "langchain/llms/openai"; | ||
import { ZapierNLAWrapper } from "langchain/tools"; | ||
import { | ||
initializeAgentExecutorWithOptions, | ||
ZapierToolKit, | ||
} from "langchain/agents"; | ||
import { ZapierNLAWrapper } from "langchain/tools"; | ||
|
||
export const run = async () => { | ||
const model = new OpenAI({ temperature: 0 }); | ||
const zapier = new ZapierNLAWrapper(); | ||
const toolkit = await ZapierToolKit.fromZapierNLAWrapper(zapier); | ||
const model = new OpenAI({ temperature: 0 }); | ||
const zapier = new ZapierNLAWrapper(); | ||
const toolkit = await ZapierToolKit.fromZapierNLAWrapper(zapier); | ||
|
||
const executor = await initializeAgentExecutorWithOptions( | ||
toolkit.tools, | ||
model, | ||
{ | ||
agentType: "zero-shot-react-description", | ||
verbose: true, | ||
} | ||
); | ||
console.log("Loaded agent."); | ||
const executor = await initializeAgentExecutorWithOptions( | ||
toolkit.tools, | ||
model, | ||
{ | ||
agentType: "zero-shot-react-description", | ||
verbose: true, | ||
} | ||
); | ||
console.log("Loaded agent."); | ||
|
||
const input = `Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier Slack channel.`; | ||
const input = `Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier Slack channel.`; | ||
|
||
console.log(`Executing with input "${input}"...`); | ||
console.log(`Executing with input "${input}"...`); | ||
|
||
const result = await executor.call({ input }); | ||
const result = await executor.call({ input }); | ||
|
||
console.log(`Got output ${result.output}`); | ||
}; | ||
console.log(`Got output ${result.output}`); |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,8 @@ | ||
import { ConsoleCallbackHandler } from "langchain/callbacks"; | ||
import { OpenAI } from "langchain/llms/openai"; | ||
|
||
const llm = new OpenAI({ | ||
temperature: 0, | ||
// This handler will be used for all calls made with this LLM. | ||
callbacks: [new ConsoleCallbackHandler()], | ||
}); |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,11 @@ | ||
import { ConsoleCallbackHandler } from "langchain/callbacks"; | ||
import { OpenAI } from "langchain/llms/openai"; | ||
|
||
const llm = new OpenAI({ | ||
temperature: 0, | ||
}); | ||
|
||
// This handler will be used only for this call. | ||
const response = await llm.call("1 + 1 =", undefined, [ | ||
new ConsoleCallbackHandler(), | ||
]); |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,10 @@ | ||
import { PromptTemplate } from "langchain/prompts"; | ||
import { LLMChain } from "langchain/chains"; | ||
import { OpenAI } from "langchain/llms/openai"; | ||
|
||
const chain = new LLMChain({ | ||
llm: new OpenAI({ temperature: 0 }), | ||
prompt: PromptTemplate.fromTemplate("Hello, world!"), | ||
// This will enable logging of all Chain *and* LLM events to the console. | ||
verbose: true, | ||
}); |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,39 +1,40 @@ | ||
import { ChatOpenAI } from "langchain/chat_models/openai"; | ||
import { HumanChatMessage } from "langchain/schema"; | ||
|
||
export const run = async () => { | ||
const chat = new ChatOpenAI({ | ||
maxTokens: 25, | ||
streaming: true, | ||
callbacks: [ | ||
{ | ||
handleLLMNewToken(token: string) { | ||
console.log({ token }); | ||
}, | ||
}, | ||
], | ||
}); | ||
const chat = new ChatOpenAI({ | ||
maxTokens: 25, | ||
streaming: true, | ||
}); | ||
|
||
const response = await chat.call([new HumanChatMessage("Tell me a joke.")]); | ||
const response = await chat.call( | ||
[new HumanChatMessage("Tell me a joke.")], | ||
undefined, | ||
[ | ||
{ | ||
handleLLMNewToken(token: string) { | ||
console.log({ token }); | ||
}, | ||
}, | ||
] | ||
); | ||
|
||
console.log(response); | ||
// { token: '' } | ||
// { token: '\n\n' } | ||
// { token: 'Why' } | ||
// { token: ' don' } | ||
// { token: "'t" } | ||
// { token: ' scientists' } | ||
// { token: ' trust' } | ||
// { token: ' atoms' } | ||
// { token: '?\n\n' } | ||
// { token: 'Because' } | ||
// { token: ' they' } | ||
// { token: ' make' } | ||
// { token: ' up' } | ||
// { token: ' everything' } | ||
// { token: '.' } | ||
// { token: '' } | ||
// AIChatMessage { | ||
// text: "\n\nWhy don't scientists trust atoms?\n\nBecause they make up everything." | ||
// } | ||
}; | ||
console.log(response); | ||
// { token: '' } | ||
// { token: '\n\n' } | ||
// { token: 'Why' } | ||
// { token: ' don' } | ||
// { token: "'t" } | ||
// { token: ' scientists' } | ||
// { token: ' trust' } | ||
// { token: ' atoms' } | ||
// { token: '?\n\n' } | ||
// { token: 'Because' } | ||
// { token: ' they' } | ||
// { token: ' make' } | ||
// { token: ' up' } | ||
// { token: ' everything' } | ||
// { token: '.' } | ||
// { token: '' } | ||
// AIChatMessage { | ||
// text: "\n\nWhy don't scientists trust atoms?\n\nBecause they make up everything." | ||
// } |
Oops, something went wrong.