add base agent (langchain-ai#578)
* add base agent

* cr

* cr

* cr

* cr

* cr

* cr

* cr

* Fix allowedTools

* Rename existing example

---------

Co-authored-by: Nuno Campos <[email protected]>
hwchase17 and nfcampos authored Apr 3, 2023
1 parent dcc113d commit 0da5c46
Showing 13 changed files with 473 additions and 139 deletions.
13 changes: 13 additions & 0 deletions docs/docs/modules/agents/agents/custom_llm.mdx
@@ -0,0 +1,13 @@
---
hide_table_of_contents: true
sidebar_position: 1
---

import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/agents/custom_llm_agent.ts";

# Custom LLM Agent

This example covers how to create a custom Agent powered by an LLM.

<CodeBlock language="typescript">{Example}</CodeBlock>
13 changes: 13 additions & 0 deletions docs/docs/modules/agents/agents/custom_llm_chat.mdx
@@ -0,0 +1,13 @@
---
hide_table_of_contents: true
sidebar_position: 1
---

import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/agents/custom_llm_agent_chat.ts";

# Custom LLM Agent (with Chat Model)

This example covers how to create a custom Agent powered by a Chat Model.

<CodeBlock language="typescript">{Example}</CodeBlock>
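The custom_llm_agent_chat.ts example referenced above is among the changed files but is not rendered in this view. As a rough sketch only (not the committed file), the chat variant can be assumed to mirror the LLM example further down, swapping the completion model for a chat model; the CustomPromptTemplate and CustomOutputParser imports below are hypothetical re-exports for brevity, since the committed chat example defines its own equivalents.

// A minimal sketch, not part of this commit: the LLM example with a chat model.
// `CustomPromptTemplate` and `CustomOutputParser` are assumed to be re-exported
// from the LLM example; the committed chat example defines its own equivalents.
import { LLMSingleActionAgent, AgentExecutor } from "langchain/agents";
import { LLMChain } from "langchain/chains";
import { ChatOpenAI } from "langchain/chat_models";
import { SerpAPI, Calculator } from "langchain/tools";
import { CustomPromptTemplate, CustomOutputParser } from "./custom_llm_agent";

export const run = async () => {
  const model = new ChatOpenAI({ temperature: 0 });
  const tools = [new SerpAPI(), new Calculator()];

  // Same prompt/parser wiring as the LLM example; only the model changes.
  const llmChain = new LLMChain({
    prompt: new CustomPromptTemplate({
      tools,
      inputVariables: ["input", "agent_scratchpad"],
    }),
    llm: model,
  });
  const agent = new LLMSingleActionAgent({
    llmChain,
    outputParser: new CustomOutputParser(),
    stop: ["\nObservation"],
  });
  const executor = new AgentExecutor({ agent, tools });

  const result = await executor.call({
    input: "How many people live in Canada as of 2023?",
  });
  console.log(result.output);
};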
@@ -5,7 +5,7 @@ hide_table_of_contents: true
import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/chat/agent.ts";

# Custom Agent, using Chat Models
# Agent with Custom Prompt, using Chat Models

This example covers how to create a custom agent for a chat model. It will utilize chat-specific prompts.

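The examples/chat/agent.ts file behind this page is truncated in the view above. As a hedged sketch (assumed wiring, not necessarily the committed code), an agent with a custom prompt for a chat model could be assembled by wrapping the standard zero-shot tool prompt in chat-specific messages:

// A hedged sketch of a chat agent with a custom prompt; the prefix/suffix text
// and message layout here are assumptions, not the committed example.
import { ZeroShotAgent, AgentExecutor } from "langchain/agents";
import { LLMChain } from "langchain/chains";
import { ChatOpenAI } from "langchain/chat_models";
import {
  ChatPromptTemplate,
  HumanMessagePromptTemplate,
  SystemMessagePromptTemplate,
} from "langchain/prompts";
import { SerpAPI, Calculator } from "langchain/tools";

export const run = async () => {
  const tools = [new SerpAPI(), new Calculator()];

  // Render the usual zero-shot tool prompt, then embed it as the system message.
  const prompt = ZeroShotAgent.createPrompt(tools, {
    prefix: `Answer the following questions as best you can. You have access to the following tools:`,
    suffix: `Begin! Remember to give a final answer.`,
  });
  const chatPrompt = ChatPromptTemplate.fromPromptMessages([
    new SystemMessagePromptTemplate(prompt),
    HumanMessagePromptTemplate.fromTemplate(
      `{input}\n\nThis was your previous work (you have not given a final answer yet):\n{agent_scratchpad}`
    ),
  ]);

  const llmChain = new LLMChain({
    prompt: chatPrompt,
    llm: new ChatOpenAI({ temperature: 0 }),
  });
  const agent = new ZeroShotAgent({
    llmChain,
    allowedTools: tools.map((tool) => tool.name),
  });
  const executor = AgentExecutor.fromAgentAndTools({ agent, tools });

  const result = await executor.call({
    input: "How many people live in Canada as of 2023?",
  });
  console.log(result.output);
};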
87 changes: 0 additions & 87 deletions docs/docs/modules/agents/agents/examples/custom_agent_llm.md

This file was deleted.

5 changes: 3 additions & 2 deletions examples/src/agents/concurrent_mrkl.ts
@@ -1,5 +1,5 @@
import { OpenAI } from "langchain";
import { initializeAgentExecutor } from "langchain/agents";
import { initializeAgentExecutor, Agent } from "langchain/agents";
import { SerpAPI, Calculator } from "langchain/tools";
import process from "process";
import {
@@ -57,7 +57,8 @@ export const run = async () => {
true,
callbackManager
);
executor.agent.llmChain.callbackManager = callbackManager;
const agent = executor.agent as Agent;
agent.llmChain.callbackManager = callbackManager;
executors.push(executor);
}

137 changes: 137 additions & 0 deletions examples/src/agents/custom_llm_agent.ts
@@ -0,0 +1,137 @@
import {
LLMSingleActionAgent,
AgentActionOutputParser,
AgentExecutor,
} from "langchain/agents";
import { LLMChain } from "langchain/chains";
import { OpenAI } from "langchain/llms";
import {
BasePromptTemplate,
BaseStringPromptTemplate,
SerializedBasePromptTemplate,
renderTemplate,
} from "langchain/prompts";
import {
InputValues,
PartialValues,
AgentStep,
AgentAction,
AgentFinish,
} from "langchain/schema";
import { SerpAPI, Calculator, Tool } from "langchain/tools";

const PREFIX = `Answer the following questions as best you can. You have access to the following tools:`;
const formatInstructions = (toolNames: string) => `Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [${toolNames}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question`;
const SUFFIX = `Begin!

Question: {input}
Thought:{agent_scratchpad}`;

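/**
 * Prompt template that assembles the ReAct-style prompt above: it fills in the
 * tool names and descriptions, then renders the agent_scratchpad from the
 * intermediate (action, observation) steps accumulated so far.
 */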
class CustomPromptTemplate extends BaseStringPromptTemplate {
tools: Tool[];

constructor(args: { tools: Tool[]; inputVariables: string[] }) {
super({ inputVariables: args.inputVariables });
this.tools = args.tools;
}

_getPromptType(): string {
throw new Error("Not implemented");
}

format(input: InputValues): Promise<string> {
/** Construct the final template */
const toolStrings = this.tools
.map((tool) => `${tool.name}: ${tool.description}`)
.join("\n");
const toolNames = this.tools.map((tool) => tool.name).join("\n");
const instructions = formatInstructions(toolNames);
const template = [PREFIX, toolStrings, instructions, SUFFIX].join("\n\n");
/** Construct the agent_scratchpad */
const intermediateSteps = input.intermediate_steps as AgentStep[];
const agentScratchpad = intermediateSteps.reduce(
(thoughts, { action, observation }) =>
thoughts +
[action.log, `\nObservation: ${observation}`, "Thought:"].join("\n"),
""
);
const newInput = { agent_scratchpad: agentScratchpad, ...input };
/** Format the template. */
return Promise.resolve(renderTemplate(template, "f-string", newInput));
}

partial(_values: PartialValues): Promise<BasePromptTemplate> {
throw new Error("Not implemented");
}

serialize(): SerializedBasePromptTemplate {
throw new Error("Not implemented");
}
}

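/**
 * Output parser for the agent: a "Final Answer:" in the LLM text ends the run
 * with an AgentFinish; otherwise the "Action:" / "Action Input:" pair is
 * extracted into an AgentAction.
 */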
class CustomOutputParser extends AgentActionOutputParser {
async parse(text: string): Promise<AgentAction | AgentFinish> {
if (text.includes("Final Answer:")) {
const parts = text.split("Final Answer:");
const input = parts[parts.length - 1].trim();
const finalAnswers = { output: input };
return { log: text, returnValues: finalAnswers };
}

const match = /Action: (.*)\nAction Input: (.*)/s.exec(text);
if (!match) {
throw new Error(`Could not parse LLM output: ${text}`);
}

return {
tool: match[1].trim(),
toolInput: match[2].trim().replace(/^"+|"+$/g, ""),
log: text,
};
}

getFormatInstructions(): string {
throw new Error("Not implemented");
}
}

export const run = async () => {
const model = new OpenAI({ temperature: 0 });
const tools = [new SerpAPI(), new Calculator()];

const llmChain = new LLMChain({
prompt: new CustomPromptTemplate({
tools,
inputVariables: ["input", "agent_scratchpad"],
}),
llm: model,
});

const agent = new LLMSingleActionAgent({
llmChain,
outputParser: new CustomOutputParser(),
stop: ["\nObservation"],
});
const executor = new AgentExecutor({
agent,
tools,
});
console.log("Loaded agent.");

const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`;

console.log(`Executing with input "${input}"...`);

const result = await executor.call({ input });

console.log(`Got output ${result.output}`);
};
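As a usage note (not part of the commit): the example only exports run, so a tiny entrypoint along these lines can drive it, assuming OPENAI_API_KEY and SERPAPI_API_KEY are set in the environment and the relative import path matches where the file lives.

// Hypothetical entrypoint; the import path is an assumption for illustration.
import { run } from "./custom_llm_agent";

run().catch((error) => {
  console.error("Agent run failed:", error);
  process.exit(1);
});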
