Skip to content

Commit

Permalink
Merge branch 'develop' into pr-842
Browse files Browse the repository at this point in the history
  • Loading branch information
shakkernerd committed Dec 28, 2024
2 parents 0ee9e07 + bc5e50e commit c1624b8
Show file tree
Hide file tree
Showing 16 changed files with 359 additions and 32 deletions.
5 changes: 5 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ DISCORD_VOICE_CHANNEL_ID= # The ID of the voice channel the bot should joi

# AI Model API Keys
OPENAI_API_KEY= # OpenAI API key, starting with sk-
OPENAI_API_URL= # OpenAI API Endpoint (optional), Default: https://api.openai.com/v1
SMALL_OPENAI_MODEL= # Default: gpt-4o-mini
MEDIUM_OPENAI_MODEL= # Default: gpt-4o
LARGE_OPENAI_MODEL= # Default: gpt-4o
Expand All @@ -35,6 +36,10 @@ SMALL_HYPERBOLIC_MODEL= # Default: meta-llama/Llama-3.2-3B-Instruct
MEDIUM_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-70B-Instruct
LARGE_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-405B-Instruct

# Livepeer configuration
LIVEPEER_GATEWAY_URL= # Free inference gateways and docs: https://livepeer-eliza.com/
LIVEPEER_IMAGE_MODEL= # Default: ByteDance/SDXL-Lightning

# Speech Synthesis
ELEVENLABS_XI_API_KEY= # API key from elevenlabs

Expand Down
3 changes: 2 additions & 1 deletion agent/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -557,7 +557,8 @@ export async function createAgent(
getSecret(character, "FAL_API_KEY") ||
getSecret(character, "OPENAI_API_KEY") ||
getSecret(character, "VENICE_API_KEY") ||
getSecret(character, "HEURIST_API_KEY")
getSecret(character, "HEURIST_API_KEY") ||
getSecret(character, "LIVEPEER_GATEWAY_URL")
? imageGenerationPlugin
: null,
getSecret(character, "FAL_API_KEY") ? ThreeDGenerationPlugin : null,
Expand Down
10 changes: 10 additions & 0 deletions docs/api/enumerations/ModelProviderName.md
Original file line number Diff line number Diff line change
Expand Up @@ -233,3 +233,13 @@ Available model providers
#### Defined in

[packages/core/src/types.ts:240](https://github.com/elizaOS/eliza/blob/main/packages/core/src/types.ts#L240)

***

### LIVEPEER

> **LIVEPEER**: `"livepeer"`
#### Defined in

[packages/core/src/types.ts:241](https://github.com/elizaOS/eliza/blob/main/packages/core/src/types.ts#L241)
4 changes: 4 additions & 0 deletions docs/api/type-aliases/Models.md
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,10 @@ Model configurations by provider

> **akash\_chat\_api**: [`Model`](Model.md)
### livepeer

> **livepeer**: [`Model`](Model.md)
## Defined in

[packages/core/src/types.ts:188](https://github.com/elizaOS/eliza/blob/main/packages/core/src/types.ts#L188)
10 changes: 10 additions & 0 deletions docs/docs/api/enumerations/ModelProviderName.md
Original file line number Diff line number Diff line change
Expand Up @@ -119,3 +119,13 @@
#### Defined in

[packages/core/src/types.ts:132](https://github.com/elizaos/eliza/blob/4d1e66cbf7deea87a8a67525670a963cd00108bc/packages/core/src/types.ts#L132)

---

### LIVEPEER

> **LIVEPEER**: `"livepeer"`
#### Defined in

[packages/core/src/types.ts:133](https://github.com/elizaos/eliza/blob/4d1e66cbf7deea87a8a67525670a963cd00108bc/packages/core/src/types.ts#L133)
4 changes: 4 additions & 0 deletions docs/docs/api/type-aliases/Models.md
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,10 @@

> **heurist**: [`Model`](Model.md)
### livepeer

> **livepeer**: [`Model`](Model.md)
## Defined in

[packages/core/src/types.ts:105](https://github.com/elizaos/eliza/blob/7fcf54e7fb2ba027d110afcc319c0b01b3f181dc/packages/core/src/types.ts#L105)
3 changes: 3 additions & 0 deletions docs/docs/guides/configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,9 @@ TOGETHER_API_KEY=
# Heurist Settings
HEURIST_API_KEY=

# Livepeer Settings
LIVEPEER_GATEWAY_URL=

# Local Model Settings
XAI_MODEL=meta-llama/Llama-3.1-7b-instruct
```
Expand Down
6 changes: 4 additions & 2 deletions docs/docs/quickstart.md
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ pnpm build
OPENAI_API_KEY= # OpenAI API key
GROK_API_KEY= # Grok API key
ELEVENLABS_XI_API_KEY= # API key from elevenlabs (for voice)
LIVEPEER_GATEWAY_URL= # Livepeer gateway URL
```
## Choose Your Model
Expand All @@ -94,6 +95,7 @@ Eliza supports multiple AI models:
- **Llama**: Set `XAI_MODEL=meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo`
- **Grok**: Set `XAI_MODEL=grok-beta`
- **OpenAI**: Set `XAI_MODEL=gpt-4o-mini` or `gpt-4o`
- **Livepeer**: Set `LIVEPEER_IMAGE_MODEL` to your chosen Livepeer image model, available models [here](https://livepeer-eliza.com/)
You set which model to use inside the character JSON file
Expand Down Expand Up @@ -216,8 +218,8 @@ pnpm start --characters="characters/trump.character.json,characters/tate.charact
- Ensure Node.js 23.3.0 is installed
- Use `node -v` to check version
- Consider using [nvm](https://github.com/nvm-sh/nvm) to manage Node versions
NOTE: pnpm may be bundled with a different node version, ignoring nvm. If this is the case, you can use

NOTE: pnpm may be bundled with a different node version, ignoring nvm. If this is the case, you can use
```bash
pnpm env use --global 23.3.0
```
Expand Down
157 changes: 157 additions & 0 deletions packages/client-direct/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -445,6 +445,163 @@ export class DirectClient {
}
}
);

// POST /:agentId/speak — run `text` through the agent message pipeline
// (same flow as the /message endpoint), then synthesize the agent's reply
// with the ElevenLabs TTS API and respond with MP3 audio.
this.app.post("/:agentId/speak", async (req, res) => {
    const agentId = req.params.agentId;
    const roomId = stringToUuid(req.body.roomId ?? "default-room-" + agentId);
    const userId = stringToUuid(req.body.userId ?? "user");
    const text = req.body.text;

    if (!text) {
        res.status(400).send("No text provided");
        return;
    }

    let runtime = this.agents.get(agentId);

    // If the path segment is not a known agent id, fall back to a
    // case-insensitive lookup by character name.
    if (!runtime) {
        runtime = Array.from(this.agents.values()).find(
            (a) => a.character.name.toLowerCase() === agentId.toLowerCase()
        );
    }

    if (!runtime) {
        res.status(404).send("Agent not found");
        return;
    }

    try {
        // Make sure the user/room/agent connection exists before writing memories.
        await runtime.ensureConnection(
            userId,
            roomId,
            req.body.userName,
            req.body.name,
            "direct"
        );

        const messageId = stringToUuid(Date.now().toString());

        const content: Content = {
            text,
            attachments: [],
            source: "direct",
            inReplyTo: undefined,
        };

        const userMessage = {
            content,
            userId,
            roomId,
            agentId: runtime.agentId,
        };

        const memory: Memory = {
            id: messageId,
            agentId: runtime.agentId,
            userId,
            roomId,
            content,
            createdAt: Date.now(),
        };

        await runtime.messageManager.createMemory(memory);

        const state = await runtime.composeState(userMessage, {
            agentName: runtime.character.name,
        });

        const context = composeContext({
            state,
            template: messageHandlerTemplate,
        });

        const response = await generateMessageResponse({
            runtime: runtime,
            context,
            modelClass: ModelClass.LARGE,
        });

        // Bail out BEFORE persisting the reply: previously a null response
        // was saved to memory and only rejected afterwards.
        if (!response) {
            res.status(500).send("No response from generateMessageResponse");
            return;
        }

        // Save the agent's reply to memory.
        const responseMessage = {
            ...userMessage,
            userId: runtime.agentId,
            content: response,
        };

        await runtime.messageManager.createMemory(responseMessage);

        await runtime.evaluate(memory, state);

        // Run any actions the response triggered. The callback just
        // acknowledges with the original memory; nothing extra is stored
        // here (the previously captured `message` was never used).
        await runtime.processActions(
            memory,
            [responseMessage],
            state,
            async () => [memory]
        );

        // Convert the reply text to speech via ElevenLabs.
        const textToSpeak = response.text;
        const elevenLabsApiUrl = `https://api.elevenlabs.io/v1/text-to-speech/${process.env.ELEVENLABS_VOICE_ID}`;
        const apiKey = process.env.ELEVENLABS_XI_API_KEY;

        if (!apiKey) {
            throw new Error("ELEVENLABS_XI_API_KEY not configured");
        }

        const speechResponse = await fetch(elevenLabsApiUrl, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "xi-api-key": apiKey,
            },
            body: JSON.stringify({
                text: textToSpeak,
                model_id:
                    process.env.ELEVENLABS_MODEL_ID || "eleven_multilingual_v2",
                voice_settings: {
                    stability: parseFloat(
                        process.env.ELEVENLABS_VOICE_STABILITY || "0.5"
                    ),
                    similarity_boost: parseFloat(
                        process.env.ELEVENLABS_VOICE_SIMILARITY_BOOST || "0.9"
                    ),
                    style: parseFloat(
                        process.env.ELEVENLABS_VOICE_STYLE || "0.66"
                    ),
                    use_speaker_boost:
                        process.env.ELEVENLABS_VOICE_USE_SPEAKER_BOOST ===
                        "true",
                },
            }),
        });

        if (!speechResponse.ok) {
            throw new Error(
                `ElevenLabs API error: ${speechResponse.statusText}`
            );
        }

        const audioBuffer = await speechResponse.arrayBuffer();

        // The whole buffer is sent in one shot, so let Express compute
        // Content-Length; a manual "Transfer-Encoding: chunked" header
        // conflicts with that.
        res.set({ "Content-Type": "audio/mpeg" });

        res.send(Buffer.from(audioBuffer));
    } catch (error) {
        console.error("Error processing message or generating speech:", error);
        res.status(500).json({
            error: "Error processing message or generating speech",
            // `error` is `unknown` in a catch clause under strict mode;
            // narrow before reading `.message`.
            details:
                error instanceof Error ? error.message : String(error),
        });
    }
});
}

// agent/src/index.ts:startAgent calls this
Expand Down
7 changes: 2 additions & 5 deletions packages/client-github/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -82,11 +82,8 @@ export class GitHubClient {
`Successfully cloned repository from ${repositoryUrl}`
);
return;
} catch (error) {
elizaLogger.error(
`Failed to clone repository from ${repositoryUrl}. Retrying...`,
error
);
} catch {
elizaLogger.error(`Failed to clone repository from ${repositoryUrl}. Retrying...`);
retries++;
if (retries === maxRetries) {
throw new Error(
Expand Down
28 changes: 14 additions & 14 deletions packages/client-twitter/src/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -171,10 +171,10 @@ export async function sendTweet(
twitterUsername: string,
inReplyTo: string
): Promise<Memory[]> {
const tweetChunks = splitTweetContent(
content.text,
client.twitterConfig.MAX_TWEET_LENGTH
);
const maxTweetLength = client.twitterConfig.MAX_TWEET_LENGTH;
const isLongTweet = maxTweetLength > 280;

const tweetChunks = splitTweetContent(content.text, maxTweetLength);
const sentTweets: Tweet[] = [];
let previousTweetId = inReplyTo;

Expand Down Expand Up @@ -212,20 +212,20 @@ export async function sendTweet(
})
);
}
const result = await client.requestQueue.add(
async () =>
await client.twitterClient.sendTweet(
chunk.trim(),
previousTweetId,
mediaData
)
const result = await client.requestQueue.add(async () =>
isLongTweet
? client.twitterClient.sendLongTweet(chunk.trim(), previousTweetId, mediaData)
: client.twitterClient.sendTweet(chunk.trim(), previousTweetId, mediaData)
);

const body = await result.json();
const tweetResult = isLongTweet
? body.data.notetweet_create.tweet_results.result
: body.data.create_tweet.tweet_results.result;

// if we have a response
if (body?.data?.create_tweet?.tweet_results?.result) {
if (tweetResult) {
// Parse the response
const tweetResult = body.data.create_tweet.tweet_results.result;
const finalTweet: Tweet = {
id: tweetResult.rest_id,
text: tweetResult.legacy.full_text,
Expand All @@ -245,7 +245,7 @@ export async function sendTweet(
sentTweets.push(finalTweet);
previousTweetId = finalTweet.id;
} else {
console.error("Error sending chunk", chunk, "response:", body);
elizaLogger.error("Error sending tweet chunk:", { chunk, response: body });
}

// Wait a bit between tweets to avoid rate limiting issues
Expand Down
2 changes: 1 addition & 1 deletion packages/core/src/embedding.ts
Original file line number Diff line number Diff line change
Expand Up @@ -189,7 +189,7 @@ export async function embed(runtime: IAgentRuntime, input: string) {
if (config.provider === EmbeddingProvider.OpenAI) {
return await getRemoteEmbedding(input, {
model: config.model,
endpoint: "https://api.openai.com/v1",
endpoint: settings.OPENAI_API_URL || "https://api.openai.com/v1",
apiKey: settings.OPENAI_API_KEY,
dimensions: config.dimensions,
});
Expand Down
Loading

0 comments on commit c1624b8

Please sign in to comment.