Skip to content

Commit

Permalink
Major change to move to OpenAI functions (#33)
Browse files Browse the repository at this point in the history
Change includes

OpenAI functions schema for all commands
Moved to functions with updated prompt
Changed the primary main loop model to GPT-4 as it's much more reliable in reasoning
Fixes issues related to development with the remix package update.

Solves issues #24 #28 #31
  • Loading branch information
zabirauf authored Oct 27, 2023
1 parent 3898c09 commit 2388e3d
Show file tree
Hide file tree
Showing 29 changed files with 3,184 additions and 3,438 deletions.
70 changes: 61 additions & 9 deletions AutoGPT/commandPlugins/AgentCommandPlugins.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
import { callLLMChatCompletion } from 'AutoGPT/utils/llmUtils';
import { CommandPlugin } from './CommandPlugin';
import { getConfig } from 'AutoGPT/utils/config';
import type { CommandPlugin } from './CommandPlugin';
import {
callLLMChatCompletion,
CallLLMChatCompletionResponseStatus,
} from "AutoGPT/utils/llmUtils";
import type { LLMMessage, LLMModel } from "AutoGPT/utils/types";

interface Agent {
Expand All @@ -21,7 +24,7 @@ async function startAgent(
model: LLMModel
) {
const firstMessage = `You are ${name}. Respond with: "Acknowledged".`;
const { key, agentReply } = await createAgent(
const { key } = await createAgent(
name,
task,
firstMessage,
Expand All @@ -41,9 +44,15 @@ async function createAgent(
): Promise<{ key: string; agentReply: string }> {
const messages: LLMMessage[] = [{ role: "user", content: prompt }];

const agentReply = await callLLMChatCompletion(messages, model);
const agentReply = await callLLMChatCompletion({ messages, model });

messages.push({ role: "assistant", content: agentReply });
messages.push({
role: "assistant",
content:
agentReply.status === CallLLMChatCompletionResponseStatus.Success
? agentReply.content
: "error",
});

const agent: Agent = {
name,
Expand All @@ -56,7 +65,13 @@ async function createAgent(

agents[key] = agent;

return { key, agentReply };
return {
key,
agentReply:
agentReply.status === CallLLMChatCompletionResponseStatus.Success
? agentReply.content
: "error",
};
}

async function messageAgent(
Expand All @@ -73,10 +88,10 @@ async function messageAgent(
content: `|Start of data|\n${data}\n|End of data|\n\n${message}`,
});

const agentReply = await callLLMChatCompletion(messages, model);
const agentReply = await callLLMChatCompletion({ messages, model });

messages.push({ role: "assistant", content: agentReply });
return agentReply;
messages.push({ role: "assistant", content: agentReply.status === CallLLMChatCompletionResponseStatus.Success ? agentReply.content : "error" });
return agentReply.status === CallLLMChatCompletionResponseStatus.Success ? agentReply.content : "error";
}

function listAgents(): [string, string][] {
Expand All @@ -102,6 +117,22 @@ const AgentCommandPlugins: CommandPlugin[] = [
prompt: "prompt",
data: "data_for_prompt",
},
argumentsV2: {
required: ["name", "task", "prompt", "data"],
args: {
name: { type: "string", description: "Name of the agent" },
task: {
type: "string",
description:
"Short description of what is the task that agent is going to perform",
},
prompt: { type: "string", description: "The prompt for the agent" },
data: {
type: "string",
description: "Data or context that agent should use",
},
},
},
execute: (args) =>
startAgent(
args["name"],
Expand All @@ -119,12 +150,27 @@ const AgentCommandPlugins: CommandPlugin[] = [
message: "message",
data: "data_for_message",
},
argumentsV2: {
required: ["key", "message", "data"],
args: {
      key: { type: "integer", description: "The key of the agent to message" },
message: { type: "string", description: "The message for the agent" },
data: {
type: "string",
description: "Data or context that agent should use",
},
},
},
execute: (args) => messageAgent(args["key"], args["message"], args["data"]),
},
{
command: "list_agents",
name: "List GPT Agents",
arguments: {},
argumentsV2: {
required: [],
args: {},
},
execute: async (args) => JSON.stringify(listAgents()),
},
{
Expand All @@ -133,6 +179,12 @@ const AgentCommandPlugins: CommandPlugin[] = [
arguments: {
key: "key",
},
argumentsV2: {
required: ["key"],
args: {
key: { type: "integer", description: "The key of the agent to delete" },
},
},
execute: async (args) =>
deleteAgent(args["key"])
? `Agent ${args["key"]} deleted.`
Expand Down
48 changes: 31 additions & 17 deletions AutoGPT/commandPlugins/BrowserCommandPlugins.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import { callLLMChatCompletion } from 'AutoGPT/utils/llmUtils';
import { CommandPlugin } from './CommandPlugin';
import { callLLMChatCompletion, CallLLMChatCompletionResponseStatus } from 'AutoGPT/utils/llmUtils';
import { countStringTokens } from 'AutoGPT/utils/tokenCounter';
import { getConfig } from 'AutoGPT/utils/config';
import type { CommandPlugin } from './CommandPlugin';
import type { LLMMessage, LLMModel } from "AutoGPT/utils/types";

let callProxyFn: (
Expand Down Expand Up @@ -87,7 +87,11 @@ function scrapSearchResults(query: string): Promise<string | string[]> {
);
}

function* splitText(text: string, model: LLMModel, maxTokens = 3000): Generator<string> {
function* splitText(
text: string,
model: LLMModel,
maxTokens = 3000
): Generator<string> {
const paragraphs = text.split("\n");
let currentLength = 0;
let currentChunk: string[] = [];
Expand All @@ -96,11 +100,11 @@ function* splitText(text: string, model: LLMModel, maxTokens = 3000): Generator<
const tokensInParagraph = countStringTokens(paragraph, model);
if (currentLength + tokensInParagraph <= maxTokens) {
currentChunk.push(paragraph);
currentLength += tokensInParagraph
currentLength += tokensInParagraph;
} else {
yield currentChunk.join("\n");
currentChunk = [paragraph];
currentLength = tokensInParagraph
currentLength = tokensInParagraph;
}
}

Expand Down Expand Up @@ -137,14 +141,13 @@ async function summarizeText(text: string, isWebsite = true): Promise<string> {
},
];

const summary = await callLLMChatCompletion(
const summary = await callLLMChatCompletion({
messages,
currentModel,
undefined /* temperature */,
300 /* maxTokens */
);
model: currentModel,
maxTokens: 300,
});

summaries.push(summary);
summaries.push(summary.status === CallLLMChatCompletionResponseStatus.Success ? summary.content : "error");
}

if (summaries.length === 1) {
Expand All @@ -169,13 +172,12 @@ async function summarizeText(text: string, isWebsite = true): Promise<string> {
},
];

const finalSummary = await callLLMChatCompletion(
const finalSummary = await callLLMChatCompletion({
messages,
currentModel,
undefined /* temperature */,
300 /* maxTokens */
);
return finalSummary;
model: currentModel,
maxTokens: 300,
});
return finalSummary.status === CallLLMChatCompletionResponseStatus.Success ? finalSummary.content : "error";
}

const BrowserCommandPlugins: CommandPlugin[] = [
Expand All @@ -185,6 +187,12 @@ const BrowserCommandPlugins: CommandPlugin[] = [
arguments: {
url: "url",
},
argumentsV2: {
required: ["url"],
args: {
url: { type: "string", description: "The URL of the website to visit" },
},
},
execute: async (args) => {
const url = args["url"];
const websiteText = await scrapText(url);
Expand All @@ -207,6 +215,12 @@ const BrowserCommandPlugins: CommandPlugin[] = [
arguments: {
query: "query",
},
argumentsV2: {
required: ["query"],
args: {
      query: { type: "string", description: "The query to search for" },
},
},
execute: async (args) => {
const result = await scrapSearchResults(args["query"]);

Expand Down
42 changes: 41 additions & 1 deletion AutoGPT/commandPlugins/CodeGenerationCommandPlugin.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import { callAIFunction } from 'AutoGPT/utils/llmUtils';
import { CommandPlugin } from './CommandPlugin';
import { getConfig } from 'AutoGPT/utils/config';
import type { CommandPlugin } from './CommandPlugin';

async function createCode(descriptionOfCode: string): Promise<string> {
const functionString = "function createCode(description: string): string {";
Expand Down Expand Up @@ -78,6 +78,15 @@ const CodeGenerationCommandPlugin: CommandPlugin[] = [
arguments: {
description: "description_of_code_to_create",
},
argumentsV2: {
required: ["description"],
args: {
description: {
type: "string",
description: "Description of the code to create",
},
},
},
execute: (args) => createCode(args["description"]),
},
{
Expand All @@ -86,6 +95,15 @@ const CodeGenerationCommandPlugin: CommandPlugin[] = [
arguments: {
code: "full_code_string",
},
argumentsV2: {
required: ["code"],
args: {
code: {
type: "string",
        description: "Full code to evaluate the result for",
},
},
},
execute: (args) => evaluateCode(args["code"]),
},
{
Expand All @@ -95,6 +113,18 @@ const CodeGenerationCommandPlugin: CommandPlugin[] = [
suggestions: "list_of_suggestions",
code: "full_code_string",
},
argumentsV2: {
required: ["suggestions", "code"],
args: {
code: { type: "string", description: "Full code to improve" },
suggestions: {
type: "array",
items: { type: "string" },
description:
          "List of suggestions which will be used to improve the code",
},
},
},
execute: (args) => improveCode(args["suggestions"], args["code"]),
},
{
Expand All @@ -104,6 +134,16 @@ const CodeGenerationCommandPlugin: CommandPlugin[] = [
code: "full_code_string",
focus: "list_of_focus_areas",
},
argumentsV2: {
required: ["focus", "code"],
args: {
code: { type: "string", description: "Full code to test" },
focus: {
type: "string",
description: "List of focus areas for the test",
},
},
},
execute: (args) => writeTests(args["code"], args["focus"]),
},
];
Expand Down
27 changes: 21 additions & 6 deletions AutoGPT/commandPlugins/CommandPlugin.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,23 @@

export type CommandExecArgs = { [key: string]: any };

type StringExecArg = { type: "string" };
type IntExecArg = { type: "integer" };
type BoolExecArg = { type: "boolean" };
type ArrayExecArg = { type: "array"; items: ExecArgType };
type EnumExecArg = { type: "string"; enum: string[] };

type ExecArgType = StringExecArg | IntExecArg | ArrayExecArg | EnumExecArg | BoolExecArg;

export type CommandExecArgsV2 = {
[key: string]: {
description: string;
} & ExecArgType;
};

export interface CommandPlugin {
command: string;
name: string;
arguments: CommandExecArgs;
execute: (args: CommandExecArgs) => Promise<string>;
}
command: string;
name: string;
arguments: CommandExecArgs;
argumentsV2: { required: string[]; args: CommandExecArgsV2 };
execute: (args: CommandExecArgs) => Promise<string>;
}
Loading

0 comments on commit 2388e3d

Please sign in to comment.