Skip to content

Commit

Permalink
Merge pull request #93 from small-thinking/async-agent
Browse files Browse the repository at this point in the history
Sync agent
  • Loading branch information
yxjiang authored Jul 6, 2024
2 parents b143f34 + 21004a5 commit d4ab6d5
Show file tree
Hide file tree
Showing 4 changed files with 372 additions and 32 deletions.
140 changes: 118 additions & 22 deletions polymind/core/agent.py
Original file line number Diff line number Diff line change
@@ -1,64 +1,160 @@
from abc import ABC, abstractmethod
from typing import Dict

from pydantic import BaseModel, Field

from polymind.core.logger import Logger
from polymind.core.message import Message
from polymind.core.tool import BaseTool, LLMTool
from polymind.core.tool import (BaseTool, LLMTool, OptimizableBaseTool,
SyncLLMTool)


class AbstractAgent(BaseModel, ABC):
    """Common base for all agent types.

    Defines the structure and interface shared by synchronous and
    asynchronous agents: the shared attributes, the input preprocessing
    hook, and the abstract execution methods subclasses must implement.
    """

    agent_name: str
    # Persona of the agent indicates the role of the agent.
    persona: str
    tools: Dict[str, BaseTool] = Field(default=None, description="The tools that the agent can use.")
    reasoner: LLMTool = Field(default=None, description="The reasoner that will be used in the thought process.")

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Per-instance logger used to trace the agent's thought process.
        self._logger = Logger(__file__)

    def __str__(self) -> str:
        return self.agent_name

    def _input_preprocess(self, input: Message) -> None:
        """Preprocess the input message before the agent starts working.

        For now the only preprocessing step is attaching the agent's
        persona to the input message.

        Args:
            input (Message): The input message to preprocess.
        """
        input.content["persona"] = self.persona

    @abstractmethod
    def _execute(self, input: Message) -> Message:
        """Execute the agent and return the result.

        Args:
            input (Message): The input message to process.

        Returns:
            Message: The result of the agent's execution.
        """
        pass

    @abstractmethod
    def __call__(self, input: Message) -> Message:
        """Enable the agent to start working.

        Args:
            input (Message): The input message to process.

        Returns:
            Message: The result of the agent's work.
        """
        pass


class Agent(AbstractAgent):
    """Synchronous agent implementation.

    A blocking agent that uses OptimizableBaseTool instances as its tools
    and a SyncLLMTool as its reasoner.
    """

    tools: Dict[str, OptimizableBaseTool] = Field(default=None, description="The tools that the agent can use.")
    reasoner: SyncLLMTool = Field(default=None, description="The reasoner that will be used in the thought process.")

    def _execute(self, input: Message) -> Message:
        """Run the agent's thought process synchronously.

        Args:
            input (Message): The input message to process.

        Returns:
            Message: The result of the agent's execution.

        Raises:
            ValueError: If the input message doesn't contain the 'requirement' field.
        """
        if "requirement" not in input.content:
            raise ValueError("The input message must contain the 'requirement' field.")

        requirement = input.content["requirement"]
        self._logger.thought_process_log(f"[{self.agent_name}], your requirement is: {requirement}")

        # Placeholder thought process — tool and reasoner integration pending.
        return Message(content={"output": f"Processed requirement: {requirement}"})

    def __call__(self, input: Message) -> Message:
        """Preprocess the input, then execute the agent synchronously.

        Args:
            input (Message): The input message to process.

        Returns:
            Message: The result of the agent's work.
        """
        self._input_preprocess(input=input)
        return self._execute(input=input)


class AsyncAgent(AbstractAgent):
    """Asynchronous agent implementation.

    An awaitable agent that uses BaseTool instances as its tools and an
    LLMTool as its reasoner.
    """

    tools: Dict[str, BaseTool] = Field(default=None, description="The tools that the agent can use.")
    reasoner: LLMTool = Field(default=None, description="The reasoner that will be used in the thought process.")

    async def _execute(self, input: Message) -> Message:
        """Run the agent's thought process asynchronously.

        Args:
            input (Message): The input message to process.

        Returns:
            Message: The result of the agent's execution.

        Raises:
            ValueError: If the input message doesn't contain the 'requirement' field.
        """
        if "requirement" not in input.content:
            raise ValueError("The input message must contain the 'requirement' field.")

        requirement = input.content["requirement"]
        self._logger.thought_process_log(f"[{self.agent_name}], your requirement is: {requirement}")

        # Placeholder thought process — async tool and reasoner integration pending.
        return Message(content={"output": f"Processed requirement: {requirement}"})

    async def __call__(self, input: Message) -> Message:
        """Preprocess the input, then execute the agent asynchronously.

        Args:
            input (Message): The input message to process.

        Returns:
            Message: The result of the agent's work.
        """
        self._input_preprocess(input=input)
        return await self._execute(input=input)
123 changes: 123 additions & 0 deletions polymind/core/tool.py
Original file line number Diff line number Diff line change
Expand Up @@ -505,6 +505,129 @@ async def _execute(self, input: Message) -> Message:
return response_message


class SyncLLMTool(OptimizableBaseTool):
    """Synchronous LLM tool defines the basic properties of the language model tools.
    This tool will get the prompt from "input" and return the response to "output".
    """

    llm_name: str = Field(..., description="The name of the model.")
    max_tokens: int = Field(..., description="The maximum number of tokens for the chat.")
    temperature: float = Field(default=1.0, description="The temperature for the chat.")
    top_p: float = Field(
        default=0.1,
        description="The top p for the chat. Top p is used to prevent the model from generating unlikely words.",
    )
    # NOTE(review): default is None, so the annotation should arguably be Optional[str] — confirm.
    stop: str = Field(default=None, description="The stop sequence for the chat.")

    def __init__(self, **kwargs):
        """Initialize the tool, its logger, and the language model client."""
        super().__init__(**kwargs)
        self._logger = Logger(__file__)
        self._set_client()

    def _set_client(self):
        """Set the client for the language model."""
        # Implement the synchronous client setup here
        pass

    def input_spec(self) -> List[Param]:
        """Declare the accepted input parameters; only "input" is required."""
        return [
            Param(
                name="input",
                type="str",
                required=True,
                description="The prompt for the chat.",
                example="hello, how are you?",
            ),
            Param(
                name="system_prompt",
                type="str",
                required=False,
                example="You are a helpful AI assistant.",
                description="The system prompt for the chat.",
            ),
            Param(
                name="max_tokens",
                type="int",
                required=False,
                example="1500",
                description="The maximum number of tokens for the chat.",
            ),
            Param(
                name="temperature",
                type="float",
                required=False,
                example="0.7",
                description="The temperature for the chat.",
            ),
            Param(
                name="top_p",
                type="float",
                required=False,
                example="0.1",
                description="The top p for the chat.",
            ),
        ]

    def output_spec(self) -> List[Param]:
        """Declare the produced output parameters: a single "output" string."""
        return [
            Param(
                name="output",
                type="str",
                required=True,
                description="The response from the chat.",
            ),
        ]

    def _invoke(self, input: Message) -> Message:
        """Invoke the language model with the input message and return the response message.
        Args:
            input (Message): The input message to the language model. The message should contain the below keys:
                - input: The prompt for the chat.
                - system_prompt: The system prompt for the chat.
                - max_tokens: The maximum number of tokens for the chat.
                - temperature: The temperature for the chat.
                - top_p: The top p for the chat.
                - stop: The stop sequence for the chat (only present when configured).
        Returns:
            Message: The response message from the language model. The actual content is in the "output" field.
        """
        # Implement the synchronous invocation of the language model here
        # This is a placeholder implementation
        return Message(content={"output": "Synchronous LLM response"})

    def forward(self, **kwargs) -> Message:
        """Execute the tool and return the result synchronously.

        Builds the request Message from kwargs (falling back to the tool's
        configured defaults), delegates to _invoke, and validates that the
        response carries an "output" key.

        Raises:
            ValueError: If the "input" prompt is empty, or the response
                lacks the "output" key.
        """
        # Timestamp is attached so the model/prompt can reference "now".
        current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        # Validate and prepare input
        prompt = kwargs.get("input", "")
        system_prompt = kwargs.get("system_prompt", "")
        if not prompt:
            raise ValueError("Prompt in the field 'input' cannot be empty.")

        # Per-call kwargs override the tool-level defaults for sampling params.
        input_message = Message(
            content={
                "input": prompt,
                "system_prompt": system_prompt,
                "max_tokens": kwargs.get("max_tokens", self.max_tokens),
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "datetime": current_datetime,
            }
        )

        # The stop sequence is only forwarded when configured (non-empty).
        if self.stop:
            input_message.content["stop"] = self.stop

        response_message = self._invoke(input_message)
        if "output" not in response_message.content:
            raise ValueError("The response message must contain the 'output' key.")

        return response_message


# Combined metaclass so a class can inherit from both Module and BaseTool
# even if their metaclasses differ (avoids the metaclass-conflict TypeError).
class CombinedMeta(type(Module), type(BaseTool)):
    pass

Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "polymind"
version = "0.0.53" # Update this version before publishing to PyPI
description = "PolyMind is a customizable collaborative multi-agent framework for collective intelligence and distributed problem solving."
authors = ["TechTao"]
license = "MIT License"
Expand Down
Loading

0 comments on commit d4ab6d5

Please sign in to comment.