🧬 Models

evoagentx.models

LLMOutputParser

LLMOutputParser(**kwargs)

Bases: Parser

A basic parser for LLM-generated content.

This parser stores the raw text generated by an LLM in the .content attribute and provides methods to extract structured data from this text using different parsing strategies.

Attributes:

- content (str): The raw text generated by the LLM.

Source code in evoagentx/core/module.py
def __init__(self, **kwargs):
    """
    Initializes a BaseModule instance.

    Args:
        **kwargs (Any): Keyword arguments used to initialize the instance

    Raises:
        ValidationError: When parameter validation fails
        Exception: When other errors occur during initialization
    """

    try:
        for field_name, _ in type(self).model_fields.items():
            field_value = kwargs.get(field_name, None)
            if field_value:
                kwargs[field_name] = self._process_data(field_value)
            # if field_value and isinstance(field_value, dict) and "class_name" in field_value:
            #     class_name = field_value.get("class_name")
            #     sub_cls = MODULE_REGISTRY.get_module(cls_name=class_name)
            #     kwargs[field_name] = sub_cls._create_instance(field_value)
        super().__init__(**kwargs) 
        self.init_module()
    except (ValidationError, Exception) as e:
        exception_handler = callback_manager.get_callback("exception_buffer")
        if exception_handler is None:
            error_message = get_base_module_init_error_message(
                cls=self.__class__, 
                data=kwargs, 
                errors=e
            )
            logger.error(error_message)
            raise
        else:
            exception_handler.add(e)

get_attrs classmethod

get_attrs(return_type: bool = False) -> List[Union[str, tuple]]

Returns the attributes of the LLMOutputParser class.

Excludes ["class_name", "content"] by default.

Parameters:

- return_type (bool, default False): Whether to return the type of each attribute along with its name.

Returns:

- List[Union[str, tuple]]: If return_type is True, a list of (name, type) tuples; otherwise, a list of attribute names.

Source code in evoagentx/models/base_model.py
@classmethod
def get_attrs(cls, return_type: bool = False) -> List[Union[str, tuple]]:
    """Returns the attributes of the LLMOutputParser class.

    Excludes ["class_name", "content"] by default.

    Args:
        return_type: Whether to return the type of the attributes along with their names.

    Returns:
        If `return_type` is True, returns a list of tuples where each tuple contains 
        the attribute name and its type. Otherwise, returns a list of attribute names.
    """
    attrs = [] 
    exclude_attrs = ["class_name", "content"]
    for field, field_info in cls.model_fields.items():
        if field not in exclude_attrs:
            if return_type:
                field_type = get_type_name(field_info.annotation)
                attrs.append((field, field_type))
            else:
                attrs.append(field)
    return attrs
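
Example — a minimal sketch of a parser subclass (pydantic-style field annotations are assumed here, as suggested by model_fields in the source above; the import path may differ):

from typing import Optional
from evoagentx.models import LLMOutputParser  # import path assumed

class CodeReview(LLMOutputParser):
    summary: Optional[str] = None
    verdict: Optional[str] = None

CodeReview.get_attrs()                  # ["summary", "verdict"]
CodeReview.get_attrs(return_type=True)  # [("summary", <type name>), ("verdict", <type name>)]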

get_attr_descriptions classmethod

get_attr_descriptions() -> dict

Returns the attributes and their descriptions.

Returns:

- dict: A dictionary mapping attribute names to their descriptions.

Source code in evoagentx/models/base_model.py
@classmethod
def get_attr_descriptions(cls) -> dict:
    """Returns the attributes and their descriptions.

    Returns:
        A dictionary mapping attribute names to their descriptions.
    """
    attrs = cls.get_attrs()
    results = {} 
    for field_name, field_info in cls.model_fields.items():
        if field_name not in attrs:
            continue
        field_desc = field_info.description if field_info.description is not None else "None"
        results[field_name] = field_desc
    return results

get_content_data classmethod

get_content_data(content: str, parse_mode: str = 'json', parse_func: Optional[Callable] = None, **kwargs) -> dict

Parses LLM-generated content into a dictionary.

This method takes content from an LLM response and converts it to a structured dictionary based on the specified parsing mode.

Parameters:

- content (str, required): The content to parse.
- parse_mode (str, default 'json'): The mode used to parse the content. Must be one of:
    - 'str': Assigns the raw text content to all attributes of the parser.
    - 'json': Extracts and parses JSON objects from the LLM output, returning a dictionary parsed from the first valid JSON string.
    - 'xml': Parses content using XML tags, returning a dictionary keyed by tag name.
    - 'title': Parses content with Markdown-style headings.
    - 'custom': Uses custom parsing logic; requires the `parse_func` parameter as a custom parsing function.
- parse_func (Optional[Callable], default None): The function used to parse the content; only valid when parse_mode is 'custom'.
- **kwargs (Any): Additional arguments passed to the parsing function.

Returns:

- dict: The parsed content as a dictionary.

Raises:

- ValueError: If parse_mode is invalid, or if parse_func is not provided when parse_mode is 'custom'.

Source code in evoagentx/models/base_model.py
@classmethod
def get_content_data(cls, content: str, parse_mode: str = "json", parse_func: Optional[Callable] = None, **kwargs) -> dict:
    """Parses LLM-generated content into a dictionary.

    This method takes content from an LLM response and converts it to a structured
    dictionary based on the specified parsing mode.

    Args:
        content: The content to parse.
        parse_mode: The mode to parse the content. Must be one of:
            - 'str': Assigns the raw text content to all attributes of the parser. 
            - 'json': Extracts and parses JSON objects from LLM output. It will return a dictionary parsed from the first valid JSON string.
            - 'xml': Parses content using XML tags. It will return a dictionary parsed from the XML tags.
            - 'title': Parses content with Markdown-style headings.
            - 'custom': Uses custom parsing logic. Requires providing `parse_func` parameter as a custom parsing function.
        parse_func: The function to parse the content, only valid when parse_mode is 'custom'.
        **kwargs (Any): Additional arguments passed to the parsing function.

    Returns:
        The parsed content as a dictionary.

    Raises:
        ValueError: If parse_mode is invalid or if parse_func is not provided when parse_mode is 'custom'.
    """
    attrs = cls.get_attrs()
    if len(attrs) <= 0:
        return {} 

    if parse_mode == "str":
        parse_func = cls._parse_str_content
    elif parse_mode == "json":
        parse_func = cls._parse_json_content
    elif parse_mode == "xml":
        parse_func = cls._parse_xml_content
    elif parse_mode == "title":
        parse_func = cls._parse_title_content
    elif parse_mode == "custom":
        if parse_func is None:
            raise ValueError("`parse_func` must be provided when `parse_mode` is 'custom'.")
        # obtain the function inputs
        signature = inspect.signature(parse_func)
        if "content" not in signature.parameters:
            raise ValueError("`parse_func` must have an input argument `content`.")

        func_args = {}
        func_args["content"] = content
        for param_name, param in signature.parameters.items():
            if param_name == "content":
                continue  # Already set
            if param_name in kwargs:
                func_args[param_name] = kwargs[param_name]
        data = parse_func(**func_args)
        if not isinstance(data, dict):
            raise ValueError(f"The output of `parse_func` must be a dictionary, but found {type(data)}.")
        return data
    else:
        raise ValueError(f"Invalid value '{parse_mode}' detected for `parse_mode`. Available choices: {PARSER_VALID_MODE}")
    data = parse_func(content=content, **kwargs)
    return data
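
Example — a hedged sketch of parse_mode="custom", reusing the CodeReview subclass from the get_attrs example above. The parse function must accept content and return a dict keyed by attribute names:

def split_review(content: str) -> dict:
    # first line becomes `summary`, the remainder becomes `verdict`
    first, _, rest = content.partition("\n")
    return {"summary": first.strip(), "verdict": rest.strip()}

data = CodeReview.get_content_data(
    "Looks good overall.\napprove",
    parse_mode="custom",
    parse_func=split_review,
)
# data == {"summary": "Looks good overall.", "verdict": "approve"}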

parse classmethod

parse(content: str, parse_mode: str = 'json', parse_func: Optional[Callable] = None, **kwargs) -> LLMOutputParser

Parses LLM-generated text into a structured parser instance.

This is the main method for creating parser instances from LLM output.

Parameters:

- content (str, required): The text generated by the LLM.
- parse_mode (str, default 'json'): The mode used to parse the content. Must be one of:
    - 'str': Assigns the raw text content to all attributes of the parser.
    - 'json': Extracts and parses JSON objects from the LLM output; the first valid JSON string is used to create the LLMOutputParser instance.
    - 'xml': Parses content using XML tags, which are used to create the LLMOutputParser instance.
    - 'title': Parses content with Markdown-style headings. The default title format is "## {title}"; you can change it via the `title_format` parameter, a string containing the `{title}` placeholder.
    - 'custom': Uses custom parsing logic; requires the `parse_func` parameter. The `parse_func` must have a parameter named `content` and return a dictionary mapping attribute names to parsed values.
- parse_func (Optional[Callable], default None): The function used to parse the content; only valid when parse_mode is 'custom'.
- **kwargs (Any): Additional arguments passed to the parsing functions, such as `title_format` for parse_mode="title".

Returns:

- LLMOutputParser: An instance containing the parsed data.

Raises:

- ValueError: If parse_mode is invalid or if content is not a string.

Source code in evoagentx/models/base_model.py
@classmethod
def parse(cls, content: str, parse_mode: str = "json", parse_func: Optional[Callable] = None, **kwargs) -> "LLMOutputParser":
    """Parses LLM-generated text into a structured parser instance.

    This is the main method for creating parser instances from LLM output.

    Args:
        content: The text generated by the LLM.
        parse_mode: The mode to parse the content, must be one of:
            - 'str': Assigns the raw text content to all attributes of the parser. 
            - 'json': Extracts and parses JSON objects from LLM output. Uses the first valid JSON string to create an instance of LLMOutputParser.
            - 'xml': Parses content using XML tags. Uses the XML tags to create an instance of LLMOutputParser.
            - 'title': Parses content with Markdown-style headings. Uses the Markdown-style headings to create an instance of LLMOutputParser. The default title format is "## {title}", you can change it by providing `title_format` parameter, which should be a string that contains `{title}` placeholder. 
            - 'custom': Uses custom parsing logic. Requires providing `parse_func` parameter as a custom parsing function. The `parse_func` must have a parameter named `content` and return a dictionary where the keys are the attribute names and the values are the parsed data. 
        parse_func: The function to parse the content, only valid when `parse_mode` is 'custom'.
        **kwargs (Any): Additional arguments passed to parsing functions, such as:
            - `title_format` for `parse_mode="title"`.

    Returns:
        An instance of LLMOutputParser containing the parsed data.

    Raises:
        ValueError: If parse_mode is invalid or if content is not a string.
    """
    if parse_mode not in PARSER_VALID_MODE:
        raise ValueError(f"'{parse_mode}' is an invalid value for `parse_mode`. Available choices: {PARSER_VALID_MODE}.")
    if not isinstance(content, str):
        raise ValueError(f"The input to {cls.__name__}.parse should be a str, but found {type(content)}.")
    data = cls.get_content_data(content=content, parse_mode=parse_mode, parse_func=parse_func, **kwargs)
    data.update({"content": content})
    parser = cls.from_dict(data, **kwargs)
    # parser.content = content
    return parser
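
Example — a usage sketch with the CodeReview subclass above, assuming the default 'json' and 'title' extraction behaves as described:

review = CodeReview.parse('{"summary": "LGTM", "verdict": "approve"}', parse_mode="json")
review.summary                # "LGTM"
review.content                # the raw LLM text

md = "## summary\nLGTM\n## verdict\napprove"
review = CodeReview.parse(md, parse_mode="title", title_format="## {title}")
review.get_structured_data()  # expected: {"summary": "LGTM", "verdict": "approve"}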

__str__

__str__() -> str

Returns a string representation of the parser.

Source code in evoagentx/models/base_model.py
def __str__(self) -> str:
    """
    Returns a string representation of the parser.
    """
    return self.to_str()

to_str

to_str(**kwargs) -> str

Converts the parser to a string.

Source code in evoagentx/models/base_model.py
def to_str(self, **kwargs) -> str:
    """
    Converts the parser to a string.
    """
    return self.content

get_structured_data

get_structured_data() -> dict

Extracts structured data from the parser.

Returns:

- dict: A dictionary containing only the defined attributes and their values, excluding metadata like class_name.

Source code in evoagentx/models/base_model.py
def get_structured_data(self) -> dict:
    """Extracts structured data from the parser.

    Returns:
        A dictionary containing only the defined attributes and their values,
        excluding metadata like class_name.
    """
    attrs = type(self).get_attrs()
    data = self.to_dict(ignore=["class_name"])
    # structured_data = {attr: data[attr] for attr in attrs}
    structured_data = {key: value for key, value in data.items() if key in attrs}
    return structured_data

BaseConfig

BaseConfig(**kwargs)

Bases: BaseModule

Base configuration class that serves as parent for all configuration classes.

A config class should inherit from BaseConfig and declare its attributes and their types; otherwise, it will be an empty config.

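Example — a minimal sketch of a custom config (pydantic-style field declarations are assumed; the import path may differ):

from typing import Optional
from evoagentx.core.base_config import BaseConfig  # import path assumed

class MyServiceConfig(BaseConfig):
    endpoint: str
    timeout: int = 30
    api_key: Optional[str] = None

cfg = MyServiceConfig(endpoint="https://api.example.com", api_key="sk-...")
cfg.get_config_params()                 # field names, excluding 'class_name'
cfg.get_set_params(ignore=["api_key"])  # {"endpoint": "https://api.example.com"}
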
Source code in evoagentx/core/module.py
def __init__(self, **kwargs):
    """
    Initializes a BaseModule instance.

    Args:
        **kwargs (Any): Keyword arguments used to initialize the instance

    Raises:
        ValidationError: When parameter validation fails
        Exception: When other errors occur during initialization
    """

    try:
        for field_name, _ in type(self).model_fields.items():
            field_value = kwargs.get(field_name, None)
            if field_value:
                kwargs[field_name] = self._process_data(field_value)
            # if field_value and isinstance(field_value, dict) and "class_name" in field_value:
            #     class_name = field_value.get("class_name")
            #     sub_cls = MODULE_REGISTRY.get_module(cls_name=class_name)
            #     kwargs[field_name] = sub_cls._create_instance(field_value)
        super().__init__(**kwargs) 
        self.init_module()
    except (ValidationError, Exception) as e:
        exception_handler = callback_manager.get_callback("exception_buffer")
        if exception_handler is None:
            error_message = get_base_module_init_error_message(
                cls=self.__class__, 
                data=kwargs, 
                errors=e
            )
            logger.error(error_message)
            raise
        else:
            exception_handler.add(e)

save

save(path: str, **kwargs) -> str

Save configuration to the specified path.

Parameters:

- path (str, required): The file path to save the configuration.
- **kwargs (Any): Additional keyword arguments passed to the save_module method.

Returns:

- str: The path where the file was saved.

Source code in evoagentx/core/base_config.py
def save(self, path: str, **kwargs)-> str:

    """Save configuration to the specified path.

    Args:
        path: The file path to save the configuration
        **kwargs (Any): Additional keyword arguments passed to save_module method

    Returns:
        str: The path where the file was saved
    """
    return super().save_module(path, **kwargs)

get_config_params

get_config_params() -> List[str]

Get a list of configuration parameters.

Returns:

- List[str]: List of configuration parameter names, excluding 'class_name'.

Source code in evoagentx/core/base_config.py
def get_config_params(self) -> List[str]:
    """Get a list of configuration parameters.

    Returns:
        List[str]: List of configuration parameter names, excluding 'class_name'
    """
    config_params = list(type(self).model_fields.keys())
    config_params.remove("class_name")
    return config_params

get_set_params

get_set_params(ignore: List[str] = []) -> dict

Get a dictionary of explicitly set parameters.

Parameters:

- ignore (List[str], default []): List of parameter names to ignore.

Returns:

- dict: Dictionary of explicitly set parameters, excluding 'class_name' and ignored parameters.

Source code in evoagentx/core/base_config.py
def get_set_params(self, ignore: List[str] = []) -> dict:
    """Get a dictionary of explicitly set parameters.

    Args:
        ignore: List of parameter names to ignore

    Returns:
        dict: Dictionary of explicitly set parameters, excluding 'class_name' and ignored parameters
    """
    explicitly_set_fields = {field: getattr(self, field) for field in self.model_fields_set}
    if self.kwargs:
        explicitly_set_fields.update(self.kwargs)
    for field in ignore:
        explicitly_set_fields.pop(field, None)
    explicitly_set_fields.pop("class_name", None)
    return explicitly_set_fields

LiteLLM

LiteLLM(config: LLMConfig, **kwargs)

Bases: OpenAILLM

Source code in evoagentx/models/base_model.py
def __init__(self, config: LLMConfig, **kwargs):
    """Initializes the LLM with configuration.

    Args:
        config: Configuration object for the LLM.
        **kwargs (Any): Additional keyword arguments.
    """
    self.config = config
    self.kwargs = kwargs
    self.init_model()

init_model

init_model()

Initialize the model based on the configuration.

Source code in evoagentx/models/litellm_model.py
def init_model(self):
    """
    Initialize the model based on the configuration.
    """
    # Check if llm_type is correct
    if self.config.llm_type != "LiteLLM":
        raise ValueError("llm_type must be 'LiteLLM'")

    # Set model and extract the company name
    self.model = self.config.model
    self.api_base = self.config.api_base  # save api_base
    self.api_key = self.config.api_key
    # company = self.model.split("/")[0] if "/" in self.model else "openai"
    company = infer_litellm_company_from_model(self.model)

    if self.config.is_local or company == "local":  # update support local model
        if not self.api_base:
            raise ValueError("api_base is required for local models in LiteLLMConfig")
        # local llm doesn't need API key
        litellm.api_base = self.api_base  # set litellm global api_base
        litellm.api_key = self.api_key
    else:
        # Set environment variables based on the company
        if company == "openai":
            if not self.config.openai_key:
                raise ValueError("OpenAI API key is required for OpenAI models. You should set `openai_key` in LiteLLMConfig")
            os.environ["OPENAI_API_KEY"] = self.config.openai_key
        elif company == "azure":
            if not self.config.azure_key or not self.config.azure_endpoint:
                raise ValueError("Azure OpenAI key and endpoint are required for Azure models. You should set `azure_key` and `azure_endpoint` in LiteLLMConfig")
            os.environ["AZURE_API_KEY"] = self.config.azure_key
            os.environ["AZURE_API_BASE"] = self.config.azure_endpoint
            if self.config.api_version:
                os.environ["AZURE_API_VERSION"] = self.config.api_version
        elif company == "deepseek":
            if not self.config.deepseek_key:
                raise ValueError("DeepSeek API key is required for DeepSeek models. You should set `deepseek_key` in LiteLLMConfig")
            os.environ["DEEPSEEK_API_KEY"] = self.config.deepseek_key
        elif company == "anthropic":
            if not self.config.anthropic_key:
                raise ValueError("Anthropic API key is required for Anthropic models. You should set `anthropic_key` in LiteLLMConfig")
            os.environ["ANTHROPIC_API_KEY"] = self.config.anthropic_key
        elif company == "gemini":
            if not self.config.gemini_key:
                raise ValueError("Gemini API key is required for Gemini models. You should set `gemini_key` in LiteLLMConfig")
            os.environ["GEMINI_API_KEY"] = self.config.gemini_key 
        elif company == "meta_llama":
            if not self.config.meta_llama_key:
                raise ValueError("Meta Llama API key is required for Meta Llama models. You should set `meta_llama_key` in LiteLLMConfig")
            os.environ["LLAMA_API_KEY"] = self.config.meta_llama_key
        elif company == "openrouter":
            if not self.config.openrouter_key:
                raise ValueError("OpenRouter API key is required for OpenRouter models. You should set `openrouter_key` in LiteLLMConfig. You can also set `openrouter_base` in LiteLLMConfig to use a custom base URL [optional]")
            os.environ["OPENROUTER_API_KEY"] = self.config.openrouter_key
            os.environ["OPENROUTER_API_BASE"] = self.config.openrouter_base # [optional]
        elif company == "perplexity":
            if not self.config.perplexity_key:
                raise ValueError("Perplexity API key is required for Perplexity models. You should set `perplexity_key` in LiteLLMConfig")
            os.environ["PERPLEXITYAI_API_KEY"] = self.config.perplexity_key
        elif company == "groq":
            if not self.config.groq_key:
                raise ValueError("Groq API key is required for Groq models. You should set `groq_key` in LiteLLMConfig")
            os.environ["GROQ_API_KEY"] = self.config.groq_key
        else:
            raise ValueError(f"Unsupported company: {company}")

    self._default_ignore_fields = [
        "llm_type", "output_response", "openai_key", "deepseek_key", "anthropic_key", 
        "gemini_key", "meta_llama_key", "openrouter_key", "openrouter_base", "perplexity_key", 
        "groq_key", "api_base", "is_local", "azure_endpoint", "azure_key", "api_version", "api_key"
    ] # parameters in LiteLLMConfig that are not LiteLLM models' input parameters 
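
Example — a hedged usage sketch; the field names follow the checks in init_model above, and the LiteLLMConfig import path is assumed:

from evoagentx.models import LiteLLMConfig, LiteLLM  # import path assumed

config = LiteLLMConfig(
    llm_type="LiteLLM",
    model="anthropic/claude-3-haiku-20240307",  # provider inferred from the model name
    anthropic_key="sk-ant-...",
)
llm = LiteLLM(config)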

single_generate

single_generate(messages: List[dict], **kwargs) -> str

Generate a single response using the completion function.

Parameters:

- messages (List[dict], required): A list of dictionaries representing the conversation history.
- **kwargs (Any): Additional parameters passed to the completion function.

Returns:

- str: A string containing the model's response.

Source code in evoagentx/models/litellm_model.py
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(5))
def single_generate(self, messages: List[dict], **kwargs) -> str:

    """
    Generate a single response using the completion function.

    Args: 
        messages (List[dict]): A list of dictionaries representing the conversation history.
        **kwargs (Any): Additional parameters to be passed to the `completion` function.

    Returns: 
        str: A string containing the model's response.
    """
    stream = kwargs["stream"] if "stream" in kwargs else self.config.stream
    output_response = kwargs["output_response"] if "output_response" in kwargs else self.config.output_response

    try:
        completion_params = self.get_completion_params(**kwargs)
        company = infer_litellm_company_from_model(self.model)
        if self.config.is_local or company == "local":  # update save api_base for local model
            completion_params["api_base"] = self.api_base
        elif company == "azure":  # Add Azure OpenAI specific parameters
            completion_params["api_base"] = self.config.azure_endpoint
            completion_params["api_version"] = self.config.api_version
            completion_params["api_key"] = self.config.azure_key
        response = completion(messages=messages, **completion_params)
        if stream:
            output = self.get_stream_output(response, output_response=output_response)
            cost = self._stream_cost(messages=messages, output=output)
        else:
            output: str = self.get_completion_output(response=response, output_response=output_response)
            cost = self._completion_cost(response=response)
        self._update_cost(cost=cost)

    except Exception as e:
        raise RuntimeError(f"Error during single_generate: {str(e)}")

    return output

batch_generate

batch_generate(batch_messages: List[List[dict]], **kwargs) -> List[str]

Generate responses for a batch of messages.

Parameters:

- batch_messages (List[List[dict]], required): A list of message lists, where each sublist represents a conversation.
- **kwargs (Any): Additional parameters passed to the completion function.

Returns:

- List[str]: A list of responses, one for each conversation.

Source code in evoagentx/models/litellm_model.py
def batch_generate(self, batch_messages: List[List[dict]], **kwargs) -> List[str]:
    """
    Generate responses for a batch of messages.

    Args: 
        batch_messages (List[List[dict]]): A list of message lists, where each sublist represents a conversation.
        **kwargs (Any): Additional parameters to be passed to the `completion` function.

    Returns: 
        List[str]: A list of responses for each conversation.
    """
    results = []
    for messages in batch_messages:
        response = self.single_generate(messages, **kwargs)
        results.append(response)
    return results

single_generate_async async

single_generate_async(messages: List[dict], **kwargs) -> str

Generate a single response using the async completion function.

Parameters:

- messages (List[dict], required): A list of dictionaries representing the conversation history.
- **kwargs (Any): Additional parameters passed to the completion function.

Returns:

- str: A string containing the model's response.

Source code in evoagentx/models/litellm_model.py
async def single_generate_async(self, messages: List[dict], **kwargs) -> str:
    """
    Generate a single response using the async completion function.

    Args: 
        messages (List[dict]): A list of dictionaries representing the conversation history.
        **kwargs (Any): Additional parameters to be passed to the `completion` function.

    Returns: 
        str: A string containing the model's response.
    """
    stream = kwargs["stream"] if "stream" in kwargs else self.config.stream
    output_response = kwargs["output_response"] if "output_response" in kwargs else self.config.output_response

    try:
        completion_params = self.get_completion_params(**kwargs)
        company = infer_litellm_company_from_model(self.model)
        if self.config.is_local or company == "local":  # add api base for local model
            completion_params["api_base"] = self.api_base
        elif company == "azure":  # Add Azure OpenAI specific parameters
            completion_params["api_base"] = self.config.azure_endpoint
            completion_params["api_version"] = self.config.api_version
            completion_params["api_key"] = self.config.azure_key
        response = await acompletion(messages=messages, **completion_params)
        if stream:
            if hasattr(response, "__aiter__"):
                output = await self.get_stream_output_async(response, output_response=output_response)
            else:
                output = self.get_stream_output(response, output_response=output_response)
            cost = self._stream_cost(messages=messages, output=output)
        else:
            output: str = self.get_completion_output(response=response, output_response=output_response)
            cost = self._completion_cost(response=response)
        self._update_cost(cost=cost)
    except Exception as e:
        raise RuntimeError(f"Error during single_generate_async: {str(e)}")

    return output

OpenAILLM

OpenAILLM(config: LLMConfig, **kwargs)

Bases: BaseLLM

Source code in evoagentx/models/base_model.py
def __init__(self, config: LLMConfig, **kwargs):
    """Initializes the LLM with configuration.

    Args:
        config: Configuration object for the LLM.
        **kwargs (Any): Additional keyword arguments.
    """
    self.config = config
    self.kwargs = kwargs
    self.init_model()

get_stream_output

get_stream_output(response: Stream, output_response: bool = True) -> str

Process stream response and return the complete output.

Parameters:

- response (Stream, required): The stream response from OpenAI.
- output_response (bool, default True): Whether to print the response in real time.

Returns:

- str: The complete output text.

Source code in evoagentx/models/openai_model.py
def get_stream_output(self, response: Stream, output_response: bool=True) -> str:
    """
    Process stream response and return the complete output.

    Args:
        response: The stream response from OpenAI
        output_response: Whether to print the response in real-time

    Returns:
        str: The complete output text
    """
    output = ""
    for chunk in response:
        content = chunk.choices[0].delta.content
        if content:
            if output_response:
                print(content, end="", flush=True)
            output += content
    if output_response:
        print("")
    return output

get_stream_output_async async

get_stream_output_async(response, output_response: bool = False) -> str

Process async stream response and return the complete output.

Parameters:

- response (AsyncIterator[ChatCompletionChunk], required): The async stream response from OpenAI.
- output_response (bool, default False): Whether to print the response in real time.

Returns:

- str: The complete output text.

Source code in evoagentx/models/openai_model.py
async def get_stream_output_async(self, response, output_response: bool = False) -> str:
    """
    Process async stream response and return the complete output.

    Args:
        response (AsyncIterator[ChatCompletionChunk]): The async stream response from OpenAI
        output_response (bool): Whether to print the response in real-time


    Returns:
        str: The complete output text
    """
    output = ""
    async for chunk in response:
        content = chunk.choices[0].delta.content
        if content:
            if output_response:
                print(content, end="", flush=True)
            output += content
    if output_response:
        print("")
    return output

BaseLLM

BaseLLM(config: LLMConfig, **kwargs)

Bases: ABC

Abstract base class for Large Language Model implementations.

This class defines the interface that all LLM implementations must follow, providing methods for generating text, formatting messages, and parsing output.

Attributes:

- config: Configuration for the LLM.
- kwargs: Additional keyword arguments provided during initialization.

Initializes the LLM with configuration.

Parameters:

- config (LLMConfig, required): Configuration object for the LLM.
- **kwargs (Any): Additional keyword arguments.
Source code in evoagentx/models/base_model.py
def __init__(self, config: LLMConfig, **kwargs):
    """Initializes the LLM with configuration.

    Args:
        config: Configuration object for the LLM.
        **kwargs (Any): Additional keyword arguments.
    """
    self.config = config
    self.kwargs = kwargs
    self.init_model()

init_model abstractmethod

init_model()

Initializes the underlying model.

This method should be implemented by subclasses to set up the actual LLM.

Source code in evoagentx/models/base_model.py
@abstractmethod
def init_model(self):
    """Initializes the underlying model.

    This method should be implemented by subclasses to set up the actual LLM.
    """
    pass

__deepcopy__

__deepcopy__(memo) -> BaseLLM

Handles deep copying of the LLM instance.

Returns the same instance when deepcopy is called, as LLM instances often cannot be meaningfully deep-copied.

Parameters:

- memo (Dict[int, Any], required): Memo dictionary used by the deepcopy process.

Returns:

- BaseLLM: The same LLM instance.

Source code in evoagentx/models/base_model.py
def __deepcopy__(self, memo) -> "BaseLLM":
    """Handles deep copying of the LLM instance.

    Returns the same instance when deepcopy is called, as LLM instances
    often cannot be meaningfully deep-copied.

    Args:
        memo (Dict[int, Any]): Memo dictionary used by the deepcopy process.

    Returns:
        The same LLM instance.
    """
    # return the same instance when deepcopy
    memo[id(self)] = self
    return self

formulate_messages abstractmethod

formulate_messages(prompts: List[str], system_messages: Optional[List[str]] = None) -> List[List[dict]]

Converts input prompts into the chat format compatible with different LLMs.

Parameters:

- prompts (List[str], required): A list of user prompts to be converted.
- system_messages (Optional[List[str]], default None): An optional list of system messages that provide instructions or context to the model.

Returns:

- List[List[dict]]: A list of message lists, where each inner list contains messages in the chat format required by LLMs.

Source code in evoagentx/models/base_model.py
@abstractmethod
def formulate_messages(self, prompts: List[str], system_messages: Optional[List[str]] = None) -> List[List[dict]]:
    """Converts input prompts into the chat format compatible with different LLMs.

    Args:
        prompts: A list of user prompts that need to be converted.
        system_messages: An optional list of system messages that provide instructions or context to the model.

    Returns:
        A list of message lists, where each inner list contains messages in the chat format required by LLMs. 
    """
    pass

single_generate abstractmethod

single_generate(messages: List[dict], **kwargs) -> str

Generates LLM output for a single set of messages.

Parameters:

- messages (List[dict], required): The input messages to the LLM in chat format.
- **kwargs (Any): Additional keyword arguments for generation settings.

Returns:

- str: The generated output text from the LLM.

Source code in evoagentx/models/base_model.py
@abstractmethod
def single_generate(self, messages: List[dict], **kwargs) -> str:
    """Generates LLM output for a single set of messages.

    Args:
        messages: The input messages to the LLM in chat format.
        **kwargs (Any): Additional keyword arguments for generation settings.

    Returns:
        The generated output text from the LLM.
    """
    pass

batch_generate abstractmethod

batch_generate(batch_messages: List[List[dict]], **kwargs) -> List[str]

Generates outputs for a batch of message sets.

Parameters:

- batch_messages (List[List[dict]], required): A list of message lists, where each inner list contains messages for a single generation.
- **kwargs (Any): Additional keyword arguments for generation settings.

Returns:

- List[str]: A list of generated outputs from the LLM, one for each input message set.

Source code in evoagentx/models/base_model.py
@abstractmethod
def batch_generate(self, batch_messages: List[List[dict]], **kwargs) -> List[str]:
    """Generates outputs for a batch of message sets.

    Args: 
        batch_messages: A list of message lists, where each inner list contains messages for a single generation.
        **kwargs (Any): Additional keyword arguments for generation settings.

    Returns:
        A list of generated outputs from the LLM, one for each input message set.
    """
    pass

single_generate_async async

single_generate_async(messages: List[dict], **kwargs) -> str

Asynchronously generates LLM output for a single set of messages.

This default implementation wraps the synchronous method in an async executor. Subclasses should override this for true async implementation if supported.

Parameters:

- messages (List[dict], required): The input messages to the LLM in chat format.
- **kwargs (Any): Additional keyword arguments for generation settings.

Returns:

- str: The generated output text from the LLM.

Source code in evoagentx/models/base_model.py
async def single_generate_async(self, messages: List[dict], **kwargs) -> str:
    """Asynchronously generates LLM output for a single set of messages.

    This default implementation wraps the synchronous method in an async executor.
    Subclasses should override this for true async implementation if supported.

    Args:
        messages: The input messages to the LLM in chat format.
        **kwargs (Any): Additional keyword arguments for generation settings.

    Returns:
        The generated output text from the LLM.
    """
    # Default implementation for backward compatibility.
    # Note: run_in_executor does not forward keyword arguments, so wrap the
    # call in a lambda to pass **kwargs through.
    loop = asyncio.get_event_loop()
    result = await loop.run_in_executor(None, lambda: self.single_generate(messages, **kwargs))
    return result

batch_generate_async async

batch_generate_async(batch_messages: List[List[dict]], **kwargs) -> List[str]

Asynchronously generates outputs for a batch of message sets.

This default implementation runs each generation as a separate async task. Subclasses should override this for more efficient async batching if supported.

Parameters:

- batch_messages (List[List[dict]], required): A list of message lists, where each inner list contains messages for a single generation.
- **kwargs (Any): Additional keyword arguments for generation settings.

Returns:

- List[str]: A list of generated outputs from the LLM, one for each input message set.

Source code in evoagentx/models/base_model.py
async def batch_generate_async(self, batch_messages: List[List[dict]], **kwargs) -> List[str]:
    """Asynchronously generates outputs for a batch of message sets.

    This default implementation runs each generation as a separate async task.
    Subclasses should override this for more efficient async batching if supported.

    Args: 
        batch_messages: A list of message lists, where each inner list contains messages for a single generation.
        **kwargs (Any): Additional keyword arguments for generation settings.

    Returns:
        A list of generated outputs from the LLM, one for each input message set.
    """
    # Default implementation for backward compatibility
    tasks = [self.single_generate_async(messages, **kwargs) for messages in batch_messages]
    return await asyncio.gather(*tasks)

parse_generated_text

parse_generated_text(text: str, parser: Optional[Type[LLMOutputParser]] = None, parse_mode: Optional[str] = 'json', parse_func: Optional[Callable] = None, **kwargs) -> LLMOutputParser

Parses generated text into a structured output using a parser.

Parameters:

- text (str, required): The text generated by the LLM.
- parser (Optional[Type[LLMOutputParser]], default None): An LLMOutputParser class to use for parsing. If None, the default LLMOutputParser is used.
- parse_mode (Optional[str], default 'json'): The mode to use for parsing; must be a parse_mode supported by the parser.
- parse_func (Optional[Callable], default None): The function used to parse the content; only valid when parse_mode is 'custom'.
- **kwargs (Any): Additional arguments passed to the parser.

Returns:

- LLMOutputParser: An instance containing the parsed data.

Source code in evoagentx/models/base_model.py
def parse_generated_text(self, text: str, parser: Optional[Type[LLMOutputParser]]=None, parse_mode: Optional[str] = "json", parse_func: Optional[Callable] = None, **kwargs) -> LLMOutputParser:
    """Parses generated text into a structured output using a parser.

    Args: 
        text: The text generated by the LLM.
        parser: An LLMOutputParser class to use for parsing. If None, the default LLMOutputParser is used.
        parse_mode: The mode to use for parsing, must be the `parse_mode` supported by the `parser`. 
        parse_func: The function used to parse the content; only valid when `parse_mode` is 'custom'.
        **kwargs (Any): Additional arguments passed to the parser.

    Returns:
        An LLMOutputParser instance containing the parsed data.
    """
    if not parser:
        parser = LLMOutputParser
    return parser.parse(text, parse_mode=parse_mode, parse_func=parse_func, **kwargs)

parse_generated_texts

parse_generated_texts(texts: List[str], parser: Optional[Type[LLMOutputParser]] = None, parse_mode: Optional[str] = 'json', parse_func: Optional[Callable] = None, **kwargs) -> List[LLMOutputParser]

Parses multiple generated texts into structured outputs.

Parameters:

- texts (List[str], required): A list of texts generated by the LLM.
- parser (Optional[Type[LLMOutputParser]], default None): An LLMOutputParser class to use for parsing.
- parse_mode (Optional[str], default 'json'): The mode to use for parsing; must be a parse_mode supported by the parser.
- parse_func (Optional[Callable], default None): The function used to parse the content; only valid when parse_mode is 'custom'.
- **kwargs (Any): Additional arguments passed to the parser.

Returns:

- List[LLMOutputParser]: A list of instances containing the parsed data.

Source code in evoagentx/models/base_model.py
def parse_generated_texts(self, texts: List[str], parser: Optional[Type[LLMOutputParser]]=None, parse_mode: Optional[str] = "json", parse_func: Optional[Callable] = None, **kwargs) -> List[LLMOutputParser]:
    """Parses multiple generated texts into structured outputs.

    Args:
        texts: A list of texts generated by the LLM.
        parser: An LLMOutputParser class to use for parsing.
        parse_mode: The mode to use for parsing, must be the `parse_mode` supported by the `parser`. 
        parse_func: The function used to parse the content; only valid when `parse_mode` is 'custom'.
        **kwargs (Any): Additional arguments passed to the parser.

    Returns:
        A list of LLMOutputParser instances containing the parsed data.
    """
    parsed_results = [self.parse_generated_text(text=text, parser=parser, parse_mode=parse_mode, parse_func=parse_func, **kwargs) for text in texts]
    return parsed_results

generate

generate(prompt: Optional[Union[str, List[str]]] = None, system_message: Optional[Union[str, List[str]]] = None, messages: Optional[Union[List[dict], List[List[dict]]]] = None, parser: Optional[Type[LLMOutputParser]] = None, parse_mode: Optional[str] = 'json', parse_func: Optional[Callable] = None, **kwargs) -> Union[LLMOutputParser, List[LLMOutputParser]]

Generates LLM output(s) and parses the result(s).

This is the main method for generating text with the LLM. It handles both single and batch generation, and automatically parses the outputs.

Parameters:

- prompt (Optional[Union[str, List[str]]], default None): Input prompt(s) to the LLM.
- system_message (Optional[Union[str, List[str]]], default None): System message(s) for the LLM.
- messages (Optional[Union[List[dict], List[List[dict]]]], default None): Chat message(s) for the LLM, already in the required format. Either prompt or messages must be provided.
- parser (Optional[Type[LLMOutputParser]], default None): Parser class to use for processing the output.
- parse_mode (Optional[str], default 'json'): The mode to use for parsing; must be a parse_mode supported by the parser.
- parse_func (Optional[Callable], default None): The function used to parse the content; only valid when parse_mode is 'custom'.
- **kwargs (Any): Additional generation configuration parameters.

Returns:

- Union[LLMOutputParser, List[LLMOutputParser]]: An LLMOutputParser instance for single generation, or a list of LLMOutputParser instances for batch generation.

Raises:

- ValueError: If the input format is invalid.

Note

Either prompt or messages must be provided. If both or neither is provided, an error will be raised.

Source code in evoagentx/models/base_model.py
def generate(
    self,
    prompt: Optional[Union[str, List[str]]] = None,
    system_message: Optional[Union[str, List[str]]] = None,
    messages: Optional[Union[List[dict],List[List[dict]]]] = None,
    parser: Optional[Type[LLMOutputParser]] = None,
    parse_mode: Optional[str] = "json", 
    parse_func: Optional[Callable] = None,
    **kwargs
) -> Union[LLMOutputParser, List[LLMOutputParser]]:
    """Generates LLM output(s) and parses the result(s).

    This is the main method for generating text with the LLM. It handles both
    single and batch generation, and automatically parses the outputs.

    Args:
        prompt: Input prompt(s) to the LLM.
        system_message: System message(s) for the LLM.
        messages: Chat message(s) for the LLM, already in the required format (either `prompt` or `messages` must be provided).
        parser: Parser class to use for processing the output.
        parse_mode: The mode to use for parsing, must be the `parse_mode` supported by the `parser`. 
        parse_func: The function used to parse the content; only valid when `parse_mode` is 'custom'.
        **kwargs (Any): Additional generation configuration parameters.

    Returns:
        For single generation: An LLMOutputParser instance.
        For batch generation: A list of LLMOutputParser instances.

    Raises:
        ValueError: If the input format is invalid.

    Note:
        Either prompt or messages must be provided. If both or neither is provided,
        an error will be raised.
    """
    prepared_messages, single_generate = self._prepare_messages(prompt, system_message, messages)
    if not prepared_messages:  # Handle empty messages case
        return []

    generated_texts = self.batch_generate(batch_messages=prepared_messages, **kwargs)
    parsed_outputs = self.parse_generated_texts(texts=generated_texts, parser=parser, parse_mode=parse_mode, parse_func=parse_func, **kwargs)
    return parsed_outputs[0] if single_generate else parsed_outputs
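
Example — a usage sketch, assuming llm is any concrete BaseLLM instance (e.g. the LiteLLM configured earlier):

# single generation: returns one LLMOutputParser
out = llm.generate(
    prompt="Summarize Hamlet in one sentence.",
    system_message="You are a concise assistant.",
    parse_mode="str",
)
print(out.content)

# batch generation: returns a list of LLMOutputParser instances
outs = llm.generate(prompt=["What is 2+2?", "What is 3+3?"], parse_mode="str")
print([o.content for o in outs])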

async_generate async

async_generate(prompt: Optional[Union[str, List[str]]] = None, system_message: Optional[Union[str, List[str]]] = None, messages: Optional[Union[List[dict], List[List[dict]]]] = None, parser: Optional[Type[LLMOutputParser]] = None, parse_mode: Optional[str] = 'json', parse_func: Optional[Callable] = None, **kwargs) -> Union[LLMOutputParser, List[LLMOutputParser]]

Asynchronously generates LLM output(s) and parses the result(s).

This is the async version of the generate method. It works identically but performs the generation asynchronously.

Source code in evoagentx/models/base_model.py
async def async_generate(
    self,
    prompt: Optional[Union[str, List[str]]] = None,
    system_message: Optional[Union[str, List[str]]] = None,
    messages: Optional[Union[List[dict],List[List[dict]]]] = None,
    parser: Optional[Type[LLMOutputParser]] = None,
    parse_mode: Optional[str] = "json", 
    parse_func: Optional[Callable] = None,
    **kwargs
) -> Union[LLMOutputParser, List[LLMOutputParser]]:
    """Asynchronously generates LLM output(s) and parses the result(s).

    This is the async version of the generate method. It works identically but
    performs the generation asynchronously.
    """
    prepared_messages, single_generate = self._prepare_messages(prompt, system_message, messages)
    if not prepared_messages:  # Handle empty messages case
        return []

    generated_texts = await self.batch_generate_async(batch_messages=prepared_messages, **kwargs)
    parsed_outputs = self.parse_generated_texts(texts=generated_texts, parser=parser, parse_mode=parse_mode, parse_func=parse_func, **kwargs)
    return parsed_outputs[0] if single_generate else parsed_outputs
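
Example — the async counterpart, sketched under the same assumptions:

import asyncio

async def main():
    out = await llm.async_generate(prompt="Hello!", parse_mode="str")
    print(out.content)

asyncio.run(main())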

AliyunLLM

AliyunLLM(config: LLMConfig, **kwargs)

Bases: BaseLLM

Source code in evoagentx/models/base_model.py
def __init__(self, config: LLMConfig, **kwargs):
    """Initializes the LLM with configuration.

    Args:
        config: Configuration object for the LLM.
        **kwargs (Any): Additional keyword arguments.
    """
    self.config = config
    self.kwargs = kwargs
    self.init_model()

init_model

init_model()

Initialize the DashScope Generation client.

Source code in evoagentx/models/aliyun_model.py
def init_model(self):
    """
    Initialize the DashScope Generation client.
    """
    config: AliyunLLMConfig = self.config
    if not config.aliyun_api_key:
        raise ValueError("Aliyun API key is required. You should set `aliyun_api_key` in AliyunLLMConfig")

    #  API key
    os.environ["DASHSCOPE_API_KEY"] = config.aliyun_api_key
    dashscope.api_key = config.aliyun_api_key

    # model
    self._client = Generation()
    self._default_ignore_fields = [
        "llm_type", "output_response", "aliyun_api_key", "aliyun_access_key_id",
        "aliyun_access_key_secret", "model_name"
    ]
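
Example — a hedged sketch; aliyun_api_key and model appear in the source above, while the llm_type value and import path are assumptions by analogy with LiteLLM:

from evoagentx.models import AliyunLLMConfig, AliyunLLM  # import path assumed

config = AliyunLLMConfig(
    llm_type="AliyunLLM",  # assumed discriminator value
    model="qwen-plus",
    aliyun_api_key="sk-...",
)
llm = AliyunLLM(config)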

formulate_messages

formulate_messages(prompts: List[str], system_messages: Optional[List[str]] = None) -> List[List[dict]]

Format messages for the Aliyun model.

Parameters:

- prompts (List[str], required): List of user prompts.
- system_messages (Optional[List[str]], default None): Optional list of system messages.

Returns:

- List[List[dict]]: Formatted messages for the model.

Source code in evoagentx/models/aliyun_model.py
def formulate_messages(self, prompts: List[str], system_messages: Optional[List[str]] = None) -> List[List[dict]]:
    """
    Format messages for the Aliyun model.

    Args:
        prompts (List[str]): List of user prompts.
        system_messages (Optional[List[str]]): Optional list of system messages.

    Returns:
        List[List[dict]]: Formatted messages for the model.
    """
    if system_messages:
        assert len(prompts) == len(system_messages), f"the number of prompts ({len(prompts)}) is different from the number of system_messages ({len(system_messages)})"
    else:
        system_messages = [None] * len(prompts)

    messages_list = []
    for prompt, system_message in zip(prompts, system_messages):
        messages = []
        if system_message:
            messages.append({"role": "system", "content": system_message})
        messages.append({"role": "user", "content": prompt})
        messages_list.append(messages)
    return messages_list
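
For example:

msgs = llm.formulate_messages(
    prompts=["What is DashScope?"],
    system_messages=["You are a helpful assistant."],
)
# [[{"role": "system", "content": "You are a helpful assistant."},
#   {"role": "user", "content": "What is DashScope?"}]]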

update_completion_params

update_completion_params(params1: dict, params2: dict) -> dict

Update completion parameters with new values.

Parameters:

- params1 (dict, required): Base parameters.
- params2 (dict, required): New parameters to update with.

Returns:

- dict: Updated parameters.

Source code in evoagentx/models/aliyun_model.py
def update_completion_params(self, params1: dict, params2: dict) -> dict:
    """
    Update completion parameters with new values.

    Args:
        params1 (dict): Base parameters.
        params2 (dict): New parameters to update with.

    Returns:
        dict: Updated parameters.
    """
    config_params: list = self.config.get_config_params()
    for key, value in params2.items():
        if key in self._default_ignore_fields:
            continue
        if key not in config_params:
            continue
        params1[key] = value
    return params1

get_completion_params

get_completion_params(**kwargs)

Get completion parameters for the model.

Returns:

- dict: Parameters for model completion.

Source code in evoagentx/models/aliyun_model.py
def get_completion_params(self, **kwargs):
    """
    Get completion parameters for the model.

    Returns:
        dict: Parameters for model completion.
    """
    completion_params = self.config.get_set_params(ignore=self._default_ignore_fields)
    completion_params = self.update_completion_params(completion_params, kwargs)
    completion_params["model"] = self.config.model
    return completion_params

get_stream_output

get_stream_output(response: Any, output_response: bool = True) -> str

Process streaming response from the model.

Parameters:

- response (Any, required): The streaming response from the model.
- output_response (bool, default True): Whether to print the response.

Returns:

- str: The complete response text.

Source code in evoagentx/models/aliyun_model.py
def get_stream_output(self, response: Any, output_response: bool = True) -> str:
    """
    Process streaming response from the model.

    Args:
        response: The streaming response from the model.
        output_response (bool): Whether to print the response.

    Returns:
        str: The complete response text.
    """
    output = ""
    try:
        for chunk in response:
            if not hasattr(chunk, 'output') or chunk.output is None:
                error_msg = getattr(chunk, 'message', 'Invalid chunk format from model')
                raise ValueError(f"Model stream chunk error: {error_msg}")
            if hasattr(chunk.output, 'text'):
                content = chunk.output.text
            elif hasattr(chunk.output, 'choices') and chunk.output.choices:
                content = chunk.output.choices[0].message.content
            else:
                continue
            if content:
                if output_response:
                    print(content, end="", flush=True)
                output += content
    except Exception as e:
        print(f"Error processing stream: {str(e)}")
        if not output:
            raise RuntimeError(f"Failed to process stream response: {str(e)}")
    if output_response:
        print("")
    return output

get_stream_output_async async

get_stream_output_async(response: Any, output_response: bool = False) -> str

Process streaming response asynchronously.

Parameters:

- response (Any, required): The streaming response from the model.
- output_response (bool, default False): Whether to print the response.

Returns:

- str: The complete response text.

Source code in evoagentx/models/aliyun_model.py
async def get_stream_output_async(self, response: Any, output_response: bool = False) -> str:
    """
    Process streaming response asynchronously.

    Args:
        response: The streaming response from the model.
        output_response (bool): Whether to print the response.

    Returns:
        str: The complete response text.
    """
    output = ""
    try:
        async for chunk in response:
            if not hasattr(chunk, 'output') or chunk.output is None:
                error_msg = getattr(chunk, 'message', 'Invalid chunk format from model')
                raise ValueError(f"Model stream chunk error: {error_msg}")
            if hasattr(chunk.output, 'text'):
                content = chunk.output.text
            elif hasattr(chunk.output, 'choices') and chunk.output.choices:
                content = chunk.output.choices[0].message.content
            else:
                continue
            if content:
                if output_response:
                    print(content, end="", flush=True)
                output += content
    except Exception as e:
        print(f"Error processing async stream: {str(e)}")
        if not output:
            raise RuntimeError(f"Failed to process async stream response: {str(e)}")
    if output_response:
        print("")
    return output

get_completion_output

get_completion_output(response: Any, output_response: bool = True) -> str

Process non-streaming response from the model.

Parameters:

- response (Any, required): The response from the model.
- output_response (bool, default True): Whether to print the response.

Returns:

- str: The complete response text.

Source code in evoagentx/models/aliyun_model.py
def get_completion_output(self, response: Any, output_response: bool = True) -> str:
    """
    Process non-streaming response from the model.

    Args:
        response: The response from the model.
        output_response (bool): Whether to print the response.

    Returns:
        str: The complete response text.
    """
    try:
        if not hasattr(response, 'output') or response.output is None:
            error_msg = getattr(response, 'message', 'Invalid response format from model')
            raise ValueError(f"Model response error: {error_msg}")

        if hasattr(response.output, 'text'):
            output = response.output.text
        elif hasattr(response.output, 'choices') and response.output.choices:
            output = response.output.choices[0].message.content
        else:
            raise ValueError("Unexpected response format")

        if output_response:
            print(output)
        return output
    except Exception as e:
        raise RuntimeError(f"Error processing completion response: {str(e)}")

single_generate

single_generate(messages: List[dict], **kwargs) -> str

Generate a single response from the model.

Parameters:

- messages (List[dict], required): The conversation history.
- **kwargs: Additional parameters for generation.

Returns:

- str: The generated response.

Source code in evoagentx/models/aliyun_model.py
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(5))
def single_generate(self, messages: List[dict], **kwargs) -> str:
    """
    Generate a single response from the model.

    Args:
        messages (List[dict]): The conversation history.
        **kwargs: Additional parameters for generation.

    Returns:
        str: The generated response.
    """
    stream = kwargs.get("stream", self.config.stream)
    output_response = kwargs.get("output_response", self.config.output_response)

    try:
        completion_params = self.get_completion_params(**kwargs)
        response = self._client.call(messages=messages, **completion_params)

        if response is None:
            raise RuntimeError("Received empty response from model")

        if stream:
            output = self.get_stream_output(response, output_response=output_response)
            cost = self._stream_cost(response)
        else:
            output = self.get_completion_output(response=response, output_response=output_response)
            cost = self._completion_cost(response)
        self._update_cost(cost=cost)
        return output
    except Exception as e:
        raise RuntimeError(f"Error during single_generate of AliyunLLM: {str(e)}")

batch_generate

batch_generate(batch_messages: List[List[dict]], **kwargs) -> List[str]

Generate responses for a batch of messages.

Parameters:

- batch_messages (List[List[dict]], required): List of conversation histories.
- **kwargs: Additional parameters for generation.

Returns:

- List[str]: List of generated responses.

Source code in evoagentx/models/aliyun_model.py
def batch_generate(self, batch_messages: List[List[dict]], **kwargs) -> List[str]:
    """
    Generate responses for a batch of messages.

    Args:
        batch_messages (List[List[dict]]): List of conversation histories.
        **kwargs: Additional parameters for generation.

    Returns:
        List[str]: List of generated responses.
    """
    if not isinstance(batch_messages, list) or not batch_messages:
        raise ValueError("batch_messages must be a non-empty list of message lists")
    return [self.single_generate(messages=one_messages, **kwargs) for one_messages in batch_messages]

single_generate_async async

single_generate_async(messages: List[dict], **kwargs) -> str

Asynchronously generate a single response.

Parameters:

- messages (List[dict], required): The conversation history.
- **kwargs: Additional parameters for the generation.

Returns:

- str: The generated response.

Source code in evoagentx/models/aliyun_model.py
async def single_generate_async(self, messages: List[dict], **kwargs) -> str:
    """
    Asynchronously generate a single response.

    Args:
        messages (List[dict]): The conversation history.
        **kwargs: Additional parameters for the generation.

    Returns:
        str: The generated response.
    """
    stream = kwargs.get("stream", self.config.stream)
    output_response = kwargs.get("output_response", self.config.output_response)

    try:
        completion_params = self.get_completion_params(**kwargs)
        loop = asyncio.get_event_loop()
        response = await loop.run_in_executor(
            None,
            lambda: self._client.call(messages=messages, **completion_params)
        )

        if stream:
            output = await self.get_stream_output_async(response, output_response=output_response)
            cost = self._stream_cost(response)
        else:
            output = self.get_completion_output(response=response, output_response=output_response)
            cost = self._completion_cost(response)

        self._update_cost(cost=cost)
        return output

    except Exception as e:
        raise RuntimeError(f"Error during single_generate_async of AliyunLLM: {str(e)}")

atomic_method

atomic_method(func)

Thread-safe decorator for class methods. If the instance has a `_lock` attribute, it is used as the lock; otherwise the method executes under a `nullcontext`.

Source code in evoagentx/core/decorators.py
def atomic_method(func):
    """
    Thread-safe decorator for class methods.
    If the instance has a `_lock` attribute, it is used as the lock; otherwise the method executes under a `nullcontext`.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        context = getattr(self, "_lock", nullcontext())
        with context:
            return func(self, *args, **kwargs)
    return wrapper
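
Example — a usage sketch; the decorator looks for an optional _lock attribute on the instance:

import threading
from evoagentx.core.decorators import atomic_method

class Counter:
    def __init__(self):
        self._lock = threading.Lock()  # picked up by atomic_method
        self.value = 0

    @atomic_method
    def increment(self):
        self.value += 1  # runs while holding self._lock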