🧬 Models

evoagentx.models

LLMOutputParser

LLMOutputParser(**kwargs)

Bases: Parser

A basic parser for LLM-generated content.

This parser stores the raw text generated by an LLM in the .content attribute and provides methods to extract structured data from this text using different parsing strategies.

Attributes:

| Name | Type | Description |
|------|------|-------------|
| `content` | `str` | The raw text generated by the LLM. |

Source code in evoagentx/core/module.py
def __init__(self, **kwargs):
    """
    Initializes a BaseModule instance.

    Args:
        **kwargs (Any): Keyword arguments used to initialize the instance

    Raises:
        ValidationError: When parameter validation fails
        Exception: When other errors occur during initialization
    """

    try:
        for field_name, _ in type(self).model_fields.items():
            field_value = kwargs.get(field_name, None)
            if field_value:
                kwargs[field_name] = self._process_data(field_value)
            # if field_value and isinstance(field_value, dict) and "class_name" in field_value:
            #     class_name = field_value.get("class_name")
            #     sub_cls = MODULE_REGISTRY.get_module(cls_name=class_name)
            #     kwargs[field_name] = sub_cls._create_instance(field_value)
        super().__init__(**kwargs) 
        self.init_module()
    except (ValidationError, Exception) as e:
        exception_handler = callback_manager.get_callback("exception_buffer")
        if exception_handler is None:
            error_message = get_base_module_init_error_message(
                cls=self.__class__, 
                data=kwargs, 
                errors=e
            )
            logger.error(error_message)
            raise
        else:
            exception_handler.add(e)

get_attrs classmethod

get_attrs(return_type: bool = False) -> List[Union[str, tuple]]

Returns the attributes of the LLMOutputParser class.

Excludes ["class_name", "content"] by default.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `return_type` | `bool` | Whether to return the type of the attributes along with their names. | `False` |

Returns:

| Type | Description |
|------|-------------|
| `List[Union[str, tuple]]` | If `return_type` is True, a list of tuples pairing each attribute name with its type; otherwise, a list of attribute names. |

Source code in evoagentx/models/base_model.py
@classmethod
def get_attrs(cls, return_type: bool = False) -> List[Union[str, tuple]]:
    """Returns the attributes of the LLMOutputParser class.

    Excludes ["class_name", "content"] by default.

    Args:
        return_type: Whether to return the type of the attributes along with their names.

    Returns:
        If `return_type` is True, returns a list of tuples where each tuple contains 
        the attribute name and its type. Otherwise, returns a list of attribute names.
    """
    attrs = [] 
    exclude_attrs = ["class_name", "content"]
    for field, field_info in cls.model_fields.items():
        if field not in exclude_attrs:
            if return_type:
                field_type = get_type_name(field_info.annotation)
                attrs.append((field, field_type))
            else:
                attrs.append(field)
    return attrs

get_attr_descriptions classmethod

get_attr_descriptions() -> dict

Returns the attributes and their descriptions.

Returns:

| Type | Description |
|------|-------------|
| `dict` | A dictionary mapping attribute names to their descriptions. |

Source code in evoagentx/models/base_model.py
@classmethod
def get_attr_descriptions(cls) -> dict:
    """Returns the attributes and their descriptions.

    Returns:
        A dictionary mapping attribute names to their descriptions.
    """
    attrs = cls.get_attrs()
    results = {} 
    for field_name, field_info in cls.model_fields.items():
        if field_name not in attrs:
            continue
        field_desc = field_info.description if field_info.description is not None else "None"
        results[field_name] = field_desc
    return results
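As a quick illustration of the two classmethods above, here is a minimal sketch. It assumes `LLMOutputParser` can be imported from `evoagentx.models` and subclassed like a Pydantic model (it reads `cls.model_fields`); the `ReviewOutput` class and its fields are hypothetical.

```python
from typing import Optional
from pydantic import Field
from evoagentx.models import LLMOutputParser  # assumed import path

class ReviewOutput(LLMOutputParser):
    verdict: str = Field(description="Overall verdict, e.g. 'approve' or 'reject'.")
    summary: Optional[str] = Field(default=None, description="A short summary of the review.")

print(ReviewOutput.get_attrs())
# ['verdict', 'summary']  (class_name and content are excluded)
print(ReviewOutput.get_attrs(return_type=True))
# [('verdict', ...), ('summary', ...)]  type names as rendered by get_type_name
print(ReviewOutput.get_attr_descriptions())
# {'verdict': "Overall verdict, e.g. 'approve' or 'reject'.", 'summary': 'A short summary of the review.'}
```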

get_content_data classmethod

get_content_data(content: str, parse_mode: str = 'json', parse_func: Optional[Callable] = None, **kwargs) -> dict

Parses LLM-generated content into a dictionary.

This method takes content from an LLM response and converts it to a structured dictionary based on the specified parsing mode.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `content` | `str` | The content to parse. | *required* |
| `parse_mode` | `str` | The mode used to parse the content. One of: `'str'` assigns the raw text to all attributes of the parser; `'json'` extracts the first valid JSON object from the output and parses it into a dictionary; `'xml'` parses the content into a dictionary keyed by XML tags; `'title'` parses content with Markdown-style headings; `'custom'` applies custom parsing logic and requires `parse_func`. | `'json'` |
| `parse_func` | `Optional[Callable]` | The function used to parse the content; only valid when `parse_mode` is `'custom'`. | `None` |
| `**kwargs` | `Any` | Additional arguments passed to the parsing function. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `dict` | The parsed content as a dictionary. |

Raises:

| Type | Description |
|------|-------------|
| `ValueError` | If `parse_mode` is invalid, or if `parse_func` is not provided when `parse_mode` is `'custom'`. |

Source code in evoagentx/models/base_model.py
@classmethod
def get_content_data(cls, content: str, parse_mode: str = "json", parse_func: Optional[Callable] = None, **kwargs) -> dict:
    """Parses LLM-generated content into a dictionary.

    This method takes content from an LLM response and converts it to a structured
    dictionary based on the specified parsing mode.

    Args:
        content: The content to parse.
        parse_mode: The mode to parse the content. Must be one of:
            - 'str': Assigns the raw text content to all attributes of the parser. 
            - 'json': Extracts and parses JSON objects from LLM output. It will return a dictionary parsed from the first valid JSON string.
            - 'xml': Parses content using XML tags. It will return a dictionary parsed from the XML tags.
            - 'title': Parses content with Markdown-style headings.
            - 'custom': Uses custom parsing logic. Requires providing `parse_func` parameter as a custom parsing function.
        parse_func: The function to parse the content, only valid when parse_mode is 'custom'.
        **kwargs (Any): Additional arguments passed to the parsing function.

    Returns:
        The parsed content as a dictionary.

    Raises:
        ValueError: If parse_mode is invalid or if parse_func is not provided when parse_mode is 'custom'.
    """
    attrs = cls.get_attrs()
    if len(attrs) <= 0:
        return {} 

    if parse_mode == "str":
        parse_func = cls._parse_str_content
    elif parse_mode == "json":
        parse_func = cls._parse_json_content
    elif parse_mode == "xml":
        parse_func = cls._parse_xml_content
    elif parse_mode == "title":
        parse_func = cls._parse_title_content
    elif parse_mode == "custom":
        if parse_func is None:
            raise ValueError("`parse_func` must be provided when `parse_mode` is 'custom'.")
        # obtain the function inputs
        signature = inspect.signature(parse_func)
        if "content" not in signature.parameters:
            raise ValueError("`parse_func` must have an input argument `content`.")

        func_args = {}
        func_args["content"] = content
        for param_name, param in signature.parameters.items():
            if param_name == "content":
                continue  # Already set
            if param_name in kwargs:
                func_args[param_name] = kwargs[param_name]
        data = parse_func(**func_args)
        if not isinstance(data, dict):
            raise ValueError(f"The output of `parse_func` must be a dictionary, but found {type(data)}.")
        return data
    else:
        raise ValueError(f"Invalid value '{parse_mode}' detected for `parse_mode`. Available choices: {PARSER_VALID_MODE}")
    data = parse_func(content=content, **kwargs)
    return data
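Continuing the hypothetical `ReviewOutput` sketch, `get_content_data` can be called directly when only the dictionary is needed. The JSON expectations below follow the docstring; exact behavior depends on the internal `_parse_json_content`.

```python
raw = '{"verdict": "approve", "summary": "Looks good."}'
print(ReviewOutput.get_content_data(raw, parse_mode="json"))
# expected: {'verdict': 'approve', 'summary': 'Looks good.'}

def first_line_parser(content: str) -> dict:
    # A custom parser must accept `content` and return a dict.
    head, _, rest = content.partition("\n")
    return {"verdict": head.strip(), "summary": rest.strip()}

print(ReviewOutput.get_content_data("approve\nMinor nits only.",
                                    parse_mode="custom", parse_func=first_line_parser))
# {'verdict': 'approve', 'summary': 'Minor nits only.'}
```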

parse classmethod

parse(content: str, parse_mode: str = 'json', parse_func: Optional[Callable] = None, **kwargs) -> LLMOutputParser

Parses LLM-generated text into a structured parser instance.

This is the main method for creating parser instances from LLM output.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `content` | `str` | The text generated by the LLM. | *required* |
| `parse_mode` | `str` | The mode used to parse the content. One of: `'str'` assigns the raw text to all attributes of the parser; `'json'` creates the instance from the first valid JSON object in the output; `'xml'` creates the instance from the content's XML tags; `'title'` creates the instance from Markdown-style headings (the default title format is `"## {title}"`; override it with the `title_format` keyword, which must contain the `{title}` placeholder); `'custom'` uses custom parsing logic and requires `parse_func`, which must accept a `content` parameter and return a dictionary mapping attribute names to parsed values. | `'json'` |
| `parse_func` | `Optional[Callable]` | The function used to parse the content; only valid when `parse_mode` is `'custom'`. | `None` |
| `**kwargs` | `Any` | Additional arguments passed to the parsing functions, such as `title_format` for `parse_mode="title"`. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `LLMOutputParser` | An instance of LLMOutputParser containing the parsed data. |

Raises:

| Type | Description |
|------|-------------|
| `ValueError` | If `parse_mode` is invalid, or if `content` is not a string. |

Source code in evoagentx/models/base_model.py
@classmethod
def parse(cls, content: str, parse_mode: str = "json", parse_func: Optional[Callable] = None, **kwargs) -> "LLMOutputParser":
    """Parses LLM-generated text into a structured parser instance.

    This is the main method for creating parser instances from LLM output.

    Args:
        content: The text generated by the LLM.
        parse_mode: The mode to parse the content, must be one of:
            - 'str': Assigns the raw text content to all attributes of the parser. 
            - 'json': Extracts and parses JSON objects from LLM output. Uses the first valid JSON string to create an instance of LLMOutputParser.
            - 'xml': Parses content using XML tags. Uses the XML tags to create an instance of LLMOutputParser.
            - 'title': Parses content with Markdown-style headings. Uses the Markdown-style headings to create an instance of LLMOutputParser. The default title format is "## {title}", you can change it by providing `title_format` parameter, which should be a string that contains `{title}` placeholder. 
            - 'custom': Uses custom parsing logic. Requires providing `parse_func` parameter as a custom parsing function. The `parse_func` must have a parameter named `content` and return a dictionary where the keys are the attribute names and the values are the parsed data. 
        parse_func: The function to parse the content, only valid when `parse_mode` is 'custom'.
        **kwargs (Any): Additional arguments passed to parsing functions, such as:
            - `title_format` for `parse_mode="title"`.

    Returns:
        An instance of LLMOutputParser containing the parsed data.

    Raises:
        ValueError: If parse_mode is invalid or if content is not a string.
    """
    if parse_mode not in PARSER_VALID_MODE:
        raise ValueError(f"'{parse_mode}' is an invalid value for `parse_mode`. Available choices: {PARSER_VALID_MODE}.")
    if not isinstance(content, str):
        raise ValueError(f"The input to {cls.__name__}.parse should be a str, but found {type(content)}.")
    data = cls.get_content_data(content=content, parse_mode=parse_mode, parse_func=parse_func, **kwargs)
    data.update({"content": content})
    parser = cls.from_dict(data, **kwargs)
    # parser.content = content
    return parser
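A sketch of `parse` with `parse_mode="title"`, again using the hypothetical `ReviewOutput`; exact whitespace handling is up to the internal `_parse_title_content`.

```python
text = "## verdict\napprove\n\n## summary\nLooks good overall."
result = ReviewOutput.parse(text, parse_mode="title")  # default title_format is "## {title}"
print(result.verdict)          # expected: 'approve'
print(result.content == text)  # True; the raw text is always stored on .content
```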

__str__

__str__() -> str

Returns a string representation of the parser.

Source code in evoagentx/models/base_model.py
def __str__(self) -> str:
    """
    Returns a string representation of the parser.
    """
    return self.to_str()

to_str

to_str(**kwargs) -> str

Converts the parser to a string.

Source code in evoagentx/models/base_model.py
def to_str(self, **kwargs) -> str:
    """
    Converts the parser to a string.
    """
    return self.content

get_structured_data

get_structured_data() -> dict

Extracts structured data from the parser.

Returns:

| Type | Description |
|------|-------------|
| `dict` | A dictionary containing only the defined attributes and their values, excluding metadata like `class_name`. |

Source code in evoagentx/models/base_model.py
def get_structured_data(self) -> dict:
    """Extracts structured data from the parser.

    Returns:
        A dictionary containing only the defined attributes and their values,
        excluding metadata like class_name.
    """
    attrs = type(self).get_attrs()
    data = self.to_dict(ignore=["class_name"])
    # structured_data = {attr: data[attr] for attr in attrs}
    structured_data = {key: value for key, value in data.items() if key in attrs}
    return structured_data
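Continuing the sketch above, `get_structured_data` strips the metadata and returns only the declared fields:

```python
print(result.get_structured_data())
# {'verdict': 'approve', 'summary': 'Looks good overall.'}
# (class_name and the raw content attribute are excluded)
```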

BaseConfig

BaseConfig(**kwargs)

Bases: BaseModule

Base configuration class that serves as parent for all configuration classes.

A config should inherit from BaseConfig and declare its attributes and their types; otherwise it will be an empty config.

Source code in evoagentx/core/module.py
def __init__(self, **kwargs):
    """
    Initializes a BaseModule instance.

    Args:
        **kwargs (Any): Keyword arguments used to initialize the instance

    Raises:
        ValidationError: When parameter validation fails
        Exception: When other errors occur during initialization
    """

    try:
        for field_name, _ in type(self).model_fields.items():
            field_value = kwargs.get(field_name, None)
            if field_value:
                kwargs[field_name] = self._process_data(field_value)
            # if field_value and isinstance(field_value, dict) and "class_name" in field_value:
            #     class_name = field_value.get("class_name")
            #     sub_cls = MODULE_REGISTRY.get_module(cls_name=class_name)
            #     kwargs[field_name] = sub_cls._create_instance(field_value)
        super().__init__(**kwargs) 
        self.init_module()
    except (ValidationError, Exception) as e:
        exception_handler = callback_manager.get_callback("exception_buffer")
        if exception_handler is None:
            error_message = get_base_module_init_error_message(
                cls=self.__class__, 
                data=kwargs, 
                errors=e
            )
            logger.error(error_message)
            raise
        else:
            exception_handler.add(e)

save

save(path: str, **kwargs) -> str

Save configuration to the specified path.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `path` | `str` | The file path to save the configuration. | *required* |
| `**kwargs` | `Any` | Additional keyword arguments passed to the `save_module` method. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `str` | The path where the file was saved. |

Source code in evoagentx/core/base_config.py
def save(self, path: str, **kwargs)-> str:

    """Save configuration to the specified path.

    Args:
        path: The file path to save the configuration
        **kwargs (Any): Additional keyword arguments passed to save_module method

    Returns:
        str: The path where the file was saved
    """
    return super().save_module(path, **kwargs)

get_config_params

get_config_params() -> List[str]

Get a list of configuration parameters.

Returns:

| Type | Description |
|------|-------------|
| `List[str]` | List of configuration parameter names, excluding `'class_name'`. |

Source code in evoagentx/core/base_config.py
def get_config_params(self) -> List[str]:
    """Get a list of configuration parameters.

    Returns:
        List[str]: List of configuration parameter names, excluding 'class_name'
    """
    config_params = list(type(self).model_fields.keys())
    config_params.remove("class_name")
    return config_params

get_set_params

get_set_params(ignore: List[str] = []) -> dict

Get a dictionary of explicitly set parameters.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `ignore` | `List[str]` | List of parameter names to ignore. | `[]` |

Returns:

| Type | Description |
|------|-------------|
| `dict` | Dictionary of explicitly set parameters, excluding `'class_name'` and ignored parameters. |

Source code in evoagentx/core/base_config.py
def get_set_params(self, ignore: List[str] = []) -> dict:
    """Get a dictionary of explicitly set parameters.

    Args:
        ignore: List of parameter names to ignore

    Returns:
        dict: Dictionary of explicitly set parameters, excluding 'class_name' and ignored parameters
    """
    explicitly_set_fields = {field: getattr(self, field) for field in self.model_fields_set}
    if self.kwargs:
        explicitly_set_fields.update(self.kwargs)
    for field in ignore:
        explicitly_set_fields.pop(field, None)
    explicitly_set_fields.pop("class_name", None)
    return explicitly_set_fields
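A minimal sketch of these helpers, assuming `BaseConfig` is importable from `evoagentx.core.base_config` (the source path shown above); the `RetryConfig` class and its fields are hypothetical.

```python
from evoagentx.core.base_config import BaseConfig  # assumed import path

class RetryConfig(BaseConfig):
    max_retries: int = 3
    backoff: float = 1.5

cfg = RetryConfig(max_retries=5)
print(cfg.get_config_params())  # ['max_retries', 'backoff']
print(cfg.get_set_params())     # {'max_retries': 5}
cfg.save("retry_config.json")   # delegates to BaseModule.save_module
```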

LiteLLM

LiteLLM(config: LLMConfig, **kwargs)

Bases: OpenAILLM

Source code in evoagentx/models/base_model.py
def __init__(self, config: LLMConfig, **kwargs):
    """Initializes the LLM with configuration.

    Args:
        config: Configuration object for the LLM.
        **kwargs (Any): Additional keyword arguments.
    """
    self.config = config
    self.kwargs = kwargs
    self.init_model()

init_model

init_model()

Initialize the model based on the configuration.

Source code in evoagentx/models/litellm_model.py
def init_model(self):
    """
    Initialize the model based on the configuration.
    """
    # Check if llm_type is correct
    if self.config.llm_type != "LiteLLM":
        raise ValueError("llm_type must be 'LiteLLM'")

    # Set model and extract the company name
    self.model = self.config.model
    # company = self.model.split("/")[0] if "/" in self.model else "openai"
    company = infer_litellm_company_from_model(self.model)

    # Set environment variables based on the company
    if company == "openai":
        if not self.config.openai_key:
            raise ValueError("OpenAI API key is required for OpenAI models. You should set `openai_key` in LiteLLMConfig")
        os.environ["OPENAI_API_KEY"] = self.config.openai_key
    elif company == "deepseek":
        if not self.config.deepseek_key:
            raise ValueError("DeepSeek API key is required for DeepSeek models. You should set `deepseek_key` in LiteLLMConfig")
        os.environ["DEEPSEEK_API_KEY"] = self.config.deepseek_key
    elif company == "anthropic":
        if not self.config.anthropic_key:
            raise ValueError("Anthropic API key is required for Anthropic models. You should set `anthropic_key` in LiteLLMConfig")
        os.environ["ANTHROPIC_API_KEY"] = self.config.anthropic_key
    elif company == "gemini":
        if not self.config.gemini_key:
            raise ValueError("Gemini API key is required for Gemini models. You should set `gemini_key` in LiteLLMConfig")
        os.environ["GEMINI_API_KEY"] = self.config.gemini_key 
    elif company == "meta_llama":
        if not self.config.meta_llama_key:
            raise ValueError("Meta Llama API key is required for Meta Llama models. You should set `meta_llama_key` in LiteLLMConfig")
        os.environ["LLAMA_API_KEY"] = self.config.meta_llama_key
    elif company == "openrouter":
        if not self.config.openrouter_key:
            raise ValueError("OpenRouter API key is required for OpenRouter models. You should set `openrouter_key` in LiteLLMConfig. You can also set `openrouter_base` in LiteLLMConfig to use a custom base URL [optional]")
        os.environ["OPENROUTER_API_KEY"] = self.config.openrouter_key
        os.environ["OPENROUTER_API_BASE"] = self.config.openrouter_base # [optional]
    elif company == "perplexity":
        if not self.config.perplexity_key:
            raise ValueError("Perplexity API key is required for Perplexity models. You should set `perplexity_key` in LiteLLMConfig")
        os.environ["PERPLEXITYAI_API_KEY"] = self.config.perplexity_key
    elif company == "groq":
        if not self.config.groq_key:
            raise ValueError("Groq API key is required for Groq models. You should set `groq_key` in LiteLLMConfig")
        os.environ["GROQ_API_KEY"] = self.config.groq_key
    else:
        raise ValueError(f"Unsupported company: {company}")

    self._default_ignore_fields = [
        "llm_type", "output_response", "openai_key", "deepseek_key", "anthropic_key", 
        "gemini_key", "meta_llama_key", "openrouter_key", "openrouter_base", "perplexity_key", 
        "groq_key"
    ] # parameters in OpenAILLMConfig that are not OpenAI models' input parameters 
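A configuration sketch matching the checks above. The `LiteLLMConfig` import path is an assumption; the field names (`llm_type`, `model`, `anthropic_key`, ...) are the ones referenced in `init_model`.

```python
from evoagentx.models import LiteLLM, LiteLLMConfig  # assumed import path

config = LiteLLMConfig(
    llm_type="LiteLLM",  # init_model() raises ValueError for any other value
    model="anthropic/claude-3-5-sonnet-20240620",  # provider inferred from the model name
    anthropic_key="sk-ant-...",  # exported to ANTHROPIC_API_KEY
)
llm = LiteLLM(config)
```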

single_generate

single_generate(messages: List[dict], **kwargs) -> str

Generate a single response using the completion function.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `messages` | `List[dict]` | A list of dictionaries representing the conversation history. | *required* |
| `**kwargs` | `Any` | Additional parameters passed to the `completion` function. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `str` | A string containing the model's response. |

Source code in evoagentx/models/litellm_model.py
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(5))
def single_generate(self, messages: List[dict], **kwargs) -> str:

    """
    Generate a single response using the completion function.

    Args: 
        messages (List[dict]): A list of dictionaries representing the conversation history.
        **kwargs (Any): Additional parameters to be passed to the `completion` function.

    Returns: 
        str: A string containing the model's response.
    """
    stream = kwargs["stream"] if "stream" in kwargs else self.config.stream
    output_response = kwargs["output_response"] if "output_response" in kwargs else self.config.output_response

    try:
        completion_params = self.get_completion_params(**kwargs)
        response = completion(messages=messages, **completion_params)
        if stream:
            output = self.get_stream_output(response, output_response=output_response)
            cost = self._stream_cost(messages=messages, output=output)
        else:
            output: str = self.get_completion_output(response=response, output_response=output_response)
            cost = self._completion_cost(response=response)
        self._update_cost(cost=cost)
    except Exception as e:
        raise RuntimeError(f"Error during single_generate: {str(e)}")

    return output
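Continuing the `llm` sketch above, a single call; as the first lines of the method show, `stream` can be overridden per call:

```python
reply = llm.single_generate(
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stream=False,  # per-call override of config.stream
)
print(reply)
```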

batch_generate

batch_generate(batch_messages: List[List[dict]], **kwargs) -> List[str]

Generate responses for a batch of messages.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `batch_messages` | `List[List[dict]]` | A list of message lists, where each sublist represents a conversation. | *required* |
| `**kwargs` | `Any` | Additional parameters passed to the `completion` function. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `List[str]` | A list of responses, one for each conversation. |

Source code in evoagentx/models/litellm_model.py
def batch_generate(self, batch_messages: List[List[dict]], **kwargs) -> List[str]:
    """
    Generate responses for a batch of messages.

    Args: 
        batch_messages (List[List[dict]]): A list of message lists, where each sublist represents a conversation.
        **kwargs (Any): Additional parameters to be passed to the `completion` function.

    Returns: 
        List[str]: A list of responses for each conversation.
    """
    results = []
    for messages in batch_messages:
        response = self.single_generate(messages, **kwargs)
        results.append(response)
    return results

single_generate_async async

single_generate_async(messages: List[dict], **kwargs) -> str

Generate a single response using the async completion function.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `messages` | `List[dict]` | A list of dictionaries representing the conversation history. | *required* |
| `**kwargs` | `Any` | Additional parameters passed to the `completion` function. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `str` | A string containing the model's response. |

Source code in evoagentx/models/litellm_model.py
async def single_generate_async(self, messages: List[dict], **kwargs) -> str:
    """
    Generate a single response using the async completion function.

    Args: 
        messages (List[dict]): A list of dictionaries representing the conversation history.
        **kwargs (Any): Additional parameters to be passed to the `completion` function.

    Returns: 
        str: A string containing the model's response.
    """
    stream = kwargs["stream"] if "stream" in kwargs else self.config.stream
    output_response = kwargs["output_response"] if "output_response" in kwargs else self.config.output_response

    try:
        completion_params = self.get_completion_params(**kwargs)
        response = await acompletion(messages=messages, **completion_params)
        if stream:
            if hasattr(response, "__aiter__"):
                output = await self.get_stream_output_async(response, output_response=output_response)
            else:
                output = self.get_stream_output(response, output_response=output_response)
            cost = self._stream_cost(messages=messages, output=output)
        else:
            output: str = self.get_completion_output(response=response, output_response=output_response)
            cost = self._completion_cost(response=response)
        self._update_cost(cost=cost)
    except Exception as e:
        raise RuntimeError(f"Error during single_generate_async: {str(e)}")

    return output

completion_cost

completion_cost(completion_response=None, prompt='', messages: List = [], completion='', total_time=0.0, call_type='completion', size=None, quality=None, n=None) -> float

Calculate the cost of a given completion or other supported tasks.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `completion_response` | `dict` | The response received from a LiteLLM completion request. | `None` |
| `prompt` | `str` | Input prompt text. | `''` |
| `messages` | `list` | Conversation history. | `[]` |
| `completion` | `str` | Output text from the LLM. | `''` |
| `total_time` | `float` | Total time used for the request. | `0.0` |
| `call_type` | `str` | Type of request (e.g., `"completion"`, `"image_generation"`). | `'completion'` |
| `size` | `str` | Image size for image generation. | `None` |
| `quality` | `str` | Image quality for image generation. | `None` |
| `n` | `int` | Number of generated images. | `None` |

Returns:

| Type | Description |
|------|-------------|
| `float` | The cost in USD. |

Source code in evoagentx/models/litellm_model.py
def completion_cost(
    self,
    completion_response=None,
    prompt="",
    messages: List = [],
    completion="",
    total_time=0.0,
    call_type="completion",
    size=None,
    quality=None,
    n=None,
) -> float:
    """
    Calculate the cost of a given completion or other supported tasks.

    Args:
        completion_response (dict): The response received from a LiteLLM completion request.
        prompt (str): Input prompt text.
        messages (list): Conversation history.
        completion (str): Output text from the LLM.
        total_time (float): Total time used for request.
        call_type (str): Type of request (e.g., "completion", "image_generation").
        size (str): Image size for image generation.
        quality (str): Image quality for image generation.
        n (int): Number of generated images.

    Returns:
        float: The cost in USD.
    """
    try:
        # Default parameters
        prompt_tokens = 0
        completion_tokens = 0
        model = self.model  # Use the class model by default

        # Handle completion response
        if completion_response:
            prompt_tokens = completion_response.get("usage", {}).get("prompt_tokens", 0)
            completion_tokens = completion_response.get("usage", {}).get("completion_tokens", 0)
            model = completion_response.get("model", model)
            size = completion_response.get("_hidden_params", {}).get("optional_params", {}).get("size", size)
            quality = completion_response.get("_hidden_params", {}).get("optional_params", {}).get("quality", quality)
            n = completion_response.get("_hidden_params", {}).get("optional_params", {}).get("n", n)

        # Handle manual token counting
        else:
            if messages:
                prompt_tokens = token_counter(model=model, messages=messages)
            elif prompt:
                prompt_tokens = token_counter(model=model, text=prompt)
            completion_tokens = token_counter(model=model, text=completion)

        # Ensure model is valid
        if not model:
            raise ValueError("Model is not defined for cost calculation.")

        # Image generation cost calculation
        if call_type in ["image_generation", "aimage_generation"]:
            if size and "x" in size and "-x-" not in size:
                size = size.replace("x", "-x-")
            height, width = map(int, size.split("-x-"))
            return (
                litellm.model_cost[f"{size}/{model}"]["input_cost_per_pixel"]
                * height * width * (n or 1)
            )

        # Regular completion cost calculation
        prompt_cost, completion_cost = cost_per_token(
            model=model,
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            response_time_ms=total_time,
        )
        return prompt_cost + completion_cost
    except Exception as e:
        print(f"Error calculating cost: {e}")
        return 0.0
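A usage sketch of the manual token-counting path (no response object), continuing the hypothetical `llm` from the earlier sketch; tokens are counted from the raw text:

```python
usd = llm.completion_cost(prompt="Hello!", completion="Hi there, how can I help?")
print(f"Estimated cost: ${usd:.6f}")
```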

BaseLLM

BaseLLM(config: LLMConfig, **kwargs)

Bases: ABC

Abstract base class for Large Language Model implementations.

This class defines the interface that all LLM implementations must follow, providing methods for generating text, formatting messages, and parsing output.

Attributes:

| Name | Description |
|------|-------------|
| `config` | Configuration for the LLM. |
| `kwargs` | Additional keyword arguments provided during initialization. |

Initializes the LLM with configuration.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `config` | `LLMConfig` | Configuration object for the LLM. | *required* |
| `**kwargs` | `Any` | Additional keyword arguments. | `{}` |
Source code in evoagentx/models/base_model.py
def __init__(self, config: LLMConfig, **kwargs):
    """Initializes the LLM with configuration.

    Args:
        config: Configuration object for the LLM.
        **kwargs (Any): Additional keyword arguments.
    """
    self.config = config
    self.kwargs = kwargs
    self.init_model()

init_model abstractmethod

init_model()

Initializes the underlying model.

This method should be implemented by subclasses to set up the actual LLM.

Source code in evoagentx/models/base_model.py
@abstractmethod
def init_model(self):
    """Initializes the underlying model.

    This method should be implemented by subclasses to set up the actual LLM.
    """
    pass

__deepcopy__

__deepcopy__(memo) -> BaseLLM

Handles deep copying of the LLM instance.

Returns the same instance when deepcopy is called, as LLM instances often cannot be meaningfully deep-copied.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `memo` | `Dict[int, Any]` | Memo dictionary used by the deepcopy process. | *required* |

Returns:

| Type | Description |
|------|-------------|
| `BaseLLM` | The same LLM instance. |

Source code in evoagentx/models/base_model.py
def __deepcopy__(self, memo) -> "BaseLLM":
    """Handles deep copying of the LLM instance.

    Returns the same instance when deepcopy is called, as LLM instances
    often cannot be meaningfully deep-copied.

    Args:
        memo (Dict[int, Any]): Memo dictionary used by the deepcopy process.

    Returns:
        The same LLM instance.
    """
    # return the same instance when deepcopy
    memo[id(self)] = self
    return self

formulate_messages abstractmethod

formulate_messages(prompts: List[str], system_messages: Optional[List[str]] = None) -> List[List[dict]]

Converts input prompts into the chat format compatible with different LLMs.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `prompts` | `List[str]` | A list of user prompts that need to be converted. | *required* |
| `system_messages` | `Optional[List[str]]` | An optional list of system messages that provide instructions or context to the model. | `None` |

Returns:

| Type | Description |
|------|-------------|
| `List[List[dict]]` | A list of message lists, where each inner list contains messages in the chat format required by LLMs. |

Source code in evoagentx/models/base_model.py
@abstractmethod
def formulate_messages(self, prompts: List[str], system_messages: Optional[List[str]] = None) -> List[List[dict]]:
    """Converts input prompts into the chat format compatible with different LLMs.

    Args:
        prompts: A list of user prompts that need to be converted.
        system_messages: An optional list of system messages that provide instructions or context to the model.

    Returns:
        A list of message lists, where each inner list contains messages in the chat format required by LLMs. 
    """
    pass

single_generate abstractmethod

single_generate(messages: List[dict], **kwargs) -> str

Generates LLM output for a single set of messages.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `messages` | `List[dict]` | The input messages to the LLM in chat format. | *required* |
| `**kwargs` | `Any` | Additional keyword arguments for generation settings. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `str` | The generated output text from the LLM. |

Source code in evoagentx/models/base_model.py
@abstractmethod
def single_generate(self, messages: List[dict], **kwargs) -> str:
    """Generates LLM output for a single set of messages.

    Args:
        messages: The input messages to the LLM in chat format.
        **kwargs (Any): Additional keyword arguments for generation settings.

    Returns:
        The generated output text from the LLM.
    """
    pass

batch_generate abstractmethod

batch_generate(batch_messages: List[List[dict]], **kwargs) -> List[str]

Generates outputs for a batch of message sets.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `batch_messages` | `List[List[dict]]` | A list of message lists, where each inner list contains messages for a single generation. | *required* |
| `**kwargs` | `Any` | Additional keyword arguments for generation settings. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `List[str]` | A list of generated outputs from the LLM, one for each input message set. |

Source code in evoagentx/models/base_model.py
@abstractmethod
def batch_generate(self, batch_messages: List[List[dict]], **kwargs) -> List[str]:
    """Generates outputs for a batch of message sets.

    Args: 
        batch_messages: A list of message lists, where each inner list contains messages for a single generation.
        **kwargs (Any): Additional keyword arguments for generation settings.

    Returns:
        A list of generated outputs from the LLM, one for each input message set.
    """
    pass

single_generate_async async

single_generate_async(messages: List[dict], **kwargs) -> str

Asynchronously generates LLM output for a single set of messages.

This default implementation wraps the synchronous method in an async executor. Subclasses should override this for true async implementation if supported.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `messages` | `List[dict]` | The input messages to the LLM in chat format. | *required* |
| `**kwargs` | `Any` | Additional keyword arguments for generation settings. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `str` | The generated output text from the LLM. |

Source code in evoagentx/models/base_model.py
async def single_generate_async(self, messages: List[dict], **kwargs) -> str:
    """Asynchronously generates LLM output for a single set of messages.

    This default implementation wraps the synchronous method in an async executor.
    Subclasses should override this for true async implementation if supported.

    Args:
        messages: The input messages to the LLM in chat format.
        **kwargs (Any): Additional keyword arguments for generation settings.

    Returns:
        The generated output text from the LLM.
    """
    # Default implementation for backward compatibility.
    # run_in_executor forwards only positional arguments, so kwargs must be
    # bound with functools.partial (requires `import functools` in this module).
    loop = asyncio.get_event_loop()
    result = await loop.run_in_executor(
        None, functools.partial(self.single_generate, messages, **kwargs)
    )
    return result

batch_generate_async async

batch_generate_async(batch_messages: List[List[dict]], **kwargs) -> List[str]

Asynchronously generates outputs for a batch of message sets.

This default implementation runs each generation as a separate async task. Subclasses should override this for more efficient async batching if supported.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `batch_messages` | `List[List[dict]]` | A list of message lists, where each inner list contains messages for a single generation. | *required* |
| `**kwargs` | `Any` | Additional keyword arguments for generation settings. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `List[str]` | A list of generated outputs from the LLM, one for each input message set. |

Source code in evoagentx/models/base_model.py
async def batch_generate_async(self, batch_messages: List[List[dict]], **kwargs) -> List[str]:
    """Asynchronously generates outputs for a batch of message sets.

    This default implementation runs each generation as a separate async task.
    Subclasses should override this for more efficient async batching if supported.

    Args: 
        batch_messages: A list of message lists, where each inner list contains messages for a single generation.
        **kwargs (Any): Additional keyword arguments for generation settings.

    Returns:
        A list of generated outputs from the LLM, one for each input message set.
    """
    # Default implementation for backward compatibility
    tasks = [self.single_generate_async(messages, **kwargs) for messages in batch_messages]
    return await asyncio.gather(*tasks)

parse_generated_text

parse_generated_text(text: str, parser: Optional[Type[LLMOutputParser]] = None, parse_mode: Optional[str] = 'json', parse_func: Optional[Callable] = None, **kwargs) -> LLMOutputParser

Parses generated text into a structured output using a parser.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `text` | `str` | The text generated by the LLM. | *required* |
| `parser` | `Optional[Type[LLMOutputParser]]` | An LLMOutputParser class to use for parsing. If None, the default LLMOutputParser is used. | `None` |
| `parse_mode` | `Optional[str]` | The parsing mode; must be a `parse_mode` supported by the `parser`. | `'json'` |
| `parse_func` | `Optional[Callable]` | The function used to parse the content; only valid when `parse_mode` is `'custom'`. | `None` |
| `**kwargs` | `Any` | Additional arguments passed to the parser. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `LLMOutputParser` | An LLMOutputParser instance containing the parsed data. |

Source code in evoagentx/models/base_model.py
def parse_generated_text(self, text: str, parser: Optional[Type[LLMOutputParser]]=None, parse_mode: Optional[str] = "json", parse_func: Optional[Callable] = None, **kwargs) -> LLMOutputParser:
    """Parses generated text into a structured output using a parser.

    Args: 
        text: The text generated by the LLM.
        parser: An LLMOutputParser class to use for parsing. If None, the default LLMOutputParser is used.
        parse_mode: The mode to use for parsing, must be the `parse_mode` supported by the `parser`. 
        **kwargs (Any): Additional arguments passed to the parser.

    Returns:
        An LLMOutputParser instance containing the parsed data.
    """
    if not parser:
        parser = LLMOutputParser
    return parser.parse(text, parse_mode=parse_mode, parse_func=parse_func)

parse_generated_texts

parse_generated_texts(texts: List[str], parser: Optional[Type[LLMOutputParser]] = None, parse_mode: Optional[str] = 'json', parse_func: Optional[Callable] = None, **kwargs) -> List[LLMOutputParser]

Parses multiple generated texts into structured outputs.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `texts` | `List[str]` | A list of texts generated by the LLM. | *required* |
| `parser` | `Optional[Type[LLMOutputParser]]` | An LLMOutputParser class to use for parsing. | `None` |
| `parse_mode` | `Optional[str]` | The parsing mode; must be a `parse_mode` supported by the `parser`. | `'json'` |
| `parse_func` | `Optional[Callable]` | The function used to parse the content; only valid when `parse_mode` is `'custom'`. | `None` |
| `**kwargs` | `Any` | Additional arguments passed to the parser. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `List[LLMOutputParser]` | A list of LLMOutputParser instances containing the parsed data. |

Source code in evoagentx/models/base_model.py
def parse_generated_texts(self, texts: List[str], parser: Optional[Type[LLMOutputParser]]=None, parse_mode: Optional[str] = "json", parse_func: Optional[Callable] = None, **kwargs) -> List[LLMOutputParser]:
    """Parses multiple generated texts into structured outputs.

    Args:
        texts: A list of texts generated by the LLM.
        parser: An LLMOutputParser class to use for parsing.
        parse_mode: The mode to use for parsing, must be the `parse_mode` supported by the `parser`. 
        **kwargs (Any): Additional arguments passed to the parser.

    Returns:
        A list of LLMOutputParser instances containing the parsed data.
    """
    parsed_results = [self.parse_generated_text(text=text, parser=parser, parse_mode=parse_mode, parse_func=parse_func, **kwargs) for text in texts]
    return parsed_results

generate

generate(prompt: Optional[Union[str, List[str]]] = None, system_message: Optional[Union[str, List[str]]] = None, messages: Optional[Union[List[dict], List[List[dict]]]] = None, parser: Optional[Type[LLMOutputParser]] = None, parse_mode: Optional[str] = 'json', parse_func: Optional[Callable] = None, **kwargs) -> Union[LLMOutputParser, List[LLMOutputParser]]

Generates LLM output(s) and parses the result(s).

This is the main method for generating text with the LLM. It handles both single and batch generation, and automatically parses the outputs.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `prompt` | `Optional[Union[str, List[str]]]` | Input prompt(s) to the LLM. | `None` |
| `system_message` | `Optional[Union[str, List[str]]]` | System message(s) for the LLM. | `None` |
| `messages` | `Optional[Union[List[dict], List[List[dict]]]]` | Chat message(s) for the LLM, already in the required format (either `prompt` or `messages` must be provided). | `None` |
| `parser` | `Optional[Type[LLMOutputParser]]` | Parser class to use for processing the output. | `None` |
| `parse_mode` | `Optional[str]` | The parsing mode; must be a `parse_mode` supported by the `parser`. | `'json'` |
| `parse_func` | `Optional[Callable]` | The function used to parse the content; only valid when `parse_mode` is `'custom'`. | `None` |
| `**kwargs` | `Any` | Additional generation configuration parameters. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `Union[LLMOutputParser, List[LLMOutputParser]]` | For single generation, an LLMOutputParser instance; for batch generation, a list of LLMOutputParser instances. |

Raises:

| Type | Description |
|------|-------------|
| `ValueError` | If the input format is invalid. |

Note

Either `prompt` or `messages` must be provided. If both or neither is provided, an error will be raised.

Source code in evoagentx/models/base_model.py
def generate(
    self,
    prompt: Optional[Union[str, List[str]]] = None,
    system_message: Optional[Union[str, List[str]]] = None,
    messages: Optional[Union[List[dict],List[List[dict]]]] = None,
    parser: Optional[Type[LLMOutputParser]] = None,
    parse_mode: Optional[str] = "json", 
    parse_func: Optional[Callable] = None,
    **kwargs
) -> Union[LLMOutputParser, List[LLMOutputParser]]:
    """Generates LLM output(s) and parses the result(s).

    This is the main method for generating text with the LLM. It handles both
    single and batch generation, and automatically parses the outputs.

    Args:
        prompt: Input prompt(s) to the LLM.
        system_message: System message(s) for the LLM.
        messages: Chat message(s) for the LLM, already in the required format (either `prompt` or `messages` must be provided).
        parser: Parser class to use for processing the output.
        parse_mode: The mode to use for parsing, must be the `parse_mode` supported by the `parser`. 
        **kwargs (Any): Additional generation configuration parameters.

    Returns:
        For single generation: An LLMOutputParser instance.
        For batch generation: A list of LLMOutputParser instances.

    Raises:
        ValueError: If the input format is invalid.

    Note:
        Either prompt or messages must be provided. If both or neither is provided,
        an error will be raised.
    """
    prepared_messages, single_generate = self._prepare_messages(prompt, system_message, messages)
    if not prepared_messages:  # Handle empty messages case
        return []

    generated_texts = self.batch_generate(batch_messages=prepared_messages, **kwargs)
    parsed_outputs = self.parse_generated_texts(texts=generated_texts, parser=parser, parse_mode=parse_mode, parse_func=parse_func, **kwargs)
    return parsed_outputs[0] if single_generate else parsed_outputs
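A sketch of both calling styles, reusing the hypothetical `llm` and `ReviewOutput` from the earlier sketches:

```python
out = llm.generate(
    prompt="Review this change and answer in JSON with keys 'verdict' and 'summary'.",
    system_message="You are a meticulous code reviewer.",
    parser=ReviewOutput,  # hypothetical parser subclass from the earlier sketch
    parse_mode="json",
)
print(out.verdict)

# Batch form: lists in, a list of parser instances out.
outs = llm.generate(prompt=["What is 2 + 2?", "Name a prime number."], parse_mode="str")
print([o.content for o in outs])
```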

async_generate async

async_generate(prompt: Optional[Union[str, List[str]]] = None, system_message: Optional[Union[str, List[str]]] = None, messages: Optional[Union[List[dict], List[List[dict]]]] = None, parser: Optional[Type[LLMOutputParser]] = None, parse_mode: Optional[str] = 'json', parse_func: Optional[Callable] = None, **kwargs) -> Union[LLMOutputParser, List[LLMOutputParser]]

Asynchronously generates LLM output(s) and parses the result(s).

This is the async version of the generate method. It works identically but performs the generation asynchronously.

Source code in evoagentx/models/base_model.py
async def async_generate(
    self,
    prompt: Optional[Union[str, List[str]]] = None,
    system_message: Optional[Union[str, List[str]]] = None,
    messages: Optional[Union[List[dict],List[List[dict]]]] = None,
    parser: Optional[Type[LLMOutputParser]] = None,
    parse_mode: Optional[str] = "json", 
    parse_func: Optional[Callable] = None,
    **kwargs
) -> Union[LLMOutputParser, List[LLMOutputParser]]:
    """Asynchronously generates LLM output(s) and parses the result(s).

    This is the async version of the generate method. It works identically but
    performs the generation asynchronously.
    """
    prepared_messages, single_generate = self._prepare_messages(prompt, system_message, messages)
    if not prepared_messages:  # Handle empty messages case
        return []

    generated_texts = await self.batch_generate_async(batch_messages=prepared_messages, **kwargs)
    parsed_outputs = self.parse_generated_texts(texts=generated_texts, parser=parser, parse_mode=parse_mode, parse_func=parse_func, **kwargs)
    return parsed_outputs[0] if single_generate else parsed_outputs
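Inside a coroutine, the async variant is a drop-in replacement; a minimal sketch, again with the hypothetical `llm`:

```python
import asyncio

async def main():
    out = await llm.async_generate(prompt="Summarize the repo in one line.", parse_mode="str")
    print(out.content)

asyncio.run(main())
```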

OpenAILLM

OpenAILLM(config: LLMConfig, **kwargs)

Bases: BaseLLM

Source code in evoagentx/models/base_model.py
def __init__(self, config: LLMConfig, **kwargs):
    """Initializes the LLM with configuration.

    Args:
        config: Configuration object for the LLM.
        **kwargs (Any): Additional keyword arguments.
    """
    self.config = config
    self.kwargs = kwargs
    self.init_model()

get_stream_output

get_stream_output(response: Stream, output_response: bool = True) -> str

Process stream response and return the complete output.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `response` | `Stream` | The stream response from OpenAI. | *required* |
| `output_response` | `bool` | Whether to print the response in real time. | `True` |

Returns:

| Type | Description |
|------|-------------|
| `str` | The complete output text. |

Source code in evoagentx/models/openai_model.py
def get_stream_output(self, response: Stream, output_response: bool=True) -> str:
    """
    Process stream response and return the complete output.

    Args:
        response: The stream response from OpenAI
        output_response: Whether to print the response in real-time

    Returns:
        str: The complete output text
    """
    output = ""
    for chunk in response:
        content = chunk.choices[0].delta.content
        if content:
            if output_response:
                print(content, end="", flush=True)
            output += content
    if output_response:
        print("")
    return output

get_stream_output_async async

get_stream_output_async(response, output_response: bool = False) -> str

Process async stream response and return the complete output.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `response` | `AsyncIterator[ChatCompletionChunk]` | The async stream response from OpenAI. | *required* |
| `output_response` | `bool` | Whether to print the response in real time. | `False` |

Returns:

| Type | Description |
|------|-------------|
| `str` | The complete output text. |

Source code in evoagentx/models/openai_model.py
async def get_stream_output_async(self, response, output_response: bool = False) -> str:
    """
    Process async stream response and return the complete output.

    Args:
        response (AsyncIterator[ChatCompletionChunk]): The async stream response from OpenAI
        output_response (bool): Whether to print the response in real-time


    Returns:
        str: The complete output text
    """
    output = ""
    async for chunk in response:
        content = chunk.choices[0].delta.content
        if content:
            if output_response:
                print(content, end="", flush=True)
            output += content
    if output_response:
        print("")
    return output

atomic_method

atomic_method(func)

Thread-safe decorator for class methods. If the instance has a `_lock` attribute, the method body runs while holding that lock; otherwise it runs under a `nullcontext`.

Source code in evoagentx/core/decorators.py
def atomic_method(func):
    """
    threading safe decorator for class methods. 
    If there are self._lock in the instance, it will use the lock. Otherwise, use nullcontext for execution.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        context = getattr(self, "_lock", nullcontext())
        with context:
            return func(self, *args, **kwargs)
    return wrapper
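A usage sketch: store a `threading.Lock` on `self._lock` and decorated methods serialize across threads; without the attribute, the `nullcontext` fallback leaves them unsynchronized. The `Counter` class is hypothetical.

```python
import threading
from evoagentx.core.decorators import atomic_method  # source path shown above

class Counter:
    def __init__(self):
        self._lock = threading.Lock()  # picked up via getattr(self, "_lock", ...)
        self.value = 0

    @atomic_method
    def increment(self):
        self.value += 1  # runs while holding self._lock
```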