Source code for autorag.nodes.generator.base

import functools
import logging
from pathlib import Path
from typing import Union, Tuple, List

import pandas as pd

from autorag import generator_models
from autorag.utils import result_to_dataframe

logger = logging.getLogger("AutoRAG")


def generator_node(func):
    @functools.wraps(func)
    @result_to_dataframe(["generated_texts", "generated_tokens", "generated_log_probs"])
    def wrapper(
        project_dir: Union[str, Path], previous_result: pd.DataFrame, llm: str, **kwargs
    ) -> Tuple[List[str], List[List[int]], List[List[float]]]:
        """
        This decorator turns a generator module into a node.
        It automatically extracts prompts from previous_result and runs the generator function.
        Plus, it retrieves the llm instance from autorag.generator_models.

        :param project_dir: The project directory.
        :param previous_result: The previous result that contains prompts.
        :param llm: The llm name that you want to use.
        :param kwargs: The extra parameters for initializing the llm instance.
        :return: Pandas dataframe that contains generated texts, generated tokens, and generated log probs.
            Each column is "generated_texts", "generated_tokens", and "generated_log_probs".
        """
        logger.info(f"Running generator node - {func.__name__} module...")
        assert (
            "prompts" in previous_result.columns
        ), "previous_result must contain prompts column."
        prompts = previous_result["prompts"].tolist()
        if func.__name__ == "llama_index_llm":
            # llama_index_llm receives an instantiated LLM object looked up by name.
            if llm not in generator_models:
                raise ValueError(
                    f"{llm} is not a valid llm name. Please check the llm name. "
                    "You can check valid llm names from autorag.generator_models."
                )
            batch = kwargs.pop("batch", 16)
            if llm == "huggingfacellm":
                # HuggingFaceLLM expects `model_name`, so map a user-supplied `model` to it
                # and reuse the same name for the tokenizer.
                model_name = kwargs.pop("model", None)
                if model_name is not None:
                    kwargs["model_name"] = model_name
                else:
                    if "model_name" not in kwargs.keys():
                        raise ValueError(
                            "`model` or `model_name` parameter must be provided for using huggingfacellm."
                        )
                kwargs["tokenizer_name"] = kwargs["model_name"]
            llm_instance = generator_models[llm](**kwargs)
            result = func(prompts=prompts, llm=llm_instance, batch=batch)
            del llm_instance
            return result
        else:
            # Other generator modules receive the llm name as-is and handle it themselves.
            return func(prompts=prompts, llm=llm, **kwargs)

    return wrapper
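As a usage sketch, the decorator turns a plain generation function into a node that consumes the "prompts" column of previous_result and returns a DataFrame. The module echo_generator below, along with its dummy token ids and log probs, is a hypothetical illustration and not part of AutoRAG:

import pandas as pd
from autorag.nodes.generator.base import generator_node


@generator_node
def echo_generator(prompts, llm, **kwargs):
    # Hypothetical module: echoes each prompt back as the "generated" text.
    # A real module would call an LLM; token ids and log probs here are placeholders.
    texts = [f"[{llm}] {p}" for p in prompts]
    tokens = [[0] * len(t.split()) for t in texts]
    log_probs = [[0.0] * len(tok) for tok in tokens]
    return texts, tokens, log_probs


previous_result = pd.DataFrame({"prompts": ["What is RAG?", "Explain AutoRAG."]})
result_df = echo_generator(
    project_dir="./project", previous_result=previous_result, llm="mock-llm"
)
# result_df has columns: generated_texts, generated_tokens, generated_log_probs

Because echo_generator is not named llama_index_llm, the wrapper passes the llm argument through as a plain string; only llama_index_llm triggers the lookup and instantiation from autorag.generator_models.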