autorag.nodes.generator package

Submodules

autorag.nodes.generator.base module

class autorag.nodes.generator.base.BaseGenerator(project_dir: str, llm: str, *args, **kwargs)[source]

Bases: BaseModule

abstract async astream(prompt: str, **kwargs)[source]
cast_to_run(previous_result: DataFrame, *args, **kwargs)[source]

This function serves as the cast function (i.e., a decorator) applied only to the pure function of the whole node.

abstract stream(prompt: str, **kwargs)[source]
structured_output(prompts: List[str], output_cls)[source]
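
The abstract methods above define the contract that the concrete generators in this package fill in. A rough sketch of a custom subclass, where EchoGenerator, its echo behaviour, and the omitted return shape of pure are all invented for illustration and may not match the real contract:

    import pandas as pd
    from autorag.nodes.generator.base import BaseGenerator

    class EchoGenerator(BaseGenerator):  # hypothetical subclass, for illustration only
        def pure(self, previous_result: pd.DataFrame, *args, **kwargs):
            # the concrete generators below override pure to run an LLM over
            # the prompts in previous_result; the exact return shape is omitted here
            raise NotImplementedError

        def stream(self, prompt: str, **kwargs):
            # placeholder synchronous streaming: emit the prompt back as a single chunk
            yield f"echo: {prompt}"

        async def astream(self, prompt: str, **kwargs):
            # placeholder asynchronous streaming counterpart
            yield f"echo: {prompt}"
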
autorag.nodes.generator.base.generator_node(func)[source]

autorag.nodes.generator.llama_index_llm module

class autorag.nodes.generator.llama_index_llm.LlamaIndexLLM(project_dir: str, llm: str, batch: int = 16, *args, **kwargs)[source]

Bases: BaseGenerator

async astream(prompt: str, **kwargs)[source]
pure(previous_result: DataFrame, *args, **kwargs)[source]
stream(prompt: str, **kwargs)[source]
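
A hedged standalone-use sketch; it assumes the constructor takes a llama_index LLM name plus model keyword arguments (the "openai" and "gpt-4o-mini" values are only examples) and that pure reads its prompts from a "prompts" column of previous_result, which is not stated in the listing above:

    import pandas as pd
    from autorag.nodes.generator.llama_index_llm import LlamaIndexLLM

    # illustrative constructor arguments; the real set of keyword arguments may differ
    generator = LlamaIndexLLM(project_dir="./my_project", llm="openai",
                              model="gpt-4o-mini", batch=16)

    # assumed input shape: the previous node's result with a "prompts" column
    previous_result = pd.DataFrame({"prompts": ["Answer briefly: what is RAG?"]})
    result_df = generator.pure(previous_result)
    print(result_df.head())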

autorag.nodes.generator.openai_llm module

class autorag.nodes.generator.openai_llm.OpenAILLM(project_dir, llm: str, batch: int = 16, *args, **kwargs)[source]

Bases: BaseGenerator

async astream(prompt: str, **kwargs)[source]
async get_result(prompt: str, **kwargs)[source]
async get_result_o1(prompt: str, **kwargs)[source]
async get_structured_result(prompt: str, output_cls, **kwargs)[source]
pure(previous_result: DataFrame, *args, **kwargs)[source]
stream(prompt: str, **kwargs)[source]
structured_output(prompts: List[str], output_cls, **kwargs)[source]
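
A hedged sketch of structured and streaming generation with this class; it assumes an OPENAI_API_KEY is set, that output_cls is a Pydantic model, and that "gpt-4o-mini" is a valid value for llm, none of which is guaranteed by the listing above:

    import asyncio
    from pydantic import BaseModel
    from autorag.nodes.generator.openai_llm import OpenAILLM

    class ShortAnswer(BaseModel):  # hypothetical schema for the structured output
        answer: str

    async def main():
        # "gpt-4o-mini" is an illustrative model name
        generator = OpenAILLM(project_dir="./my_project", llm="gpt-4o-mini", batch=16)

        # get_structured_result is listed as async above; one prompt in, one parsed object out (assumed)
        parsed = await generator.get_structured_result(
            "In one sentence, what does a generator node do?", output_cls=ShortAnswer)
        print(parsed)

        # astream is also async; it is assumed here to yield partial text chunks
        async for chunk in generator.astream("Summarize the retrieved passages."):
            print(chunk)

    asyncio.run(main())
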
autorag.nodes.generator.openai_llm.truncate_by_token(prompt: str, tokenizer: Encoding, max_token_size: int)[source]
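
The tokenizer argument is a tiktoken Encoding. A small sketch, assuming the function returns the prompt cut down to at most max_token_size tokens; the encoding choice below is illustrative:

    import tiktoken
    from autorag.nodes.generator.openai_llm import truncate_by_token

    encoding = tiktoken.get_encoding("cl100k_base")
    long_prompt = "a prompt that might exceed the model's context window " * 1000
    safe_prompt = truncate_by_token(prompt=long_prompt, tokenizer=encoding,
                                    max_token_size=4096)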

autorag.nodes.generator.run module

autorag.nodes.generator.vllm module

class autorag.nodes.generator.vllm.Vllm(project_dir: str, llm: str, **kwargs)[source]

Bases: BaseGenerator

async astream(prompt: str, **kwargs)[source]
pure(previous_result: DataFrame, *args, **kwargs)[source]
stream(prompt: str, **kwargs)[source]
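
A hedged standalone-use sketch; it assumes a GPU machine with vLLM installed, that llm is a Hugging Face model id, that extra keyword arguments are forwarded to the vLLM engine, and that pure reads prompts from a "prompts" column, none of which is stated in the listing above:

    import pandas as pd
    from autorag.nodes.generator.vllm import Vllm

    # model id and sampling kwargs are illustrative; running this needs a GPU with vLLM installed
    generator = Vllm(project_dir="./my_project",
                     llm="mistralai/Mistral-7B-Instruct-v0.2",
                     max_tokens=256, temperature=0.7)

    previous_result = pd.DataFrame({"prompts": ["Answer briefly: what is RAG?"]})
    result_df = generator.pure(previous_result)
    print(result_df.head())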

Module contents