# Imports reconstructed for this excerpt; exact module paths may vary by version.
from typing import List, Optional

from llama_index.core.prompts import PromptTemplate
from llama_index.core.prompts.prompt_type import PromptType
from llama_index.core.response_synthesizers import Refine as rf  # aliased to avoid clashing with this class

# is_chat_model, get_event_loop, and process_batch are helpers assumed to come
# from the surrounding package's utility module.


class Refine(LlamaIndexCompressor):
    def _pure(
        self,
        queries: List[str],
        contents: List[List[str]],
        prompt: Optional[str] = None,
        chat_prompt: Optional[str] = None,
        batch: int = 16,
    ) -> List[str]:
        """
        Refine a response to a query across text chunks.

        This function is a wrapper for llama_index.response_synthesizers.Refine.
        For more information, visit
        https://docs.llamaindex.ai/en/stable/examples/response_synthesizers/refine/.

        :param queries: The queries for retrieved passages.
        :param contents: The contents of retrieved passages.
        :param prompt: The prompt template for refine.
            If you want to use a chat prompt, pass chat_prompt instead.
            The prompt must specify where to put 'context_msg' and 'query_str'.
            Default is None. When None, the llama index default prompt is used.
        :param chat_prompt: The chat prompt template for refine.
            If you want to use a normal prompt, pass prompt instead.
            The prompt must specify where to put 'context_msg' and 'query_str'.
            Default is None. When None, the llama index default chat prompt is used.
        :param batch: The batch size for the llm. Set it low if you face errors.
            Default is 16.
        :return: The list of compressed texts.
        """
        # Pick the refine template that matches the llm type: a plain prompt
        # for completion models, a chat prompt for chat models.
        if prompt is not None and not is_chat_model(self.llm):
            refine_template = PromptTemplate(prompt, prompt_type=PromptType.REFINE)
        elif chat_prompt is not None and is_chat_model(self.llm):
            refine_template = PromptTemplate(chat_prompt, prompt_type=PromptType.REFINE)
        else:
            refine_template = None  # fall back to the llama index default template
        summarizer = rf(llm=self.llm, refine_template=refine_template, verbose=True)
        # One asynchronous refine call per query, batched to limit the number
        # of concurrent llm requests.
        tasks = [
            summarizer.aget_response(query, content)
            for query, content in zip(queries, contents)
        ]
        loop = get_event_loop()
        results = loop.run_until_complete(process_batch(tasks, batch_size=batch))
        return results
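A minimal usage sketch, assuming a Refine compressor has already been constructed with a concrete llama_index LLM; the `compressor` variable, the queries, and the example template below are illustrative, not part of this excerpt:

# Usage sketch -- hypothetical values throughout; `compressor` stands in for a
# Refine instance built with a real llama_index LLM.
queries = ["What is the capital of France?"]
contents = [[
    "Paris is the capital and largest city of France.",
    "France is a country in Western Europe.",
]]

# A custom (non-chat) refine template; it must contain both placeholders
# named in the docstring: 'context_msg' and 'query_str'.
template = (
    "Context:\n{context_msg}\n"
    "Question: {query_str}\n"
    "Answer concisely using only the context above."
)

compressed = compressor._pure(queries, contents, prompt=template, batch=4)
print(compressed[0])  # one compressed text per query

Because the template is passed as `prompt` (not `chat_prompt`), it is only applied when `is_chat_model(self.llm)` is False; with a chat model, pass the template via `chat_prompt` instead.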