
Prompt Editor API

llm_ie.prompt_editor.PromptEditor

PromptEditor(
    inference_engine: InferenceEngine,
    extractor: FrameExtractor,
    prompt_guide: str = None,
)

This class is an LLM agent that rewrites or comments on a prompt draft based on the prompt guide of an extractor.

Parameters:

inference_engine : InferenceEngine, required
    The LLM inference engine object. Must implement the chat() method.
extractor : FrameExtractor, required
    A FrameExtractor.
prompt_guide : str, default None
    The prompt guide for the extractor. All built-in extractors have a prompt guide in the asset folder. Passing a value to this parameter overrides the built-in prompt guide, which is not recommended. For custom extractors, this parameter must be provided.
Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def __init__(self, inference_engine:InferenceEngine, extractor:FrameExtractor, prompt_guide:str=None):
    """
    This class is an LLM agent that rewrites or comments on a prompt draft based on the prompt guide of an extractor.

    Parameters
    ----------
    inference_engine : InferenceEngine
        the LLM inference engine object. Must implement the chat() method.
    extractor : FrameExtractor
        a FrameExtractor. 
    prompt_guide : str, optional
        the prompt guide for the extractor. 
        All built-in extractors have a prompt guide in the asset folder. Passing a value to this parameter
        overrides the built-in prompt guide, which is not recommended.
        For custom extractors, this parameter must be provided.
    """
    self.inference_engine = inference_engine

    # if prompt_guide is provided, always use it
    if prompt_guide:
        self.prompt_guide = prompt_guide
    # if prompt_guide is not provided, get it from the extractor
    else:
        self.prompt_guide = extractor.get_prompt_guide()
        # when extractor does not have a prompt guide (e.g. custom extractor), ValueError
        if self.prompt_guide is None:
            raise ValueError(f"Prompt guide for {extractor.__class__.__name__} is not available. Use `prompt_guide` parameter to provide a prompt guide.")

    # get system prompt
    file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('system.txt')
    with open(file_path, 'r') as f:
        self.system_prompt = f.read()
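
A minimal construction sketch, assuming an Ollama-backed engine and the built-in BasicFrameExtractor (the class names, import paths, and model name below are illustrative assumptions; substitute whichever InferenceEngine implementation and extractor you actually use):

from llm_ie.engines import OllamaInferenceEngine   # assumed import path; any engine implementing chat() works
from llm_ie.extractors import BasicFrameExtractor  # assumed built-in extractor that ships a prompt guide
from llm_ie.prompt_editor import PromptEditor

engine = OllamaInferenceEngine(model_name="llama3.1:8b-instruct-q8_0")  # illustrative model

# Only get_prompt_guide() is consumed from the extractor; built-in extractors
# ship a prompt guide, so prompt_guide can be omitted here.
editor = PromptEditor(inference_engine=engine, extractor=BasicFrameExtractor)

For a custom extractor without a bundled prompt guide, pass the guideline text through prompt_guide explicitly; otherwise __init__ raises a ValueError.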

rewrite

rewrite(draft: str) -> str

This method takes a prompt draft and rewrites it following the extractor's prompt guideline.

Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def rewrite(self, draft:str) -> str:
    """
    This method takes a prompt draft and rewrites it following the extractor's prompt guideline.
    """
    file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('rewrite.txt')
    with open(file_path, 'r') as f:
        rewrite_prompt_template = f.read()

    prompt = self._apply_prompt_template(text_content={"draft": draft, "prompt_guideline": self.prompt_guide}, 
                                         prompt_template=rewrite_prompt_template)
    messages = [{"role": "system", "content": self.system_prompt},
                {"role": "user", "content": prompt}]
    res = self.inference_engine.chat(messages, verbose=True)
    return res
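
For example, a rough task description can be rewritten into a guide-conformant prompt (a sketch; the draft text is illustrative and editor is the instance constructed above):

draft = "Extract medication names and their dosages from the clinical note."
better_prompt = editor.rewrite(draft)  # returns the rewritten prompt as a string
print(better_prompt)

Because the underlying chat() call passes verbose=True, engines that support it also echo the response as it is generated.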

comment

comment(draft: str) -> str

This method takes a prompt draft and comments on it following the extractor's prompt guideline.

Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def comment(self, draft:str) -> str:
    """
    This method takes a prompt draft and comments on it following the extractor's prompt guideline.
    """
    file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('comment.txt')
    with open(file_path, 'r') as f:
        comment_prompt_template = f.read()

    prompt = self._apply_prompt_template(text_content={"draft": draft, "prompt_guideline": self.prompt_guide}, 
                                         prompt_template=comment_prompt_template)
    messages = [{"role": "system", "content": self.system_prompt},
                {"role": "user", "content": prompt}]
    res = self.inference_engine.chat(messages, verbose=True)
    return res
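
To get feedback on a draft without rewriting it (sketch, reusing the draft and editor from the examples above):

feedback = editor.comment(draft)  # critique of the draft against the extractor's prompt guide
print(feedback)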

chat

chat()

External method that detects the environment and calls the appropriate chat method.

Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def chat(self):
    """
    External method that detects the environment and calls the appropriate chat method.
    """
    if 'ipykernel' in sys.modules:
        self._IPython_chat()
    else:
        self._terminal_chat()
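
The method takes no arguments; a single call starts the interactive session (sketch):

editor.chat()  # IPython chat loop when run under Jupyter (ipykernel), terminal chat loop otherwise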

chat_stream

chat_stream(
    messages: List[Dict[str, str]],
) -> Generator[str, None, None]

This method processes messages and yields response chunks from the inference engine. It is intended for the frontend app.

Parameters:

messages : List[Dict[str, str]]
    List of message dictionaries (e.g., [{"role": "user", "content": "Hi"}]).

Yields:

Chunks of the assistant's response.
Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def chat_stream(self, messages: List[Dict[str, str]]) -> Generator[str, None, None]:
    """
    This method processes messages and yields response chunks from the inference engine.
    It is intended for the frontend app.

    Parameters:
    ----------
    messages : List[Dict[str, str]]
        List of message dictionaries (e.g., [{"role": "user", "content": "Hi"}]).

    Yields:
    -------
        Chunks of the assistant's response.
    """
    # Validate messages
    if not isinstance(messages, list) or not all(isinstance(m, dict) and 'role' in m and 'content' in m for m in messages):
         raise ValueError("Messages must be a list of dictionaries with 'role' and 'content' keys.")

    # Always append system prompt and initial user message
    file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('chat.txt')
    with open(file_path, 'r') as f:
        chat_prompt_template = f.read()

    prompt = self._apply_prompt_template(text_content={"prompt_guideline": self.prompt_guide}, 
                                        prompt_template=chat_prompt_template)

    messages = [{"role": "system", "content": self.system_prompt},
                {"role": "user", "content": prompt}] + messages


    stream_generator = self.inference_engine.chat(messages, stream=True)
    yield from stream_generator
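
A minimal consumption sketch (for example, inside a web backend handler); per the signature, each yielded chunk is a string fragment:

history = [{"role": "user", "content": "Help me draft a prompt for extracting adverse drug events."}]
for chunk in editor.chat_stream(history):
    print(chunk, end="", flush=True)  # forward each chunk to the client as it arrives

Note that chat_stream() prepends the system prompt and the chat guideline message itself, so callers only pass the running user/assistant history.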