Prompt Editor API

llm_ie.prompt_editor.PromptEditor

PromptEditor(
    inference_engine: InferenceEngine,
    extractor: FrameExtractor,
    prompt_guide: str = None,
)

This class is an LLM agent that rewrites or comments on a prompt draft based on the prompt guide of an extractor.

Parameters:

Name Type Description Default
inference_engine InferenceEngine

the LLM inferencing engine object. Must implement the chat() method.

required
extractor FrameExtractor

a FrameExtractor.

required
prompt_guide str

the prompt guide for the extractor. All built-in extractors have a prompt guide in the asset folder. Passing a value to this parameter overrides the built-in prompt guide, which is not recommended. For custom extractors, this parameter must be provided.

None
Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def __init__(self, inference_engine:InferenceEngine, extractor:FrameExtractor, prompt_guide:str=None):
    """
    This class is an LLM agent that rewrites or comments on a prompt draft based on the prompt guide of an extractor.

    Parameters
    ----------
    inference_engine : InferenceEngine
        the LLM inferencing engine object. Must implement the chat() method.
    extractor : FrameExtractor
        a FrameExtractor. 
    prompt_guide : str, optional
        the prompt guide for the extractor. 
        All built-in extractors have a prompt guide in the asset folder. Passing a value to this parameter
        overrides the built-in prompt guide, which is not recommended.
        For custom extractors, this parameter must be provided.
    """
    self.inference_engine = inference_engine

    # if prompt_guide is provided, use it anyway
    if prompt_guide:
        self.prompt_guide = prompt_guide
    # if prompt_guide is not provided, get it from the extractor
    else:
        self.prompt_guide = extractor.get_prompt_guide()
        # when extractor does not have a prompt guide (e.g. custom extractor), ValueError
        if self.prompt_guide is None:
            raise ValueError(f"Prompt guide for {extractor.__class__.__name__} is not available. Use `prompt_guide` parameter to provide a prompt guide.")

    # get system prompt
    file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('system.txt')
    with open(file_path, 'r') as f:
        self.system_prompt = f.read()

    # internal memory (history messages) for the `chat` method
    self.messages = []
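
A minimal construction sketch is shown below. The engine and extractor are placeholders: substitute the concrete InferenceEngine and FrameExtractor subclasses used in your project; only the PromptEditor call itself reflects the signature documented above.

from llm_ie.prompt_editor import PromptEditor

# Placeholders: replace with the concrete InferenceEngine and
# FrameExtractor subclasses available in your environment.
inference_engine = ...  # any InferenceEngine that implements chat()
extractor = ...         # any built-in FrameExtractor (ships with a prompt guide)

editor = PromptEditor(inference_engine=inference_engine, extractor=extractor)

# For a custom extractor without a bundled prompt guide, pass one explicitly:
# editor = PromptEditor(inference_engine, extractor, prompt_guide=my_guide_text)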

rewrite

rewrite(draft: str) -> str

This method takes a prompt draft and rewrites it following the extractor's guideline. This method is stateless.

Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def rewrite(self, draft:str) -> str:
    """
    This method takes a prompt draft and rewrites it following the extractor's guideline.
    This method is stateless.
    """
    file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('rewrite.txt')
    with open(file_path, 'r') as f:
        rewrite_prompt_template = f.read()

    prompt = self._apply_prompt_template(text_content={"draft": draft, "prompt_guideline": self.prompt_guide}, 
                                         prompt_template=rewrite_prompt_template)
    messages = [{"role": "system", "content": self.system_prompt},
                {"role": "user", "content": prompt}]
    res = self.inference_engine.chat(messages, verbose=True)
    return res
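
For example, with the editor constructed above, a rough draft can be rewritten to follow the extractor's prompt guideline (the draft text is only illustrative):

draft = "Extract all medication names and their dosages from the clinical note."
improved_prompt = editor.rewrite(draft)  # returns the rewritten prompt as a string
print(improved_prompt)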

comment

comment(draft: str) -> str

This method takes a prompt draft and comments on it following the extractor's guideline. This method is stateless.

Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def comment(self, draft:str) -> str:
    """
    This method takes a prompt draft and comments on it following the extractor's guideline.
    This method is stateless.
    """
    file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('comment.txt')
    with open(file_path, 'r') as f:
        comment_prompt_template = f.read()

    prompt = self._apply_prompt_template(text_content={"draft": draft, "prompt_guideline": self.prompt_guide}, 
                                         prompt_template=comment_prompt_template)
    messages = [{"role": "system", "content": self.system_prompt},
                {"role": "user", "content": prompt}]
    res = self.inference_engine.chat(messages, verbose=True)
    return res
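
Used the same way as rewrite(), but returning feedback on the draft rather than a rewritten prompt:

feedback = editor.comment(draft)  # returns the LLM's comments as a string
print(feedback)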

clear_messages

clear_messages()

Clears the current chat history.

Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def clear_messages(self):
    """
    Clears the current chat history.
    """
    self.messages = []

export_chat

export_chat(file_path: str)

Exports the current chat history to a JSON file.

Parameters:

Name Type Description Default
file_path str

path to the file where the chat history will be saved. Should have a .json extension.

required
Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def export_chat(self, file_path: str):
    """
    Exports the current chat history to a JSON file.

    Parameters
    ----------
    file_path : str
        path to the file where the chat history will be saved.
        Should have a .json extension.
    """
    if not self.messages:
        raise ValueError("Chat history is empty. Nothing to export.")

    with open(file_path, 'w', encoding='utf-8') as f:
        json.dump(self.messages, f, indent=4)
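
A short sketch: after an interactive session started with chat(), the history can be saved for later reuse. export_chat() raises a ValueError when the history is empty; the file name is arbitrary.

editor.chat()  # interactive session; populates editor.messages
editor.export_chat("prompt_editor_session.json")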

import_chat

import_chat(file_path: str)

Imports a chat history from a JSON file, overwriting the current history.

Parameters:

Name Type Description Default
file_path str

The path to the .json file containing the chat history.

required
Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def import_chat(self, file_path: str):
    """
    Imports a chat history from a JSON file, overwriting the current history.

    Parameters
    ----------
    file_path : str
        The path to the .json file containing the chat history.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        loaded_messages = json.load(f)

    # Validate the loaded messages format.
    if not isinstance(loaded_messages, list):
        raise TypeError("Invalid format: The file should contain a JSON list of messages.")
    for message in loaded_messages:
        if not (isinstance(message, dict) and 'role' in message and 'content' in message):
            raise ValueError("Invalid format: Each message must be a dictionary with 'role' and 'content' keys.")

    self.messages = loaded_messages
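
A sketch of restoring the session exported above and either resuming or discarding it (file name as in the previous example):

editor.import_chat("prompt_editor_session.json")
editor.chat()              # resumes the conversation with the restored history
# editor.clear_messages()  # or discard the history and start fresh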

chat

chat()

External method that detects the environment and calls the appropriate chat method. This method uses and updates the messages list (internal memory). This method is stateful.

Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def chat(self):
    """
    External method that detects the environment and calls the appropriate chat method.
    This method uses and updates the `messages` list (internal memory).
    This method is stateful.
    """
    # Check if the conversation is empty, if so, load the initial chat prompt template.
    if len(self.messages) == 0:
        file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('chat.txt')
        with open(file_path, 'r') as f:
            chat_prompt_template = f.read()

        guideline = self._apply_prompt_template(text_content={"prompt_guideline": self.prompt_guide}, 
                                                prompt_template=chat_prompt_template)

        self.messages = [{"role": "system", "content": self.system_prompt + guideline}]

    if 'ipykernel' in sys.modules:
        self._IPython_chat()
    else:
        self._terminal_chat()
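
Starting or resuming a session is a single call; the method detects whether it is running inside an IPython kernel or a plain terminal and launches the corresponding interface:

editor.chat()  # interactive loop; history accumulates in editor.messages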

chat_stream

chat_stream(
    messages: List[Dict[str, str]],
) -> Generator[str, None, None]

This method processes messages and yields response chunks from the inference engine. It is intended for frontend applications. This method is stateless.

Parameters:

Name Type Description Default
messages List[Dict[str, str]]

List of message dictionaries (e.g., [{"role": "user", "content": "Hi"}]).

required

Yields:

Chunks of the assistant's response.
Source code in package/llm-ie/src/llm_ie/prompt_editor.py
def chat_stream(self, messages: List[Dict[str, str]]) -> Generator[str, None, None]:
    """
    This method processes messages and yields response chunks from the inference engine.
    This is intended for frontend applications.
    This method is stateless.

    Parameters
    ----------
    messages : List[Dict[str, str]]
        List of message dictionaries (e.g., [{"role": "user", "content": "Hi"}]).

    Yields
    -------
        Chunks of the assistant's response.
    """
    # Validate messages
    if not isinstance(messages, list) or not all(isinstance(m, dict) and 'role' in m and 'content' in m for m in messages):
         raise ValueError("Messages must be a list of dictionaries with 'role' and 'content' keys.")

    # Always append system prompt and initial user message
    file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('chat.txt')
    with open(file_path, 'r') as f:
        chat_prompt_template = f.read()

    guideline = self._apply_prompt_template(text_content={"prompt_guideline": self.prompt_guide}, 
                                            prompt_template=chat_prompt_template)

    messages = [{"role": "system", "content": self.system_prompt + guideline}] + messages

    stream_generator = self.inference_engine.chat(messages, stream=True)
    yield from stream_generator
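
A minimal sketch of consuming the stream, for example from a web backend; the user message is illustrative:

messages = [{"role": "user", "content": "Help me draft a prompt to extract adverse drug events."}]
for chunk in editor.chat_stream(messages):
    print(chunk, end="", flush=True)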