# runners/echo.py
from __future__ import annotations

import logging

from .base import ILLMRunner
from function_tracker import FunctionCallTracker
from models import LLMServiceObj


class EchoRunner(ILLMRunner):
    """Test runner that echoes user input back instead of calling a real LLM."""

    Type = "TurboLLM"
    IsEnabled = True
    IsStateStarting = False
    IsStateFailed = False

    def __init__(self, publisher, settings):
        self._pub = publisher
        self._settings = settings
        self._tracker = FunctionCallTracker()
        self._log = logging.getLogger("EchoRunner")

    async def StartProcess(self, llmServiceObj: dict) -> None:
        self._log.debug(f"StartProcess called with: {llmServiceObj}")
        # Pretend to "warm up": the echo runner has no real backend to start.

    async def RemoveProcess(self, sessionId: str) -> None:
        self._log.debug(f"RemoveProcess called for session: {sessionId}")
        # Nothing to clean up here.

    async def StopRequest(self, sessionId: str) -> None:
        self._log.debug(f"StopRequest called for session: {sessionId}")
        # No streaming loop to stop in echo.

    async def SendInputAndGetResponse(self, llmServiceObj: dict) -> None:
        self._log.debug(f"SendInputAndGetResponse called with: {llmServiceObj}")
        llm = LLMServiceObj(**llmServiceObj)

        # Audio control tokens have no echo equivalent, so drop them.
        if llm.UserInput.startswith(("<|START_AUDIO|>", "<|STOP_AUDIO|>")):
            self._log.debug("Audio input detected, ignoring in echo.")
            return

        # Echo behavior (match UI format): replay the user's line, answer by
        # repeating it, then signal end of output.
        await self._pub.publish("llmServiceMessage", LLMServiceObj(LlmMessage=f"<User:> {llm.UserInput}\n\n"))
        await self._pub.publish("llmServiceMessage", LLMServiceObj(LlmMessage=f"<Assistant:> You said: {llm.UserInput}\n"))
        await self._pub.publish("llmServiceMessage", LLMServiceObj(LlmMessage="<end-of-line>"))
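

# Usage sketch (assumption, not part of the runner): the _PrintPublisher stub
# below is hypothetical and stands in for the real publisher; it only relies on
# LLMServiceObj accepting the keyword fields the methods above already use.
if __name__ == "__main__":
    import asyncio

    class _PrintPublisher:
        """Stand-in publisher that prints messages instead of dispatching them."""

        async def publish(self, topic: str, obj: LLMServiceObj) -> None:
            print(f"[{topic}] {obj.LlmMessage!r}")

    async def _demo() -> None:
        runner = EchoRunner(_PrintPublisher(), settings={})
        await runner.StartProcess({"UserInput": "hello"})  # warm-up is a no-op
        await runner.SendInputAndGetResponse({"UserInput": "hello"})

    asyncio.run(_demo())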