Commit 32b704b
Parent(s): fa17022
fixed logger to be consistent
Changed files:
- app.py              +4 -1
- function_tracker.py +2 -0
- listener.py         +1 -5
- rabbit_base.py      +1 -6
- rabbit_repo.py      +2 -1
- runners/echo.py     +6 -6
- service.py          +2 -0
- streaming.py        +2 -0
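
The pattern across all eight files is the same: logging is configured exactly once, in the app.py entrypoint, and every other module only asks for a named logger. A minimal sketch of that convention (the split between files is shown via comments; names are illustrative):

# app.py: the entrypoint owns the one logging.basicConfig call
import logging

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s"
)

# every other module: no basicConfig, only a named module-level logger
logger = logging.getLogger(__name__)
logger.info("records propagate to the root handler configured above")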
app.py CHANGED

@@ -13,7 +13,10 @@ from runners.base import ILLMRunner
 from factory import default_runner_factory
 
 # ---------- logging ----------
-logging.basicConfig(
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s"
+)
 log = logging.getLogger("app")
 
 # ---------- @spaces.GPU entrypoint ----------
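With this format string, every logger in the process emits lines of one shape, for example (timestamp and message are illustrative):

2025-01-01 12:00:00,000 [INFO] app: some message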
function_tracker.py CHANGED

@@ -3,6 +3,8 @@ from __future__ import annotations
 from dataclasses import dataclass
 from typing import Dict, List
 import random
+import logging
+logger = logging.getLogger(__name__)
 
 @dataclass
 class TrackedCall:
listener.py CHANGED

@@ -6,11 +6,7 @@ import aio_pika
 
 Handler = Callable[[Any], Awaitable[None]]  # payload is envelope["data"]
 
-
-logging.basicConfig(
-    level=logging.INFO,
-    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s"
-)
+import logging
 logger = logging.getLogger(__name__)
 
 
rabbit_base.py CHANGED

@@ -10,13 +10,8 @@ from config import settings
 
 ExchangeResolver = Callable[[str], str]  # exchangeName -> exchangeType
 
-
+import logging
 logger = logging.getLogger(__name__)
-if not logger.handlers:
-    logging.basicConfig(
-        level=logging.INFO,
-        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s"
-    )
 
 def _normalize_exchange_type(val: str) -> aio_pika.ExchangeType:
     if isinstance(val, str):
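The deleted `if not logger.handlers:` guard was defensive but redundant: logging.basicConfig() is already a no-op when the root logger has handlers, so once app.py configures logging, repeated calls elsewhere change nothing and only obscure where the format is defined. A quick standard-library-only illustration of that behavior:

import logging

logging.basicConfig(level=logging.INFO, format="first: %(message)s")
logging.basicConfig(level=logging.DEBUG, format="second: %(message)s")  # no-op: root already has a handler

logging.getLogger("demo").info("still prints with the 'first' format")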
rabbit_repo.py CHANGED

@@ -8,7 +8,8 @@ from config import settings
 from models import CloudEvent
 from rabbit_base import RabbitBase
 from utils import to_json, json_compress_str
-
+import logging
+logger = logging.getLogger(__name__)
 
 class RabbitRepo(RabbitBase):
     def __init__(self, external_source: str):
runners/echo.py CHANGED

@@ -5,6 +5,7 @@ from .base import ILLMRunner
 from models import LLMServiceObj
 from function_tracker import FunctionCallTracker
 import logging
+logger = logging.getLogger(__name__)
 
 class EchoRunner(ILLMRunner):
     Type = "TurboLLM"
@@ -16,28 +17,27 @@ class EchoRunner(ILLMRunner):
         self._pub = publisher
         self._settings = settings
         self._tracker = FunctionCallTracker()
-        self._log = logging.getLogger("EchoRunner")
 
     async def StartProcess(self, llmServiceObj: dict) -> None:
-
+        logger.info(f"StartProcess called with: {llmServiceObj}")
         # pretend to “warm up”
         pass
 
     async def RemoveProcess(self, sessionId: str) -> None:
-
+        logger.info(f"RemoveProcess called for session: {sessionId}")
         # nothing to clean here
         pass
 
     async def StopRequest(self, sessionId: str) -> None:
-
+        logger.info(f"StopRequest called for session: {sessionId}")
         # no streaming loop to stop in echo
         pass
 
     async def SendInputAndGetResponse(self, llmServiceObj: dict) -> None:
-
+        logger.info(f"SendInputAndGetResponse called with: {llmServiceObj}")
         llm = LLMServiceObj(**llmServiceObj)
         if llm.UserInput.startswith("<|START_AUDIO|>") or llm.UserInput.startswith("<|STOP_AUDIO|>"):
-
+            logger.debug("Audio input detected, ignoring in echo.")
             return
 
         # Echo behavior (match UI format)
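Because the logger is now module-level and named after __name__, EchoRunner's records carry the module path instead of the old ad-hoc "EchoRunner" name. Assuming the file is imported as runners.echo, the StartProcess call above would log roughly (timestamp illustrative, payload elided):

2025-01-01 12:00:00,000 [INFO] runners.echo: StartProcess called with: {...}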
service.py CHANGED

@@ -8,6 +8,8 @@ from models import LLMServiceObj, ResultObj
 from rabbit_repo import RabbitRepo
 from runners.base import ILLMRunner
 from message_helper import success as _ok, error as _err
+import logging
+logger = logging.getLogger(__name__)
 
 @dataclass
 class _Session:
streaming.py CHANGED

@@ -1,5 +1,7 @@
 # streaming.py
 import asyncio
+import logging
+logger = logging.getLogger(__name__)
 
 async def stream_in_chunks(publish, exchange: str, llm_obj_builder, text: str,
                            batch_size: int = 3, max_chars: int = 100,
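
As a sanity check on the commit's goal, here is a standalone sketch (not part of the diff) that mimics the new layout and shows each module logger inheriting the single root configuration:

import logging

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s"
)

# stand-ins for the module loggers this commit creates
for name in ("function_tracker", "listener", "rabbit_base",
             "rabbit_repo", "runners.echo", "service", "streaming"):
    logging.getLogger(name).info("consistent format")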