import logging
import time
from typing import Any, AsyncIterable, Dict

from vllm.sampling_params import SamplingParams

from backends_base import ChatBackend, ImagesBackend

logger = logging.getLogger(__name__)

try:
    import spaces
except ImportError:
    spaces = None


class VLLMChatBackend(ChatBackend):
    """
    On ZeroGPU: build the vLLM engine per request (no persistent state).
    Yields a single ChatCompletionChunk-style dict containing the full text.
    """

    async def stream(self, request: Dict[str, Any]) -> AsyncIterable[Dict[str, Any]]:
        messages = request.get("messages", [])
        # Only the last message's content is used as the prompt.
        prompt = messages[-1]["content"] if messages else "(empty)"

        params = SamplingParams(
            temperature=float(request.get("temperature", 0.7)),
            max_tokens=int(request.get("max_tokens", 512)),
        )

        rid = f"chatcmpl-local-{int(time.time())}"
        now = int(time.time())
        model_name = request.get("model", "local-vllm")

        # One-shot generation helper; the engine lives only for this request.
        def _run_once(prompt: str) -> str:
            # Import vLLM's synchronous entry point lazily so importing this
            # module stays cheap on machines without a GPU.
            from vllm import LLM

            # The synchronous LLM class is used here instead of AsyncLLMEngine:
            # this helper must be a plain function (so spaces.GPU can wrap it),
            # and AsyncLLMEngine.generate() is an async generator that cannot
            # be consumed with list().
            llm = LLM(model=model_name, trust_remote_code=True)
            outputs = llm.generate([prompt], params)
            return outputs[-1].outputs[0].text if outputs else ""

        # On ZeroGPU, schedule the helper on a GPU slice for up to 60 seconds;
        # otherwise call it directly on the local device.
        run_once = spaces.GPU(duration=60)(_run_once) if spaces else _run_once

        try:
            text = run_once(prompt)
            yield {
                "id": rid,
                "object": "chat.completion.chunk",
                "created": now,
                "model": model_name,
                "choices": [
                    {"index": 0, "delta": {"content": text}, "finish_reason": "stop"}
                ],
            }
        except Exception:
            logger.exception("vLLM inference failed")
            raise
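

# Illustrative sketch only (not part of the backend contract): one way a
# caller might consume VLLMChatBackend.stream(). It assumes the class can be
# instantiated directly and that requests follow an OpenAI-style chat payload;
# the "model" value below is a placeholder. The helper is never invoked here.
async def _demo_chat_stream() -> None:
    backend = VLLMChatBackend()
    request = {
        "model": "local-vllm",
        "messages": [{"role": "user", "content": "Say hello."}],
        "temperature": 0.2,
        "max_tokens": 64,
    }
    async for chunk in backend.stream(request):
        print(chunk["choices"][0]["delta"]["content"])

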
class StubImagesBackend(ImagesBackend):
    """
    vLLM does not support image generation.
    For now, return a transparent PNG placeholder.
    """

    async def generate_b64(self, request: Dict[str, Any]) -> str:
        logger.warning("Image generation not supported in local vLLM backend.")
        return (
            "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR4nGP4BwQACfsD/etCJH0AAAAASUVORK5CYII="
        )
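

# Minimal manual check (a sketch, not a test suite): decode the base64
# placeholder returned by StubImagesBackend and report its size in bytes.
# Assumes the backend class can be instantiated directly; the request dict
# is ignored by the stub.
if __name__ == "__main__":
    import asyncio
    import base64

    async def _demo_stub_image() -> None:
        backend = StubImagesBackend()
        b64 = await backend.generate_b64({"prompt": "ignored"})
        png_bytes = base64.b64decode(b64)
        print(f"placeholder PNG: {len(png_bytes)} bytes")

    asyncio.run(_demo_stub_image())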