import torch
import vllm
from vllm import SamplingParams
from vllm.sampling_params import GuidedDecodingParams


def setup_llm():
    """Load Gemma 3 27B with guided decoding that forces an 'Output: 0/1' verdict."""
    model_name = "google/gemma-3-27b-it"
    # Allow free-form reasoning, but require the reply to end with "Output: 0" or "Output: 1".
    output_regex = r"[\s\S]*Output:\s*[01]"
    guide_params = GuidedDecodingParams(regex=output_regex)
    sampling_params = SamplingParams(
        n=1,
        max_tokens=1024,  # Raise if the reasoning gets truncated; guided decoding adds overhead.
        temperature=0.1,  # Low temperature for a near-deterministic choice based on the reasoning.
        stop=["<end_of_turn>"],  # Gemma's end-of-turn token.
        guided_decoding=guide_params,
    )
    llm = vllm.LLM(
        model=model_name,
        trust_remote_code=True,
        dtype=torch.bfloat16,
        max_model_len=4096,
        tensor_parallel_size=1,
        gpu_memory_utilization=0.90,  # Lower this if you hit out-of-memory errors.
    )
    return llm, sampling_params


llm, sampling_params = setup_llm()
print(llm)
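
# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example of running the guided pipeline end to end. The prompt
# text and the question are hypothetical; the only hard guarantee from
# GuidedDecodingParams is that the reply matches output_regex, i.e. it ends with
# "Output: 0" or "Output: 1". The prompt is formatted manually with Gemma's
# chat-turn markers.
example_prompt = (
    "<start_of_turn>user\n"
    "Decide whether the following statement is true. Reason step by step, then "
    "finish with 'Output: 1' for true or 'Output: 0' for false.\n"
    "Statement: water boils at 100 degrees Celsius at sea level.<end_of_turn>\n"
    "<start_of_turn>model\n"
)
outputs = llm.generate([example_prompt], sampling_params)
reply = outputs[0].outputs[0].text
verdict = reply.rsplit("Output:", 1)[-1].strip()  # "0" or "1", guaranteed by the regex
print(verdict)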