import re

import torch
import torch.nn as nn
import torch.optim as optim

import vllm
from vllm import SamplingParams
from vllm.sampling_params import GuidedDecodingParams


def setup_llm():
    model_name = "google/gemma-3-27b-it"

    # Constrain decoding so every completion ends with "Output: 0" or "Output: 1".
    output_regex = r"[\s\S]*Output:\s*[01]"
    guide_params = GuidedDecodingParams(regex=output_regex)

    sampling_params = SamplingParams(
        n=1,
        max_tokens=1024,
        temperature=0.1,
        stop=["<end_of_turn>"],  # Gemma's end-of-turn marker
        guided_decoding=guide_params,
    )

    llm = vllm.LLM(
        model=model_name,
        trust_remote_code=True,
        dtype=torch.bfloat16,
        max_model_len=4096,
        tensor_parallel_size=1,
        gpu_memory_utilization=0.90,
    )
    return llm, sampling_params


llm, sampling_params = setup_llm()
print(llm)
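
# A minimal usage sketch, not part of the original script: the prompt text, the
# 0/1 classification task, and the Gemma chat-turn markers are assumptions here;
# only llm.generate() and the RequestOutput fields are actual vLLM API.
prompt = (
    "<start_of_turn>user\n"
    "Classify the following review as positive (1) or negative (0).\n"
    "Review: I loved this film.\n"
    "End your answer with 'Output: 0' or 'Output: 1'.<end_of_turn>\n"
    "<start_of_turn>model\n"
)
outputs = llm.generate([prompt], sampling_params)
text = outputs[0].outputs[0].text

# The guided-decoding regex guarantees the completion ends in "Output: <0|1>",
# so the label can be recovered with a simple search.
match = re.search(r"Output:\s*([01])\s*$", text)
label = int(match.group(1)) if match else None
print(text, label)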