uyiosa committed
Commit 91c1a32 · verified · Parent(s): 3b8d33d

Update README.md

Files changed (1): README.md (+30 -0)
README.md CHANGED
@@ -19,3 +19,33 @@ tags:
 - llama-3
 ---

+ Quantized to FP8 (dynamic) using llm-compressor:
+ ```python
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from llmcompressor.transformers import oneshot
+ from llmcompressor.modifiers.quantization import QuantizationModifier
+
+ # Define the model ID for the model you want to quantize
+ MODEL_ID = "meta-llama/Llama-3.3-70B-Instruct"
+
+ # Load the model and tokenizer
+ model = AutoModelForCausalLM.from_pretrained(
+     MODEL_ID, device_map="auto", torch_dtype="auto"
+ )
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
+
+ # Configure the quantization recipe: quantize every Linear layer to FP8
+ # with dynamic activation scales, leaving lm_head in full precision
+ recipe = QuantizationModifier(targets="Linear", scheme="FP8_DYNAMIC", ignore=["lm_head"])
+
+ # Apply the quantization algorithm (FP8_DYNAMIC needs no calibration data)
+ oneshot(model=model, recipe=recipe)
+
+ # Derive the save directory from the model name
+ SAVE_DIR = MODEL_ID.split("/")[1] + "-FP8-Dynamic"
+
+ # Save the quantized model and tokenizer
+ model.save_pretrained(SAVE_DIR)
+ tokenizer.save_pretrained(SAVE_DIR)
+
+ print(f"Quantized model saved to {SAVE_DIR}")
+ ```
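
For reference (not part of this commit), a minimal sketch of loading the resulting checkpoint for inference with vLLM, assuming vLLM is installed and the GPU supports FP8. vLLM reads the quantization scheme from the saved config, and the directory name here matches `SAVE_DIR` from the recipe above:

```python
# Hypothetical usage sketch (not from the commit): serve the quantized
# checkpoint with vLLM. The model path matches SAVE_DIR from the recipe above.
from vllm import LLM, SamplingParams

llm = LLM(model="Llama-3.3-70B-Instruct-FP8-Dynamic")
params = SamplingParams(temperature=0.7, max_tokens=64)
outputs = llm.generate(["What does FP8 quantization change?"], params)
print(outputs[0].outputs[0].text)
```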