full.py (forked from Lightning-AI/lit-llama)
import sys
import time
import warnings
from pathlib import Path
from typing import Optional

import lightning as L
import torch

# support running without installing as a package
wd = Path(__file__).absolute().parent.parent
sys.path.append(str(wd))

from lit_llama import LLaMA, Tokenizer
from lit_llama.utils import quantization
from scripts.prepare_alpaca import generate_prompt
from generate import generate


def main(
    prompt: str = "Hello, my name is",
    *,
    input: str = "",
    num_samples: int = 1,
    max_new_tokens: int = 50,
    top_k: int = 200,
    temperature: float = 0.8,
    checkpoint_path: Optional[Path] = None,
    tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
    model_size: str = "7B",
    quantize: Optional[str] = None,
) -> None:
    """Generates text samples based on a pre-trained LLaMA model and tokenizer.

    Args:
        prompt: The prompt string to use for generating the samples.
        input: Optional input to include in the instruction prompt (keyword-only; defaults to an empty string).
        num_samples: The number of text samples to generate.
        max_new_tokens: The number of generation steps to take.
        top_k: The number of top most probable tokens to consider in the sampling process.
        temperature: A value controlling the randomness of the sampling process. Higher values result in more
            random samples.
        checkpoint_path: The checkpoint path to load.
        tokenizer_path: The tokenizer path to load.
        model_size: The model size to load.
        quantize: Whether to quantize the model, and with which method:
            ``"llm.int8"``: LLM.int8() mode,
            ``"gptq.int4"``: GPTQ 4-bit mode.
    """
    if not checkpoint_path:
        checkpoint_path = Path(f"checkpoints/lit-llama/{model_size}/lit-llama.pth")

    assert checkpoint_path.is_file(), checkpoint_path
    assert tokenizer_path.is_file(), tokenizer_path

    precision = "bf16-true" if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else "32-true"
    fabric = L.Fabric(devices=1, precision=precision)

    print("Loading model ...", file=sys.stderr)
    t0 = time.time()
    with fabric.init_module(empty_init=True), quantization(mode=quantize):
        model = LLaMA.from_name(model_size)
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint)
    print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)

    model.eval()
    model = fabric.setup(model)

    tokenizer = Tokenizer(tokenizer_path)

    # Wrap the raw prompt (and optional input) in the Alpaca-style instruction template.
    sample = {"instruction": prompt, "input": input}
    prompt = generate_prompt(sample)
    encoded = tokenizer.encode(prompt, bos=True, eos=False, device=fabric.device)
    prompt_length = encoded.size(0)

    L.seed_everything(1234)
    for i in range(num_samples):
        t0 = time.perf_counter()
        y = generate(model, encoded, max_new_tokens, temperature=temperature, top_k=top_k)
        t = time.perf_counter() - t0

        # Reset the kv cache so each sample is generated independently of the previous one.
        model.reset_cache()
        print(tokenizer.decode(y))
        tokens_generated = y.size(0) - prompt_length
        print(f"Time for inference {i + 1}: {t:.02f} sec total, {tokens_generated / t:.02f} tokens/sec", file=sys.stderr)

    if fabric.device.type == "cuda":
        print(f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB", file=sys.stderr)


if __name__ == "__main__":
    from jsonargparse import CLI

    torch.set_float32_matmul_precision("high")
    warnings.filterwarnings(
        # Triggered internally at ../aten/src/ATen/EmptyTensor.cpp:31
        "ignore",
        message="ComplexHalf support is experimental and many operators don't support it yet",
    )
    warnings.filterwarnings(
        # Triggered in bitsandbytes/autograd/_functions.py:298
        "ignore",
        message="MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization",
    )
    CLI(main)
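
# Usage sketch: because `CLI(main)` exposes every keyword argument of `main` as a
# command-line flag via jsonargparse, the script can be invoked roughly as below.
# The checkpoint and tokenizer paths are simply the defaults from the signature,
# and the `generate/full.py` location is an assumption about where this file lives
# in the repository.
#
#   python generate/full.py \
#       --prompt "Recommend a good book about space exploration." \
#       --checkpoint_path checkpoints/lit-llama/7B/lit-llama.pth \
#       --tokenizer_path checkpoints/lit-llama/tokenizer.model \
#       --max_new_tokens 100 \
#       --quantize llm.int8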