# inference.py: single-image inference example for the v1-7B model (kjunh/v1-7B).
import torch
from qwen_vl_utils import process_vision_info

from v1 import V1ForConditionalGeneration, get_processor
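
# Load the processor and model weights from the Hugging Face Hub.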
hf_model_path = "kjunh/v1-7B"
processor = get_processor(hf_model_path)
model = V1ForConditionalGeneration.from_pretrained(
    hf_model_path,
    device_map="cuda",
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",  # requires the flash-attn package
)
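
# The template suffix prompts the model for long-chain, step-by-step reasoning.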
system_message = """You are a helpful assistant."""
TEMPLATE_PROMPT = "{}\nPlease answer the question using a long-chain reasoning style and think step by step."
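
# A single chat turn: the image (fetched by URL) plus the templated question.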
messages = [
    {
        "role": "system",
        "content": [{"type": "text", "text": system_message}],
    },
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://farm8.staticflickr.com/7028/6680892455_f255f88ccc_z.jpg",
            },
            {
                "type": "text",
                "text": TEMPLATE_PROMPT.format("How many bears are in the picture?"),
            },
        ],
    },
]
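
# Render the conversation into the model's prompt string.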
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
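# process_vision_info returns (image_inputs, video_inputs); take the first image.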
image = process_vision_info(messages)[0][0]
inputs = processor(
    text=[text],
    images=[image],
    videos=None,
    padding=True,
    return_tensors="pt",
).to("cuda")
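
# Greedy decoding; the large max_new_tokens budget leaves room for a long
# reasoning chain, and the mild repetition penalty discourages loops.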
generation_kwargs = dict(
    do_sample=False,
    max_new_tokens=8192,
    use_cache=True,
    repetition_penalty=1.05,
)
with torch.inference_mode():
    generated_ids = model.generate(**inputs, **generation_kwargs)

# Strip the prompt tokens so that only the newly generated answer is decoded.
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)[0]
print(output_text)