-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathworker.py
138 lines (123 loc) · 4.29 KB
/
worker.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
import ray
import torch
import numpy as np
import transformers
import functools
import time
import lib
class MaxLengthLogitsProcessor(transformers.LogitsProcessor):
    """Force generation to terminate once a maximum length is exceeded.

    While the running sequence is still within ``max_length``, scores pass
    through untouched.  Past that point every logit is masked to ``-inf``
    except the EOS token (set to 0.0), so EOS is the only sampleable token.
    """

    def __init__(self, max_length, eos_token_id):
        # max_length is mutated externally per request (see Worker.process),
        # so it is stored as a plain attribute rather than treated as fixed.
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores):
        # Only intervene strictly beyond max_length; note input_ids includes
        # the prompt tokens, not just generated ones.
        if input_ids.shape[-1] <= self.max_length:
            return scores
        forced = torch.full_like(scores, float("-inf"))
        forced[:, self.eos_token_id] = 0.0
        return forced
class DummyWorker:
    """Minimal stand-in for Worker: echoes its input back unchanged."""

    def __init__(self, param):
        # Kept only for interface parity with Worker; never read here.
        self.param = param

    @ray.method(concurrency_group="model")
    def __call__(self, d):
        """Identity pass-through (same call signature as Worker.__call__)."""
        return d
class Worker:
    """Ray actor that serves timed generation requests on one model/device.

    Loads a main model, a reference model (consumed by the "mc"/"tmc"
    sampling generators in ``lib``), and the tokenizer, then times each
    generation request in ``process``.
    """

    def __init__(self, param):
        """
        param: {
            "model_str": str,
            "ref_model_str": str,
            "title": str,
            "device": str,  # "cuda:0", "cpu", ...
        }
        """
        self.param = param
        load_model_kwargs = {
            "device_map": str(self.param["device"]),
            "pretrained_model_name_or_path": self.param["model_str"],
            "low_cpu_mem_usage": True,
        }
        # fp16 only on GPU; CPU inference stays in the default precision.
        if self.param["device"].startswith("cuda"):
            load_model_kwargs["torch_dtype"] = torch.float16
        load_ref_model_kwargs = {
            **load_model_kwargs,
            "pretrained_model_name_or_path": self.param["ref_model_str"],
        }
        self.model = transformers.AutoModelForCausalLM.from_pretrained(
            **load_model_kwargs
        )
        self.tokenizer = transformers.AutoTokenizer.from_pretrained(
            self.param["model_str"]
        )
        self.ref_model = transformers.AutoModelForCausalLM.from_pretrained(
            **load_ref_model_kwargs
        )
        # max_length=1 is a placeholder; process() overwrites it per request.
        self.logits_processor = MaxLengthLogitsProcessor(
            1, self.tokenizer.eos_token_id
        )

    def process(self, d):
        """Run one generation request and return per-request timing data.

        d: {
            "prompt": str,
            "seed": int,
            "method": str,  # "basic", "mc", "tmc"
            "n": int,
            "max_length": int,
        }
        return: {
            # all input fields in d, except prompt
            "gen_seq_lens": list[int],  # sequence length at each yielded step
            # timestamps (time.time()); the two output timestamps are None
            # if the generator yields no steps
            "t_got_input": float,
            "t_got_first_output": float | None,
            "t_got_last_output": float | None,
        }
        raises: ValueError if d["method"] is not a known sampling method.
        """
        torch.manual_seed(d["seed"])
        self.logits_processor.max_length = d["max_length"]
        input_ids = self.tokenizer(d["prompt"], return_tensors="pt")["input_ids"].to(
            self.param["device"]
        )
        generators = {
            "basic": lib.basic_sample_generator,
            "mc": lib.mc_sample_generator,
            "tmc": lib.tmc_sample_generator,
        }
        if d["method"] not in generators:
            raise ValueError(f"unknown sampling method {d['method']}")
        generator = generators[d["method"]]
        # Only the non-basic methods take a reference model.
        if d["method"] != "basic":
            generator = functools.partial(generator, ref_model=self.ref_model)
        gen = generator(
            model=self.model,
            input_ids=input_ids,
            n=d["n"],
            process_logits_kwargs={"logits_processor": self.logits_processor},
        )
        output_ids = []
        gen_seq_lens = []
        t_got_input = time.time()
        t_got_first_output = None
        # Initialized here so an empty generator cannot leave this name
        # unbound (previously a NameError at the return below).
        t_got_last_output = None
        for step_output_ids, _step_output_logprobs in gen:
            if t_got_first_output is None:
                t_got_first_output = time.time()
            # NOTE(review): output_ids is not returned, but .cpu() forces the
            # device transfer to complete, which presumably keeps the CUDA
            # timestamps below honest — confirm before removing.
            output_ids.extend(step_output_ids[0].cpu().tolist())
            gen_seq_lens.append(step_output_ids.shape[-1])
            t_got_last_output = time.time()
        return {
            # Echo back all request fields except the (potentially large) prompt.
            **{k: v for k, v in d.items() if k != "prompt"},
            "gen_seq_lens": gen_seq_lens,
            "t_got_input": t_got_input,
            "t_got_first_output": t_got_first_output,
            "t_got_last_output": t_got_last_output,
        }

    @ray.method(concurrency_group="model")
    def __call__(self, d):
        """Batched entry point; only batch_size=1 is supported.

        Unwraps each length-1 list in d, delegates to process(), and
        re-wraps every result field in a length-1 list.
        """
        o = self.process({k: v[0] for k, v in d.items()})
        return {k: [v] for k, v in o.items()}