forked from sultanalnahian/RadQA-DPO
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodel_based_negative_examples_generator.py
More file actions
164 lines (137 loc) · 5.81 KB
/
model_based_negative_examples_generator.py
File metadata and controls
164 lines (137 loc) · 5.81 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
import torch
from transformers import (
AutoTokenizer,
AutoModelForSeq2SeqLM,
)
from radqa import RadQA
import pandas as pd
from tqdm import tqdm
import csv
import random
import argparse
from util import compute_f1_score
class LMInference:
    """Wraps a seq2seq HuggingFace checkpoint and generates answers for prompt strings."""

    def __init__(self, pretrained_model) -> None:
        # Max tokenized prompt length; longer prompts are truncated.
        self.SEQ_LENGTH = 1024
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model, use_fast=False)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model)
        self.model.to(self.device)
        self.model.eval()

    def generate(
            self,
            context: str):
        """Tokenize `context`, run beam-free generation, and return the decoded
        answer strings (one per generated sequence)."""
        encoded_input = self.tokenizer(
            context,
            padding='max_length',
            max_length=self.SEQ_LENGTH,
            truncation=True,
            return_tensors="pt",
        ).to(self.device)
        # Inference only: disable autograd tracking to save memory/compute.
        with torch.no_grad():
            outputs = self.model.generate(input_ids=encoded_input["input_ids"], max_length=128)
        answers = []
        for each_output in outputs:
            answer = self.tokenizer.decode(each_output, skip_special_tokens=True)
            # Defensive: skip_special_tokens should already drop pad tokens,
            # but strip any literal "<pad>" text that survives decoding.
            answer = answer.replace("<pad>", "")
            answers.append(answer.strip())
        return answers
def get_preference_data(contexts, predicted_answers, original_answers, threshold=0.9):
    """Build DPO preference triples (prompt, chosen, rejected).

    A triple is kept when the token-level F1 between the model prediction
    and the gold answer is at or below `threshold`, i.e. the prediction is
    a sufficiently "bad" rejected sample.
    """
    preference_data = []
    triples = zip(contexts, predicted_answers, original_answers)
    for i, (prompt, predicted, gold) in enumerate(triples):
        f1_score = compute_f1_score([predicted], [gold])
        # Debug trace for every disagreement, kept regardless of threshold.
        if predicted != gold:
            print(i, f1_score)
        # if f1_score > threshold and f1_score <= (threshold + 0.20):
        if f1_score <= threshold:
            preference_data.append({
                'prompt': prompt,
                'chosen': gold,
                'rejected': predicted,
            })
    return preference_data
def is_in_dictionary(source, new_item):
    """Return True if `source` already contains an entry matching `new_item`
    on all three preference fields (prompt, chosen, rejected)."""
    keys = ('prompt', 'chosen', 'rejected')
    return any(
        all(candidate[k] == new_item[k] for k in keys)
        for candidate in source
    )
def merge_preference_file(file_list, directory, output_file):
    """Merge several preference TSV files into one deduplicated, shuffled TSV.

    Reads each file in `file_list` (relative to `directory`), drops records
    already present (exact match on prompt/chosen/rejected), shuffles the
    merged records, and writes them to `directory`/`output_file`.
    """
    # Pass paths directly so pandas opens AND closes the files itself —
    # the previous open() handles were never closed (resource leak).
    first_path = directory + "/" + file_list[0]
    merged = pd.read_csv(first_path, delimiter="\t").to_dict(orient='records')
    for extra_name in file_list[1:]:
        extra_path = directory + "/" + extra_name
        extra_records = pd.read_csv(extra_path, delimiter="\t").to_dict(orient='records')
        for record in extra_records:
            # NOTE: O(n) scan per record; acceptable for these small files.
            if not is_in_dictionary(merged, record):
                merged.append(record)
    random.shuffle(merged)
    fields = ['prompt', 'chosen', 'rejected']
    out_path = directory + "/" + output_file
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open(out_path, 'w', newline='') as csvfile:
        csvwriter = csv.DictWriter(csvfile, fieldnames=fields, delimiter='\t')
        csvwriter.writeheader()
        csvwriter.writerows(merged)
def create_preference_files_from_threshold(source_file):
    """Split one preference TSV into per-threshold TSVs.

    For each F1 threshold, re-filters the (prompt, chosen, rejected) triples
    with get_preference_data and writes them to
    output/preference_dataset/threshold/train_preference_{lo}_{hi}.tsv.
    """
    # Pass the path directly so pandas closes the file itself
    # (the previous open() handle was never closed).
    data = pd.read_csv(source_file, delimiter="\t")
    contexts = data["prompt"]
    predicted_answers = data['rejected']
    original_answers = data['chosen']
    thresholds = [0.80, 0.70, 0.60, 0.50]
    for threshold in thresholds:
        preference_data = get_preference_data(contexts, predicted_answers, original_answers, threshold)
        # Re-seed each iteration so every threshold file gets the same shuffle order.
        random.seed(42)
        random.shuffle(preference_data)
        fields = ['prompt', 'chosen', 'rejected']
        th1 = int(threshold * 100)
        th2 = th1 + 20
        filepath = "output/preference_dataset/threshold/train_preference_{}_{}.tsv".format(th1, th2)
        # filepath = "output/preference_dataset/threshold/train_preference_{}.tsv".format(th1)
        # newline='' prevents the csv module from emitting blank rows on Windows.
        with open(filepath, 'w', newline='') as csvfile:
            csvwriter = csv.DictWriter(csvfile, fieldnames=fields, delimiter='\t')
            csvwriter.writeheader()
            csvwriter.writerows(preference_data)
def parse_args() -> argparse.Namespace:
    """Parse command-line options: model checkpoint, input JSON, output TSV."""
    parser = argparse.ArgumentParser()
    options = (
        ("--model", "checkpoints/dpo/checkpoint-1829"),
        ("--input_file", "dataset/train.json"),
        ("--output_file", "dataset/preference_train.tsv"),
    )
    for flag, default in options:
        parser.add_argument(flag, type=str, default=default)
    return parser.parse_args()
if __name__ == "__main__":
    # Generate answers for every RadQA training context, then keep the
    # (prompt, chosen=gold, rejected=predicted) triples whose F1 falls
    # below the default threshold and dump them as a DPO preference TSV.
    args = parse_args()
    model = LMInference(args.model)
    radQA = RadQA(args.input_file)
    # NOTE(review): `results` is collected but never written anywhere —
    # kept for parity, consider removing or persisting it.
    results = []
    predicted_answers = []
    original_answers = []
    contexts = []
    for item in tqdm(radQA.data):
        context = item['context']
        output = model.generate(context)
        # Prompt layout assumed to be "...<question> {q} <context> {c}"
        # — TODO confirm against RadQA's prompt construction.
        before_context = context.split("<context>")[0]
        question = before_context.split("<question>")[1].strip()
        results.append({
            'context': context,
            'question': question,
            'original_answer': item['answer'],
            'predicted_answer': output[0],
        })
        predicted_answers.append(output[0])
        # Gold answers may contain newlines, which would corrupt the TSV rows.
        original_answers.append(item['answer'].replace("\n", ""))
        contexts.append(context)
    preference_data = get_preference_data(contexts, predicted_answers, original_answers)
    fields = ['prompt', 'chosen', 'rejected']
    filepath = args.output_file
    # newline='' prevents the csv module from emitting blank rows on Windows.
    with open(filepath, 'w', newline='') as csvfile:
        csvwriter = csv.DictWriter(csvfile, fieldnames=fields, delimiter='\t')
        csvwriter.writeheader()
        csvwriter.writerows(preference_data)