|
import json
import os

import numpy as np

from model_url import get_model_resp, get_url_tokenizer
-
def run_predict(url, log_path, few_shot = True):
    """Evaluate the model served at *url* on the BoolQ validation set.

    For every example, the passage+question prompt is extended with the two
    candidate continuations ("No" = false, "Yes" = true).  The candidate with
    the higher length-normalized log-probability is taken as the prediction,
    and the running accuracy is finally written to
    ``<log_path>/boolq_zeroshot.txt``.

    Args:
        url: Endpoint of the model service queried via ``get_model_resp``.
        log_path: Directory into which the result file is written.
        few_shot: Kept for interface compatibility; currently unused — the
            prompt built below is zero-shot.
    """
    tokenizer = get_url_tokenizer()

    # Candidate index -> dataset label.  Index 0 corresponds to the "No"
    # continuation, index 1 to "Yes" (must match the order of input_str).
    # NOTE(review): this assumes `label` in val.jsonl is the *string*
    # 'true'/'false'; if the file stores JSON booleans, the comparison
    # below never matches — verify against the dataset file.
    id_label = {0: 'false', 1: 'true'}

    main_dir = os.path.dirname(os.path.abspath(__file__))
    file_dir = os.path.join(main_dir, "task_dataset", "BoolQ", "val.jsonl")

    count = 0
    correct_num = 0
    acc = 0
    with open(file_dir, "r", encoding="utf8") as f:
        # Stream the jsonl file line by line instead of materializing it
        # with readlines() — the validation set may be large.
        for raw_line in f:
            record = json.loads(raw_line)
            question, passage, label = record["question"], record["passage"], record["label"]

            count += 1
            example = f"Passage: {passage}\n\nQuestion: {question}?\n"
            # The two scored continuations; order must match id_label above.
            input_str = [f"{example}No", f"{example}Yes"]

            # The shared prompt length is identical for both candidates, so
            # encode it once instead of once per candidate.
            mask_length = len(tokenizer.encode(example))
            input_length_list = [len(tokenizer.encode(pred)) for pred in input_str]

            model_resp = get_model_resp(url=url, input_str=input_str, tokens_to_generate=0, top_k=1, logprobs=True)

            return_resp = []
            for resp_item, input_length in zip(model_resp, input_length_list):
                # The service returns one logprob per input token except the
                # first; keep only the continuation ("No"/"Yes") positions.
                assert len(resp_item) == input_length - 1
                return_resp.append(resp_item[mask_length - 1:input_length - 1])

            # Length-normalized log-likelihood per candidate; higher wins.
            pred_list = [sum(logprobs) / len(logprobs) for logprobs in return_resp]
            answers_pred = int(np.argmax(pred_list))

            if id_label[answers_pred] == label:
                correct_num += 1
            acc = correct_num / count
            print(f"boolq, 准确率Acc:{acc}, number: {count}")

    with open(os.path.join(log_path, 'boolq_zeroshot.txt'), 'w') as file:
        file.write(f"boolq, zero shot , Acc: {acc}, number: {count}")
|