|
import json
import os
import sys

import numpy as np

from model_url import get_model_resp, get_url_tokenizer


-
def run_predict(url, log_path, few_shot=True):
    """Evaluate a served model on the SuperGLUE WiC validation split.

    Each example is scored zero-generation style: two candidate prompts
    (the target word's senses are "different" vs. "the same") are sent to
    the model, and the prompt with the higher average token log-probability
    is taken as the prediction. Accuracy is printed and written to a
    summary file under ``log_path``.

    Args:
        url: Endpoint of the model service, forwarded to ``get_model_resp``.
        log_path: Directory where the accuracy summary file is written.
        few_shot: Selects only the output file name (``wic_fewshot.txt`` vs
            ``wic_zeroshot.txt``); the prompts themselves contain no
            few-shot examples in this implementation.
    """
    tokenizer = get_url_tokenizer()
    main_dir = os.path.dirname(os.path.abspath(__file__))
    # Index 0 scores the "different" prompt (label false), index 1 the
    # "the same" prompt (label true).
    id_label = {0: "false", 1: "true"}

    file_dir = os.path.join(main_dir, "task_dataset", "WiC", "val.jsonl")
    count = 0
    correct_num = 0

    # The few-shot prefix is empty, so its token length is loop-invariant:
    # compute it once instead of twice per example.
    example = ""
    mask_length = len(tokenizer.encode(example))
    # NOTE(review): if tokenizer.encode("") yields no tokens, mask_length - 1
    # is -1 and the slice below would be wrong — confirm the tokenizer emits
    # at least one token (e.g. BOS) for the empty string.

    with open(file_dir, "r", encoding="utf8") as f:
        for line in f:
            count += 1
            record = json.loads(line)
            word = record["word"]
            sentence1 = record["sentence1"]
            sentence2 = record["sentence2"]
            label = record["label"]

            # NOTE(review): "sentenses" is a typo, but the prompt text is
            # kept byte-identical — changing it would change model behavior
            # and make results incomparable with earlier runs.
            input_str = [
                f"Sentence 1: {sentence1}\nSentence 2: {sentence2}\n'{word}' in the above two sentenses are different.",
                f"Sentence 1: {sentence1}\nSentence 2: {sentence2}\n'{word}' in the above two sentenses are the same.",
            ]
            input_length_list = [len(tokenizer.encode(p)) for p in input_str]

            model_resp = get_model_resp(
                url=url, input_str=input_str, tokens_to_generate=0, top_k=1, logprobs=True
            )
            # Keep only the log-probs of the prompt body (drop the masked
            # prefix); logprob at position i scores token i + 1, hence the
            # "- 1" shifts on both slice bounds.
            pred_list = []
            for resp_item, input_length in zip(model_resp, input_length_list):
                logprobs = resp_item[mask_length - 1:input_length - 1]
                pred_list.append(sum(logprobs) / len(logprobs))

            answers_pred = int(np.argmax(pred_list))
            # Bug fix: WiC JSONL labels may be JSON booleans (true/false),
            # which json.loads turns into Python bools — those never equal
            # the strings "true"/"false". Normalize before comparing; string
            # labels still match as before.
            if str(label).lower() == id_label[answers_pred]:
                correct_num += 1

    # Compute accuracy once, guarding against an empty validation file.
    acc = correct_num / count if count else 0

    print(f"wic, 准确率Acc:{acc}, number: {count}")

    if not few_shot:
        with open(os.path.join(log_path, "wic_zeroshot.txt"), "w") as file:
            file.write(f"wic, zero shot , Acc: {acc}, number: {count}")
    else:
        with open(os.path.join(log_path, "wic_fewshot.txt"), "w") as file:
            file.write(f"wic, few shot , Acc: {acc}, number: {count}")
|