- # coding=utf-8
- # Copyright 2018 The Google AI Language Team Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """BERT finetuning runner."""
-
- from __future__ import absolute_import
- from __future__ import division
- from __future__ import print_function
-
- ## Set random seeds for reproducibility
- import numpy as np
- np.random.seed(26)
- import tensorflow as tf
- tf.set_random_seed(26)
-
- import collections
- import csv
- import pandas as pd
- import os,sys
- import modeling
- # import optimization
- import optimization_layerwise as optimization
- # import accoptimization as optimization
- import tokenization
-
- import pickle
- import codecs
- from sklearn import metrics
-
- flags = tf.flags
-
- FLAGS = flags.FLAGS
-
- ## Required parameters
- flags.DEFINE_string(
- "data_dir", None,
- "The input data dir. Should contain the .tsv files (or other data files) "
- "for the task.")
-
- flags.DEFINE_string(
- "bert_config_file", None,
- "The config json file corresponding to the pre-trained BERT model. "
- "This specifies the model architecture.")
-
- flags.DEFINE_string("task_name", None, "The name of the task to train.")
-
- flags.DEFINE_string("vocab_file", None,
- "The vocabulary file that the BERT model was trained on.")
-
- flags.DEFINE_string(
- "output_dir", None,
- "The output directory where the model checkpoints will be written.")
-
- ## Other parameters
-
- flags.DEFINE_string(
- "init_checkpoint", None,
- "Initial checkpoint (usually from a pre-trained BERT model).")
-
- flags.DEFINE_bool(
- "do_lower_case", True,
- "Whether to lower case the input text. Should be True for uncased "
- "models and False for cased models.")
-
- flags.DEFINE_integer(
- "max_seq_length", 128,
- "The maximum total input sequence length after WordPiece tokenization. "
- "Sequences longer than this will be truncated, and sequences shorter "
- "than this will be padded.")
-
- flags.DEFINE_bool("do_train", False, "Whether to run training.")
-
- flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
-
- flags.DEFINE_bool(
- "do_predict", False,
- "Whether to run the model in inference mode on the test set.")
-
- flags.DEFINE_bool("clean", True, "Whether to clean up files left by the previous training run.")
-
- flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
-
- flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
-
- flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
-
- flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
-
- flags.DEFINE_float("num_train_epochs", 3.0,
- "Total number of training epochs to perform.")
-
- flags.DEFINE_float(
- "warmup_proportion", 0.1,
- "Proportion of training to perform linear learning rate warmup for. "
- "E.g., 0.1 = 10% of training.")
-
- flags.DEFINE_integer("save_checkpoints_steps", 1000,
- "How often to save the model checkpoint.")
-
- flags.DEFINE_integer("iterations_per_loop", 1000,
- "How many steps to make in each estimator call.")
-
- flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
-
- tf.flags.DEFINE_string(
- "tpu_name", None,
- "The Cloud TPU to use for training. This should be either the name "
- "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
- "url.")
-
- tf.flags.DEFINE_string(
- "tpu_zone", None,
- "[Optional] GCE zone where the Cloud TPU is located in. If not "
- "specified, we will attempt to automatically detect the GCE project from "
- "metadata.")
-
- tf.flags.DEFINE_string(
- "gcp_project", None,
- "[Optional] Project name for the Cloud TPU-enabled project. If not "
- "specified, we will attempt to automatically detect the GCE project from "
- "metadata.")
-
- tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
-
- flags.DEFINE_integer(
- "num_tpu_cores", 8,
- "Only used if `use_tpu` is True. Total number of TPU cores to use.")
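- 
- # Example invocation (a sketch only; the script name and every path below are
- # placeholders for your own environment):
- #
- #   python run_classifier_match.py \
- #     --task_name=match \
- #     --do_train=true \
- #     --do_eval=true \
- #     --data_dir=$DATA_DIR \
- #     --vocab_file=$BERT_BASE_DIR/vocab.txt \
- #     --bert_config_file=$BERT_BASE_DIR/bert_config.json \
- #     --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
- #     --max_seq_length=128 \
- #     --train_batch_size=32 \
- #     --learning_rate=5e-5 \
- #     --num_train_epochs=3.0 \
- #     --output_dir=$OUTPUT_DIR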
-
-
- class InputExample(object):
- """A single training/test example for simple sequence classification."""
-
- def __init__(self, guid, text_a, text_b=None, label=None):
- """Constructs an InputExample.
-
- Args:
- guid: Unique id for the example.
- text_a: string. The untokenized text of the first sequence. For single
- sequence tasks, only this sequence must be specified.
- text_b: (Optional) string. The untokenized text of the second sequence.
- Must only be specified for sequence pair tasks.
- label: (Optional) string. The label of the example. This should be
- specified for train and dev examples, but not for test examples.
- """
- self.guid = guid
- self.text_a = text_a
- self.text_b = text_b
- self.label = label
-
-
- class PaddingInputExample(object):
- """Fake example so the num input examples is a multiple of the batch size.
-
- When running eval/predict on the TPU, we need to pad the number of examples
- to be a multiple of the batch size, because the TPU requires a fixed batch
- size. The alternative is to drop the last batch, which is bad because it means
- the entire output data won't be generated.
-
- We use this class instead of `None` because treating `None` as padding
- batches could cause silent errors.
- """
-
-
- class InputFeatures(object):
- """A single set of features of data."""
-
- def __init__(self,
- input_ids,
- input_mask,
- segment_ids,
- label_id,
- is_real_example=True):
- self.input_ids = input_ids
- self.input_mask = input_mask
- self.segment_ids = segment_ids
- self.label_id = label_id
- self.is_real_example = is_real_example
-
-
- class DataProcessor(object):
- """Base class for data converters for sequence classification data sets."""
-
- def get_train_examples(self, data_dir):
- """Gets a collection of `InputExample`s for the train set."""
- raise NotImplementedError()
-
- def get_dev_examples(self, data_dir):
- """Gets a collection of `InputExample`s for the dev set."""
- raise NotImplementedError()
-
- def get_test_examples(self, data_dir):
- """Gets a collection of `InputExample`s for prediction."""
- raise NotImplementedError()
-
- def get_labels(self):
- """Gets the list of labels for this data set."""
- raise NotImplementedError()
-
- @classmethod
- def _read_tsv(cls, input_file, quotechar=None):
- """Reads a tab-separated values file."""
- with tf.gfile.Open(input_file, "r") as f:
- reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
- lines = []
- for line in reader:
- lines.append(line)
- return lines
-
-
- class MatchProcessor(DataProcessor):
- """Processor for the FAQ question-matching data set (binary labels 0/1)."""
-
- def get_train_examples(self, data_dir):
- """See base class."""
- return self._create_examples(
- self._read_tsv(os.path.join(data_dir, "FAQ-train-v1.txt")), "train")
-
- def get_dev_examples(self, data_dir):
- """See base class."""
- return self._create_examples(
- self._read_tsv(os.path.join(data_dir, "FAQ-test-v1.txt")), "dev")
-
- def get_test_examples(self, data_dir):
- """See base class."""
- return self._create_examples(
- self._read_tsv(os.path.join(data_dir, "FAQ-test-v1.txt")), "test")
-
- def get_labels(self):
- """See base class."""
- # labels = []
- # for datapath in ['norm_train.txt','norm_dev.txt']:
- # filepath = os.path.join(FLAGS.data_dir,datapath)
- # f = open(filepath,'r',encoding = 'utf-8')
- # lines = f.readlines()
- # f.close()
- # for line in lines:
- # content = line.strip().split('\t')
- # if len(content) >=2 and '-DOCSTART-' not in line:
- # l = content[-1]
- # if l not in labels:
- # labels.append(l)
-
- # labels = sorted(list(set(labels)))
- labels = ['0','1']
- return labels
-
- def _create_examples(self, lines, set_type):
- """Creates examples for the train/dev/test sets from tab-separated lines (text_a, text_b, ..., label)."""
- examples = []
- for (i, line) in enumerate(lines):
- # if i == 0:
- # continue
- guid = "%s-%s" % (set_type, i)
- text_a = tokenization.convert_to_unicode(line[0])
- text_b = tokenization.convert_to_unicode(line[1])
- if set_type == "test":
- label = "0"
- else:
- label = tokenization.convert_to_unicode(line[-1])
- examples.append(
- InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
- return examples
-
- def convert_single_example(ex_index, example, label_map, max_seq_length,
- tokenizer):
- """Converts a single `InputExample` into a single `InputFeatures`."""
-
- if isinstance(example, PaddingInputExample):
- return InputFeatures(
- input_ids=[0] * max_seq_length,
- input_mask=[0] * max_seq_length,
- segment_ids=[0] * max_seq_length,
- label_id=0,
- is_real_example=False)
-
- # label_map = {}
- # for (i, label) in enumerate(label_list):
- # label_map[label] = i
-
- tokens_a = tokenizer.tokenize(example.text_a)
- tokens_b = None
- if example.text_b:
- tokens_b = tokenizer.tokenize(example.text_b)
-
- if tokens_b:
- # Modifies `tokens_a` and `tokens_b` in place so that the total
- # length is less than the specified length.
- # Account for [CLS], [SEP], [SEP] with "- 3"
- _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
- else:
- # Account for [CLS] and [SEP] with "- 2"
- if len(tokens_a) > max_seq_length - 2:
- tokens_a = tokens_a[0:(max_seq_length - 2)]
-
- # The convention in BERT is:
- # (a) For sequence pairs:
- # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
- # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
- # (b) For single sequences:
- # tokens: [CLS] the dog is hairy . [SEP]
- # type_ids: 0 0 0 0 0 0 0
- #
- # Where "type_ids" are used to indicate whether this is the first
- # sequence or the second sequence. The embedding vectors for `type=0` and
- # `type=1` were learned during pre-training and are added to the wordpiece
- # embedding vector (and position vector). This is not *strictly* necessary
- # since the [SEP] token unambiguously separates the sequences, but it makes
- # it easier for the model to learn the concept of sequences.
- #
- # For classification tasks, the first vector (corresponding to [CLS]) is
- # used as the "sentence vector". Note that this only makes sense because
- # the entire model is fine-tuned.
- tokens = []
- segment_ids = []
- tokens.append("[CLS]")
- segment_ids.append(0)
- for token in tokens_a:
- tokens.append(token)
- segment_ids.append(0)
- tokens.append("[SEP]")
- segment_ids.append(0)
-
- if tokens_b:
- for token in tokens_b:
- tokens.append(token)
- segment_ids.append(1)
- tokens.append("[SEP]")
- segment_ids.append(1)
-
- input_ids = tokenizer.convert_tokens_to_ids(tokens)
-
- # The mask has 1 for real tokens and 0 for padding tokens. Only real
- # tokens are attended to.
- input_mask = [1] * len(input_ids)
-
- # Zero-pad up to the sequence length.
- while len(input_ids) < max_seq_length:
- input_ids.append(0)
- input_mask.append(0)
- segment_ids.append(0)
-
- assert len(input_ids) == max_seq_length
- assert len(input_mask) == max_seq_length
- assert len(segment_ids) == max_seq_length
-
- label_id = label_map[example.label]
- if ex_index < 5:
- tf.logging.info("*** Example ***")
- tf.logging.info("guid: %s" % (example.guid))
- tf.logging.info("tokens: %s" % " ".join(
- [tokenization.printable_text(x) for x in tokens]))
- tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
- tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
- tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
- tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
-
- feature = InputFeatures(
- input_ids=input_ids,
- input_mask=input_mask,
- segment_ids=segment_ids,
- label_id=label_id,
- is_real_example=True)
- return feature
-
-
- def file_based_convert_examples_to_features(
- examples, label_map, max_seq_length, tokenizer, output_file):
- """Convert a set of `InputExample`s to a TFRecord file."""
-
- writer = tf.python_io.TFRecordWriter(output_file)
-
- for (ex_index, example) in enumerate(examples):
- if ex_index % 10000 == 0:
- tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
-
- feature = convert_single_example(ex_index, example, label_map,
- max_seq_length, tokenizer)
-
- def create_int_feature(values):
- f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
- return f
-
- features = collections.OrderedDict()
- features["input_ids"] = create_int_feature(feature.input_ids)
- features["input_mask"] = create_int_feature(feature.input_mask)
- features["segment_ids"] = create_int_feature(feature.segment_ids)
- features["label_ids"] = create_int_feature([feature.label_id])
- features["is_real_example"] = create_int_feature(
- [int(feature.is_real_example)])
-
- tf_example = tf.train.Example(features=tf.train.Features(feature=features))
- writer.write(tf_example.SerializeToString())
- writer.close()
-
-
- def file_based_input_fn_builder(input_file, seq_length, is_training,
- drop_remainder):
- """Creates an `input_fn` closure to be passed to TPUEstimator."""
-
- name_to_features = {
- "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
- "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
- "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
- "label_ids": tf.FixedLenFeature([], tf.int64),
- "is_real_example": tf.FixedLenFeature([], tf.int64),
- }
-
- def _decode_record(record, name_to_features):
- """Decodes a record to a TensorFlow example."""
- example = tf.parse_single_example(record, name_to_features)
-
- # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
- # So cast all int64 to int32.
- for name in list(example.keys()):
- t = example[name]
- if t.dtype == tf.int64:
- t = tf.to_int32(t)
- example[name] = t
-
- return example
-
- def input_fn(params):
- """The actual input function."""
- batch_size = params["batch_size"]
-
- # For training, we want a lot of parallel reading and shuffling.
- # For eval, we want no shuffling and parallel reading doesn't matter.
- d = tf.data.TFRecordDataset(input_file)
- if is_training:
-
- # d = d.repeat(1)
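- # Note: repeat() is not applied here: main() calls estimator.train() once per
- # epoch, so each call makes a single pass over the training data.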
- d = d.shuffle(buffer_size=500)
-
- d = d.apply(
- tf.contrib.data.map_and_batch(
- lambda record: _decode_record(record, name_to_features),
- batch_size=batch_size,
- drop_remainder=drop_remainder))
-
- return d
-
- return input_fn
-
-
- def _truncate_seq_pair(tokens_a, tokens_b, max_length):
- """Truncates a sequence pair in place to the maximum length."""
-
- # This is a simple heuristic which will always truncate the longer sequence
- # one token at a time. This makes more sense than truncating an equal percent
- # of tokens from each, since if one sequence is very short then each token
- # that's truncated likely contains more information than a longer sequence.
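- # For example, with max_length=8, len(tokens_a)=6 and len(tokens_b)=5, the loop
- # pops from a, then b, then a, stopping at len(tokens_a)=4 and len(tokens_b)=4.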
- while True:
- total_length = len(tokens_a) + len(tokens_b)
- if total_length <= max_length:
- break
- if len(tokens_a) > len(tokens_b):
- tokens_a.pop()
- else:
- tokens_b.pop()
-
-
- def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
- labels, num_labels, use_one_hot_embeddings):
- """Creates a classification model."""
- model = modeling.BertModel(
- config=bert_config,
- is_training=is_training,
- input_ids=input_ids,
- input_mask=input_mask,
- token_type_ids=segment_ids,
- use_one_hot_embeddings=use_one_hot_embeddings)
-
- # In the demo, we are doing a simple classification task on the entire
- # segment.
- #
- # If you want to use the token-level output, use model.get_sequence_output()
- # instead.
- output_layer = model.get_pooled_output()
-
- hidden_size = output_layer.shape[-1].value
-
- # output_weights = tf.get_variable(
- # "output_weights", [num_labels, hidden_size],
- # initializer=tf.truncated_normal_initializer(stddev=0.02))
-
- # output_bias = tf.get_variable(
- # "output_bias", [num_labels], initializer=tf.zeros_initializer())
-
- # with tf.variable_scope("loss"):
- # if is_training:
- # # I.e., 0.1 dropout
- # output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
-
- # logits = tf.matmul(output_layer, output_weights, transpose_b=True)
- # logits = tf.nn.bias_add(logits, output_bias)
- # probabilities = tf.nn.softmax(logits, axis=-1)
- # log_probs = tf.nn.log_softmax(logits, axis=-1)
-
- # one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
-
- # per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
- # loss = tf.reduce_mean(per_example_loss)
-
- # return (loss, per_example_loss, logits, probabilities)
-
- ## Use sigmoid
- # output_weights = tf.get_variable(
- # "output_weights", [1, hidden_size],
- # initializer=tf.truncated_normal_initializer(stddev=0.02))
- # output_bias = tf.get_variable(
- # "output_bias", [1], initializer=tf.zeros_initializer())
- # with tf.variable_scope("loss"):
- # if is_training:
- # # I.e., 0.1 dropout
- # output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
- # logits = tf.matmul(output_layer, output_weights, transpose_b=True)
- # logits = tf.nn.bias_add(logits, output_bias)
- # probabilities = tf.nn.sigmoid(logits)
- # per_example_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(labels,tf.float32),logits=tf.squeeze(logits))
- # loss = tf.reduce_mean(per_example_loss)
- # return (loss, per_example_loss, logits, probabilities)
-
- ## Use a CNN over the token-level outputs to extract keyword features
- # Get the token-level embeddings as input; shape [batch_size, seq_length, embedding_size]
- output_layer = model.get_sequence_output()
- if is_training:
- # I.e., 0.1 dropout
- output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
- hidden_size = output_layer.shape[-1].value
- max_seq_length = output_layer.shape[1].value
- kernel_sizes = [1, 3, 5]  # convolution kernels with window sizes 1, 3 and 5, respectively
- with tf.name_scope("mul_cnn"):
- pooled_outputs = []
- for kernel_size in kernel_sizes:
- # CNN layer
- conv = tf.layers.conv1d(output_layer, hidden_size//2, kernel_size, name='conv-%s' % kernel_size)
- # global max pooling layer
- gmp = tf.reduce_max(conv, reduction_indices=[1], name='gmp')
- pooled_outputs.append(gmp)
- h_pool = tf.concat(pooled_outputs, 1)  # concatenate the pooled features
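- # h_pool shape: [batch_size, len(kernel_sizes) * (hidden_size // 2)]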
- if is_training:
- # I.e., 0.3 dropout
- h_pool = tf.nn.dropout(h_pool, keep_prob=0.7)
- h_pool = tf.layers.dense(h_pool,h_pool.shape[-1].value//2,tf.nn.relu)
- # logits = tf.layers.dense(h_pool,1)
- # probabilities = tf.nn.sigmoid(logits)
- # per_example_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(labels,tf.float32),logits=tf.squeeze(logits))
- # loss = tf.reduce_mean(per_example_loss)
- # return (loss, per_example_loss, logits, probabilities)
-
-
- logits = tf.layers.dense(h_pool,num_labels)
- probabilities = tf.nn.softmax(logits, axis=-1)
- log_probs = tf.nn.log_softmax(logits, axis=-1)
- one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
- per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
- loss = tf.reduce_mean(per_example_loss)
- return (loss, per_example_loss, logits, probabilities)
-
- def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
- num_train_steps, num_warmup_steps, use_tpu,
- use_one_hot_embeddings):
- """Returns `model_fn` closure for TPUEstimator."""
-
- def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
- """The `model_fn` for TPUEstimator."""
-
- tf.logging.info("*** Features ***")
- for name in sorted(features.keys()):
- tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
-
- input_ids = features["input_ids"]
- input_mask = features["input_mask"]
- segment_ids = features["segment_ids"]
- label_ids = features["label_ids"]
- is_real_example = None
- if "is_real_example" in features:
- is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
- else:
- is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
-
- is_training = (mode == tf.estimator.ModeKeys.TRAIN)
-
- (total_loss, per_example_loss, logits, probabilities) = create_model(
- bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
- num_labels, use_one_hot_embeddings)
-
- tvars = tf.trainable_variables()
- initialized_variable_names = {}
- scaffold_fn = None
- if init_checkpoint:
- (assignment_map, initialized_variable_names
- ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
- if use_tpu:
-
- def tpu_scaffold():
- tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
- return tf.train.Scaffold()
-
- scaffold_fn = tpu_scaffold
- else:
- tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
-
- # tf.logging.info("**** Trainable Variables ****")
- # for var in tvars:
- # init_string = ""
- # if var.name in initialized_variable_names:
- # init_string = ", *INIT_FROM_CKPT*"
- # tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
- # init_string)
-
- output_spec = None
- if mode == tf.estimator.ModeKeys.TRAIN:
-
- train_op = optimization.create_optimizer(
- total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
-
- output_spec = tf.contrib.tpu.TPUEstimatorSpec(
- mode=mode,
- loss=total_loss,
- train_op=train_op,
- scaffold_fn=scaffold_fn)
- elif mode == tf.estimator.ModeKeys.EVAL:
-
- def metric_fn(per_example_loss, label_ids, logits, is_real_example):
- predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
- accuracy = tf.metrics.accuracy(
- labels=label_ids, predictions=predictions, weights=is_real_example)
- loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
- return {
- "eval_accuracy": accuracy,
- "eval_loss": loss,
- }
-
- eval_metrics = (metric_fn,
- [per_example_loss, label_ids, logits, is_real_example])
- output_spec = tf.contrib.tpu.TPUEstimatorSpec(
- mode=mode,
- loss=total_loss,
- eval_metrics=eval_metrics,
- scaffold_fn=scaffold_fn)
- else:
- output_spec = tf.contrib.tpu.TPUEstimatorSpec(
- mode=mode,
- predictions={"probabilities": probabilities},
- scaffold_fn=scaffold_fn)
- return output_spec
-
- return model_fn
-
- def labeltoid(label_list):
- label_map = {}
- # Map each label to an integer id (indexing starts at 0)
- for (i, label) in enumerate(label_list):
- label_map[label] = i
- # Persist the label -> id map so it can be reloaded later (e.g. for prediction)
- with codecs.open(os.path.join(FLAGS.output_dir, 'label2id.pkl'), 'wb') as w:
- pickle.dump(label_map, w)
-
- return label_map
-
- def save_best_model(cur_ckpt_path,best_model_path):
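- """Copies the current checkpoint files to the best-model path; shells out to cp, so a local filesystem is assumed."""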
- cmd1 = 'cp '+cur_ckpt_path+'.index '+best_model_path+'.index'
- cmd2 = 'cp '+cur_ckpt_path+'.meta '+best_model_path+'.meta'
- cmd3 = 'cp '+cur_ckpt_path+'.data-00000-of-00001 '+best_model_path+'.data-00000-of-00001'
- os.system(cmd1)
- os.system(cmd2)
- os.system(cmd3)
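- 
- # A portable alternative sketch (not wired in above): in TF 1.x, tf.gfile.Copy also
- # handles non-local paths such as GCS buckets.
- # def save_best_model_gfile(cur_ckpt_path, best_model_path):
- #   for suffix in ('.index', '.meta', '.data-00000-of-00001'):
- #     tf.gfile.Copy(cur_ckpt_path + suffix, best_model_path + suffix, overwrite=True)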
-
- def report_metric(predict_examples, pred, out_file, id2label, probs):
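- """Writes per-example predictions to out_file, dumps mis-classified examples to dev_badcase.txt, prints a classification report, and returns the macro F1."""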
- out_dev = open(out_file,'w')
- gold_str = []
- pred_str = []
- for exam, p, prob in zip(predict_examples,pred,probs):
- out_dev.write(exam.text_a+'\t'+str(exam.label)+'\t'+str(id2label[p])+'\t'+str(prob)+'\n')
- gold_str.append(str(exam.label))
- pred_str.append(str(id2label[p]))
- out_dev.close()
- ## write bad case
- out_dev_badcase = open(os.path.join(FLAGS.output_dir, 'dev_badcase.txt'),'w')
- for exam, p, prob in zip(predict_examples,pred, probs):
- if(str(exam.label)!=str(id2label[p])):
- out_dev_badcase.write(exam.text_a+'\t'+str(exam.label)+'\t'+str(id2label[p])+'\t'+str(prob)+'\n')
- out_dev_badcase.close()
- print(metrics.classification_report(gold_str,pred_str))
- return(metrics.f1_score(gold_str,pred_str,average='macro'))
-
- def main(_):
- tf.logging.set_verbosity(tf.logging.INFO)
-
- processors = {
- "match": MatchProcessor,
- }
-
- tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
- FLAGS.init_checkpoint)
-
- if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
- raise ValueError(
- "At least one of `do_train`, `do_eval` or `do_predict' must be True.")
-
- bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
-
- if FLAGS.max_seq_length > bert_config.max_position_embeddings:
- raise ValueError(
- "Cannot use sequence length %d because the BERT model "
- "was only trained up to sequence length %d" %
- (FLAGS.max_seq_length, bert_config.max_position_embeddings))
- ## del last training file
- if(FLAGS.do_train and FLAGS.clean):
- if os.path.exists(FLAGS.output_dir):
- def del_file(path):
- ls = os.listdir(path)
- for i in ls:
- c_path = os.path.join(path, i)
- if os.path.isdir(c_path):
- del_file(c_path)
- else:
- os.remove(c_path)
-
- try:
- del_file(FLAGS.output_dir)
- except Exception as e:
- print(e)
- print('Please remove the files in the output dir and data.conf manually.')
- exit(-1)
-
-
- tf.gfile.MakeDirs(FLAGS.output_dir)
-
- task_name = FLAGS.task_name.lower()
-
- if task_name not in processors:
- raise ValueError("Task not found: %s" % (task_name))
-
- processor = processors[task_name]()
-
- label_list = processor.get_labels()
- label_map = labeltoid(label_list)
-
- tokenizer = tokenization.FullTokenizer(
- vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
-
- tpu_cluster_resolver = None
- if FLAGS.use_tpu and FLAGS.tpu_name:
- tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
- FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
-
- is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
- run_config = tf.contrib.tpu.RunConfig(
- cluster=tpu_cluster_resolver,
- master=FLAGS.master,
- model_dir=None,
- save_checkpoints_steps=FLAGS.save_checkpoints_steps,
- tpu_config=tf.contrib.tpu.TPUConfig(
- iterations_per_loop=FLAGS.iterations_per_loop,
- num_shards=FLAGS.num_tpu_cores,
- per_host_input_for_training=is_per_host))
-
- train_examples = None
- num_train_steps = None
- num_warmup_steps = None
- if FLAGS.do_train:
- train_examples = processor.get_train_examples(FLAGS.data_dir)
- num_train_steps = int(
- len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
- num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
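- # e.g. 10,000 training examples, batch size 32 and 3 epochs give int(10000 / 32 * 3) = 937
- # train steps; warmup_proportion 0.1 then gives int(937 * 0.1) = 93 warmup steps.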
-
- model_fn = model_fn_builder(
- bert_config=bert_config,
- num_labels=len(label_list),
- init_checkpoint=FLAGS.init_checkpoint,
- learning_rate=FLAGS.learning_rate,
- num_train_steps=num_train_steps,
- num_warmup_steps=num_warmup_steps,
- use_tpu=FLAGS.use_tpu,
- use_one_hot_embeddings=FLAGS.use_tpu)
-
- # If TPU is not available, this will fall back to normal Estimator on CPU
- # or GPU.
- estimator = tf.contrib.tpu.TPUEstimator(
- use_tpu=FLAGS.use_tpu,
- model_fn=model_fn,
- config=run_config,
- model_dir=FLAGS.output_dir,
- train_batch_size=FLAGS.train_batch_size,
- eval_batch_size=FLAGS.eval_batch_size,
- predict_batch_size=FLAGS.predict_batch_size)
-
- if FLAGS.do_train:
- train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
- file_based_convert_examples_to_features(
- train_examples, label_map, FLAGS.max_seq_length, tokenizer, train_file)
- tf.logging.info("***** Running training *****")
- tf.logging.info(" Num examples = %d", len(train_examples))
- tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
- tf.logging.info(" Num steps = %d", num_train_steps)
- train_input_fn = file_based_input_fn_builder(
- input_file=train_file,
- seq_length=FLAGS.max_seq_length,
- is_training=True,
- drop_remainder=True)
-
- if FLAGS.do_eval:
- eval_examples = processor.get_dev_examples(FLAGS.data_dir)
- num_actual_eval_examples = len(eval_examples)
- eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
- file_based_convert_examples_to_features(
- eval_examples, label_map, FLAGS.max_seq_length, tokenizer, eval_file)
- tf.logging.info("***** Running evaluation *****")
- tf.logging.info(" Num examples = %d (%d actual, %d padding)",
- len(eval_examples), num_actual_eval_examples,
- len(eval_examples) - num_actual_eval_examples)
- tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
- eval_input_fn = file_based_input_fn_builder(
- input_file=eval_file,
- seq_length=FLAGS.max_seq_length,
- is_training=False,
- drop_remainder=False)
-
- ## Get id2label
- with codecs.open(os.path.join(FLAGS.output_dir, 'label2id.pkl'), 'rb') as rf:
- label2id = pickle.load(rf)
- id2label = {value: key for key, value in label2id.items()}
-
- best_result = 0
- all_results = []
- if FLAGS.do_train:
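- # NOTE: the per-epoch dev evaluation below relies on eval_examples and eval_input_fn,
- # which are only built when --do_eval is also set.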
- for i in range(int(FLAGS.num_train_epochs)):
- print('**'*40)
- print('Train {} epoch'.format(i+1))
- estimator.train(input_fn=train_input_fn)
- ## Do Dev
- result = estimator.predict(input_fn=eval_input_fn)
- result = list(result)
- pred = []
- probs = []
- for item in result:
- ## softmax decode
- pred.append(np.argmax(list(item['probabilities'])))
-
- ## sigmoid decode
- # if(list(item['probabilities'])[0]>0.5):
- # pred.append(1)
- # else:
- # pred.append(0)
- probs.append(list(item['probabilities']))
-
- pred = np.array(pred)
- output_dev_file = os.path.join(FLAGS.output_dir, "label_dev.txt")
- tmp_result = report_metric(eval_examples, pred, output_dev_file, id2label, probs)
- print('Tmp result (macro F1) : ',tmp_result)
- all_results.append(tmp_result)
-
- if(tmp_result>best_result):
- print('**'*40)
- print('Found better model, saved!')
- best_result = tmp_result
- cur_ckpt_path = estimator.latest_checkpoint()
- best_model_path = '/'.join(cur_ckpt_path.split('/')[:-1]+['model.ckpt-best'])
- save_best_model(cur_ckpt_path,best_model_path)
- print('**'*40)
- print('Training completed!')
- print('all_results: ',all_results)
- print('Best result: ',np.max(all_results))
- print('Avg result: ',np.mean(all_results))
- # import sys
- # sys.exit(0)
-
- if FLAGS.do_predict:
- print('***********************Running Prediction************************')
- print('Using the model that performed best on the dev data')
- cur_ckpt_path = estimator.latest_checkpoint()
- best_model_path = '/'.join(cur_ckpt_path.split('/')[:-1]+['model.ckpt-best'])
- estimator = tf.contrib.tpu.TPUEstimator(
- use_tpu=FLAGS.use_tpu,
- model_fn=model_fn,
- config=run_config,
- model_dir=None,
- train_batch_size=FLAGS.train_batch_size,
- eval_batch_size=FLAGS.eval_batch_size,
- predict_batch_size=FLAGS.predict_batch_size,
- warm_start_from=best_model_path)
- predict_examples = processor.get_test_examples(FLAGS.data_dir)
- num_actual_predict_examples = len(predict_examples)
- predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
- file_based_convert_examples_to_features(predict_examples, label_map,
- FLAGS.max_seq_length, tokenizer,
- predict_file)
- tf.logging.info("***** Running prediction*****")
- tf.logging.info(" Num examples = %d (%d actual, %d padding)",
- len(predict_examples), num_actual_predict_examples,
- len(predict_examples) - num_actual_predict_examples)
- tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
- predict_input_fn = file_based_input_fn_builder(
- input_file=predict_file,
- seq_length=FLAGS.max_seq_length,
- is_training=False,
- drop_remainder=False)
-
- result = estimator.predict(input_fn=predict_input_fn)
- result = list(result)
- pred=[]
- probs = []
- for item in result:
- ## softmax decode
- pred.append(np.argmax(list(item['probabilities'])))
-
- # ## sigmoid decode
- # if(list(item['probabilities'])[0]>0.5):
- # pred.append(1)
- # else:
- # pred.append(0)
- probs.append(list(item['probabilities']))
- pred = np.array(pred)
- probs = np.array(probs)
- print(pred.shape)
- print(probs.shape)
- pred = [id2label[i] for i in pred]
- np.save(os.path.join(FLAGS.output_dir,'pred_best.npy'),pred)
- np.save(os.path.join(FLAGS.output_dir,'probs_best.npy'),probs)
-
- print('Using the model restored from the last checkpoint')
- estimator = tf.contrib.tpu.TPUEstimator(
- use_tpu=FLAGS.use_tpu,
- model_fn=model_fn,
- config=run_config,
- model_dir=None,
- train_batch_size=FLAGS.train_batch_size,
- eval_batch_size=FLAGS.eval_batch_size,
- predict_batch_size=FLAGS.predict_batch_size,
- warm_start_from=cur_ckpt_path)
- predict_examples = processor.get_test_examples(FLAGS.data_dir)
- num_actual_predict_examples = len(predict_examples)
- predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
- file_based_convert_examples_to_features(predict_examples, label_map,
- FLAGS.max_seq_length, tokenizer,
- predict_file)
- tf.logging.info("***** Running prediction*****")
- tf.logging.info(" Num examples = %d (%d actual, %d padding)",
- len(predict_examples), num_actual_predict_examples,
- len(predict_examples) - num_actual_predict_examples)
- tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
- predict_input_fn = file_based_input_fn_builder(
- input_file=predict_file,
- seq_length=FLAGS.max_seq_length,
- is_training=False,
- drop_remainder=False)
-
- result = estimator.predict(input_fn=predict_input_fn)
- result = list(result)
- pred=[]
- probs = []
- for item in result:
- ## softmax decode
- pred.append(np.argmax(list(item['probabilities'])))
-
- # ## sigmoid decode
- # if(list(item['probabilities'])[0]>0.5):
- # pred.append(1)
- # else:
- # pred.append(0)
- probs.append(list(item['probabilities']))
- pred = np.array(pred)
- probs = np.array(probs)
- print(pred.shape)
- print(probs.shape)
- pred = [id2label[i] for i in pred]
- np.save(os.path.join(FLAGS.output_dir,'pred_last.npy'),pred)
- np.save(os.path.join(FLAGS.output_dir,'probs_last.npy'),probs)
-
-
- if __name__ == "__main__":
- flags.mark_flag_as_required("data_dir")
- flags.mark_flag_as_required("task_name")
- flags.mark_flag_as_required("vocab_file")
- flags.mark_flag_as_required("bert_config_file")
- flags.mark_flag_as_required("output_dir")
- tf.app.run()