diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..97603b7
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,15 @@
+[flake8]
+# Initial set of rules
+# Feel free to add any new rule here with a description of what it does.
+
+# E203 Whitespace before ':'
+# E266 Too many leading '#' for block comment
+# E501 Line too long (82 > 79 characters)
+# W503 Line break occurred before a binary operator
+# F405 '' may be undefined, or defined from star imports
+# E402 module level import not at top of file
+# E731 do not assign a lambda expression, use a def
+# F821 undefined name 'get_ipython' --> occurs in Python files generated with nbconvert
+
+ignore = E203, E266, E501, W503, F405, E402, E731, F821
+max-line-length = 79
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..5cb831e
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,10 @@
+repos:
+- repo: https://github.com/ambv/black
+  rev: stable
+  hooks:
+  - id: black
+    language_version: python3.6
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v1.2.3
+  hooks:
+  - id: flake8
\ No newline at end of file
diff --git a/Contributing.md b/Contributing.md
new file mode 100644
index 0000000..2a5d2a2
--- /dev/null
+++ b/Contributing.md
@@ -0,0 +1,32 @@
+# Contribution Guidelines
+
+## Steps to Contributing
+
+Here are the basic steps to get started with your first contribution. Please reach out with any questions.
+1. [Fork the repo](https://help.github.com/articles/fork-a-repo/) so you can make and test local changes.
+1. Create a new branch for the issue. We suggest prefixing the branch with your username followed by the name of the target branch (e.g. username/bertonazureml).
+1. Make code changes.
+1. We use the [pre-commit](https://pre-commit.com/) package to run our pre-commit hooks: the black formatter and flake8 linting run on each commit. To set up pre-commit on your machine, follow the steps below; note that you only need to run them the first time you use pre-commit for this project.
+
+    * Update your conda environment (pre-commit is included in the yaml file), or simply run
+    ```
+    $ pip install pre-commit
+    ```
+    * Set up pre-commit by running the following command; this puts pre-commit under your .git/hooks directory, and the hooks then run automatically whenever you commit.
+    ```
+    $ pre-commit install
+    ```
+    ```
+    $ git commit -m "message"
+    ```
+    * Each time you commit, git runs the pre-commit hooks (black and flake8 for now) on any Python files that are staged in the git index. If black reformats a file, or if flake8 finds any linting errors, the commit will not succeed. If black changed a file, stage it again; if flake8 reported issues, fix them and stage the file again.
+
+    * To run pre-commit on all files, just run
+    ```
+    $ pre-commit run --all-files
+    ```
+1. Create a pull request against the bertonazureml branch.
+
+Note: We use the bertonazureml branch to land all new features, so please remember to create the Pull Request against bertonazureml.
+
+Once the features included in a milestone are complete, we will merge bertonazureml into master and make a release.
\ No newline at end of file diff --git a/finetune/PyTorch/azureml_bert_util.py b/finetune/PyTorch/azureml_bert_util.py index dc60d8e..b7d261c 100644 --- a/finetune/PyTorch/azureml_bert_util.py +++ b/finetune/PyTorch/azureml_bert_util.py @@ -1,3 +1,4 @@ +# flake8: noqa from horovod.torch.mpi_ops import allreduce, allreduce_async_, synchronize from horovod.torch.compression import Compression import horovod.torch as hvd @@ -5,14 +6,17 @@ import time from collections import OrderedDict -try: + +try: from apex_C import flatten from apex_C import unflatten except ImportError: try: _ = warned_flatten except NameError: - print("Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten.") + print( + "Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten." + ) warned_flatten = True from torch._utils import _flatten_dense_tensors as flatten from torch._utils import _unflatten_dense_tensors as unflatten @@ -20,12 +24,14 @@ def warmup_linear(x, warmup=0.002): if x < warmup: - return x/warmup + return x / warmup return 1.0 - x def adjust_gradient_accumulation_steps(x, initial_steps, target_steps, warmup): - return min(max(int(x/warmup*target_steps), initial_steps), target_steps) + return min( + max(int(x / warmup * target_steps), initial_steps), target_steps + ) class DistributedCommunicator: @@ -38,12 +44,15 @@ def __init__(self, accumulation_step=1): self.node_count = self.world_size // self.n_gpu self.accumulation_step = accumulation_step self.count_down = accumulation_step - 1 - self._multi_node = self.node_count > 1 + self._multi_node = self.node_count > 1 if not self._multi_node: # use PyTorch build-in NCCL backend for single node training - torch.distributed.init_process_group(backend='nccl', init_method='tcp://127.0.0.1:6000', - world_size=self.n_gpu, rank=self.local_rank) - + torch.distributed.init_process_group( + backend="nccl", + init_method="tcp://127.0.0.1:6000", + world_size=self.n_gpu, + rank=self.local_rank, + ) def register_model(self, model, fp16): # broadcast model parameters @@ -54,12 +63,16 @@ def register_model(self, model, fp16): torch.distributed.broadcast_multigpu([param], 0) # register hook for reduce when backpropagate - self._parameter_names = {v: k for k, v in sorted(model.named_parameters())} + self._parameter_names = { + v: k for k, v in sorted(model.named_parameters()) + } self._handles = {} self._requires_update = set() self._grad_accs = [] self._grad = [] - self._compression = hvd.Compression.fp16 if fp16 else hvd.Compression.none + self._compression = ( + hvd.Compression.fp16 if fp16 else hvd.Compression.none + ) for p in model.parameters(): if p.requires_grad: p.grad = p.data.new(p.size()).zero_() @@ -69,26 +82,26 @@ def register_model(self, model, fp16): grad_acc.register_hook(self._make_hook(p)) self._grad_accs.append(grad_acc) - def _allreduce_tensor(self, p): assert p not in self._handles assert not p.grad.requires_grad tensor = p.grad name = self._parameter_names.get(p) - if self._multi_node: + if self._multi_node: tensor_compressed, ctx = self._compression.compress(tensor) - handle = allreduce_async_(tensor_compressed, average=True, name=name) + handle = allreduce_async_( + tensor_compressed, average=True, name=name + ) self._handles[p] = (handle, ctx) else: self._handles[p] = tensor - def _make_hook(self, p): def hook(*ignore): if self.count_down == 0: self._allreduce_tensor(p) - return hook + return hook def synchronize(self): synced = False @@ -101,7 +114,10 @@ def synchronize(self): 
for p, value in self._handles.items(): handle, ctx = value output = synchronize(handle) - p.grad.set_(self._compression.decompress(output, ctx) / self.accumulation_step) + p.grad.set_( + self._compression.decompress(output, ctx) + / self.accumulation_step + ) else: buckets = OrderedDict() for tensor in self._handles.values(): @@ -111,9 +127,15 @@ def synchronize(self): buckets[tp].append(tensor) for tp in buckets: bucket = buckets[tp] - coalesced = flatten(bucket) / self.world_size / self.accumulation_step + coalesced = ( + flatten(bucket) + / self.world_size + / self.accumulation_step + ) torch.distributed.all_reduce_multigpu([coalesced]) - for buf, synced in zip(bucket, unflatten(coalesced, bucket)): + for buf, synced in zip( + bucket, unflatten(coalesced, bucket) + ): buf.copy_(synced) self._handles.clear() synced = True @@ -124,4 +146,4 @@ def synchronize(self): def set_accumulation_step(self, accumulation_step): self.accumulation_step = accumulation_step - self.count_down = self.accumulation_step - 1 \ No newline at end of file + self.count_down = self.accumulation_step - 1 diff --git a/finetune/PyTorch/run_classifier_azureml.py b/finetune/PyTorch/run_classifier_azureml.py index 8791e4e..d01910e 100644 --- a/finetune/PyTorch/run_classifier_azureml.py +++ b/finetune/PyTorch/run_classifier_azureml.py @@ -1,3 +1,4 @@ +# flake8: noqa # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # @@ -27,7 +28,12 @@ import numpy as np import torch -from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler +from torch.utils.data import ( + TensorDataset, + DataLoader, + RandomSampler, + SequentialSampler, +) from torch.utils.data.distributed import DistributedSampler import torch.multiprocessing as mp @@ -38,9 +44,11 @@ from azureml_bert_util import * from azureml.core.run import Run -logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', - datefmt = '%m/%d/%Y %H:%M:%S', - level = logging.INFO) +logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, +) logger = logging.getLogger(__name__) @@ -106,14 +114,18 @@ class MrpcProcessor(DataProcessor): def get_train_examples(self, data_dir): """See base class.""" - logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv"))) + logger.info( + "LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")) + ) return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train" + ) def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev" + ) def get_labels(self): """See base class.""" @@ -130,7 +142,10 @@ def _create_examples(self, lines, set_type): text_b = line[4] label = line[0] examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + InputExample( + guid=guid, text_a=text_a, text_b=text_b, label=label + ) + ) return examples @@ -140,13 +155,15 @@ class MnliProcessor(DataProcessor): def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train" + ) def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( 
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), - "dev_matched") + "dev_matched", + ) def get_labels(self): """See base class.""" @@ -163,7 +180,10 @@ def _create_examples(self, lines, set_type): text_b = line[9] label = line[-1] examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + InputExample( + guid=guid, text_a=text_a, text_b=text_b, label=label + ) + ) return examples @@ -173,12 +193,14 @@ class ColaProcessor(DataProcessor): def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train" + ) def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev" + ) def get_labels(self): """See base class.""" @@ -192,11 +214,16 @@ def _create_examples(self, lines, set_type): text_a = line[3] label = line[1] examples.append( - InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + InputExample( + guid=guid, text_a=text_a, text_b=None, label=label + ) + ) return examples -def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): +def convert_examples_to_features( + examples, label_list, max_seq_length, tokenizer +): """Loads a data file into a list of `InputBatch`s.""" label_map = {} @@ -219,7 +246,7 @@ def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: - tokens_a = tokens_a[0:(max_seq_length - 2)] + tokens_a = tokens_a[0 : (max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: @@ -276,19 +303,26 @@ def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) - logger.info("tokens: %s" % " ".join( - [str(x) for x in tokens])) - logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) + logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) logger.info( - "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + "input_ids: %s" % " ".join([str(x) for x in input_ids]) + ) + logger.info( + "input_mask: %s" % " ".join([str(x) for x in input_mask]) + ) + logger.info( + "segment_ids: %s" % " ".join([str(x) for x in segment_ids]) + ) logger.info("label: %s (id = %d)" % (example.label, label_id)) features.append( - InputFeatures(input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - label_id=label_id)) + InputFeatures( + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + label_id=label_id, + ) + ) return features @@ -308,34 +342,49 @@ def _truncate_seq_pair(tokens_a, tokens_b, max_length): else: tokens_b.pop() + def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) + def copy_optimizer_params_to_model(named_params_model, named_params_optimizer): """ Utility function for optimize_on_cpu and 16-bits training. 
Copy the parameters optimized on CPU/RAM back to the model on GPU """ - for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model): + for (name_opti, param_opti), (name_model, param_model) in zip( + named_params_optimizer, named_params_model + ): if name_opti != name_model: - logger.error("name_opti != name_model: {} {}".format(name_opti, name_model)) + logger.error( + "name_opti != name_model: {} {}".format(name_opti, name_model) + ) raise ValueError param_model.data.copy_(param_opti.data) -def set_optimizer_params_grad(named_params_optimizer, named_params_model, test_nan=False): + +def set_optimizer_params_grad( + named_params_optimizer, named_params_model, test_nan=False +): """ Utility function for optimize_on_cpu and 16-bits training. Copy the gradient of the GPU parameters to the CPU/RAMM copy of the model """ is_nan = False - for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model): + for (name_opti, param_opti), (name_model, param_model) in zip( + named_params_optimizer, named_params_model + ): if name_opti != name_model: - logger.error("name_opti != name_model: {} {}".format(name_opti, name_model)) + logger.error( + "name_opti != name_model: {} {}".format(name_opti, name_model) + ) raise ValueError if param_model.grad is not None: if test_nan and torch.isnan(param_model.grad).sum() > 0: is_nan = True if param_opti.grad is None: - param_opti.grad = torch.nn.Parameter(param_opti.data.new().resize_(*param_opti.data.size())) + param_opti.grad = torch.nn.Parameter( + param_opti.data.new().resize_(*param_opti.data.size()) + ) param_opti.grad.data.copy_(param_model.grad.data) else: param_opti.grad = None @@ -346,173 +395,262 @@ def main(): parser = argparse.ArgumentParser() ## Required parameters - parser.add_argument("--data_dir", - default=None, - type=str, - required=True, - help="The input data dir. Should contain the .tsv files (or other data files) for the task.") - parser.add_argument("--bert_model", default=None, type=str, required=True, - help="Bert pre-trained model selected in the list: bert-base-uncased, " - "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") - parser.add_argument("--task_name", - default=None, - type=str, - required=True, - help="The name of the task to train.") - parser.add_argument("--output_dir", default=None, type=str, required=True, - help="The output directory where the model checkpoints will be written.") - + parser.add_argument( + "--data_dir", + default=None, + type=str, + required=True, + help="The input data dir. Should contain the .tsv files (or other data files) for the task.", + ) + parser.add_argument( + "--bert_model", + default=None, + type=str, + required=True, + help="Bert pre-trained model selected in the list: bert-base-uncased, " + "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.", + ) + parser.add_argument( + "--task_name", + default=None, + type=str, + required=True, + help="The name of the task to train.", + ) + parser.add_argument( + "--output_dir", + default=None, + type=str, + required=True, + help="The output directory where the model checkpoints will be written.", + ) + ## Other parameters - parser.add_argument("--max_seq_length", - default=128, - type=int, - help="The maximum total input sequence length after WordPiece tokenization. 
\n" - "Sequences longer than this will be truncated, and sequences shorter \n" - "than this will be padded.") - parser.add_argument("--do_train", - default=False, - action='store_true', - help="Whether to run training.") - parser.add_argument("--do_eval", - default=False, - action='store_true', - help="Whether to run eval on the dev set.") - parser.add_argument("--do_lower_case", - default=False, - action='store_true', - help="Set this flag if you are using an uncased model.") - parser.add_argument("--train_batch_size", - default=32, - type=int, - help="Total batch size for training.") - parser.add_argument("--eval_batch_size", - default=8, - type=int, - help="Total batch size for eval.") - parser.add_argument("--learning_rate", - default=5e-5, - type=float, - help="The initial learning rate for Adam.") - parser.add_argument("--num_train_epochs", - default=3.0, - type=float, - help="Total number of training epochs to perform.") - parser.add_argument("--warmup_proportion", - default=0.1, - type=float, - help="Proportion of training to perform linear learning rate warmup for. " - "E.g., 0.1 = 10%% of training.") - parser.add_argument("--no_cuda", - default=False, - action='store_true', - help="Whether not to use CUDA when available") - parser.add_argument("--local_rank", - type=int, - default=-1, - help="local_rank for distributed training on gpus") - parser.add_argument('--seed', - type=int, - default=42, - help="random seed for initialization") - parser.add_argument('--gradient_accumulation_steps', - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.") - parser.add_argument('--optimize_on_cpu', - default=False, - action='store_true', - help="Whether to perform optimization and keep the optimizer averages on CPU") - parser.add_argument('--fp16', - default=False, - action='store_true', - help="Whether to use 16-bit float precision instead of 32-bit") - parser.add_argument('--loss_scale', - type=float, default=128, - help='Loss scaling, positive power of 2 values can improve fp16 convergence.') - parser.add_argument('--step_per_log', - type=int, default=5, - help='Number of updates steps to log metrics.') - parser.add_argument("--process_count_per_node", default=1, type=int, - help="Total number of process count to launch per node.") + parser.add_argument( + "--max_seq_length", + default=128, + type=int, + help="The maximum total input sequence length after WordPiece tokenization. 
\n" + "Sequences longer than this will be truncated, and sequences shorter \n" + "than this will be padded.", + ) + parser.add_argument( + "--do_train", + default=False, + action="store_true", + help="Whether to run training.", + ) + parser.add_argument( + "--do_eval", + default=False, + action="store_true", + help="Whether to run eval on the dev set.", + ) + parser.add_argument( + "--do_lower_case", + default=False, + action="store_true", + help="Set this flag if you are using an uncased model.", + ) + parser.add_argument( + "--train_batch_size", + default=32, + type=int, + help="Total batch size for training.", + ) + parser.add_argument( + "--eval_batch_size", + default=8, + type=int, + help="Total batch size for eval.", + ) + parser.add_argument( + "--learning_rate", + default=5e-5, + type=float, + help="The initial learning rate for Adam.", + ) + parser.add_argument( + "--num_train_epochs", + default=3.0, + type=float, + help="Total number of training epochs to perform.", + ) + parser.add_argument( + "--warmup_proportion", + default=0.1, + type=float, + help="Proportion of training to perform linear learning rate warmup for. " + "E.g., 0.1 = 10%% of training.", + ) + parser.add_argument( + "--no_cuda", + default=False, + action="store_true", + help="Whether not to use CUDA when available", + ) + parser.add_argument( + "--local_rank", + type=int, + default=-1, + help="local_rank for distributed training on gpus", + ) + parser.add_argument( + "--seed", type=int, default=42, help="random seed for initialization" + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--optimize_on_cpu", + default=False, + action="store_true", + help="Whether to perform optimization and keep the optimizer averages on CPU", + ) + parser.add_argument( + "--fp16", + default=False, + action="store_true", + help="Whether to use 16-bit float precision instead of 32-bit", + ) + parser.add_argument( + "--loss_scale", + type=float, + default=128, + help="Loss scaling, positive power of 2 values can improve fp16 convergence.", + ) + parser.add_argument( + "--step_per_log", + type=int, + default=5, + help="Number of updates steps to log metrics.", + ) + parser.add_argument( + "--process_count_per_node", + default=1, + type=int, + help="Total number of process count to launch per node.", + ) args = parser.parse_args() run = Run.get_context() - + processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mrpc": MrpcProcessor, } - comm = DistributedCommunicator(accumulation_step=args.gradient_accumulation_steps) + comm = DistributedCommunicator( + accumulation_step=args.gradient_accumulation_steps + ) rank = comm.rank local_rank = comm.local_rank world_size = comm.world_size is_master = rank == 0 - logger.info("world size: {}, local rank: {}, global rank: {}, fp16: {}".format(world_size, local_rank, rank, args.fp16)) + logger.info( + "world size: {}, local rank: {}, global rank: {}, fp16: {}".format( + world_size, local_rank, rank, args.fp16 + ) + ) torch.cuda.set_device(local_rank) device = torch.device("cuda", local_rank) if os.path.exists(args.output_dir) and os.listdir(args.output_dir): - raise ValueError("Output directory () already exists and is not empty.") + raise ValueError( + "Output directory () already exists and is not empty." 
+ ) os.makedirs(args.output_dir, exist_ok=True) output_model_file = os.path.join(args.output_dir, "pytorch_model.bin") if args.gradient_accumulation_steps < 1: - raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( - args.gradient_accumulation_steps)) + raise ValueError( + "Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + args.gradient_accumulation_steps + ) + ) if local_rank == -1: - args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) + args.train_batch_size = int( + args.train_batch_size / args.gradient_accumulation_steps + ) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if not args.do_train and not args.do_eval: - raise ValueError("At least one of `do_train` or `do_eval` must be True.") + raise ValueError( + "At least one of `do_train` or `do_eval` must be True." + ) task_name = args.task_name.lower() - is_master = (local_rank == -1 or rank == 0) + is_master = local_rank == -1 or rank == 0 if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() - tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) + tokenizer = BertTokenizer.from_pretrained( + args.bert_model, do_lower_case=args.do_lower_case + ) train_examples = None num_train_steps = None if args.do_train: train_examples = processor.get_train_examples(args.data_dir) num_train_steps = int( - len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs) + len(train_examples) + / args.train_batch_size + / args.gradient_accumulation_steps + * args.num_train_epochs + ) # Prepare model - model = BertForSequenceClassification.from_pretrained(args.bert_model, - cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(local_rank)) + model = BertForSequenceClassification.from_pretrained( + args.bert_model, + cache_dir=PYTORCH_PRETRAINED_BERT_CACHE + / "distributed_{}".format(local_rank), + ) if args.fp16: model.half() model.to(device) comm.register_model(model, args.fp16) if args.do_train: - + param_optimizer = list(model.named_parameters()) # hack to remove pooler, which is not used # thus it produce None grad that break apex - param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]] + param_optimizer = [n for n in param_optimizer if "pooler" not in n[0]] - no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] + no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ - {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, - {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} - ] + { + "params": [ + p + for n, p in param_optimizer + if not any(nd in n for nd in no_decay) + ], + "weight_decay": 0.01, + }, + { + "params": [ + p + for n, p in param_optimizer + if any(nd in n for nd in no_decay) + ], + "weight_decay": 0.0, + }, + ] t_total = num_train_steps // world_size if args.fp16: @@ -520,45 +658,68 @@ def main(): from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this.") - - optimizer = FusedAdam(optimizer_grouped_parameters, - lr=args.learning_rate, - bias_correction=False, - max_grad_norm=1.0) + raise ImportError( + "Please install 
apex from https://www.github.com/nvidia/apex to run this." + ) + + optimizer = FusedAdam( + optimizer_grouped_parameters, + lr=args.learning_rate, + bias_correction=False, + max_grad_norm=1.0, + ) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: - optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) + optimizer = FP16_Optimizer( + optimizer, static_loss_scale=args.loss_scale + ) else: - optimizer = BertAdam(optimizer_grouped_parameters, - lr=args.learning_rate, - warmup=args.warmup_proportion, - t_total=t_total) + optimizer = BertAdam( + optimizer_grouped_parameters, + lr=args.learning_rate, + warmup=args.warmup_proportion, + t_total=t_total, + ) if is_master: - run.log('lr', np.float(args.learning_rate)) - + run.log("lr", np.float(args.learning_rate)) + train_features = convert_examples_to_features( - train_examples, label_list, args.max_seq_length, tokenizer) + train_examples, label_list, args.max_seq_length, tokenizer + ) logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_examples)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_steps) - all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) - all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) - all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) - all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) - train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) + all_input_ids = torch.tensor( + [f.input_ids for f in train_features], dtype=torch.long + ) + all_input_mask = torch.tensor( + [f.input_mask for f in train_features], dtype=torch.long + ) + all_segment_ids = torch.tensor( + [f.segment_ids for f in train_features], dtype=torch.long + ) + all_label_ids = torch.tensor( + [f.label_id for f in train_features], dtype=torch.long + ) + train_data = TensorDataset( + all_input_ids, all_input_mask, all_segment_ids, all_label_ids + ) if local_rank != -1 and world_size > 1: train_sampler = DistributedSampler(train_data) else: train_sampler = RandomSampler(train_data) - train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) + train_dataloader = DataLoader( + train_data, sampler=train_sampler, batch_size=args.train_batch_size + ) global_step, tr_loss = 0, 0 model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): - for _, batch in enumerate(tqdm(train_dataloader, desc="Iteration")): + for _, batch in enumerate( + tqdm(train_dataloader, desc="Iteration") + ): batch = tuple(t.to(device) for t in batch) input_ids, input_mask, segment_ids, label_ids = batch loss = model(input_ids, segment_ids, input_mask, label_ids) @@ -567,13 +728,17 @@ def main(): global_step += 1 tr_loss += loss.item() if comm.synchronize(): - lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion) + lr_this_step = args.learning_rate * warmup_linear( + global_step / t_total, args.warmup_proportion + ) for param_group in optimizer.param_groups: - param_group['lr'] = lr_this_step + param_group["lr"] = lr_this_step optimizer.step() model.zero_grad() if is_master and (global_step + 1) % args.step_per_log == 0: - run.log('train_loss', np.float(tr_loss / args.step_per_log)) + run.log( + "train_loss", np.float(tr_loss / args.step_per_log) + ) tr_loss = 0 if is_master: # 
Save a trained model @@ -582,18 +747,31 @@ def main(): if args.do_eval and is_master: eval_examples = processor.get_dev_examples(args.data_dir) eval_features = convert_examples_to_features( - eval_examples, label_list, args.max_seq_length, tokenizer) + eval_examples, label_list, args.max_seq_length, tokenizer + ) logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) - all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) - all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) - all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) - all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) - eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) + all_input_ids = torch.tensor( + [f.input_ids for f in eval_features], dtype=torch.long + ) + all_input_mask = torch.tensor( + [f.input_mask for f in eval_features], dtype=torch.long + ) + all_segment_ids = torch.tensor( + [f.segment_ids for f in eval_features], dtype=torch.long + ) + all_label_ids = torch.tensor( + [f.label_id for f in eval_features], dtype=torch.long + ) + eval_data = TensorDataset( + all_input_ids, all_input_mask, all_segment_ids, all_label_ids + ) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) - eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) + eval_dataloader = DataLoader( + eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size + ) model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 @@ -603,10 +781,12 @@ def main(): segment_ids = segment_ids.to(device) label_ids = label_ids.to(device) with torch.no_grad(): - tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids) + tmp_eval_loss = model( + input_ids, segment_ids, input_mask, label_ids + ) logits = model(input_ids, segment_ids, input_mask) logits = logits.detach().cpu().numpy() - label_ids = label_ids.to('cpu').numpy() + label_ids = label_ids.to("cpu").numpy() tmp_eval_accuracy = accuracy(logits, label_ids) eval_loss += tmp_eval_loss.mean().item() eval_accuracy += tmp_eval_accuracy @@ -615,8 +795,7 @@ def main(): eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples - result = {'eval_loss': eval_loss, - 'eval_accuracy': eval_accuracy} + result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy} logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) diff --git a/finetune/TensorFlow/download_model_and_dataset.py b/finetune/TensorFlow/download_model_and_dataset.py index 843c2a9..1bd6e56 100644 --- a/finetune/TensorFlow/download_model_and_dataset.py +++ b/finetune/TensorFlow/download_model_and_dataset.py @@ -1,3 +1,4 @@ +# flake8: noqa from __future__ import print_function import argparse import sys @@ -9,64 +10,88 @@ parser = argparse.ArgumentParser() ## Required parameters -parser.add_argument("--bert_model_name", - default = None, - type = str, - required = True, - help = "Name of pretrained BERT model. Possible values: " - "uncased_L-12_H-768_A-12,uncased_L-24_H-1024_A-16,cased_L-12_H-768_A-12," - "multilingual_L-12_H-768_A-12,chinese_L-12_H-768_A-12") +parser.add_argument( + "--bert_model_name", + default=None, + type=str, + required=True, + help="Name of pretrained BERT model. 
Possible values: " + "uncased_L-12_H-768_A-12,uncased_L-24_H-1024_A-16,cased_L-12_H-768_A-12," + "multilingual_L-12_H-768_A-12,chinese_L-12_H-768_A-12", +) -parser.add_argument("--model_dump_path", - default = None, - type = str, - required = True, - help = "Path to the output model.") +parser.add_argument( + "--model_dump_path", + default=None, + type=str, + required=True, + help="Path to the output model.", +) -parser.add_argument("--glue_data_path", - default = None, - type = str, - required = True, - help = "Path to store downloaded GLUE dataset") +parser.add_argument( + "--glue_data_path", + default=None, + type=str, + required=True, + help="Path to store downloaded GLUE dataset", +) args = parser.parse_args() bert_model_url_map = { - 'uncased_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip', - 'uncased_L-24_H-1024_A-16': 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip', - 'cased_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip', - 'multilingual_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', - 'chinese_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip' + "uncased_L-12_H-768_A-12": "https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip", + "uncased_L-24_H-1024_A-16": "https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip", + "cased_L-12_H-768_A-12": "https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip", + "multilingual_L-12_H-768_A-12": "https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip", + "chinese_L-12_H-768_A-12": "https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip", } if args.bert_model_name not in bert_model_url_map: - sys.stderr.write('Unknown BERT model name ' + args.bert_model_name) + sys.stderr.write("Unknown BERT model name " + args.bert_model_name) sys.exit(1) pretrained_model_url = bert_model_url_map.get(args.bert_model_name) # make local directory for pretrained tensorflow BERT model -tensorflow_model_dir = './tensorflow_model' +tensorflow_model_dir = "./tensorflow_model" if not os.path.exists(tensorflow_model_dir): os.makedirs(tensorflow_model_dir) # download and extract pretrained tensorflow BERT model -download_file_name = 'tensorflow_model.zip' +download_file_name = "tensorflow_model.zip" urllib.request.urlretrieve(pretrained_model_url, filename=download_file_name) -print('Extracting pretrained model...') -with zipfile.ZipFile(download_file_name, 'r') as z: +print("Extracting pretrained model...") +with zipfile.ZipFile(download_file_name, "r") as z: z.extractall(tensorflow_model_dir) # make destination path if not os.path.exists(args.model_dump_path): os.makedirs(args.model_dump_path) -files = ['bert_model.ckpt.meta', 'bert_model.ckpt.index', 'bert_model.ckpt.data-00000-of-00001', 'bert_config.json', 'vocab.txt'] +files = [ + "bert_model.ckpt.meta", + "bert_model.ckpt.index", + "bert_model.ckpt.data-00000-of-00001", + "bert_config.json", + "vocab.txt", +] for file in files: - shutil.copy(os.path.join(tensorflow_model_dir, args.bert_model_name, file), os.path.join(args.model_dump_path, file)) + shutil.copy( + os.path.join(tensorflow_model_dir, args.bert_model_name, file), + os.path.join(args.model_dump_path, file), + ) -print('Start to download GLUE dataset...\n') 
+print("Start to download GLUE dataset...\n") urllib.request.urlretrieve( - 'https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py', - filename='download_glue_data.py') -if os.system('python download_glue_data.py --data_dir {0} --tasks all'.format(args.glue_data_path)) != 0: sys.exit(1) \ No newline at end of file + "https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py", + filename="download_glue_data.py", +) +if ( + os.system( + "python download_glue_data.py --data_dir {0} --tasks all".format( + args.glue_data_path + ) + ) + != 0 +): + sys.exit(1) diff --git a/finetune/TensorFlow/run_classifier.py b/finetune/TensorFlow/run_classifier.py index bbe825d..34178a3 100644 --- a/finetune/TensorFlow/run_classifier.py +++ b/finetune/TensorFlow/run_classifier.py @@ -1,3 +1,4 @@ +# flake8: noqa # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # @@ -27,6 +28,7 @@ import tensorflow as tf import numpy as np from azureml.core.run import Run + # get the Azure ML run object run = Run.get_context() flags = tf.flags @@ -35,48 +37,65 @@ ## Required parameters flags.DEFINE_string( - "data_dir", None, + "data_dir", + None, "The input data dir. Should contain the .tsv files (or other data files) " - "for the task.") + "for the task.", +) flags.DEFINE_string( - "bert_config_file", None, + "bert_config_file", + None, "The config json file corresponding to the pre-trained BERT model. " - "This specifies the model architecture.") + "This specifies the model architecture.", +) flags.DEFINE_string("task_name", None, "The name of the task to train.") -flags.DEFINE_string("vocab_file", None, - "The vocabulary file that the BERT model was trained on.") +flags.DEFINE_string( + "vocab_file", + None, + "The vocabulary file that the BERT model was trained on.", +) flags.DEFINE_string( - "output_dir", None, - "The output directory where the model checkpoints will be written.") + "output_dir", + None, + "The output directory where the model checkpoints will be written.", +) ## Other parameters flags.DEFINE_string( - "init_checkpoint", None, - "Initial checkpoint (usually from a pre-trained BERT model).") + "init_checkpoint", + None, + "Initial checkpoint (usually from a pre-trained BERT model).", +) flags.DEFINE_bool( - "do_lower_case", True, + "do_lower_case", + True, "Whether to lower case the input text. Should be True for uncased " - "models and False for cased models.") + "models and False for cased models.", +) flags.DEFINE_integer( - "max_seq_length", 128, + "max_seq_length", + 128, "The maximum total input sequence length after WordPiece tokenization. 
" "Sequences longer than this will be truncated, and sequences shorter " - "than this will be padded.") + "than this will be padded.", +) flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( - "do_predict", False, - "Whether to run the model in inference mode on the test set.") + "do_predict", + False, + "Whether to run the model in inference mode on the test set.", +) flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") @@ -84,54 +103,71 @@ flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") -flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") +flags.DEFINE_float( + "learning_rate", 5e-5, "The initial learning rate for Adam." +) -flags.DEFINE_float("num_train_epochs", 3.0, - "Total number of training epochs to perform.") +flags.DEFINE_float( + "num_train_epochs", 3.0, "Total number of training epochs to perform." +) flags.DEFINE_float( - "warmup_proportion", 0.1, + "warmup_proportion", + 0.1, "Proportion of training to perform linear learning rate warmup for. " - "E.g., 0.1 = 10% of training.") + "E.g., 0.1 = 10% of training.", +) -flags.DEFINE_integer("save_checkpoints_steps", 1000, - "How often to save the model checkpoint.") +flags.DEFINE_integer( + "save_checkpoints_steps", 1000, "How often to save the model checkpoint." +) -flags.DEFINE_integer("iterations_per_loop", 1000, - "How many steps to make in each estimator call.") +flags.DEFINE_integer( + "iterations_per_loop", + 1000, + "How many steps to make in each estimator call.", +) flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") tf.flags.DEFINE_string( - "tpu_name", None, + "tpu_name", + None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " - "url.") + "url.", +) tf.flags.DEFINE_string( - "tpu_zone", None, + "tpu_zone", + None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " - "metadata.") + "metadata.", +) tf.flags.DEFINE_string( - "gcp_project", None, + "gcp_project", + None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " - "metadata.") + "metadata.", +) tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( - "num_tpu_cores", 8, - "Only used if `use_tpu` is True. Total number of TPU cores to use.") + "num_tpu_cores", + 8, + "Only used if `use_tpu` is True. Total number of TPU cores to use.", +) class InputExample(object): - """A single training/test example for simple sequence classification.""" + """A single training/test example for simple sequence classification.""" - def __init__(self, guid, text_a, text_b=None, label=None): - """Constructs a InputExample. + def __init__(self, guid, text_a, text_b=None, label=None): + """Constructs a InputExample. Args: guid: Unique id for the example. @@ -142,806 +178,948 @@ def __init__(self, guid, text_a, text_b=None, label=None): label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. 
""" - self.guid = guid - self.text_a = text_a - self.text_b = text_b - self.label = label + self.guid = guid + self.text_a = text_a + self.text_b = text_b + self.label = label class InputFeatures(object): - """A single set of features of data.""" + """A single set of features of data.""" - def __init__(self, input_ids, input_mask, segment_ids, label_id): - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.label_id = label_id + def __init__(self, input_ids, input_mask, segment_ids, label_id): + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.label_id = label_id class DataProcessor(object): - """Base class for data converters for sequence classification data sets.""" + """Base class for data converters for sequence classification data sets.""" - def get_train_examples(self, data_dir): - """Gets a collection of `InputExample`s for the train set.""" - raise NotImplementedError() + def get_train_examples(self, data_dir): + """Gets a collection of `InputExample`s for the train set.""" + raise NotImplementedError() - def get_dev_examples(self, data_dir): - """Gets a collection of `InputExample`s for the dev set.""" - raise NotImplementedError() + def get_dev_examples(self, data_dir): + """Gets a collection of `InputExample`s for the dev set.""" + raise NotImplementedError() - def get_test_examples(self, data_dir): - """Gets a collection of `InputExample`s for prediction.""" - raise NotImplementedError() + def get_test_examples(self, data_dir): + """Gets a collection of `InputExample`s for prediction.""" + raise NotImplementedError() - def get_labels(self): - """Gets the list of labels for this data set.""" - raise NotImplementedError() + def get_labels(self): + """Gets the list of labels for this data set.""" + raise NotImplementedError() - @classmethod - def _read_tsv(cls, input_file, quotechar=None): - """Reads a tab separated value file.""" - with tf.gfile.Open(input_file, "r") as f: - reader = csv.reader(f, delimiter="\t", quotechar=quotechar) - lines = [] - for line in reader: - lines.append(line) - return lines + @classmethod + def _read_tsv(cls, input_file, quotechar=None): + """Reads a tab separated value file.""" + with tf.gfile.Open(input_file, "r") as f: + reader = csv.reader(f, delimiter="\t", quotechar=quotechar) + lines = [] + for line in reader: + lines.append(line) + return lines class XnliProcessor(DataProcessor): - """Processor for the XNLI data set.""" - - def __init__(self): - self.language = "zh" - - def get_train_examples(self, data_dir): - """See base class.""" - lines = self._read_tsv( - os.path.join(data_dir, "multinli", - "multinli.train.%s.tsv" % self.language)) - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "train-%d" % (i) - text_a = tokenization.convert_to_unicode(line[0]) - text_b = tokenization.convert_to_unicode(line[1]) - label = tokenization.convert_to_unicode(line[2]) - if label == tokenization.convert_to_unicode("contradictory"): - label = tokenization.convert_to_unicode("contradiction") - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) - return examples - - def get_dev_examples(self, data_dir): - """See base class.""" - lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv")) - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "dev-%d" % (i) - language = tokenization.convert_to_unicode(line[0]) - if language != 
tokenization.convert_to_unicode(self.language): - continue - text_a = tokenization.convert_to_unicode(line[6]) - text_b = tokenization.convert_to_unicode(line[7]) - label = tokenization.convert_to_unicode(line[1]) - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) - return examples - - def get_labels(self): - """See base class.""" - return ["contradiction", "entailment", "neutral"] + """Processor for the XNLI data set.""" + + def __init__(self): + self.language = "zh" + + def get_train_examples(self, data_dir): + """See base class.""" + lines = self._read_tsv( + os.path.join( + data_dir, "multinli", "multinli.train.%s.tsv" % self.language + ) + ) + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "train-%d" % (i) + text_a = tokenization.convert_to_unicode(line[0]) + text_b = tokenization.convert_to_unicode(line[1]) + label = tokenization.convert_to_unicode(line[2]) + if label == tokenization.convert_to_unicode("contradictory"): + label = tokenization.convert_to_unicode("contradiction") + examples.append( + InputExample( + guid=guid, text_a=text_a, text_b=text_b, label=label + ) + ) + return examples + + def get_dev_examples(self, data_dir): + """See base class.""" + lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv")) + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "dev-%d" % (i) + language = tokenization.convert_to_unicode(line[0]) + if language != tokenization.convert_to_unicode(self.language): + continue + text_a = tokenization.convert_to_unicode(line[6]) + text_b = tokenization.convert_to_unicode(line[7]) + label = tokenization.convert_to_unicode(line[1]) + examples.append( + InputExample( + guid=guid, text_a=text_a, text_b=text_b, label=label + ) + ) + return examples + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] class MnliProcessor(DataProcessor): - """Processor for the MultiNLI data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """See base class.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_dev_examples(self, data_dir): - """See base class.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), - "dev_matched") - - def get_test_examples(self, data_dir): - """See base class.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test") - - def get_labels(self): - """See base class.""" - return ["contradiction", "entailment", "neutral"] - - def _create_examples(self, lines, set_type): - """Creates examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])) - text_a = tokenization.convert_to_unicode(line[8]) - text_b = tokenization.convert_to_unicode(line[9]) - if set_type == "test": - label = "contradiction" - else: - label = tokenization.convert_to_unicode(line[-1]) - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) - return examples + """Processor for the MultiNLI data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train" + ) + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + 
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), + "dev_matched", + ) + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test" + ) + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % ( + set_type, + tokenization.convert_to_unicode(line[0]), + ) + text_a = tokenization.convert_to_unicode(line[8]) + text_b = tokenization.convert_to_unicode(line[9]) + if set_type == "test": + label = "contradiction" + else: + label = tokenization.convert_to_unicode(line[-1]) + examples.append( + InputExample( + guid=guid, text_a=text_a, text_b=text_b, label=label + ) + ) + return examples class MrpcProcessor(DataProcessor): - """Processor for the MRPC data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """See base class.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_dev_examples(self, data_dir): - """See base class.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") - - def get_test_examples(self, data_dir): - """See base class.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") - - def get_labels(self): - """See base class.""" - return ["0", "1"] - - def _create_examples(self, lines, set_type): - """Creates examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, i) - text_a = tokenization.convert_to_unicode(line[3]) - text_b = tokenization.convert_to_unicode(line[4]) - if set_type == "test": - label = "0" - else: - label = tokenization.convert_to_unicode(line[0]) - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) - return examples + """Processor for the MRPC data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train" + ) + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev" + ) + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "test.tsv")), "test" + ) + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, i) + text_a = tokenization.convert_to_unicode(line[3]) + text_b = tokenization.convert_to_unicode(line[4]) + if set_type == "test": + label = "0" + else: + label = tokenization.convert_to_unicode(line[0]) + examples.append( + InputExample( + guid=guid, text_a=text_a, text_b=text_b, label=label + ) + ) + return examples class ColaProcessor(DataProcessor): - """Processor for the CoLA data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """See base class.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_dev_examples(self, 
data_dir): - """See base class.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") - - def get_test_examples(self, data_dir): - """See base class.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") - - def get_labels(self): - """See base class.""" - return ["0", "1"] - - def _create_examples(self, lines, set_type): - """Creates examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - # Only the test set has a header - if set_type == "test" and i == 0: - continue - guid = "%s-%s" % (set_type, i) - if set_type == "test": - text_a = tokenization.convert_to_unicode(line[1]) - label = "0" - else: - text_a = tokenization.convert_to_unicode(line[3]) - label = tokenization.convert_to_unicode(line[1]) - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) - return examples + """Processor for the CoLA data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train" + ) + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev" + ) + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "test.tsv")), "test" + ) + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + # Only the test set has a header + if set_type == "test" and i == 0: + continue + guid = "%s-%s" % (set_type, i) + if set_type == "test": + text_a = tokenization.convert_to_unicode(line[1]) + label = "0" + else: + text_a = tokenization.convert_to_unicode(line[3]) + label = tokenization.convert_to_unicode(line[1]) + examples.append( + InputExample( + guid=guid, text_a=text_a, text_b=None, label=label + ) + ) + return examples class _MetricLogger(tf.train.SessionRunHook): - def __init__(self, mode): - self._mode = mode - - def begin(self): - self._step = 0 - self._loss_total = 0 - - def before_run(self, run_context): - pass - self._step += 1 - graph = run_context.session.graph - tensor_name = "loss/Mean:0" - loss_tensor = graph.get_tensor_by_name(tensor_name) - return tf.train.SessionRunArgs(loss_tensor) - - def after_run(self, run_context, run_values): - loss_value = run_values.results - self._loss_total += loss_value - if self._step % 2 == 0: - mean_loss = self._loss_total / self._step - run.log('%s_mean_loss' % self._mode, mean_loss) - run.log('%s_example_loss' % self._mode, loss_value) - - -def convert_single_example(ex_index, example, label_list, max_seq_length, - tokenizer): - """Converts a single `InputExample` into a single `InputFeatures`.""" - label_map = {} - for (i, label) in enumerate(label_list): - label_map[label] = i - - tokens_a = tokenizer.tokenize(example.text_a) - tokens_b = None - if example.text_b: - tokens_b = tokenizer.tokenize(example.text_b) - - if tokens_b: - # Modifies `tokens_a` and `tokens_b` in place so that the total - # length is less than the specified length. 
- # Account for [CLS], [SEP], [SEP] with "- 3" - _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) - else: - # Account for [CLS] and [SEP] with "- 2" - if len(tokens_a) > max_seq_length - 2: - tokens_a = tokens_a[0:(max_seq_length - 2)] - - # The convention in BERT is: - # (a) For sequence pairs: - # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] - # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 - # (b) For single sequences: - # tokens: [CLS] the dog is hairy . [SEP] - # type_ids: 0 0 0 0 0 0 0 - # - # Where "type_ids" are used to indicate whether this is the first - # sequence or the second sequence. The embedding vectors for `type=0` and - # `type=1` were learned during pre-training and are added to the wordpiece - # embedding vector (and position vector). This is not *strictly* necessary - # since the [SEP] token unambiguously separates the sequences, but it makes - # it easier for the model to learn the concept of sequences. - # - # For classification tasks, the first vector (corresponding to [CLS]) is - # used as as the "sentence vector". Note that this only makes sense because - # the entire model is fine-tuned. - tokens = [] - segment_ids = [] - tokens.append("[CLS]") - segment_ids.append(0) - for token in tokens_a: - tokens.append(token) + def __init__(self, mode): + self._mode = mode + + def begin(self): + self._step = 0 + self._loss_total = 0 + + def before_run(self, run_context): + pass + self._step += 1 + graph = run_context.session.graph + tensor_name = "loss/Mean:0" + loss_tensor = graph.get_tensor_by_name(tensor_name) + return tf.train.SessionRunArgs(loss_tensor) + + def after_run(self, run_context, run_values): + loss_value = run_values.results + self._loss_total += loss_value + if self._step % 2 == 0: + mean_loss = self._loss_total / self._step + run.log("%s_mean_loss" % self._mode, mean_loss) + run.log("%s_example_loss" % self._mode, loss_value) + + +def convert_single_example( + ex_index, example, label_list, max_seq_length, tokenizer +): + """Converts a single `InputExample` into a single `InputFeatures`.""" + label_map = {} + for (i, label) in enumerate(label_list): + label_map[label] = i + + tokens_a = tokenizer.tokenize(example.text_a) + tokens_b = None + if example.text_b: + tokens_b = tokenizer.tokenize(example.text_b) + + if tokens_b: + # Modifies `tokens_a` and `tokens_b` in place so that the total + # length is less than the specified length. + # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > max_seq_length - 2: + tokens_a = tokens_a[0 : (max_seq_length - 2)] + + # The convention in BERT is: + # (a) For sequence pairs: + # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] + # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 + # (b) For single sequences: + # tokens: [CLS] the dog is hairy . [SEP] + # type_ids: 0 0 0 0 0 0 0 + # + # Where "type_ids" are used to indicate whether this is the first + # sequence or the second sequence. The embedding vectors for `type=0` and + # `type=1` were learned during pre-training and are added to the wordpiece + # embedding vector (and position vector). This is not *strictly* necessary + # since the [SEP] token unambiguously separates the sequences, but it makes + # it easier for the model to learn the concept of sequences. + # + # For classification tasks, the first vector (corresponding to [CLS]) is + # used as as the "sentence vector". 
Note that this only makes sense because + # the entire model is fine-tuned. + tokens = [] + segment_ids = [] + tokens.append("[CLS]") segment_ids.append(0) - tokens.append("[SEP]") - segment_ids.append(0) - - if tokens_b: - for token in tokens_b: - tokens.append(token) - segment_ids.append(1) + for token in tokens_a: + tokens.append(token) + segment_ids.append(0) tokens.append("[SEP]") - segment_ids.append(1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. - while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(0) segment_ids.append(0) - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - label_id = label_map[example.label] - if ex_index < 5: - tf.logging.info("*** Example ***") - tf.logging.info("guid: %s" % (example.guid)) - tf.logging.info("tokens: %s" % " ".join( - [tokenization.printable_text(x) for x in tokens])) - tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) - tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - tf.logging.info("label: %s (id = %d)" % (example.label, label_id)) - - feature = InputFeatures( - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - label_id=label_id) - return feature + if tokens_b: + for token in tokens_b: + tokens.append(token) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. 
+ while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + label_id = label_map[example.label] + if ex_index < 5: + tf.logging.info("*** Example ***") + tf.logging.info("guid: %s" % (example.guid)) + tf.logging.info( + "tokens: %s" + % " ".join([tokenization.printable_text(x) for x in tokens]) + ) + tf.logging.info( + "input_ids: %s" % " ".join([str(x) for x in input_ids]) + ) + tf.logging.info( + "input_mask: %s" % " ".join([str(x) for x in input_mask]) + ) + tf.logging.info( + "segment_ids: %s" % " ".join([str(x) for x in segment_ids]) + ) + tf.logging.info("label: %s (id = %d)" % (example.label, label_id)) + + feature = InputFeatures( + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + label_id=label_id, + ) + return feature def file_based_convert_examples_to_features( - examples, label_list, max_seq_length, tokenizer, output_file): - """Convert a set of `InputExample`s to a TFRecord file.""" - - writer = tf.python_io.TFRecordWriter(output_file) - - for (ex_index, example) in enumerate(examples): - if ex_index % 10000 == 0: - tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) - - feature = convert_single_example(ex_index, example, label_list, - max_seq_length, tokenizer) - - def create_int_feature(values): - f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) - return f - - features = collections.OrderedDict() - features["input_ids"] = create_int_feature(feature.input_ids) - features["input_mask"] = create_int_feature(feature.input_mask) - features["segment_ids"] = create_int_feature(feature.segment_ids) - features["label_ids"] = create_int_feature([feature.label_id]) - - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - writer.write(tf_example.SerializeToString()) - - -def file_based_input_fn_builder(input_file, seq_length, is_training, - drop_remainder): - """Creates an `input_fn` closure to be passed to TPUEstimator.""" - - name_to_features = { - "input_ids": tf.FixedLenFeature([seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([seq_length], tf.int64), - "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), - "label_ids": tf.FixedLenFeature([], tf.int64), - } - - def _decode_record(record, name_to_features): - """Decodes a record to a TensorFlow example.""" - example = tf.parse_single_example(record, name_to_features) - - # tf.Example only supports tf.int64, but the TPU only supports tf.int32. - # So cast all int64 to int32. - for name in list(example.keys()): - t = example[name] - if t.dtype == tf.int64: - t = tf.to_int32(t) - example[name] = t - - return example - - def input_fn(params): - """The actual input function.""" - batch_size = params["batch_size"] - - # For training, we want a lot of parallel reading and shuffling. - # For eval, we want no shuffling and parallel reading doesn't matter. 
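For readers skimming the reformatted `convert_single_example` above, the following is a small self-contained sketch, separate from the patch, of the input layout it produces: `[CLS] A [SEP] B [SEP]` tokens, segment ids of 0 for the first sentence and 1 for the second, an attention mask of 1s over real tokens, and zero-padding up to the maximum sequence length. The whitespace tokenizer and tiny vocabulary here are hypothetical stand-ins for `tokenization.FullTokenizer` and its WordPiece vocab.

```
# Illustrative sketch only; the toy vocabulary and whitespace "tokenizer"
# are hypothetical stand-ins for the WordPiece tokenizer used in the patch.
def encode_pair(text_a, text_b, vocab, max_seq_length=16):
    tokens_a = text_a.lower().split()
    tokens_b = text_b.lower().split()

    # [CLS] a-tokens [SEP] b-tokens [SEP], with segment ids 0 / 0 / 1
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)

    input_ids = [vocab.get(t, vocab["[UNK]"]) for t in tokens]
    input_mask = [1] * len(input_ids)  # 1 for real tokens, 0 for padding

    # Zero-pad everything up to max_seq_length
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)

    return input_ids, input_mask, segment_ids


if __name__ == "__main__":
    vocab = {"[PAD]": 0, "[UNK]": 1, "[CLS]": 2, "[SEP]": 3,
             "is": 4, "this": 5, "jacksonville": 6, "?": 7,
             "no": 8, "it": 9, "not": 10, ".": 11}
    ids, mask, segs = encode_pair("is this jacksonville ?", "no it is not .", vocab)
    print(ids)   # [2, 4, 5, 6, 7, 3, 8, 9, 4, 10, 11, 3, 0, 0, 0, 0]
    print(mask)  # 1s for the 12 real tokens, then 0s for the padding
    print(segs)  # 0s for the first segment, 1s for the second, 0s for padding
```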
- d = tf.data.TFRecordDataset(input_file) - if is_training: - d = d.repeat() - d = d.shuffle(buffer_size=100) - - d = d.apply( - tf.contrib.data.map_and_batch( - lambda record: _decode_record(record, name_to_features), - batch_size=batch_size, - drop_remainder=drop_remainder)) - - return d - - return input_fn + examples, label_list, max_seq_length, tokenizer, output_file +): + """Convert a set of `InputExample`s to a TFRecord file.""" + + writer = tf.python_io.TFRecordWriter(output_file) + + for (ex_index, example) in enumerate(examples): + if ex_index % 10000 == 0: + tf.logging.info( + "Writing example %d of %d" % (ex_index, len(examples)) + ) + + feature = convert_single_example( + ex_index, example, label_list, max_seq_length, tokenizer + ) + + def create_int_feature(values): + f = tf.train.Feature( + int64_list=tf.train.Int64List(value=list(values)) + ) + return f + + features = collections.OrderedDict() + features["input_ids"] = create_int_feature(feature.input_ids) + features["input_mask"] = create_int_feature(feature.input_mask) + features["segment_ids"] = create_int_feature(feature.segment_ids) + features["label_ids"] = create_int_feature([feature.label_id]) + + tf_example = tf.train.Example( + features=tf.train.Features(feature=features) + ) + writer.write(tf_example.SerializeToString()) + + +def file_based_input_fn_builder( + input_file, seq_length, is_training, drop_remainder +): + """Creates an `input_fn` closure to be passed to TPUEstimator.""" + + name_to_features = { + "input_ids": tf.FixedLenFeature([seq_length], tf.int64), + "input_mask": tf.FixedLenFeature([seq_length], tf.int64), + "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), + "label_ids": tf.FixedLenFeature([], tf.int64), + } + + def _decode_record(record, name_to_features): + """Decodes a record to a TensorFlow example.""" + example = tf.parse_single_example(record, name_to_features) + + # tf.Example only supports tf.int64, but the TPU only supports tf.int32. + # So cast all int64 to int32. + for name in list(example.keys()): + t = example[name] + if t.dtype == tf.int64: + t = tf.to_int32(t) + example[name] = t + + return example + + def input_fn(params): + """The actual input function.""" + batch_size = params["batch_size"] + + # For training, we want a lot of parallel reading and shuffling. + # For eval, we want no shuffling and parallel reading doesn't matter. + d = tf.data.TFRecordDataset(input_file) + if is_training: + d = d.repeat() + d = d.shuffle(buffer_size=100) + + d = d.apply( + tf.contrib.data.map_and_batch( + lambda record: _decode_record(record, name_to_features), + batch_size=batch_size, + drop_remainder=drop_remainder, + ) + ) + + return d + + return input_fn def _truncate_seq_pair(tokens_a, tokens_b, max_length): - """Truncates a sequence pair in place to the maximum length.""" - - # This is a simple heuristic which will always truncate the longer sequence - # one token at a time. This makes more sense than truncating an equal percent - # of tokens from each, since if one sequence is very short then each token - # that's truncated likely contains more information than a longer sequence. 
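To make the truncation heuristic described in the comment above concrete, here is a toy, self-contained illustration, independent of the patch, of how the longer sequence is trimmed one token at a time until the pair fits; the token lists are made up.

```
# Toy illustration of the "pop from the longer sequence" truncation heuristic.
def truncate_seq_pair(tokens_a, tokens_b, max_length):
    while len(tokens_a) + len(tokens_b) > max_length:
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()


a = ["the", "quick", "brown", "fox", "jumps", "over", "the", "lazy", "dog"]
b = ["a", "short", "reply"]
truncate_seq_pair(a, b, max_length=8)
print(a)  # ['the', 'quick', 'brown', 'fox', 'jumps']  (only the longer side shrank)
print(b)  # ['a', 'short', 'reply']
```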
- while True: - total_length = len(tokens_a) + len(tokens_b) - if total_length <= max_length: - break - if len(tokens_a) > len(tokens_b): - tokens_a.pop() - else: - tokens_b.pop() - - -def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, - labels, num_labels, use_one_hot_embeddings): - """Creates a classification model.""" - model = modeling.BertModel( - config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - token_type_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - # In the demo, we are doing a simple classification task on the entire - # segment. - # - # If you want to use the token-level output, use model.get_sequence_output() - # instead. - output_layer = model.get_pooled_output() - - hidden_size = output_layer.shape[-1].value - - output_weights = tf.get_variable( - "output_weights", [num_labels, hidden_size], - initializer=tf.truncated_normal_initializer(stddev=0.02)) - - output_bias = tf.get_variable( - "output_bias", [num_labels], initializer=tf.zeros_initializer()) - - with tf.variable_scope("loss"): - if is_training: - # I.e., 0.1 dropout - output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) - - logits = tf.matmul(output_layer, output_weights, transpose_b=True) - logits = tf.nn.bias_add(logits, output_bias) - probabilities = tf.nn.softmax(logits, axis=-1) - log_probs = tf.nn.log_softmax(logits, axis=-1) - - one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) - - per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) - loss = tf.reduce_mean(per_example_loss) - - return (loss, per_example_loss, logits, probabilities) - - -def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, - num_train_steps, num_warmup_steps, use_tpu, - use_one_hot_embeddings): - """Returns `model_fn` closure for TPUEstimator.""" - - def model_fn(features, labels, mode, params): # pylint: disable=unused-argument - """The `model_fn` for TPUEstimator.""" - - tf.logging.info("*** Features ***") - for name in sorted(features.keys()): - tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) - - input_ids = features["input_ids"] - input_mask = features["input_mask"] - segment_ids = features["segment_ids"] - label_ids = features["label_ids"] - - is_training = (mode == tf.estimator.ModeKeys.TRAIN) - - (total_loss, per_example_loss, logits, probabilities) = create_model( - bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, - num_labels, use_one_hot_embeddings) - - tvars = tf.trainable_variables() - initialized_variable_names = {} - scaffold_fn = None - if init_checkpoint: - (assignment_map, initialized_variable_names - ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) - if use_tpu: - - def tpu_scaffold(): - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - return tf.train.Scaffold() - - scaffold_fn = tpu_scaffold - else: - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - - tf.logging.info("**** Trainable Variables ****") - for var in tvars: - init_string = "" - if var.name in initialized_variable_names: - init_string = ", *INIT_FROM_CKPT*" - tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, - init_string) - - output_spec = None - if mode == tf.estimator.ModeKeys.TRAIN: - - train_op = optimization.create_optimizer( - total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) - - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - 
loss=total_loss, - train_op=train_op, - scaffold_fn=scaffold_fn) - elif mode == tf.estimator.ModeKeys.EVAL: - - def metric_fn(per_example_loss, label_ids, logits): - predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) - accuracy = tf.metrics.accuracy(label_ids, predictions) - loss = tf.metrics.mean(per_example_loss) - return { - "eval_accuracy": accuracy, - "eval_loss": loss, - } - - eval_metrics = (metric_fn, [per_example_loss, label_ids, logits]) - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - loss=total_loss, - eval_metrics=eval_metrics, - scaffold_fn=scaffold_fn) - else: - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, predictions=probabilities, scaffold_fn=scaffold_fn) - return output_spec - - return model_fn + """Truncates a sequence pair in place to the maximum length.""" + + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + + +def create_model( + bert_config, + is_training, + input_ids, + input_mask, + segment_ids, + labels, + num_labels, + use_one_hot_embeddings, +): + """Creates a classification model.""" + model = modeling.BertModel( + config=bert_config, + is_training=is_training, + input_ids=input_ids, + input_mask=input_mask, + token_type_ids=segment_ids, + use_one_hot_embeddings=use_one_hot_embeddings, + ) + + # In the demo, we are doing a simple classification task on the entire + # segment. + # + # If you want to use the token-level output, use model.get_sequence_output() + # instead. 
+ output_layer = model.get_pooled_output() + + hidden_size = output_layer.shape[-1].value + + output_weights = tf.get_variable( + "output_weights", + [num_labels, hidden_size], + initializer=tf.truncated_normal_initializer(stddev=0.02), + ) + + output_bias = tf.get_variable( + "output_bias", [num_labels], initializer=tf.zeros_initializer() + ) + + with tf.variable_scope("loss"): + if is_training: + # I.e., 0.1 dropout + output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) + + logits = tf.matmul(output_layer, output_weights, transpose_b=True) + logits = tf.nn.bias_add(logits, output_bias) + probabilities = tf.nn.softmax(logits, axis=-1) + log_probs = tf.nn.log_softmax(logits, axis=-1) + + one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) + + per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) + loss = tf.reduce_mean(per_example_loss) + + return (loss, per_example_loss, logits, probabilities) + + +def model_fn_builder( + bert_config, + num_labels, + init_checkpoint, + learning_rate, + num_train_steps, + num_warmup_steps, + use_tpu, + use_one_hot_embeddings, +): + """Returns `model_fn` closure for TPUEstimator.""" + + def model_fn( + features, labels, mode, params + ): # pylint: disable=unused-argument + """The `model_fn` for TPUEstimator.""" + + tf.logging.info("*** Features ***") + for name in sorted(features.keys()): + tf.logging.info( + " name = %s, shape = %s" % (name, features[name].shape) + ) + + input_ids = features["input_ids"] + input_mask = features["input_mask"] + segment_ids = features["segment_ids"] + label_ids = features["label_ids"] + + is_training = mode == tf.estimator.ModeKeys.TRAIN + + (total_loss, per_example_loss, logits, probabilities) = create_model( + bert_config, + is_training, + input_ids, + input_mask, + segment_ids, + label_ids, + num_labels, + use_one_hot_embeddings, + ) + + tvars = tf.trainable_variables() + initialized_variable_names = {} + scaffold_fn = None + if init_checkpoint: + ( + assignment_map, + initialized_variable_names, + ) = modeling.get_assignment_map_from_checkpoint( + tvars, init_checkpoint + ) + if use_tpu: + + def tpu_scaffold(): + tf.train.init_from_checkpoint( + init_checkpoint, assignment_map + ) + return tf.train.Scaffold() + + scaffold_fn = tpu_scaffold + else: + tf.train.init_from_checkpoint(init_checkpoint, assignment_map) + + tf.logging.info("**** Trainable Variables ****") + for var in tvars: + init_string = "" + if var.name in initialized_variable_names: + init_string = ", *INIT_FROM_CKPT*" + tf.logging.info( + " name = %s, shape = %s%s", var.name, var.shape, init_string + ) + + output_spec = None + if mode == tf.estimator.ModeKeys.TRAIN: + + train_op = optimization.create_optimizer( + total_loss, + learning_rate, + num_train_steps, + num_warmup_steps, + use_tpu, + ) + + output_spec = tf.contrib.tpu.TPUEstimatorSpec( + mode=mode, + loss=total_loss, + train_op=train_op, + scaffold_fn=scaffold_fn, + ) + elif mode == tf.estimator.ModeKeys.EVAL: + + def metric_fn(per_example_loss, label_ids, logits): + predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) + accuracy = tf.metrics.accuracy(label_ids, predictions) + loss = tf.metrics.mean(per_example_loss) + return {"eval_accuracy": accuracy, "eval_loss": loss} + + eval_metrics = (metric_fn, [per_example_loss, label_ids, logits]) + output_spec = tf.contrib.tpu.TPUEstimatorSpec( + mode=mode, + loss=total_loss, + eval_metrics=eval_metrics, + scaffold_fn=scaffold_fn, + ) + else: + output_spec = tf.contrib.tpu.TPUEstimatorSpec( + mode=mode, 
predictions=probabilities, scaffold_fn=scaffold_fn + ) + return output_spec + + return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): - """Creates an `input_fn` closure to be passed to TPUEstimator.""" - - all_input_ids = [] - all_input_mask = [] - all_segment_ids = [] - all_label_ids = [] - - for feature in features: - all_input_ids.append(feature.input_ids) - all_input_mask.append(feature.input_mask) - all_segment_ids.append(feature.segment_ids) - all_label_ids.append(feature.label_id) - - def input_fn(params): - """The actual input function.""" - batch_size = params["batch_size"] - - num_examples = len(features) - - # This is for demo purposes and does NOT scale to large data sets. We do - # not use Dataset.from_generator() because that uses tf.py_func which is - # not TPU compatible. The right way to load data is with TFRecordReader. - d = tf.data.Dataset.from_tensor_slices({ - "input_ids": - tf.constant( - all_input_ids, shape=[num_examples, seq_length], - dtype=tf.int32), - "input_mask": - tf.constant( - all_input_mask, - shape=[num_examples, seq_length], - dtype=tf.int32), - "segment_ids": - tf.constant( - all_segment_ids, - shape=[num_examples, seq_length], - dtype=tf.int32), - "label_ids": - tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32), - }) - - if is_training: - d = d.repeat() - d = d.shuffle(buffer_size=100) - - d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) - return d - - return input_fn + """Creates an `input_fn` closure to be passed to TPUEstimator.""" + + all_input_ids = [] + all_input_mask = [] + all_segment_ids = [] + all_label_ids = [] + + for feature in features: + all_input_ids.append(feature.input_ids) + all_input_mask.append(feature.input_mask) + all_segment_ids.append(feature.segment_ids) + all_label_ids.append(feature.label_id) + + def input_fn(params): + """The actual input function.""" + batch_size = params["batch_size"] + + num_examples = len(features) + + # This is for demo purposes and does NOT scale to large data sets. We do + # not use Dataset.from_generator() because that uses tf.py_func which is + # not TPU compatible. The right way to load data is with TFRecordReader. + d = tf.data.Dataset.from_tensor_slices( + { + "input_ids": tf.constant( + all_input_ids, + shape=[num_examples, seq_length], + dtype=tf.int32, + ), + "input_mask": tf.constant( + all_input_mask, + shape=[num_examples, seq_length], + dtype=tf.int32, + ), + "segment_ids": tf.constant( + all_segment_ids, + shape=[num_examples, seq_length], + dtype=tf.int32, + ), + "label_ids": tf.constant( + all_label_ids, shape=[num_examples], dtype=tf.int32 + ), + } + ) + + if is_training: + d = d.repeat() + d = d.shuffle(buffer_size=100) + + d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) + return d + + return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. 
-def convert_examples_to_features(examples, label_list, max_seq_length, - tokenizer): - """Convert a set of `InputExample`s to a list of `InputFeatures`.""" +def convert_examples_to_features( + examples, label_list, max_seq_length, tokenizer +): + """Convert a set of `InputExample`s to a list of `InputFeatures`.""" - features = [] - for (ex_index, example) in enumerate(examples): - if ex_index % 10000 == 0: - tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) + features = [] + for (ex_index, example) in enumerate(examples): + if ex_index % 10000 == 0: + tf.logging.info( + "Writing example %d of %d" % (ex_index, len(examples)) + ) - feature = convert_single_example(ex_index, example, label_list, - max_seq_length, tokenizer) + feature = convert_single_example( + ex_index, example, label_list, max_seq_length, tokenizer + ) - features.append(feature) - return features + features.append(feature) + return features def main(_): - tf.logging.set_verbosity(tf.logging.INFO) - - processors = { - "cola": ColaProcessor, - "mnli": MnliProcessor, - "mrpc": MrpcProcessor, - "xnli": XnliProcessor, - } - - if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: - raise ValueError( - "At least one of `do_train`, `do_eval` or `do_predict' must be True.") - - bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) - - if FLAGS.max_seq_length > bert_config.max_position_embeddings: - raise ValueError( - "Cannot use sequence length %d because the BERT model " - "was only trained up to sequence length %d" % - (FLAGS.max_seq_length, bert_config.max_position_embeddings)) - - tf.gfile.MakeDirs(FLAGS.output_dir) - - task_name = FLAGS.task_name.lower() - - if task_name not in processors: - raise ValueError("Task not found: %s" % (task_name)) - - processor = processors[task_name]() - - label_list = processor.get_labels() - - tokenizer = tokenization.FullTokenizer( - vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) - - tpu_cluster_resolver = None - if FLAGS.use_tpu and FLAGS.tpu_name: - tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( - FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) - - is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 - run_config = tf.contrib.tpu.RunConfig( - cluster=tpu_cluster_resolver, - master=FLAGS.master, - model_dir=FLAGS.output_dir, - save_checkpoints_steps=FLAGS.save_checkpoints_steps, - tpu_config=tf.contrib.tpu.TPUConfig( - iterations_per_loop=FLAGS.iterations_per_loop, - num_shards=FLAGS.num_tpu_cores, - per_host_input_for_training=is_per_host)) - - train_examples = None - num_train_steps = None - num_warmup_steps = None - if FLAGS.do_train: - train_examples = processor.get_train_examples(FLAGS.data_dir) - num_train_steps = int( - len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) - num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) - - model_fn = model_fn_builder( - bert_config=bert_config, - num_labels=len(label_list), - init_checkpoint=FLAGS.init_checkpoint, - learning_rate=FLAGS.learning_rate, - num_train_steps=num_train_steps, - num_warmup_steps=num_warmup_steps, - use_tpu=FLAGS.use_tpu, - use_one_hot_embeddings=FLAGS.use_tpu) - - run.log('lr', np.float(FLAGS.learning_rate)) - - # If TPU is not available, this will fall back to normal Estimator on CPU - # or GPU. 
- estimator = tf.contrib.tpu.TPUEstimator( - use_tpu=FLAGS.use_tpu, - model_fn=model_fn, - config=run_config, - train_batch_size=FLAGS.train_batch_size, - eval_batch_size=FLAGS.eval_batch_size, - predict_batch_size=FLAGS.predict_batch_size) - - if FLAGS.do_train: - train_file = os.path.join(FLAGS.output_dir, "train.tf_record") - file_based_convert_examples_to_features( - train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) - tf.logging.info("***** Running training *****") - tf.logging.info(" Num examples = %d", len(train_examples)) - tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) - tf.logging.info(" Num steps = %d", num_train_steps) - train_input_fn = file_based_input_fn_builder( - input_file=train_file, - seq_length=FLAGS.max_seq_length, - is_training=True, - drop_remainder=True) - for n in tf.get_default_graph().as_graph_def().node: - tf.logging.info(" Node Name = %s", n.name) - - estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=[_MetricLogger("train")]) - - - if FLAGS.do_eval: - eval_examples = processor.get_dev_examples(FLAGS.data_dir) - eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") - file_based_convert_examples_to_features( - eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) - - tf.logging.info("***** Running evaluation *****") - tf.logging.info(" Num examples = %d", len(eval_examples)) - tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) - - # This tells the estimator to run through the entire set. - eval_steps = None - # However, if running eval on the TPU, you will need to specify the - # number of steps. - if FLAGS.use_tpu: - # Eval will be slightly WRONG on the TPU because it will truncate - # the last batch. - eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size) - - eval_drop_remainder = True if FLAGS.use_tpu else False - eval_input_fn = file_based_input_fn_builder( - input_file=eval_file, - seq_length=FLAGS.max_seq_length, - is_training=False, - drop_remainder=eval_drop_remainder) - - result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps, hooks=[_MetricLogger("eval")]) - - output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") - with tf.gfile.GFile(output_eval_file, "w") as writer: - tf.logging.info("***** Eval results *****") - for key in sorted(result.keys()): - tf.logging.info(" %s = %s", key, str(result[key])) - writer.write("%s = %s\n" % (key, str(result[key]))) - run.log(key, result[key]) - - if FLAGS.do_predict: - predict_examples = processor.get_test_examples(FLAGS.data_dir) - predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") - file_based_convert_examples_to_features(predict_examples, label_list, - FLAGS.max_seq_length, tokenizer, - predict_file) - - tf.logging.info("***** Running prediction*****") - tf.logging.info(" Num examples = %d", len(predict_examples)) - tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size) - - if FLAGS.use_tpu: - # Warning: According to tpu_estimator.py Prediction on TPU is an - # experimental feature and hence not supported here - raise ValueError("Prediction in TPU not supported") - - predict_drop_remainder = True if FLAGS.use_tpu else False - predict_input_fn = file_based_input_fn_builder( - input_file=predict_file, - seq_length=FLAGS.max_seq_length, - is_training=False, - drop_remainder=predict_drop_remainder) - - result = estimator.predict(input_fn=predict_input_fn) - - output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") - with 
tf.gfile.GFile(output_predict_file, "w") as writer: - tf.logging.info("***** Predict results *****") - for prediction in result: - output_line = "\t".join( - str(class_probability) for class_probability in prediction) + "\n" - writer.write(output_line) + tf.logging.set_verbosity(tf.logging.INFO) + + processors = { + "cola": ColaProcessor, + "mnli": MnliProcessor, + "mrpc": MrpcProcessor, + "xnli": XnliProcessor, + } + + if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: + raise ValueError( + "At least one of `do_train`, `do_eval` or `do_predict' must be True." + ) + + bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) + + if FLAGS.max_seq_length > bert_config.max_position_embeddings: + raise ValueError( + "Cannot use sequence length %d because the BERT model " + "was only trained up to sequence length %d" + % (FLAGS.max_seq_length, bert_config.max_position_embeddings) + ) + + tf.gfile.MakeDirs(FLAGS.output_dir) + + task_name = FLAGS.task_name.lower() + + if task_name not in processors: + raise ValueError("Task not found: %s" % (task_name)) + + processor = processors[task_name]() + + label_list = processor.get_labels() + + tokenizer = tokenization.FullTokenizer( + vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case + ) + + tpu_cluster_resolver = None + if FLAGS.use_tpu and FLAGS.tpu_name: + tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( + FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project + ) + + is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 + run_config = tf.contrib.tpu.RunConfig( + cluster=tpu_cluster_resolver, + master=FLAGS.master, + model_dir=FLAGS.output_dir, + save_checkpoints_steps=FLAGS.save_checkpoints_steps, + tpu_config=tf.contrib.tpu.TPUConfig( + iterations_per_loop=FLAGS.iterations_per_loop, + num_shards=FLAGS.num_tpu_cores, + per_host_input_for_training=is_per_host, + ), + ) + + train_examples = None + num_train_steps = None + num_warmup_steps = None + if FLAGS.do_train: + train_examples = processor.get_train_examples(FLAGS.data_dir) + num_train_steps = int( + len(train_examples) + / FLAGS.train_batch_size + * FLAGS.num_train_epochs + ) + num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) + + model_fn = model_fn_builder( + bert_config=bert_config, + num_labels=len(label_list), + init_checkpoint=FLAGS.init_checkpoint, + learning_rate=FLAGS.learning_rate, + num_train_steps=num_train_steps, + num_warmup_steps=num_warmup_steps, + use_tpu=FLAGS.use_tpu, + use_one_hot_embeddings=FLAGS.use_tpu, + ) + + run.log("lr", np.float(FLAGS.learning_rate)) + + # If TPU is not available, this will fall back to normal Estimator on CPU + # or GPU. 
+ estimator = tf.contrib.tpu.TPUEstimator( + use_tpu=FLAGS.use_tpu, + model_fn=model_fn, + config=run_config, + train_batch_size=FLAGS.train_batch_size, + eval_batch_size=FLAGS.eval_batch_size, + predict_batch_size=FLAGS.predict_batch_size, + ) + + if FLAGS.do_train: + train_file = os.path.join(FLAGS.output_dir, "train.tf_record") + file_based_convert_examples_to_features( + train_examples, + label_list, + FLAGS.max_seq_length, + tokenizer, + train_file, + ) + tf.logging.info("***** Running training *****") + tf.logging.info(" Num examples = %d", len(train_examples)) + tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) + tf.logging.info(" Num steps = %d", num_train_steps) + train_input_fn = file_based_input_fn_builder( + input_file=train_file, + seq_length=FLAGS.max_seq_length, + is_training=True, + drop_remainder=True, + ) + for n in tf.get_default_graph().as_graph_def().node: + tf.logging.info(" Node Name = %s", n.name) + + estimator.train( + input_fn=train_input_fn, + max_steps=num_train_steps, + hooks=[_MetricLogger("train")], + ) + + if FLAGS.do_eval: + eval_examples = processor.get_dev_examples(FLAGS.data_dir) + eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") + file_based_convert_examples_to_features( + eval_examples, + label_list, + FLAGS.max_seq_length, + tokenizer, + eval_file, + ) + + tf.logging.info("***** Running evaluation *****") + tf.logging.info(" Num examples = %d", len(eval_examples)) + tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) + + # This tells the estimator to run through the entire set. + eval_steps = None + # However, if running eval on the TPU, you will need to specify the + # number of steps. + if FLAGS.use_tpu: + # Eval will be slightly WRONG on the TPU because it will truncate + # the last batch. 
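The comment above notes that TPU evaluation is slightly off because the last partial batch is truncated. The short arithmetic below, with made-up sizes, shows what that truncation costs when `eval_steps` is computed by integer division.

```
# Made-up sizes, purely to illustrate the last-batch truncation on TPU eval.
num_eval_examples = 407
eval_batch_size = 8

eval_steps = int(num_eval_examples / eval_batch_size)       # 50 full batches
examples_evaluated = eval_steps * eval_batch_size           # 400
print(eval_steps, num_eval_examples - examples_evaluated)   # prints "50 7": 7 examples dropped
```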
+ eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size) + + eval_drop_remainder = True if FLAGS.use_tpu else False + eval_input_fn = file_based_input_fn_builder( + input_file=eval_file, + seq_length=FLAGS.max_seq_length, + is_training=False, + drop_remainder=eval_drop_remainder, + ) + + result = estimator.evaluate( + input_fn=eval_input_fn, + steps=eval_steps, + hooks=[_MetricLogger("eval")], + ) + + output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") + with tf.gfile.GFile(output_eval_file, "w") as writer: + tf.logging.info("***** Eval results *****") + for key in sorted(result.keys()): + tf.logging.info(" %s = %s", key, str(result[key])) + writer.write("%s = %s\n" % (key, str(result[key]))) + run.log(key, result[key]) + + if FLAGS.do_predict: + predict_examples = processor.get_test_examples(FLAGS.data_dir) + predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") + file_based_convert_examples_to_features( + predict_examples, + label_list, + FLAGS.max_seq_length, + tokenizer, + predict_file, + ) + + tf.logging.info("***** Running prediction*****") + tf.logging.info(" Num examples = %d", len(predict_examples)) + tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size) + + if FLAGS.use_tpu: + # Warning: According to tpu_estimator.py Prediction on TPU is an + # experimental feature and hence not supported here + raise ValueError("Prediction in TPU not supported") + + predict_drop_remainder = True if FLAGS.use_tpu else False + predict_input_fn = file_based_input_fn_builder( + input_file=predict_file, + seq_length=FLAGS.max_seq_length, + is_training=False, + drop_remainder=predict_drop_remainder, + ) + + result = estimator.predict(input_fn=predict_input_fn) + + output_predict_file = os.path.join( + FLAGS.output_dir, "test_results.tsv" + ) + with tf.gfile.GFile(output_predict_file, "w") as writer: + tf.logging.info("***** Predict results *****") + for prediction in result: + output_line = ( + "\t".join( + str(class_probability) + for class_probability in prediction + ) + + "\n" + ) + writer.write(output_line) if __name__ == "__main__": - flags.mark_flag_as_required("data_dir") - flags.mark_flag_as_required("task_name") - flags.mark_flag_as_required("vocab_file") - flags.mark_flag_as_required("bert_config_file") - flags.mark_flag_as_required("output_dir") - tf.app.run() + flags.mark_flag_as_required("data_dir") + flags.mark_flag_as_required("task_name") + flags.mark_flag_as_required("vocab_file") + flags.mark_flag_as_required("bert_config_file") + flags.mark_flag_as_required("output_dir") + tf.app.run() diff --git a/pretrain/PyTorch/azureml_adapter.py b/pretrain/PyTorch/azureml_adapter.py index da639b6..ba9774c 100644 --- a/pretrain/PyTorch/azureml_adapter.py +++ b/pretrain/PyTorch/azureml_adapter.py @@ -2,36 +2,46 @@ def set_environment_variables_for_nccl_backend(single_node=False): - os.environ['RANK'] = os.environ['OMPI_COMM_WORLD_RANK'] - os.environ['WORLD_SIZE'] = os.environ['OMPI_COMM_WORLD_SIZE'] + os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"] + os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"] - if not single_node: - master_node_params = os.environ['AZ_BATCH_MASTER_NODE'].split(':') - os.environ['MASTER_ADDR'] = master_node_params[0] - os.environ['MASTER_PORT'] = master_node_params[1] + if not single_node: + master_node_params = os.environ["AZ_BATCH_MASTER_NODE"].split(":") + os.environ["MASTER_ADDR"] = master_node_params[0] + os.environ["MASTER_PORT"] = master_node_params[1] else: - os.environ['MASTER_ADDR'] = 
os.environ['AZ_BATCHAI_MPI_MASTER_NODE'] - os.environ['MASTER_PORT'] = '54965' - print('NCCL_SOCKET_IFNAME original value = {}'.format(os.environ['NCCL_SOCKET_IFNAME'])) + os.environ["MASTER_ADDR"] = os.environ["AZ_BATCHAI_MPI_MASTER_NODE"] + os.environ["MASTER_PORT"] = "54965" + print( + "NCCL_SOCKET_IFNAME original value = {}".format( + os.environ["NCCL_SOCKET_IFNAME"] + ) + ) # TODO make this parameterizable - os.environ['NCCL_SOCKET_IFNAME'] = '^docker0,lo' + os.environ["NCCL_SOCKET_IFNAME"] = "^docker0,lo" - print('RANK = {}'.format(os.environ['RANK'])) - print('WORLD_SIZE = {}'.format(os.environ['WORLD_SIZE'])) - print('MASTER_ADDR = {}'.format(os.environ['MASTER_ADDR'])) - print('MASTER_PORT = {}'.format(os.environ['MASTER_PORT'])) + print("RANK = {}".format(os.environ["RANK"])) + print("WORLD_SIZE = {}".format(os.environ["WORLD_SIZE"])) + print("MASTER_ADDR = {}".format(os.environ["MASTER_ADDR"])) + print("MASTER_PORT = {}".format(os.environ["MASTER_PORT"])) # print('MASTER_NODE = {}'.format(os.environ['MASTER_NODE'])) - print('NCCL_SOCKET_IFNAME new value = {}'.format(os.environ['NCCL_SOCKET_IFNAME'])) + print( + "NCCL_SOCKET_IFNAME new value = {}".format( + os.environ["NCCL_SOCKET_IFNAME"] + ) + ) + def get_local_rank(): - return int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) + return int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]) + def get_global_size(): - return int(os.environ['OMPI_COMM_WORLD_SIZE']) + return int(os.environ["OMPI_COMM_WORLD_SIZE"]) + def get_local_size(): - return int(os.environ['OMPI_COMM_WORLD_LOCAL_SIZE']) + return int(os.environ["OMPI_COMM_WORLD_LOCAL_SIZE"]) def get_world_size(): - return int(os.environ['WORLD_SIZE']) - + return int(os.environ['WORLD_SIZE']) \ No newline at end of file diff --git a/pretrain/PyTorch/checkpoint.py b/pretrain/PyTorch/checkpoint.py index 6de1cd6..bc26ce1 100644 --- a/pretrain/PyTorch/checkpoint.py +++ b/pretrain/PyTorch/checkpoint.py @@ -1,18 +1,20 @@ -from logger import Logger import torch import os from operator import itemgetter -from torch import __init__ -def checkpoint_model(PATH, model, optimizer, epoch, last_global_step, **kwargs): +def checkpoint_model( + PATH, model, optimizer, epoch, last_global_step, **kwargs +): """Utility function for checkpointing model + optimizer dictionaries The main purpose for this is to be able to resume training from that instant again """ - checkpoint_state_dict = {'epoch': epoch, - 'last_global_step': last_global_step, - 'model_state_dict': model.network.module.state_dict(), - 'optimizer_state_dict': optimizer.state_dict()} + checkpoint_state_dict = { + "epoch": epoch, + "last_global_step": last_global_step, + "model_state_dict": model.network.module.state_dict(), + "optimizer_state_dict": optimizer.state_dict(), + } # Add extra kwargs too checkpoint_state_dict.update(kwargs) torch.save(checkpoint_state_dict, PATH) @@ -24,13 +26,14 @@ def load_checkpoint(model, optimizer, PATH): The main purpose for this is to be able to resume training from that instant again """ checkpoint_state_dict = torch.load(PATH, map_location=torch.device("cpu")) - #from train import model + # from train import model model.network.module.load_state_dict( - checkpoint_state_dict['model_state_dict']) - #from train import optimizer - optimizer.load_state_dict(checkpoint_state_dict['optimizer_state_dict']) - epoch = checkpoint_state_dict['epoch'] - last_global_step = checkpoint_state_dict['last_global_step'] + checkpoint_state_dict["model_state_dict"] + ) + # from train import optimizer + 
optimizer.load_state_dict(checkpoint_state_dict["optimizer_state_dict"]) + epoch = checkpoint_state_dict["epoch"] + last_global_step = checkpoint_state_dict["last_global_step"] del checkpoint_state_dict return (epoch + 1, last_global_step) @@ -45,19 +48,21 @@ def latest_checkpoint_file(reference_folder: str, no_cuda) -> str: # Extract sub-folders under the reference folder matching_sub_dirs = [d for d in os.listdir(reference_folder)] - logger = Logger(cuda=torch.cuda.is_available() and not no_cuda) - # For each of these folders, find those that correspond # to the proper architecture, and that contain .tar files candidate_files = [] for sub_dir in matching_sub_dirs: - for dir_path, dir_names, filenames in os.walk(os.path.join(reference_folder, sub_dir)): - if 'saved_models' in dir_path: - relevant_files = [f for f in filenames if f.endswith('.tar')] + for dir_path, dir_names, filenames in os.walk( + os.path.join(reference_folder, sub_dir) + ): + if "saved_models" in dir_path: + relevant_files = [f for f in filenames if f.endswith(".tar")] if relevant_files: - latest_file = max(relevant_files) # assumes that checkpoint number is of format 000x + latest_file = max( + relevant_files + ) # assumes that checkpoint number is of format 000x candidate_files.append((dir_path, latest_file)) - + checkpoint_file = max(candidate_files, key=itemgetter(1)) checkpoint_path = os.path.join(checkpoint_file[0], checkpoint_file[1]) diff --git a/pretrain/PyTorch/configuration.py b/pretrain/PyTorch/configuration.py index d869d47..b8d9c8d 100644 --- a/pretrain/PyTorch/configuration.py +++ b/pretrain/PyTorch/configuration.py @@ -4,16 +4,20 @@ # TODO better json handling class BertJobConfiguration: def __init__(self, config_file_path): - self.config = json.load(open(config_file_path, 'r', encoding='utf-8')) + self.config = json.load(open(config_file_path, "r", encoding="utf-8")) # TODO improve this implementation def replace_path_placeholders(self, files_location): - self.config['data']['datasets'] = {key: value.replace('placeholder/', files_location) - for (key, value) in self.config['data']['datasets'].items()} - self.config['validation']['path'] = self.config['validation']['path'].replace('placeholder/', files_location) + self.config["data"]["datasets"] = { + key: value.replace("placeholder/", files_location) + for (key, value) in self.config["data"]["datasets"].items() + } + self.config["validation"]["path"] = self.config["validation"][ + "path" + ].replace("placeholder/", files_location) def get_name(self): - return self.config['name'] + return self.config["name"] def get_token_file_type(self): return self.config["bert_token_file"] @@ -34,13 +38,13 @@ def get_total_epoch_count(self): return self.config["training"]["num_epochs"] def get_num_workers(self): - return self.config['training']['num_workers'] + return self.config["training"]["num_workers"] def get_validation_folder_path(self): - return self.config['validation']['path'] + return self.config["validation"]["path"] def get_wiki_pretrain_dataset_path(self): - return self.config["data"]["datasets"]['wiki_pretrain_dataset'] + return self.config["data"]["datasets"]["wiki_pretrain_dataset"] def get_decay_rate(self): return self.config["training"]["decay_rate"] diff --git a/pretrain/PyTorch/dataset.py b/pretrain/PyTorch/dataset.py index 71c8ece..389311e 100644 --- a/pretrain/PyTorch/dataset.py +++ b/pretrain/PyTorch/dataset.py @@ -1,8 +1,7 @@ import torch import os -from torch.utils.data import DataLoader, Dataset +from torch.utils.data import Dataset from enum 
import IntEnum -from random import choice import random import collections @@ -20,17 +19,27 @@ class PretrainDataType(IntEnum): WIKIPEDIA = 1 VALIDATION = 2 + MaskedLMInstance = collections.namedtuple( - "MaskedLMInstance", ["index", "label"]) + "MaskedLMInstance", ["index", "label"] +) PretrainBatch = collections.namedtuple( - 'PreTrainBatch', ['input_ids', 'input_mask', 'sequence_ids', - 'is_next_label', 'masked_lm_output'] + "PreTrainBatch", + [ + "input_ids", + "input_mask", + "sequence_ids", + "is_next_label", + "masked_lm_output", + ], ) + def get_random_partition(data_directory, index): - partitions = [os.path.join(data_directory, x) - for x in os.listdir(data_directory)] + partitions = [ + os.path.join(data_directory, x) for x in os.listdir(data_directory) + ] partitions = sorted(partitions) i = index % len(partitions) return partitions[i] @@ -60,15 +69,19 @@ def encode_sequence(seqA, seqB, max_seq_len, tokenizer): input_tokens = seqA + seqB input_ids = tokenizer.convert_tokens_to_ids(input_tokens) - sequence_ids = [0]*len(seqA) + [1]*len(seqB) - input_mask = [1]*len(input_ids) + sequence_ids = [0] * len(seqA) + [1] * len(seqB) + input_mask = [1] * len(input_ids) while len(input_ids) < max_seq_len: input_ids.append(PAD) sequence_ids.append(PAD) input_mask.append(PAD) - return (map_to_torch(input_ids), map_to_torch(input_mask), map_to_torch(sequence_ids)) + return ( + map_to_torch(input_ids), + map_to_torch(input_mask), + map_to_torch(sequence_ids), + ) def truncate_input_sequence(tokens_a, tokens_b, max_num_tokens): @@ -87,8 +100,19 @@ def truncate_input_sequence(tokens_a, tokens_b, max_num_tokens): else: trunc_tokens.pop() + class PreTrainingDataset(Dataset): - def __init__(self, tokenizer: BertTokenizer, folder: str, logger, max_seq_length, index, data_type: PretrainDataType = PretrainDataType.WIKIPEDIA, max_predictions_per_seq=20, masked_lm_prob=0.15): + def __init__( + self, + tokenizer: BertTokenizer, + folder: str, + logger, + max_seq_length, + index, + data_type: PretrainDataType = PretrainDataType.WIKIPEDIA, + max_predictions_per_seq=20, + masked_lm_prob=0.15, + ): self.tokenizer = tokenizer self.dir_path = folder self.max_seq_length = max_seq_length @@ -100,13 +124,15 @@ def __init__(self, tokenizer: BertTokenizer, folder: str, logger, max_seq_length path = get_random_partition(self.dir_path, index) logger.info(f"Loading Pretraining Data from {path}") + if data_type == PretrainDataType.WIKIPEDIA: self.data = GenericPretrainingDataCreator.load(path) elif data_type == PretrainDataType.VALIDATION: self.data = WikiPretrainingDataCreator.load(path) self.len = len(self.data) logger.info( - f"Data Loading Completed for Pretraining Data from {path} with {self.len} samples.") + f"Data Loading Completed for Pretraining Data from {path} with {self.len} samples." 
+ ) def __len__(self): return self.len @@ -151,7 +177,14 @@ def create_training_instance(self, instance: TokenInstance): segment_ids.append(PAD) input_mask.append(PAD) masked_lm_output.append(-1) - return([map_to_torch([BatchType.PRETRAIN_BATCH]), map_to_torch(input_ids), map_to_torch(input_mask), map_to_torch(segment_ids), map_to_torch([is_next]), map_to_torch(masked_lm_output)]) + return [ + map_to_torch([BatchType.PRETRAIN_BATCH]), + map_to_torch(input_ids), + map_to_torch(input_mask), + map_to_torch(segment_ids), + map_to_torch([is_next]), + map_to_torch(masked_lm_output), + ] def create_masked_lm_predictions(self, tokens): cand_indexes = [] @@ -163,8 +196,10 @@ def create_masked_lm_predictions(self, tokens): random.shuffle(cand_indexes) output_tokens = list(tokens) - num_to_predict = min(self.max_predictions_per_seq, max( - 1, int(round(len(tokens) * self.masked_lm_prob)))) + num_to_predict = min( + self.max_predictions_per_seq, + max(1, int(round(len(tokens) * self.masked_lm_prob))), + ) masked_lms = [] covered_indexes = set() @@ -185,12 +220,14 @@ def create_masked_lm_predictions(self, tokens): masked_token = tokens[index] # 10% replace w/ random word else: - masked_token = self.vocab_words[random.randint( - 0, len(self.vocab_words) - 1)] + masked_token = self.vocab_words[ + random.randint(0, len(self.vocab_words) - 1) + ] output_tokens[index] = masked_token - masked_lms.append(MaskedLMInstance( - index=index, label=tokens[index])) + masked_lms.append( + MaskedLMInstance(index=index, label=tokens[index]) + ) masked_lms = sorted(masked_lms, key=lambda x: x.index) masked_lm_output = [-1] * len(output_tokens) diff --git a/pretrain/PyTorch/distributed_apex.py b/pretrain/PyTorch/distributed_apex.py index d68c512..6fc024e 100644 --- a/pretrain/PyTorch/distributed_apex.py +++ b/pretrain/PyTorch/distributed_apex.py @@ -9,7 +9,9 @@ try: _ = warned_flatten except NameError: - print("Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten.") + print( + "Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten." + ) warned_flatten = True from torch._utils import _flatten_dense_tensors as flatten from torch._utils import _unflatten_dense_tensors as unflatten @@ -26,7 +28,7 @@ def apply_flat_dist_call(bucket, call, extra_args=None): coalesced = flatten(bucket) - #print("Rank", dist.get_rank(), "Broadcasting ", coalesced.device, " Size", coalesced.size()) + # print("Rank", dist.get_rank(), "Broadcasting ", coalesced.device, " Size", coalesced.size()) if extra_args is not None: call(coalesced, *extra_args) else: @@ -40,8 +42,11 @@ def apply_flat_dist_call(bucket, call, extra_args=None): def split_half_float_double(tensors): - dtypes = ["torch.cuda.HalfTensor", - "torch.cuda.FloatTensor", "torch.cuda.DoubleTensor"] + dtypes = [ + "torch.cuda.HalfTensor", + "torch.cuda.FloatTensor", + "torch.cuda.DoubleTensor", + ] buckets = [] for i, dtype in enumerate(dtypes): bucket = [t for t in tensors if t.type() == dtype] @@ -59,6 +64,7 @@ def split_by_type(tensors): buckets[tp].append(tensor) return buckets + # flat_dist_call organizes 'tensors' by type. @@ -88,13 +94,13 @@ class Reducer(object): Unlike :class:`DistributedDataParallel`, :class:`Reducer` will not automatically allreduce parameters during ``backward()``. Instead, :class:`Reducer` waits for the user to call `.reduce()` manually. 
- This enables, for example, delaying the allreduce to be carried out every + This enables, for example, delaying the allreduce to be carried out every several iterations instead of every single iteration. - Like :class:`DistributedDataParallel`, :class:`Reducer` averages any tensors it allreduces + Like :class:`DistributedDataParallel`, :class:`Reducer` averages any tensors it allreduces over the number of participating processes. - :class:`Reducer` is designed to work with the upstream launch utility script + :class:`Reducer` is designed to work with the upstream launch utility script ``torch.distributed.launch`` with ``--nproc_per_node <= number of gpus per node``. When used with this launcher, :class:`Reducer` assumes 1:1 mapping of processes to GPUs. It also assumes that your script calls ``torch.cuda.set_device(args.rank)`` before creating the model. @@ -109,7 +115,10 @@ def __init__(self, module_or_grads_list): if isinstance(module_or_grads_list, Module): self.module = module_or_grads_list flat_dist_call( - [param.data for param in self.module.parameters()], dist.broadcast, (0,)) + [param.data for param in self.module.parameters()], + dist.broadcast, + (0,), + ) else: self.module = None @@ -118,8 +127,11 @@ def __init__(self, module_or_grads_list): def reduce(self): if self.module: - grads = [param.grad.data for param in self.module.parameters() - if param.grad is not None] + grads = [ + param.grad.data + for param in self.module.parameters() + if param.grad is not None + ] flat_dist_call(grads, dist.all_reduce) else: flat_dist_call(self.grads, dist.all_reduce) @@ -131,11 +143,11 @@ class DistributedDataParallel(Module): easy multiprocess distributed data parallel training, similar to ``torch.nn.parallel.DistributedDataParallel``. Parameters are broadcast across participating processes on initialization, and gradients are allreduced and averaged over processes during ``backward()``. - :class:`DistributedDataParallel` is optimized for use with NCCL. It achieves high performance by + :class:`DistributedDataParallel` is optimized for use with NCCL. It achieves high performance by overlapping communication with computation during ``backward()`` and bucketing smaller gradient transfers to reduce the total number of transfers required. - :class:`DistributedDataParallel` is designed to work with the upstream launch utility script + :class:`DistributedDataParallel` is designed to work with the upstream launch utility script ``torch.distributed.launch`` with ``--nproc_per_node <= number of gpus per node``. When used with this launcher, :class:`DistributedDataParallel` assumes 1:1 mapping of processes to GPUs. It also assumes that your script calls ``torch.cuda.set_device(args.rank)`` before creating the model. 
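The `Reducer` and `DistributedDataParallel` docstrings above describe averaging allreduced gradients over the number of participating processes, optionally delaying the allreduce by several iterations. As a rough, standalone sketch of that idea, independent of the patch's classes, the snippet below accumulates gradients locally and only allreduces and averages them every `accumulation_steps` iterations; it spins up a single-process `gloo` group purely so it can run without a launcher.

```
# Minimal sketch of delayed gradient allreduce; not the patch's Reducer class.
import os

import torch
import torch.distributed as dist


def average_gradients(model, world_size):
    # Sum each gradient across ranks, then divide by the process count,
    # mirroring the "average over participating processes" behaviour.
    for param in model.parameters():
        if param.grad is not None:
            dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
            param.grad.data /= world_size


if __name__ == "__main__":
    # Single-process gloo group so the sketch is runnable without a launcher.
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group("gloo", rank=0, world_size=1)

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    accumulation_steps = 2

    for step in range(4):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        loss.backward()  # gradients accumulate locally between allreduces
        if (step + 1) % accumulation_steps == 0:
            average_gradients(model, dist.get_world_size())
            optimizer.step()
            optimizer.zero_grad()

    dist.destroy_process_group()
```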
@@ -158,17 +170,19 @@ class DistributedDataParallel(Module): """ - def __init__(self, - module, - message_size=10000000, - delay_allreduce=False, - shared_param=None, - allreduce_trigger_params=None, - retain_allreduce_buffers=False, - allreduce_always_fp32=False, - gradient_average=True, - gradient_predivide_factor=1.0, - gradient_average_split_factor=None): + def __init__( + self, + module, + message_size=10000000, + delay_allreduce=False, + shared_param=None, + allreduce_trigger_params=None, + retain_allreduce_buffers=False, + allreduce_always_fp32=False, + gradient_average=True, + gradient_predivide_factor=1.0, + gradient_average_split_factor=None, + ): super(DistributedDataParallel, self).__init__() # Backward/forward compatibility around @@ -184,13 +198,19 @@ def __init__(self, self._backend = dist._backend self.backend_enum_holder = dist.dist_backend - self.warn_on_half = True if self._backend == self.backend_enum_holder.GLOO else False + self.warn_on_half = ( + True if self._backend == self.backend_enum_holder.GLOO else False + ) if shared_param is not None: - raise ValueError("shared_param is no longer supported as an option. It was misleadingly named from the start. It turns out overlapping communication with computation should work fine with shared parameters. If you still wish to delay communication to the end of the backward pass, use delay_allreduce=True|False instead.") + raise ValueError( + "shared_param is no longer supported as an option. It was misleadingly named from the start. It turns out overlapping communication with computation should work fine with shared parameters. If you still wish to delay communication to the end of the backward pass, use delay_allreduce=True|False instead." + ) if gradient_average_split_factor is not None: - print("Warning: gradient_average_split_factor has been renamed to gradient_predivide_factor. For now, gradient_average_split_factor will also work, but please update to gradient_predivide_factor instead.") + print( + "Warning: gradient_average_split_factor has been renamed to gradient_predivide_factor. For now, gradient_average_split_factor will also work, but please update to gradient_predivide_factor instead." + ) self.gradient_predivide_factor = gradient_average_split_factor self.world_size = float(dist.get_world_size()) @@ -204,29 +224,36 @@ def __init__(self, if allreduce_trigger_params is not None: if delay_allreduce: raise ValueError( - "Setting allreduce_trigger_params is only valid if delay_allreduce=False.") + "Setting allreduce_trigger_params is only valid if delay_allreduce=False." + ) self.custom_allreduce_triggers = True self.allreduce_trigger_params = set( - [id(param) for param in allreduce_trigger_params]) + [id(param) for param in allreduce_trigger_params] + ) self.delay_allreduce = delay_allreduce self.message_size = message_size self.reduction_stream = torch.cuda.Stream() self.reduction_event = torch.cuda.Event( - enable_timing=False, blocking=False) + enable_timing=False, blocking=False + ) self.module = module if self._backend == self.backend_enum_holder.NCCL: for param in self.module.parameters(): - assert param.is_cuda, "NCCL backend only supports model parameters to be on GPU." + assert ( + param.is_cuda + ), "NCCL backend only supports model parameters to be on GPU." 
self.active_params = [] - self.param_type_to_tmp_i = {"torch.cuda.HalfTensor": 0, - "torch.cuda.FloatTensor": 1, - "torch.cuda.DoubleTensor": 2} + self.param_type_to_tmp_i = { + "torch.cuda.HalfTensor": 0, + "torch.cuda.FloatTensor": 1, + "torch.cuda.DoubleTensor": 2, + } # to make sure reduction only happens after gradient accumulation self.need_reduction = False @@ -234,7 +261,10 @@ def __init__(self, self.create_hooks() flat_dist_call( - [param.data for param in self.module.parameters()], dist.broadcast, (0,)) + [param.data for param in self.module.parameters()], + dist.broadcast, + (0,), + ) def enable_need_reduction(self): self.need_reduction = True @@ -246,13 +276,14 @@ def __setstate__(self, state): super(DistributedDataParallel, self).__setstate__(state) self.reduction_stream = torch.cuda.Stream() self.reduction_event = torch.cuda.Event( - enable_timing=False, blocking=False) + enable_timing=False, blocking=False + ) def __getstate__(self): attrs = copy.copy(self.__dict__) if self._backend != self.backend_enum_holder.NCCL: - del attrs['self.reduction_stream'] - del attrs['self.reduction_event'] + del attrs["self.reduction_stream"] + del attrs["self.reduction_event"] return attrs # Broadcast rank 0's bucket structure across all processes, and have all processes @@ -266,31 +297,39 @@ def sync_bucket_structure(self): self.num_buckets = len(self.active_i_buckets) self.bucket_sizes = [len(bucket) for bucket in self.active_i_buckets] - info_tensor = torch.cuda.IntTensor([self.num_buckets] + - self.bucket_sizes + - list(chain(*self.active_i_buckets))) - #print("Sync Bucket Structure Broadcast. Rank", dist.get_rank(), "Tensor Size ", info_tensor.size(), "Device ", info_tensor.device, "Current Device ", torch.cuda.current_device()) + info_tensor = torch.cuda.IntTensor( + [self.num_buckets] + + self.bucket_sizes + + list(chain(*self.active_i_buckets)) + ) + # print("Sync Bucket Structure Broadcast. Rank", dist.get_rank(), "Tensor Size ", info_tensor.size(), "Device ", info_tensor.device, "Current Device ", torch.cuda.current_device()) dist.broadcast(info_tensor, 0) info = [int(entry) for entry in info_tensor] self.num_buckets = info[0] - self.bucket_sizes = info[1:self.num_buckets + 1] - self.buckets = [[None for _ in range(self.bucket_sizes[i])] - for i in range(self.num_buckets)] + self.bucket_sizes = info[1 : self.num_buckets + 1] + self.buckets = [ + [None for _ in range(self.bucket_sizes[i])] + for i in range(self.num_buckets) + ] # Technically, active_i_buckets' work is done. But the information is still useful to # keep around. Therefore, refresh active_i_buckets based on rank 0 as well. - self.active_i_buckets = [[None for _ in range(self.bucket_sizes[i])] - for i in range(self.num_buckets)] + self.active_i_buckets = [ + [None for _ in range(self.bucket_sizes[i])] + for i in range(self.num_buckets) + ] - flattened_buckets = info[self.num_buckets + 1:] + flattened_buckets = info[self.num_buckets + 1 :] flat_i = 0 for bucket_idx in range(self.num_buckets): for bucket_loc in range(self.bucket_sizes[bucket_idx]): param_i = flattened_buckets[flat_i] self.active_i_buckets[bucket_idx][bucket_loc] = param_i self.param_id_to_bucket[id(self.active_params[param_i])] = ( - bucket_idx, bucket_loc) + bucket_idx, + bucket_loc, + ) flat_i += 1 def create_hooks(self): @@ -314,28 +353,35 @@ def overlapping_backward_epilogue(): # Sanity checks that all the buckets were kicked off if self.next_bucket != self.num_buckets: - raise RuntimeError("In epilogue, next_bucket ({}) != num_buckets ({}). 
".format( - self.next_bucket, self.num_buckets), - "This probably indicates some buckets were not allreduced.") - - for actual, expected in zip(self.buckets_ready_size, self.bucket_sizes): + raise RuntimeError( + "In epilogue, next_bucket ({}) != num_buckets ({}). ".format( + self.next_bucket, self.num_buckets + ), + "This probably indicates some buckets were not allreduced.", + ) + + for actual, expected in zip( + self.buckets_ready_size, self.bucket_sizes + ): if actual != expected: raise RuntimeError( - "Some param buckets were not allreduced.") + "Some param buckets were not allreduced." + ) self.grad_accs = [] for param in self.module.parameters(): if param.requires_grad: + def wrapper(param): param_tmp = param.expand_as(param) grad_acc = param_tmp.grad_fn.next_functions[0][0] def allreduce_hook(*unused): # user must explicitly specify when to do all reduce - if self.need_reduction == False: - #print("Does not need Reduction") + if self.need_reduction is False: + # print("Does not need Reduction") return - #print("Needs Reduction") + # print("Needs Reduction") if self.delay_allreduce or self.needs_refresh: # TODO: How do we want to handle multiple backward passes between # each forward, e.g., backward passes with retain_graph=True? @@ -345,36 +391,48 @@ def allreduce_hook(*unused): active_i = self.param_id_to_active_i[id(param)] # Float, half, and double tensors are grouped into buckets separately. - current_type = self.param_type_to_tmp_i[param.type( - )] + current_type = self.param_type_to_tmp_i[ + param.type() + ] self.tmp_buckets[current_type].append(active_i) ship_tmp_bucket = False if self.custom_allreduce_triggers: - if id(param) in self.allreduce_trigger_params: + if ( + id(param) + in self.allreduce_trigger_params + ): ship_tmp_bucket = True else: - self.tmp_numels[current_type] += param.numel() - if self.tmp_numels[current_type] >= self.message_size: + self.tmp_numels[ + current_type + ] += param.numel() + if ( + self.tmp_numels[current_type] + >= self.message_size + ): ship_tmp_bucket = True # To consider: If custom_allreduce_triggers are in use, ship all # tmp_buckets, not just tmp_buckets[current_type]. 
if ship_tmp_bucket: self.active_i_buckets.append( - self.tmp_buckets[current_type]) + self.tmp_buckets[current_type] + ) self.tmp_buckets[current_type] = [] self.tmp_numels[current_type] = 0 if not self.callback_queued: Variable._execution_engine.queue_callback( - allreduce_params) + allreduce_params + ) self.callback_queued = True else: if not self.callback_queued: Variable._execution_engine.queue_callback( - overlapping_backward_epilogue) + overlapping_backward_epilogue + ) self.callback_queued = True self.comm_ready_buckets(param) @@ -393,13 +451,14 @@ def allreduce_bucket(self, bucket): tensor_to_allreduce = tensor.float() if self.gradient_predivide_factor != 1.0: - tensor_to_allreduce.mul_(1./self.gradient_predivide_factor) + tensor_to_allreduce.mul_(1.0 / self.gradient_predivide_factor) dist.all_reduce(tensor_to_allreduce) if self.gradient_average: tensor_to_allreduce.mul_( - self.gradient_predivide_factor/self.world_size) + self.gradient_predivide_factor / self.world_size + ) if self.allreduce_always_fp32 and tensor is not tensor_to_allreduce: tensor.copy_(tensor_to_allreduce) @@ -410,16 +469,21 @@ def allreduce_maybe_retain(self, bucket, bucket_idx=-1): allreduced = self.allreduce_bucket(bucket) if self.retain_allreduce_buffers: if self.allreduce_buffers[bucket_idx] is not None: - raise RuntimeError("The backward pass is attempting to replace an already-filled " - "allreduce buffer. This is almost certainly an error.") + raise RuntimeError( + "The backward pass is attempting to replace an already-filled " + "allreduce buffer. This is almost certainly an error." + ) self.allreduce_buffers[bucket_idx] = allreduced else: for buf, synced in zip(bucket, unflatten(allreduced, bucket)): buf.copy_(synced) def allreduce_fallback(self): - grads = [param.grad.data for param in self.module.parameters() - if param.grad is not None] + grads = [ + param.grad.data + for param in self.module.parameters() + if param.grad is not None + ] split_buckets = split_half_float_double(grads) @@ -429,30 +493,36 @@ def allreduce_fallback(self): if self.retain_allreduce_buffers: self.allreduce_buffers = [None for _ in range(len(split_buckets))] - for i, bucket in enumerate(split_buckets): - allreduced = self.allreduce_maybe_retain(bucket, i) + # for i, bucket in enumerate(split_buckets): + # allreduced = self.allreduce_maybe_retain(bucket, i) def comm_ready_buckets(self, param): # Need to do this in every hook for compatibility with Ruberry's streaming backward PR. # self.reduction_stream.wait_stream(torch.cuda.current_stream()) - #if dist.get_rank() == 0: + # if dist.get_rank() == 0: # print("Parameter Name", param.name) bucket_idx, bucket_loc = self.param_id_to_bucket[id(param)] if self.buckets[bucket_idx][bucket_loc] is not None: - raise RuntimeError("The backward pass is attempting to replace an already-filled " - "bucket slot. This is almost certainly an error.") + raise RuntimeError( + "The backward pass is attempting to replace an already-filled " + "bucket slot. This is almost certainly an error." 
+ ) self.buckets[bucket_idx][bucket_loc] = param.grad.data self.buckets_ready_size[bucket_idx] += 1 - if self.buckets_ready_size[bucket_idx] == self.bucket_sizes[bucket_idx]: + if ( + self.buckets_ready_size[bucket_idx] + == self.bucket_sizes[bucket_idx] + ): if bucket_idx == self.next_bucket: torch.cuda.current_stream().record_event(self.reduction_event) self.reduction_stream.wait_event(self.reduction_event) with torch.cuda.stream(self.reduction_stream): self.allreduce_maybe_retain( - self.buckets[bucket_idx], bucket_idx) + self.buckets[bucket_idx], bucket_idx + ) self.next_bucket += 1 @@ -470,11 +540,11 @@ def comm_ready_buckets(self, param): self.next_bucket += 1 else: raise ValueError( - "i should always be >= next_bucket") + "i should always be >= next_bucket" + ) else: self.ready_buckets_not_reduced.add(bucket_idx) - def needs_refresh(self): self.needs_refresh = True @@ -483,17 +553,29 @@ def forward(self, *inputs, **kwargs): if not self.delay_allreduce: param_list = [ - param for param in self.module.parameters() if param.requires_grad] + param + for param in self.module.parameters() + if param.requires_grad + ] # Conditions under which to refresh self.record # Forward has the authority to set needs_refresh to True, but only allreduce_params # in backward has the authority to set needs_refresh to False. # Parentheses are not necessary for correct order of operations, but make the intent clearer. - if ((not self.active_params) or - (len(param_list) != len(self.active_params)) or - any([param1 is not param2 for param1, param2 in zip(param_list, self.active_params)])): + if ( + (not self.active_params) + or (len(param_list) != len(self.active_params)) + or any( + [ + param1 is not param2 + for param1, param2 in zip( + param_list, self.active_params + ) + ] + ) + ): self.needs_refresh = True - #self.needs_refresh = True + # self.needs_refresh = True if self.needs_refresh: self.active_i_buckets = [] self.buckets = [] @@ -502,15 +584,19 @@ def forward(self, *inputs, **kwargs): self.tmp_numels = [0, 0, 0] self.bucket_sizes = [] self.param_id_to_active_i = { - id(param): i for i, param in enumerate(param_list)} + id(param): i for i, param in enumerate(param_list) + } self.param_id_to_bucket = {} else: - self.buckets = [[None for _ in range(self.bucket_sizes[i])] - for i in range(self.num_buckets)] + self.buckets = [ + [None for _ in range(self.bucket_sizes[i])] + for i in range(self.num_buckets) + ] self.buckets_ready_size = [0 for i in range(self.num_buckets)] - if(self.retain_allreduce_buffers): + if self.retain_allreduce_buffers: self.allreduce_buffers = [ - None for _ in range(self.num_buckets)] + None for _ in range(self.num_buckets) + ] self.next_bucket = 0 self.ready_buckets_not_reduced = set() diff --git a/pretrain/PyTorch/logger.py b/pretrain/PyTorch/logger.py index db7d571..d21785d 100644 --- a/pretrain/PyTorch/logger.py +++ b/pretrain/PyTorch/logger.py @@ -2,19 +2,21 @@ import os -logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', - datefmt='%m/%d/%Y %H:%M:%S', - level=logging.INFO) +logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, +) logger = logging.getLogger(__name__) -class Logger(): +class Logger: def __init__(self, cuda=False): self.logger = logging.getLogger(__name__) self.cuda = cuda def info(self, message, *args, **kwargs): - local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) + local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]) if 
(self.cuda and local_rank == 0) or not self.cuda: self.logger.info(message, *args, **kwargs) diff --git a/pretrain/PyTorch/models.py b/pretrain/PyTorch/models.py index 70be32b..c172dc5 100644 --- a/pretrain/PyTorch/models.py +++ b/pretrain/PyTorch/models.py @@ -1,21 +1,15 @@ -import argparse -import logging -import random -import numpy as np -import os import torch -import json import torch.nn as nn -import torch.nn.functional as F -import torch.distributed as dist -from torch.nn import CrossEntropyLoss, MSELoss +from torch.nn import CrossEntropyLoss from logger import Logger from dataset import BatchType -from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.modeling import BertModel, BertConfig -from pytorch_pretrained_bert.modeling import BertPreTrainingHeads, BertPreTrainedModel, BertPreTrainingHeads, BertLMPredictionHead -from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE +from pytorch_pretrained_bert.modeling import ( + # BertPreTrainingHeads, + BertPreTrainedModel, + BertPreTrainingHeads, +) class BertPretrainingLoss(BertPreTrainedModel): @@ -23,21 +17,38 @@ def __init__(self, bert_encoder, config): super(BertPretrainingLoss, self).__init__(config) self.bert = bert_encoder self.cls = BertPreTrainingHeads( - config, self.bert.embeddings.word_embeddings.weight) + config, self.bert.embeddings.word_embeddings.weight + ) self.cls.apply(self.init_bert_weights) - def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None): - sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, - output_all_encoded_layers=False) + def forward( + self, + input_ids, + token_type_ids=None, + attention_mask=None, + masked_lm_labels=None, + next_sentence_label=None, + ): + sequence_output, pooled_output = self.bert( + input_ids, + token_type_ids, + attention_mask, + output_all_encoded_layers=False, + ) prediction_scores, seq_relationship_score = self.cls( - sequence_output, pooled_output) + sequence_output, pooled_output + ) if masked_lm_labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) next_sentence_loss = loss_fct( - seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + seq_relationship_score.view(-1, 2), + next_sentence_label.view(-1), + ) masked_lm_loss = loss_fct( - prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + prediction_scores.view(-1, self.config.vocab_size), + masked_lm_labels.view(-1), + ) total_loss = masked_lm_loss + next_sentence_loss return total_loss else: @@ -45,8 +56,9 @@ def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm class MTLRouting(nn.Module): - """This setup is to add MultiTask Training support in BERT Training. + """This setup is to add MultiTask Training support in BERT Training. 
""" + def __init__(self, encoder: BertModel, write_log, summary_writer): super(MTLRouting, self).__init__() self.bert_encoder = encoder @@ -58,27 +70,32 @@ def __init__(self, encoder: BertModel, write_log, summary_writer): self.logger = Logger(cuda=torch.cuda.is_available()) self.summary_writer = summary_writer - def register_batch(self, batch_type, module_name, loss_calculation: nn.Module): + def register_batch( + self, batch_type, module_name, loss_calculation: nn.Module + ): assert isinstance(loss_calculation, nn.Module) self._batch_loss_calculation[str(batch_type.value)] = loss_calculation self._batch_counter[batch_type] = 0 self._batch_module_name[batch_type] = module_name - def log_summary_writer(self, batch_type, logs: dict, base='Train'): + def log_summary_writer(self, batch_type, logs: dict, base="Train"): if self.write_log: counter = self._batch_counter[batch_type] module_name = self._batch_module_name.get( - batch_type, self._get_batch_type_error(batch_type)) + batch_type, self._get_batch_type_error(batch_type) + ) for key, log in logs.items(): self.summary_writer.add_scalar( - f'{base}/{module_name}/{key}', log, counter) + f"{base}/{module_name}/{key}", log, counter + ) self._batch_counter[batch_type] = counter + 1 def _get_batch_type_error(self, batch_type): def f(*args, **kwargs): - message = f'Misunderstood batch type of {batch_type}' + message = f"Misunderstood batch type of {batch_type}" self.logger.error(message) raise ValueError(message) + return f def forward(self, batch, log=True): @@ -88,19 +105,31 @@ def forward(self, batch, log=True): if batch_type == BatchType.PRETRAIN_BATCH: loss_function = self._batch_loss_calculation[str(batch_type)] - loss = loss_function(input_ids=batch[1], - token_type_ids=batch[3], - attention_mask=batch[2], - masked_lm_labels=batch[5], - next_sentence_label=batch[4]) + loss = loss_function( + input_ids=batch[1], + token_type_ids=batch[3], + attention_mask=batch[2], + masked_lm_labels=batch[5], + next_sentence_label=batch[4], + ) if log: self.log_summary_writer( - batch_type, logs={'pretrain_loss': loss.item()}) + batch_type, logs={"pretrain_loss": loss.item()} + ) return loss class BertMultiTask: - def __init__(self, job_config, use_pretrain, tokenizer, cache_dir, device, write_log, summary_writer): + def __init__( + self, + job_config, + use_pretrain, + tokenizer, + cache_dir, + device, + write_log, + summary_writer, + ): self.job_config = job_config if not use_pretrain: @@ -111,26 +140,42 @@ def __init__(self, job_config, use_pretrain, tokenizer, cache_dir, device, write self.bert_encoder = BertModel(bert_config) # Use pretrained bert weights else: - self.bert_encoder = BertModel.from_pretrained(self.job_config.get_model_file_type(), cache_dir=cache_dir) + self.bert_encoder = BertModel.from_pretrained( + self.job_config.get_model_file_type(), cache_dir=cache_dir + ) bert_config = self.bert_encoder.config - self.network=MTLRouting(self.bert_encoder, write_log = write_log, summary_writer = summary_writer) + self.network = MTLRouting( + self.bert_encoder, + write_log=write_log, + summary_writer=summary_writer, + ) - #config_data=self.config['data'] + # config_data=self.config['data'] # Pretrain Dataset - self.network.register_batch(BatchType.PRETRAIN_BATCH, "pretrain_dataset", loss_calculation=BertPretrainingLoss(self.bert_encoder, bert_config)) - - self.device=device + self.network.register_batch( + BatchType.PRETRAIN_BATCH, + "pretrain_dataset", + loss_calculation=BertPretrainingLoss( + self.bert_encoder, bert_config + ), + ) + + 
self.device = device # self.network = self.network.float() # print(f"Bert ID: {id(self.bert_encoder)} from GPU: {dist.get_rank()}") def save(self, filename: str): - network=self.network.module + network = self.network.module return torch.save(network.state_dict(), filename) def load(self, model_state_dict: str): - return self.network.module.load_state_dict(torch.load(model_state_dict, map_location=lambda storage, loc: storage)) + return self.network.module.load_state_dict( + torch.load( + model_state_dict, map_location=lambda storage, loc: storage + ) + ) def move_batch(self, batch, non_blocking=False): return batch.to(self.device, non_blocking) diff --git a/pretrain/PyTorch/optimization.py b/pretrain/PyTorch/optimization.py index 6b25b7c..2889cf3 100644 --- a/pretrain/PyTorch/optimization.py +++ b/pretrain/PyTorch/optimization.py @@ -1,36 +1,55 @@ import math + def warmup_linear(x, warmup=0.002): if warmup == 0.0: return 1.0 elif x < warmup: - return x/warmup + return x / warmup return 1.0 - x -def warmup_linear_decay_exp(global_step, decay_rate, decay_steps, total_steps, warmup=0.002): - x = global_step/total_steps +def warmup_linear_decay_exp( + global_step, decay_rate, decay_steps, total_steps, warmup=0.002 +): + x = global_step / total_steps warmup_end = warmup * total_steps if warmup == 0.0: return 1.0 elif x < warmup: - return x/warmup - return decay_rate**((global_step-warmup_end)/decay_steps) + return x / warmup + return decay_rate ** ((global_step - warmup_end) / decay_steps) + -class LinearWarmupExponentialSchedule(): - def __init__(self, warmup=0.002, t_total=-1, initial_lr = 2e-5, final_lr=5e-6, decay_rate=0.99): +class LinearWarmupExponentialSchedule: + def __init__( + self, + warmup=0.002, + t_total=-1, + initial_lr=2e-5, + final_lr=5e-6, + decay_rate=0.99, + ): self.warmup = warmup self.total_steps = t_total self.decay_rate = decay_rate self.warmup_end = self.warmup * t_total # Calculate the decay Steps - self.decay_steps = int(math.ceil((math.log(self.decay_rate)/ math.log(final_lr/initial_lr)) * (1.0 - warmup) * t_total)) + self.decay_steps = int( + math.ceil( + (math.log(self.decay_rate) / math.log(final_lr / initial_lr)) + * (1.0 - warmup) + * t_total + ) + ) def get_lr(self, global_step): - x = global_step/self.total_steps + x = global_step / self.total_steps if self.warmup == 0.0: return 1.0 elif x < self.warmup: - return x/self.warmup - return self.decay_rate**((global_step-self.warmup_end)/self.decay_steps) + return x / self.warmup + return self.decay_rate ** ( + (global_step - self.warmup_end) / self.decay_steps + ) diff --git a/pretrain/PyTorch/sources.py b/pretrain/PyTorch/sources.py index b138fd0..564d752 100644 --- a/pretrain/PyTorch/sources.py +++ b/pretrain/PyTorch/sources.py @@ -1,5 +1,4 @@ from tqdm import tqdm -from typing import Tuple from random import shuffle import pickle import random @@ -35,16 +34,24 @@ def get_values(self): class PretrainingDataCreator: - def __init__(self, path, tokenizer: BertTokenizer, max_seq_length, readin: int = 2000000, dupe_factor: int = 5, small_seq_prob: float = 0.1): + def __init__( + self, + path, + tokenizer: BertTokenizer, + max_seq_length, + readin: int = 2000000, + dupe_factor: int = 5, + small_seq_prob: float = 0.1, + ): self.dupe_factor = dupe_factor self.max_seq_length = max_seq_length self.small_seq_prob = small_seq_prob documents = [] instances = [] - with open(path, encoding='utf-8') as fd: + with open(path, encoding="utf-8") as fd: for i, line in enumerate(tqdm(fd)): - line = line.replace('\n', '') + line = 
line.replace("\n", "") # Expected format (Q,T,U,S,D) # query, title, url, snippet, document = line.split('\t') # ! remove this following line later @@ -82,13 +89,13 @@ def __setstate__(self, state): self.__dict__.update(state) def save(self, filename): - with open(filename, 'wb') as outfile: + with open(filename, "wb") as outfile: pickle.dump(self, outfile) @staticmethod def load(filename): print("Loading filename {}".format(filename)) - with open(filename, 'rb') as f: + with open(filename, "rb") as f: return pickle.load(f) def create_training_instance(self, index): @@ -115,7 +122,7 @@ def create_training_instance(self, index): segment = document[i] current_chunk.append(segment) current_length += len(segment) - if i == len(document)-1 or current_length >= target_seq_length: + if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` # (first) sentence. @@ -138,12 +145,13 @@ def create_training_instance(self, index): # Pick a random document for _ in range(10): random_doc_index = random.randint( - 0, len(self.documents) - 1) + 0, len(self.documents) - 1 + ) if random_doc_index != index: break random_doc = self.documents[random_doc_index] - random_start = random.randint(0, len(random_doc)-1) + random_start = random.randint(0, len(random_doc) - 1) for j in range(random_start, len(random_doc)): tokens_b.extend(random_doc[j]) if len(tokens_b) >= target_b_length: @@ -165,8 +173,9 @@ def create_training_instance(self, index): assert len(tokens_a) >= 1 assert len(tokens_b) >= 1 - instances.append(TokenInstance( - tokens_a, tokens_b, int(is_random_next))) + instances.append( + TokenInstance(tokens_a, tokens_b, int(is_random_next)) + ) current_chunk = [] current_length = 0 @@ -176,24 +185,32 @@ def create_training_instance(self, index): class GenericPretrainingDataCreator(PretrainingDataCreator): - def __init__(self, path, tokenizer: BertTokenizer, max_seq_length: int = 512, readin: int = 2000000, dupe_factor: int = 6, small_seq_prob: float = 0.1): + def __init__( + self, + path, + tokenizer: BertTokenizer, + max_seq_length: int = 512, + readin: int = 2000000, + dupe_factor: int = 6, + small_seq_prob: float = 0.1, + ): self.dupe_factor = dupe_factor self.max_seq_length = max_seq_length self.small_seq_prob = small_seq_prob documents = [] instances = [] - with open(path, encoding='utf-8') as fd: + with open(path, encoding="utf-8") as fd: document = [] for i, line in enumerate(tqdm(fd)): - line = line.replace('\n', '') + line = line.replace("\n", "") # document = line # if len(document.split("")) <= 3: # continue if len(line) == 0: # This is end of document documents.append(document) document = [] - if len(line.split(' ')) > 2: + if len(line.split(" ")) > 2: document.append(tokenizer.tokenize(line)) if len(document) > 0: documents.append(document) @@ -212,25 +229,36 @@ def __init__(self, path, tokenizer: BertTokenizer, max_seq_length: int = 512, r self.documents = None documents = None + class WikiPretrainingDataCreator(PretrainingDataCreator): - def __init__(self, path, tokenizer: BertTokenizer, max_seq_length: int = 512, readin: int = 2000000, dupe_factor: int = 6, small_seq_prob: float = 0.1): + def __init__( + self, + path, + tokenizer: BertTokenizer, + max_seq_length: int = 512, + readin: int = 2000000, + dupe_factor: int = 6, + small_seq_prob: float = 0.1, + ): self.dupe_factor = dupe_factor self.max_seq_length = max_seq_length self.small_seq_prob = small_seq_prob documents = [] instances = [] - with 
open(path, encoding='utf-8') as fd: + with open(path, encoding="utf-8") as fd: document = [] for i, line in enumerate(tqdm(fd)): - line = line.replace('\n', '') + line = line.replace("\n", "") # document = line # if len(document.split("")) <= 3: # continue - if len(line) > 0 and line[:2] == "[[" : # This is end of document + if ( + len(line) > 0 and line[:2] == "[[" + ): # This is end of document documents.append(document) document = [] - if len(line.split(' ')) > 2: + if len(line.split(" ")) > 2: document.append(tokenizer.tokenize(line)) if len(document) > 0: documents.append(document) @@ -245,4 +273,4 @@ def __init__(self, path, tokenizer: BertTokenizer, max_seq_length: int = 512, r self.instances = instances self.len = len(self.instances) self.documents = None - documents = None \ No newline at end of file + documents = None diff --git a/pretrain/PyTorch/text.py b/pretrain/PyTorch/text.py index c6976f1..6177f4e 100644 --- a/pretrain/PyTorch/text.py +++ b/pretrain/PyTorch/text.py @@ -2,8 +2,10 @@ PAD = 0 + def mask(x): return x != PAD + def torch_long(x): return torch.LongTensor(x) diff --git a/pretrain/PyTorch/train.py b/pretrain/PyTorch/train.py index 18b3bfb..a277d59 100644 --- a/pretrain/PyTorch/train.py +++ b/pretrain/PyTorch/train.py @@ -1,10 +1,10 @@ +# flake8: noqa from datetime import datetime import numpy as np import random import os import sys -import json import torch import torch.nn as nn import torch.distributed as dist @@ -13,8 +13,11 @@ from torch.utils.data.distributed import DistributedSampler import argparse -from tqdm import tqdm -from checkpoint import checkpoint_model, load_checkpoint, latest_checkpoint_file +from checkpoint import ( + checkpoint_model, + load_checkpoint, + latest_checkpoint_file, +) from logger import Logger from utils import get_sample_writer from models import BertMultiTask @@ -23,8 +26,18 @@ from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.optimization import BertAdam from optimization import warmup_linear_decay_exp -from azureml_adapter import set_environment_variables_for_nccl_backend, get_local_rank, get_global_size, get_local_size -from sources import PretrainingDataCreator, TokenInstance, GenericPretrainingDataCreator + +from azureml_adapter import ( + set_environment_variables_for_nccl_backend, + get_local_rank, + get_global_size, + get_local_size, +) +from sources import ( + PretrainingDataCreator, + TokenInstance, + GenericPretrainingDataCreator, +) from sources import WikiPretrainingDataCreator from configuration import BertJobConfiguration @@ -33,9 +46,16 @@ def get_effective_batch(total): if use_multigpu_with_single_device_per_process: - return total//dist.get_world_size()//train_batch_size//gradient_accumulation_steps + return ( + total + // dist.get_world_size() + // train_batch_size + // gradient_accumulation_steps + ) else: - return total//train_batch_size//gradient_accumulation_steps # Dividing with gradient_accumulation_steps since we multiplied it earlier + return ( + total // train_batch_size // gradient_accumulation_steps + ) # Dividing with gradient_accumulation_steps since we multiplied it earlier def get_dataloader(dataset: Dataset, eval_set=False): @@ -43,18 +63,29 @@ def get_dataloader(dataset: Dataset, eval_set=False): train_sampler = RandomSampler(dataset) else: train_sampler = DistributedSampler(dataset) - return (x for x in DataLoader(dataset, batch_size=train_batch_size // 2 if eval_set else train_batch_size, - sampler=train_sampler, 
num_workers=job_config.get_num_workers())) + return ( + x + for x in DataLoader( + dataset, + batch_size=train_batch_size // 2 if eval_set else train_batch_size, + sampler=train_sampler, + num_workers=job_config.get_num_workers(), + ) + ) def pretrain_validation(index): model.eval() - dataset = PreTrainingDataset(tokenizer=tokenizer, - folder=job_config.get_validation_folder_path(), - logger=logger, max_seq_length=max_seq_length, - index=index, data_type=PretrainDataType.VALIDATION, - max_predictions_per_seq=max_predictions_per_seq, - masked_lm_prob=masked_lm_prob) + dataset = PreTrainingDataset( + tokenizer=tokenizer, + folder=job_config.get_validation_folder_path(), + logger=logger, + max_seq_length=max_seq_length, + index=index, + data_type=PretrainDataType.VALIDATION, + max_predictions_per_seq=max_predictions_per_seq, + masked_lm_prob=masked_lm_prob, + ) data_batches = get_dataloader(dataset, eval_set=True) eval_loss = 0 nb_eval_steps = 0 @@ -70,7 +101,7 @@ def pretrain_validation(index): eval_loss = eval_loss / nb_eval_steps logger.info(f"Validation Loss for epoch {index + 1} is: {eval_loss}") if check_write_log(): - summary_writer.add_scalar(f'Validation/Loss', eval_loss, index + 1) + summary_writer.add_scalar(f"Validation/Loss", eval_loss, index + 1) run.log("validation_loss", np.float(eval_loss)) return @@ -84,19 +115,27 @@ def train(index): batchs_per_dataset = [] # Pretraining datasets - wiki_pretrain_dataset = PreTrainingDataset(tokenizer=tokenizer, - folder=job_config.get_wiki_pretrain_dataset_path(), - logger=logger, max_seq_length=max_seq_length, - index=index, data_type=PretrainDataType.WIKIPEDIA, - max_predictions_per_seq=max_predictions_per_seq, - masked_lm_prob=masked_lm_prob) + wiki_pretrain_dataset = PreTrainingDataset( + tokenizer=tokenizer, + folder=job_config.get_wiki_pretrain_dataset_path(), + logger=logger, + max_seq_length=max_seq_length, + index=index, + data_type=PretrainDataType.WIKIPEDIA, + max_predictions_per_seq=max_predictions_per_seq, + masked_lm_prob=masked_lm_prob, + ) datalengths.append(len(wiki_pretrain_dataset)) dataloaders[i] = get_dataloader(wiki_pretrain_dataset) num_batches_in_dataset = get_effective_batch(len(wiki_pretrain_dataset)) - logger.info('Wikpedia data file: Number of samples {}, number of batches required to process these samples: {}'.format(len(wiki_pretrain_dataset), num_batches_in_dataset)) - + logger.info( + "Wikpedia data file: Number of samples {}, number of batches required to process these samples: {}".format( + len(wiki_pretrain_dataset), num_batches_in_dataset + ) + ) + batchs_per_dataset.append(num_batches_in_dataset) i += 1 @@ -107,7 +146,11 @@ def train(index): dataset_batches = [] for i, batch_count in enumerate(batchs_per_dataset): dataset_batches.extend([i] * batch_count) - logger.info('Number of batches to process *all* data samples in this epoch: {}'.format(len(dataset_batches))) + logger.info( + "Number of batches to process *all* data samples in this epoch: {}".format( + len(dataset_batches) + ) + ) # shuffle random.shuffle(dataset_batches) @@ -116,9 +159,15 @@ def train(index): # data type, hence the multiplication with grad_accumulation_steps with dataset_batch_type dataset_picker = [] for dataset_batch_type in dataset_batches: - dataset_picker.extend([dataset_batch_type] * gradient_accumulation_steps ) - - logger.info('Number of steps to process all batches in this epoch: {}'.format(len(dataset_picker))) + dataset_picker.extend( + [dataset_batch_type] * gradient_accumulation_steps + ) + + logger.info( + "Number of 
steps to process all batches in this epoch: {}".format( + len(dataset_picker) + ) + ) model.train() # Counter of sequences in an "epoch" @@ -134,7 +183,11 @@ def train(index): batch = tuple(t.to(device) for t in batch) # Move to GPU if step > 1 and step % 1000 == 0: - logger.info("{} Number of sequences processed so far: {} (cumulative in {} steps)".format(datetime.utcnow(), sequences_counter, step)) + logger.info( + "{} Number of sequences processed so far: {} (cumulative in {} steps)".format( + datetime.utcnow(), sequences_counter, step + ) + ) # Calculate forward pass loss = model.network(batch) @@ -150,7 +203,10 @@ def train(index): # reduction only happens in backward if this method is called before # when using the distributed module if accumulate_gradients: - if use_multigpu_with_single_device_per_process and (step + 1) % gradient_accumulation_steps == 0: + if ( + use_multigpu_with_single_device_per_process + and (step + 1) % gradient_accumulation_steps == 0 + ): model.network.enable_need_reduction() else: model.network.disable_need_reduction() @@ -163,28 +219,31 @@ def train(index): if fp16: # modify learning rate with special warm up BERT uses # if fp16 is False, BertAdam is used that handles this automatically - lr_this_step = \ - job_config.get_learning_rate() * warmup_linear_decay_exp(global_step, - job_config.get_decay_rate(), - job_config.get_decay_step(), - job_config.get_total_training_steps(), - job_config.get_warmup_proportion()) + lr_this_step = job_config.get_learning_rate() * warmup_linear_decay_exp( + global_step, + job_config.get_decay_rate(), + job_config.get_decay_step(), + job_config.get_total_training_steps(), + job_config.get_warmup_proportion(), + ) for param_group in optimizer.param_groups: - param_group['lr'] = lr_this_step + param_group["lr"] = lr_this_step # Record the LR against global_step on tensorboard if check_write_log(): - summary_writer.add_scalar(f'Train/lr', lr_this_step, global_step) - + summary_writer.add_scalar( + f"Train/lr", lr_this_step, global_step + ) + optimizer.step() optimizer.zero_grad() global_step += 1 except StopIteration: continue - + if check_write_log(): run.log("training_loss", np.float(loss)) - + logger.info("Completed {} steps".format(step)) logger.info("Completed processing {} sequences".format(sequences_counter)) @@ -197,69 +256,117 @@ def train(index): def str2bool(val): return val.lower() == "true" or val.lower() == "t" or val.lower() == "1" + def check_write_log(): - return dist.get_rank() == 0 or not use_multigpu_with_single_device_per_process + return ( + dist.get_rank() == 0 or not use_multigpu_with_single_device_per_process + ) -if __name__ == '__main__': + +if __name__ == "__main__": print("The arguments are: " + str(sys.argv)) parser = argparse.ArgumentParser() # Required_parameters - parser.add_argument("--config_file", "--cf", - help="pointer to the configuration file of the experiment", type=str, required=True) - - parser.add_argument("--path", default=None, type=str, required=True, - help="The blob storage directory for data, config files, cache and output.") + parser.add_argument( + "--config_file", + "--cf", + help="pointer to the configuration file of the experiment", + type=str, + required=True, + ) + + parser.add_argument( + "--path", + default=None, + type=str, + required=True, + help="The blob storage directory for data, config files, cache and output.", + ) # Optional Params - parser.add_argument("--max_seq_length", default=512, type=int, - help="The maximum total input sequence length after WordPiece 
tokenization. Sequences " - "longer than this will be truncated, and sequences shorter than this will be padded.") - parser.add_argument("--max_predictions_per_seq", "--max_pred", default=80, type=int, - help="The maximum number of masked tokens in a sequence to be predicted.") - parser.add_argument("--masked_lm_prob", "--mlm_prob", default=0.15, - type=float, help="The masking probability for languge model.") - parser.add_argument("--train_batch_size", default=32, - type=int, help="Total batch size for training.") - parser.add_argument("--no_cuda", - type=str, - default='False', - help="Whether not to use CUDA when available") - parser.add_argument('--seed', - type=int, - default=42, - help="random seed for initialization") - parser.add_argument('--accumulate_gradients', - type=str, - default='True', - help="Enabling gradient accumulation optimization") - parser.add_argument('--gradient_accumulation_steps', - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.") - parser.add_argument('--fp16', - type=str, - default='False', - help="Whether to use 16-bit float precision instead of 32-bit") - parser.add_argument('--use_pretrain', - type=str, - default='False', - help="Whether to use Bert Pretrain Weights or not") - - parser.add_argument('--loss_scale', - type=float, - default=0, - help='Loss scaling, positive power of 2 values can improve fp16 convergence.') - parser.add_argument('--load_training_checkpoint', '--load_cp', - type=str, - default='False', - help="This is the path to the TAR file which contains model+opt state_dict() checkpointed.") - - parser.add_argument('--use_multigpu_with_single_device_per_process', - type=str, - default='True', - help="Whether only one device is managed per process") + parser.add_argument( + "--max_seq_length", + default=512, + type=int, + help="The maximum total input sequence length after WordPiece tokenization. 
Sequences " + "longer than this will be truncated, and sequences shorter than this will be padded.", + ) + parser.add_argument( + "--max_predictions_per_seq", + "--max_pred", + default=80, + type=int, + help="The maximum number of masked tokens in a sequence to be predicted.", + ) + parser.add_argument( + "--masked_lm_prob", + "--mlm_prob", + default=0.15, + type=float, + help="The masking probability for languge model.", + ) + parser.add_argument( + "--train_batch_size", + default=32, + type=int, + help="Total batch size for training.", + ) + parser.add_argument( + "--no_cuda", + type=str, + default="False", + help="Whether not to use CUDA when available", + ) + parser.add_argument( + "--seed", type=int, default=42, help="random seed for initialization" + ) + parser.add_argument( + "--accumulate_gradients", + type=str, + default="True", + help="Enabling gradient accumulation optimization", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--fp16", + type=str, + default="False", + help="Whether to use 16-bit float precision instead of 32-bit", + ) + parser.add_argument( + "--use_pretrain", + type=str, + default="False", + help="Whether to use Bert Pretrain Weights or not", + ) + + parser.add_argument( + "--loss_scale", + type=float, + default=0, + help="Loss scaling, positive power of 2 values can improve fp16 convergence.", + ) + parser.add_argument( + "--load_training_checkpoint", + "--load_cp", + type=str, + default="False", + help="This is the path to the TAR file which contains model+opt state_dict() checkpointed.", + ) + + parser.add_argument( + "--use_multigpu_with_single_device_per_process", + type=str, + default="True", + help="Whether only one device is managed per process", + ) args = parser.parse_args() @@ -267,9 +374,12 @@ def check_write_log(): fp16 = str2bool(args.fp16) accumulate_gradients = str2bool(args.accumulate_gradients) use_pretrain = str2bool(args.use_pretrain) - use_multigpu_with_single_device_per_process = str2bool(args.use_multigpu_with_single_device_per_process) - path= args.path + use_multigpu_with_single_device_per_process = str2bool( + args.use_multigpu_with_single_device_per_process + ) + + path = args.path config_file = args.config_file gradient_accumulation_steps = args.gradient_accumulation_steps train_batch_size = args.train_batch_size @@ -284,11 +394,11 @@ def check_write_log(): local_rank = get_local_rank() global_size = get_global_size() - local_size = get_local_size() - # TODO use logger - print('local_rank = {}'.format(local_rank)) - print('global_size = {}'.format(global_size)) - print('local_size = {}'.format(local_size)) + local_size = get_local_size() + # TODO use logger + print("local_rank = {}".format(local_rank)) + print("global_size = {}".format(global_size)) + print("local_size = {}".format(local_size)) set_environment_variables_for_nccl_backend(local_size == global_size) @@ -296,7 +406,9 @@ def check_write_log(): logger = Logger(cuda=torch.cuda.is_available()) # # Extact config file from blob storage - job_config = BertJobConfiguration(config_file_path=os.path.join(path, config_file)) + job_config = BertJobConfiguration( + config_file_path=os.path.join(path, config_file) + ) # Replace placeholder path prefix by path corresponding to "ds.path('data/bert_data/').as_mount()" job_config.replace_path_placeholders(path) @@ -312,16 +424,24 @@ def check_write_log(): device = 
torch.device("cuda", local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of synchronizing nodes/GPUs - torch.distributed.init_process_group(backend='nccl') + torch.distributed.init_process_group(backend="nccl") if fp16: - logger.info("16-bits distributed training is not officially supported in the version of PyTorch currently used, but it works. Refer to https://github.com/pytorch/pytorch/pull/13496 for supported version.") + logger.info( + "16-bits distributed training is not officially supported in the version of PyTorch currently used, but it works. Refer to https://github.com/pytorch/pytorch/pull/13496 for supported version." + ) fp16 = True # - logger.info("device: {} n_gpu: {}, use_multigpu_with_single_device_per_process: {}, 16-bits training: {}".format( - device, n_gpu, use_multigpu_with_single_device_per_process, fp16)) + logger.info( + "device: {} n_gpu: {}, use_multigpu_with_single_device_per_process: {}, 16-bits training: {}".format( + device, n_gpu, use_multigpu_with_single_device_per_process, fp16 + ) + ) if gradient_accumulation_steps < 1: - raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( - gradient_accumulation_steps)) + raise ValueError( + "Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + gradient_accumulation_steps + ) + ) train_batch_size = int(train_batch_size / gradient_accumulation_steps) @@ -334,7 +454,7 @@ def check_write_log(): torch.cuda.manual_seed_all(seed) # Create an outputs/ folder in the blob storage - parent_dir = os.path.join(path, 'outputs', str(run.experiment.name)) + parent_dir = os.path.join(path, "outputs", str(run.experiment.name)) output_dir = os.path.join(parent_dir, str(run.id)) os.makedirs(output_dir, exist_ok=True) saved_model_path = os.path.join(output_dir, "saved_models", job_name) @@ -342,39 +462,55 @@ def check_write_log(): summary_writer = None # Prepare Summary Writer and saved_models path if check_write_log(): - #azureml.tensorboard only streams from /logs directory, therefore hardcoded - summary_writer = get_sample_writer( - name=job_name, base='./logs') + # azureml.tensorboard only streams from /logs directory, therefore hardcoded + summary_writer = get_sample_writer(name=job_name, base="./logs") os.makedirs(saved_model_path, exist_ok=True) # Loading Tokenizer (vocabulary from blob storage, if exists) logger.info("Extracting the vocabulary") - tokenizer = BertTokenizer.from_pretrained(job_config.get_token_file_type(), cache_dir=path) - logger.info("Vocabulary contains {} tokens".format(len(list(tokenizer.vocab.keys())))) - + tokenizer = BertTokenizer.from_pretrained( + job_config.get_token_file_type(), cache_dir=path + ) + logger.info( + "Vocabulary contains {} tokens".format( + len(list(tokenizer.vocab.keys())) + ) + ) # Loading Model logger.info("Initializing BertMultiTask model") - model = BertMultiTask(job_config = job_config, use_pretrain = use_pretrain, tokenizer = tokenizer, - cache_dir = path, device = device, write_log = check_write_log(), - summary_writer = summary_writer) + model = BertMultiTask( + job_config=job_config, + use_pretrain=use_pretrain, + tokenizer=tokenizer, + cache_dir=path, + device=device, + write_log=check_write_log(), + summary_writer=summary_writer, + ) logger.info("Converting the input parameters") if fp16: model.half() - + model.to(device) if use_multigpu_with_single_device_per_process: try: if accumulate_gradients: - logger.info("Enabling gradient accumulation by using a forked version of 
DistributedDataParallel implementation available in the branch bertonazureml/apex at https://www.github.com/microsoft/apex") + logger.info( + "Enabling gradient accumulation by using a forked version of DistributedDataParallel implementation available in the branch bertonazureml/apex at https://www.github.com/microsoft/apex" + ) from distributed_apex import DistributedDataParallel as DDP else: - logger.info("Using Default Apex DistributedDataParallel implementation") + logger.info( + "Using Default Apex DistributedDataParallel implementation" + ) from apex.parallel import DistributedDataParallel as DDP except ImportError: - raise ImportError("To use distributed and fp16 training, please install apex from the branch bertonazureml/apex at https://www.github.com/microsoft/apex.") + raise ImportError( + "To use distributed and fp16 training, please install apex from the branch bertonazureml/apex at https://www.github.com/microsoft/apex." + ) torch.cuda.set_device(local_rank) model.network = DDP(model.network, delay_allreduce=False) @@ -384,11 +520,25 @@ def check_write_log(): # Prepare Optimizer logger.info("Preparing the optimizer") param_optimizer = list(model.network.named_parameters()) - param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]] - no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] + param_optimizer = [n for n in param_optimizer if "pooler" not in n[0]] + no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ - {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, - {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + { + "params": [ + p + for n, p in param_optimizer + if not any(nd in n for nd in no_decay) + ], + "weight_decay": 0.01, + }, + { + "params": [ + p + for n, p in param_optimizer + if any(nd in n for nd in no_decay) + ], + "weight_decay": 0.0, + }, ] logger.info("Loading Apex and building the FusedAdam optimizer") @@ -396,46 +546,70 @@ def check_write_log(): if fp16: try: from apex.optimizers import FP16_Optimizer, FusedAdam - except: - raise ImportError("To use distributed and fp16 training, please install apex from the branch bertonazureml/apex at https://www.github.com/microsoft/apex.") - - optimizer = FusedAdam(optimizer_grouped_parameters, - lr=job_config.get_learning_rate(), - bias_correction=False, - max_grad_norm=1.0) + except ImportError: + raise ImportError( + "To use distributed and fp16 training, please install apex from the branch bertonazureml/apex at https://www.github.com/microsoft/apex." 
+ ) + + optimizer = FusedAdam( + optimizer_grouped_parameters, + lr=job_config.get_learning_rate(), + bias_correction=False, + max_grad_norm=1.0, + ) if loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: - optimizer = FP16_Optimizer( - optimizer, static_loss_scale=loss_scale) + optimizer = FP16_Optimizer(optimizer, static_loss_scale=loss_scale) else: - optimizer = BertAdam(optimizer_grouped_parameters, - lr=job_config.get_learning_rate(), - warmup=job_config.get_warmup_proportion(), - t_total=job_config.get_total_training_steps()) + optimizer = BertAdam( + optimizer_grouped_parameters, + lr=job_config.get_learning_rate(), + warmup=job_config.get_warmup_proportion(), + t_total=job_config.get_total_training_steps(), + ) global_step = 0 start_epoch = 0 - + # if args.load_training_checkpoint is not None: - if load_training_checkpoint != 'False': + if load_training_checkpoint != "False": logger.info(f"Looking for previous training checkpoint.") latest_checkpoint_path = latest_checkpoint_file(parent_dir, no_cuda) - logger.info(f"Restoring previous training checkpoint from {latest_checkpoint_path}") - start_epoch, global_step = load_checkpoint(model, optimizer, latest_checkpoint_path) - logger.info(f"The model is loaded from last checkpoint at epoch {start_epoch} when the global steps were at {global_step}") - + logger.info( + f"Restoring previous training checkpoint from {latest_checkpoint_path}" + ) + start_epoch, global_step = load_checkpoint( + model, optimizer, latest_checkpoint_path + ) + logger.info( + f"The model is loaded from last checkpoint at epoch {start_epoch} when the global steps were at {global_step}" + ) logger.info("Training the model") for index in range(start_epoch, job_config.get_total_epoch_count()): logger.info(f"Training epoch: {index + 1}") - + train(index) if check_write_log(): - epoch_ckp_path = os.path.join(saved_model_path, "bert_encoder_epoch_{0:04d}.pt".format(index + 1)) - logger.info(f"Saving checkpoint of the model from epoch {index + 1} at {epoch_ckp_path}") + epoch_ckp_path = os.path.join( + saved_model_path, + "bert_encoder_epoch_{0:04d}.pt".format(index + 1), + ) + logger.info( + f"Saving checkpoint of the model from epoch {index + 1} at {epoch_ckp_path}" + ) model.save_bert(epoch_ckp_path) - checkpoint_model(os.path.join(saved_model_path, "training_state_checkpoint_{0:04d}.tar".format(index + 1)), model, optimizer, index, global_step) + checkpoint_model( + os.path.join( + saved_model_path, + "training_state_checkpoint_{0:04d}.tar".format(index + 1), + ), + model, + optimizer, + index, + global_step, + ) diff --git a/pretrain/PyTorch/utils.py b/pretrain/PyTorch/utils.py index 42d344a..2aeb5cd 100644 --- a/pretrain/PyTorch/utils.py +++ b/pretrain/PyTorch/utils.py @@ -1,14 +1,12 @@ -import sys as _sys - -from typing import List -from collections import _iskeyword # type: ignore from tensorboardX import SummaryWriter import os -SUMMARY_WRITER_DIR_NAME = 'runs' +SUMMARY_WRITER_DIR_NAME = "runs" def get_sample_writer(name, base=".."): """Returns a tensorboard summary writer """ - return SummaryWriter(log_dir=os.path.join(base, SUMMARY_WRITER_DIR_NAME, name)) + return SummaryWriter( + log_dir=os.path.join(base, SUMMARY_WRITER_DIR_NAME, name) + ) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..9bd6669 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,16 @@ +[tool.black] +line-length = 79 +include = '\.pyi?$' +exclude = ''' +/( + \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | _build + | 
buck-out + | build + | dist +)/ +''' \ No newline at end of file
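
Returning to the `LinearWarmupExponentialSchedule` added in pretrain/PyTorch/optimization.py above: it ramps the learning-rate multiplier linearly from 0 to 1 over the first `warmup * t_total` steps, then decays it as `decay_rate ** ((step - warmup_end) / decay_steps)`, with `decay_steps` sized so the multiplier lands at roughly `final_lr / initial_lr` on the last step. Below is a small hedged sketch of how a training loop might consume it (the fp16 path in train.py applies `warmup_linear_decay_exp` in the same multiplier-times-base-LR fashion); the hyperparameter values are illustrative only.

```
# Hedged usage sketch for LinearWarmupExponentialSchedule; values are illustrative.
from optimization import LinearWarmupExponentialSchedule

initial_lr = 2e-5
schedule = LinearWarmupExponentialSchedule(
    warmup=0.002,        # 0.2% of training is linear warmup -> warmup_end = 200 steps here
    t_total=100000,      # total optimizer steps
    initial_lr=initial_lr,
    final_lr=5e-6,
    decay_rate=0.99,
)

for global_step in (0, 100, 200, 50000, 100000):
    lr_this_step = initial_lr * schedule.get_lr(global_step)
    # linear ramp until step 200, then exponential decay toward final_lr = 5e-6 at step 100000
    print(global_step, lr_this_step)
```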