- # coding=utf-8
- # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
- # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ OpenAI GPT configuration """
-
- from __future__ import absolute_import, division, print_function, unicode_literals
-
- import json
- import logging
- import sys
- from io import open
-
- from .configuration_utils import PretrainedConfig
-
- logger = logging.getLogger(__name__)
-
- OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
- "openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"
- }
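-
- # The map above links the "openai-gpt" shortcut name to the hosted config file. A minimal
- # usage sketch (assuming the `from_pretrained` helper inherited from `PretrainedConfig` in
- # configuration_utils resolves shortcut names through `pretrained_config_archive_map`):
- #
- #     config = OpenAIGPTConfig.from_pretrained("openai-gpt")
- #     print(config.n_embd, config.n_layer, config.n_head)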
-
- class OpenAIGPTConfig(PretrainedConfig):
- """
- Configuration class to store the configuration of an `OpenAIGPTModel`.
-
- Args:
- vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `OpenAIGPTModel` or the path to a configuration json file.
- n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLF]', ...)
- n_positions: Number of positional embeddings.
- n_ctx: Size of the causal mask (usually same as n_positions).
- n_embd: Dimensionality of the embeddings and hidden states.
- n_layer: Number of hidden layers in the Transformer encoder.
- n_head: Number of attention heads for each attention layer in
- the Transformer encoder.
- afn: The non-linear activation function (function or string) in the
- encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
- resid_pdrop: The dropout probability for all fully connected
- layers in the embeddings, encoder, and pooler.
- attn_pdrop: The dropout ratio for the attention
- probabilities.
- embd_pdrop: The dropout ratio for the embeddings.
- layer_norm_epsilon: The epsilon to use in the layer normalization layers.
- initializer_range: The standard deviation of the truncated_normal_initializer for
- initializing all weight matrices.
- predict_special_tokens: Whether to predict special tokens (when the model has a LM head).
- num_labels: The number of labels for the last projection layer of the sequence summary head.
- summary_type: Pooling strategy for the sequence summary head (e.g. 'last', 'first', 'mean' or 'cls_index').
- summary_use_proj: Whether to add a linear projection after pooling the hidden states.
- summary_activation: Activation applied to the summary projection output (e.g. 'tanh'), or None.
- summary_proj_to_labels: Whether the summary projection maps to `num_labels` instead of the hidden size.
- summary_first_dropout: The dropout probability applied before the summary projection.
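-
- Example (a minimal sketch using only this configuration class; the resulting object is
- typically passed to the companion `OpenAIGPTModel` in modeling_openai)::
-
- config = OpenAIGPTConfig(vocab_size_or_config_json_file=40478, n_layer=12, n_head=12)
- assert config.vocab_size == 40478
- assert config.hidden_size == config.n_embd  # common-name alias, see the properties below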
- """
- pretrained_config_archive_map = OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP
-
- def __init__(
- self,
- vocab_size_or_config_json_file=40478,
- n_positions=512,
- n_ctx=512,
- n_embd=768,
- n_layer=12,
- n_head=12,
- afn="gelu",
- resid_pdrop=0.1,
- embd_pdrop=0.1,
- attn_pdrop=0.1,
- layer_norm_epsilon=1e-5,
- initializer_range=0.02,
- predict_special_tokens=True,
-
- num_labels=1,
- summary_type='cls_index',
- summary_use_proj=True,
- summary_activation=None,
- summary_proj_to_labels=True,
- summary_first_dropout=0.1,
- **kwargs
- ):
- """Constructs OpenAIGPTConfig.
- """
- super(OpenAIGPTConfig, self).__init__(**kwargs)
-
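- # Two construction modes: a string/path argument is treated as a JSON config file whose
- # key/value pairs become attributes; an int argument is treated as the vocabulary size,
- # and the remaining keyword arguments are stored directly.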
- if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
- and isinstance(vocab_size_or_config_json_file, unicode)):
- with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
- json_config = json.loads(reader.read())
- for key, value in json_config.items():
- self.__dict__[key] = value
- elif isinstance(vocab_size_or_config_json_file, int):
- self.vocab_size = vocab_size_or_config_json_file
- self.n_ctx = n_ctx
- self.n_positions = n_positions
- self.n_embd = n_embd
- self.n_layer = n_layer
- self.n_head = n_head
- self.afn = afn
- self.resid_pdrop = resid_pdrop
- self.embd_pdrop = embd_pdrop
- self.attn_pdrop = attn_pdrop
- self.layer_norm_epsilon = layer_norm_epsilon
- self.initializer_range = initializer_range
- self.predict_special_tokens = predict_special_tokens
-
- self.num_labels = num_labels
- self.summary_type = summary_type
- self.summary_use_proj = summary_use_proj
- self.summary_activation = summary_activation
- self.summary_first_dropout = summary_first_dropout
- self.summary_proj_to_labels = summary_proj_to_labels
- else:
- raise ValueError(
- "First argument must be either a vocabulary size (int)"
- "or the path to a pretrained model config file (str)"
- )
-
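- # Read-only aliases exposing the GPT-specific attribute names under the common names
- # (max_position_embeddings, hidden_size, ...) used by configurations elsewhere in the library.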
- @property
- def max_position_embeddings(self):
- return self.n_positions
-
- @property
- def hidden_size(self):
- return self.n_embd
-
- @property
- def num_attention_heads(self):
- return self.n_head
-
- @property
- def num_hidden_layers(self):
- return self.n_layer
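-
- # A sketch of a save/reload round trip (assumes the `to_json_file` helper inherited from
- # `PretrainedConfig` in configuration_utils; the reload uses the path branch of `__init__`
- # defined above):
- #
- #     config = OpenAIGPTConfig(n_embd=768, n_layer=12, n_head=12)
- #     config.to_json_file("./openai-gpt-config.json")
- #     reloaded = OpenAIGPTConfig("./openai-gpt-config.json")
- #     assert reloaded.n_embd == config.n_embd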