Revert "Attempt #3"
This reverts commit b9df7a0.
Rocketknight1 committed Dec 6, 2023
1 parent 420c597 commit 75954b6
Showing 66 changed files with 178 additions and 10,434 deletions.
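For context when reading the diff below: the reverted commit had given each TF ALBERT layer an explicit `build()` override that returns early when `self.built` is set and then builds every sub-layer under its own `tf.name_scope` with a hard-coded last dimension; the revert restores the original `build(self, input_shape: tf.TensorShape)` signatures, lets Keras build sub-layers lazily on first call, and drops the now-unused `self.config` / `self.hidden_size` attributes. Below is a minimal sketch of the two styles, not the transformers source; `ExampleBlock`, its sub-layers, and `hidden_size` are hypothetical names chosen for illustration.

```python
# A hedged sketch contrasting the two build() styles this diff moves between.
import tensorflow as tf


class ExampleBlock(tf.keras.layers.Layer):
    def __init__(self, hidden_size: int, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.dense = tf.keras.layers.Dense(hidden_size, name="dense")
        self.LayerNorm = tf.keras.layers.LayerNormalization(name="LayerNorm")

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        return self.LayerNorm(self.dense(hidden_states))

    # Style restored by the revert: accept the shape Keras passes in and let
    # each sub-layer build itself lazily the first time it is called.
    def build(self, input_shape: tf.TensorShape):
        super().build(input_shape)

    # Style removed by the revert (renamed here so both can coexist): guard on
    # self.built, then build every sub-layer explicitly under its own name
    # scope with a known input shape.
    def build_explicitly(self):
        if self.built:
            return
        self.built = True
        with tf.name_scope(self.dense.name):
            self.dense.build([None, None, self.hidden_size])
        with tf.name_scope(self.LayerNorm.name):
            self.LayerNorm.build([None, None, self.hidden_size])


# Calling the layer uses the normal Keras build-on-first-call flow.
block = ExampleBlock(hidden_size=64)
_ = block(tf.zeros((2, 8, 64)))
```

The practical difference is only where weights get created: eagerly from explicit shapes at build time, or lazily from whatever shape the first real batch has; the sketch illustrates the mechanics, not which choice the library settled on.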
195 changes: 4 additions & 191 deletions src/transformers/models/albert/modeling_tf_albert.py
@@ -146,7 +146,7 @@ def __init__(self, config: AlbertConfig, **kwargs):
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)

def build(self, input_shape=None):
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
@@ -168,12 +168,7 @@ def build(self, input_shape=None):
initializer=get_initializer(self.initializer_range),
)

if self.built:
return
self.built = True
if getattr(self, "LayerNorm", None) is not None:
with tf.name_scope(self.LayerNorm.name):
self.LayerNorm.build([None, None, self.config.embedding_size])
super().build(input_shape)

# Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
def call(
@@ -251,8 +246,6 @@ def __init__(self, config: AlbertConfig, **kwargs):
# Two different dropout probabilities; see https://github.com/google-research/albert/blob/master/modeling.py#L971-L993
self.attention_dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
self.output_dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.hidden_size = config.hidden_size
self.config = config

def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
@@ -314,26 +307,6 @@ def call(

return outputs

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "query", None) is not None:
with tf.name_scope(self.query.name):
self.query.build(self.config.hidden_size)
if getattr(self, "key", None) is not None:
with tf.name_scope(self.key.name):
self.key.build(self.config.hidden_size)
if getattr(self, "value", None) is not None:
with tf.name_scope(self.value.name):
self.value.build(self.config.hidden_size)
if getattr(self, "dense", None) is not None:
with tf.name_scope(self.dense.name):
self.dense.build(self.config.hidden_size)
if getattr(self, "LayerNorm", None) is not None:
with tf.name_scope(self.LayerNorm.name):
self.LayerNorm.build([None, None, self.config.hidden_size])


class TFAlbertLayer(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
@@ -356,9 +329,6 @@ def __init__(self, config: AlbertConfig, **kwargs):
epsilon=config.layer_norm_eps, name="full_layer_layer_norm"
)
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.intermediate_size = config.intermediate_size
self.hidden_size = config.hidden_size
self.config = config

def call(
self,
@@ -386,23 +356,6 @@ def call(

return outputs

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "attention", None) is not None:
with tf.name_scope(self.attention.name):
self.attention.build(None)
if getattr(self, "ffn", None) is not None:
with tf.name_scope(self.ffn.name):
self.ffn.build(self.config.hidden_size)
if getattr(self, "ffn_output", None) is not None:
with tf.name_scope(self.ffn_output.name):
self.ffn_output.build(self.config.intermediate_size)
if getattr(self, "full_layer_layer_norm", None) is not None:
with tf.name_scope(self.full_layer_layer_norm.name):
self.full_layer_layer_norm.build([None, None, self.config.hidden_size])


class TFAlbertLayerGroup(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
@@ -446,15 +399,6 @@ def call(

return tuple(v for v in [hidden_states, layer_hidden_states, layer_attentions] if v is not None)

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "albert_layers", None) is not None:
for layer in self.albert_layers:
with tf.name_scope(layer.name):
layer.build(None)


class TFAlbertTransformer(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
@@ -472,7 +416,6 @@ def __init__(self, config: AlbertConfig, **kwargs):
self.albert_layer_groups = [
TFAlbertLayerGroup(config, name=f"albert_layer_groups_._{i}") for i in range(config.num_hidden_groups)
]
self.config = config

def call(
self,
@@ -514,18 +457,6 @@ def call(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "embedding_hidden_mapping_in", None) is not None:
with tf.name_scope(self.embedding_hidden_mapping_in.name):
self.embedding_hidden_mapping_in.build(self.config.embedding_size)
if getattr(self, "albert_layer_groups", None) is not None:
for layer in self.albert_layer_groups:
with tf.name_scope(layer.name):
layer.build(None)


class TFAlbertPreTrainedModel(TFPreTrainedModel):
"""
@@ -556,23 +487,14 @@ def __init__(self, config: AlbertConfig, input_embeddings: tf.keras.layers.Layer
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = input_embeddings
self.hidden_size = config.hidden_size

def build(self, input_shape=None):
def build(self, input_shape: tf.TensorShape):
self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
self.decoder_bias = self.add_weight(
shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias"
)

if self.built:
return
self.built = True
if getattr(self, "dense", None) is not None:
with tf.name_scope(self.dense.name):
self.dense.build(self.config.hidden_size)
if getattr(self, "LayerNorm", None) is not None:
with tf.name_scope(self.LayerNorm.name):
self.LayerNorm.build([None, None, self.config.embedding_size])
super().build(input_shape)

def get_output_embeddings(self) -> tf.keras.layers.Layer:
return self.decoder
@@ -728,20 +650,6 @@ def call(
attentions=encoder_outputs.attentions,
)

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "embeddings", None) is not None:
with tf.name_scope(self.embeddings.name):
self.embeddings.build(None)
if getattr(self, "encoder", None) is not None:
with tf.name_scope(self.encoder.name):
self.encoder.build(None)
if getattr(self, "pooler", None) is not None:
with tf.name_scope(self.pooler.name):
self.pooler.build(None) # TODO Matt might be wrong


@dataclass
class TFAlbertForPreTrainingOutput(ModelOutput):
@@ -917,14 +825,6 @@ def call(

return outputs

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "albert", None) is not None:
with tf.name_scope(self.albert.name):
self.albert.build(None)


@add_start_docstrings(
"""
@@ -1021,20 +921,6 @@ def call(
attentions=outputs.attentions,
)

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "albert", None) is not None:
with tf.name_scope(self.albert.name):
self.albert.build(None)
if getattr(self, "predictions", None) is not None:
with tf.name_scope(self.predictions.name):
self.predictions.build(None)
if getattr(self, "sop_classifier", None) is not None:
with tf.name_scope(self.sop_classifier.name):
self.sop_classifier.build(None)


class TFAlbertSOPHead(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
@@ -1046,23 +932,13 @@ def __init__(self, config: AlbertConfig, **kwargs):
kernel_initializer=get_initializer(config.initializer_range),
name="classifier",
)
self.hidden_size = config.hidden_size
self.config = config

def call(self, pooled_output: tf.Tensor, training: bool) -> tf.Tensor:
dropout_pooled_output = self.dropout(inputs=pooled_output, training=training)
logits = self.classifier(inputs=dropout_pooled_output)

return logits

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "classifier", None) is not None:
with tf.name_scope(self.classifier.name):
self.classifier.build(self.config.hidden_size)


@add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING)
class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss):
@@ -1159,17 +1035,6 @@ def call(
attentions=outputs.attentions,
)

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "albert", None) is not None:
with tf.name_scope(self.albert.name):
self.albert.build(None)
if getattr(self, "predictions", None) is not None:
with tf.name_scope(self.predictions.name):
self.predictions.build(None)


@add_start_docstrings(
"""
@@ -1193,8 +1058,6 @@ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
self.classifier = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
self.hidden_size = config.hidden_size
self.config = config

@unpack_inputs
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@@ -1254,17 +1117,6 @@ def call(
attentions=outputs.attentions,
)

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "albert", None) is not None:
with tf.name_scope(self.albert.name):
self.albert.build(None)
if getattr(self, "classifier", None) is not None:
with tf.name_scope(self.classifier.name):
self.classifier.build(self.config.hidden_size)


@add_start_docstrings(
"""
@@ -1293,8 +1145,6 @@ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
self.classifier = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
self.hidden_size = config.hidden_size
self.config = config

@unpack_inputs
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@@ -1350,17 +1200,6 @@ def call(
attentions=outputs.attentions,
)

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "albert", None) is not None:
with tf.name_scope(self.albert.name):
self.albert.build(None)
if getattr(self, "classifier", None) is not None:
with tf.name_scope(self.classifier.name):
self.classifier.build(self.config.hidden_size)


@add_start_docstrings(
"""
@@ -1382,8 +1221,6 @@ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
self.qa_outputs = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
self.hidden_size = config.hidden_size
self.config = config

@unpack_inputs
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@@ -1458,17 +1295,6 @@ def call(
attentions=outputs.attentions,
)

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "albert", None) is not None:
with tf.name_scope(self.albert.name):
self.albert.build(None)
if getattr(self, "qa_outputs", None) is not None:
with tf.name_scope(self.qa_outputs.name):
self.qa_outputs.build(self.config.hidden_size)


@add_start_docstrings(
"""
@@ -1490,8 +1316,6 @@ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
self.classifier = tf.keras.layers.Dense(
units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
self.hidden_size = config.hidden_size
self.config = config

@unpack_inputs
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@@ -1570,14 +1394,3 @@ def call(
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)

def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "albert", None) is not None:
with tf.name_scope(self.albert.name):
self.albert.build(None)
if getattr(self, "classifier", None) is not None:
with tf.name_scope(self.classifier.name):
self.classifier.build(self.config.hidden_size)