diff --git a/tensorflow_examples/models/densenet/densenet.py b/tensorflow_examples/models/densenet/densenet.py
index d98509a5dfd..2254a2dfef1 100644
--- a/tensorflow_examples/models/densenet/densenet.py
+++ b/tensorflow_examples/models/densenet/densenet.py
@@ -188,7 +188,7 @@ def __init__(self, num_filters, data_format,
         data_format=data_format,
         kernel_initializer="he_normal",
         kernel_regularizer=l2(weight_decay))
-    self.avg_pool = tf.keras.layers.AveragePooling2D(data_format=data_format)
+    self.avg_pool = tf.keras.layers.AveragePooling2D(pool_size=(2, 2), data_format=data_format)
 
   def call(self, x, training=True):
     output = self.batchnorm(x, training=training)
diff --git a/tensorflow_examples/models/nmt_with_attention/train.py b/tensorflow_examples/models/nmt_with_attention/train.py
index 13914ce1fe9..705a322d76a 100644
--- a/tensorflow_examples/models/nmt_with_attention/train.py
+++ b/tensorflow_examples/models/nmt_with_attention/train.py
@@ -154,8 +154,8 @@ def training_loop(self, train_ds, test_ds):
     template = 'Epoch: {}, Train Loss: {}, Test Loss: {}'
 
     for epoch in range(self.epochs):
-      self.train_loss_metric.reset_states()
-      self.test_loss_metric.reset_states()
+      self.train_loss_metric.reset_state()
+      self.test_loss_metric.reset_state()
 
       for inp, targ in train_ds:
         self.train_step((inp, targ))
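
Note (not part of the diff, a minimal sketch of the APIs involved): in recent TensorFlow/Keras
releases the metric reset method is spelled reset_state(), with reset_states() as the older
name, and AveragePooling2D takes its pool size via the pool_size argument, whose default is
already (2, 2).

    import tensorflow as tf

    # AveragePooling2D with an explicit pool size, as in the densenet change.
    avg_pool = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),
                                                data_format='channels_last')

    # Keras metrics: update_state() accumulates, reset_state() clears the
    # accumulator at the start of each epoch, as in the nmt training loop.
    train_loss_metric = tf.keras.metrics.Mean(name='train_loss')
    train_loss_metric.update_state(1.5)
    print(float(train_loss_metric.result()))  # 1.5
    train_loss_metric.reset_state()
    print(float(train_loss_metric.result()))  # 0.0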