This repository was archived by the owner on Jul 7, 2023. It is now read-only.

Commit fb3c08f

Merge pull request #617 from aman-tiwari/master
Fixed use of name_scope by using named arguments whenever needed
2 parents f7d6cf0 + 880c356 commit fb3c08f
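
The change matters because of how TF 1.x binds positional arguments: with the signature `tf.name_scope(name, default_name=None, values=None)`, a call like `tf.name_scope("pool", [inputs])` binds the tensor list to `default_name` rather than `values`. A minimal sketch, assuming that TF 1.x signature; the example is illustrative and not part of the commit:

import tensorflow as tf  # assumes TF 1.x, where tf.name_scope(name, default_name=None, values=None)

x = tf.constant([0.5, -1.0])

# Old form: the list binds positionally to `default_name` and, because a name is
# given, is silently ignored; `values` stays None.
with tf.name_scope("saturating_sigmoid", [x]):
  y_old = tf.sigmoid(x)

# Fixed form: the list binds to `values`, which tf.name_scope uses to pick the
# graph the new ops are created in.
with tf.name_scope("saturating_sigmoid", values=[x]):
  y_new = tf.sigmoid(x)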

3 files changed, 13 insertions(+), 13 deletions(-)


tensor2tensor/data_generators/imagenet.py

Lines changed: 1 addition & 1 deletion
@@ -334,7 +334,7 @@ def distorted_bounding_box_crop(image,
   Returns:
     (cropped image `Tensor`, distorted bbox `Tensor`).
   """
-  with tf.name_scope(scope, "distorted_bounding_box_crop", [image, bbox]):
+  with tf.name_scope(scope, default_name="distorted_bounding_box_crop", values=[image, bbox]):
     # Each bounding box has shape [1, num_boxes, box coords] and
     # the coordinates are ordered [ymin, xmin, ymax, xmax].

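This call differs from the ones in common_layers.py: it already passed three positional arguments in the order `(name, default_name, values)`, so under the assumed TF 1.x signature it was functionally correct, and the keyword form only makes the intent explicit. A small illustrative sketch of the `default_name` fallback; the tensors below are placeholders of my own, not from the repo:

import tensorflow as tf  # TF 1.x assumed

image = tf.zeros([224, 224, 3])
bbox = tf.constant([[[0.0, 0.0, 1.0, 1.0]]])

# When the first argument (the caller-supplied scope) is None, default_name is
# used; the tensors in `values` tie the scope to the graph that created them.
with tf.name_scope(None, default_name="distorted_bounding_box_crop",
                   values=[image, bbox]) as scope:
  print(scope)  # something like "distorted_bounding_box_crop/"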
tensor2tensor/layers/common_layers.py

Lines changed: 11 additions & 11 deletions
@@ -76,7 +76,7 @@ def comma_separated_string_to_integer_list(s):
 
 def saturating_sigmoid(x):
   """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
-  with tf.name_scope("saturating_sigmoid", [x]):
+  with tf.name_scope("saturating_sigmoid", values=[x]):
     y = tf.sigmoid(x)
     return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1))
 
@@ -173,7 +173,7 @@ def shakeshake(xs, equal_grad=False):
 
 def convert_rgb_to_real(x):
   """Conversion of pixel values to real numbers."""
-  with tf.name_scope("rgb_to_real", [x]):
+  with tf.name_scope("rgb_to_real", values=[x]):
     x = tf.to_float(x)
     # Use the formula (value/128) - 1 to convert each channel value into a
     # real number in the range -1 to 1.
@@ -795,7 +795,7 @@ def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
 
 def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
   """Pooling (supports "LEFT")."""
-  with tf.name_scope("pool", [inputs]):
+  with tf.name_scope("pool", values=[inputs]):
     static_shape = inputs.get_shape()
     if not static_shape or len(static_shape) != 4:
       raise ValueError("Inputs to conv must have statically known rank 4.")
@@ -950,7 +950,7 @@ def simple_attention(target, source, bias=None):
   Returns:
     a `Tensor` with same shape as `target`
   """
-  with tf.name_scope("simple_attention", [target, source]):
+  with tf.name_scope("simple_attention", values=[target, source]):
     target_shape = shape_list(target)
     source_shape = shape_list(source)
     target = tf.reshape(
@@ -1516,7 +1516,7 @@ def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
   """Pad tensors x and y on axis 1 so that they have the same length."""
   if axis not in [1, 2]:
     raise ValueError("Only axis=1 and axis=2 supported for now.")
-  with tf.name_scope("pad_to_same_length", [x, y]):
+  with tf.name_scope("pad_to_same_length", values=[x, y]):
     x_length = shape_list(x)[axis]
     y_length = shape_list(y)[axis]
     max_length = tf.maximum(x_length, y_length)
@@ -1551,7 +1551,7 @@ def padding_list(length_diff, arg):
 
 def pad_with_zeros(logits, labels):
   """Pad labels on the length dimension to match logits length."""
-  with tf.name_scope("pad_with_zeros", [logits, labels]):
+  with tf.name_scope("pad_with_zeros", values=[logits, labels]):
     logits, labels = pad_to_same_length(logits, labels)
     if len(labels.shape.as_list()) == 3:  # 2-d labels.
       logits, labels = pad_to_same_length(logits, labels, axis=2)
@@ -1645,7 +1645,7 @@ def padded_cross_entropy(logits,
         reduce_sum=reduce_sum)
   confidence = 1.0 - label_smoothing
   vocab_size = shape_list(logits)[-1]
-  with tf.name_scope("padded_cross_entropy", [logits, labels]):
+  with tf.name_scope("padded_cross_entropy", values=[logits, labels]):
     if len(logits.get_shape().as_list()) == 2:
       # Deal with the case where we did not insert extra dimensions due to
       # TPU issues. No pad-to-same-length happens in this case.
@@ -1679,7 +1679,7 @@ def smoothing_cross_entropy(logits,
   Returns:
 
   """
-  with tf.name_scope("smoothing_cross_entropy", [logits, labels]):
+  with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
     # Low confidence is given to all non-true labels, uniformly.
     low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
     # Normalizing constant is the best cross-entropy value with soft targets.
@@ -1726,7 +1726,7 @@ def global_pool_1d(inputs, pooling_type="MAX", mask=None):
     output: A tensor of dimensions batch_size x input_dims
       dimension containing the sequences of transformed vectors.
   """
-  with tf.name_scope("global_pool", [inputs]):
+  with tf.name_scope("global_pool", values=[inputs]):
     if mask is not None:
       mask = tf.expand_dims(mask, axis=2)
       inputs = tf.multiply(inputs, mask)
@@ -1763,7 +1763,7 @@ def running_global_pool_1d(inputs, pooling_type="MAX"):
       dimension containing the running 'totals'.
   """
   del pooling_type
-  with tf.name_scope("running_global_pool", [inputs]):
+  with tf.name_scope("running_global_pool", values=[inputs]):
     scan_fct = tf.maximum
     # Permute inputs so seq_length is first.
     elems = tf.transpose(inputs, [1, 0, 2])
@@ -2119,7 +2119,7 @@ def padded_cross_entropy_factored(factored_logits,
   a = factored_logits.a
   b = factored_logits.b
   confidence = 1.0 - label_smoothing
-  with tf.name_scope("padded_cross_entropy_factored", [a, b, labels]):
+  with tf.name_scope("padded_cross_entropy_factored", values=[a, b, labels]):
     labels_flat = tf.reshape(labels, [-1])
     a_flat = tf.reshape(a, [-1, shape_list(b)[1]])
     xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat,

tensor2tensor/layers/modalities.py

Lines changed: 1 addition & 1 deletion
@@ -168,7 +168,7 @@ class CTCSymbolModality(SymbolModality):
 
   def loss(self, logits, targets):
     """Compute the CTC loss."""
-    with tf.name_scope("ctc_loss", [logits, targets]):
+    with tf.name_scope("ctc_loss", values=[logits, targets]):
       # For CTC we assume targets are 1d, [batch, length, 1, 1] here.
      targets_shape = targets.get_shape().as_list()
       assert len(targets_shape) == 4
