@@ -76,7 +76,7 @@ def comma_separated_string_to_integer_list(s):
 
 def saturating_sigmoid(x):
   """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
-  with tf.name_scope("saturating_sigmoid", [x]):
+  with tf.name_scope("saturating_sigmoid", values=[x]):
     y = tf.sigmoid(x)
     return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1))
 
@@ -173,7 +173,7 @@ def shakeshake(xs, equal_grad=False):
 
 def convert_rgb_to_real(x):
   """Conversion of pixel values to real numbers."""
-  with tf.name_scope("rgb_to_real", [x]):
+  with tf.name_scope("rgb_to_real", values=[x]):
     x = tf.to_float(x)
     # Use the formula (value/128) - 1 to convert each channel value into a
     # real number in the range -1 to 1.
@@ -795,7 +795,7 @@ def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
 
 def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
   """Pooling (supports "LEFT")."""
-  with tf.name_scope("pool", [inputs]):
+  with tf.name_scope("pool", values=[inputs]):
     static_shape = inputs.get_shape()
     if not static_shape or len(static_shape) != 4:
       raise ValueError("Inputs to conv must have statically known rank 4.")
@@ -950,7 +950,7 @@ def simple_attention(target, source, bias=None):
   Returns:
     a `Tensor` with same shape as `target`
   """
-  with tf.name_scope("simple_attention", [target, source]):
+  with tf.name_scope("simple_attention", values=[target, source]):
     target_shape = shape_list(target)
     source_shape = shape_list(source)
     target = tf.reshape(
@@ -1516,7 +1516,7 @@ def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
   """Pad tensors x and y on axis 1 so that they have the same length."""
   if axis not in [1, 2]:
     raise ValueError("Only axis=1 and axis=2 supported for now.")
-  with tf.name_scope("pad_to_same_length", [x, y]):
+  with tf.name_scope("pad_to_same_length", values=[x, y]):
     x_length = shape_list(x)[axis]
     y_length = shape_list(y)[axis]
     max_length = tf.maximum(x_length, y_length)
@@ -1551,7 +1551,7 @@ def padding_list(length_diff, arg):
 
 def pad_with_zeros(logits, labels):
   """Pad labels on the length dimension to match logits length."""
-  with tf.name_scope("pad_with_zeros", [logits, labels]):
+  with tf.name_scope("pad_with_zeros", values=[logits, labels]):
     logits, labels = pad_to_same_length(logits, labels)
     if len(labels.shape.as_list()) == 3:  # 2-d labels.
       logits, labels = pad_to_same_length(logits, labels, axis=2)
@@ -1645,7 +1645,7 @@ def padded_cross_entropy(logits,
         reduce_sum=reduce_sum)
   confidence = 1.0 - label_smoothing
   vocab_size = shape_list(logits)[-1]
-  with tf.name_scope("padded_cross_entropy", [logits, labels]):
+  with tf.name_scope("padded_cross_entropy", values=[logits, labels]):
     if len(logits.get_shape().as_list()) == 2:
       # Deal with the case where we did not insert extra dimensions due to
       # TPU issues. No pad-to-same-length happens in this case.
@@ -1679,7 +1679,7 @@ def smoothing_cross_entropy(logits,
   Returns:
 
   """
-  with tf.name_scope("smoothing_cross_entropy", [logits, labels]):
+  with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
     # Low confidence is given to all non-true labels, uniformly.
     low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
     # Normalizing constant is the best cross-entropy value with soft targets.
@@ -1726,7 +1726,7 @@ def global_pool_1d(inputs, pooling_type="MAX", mask=None):
     output: A tensor of dimensions batch_size x input_dims
       dimension containing the sequences of transformed vectors.
   """
-  with tf.name_scope("global_pool", [inputs]):
+  with tf.name_scope("global_pool", values=[inputs]):
     if mask is not None:
       mask = tf.expand_dims(mask, axis=2)
       inputs = tf.multiply(inputs, mask)
@@ -1763,7 +1763,7 @@ def running_global_pool_1d(inputs, pooling_type="MAX"):
       dimension containing the running 'totals'.
   """
   del pooling_type
-  with tf.name_scope("running_global_pool", [inputs]):
+  with tf.name_scope("running_global_pool", values=[inputs]):
     scan_fct = tf.maximum
     # Permute inputs so seq_length is first.
     elems = tf.transpose(inputs, [1, 0, 2])
@@ -2119,7 +2119,7 @@ def padded_cross_entropy_factored(factored_logits,
   a = factored_logits.a
   b = factored_logits.b
   confidence = 1.0 - label_smoothing
-  with tf.name_scope("padded_cross_entropy_factored", [a, b, labels]):
+  with tf.name_scope("padded_cross_entropy_factored", values=[a, b, labels]):
     labels_flat = tf.reshape(labels, [-1])
     a_flat = tf.reshape(a, [-1, shape_list(b)[1]])
     xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat,
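Note on the change: in TF 1.x the signature is tf.name_scope(name, default_name=None, values=None), so a tensor list passed positionally in the second slot binds to default_name, not values. Because every call here supplies an explicit name, the list was effectively ignored and the graph-membership check on those tensors never ran; passing it as values= restores that check. A minimal standalone sketch of the difference, assuming a TF 1.x runtime (the constant and sigmoid are illustrative, mirroring the first hunk):

import tensorflow as tf

x = tf.constant([0.5, -2.0])

# Positional: [x] fills the default_name slot. Since an explicit name is
# given, the list goes unused and the tensors in it are never validated.
with tf.name_scope("saturating_sigmoid", [x]):
  y = tf.sigmoid(x)

# Keyword: [x] binds to values, so name_scope checks that the listed
# tensors belong to the graph the scope is opened in.
with tf.name_scope("saturating_sigmoid", values=[x]):
  y = tf.sigmoid(x)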