[*.py] Use f-strings and os.path.join throughout #21343

Open · wants to merge 6 commits into master
10 changes: 7 additions & 3 deletions api_gen.py
@@ -84,9 +84,13 @@ def create_legacy_directory(package_dir):
         for fname in fnames:
             if fname.endswith(".py"):
                 legacy_fpath = os.path.join(root, fname)
-                tf_keras_root = root.replace("/_legacy", "/_tf_keras/keras")
+                tf_keras_root = root.replace(
+                    os.path.join(os.path.sep, "_legacy"),
+                    os.path.join(os.path.sep, "_tf_keras", "keras"),
+                )
                 core_api_fpath = os.path.join(
-                    root.replace("/_legacy", ""), fname
+                    root.replace(os.path.join(os.path.sep, "_legacy"), ""),
+                    fname,
                 )
                 if not os.path.exists(tf_keras_root):
                     os.makedirs(tf_keras_root)
@@ -125,7 +129,7 @@ def create_legacy_directory(package_dir):
r"\n",
core_api_contents,
)
legacy_contents = core_api_contents + "\n" + legacy_contents
legacy_contents = f"{core_api_contents}\n{legacy_contents}"
with open(tf_keras_fpath, "w") as f:
f.write(legacy_contents)

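Why the `os.path.sep` form: on POSIX `os.path.sep` is `"/"` while on Windows it is `"\\"`, so building the search fragments with `os.path.join` keeps the `replace()` portable across platforms. A minimal sketch of the behavior (the path below is illustrative):

```python
import os

# "/_legacy" on POSIX, "\_legacy" on Windows.
legacy_fragment = os.path.join(os.path.sep, "_legacy")
tf_keras_fragment = os.path.join(os.path.sep, "_tf_keras", "keras")

root = os.path.join("keras", "api", "_legacy", "layers")
print(root.replace(legacy_fragment, tf_keras_fragment))
# keras/api/_tf_keras/keras/layers (on POSIX)
```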
5 changes: 3 additions & 2 deletions guides/distributed_training_with_tensorflow.py
@@ -194,7 +194,8 @@ def make_or_restore_model():
     # Either restore the latest model, or create a fresh one
     # if there is no checkpoint available.
     checkpoints = [
-        checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)
+        os.path.join(checkpoint_dir, name)
+        for name in os.listdir(checkpoint_dir)
     ]
     if checkpoints:
         latest_checkpoint = max(checkpoints, key=os.path.getctime)
@@ -216,7 +217,7 @@ def run_training(epochs=1):
         # This callback saves a SavedModel every epoch
         # We include the current epoch in the folder name.
         keras.callbacks.ModelCheckpoint(
-            filepath=checkpoint_dir + "/ckpt-{epoch}.keras",
+            filepath=os.path.join(checkpoint_dir, "ckpt-{epoch}.keras"),
             save_freq="epoch",
         )
     ]
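Note that `os.path.join` is plain string joining, so the `{epoch}` placeholder passes through intact for `ModelCheckpoint` to fill in at save time. A quick sketch (directory name illustrative):

```python
import os

checkpoint_dir = "./ckpt"  # illustrative
filepath = os.path.join(checkpoint_dir, "ckpt-{epoch}.keras")
print(filepath)  # ./ckpt/ckpt-{epoch}.keras
```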
6 changes: 4 additions & 2 deletions guides/training_with_built_in_methods.py
@@ -1133,7 +1133,8 @@ def make_or_restore_model():
     # Either restore the latest model, or create a fresh one
     # if there is no checkpoint available.
     checkpoints = [
-        checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)
+        os.path.join(checkpoint_dir, name)
+        for name in os.listdir(checkpoint_dir)
     ]
     if checkpoints:
         latest_checkpoint = max(checkpoints, key=os.path.getctime)
@@ -1148,7 +1149,8 @@ def make_or_restore_model():
     # This callback saves the model every 100 batches.
     # We include the training loss in the saved model name.
     keras.callbacks.ModelCheckpoint(
-        filepath=checkpoint_dir + "/model-loss={loss:.2f}.keras", save_freq=100
+        filepath=os.path.join(checkpoint_dir, "model-loss={loss:.2f}.keras"),
+        save_freq=100,
     )
 ]
 model.fit(x_train, y_train, epochs=1, callbacks=callbacks)
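These filename templates are one place the conversion deliberately stops short of f-strings: `{loss:.2f}` is formatted by `ModelCheckpoint` at save time, so turning it into an f-string would evaluate too early (or raise a `NameError`). A sketch of the distinction, with an illustrative value:

```python
loss = 0.1234  # illustrative

immediate = f"model-loss={loss:.2f}.keras"  # evaluated now -> "model-loss=0.12.keras"
template = "model-loss={loss:.2f}.keras"    # left for the callback

# Roughly what the callback does internally at save time:
assert template.format(loss=loss) == immediate
```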
15 changes: 7 additions & 8 deletions integration_tests/import_test.py
@@ -52,18 +52,17 @@ def manage_venv_installs(whl_path):
     backend_pkg, backend_extra_url = BACKEND_REQ[backend.backend()]
     install_setup = [
         # Installs the backend's package and common requirements
-        "pip install " + backend_extra_url + backend_pkg,
+        f"pip install {backend_extra_url}{backend_pkg}",
         "pip install -r requirements-common.txt",
         "pip install pytest",
         # Ensure other backends are uninstalled
-        "pip uninstall -y "
-        + BACKEND_REQ[other_backends[0]][0]
-        + " "
-        + BACKEND_REQ[other_backends[1]][0]
-        + " "
-        + BACKEND_REQ[other_backends[2]][0],
+        "pip uninstall -y {0} {1} {2}".format(
+            BACKEND_REQ[other_backends[0]][0],
+            BACKEND_REQ[other_backends[1]][0],
+            BACKEND_REQ[other_backends[2]][0],
+        ),
         # Install `.whl` package
-        "pip install " + whl_path,
+        f"pip install {whl_path}",
     ]
     run_commands_venv(install_setup)

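The uninstall command uses `str.format` rather than an f-string, which keeps the repeated `BACKEND_REQ[...]` lookups off one long line. An equivalent f-string form (sketch with an illustrative stand-in mapping) would hoist the lookups first:

```python
# Illustrative stand-in for the module's real BACKEND_REQ mapping.
BACKEND_REQ = {
    "torch": ("torch", ""),
    "jax": ("jax[cpu]", ""),
    "openvino": ("openvino", ""),
}
other_backends = ["torch", "jax", "openvino"]

pkgs = " ".join(BACKEND_REQ[b][0] for b in other_backends)
cmd = f"pip uninstall -y {pkgs}"
print(cmd)  # pip uninstall -y torch jax[cpu] openvino
```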
6 changes: 3 additions & 3 deletions integration_tests/model_visualization_test.py
@@ -44,7 +44,7 @@ def get_node_dict(graph, path=""):

     for subgraph in graph.get_subgraphs():
         sub_nodes = get_node_dict(
-            subgraph, path=path + subgraph.get_label() + " > "
+            subgraph, path=f"{path}{subgraph.get_label()} > "
         )
         nodes.update(sub_nodes)

@@ -85,7 +85,7 @@ def get_edges(graph):
 class ModelVisualizationTest(testing.TestCase):
     def multi_plot_model(self, model, name, expand_nested=False):
         if expand_nested:
-            name = name + "-expand_nested"
+            name = f"{name}-expand_nested"

         TEST_CASES = [
             {},
@@ -130,7 +130,7 @@ def multi_plot_model(self, model, name, expand_nested=False):

         for test_case in TEST_CASES:
             tags = [v if k == "rankdir" else k for k, v in test_case.items()]
-            file_name = "-".join([name] + tags) + ".png"
+            file_name = f"{'-'.join([name] + tags)}.png"
             plot_model(
                 model, file_name, expand_nested=expand_nested, **test_case
             )
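One readability caveat with the `file_name` change: the expression inside the f-string must use quotes that differ from the outer ones (required before Python 3.12). A sketch with illustrative values, plus an equivalent two-step form:

```python
name = "plot"                 # illustrative
tags = ["show_shapes", "LR"]  # illustrative

file_name = f"{'-'.join([name] + tags)}.png"

# Equivalent and arguably clearer:
stem = "-".join([name] + tags)
assert f"{stem}.png" == file_name == "plot-show_shapes-LR.png"
```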
36 changes: 18 additions & 18 deletions keras/src/applications/convnext.py
@@ -254,25 +254,25 @@ def apply(inputs):
             kernel_size=7,
             padding="same",
             groups=projection_dim,
-            name=name + "_depthwise_conv",
+            name=f"{name}_depthwise_conv",
         )(x)
-        x = layers.LayerNormalization(epsilon=1e-6, name=name + "_layernorm")(x)
-        x = layers.Dense(4 * projection_dim, name=name + "_pointwise_conv_1")(x)
-        x = layers.Activation("gelu", name=name + "_gelu")(x)
-        x = layers.Dense(projection_dim, name=name + "_pointwise_conv_2")(x)
+        x = layers.LayerNormalization(epsilon=1e-6, name=f"{name}_layernorm")(x)
+        x = layers.Dense(4 * projection_dim, name=f"{name}_pointwise_conv_1")(x)
+        x = layers.Activation("gelu", name=f"{name}_gelu")(x)
+        x = layers.Dense(projection_dim, name=f"{name}_pointwise_conv_2")(x)

         if layer_scale_init_value is not None:
             x = LayerScale(
                 layer_scale_init_value,
                 projection_dim,
-                name=name + "_layer_scale",
+                name=f"{name}_layer_scale",
             )(x)
         if drop_path_rate:
             layer = StochasticDepth(
-                drop_path_rate, name=name + "_stochastic_depth"
+                drop_path_rate, name=f"{name}_stochastic_depth"
             )
         else:
-            layer = layers.Activation("linear", name=name + "_identity")
+            layer = layers.Activation("linear", name=f"{name}_identity")

         return inputs + layer(x)

@@ -292,7 +292,7 @@ def apply(x):
                 (0.224 * 255) ** 2,
                 (0.225 * 255) ** 2,
             ],
-            name=name + "_prestem_normalization",
+            name=f"{name}_prestem_normalization",
         )(x)
         return x

@@ -314,14 +314,14 @@ def Head(num_classes=1000, classifier_activation=None, name=None):
         name = str(backend.get_uid("head"))

     def apply(x):
-        x = layers.GlobalAveragePooling2D(name=name + "_head_gap")(x)
+        x = layers.GlobalAveragePooling2D(name=f"{name}_head_gap")(x)
         x = layers.LayerNormalization(
-            epsilon=1e-6, name=name + "_head_layernorm"
+            epsilon=1e-6, name=f"{name}_head_layernorm"
         )(x)
         x = layers.Dense(
             num_classes,
             activation=classifier_activation,
-            name=name + "_head_dense",
+            name=f"{name}_head_dense",
         )(x)
         return x

@@ -452,13 +452,13 @@ def ConvNeXt(
                 projection_dims[0],
                 kernel_size=4,
                 strides=4,
-                name=name + "_stem_conv",
+                name=f"{name}_stem_conv",
             ),
             layers.LayerNormalization(
-                epsilon=1e-6, name=name + "_stem_layernorm"
+                epsilon=1e-6, name=f"{name}_stem_layernorm"
             ),
         ],
-        name=name + "_stem",
+        name=f"{name}_stem",
     )

     # Downsampling blocks.
@@ -471,16 +471,16 @@
             [
                 layers.LayerNormalization(
                     epsilon=1e-6,
-                    name=name + "_downsampling_layernorm_" + str(i),
+                    name=f"{name}_downsampling_layernorm_{i}",
                 ),
                 layers.Conv2D(
                     projection_dims[i + 1],
                     kernel_size=2,
                     strides=2,
-                    name=name + "_downsampling_conv_" + str(i),
+                    name=f"{name}_downsampling_conv_{i}",
                 ),
             ],
-            name=name + "_downsampling_block_" + str(i),
+            name=f"{name}_downsampling_block_{i}",
         )
         downsample_layers.append(downsample_layer)

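The payoff of the uniform `f"{name}_<suffix>"` scheme is predictable layer lookup. A hedged sketch (the prefix follows the upstream `f"{model_name}_stage_{i}_block_{j}"` convention, but treat the exact name as an assumption):

```python
from keras.applications import ConvNeXtTiny

model = ConvNeXtTiny(weights=None)

# Fetch a sub-layer by its constructed name (name assumed, see above).
gelu = model.get_layer("convnext_tiny_stage_0_block_0_gelu")
```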
42 changes: 21 additions & 21 deletions keras/src/applications/densenet.py
@@ -10,25 +10,25 @@
"https://storage.googleapis.com/tensorflow/keras-applications/densenet/"
)
DENSENET121_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + "densenet121_weights_tf_dim_ordering_tf_kernels.h5"
f"{BASE_WEIGHTS_PATH}densenet121_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET121_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH
+ "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"
f"{BASE_WEIGHTS_PATH}"
"densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
DENSENET169_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + "densenet169_weights_tf_dim_ordering_tf_kernels.h5"
f"{BASE_WEIGHTS_PATH}densenet169_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET169_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH
+ "densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5"
f"{BASE_WEIGHTS_PATH}"
"densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
DENSENET201_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + "densenet201_weights_tf_dim_ordering_tf_kernels.h5"
f"{BASE_WEIGHTS_PATH}densenet201_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET201_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH
+ "densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5"
f"{BASE_WEIGHTS_PATH}"
"densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5"
)


@@ -44,7 +44,7 @@ def dense_block(x, blocks, name):
         Output tensor for the block.
     """
     for i in range(blocks):
-        x = conv_block(x, 32, name=name + "_block" + str(i + 1))
+        x = conv_block(x, 32, name=f"{name}_block{i + 1}")
     return x


@@ -61,16 +61,16 @@ def transition_block(x, reduction, name):
"""
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + "_bn"
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_bn"
)(x)
x = layers.Activation("relu", name=name + "_relu")(x)
x = layers.Activation("relu", name=f"{name}_relu")(x)
x = layers.Conv2D(
int(x.shape[bn_axis] * reduction),
1,
use_bias=False,
name=name + "_conv",
name=f"{name}_conv",
)(x)
x = layers.AveragePooling2D(2, strides=2, name=name + "_pool")(x)
x = layers.AveragePooling2D(2, strides=2, name=f"{name}_pool")(x)
return x


@@ -87,20 +87,20 @@ def conv_block(x, growth_rate, name):
"""
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
x1 = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + "_0_bn"
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_0_bn"
)(x)
x1 = layers.Activation("relu", name=name + "_0_relu")(x1)
x1 = layers.Activation("relu", name=f"{name}_0_relu")(x1)
x1 = layers.Conv2D(
4 * growth_rate, 1, use_bias=False, name=name + "_1_conv"
4 * growth_rate, 1, use_bias=False, name=f"{name}_1_conv"
)(x1)
x1 = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + "_1_bn"
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_1_bn"
)(x1)
x1 = layers.Activation("relu", name=name + "_1_relu")(x1)
x1 = layers.Activation("relu", name=f"{name}_1_relu")(x1)
x1 = layers.Conv2D(
growth_rate, 3, padding="same", use_bias=False, name=name + "_2_conv"
growth_rate, 3, padding="same", use_bias=False, name=f"{name}_2_conv"
)(x1)
x = layers.Concatenate(axis=bn_axis, name=name + "_concat")([x, x1])
x = layers.Concatenate(axis=bn_axis, name=f"{name}_concat")([x, x1])
return x


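The `_NO_TOP` constants rely on implicit concatenation of adjacent string literals, in which an f-string may participate; this keeps the long URLs within the line limit. A minimal sketch (real base URL from the diff, same filename):

```python
BASE_WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/densenet/"
)

# Adjacent literals are concatenated at compile time.
url = (
    f"{BASE_WEIGHTS_PATH}"
    "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
assert url == BASE_WEIGHTS_PATH + (
    "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
```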
32 changes: 16 additions & 16 deletions keras/src/applications/efficientnet.py
@@ -479,18 +479,18 @@ def block(
padding="same",
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + "expand_conv",
name=f"{name}expand_conv",
)(inputs)
x = layers.BatchNormalization(axis=bn_axis, name=name + "expand_bn")(x)
x = layers.Activation(activation, name=name + "expand_activation")(x)
x = layers.BatchNormalization(axis=bn_axis, name=f"{name}expand_bn")(x)
x = layers.Activation(activation, name=f"{name}expand_activation")(x)
else:
x = inputs

# Depthwise Convolution
if strides == 2:
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, kernel_size),
name=name + "dwconv_pad",
name=f"{name}dwconv_pad",
)(x)
conv_pad = "valid"
else:
@@ -501,37 +501,37 @@ def block(
         padding=conv_pad,
         use_bias=False,
         depthwise_initializer=CONV_KERNEL_INITIALIZER,
-        name=name + "dwconv",
+        name=f"{name}dwconv",
     )(x)
-    x = layers.BatchNormalization(axis=bn_axis, name=name + "bn")(x)
-    x = layers.Activation(activation, name=name + "activation")(x)
+    x = layers.BatchNormalization(axis=bn_axis, name=f"{name}bn")(x)
+    x = layers.Activation(activation, name=f"{name}activation")(x)

     # Squeeze and Excitation phase
     if 0 < se_ratio <= 1:
         filters_se = max(1, int(filters_in * se_ratio))
-        se = layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x)
+        se = layers.GlobalAveragePooling2D(name=f"{name}se_squeeze")(x)
         if bn_axis == 1:
             se_shape = (filters, 1, 1)
         else:
             se_shape = (1, 1, filters)
-        se = layers.Reshape(se_shape, name=name + "se_reshape")(se)
+        se = layers.Reshape(se_shape, name=f"{name}se_reshape")(se)
         se = layers.Conv2D(
             filters_se,
             1,
             padding="same",
             activation=activation,
             kernel_initializer=CONV_KERNEL_INITIALIZER,
-            name=name + "se_reduce",
+            name=f"{name}se_reduce",
         )(se)
         se = layers.Conv2D(
             filters,
             1,
             padding="same",
             activation="sigmoid",
             kernel_initializer=CONV_KERNEL_INITIALIZER,
-            name=name + "se_expand",
+            name=f"{name}se_expand",
         )(se)
-        x = layers.multiply([x, se], name=name + "se_excite")
+        x = layers.multiply([x, se], name=f"{name}se_excite")

     # Output phase
     x = layers.Conv2D(
@@ -540,15 +540,15 @@ def block(
padding="same",
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + "project_conv",
name=f"{name}project_conv",
)(x)
x = layers.BatchNormalization(axis=bn_axis, name=name + "project_bn")(x)
x = layers.BatchNormalization(axis=bn_axis, name=f"{name}project_bn")(x)
if id_skip and strides == 1 and filters_in == filters_out:
if drop_rate > 0:
x = layers.Dropout(
drop_rate, noise_shape=(None, 1, 1, 1), name=name + "drop"
drop_rate, noise_shape=(None, 1, 1, 1), name=f"{name}drop"
)(x)
x = layers.add([x, inputs], name=name + "add")
x = layers.add([x, inputs], name=f"{name}add")
return x


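Unlike the other applications, these names concatenate without an underscore because the `name` argument passed into `block()` already ends with one. A sketch of the upstream calling convention (the prefix construction is an illustrative reconstruction):

```python
i, j = 0, 0
name = f"block{i + 1}{chr(j + 97)}_"  # "block1a_"
assert f"{name}expand_conv" == "block1a_expand_conv"
```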