diff --git a/ptsemseg/augmentations/__init__.py b/ptsemseg/augmentations/__init__.py index 810e0e9a..6fff2957 100644 --- a/ptsemseg/augmentations/__init__.py +++ b/ptsemseg/augmentations/__init__.py @@ -45,5 +45,5 @@ def get_composed_augmentations(aug_dict): augmentations = [] for aug_key, aug_param in aug_dict.items(): augmentations.append(key2aug[aug_key](aug_param)) - logger.info("Using {} aug with params {}".format(aug_key, aug_param)) + logger.info(f"Using {aug_key} aug with params {aug_param}") return Compose(augmentations) diff --git a/ptsemseg/augmentations/augmentations.py b/ptsemseg/augmentations/augmentations.py index 665f2b43..c727aa46 100644 --- a/ptsemseg/augmentations/augmentations.py +++ b/ptsemseg/augmentations/augmentations.py @@ -157,13 +157,8 @@ def __call__(self, img, mask): x_offset = int(2 * (random.random() - 0.5) * self.offset[0]) y_offset = int(2 * (random.random() - 0.5) * self.offset[1]) - x_crop_offset = x_offset - y_crop_offset = y_offset - if x_offset < 0: - x_crop_offset = 0 - if y_offset < 0: - y_crop_offset = 0 - + x_crop_offset = max(x_offset, 0) + y_crop_offset = max(y_offset, 0) cropped_img = tf.crop( img, y_crop_offset, @@ -175,13 +170,13 @@ def __call__(self, img, mask): if x_offset >= 0 and y_offset >= 0: padding_tuple = (0, 0, x_offset, y_offset) - elif x_offset >= 0 and y_offset < 0: + elif x_offset >= 0: padding_tuple = (0, abs(y_offset), x_offset, 0) - elif x_offset < 0 and y_offset >= 0: + elif y_offset >= 0: padding_tuple = (abs(x_offset), 0, 0, y_offset) - elif x_offset < 0 and y_offset < 0: + else: padding_tuple = (abs(x_offset), abs(y_offset), 0, 0) return ( @@ -237,11 +232,11 @@ def __call__(self, img, mask): if w > h: ow = self.size oh = int(self.size * h / w) - return (img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST)) else: oh = self.size ow = int(self.size * w / h) - return (img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST)) + + return (img.resize((ow, oh), 
Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST)) class RandomSizedCrop(object): @@ -250,7 +245,7 @@ def __init__(self, size): def __call__(self, img, mask): assert img.size == mask.size - for attempt in range(10): + for _ in range(10): area = img.size[0] * img.size[1] target_area = random.uniform(0.45, 1.0) * area aspect_ratio = random.uniform(0.5, 2) diff --git a/ptsemseg/loader/ade20k_loader.py b/ptsemseg/loader/ade20k_loader.py index 8a37bed3..9757cce8 100644 --- a/ptsemseg/loader/ade20k_loader.py +++ b/ptsemseg/loader/ade20k_loader.py @@ -35,7 +35,7 @@ def __init__( if not self.test_mode: for split in ["training", "validation"]: file_list = recursive_glob( - rootdir=self.root + "images/" + self.split + "/", suffix=".jpg" + rootdir=f"{self.root}images/{self.split}/", suffix=".jpg" ) self.files[split] = file_list @@ -44,7 +44,7 @@ def __len__(self): def __getitem__(self, index): img_path = self.files[self.split][index].rstrip() - lbl_path = img_path[:-4] + "_seg.png" + lbl_path = f"{img_path[:-4]}_seg.png" img = m.imread(img_path) img = np.array(img, dtype=np.uint8) @@ -96,7 +96,7 @@ def decode_segmap(self, temp, plot=False): r = temp.copy() g = temp.copy() b = temp.copy() - for l in range(0, self.n_classes): + for l in range(self.n_classes): r[temp == l] = 10 * (l % 10) g[temp == l] = l b[temp == l] = 0 @@ -105,11 +105,10 @@ def decode_segmap(self, temp, plot=False): rgb[:, :, 0] = r / 255.0 rgb[:, :, 1] = g / 255.0 rgb[:, :, 2] = b / 255.0 - if plot: - plt.imshow(rgb) - plt.show() - else: + if not plot: return rgb + plt.imshow(rgb) + plt.show() if __name__ == "__main__": diff --git a/ptsemseg/loader/camvid_loader.py b/ptsemseg/loader/camvid_loader.py index 7e425dd9..931ba084 100644 --- a/ptsemseg/loader/camvid_loader.py +++ b/ptsemseg/loader/camvid_loader.py @@ -33,7 +33,7 @@ def __init__( if not self.test_mode: for split in ["train", "test", "val"]: - file_list = os.listdir(root + "/" + split) + file_list = os.listdir(f"{root}/{split}") self.files[split] 
= file_list def __len__(self): @@ -41,8 +41,8 @@ def __len__(self): def __getitem__(self, index): img_name = self.files[self.split][index] - img_path = self.root + "/" + self.split + "/" + img_name - lbl_path = self.root + "/" + self.split + "annot/" + img_name + img_path = f"{self.root}/{self.split}/{img_name}" + lbl_path = f"{self.root}/{self.split}annot/{img_name}" img = m.imread(img_path) img = np.array(img, dtype=np.uint8) @@ -107,7 +107,7 @@ def decode_segmap(self, temp, plot=False): r = temp.copy() g = temp.copy() b = temp.copy() - for l in range(0, self.n_classes): + for l in range(self.n_classes): r[temp == l] = label_colours[l, 0] g[temp == l] = label_colours[l, 1] b[temp == l] = label_colours[l, 2] diff --git a/ptsemseg/loader/cityscapes_loader.py b/ptsemseg/loader/cityscapes_loader.py index 9fd61705..28f793fd 100644 --- a/ptsemseg/loader/cityscapes_loader.py +++ b/ptsemseg/loader/cityscapes_loader.py @@ -133,7 +133,7 @@ def __init__( self.class_map = dict(zip(self.valid_classes, range(19))) if not self.files[split]: - raise Exception("No files for split=[%s] found in %s" % (split, self.images_base)) + raise Exception(f"No files for split=[{split}] found in {self.images_base}") print("Found %d %s images" % (len(self.files[split]), split)) @@ -150,7 +150,7 @@ def __getitem__(self, index): lbl_path = os.path.join( self.annotations_base, img_path.split(os.sep)[-2], - os.path.basename(img_path)[:-15] + "gtFine_labelIds.png", + f"{os.path.basename(img_path)[:-15]}gtFine_labelIds.png", ) img = m.imread(img_path) @@ -205,7 +205,7 @@ def decode_segmap(self, temp): r = temp.copy() g = temp.copy() b = temp.copy() - for l in range(0, self.n_classes): + for l in range(self.n_classes): r[temp == l] = self.label_colours[l][0] g[temp == l] = self.label_colours[l][1] b[temp == l] = self.label_colours[l][2] diff --git a/ptsemseg/loader/mapillary_vistas_loader.py b/ptsemseg/loader/mapillary_vistas_loader.py index 36e5c7a8..0ad6b87d 100644 --- 
a/ptsemseg/loader/mapillary_vistas_loader.py +++ b/ptsemseg/loader/mapillary_vistas_loader.py @@ -40,7 +40,7 @@ def __init__( self.ignore_id = 250 if not self.files[split]: - raise Exception("No files for split=[%s] found in %s" % (split, self.images_base)) + raise Exception(f"No files for split=[{split}] found in {self.images_base}") print("Found %d %s images" % (len(self.files[split]), split)) @@ -53,7 +53,7 @@ def parse_config(self): class_names = [] class_ids = [] class_colors = [] - print("There are {} labels in the config file".format(len(labels))) + print(f"There are {len(labels)} labels in the config file") for label_id, label in enumerate(labels): class_names.append(label["readable"]) class_ids.append(label_id) @@ -86,9 +86,7 @@ def __getitem__(self, index): return img, lbl def transform(self, img, lbl): - if self.img_size == ("same", "same"): - pass - else: + if self.img_size != ("same", "same"): img = img.resize( (self.img_size[0], self.img_size[1]), resample=Image.LANCZOS ) # uint8 with RGB mode @@ -103,7 +101,7 @@ def decode_segmap(self, temp): r = temp.copy() g = temp.copy() b = temp.copy() - for l in range(0, self.n_classes): + for l in range(self.n_classes): r[temp == l] = self.class_colors[l][0] g[temp == l] = self.class_colors[l][1] b[temp == l] = self.class_colors[l][2] diff --git a/ptsemseg/loader/mit_sceneparsing_benchmark_loader.py b/ptsemseg/loader/mit_sceneparsing_benchmark_loader.py index 4b666da2..0c3698c1 100644 --- a/ptsemseg/loader/mit_sceneparsing_benchmark_loader.py +++ b/ptsemseg/loader/mit_sceneparsing_benchmark_loader.py @@ -57,7 +57,7 @@ def __init__( self.files[split] = recursive_glob(rootdir=self.images_base, suffix=".jpg") if not self.files[split]: - raise Exception("No files for split=[%s] found in %s" % (split, self.images_base)) + raise Exception(f"No files for split=[{split}] found in {self.images_base}") print("Found %d %s images" % (len(self.files[split]), split)) @@ -71,7 +71,9 @@ def __getitem__(self, index): :param 
index: """ img_path = self.files[self.split][index].rstrip() - lbl_path = os.path.join(self.annotations_base, os.path.basename(img_path)[:-4] + ".png") + lbl_path = os.path.join( + self.annotations_base, f"{os.path.basename(img_path)[:-4]}.png" + ) img = m.imread(img_path, mode="RGB") img = np.array(img, dtype=np.uint8) @@ -93,9 +95,7 @@ def transform(self, img, lbl): :param img: :param lbl: """ - if self.img_size == ("same", "same"): - pass - else: + if self.img_size != ("same", "same"): img = m.imresize(img, (self.img_size[0], self.img_size[1])) # uint8 with RGB mode img = img[:, :, ::-1] # RGB -> BGR img = img.astype(np.float64) @@ -109,9 +109,7 @@ def transform(self, img, lbl): classes = np.unique(lbl) lbl = lbl.astype(float) - if self.img_size == ("same", "same"): - pass - else: + if self.img_size != ("same", "same"): lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), "nearest", mode="F") lbl = lbl.astype(int) diff --git a/ptsemseg/loader/nyuv2_loader.py b/ptsemseg/loader/nyuv2_loader.py index 16b27d1b..b9413d5d 100644 --- a/ptsemseg/loader/nyuv2_loader.py +++ b/ptsemseg/loader/nyuv2_loader.py @@ -58,7 +58,7 @@ def __getitem__(self, index): img_path = self.files[self.split][index].rstrip() img_number = img_path.split("_")[-1][:4] lbl_path = os.path.join( - self.root, self.split + "_annot", "new_nyu_class13_" + img_number + ".png" + self.root, f"{self.split}_annot", f"new_nyu_class13_{img_number}.png" ) img = m.imread(img_path) @@ -67,7 +67,7 @@ def __getitem__(self, index): lbl = m.imread(lbl_path) lbl = np.array(lbl, dtype=np.uint8) - if not (len(img.shape) == 3 and len(lbl.shape) == 2): + if len(img.shape) != 3 or len(lbl.shape) != 2: return self.__getitem__(np.random.randint(0, self.__len__())) if self.augmentations is not None: @@ -128,7 +128,7 @@ def decode_segmap(self, temp): r = temp.copy() g = temp.copy() b = temp.copy() - for l in range(0, self.n_classes): + for l in range(self.n_classes): r[temp == l] = self.cmap[l, 0] g[temp == l] = 
self.cmap[l, 1] b[temp == l] = self.cmap[l, 2] diff --git a/ptsemseg/loader/pascal_voc_loader.py b/ptsemseg/loader/pascal_voc_loader.py index 2f610964..99d68749 100644 --- a/ptsemseg/loader/pascal_voc_loader.py +++ b/ptsemseg/loader/pascal_voc_loader.py @@ -66,7 +66,7 @@ def __init__( if not self.test_mode: for split in ["train", "val", "trainval"]: - path = pjoin(self.root, "ImageSets/Segmentation", split + ".txt") + path = pjoin(self.root, "ImageSets/Segmentation", f"{split}.txt") file_list = tuple(open(path, "r")) file_list = [id_.rstrip() for id_ in file_list] self.files[split] = file_list @@ -84,8 +84,8 @@ def __len__(self): def __getitem__(self, index): im_name = self.files[self.split][index] - im_path = pjoin(self.root, "JPEGImages", im_name + ".jpg") - lbl_path = pjoin(self.root, "SegmentationClass/pre_encoded", im_name + ".png") + im_path = pjoin(self.root, "JPEGImages", f"{im_name}.jpg") + lbl_path = pjoin(self.root, "SegmentationClass/pre_encoded", f"{im_name}.png") im = Image.open(im_path) lbl = Image.open(lbl_path) if self.augmentations is not None: @@ -95,9 +95,7 @@ def __getitem__(self, index): return im, lbl def transform(self, img, lbl): - if self.img_size == ("same", "same"): - pass - else: + if self.img_size != ("same", "same"): img = img.resize((self.img_size[0], self.img_size[1])) # uint8 with RGB mode lbl = lbl.resize((self.img_size[0], self.img_size[1])) img = self.tf(img) @@ -171,7 +169,7 @@ def decode_segmap(self, label_mask, plot=False): r = label_mask.copy() g = label_mask.copy() b = label_mask.copy() - for ll in range(0, self.n_classes): + for ll in range(self.n_classes): r[label_mask == ll] = label_colours[ll, 0] g[label_mask == ll] = label_colours[ll, 1] b[label_mask == ll] = label_colours[ll, 2] @@ -179,11 +177,10 @@ def decode_segmap(self, label_mask, plot=False): rgb[:, :, 0] = r / 255.0 rgb[:, :, 1] = g / 255.0 rgb[:, :, 2] = b / 255.0 - if plot: - plt.imshow(rgb) - plt.show() - else: + if not plot: return rgb + plt.imshow(rgb) + 
plt.show() def setup_annotations(self): """Sets up Berkley annotations by adding image indices to the @@ -213,14 +210,14 @@ def setup_annotations(self): if len(pre_encoded) != expected: print("Pre-encoding segmentation masks...") for ii in tqdm(sbd_train_list): - lbl_path = pjoin(sbd_path, "dataset/cls", ii + ".mat") + lbl_path = pjoin(sbd_path, "dataset/cls", f"{ii}.mat") data = io.loadmat(lbl_path) lbl = data["GTcls"][0]["Segmentation"][0].astype(np.int32) lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min()) - m.imsave(pjoin(target_path, ii + ".png"), lbl) + m.imsave(pjoin(target_path, f"{ii}.png"), lbl) for ii in tqdm(self.files["trainval"]): - fname = ii + ".png" + fname = f"{ii}.png" lbl_path = pjoin(self.root, "SegmentationClass", fname) lbl = self.encode_segmap(m.imread(lbl_path)) lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min()) diff --git a/ptsemseg/loader/sunrgbd_loader.py b/ptsemseg/loader/sunrgbd_loader.py index 9befb615..cb4d38b2 100644 --- a/ptsemseg/loader/sunrgbd_loader.py +++ b/ptsemseg/loader/sunrgbd_loader.py @@ -53,7 +53,9 @@ def __init__( for split in ["train", "test"]: file_list = sorted( - recursive_glob(rootdir=self.root + "annotations/" + split + "/", suffix="png") + recursive_glob( + rootdir=f"{self.root}annotations/{split}/", suffix="png" + ) ) self.anno_files[split] = file_list @@ -72,7 +74,7 @@ def __getitem__(self, index): lbl = m.imread(lbl_path) lbl = np.array(lbl, dtype=np.uint8) - if not (len(img.shape) == 3 and len(lbl.shape) == 2): + if len(img.shape) != 3 or len(lbl.shape) != 2: return self.__getitem__(np.random.randint(0, self.__len__())) if self.augmentations is not None: @@ -133,7 +135,7 @@ def decode_segmap(self, temp): r = temp.copy() g = temp.copy() b = temp.copy() - for l in range(0, self.n_classes): + for l in range(self.n_classes): r[temp == l] = self.cmap[l, 0] g[temp == l] = self.cmap[l, 1] b[temp == l] = self.cmap[l, 2] diff --git a/ptsemseg/loss/__init__.py b/ptsemseg/loss/__init__.py index 0ef1fcf2..c123b501 
100644 --- a/ptsemseg/loss/__init__.py +++ b/ptsemseg/loss/__init__.py @@ -28,7 +28,7 @@ def get_loss_function(cfg): loss_params = {k: v for k, v in loss_dict.items() if k != "name"} if loss_name not in key2loss: - raise NotImplementedError("Loss {} not implemented".format(loss_name)) + raise NotImplementedError(f"Loss {loss_name} not implemented") - logger.info("Using {} with {} params".format(loss_name, loss_params)) + logger.info(f"Using {loss_name} with {loss_params} params") return functools.partial(key2loss[loss_name], **loss_params) diff --git a/ptsemseg/loss/loss.py b/ptsemseg/loss/loss.py index 83c5a137..e9bedebc 100644 --- a/ptsemseg/loss/loss.py +++ b/ptsemseg/loss/loss.py @@ -12,10 +12,13 @@ def cross_entropy2d(input, target, weight=None, size_average=True): input = input.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c) target = target.view(-1) - loss = F.cross_entropy( - input, target, weight=weight, size_average=size_average, ignore_index=250 + return F.cross_entropy( + input, + target, + weight=weight, + size_average=size_average, + ignore_index=250, ) - return loss def multi_scale_cross_entropy2d(input, target, weight=None, size_average=True, scale_weight=None): diff --git a/ptsemseg/metrics.py b/ptsemseg/metrics.py index 36fcb08e..fc94b497 100644 --- a/ptsemseg/metrics.py +++ b/ptsemseg/metrics.py @@ -11,10 +11,10 @@ def __init__(self, n_classes): def _fast_hist(self, label_true, label_pred, n_class): mask = (label_true >= 0) & (label_true < n_class) - hist = np.bincount( - n_class * label_true[mask].astype(int) + label_pred[mask], minlength=n_class ** 2 + return np.bincount( + n_class * label_true[mask].astype(int) + label_pred[mask], + minlength=n_class**2, ).reshape(n_class, n_class) - return hist def update(self, label_trues, label_preds): for lt, lp in zip(label_trues, label_preds): diff --git a/ptsemseg/models/__init__.py b/ptsemseg/models/__init__.py index 533a8c5f..574810c6 100644 --- a/ptsemseg/models/__init__.py +++ 
b/ptsemseg/models/__init__.py @@ -63,4 +63,4 @@ def _get_model_instance(name): "frrnB": frrn, }[name] except: - raise ("Model {} not available".format(name)) + raise NotImplementedError(f"Model {name} not available") diff --git a/ptsemseg/models/fcn.py b/ptsemseg/models/fcn.py index 9485a7bc..5c95d63b 100644 --- a/ptsemseg/models/fcn.py +++ b/ptsemseg/models/fcn.py @@ -83,9 +83,7 @@ def forward(self, x): score = self.classifier(conv5) - out = F.upsample(score, x.size()[2:]) - - return out + return F.upsample(score, x.size()[2:]) def init_vgg16_params(self, vgg16, copy_fc8=True): blocks = [ @@ -200,9 +198,7 @@ def forward(self, x): score = F.upsample(score, score_pool4.size()[2:]) score += score_pool4 - out = F.upsample(score, x.size()[2:]) - - return out + return F.upsample(score, x.size()[2:]) def init_vgg16_params(self, vgg16, copy_fc8=True): blocks = [ diff --git a/ptsemseg/models/frrn.py b/ptsemseg/models/frrn.py index e2ec72b0..d83eda91 100644 --- a/ptsemseg/models/frrn.py +++ b/ptsemseg/models/frrn.py @@ -40,7 +40,7 @@ def __init__(self, n_classes=21, model_type="B", group_norm=False, n_groups=16): self.up_residual_units = [] self.down_residual_units = [] - for i in range(3): + for _ in range(3): self.up_residual_units.append( RU( channels=48, diff --git a/ptsemseg/models/icnet.py b/ptsemseg/models/icnet.py index 05275ca8..8934814c 100644 --- a/ptsemseg/models/icnet.py +++ b/ptsemseg/models/icnet.py @@ -217,14 +217,13 @@ def forward(self, x): if self.training: return (sub124_cls, sub24_cls, sub4_cls) - else: - sub124_cls = F.interpolate( - sub124_cls, - size=get_interp_size(sub124_cls, z_factor=4), - mode="bilinear", - align_corners=True, - ) - return sub124_cls + sub124_cls = F.interpolate( + sub124_cls, + size=get_interp_size(sub124_cls, z_factor=4), + mode="bilinear", + align_corners=True, + ) + return sub124_cls def load_pretrained_model(self, model_path): """ @@ -290,7 +289,7 @@ def _no_affine_bn(module=None): if isinstance(module, nn.BatchNorm2d): module.affine = False - if 
len([m for m in module.children()]) > 0: + if list(module.children()): for child in module.children(): _no_affine_bn(child) @@ -316,7 +315,7 @@ def _transfer_conv(layer_name, module): module.bias.data.copy_(torch.from_numpy(bias).view_as(module.bias)) def _transfer_bn(conv_layer_name, bn_module): - mean, var, gamma, beta = layer_params[conv_layer_name + "/bn"] + mean, var, gamma, beta = layer_params[f"{conv_layer_name}/bn"] print( "BN {}: Original {} and trans weights {}".format( conv_layer_name, bn_module.running_mean.size(), mean.shape @@ -331,7 +330,7 @@ def _transfer_conv_bn(conv_layer_name, mother_module): conv_module = mother_module[0] _transfer_conv(conv_layer_name, conv_module) - if conv_layer_name + "/bn" in layer_params.keys(): + if f"{conv_layer_name}/bn" in layer_params: bn_module = mother_module[1] _transfer_bn(conv_layer_name, bn_module) @@ -342,10 +341,10 @@ def _transfer_residual(block_name, block): if ("bottleneck" in block_name) or ("identity" not in block_name): # Conv block bottleneck = block_module.layers[0] bottleneck_conv_bn_dic = { - prefix + "_1_1x1_reduce": bottleneck.cbr1.cbr_unit, - prefix + "_1_3x3": bottleneck.cbr2.cbr_unit, - prefix + "_1_1x1_proj": bottleneck.cb4.cb_unit, - prefix + "_1_1x1_increase": bottleneck.cb3.cb_unit, + f"{prefix}_1_1x1_reduce": bottleneck.cbr1.cbr_unit, + f"{prefix}_1_3x3": bottleneck.cbr2.cbr_unit, + f"{prefix}_1_1x1_proj": bottleneck.cb4.cb_unit, + f"{prefix}_1_1x1_increase": bottleneck.cb3.cb_unit, } for k, v in bottleneck_conv_bn_dic.items(): @@ -535,4 +534,4 @@ def tile_predict(self, imgs, include_flip_mode=True): # torch.save(state, os.path.join(checkpoints_dir_path, "icnetBN_cityscapes_train_30k.pth")) # torch.save(state, os.path.join(checkpoints_dir_path, "icnet_cityscapes_trainval_90k.pth")) # torch.save(state, os.path.join(checkpoints_dir_path, "icnetBN_cityscapes_trainval_90k.pth")) - print("Output Shape {} \t Input Shape {}".format(out.shape, img.shape)) + print(f"Output Shape {out.shape} \t Input 
Shape {img.shape}") diff --git a/ptsemseg/models/linknet.py b/ptsemseg/models/linknet.py index cb17ed96..7dd53ae7 100644 --- a/ptsemseg/models/linknet.py +++ b/ptsemseg/models/linknet.py @@ -66,11 +66,9 @@ def _make_layer(self, block, planes, blocks, stride=1): ), nn.BatchNorm2d(planes * block.expansion), ) - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample)) + layers = [block(self.inplanes, planes, stride, downsample)] self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes)) + layers.extend(block(self.inplanes, planes) for _ in range(1, blocks)) return nn.Sequential(*layers) def forward(self, x): @@ -95,6 +93,4 @@ def forward(self, x): # Final Classification f1 = self.finaldeconvbnrelu1(d1) f2 = self.finalconvbnrelu2(f1) - f3 = self.finalconv3(f2) - - return f3 + return self.finalconv3(f2) diff --git a/ptsemseg/models/pspnet.py b/ptsemseg/models/pspnet.py index 84152767..de8bedbe 100644 --- a/ptsemseg/models/pspnet.py +++ b/ptsemseg/models/pspnet.py @@ -112,10 +112,7 @@ def forward(self, x): x = self.classification(x) x = F.interpolate(x, size=inp_shape, mode="bilinear", align_corners=True) - if self.training: - return (x, x_aux) - else: # eval mode - return x + return (x, x_aux) if self.training else x def load_pretrained_model(self, model_path): """ @@ -172,7 +169,7 @@ def _no_affine_bn(module=None): if isinstance(module, nn.BatchNorm2d): module.affine = False - if len([m for m in module.children()]) > 0: + if list(module.children()): for child in module.children(): _no_affine_bn(child) @@ -203,7 +200,7 @@ def _transfer_conv_bn(conv_layer_name, mother_module): _transfer_conv(conv_layer_name, conv_module) - mean, var, gamma, beta = layer_params[conv_layer_name + "/bn"] + mean, var, gamma, beta = layer_params[f"{conv_layer_name}/bn"] print( "BN {}: Original {} and trans weights {}".format( conv_layer_name, bn_module.running_mean.size(), mean.shape @@ -219,10 +216,10 @@ def 
_transfer_residual(prefix, block): bottleneck = block_module.layers[0] bottleneck_conv_bn_dic = { - prefix + "_1_1x1_reduce": bottleneck.cbr1.cbr_unit, - prefix + "_1_3x3": bottleneck.cbr2.cbr_unit, - prefix + "_1_1x1_proj": bottleneck.cb4.cb_unit, - prefix + "_1_1x1_increase": bottleneck.cb3.cb_unit, + f"{prefix}_1_1x1_reduce": bottleneck.cbr1.cbr_unit, + f"{prefix}_1_3x3": bottleneck.cbr2.cbr_unit, + f"{prefix}_1_1x1_proj": bottleneck.cb4.cb_unit, + f"{prefix}_1_1x1_increase": bottleneck.cb3.cb_unit, } for k, v in bottleneck_conv_bn_dic.items(): @@ -252,8 +249,8 @@ def _transfer_residual(prefix, block): "conv5_3_pool2_conv": self.pyramid_pooling.paths[2].cbr_unit, "conv5_3_pool1_conv": self.pyramid_pooling.paths[3].cbr_unit, "conv5_4": self.cbr_final.cbr_unit, - "conv4_" + str(self.block_config[2] + 1): self.convbnrelu4_aux.cbr_unit, - } # Auxiliary layers for training + f"conv4_{str(self.block_config[2] + 1)}": self.convbnrelu4_aux.cbr_unit, + } residual_layers = { "conv2": [self.res_block2, self.block_config[0]], @@ -395,4 +392,4 @@ def tile_predict(self, imgs, include_flip_mode=True): torch.save(state, os.path.join(checkpoints_dir_path, "pspnet_101_cityscapes.pth")) # torch.save(state, os.path.join(checkpoints_dir_path, "pspnet_50_ade20k.pth")) # torch.save(state, os.path.join(checkpoints_dir_path, "pspnet_101_pascalvoc.pth")) - print("Output Shape {} \t Input Shape {}".format(out.shape, img.shape)) + print(f"Output Shape {out.shape} \t Input Shape {img.shape}") diff --git a/ptsemseg/models/segnet.py b/ptsemseg/models/segnet.py index e23ca7c3..a45922cb 100644 --- a/ptsemseg/models/segnet.py +++ b/ptsemseg/models/segnet.py @@ -43,11 +43,7 @@ def init_vgg16_params(self, vgg16): features = list(vgg16.features.children()) - vgg_layers = [] - for _layer in features: - if isinstance(_layer, nn.Conv2d): - vgg_layers.append(_layer) - + vgg_layers = [_layer for _layer in features if isinstance(_layer, nn.Conv2d)] merged_layers = [] for idx, conv_block in 
enumerate(blocks): if idx < 2: @@ -59,10 +55,9 @@ def init_vgg16_params(self, vgg16): conv_block.conv3.cbr_unit, ] for _unit in units: - for _layer in _unit: - if isinstance(_layer, nn.Conv2d): - merged_layers.append(_layer) - + merged_layers.extend( + _layer for _layer in _unit if isinstance(_layer, nn.Conv2d) + ) assert len(vgg_layers) == len(merged_layers) for l1, l2 in zip(vgg_layers, merged_layers): diff --git a/ptsemseg/models/utils.py b/ptsemseg/models/utils.py index ec24bba0..f7f9cbda 100644 --- a/ptsemseg/models/utils.py +++ b/ptsemseg/models/utils.py @@ -36,8 +36,7 @@ def __init__( self.cb_unit = nn.Sequential(conv_mod) def forward(self, inputs): - outputs = self.cb_unit(inputs) - return outputs + return self.cb_unit(inputs) class conv2DGroupNorm(nn.Module): @@ -59,8 +58,7 @@ def __init__( self.cg_unit = nn.Sequential(conv_mod, nn.GroupNorm(n_groups, int(n_filters))) def forward(self, inputs): - outputs = self.cg_unit(inputs) - return outputs + return self.cg_unit(inputs) class deconv2DBatchNorm(nn.Module): @@ -80,8 +78,7 @@ def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True): ) def forward(self, inputs): - outputs = self.dcb_unit(inputs) - return outputs + return self.dcb_unit(inputs) class conv2DBatchNormRelu(nn.Module): @@ -116,8 +113,7 @@ def __init__( self.cbr_unit = nn.Sequential(conv_mod, nn.ReLU(inplace=True)) def forward(self, inputs): - outputs = self.cbr_unit(inputs) - return outputs + return self.cbr_unit(inputs) class conv2DGroupNormRelu(nn.Module): @@ -141,8 +137,7 @@ def __init__( ) def forward(self, inputs): - outputs = self.cgr_unit(inputs) - return outputs + return self.cgr_unit(inputs) class deconv2DBatchNormRelu(nn.Module): @@ -163,8 +158,7 @@ def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True): ) def forward(self, inputs): - outputs = self.dcbr_unit(inputs) - return outputs + return self.dcbr_unit(inputs) class unetConv2(nn.Module): @@ -521,19 +515,18 @@ def __init__( bias = not 
is_batchnorm self.paths = [] - for i in range(len(pool_sizes)): - self.paths.append( - conv2DBatchNormRelu( - in_channels, - int(in_channels / len(pool_sizes)), - 1, - 1, - 0, - bias=bias, - is_batchnorm=is_batchnorm, - ) + self.paths.extend( + conv2DBatchNormRelu( + in_channels, + int(in_channels / len(pool_sizes)), + 1, + 1, + 0, + bias=bias, + is_batchnorm=is_batchnorm, ) - + for _ in range(len(pool_sizes)) + ) self.path_module_list = nn.ModuleList(self.paths) self.pool_sizes = pool_sizes self.model_name = model_name @@ -702,13 +695,16 @@ def __init__( ) ) if include_range in ["all", "identity"]: - for i in range(n_blocks - 1): - layers.append( - bottleNeckIdentifyPSP( - out_channels, mid_channels, stride, dilation, is_batchnorm=is_batchnorm - ) + layers.extend( + bottleNeckIdentifyPSP( + out_channels, + mid_channels, + stride, + dilation, + is_batchnorm=is_batchnorm, ) - + for _ in range(n_blocks - 1) + ) self.layers = nn.Sequential(*layers) def forward(self, x): @@ -777,8 +773,7 @@ def get_interp_size(input, s_factor=1, z_factor=1): # for caffe ori_h = ori_h + (ori_h - 1) * (z_factor - 1) ori_w = ori_w + (ori_w - 1) * (z_factor - 1) - resize_shape = (int(ori_h), int(ori_w)) - return resize_shape + return int(ori_h), int(ori_w) def interp(input, output_size, mode="bilinear"): @@ -803,10 +798,7 @@ def interp(input, output_size, mode="bilinear"): def get_upsampling_weight(in_channels, out_channels, kernel_size): """Make a 2D bilinear kernel suitable for upsampling""" factor = (kernel_size + 1) // 2 - if kernel_size % 2 == 1: - center = factor - 1 - else: - center = factor - 0.5 + center = factor - 1 if kernel_size % 2 == 1 else factor - 0.5 og = np.ogrid[:kernel_size, :kernel_size] filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64) diff --git a/ptsemseg/optimizers/__init__.py b/ptsemseg/optimizers/__init__.py index 3d799672..20481726 100644 --- 
a/ptsemseg/optimizers/__init__.py +++ b/ptsemseg/optimizers/__init__.py @@ -23,7 +23,7 @@ def get_optimizer(cfg): else: opt_name = cfg["training"]["optimizer"]["name"] if opt_name not in key2opt: - raise NotImplementedError("Optimizer {} not implemented".format(opt_name)) + raise NotImplementedError(f"Optimizer {opt_name} not implemented") - logger.info("Using {} optimizer".format(opt_name)) + logger.info(f"Using {opt_name} optimizer") return key2opt[opt_name] diff --git a/ptsemseg/schedulers/__init__.py b/ptsemseg/schedulers/__init__.py index 0a4c9dbf..426c14bc 100644 --- a/ptsemseg/schedulers/__init__.py +++ b/ptsemseg/schedulers/__init__.py @@ -23,7 +23,7 @@ def get_scheduler(optimizer, scheduler_dict): s_type = scheduler_dict["name"] scheduler_dict.pop("name") - logging.info("Using {} scheduler with {} params".format(s_type, scheduler_dict)) + logging.info(f"Using {s_type} scheduler with {scheduler_dict} params") warmup_dict = {} if "warmup_iters" in scheduler_dict: @@ -33,9 +33,7 @@ def get_scheduler(optimizer, scheduler_dict): warmup_dict["gamma"] = scheduler_dict.get("warmup_factor", 0.2) logger.info( - "Using Warmup with {} iters {} gamma and {} mode".format( - warmup_dict["warmup_iters"], warmup_dict["gamma"], warmup_dict["mode"] - ) + f'Using Warmup with {warmup_dict["warmup_iters"]} iters {warmup_dict["gamma"]} gamma and {warmup_dict["mode"]} mode' ) scheduler_dict.pop("warmup_iters", None) diff --git a/ptsemseg/schedulers/schedulers.py b/ptsemseg/schedulers/schedulers.py index ee78a443..43a0ae90 100644 --- a/ptsemseg/schedulers/schedulers.py +++ b/ptsemseg/schedulers/schedulers.py @@ -6,7 +6,7 @@ def __init__(self, optimizer, last_epoch=-1): super(ConstantLR, self).__init__(optimizer, last_epoch) def get_lr(self): - return [base_lr for base_lr in self.base_lrs] + return list(self.base_lrs) class PolynomialLR(_LRScheduler): @@ -18,10 +18,9 @@ def __init__(self, optimizer, max_iter, decay_iter=1, gamma=0.9, last_epoch=-1): def get_lr(self): if 
self.last_epoch % self.decay_iter or self.last_epoch % self.max_iter: - return [base_lr for base_lr in self.base_lrs] - else: - factor = (1 - self.last_epoch / float(self.max_iter)) ** self.gamma - return [base_lr * factor for base_lr in self.base_lrs] + return list(self.base_lrs) + factor = (1 - self.last_epoch / float(self.max_iter)) ** self.gamma + return [base_lr * factor for base_lr in self.base_lrs] class WarmUpLR(_LRScheduler): @@ -45,7 +44,7 @@ def get_lr(self): elif self.mode == "constant": factor = self.gamma else: - raise KeyError("WarmUp type {} not implemented".format(self.mode)) + raise KeyError(f"WarmUp type {self.mode} not implemented") return [factor * base_lr for base_lr in cold_lrs] diff --git a/ptsemseg/utils.py b/ptsemseg/utils.py index 4760ee60..d168e0d7 100644 --- a/ptsemseg/utils.py +++ b/ptsemseg/utils.py @@ -51,7 +51,7 @@ def get_logger(logdir): logger = logging.getLogger("ptsemseg") ts = str(datetime.datetime.now()).split(".")[0].replace(" ", "_") ts = ts.replace(":", "_").replace("-", "_") - file_path = os.path.join(logdir, "run_{}.log".format(ts)) + file_path = os.path.join(logdir, f"run_{ts}.log") hdlr = logging.FileHandler(file_path) formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s") hdlr.setFormatter(formatter) diff --git a/test.py b/test.py index 3123d806..bef464cd 100644 --- a/test.py +++ b/test.py @@ -26,7 +26,7 @@ def test(args): model_name = model_file_name[: model_file_name.find("_")] # Setup image - print("Read Input Image from : {}".format(args.img_path)) + print(f"Read Input Image from : {args.img_path}") img = misc.imread(args.img_path) data_loader = get_loader(args.dataset) @@ -82,9 +82,9 @@ def test(args): q = d.inference(50) mask = np.argmax(q, axis=0).reshape(w, h).transpose(1, 0) decoded_crf = loader.decode_segmap(np.array(mask, dtype=np.uint8)) - dcrf_path = args.out_path[:-4] + "_drf.png" + dcrf_path = f"{args.out_path[:-4]}_drf.png" misc.imsave(dcrf_path, decoded_crf) - print("Dense CRF Processed 
Mask Saved at: {}".format(dcrf_path)) + print(f"Dense CRF Processed Mask Saved at: {dcrf_path}") pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=0) if model_name in ["pspnet", "icnet", "icnetBN"]: @@ -95,7 +95,7 @@ def test(args): decoded = loader.decode_segmap(pred) print("Classes found: ", np.unique(pred)) misc.imsave(args.out_path, decoded) - print("Segmentation Mask Saved at: {}".format(args.out_path)) + print(f"Segmentation Mask Saved at: {args.out_path}") if __name__ == "__main__": diff --git a/train.py b/train.py index eeabc578..cbe17b51 100644 --- a/train.py +++ b/train.py @@ -81,18 +81,18 @@ def train(cfg, writer, logger): optimizer_params = {k: v for k, v in cfg["training"]["optimizer"].items() if k != "name"} optimizer = optimizer_cls(model.parameters(), **optimizer_params) - logger.info("Using optimizer {}".format(optimizer)) + logger.info(f"Using optimizer {optimizer}") scheduler = get_scheduler(optimizer, cfg["training"]["lr_schedule"]) loss_fn = get_loss_function(cfg) - logger.info("Using loss {}".format(loss_fn)) + logger.info(f"Using loss {loss_fn}") start_iter = 0 if cfg["training"]["resume"] is not None: if os.path.isfile(cfg["training"]["resume"]): logger.info( - "Loading model and optimizer from checkpoint '{}'".format(cfg["training"]["resume"]) + f"""Loading model and optimizer from checkpoint '{cfg["training"]["resume"]}'""" ) checkpoint = torch.load(cfg["training"]["resume"]) model.load_state_dict(checkpoint["model_state"]) @@ -100,12 +100,10 @@ def train(cfg, writer, logger): scheduler.load_state_dict(checkpoint["scheduler_state"]) start_iter = checkpoint["epoch"] logger.info( - "Loaded checkpoint '{}' (iter {})".format( - cfg["training"]["resume"], checkpoint["epoch"] - ) + f"""Loaded checkpoint '{cfg["training"]["resume"]}' (iter {start_iter})""" ) else: - logger.info("No checkpoint found at '{}'".format(cfg["training"]["resume"])) + logger.info(f"""No checkpoint found at '{cfg["training"]["resume"]}'""") val_loss_meter = 
averageMeter() time_meter = averageMeter() @@ -171,12 +169,12 @@ def train(cfg, writer, logger): score, class_iou = running_metrics_val.get_scores() for k, v in score.items(): print(k, v) - logger.info("{}: {}".format(k, v)) - writer.add_scalar("val_metrics/{}".format(k), v, i + 1) + logger.info(f"{k}: {v}") + writer.add_scalar(f"val_metrics/{k}", v, i + 1) for k, v in class_iou.items(): - logger.info("{}: {}".format(k, v)) - writer.add_scalar("val_metrics/cls_{}".format(k), v, i + 1) + logger.info(f"{k}: {v}") + writer.add_scalar(f"val_metrics/cls_{k}", v, i + 1) val_loss_meter.reset() running_metrics_val.reset() @@ -192,7 +190,7 @@ def train(cfg, writer, logger): } save_path = os.path.join( writer.file_writer.get_logdir(), - "{}_{}_best_model.pkl".format(cfg["model"]["arch"], cfg["data"]["dataset"]), + f'{cfg["model"]["arch"]}_{cfg["data"]["dataset"]}_best_model.pkl', ) torch.save(state, save_path) @@ -220,7 +218,7 @@ def train(cfg, writer, logger): logdir = os.path.join("runs", os.path.basename(args.config)[:-4], str(run_id)) writer = SummaryWriter(log_dir=logdir) - print("RUNDIR: {}".format(logdir)) + print(f"RUNDIR: {logdir}") shutil.copy(args.config, logdir) logger = get_logger(logdir)