Skip to content

Setup and train OneFormer in Google Colab problems #138

@dsinapova

Description

@dsinapova

Hi,

I am trying to setup and train OneFormer in Colab on a custom dataset. However I am facing a few problems:

  1. Cannot import modules:
  • detectron2:

from detectron2.utils import setup_logger
Error:

Image

  • oneformer:
from oneformer import (
    add_oneformer_config,
    add_common_config,
    add_swin_config,
    add_dinat_config,
    add_convnext_config,
)

Error:

Image

NOTE: Those work if I clone the demo repository: https://github.com/SHI-Labs/OneFormer-Colab.git, however I CANNOT train my model there!

  2. Cannot install detectron2 in the way listed in the tutorial:
  • working:

!python -m pip install -e /content/drive/MyDrive/OneF/OneFormer/detectron2

  • not working:
dist = distutils.core.run_setup("./detectron2/setup.py")
!python -m pip install {' '.join([f"'{x}'" for x in dist.install_requires])} --quiet
  3. Requirements and dependencies take too long to install (especially natten)

Here is my code for reference

from google.colab import drive
drive.mount('/content/drive')
%cd /content/drive/MyDrive/OneF/

!git clone https://github.com/SHI-Labs/OneFormer.git
%cd /content/drive/MyDrive/OneF/OneFormer

!pip3 install -U opencv-python --quiet
!pip3 install natten -f https://shi-labs.com/natten/wheels/cu113/torch1.10.1/index.html --quiet

!pip3 install git+https://github.com/cocodataset/panopticapi.git --quiet
!pip3 install git+https://github.com/mcordts/cityscapesScripts.git --quiet

!pip3 install -r requirements.txt --quiet
!pip3 install ipython-autotime --quiet
!pip3 install imutils --quiet

import sys, os, distutils.core
!git clone 'https://github.com/facebookresearch/detectron2'
dist = distutils.core.run_setup("./detectron2/setup.py")
!python -m pip install {' '.join([f"'{x}'" for x in dist.install_requires])} --quiet
sys.path.insert(0, os.path.abspath('/content/drive/MyDrive/OneF/OneFormer/detectron2'))

import detectron2
from detectron2.utils import setup_logger
setup_logger()
setup_logger(name="oneformer")

# Import libraries
import numpy as np
import cv2
import torch
from google.colab.patches import cv2_imshow
import imutils

# Import detectron2 utilities
from detectron2.config import get_cfg
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.data import MetadataCatalog

# import OneFormer Project
from oneformer import (
    add_oneformer_config,
    add_common_config,
    add_swin_config,
    add_dinat_config,
    add_convnext_config,
)

# Map custom dataset in detectron2 format
import numpy as np
import os, json, cv2, random
import matplotlib.pyplot as plt
import torch

from detectron2.data import MetadataCatalog, DatasetCatalog

from detectron2.structures import BoxMode

def get_d_dicts(img_dir):
    """Load VIA ("via_region_data.json") annotations into detectron2's
    standard dataset-dict format.

    Args:
        img_dir: Directory containing the images and the VIA JSON file.

    Returns:
        list[dict]: One record per image with "file_name", "image_id",
        "height", "width" and "annotations" (polygon segmentation,
        XYXY_ABS bbox, single category id 0).

    Raises:
        FileNotFoundError: If an annotated image cannot be read.
    """
    json_file = os.path.join(img_dir, "via_region_data.json")
    with open(json_file) as f:
        imgs_anns = json.load(f)

    dataset_dicts = []
    for idx, v in enumerate(imgs_anns.values()):
        filename = os.path.join(img_dir, v["filename"])
        img = cv2.imread(filename)
        # cv2.imread returns None (no exception) for a missing or corrupt
        # file; fail loudly here instead of crashing later with an opaque
        # AttributeError on .shape.
        if img is None:
            raise FileNotFoundError(f"cannot read image: {filename}")
        height, width = img.shape[:2]

        record = {
            "file_name": filename,
            "image_id": idx,
            "height": height,
            "width": width,
        }

        objs = []
        # NOTE(review): assumes the VIA export stores "regions" as a dict
        # (older VIA format); newer VIA versions export a list — confirm
        # against the actual annotation file.
        for anno in v["regions"].values():
            shape = anno["shape_attributes"]
            px = shape["all_points_x"]
            py = shape["all_points_y"]
            # Shift vertices to pixel centers, then flatten to the
            # [x0, y0, x1, y1, ...] layout detectron2 expects.
            poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
            poly = [coord for point in poly for coord in point]
            objs.append({
                "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,  # single class ("pan")
            })
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts

# Root of the custom dataset; expects <img_dir>/train and <img_dir>/val,
# each holding images plus a "via_region_data.json" annotation file.
img_dir = '/content/drive/MyDrive/OneF/dataset'
for d in ["train", "val"]:
    ds = "d_" + d
    # Re-registering an existing name raises, so drop any stale entry first.
    # After this guard the name is guaranteed absent, so the original second
    # check ("if not ds in ...") was always true and has been removed.
    if ds in DatasetCatalog.list():
        DatasetCatalog.remove(ds)
        MetadataCatalog.remove(ds)
    # Bind d as a default argument so each lambda captures its own split
    # name rather than the loop variable's final value.
    DatasetCatalog.register(ds, lambda d=d: get_d_dicts(img_dir + "/" + d))
    MetadataCatalog.get(ds).set(thing_classes=["pan"])

import wandb
wandb.login()

!python train_net.py --dist-url 'tcp://127.0.0.1:50163' \
    --num-gpus 1 \
    --config-file /content/drive/MyDrive/OneF/OneFormer/configs/pan/swin/oneformer_swin_large_bs16_100ep.yaml \
    OUTPUT_DIR /content/drive/MyDrive/OneF/Checkpoints/pan_swin_large WANDB.NAME pan_swin_large

I need help to setup my workspace with the correct dependencies, so I can train the model on my custom dataset in Colab.

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions