|
| 1 | +""" |
| 2 | +COCO dataset which returns image_id for evaluation. |
| 3 | +
|
| 4 | +Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py |
| 5 | +""" |
| 6 | +from pathlib import Path |
| 7 | + |
| 8 | +import torch |
| 9 | +import torch.utils.data |
| 10 | +from pycocotools import mask as coco_mask |
| 11 | + |
| 12 | +from .torchvision_datasets import CocoDetection as TvCocoDetection |
| 13 | +from util.misc import get_local_rank, get_local_size |
| 14 | +import datasets.transforms as T |
| 15 | +import random |
| 16 | + |
| 17 | + |
class CocoDetection(TvCocoDetection):
    """COCO detection dataset that guarantees every sample has instances.

    Wraps the torchvision-style ``CocoDetection`` and additionally converts
    the raw COCO annotations into tensor targets via
    ``ConvertCocoPolysToMask``. If, after transforms, a sample ends up with
    no labeled instances (e.g. a crop removed all boxes), a different index
    is drawn at random until a non-empty sample is produced.
    """

    def __init__(self, img_folder, ann_file, transforms, return_masks, cache_mode=False, local_rank=0, local_size=1):
        super(CocoDetection, self).__init__(img_folder, ann_file,
                                            cache_mode=cache_mode,
                                            local_rank=local_rank,
                                            local_size=local_size)
        self._transforms = transforms
        self.prepare = ConvertCocoPolysToMask(return_masks)

    def __getitem__(self, idx):
        # Keep drawing until the prepared (and transformed) target contains
        # at least one instance.
        while True:
            img, target = super(CocoDetection, self).__getitem__(idx)
            target = {'image_id': self.ids[idx], 'annotations': target}
            img, target = self.prepare(img, target)
            if self._transforms is not None:
                img, target = self._transforms(img, target)

            if len(target['labels']) > 0:
                return img, target
            # No instance survived; resample a random index and retry.
            idx = random.randint(0, self.__len__() - 1)
| 42 | + |
| 43 | + |
def convert_coco_poly_to_mask(segmentations, height, width):
    """Rasterize per-instance COCO polygon segmentations into binary masks.

    Each element of ``segmentations`` is the polygon list of one instance.
    Returns a tensor of shape (num_instances, height, width); when the input
    is empty, a (0, height, width) uint8 tensor is returned.
    """
    per_instance = []
    for polys in segmentations:
        rles = coco_mask.frPyObjects(polys, height, width)
        decoded = coco_mask.decode(rles)
        if decoded.ndim < 3:
            # Single polygon decodes to (H, W); add a trailing channel axis.
            decoded = decoded[..., None]
        # A pixel belongs to the instance if any of its polygons covers it.
        per_instance.append(torch.as_tensor(decoded, dtype=torch.uint8).any(dim=2))
    if not per_instance:
        return torch.zeros((0, height, width), dtype=torch.uint8)
    return torch.stack(per_instance, dim=0)
| 59 | + |
| 60 | + |
class ConvertCocoPolysToMask(object):
    """Convert raw COCO annotation dicts into tensor targets.

    Drops crowd annotations, converts boxes from xywh to xyxy clamped to the
    image, filters degenerate (zero-area) boxes, and optionally rasterizes
    segmentation polygons into per-instance masks.
    """

    def __init__(self, return_masks=False):
        self.return_masks = return_masks

    def __call__(self, image, target):
        w, h = image.size

        image_id = torch.tensor([target["image_id"]])

        # Crowd regions are excluded from the target.
        anno = [a for a in target["annotations"] if a.get('iscrowd', 0) == 0]

        # xywh -> xyxy; reshape(-1, 4) guards the no-boxes case.
        boxes = torch.as_tensor([a["bbox"] for a in anno],
                                dtype=torch.float32).reshape(-1, 4)
        boxes[:, 2:] += boxes[:, :2]
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)

        classes = torch.tensor([a["category_id"] for a in anno],
                               dtype=torch.int64)

        if self.return_masks:
            # NOTE(review): this dataset stores polygons under
            # "segmentation_refined" instead of stock COCO "segmentation" —
            # confirm against the annotation files.
            masks = convert_coco_poly_to_mask(
                [a["segmentation_refined"] for a in anno], h, w)

        keypoints = None
        if anno and "keypoints" in anno[0]:
            keypoints = torch.as_tensor([a["keypoints"] for a in anno],
                                        dtype=torch.float32)
            num_instances = keypoints.shape[0]
            if num_instances:
                # Reshape to (instances, num_keypoints, 3): x, y, visibility.
                keypoints = keypoints.view(num_instances, -1, 3)

        # Keep only boxes with strictly positive width and height.
        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
        boxes = boxes[keep]
        classes = classes[keep]
        if self.return_masks:
            masks = masks[keep]
        if keypoints is not None:
            keypoints = keypoints[keep]

        out = {}
        out["boxes"] = boxes
        out["labels"] = classes
        if self.return_masks:
            out["masks"] = masks
        out["image_id"] = image_id
        if keypoints is not None:
            out["keypoints"] = keypoints

        # Fields required for conversion back to the COCO API.
        out["area"] = torch.tensor([a["area"] for a in anno])[keep]
        out["iscrowd"] = torch.tensor([a.get("iscrowd", 0) for a in anno])[keep]

        out["orig_size"] = torch.as_tensor([int(h), int(w)])
        out["size"] = torch.as_tensor([int(h), int(w)])

        return image, out
| 124 | + |
| 125 | + |
def make_coco_transforms(image_set):
    """Build the augmentation pipeline for the given COCO split.

    'train' applies random flip plus either multi-scale resize or a
    resize/crop/resize sequence; 'val' applies a fixed resize. Any other
    split name raises ValueError.
    """
    normalize = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768]

    if image_set == 'train':
        # Randomly pick between a plain multi-scale resize and a
        # resize -> crop -> resize augmentation.
        scale_augment = T.RandomSelect(
            T.RandomResize(scales, max_size=1333),
            T.Compose([
                T.RandomResize([400, 500, 600]),
                T.RandomSizeCrop(384, 600),
                T.RandomResize(scales, max_size=1333),
            ]),
        )
        return T.Compose([
            T.RandomHorizontalFlip(),
            scale_augment,
            normalize,
        ])

    if image_set == 'val':
        return T.Compose([
            T.RandomResize([800], max_size=1333),
            normalize,
        ])

    raise ValueError(f'unknown {image_set}')
| 157 | + |
| 158 | + |
def build(image_set, args):
    """Build the COCO detection dataset for a split.

    Args:
        image_set: split name, 'train' or 'val'.
        args: parsed arguments; reads ``coco_path``, ``dataset_file``,
            ``masks``, and ``cache_mode``.

    Returns:
        A ``CocoDetection`` dataset with the split's transforms attached.

    Raises:
        ValueError: if ``args.dataset_file`` is not a supported dataset.
        KeyError: if ``image_set`` is not a known split.
    """
    root = Path(args.coco_path)
    assert root.exists(), f'provided COCO path {root} does not exist'
    mode = 'instances'
    if args.dataset_file == 'coco':
        paths = {
            "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'),
            "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'),
        }
    else:
        # Previously an unsupported dataset_file fell through to a confusing
        # NameError on the paths table; fail fast with a clear message.
        raise ValueError(f'unsupported dataset_file {args.dataset_file}')

    img_folder, ann_file = paths[image_set]
    dataset = CocoDetection(img_folder, ann_file,
                            transforms=make_coco_transforms(image_set),
                            return_masks=args.masks,
                            cache_mode=args.cache_mode,
                            local_rank=get_local_rank(),
                            local_size=get_local_size())
    return dataset
| 175 | + |
| 176 | + |
0 commit comments