diff --git a/examples/mlperf/dataloader.py b/examples/mlperf/dataloader.py index 3f30437fb1..d991879afe 100644 --- a/examples/mlperf/dataloader.py +++ b/examples/mlperf/dataloader.py @@ -351,9 +351,9 @@ def batch_load_unet3d(preprocessed_dataset_dir:Path, batch_size:int=6, val:bool= ### RetinaNet def load_retinanet_data(base_dir:Path, val:bool, queue_in:Queue, queue_out:Queue, X:Tensor, - Y_boxes:Tensor, Y_labels:Tensor, matches:Tensor, anchors:np.ndarray, seed:Optional[int]=None): + Y_boxes:Tensor, Y_labels:Tensor, matches:Tensor, anchors:Tensor, seed:Optional[int]=None): from extra.datasets.openimages import image_load, prepare_target, random_horizontal_flip, resize - from examples.mlperf.helpers import box_iou, find_matches + from examples.mlperf.helpers import box_iou, find_matches, generate_anchors import torch while (data:=queue_in.get()) is not None: @@ -371,7 +371,7 @@ def load_retinanet_data(base_dir:Path, val:bool, queue_in:Queue, queue_out:Queue img, tgt = random_horizontal_flip(img, tgt) img, tgt, _ = resize(img, tgt=tgt) - match_quality_matrix = box_iou(tgt["boxes"], anchors) + match_quality_matrix = box_iou(tgt["boxes"], (anchor := np.concatenate(generate_anchors((800, 800))))) match_idxs = find_matches(match_quality_matrix, allow_low_quality_matches=True) clipped_match_idxs = np.clip(match_idxs, 0, None) boxes, labels = tgt["boxes"][clipped_match_idxs], tgt["labels"][clipped_match_idxs] @@ -379,13 +379,14 @@ def load_retinanet_data(base_dir:Path, val:bool, queue_in:Queue, queue_out:Queue Y_boxes[idx].contiguous().realize().lazydata.realized.as_buffer(force_zero_copy=True)[:] = boxes.tobytes() Y_labels[idx].contiguous().realize().lazydata.realized.as_buffer(force_zero_copy=True)[:] = labels.tobytes() matches[idx].contiguous().realize().lazydata.realized.as_buffer(force_zero_copy=True)[:] = match_idxs.tobytes() + anchors[idx].contiguous().realize().lazydata.realized.as_buffer(force_zero_copy=True)[:] = anchor.astype(np.float32).tobytes() 
X[idx].contiguous().realize().lazydata.realized.as_buffer(force_zero_copy=True)[:] = img.tobytes() queue_out.put(idx) queue_out.put(None) -def batch_load_retinanet(dataset, val:bool, anchors:np.ndarray, base_dir:Path, batch_size:int=32, shuffle:bool=True, seed:Optional[int]=None): +def batch_load_retinanet(dataset, val:bool, base_dir:Path, batch_size:int=32, shuffle:bool=True, seed:Optional[int]=None): def _enqueue_batch(bc): for idx in range(bc * batch_size, (bc+1) * batch_size): img = dataset.loadImgs(next(dataset_iter))[0] @@ -407,6 +408,7 @@ def batch_load_retinanet(dataset, val:bool, anchors:np.ndarray, base_dir:Path, b shm_y_boxes, Y_boxes = _setup_shared_mem("retinanet_y_boxes", (batch_size * batch_count, 120087, 4), dtypes.float32) shm_y_labels, Y_labels = _setup_shared_mem("retinanet_y_labels", (batch_size * batch_count, 120087), dtypes.int64) shm_matches, matches = _setup_shared_mem("retinanet_matches", (batch_size * batch_count, 120087), dtypes.int64) + shm_anchors, anchors = _setup_shared_mem("retinanet_anchors", (batch_size * batch_count, 120087, 4), dtypes.float32) shutdown = False class Cookie: @@ -449,6 +451,7 @@ def batch_load_retinanet(dataset, val:bool, anchors:np.ndarray, base_dir:Path, b Y_boxes[bc * batch_size:(bc + 1) * batch_size], Y_labels[bc * batch_size:(bc + 1) * batch_size], matches[bc * batch_size:(bc + 1) * batch_size], + anchors[bc * batch_size:(bc + 1) * batch_size], Cookie(bc)) finally: shutdown = True @@ -467,11 +470,13 @@ def batch_load_retinanet(dataset, val:bool, anchors:np.ndarray, base_dir:Path, b shm_y_boxes.close() shm_y_labels.close() shm_matches.close() + shm_anchors.close() try: shm_x.unlink() shm_y_boxes.unlink() shm_y_labels.unlink() shm_matches.unlink() + shm_anchors.unlink() except FileNotFoundError: # happens with BENCHMARK set pass diff --git a/examples/mlperf/model_train.py b/examples/mlperf/model_train.py index e830377437..7c3c37e6df 100644 --- a/examples/mlperf/model_train.py +++ b/examples/mlperf/model_train.py 
@@ -345,7 +345,6 @@ def train_resnet(): def train_retinanet(): from examples.mlperf.dataloader import batch_load_retinanet - from examples.mlperf.helpers import generate_anchors from examples.mlperf.initializers import FrozenBatchNorm2d from extra.datasets.openimages import MLPERF_CLASSES, BASEDIR, download_dataset, normalize from extra.models.retinanet import RetinaNet @@ -373,8 +372,8 @@ def train_retinanet(): layer.requires_grad = False def _data_get(it): - x, y_bboxes, y_labels, matches, cookie = next(it) - return x.shard(GPUS, axis=0).realize(), y_bboxes.shard(GPUS, axis=0), y_labels.shard(GPUS, axis=0), matches.shard(GPUS, axis=0), cookie + x, y_bboxes, y_labels, matches, anchors, cookie = next(it) + return x.shard(GPUS, axis=0).realize(), y_bboxes.shard(GPUS, axis=0), y_labels.shard(GPUS, axis=0), matches.shard(GPUS, axis=0), anchors.shard(GPUS, axis=0), cookie def _create_lr_scheduler(optim, start_iter, warmup_iters, warmup_factor): # TODO: refactor this a bit more so we don't have to recreate it, unlike what MLPerf script is doing @@ -435,9 +434,6 @@ def train_retinanet(): optim = Adam(params, lr=lr) # ** dataset ** - anchors = generate_anchors((800, 800), batch_size=bs) - batched_anchors = Tensor.stack(*[Tensor(a, requires_grad=False) for a in anchors]).shard(GPUS, axis=0) - train_dataset = COCO(download_dataset(BASE_DIR, "train")) val_dataset = COCO(download_dataset(BASE_DIR, "validation")) @@ -445,7 +441,7 @@ def train_retinanet(): # ** training loop ** for e in range(1, num_epochs + 1): - train_dataloader = batch_load_retinanet(train_dataset, False, anchors[0], Path(BASE_DIR), batch_size=bs, seed=seed) + train_dataloader = batch_load_retinanet(train_dataset, False, Path(BASE_DIR), batch_size=bs, seed=seed) it = iter(tqdm(train_dataloader, total=steps_in_train_epoch, desc=f"epoch {e}")) i, proc = 0, _data_get(it) @@ -461,8 +457,8 @@ def train_retinanet(): while proc is not None: GlobalCounters.reset() - x, y_bboxes, y_labels, matches, proc = proc - 
loss, losses = _train_step(model, optim, lr_scheduler, x, labels=y_labels, matches=matches, anchors=batched_anchors, bboxes=y_bboxes) + x, y_bboxes, y_labels, matches, anchors, proc = proc + loss, losses = _train_step(model, optim, lr_scheduler, x, labels=y_labels, matches=matches, anchors=anchors, bboxes=y_bboxes) pt = time.perf_counter() diff --git a/test/external/external_test_datasets.py b/test/external/external_test_datasets.py index d9dc9691fe..c774b236f9 100644 --- a/test/external/external_test_datasets.py +++ b/test/external/external_test_datasets.py @@ -128,9 +128,9 @@ class TestOpenImagesDataset(ExternalTestDatasets): dataset = get_openimages(ann_file.stem, base_dir, subset, transforms) return iter(dataset) - def _create_tinygrad_dataloader(self, base_dir, ann_file, subset, anchors, batch_size=1, seed=42): + def _create_tinygrad_dataloader(self, base_dir, ann_file, subset, batch_size=1, seed=42): dataset = COCO(ann_file) - dataloader = batch_load_retinanet(dataset, subset == "validation", anchors, base_dir, batch_size=batch_size, shuffle=False, seed=seed) + dataloader = batch_load_retinanet(dataset, subset == "validation", base_dir, batch_size=batch_size, shuffle=False, seed=seed) return iter(dataloader) @classmethod @@ -140,11 +140,11 @@ class TestOpenImagesDataset(ExternalTestDatasets): def test_training_set(self): img_size, img_mean, img_std, anchors = (800, 800), [0.0, 0.0, 0.0], [1.0, 1.0, 1.0], torch.ones((120087, 4)) - tinygrad_dataloader = self._create_tinygrad_dataloader(self.base_dir, self.train_ann_file, subset := "train", anchors.numpy()) + tinygrad_dataloader = self._create_tinygrad_dataloader(self.base_dir, self.train_ann_file, subset := "train") ref_dataloader = self._create_ref_dataloader(self.base_dir, self.train_ann_file, subset) transform = GeneralizedRCNNTransform(img_size, img_mean, img_std) - for ((tinygrad_img, tinygrad_boxes, tinygrad_labels, _, _), (ref_img, ref_tgt)) in zip(tinygrad_dataloader, ref_dataloader): + for 
((tinygrad_img, tinygrad_boxes, tinygrad_labels, _, _, _), (ref_img, ref_tgt)) in zip(tinygrad_dataloader, ref_dataloader): ref_tgt = [ref_tgt] ref_img, ref_tgt = transform(ref_img.unsqueeze(0), ref_tgt) @@ -156,8 +156,8 @@ class TestOpenImagesDataset(ExternalTestDatasets): np.testing.assert_equal(tinygrad_labels[0].numpy(), ref_labels.numpy()) def test_validation_set(self): - img_size, img_mean, img_std, anchors = (800, 800), [0.0, 0.0, 0.0], [1.0, 1.0, 1.0], torch.ones((120087, 4)) - tinygrad_dataloader = self._create_tinygrad_dataloader(self.base_dir, self.val_ann_file, "validation", anchors.numpy()) + img_size, img_mean, img_std = (800, 800), [0.0, 0.0, 0.0], [1.0, 1.0, 1.0] + tinygrad_dataloader = self._create_tinygrad_dataloader(self.base_dir, self.val_ann_file, "validation") ref_dataloader = self._create_ref_dataloader(self.base_dir, self.val_ann_file, "val") transform = GeneralizedRCNNTransform(img_size, img_mean, img_std) diff --git a/test/external/mlperf_retinanet/openimages.py b/test/external/mlperf_retinanet/openimages.py index 35a42c419a..2ac48a09f8 100644 --- a/test/external/mlperf_retinanet/openimages.py +++ b/test/external/mlperf_retinanet/openimages.py @@ -110,7 +110,7 @@ def postprocess_targets(targets, anchors): match_quality_matrix = box_iou(targets_per_image['boxes'], anchors_per_image) matched_idxs.append(proposal_matcher(match_quality_matrix)) - for targets_per_image, matched_idxs_per_image in zip(targets, matched_idxs): + for targets_per_image, matched_idxs_per_image in zip(targets, matched_idxs): foreground_idxs_per_image = matched_idxs_per_image >= 0 targets_per_image["boxes"] = targets_per_image["boxes"][matched_idxs_per_image[foreground_idxs_per_image]] targets_per_image["labels"] = targets_per_image["labels"][matched_idxs_per_image[foreground_idxs_per_image]]