M2CAI16 Tool Locations (m2cai16-tool-locations)
def __getitem__(self, idx):
    """Return the ``(image, target)`` pair for the sample at *idx*.

    The target dict follows the torchvision detection convention:
    ``boxes`` (N, 4) float32 xyxy in absolute pixels, ``labels`` (N,)
    int64 class indices, plus ``image_id``, ``area`` and ``iscrowd``.
    """
    img_path, ann = self.samples[idx]
    image = Image.open(img_path).convert('RGB')

    # Parse annotations: each object carries an absolute-pixel
    # [x1, y1, x2, y2] box and a class name mapped to its index.
    boxes = []
    labels = []
    for obj in ann.get('objects', []):
        x1, y1, x2, y2 = obj['bbox']  # absolute pixel coords
        label = self.CLASSES.index(obj['class_name'])
        boxes.append([x1, y1, x2, y2])
        labels.append(label)

    # reshape(-1, 4) keeps a well-formed (0, 4) tensor for frames with
    # no annotated objects; a plain as_tensor([]) is 1-D and the
    # boxes[:, 3] indexing below would raise IndexError.
    boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
    labels = torch.as_tensor(labels, dtype=torch.int64)

    image_id = torch.tensor([idx])
    area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
    iscrowd = torch.zeros((len(boxes),), dtype=torch.int64)

    target = {
        'boxes': boxes,
        'labels': labels,
        'image_id': image_id,
        'area': area,
        'iscrowd': iscrowd,
    }

    if self.transform:
        image, target = self.transform(image, target)
    return image, target

# Use matplotlib and torchvision.utils.draw_bounding_boxes:
def __len__(self):
    """Number of (image, annotation) samples in the dataset."""
    return len(self.samples)

# m2cai16-tool-locations
import matplotlib.pyplot as plt
from torchvision.utils import draw_bounding_boxes
from torchvision.transforms import ToTensor


def show_annotations(dataset, idx=0):
    """Display one dataset frame with its tool bounding boxes drawn.

    Parameters
    ----------
    dataset : M2CAI16ToolLocations
        Dataset yielding ``(image, target)`` pairs in torchvision
        detection format (``target['boxes']`` xyxy, ``target['labels']``).
    idx : int
        Index of the sample to visualize.
    """
    img, target = dataset[idx]

    # draw_bounding_boxes expects a uint8 CHW tensor. Handle both a
    # float tensor in [0, 1] and a raw PIL image; note ToTensor()
    # already scales to [0, 1], so the PIL branch must multiply by
    # 255 before .byte() (a bare .byte() would truncate to 0/1).
    if isinstance(img, torch.Tensor):
        img = (img * 255).byte() if img.max() <= 1 else img.byte()
    else:
        img = (ToTensor()(img) * 255).byte()

    # Pull boxes/labels out of the target and map label indices back
    # to human-readable class names from the dataset.
    boxes = target['boxes']
    labels = target['labels'].tolist()
    class_names = dataset.CLASSES

    img_with_boxes = draw_bounding_boxes(
        img,
        boxes,
        labels=[class_names[l] for l in labels],
        colors='red',
        width=2,
    )

    plt.figure(figsize=(10, 8))
    plt.imshow(img_with_boxes.permute(1, 2, 0))
    plt.axis('off')
    plt.title(f"Frame {idx} — {len(boxes)} tools detected")
    plt.show()
import json
import os

from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision.ops import box_convert


class M2CAI16ToolLocations(Dataset):
    """Dataset for m2cai16-tool-locations bounding box annotations."""
    # NOTE(review): a truncated duplicate scrap of the __getitem__
    # tensor-conversion code trailed this line in the extracted source;
    # it was extraction noise (a syntax error) and has been removed.
# Demo: load the dataset and visualize the annotations on the first
# frame. (The drawing itself happens inside show_annotations.)
dataset = M2CAI16ToolLocations('./m2cai16-tool-locations')
show_annotations(dataset, idx=0)

# 4. Useful preprocessing for training:
# convert to COCO format (for Detectron2, MMDetection, etc.):