# This snippet assumes a recent torchvision (tv_tensors API) and the v2 transforms,
# and that `annotation_df`, `sample_img`, `file_id`, `class_names`, `int_colors`,
# and the helpers `create_polygon_mask`, `draw_bboxes`, and `tensor_to_pil` are
# defined earlier in the notebook (illustrative sketches of the helpers follow the snippet).
import torch
import torchvision
from torchvision.transforms import v2 as transforms
from torchvision.tv_tensors import Mask
from torchvision.utils import draw_segmentation_masks

# Extract the labels for the sample
labels = [shape['label'] for shape in annotation_df.loc[file_id]['shapes']]
# Extract the polygon points for segmentation mask
shape_points = [shape['points'] for shape in annotation_df.loc[file_id]['shapes']]
# Format polygon points for PIL
xy_coords = [[tuple(p) for p in points] for points in shape_points]
# Generate mask images from polygons
mask_imgs = [create_polygon_mask(sample_img.size, xy) for xy in xy_coords]
# Convert mask images to tensors
masks = torch.concat([Mask(transforms.PILToTensor()(mask_img), dtype=torch.bool) for mask_img in mask_imgs])
# Generate bounding box annotations from segmentation masks
bboxes = torchvision.ops.masks_to_boxes(masks)
# Look up the annotation color for each label once, reused for masks and boxes
label_colors = [int_colors[class_names.index(label)] for label in labels]

# Annotate the sample image with segmentation masks
annotated_tensor = draw_segmentation_masks(
    image=transforms.PILToTensor()(sample_img),
    masks=masks,
    alpha=0.3,
    colors=label_colors
)

# Annotate the sample image with labels and bounding boxes
annotated_tensor = draw_bboxes(
    image=annotated_tensor,
    boxes=bboxes,
    labels=labels,
    colors=label_colors
)
tensor_to_pil(annotated_tensor)
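
# The snippet above relies on project-specific helpers and variables
# (`annotation_df`, `sample_img`, `file_id`, `class_names`, `int_colors`,
# `create_polygon_mask`, `draw_bboxes`, `tensor_to_pil`) that are defined
# elsewhere in the notebook. The definitions below are illustrative sketches,
# not the originals: `create_polygon_mask` is assumed to rasterize one polygon
# into a grayscale PIL mask, `draw_bboxes` is assumed to wrap
# torchvision.utils.draw_bounding_boxes, and `tensor_to_pil` is assumed to
# convert an image tensor back to a PIL image for display.
from PIL import Image, ImageDraw
from torchvision.transforms.functional import to_pil_image
from torchvision.utils import draw_bounding_boxes


def create_polygon_mask(image_size, vertices):
    """Draw a filled polygon (white on black) on a grayscale canvas of the given (W, H) size."""
    mask_img = Image.new('L', image_size, 0)
    ImageDraw.Draw(mask_img).polygon(vertices, fill=255)
    return mask_img


def draw_bboxes(image, boxes, labels, colors, width=2):
    """Draw labeled bounding boxes on a uint8 (C, H, W) image tensor."""
    return draw_bounding_boxes(image=image, boxes=boxes, labels=labels, colors=colors, width=width)


def tensor_to_pil(tensor):
    """Convert a (C, H, W) image tensor to a PIL image for display."""
    return to_pil_image(tensor)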