Image Segmentation Tutorial#
Connect to EvaDB#
%pip install --quiet "evadb[vision,notebook]"
import evadb
cursor = evadb.connect().cursor()
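Every EvaDB statement runs through this cursor, and calling `.df()` on a query materializes the result as a pandas DataFrame. As a quick sanity check, you can list the functions the server already knows about (a minimal sketch; the `SHOW UDFS` statement name may vary across EvaDB versions):
# Sanity check: query results come back as pandas DataFrames via .df()
# (`SHOW UDFS;` is assumed here; the statement name may differ by version)
cursor.query('SHOW UDFS;').df()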
Download the Video#
# Getting the video file
!wget -nc "https://www.dropbox.com/s/k00wge9exwkfxz6/ua_detrac.mp4?raw=1" -O ua_detrac.mp4
Load the sample video from the UA-DETRAC dataset for analysis#
cursor.query('DROP TABLE IF EXISTS VideoForSegmentation;').df()
cursor.query('LOAD VIDEO "ua_detrac.mp4" INTO VideoForSegmentation;').df()
|   | 0 |
|---|---|
| 0 | Number of loaded VIDEO: 1 |
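To confirm the load, you can query the frame ids of the new table (a small sketch; `id` is the frame-index column EvaDB attaches to loaded videos):
# Sanity check: list the first few frame ids of the loaded video
cursor.query('SELECT id FROM VideoForSegmentation WHERE id < 5;').df()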
Register a Hugging Face Segmentation Model as a User-Defined Function (UDF) in EvaDB#
### Using HuggingFace with EvaDB requires specifying the task
### The task here is 'image-segmentation'
### The model is 'facebook/detr-resnet-50-panoptic'
cursor.query("""CREATE UDF IF NOT EXISTS HFSegmentation
TYPE HuggingFace
'task' 'image-segmentation'
'model' 'facebook/detr-resnet-50-panoptic';
""").df()
|   | 0 |
|---|---|
| 0 | UDF HFSegmentation already exists, nothing added. |
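The `IF NOT EXISTS` clause makes re-running this cell a no-op, which is why the output reports that the UDF already exists. To re-register the model with different settings, drop the function first (a sketch using EvaDB's `DROP UDF` statement):
# Remove the function so it can be re-created with new settings
cursor.query('DROP UDF IF EXISTS HFSegmentation;').df()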
Run Image Segmentation on the video#
response = cursor.query("""SELECT HFSegmentation(data)
FROM VideoForSegmentation SAMPLE 5
WHERE id < 20;""").df()
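`response` is an ordinary pandas DataFrame with one row per sampled frame; the pipeline's output fields are flattened into columns such as `hfsegmentation.label` and `hfsegmentation.mask`, which the plotting code below relies on. A quick way to inspect its shape:
# Inspect the result: one row per sampled frame, one column per output field
print(response.columns)
print(response.head(1))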
Visualize the output of the Image Segmenter on the video#
import numpy as np
from PIL import Image
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import cv2
def get_color_mapping(all_labels):
    # Collect every label seen across all frames; sort for a deterministic
    # label -> color assignment between runs
    unique_labels = sorted(set(label for labels in all_labels for label in labels))
    colormap = plt.colormaps["tab20"]
    # tab20 provides 20 colors; wrap around if there are more labels
    colors = [colormap(i % 20)[:3] for i in range(len(unique_labels))]
    # Scale matplotlib's 0-1 floats to 0-255 RGB integers
    colors = [tuple(int(c * 255) for c in color) for color in colors]
    return {label: color for label, color in zip(unique_labels, colors)}
def annotate_single_frame(frame, segments, labels, color_mapping):
    # Paint each segmentation mask onto a blank overlay in its label's color
    overlay = np.zeros_like(frame)
    for mask, label in zip(segments, labels):
        mask_np = np.array(mask).astype(bool)
        overlay[mask_np] = color_mapping[label]
    # Blend the original frame with the overlay at 50% opacity
    new_frame = Image.blend(
        Image.fromarray(frame.astype(np.uint8)),
        Image.fromarray(overlay.astype(np.uint8)),
        alpha=0.5,
    )
    return new_frame
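Before annotating the whole clip, it can help to sanity-check the helpers on a single frame (a minimal sketch; it reads the first frame with OpenCV and converts it from BGR to the RGB order `annotate_single_frame` expects):
# Quick single-frame check before processing the whole video
vcap = cv2.VideoCapture('ua_detrac.mp4')
ret, frame = vcap.read()
vcap.release()
if ret:
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV reads frames as BGR
    mapping = get_color_mapping(response['hfsegmentation.label'])
    preview = annotate_single_frame(
        frame,
        response['hfsegmentation.mask'][0],
        response['hfsegmentation.label'][0],
        mapping,
    )
    plt.imshow(preview)
    plt.axis('off')
    plt.show()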
def annotate_video(segmentations, input_video_path, output_video_path, model_name='hfsegmentation'):
    # EvaDB flattens the UDF output into '<model_name>.<field>' columns
    all_segments = segmentations[f'{model_name}.mask']
    all_labels = segmentations[f'{model_name}.label']
    color_mapping = get_color_mapping(all_labels)
    vcap = cv2.VideoCapture(input_video_path)
    width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = vcap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')  # codec
    video = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))
    # Note: the query used SAMPLE 5, so result row i was computed on source
    # frame 5*i; pairing rows with consecutive frames here is a quick preview.
    frame_id = 0
    ret, frame = vcap.read()
    while ret and frame_id < len(all_segments):
        # OpenCV reads BGR; convert to RGB so overlay and legend colors match
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        segments = all_segments[frame_id]
        labels = all_labels[frame_id]
        new_frame = annotate_single_frame(frame, segments, labels, color_mapping)
        # Convert back to BGR before handing the frame to the writer
        video.write(cv2.cvtColor(np.array(new_frame), cv2.COLOR_RGB2BGR))
        # Show every fifth annotated frame inline with a legend of its labels
        if frame_id % 5 == 0:
            legend_patches = [mpatches.Patch(color=np.array(color_mapping[label]) / 255, label=label) for label in set(labels)]
            plt.imshow(new_frame)
            plt.legend(handles=legend_patches, bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
            plt.axis('off')
            plt.tight_layout()
            plt.show()
        frame_id += 1
        ret, frame = vcap.read()
    video.release()
    vcap.release()
from ipywidgets import Video
input_path = 'ua_detrac.mp4'
output_path = 'video.mp4'
annotate_video(response, input_path, output_path)
Video.from_file(output_path)
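If the ipywidgets player does not render in your environment, IPython's built-in video display is an alternative (a small sketch; `embed=True` inlines the file into the notebook):
# Fallback player using IPython's display machinery
from IPython.display import Video as IPyVideo
IPyVideo(output_path, embed=True)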