Instance Segmentation

1. Model Introduction
2. Instance segmentation: image
3. Instance segmentation: video
4. Instance segmentation: real-time detection
4.1. USB camera
4.2. CSI camera
This tutorial uses Python to demonstrate Ultralytics instance segmentation on images, videos, and real-time camera feeds.
1. Model Introduction

Instance segmentation goes a step further than object detection: it identifies individual objects in an image and separates them from the rest of the image.
An instance segmentation model outputs a set of masks or contours that delineate each object in the image, along with a class label and confidence score for each object. Instance segmentation is very useful when you need to know not only where objects are in an image, but also their exact shape.
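To make these outputs concrete, the short sketch below (using the model and image paths from the demos later in this tutorial) reads the mask outlines, class labels, and confidence scores out of an Ultralytics Results object:

from ultralytics import YOLO

# Load the segmentation model used throughout this tutorial
model = YOLO("/home/pi/ultralytics/ultralytics/yolo11n-seg.pt")
results = model("/home/pi/ultralytics/ultralytics/assets/zidane.jpg")

result = results[0]
if result.masks is not None:
    for polygon, cls_id, conf in zip(result.masks.xy, result.boxes.cls, result.boxes.conf):
        # polygon is an (N, 2) array of outline points for one detected object
        print(model.names[int(cls_id)], f"{float(conf):.2f}", polygon.shape)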
2. Instance segmentation: image

Use yolo11n-seg.pt to predict the sample images that come with the ultralytics project.
Enter the code folder:
cd /home/pi/ultralytics/ultralytics/yahboom_demo
Run the code:
python3 02.segmentation_image.py
The YOLO output image is saved to: /home/pi/ultralytics/ultralytics/output/
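As an aside (not part of the original demo), if the ultralytics package is on your PATH, the same prediction can also be run with its command-line interface, using the same model and image paths as the script below:

yolo segment predict model=/home/pi/ultralytics/ultralytics/yolo11n-seg.pt source=/home/pi/ultralytics/ultralytics/assets/zidane.jpg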
Sample code:
from ultralytics import YOLO
# Load a model
model = YOLO("/home/pi/ultralytics/ultralytics/yolo11n-seg.pt")
# Run batched inference on a list of images
results = model("/home/pi/ultralytics/ultralytics/assets/zidane.jpg") # return a list of Results objects
# Process results list
for result in results:
    # boxes = result.boxes  # Boxes object for bounding box outputs
    masks = result.masks  # Masks object for segmentation mask outputs
    # keypoints = result.keypoints  # Keypoints object for pose outputs
    # probs = result.probs  # Probs object for classification outputs
    # obb = result.obb  # Oriented boxes object for OBB outputs
    result.show()  # display to screen
    result.save(filename="/home/pi/ultralytics/ultralytics/output/zidane_output.jpg")  # save to disk
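The mask pixels themselves are also accessible. The sketch below is a hypothetical follow-up, not part of the original demo; it assumes the results list produced by the sample code above, converts the first mask to a NumPy array, and measures its area:

result = results[0]
if result.masks is not None:
    mask = result.masks.data[0].cpu().numpy()  # (H, W) array of 0.0/1.0 values
    print("mask pixel area:", int(mask.sum()))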
3. Instance segmentation: video

Use yolo11n-seg.pt to predict a video stored under the ultralytics project (a user-supplied video, not one bundled with ultralytics).
Enter the code folder:
cd /home/pi/ultralytics/ultralytics/yahboom_demo
Run the code:
python3 02.segmentation_video.py
The YOLO output video is saved to: /home/pi/ultralytics/ultralytics/output/
Sample code:
import cv2
from ultralytics import YOLO
# Load the YOLO model
model = YOLO("/home/pi/ultralytics/ultralytics/yolo11n-seg.pt")
# Open the video file
video_path = "/home/pi/ultralytics/ultralytics/videos/people_animals.mp4"
cap = cv2.VideoCapture(video_path)
# Get the video frame size and frame rate
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
# Define the codec and create a VideoWriter object to output the processed video
output_path = "/home/pi/ultralytics/ultralytics/output/02.people_animals_output.mp4"
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # You can use 'XVID' or 'mp4v' depending on your platform
out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
# Loop through the video frames
while cap.isOpened():
    # Read a frame from the video
    success, frame = cap.read()
    if success:
        # Run YOLO inference on the frame
        results = model(frame)
        # Visualize the results on the frame
        annotated_frame = results[0].plot()
        # Write the annotated frame to the output video file
        out.write(annotated_frame)
        # Display the annotated frame
        cv2.imshow("YOLO Inference", cv2.resize(annotated_frame, (640, 480)))
        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    else:
        # Break the loop if the end of the video is reached
        break

# Release the video capture and writer objects, and close the display window
cap.release()
out.release()
cv2.destroyAllWindows()
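On a Raspberry Pi, per-frame inference may be slower than the video's frame rate. One common workaround, sketched below under the assumption that skipping frames is acceptable for your use case, is to run inference only on every Nth frame and reuse the last annotation in between (N=3 is an illustrative value, not from the original tutorial):

import cv2
from ultralytics import YOLO

model = YOLO("/home/pi/ultralytics/ultralytics/yolo11n-seg.pt")
cap = cv2.VideoCapture("/home/pi/ultralytics/ultralytics/videos/people_animals.mp4")

N = 3  # run inference on every 3rd frame (illustrative value)
frame_idx = 0
annotated_frame = None
while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    if frame_idx % N == 0:
        # Refresh the annotation only on every Nth frame
        annotated_frame = model(frame)[0].plot()
    frame_idx += 1
    cv2.imshow("YOLO Inference", cv2.resize(annotated_frame, (640, 480)))
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()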
4. Instance segmentation: real-time detection

4.1. USB camera

Use yolo11n-seg.pt to run segmentation on the live USB camera feed.
Enter the code folder:
cd /home/pi/ultralytics/ultralytics/yahboom_demo
Run the code (click the preview window and press the q key to terminate the program):
python3 02.segmentation_camera_usb.py
The YOLO output video is saved to: /home/pi/ultralytics/ultralytics/output/
Sample code:
import cv2
from ultralytics import YOLO
# Load the YOLO model
model = YOLO("/home/pi/ultralytics/ultralytics/yolo11n-seg.pt")
# Open the camera
cap = cv2.VideoCapture(0)
# Get the video frame size and frame rate
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
# Define the codec and create a VideoWriter object to output the processed video
output_path = "/home/pi/ultralytics/ultralytics/output/02.segmentation_camera_usb.mp4"
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # You can use 'XVID' or 'mp4v' depending on your platform
out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
# Loop through the video frames
while cap.isOpened():
    # Read a frame from the camera
    success, frame = cap.read()
    if success:
        # Run YOLO inference on the frame
        results = model(frame)
        # Visualize the results on the frame
        annotated_frame = results[0].plot()
        # Write the annotated frame to the output video file
        out.write(annotated_frame)
        # Display the annotated frame
        cv2.imshow("YOLO Inference", cv2.resize(annotated_frame, (640, 480)))
        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    else:
        # Break the loop if the frame could not be read
        break

# Release the video capture and writer objects, and close the display window
cap.release()
out.release()
cv2.destroyAllWindows()
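Note that many USB webcams report 0 for CAP_PROP_FPS, which makes the VideoWriter above produce an unplayable file. A small guard, sketched below with an assumed fallback of 30 FPS, avoids this:

import cv2

cap = cv2.VideoCapture(0)
fps = int(cap.get(cv2.CAP_PROP_FPS))
if fps <= 0:
    fps = 30  # assumed fallback; pick a rate that matches your camera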
4.2. CSI camera

Use yolo11n-seg.pt to run segmentation on the live CSI camera feed.
Enter the code folder:
cd /home/pi/ultralytics/ultralytics/yahboom_demo
Run the code (click the preview window and press the q key to terminate the program):
python3 02.segmentation_camera_csi.py
The YOLO output video is saved to: /home/pi/ultralytics/ultralytics/output/
Sample code:
import cv2
from picamera2 import Picamera2
from ultralytics import YOLO
# Initialize the Picamera2
picam2 = Picamera2()
picam2.preview_configuration.main.size = (640, 480)
picam2.preview_configuration.main.format = "RGB888"
picam2.preview_configuration.align()
picam2.configure("preview")
picam2.start()
# Load the YOLO11 model
model = YOLO("/home/pi/ultralytics/ultralytics/yolo11n-seg.pt")
# Set up video output
output_path = "/home/pi/ultralytics/ultralytics/output/02.segmentation_camera_csi.mp4"
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output_path, fourcc, 30, (640, 480))
# Loop through the video frames
while True:
    # Capture frame-by-frame
    frame = picam2.capture_array()
    # Run YOLO11 inference on the frame
    results = model(frame)
    # Visualize the results on the frame
    annotated_frame = results[0].plot()
    # Write the frame to the video file
    out.write(annotated_frame)
    # Display the resulting frame
    cv2.imshow("Camera", annotated_frame)
    # Break the loop if 'q' is pressed
    if cv2.waitKey(1) == ord("q"):
        break

# Release resources and close windows
picam2.close()
out.release()
cv2.destroyAllWindows()
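If the CSI pipeline lags on the Pi, one knob worth trying is the imgsz argument of the predict call, which lowers the model's input resolution (the default is 640); the 320 below is an illustrative value that trades some accuracy for speed:

# Inside the loop above, an alternative inference line (illustrative):
results = model(frame, imgsz=320)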