Instance segmentation
1. Enable optimal performance of the motherboard
  1.1. Enable MAX power mode
  1.2. Enable Jetson Clocks
2. Instance segmentation: image — effect preview
3. Instance segmentation: video — effect preview
4. Instance segmentation: real-time detection
  4.1. USB camera — effect preview
  4.2. CSI camera — effect preview
References
Using Python to demonstrate Ultralytics: Instance Segmentation in images, videos, and real-time detection.
Enabling MAX Power Mode on Jetson will ensure that all CPU and GPU cores are turned on:
sudo nvpmodel -m 0
Enabling Jetson Clocks will ensure that all CPU and GPU cores run at maximum frequency:
xxxxxxxxxx
sudo jetson_clocks
Use yolo11n-seg.pt to predict the images provided by the ultralytics project.
Enter the code folder:
xxxxxxxxxx
cd /home/jetson/ultralytics/ultralytics/yahboom_demo
Run the code:
xxxxxxxxxx
python3 02.segmentation_image.py
YOLO saves the output image to: /home/jetson/ultralytics/ultralytics/output/
Sample Code:
from ultralytics import YOLO

# Load the pretrained YOLO11 instance-segmentation model.
model = YOLO("/home/jetson/ultralytics/ultralytics/yolo11n-seg.pt")

# Run batched inference; the call returns a list of Results objects.
results = model("/home/jetson/ultralytics/ultralytics/assets/zidane.jpg")

# Process each Results object in the list.
for result in results:
    # boxes = result.boxes  # Boxes object for bounding box outputs
    masks = result.masks  # Masks object for segmentation mask outputs
    # keypoints = result.keypoints  # Keypoints object for pose outputs
    # probs = result.probs  # Probs object for classification outputs
    # obb = result.obb  # Oriented boxes object for OBB outputs
    result.show()  # display to screen
    result.save(filename="/home/jetson/ultralytics/ultralytics/output/zidane_output.jpg")  # save to disk
Use yolo11n-seg.pt to run prediction on a video stored in the ultralytics project (a user-provided video, not one bundled with ultralytics).
Enter the code folder:
xxxxxxxxxx
cd /home/jetson/ultralytics/ultralytics/yahboom_demo
Run the code:
xxxxxxxxxx
python3 02.segmentation_video.py
YOLO saves the output video to: /home/jetson/ultralytics/ultralytics/output/
Sample Code:
xxxxxxxxxx
import cv2
from ultralytics import YOLO

# Load the pretrained YOLO11 instance-segmentation model.
model = YOLO("/home/jetson/ultralytics/ultralytics/yolo11n-seg.pt")

# Open the video file; fail fast with a clear message if it cannot be read.
video_path = "/home/jetson/ultralytics/ultralytics/videos/people_animals.mp4"
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
    raise RuntimeError(f"Could not open video: {video_path}")

# Get the video frame size and frame rate.
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
# Some containers report 0 FPS; fall back to 30 so the writer produces a valid file.
if fps <= 0:
    fps = 30

# Define the codec and create a VideoWriter object to output the processed video.
output_path = "/home/jetson/ultralytics/ultralytics/output/02.people_animals_output.mp4"
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # You can use 'XVID' or 'mp4v' depending on your platform
out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))

try:
    # Loop through the video frames.
    while cap.isOpened():
        # Read a frame from the video.
        success, frame = cap.read()
        if not success:
            # End of the video (or a read error) was reached.
            break
        # Run YOLO inference on the frame.
        results = model(frame)
        # Visualize the results on the frame.
        annotated_frame = results[0].plot()
        # Write the annotated frame to the output video file.
        out.write(annotated_frame)
        # Display a downscaled live preview of the annotated frame.
        cv2.imshow("YOLO Inference", cv2.resize(annotated_frame, (640, 480)))
        # Break the loop if 'q' is pressed in the preview window.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
finally:
    # Release the capture and writer and close the display window
    # even if inference raises mid-loop.
    cap.release()
    out.release()
    cv2.destroyAllWindows()
Use yolo11n-seg.pt to predict the USB camera image.
Enter the code folder:
xxxxxxxxxx
cd /home/jetson/ultralytics/ultralytics/yahboom_demo
Run the code: Click the preview screen and press the q key to terminate the program!
xxxxxxxxxx
python3 02.segmentation_camera_usb.py
YOLO saves the output video to: /home/jetson/ultralytics/ultralytics/output/
Sample code:
xxxxxxxxxx
import cv2
from ultralytics import YOLO

# Load the pretrained YOLO11 instance-segmentation model.
model = YOLO("/home/jetson/ultralytics/ultralytics/yolo11n-seg.pt")

# Open the USB camera (device index 0); fail fast if it is unavailable.
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    raise RuntimeError("Could not open USB camera (device 0)")

# Get the video frame size and frame rate.
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
# Many webcams report 0 FPS; fall back to 30 so the writer produces a valid file.
if fps <= 0:
    fps = 30

# Define the codec and create a VideoWriter object to output the processed video.
output_path = "/home/jetson/ultralytics/ultralytics/output/02.segmentation_camera_usb.mp4"
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # You can use 'XVID' or 'mp4v' depending on your platform
out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))

try:
    # Loop over camera frames until the stream ends or the user quits.
    while cap.isOpened():
        # Read a frame from the camera.
        success, frame = cap.read()
        if not success:
            # Break the loop if the camera stops delivering frames.
            break
        # Run YOLO inference on the frame.
        results = model(frame)
        # Visualize the results on the frame.
        annotated_frame = results[0].plot()
        # Write the annotated frame to the output video file.
        out.write(annotated_frame)
        # Display a downscaled live preview of the annotated frame.
        cv2.imshow("YOLO Inference", cv2.resize(annotated_frame, (640, 480)))
        # Break the loop if 'q' is pressed in the preview window.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
finally:
    # Release the capture and writer and close the display window
    # even if inference raises mid-loop.
    cap.release()
    out.release()
    cv2.destroyAllWindows()
Use yolo11n-seg.pt to predict the CSI camera image.
Enter the code folder:
xxxxxxxxxx
cd /home/jetson/ultralytics/ultralytics/yahboom_demo
Run the code: Click the preview screen and press the q key to terminate the program!
xxxxxxxxxx
python3 02.segmentation_camera_csi.py
YOLO saves the output video to: /home/jetson/ultralytics/ultralytics/output/
Sample code:
xxxxxxxxxx
import cv2
from ultralytics import YOLO
from jetcam.csi_camera import CSICamera

# Load the pretrained YOLO11 instance-segmentation model.
model = YOLO("/home/jetson/ultralytics/ultralytics/yolo11n-seg.pt")

# Open the CSI camera via jetcam at a fixed capture resolution.
cap = CSICamera(capture_device=0, width=640, height=480)

# Frame size matches the CSICamera configuration above; the recording
# frame rate is fixed at 30 FPS for the output file.
frame_width = 640
frame_height = 480
fps = 30

# Define the codec and create a VideoWriter object to output the processed video.
output_path = "/home/jetson/ultralytics/ultralytics/output/02.segmentation_camera_csi.mp4"
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # You can use 'XVID' or 'mp4v' depending on your platform
out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))

try:
    # Loop over camera frames until the camera fails or the user quits.
    while True:
        # Read a frame from the camera (jetcam returns the frame directly,
        # not an (ok, frame) tuple like cv2.VideoCapture).
        frame = cap.read()
        if frame is None:
            # Break the loop if no frame is received (camera error or end of stream).
            print("No frame received, breaking the loop.")
            break
        # Run YOLO inference on the frame.
        results = model(frame)
        # Visualize the results on the frame.
        annotated_frame = results[0].plot()
        # Write the annotated frame to the output video file.
        out.write(annotated_frame)
        # Display a downscaled live preview of the annotated frame.
        cv2.imshow("YOLO Inference", cv2.resize(annotated_frame, (640, 480)))
        # Break the loop if 'q' is pressed in the preview window.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
finally:
    # NOTE(review): jetcam's CSICamera does not appear to expose a release()
    # method like cv2.VideoCapture does — guard the call so cleanup of the
    # writer and windows still runs. Confirm against the installed jetcam version.
    if hasattr(cap, "release"):
        cap.release()
    out.release()
    cv2.destroyAllWindows()