The CLI currently only supports USB cameras. CSI camera users can modify the Python code from the previous sections to call the ONNX and NCNN models instead.
cd ultralytics/ultralytics/data/yahboom_data/orange_data/runs/detect/train/weights
yolo predict model=best.onnx source=0 save=False show=True  # For multiple cameras, adjust the number after source
yolo predict model='best_ncnn_model' source=0 save=False show=True
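If several cameras are attached, index 0 may not be the one you want. Here is a minimal sketch, using only OpenCV (which the examples below already require), to probe which source indices respond; the range of 5 is an arbitrary choice for illustration:

import cv2

# Probe the first few camera indices to find usable sources
for index in range(5):
    cap = cv2.VideoCapture(index)
    if cap.isOpened():
        ok, _ = cap.read()
        print(f"source {index}: {'frame read OK' if ok else 'opens, but no frame'}")
    cap.release()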
Next, use Python to call the USB camera and the CSI camera to recognize oranges.
Use best_ncnn_model to predict the USB camera feed:
cd /home/pi/ultralytics/ultralytics/yahboom_demo
Run the code. To exit, click the preview window and press the q key to terminate the program.
python3 06.orange_camera_usb.py
YOLO saves the annotated output video to: /home/pi/ultralytics/ultralytics/output/
Sample code:
import cv2
from ultralytics import YOLO

# Load the YOLO model
# model = YOLO("/home/pi/ultralytics/ultralytics/data/yahboom_data/orange_data/runs/detect/train/weights/best.pt")
# model = YOLO("/home/pi/ultralytics/ultralytics/data/yahboom_data/orange_data/runs/detect/train/weights/best.onnx")
model = YOLO("/home/pi/ultralytics/ultralytics/data/yahboom_data/orange_data/runs/detect/train/weights/best_ncnn_model")

# Open the camera
cap = cv2.VideoCapture(0)

# Get the video frame size and frame rate
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))

# Define the codec and create a VideoWriter object to output the processed video
output_path = "/home/pi/ultralytics/ultralytics/output/06.orange_camera_usb.mp4"
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # You can use 'XVID' or 'mp4v' depending on your platform
out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))

# Loop through the video frames
while cap.isOpened():
    # Read a frame from the camera
    success, frame = cap.read()
    if success:
        # Run YOLO inference on the frame
        results = model(frame)
        # Visualize the results on the frame
        annotated_frame = results[0].plot()
        # Write the annotated frame to the output video file
        out.write(annotated_frame)
        # Display the annotated frame
        cv2.imshow("YOLO Inference", cv2.resize(annotated_frame, (640, 480)))
        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    else:
        # Break the loop if no frame could be read
        break

# Release the video capture and writer objects, and close the display window
cap.release()
out.release()
cv2.destroyAllWindows()
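One practical caveat with the USB version: some webcams report 0 for CAP_PROP_FPS, which produces a wrong-speed or unplayable .mp4. A hedged tweak to the VideoWriter setup above (the fallback value of 30 is an assumption, not part of the original code):

fps = int(cap.get(cv2.CAP_PROP_FPS))
if fps <= 0:
    fps = 30  # assumed fallback: many UVC cameras do not report a frame rate
out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))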
Use best_ncnn_model to predict the CSI camera feed:
cd /home/pi/ultralytics/ultralytics/yahboom_demo
Run the code. To exit, click the preview window and press the q key to terminate the program.
python3 06.orange_camera_csi.py
YOLO saves the annotated output video to: /home/pi/ultralytics/ultralytics/output/
Sample code:
import cv2
from picamera2 import Picamera2
from ultralytics import YOLO

# Initialize the Picamera2
picam2 = Picamera2()
picam2.preview_configuration.main.size = (640, 480)
picam2.preview_configuration.main.format = "RGB888"
picam2.preview_configuration.align()
picam2.configure("preview")
picam2.start()

# Load the YOLO11 model
# model = YOLO("/home/pi/ultralytics/ultralytics/data/yahboom_data/orange_data/runs/detect/train/weights/best.pt")
# model = YOLO("/home/pi/ultralytics/ultralytics/data/yahboom_data/orange_data/runs/detect/train/weights/best.onnx")
model = YOLO("/home/pi/ultralytics/ultralytics/data/yahboom_data/orange_data/runs/detect/train/weights/best_ncnn_model")

# Set up video output
output_path = "/home/pi/ultralytics/ultralytics/output/06.orange_camera_csi.mp4"
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output_path, fourcc, 30, (640, 480))

# Loop through the video frames
while True:
    # Capture frame-by-frame
    frame = picam2.capture_array()
    # Run YOLO11 inference on the frame
    results = model(frame)
    # Visualize the results on the frame
    annotated_frame = results[0].plot()
    # Write the frame to the video file
    out.write(annotated_frame)
    # Display the resulting frame
    cv2.imshow("Camera", annotated_frame)
    # Break the loop if 'q' is pressed
    if cv2.waitKey(1) == ord("q"):
        break

# Release resources and close windows
picam2.close()
out.release()
cv2.destroyAllWindows()
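Both scripts only draw the detections with results[0].plot(), but the same results object also exposes the raw boxes, which is useful once you want to act on a detected orange rather than just display it. A minimal sketch of reading them inside either loop (the 0.5 confidence threshold is an arbitrary choice for illustration):

# Inspect the raw detections instead of (or in addition to) plotting them
for box in results[0].boxes:
    conf = float(box.conf[0])                # detection confidence
    cls_name = model.names[int(box.cls[0])]  # class label, e.g. "orange"
    if conf > 0.5:
        x1, y1, x2, y2 = box.xyxy[0].tolist()  # pixel coordinates of the box
        print(f"{cls_name}: {conf:.2f} at ({x1:.0f}, {y1:.0f}) - ({x2:.0f}, {y2:.0f})")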