Permalink
Cannot retrieve contributors at this time
Name already in use
A tag already exists with the provided branch name. Many Git commands accept both tag and branch names, so creating this branch may cause unexpected behavior. Are you sure you want to create this branch?
5062CEM_OpenCV_Demo_Python/object_detection.py
Go to fileThis commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
78 lines (60 sloc)
3.4 KB
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from classes.ObjectDetector import ObjectDetector
import cv2
import time

if __name__ == "__main__":
    # Instance of the project's detector wrapper (feature detection + matching).
    object_detector = ObjectDetector()

    # GStreamer pipeline to the webcam (/dev/video2 = Sony A7r) producing
    # 720x480 @ 30fps raw frames into an appsink that OpenCV can read.
    PIPELINE = "v4l2src device=/dev/video2 ! video/x-raw, width=720,height=480, framerate=30/1 ! videoconvert ! videoscale ! appsink"
    cap = cv2.VideoCapture(PIPELINE, cv2.CAP_GSTREAMER)

    # Select the SURF feature detector (BRISK is the alternative the class supports).
    object_detector.set_detector("SURF")

    # Load the training image we want to find in the live feed.
    training_img = cv2.imread("./images/staff_id.jpg", cv2.IMREAD_COLOR)

    # Keypoints (areas of interest) and their descriptor for the training image.
    training_keypoints = object_detector.get_keypoints(training_img)
    training_keypoints, training_descriptor = object_detector.get_descriptor(training_img, training_keypoints)

    # Bail out early if the camera pipeline could not be opened.
    if not cap.isOpened():
        raise Exception("Opening the Camera Failed")

    # Frame counter and start time, used to compute a running FPS figure.
    frame_number = 0
    start_time = time.time()

    # Grab frames until the user presses ESC.
    while True:
        # ret indicates whether a frame was grabbed; frame is the image itself.
        ret, frame = cap.read()

        # An empty frame means the capture failed — abort.
        if frame is None:
            raise Exception("Error reading frame")

        # Keypoints for the current camera frame.
        keypoints = object_detector.get_keypoints(frame)

        # No keypoints (e.g. an all-black frame during camera warm-up):
        # skip this frame and try the next one.
        if keypoints is None:
            continue

        # Descriptor for the frame's keypoints.
        keypoints, descriptor = object_detector.get_descriptor(frame, keypoints)

        # Match training descriptor against the frame descriptor to see
        # whether the object is present.
        matches = object_detector.match(training_descriptor, descriptor)

        # Boundaries of the detected object, drawn as a green polygon.
        detected_boundaries = object_detector.detect_object(training_img, training_keypoints, keypoints, matches)
        cv2.polylines(frame, [detected_boundaries], True, (0, 255, 0), 1, cv2.LINE_AA, 0)

        # Overlay the FPS in the top-left corner of the frame.
        frame_number += 1
        cv2.putText(frame, str(object_detector.calculate_fps(frame_number, start_time)), (0, 15),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

        # Show the annotated camera frame.
        cv2.imshow("Window", frame)

        # Wait 1 ms for a key press; ESC (27) ends the loop.
        if cv2.waitKey(1) == 27:
            # Release the camera and tear down any created windows before exiting.
            cap.release()
            cv2.destroyAllWindows()
            break