import time
import numpy as np
import cv2

#Video capture
cap = cv2.VideoCapture(r'D:\University\Fontys\Year4\Minor\Vision\Level 2\Lv2_Ass10 - People counter\video.mp4')
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
video = cv2.VideoWriter(r'E:/6.avi', fourcc, 25, size)
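# Note: the writer's frame rate is hardcoded to 25 fps; if the source clip runs at a
# different rate, cap.get(cv2.CAP_PROP_FPS) could be passed instead so playback speed matches.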

#Filtering
#fgbg = cv2.createBackgroundSubtractorMOG2() # background sub option 1
fgbg = cv2.createBackgroundSubtractorKNN() # background sub option 2
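# Both subtractors learn a background model over time and mark moving pixels as foreground;
# KNN is used here, with MOG2 kept above as a commented-out alternative.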

#Global variables
current_people_detected = 0
prev_people_detected = 0
total_people_detected = 0
start_detected_timer = 0
stop_detected_timer = 0

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # make sure the frame is 8-bit before further processing
    frame = cv2.convertScaleAbs(frame)
    # grayscale conversion
    imgray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) 
    # frame with background subtraction
    fgmask = fgbg.apply(imgray) 
    # apply gaussian blur
    blur_video = cv2.GaussianBlur(fgmask, (9, 9), 0)
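    # Blurring the binary foreground mask smooths its edges, so fragments belonging to one
    # person are more likely to merge and be picked up as a single blob.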
    # apply bilateral blur
    bilateral_video = cv2.bilateralFilter(blur_video,20,200,300)
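    # Note: bilateral_video is computed but not used below; blob detection runs on blur_video.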
    # set up blob (people) detection parameters
    params = cv2.SimpleBlobDetector_Params()

    ''' Set parameters of blob detection'''
    params.minThreshold = 0
    params.maxThreshold = 255

    params.filterByArea = True
    params.minArea = 2500
    params.maxArea = 5000

    params.filterByCircularity = True
    params.minCircularity = 0.1
    params.maxCircularity = 1

    params.filterByConvexity = True
    params.minConvexity = 0.1
    params.maxConvexity = 1

    params.filterByInertia = True
    params.minInertiaRatio = 0.2
    params.maxInertiaRatio = 1

    params.filterByColor = True
    params.blobColor = 255 # 0 = black / 255 = white

    params.minDistBetweenBlobs = 50
    ''' End of parameter settings'''
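    # Note: these parameters (and the detector created just below) do not change between
    # frames, so they could be set up once before the loop to avoid repeating the work.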

    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        detector = cv2.SimpleBlobDetector(params) # Create a detector with the parameters
    else:
        detector = cv2.SimpleBlobDetector_create(params) # Create a detector with the parameters

    # Detect the blobs on the filtered video
    keypoints_bg = detector.detect(blur_video)
    # Draw blobs with appropriate size on the filtered video
    im_with_keypoints_bg = cv2.drawKeypoints(blur_video, keypoints_bg, np.array([]), (0,0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # Draw blobs with appropriate size on the normal video
    im_with_keypoints_normal = cv2.drawKeypoints(frame, keypoints_bg, np.array([]), (0,0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
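    # DRAW_RICH_KEYPOINTS draws each blob as a circle whose diameter reflects the detected blob size.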

    ''' People Counter'''
    current_people_detected = len(keypoints_bg) # The current people on screen are the current blobs detected
    if current_people_detected < prev_people_detected: # fewer blobs than before -> start the timer
        start_detected_timer = time.time()

    if current_people_detected > prev_people_detected: # more blobs than before -> stop the timer
        stop_detected_timer = time.time()

        time_detected_person = stop_detected_timer - start_detected_timer # calculate the time between blob detections
        
        # Threshold found by testing: too low and flicker gets counted, too high and groups of people are missed.
        # Values from 0.2 to 0.35 worked during testing.
        # The presented video was run with 0.25; 0.3 gives fewer false positives but struggles with groups of people.
        if time_detected_person >= 0.25:
            total_people_detected = total_people_detected + current_people_detected - prev_people_detected

    prev_people_detected = current_people_detected
    ''' End of People Counter'''

    # Text for current people detected
    text_current_people = "People on Screen: " + str(len(keypoints_bg))
    cv2.putText(im_with_keypoints_bg, text_current_people, (10, 275), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)
    # Text for total number of people detected
    text_total_people = "Total People: " + str(total_people_detected)
    cv2.putText(im_with_keypoints_bg, text_total_people, (230, 275), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)
    # Write and display the filtered frame with blobs
    video.write(im_with_keypoints_bg)
    cv2.imshow('People Counter - Filtered View', im_with_keypoints_bg)

    # Same text overlays on the original frame
    cv2.putText(im_with_keypoints_normal, text_current_people, (10, 275), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)
    cv2.putText(im_with_keypoints_normal, text_total_people, (230, 275), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)
    # Display the original frame with blobs
    cv2.imshow('People Counter - Normal View', im_with_keypoints_normal)
    k = cv2.waitKey(10) & 0xff
    if k == 27: # Esc stops playback early
        break

# Release the capture and writer so the output video is finalized, and close the windows
cap.release()
video.release()
cv2.destroyAllWindows()