import os
import sys
import time
from datetime import datetime
from pathlib import Path
import numpy as np
import cv2
import json
import torch
from dotenv import load_dotenv

# Load site configuration before reading any env vars
load_dotenv("/home/cctv/plitter/camera_config.env")

root_dir = os.getenv('root_dir', '/'.join(os.path.abspath(__file__).split('/')[0:-2]))

# Make the local tracker checkout importable before importing its modules
if os.path.join(root_dir, 'Yolov5_StrongSORT_OSNet') not in sys.path:
    sys.path.append(os.path.join(root_dir, 'Yolov5_StrongSORT_OSNet'))
if os.path.join(root_dir, 'Yolov5_StrongSORT_OSNet/yolov5') not in sys.path:
    sys.path.append(os.path.join(root_dir, 'Yolov5_StrongSORT_OSNet/yolov5'))

# DeepSORT dependencies
from deep_sort.utils.parser import get_config
from deep_sort.deep_sort import DeepSort

# Compute tiled slice boxes (SAHI-style) covering the frame with slight overlap
def get_slice_bboxes(image_height, image_width, slice_height, slice_width, overlap_height_ratio, overlap_width_ratio):
    slice_bboxes = []
    y_max = y_min = 0
    y_overlap = int(overlap_height_ratio * slice_height)
    x_overlap = int(overlap_width_ratio * slice_width)
    while y_max < image_height:
        x_min = x_max = 0
        y_max = y_min + slice_height
        while x_max < image_width:
            x_max = x_min + slice_width
            if y_max > image_height or x_max > image_width:
                xmax = min(image_width, x_max)
                ymax = min(image_height, y_max)
                xmin = max(0, xmax - slice_width)
                ymin = max(0, ymax - slice_height)
                slice_bboxes.append([xmin, ymin, xmax, ymax])
            else:
                slice_bboxes.append([x_min, y_min, x_max, y_max])
            x_min = x_max - x_overlap
        y_min = y_max - y_overlap
    return slice_bboxes
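
# For reference: with a 1920x1080 frame, 800x752 slices, and 4% overlap, this
# yields a 3x2 grid of six overlapping windows, e.g. [0, 0, 800, 752],
# [768, 0, 1568, 752], [1120, 0, 1920, 752], ...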

colors = [(0,255,255), (0,0,255), (255,0,0), (0,255,0)]*20

yolo_weights = Path(root_dir) / 'models' / os.getenv('weights', 'pLitterFloat_800x752_to_640x640.pt')

FRAME_WIDTH = int(os.getenv('frame_width', 1920))
FRAME_HEIGHT = int(os.getenv('frame_height', 1080))
interval = int(os.getenv('interval', 10))
work_in_night = os.getenv('work_in_night', 'True') != 'False'  # env vars arrive as strings
weights_url = os.getenv('weights_url', None)
data_dir = os.getenv('data_dir', os.path.join(root_dir, 'data'))  # output dir for frames and JSON; default path is an assumption
os.makedirs(data_dir, exist_ok=True)

# Import YOLOv5 helpers (resolvable via the sys.path entries added above)
from yolov5.models.common import DetectMultiBackend
from yolov5.utils.general import check_img_size, non_max_suppression, xyxy2xywh

# Initialize DeepSORT
cfg = get_config()
cfg.merge_from_file(os.path.join(root_dir, 'Yolov5_StrongSORT_OSNet/deep_sort/configs/deep_sort.yaml'))
deepsort = DeepSort(
    cfg.DEEPSORT.REID_CKPT,
    max_dist=cfg.DEEPSORT.MAX_DIST,
    max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
    max_age=cfg.DEEPSORT.MAX_AGE,
    n_init=cfg.DEEPSORT.N_INIT,
    nn_budget=cfg.DEEPSORT.NN_BUDGET,
    use_cuda=True
)
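
# DeepSORT gating, roughly: MAX_DIST bounds the ReID (cosine) distance for an
# appearance match, MAX_IOU_DISTANCE bounds the IoU association step, MAX_AGE is
# how many frames a lost track survives, N_INIT is the hits needed to confirm a
# track, and NN_BUDGET caps each track's appearance-feature gallery.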

slice_width = int(os.getenv("slice_width", 800))
slice_height = int(os.getenv("slice_height", 752))
slice_boxes = get_slice_bboxes(FRAME_HEIGHT, FRAME_WIDTH, slice_height, slice_width, 0.04, 0.04)

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
half = device.type != 'cpu'  # fp16 inference only on GPU

# Fetch the model weights if missing; fall back to stock yolov5s
if not os.path.isfile(yolo_weights):
    try:
        torch.hub.download_url_to_file(weights_url, str(yolo_weights))
    except Exception:
        yolo_weights = Path(root_dir) / 'models' / 'yolov5s.pt'

model = DetectMultiBackend(yolo_weights, device=device, fp16=half)
stride, names, pt = model.stride, model.names, model.pt

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, FRAME_WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT)
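# Note: some camera drivers silently ignore cap.set(); the delivered size can be
# verified with cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT).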
imgsz = check_img_size(640, s=stride)  # round to a stride multiple (no-op for 640 with stride 32)

with torch.no_grad():
    while True:
        current_time = datetime.now().strftime("%H:%M:%S")
        # String comparison works because HH:MM:SS is lexicographically ordered
        if current_time >= '18:00:00' or current_time < '06:00:00':
            if not work_in_night:
                print('Night mode off: pausing detection')
                time.sleep(60)
                continue

        ret, img0 = cap.read()
        if not ret or img0 is None:
            continue

        im_name = datetime.now().strftime('%Y%m%d_%H%M%S')  # shared timestamp name for the saved frame and JSON
        pred_json = {'preds': []}

        preds = torch.empty((0, 6), dtype=torch.float16 if half else torch.float32)  # rows: [x1, y1, x2, y2, conf, cls]

        # Run detection on each slice, then map boxes back to full-frame coordinates
        for box in slice_boxes:
            img = img0[box[1]:box[3], box[0]:box[2], :]
            h, w, _ = img.shape
            h_r, w_r = h / imgsz, w / imgsz  # ratios to undo the resize below
            img = cv2.resize(img, (imgsz, imgsz), interpolation=cv2.INTER_LINEAR)
            img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
            img = np.ascontiguousarray(img)
            img = torch.from_numpy(img).to(device)
            img = img.half() if half else img.float()
            img /= 255.0  # 0-255 to 0.0-1.0

            if img.ndimension() == 3:
                img = img.unsqueeze(0)

            pred = model(img)
            pred = non_max_suppression(pred, conf_thres=0.4, iou_thres=0.5)
            proc_pred = pred[0].cpu()  # detections for this slice: [x1, y1, x2, y2, conf, cls]

            # Rescale slice-local xyxy boxes to full-frame coordinates
            proc_pred[:, [0, 2]] = proc_pred[:, [0, 2]] * w_r + box[0]
            proc_pred[:, [1, 3]] = proc_pred[:, [1, 3]] * h_r + box[1]
            preds = torch.cat((preds, proc_pred), 0)
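
        # Note: slices are NMS'd independently, so objects in the overlap strips can
        # be detected twice; a global NMS pass over `preds` here would merge duplicates.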

        if len(preds):
            xywhs = xyxy2xywh(preds[:, 0:4])
            confs = preds[:, 4]
            clss = preds[:, 5]

            # Update DeepSORT tracker
            outputs = deepsort.update(xywhs.cpu(), confs.cpu(), clss.cpu(), img0)

            for output in outputs:
                bbox_left, bbox_top, bbox_w, bbox_h, track_id, class_id = output
                pred_json['preds'].append({
                    'id': int(track_id),
                    'category': names[int(class_id)],
                    'bbox': [float(v) for v in (bbox_left, bbox_top, bbox_w, bbox_h)],  # plain floats for JSON
                })

        # Persist the frame and its tracking results under matching names
        cv2.imwrite(os.path.join(data_dir, im_name + '.jpg'), img0)
        with open(os.path.join(data_dir, im_name + '.json'), 'w') as f:
            json.dump(pred_json, f)
        time.sleep(interval)