-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathyl2ds.py
158 lines (153 loc) · 5.9 KB
/
yl2ds.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
import argparse
import os
import time
from distutils.util import strtobool
import cv2
import numpy as np
from tqdm import tqdm
from yolov5_dt import Yolov5Detector
from sort import Sort
from deep_sort import DeepSort
from util import draw_bboxes, draw_detections
def _parse_bool(value):
    """Parse a truthy/falsy string to bool, mirroring distutils strtobool.

    distutils (and strtobool with it) was removed in Python 3.12, so its
    semantics are reproduced here: accepts y/yes/t/true/on/1 and
    n/no/f/false/off/0 (case-insensitive); raises ValueError otherwise.
    """
    v = str(value).strip().lower()
    if v in ("y", "yes", "t", "true", "on", "1"):
        return True
    if v in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError("invalid truth value %r" % (value,))


def main():
    """Detect hands with Yolov5 and track them with SORT or DeepSORT.

    Reads the video at --input frame by frame, runs detection + tracking,
    overlays per-frame stats (fps, current/total hand counts), and optionally
    shows the frames (--display), writes an annotated video (--out_vid), and
    dumps MOT16-format tracking lines to a text file (--out_txt).
    """
    args = get_parser().parse_args()

    if args.display:
        cv2.namedWindow("out_vid", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("out_vid", 960, 720)

    detector = Yolov5Detector(args.weights, args.img_size, args.conf_thres,
                              args.nms_max_overlap, args.device, args.agnostic_nms)
    sort = Sort()
    deepsort = DeepSort(args.deepsort_checkpoint,
                        nms_max_overlap=args.nms_max_overlap,
                        use_cuda=_parse_bool(args.use_cuda))

    assert os.path.isfile(args.input), "Error: path error, input file not found"

    inp_vid = cv2.VideoCapture(args.input)
    num_frames = int(inp_vid.get(cv2.CAP_PROP_FRAME_COUNT))

    out_vid = None
    if args.out_vid:
        # Writer geometry/fps must match the frames actually written,
        # otherwise cv2.VideoWriter silently produces an unplayable file.
        # Fall back to the previous hard-coded values when the capture
        # reports nothing usable.
        fps = inp_vid.get(cv2.CAP_PROP_FPS) or 1.0
        width = int(inp_vid.get(cv2.CAP_PROP_FRAME_WIDTH)) or 1920
        height = int(inp_vid.get(cv2.CAP_PROP_FRAME_HEIGHT)) or 1440
        out_vid = cv2.VideoWriter(
            filename=args.out_vid,
            fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
            fps=fps,
            frameSize=(width, height),
        )

    out_txt = open(args.out_txt, "w+") if args.out_txt else None

    # total_counter[track_id] holds the 1-based order in which each raw
    # tracker identity was first seen (0 = never seen). NOTE(review): a raw
    # id >= 1000 would IndexError; assumes the trackers stay below that.
    total_counter = [0] * 1000

    try:
        for frameID in tqdm(range(num_frames)):
            ret, im = inp_vid.read()
            if not ret:
                # CAP_PROP_FRAME_COUNT is only an estimate; stop cleanly
                # instead of passing a None frame to the detector.
                break
            start = time.time()
            dets = detector.detect(im)

            if args.tracker == 'sort':
                # SORT expects an (N, 5) array even when there are no detections.
                dets = np.array(dets) if len(dets) else np.empty((0, 5))
                outputs = sort.update(dets)
                # Clamp negative coordinates and cast for drawing/indexing.
                outputs = np.array([element.clip(min=0) for element in outputs]).astype(int)
            else:
                if len(dets):
                    # DeepSORT expects (center_x, center_y, width, height) boxes.
                    ccwh_boxes = [[(det[0] + det[2]) / 2, (det[1] + det[3]) / 2,
                                   det[2] - det[0], det[3] - det[1]] for det in dets]
                    confidences = np.ones(len(dets))
                    outputs, __ = deepsort.update(np.array(ccwh_boxes), confidences, im)
                else:
                    outputs = []

            current_counter = []
            if len(outputs):
                tlbr_boxes = outputs[:, :4]
                identities = current_counter = outputs[:, -1]
                ordered_identities = []
                for identity in identities:
                    if not total_counter[identity]:
                        # First sighting of this raw id: assign the next
                        # sequential display number.
                        total_counter[identity] = max(total_counter) + 1
                    ordered_identities.append(total_counter[identity])
                im = draw_bboxes(im, tlbr_boxes, ordered_identities)

                if out_txt is not None:
                    # MOT16 line: frame, id, x, y, w, h, conf, class, visibility.
                    for i in range(len(ordered_identities)):
                        tlbr = tlbr_boxes[i]
                        line = [frameID + 1, ordered_identities[i], tlbr[0], tlbr[1],
                                tlbr[2] - tlbr[0], tlbr[3] - tlbr[1], 1, 1, 1]
                        out_txt.write(",".join(str(item) for item in line) + "\n")

            end = time.time()
            elapsed = max(end - start, 1e-6)  # avoid ZeroDivisionError on very fast frames
            im = cv2.putText(im, "Frame ID: "+str(frameID+1), (20,30), 0, 5e-3 * 200, (0,255,0), 2)
            time_fps = "Time: {}s, fps: {}".format(round(end - start, 2), round(1 / elapsed, 2))
            im = cv2.putText(im, time_fps,(20, 60), 0, 5e-3 * 200, (0,255,0), 3)
            im = cv2.putText(im, args.weights + ' ' + args.tracker, (20, 90), 0, 5e-3*200, (0,255,0), 3)
            im = cv2.putText(im, "Current Hand Counter: "+str(len(current_counter)),(20, 120), 0, 5e-3 * 200, (0,255,0), 2)
            im = cv2.putText(im, "Total Hand Counter: "+str(max(total_counter)), (20, 150), 0, 5e-3 * 200, (0,255,0), 2)

            if args.display:
                cv2.imshow("out_vid", im)
                cv2.waitKey(1)
            if out_vid is not None:
                out_vid.write(im)
    finally:
        # Release handles even if detection/tracking raises mid-video.
        inp_vid.release()
        if out_vid is not None:
            out_vid.release()
        if out_txt is not None:
            out_txt.close()
def get_parser():
    """Build the command-line parser for the Yolov5 + (Deep)SORT demo.

    Returns:
        argparse.ArgumentParser: configured parser; call ``.parse_args()``.
    """
    def _str2bool(value):
        # argparse's `type=bool` is a trap: bool("False") is True because any
        # non-empty string is truthy, so `--display False` enabled display.
        # Parse the common truthy/falsy spellings explicitly instead, keeping
        # the existing `--display True/False` CLI shape backward-compatible.
        v = str(value).strip().lower()
        if v in ("y", "yes", "t", "true", "on", "1"):
            return True
        if v in ("n", "no", "f", "false", "off", "0"):
            return False
        raise argparse.ArgumentTypeError("expected a boolean, got %r" % (value,))

    parser = argparse.ArgumentParser(description="Yolov5 to (Deep)SORT demo")
    parser.add_argument("--input",
                        type=str,
                        default='/media/data3/EgoCentric_Nafosted/non_skip/train/',
                        help='path to input video',
                        )
    parser.add_argument("--tracker",
                        type=str,
                        default='sort',
                        help='tracker type, sort or deepsort',
                        )
    parser.add_argument("--deepsort_checkpoint",
                        type=str,
                        default="deep_sort/deep/checkpoint/ckpt.t7",
                        help='Cosine metric learning model checkpoint',
                        )
    parser.add_argument(
        "--max_dist",
        type=float,
        default=0.3,
        help="Max cosine distance",
    )
    parser.add_argument("--nms_max_overlap",
                        type=float,
                        default=0.5,
                        help='Non-max suppression threshold',
                        )
    parser.add_argument('--weights', type=str, default='weights/best.pt', help='model.pt path')
    parser.add_argument(
        "--conf-thres",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown",
    )
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--device', default='cuda:0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument(
        "--display",
        type=_str2bool,
        default=False,
        help="Streaming frames to display",
    )
    parser.add_argument(
        "--out_vid",
        type=str,
        default="output_video.avi",
        help="Output video",
    )
    parser.add_argument(
        "--use_cuda",
        type=str,
        default="True",
        help="Use GPU if true, else use CPU only",
    )
    parser.add_argument(
        "--out_txt",
        type=str,
        default="output_txt.txt",
        help="Write tracking results in MOT16 format to file seqtxt2write. To evaluate using pymotmetrics",
    )
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser
# Script entry point: parse CLI args and run the detect-and-track demo.
if __name__ == "__main__":
    main()