-import numpy as np
 import math
+
 import cv2
 import matplotlib
+import matplotlib.pyplot as plt
+import numpy as np
 from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
 from matplotlib.figure import Figure
-import numpy as np
-import matplotlib.pyplot as plt
-import cv2


 def padRightDownCorner(img, stride, padValue):
     h = img.shape[0]
     w = img.shape[1]

     pad = 4 * [None]
-    pad[0] = 0 # up
-    pad[1] = 0 # left
-    pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
-    pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
+    pad[0] = 0  # up
+    pad[1] = 0  # left
+    pad[2] = 0 if (h % stride == 0) else stride - (h % stride)  # down
+    pad[3] = 0 if (w % stride == 0) else stride - (w % stride)  # right

     img_padded = img
-    pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
+    pad_up = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))
     img_padded = np.concatenate((pad_up, img_padded), axis=0)
-    pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
+    pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))
     img_padded = np.concatenate((pad_left, img_padded), axis=1)
-    pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
+    pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))
     img_padded = np.concatenate((img_padded, pad_down), axis=0)
-    pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
+    pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))
     img_padded = np.concatenate((img_padded, pad_right), axis=1)

     return img_padded, pad
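A minimal usage sketch (editor's illustration, not part of the commit; the image size, stride and pad value are assumed): the function pads only the bottom and right edges so that height and width become multiples of the network stride.

import numpy as np

img = np.zeros((37, 50, 3), dtype=np.uint8)                        # hypothetical 37x50 BGR image
img_padded, pad = padRightDownCorner(img, stride=8, padValue=128)
print(img_padded.shape)                                            # (40, 56, 3)
print(pad)                                                         # [0, 0, 3, 6] -> up, left, down, right
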

+
 # transfer the caffe model weights to pytorch, matching the layer names
 def transfer(model, model_weights):
     transfered_model_weights = {}
     for weights_name in model.state_dict().keys():
         transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
     return transfered_model_weights
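A hedged sketch of how transfer is typically called (the torch import, model class and checkpoint path below are assumptions, not shown in this diff): the Caffe-converted weight dict is keyed without the leading sub-module name, so transfer re-keys each entry to match model.state_dict().

import torch
from src.model import bodypose_model                    # assumed import path
model = bodypose_model()                                 # assumed model class
weights = torch.load('model/body_pose_model.pth')        # assumed checkpoint location
model.load_state_dict(transfer(model, weights))
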

+
+def transfer2coordinate(candidate, subset):
+    coordinates = []
+    keyMap = ['nose', 'neck',
+              'left_shoulder', 'left_elbow', 'left_wrist',
+              'right_shoulder', 'right_elbow', 'right_wrist',
+              'left_hip', 'left_knee', 'left_ankle',
+              'right_hip', 'right_knee', 'right_ankle',
+              'left_eyebrow_peak', 'left_eyebrow_tail',
+              'right_eyebrow_peak', 'right_eyebrow_tail'
+              ]
+    for n in range(len(subset)):
+        keypoint = {}
+        for i in range(18):
+            index = int(subset[n][i])
+            if index == -1:
+                continue
+            x, y = candidate[index][0:2]
+            key = keyMap[i]
+            keypoint[key] = (x, y)
+        coordinates.append(keypoint)
+    return coordinates
+
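A small usage sketch for the new helper (editor's illustration; the candidate/subset values are made up): each candidate row carries at least (x, y) for one detected part, and each subset row holds, per person, the candidate index of each of the 18 mapped parts, with -1 meaning the part was not found. Any trailing subset columns are ignored, since only the first 18 are read.

import numpy as np

candidate = np.array([[120.0, 80.0, 0.9, 0.0],           # part 0: x, y (extra columns ignored by the helper)
                      [130.0, 140.0, 0.8, 1.0]])         # part 1
subset = np.array([[0, 1] + [-1] * 16 + [1.7, 2]])       # one person: nose -> 0, neck -> 1, rest missing
people = transfer2coordinate(candidate, subset)
# people == [{'nose': (120.0, 80.0), 'neck': (130.0, 140.0)}]
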
 # draw the body keypoints and limbs
 def draw_bodypose(canvas, candidate, subset):
     stickwidth = 4
@@ -74,6 +97,7 @@ def draw_bodypose(canvas, candidate, subset):
     # plt.imshow(canvas[:, :, [2, 1, 0]])
     return canvas

+
 def draw_handpose(canvas, all_hand_peaks, show_number=False):
     edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
              [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
@@ -90,10 +114,10 @@ def draw_handpose(canvas, all_hand_peaks, show_number=False):

     for peaks in all_hand_peaks:
         for ie, e in enumerate(edges):
-            if np.sum(np.all(peaks[e], axis=1)==0) == 0:
+            if np.sum(np.all(peaks[e], axis=1) == 0) == 0:
                 x1, y1 = peaks[e[0]]
                 x2, y2 = peaks[e[1]]
-                ax.plot([x1, x2], [y1, y2], color=matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0]))
+                ax.plot([x1, x2], [y1, y2], color=matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]))

         for i, keyponit in enumerate(peaks):
             x, y = keyponit
@@ -104,17 +128,19 @@ def draw_handpose(canvas, all_hand_peaks, show_number=False):
     canvas = np.fromstring(bg.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3)
     return canvas
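A hedged sketch of the expected input (editor's illustration; the peak layout is an assumption): each element of all_hand_peaks is taken to be a (21, 2) array of (x, y) hand keypoints, where a point whose coordinates are all zero counts as missing, so an edge is drawn only when both of its endpoints are non-zero.

import numpy as np

canvas = np.zeros((240, 320, 3), dtype=np.uint8)         # hypothetical background image
peaks = np.zeros((21, 2), dtype=int)
peaks[0] = (160, 200)                                    # wrist
peaks[1] = (150, 190)                                    # first thumb joint
canvas = draw_handpose(canvas, [peaks])                  # only the 0-1 edge passes the visibility test
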

+
 # image drawn by opencv is not good.
 def draw_handpose_by_opencv(canvas, peaks, show_number=False):
     edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
              [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
     # cv2.rectangle(canvas, (x, y), (x+w, y+w), (0, 255, 0), 2, lineType=cv2.LINE_AA)
     # cv2.putText(canvas, 'left' if is_left else 'right', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
     for ie, e in enumerate(edges):
-        if np.sum(np.all(peaks[e], axis=1)==0) == 0:
+        if np.sum(np.all(peaks[e], axis=1) == 0) == 0:
             x1, y1 = peaks[e[0]]
             x2, y2 = peaks[e[1]]
-            cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])*255, thickness=2)
+            cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255,
+                     thickness=2)

     for i, keyponit in enumerate(peaks):
         x, y = keyponit
@@ -123,6 +149,7 @@ def draw_handpose_by_opencv(canvas, peaks, show_number=False):
             cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
     return canvas

+
 # detect hand according to body pose keypoints
 # please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
 def handDetect(candidate, subset, oriImg):
@@ -138,7 +165,7 @@ def handDetect(candidate, subset, oriImg):
         if not (has_left or has_right):
             continue
         hands = []
-        #left hand
+        # left hand
         if has_left:
             left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
             x1, y1 = candidate[left_shoulder_index][:2]
@@ -189,6 +216,7 @@ def handDetect(candidate, subset, oriImg):
     '''
     return detect_result
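A hedged sketch of how the detector output is typically consumed downstream (editor's illustration; the [x, y, w, is_left] layout of each detect_result entry is an assumption, since the docstring above is not shown in full in this diff):

for x, y, w, is_left in handDetect(candidate, subset, oriImg):
    hand_crop = oriImg[y:y + w, x:x + w, :]              # assumed square crop around one hand
    # ... run the hand keypoint estimator on hand_crop ...
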

+
 # get max index of 2d array
 def npmax(array):
     arrayindex = array.argmax(1)