# Author: Karla R.
# Sept 2020-
# Python 3.7
import numpy as np
import cv2
import cv2.aruco as aruco
import pyrealsense2 as rs
import itertools
import pandas as pd
import openpyxl
from relativepose import *
# Source 1
# 'https://medium.com/@muralimahadev40/aruco-markers-usage-in-computer-vision-using-opencv-python-cbdcf6ff5172'
# Source 2
# 'https://medium.com/@aliyasineser/aruco-marker-tracking-with-opencv-8cb844c26628'
# Source 3
# 'https://docs.opencv.org/master/d9/d6a/group__aruco.html#gab9159aa69250d8d3642593e508cb6baa'
def combpairs(markerids):
    # Output all pairs of marker-id indices using the N-choose-K tool
    # in itertools (K = 2)
    comb = list(itertools.combinations(range(len(markerids)), 2))
    # Unpack each tuple pair into a list
    comb = [[*pairs] for pairs in comb]
    return comb
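# For example (hypothetical ids), three detected markers yield the three
# unique index pairs:
#   combpairs(np.array([[7], [23], [42]]))  ->  [[0, 1], [0, 2], [1, 2]]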
def stereoRGB_depth(corners):
    # Inputs:
    #   corners: list of N arrays of shape (1, 4, 2) -- the four corner pixels
    #   per marker, where N is the number of markers
    # Recall that the order of the corners is clockwise.
    """Averaging"""
    # Get the average x and y coordinate of each marker's four corners
    print('corners', corners)
    # Ensure integer pixels
    x_center = [int(sum([i[0] for i in marker]) / 4) for N in corners for marker in N]
    y_center = [int(sum([i[1] for i in marker]) / 4) for N in corners for marker in N]
    center_pixels = list(zip(x_center, y_center))
    # NOTE: if the marker is too close, the averaged center is calculated
    # outside the image boundaries and indexing fails -- which is odd, because
    # pose estimation finds the center without a problem.
    return center_pixels
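# For example (hypothetical corner pixels), a single marker with corners
# (100, 100), (120, 100), (120, 120), (100, 120) averages to (110, 110):
#   stereoRGB_depth([np.array([[[100, 100], [120, 100], [120, 120], [100, 120]]])])
#   ->  [(110, 110)]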
## This is INCORRECT ##
"""def center_reprojection(color_image, id_tvec, id_rvec, camera_matrix, dist_coef):
    # Purpose: see whether a reprojection from world coordinates into the
    # image plane can locate the centroid of the aruco marker better
    h, w = color_image.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coef, (w, h), 1, (w, h))
    # Undistort image
    dst = cv2.undistort(color_image, camera_matrix, dist_coef, None, newcameramtx)
    # Projection matrix P = K*[R|T]  https://docs.opencv.org/3.4/d0/daf/group__projection.html
    center_WORLD_image = []
    for i, center_coord in enumerate(id_tvec):
        ##result = cv2.projectPoints(center_coord, id_rvec[i], id_tvec[i], camera_matrix, dist_coef)
        R, __ = cv2.Rodrigues(id_rvec[i])
        T = center_coord.reshape(3, 1)
        RT = np.c_[R, T]
        world = np.append(T, 1)
        result = np.matmul(newcameramtx.reshape(3, 3), RT)
        result = np.matmul(result, world)
        center_WORLD_image.append(result)
    return center_WORLD_image"""
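# A minimal sketch (not the author's method) of what the commented-out block
# above was attempting: project each marker's center -- the origin of the
# marker's own coordinate frame -- into the image with cv2.projectPoints,
# which applies the full K*[R|t] chain plus lens distortion in one call.
def center_reprojection_sketch(id_rvec, id_tvec, camera_matrix, dist_coef):
    origin = np.zeros((1, 3), dtype=np.float32)  # marker center in its own frame
    centers = []
    for rvec, tvec in zip(id_rvec, id_tvec):
        # Reshape the (1, 1, 3) vectors from estimatePoseSingleMarkers to (3, 1)
        img_pts, _ = cv2.projectPoints(origin, rvec.reshape(3, 1),
                                       tvec.reshape(3, 1), camera_matrix, dist_coef)
        centers.append(img_pts.reshape(2))  # (u, v) pixel coordinates
    return centers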
def evalDepth(center_pixels, depth_val, id_tvec):
    """Known error caused by center_pixels (note Dec 7, 2020): stereoRGB_depth
    can output center pixels that are out of frame, e.g.
    IndexError: index 516 is out of bounds for axis 0 with size 480"""
    ## INPUTS ##
    # id_tvec   => translation vectors for each marker (x, y, z); only z is
    #              of interest here. Shape is (N, 1, 1, 3).
    # depth_val => the depth frame data aligned to the RGB frameset, scaled
    #              to meters for the entire array (depth_image * depth_scale)
    ############
    # Get the z-depth value from the ARUCO RGB measurements
    centerdepth_aruco = [vect[2] for N in id_tvec for marker in N for vect in marker]
    print('centerdepth', centerdepth_aruco)
    centerdepth_val = []
    print('center pixels', center_pixels)
    for pixelset in center_pixels:
        # pixelset[0] is x and pixelset[1] is y; a np.array is indexed
        # [row][col], i.e. [y][x]
        print('xpixel', pixelset[0])
        print('ypix', pixelset[1])
        centerdepth_val.append(depth_val[pixelset[1]][pixelset[0]])
    # Signed percent error between the RealSense depth and the ArUco z-depth
    error_RGB_depth = [(i - j) / i * 100 if i != 0 else None
                       for i, j in zip(centerdepth_val, centerdepth_aruco)]
    print('Error', error_RGB_depth)
    return error_RGB_depth, centerdepth_val
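# A possible guard (an assumption, not the author's fix) against the
# out-of-frame center pixels noted above: clamp each center to the depth
# image bounds before indexing.
def clamp_pixels(center_pixels, width=640, height=480):
    return [(min(max(x, 0), width - 1), min(max(y, 0), height - 1))
            for (x, y) in center_pixels]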
# Specify data OUTPUT path
data_path = r'C:\Users\karla\OneDrive\Documents\GitHub\KUL_Thesis\Depth Estimation Eval 1'
# Get camera parameters
path = r'C:\Users\karla\OneDrive\Documents\GitHub\KUL_Thesis\calibration.txt'
# Load intrinsics
param = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)
camera_matrix = param.getNode("K").mat()
dist_coef = param.getNode("D").mat()
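# The calibration file is assumed (not confirmed by the source) to be an
# OpenCV FileStorage YAML with "K" and "D" matrix nodes, along these lines:
#   %YAML:1.0
#   K: !!opencv-matrix
#      rows: 3
#      cols: 3
#      dt: d
#      data: [fx, 0., cx, 0., fy, cy, 0., 0., 1.]
#   D: !!opencv-matrix
#      rows: 1
#      cols: 5
#      dt: d
#      data: [k1, k2, p1, p2, k3]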
############################ Set up realsense pipeline #########################
# Create a pipeline
pipeline = rs.pipeline()
# Create a config and configure the pipeline to stream different resolutions
# of color and depth streams
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)  # 16-bit linear depth values
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)  # 8-bit BGR
# Start streaming
profile = pipeline.start(config)
# Get depth scale in METERS (meters per raw depth unit)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
# Instruct alignment of depth to RGB so depth pixels map onto color pixels
align_to = rs.stream.color
align = rs.align(align_to)
# Call aruco dictionary
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_1000)
# Create an empty output store for marker ids and tvectors
output_data = []
# Initialize a counter for detection rate
counter = 0
################# Streaming loop ###############
try:
    while True:
        # Update counter
        counter += 1
        # Access camera streams/framesets: RGB and depth
        frames = pipeline.wait_for_frames()
        # Run the alignment process on the accessed framesets
        aligned_frames = align.process(frames)
        # Get aligned frames (modified depth frame)
        aligned_depth_frame = aligned_frames.get_depth_frame()  # a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()
        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue
        # Convert depth and RGB frames to numpy arrays
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Get depth values in meters (implemented Dec 1, 2020)
        #### CHECK: not sure what the difference is between this and
        #### depth_frame.get_distance(pixel_x, pixel_y)
        depth_val = depth_image * depth_scale
        #print('shape of depthval', depth_val.shape)
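        # Note: for a single pixel, aligned_depth_frame.get_distance(x, y)
        # should be equivalent to depth_image[y, x] * depth_scale -- both give
        # meters; scaling the whole array here just vectorizes that lookup.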
        # Convert RGB to its corresponding grayscale values (ARUCO requires
        # grayscale for the thresholding operations)
        gray_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
        # Detect aruco markers and draw them on the RGB image
        arucoParameters = aruco.DetectorParameters_create()
        corners, ids, rejectedImgPoints = aruco.detectMarkers(gray_image, aruco_dict, parameters=arucoParameters, cameraMatrix=camera_matrix, distCoeff=dist_coef)
        color_image = aruco.drawDetectedMarkers(color_image, corners, ids)
        # ids: ndarray (None when nothing is detected); corners: list
        print("Read ids", ids)
        ################# TESTING!! ############
        """Find corner centers using AVERAGING"""
        print("Shape of corners", np.shape(corners))
        print('corners', corners)
        center_pixels = stereoRGB_depth(corners)
        # Estimate ARUCO pose
        markerlen = 3.19 / 100  # square dimension [m]
        axis_len = 0.04  # length of the drawn axes (select)
        # Proceed only if markers were detected (ids is None when none are found)
        if ids is not None:
            # Create empty lists to store the read id values
            id_rvec = []
            id_tvec = []
            id_hold = []
            # For each identified marker, find the POSE estimation:
            for i in range(0, len(ids)):
                # Get translation and rotation vectors (shape (1, 1, 3))
                print(i)
                print('Corresponding ID value is', ids[i])
                rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners[i], markerlen, camera_matrix, dist_coef)
                print("tvec")
                print(tvec)
                # Store rvec and tvec values
                id_rvec.append(rvec)
                id_tvec.append(tvec)
                print('id_tvec shape', np.shape(id_tvec))
                print('id_tvec example', id_tvec)
                # Draw the aruco pose axes
                aruco.drawAxis(color_image, camera_matrix, dist_coef, rvec, tvec, axis_len)
            # Shape of id_tvec is (len(ids), 1, 1, 3)
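            # Note: estimatePoseSingleMarkers returns, per marker, rvec (a
            # Rodrigues rotation vector) and tvec (the marker center in the
            # camera frame, in the same units as markerlen, i.e. meters here).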
            # Does NOT work!
            """Find centers using projection/transformation:
            center_WORLD_image = center_reprojection(color_image, id_tvec, id_rvec, camera_matrix, dist_coef)
            print('average center', center_pixels)
            print('WORLD transformation', center_WORLD_image)"""
            ########### Calculate error amongst methods ########
            error_RGB_depth, centerdepth_val = evalDepth(center_pixels, depth_val, id_tvec)
            print('Percent error between RGB and DEPTH', error_RGB_depth)
            ############### Append OUTPUT data: record depth, RGB and camera values #########
            # Reduce dimensionality (N, 1, 1, 3) ==> (N, 3)
            red_tvec = np.squeeze(np.array(id_tvec))
            ID = np.array(ids)
            # Condition if only 1 marker is present/read
            if len(ID) == 1:
                # Build the row [ID, Xc, Yc, Zc, Depth_RS]; the RealSense depth
                # is inserted after z (index 4) so it matches the column labels
                data_store = np.insert(red_tvec, 0, np.squeeze(ID))
                data_store = np.insert(data_store, 4, centerdepth_val[0])
                print('data single shape', data_store)
                output_data.append(data_store)
            else:
                data_store = np.insert(red_tvec, 0, ID.T, axis=1)
                data_store = np.insert(data_store, 4, np.array(centerdepth_val).T, axis=1)
                # Append data row by row
                for row in data_store:
                    output_data.append(row)
            ############################# Relative aruco pose #############################
            # Find the possible marker pair combinations
            comb = combpairs(ids)
            print(comb)
            r_rel, t_rel = append_rel(comb, id_rvec, id_tvec)
            print('Relative rotation', r_rel, 'for pairs', comb)
            print('Relative translation', t_rel, 'for pairs', comb)
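            # append_rel comes from the relativepose module (not shown here);
            # for a pair (i, j) the relative pose is presumably the usual
            # composition R_rel = Rj.T @ Ri and t_rel = Rj.T @ (ti - tj),
            # with Ri, Rj obtained via cv2.Rodrigues from the stored rvecs.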
        # Make the depth image 3-channel like the RGB image (currently unused)
        depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
        # Render images
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        images = np.hstack((color_image, depth_colormap))
        cv2.namedWindow('Detect Aruco Markers', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('Detect Aruco Markers', images)
        key = cv2.waitKey(3)
        # Press Esc or 'q' to close the image window
        if key & 0xFF == ord('q') or key == 27:
            cv2.destroyAllWindows()
            break
finally:
    pipeline.stop()
################# Execute analysis ################
# Initialize data frame
col_labels = ['ID', 'Xc', 'Yc', 'Zc', 'Depth_RS']
df = pd.DataFrame(data=output_data, index=None, columns=col_labels)
# Write the DataFrame to Excel
datatoexcel = pd.ExcelWriter('Test Count Rate GIPHY Dec14_2020.xlsx')
df.to_excel(datatoexcel)
# Save the Excel file
datatoexcel.save()
print('DataFrame is written to Excel file successfully.')
# Per-marker statistics on the aruco z-depth
grouped_ID = df.groupby('ID').agg({'Zc': ['count', 'mean', 'median', 'min', 'max', 'std']})
print(grouped_ID)
# Print the total count of frames processed
print('Total count instances', counter)