# RGB-Stereo-Correlation.py

import cv2
import numpy as np
import depthai as dai

# Weights to use when blending the depth/rgb image (the two should sum to 1.0)
rgbWeight = 0.4
depthWeight = 0.6

msgs = dict()

def add_msg(msg, name, seq=None):
    # File the message under its sequence number so frames from the
    # rgb and disparity streams can be paired up later
    if seq is None:
        seq = msg.getSequenceNum()
    seq = str(seq)
    if seq not in msgs:
        msgs[seq] = dict()
    msgs[seq][name] = msg

def get_msgs():
    global msgs
    seq_remove = []  # Sequence numbers to be deleted
    for seq, syncMsgs in msgs.items():
        seq_remove.append(seq)  # Will get removed from the dict once a synced pair is found
        # Check if both the color and disparity frames with this sequence number have arrived
        if len(syncMsgs) == 2:  # rgb + depth
            # Remove the matched sequence number along with any older, stale ones
            for rm in seq_remove:
                del msgs[rm]
            return syncMsgs  # Return the synced messages
    return None
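
# Illustration of the sync flow (hypothetical frames, both carrying sequence number 17):
#   add_msg(rgbFrame, "rgb")    # msgs["17"] == {"rgb": rgbFrame}
#   add_msg(dispFrame, "disp")  # msgs["17"] == {"rgb": rgbFrame, "disp": dispFrame}
#   get_msgs()                  # returns that pair and drops entry "17" (plus any older ones)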

def updateBlendWeights(percent_rgb):
    """
    Update the rgb and depth weights used to blend the depth/rgb image.
    @param[in] percent_rgb The rgb weight expressed as a percentage (0..100)
    """
    global depthWeight
    global rgbWeight
    rgbWeight = float(percent_rgb) / 100.0
    depthWeight = 1.0 - rgbWeight
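
# Example (illustrative): dragging the trackbar to 40 calls updateBlendWeights(40),
# which sets rgbWeight = 0.4 and depthWeight = 0.6, so the weights keep summing to 1.0.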
# Optional. If set (True), the ColorCamera is downscaled from 1080p to 720p.
# Otherwise (False), the aligned depth is automatically upscaled to 1080p
downscaleColor = True
fps = 30
# The disparity is computed at this resolution, then upscaled to RGB resolution
monoResolution = dai.MonoCameraProperties.SensorResolution.THE_720_P
# Create pipeline
pipeline = dai.Pipeline()
device = dai.Device()
# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
left = pipeline.create(dai.node.MonoCamera)
right = pipeline.create(dai.node.MonoCamera)
stereo = pipeline.create(dai.node.StereoDepth)
rgbOut = pipeline.create(dai.node.XLinkOut)
disparityOut = pipeline.create(dai.node.XLinkOut)
rgbOut.setStreamName("rgb")
disparityOut.setStreamName("disp")
# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setFps(fps)
if downscaleColor: camRgb.setIspScale(2, 3)
# For now, RGB needs fixed focus to properly align with depth.
# This value was used during calibration
try:
    calibData = device.readCalibration2()
    lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.RGB)
    if lensPosition:
        camRgb.initialControl.setManualFocus(lensPosition)
except:
    # Re-raise: without the calibrated lens position the depth/rgb alignment is unreliable
    raise
left.setResolution(monoResolution)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
left.setFps(fps)
right.setResolution(monoResolution)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
right.setFps(fps)
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# LR-check is required for depth alignment
stereo.setLeftRightCheck(True)
stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
# Linking
camRgb.isp.link(rgbOut.input)
left.out.link(stereo.left)
right.out.link(stereo.right)
stereo.disparity.link(disparityOut.input)
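
# Resulting topology (sketch):
#   camRgb.isp -----------------------------------> XLinkOut "rgb"
#   left.out  --> stereo.left  \
#                                stereo.disparity -> XLinkOut "disp"
#   right.out --> stereo.right /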

# Connect to device and start pipeline
with device:
    device.startPipeline(pipeline)

    frameRgb = None
    frameDisp = None

    # Configure the window; the trackbar adjusts the blending ratio of rgb/depth
    blendedWindowName = "rgb-depth"
    cv2.namedWindow(blendedWindowName)
    cv2.createTrackbar('RGB Weight %', blendedWindowName, int(rgbWeight * 100), 100, updateBlendWeights)

    while True:
        # Drain whatever arrived on each output queue and file it by sequence number
        for name in ['rgb', 'disp']:
            msg = device.getOutputQueue(name).tryGet()
            if msg is not None:
                add_msg(msg, name)

        synced = get_msgs()
        if synced:
            frameRgb = synced["rgb"].getCvFrame()
            frameDisp = synced["disp"].getFrame()
            # Normalize disparity to the full 0..255 range for display
            maxDisparity = stereo.initialConfig.getMaxDisparity()
            frameDisp = (frameDisp * 255. / maxDisparity).astype(np.uint8)
            frameDisp = cv2.applyColorMap(frameDisp, cv2.COLORMAP_TURBO)
            frameDisp = np.ascontiguousarray(frameDisp)

            # Both frames need to be in BGR format before blending
            if len(frameDisp.shape) < 3:
                frameDisp = cv2.cvtColor(frameDisp, cv2.COLOR_GRAY2BGR)
            blended = cv2.addWeighted(frameRgb, rgbWeight, frameDisp, depthWeight, 0)
            cv2.imshow(blendedWindowName, blended)
            frameRgb = None
            frameDisp = None

        if cv2.waitKey(1) == ord('q'):
            break
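
# Usage note (assumed invocation): with an OAK device attached and the
# depthai + opencv-python packages installed, run
#   python3 RGB-Stereo-Correlation.py
# Drag the "RGB Weight %" trackbar to change the blend; press 'q' to quit.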