#!/usr/bin/env python3
import cv2
import depthai as dai
# Create pipeline
pipeline = dai.Pipeline()
# Define sources and outputs
monoLeft = pipeline.create(dai.node.ColorCamera)
monoRight = pipeline.create(dai.node.ColorCamera)
ve1Out = pipeline.create(dai.node.XLinkOut)
ve3Out = pipeline.create(dai.node.XLinkOut)
ve1Out.setStreamName("ve1Out")
ve3Out.setStreamName("ve3Out")
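# The stream names above must match the names passed to device.getOutputQueue() on the host side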
# Properties
monoLeft.setBoardSocket(dai.CameraBoardSocket.CAM_B)
monoLeft.setResolution(dai.ColorCameraProperties.SensorResolution.THE_800_P)
monoLeft.setFps(30)
monoRight.setBoardSocket(dai.CameraBoardSocket.CAM_C)
monoRight.setResolution(dai.ColorCameraProperties.SensorResolution.THE_800_P)
monoRight.setFps(30)
# Linking
monoLeft.isp.link(ve1Out.input)
monoRight.isp.link(ve3Out.input)
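# ColorCamera.isp outputs unencoded frames straight from the image signal processor
# (1280x800 at THE_800_P); getCvFrame() on the host converts them to BGR for OpenCV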
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
print("Connected cameras:")
for p in device.getConnectedCameraFeatures():
print(
f" -socket {p.socket.name:6}: {p.sensorName:6} {p.width:4} x {p.height:4} focus:", end="")
print("auto " if p.hasAutofocus else "fixed", "- ", end="")
print(*[type_.name for type_ in p.supportedTypes])
print("USB speed:", device.getUsbSpeed().name)
print("IR drivers:", device.getIrDrivers())
    # Output queues will be used to get the frames from the outputs defined above.
    # blocking=True makes the host queue hold frames instead of dropping them when it is full.
    outQ1 = device.getOutputQueue(name="ve1Out", maxSize=30, blocking=True)
    outQ3 = device.getOutputQueue(name="ve3Out", maxSize=30, blocking=True)
    # Write the BGR frames to standard .mp4 files with OpenCV (30 FPS, 1280x800 to match the isp output)
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out1 = cv2.VideoWriter("mono1.mp4", fourcc, 30, (1280, 800))
    out3 = cv2.VideoWriter("mono2.mp4", fourcc, 30, (1280, 800))
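    # Optional sanity check: mp4v support can vary between OpenCV builds,
    # so verify both writers actually opened before recording
    if not out1.isOpened() or not out3.isOpened():
        raise RuntimeError("cv2.VideoWriter failed to open an output .mp4 file")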
print("Press Ctrl+C to stop encoding...")
while True:
try:
out1.write(outQ1.get().getCvFrame())
out3.write(outQ3.get().getCvFrame())
except KeyboardInterrupt:
# Keyboard interrupt (Ctrl + C) detected
break
    out1.release()
    out3.release()
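# Once released, mono1.mp4 and mono2.mp4 are regular MP4 containers and should be
# playable directly in common media players; no additional muxing step is needed.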