Add redirect messages to migrated / renamed examples (luxonis#39)
* remove gen2 examples (moved to depthai-python)
* add redirect info
Showing 19 changed files with 16 additions and 1,083 deletions.
New file (redirect for the moved gaze estimation example):

@@ -0,0 +1 @@
+Moved to https://github.com/luxonis/depthai-experiments/tree/master/gen2-gaze-estimation
This file was deleted.
gen2_examples/01_rgb_preview.py

@@ -1,39 +1 @@
-#!/usr/bin/env python3
-
-import cv2
-import depthai as dai
-import numpy as np
-
-# Start defining a pipeline
-pipeline = dai.Pipeline()
-
-# Define a source - color camera
-cam_rgb = pipeline.createColorCamera()
-cam_rgb.setPreviewSize(300, 300)
-cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
-cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
-cam_rgb.setInterleaved(False)
-
-# Create output
-xout_rgb = pipeline.createXLinkOut()
-xout_rgb.setStreamName("rgb")
-cam_rgb.preview.link(xout_rgb.input)
-
-# Pipeline defined, now the device is assigned and the pipeline is started
-device = dai.Device(pipeline)
-device.startPipeline()
-
-# Output queue will be used to get the rgb frames from the output defined above
-q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
-
-while True:
-    in_rgb = q_rgb.get()  # blocking call, waits until new data arrives
-    # data arrives as a flat 1D array; convert it into HxWxC form
-    shape = (3, in_rgb.getHeight(), in_rgb.getWidth())
-    frame_rgb = in_rgb.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
-    frame_rgb = np.ascontiguousarray(frame_rgb)
-    # frame is transformed and ready to be shown
-    cv2.imshow("rgb", frame_rgb)
-
-    if cv2.waitKey(1) == ord('q'):
-        break
+print("Please visit https://docs.luxonis.com/projects/api/en/gen2_develop/samples/01_rgb_preview/")
gen2_examples/02_mono_preview.py

@@ -1,60 +1 @@
-#!/usr/bin/env python3
-
-import cv2
-import depthai as dai
-import numpy as np
-
-# Start defining a pipeline
-pipeline = dai.Pipeline()
-
-# Define a source - two mono (grayscale) cameras
-cam_left = pipeline.createMonoCamera()
-cam_left.setBoardSocket(dai.CameraBoardSocket.LEFT)
-cam_left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
-
-cam_right = pipeline.createMonoCamera()
-cam_right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
-cam_right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
-
-# Create outputs
-xout_left = pipeline.createXLinkOut()
-xout_left.setStreamName('left')
-cam_left.out.link(xout_left.input)
-xout_right = pipeline.createXLinkOut()
-xout_right.setStreamName('right')
-cam_right.out.link(xout_right.input)
-
-# Pipeline defined, now the device is assigned and the pipeline is started
-device = dai.Device(pipeline)
-device.startPipeline()
-
-# Output queues will be used to get the grayscale frames from the outputs defined above
-q_left = device.getOutputQueue(name="left", maxSize=4, blocking=False)
-q_right = device.getOutputQueue(name="right", maxSize=4, blocking=False)
-
-frame_left = None
-frame_right = None
-
-while True:
-    # instead of get() (blocking), use tryGet() (non-blocking), which returns available data or None otherwise
-    in_left = q_left.tryGet()
-    in_right = q_right.tryGet()
-
-    if in_left is not None:
-        # if data from the left camera is available, transform the 1D array into a frame
-        frame_left = in_left.getData().reshape((in_left.getHeight(), in_left.getWidth())).astype(np.uint8)
-        frame_left = np.ascontiguousarray(frame_left)
-
-    if in_right is not None:
-        # if data from the right camera is available, transform the 1D array into a frame
-        frame_right = in_right.getData().reshape((in_right.getHeight(), in_right.getWidth())).astype(np.uint8)
-        frame_right = np.ascontiguousarray(frame_right)
-
-    # show the frames if available
-    if frame_left is not None:
-        cv2.imshow("left", frame_left)
-    if frame_right is not None:
-        cv2.imshow("right", frame_right)
-
-    if cv2.waitKey(1) == ord('q'):
-        break
+print("Please visit https://docs.luxonis.com/projects/api/en/gen2_develop/samples/02_mono_preview/")
gen2_examples/03_depth_preview.py

@@ -1,48 +1 @@
-#!/usr/bin/env python3
-
-import cv2
-import depthai as dai
-import numpy as np
-
-# Start defining a pipeline
-pipeline = dai.Pipeline()
-
-# Define a source - two mono (grayscale) cameras
-left = pipeline.createMonoCamera()
-left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
-left.setBoardSocket(dai.CameraBoardSocket.LEFT)
-
-right = pipeline.createMonoCamera()
-right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
-right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
-
-# Create a node that will produce the depth map
-depth = pipeline.createStereoDepth()
-depth.setConfidenceThreshold(200)
-left.out.link(depth.left)
-right.out.link(depth.right)
-
-# Create output
-xout = pipeline.createXLinkOut()
-xout.setStreamName("disparity")
-depth.disparity.link(xout.input)
-
-# Pipeline defined, now the device is assigned and the pipeline is started
-device = dai.Device(pipeline)
-device.startPipeline()
-
-# Output queue will be used to get the disparity frames from the output defined above
-q = device.getOutputQueue(name="disparity", maxSize=4, blocking=False)
-
-while True:
-    in_disparity = q.get()  # blocking call, waits until new data arrives
-    # data arrives as a flat 1D array; convert it into HxW form
-    frame = in_disparity.getData().reshape((in_disparity.getHeight(), in_disparity.getWidth())).astype(np.uint8)
-    frame = np.ascontiguousarray(frame)
-    # frame is transformed; apply a color map to highlight the depth info
-    frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET)
-    # frame is ready to be shown
-    cv2.imshow("disparity", frame)
-
-    if cv2.waitKey(1) == ord('q'):
-        break
+print("Please visit https://docs.luxonis.com/projects/api/en/gen2_develop/samples/03_depth_preview/")
gen2_examples/04_rgb_encoding.py

@@ -1,45 +1 @@
-#!/usr/bin/env python3
-
-import subprocess
-import depthai as dai
-
-# Start defining a pipeline
-pipeline = dai.Pipeline()
-
-# Define a source - color camera
-cam = pipeline.createColorCamera()
-cam.setBoardSocket(dai.CameraBoardSocket.RGB)
-cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
-
-# Create an encoder, consuming the frames and encoding them using H.265
-videoEncoder = pipeline.createVideoEncoder()
-videoEncoder.setDefaultProfilePreset(3840, 2160, 30, dai.VideoEncoderProperties.Profile.H265_MAIN)
-cam.video.link(videoEncoder.input)
-
-# Create output
-videoOut = pipeline.createXLinkOut()
-videoOut.setStreamName('h265')
-videoEncoder.bitstream.link(videoOut.input)
-
-# Pipeline defined, now the device is assigned and the pipeline is started
-device = dai.Device(pipeline)
-device.startPipeline()
-
-# Output queue will be used to get the encoded data from the output defined above
-q = device.getOutputQueue(name="h265", maxSize=30, blocking=True)
-
-# The .h265 file is a raw stream file (not playable yet)
-with open('video.h265', 'wb') as videoFile:
-    print("Press Ctrl+C to stop encoding...")
-    try:
-        while True:
-            h265Packet = q.get()  # blocking call, waits until new data arrives
-            h265Packet.getData().tofile(videoFile)  # appends the packet data to the opened file
-    except KeyboardInterrupt:
-        # Keyboard interrupt (Ctrl + C) detected
-        pass
-
-print("Converting stream file (.h265) into a video file (.mp4)...")
-# ffmpeg is used to convert the raw .h265 stream into a playable .mp4 container
-subprocess.check_call("ffmpeg -framerate 30 -i video.h265 -c copy video.mp4".split())
-print("Conversion successful, check video.mp4")
+print("Please visit https://docs.luxonis.com/projects/api/en/gen2_develop/samples/04_rgb_encoding/")
gen2_examples/05_rgb_mono_encoding.py

@@ -1,76 +1 @@
-#!/usr/bin/env python3
-
-import subprocess
-import depthai as dai
-
-# Start defining a pipeline
-pipeline = dai.Pipeline()
-
-# Define sources - color and mono cameras
-colorCam = pipeline.createColorCamera()
-monoCam = pipeline.createMonoCamera()
-monoCam.setBoardSocket(dai.CameraBoardSocket.LEFT)
-monoCam2 = pipeline.createMonoCamera()
-monoCam2.setBoardSocket(dai.CameraBoardSocket.RIGHT)
-
-# Create encoders, one for each camera, consuming the frames and encoding them using H.264 / H.265
-ve1 = pipeline.createVideoEncoder()
-ve1.setDefaultProfilePreset(1280, 720, 30, dai.VideoEncoderProperties.Profile.H264_MAIN)
-monoCam.out.link(ve1.input)
-
-ve2 = pipeline.createVideoEncoder()
-ve2.setDefaultProfilePreset(1920, 1080, 30, dai.VideoEncoderProperties.Profile.H265_MAIN)
-colorCam.video.link(ve2.input)
-
-ve3 = pipeline.createVideoEncoder()
-ve3.setDefaultProfilePreset(1280, 720, 30, dai.VideoEncoderProperties.Profile.H264_MAIN)
-monoCam2.out.link(ve3.input)
-
-# Create outputs
-ve1Out = pipeline.createXLinkOut()
-ve1Out.setStreamName('ve1Out')
-ve1.bitstream.link(ve1Out.input)
-
-ve2Out = pipeline.createXLinkOut()
-ve2Out.setStreamName('ve2Out')
-ve2.bitstream.link(ve2Out.input)
-
-ve3Out = pipeline.createXLinkOut()
-ve3Out.setStreamName('ve3Out')
-ve3.bitstream.link(ve3Out.input)
-
-# Pipeline defined, now the device is assigned and the pipeline is started
-dev = dai.Device(pipeline)
-dev.startPipeline()
-
-# Output queues will be used to get the encoded data from the outputs defined above
-outQ1 = dev.getOutputQueue(name='ve1Out', maxSize=30, blocking=True)
-outQ2 = dev.getOutputQueue(name='ve2Out', maxSize=30, blocking=True)
-outQ3 = dev.getOutputQueue(name='ve3Out', maxSize=30, blocking=True)
-
-# The .h264 / .h265 files are raw stream files (not playable yet)
-with open('mono1.h264', 'wb') as file_mono1_h264, open('color.h265', 'wb') as file_color_h265, open('mono2.h264', 'wb') as file_mono2_h264:
-    print("Press Ctrl+C to stop encoding...")
-    while True:
-        try:
-            # Empty each queue
-            while outQ1.has():
-                outQ1.get().getData().tofile(file_mono1_h264)
-
-            while outQ2.has():
-                outQ2.get().getData().tofile(file_color_h265)
-
-            while outQ3.has():
-                outQ3.get().getData().tofile(file_mono2_h264)
-        except KeyboardInterrupt:
-            # Keyboard interrupt (Ctrl + C) detected
-            break
-
-print("Converting stream files (.h264/.h265) into video files (.mp4)...")
-# ffmpeg is used to convert each raw stream file into a playable .mp4 container
-cmd = "ffmpeg -framerate 30 -i {} -c copy {}"
-subprocess.check_call(cmd.format("mono1.h264", "mono1.mp4").split())
-subprocess.check_call(cmd.format("mono2.h264", "mono2.mp4").split())
-subprocess.check_call(cmd.format("color.h265", "color.mp4").split())
-print("Conversion successful, check mono1.mp4 / mono2.mp4 / color.mp4")
+print("Please visit https://docs.luxonis.com/projects/api/en/gen2_develop/samples/05_rgb_mono_encoding/")
gen2_examples/06_rgb_full_resolution_saver.py (1 addition, 46 deletions; mode 100755 → 100644)
@@ -1,46 +1 @@
-#!/usr/bin/env python3
-
-import time
-from pathlib import Path
-
-import cv2
-import depthai as dai
-import numpy as np
-
-# Start defining a pipeline
-pipeline = dai.Pipeline()
-
-# Define a source - color camera
-cam_rgb = pipeline.createColorCamera()
-cam_rgb.setPreviewSize(3840, 2160)
-cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
-cam_rgb.setInterleaved(False)
-
-# Create output
-xout_rgb = pipeline.createXLinkOut()
-xout_rgb.setStreamName("rgb")
-cam_rgb.preview.link(xout_rgb.input)
-
-# Pipeline defined, now the device is assigned and the pipeline is started
-device = dai.Device(pipeline)
-device.startPipeline()
-
-# Output queue will be used to get the rgb frames from the output defined above
-q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
-
-# Make sure the destination path exists before starting to store the frames
-Path('06_data').mkdir(parents=True, exist_ok=True)
-
-while True:
-    in_rgb = q_rgb.get()  # blocking call, waits until new data arrives
-    # data arrives as a flat 1D array; convert it into HxWxC form
-    shape = (3, in_rgb.getHeight(), in_rgb.getWidth())
-    frame_rgb = in_rgb.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
-    frame_rgb = np.ascontiguousarray(frame_rgb)
-    # frame is transformed and ready to be shown
-    cv2.imshow("rgb", frame_rgb)
-    # after showing the frame, store it inside the target directory as a PNG image
-    cv2.imwrite(f"06_data/{int(time.time() * 10000)}.png", frame_rgb)
-
-    if cv2.waitKey(1) == ord('q'):
-        break
+print("Please visit https://docs.luxonis.com/projects/api/en/gen2_develop/samples/06_rgb_full_resolution_saver/")
gen2_examples/07_mono_full_resolution_saver.py (1 addition, 45 deletions; mode 100755 → 100644)
@@ -1,45 +1 @@
-#!/usr/bin/env python3
-
-import time
-from pathlib import Path
-
-import cv2
-import depthai as dai
-import numpy as np
-
-# Start defining a pipeline
-pipeline = dai.Pipeline()
-
-# Define a source - mono (grayscale) camera
-cam_left = pipeline.createMonoCamera()
-cam_left.setBoardSocket(dai.CameraBoardSocket.LEFT)
-cam_left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
-
-# Create output
-xout_left = pipeline.createXLinkOut()
-xout_left.setStreamName("left")
-cam_left.out.link(xout_left.input)
-
-# Pipeline defined, now the device is assigned and the pipeline is started
-device = dai.Device(pipeline)
-device.startPipeline()
-
-# Output queue will be used to get the grayscale frames from the output defined above
-q_left = device.getOutputQueue(name="left", maxSize=4, blocking=False)
-
-# Make sure the destination path exists before starting to store the frames
-Path('07_data').mkdir(parents=True, exist_ok=True)
-
-while True:
-    in_left = q_left.get()  # blocking call, waits until new data arrives
-    # data arrives as a flat 1D array; convert it into HxW form
-    shape = (in_left.getHeight(), in_left.getWidth())
-    frame_left = in_left.getData().reshape(shape).astype(np.uint8)
-    frame_left = np.ascontiguousarray(frame_left)
-    # frame is transformed and ready to be shown
-    cv2.imshow("left", frame_left)
-    # after showing the frame, store it inside the target directory as a PNG image
-    cv2.imwrite(f"07_data/{int(time.time() * 10000)}.png", frame_left)
-
-    if cv2.waitKey(1) == ord('q'):
-        break
+print("Please visit https://docs.luxonis.com/projects/api/en/gen2_develop/samples/07_mono_full_resolution_saver/")