
Commit bcf9474

Update experiments
1 parent 9b49e4e commit bcf9474

484 files changed: +1596 / -1211 lines (large commit; only part of the diff is shown below)

gen2-age-gender/main.py

+37
@@ -0,0 +1,37 @@
from depthai_sdk import OakCamera, TwoStagePacket, AspectRatioResizeMode, Visualizer, TextPosition, BboxStyle
import numpy as np
import cv2

with OakCamera() as oak:
    color = oak.create_camera('color')

    det = oak.create_nn('face-detection-retail-0004', color)
    # AspectRatioResizeMode has to be CROP for 2-stage pipelines at the moment
    det.config_nn(aspectRatioResizeMode=AspectRatioResizeMode.CROP)

    age_gender = oak.create_nn('age-gender-recognition-retail-0013', input=det)
    # age_gender.config_multistage_nn(show_cropped_frames=True)  # For debugging

    def cb(packet: TwoStagePacket, visualizer: Visualizer):
        for det, rec in zip(packet.detections, packet.nnData):
            age = int(float(np.squeeze(np.array(rec.getLayerFp16('age_conv3')))) * 100)
            gender = np.squeeze(np.array(rec.getLayerFp16('prob')))
            gender_str = "woman" if gender[0] > gender[1] else "man"

            visualizer.add_text(f'{gender_str}\nage: {age}',
                                bbox=(*det.top_left, *det.bottom_right),
                                position=TextPosition.BOTTOM_RIGHT)

        frame = visualizer.draw(packet.frame)
        cv2.imshow('Age-gender estimation', frame)

    # Visualize detections on the frame. Don't show the frame, but send the packet
    # to the callback function (where it will be displayed)
    oak.visualize(age_gender, callback=cb).detections(fill_transparency=0.1)
    oak.visualize(det.out.passthrough)
    oak.visualize(age_gender.out.twostage_crops, scale=3.0)

    # oak.show_graph()  # Show pipeline graph; not needed for now
    oak.start(blocking=True)  # Blocks until the app is stopped (by pressing the 'q' key)
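
The callback is the only place the raw model outputs are touched. As a standalone illustration, here is a minimal sketch of that decode step on plain FP16 buffers, following the layer layout the callback assumes for age-gender-recognition-retail-0013 ('age_conv3' holds age / 100, 'prob' holds the [female, male] score pair); the sample inputs are made up:

import numpy as np

# Hypothetical helper mirroring the decode in cb() above; the arguments stand in
# for rec.getLayerFp16('age_conv3') and rec.getLayerFp16('prob').
def decode_age_gender(age_raw, prob_raw):
    age = int(float(np.squeeze(np.array(age_raw))) * 100)  # model outputs age / 100
    prob = np.squeeze(np.array(prob_raw))                  # [female, male] scores
    gender = "woman" if prob[0] > prob[1] else "man"
    return age, gender

print(decode_age_gender([0.24], [0.8, 0.2]))  # -> (24, 'woman')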

gen2-age-gender/requirements.txt

+1
@@ -0,0 +1 @@
depthai-sdk==1.9.1
10 files renamed without changes.

gen2/gen2-class-saver-jpeg/main.py → gen2-class-saver-jpeg/main.py

+1
@@ -95,6 +95,7 @@ def store_data(in_frame, detections):
 thread = None
 detections = []
 
+
 while True:
     # instead of get() (blocking), tryGet() (non-blocking) is used, which returns the available data or None otherwise
     in_rgb = q_rgb.tryGet()
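
For context, the get()/tryGet() distinction that comment describes looks like this on its own: a minimal, self-contained sketch (the "rgb" stream and tiny pipeline here are placeholders, not the repo's actual setup):

import depthai as dai

pipeline = dai.Pipeline()
cam = pipeline.create(dai.node.ColorCamera)
xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("rgb")
cam.preview.link(xout.input)

with dai.Device(pipeline) as device:
    q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    while True:
        in_rgb = q_rgb.tryGet()  # non-blocking: an ImgFrame if one is ready, else None
        if in_rgb is None:
            continue             # nothing new yet; keep the loop responsive
        frame = in_rgb.getCvFrame()  # BGR numpy frame, ready for cv2 or saving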
13 files renamed without changes.
@@ -1,63 +1,63 @@
import numpy as np
import cv2
import depthai as dai

SHAPE = 300

p = dai.Pipeline()
p.setOpenVINOVersion(dai.OpenVINO.VERSION_2021_4)

camRgb = p.create(dai.node.ColorCamera)
camRgb.setPreviewSize(SHAPE, SHAPE)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

left = p.create(dai.node.MonoCamera)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)

# ImageManip to resize the mono frame (the NN expects a 300x300 input) and to change the frame type
manipLeft = p.create(dai.node.ImageManip)
manipLeft.initialConfig.setResize(300, 300)
# The NN model expects BGR input. By default, the ImageManip output type would be the same as the input (grayscale in this case)
manipLeft.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
left.out.link(manipLeft.inputImage)

right = p.create(dai.node.MonoCamera)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)

# ImageManip to resize the mono frame (the NN expects a 300x300 input) and to change the frame type
manipRight = p.create(dai.node.ImageManip)
manipRight.initialConfig.setResize(300, 300)
# The NN model expects BGR input. By default, the ImageManip output type would be the same as the input (grayscale in this case)
manipRight.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
right.out.link(manipRight.inputImage)

# NN that concatenates the three input frames side by side
nn = p.create(dai.node.NeuralNetwork)
nn.setBlobPath("models/concat_openvino_2021.4_6shave.blob")
nn.setNumInferenceThreads(2)

manipLeft.out.link(nn.inputs['img1'])
camRgb.preview.link(nn.inputs['img2'])
manipRight.out.link(nn.inputs['img3'])

# Send the concatenated frame from the NN to the host via XLink
nn_xout = p.create(dai.node.XLinkOut)
nn_xout.setStreamName("nn")
nn.out.link(nn_xout.input)

# Pipeline is defined, now we can connect to the device
with dai.Device(p) as device:
    qNn = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    shape = (3, SHAPE, SHAPE * 3)

    while True:
        inNn = np.array(qNn.get().getData())
        frame = inNn.view(np.float16).reshape(shape).transpose(1, 2, 0).astype(np.uint8).copy()

        cv2.imshow("Concat", frame)

        if cv2.waitKey(1) == ord('q'):
            break
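
The host-side decode at the end of this script is the subtle part: the device returns one flat FP16 buffer holding a planar (channels, height, width) image of the three 300x300 frames stitched side by side. A standalone sketch with random stand-in data, just to show the planar-to-interleaved conversion:

import numpy as np

SHAPE = 300
# Stand-in for np.array(qNn.get().getData()).view(np.float16)
flat = np.random.rand(3 * SHAPE * SHAPE * 3).astype(np.float16)
planar = flat.reshape(3, SHAPE, SHAPE * 3)          # (C, H, W): 3 x 300 x 900
frame = planar.transpose(1, 2, 0).astype(np.uint8)  # (H, W, C), what cv2.imshow expects
print(frame.shape)                                  # (300, 900, 3)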
3 files renamed without changes.

gen2/gen2-deeplabv3_depth/main.py → gen2-deeplabv3_depth/main.py

-1
@@ -14,7 +14,6 @@
 blob = dai.OpenVINO.Blob(blobconverter.from_zoo(name="deeplab_v3_mnv2_256x256", zoo_type="depthai", shaves=6))
 # for name,tensorInfo in blob.networkInputs.items(): print(name, tensorInfo.dims)
 INPUT_SHAPE = blob.networkInputs['Input'].dims[:2]
-print(INPUT_SHAPE)
 TARGET_SHAPE = (400,400)
 
 def decode_deeplabv3p(output_tensor):
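
The surrounding context lines show the pattern this change relies on: reading the model's input dimensions from the blob itself rather than hardcoding (or printing) them. A short sketch of that inspection step, using the same calls that appear in the hunk above:

import blobconverter
import depthai as dai

# Download the blob from the DepthAI model zoo and inspect its inputs
blob = dai.OpenVINO.Blob(blobconverter.from_zoo(
    name="deeplab_v3_mnv2_256x256", zoo_type="depthai", shaves=6))
for name, tensorInfo in blob.networkInputs.items():
    print(name, tensorInfo.dims)  # e.g. the 'Input' tensor and its dims
INPUT_SHAPE = blob.networkInputs['Input'].dims[:2]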
