Skip to content

Commit 5e0812d

Browse files
committed
feat(examples): added suncg_with_light_projector
1 parent e04e03f commit 5e0812d

File tree

8 files changed

+249
-1
lines changed

8 files changed

+249
-1
lines changed

blenderproc/python/modules/lighting/LightInterface.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,4 +63,8 @@ def _add_light_source(self, config):
6363
light.set_rotation_euler(config.get_list("rotation", [0, 0, 0]))
6464
light.set_energy(config.get_float("energy", 10.))
6565
light.set_color(config.get_list("color", [1, 1, 1])[:3])
66-
light.set_distance(config.get_float("distance", 0))
66+
light.set_distance(config.get_float("distance", 0))
67+
if config.get_bool("use_projector", False):
68+
light.setup_as_projector(config.get_string("path", "examples/advanced/stereo_matching_with_projector"
69+
"/patterns/random_pattern_00256.png"))
70+

blenderproc/python/types/LightUtility.py

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,59 @@ def set_type(self, type: str, frame: int = None):
6262
self.blender_obj.data.type = type
6363
Utility.insert_keyframe(self.blender_obj.data, "type", frame)
6464

65+
def setup_as_projector(self, path: str, frame: int = None):
    """ Turn this light into an image projector.

    The light is constrained to follow the scene camera, and its shader node
    tree is rewired so that the image at `path` is emitted as a spot-light
    "pattern" projected into the scene (e.g. for active stereo matching).

    :param path: Path to the image file to project. If None or the literal
                 string "none", the setup is skipped entirely.
    :param frame: Optional frame number at which a keyframe is inserted
                  (None inserts no keyframe / uses current behavior of
                  Utility.insert_keyframe).
    """
    # Guard clause: nothing to project without a usable path.
    if path is None or path == "none":
        print('Path is None')
        return

    # Set location to camera -- COPY TRANSFORMS
    # The projector must always sit exactly at the camera, so constrain it
    # to copy the active scene camera's full transform.
    self.blender_obj.constraints.new('COPY_TRANSFORMS')
    self.blender_obj.constraints['Copy Transforms'].target = bpy.context.scene.camera

    # Setup nodes for projecting image
    self.blender_obj.data.use_nodes = True
    # Point-like source: no soft shadow radius.
    self.blender_obj.data.shadow_soft_size = 0
    # 2.0944 rad ~= 120 degree spot cone.
    self.blender_obj.data.spot_size = 2.0944
    # The projector itself should not cast shadows in Cycles.
    self.blender_obj.data.cycles.cast_shadow = False

    nodes = self.blender_obj.data.node_tree.nodes
    links = self.blender_obj.data.node_tree.links

    # The loaded image is later looked up by its file name.
    img_name = path.split('/')[-1]
    print(path)
    print(f'\nUsing {img_name} as projector image\n')
    bpy.data.images.load(path, check_existing=False)

    # Existing Emission node of the light's node tree; the image color is
    # fed into it below.
    node_ox = nodes.get('Emission')

    # Set Up Nodes
    node_pattern = nodes.new(type="ShaderNodeTexImage")  # Texture Image
    node_pattern.image = bpy.data.images[img_name]
    # CLIP: do not tile the pattern outside the image bounds.
    node_pattern.extension = 'CLIP'

    node_mapping = nodes.new(type="ShaderNodeMapping")  # Mapping
    # NOTE(review): inputs[1] / inputs[3] are the Mapping node's Location /
    # Scale sockets per the inline labels below — confirm against the
    # Blender version in use, as socket indices are positional.
    node_mapping.inputs[1].default_value[0] = 0.5  # Location X
    node_mapping.inputs[1].default_value[1] = 0.5  # Location Y
    node_mapping.inputs[3].default_value[0] = 0.4  # Scaling X
    node_mapping.inputs[3].default_value[1] = 0.4  # Scaling Y

    node_coord = nodes.new(type="ShaderNodeTexCoord")  # Texture Coordinate

    # Divide the normal vector by its Z component to get a perspective
    # projection of the texture coordinates.
    divide = nodes.new(type="ShaderNodeVectorMath")
    divide.operation = 'DIVIDE'

    z_component = nodes.new(type="ShaderNodeSeparateXYZ")

    # Set Up Links
    links.new(node_pattern.outputs["Color"], node_ox.inputs["Color"])  # Link Image Texture to Emission
    links.new(node_mapping.outputs["Vector"], node_pattern.inputs["Vector"])  # Link Mapping to Image Texture
    links.new(divide.outputs["Vector"], node_mapping.inputs["Vector"])  # Link Texture Coordinate to Mapping
    links.new(node_coord.outputs["Normal"], divide.inputs[0])
    links.new(node_coord.outputs["Normal"], z_component.inputs["Vector"])
    links.new(z_component.outputs["Z"], divide.inputs[1])

    # NOTE(review): "use_projector" is not a property assigned on the light
    # data anywhere above — confirm Utility.insert_keyframe tolerates or
    # expects this data path.
    Utility.insert_keyframe(self.blender_obj.data, "use_projector", frame)
117+
65118

66119
def get_energy(self, frame: int = None) -> float:
67120
""" Returns the energy of the light.
Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
# Stereo Matching
2+
![](../../../images/stereo_matching_stereo_pair.jpg)
3+
![](../../../images/stereo_matching_stereo_depth.jpg)
4+
5+
In the first row we can see the rendered stereo RGB images, left and right respectively, and beneath them we can view
6+
the computed depth image using stereo matching. Note that due to a high discrepancy between the TV and the rest
7+
of the rendered scene, the visualization is not descriptive enough. This discrepancy or high depth values at the TV
8+
is due to a lack of gradient or useful features for stereo matching in this area. However, the depth values in other
9+
areas are consistent and close to the rendered depth images.
10+
11+
## Usage
12+
13+
Execute in the BlenderProc main directory:
14+
15+
```
16+
blenderproc run examples/advanced/stereo_matching/main.py <path to cam_pose file> <path to house.json> examples/advanced/stereo_matching/output
17+
```
18+
19+
* `examples/advanced/stereo_matching/main.py`: path to the main python file to run.
20+
* `<path to cam_pose file>`: Should point to a file which describes one camera pose per line (here the output of `scn2cam` from the `SUNCGToolbox` can be used).
21+
* `<path to house.json>`: Path to the house.json file of the SUNCG scene you want to render. Which should be either located inside the SUNCG directory, or the SUNCG directory path should be added to the config file.
22+
* `examples/advanced/stereo_matching/output`: path to the output directory.
23+
24+
## Visualization
25+
Visualize the generated data:
26+
```
27+
blenderproc vis hdf5 examples/advanced/stereo_matching/output/1.hdf5
28+
```
29+
30+
## Implementation
31+
32+
```python
33+
# Enable stereo mode and set baseline
34+
bproc.camera.set_stereo_parameters(interocular_distance=0.05, convergence_mode="PARALLEL")
35+
```
36+
37+
Here we enable stereo rendering and specify the camera parameters, some notable points are:
38+
* Setting the `interocular_distance` which is the stereo baseline.
39+
* Specifying `convergence_mode` to be `"PARALLEL"` (i.e. both cameras lie on the same line and are just shifted by `interocular_distance`, and are trivially coplanar).
40+
* Other options are `OFF-AXIS` where the cameras rotate inwards (converge) up to some plane.
41+
* `convergence_distance` is the distance from the cameras to the aforementioned plane they converge to in case of `OFF-AXIS` convergence mode. In this case, this parameter is ignored by Blender, but it is added here for clarification.
42+
43+
```python
44+
# Apply stereo matching to each pair of images
45+
data["stereo-depth"], data["disparity"] = bproc.postprocessing.stereo_global_matching(data["colors"], disparity_filter=False)
46+
```
47+
48+
Here we apply the stereo matching.
49+
* It is based on OpenCV's [implementation](https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html?highlight=sgbm#stereosgbm-stereosgbm) of [stereo semi global matching](https://elib.dlr.de/73119/1/180Hirschmueller.pdf).
50+
* Its pipeline runs as follows:
51+
* Compute the disparity map between the two images. After specifying the required parameters.
52+
* Optional use of a disparity filter (namely `wls_filter`). Enabled by setting `disparity_filter` (Enabling it could possibly lead to less accurate depth values. One should experiment with this parameter).
53+
* Triangulate the depth values using the focal length and disparity.
54+
* Clip the depth map from 0 to `depth_max`, where this value is retrieved from `renderer.Renderer`.
55+
* Apply an optional [depth completion routine](https://github.com/kujason/ip_basic/blob/master/ip_basic/depth_map_utils.py), based on simple image processing techniques. This is enabled by setting `depth_completion`.
56+
* There are some stereo semi global matching parameters that can be tuned (see fct docs), such as:
57+
* `window_size`
58+
* `num_disparities`
59+
* `min_disparity`
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
# Args: <cam_file> <obj_file> <output_dir>
2+
{
3+
"version": 3,
4+
"setup": {
5+
"blender_install_path": "/home_local/<env:USER>/blender/",
6+
"pip": [
7+
"h5py",
8+
"python-dateutil==2.1",
9+
"numpy",
10+
"Pillow",
11+
"opencv-contrib-python",
12+
"scipy"
13+
]
14+
},
15+
"modules": [
16+
{
17+
"module": "main.Initializer",
18+
"config": {
19+
"global": {
20+
"output_dir": "<args:2>"
21+
}
22+
}
23+
},
24+
{
25+
"module": "loader.SuncgLoader",
26+
"config": {
27+
"path": "<args:1>"
28+
}
29+
},
30+
{
31+
"module": "camera.CameraLoader",
32+
"config": {
33+
"path": "<args:0>",
34+
"file_format": "location rotation/value _ _ _ _ _ _",
35+
"world_frame_change": ["X", "-Z", "Y"],
36+
"default_cam_param": {
37+
"rotation": {
38+
"format": "forward_vec"
39+
}
40+
},
41+
"intrinsics": {
42+
"interocular_distance": 0.05,
43+
"stereo_convergence_mode": "PARALLEL",
44+
"convergence_distance": 0.00001,
45+
"cam_K": [650.018, 0, 637.962, 0, 650.018, 355.984, 0, 0 ,1],
46+
"resolution_x": 1280,
47+
"resolution_y": 720
48+
}
49+
}
50+
},
51+
{
52+
"module": "lighting.SuncgLighting",
53+
"config": {}
54+
},
55+
{
56+
"module": "renderer.RgbRenderer",
57+
"config": {
58+
"render_distance": true,
59+
"stereo": true,
60+
"use_alpha": true,
61+
}
62+
},
63+
{
64+
"module": "writer.StereoGlobalMatchingWriter",
65+
"config": {
66+
"disparity_filter": false
67+
}
68+
},
69+
{
70+
"module": "writer.Hdf5Writer",
71+
}
72+
]
73+
}
Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
import blenderproc as bproc
2+
import argparse
3+
import os
4+
import numpy as np
5+
6+
parser = argparse.ArgumentParser()
7+
parser.add_argument('camera', help="Path to the camera file which describes one camera pose per line, here the output of scn2cam from the SUNCGToolbox can be used")
8+
parser.add_argument('house', help="Path to the house.json file of the SUNCG scene to load")
9+
parser.add_argument('projector', help="Path to the image, which should be projected onto the scene")
10+
parser.add_argument('output_dir', nargs='?', default="examples/datasets/suncg_basic/output", help="Path to where the final files, will be saved")
11+
args = parser.parse_args()
12+
#print(f"\nUsing {args.projector} as projector image\n")
13+
bproc.init()
14+
15+
# load the objects into the scene
16+
label_mapping = bproc.utility.LabelIdMapping.from_csv(bproc.utility.resolve_resource(os.path.join('id_mappings', 'nyu_idset.csv')))
17+
objs = bproc.loader.load_suncg(args.house, label_mapping=label_mapping)
18+
19+
# define the camera intrinsics
20+
K = np.array([
21+
[650.018, 0, 637.962],
22+
[0, 650.018, 355.984],
23+
[0, 0, 1]
24+
])
25+
bproc.camera.set_intrinsics_from_K_matrix(K, 1280, 720)
26+
# Enable stereo mode and set baseline
27+
bproc.camera.set_stereo_parameters(interocular_distance=0.05, convergence_mode="PARALLEL", convergence_distance=0.00001)
28+
29+
# define a new light and set it as projector
30+
light = bproc.types.Light()
31+
light.set_type('SPOT')
32+
light.set_energy(3000)
33+
light.setup_as_projector(args.projector)
34+
35+
# read the camera positions file and convert into homogeneous camera-world transformation
36+
with open(args.camera, "r") as f:
37+
for line in f.readlines():
38+
line = [float(x) for x in line.split()]
39+
position = bproc.math.change_coordinate_frame_of_point(line[:3], ["X", "-Z", "Y"])
40+
rotation = bproc.math.change_coordinate_frame_of_point(line[3:6], ["X", "-Z", "Y"])
41+
matrix_world = bproc.math.build_transformation_mat(position, bproc.camera.rotation_from_forward_vec(rotation))
42+
bproc.camera.add_camera_pose(matrix_world)
43+
44+
# makes Suncg objects emit light
45+
bproc.lighting.light_suncg_scene()
46+
47+
# activate normal and depth rendering
48+
bproc.renderer.enable_depth_output(activate_antialiasing=False)
49+
bproc.material.add_alpha_channel_to_textures(blurry_edges=True)
50+
bproc.renderer.toggle_stereo(True)
51+
52+
# render the whole pipeline
53+
data = bproc.renderer.render()
54+
55+
# Apply stereo matching to each pair of images
56+
data["stereo-depth"], data["disparity"] = bproc.postprocessing.stereo_global_matching(data["colors"], disparity_filter=False)
57+
58+
# write the data to a .hdf5 container
59+
bproc.writer.write_hdf5(args.output_dir, data)
171 KB
Loading
487 KB
Loading
2.71 MB
Loading

0 commit comments

Comments
 (0)