diff --git a/ChangeLog.txt b/ChangeLog.txt index c49191a2f7..c059cb9485 100644 --- a/ChangeLog.txt +++ b/ChangeLog.txt @@ -77,8 +77,10 @@ ViSP 3.5.1 (under development) https://visp-doc.inria.fr/doxygen/visp-daily/tutorial-pcl-viewer.html . New tutorial: Tracking with MegaPose https://visp-doc.inria.fr/doxygen/visp-daily/tutorial-tracking-megapose.html - . New tutorial: Exporting a 3D model to MegaPose + . New tutorial: Exporting a 3D model to MegaPose after reconstruction with NeRF https://visp-doc.inria.fr/doxygen/visp-daily/tutorial-megapose-model.html + . New tutorial: Generating synthetic data for deep learning with Blenderproc + https://visp-doc.inria.fr/doxygen/visp-daily/tutorial-synthetic-blenderproc.html . New tutorial: Gradient-based Circle Hough Transform https://visp-doc.inria.fr/doxygen/visp-daily/tutorial-imgproc-cht.html - Bug fixed diff --git a/doc/config-doxygen.in b/doc/config-doxygen.in index e24f0c181f..b2e49cdc13 100644 --- a/doc/config-doxygen.in +++ b/doc/config-doxygen.in @@ -314,7 +314,7 @@ OPTIMIZE_OUTPUT_SLICE = NO # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. -EXTENSION_MAPPING = +EXTENSION_MAPPING = json=JavaScript # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable @@ -857,7 +857,8 @@ FILE_PATTERNS = *.h \ *.cpp \ *.doc \ *.dox \ - *.mm + *.mm \ + *.json # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. @@ -915,6 +916,8 @@ EXAMPLE_PATH = "@VISP_SOURCE_DIR@/example" \ "@VISP_SOURCE_DIR@/tutorial" \ "@VISP_SOURCE_DIR@/demo" \ "@VISP_SOURCE_DIR@/modules" \ + "@VISP_SOURCE_DIR@/script" \ + # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and diff --git a/doc/image/tutorial/misc/blenderproc_rgb_example.png b/doc/image/tutorial/misc/blenderproc_rgb_example.png new file mode 100644 index 0000000000..4e37901b92 Binary files /dev/null and b/doc/image/tutorial/misc/blenderproc_rgb_example.png differ diff --git a/doc/image/tutorial/misc/blenderproc_viz.png b/doc/image/tutorial/misc/blenderproc_viz.png new file mode 100644 index 0000000000..b35f304bb0 Binary files /dev/null and b/doc/image/tutorial/misc/blenderproc_viz.png differ diff --git a/doc/image/tutorial/tracking/megapose/megapose_pipeline.png b/doc/image/tutorial/tracking/megapose/megapose_pipeline.png new file mode 100644 index 0000000000..f6afb2e706 Binary files /dev/null and b/doc/image/tutorial/tracking/megapose/megapose_pipeline.png differ diff --git a/doc/tutorial/misc/tutorial-synthetic-blenderproc.dox b/doc/tutorial/misc/tutorial-synthetic-blenderproc.dox new file mode 100644 index 0000000000..23e2602aba --- /dev/null +++ b/doc/tutorial/misc/tutorial-synthetic-blenderproc.dox @@ -0,0 +1,783 @@ +/** + +\page tutorial-synthetic-blenderproc Tutorial: Generating synthetic data for deep learning with Blenderproc +\tableofcontents + +\section dnn_synthetic_intro Introduction + +In this tutorial, we will show how to generate synthetic data that can be used to train a neural network, thanks to blenderproc. + +Most of the (manual) work when training a neural network resides in acquiring and labelling data. This process can be slow, tedious and error prone. +A solution to avoid this step is to use synthetic data, generated by a simulator/computer program. 
This approach comes with multiple advantages: +- Data acquisition is fast +- It is easy to acquire accurate ground truth labels +- Variations in the training data can be easily added + +There are however, some drawbacks: +- More knowledge of the scene is required: in the case of detection, we require a 3D model of the object, which is not the case for true images +- A difference between simulated and real data can be apparent and negatively impact network performance (this is called the Sim2Real gap) + +The latter point is heavily dependent on the quality of the generated images and the more realistic the images, the better the expected results. + +Blender, using ray tracing, can generate realistic images. To perform data generation, Blenderproc has been developed and is an extremely useful and flexible tool to generate realistic scenes from Python code. + +Along with RGB images, Blenderproc can generate different labels or inputs: +- Depth map +- Normals +- Semantic segmentation +- Instance segmentation +- Bounding box +- Optical flow (not provided in our generation script) + +In this tutorial, we will install blenderproc and use it to generate simple but varied scenes containing objects of interest. +We provide a simple, object-centric generation script that should suffice in many cases. +However, since Blenderproc is easy to use, with many examples included in the documentation, readapting this script to your needs should be easy. + +\section dnn_synthetic_install Requirements + +First, you should start by installing blenderproc. First, start by creating a new conda environment to avoid potential conflicts with other Python packages. +\code{.sh} +$ conda create --name blenderproc python=3.10 pip +$ conda activate blenderproc +$ pip install blenderproc +\endcode +\note Our generation script has been tested with blenderproc 2.5.0 (with Blender 3.3 under the hood) and python 3.10. + +You can then run the Blenderproc sample example with: +\code{.sh} +(blenderproc) $ blenderproc quickstart +\endcode +This may take some time, as Blenderproc downloads its own version of Blender and sets up its own environment. This setup will only be performed once. + +Once Blenderproc is done, you can check its output with: +\code{.sh} +(blenderproc) $ blenderproc vis hdf5 output/0.hdf5 +\endcode + + +Blenderproc stores its output in HDF5 file format. Each HDF5 **may** contain the RGB image, along with depth, normals, and other modalities. + +For the simulator to provide useful data, we should obtain a set of realistic textures (thus helping close the Sim2Real gap). +Thankfully, Blenderproc provides a helpful script to download a dataset of materials from cc0textures.com, containing more than 1500 high resolution materials. +To download the materials, run +\code{.sh} +(blenderproc) $ blenderproc download cc_textures path/to/folder/where/to/save/materials +\endcode +\warning Because the materials are in high definition, downloading the full dataset may take a large amount of disk space (30+ GB). If this is too much for you, you can safely delete some of the materials or stop the script after it has acquired enough materials. While using a small number of materials can be useful when performing quick tests, using the full set should be preferred as variety helps when transferring your deep learning model to real world data. + + +\section dnn_synthetic_script Running the object-centric generation script + +We will now run the generation script. 
+The script places a random set of objects in a simple cubic room, with added distractors. Materials of the walls and distractors are randomized. + +This script and an example configuration file can be found in the `script/dataset_generator` folder of your ViSP source directory. + + +The basic algorithm is: +\verbatim +For each scene: + Choose N target objects from the provided models + Add noise to the N objects (material properties, size, geometry) + Generate a scene: + - compute s = length of the larget diagonal of the axis-aligned bounding box of the largest object + - set room_size = s * random_scale_factor + - Create the ground, walls and ceiling of the room (with size room_size) and select a random material for each of them + - Add random distractors, sampled from spheres, cubes, cylinders and monkey heads (Suzanne) + For each distractor: + - Sample a random position and orientation in the room + - Select a random material from cc0 + - Add noise to PBR + - Potentially add displacement + - Potentially set distractor as emissive (emitting light) + - Add random lights, either point lights or spots + For each light: + - Sample a random intensity and position + - Sample a random color + - If the light is a spot, orient it so that it focuses on a target object + If simulating physics: + - Simulate physics for a fixed time and set final object poses + Remove objects that left the room (Physics collisions) + Sample camera poses: + For each sample to generate: + Do while camera pose is not correct: + - Select a target object + - Select a point of interest in the bounding box of the object + - Sample a random camera location in a clamped ball around the object + - Camera position is set to have a minimum/maximum distance to the point of interest that is dependent on the object size + - Set camera orientation to look at the point of interest, with a random rotation around the optical axis + - Camera pose is correct if target object is visible and camera does not clip through an object or a wall + - Call blender rendering + - Save data in HDF5 format + - If required, compute occlusion-aware bounding boxes + - If required, save object pose in camera frame +\endverbatim +Many randomization parameters can be modified to alter the rendering, as explained in \ref dnn_input_configuration. + + + +With this simple approach, we can obtain images such as: +\image html misc/blenderproc_rgb_example.png + + +\subsection dnn_input_objects 3D model format +To use this data generation tool, you should first provide the 3D models. +You can provide multiple models, which will be sampled randomly during generation. + +The models should be contained in a folder as such: +\verbatim +- models + - objectA + - model.obj + - model.mtl + - texture.png + - objectB + - another_model.obj + - another_model.mtl +\endverbatim + +When setting up the configuration file in \ref dnn_input_configuration, "models_path" should point to the root folder, models. +Each subfolder should contain a single object, in `.obj` format (with potential materials and textures). Each object will be considered as having its own class, the class name being the name of the subfolder (e.g., objectA or objectB). +The class indices start with 1, and are sorted alphabetically depending on the name of the class (e.g., objectA = 1, objectB = 2). + +\subsection dnn_input_configuration Generation configuration +Configuring the dataset generation is done through a JSON file. 
An example configuration file can be seen below: +\include example_config.json + +The general parameters are: + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<tr><th>Name</th><th>Type, possible values</th><th>Description</th></tr>
+<tr><td>numpy_seed</td><td>Int</td><td>Seed for numpy's random functions. Allows for reproducible results.</td></tr>
+<tr><td>blenderproc_seed</td><td>String</td><td>Seed for Blenderproc. Allows for reproducible results.</td></tr>
+<tr><td>models_path</td><td>String</td><td>Path to the folder containing the models. See \ref dnn_input_objects</td></tr>
+<tr><td>cc_textures_path</td><td>String</td><td>Path to the folder containing the CC0 materials.</td></tr>
+</table>
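+
+As an illustration, the snippet below is a minimal sketch of how these top-level fields can be read and checked before launching a generation run; it mirrors what generate_dataset.py does with json.load, and the file name config.json is only an example.
+\code{.py}
+import json
+from pathlib import Path
+
+# Load the generation configuration (the path is only an example)
+with open(Path('config.json'), 'r') as f:
+    config = json.load(f)
+
+# Basic sanity checks on the general parameters
+assert Path(config['models_path']).is_dir(), 'models_path must point to an existing folder'
+assert Path(config['cc_textures_path']).is_dir(), 'cc_textures_path must point to the downloaded CC0 materials'
+print('Seeds:', config['numpy_seed'], config['blenderproc_seed'])
+\endcode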
+ +You can also control some of the rendering parameters. This will impact the rendering time and the quality of the generated RGB images. +These parameters are located in the "rendering" field. + + + + + + + + + + + + +
+<table>
+<tr><th>Name</th><th>Type, possible values</th><th>Description</th></tr>
+<tr><td>max_num_samples</td><td>Int > 0</td><td>Number of rays per pixel. A lower number results in noisier images (especially in scenes with large variations).</td></tr>
+<tr><td>denoiser</td><td>One of [null, "INTEL", "OPTIX"]</td><td>Which denoiser to use after performing ray tracing. null indicates that no denoiser is used. "OPTIX" requires a compatible Nvidia GPU.
+Using a denoiser makes it possible to obtain a clean image with a low number of rays per pixel.</td></tr>
+</table>
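+
+For reference, these two settings are applied through Blenderproc as sketched below; this is a condensed, hypothetical fragment of the renderer setup performed by the generation script and only runs inside a blenderproc session (i.e., launched with blenderproc run).
+\code{.py}
+import blenderproc as bproc
+from blenderproc.python.renderer import RendererUtility
+
+bproc.init()
+# Values taken from the "rendering" field of the JSON configuration
+bproc.renderer.set_max_amount_of_samples(32)  # max_num_samples: rays per pixel
+RendererUtility.set_denoiser('OPTIX')         # denoiser: null, "INTEL" or "OPTIX"
+\endcode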
+ +You can also modify the camera's intrinsic parameters. The camera uses an undistorted perspective projection model. For more information on camera parameters, see vpCameraParameters. +These parameters are found in the "camera" field of the configuration. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<tr><th>Name</th><th>Type, possible values</th><th>Description</th></tr>
+<tr><td>px</td><td>Float</td><td>See vpCameraParameters</td></tr>
+<tr><td>py</td><td>Float</td><td>See vpCameraParameters</td></tr>
+<tr><td>v0</td><td>Float</td><td>See vpCameraParameters</td></tr>
+<tr><td>u0</td><td>Float</td><td>See vpCameraParameters</td></tr>
+<tr><td>h</td><td>Int</td><td>Height of the generated images.</td></tr>
+<tr><td>w</td><td>Int</td><td>Width of the generated images.</td></tr>
+<tr><td>randomize_params_percent</td><td>Float, [0, 1)</td><td>
+Controls the randomization of the camera parameters \f$p_x, p_y, u_0, v_0\f$. If randomize_params_percent > 0, then each time a scene is created the intrinsics are perturbed around the given values.
+For example, if this parameter is equal to 0.10 and \f$p_x = 500\f$, then the \f$p_x\f$ used when generating images will be in the range [450, 550].
+</td></tr>
+</table>
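+
+The sketch below shows how such a relative perturbation can be reproduced; the uniform offset around each nominal value is an assumption that matches the [450, 550] example above, and the function name is purely illustrative.
+\code{.py}
+import numpy as np
+
+def perturb_intrinsics(px, py, u0, v0, randomize_params_percent):
+    # Uniform relative perturbation around the nominal intrinsics
+    nominal = np.array([px, py, u0, v0], dtype=float)
+    noise = np.random.uniform(-randomize_params_percent, randomize_params_percent, size=4)
+    return nominal * (1.0 + noise)
+
+print(perturb_intrinsics(500, 500, 320, 240, 0.10))  # the first component falls in [450, 550]
+\endcode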
+ + +To customize the scene, you can change the parameters in the "scene" field: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<tr><th>Name</th><th>Type, possible values</th><th>Description</th></tr>
+<tr><td>room_size_multiplier_min</td><td>Float > 1.0, < room_size_multiplier_max</td><td>
+Minimum room size as a factor of the size of the biggest sampled target object. The room is cubic.
+The size of the biggest object is the length of the largest diagonal of its axis-aligned bounding box. This tends to overestimate the size of the object.
+If the size of the biggest object is 0.5m, room_size_multiplier_min = 2 and room_size_multiplier_max = 4, then the room's size will be randomly sampled to be between 1m and 2m.
+</td></tr>
+<tr><td>room_size_multiplier_max</td><td>Float > room_size_multiplier_min</td><td>
+Maximum room size as a factor of the size of the biggest sampled target object. The room is cubic.
+The size of the biggest object is the length of the largest diagonal of its axis-aligned bounding box. This tends to overestimate the size of the object.
+If the size of the biggest object is 0.5m, room_size_multiplier_min = 2 and room_size_multiplier_max = 4, then the room's size will be randomly sampled to be between 1m and 2m.
+</td></tr>
+<tr><td>simulate_physics</td><td>Boolean</td><td>Whether to simulate physics. If false, then objects will be floating across the room. If true, then objects will fall to the ground.</td></tr>
+<tr><td>max_num_textures</td><td>Int > 0</td><td>Max number of textures per blenderproc run. If scenes_per_run is 1, max_num_textures = 50 and the number of distractors is more than 50, then the same 50 textures will be used across all distractors (and walls). In this case, new materials will be sampled for each scene.</td></tr>
+<tr><td>distractors</td><td>Dictionary</td><td>See below</td></tr>
+<tr><td>lights</td><td>Dictionary</td><td>See below</td></tr>
+<tr><td>objects</td><td>Dictionary</td><td>See below</td></tr>
+</table>
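+
+To make the room sizing concrete, here is a small sketch of the computation described above; the helper name is illustrative, and only the use of the largest bounding-box diagonal and of the two multipliers comes from the description.
+\code{.py}
+import itertools
+import numpy as np
+
+def sample_room_size(bounding_box_corners, multiplier_min, multiplier_max):
+    # bounding_box_corners: 8x3 array with the corners of the axis-aligned bounding box
+    corners = np.asarray(bounding_box_corners, dtype=float)
+    # Object size = length of the largest diagonal of the bounding box
+    object_size = np.linalg.norm(corners[None, :, :] - corners[:, None, :], axis=-1).max()
+    return object_size * np.random.uniform(multiplier_min, multiplier_max)
+
+# Example: axis-aligned box of dimensions 0.3 x 0.3 x 0.2 m
+dims = (0.3, 0.3, 0.2)
+corners = np.array(list(itertools.product(*[(0.0, d) for d in dims])))
+print(sample_room_size(corners, 2.0, 4.0))
+\endcode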
+
+Distractors are small, simple objects that are added along with the target objects to create some variations and occlusions. You can also load custom objects as distractors.
+To modify their properties, you can change the "distractors" field of the scene:
+
+<table>
+<tr><th>Name</th><th>Type, possible values</th><th>Description</th></tr>
+<tr><td>min_count</td><td>Int >= 0, < max_count</td><td>Minimum number of distractors to place in the room.</td></tr>
+<tr><td>max_count</td><td>Int > min_count</td><td>Maximum number of distractors to place in the room.</td></tr>
+<tr><td>custom_distractors</td><td>String, or null</td><td>If not null, path to a folder containing custom distractor objects, in .obj or .ply format.</td></tr>
+<tr><td>custom_distractor_proba</td><td>Float, >= 0.0, <= 1.0</td><td>If custom_distractors is not null, probability that a distractor is sampled from the user-specified distractors.</td></tr>
+<tr><td>min_size_rel_scene</td><td>Float > 0.0</td><td>
+Minimum size of the distractors, relative to the room size.
+If the room size is 0.5m and min_size_rel_scene = 0.1, then the minimum size of a distractor will be 0.05m.
+Scale is applied independently on each axis.
+</td></tr>
+<tr><td>max_size_rel_scene</td><td>Float < 1.0, > min_size_rel_scene</td><td>
+Maximum size of the distractors, relative to the room size.
+If the room size is 0.5m and max_size_rel_scene = 0.2, then the maximum size of a distractor will be 0.1m.
+Scale is applied independently on each axis.
+</td></tr>
+<tr><td>displacement_max_amount</td><td>Float >= 0.0</td><td>
+Amount of displacement to apply to distractors.
+Displacement subdivides the mesh and displaces each of the distractor's vertices according to a random noise pattern.
+This option greatly slows down rendering: set it to 0 if needed.
+</td></tr>
+<tr><td>pbr_noise</td><td>Float >= 0.0</td><td>
+Amount of noise to add to the material properties of the distractors.
+These properties include the specularity, the "metallicness" and the roughness of the material, according to Blender's principled BSDF.
+</td></tr>
+<tr><td>emissive_prob</td><td>Float >= 0.0, <= 1.0</td><td>
+Probability that a distractor becomes a light source: its surface emits light. Set to more than 0 to add more light variations and shadows.
+</td></tr>
+<tr><td>emissive_min_strength</td><td>Float >= 0.0, < emissive_max_strength</td><td>Minimum emission strength for a distractor that emits light. In Watts/m².</td></tr>
+<tr><td>emissive_max_strength</td><td>Float > emissive_min_strength</td><td>Maximum emission strength for a distractor that emits light. In Watts/m².</td></tr>
+</table>
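+
+A small sketch of how the relative-size bounds can translate into per-axis scales; the uniform sampling per axis is an assumption consistent with "Scale is applied independently on each axis", and the function name is illustrative.
+\code{.py}
+import numpy as np
+
+def sample_distractor_size(room_size, min_size_rel_scene, max_size_rel_scene):
+    # One scale per axis, so distractors may appear stretched
+    return np.random.uniform(min_size_rel_scene * room_size,
+                             max_size_rel_scene * room_size, size=3)
+
+print(sample_distractor_size(0.5, 0.1, 0.2))  # each component lies in [0.05, 0.1] metres
+\endcode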
+ +To change the lighting behaviour, see the options below: + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<tr><th>Name</th><th>Type, possible values</th><th>Description</th></tr>
+<tr><td>min_count</td><td>Int >= 0, < max_count</td><td>Minimum number of lights in the scene.</td></tr>
+<tr><td>max_count</td><td>Int > min_count</td><td>Maximum number of lights in the scene.</td></tr>
+<tr><td>min_intensity</td><td>Float > 0.0, < max_intensity</td><td>Minimum intensity of a light. In Watts.</td></tr>
+<tr><td>max_intensity</td><td>Float > min_intensity</td><td>Maximum intensity of a light. In Watts.</td></tr>
+</table>
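+
+As an illustration, lights matching these settings can be created with Blenderproc as sketched below; the point-light type, the sampling ranges and the colour randomization are assumptions consistent with the algorithm outline above, and the snippet only runs inside a blenderproc session.
+\code{.py}
+import blenderproc as bproc
+import numpy as np
+
+bproc.init()
+num_lights = np.random.randint(3, 6 + 1)  # between min_count and max_count
+for _ in range(num_lights):
+    light = bproc.types.Light()
+    light.set_type('POINT')
+    light.set_location(np.random.uniform(-1.0, 1.0, size=3))
+    light.set_energy(float(np.random.uniform(50, 200)))  # intensity in Watts
+    light.set_color(list(np.random.uniform(0.5, 1.0, size=3)))
+\endcode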
+ +To change the sampling behaviour of target objects, see the properties below: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<tr><th>Name</th><th>Type, possible values</th><th>Description</th></tr>
+<tr><td>min_count</td><td>Int >= 0, < max_count</td><td>Minimum number of target objects in the scene.</td></tr>
+<tr><td>max_count</td><td>Int > min_count</td><td>Maximum number of target objects in the scene.</td></tr>
+<tr><td>multiple_occurences</td><td>Boolean</td><td>Whether a single object can appear multiple times in the same scene (sampling with replacement).</td></tr>
+<tr><td>scale_noise</td><td>Float >= 0.0</td><td>Object size noise. If scale_noise > 0.0, the object is scaled uniformly on all axes (it does not appear stretched).</td></tr>
+<tr><td>displacement_max_amount</td><td>Float >= 0.0</td><td>
+Amount of displacement to apply to target objects.
+Displacement subdivides the mesh and displaces each of the object's vertices according to a random noise pattern.
+This option greatly slows down rendering: set it to 0 if needed.
+Note that this is expressed in absolute units and does not vary depending on the size of the object.
+</td></tr>
+<tr><td>pbr_noise</td><td>Float >= 0.0</td><td>
+Amount of noise to add to the material properties of the target objects.
+These properties include the specularity, the "metallicness" and the roughness of the material, according to Blender's principled BSDF.
+</td></tr>
+<tr><td>cam_min_dist_rel</td><td>Float >= 0.0, < cam_max_dist_rel</td><td>
+Minimum distance of the camera to the point of interest of the object when sampling camera poses. This is expressed in terms of the size of the target object.
+If the target object has a size of 0.5m and cam_min_dist_rel = 1.5, then the closest possible camera will be 0.75m away from the point of interest.
+</td></tr>
+<tr><td>cam_max_dist_rel</td><td>Float >= cam_min_dist_rel</td><td>
+Maximum distance of the camera to the point of interest of the object when sampling camera poses. This is expressed in terms of the size of the target object.
+If the target object has a size of 0.5m and cam_max_dist_rel = 2.0, then the farthest possible camera will be 1m away from the point of interest.
+</td></tr>
+</table>
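+
+The sketch below shows how the two relative distances bound the sampled camera positions; only the multiplication by the object size comes from the description above, the function name is illustrative.
+\code{.py}
+def camera_distance_bounds(object_size, cam_min_dist_rel, cam_max_dist_rel):
+    # Distances to the point of interest are expressed relative to the object size
+    return object_size * cam_min_dist_rel, object_size * cam_max_dist_rel
+
+print(camera_distance_bounds(0.5, 1.5, 2.0))  # (0.75, 1.0): the camera lies between 0.75m and 1m away
+\endcode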
+ +Finally, we can customize the dataset that we will generate from the given scenes. +This includes the number of scenes, images, and what information to save. + +All the data will be stored in HDF5 format, which can then be unpacked later. + +To customize the dataset, modify the options in the "dataset" field: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<tr><th>Name</th><th>Type, possible values</th><th>Description</th></tr>
+<tr><td>save_path</td><td>String</td><td>Path to the folder that will contain the final dataset. This folder will contain one folder per scene, and each sample of a scene will be its own HDF5 file.</td></tr>
+<tr><td>scenes_per_run</td><td>Int > 0</td><td>Number of scenes to generate per blenderproc run. Between blenderproc runs, Blender is restarted in order to avoid memory issues.</td></tr>
+<tr><td>num_scenes</td><td>Int > 0</td><td>Total number of scenes to generate. Generating many scenes adds more diversity to the dataset, as object placement, materials and lighting are randomized once per scene.</td></tr>
+<tr><td>images_per_scene</td><td>Int > 0</td><td>Number of images to generate per scene. The total number of samples in the dataset will be num_scenes * (images_per_scene + empty_images_per_scene).</td></tr>
+<tr><td>empty_images_per_scene</td><td>Int >= 0, <= images_per_scene</td><td>Number of images without target objects to generate per scene. The camera poses for these images are sampled from the poses used to generate images with target objects. Thus, the only difference is that the objects are not present; the rest of the scene is left untouched.</td></tr>
+<tr><td>pose</td><td>Boolean</td><td>Whether to save the pose of the target objects that are visible in the camera. The poses of the objects are expressed in the camera frame as a homogeneous matrix \f$^{c}\mathbf{T}_{o}\f$.</td></tr>
+<tr><td>depth</td><td>Boolean</td><td>Whether to save the depth buffer associated with the RGB image. Same size as the RGB image.</td></tr>
+<tr><td>normals</td><td>Boolean</td><td>Whether to save the normal map associated with the RGB image. Same size as the RGB image. The normals are 3D unit vectors, expressed in the camera frame.</td></tr>
+<tr><td>segmentation</td><td>Boolean</td><td>Whether to save the segmentation maps (by class and by instance). Segmentation by class only contains the target objects (class >= 1). Segmentation by instance includes every visible object.</td></tr>
+<tr><td>detection</td><td>Boolean</td><td>Whether to save the bounding box detections. In this case, bounding boxes are not computed from the segmentation map (also possible with Blenderproc), but rather in such a way that occlusion does not influence the final bounding box. The detections can be filtered with the parameters in "detection_params".</td></tr>
+<tr><td>detection_params:min_side_size_px</td><td>Int >= 0</td><td>Minimum side length of a detection, in pixels, for it to be considered valid. Used to filter very far or small objects, for which detection would be hard.</td></tr>
+<tr><td>detection_params:min_visibility_percentage</td><td>Float [0.0, 1.0]</td><td>
+Percentage of the object that must be visible for a detection to be considered valid. The visibility score is computed as follows:
+first, the vertices of the mesh that are behind the camera are filtered out. Then, the vertices that are outside of the camera's field of view are filtered out. Finally, "detection_params:points_sampling_occlusion" points are randomly sampled to test whether the object is occluded (the test is done through ray casting).
+If too many points are filtered out, the object is considered as not visible and the detection is invalid.
+</td></tr>
+</table>
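+
+As a quick sanity check of these parameters, the expected number of samples can be computed ahead of time; the sketch below simply applies the formula given above to the values of the example configuration.
+\code{.py}
+num_scenes = 100
+images_per_scene = 10
+empty_images_per_scene = 2
+
+total_samples = num_scenes * (images_per_scene + empty_images_per_scene)
+print(total_samples)  # 1200 HDF5 samples spread over 100 scene folders
+\endcode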
+ + +\section dnn_run_script Running the script to generate data + +Once you have configured the generation to your liking, navigate to the `script/dataset_generator` located in your ViSP source directory. + +You can then run the `generate_dataset.py` script as such +\code{.sh} +(blenderproc) user@machine:~/visp/script/dataset_generator $ python generate_dataset.py --config path/to/config.json +\endcode + +If all is well setup, then the dataset generation should start and run. + + +\warning If during generation, you encounter a message about invalid camera placement, try to make room_size_multiplier_min and room_size_multiplier_max larger, so that more space is available for object placement. + +To give an idea of generation time, generating 1000 images (with a resolution of 640 x 480) and detections of a single object, with a few added distractors, takes around 30mins on a Quadro RTX 6000. + +Once generation is finished, you are ready to leverage the data to train your neural network. + +\section dnn_output Using and parsing the generation output + +The dataset generated by Blender is located in the "dataset:save_path" path that you specified in your JSON configuration file. + +The dataset has the following structure +\verbatim +- dataset + - 0 + - 0.hdf5 + - 1.hdf5 + - ... + - 1 + - 0.hdf5 + - 1.hdf5 + - ... + - ... + - classes.txt +\endverbatim + +There is one subfolder per scene, and each HDF5 file is a single sample that may contain RGB, depth, normals, etc. + +You can visualise the generated images (RGB, depth, normals, segmentation maps) by running +\code{.sh} +(blenderproc) $ blenderproc vis hdf5 path/to/your/output/0/*.hdf5 +\endcode + +You will then see something that looks like this, depending on the outputs that you chose in the configuration file: +\image html blenderproc_viz.png + +where you can replace the "0" in the path with another number, which corresponds to the generated scene index. +You can also specify the path to a single HDF5 file to view one sample at a time. + +\subsection dnn_output_yolov7 Using detections to finetune a YoloV7 + +To help, we provide a script that reformats a blenderproc dataset to the format expected by YoloV7. +This script can be run like this: +\code{.sh} +(blenderproc) $ python export_for_yolov7.py --input path/to/dataset --output path/to/yolodataset --train-split 0.8 +\endcode + +here "--input" indicates the path to the location of the blenderproc dataset, while "--output" points to the folder where the dataset in the format that YoloV7 expects will be saved. "--train-split" is an argument that indicates how much of the dataset is kept for training. A value of 0.8 indicates that 80% of the dataset is used for training, while 20% is used for validation. The split is performed randomly across all scenes (a scene may be visible in both train and validation sets). + +Once the script has run, the folder "path/to/yolodataset" should be created and contain the dataset as expected by YoloV7. +This folder contains a "dataset.yml" file, which will be used when training a YoloV7. It contains: +the following: + +``` +names: +- esa +nc: 1 +train: /local/sfelton/yolov7_esa_dataset/images/train +val: /local/sfelton/yolov7_esa_dataset/images/val +``` +where nc is the number of class, "names" are the class names, and "train" and "val" are the paths to the dataset splits. + +To start training a YoloV7, you should download the repository and install the required dependencies. Again, we will create a conda environment. 
You can also use a docker container, as explained in the documentation. We also download the pretrained yolo model, that we will finetune on our own dataset. +``` +~ $ git clone https://github.com/WongKinYiu/yolov7.git +~ $ cd yolov7 +~/yolov7 $ conda create --name yolov7 pip +~/yolov7 $ conda activate yolov7 +(yolov7) ~/yolov7 $ pip install -r requirements.txt +(yolov7) ~/yolov7 $ wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-tiny.pt +``` + +To fine-tune a YoloV7, we should create two new files: the network configuration and the hyperparameters. We will reuse the ones provided for the tiny model. +``` +CFG=cfg/training/yolov7-tiny-custom.yaml +cp cfg/training/yolov7-tiny.yaml $CFG + +HYP=data/hyp.scratch.custom.yaml +cp data/hyp.scratch.tiny.yaml $HYP +``` +Next open the new cfg file, and modify the number of classes (set "nc" from 80 to the number classes you have in your dataset) + +You can also modify the hyperparameters file to add more augmentation during training. + +To fine-tune the YoloV7-tiny on our data, run +``` +YOLO_DATASET=/path/to/dataset +IMG_SIZE=640 +YOLO_NAME=blenderproc-tiny +python train.py --workers 8 --device 0 --batch-size 64 --data "${YOLO_DATASET}/dataset.yaml" --img $IMG_SIZE $IMG_SIZE --cfg $CFG --weights yolov7-tiny.pt --name $YOLO_NAME --hyp $HYP +``` + +Note that if your run fine-tuning multiple times, then a number will be appended to the name that you have given to your model. + +If you run out of memory during training, change the batch size to a lower value. + +Here is an overview of the generated images and the resulting detections for a simple model (cube) and a NeRF-reconstructed one: +\htmlonly +

+ + +

+\endhtmlonly + + + + +\subsection dnn_output_custom_parsing Parsing HDF5 with a custom script + +In Python, an HDF5 file can be read like a dictionary. + +With the following script, you can inspect the content of an HDF5 file: +\code{.py} +from pathlib import Path +import numpy as np +import h5py +import json + +if __name__ == '__main__': + dataset_path = Path('output') + for scene_folder in dataset_path.iterdir(): + if not scene_folder.is_dir(): + continue + print(f'Reading scene {scene_folder.name}') + for sample in scene_folder.iterdir(): + if not sample.name.endswith('.hdf5'): + continue + print(f'\tReading sample {sample.name}') + with h5py.File(sample) as f: + # Image data + keys = ['colors', 'depth', 'normals', 'class_segmaps', 'instance_segmaps'] + for key in keys: + if key in f: + data = np.array(f[key]) + print(f'\t\t{key}: shape = {data.shape}, type = {data.dtype}') + # Json objects + if 'instance_attribute_maps' in f: + print('\t\tMapping between instance and class:') + text = np.array(f['instance_attribute_maps']).tobytes() + json_rep = json.loads(text) + print(json_rep) + print() + + if 'object_data' in f: + text = np.array(f['object_data']).tobytes() + json_rep = json.loads(text) + + for object_idx, object_data in enumerate(json_rep): + print(f'\t\tObject {object_idx} data: ') + for obj_k in object_data.keys(): + print(f'\t\t\t{obj_k} = {object_data[obj_k]}') + print() +\endcode + +Running on a custom dataset with all outputs enabled (see \ref dnn_input_configuration) we obtain the following output: +\verbatim +... +Reading scene 4 + Reading sample 0.hdf5 + colors: shape = (480, 640, 3), type = uint8 + depth: shape = (480, 640), type = float32 + normals: shape = (480, 640, 3), type = float32 + class_segmaps: shape = (480, 640), type = uint8 + instance_segmaps: shape = (480, 640), type = uint8 + Mapping between instance and class: + [{'idx': 5, 'category_id': 2}, {'idx': 7, 'category_id': 3}, {'idx': 10, 'category_id': 0}, {'idx': 11, 'category_id': 0}, + {'idx': 14, 'category_id': 0}, {'idx': 18, 'category_id': 0}, {'idx': 22, 'category_id': 0}, {'idx': 32, 'category_id': 0}, + {'idx': 37, 'category_id': 0}] + + Object 0 data: + class = 2 + name = cube.001 + cTo = [[-0.9067991971969604, -0.12124679982662201, 0.40374961495399475, 0.06561152129493264], + [-0.399792343378067, 0.5511342883110046, -0.7324047088623047, 0.31461843930247957], + [-0.1337185800075531, -0.8255603313446045, -0.548241913318634, 0.01648345626311576], + [0.0, 0.0, 0.0, 1.0]] + bounding_box = [138.9861569133729, 254.62236838239235, 100.20349472431002, 107.79005297420909] + + Object 1 data: + class = 3 + name = dragon.002 + cTo = [[-0.9067991971969604, -0.12124679982662201, 0.40374961495399475, 0.06561152129493264], + [-0.399792343378067, 0.5511342883110046, -0.7324047088623047, 0.31461843930247957], + [-0.1337185800075531, -0.8255603313446045, -0.548241913318634, 0.01648345626311576], + [0.0, 0.0, 0.0, 1.0]] + bounding_box = [314.54058632091767, 202.86607727119753, 70.64506150004479, 90.54480823214647] +... +\endverbatim + +- Both depth and normals are represented as floating points, conserving accuracy. +- The object data is represented as a JSON document. Which you can directly save or reparse to save only the information of interest. +- Object poses are expressed in the camera frame and are represented as homogeneous matrix. 
+- Bounding boxes coordinates are in pixels, and the values are [x_min, y_min, width, height] + +You can modify this script to export the dataset to another format, as it was done in \ref dnn_output_yolov7. + +\section dnn_synthetic_next Next steps + +If you use this generator to train a detection network, you can combine it with Megapose to perform 6D pose estimation and tracking. See \ref tutorial-tracking-megapose. + +*/ diff --git a/doc/tutorial/tracking/tutorial-tracking-megapose-object-export.dox b/doc/tutorial/tracking/tutorial-tracking-megapose-object-export.dox index ac5a9ac2f2..01ba376270 100644 --- a/doc/tutorial/tracking/tutorial-tracking-megapose-object-export.dox +++ b/doc/tutorial/tracking/tutorial-tracking-megapose-object-export.dox @@ -1,6 +1,6 @@ /** -\page tutorial-megapose-model Tutorial: Exporting a 3D model to MegaPose +\page tutorial-megapose-model Tutorial: Exporting a 3D model to MegaPose after reconstruction with NeRF \tableofcontents This tutorial follows \ref tutorial-tracking-megapose and details how to export your 3D model for usage with MegaPose. @@ -259,9 +259,9 @@ When using with MegaPose, you may encounter multiple issues: - MegaPose relies on Panda3D to perform 3D rendering. Using gltf (or their binary version, glb) files requires a different rendering pipeline which is not the default choice for megapose - Prefer an export to .obj -\section megapose_model_testing Testing your model - -To test whether your model is correct and can be used for pose estimation or tracking, you should follow \ref tutorial-tracking-megapose and run the sample program. +\section megapose_model_testing Next steps +- To test whether your model is correct and can be used for pose estimation or tracking, you should follow \ref tutorial-tracking-megapose and run the sample program. +- Since Megapose requires a way to detect the object, you can train a neural network to do so entirely from synthetic data by using the 3D model that you just obtained. The process is detailed in \ref tutorial-synthetic-blenderproc. */ \ No newline at end of file diff --git a/doc/tutorial/tracking/tutorial-tracking-megapose.dox b/doc/tutorial/tracking/tutorial-tracking-megapose.dox index 3507971a11..8911afb7dd 100644 --- a/doc/tutorial/tracking/tutorial-tracking-megapose.dox +++ b/doc/tutorial/tracking/tutorial-tracking-megapose.dox @@ -28,19 +28,25 @@ It has however, several drawbacks: You may thus require a way to detect the object, such as an object detection neural network (available in ViSP with the class vpDetectorDNNOpenCV, see \ref tutorial-detection-dnn). For initial tests, the bounding box can also be provided by the user via click. +For the 3D model and detection inputs required by megapose, we provide tutorials to help you get setup. See \ref tutorial-megapose-model for the 3D model creation and \ref tutorial-synthetic-blenderproc to train a detection network. +With these tutorials and the tools presented therein, the work to use megapose can be almost fully automated as summed up in the figure below: + +\image html tutorial/tracking/megapose/megapose_pipeline.png + + The MegaPose integration in ViSP is based on a client-server model: - The client, that uses either vpMegaPose or vpMegaPoseTracker, is C++-based. It sends pose estimation requests to the server. -- The server is written in Python. It wraps around the MegaPose model. Each time a pose estimation is requested, the server reshapes the data and forwards it to MegaPose. - It then sends back the information to the client. 
+- The server is written in Python. It wraps around the MegaPose model. Each time a pose estimation is requested, the server reshapes the data and forwards it to MegaPose. + It then sends back the information to the client. -\note The computer running the server needs a GPU. The client can run on the same computer as the server. It can also run on another computer without a GPU. - As Ethernet is used to communicate between client and server, both computers must be accessible on the Ethernet network. +\note The computer running the server needs a GPU. The client can run on the same computer as the server. It can also run on another computer without a GPU. To obtain have a decent tracking speed, it is recommended to have both machines on the same network. This tutorial will explain how to install and run MegaPose and then demonstrate its usage with a simple object tracking application. + \section megapose_install Installation \subsection megapose_cpp_install Installing the client @@ -298,11 +304,11 @@ Once MegaPose has been called, we can display the results in the image. We plot: We have walked through the code of a single object tracking with MegaPose. You may wish to save the results. You can do so by serializing to JSON, as explained in \ref tutorial-json -\subsection megapose_adaptation Adapting this tutorial for your use case +\section megapose_adaptation Adapting this tutorial for your use case This program can run with new 3D models, as MegaPose does require retraining. To adapt this script to your problem, you will require multiple things: - The intrinsic parameters of your camera. To calibrate your camera, see \ref tutorial-calibration-intrinsic - The 3D model of your object. See \ref tutorial-megapose-model -- Optionally (but recommended), an automated way to detect the object. You can for instance train a deep neural network and use it in ViSP, as explained in \ref tutorial-detection-dnn. +- Optionally (but recommended), an automated way to detect the object. You can for instance train a deep neural network and use it in ViSP, as explained in \ref tutorial-detection-dnn. Since you already have your 3D model, you can use Blender to generate a synthetic dataset and train a detection network without manually annotating images. This process is explained in \ref tutorial-synthetic-blenderproc. */ diff --git a/doc/tutorial/tutorial-users.dox b/doc/tutorial/tutorial-users.dox index 96609e86b1..8440e9a3f2 100644 --- a/doc/tutorial/tutorial-users.dox +++ b/doc/tutorial/tutorial-users.dox @@ -99,7 +99,7 @@ This page introduces the user to the way to track objects in images. - \subpage tutorial-tracking-mb-generic-rgbd-Blender
This tutorial shows how to use Blender to generate simulated data to test the model-based tracker. - \subpage tutorial-tracking-tt
This tutorial focuses on template trackers based on image registration approaches. - \subpage tutorial-tracking-megapose
This tutorial shows how to use megapose, a deep learning pose estimation solution, to perform single object tracking. -- \subpage tutorial-megapose-model
This tutorial shows how to prepare your 3D model for usage in megapose. +- \subpage tutorial-megapose-model
This tutorial shows how to prepare your 3D model for usage in megapose after reconstruction with Neural Radiance Fields (NeRF). */ @@ -170,6 +170,7 @@ This page introduces the user to other tools that may be useful. in one thread and display these images in an other thread. - \subpage tutorial-pcl-viewer
This tutorial explains how to use a threaded PCL-based point cloud visualizer. - \subpage tutorial-json
This tutorial explains how to read and save data in the portable JSON format. It focuses on saving the data generated by a visual servoing experiment and exporting it to Python in order to generate plots. +- \subpage tutorial-synthetic-blenderproc
This tutorial shows you how to easily generate synthetic data from the 3D model of an object and obtain various modalities. This data can then be used to train a neural network for your own task. */ /*! \page tutorial_munkres Munkres Assignment Algorithm diff --git a/script/dataset_generator/example_config.json b/script/dataset_generator/example_config.json new file mode 100644 index 0000000000..26f1602404 --- /dev/null +++ b/script/dataset_generator/example_config.json @@ -0,0 +1,72 @@ +{ + "numpy_seed": 19, + "blenderproc_seed": "69", + + "models_path": "./megapose-models", + "cc_textures_path": "./cc0", + "camera": { + "px": 600, + "py": 600, + "u0": 320, + "v0": 240, + "h": 480, + "w": 640, + "randomize_params_percent": 5.0 + }, + "rendering": { + "max_num_samples": 32, + "denoiser": "OPTIX" + }, + "scene": { + "room_size_multiplier_min": 5.0, + "room_size_multiplier_max": 10.0, + "simulate_physics": false, + "max_num_textures": 50, + "distractors": { + "min_count": 20, + "max_count": 50, + "min_size_rel_scene": 0.05, + "max_size_rel_scene": 0.1, + "custom_distractors": "./megapose-distractors", + "custom_distractor_proba": 0.5, + "displacement_max_amount": 0.0, + "pbr_noise": 0.5, + "emissive_prob": 0.0, + "emissive_min_strength": 2.0, + "emissive_max_strength": 5.0 + }, + "lights": { + "min_count": 3, + "max_count": 6, + "min_intensity": 50, + "max_intensity": 200 + }, + "objects": { + "min_count": 2, + "max_count": 5, + "multiple_occurences": true, + "scale_noise": 0.2, + "displacement_max_amount": 0.0, + "pbr_noise": 0.3, + "cam_min_dist_rel": 1.0, + "cam_max_dist_rel": 3.0 + } + }, + "dataset": { + "save_path": "./output_blenderproc", + "scenes_per_run": 1, + "num_scenes": 100, + "images_per_scene": 10, + "empty_images_per_scene": 2, + "pose": true, + "depth": false, + "normals": false, + "segmentation": false, + "detection": true, + "detection_params": { + "min_side_size_px": 10, + "min_visibility_percentage": 0.3, + "points_sampling_occlusion": 100 + } + } +} diff --git a/script/dataset_generator/export_for_yolov7.py b/script/dataset_generator/export_for_yolov7.py new file mode 100644 index 0000000000..813f4c9d97 --- /dev/null +++ b/script/dataset_generator/export_for_yolov7.py @@ -0,0 +1,122 @@ +''' +Script to export an hdf5 stored dataset to the format expected by a YoloV7. +This requires bounding boxes to be generated along with the RGB images. +This is done through the json config file. + +''' + +from pathlib import Path +import argparse +from typing import List +import h5py +from PIL import Image +import numpy as np +import json +import progressbar +import yaml + +def make_name(hdf5_path: Path): + hdf5_name = hdf5_path.name[:-len('.hdf5')] + return hdf5_path.parent.name + '_' + hdf5_name + +def export_split(hdf5_paths: List[Path], images_export_path: Path, labels_export_path: Path): + bar = progressbar.ProgressBar() + detection_data = {} + for hdf5_path in bar(hdf5_paths): + with h5py.File(str(hdf5_path)) as hdf5: + rgb = np.array(hdf5['colors']) + img = Image.fromarray(rgb) + base_name = make_name(hdf5_path) + img.save(images_export_path / (base_name + '.png')) + h, w = rgb.shape[:2] + text = np.array(hdf5['object_data']).tobytes() + with open(labels_export_path / (base_name + '.txt'), 'w') as label_file: + if len(text) > 0: + object_data = json.loads(text) + for object in object_data: + if 'bounding_box' in object: + bb = object['bounding_box'] + # Yolo format: [x_center, y_center, width, height]. 
all coordinates normalized by image dimensions + x_center, y_center = bb[0] + bb[2] / 2, bb[1] + bb[3] / 2 + x, width = x_center / w, bb[2] / w + y, height = y_center / h, bb[3] / h + bb = [x, y, width, height] + cls = object['class'] + fmt = f'{cls - 1} {x} {y} {width} {height}\n' + label_file.write(fmt) + if cls not in detection_data: + detection_data[cls] = 0 + detection_data[cls] += 1 + + print('Detection summary:') + for cls in detection_data.keys(): + print(f'\tClass: {cls}\t Detection count: {detection_data[cls]}') + +if __name__ == '__main__': + parser = argparse.ArgumentParser('Convert an HDF5 dataset generated with Blenderproc to a YoloV7-ready format') + parser.add_argument('--input', type=str, required=True, help='''Path to the HDF5 dataset. It is the root folder of the dataset, + containing one subfolder for each generated scene''') + parser.add_argument('--output', type=str, required=True, help='''Where to export the dataset to YoloV7 format''') + parser.add_argument('--train-split', required=True, type=float, help='''Portion of the dataset to take for training. Between 0 and 1''') + + args = parser.parse_args() + + input_path = Path(args.input) + assert input_path.exists(), f'Path to the HDF5 dataset must be valid and exist, but got {input_path}' + + output_path = Path(args.output) + assert not output_path.exists() or len(list(output_path.iterdir())), f'The path to the exported folder should not exist, or should be empty' + output_path.mkdir(exist_ok=True) + + train_split = args.train_split + assert train_split > 0 and train_split < 1, 'Train split should be in range (0, 1)' + + + num_scenes = 0 + hdf5_paths = [] + print('Collecting hdf5 paths...') + for scene_dir in input_path.iterdir(): + if not scene_dir.is_dir(): + continue + for file in scene_dir.iterdir(): + if file.name.endswith('.hdf5'): + hdf5_paths.append(file.absolute()) + num_scenes += 1 + print(f'Found {num_scenes} scenes, for a total of {len(hdf5_paths)} images!') + train_count = int(len(hdf5_paths) * train_split) + val_count = len(hdf5_paths) - train_count + print(f'Splitting randomly: taking {train_count} images for test, {val_count} for validation') + train_hdf5, val_hdf5 = [], [] + train_indices = set(np.random.choice(len(hdf5_paths), size=train_count, replace=False)) + for i in range(len(hdf5_paths)): + train_hdf5.append(hdf5_paths[i]) if i in train_indices else val_hdf5.append(hdf5_paths[i]) + print(len(train_hdf5), len(val_hdf5)) + + images_path = output_path / 'images' + labels_path = output_path / 'labels' + images_train_path = images_path / 'train' + images_val_path = images_path / 'val' + labels_train_path = labels_path / 'train' + labels_val_path = labels_path / 'val' + + for path in [images_path, labels_path, images_train_path, images_val_path, labels_train_path, labels_val_path]: + path.mkdir() + + classes = [] + with open(input_path / 'classes.txt') as cls_file: + for line in cls_file.readlines(): + classes.append(line.strip()) + with open(output_path / 'dataset.yaml', 'w') as yaml_file: + data = { + 'train': str(images_train_path.absolute()), + 'val': str(images_val_path.absolute()), + 'nc': len(classes), + 'names': classes + } + yaml.dump(data, yaml_file) + + print('Generating train split...') + export_split(train_hdf5, images_train_path, labels_train_path) + print('Generating validation split...') + export_split(val_hdf5, images_val_path, labels_val_path) + diff --git a/script/dataset_generator/generate_dataset.py b/script/dataset_generator/generate_dataset.py new file mode 100644 index 
0000000000..ccc7ba6b8d --- /dev/null +++ b/script/dataset_generator/generate_dataset.py @@ -0,0 +1,58 @@ + +''' +Script to generate a synthetic dataset, stored in hdf5 format. +This dataset can contain: + - RGB images + - Object poses + - Segmentation maps + - Bounding boxes + - Depth map + - Normal map + +To run, blenderproc and other things should be installed (virtual environment recommended): +$ conda activate dataset_generation +$ pip install blenderproc numpy +$ blenderproc quickstart # verify that it works, install minor dependencies + + +''' + +import os +import numpy as np +import argparse +import json +from pathlib import Path +import subprocess + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('--config', required=True, type=str, help='Path to the JSON configuration file for the dataset generation script') + args = parser.parse_args() + config_path = Path(args.config) + assert config_path.exists(), f'Config file {config_path} does not exist' + with open(config_path, 'r') as json_config_file: + json_config = json.load(json_config_file) + + num_scenes = json_config['dataset']['num_scenes'] + scenes_per_run = json_config['dataset']['scenes_per_run'] + + print(__file__) + scene_generator = (Path(__file__).parent / 'generate_scene.py').absolute() + import time + for i in range(num_scenes): + args = [ + 'blenderproc', 'run', str(scene_generator), + '--config', str(config_path.absolute()), + '--scene-index', str(i * scenes_per_run), + '--scene-count', str(scenes_per_run) + ] + t = time.time() + result = subprocess.run(args) + print(f'Generating scene {i} took {time.time() - t}s') + if result.returncode != 0: + raise RuntimeError(f'Generation of scene {i} failed!') + + + diff --git a/script/dataset_generator/generate_scene.py b/script/dataset_generator/generate_scene.py new file mode 100644 index 0000000000..c9b2eb1363 --- /dev/null +++ b/script/dataset_generator/generate_scene.py @@ -0,0 +1,791 @@ +import blenderproc as bproc +''' +Script to generate a synthetic dataset, stored in hdf5 format. 
+This dataset can contain: + - RGB images + - Object poses + - Segmentation maps + - Bounding boxes + - Depth map + - Normal map + +To run, blenderproc and other things should be installed (virtual environment recommended): +$ conda activate dataset_generation +$ pip install blenderproc numpy +$ blenderproc quickstart # verify that it works, install minor dependencies + + +''' + +import os +import numpy as np +import argparse +import json +from operator import itemgetter +from pathlib import Path +from typing import Callable, Dict, List, Tuple +import time + +from blenderproc.python.types import MeshObjectUtility, EntityUtility +from blenderproc.python.renderer import RendererUtility +import bpy + +def bounding_box_2d_from_vertices(object: bproc.types.MeshObject, K: np.ndarray, camTworld: np.ndarray) -> Tuple[List[float], float, np.ndarray]: + ''' + Compute the 2D bounding from an object's vertices + returns A tuple containing: + [xmin, ymin, xmax, ymax] in pixels + the proportion of visible vertices (that are not behind the camera, i.e., negative Z) + The 2D points, in normalized units in camera space, in ViSP/OpenCV frame + ''' + worldTobj = homogeneous_no_scaling(object) + obj = object.blender_obj + verts = np.ones(len(obj.data.vertices) * 3) + obj.data.vertices.foreach_get("co", verts) + points = verts.reshape(-1, 3) + # verts = [v.co for v in obj.data.vertices] + camTobj = camTworld @ worldTobj + points_cam = camTobj @ np.concatenate((points, np.ones((len(points), 1))), axis=-1).T + points_cam = convert_points_to_visp_frame((points_cam[:3] / points_cam[3, None]).T) + + visible_points = points_cam[points_cam[:, 2] > 0] + visible_points_m_2d = visible_points[:, :2] / visible_points[:, 2, None] + visible_points_px_2d = K @ np.concatenate((visible_points_m_2d, np.ones((len(visible_points_m_2d), 1))), axis=-1).T + visible_points_px_2d = visible_points_px_2d.T[:, :2] / visible_points_px_2d.T[:, 2, None] + + mins = np.min(visible_points_px_2d, axis=0) + assert len(mins) == 2 + maxes = np.max(visible_points_px_2d, axis=0) + + return [mins[0], mins[1], maxes[0], maxes[1]], len(visible_points) / len(points_cam), visible_points_m_2d + +def homogeneous_inverse(aTb): + ''' + Inverse of a 4x4 homogeneous transformation matrix + ''' + bTa = aTb.copy() + bTa[:3, :3] = bTa[:3, :3].T + bTa[:3, 3] = -bTa[:3, :3] @ bTa[:3, 3] + return bTa + +def homogeneous_no_scaling(object: bproc.types.MeshObject, frame=None): + ''' + Get the homogeneous transformation of an object, but without potential scaling + object.local2world() may contain scaling factors. + ''' + localTworld = np.eye(4) + localTworld[:3, :3] = object.get_rotation_mat(frame) + localTworld[:3, 3] = object.get_location(frame) + return localTworld + +def convert_to_visp_frame(aTb): + ''' + Blender uses a frame that is different to ViSP's. + Blender: +X = Right, +Y = Up, +Z = Behind + Same as converting to an OpenCV frame + ''' + return bproc.math.change_source_coordinate_frame_of_transformation_matrix(aTb, ["X", "-Y", "-Z"]) + +def convert_points_to_visp_frame(points: np.ndarray): + ''' + Convert a set of points to ViSP coordinate frame + ''' + points = points.copy() + points[:, 1:3] = -points[:, 1:3] + return points + +def point_in_bounding_box(object: bproc.types.MeshObject, bb_scale=1.0): + ''' + Sample a random point in the AXIS-ALIGNED bounding box of an object. 
+ the bounding box can be scaled with bb_scale if we wish to sample a point "near" the true bounding box + Returned coordinates are in world frame + ''' + + bb = object.get_bound_box() * bb_scale # 8 x 3 + worldTlocal = homogeneous_no_scaling(object) # local2world includes scaling + localTworld = homogeneous_inverse(worldTlocal) + # go to object space to sample with axis aligned bb, since sampling in world space would be harder + bb_local = localTworld @ np.concatenate((bb, np.ones((8, 1))), axis=-1).T + bb_local = bb_local.T[:, :3] # 8 X 3 + mins, maxes = np.min(bb_local, axis=0), np.max(bb_local, axis=0) + point_local = np.zeros(3) + for i in range(3): + point_local[i] = np.random.uniform(mins[i], maxes[i]) + point = worldTlocal @ np.concatenate((point_local, np.ones(1)))[:, None] + point = point[:3, 0] + return point + +def object_bb_length(object: bproc.types.MeshObject) -> float: + size = 0.0 + bb = object.get_bound_box() + for corner_index in range(len(bb) - 1): + dists = np.linalg.norm(bb[corner_index+1:] - bb[corner_index], axis=-1, ord=2) + size = max(np.max(dists), size) + return size + +def randomize_pbr(objects: List[bproc.types.MeshObject], noise): + ''' + Randomize the physical properties of objects. + Objects should have the Principled BSDF for changes to be applied. + + ''' + # set shading and physics properties and randomize PBR materials + for j, obj in enumerate(objects): + # rand_angle = np.random.uniform(30, 90) + # obj.set_shading_mode('auto', rand_angle) + for mat in obj.get_materials(): + keys = ["Roughness", "Specular", "Metallic"] + for k in keys: + base_value = mat.get_principled_shader_value(k) + new_value = None + if isinstance(base_value, bpy.types.NodeSocketColor): + new_value = base_value.default_value + np.concatenate((np.random.uniform(-noise, noise, size=3), [1.0])) + #base_value.default_value = new_value + new_value = base_value + elif isinstance(base_value, float): + new_value = max(0.0, min(1.0, base_value + np.random.uniform(-noise, noise))) + if new_value is not None: + mat.set_principled_shader_value(k, new_value) + +def add_displacement(objects: List[bproc.types.MeshObject], max_displacement_strength=0.05): + for obj in objects: + obj.add_uv_mapping("cylinder") + # Create a random procedural texture + noise_models = ["CLOUDS", "DISTORTED_NOISE", "MAGIC", "MARBLE", + "MUSGRAVE", "NOISE", "STUCCI", "VORONOI", "WOOD"] + texture = bproc.material.create_procedural_texture(noise_models[np.random.choice(len(noise_models))]) + # Displace the vertices of the object based on that random texture + obj.add_displace_modifier( + texture=texture, + strength=np.random.uniform(0, max_displacement_strength), + subdiv_level=1, + ) + + +class Scene: + def __init__(self, size: float, target_objects: List[bproc.types.MeshObject], + distractors: List[bproc.types.MeshObject], room_objects: List[bproc.types.MeshObject], lights: List[bproc.types.MeshObject]): + self.size = size + self.target_objects = target_objects + self.distractors = distractors + self.room_objects = room_objects + self.lights = lights + + def cleanup(self): + objects = self.target_objects + self.distractors + self.room_objects + for object in objects: + object.delete(True) + for light in self.lights: + light.delete(True) + +class Generator: + def __init__(self, config_path: Path, scene_index: int, scene_count: int): + self.config_path = config_path + with open(self.config_path, 'r') as json_config_file: + self.json_config = json.load(json_config_file) + print(self.json_config) + 
np.random.seed(self.json_config['numpy_seed'] * (scene_index + 1)) + self.scene_index = scene_index + self.scene_count = scene_count + blender_seed_int = int(self.json_config['blenderproc_seed']) + os.environ['BLENDER_PROC_RANDOM_SEED'] = str(blender_seed_int * (scene_index + 1)) + self.objects = None + self.classes = None + + def init(self): + ''' + Initialize the scene generation. + Load the target objects and the materials. + Setup the rendering + ''' + print('Loading objects...') + if self.objects is not None: + for k in self.objects: + self.objects[k].delete() + self.objects, self.classes = self.load_objects() + self.save_class_file() + print('Preloading CC0 textures...') + self.cc_textures = bproc.loader.load_ccmaterials(self.json_config['cc_textures_path']) + self.cc_textures = np.random.choice(self.cc_textures, size=self.json_config['scene']['max_num_textures']) + self.setup_renderer() + + def load_objects(self) -> Tuple[Dict[str, bproc.types.MeshObject], Dict[str, int]]: + ''' + Load the objects in the models directory + The directory should have the following structure: + - models/ + --- obj_1_name/ + ----- model.obj + --- obj_2_name/ + ----- model.obj + Returns + - a dict where keys are the model names + (from the containing folder name of each object, + in the example case: 'obj_1_name' and 'obj_2_name') + and the values are the loaded model + - A dict where keys are model names and values are the class indices (from 1 to N for N objects) + ''' + models_dict = {} + class_dict = {} + models_path = Path(self.json_config['models_path']).absolute() + cls = 1 + assert models_path.exists() and models_path.is_dir(), f'Models path {models_path} must exist and be a directory' + for model_dir in sorted(models_path.iterdir()): + if not model_dir.is_dir(): + continue + model_name = model_dir.name + for content in sorted(model_dir.iterdir()): + if not content.is_file(): + continue + load_fn: Callable[[str], List[bproc.types.MeshObject]] = None + if content.name.endswith('.obj') or content.name.endswith('.ply'): + load_fn = bproc.loader.load_obj + if load_fn is not None: + assert model_name not in models_dict, f'A folder should contain a single object, but {content} contains multiple objects (.blend or .obj)' + models = load_fn(str(content.absolute())) + assert len(models) > 0, f'Loaded an empty file: {content}' + model = models[0] + if len(models) > 1: + model.join_with_other_objects(models[1:]) + model.set_cp('category_id', cls) + model.set_location([-1000.0, -1000.0, -1000.0]) # Avoid warning about collisions with other objects in the scene + model.hide() # Hide by default + model.set_name(model_name) + models_dict[model_name] = model + class_dict[model_name] = cls + cls += 1 + return models_dict, class_dict + + def save_class_file(self): + ''' + Save the class for the dataset. + Write the classes to a text file. + Each line contain + This method only changes something if the class file does not exist or the generated scene is the first one. 
+ ''' + save_path = Path(self.json_config['dataset']['save_path']).absolute() + cls_file = save_path / 'classes.txt' + if self.scene_index == 0 or not cls_file.exists(): + with open(cls_file, 'w') as f: + cls_to_model_name = {self.classes[k]: k for k in self.classes.keys()} + cls_indices = sorted(cls_to_model_name.keys()) + lines = [] + for i in cls_indices: + lines.append(cls_to_model_name[i] + '\n') + print(f'Writing classes to file: \n {lines}') + f.writelines(lines) + + def setup_renderer(self): + depth = itemgetter('depth')(self.json_config['dataset']) + bproc.renderer.set_max_amount_of_samples(self.json_config['rendering']['max_num_samples']) + RendererUtility.set_denoiser(self.json_config['rendering']['denoiser']) + if depth: + bproc.renderer.enable_depth_output(activate_antialiasing=False) + + + def render(self) -> Dict: + ''' + Call blender to render the frames. + Camera poses should already be sampled, and the scene created. + This returns a dictionary where each key (e.g "colors" for RGB rendering) contains a list of numpy arrays (1 numpy array per frame) + ''' + normals, segmentation = itemgetter('normals', 'segmentation')(self.json_config['dataset']) + if normals: # Enabling normals should be done every at every render + bproc.renderer.enable_normals_output() + data = bproc.renderer.render() + + if segmentation: + data.update(bproc.renderer.render_segmap(map_by=["instance", "class"])) + return data + + def save_data(self, path: Path, objects: List[bproc.types.MeshObject], data: Dict): + ''' + Save the data to HDF5 format (one file per frame). + In addition, this function will compute the object information (bounding box, pose, class) if required. + + path: Path to the folder in which to save the HDF5 files + ''' + json_dataset = self.json_config['dataset'] + num_frames_objects = json_dataset['images_per_scene'] + + out_object_pose, out_bounding_box = itemgetter('pose', 'detection')(json_dataset) + keys = ['min_side_size_px', 'min_visibility_percentage', 'points_sampling_occlusion'] + min_side_px, min_visibility, points_sampling_occlusion = itemgetter(*keys)(json_dataset['detection_params']) + + width, height = itemgetter('w', 'h')(self.json_config['camera']) + frames_data = [] + + for frame in range(bproc.utility.num_frames()): + worldTcam = bproc.camera.get_camera_pose(frame) + camTworld = homogeneous_inverse(worldTcam) + K = bproc.camera.get_intrinsics_as_K_matrix() + + minx, miny = (-K[0, 2]) / K[0, 0], (-K[1, 2]) / K[1, 1] + maxx, maxy = (width -K[0, 2]) / K[0, 0], (height -K[1, 2]) / K[1, 1] + + visible_objects = bproc.camera.visible_objects(worldTcam, min(width, height) // min_side_px) + objects_data = [] + # Stop when parsing frames with no objects: blenderproc API still detects objects even though they're not here for these frames + if frame >= num_frames_objects: + frames_data.append({}) + continue + for object in objects: + if object not in visible_objects: + continue + object_data = { + 'class': object.get_cp('category_id', frame), + 'name': object.get_name() + } + if out_object_pose: + worldTobj = homogeneous_no_scaling(object, frame) + camTobj = camTworld @ worldTobj + object_data['cTo'] = convert_to_visp_frame(camTobj) + if out_bounding_box: + bb_corners, z_front_proportion, points_im = bounding_box_2d_from_vertices(object, K, camTworld) + if z_front_proportion < min_visibility: + continue + mins = np.array([bb_corners[0], bb_corners[1]]) + maxes = np.array([bb_corners[2], bb_corners[3]]) + + original_size = maxes - mins + original_area = np.prod(original_size) + 
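+ # The raw bounding box is first clamped to the image bounds; objects whose clamped box is smaller
+ # than min_side_size_px, or whose estimated visibility (accounting for camera clipping and, optionally,
+ # occlusion ray casting) falls below min_visibility_percentage, are discarded.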
+ mins = np.maximum(mins, [0, 0])
+ maxes = np.minimum(maxes, [width, height])
+ size = maxes - mins
+ area = np.prod(size)
+
+ # Filter objects that are too small to be believably detectable
+ if size[0] < min_side_px or size[1] < min_side_px:
+ continue
+
+ vis_points_in_image = points_im[(points_im[:, 0] > minx) & ((points_im[:, 0] < maxx)) & (points_im[:, 1] > miny) & ((points_im[:, 1] < maxy))]
+ base_visibility = z_front_proportion * (len(vis_points_in_image) / len(points_im))
+
+ # Camera clipping removes too much of the object
+ if base_visibility < min_visibility:
+ continue
+
+ if points_sampling_occlusion > 0:
+ point_count = len(vis_points_in_image)
+ points = vis_points_in_image[np.random.choice(point_count, size=min(point_count, points_sampling_occlusion))]
+ points = np.concatenate((points, np.ones((len(points), 1))), axis=-1)
+
+ points_cam = convert_points_to_visp_frame(points) # Convert back to blender frame
+ points_world = worldTcam @ np.concatenate((points_cam, np.ones((len(points_cam), 1))), axis=-1).T
+ points_world = (points_world[:3] / points_world[3, None]).T
+ ray_directions = points_world - worldTcam[None, :3, 3]
+ hit_count = 0
+ for ray in ray_directions:
+ hit_object = bproc.object.scene_ray_cast(worldTcam[:3, 3], ray)[-2]
+ if hit_object == object:
+ hit_count += 1
+
+ final_visibility = base_visibility * (hit_count / len(ray_directions))
+ # Including occlusions, the object is now not visible enough to be detectable
+ if final_visibility < min_visibility:
+ print(f'Filtered object {object.get_name()} because of occlusions: visibility = {final_visibility}')
+ continue
+ object_data['bounding_box'] = [mins[0], mins[1], size[0], size[1]]
+
+ objects_data.append(object_data)
+
+ frames_data.append(objects_data)
+
+ data['object_data'] = frames_data
+ bproc.writer.write_hdf5(str(path.absolute()), data, append_to_existing_output=False)
+
+ def set_camera_intrinsics(self) -> None:
+ '''
+ Set the camera intrinsics from the config.
+ Each intrinsic parameter is randomized by up to randomize_params_percent percent around its configured value.
+ This does not impact the image resolution.
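+ For example (illustrative values): with px = 600 and randomize_params_percent = 5, the value of px
+ actually used for a scene is drawn uniformly from [570, 630] (and similarly for the other parameters).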
+ ''' + px, py, u0, v0, h, w , r = itemgetter('px', 'py', 'u0', 'v0', 'h', 'w', 'randomize_params_percent')(self.json_config['camera']) + r = r / 100.0 + randomize = lambda x: x + np.random.uniform(-x * r, x * r) + K = [ + [randomize(px), 0, randomize(u0)], + [0, randomize(py), randomize(v0)], + [0, 0, 1], + ] + bproc.camera.set_intrinsics_from_K_matrix(K, w, h) + + def create_distractors(self, scene_size: float) -> List[bproc.types.MeshObject]: + ''' + Add distractor objects + ''' + json_distractors = self.json_config['scene']['distractors'] + min_count, max_count = itemgetter('min_count', 'max_count')(json_distractors) + + custom_distractors_path, custom_distractors_proba = itemgetter('custom_distractors', 'custom_distractor_proba')(json_distractors) + count = np.random.randint(min_count, max_count + 1) + if custom_distractors_path is not None: + custom_count = np.sum(np.random.choice(2, size=count, replace=True, p=[1 - custom_distractors_proba, custom_distractors_proba])) + else: + custom_count = 0 + simple_count = count - custom_count + return self.create_simple_distractors(scene_size, simple_count) + self.create_custom_distractors(scene_size, custom_count) + + def create_custom_distractors(self, scene_size, count: int) -> List[bproc.types.MeshObject]: + json_distractors = self.json_config['scene']['distractors'] + min_size, max_size, pbr_noise = itemgetter('min_size_rel_scene', 'max_size_rel_scene', 'pbr_noise')(json_distractors) + custom_distractors_path = itemgetter('custom_distractors')(json_distractors) + if count == 0: + return [] + distractor_files = [] + assert Path(custom_distractors_path).exists(), f'Custom distractors folder {custom_distractors_path} does not exist' + for file in Path(custom_distractors_path).iterdir(): + if file.name.endswith(('.ply', '.obj')): + distractor_files.append(file) + assert len(distractor_files) > 0, f'Requested custom distractors from folder but {custom_distractors_path} did not find any!' + + chosen_files_list = np.random.choice(distractor_files, size=count, replace=True) + chosen_files_set = set(chosen_files_list) + path_to_object = {} + for path in chosen_files_set: + obj = bproc.loader.load_obj(str(path.absolute()))[0] + obj.set_location([100000.0, 100000.0, 10000.0]) + path_to_object[path] = obj + + chosen_distractors = [] + for path in chosen_files_list: + distractor = path_to_object[path].duplicate() + chosen_distractors.append(distractor) + object_size = object_bb_length(distractor) + rescale_size = np.random.uniform((scene_size * min_size) / object_size, (scene_size * max_size) / object_size) + distractor.set_scale([rescale_size, rescale_size, rescale_size]) + + def sample_pose(obj: bproc.types.MeshObject): + loc = np.random.uniform([-scene_size / 2, -scene_size / 2, -scene_size / 2], [scene_size / 2, scene_size / 2, scene_size / 2]) + obj.set_location(loc) + obj.set_rotation_euler(bproc.sampler.uniformSO3()) + + bproc.object.sample_poses( + chosen_distractors, + sample_pose_func=sample_pose, + objects_to_check_collisions=None + ) + randomize_pbr(chosen_distractors, pbr_noise) + return chosen_distractors + + def create_simple_distractors(self, scene_size: float, count: int) -> List[bproc.types.MeshObject]: + ''' + Add simple objects to the scene. + These objects have no class (no bounding box computed and does not appear in segmentation) + They are meant to add variation to the scene + Their position, texture and shape are randomized. 
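+ Textures are sampled from the preloaded cc0 materials, and a distractor can also be made emissive so that it acts as an additional light source.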
+ Their size is also randomized, in a range that depends on the scale of the room.
+ The 3D models are simple primitives (cube, sphere, cone, etc.) and they are distorted by a random displacement modifier,
+ controlled by the displacement_max_amount parameter.
+ '''
+ json_distractors = self.json_config['scene']['distractors']
+ min_size, max_size = itemgetter('min_size_rel_scene', 'max_size_rel_scene')(json_distractors)
+ displacement_strength, pbr_noise = itemgetter('displacement_max_amount', 'pbr_noise')(json_distractors)
+ emissive_proba, emissive_min_strength, emissive_max_strength = itemgetter(*['emissive_' + s for s in ['prob', 'min_strength', 'max_strength']])(json_distractors)
+ if count == 0:
+ return []
+ def sample_pose(obj: bproc.types.MeshObject):
+ loc = np.random.uniform([-scene_size / 2, -scene_size / 2, -scene_size / 2], [scene_size / 2, scene_size / 2, scene_size / 2])
+ obj.set_location(loc)
+ obj.set_scale(np.random.uniform(scene_size * min_size, scene_size * max_size, size=3))
+ obj.set_rotation_euler(bproc.sampler.uniformSO3())
+ distractor_type = np.random.choice(['CUBE', 'CYLINDER', 'CONE', 'SPHERE', 'MONKEY'], size=count, replace=True)
+ distractors = [bproc.object.create_primitive(distractor_type[c]) for c in range(count)]
+
+ bproc.object.sample_poses(
+ distractors,
+ sample_pose_func=sample_pose,
+ objects_to_check_collisions=None
+ )
+
+ for obj in distractors:
+ random_texture = np.random.choice(self.cc_textures)
+ obj.replace_materials(random_texture)
+ randomize_pbr(distractors, pbr_noise)
+ if displacement_strength > 0.0:
+ add_displacement(distractors, displacement_strength)
+ if emissive_proba > 0.0:
+ for distractor in distractors:
+ is_emissive = np.random.uniform(0.0, 1.0) < emissive_proba
+ if is_emissive:
+ for material in distractor.get_materials():
+ emission_strength = np.random.uniform(emissive_min_strength, emissive_max_strength)
+ material.make_emissive(emission_strength)
+ return distractors
+
+ def create_lights(self, scene_size: float, target_objects: List[bproc.types.MeshObject]) -> List[bproc.types.Light]:
+ '''
+ Add lights to the environment.
+ Sample n lights between min_count and max_count: + For each light: + - Randomly choose whether it is a point light (uniform lighting) or a spot (lighting in cone, focused on random target object) + - Randomly set its intensity between min_intensity and max_intensity + - Randomly set its position + - Randomly set its color + ''' + light_json = self.json_config['scene']['lights'] + min_count, max_count = itemgetter('min_count', 'max_count')(light_json) + min_intensity, max_intensity = itemgetter('min_intensity', 'max_intensity')(light_json) + + count = np.random.randint(min_count, max_count + 1) + point_light_count = np.sum(np.random.choice(2, size=count, replace=True)) + spot_light_count = count - point_light_count + + lights = [] + def basic_light_sampling(light: bproc.types.Light) -> None: + loc = np.random.uniform([-scene_size / 2, -scene_size / 2, -scene_size / 2], [scene_size / 2, scene_size / 2, scene_size / 2]) + light.set_location(loc) + light.set_energy(np.random.uniform(min_intensity, max_intensity)) + light.set_color(np.random.uniform(0.5, 1.0, size=3)) + + for _ in range(point_light_count): + light = bproc.types.Light('POINT') + basic_light_sampling(light) + lights.append(light) + for _ in range(spot_light_count): + light = bproc.types.Light('SPOT') + basic_light_sampling(light) + # Point towards a target object + looked_at_obj = np.random.choice(len(target_objects)) + looked_at_obj: bproc.types.MeshObject = target_objects[looked_at_obj] + poi = point_in_bounding_box(looked_at_obj) + R = bproc.camera.rotation_from_forward_vec(poi - light.get_location()) + light.set_rotation_mat(R) + lights.append(light) + + return lights + + def create_target_objects(self) -> List[bproc.types.MeshObject]: + ''' + Sample targets objects (3D models that are provided by the user). These target objects have a class and their bounding box and pose may be computed. + N objects are sampled, between min_count and max_count. + If multiple_occurences is true, then a single object may appear multiple times in scene (and in the images) + For each object: + - Randomly set its pose in the room + - Randomly change its scale if scale_noise > 0.0 + Note that the target models should already be loaded before calling this function + ''' + json_objects = self.json_config['scene']['objects'] + min_count, max_count, replace = itemgetter('min_count', 'max_count', 'multiple_occurences')(json_objects) + scale_noise, displacement_amount, pbr_noise = itemgetter('scale_noise', 'displacement_max_amount', 'pbr_noise')(json_objects) + + object_keys = list(self.objects.keys()) + + if not replace: + max_count = min(max_count, len(object_keys)) + min_count = min(min_count, max_count) + count = np.random.randint(min_count, max_count + 1) + selected_key_indices = np.random.choice(len(object_keys), size=count, replace=replace) + + objects: List[bproc.types.MeshObject] = [self.objects[object_keys[index]].duplicate() for index in selected_key_indices] + for object in objects: + object.hide(False) + if scale_noise > 0.0: + random_scale = np.random.uniform(-scale_noise, scale_noise) + 1.0 + object.set_scale([random_scale, random_scale, random_scale]) # Uniform scaling + object.set_location([0.0, 0.0, 0.0]) + object.persist_transformation_into_mesh() + + if displacement_amount > 0.0: + add_displacement(objects, displacement_amount) + if pbr_noise > 0.0: + randomize_pbr(objects, pbr_noise) + + return objects + + def create_room(self, size: float) -> List[bproc.types.MeshObject]: + ''' + Create a basic square room, with size size. 
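+ The room is built from six planes: a ground plane, four walls and a ceiling.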
+ The materials of the walls are randomly sampled + ''' + ground = bproc.object.create_primitive('PLANE') + ground.set_location([0, 0, -size / 2]) + ground.set_scale([size / 2, size / 2, 1]) + room_objects = [ground] + wall_data = [ + {'loc': [size / 2, 0, 0], 'rot': [0, np.pi / 2, 0]}, + {'loc': [-size / 2, 0, 0], 'rot': [0, np.pi / 2, 0]}, + {'loc': [0, size / 2, 0], 'rot': [np.pi / 2, 0, 0]}, + {'loc': [0, -size / 2, 0], 'rot': [np.pi / 2, 0, 0]}, + {'loc': [0, 0, size / 2], 'rot': [0.0, 0, 0]}, + ] + for w_data in wall_data: + wall = bproc.object.create_primitive('PLANE') + wall.set_location(w_data['loc']) + wall.set_rotation_euler(w_data['rot']) + wall.set_scale([size / 2, size / 2, 1]) + room_objects.append(wall) + + for obj in room_objects: + random_texture = np.random.choice(self.cc_textures) + obj.replace_materials(random_texture) + randomize_pbr(room_objects, 0.2) + return room_objects + + def create_scene(self): + ''' + Create a basic scene, a square room. + The size of the room is dependent on the size of the biggest object multiplied by a user supplied param. + Each wall has a random texture, sampled from cc0 materials. + Distractors are added randomly in the room. + Random lights are also placed. + If simulate physics is true, then objects are dropped on the ground. + When using simulate physics, some objects may leave the room (through weird collisions): they are deleted. + + ''' + objects = [] + + room_size = 0.0 + room_size_multiplier_min, room_size_multiplier_max = itemgetter('room_size_multiplier_min', 'room_size_multiplier_max')(self.json_config['scene']) + simulate_physics = self.json_config['scene']['simulate_physics'] + + assert room_size_multiplier_min >= 1.0, 'Room size multiplier should be more than one' + objects = self.create_target_objects() + + for object in objects: + room_size = max(object_bb_length(object), room_size) + size = room_size * np.random.uniform(room_size_multiplier_min, room_size_multiplier_max) + + room_objects = self.create_room(size) + + def sample_pose(obj: bproc.types.MeshObject): + loc = np.random.uniform([-size / 2, -size / 2, -size / 2], [size / 2, size / 2, size / 2]) + obj.set_location(loc) + obj.set_rotation_euler(bproc.sampler.uniformSO3()) + + bproc.object.sample_poses( + objects, + sample_pose_func=sample_pose, + objects_to_check_collisions=None + ) + + for object in objects: + object.persist_transformation_into_mesh() + + distractors = self.create_distractors(size) + + if simulate_physics: + for object in objects + distractors: + object.enable_rigidbody(True) + for object in room_objects: + object.enable_rigidbody(False, collision_shape='BOX') + + bproc.object.simulate_physics_and_fix_final_poses(min_simulation_time=3, max_simulation_time=3.5, check_object_interval=1) + for object in objects: + object.persist_transformation_into_mesh() + def filter_objects_outside_room(objects: List[bproc.types.MeshObject]) -> List[bproc.types.MeshObject]: + inside = [] + outside = [] + is_inside = lambda loc: np.all(np.logical_and(loc < size / 2, loc > -size / 2)) + for object in objects: + inside.append(object) if is_inside(object.get_location()) else outside.append(object) + for object in outside: + object.delete() + + print(f'Filtered {len(objects) - len(inside)} objects') + return inside + + objects = filter_objects_outside_room(objects) + distractors = filter_objects_outside_room(distractors) + lights = self.create_lights(size, objects) # do this after potential physics sampling so that spots are correctly oriented + scene = Scene(size, 
objects, distractors, room_objects, lights)
+ return scene
+
+ def sample_camera_poses(self, scene: Scene) -> None:
+ '''
+ Sample camera poses in a scene.
+ Cameras are sampled as follows:
+ - Select a target object and choose a point of interest in its bounding box
+ - Choose a camera position in a "hollow ball" around the object: the position should be at least (cam_min_dist_rel * object_size) meters
+ from the object and at most (cam_max_dist_rel * object_size) meters away. Note that the object size is computed from the largest diagonal
+ of the axis-aligned bounding box and is thus overestimated.
+ - Fix the camera orientation so that it points towards the point of interest
+ '''
+ dataset_config = self.json_config['dataset']
+ images_per_scene = dataset_config['images_per_scene']
+ cam_min_dist, cam_max_dist = itemgetter('cam_min_dist_rel', 'cam_max_dist_rel')(self.json_config['scene']['objects'])
+ hs = scene.size / 2
+ bvh = bproc.object.create_bvh_tree_multi_objects(scene.target_objects + scene.distractors + scene.room_objects)
+ for frame in range(images_per_scene):
+ tries = 0
+ good = False
+ while not good and tries < 1000:
+ object = scene.target_objects[np.random.choice(len(scene.target_objects))]
+ object_size = object_bb_length(object)
+ poi = point_in_bounding_box(object, bb_scale=1)
+ tries_distance = 0
+ # To fight the potential bias of having many more far cameras than near ones, generate multiple hypotheses for a single distance before rejecting it.
+ # This gives more chances to near camera locations, which have a higher risk of occlusion.
+ distance = np.random.uniform(cam_min_dist, cam_max_dist)
+ while not good and tries_distance < 100:
+ location = bproc.sampler.sphere(object.get_location(), distance * object_size, 'SURFACE')
+ rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-0.7854, 0.7854))
+
+ cam = bpy.context.scene.camera.data
+ clip_start = cam.clip_start
+ focal_length = cam.lens / 1000.0
+
+ cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
+ if object in bproc.camera.visible_objects(cam2world_matrix, 10) and bproc.camera.perform_obstacle_in_view_check(cam2world_matrix, {'min': focal_length + clip_start}, bvh):
+ good = True
+ else:
+ good = False
+ tries += 1
+ tries_distance += 1
+ if tries >= 1000:
+ print('Warning! Could not correctly place the camera, results may be wrong')
+ bproc.camera.add_camera_pose(cam2world_matrix)
+
+ def add_empty_images(self, count: int, scene: Scene):
+ '''
+ Add empty images to the rendering queue.
+ The camera poses for the empty images are sampled from already existing poses (where the target objects should be visible).
+ Thus, the number of empty images cannot be greater than the number of images with target objects present.
+ '''
+ num_frames = bproc.utility.num_frames()
+ assert count <= num_frames, 'Number of empty images cannot be greater than the number of images with objects'
+ empty_frame_chosen_indices = np.random.choice(num_frames, size=count, replace=False)
+ for object in scene.target_objects:
+ location = object.get_location()
+ for i in range(num_frames):
+ object.set_location(location, frame=i)
+ for i, f in enumerate(empty_frame_chosen_indices):
+ bproc.camera.add_camera_pose(bproc.camera.get_camera_pose(f))
+ for object in scene.target_objects:
+ object.set_location([10000.0, 10000.0, 10000.0], frame=num_frames + i)
+
+ def run(self):
+ '''
+ Generate the scenes and save the results.
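+ Each scene i is written to its own subfolder of dataset.save_path, named str(scene_index + i),
+ and contains one HDF5 file per rendered frame (0.hdf5, 1.hdf5, ...).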
+ '''
+
+ save_path = Path(self.json_config['dataset']['save_path'])
+ save_path.mkdir(parents=True, exist_ok=True)
+ self.init()
+ for i in range(self.scene_count):
+ bproc.utility.reset_keyframes()
+ self.set_camera_intrinsics()
+ scene = self.create_scene()
+ while len(scene.target_objects) == 0:
+ scene.cleanup()
+ scene = self.create_scene()
+ self.sample_camera_poses(scene)
+ self.add_empty_images(self.json_config['dataset']['empty_images_per_scene'], scene)
+ # bpy.context.scene.render.use_persistent_data = False
+ data = self.render()
+ path = save_path / str(self.scene_index + i)
+ path.mkdir(exist_ok=True)
+ self.save_data(path, scene.target_objects, data)
+ scene.cleanup()
+
+ if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--config', required=True, type=str, help='Path to the JSON configuration file for the dataset generation script')
+ parser.add_argument('--scene-index', required=True, type=int, help='Index of the first scene to generate')
+ parser.add_argument('--scene-count', required=True, type=int, help='Number of scenes to render')
+ args = parser.parse_args()
+
+ generator = Generator(args.config, args.scene_index, args.scene_count)
+ # RendererUtility.set_render_devices(use_only_cpu=True) # Uncomment to force CPU-only rendering
+ bproc.clean_up(clean_up_camera=True)
+ bproc.init() # Initializes the renderer; the GPU is used by default when one is available
+
+ generator.run()
+
+
diff --git a/script/dataset_generator/viz_bb.py b/script/dataset_generator/viz_bb.py
new file mode 100644
index 0000000000..ff1f2eca97
--- /dev/null
+++ b/script/dataset_generator/viz_bb.py
@@ -0,0 +1,35 @@
+import h5py
+from pathlib import Path
+import numpy as np
+import json
+import matplotlib.pyplot as plt
+import matplotlib.patches as patches
+
+import argparse
+
+def show(file: h5py.File):
+ fig = plt.figure()
+ rgb = np.array(file['colors'])
+ # Object data (classes, poses, bounding boxes) is stored as a JSON string in the HDF5 file
+ text = np.array(file['object_data']).tobytes()
+ if len(text) > 0:
+ object_data = json.loads(text)
+ for object in object_data:
+ if 'bounding_box' in object:
+ bb = object['bounding_box']
+ rect = patches.Rectangle((bb[0], bb[1]), bb[2], bb[3], linewidth=1, edgecolor='r', facecolor='none')
+ plt.gca().add_patch(rect)
+ plt.imshow(rgb)
+ plt.show()
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--path', type=str, help='Path to a folder containing hdf5 files')
+ args = parser.parse_args()
+ folder = Path(args.path)
+ assert folder.exists()
+ for file in folder.iterdir():
+ if file.name.endswith('.hdf5'):
+ with h5py.File(str(file)) as f:
+ print(f'Visualizing file {file}')
+ show(f)
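+
+ # Example usage (assuming h5py and matplotlib are installed in the current Python environment),
+ # where <save_path>/<scene_index> is one of the scene folders produced by the generation script:
+ #   python viz_bb.py --path <save_path>/<scene_index>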