# ==============================================================================================================
# The following code demonstrates the usage of kaolin's "Structured Point Cloud (SPC)" 3d convolution
# functionality. Note that this sample does NOT demonstrate how to use Kaolin's PyTorch 3d convolution layers.
# Rather, 3d convolutions are used to 'filter' color data, which is useful for level-of-detail management
# during rendering. This can be thought of as the 3d analog of generating a 2d mipmap.
#
# Note that this is a low-level interface: practitioners are encouraged to visit the references below.
# ==============================================================================================================
# See also:
#
# - Code: kaolin.ops.spc.SPC
#   https://kaolin.readthedocs.io/en/latest/modules/kaolin.rep.html?highlight=SPC#kaolin.rep.Spc
#
# - Tutorial: Understanding Structured Point Clouds (SPCs)
#   https://github.com/NVIDIAGameWorks/kaolin/blob/master/examples/tutorial/understanding_spcs_tutorial.ipynb
#
# - Documentation: Structured Point Clouds
#   https://kaolin.readthedocs.io/en/latest/modules/kaolin.ops.spc.html?highlight=spc#kaolin-ops-spc
# ==============================================================================================================

import torch
import kaolin

# The following function applies a series of SPC convolutions to encode the entire hierarchy into a single tensor.
# Each step applies a convolution on the "highest" (finest) level of the SPC with an averaging kernel.
# Therefore, each step locally averages the "colored point hierarchy", where each "colored point"
# corresponds to a point in the SPC point hierarchy.
# For a description of the inputs 'octree', 'point_hierarchy', 'level', 'pyramids', and 'exsum', as well as a
# detailed description of the mathematics of SPC convolutions, see:
# https://kaolin.readthedocs.io/en/latest/modules/kaolin.ops.spc.html?highlight=SPC#kaolin.ops.spc.Conv3d
# The input 'colors' is a PyTorch tensor containing color features corresponding to some 'level' of the hierarchy.
def encode(colors, octree, point_hierarchy, pyramids, exsum, level):

    # SPC convolutions are characterized by a set of 'kernel vectors' and corresponding 'weights'.

    # kernel_vectors is the "kernel support" -
    # a listing of 3D coordinates where the weights of the convolution are non-null,
    # in this case a simple dense 2x2x2 grid.
    kernel_vectors = torch.tensor([[0,0,0],[0,0,1],[0,1,0],[0,1,1],
                                   [1,0,0],[1,0,1],[1,1,0],[1,1,1]],
                                  dtype=torch.short, device='cuda')
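    # These 8 offsets enumerate the 2x2x2 block of child cells under each coarser cell, which is what lets the
    # convolution below (with jump=1) gather a parent's children, much like a stride-2 kernel on a dense grid.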

    # The weights specify how the input colors 'under' the kernel are mapped to an output color,
    # in this case a simple average.
    weights = torch.diag(torch.tensor([0.125, 0.125, 0.125, 0.125],
                                       dtype=torch.float32, device='cuda')) # Tensor of (4, 4)
    weights = weights.repeat(8,1,1).contiguous() # Tensor of (8, 4, 4)
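    # Layout note: each of the 8 kernel taps carries its own (in_channels, out_channels) matrix. Here every tap
    # is 0.125 * identity, so a fully occupied 2x2x2 neighborhood yields an exact average of its 8 children;
    # partially occupied neighborhoods are compensated for by the normalization at the end of this function.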

    # Storage for the output color hierarchy is allocated. This includes points at the bottom of the hierarchy,
    # as well as intermediate SPC levels (which may store different features).
    color_hierarchy = torch.empty((pyramids[0,1,level+1],4), dtype=torch.float32, device='cuda')
    # Copy the input colors into the highest level of color_hierarchy. pyramids is used here to select all leaf
    # points at the bottom of the hierarchy and set them to some pre-sampled random color. Points at intermediate
    # levels are left empty.
    color_hierarchy[pyramids[0,1,level]:pyramids[0,1,level+1]] = colors[:]
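    # Indexing note: pyramids[0,1,l] is the exclusive sum of point counts over levels 0..l-1, i.e. the offset of
    # level l within the point hierarchy, so the slice above addresses exactly the points of the finest level.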

    # Perform the 3d convolutions in a bottom-up fashion to 'filter' colors from the previous (finer) level.
    for l in range(level,0,-1):

        # Apply the 3d convolution. Note that jump=1 means the inputs and outputs differ by 1 level.
        # This is analogous to a stride of 2 in grid-based convolutions.
        colors, ll = kaolin.ops.spc.conv3d(octree,
                                           point_hierarchy,
                                           l,
                                           pyramids,
                                           exsum,
                                           colors,
                                           weights,
                                           kernel_vectors,
                                           jump=1)
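        # conv3d returns the filtered features together with the level they now live at;
        # with jump=1 that output level ll is simply l - 1.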
        # Copy the output colors into the color hierarchy.
        color_hierarchy[pyramids[0,1,ll]:pyramids[0,1,l]] = colors[:]
        print(f"At level {l}, output feature shape is:\n{colors.shape}")

    # Normalize the colors by the accumulated weight stored in the 4th channel. The divisor is cloned so the
    # in-place division does not read values it is simultaneously overwriting.
    color_hierarchy /= color_hierarchy[:,3:].clone()
    # Normalization is needed here due to the sparse nature of SPCs. When a point under a kernel is not
    # present in the point hierarchy, the corresponding data is treated as zeros. Normalization is equivalent
    # to having the filter weights sum to one. This may not always be desirable, e.g. for alpha blending.

    return color_hierarchy


# Highest level of the SPC
level = 3

# Construct a fully occupied Structured Point Cloud with 'level' levels of detail.
# See https://kaolin.readthedocs.io/en/latest/modules/kaolin.rep.html?highlight=SPC#kaolin.rep.Spc
spc = kaolin.rep.Spc.make_dense(level, device='cuda')
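# A dense SPC at level L is a fully populated octree, so its finest level contains 8**L points (8**3 = 512 here).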

# In kaolin, operations are batched by default; the spc object above contains a single-item batch, hence the [0].
num_points_last_lod = spc.num_points(level)[0]

# Create a tensor of random colors for all points in the highest level of detail.
colors = torch.rand((num_points_last_lod, 4), dtype=torch.float32, device='cuda')
# Set the 4th color channel to one for the subsequent color normalization.
colors[:,3] = 1
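# The 4th channel acts as a weight: the convolutions accumulate it alongside the colors, so after filtering it
# records how much occupied volume contributed to each point, and dividing by it renormalizes partial neighborhoods.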

print(f'Input SPC features: {colors.shape}')

# Encode the color hierarchy by invoking a series of convolutions, until we end up with a single tensor.
color_hierarchy = encode(colors=colors,
                         octree=spc.octrees,
                         point_hierarchy=spc.point_hierarchies,
                         pyramids=spc.pyramids,
                         exsum=spc.exsum,
                         level=level)
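# color_hierarchy now holds one RGBA value per point in the hierarchy, ordered level by level with the
# root (level 0) at index 0.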

# Print the root node color.
print('Final encoded value (average of averages):')
print(color_hierarchy[0])
# This will be the average of averages over the entire spc hierarchy. Since the initial random colors
# came from a uniform distribution, this should approach [0.5, 0.5, 0.5, 1.0] as 'level' increases.
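
# Optional sanity check: because the SPC built above is fully dense and every convolution is an exact 8-way
# average, the root color should match the mean of the input leaf colors up to floating point error.
print(f'Mean of input colors: {colors.mean(dim=0)}')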