diff --git a/docs/tutorials/fit_simple_neural_radiance_field.ipynb b/docs/tutorials/fit_simple_neural_radiance_field.ipynb index d74bbc6b6..6490c5270 100644 --- a/docs/tutorials/fit_simple_neural_radiance_field.ipynb +++ b/docs/tutorials/fit_simple_neural_radiance_field.ipynb @@ -100,7 +100,7 @@ "from pytorch3d.transforms import so3_exp_map\n", "from pytorch3d.renderer import (\n", " FoVPerspectiveCameras, \n", - " NDCGridRaysampler,\n", + " NDCMultinomialRaysampler,\n", " MonteCarloRaysampler,\n", " EmissionAbsorptionRaymarcher,\n", " ImplicitRenderer,\n", @@ -186,7 +186,7 @@ "The renderer is composed of a *raymarcher* and a *raysampler*.\n", "- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use two different raysamplers:\n", " - `MonteCarloRaysampler` is used to generate rays from a random subset of pixels of the image plane. The random subsampling of pixels is carried out during **training** to decrease the memory consumption of the implicit model.\n", - " - `NDCGridRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user). In combination with the implicit model of the scene, `NDCGridRaysampler` consumes a large amount of memory and, hence, is only used for visualizing the results of the training at **test** time.\n", + " - `NDCMultinomialRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user). In combination with the implicit model of the scene, `NDCMultinomialRaysampler` consumes a large amount of memory and, hence, is only used for visualizing the results of the training at **test** time.\n", "- The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher` which implements the standard Emission-Absorption raymarching algorithm." ] }, @@ -211,10 +211,10 @@ "\n", "# 1) Instantiate the raysamplers.\n", "\n", - "# Here, NDCGridRaysampler generates a rectangular image\n", + "# Here, NDCMultinomialRaysampler generates a rectangular image\n", "# grid of rays whose coordinates follow the PyTorch3D\n", "# coordinate conventions.\n", - "raysampler_grid = NDCGridRaysampler(\n", + "raysampler_grid = NDCMultinomialRaysampler(\n", " image_height=render_size,\n", " image_width=render_size,\n", " n_pts_per_ray=128,\n", @@ -844,7 +844,7 @@ " fov=target_cameras.fov[0],\n", " device=device,\n", " )\n", - " # Note that we again render with `NDCGridRaySampler`\n", + " # Note that we again render with `NDCMultinomialRaysampler`\n", " # and the batched_forward function of neural_radiance_field.\n", " frames.append(\n", " renderer_grid(\n", @@ -867,7 +867,7 @@ "source": [ "## 6. Conclusion\n", "\n", - "In this tutorial, we have shown how to optimize an implicit representation of a scene such that the renders of the scene from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's implicit function renderer composed of either a `MonteCarloRaysampler` or `NDCGridRaysampler`, and an `EmissionAbsorptionRaymarcher`." + "In this tutorial, we have shown how to optimize an implicit representation of a scene such that the renders of the scene from known viewpoints match the observed images for each viewpoint. 
The rendering was carried out using PyTorch3D's implicit function renderer composed of either a `MonteCarloRaysampler` or `NDCMultinomialRaysampler`, and an `EmissionAbsorptionRaymarcher`." ] } ], diff --git a/docs/tutorials/fit_textured_volume.ipynb b/docs/tutorials/fit_textured_volume.ipynb index 6c0938b05..bae176bf4 100644 --- a/docs/tutorials/fit_textured_volume.ipynb +++ b/docs/tutorials/fit_textured_volume.ipynb @@ -89,7 +89,7 @@ "from pytorch3d.renderer import (\n", " FoVPerspectiveCameras, \n", " VolumeRenderer,\n", - " NDCGridRaysampler,\n", + " NDCMultinomialRaysampler,\n", " EmissionAbsorptionRaymarcher\n", ")\n", "from pytorch3d.transforms import so3_exp_map\n", @@ -164,7 +164,7 @@ "The following initializes a volumetric renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color value is obtained by querying the corresponding location in the volumetric model of the scene (the model is described & instantiated in a later cell).\n", "\n", "The renderer is composed of a *raymarcher* and a *raysampler*.\n", - "- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use the `NDCGridRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user).\n", + "- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use the `NDCMultinomialRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user).\n", "- The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher` which implements the standard Emission-Absorption raymarching algorithm." ] }, @@ -186,14 +186,14 @@ "volume_extent_world = 3.0\n", "\n", "# 1) Instantiate the raysampler.\n", - "# Here, NDCGridRaysampler generates a rectangular image\n", + "# Here, NDCMultinomialRaysampler generates a rectangular image\n", "# grid of rays whose coordinates follow the PyTorch3D\n", "# coordinate conventions.\n", "# Since we use a volume of size 128^3, we sample n_pts_per_ray=150,\n", "# which roughly corresponds to a one ray-point per voxel.\n", "# We further set the min_depth=0.1 since there is no surface within\n", "# 0.1 units of any camera plane.\n", - "raysampler = NDCGridRaysampler(\n", + "raysampler = NDCMultinomialRaysampler(\n", " image_width=render_size,\n", " image_height=render_size,\n", " n_pts_per_ray=150,\n", @@ -462,7 +462,7 @@ "source": [ "## 6. Conclusion\n", "\n", - "In this tutorial, we have shown how to optimize a 3D volumetric representation of a scene such that the renders of the volume from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's volumetric renderer composed of an `NDCGridRaysampler` and an `EmissionAbsorptionRaymarcher`." + "In this tutorial, we have shown how to optimize a 3D volumetric representation of a scene such that the renders of the volume from known viewpoints match the observed images for each viewpoint. The rendering was carried out using PyTorch3D's volumetric renderer composed of an `NDCMultinomialRaysampler` and an `EmissionAbsorptionRaymarcher`."
] } ], diff --git a/projects/nerf/nerf/raysampler.py b/projects/nerf/nerf/raysampler.py index c1f5453c4..a6b3c2705 100644 --- a/projects/nerf/nerf/raysampler.py +++ b/projects/nerf/nerf/raysampler.py @@ -8,7 +8,7 @@ from typing import List import torch -from pytorch3d.renderer import MonteCarloRaysampler, NDCGridRaysampler, RayBundle +from pytorch3d.renderer import MonteCarloRaysampler, NDCMultinomialRaysampler, RayBundle from pytorch3d.renderer.cameras import CamerasBase from pytorch3d.renderer.implicit.sample_pdf import sample_pdf @@ -150,7 +150,7 @@ def __init__( self._stratified_test = stratified_test # Initialize the grid ray sampler. - self._grid_raysampler = NDCGridRaysampler( + self._grid_raysampler = NDCMultinomialRaysampler( image_width=image_width, image_height=image_height, n_pts_per_ray=n_pts_per_ray, diff --git a/tests/test_camera_pixels.py b/tests/test_camera_pixels.py index 98f2621ec..72feaeb21 100644 --- a/tests/test_camera_pixels.py +++ b/tests/test_camera_pixels.py @@ -10,7 +10,7 @@ from common_testing import TestCaseMixin from pytorch3d.renderer import ( MeshRasterizer, - NDCGridRaysampler, + NDCMultinomialRaysampler, PerspectiveCameras, PointsRasterizationSettings, PointsRasterizer, @@ -172,7 +172,7 @@ def test_pointcloud(self): def test_raysampler(self): data = _CommonData() - gridsampler = NDCGridRaysampler( + gridsampler = NDCMultinomialRaysampler( image_width=data.W, image_height=data.H, n_pts_per_ray=2, diff --git a/tests/test_render_implicit.py b/tests/test_render_implicit.py index ca8c0829b..60ca2d236 100644 --- a/tests/test_render_implicit.py +++ b/tests/test_render_implicit.py @@ -12,13 +12,13 @@ from pytorch3d.renderer import ( BlendParams, EmissionAbsorptionRaymarcher, - GridRaysampler, ImplicitRenderer, Materials, MeshRasterizer, MeshRenderer, MonteCarloRaysampler, - NDCGridRaysampler, + MultinomialRaysampler, + NDCMultinomialRaysampler, PointLights, RasterizationSettings, RayBundle, @@ -142,7 +142,7 @@ def test_input_types(self): # init a trivial renderer renderer = ImplicitRenderer( - raysampler=NDCGridRaysampler( + raysampler=NDCMultinomialRaysampler( image_width=100, image_height=100, n_pts_per_ray=10, @@ -180,7 +180,7 @@ def _compare_with_meshes_renderer( sphere_centroid.requires_grad = True # init the grid raysampler with the ndc grid - raysampler = NDCGridRaysampler( + raysampler = NDCMultinomialRaysampler( image_width=image_size[1], image_height=image_size[0], n_pts_per_ray=256, @@ -355,7 +355,7 @@ def _rotating_gif(self, image_size, n_frames=50, fps=15, sphere_diameter=0.5): cameras = init_cameras(n_frames, image_size=image_size) # init the grid raysampler - raysampler = GridRaysampler( + raysampler = MultinomialRaysampler( min_x=0.5, max_x=image_size[1] - 0.5, min_y=0.5, diff --git a/tests/test_render_volumes.py b/tests/test_render_volumes.py index f546cad39..76b015b80 100644 --- a/tests/test_render_volumes.py +++ b/tests/test_render_volumes.py @@ -15,9 +15,9 @@ AbsorptionOnlyRaymarcher, AlphaCompositor, EmissionAbsorptionRaymarcher, - GridRaysampler, MonteCarloRaysampler, - NDCGridRaysampler, + MultinomialRaysampler, + NDCMultinomialRaysampler, PerspectiveCameras, PointsRasterizationSettings, PointsRasterizer, @@ -228,7 +228,7 @@ def test_input_types(self, batch_size: int = 10): with self.assertRaises(ValueError): VolumeRenderer(raysampler=bad_raysampler, raymarcher=bad_raymarcher) - raysampler = NDCGridRaysampler( + raysampler = NDCMultinomialRaysampler( image_width=100, image_height=100, n_pts_per_ray=10, @@ -339,7 +339,7 @@ def 
test_compare_with_pointclouds_renderer( # init the grid raysampler with the ndc grid coord_range = 1.0 half_pix_size = coord_range / max(*image_size) - raysampler = NDCGridRaysampler( + raysampler = NDCMultinomialRaysampler( image_width=image_size[1], image_height=image_size[0], n_pts_per_ray=256, @@ -431,7 +431,7 @@ def test_monte_carlo_rendering( ): """ Tests that rendering with the MonteCarloRaysampler matches the - rendering with GridRaysampler sampled at the corresponding + rendering with MultinomialRaysampler sampled at the corresponding MonteCarlo locations. """ volumes = init_boundary_volume( @@ -442,7 +442,7 @@ def test_monte_carlo_rendering( cameras = init_cameras(n_frames, image_size=image_size) # init the grid raysampler - raysampler_grid = GridRaysampler( + raysampler_multinomial = MultinomialRaysampler( min_x=0.5, max_x=image_size[1] - 0.5, min_y=0.5, @@ -475,11 +475,11 @@ def test_monte_carlo_rendering( (images_opacities_grid, ray_bundle_grid), ) = [ VolumeRenderer( - raysampler=raysampler_grid, + raysampler=raysampler_multinomial, raymarcher=raymarcher, sample_mode="bilinear", )(cameras=cameras, volumes=volumes) - for raysampler in (raysampler_mc, raysampler_grid) + for raysampler in (raysampler_mc, raysampler_multinomial) ] # convert the mc sampling locations to [-1, 1] @@ -523,7 +523,7 @@ def _rotating_gif( cameras = init_cameras(n_frames, image_size=image_size) # init the grid raysampler - raysampler = GridRaysampler( + raysampler = MultinomialRaysampler( min_x=0.5, max_x=image_size[1] - 0.5, min_y=0.5, @@ -614,7 +614,7 @@ def test_rotating_cube_volume_render(self): volumes.features().requires_grad = True volumes.densities().requires_grad = True - raysampler = GridRaysampler( + raysampler = MultinomialRaysampler( min_x=0.5, max_x=image_size[1] - 0.5, min_y=0.5,
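
As a quick orientation for reviewers, the sketch below shows how the renamed sampler composes with the raymarcher and renderer after this change. It is a minimal, illustrative snippet, not part of the diff: the image size and depth range are example values borrowed from the tutorials above, and `my_field` is a hypothetical placeholder for a user-provided volumetric function such as the tutorial's `neural_radiance_field.batched_forward`.

```python
from pytorch3d.renderer import (
    EmissionAbsorptionRaymarcher,
    FoVPerspectiveCameras,
    ImplicitRenderer,
    NDCMultinomialRaysampler,
)

# A default single camera; the tutorials build batches of FoV cameras instead.
cameras = FoVPerspectiveCameras()

# NDCMultinomialRaysampler (formerly NDCGridRaysampler) emits one ray per pixel
# of a 128 x 128 image and samples n_pts_per_ray points between min_depth and
# max_depth along each ray, following PyTorch3D's NDC grid convention.
raysampler_grid = NDCMultinomialRaysampler(
    image_height=128,
    image_width=128,
    n_pts_per_ray=128,
    min_depth=0.1,
    max_depth=3.0,
)

# Converts the densities and colors sampled along each ray into one color and
# one opacity per ray (standard Emission-Absorption raymarching).
raymarcher = EmissionAbsorptionRaymarcher()

# The renderer is later called with cameras and a volumetric function, e.g.
#   images, ray_bundle = renderer_grid(cameras=cameras, volumetric_function=my_field)
# where `my_field` is a placeholder for the scene model being optimized.
renderer_grid = ImplicitRenderer(
    raysampler=raysampler_grid,
    raymarcher=raymarcher,
)
```

The `MultinomialRaysampler` used in the tests is the screen-space counterpart of the NDC variant: rather than the NDC convention, it takes explicit `min_x`/`max_x`/`min_y`/`max_y` pixel bounds, as in the test hunks above.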