Pipelines
colored_icp_registration.py
1# ----------------------------------------------------------------------------
2# - Open3D: www.open3d.org -
3# ----------------------------------------------------------------------------
4# Copyright (c) 2018-2023 www.open3d.org
5# SPDX-License-Identifier: MIT
6# ----------------------------------------------------------------------------
7"""ICP variant that uses both geometry and color for registration"""
8
9import open3d as o3d
10import numpy as np
11import copy
12
13
def draw_registration_result(source, target, transformation):
    """Visualize *source* aligned onto *target* by *transformation*.

    The source cloud is deep-copied first so the caller's geometry is
    untouched; original colors are kept since they matter for colored ICP.
    """
    moved_source = copy.deepcopy(source)
    moved_source.transform(transformation)
    o3d.visualization.draw([moved_source, target])
18
19
if __name__ == "__main__":
    # Loading is done under the main guard so importing this module has no
    # side effects (consistent with the other registration examples).
    print("Load two point clouds and show initial pose ...")
    ply_data = o3d.data.DemoColoredICPPointClouds()
    source = o3d.io.read_point_cloud(ply_data.paths[0])
    target = o3d.io.read_point_cloud(ply_data.paths[1])

    # Draw initial alignment.
    current_transformation = np.identity(4)
    # draw_registration_result(source, target, current_transformation)
    print(current_transformation)

    # Colored pointcloud registration.
    # This is implementation of following paper:
    # J. Park, Q.-Y. Zhou, V. Koltun,
    # Colored Point Cloud Registration Revisited, ICCV 2017.
    # Coarse-to-fine schedule: one voxel size and iteration budget per scale.
    voxel_radius = [0.04, 0.02, 0.01]
    max_iter = [50, 30, 14]
    current_transformation = np.identity(4)
    print("Colored point cloud registration ...\n")
    for scale in range(3):
        # Renamed from `iter` to avoid shadowing the builtin.
        max_iterations = max_iter[scale]
        radius = voxel_radius[scale]
        print([max_iterations, radius, scale])

        print("1. Downsample with a voxel size %.2f" % radius)
        source_down = source.voxel_down_sample(radius)
        target_down = target.voxel_down_sample(radius)

        print("2. Estimate normal")
        source_down.estimate_normals(
            o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))
        target_down.estimate_normals(
            o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))

        print("3. Applying colored point cloud registration")
        result_icp = o3d.pipelines.registration.registration_colored_icp(
            source_down, target_down, radius, current_transformation,
            o3d.pipelines.registration.TransformationEstimationForColoredICP(),
            o3d.pipelines.registration.ICPConvergenceCriteria(
                relative_fitness=1e-6,
                relative_rmse=1e-6,
                max_iteration=max_iterations))
        # The result of each scale seeds the next, finer scale.
        current_transformation = result_icp.transformation
        print(result_icp, "\n")
    # draw_registration_result(source, target, result_icp.transformation)
    print(current_transformation)
icp_registration.py
1# ----------------------------------------------------------------------------
2# - Open3D: www.open3d.org -
3# ----------------------------------------------------------------------------
4# Copyright (c) 2018-2023 www.open3d.org
5# SPDX-License-Identifier: MIT
6# ----------------------------------------------------------------------------
7"""ICP (Iterative Closest Point) registration algorithm"""
8
9import open3d as o3d
10import numpy as np
11import copy
12
13
def draw_registration_result(source, target, transformation):
    """Render *source* (yellow) against *target* (blue) after applying
    *transformation* to the source.

    Both inputs are deep-copied, so neither cloud is moved or recolored
    in place.
    """
    src_vis = copy.deepcopy(source)
    tgt_vis = copy.deepcopy(target)
    src_vis.paint_uniform_color([1, 0.706, 0])
    tgt_vis.paint_uniform_color([0, 0.651, 0.929])
    src_vis.transform(transformation)
    o3d.visualization.draw([src_vis, tgt_vis])
21
22
def point_to_point_icp(source, target, threshold, trans_init):
    """Run point-to-point ICP starting from *trans_init*, print the
    registration result, and visualize the aligned clouds."""
    print("Apply point-to-point ICP")
    estimation = o3d.pipelines.registration.TransformationEstimationPointToPoint()
    result = o3d.pipelines.registration.registration_icp(
        source, target, threshold, trans_init, estimation)
    print(result)
    print("Transformation is:")
    print(result.transformation, "\n")
    draw_registration_result(source, target, result.transformation)
32
33
def point_to_plane_icp(source, target, threshold, trans_init):
    """Run point-to-plane ICP starting from *trans_init*, print the
    registration result, and visualize the aligned clouds."""
    print("Apply point-to-plane ICP")
    estimation = o3d.pipelines.registration.TransformationEstimationPointToPlane()
    result = o3d.pipelines.registration.registration_icp(
        source, target, threshold, trans_init, estimation)
    print(result)
    print("Transformation is:")
    print(result.transformation, "\n")
    draw_registration_result(source, target, result.transformation)
43
44
if __name__ == "__main__":
    pcd_data = o3d.data.DemoICPPointClouds()
    source = o3d.io.read_point_cloud(pcd_data.paths[0])
    target = o3d.io.read_point_cloud(pcd_data.paths[1])

    # Maximum correspondence distance and a rough initial guess.
    threshold = 0.02
    trans_init = np.asarray([
        [0.862, 0.011, -0.507, 0.5],
        [-0.139, 0.967, -0.215, 0.7],
        [0.487, 0.255, 0.835, -1.4],
        [0.0, 0.0, 0.0, 1.0],
    ])
    draw_registration_result(source, target, trans_init)

    # Report fitness/RMSE of the initial guess before refining it.
    print("Initial alignment")
    evaluation = o3d.pipelines.registration.evaluate_registration(
        source, target, threshold, trans_init)
    print(evaluation, "\n")

    point_to_point_icp(source, target, threshold, trans_init)
    point_to_plane_icp(source, target, threshold, trans_init)
multiway_registration.py
1# ----------------------------------------------------------------------------
2# - Open3D: www.open3d.org -
3# ----------------------------------------------------------------------------
4# Copyright (c) 2018-2023 www.open3d.org
5# SPDX-License-Identifier: MIT
6# ----------------------------------------------------------------------------
7"""Align multiple pieces of geometry in a global space"""
8
9import open3d as o3d
10import numpy as np
11
12
def load_point_clouds(voxel_size=0.0):
    """Load the demo ICP fragments and voxel-downsample each one.

    Iterates over every path provided by the dataset instead of a
    hard-coded count, so it keeps working if the dataset size changes.

    :param voxel_size: voxel edge length used for downsampling.
    :return: list of downsampled point clouds.
    """
    pcd_data = o3d.data.DemoICPPointClouds()
    pcds = []
    for path in pcd_data.paths:
        pcd = o3d.io.read_point_cloud(path)
        pcds.append(pcd.voxel_down_sample(voxel_size=voxel_size))
    return pcds
21
22
def pairwise_registration(source, target, max_correspondence_distance_coarse,
                          max_correspondence_distance_fine):
    """Coarse-to-fine point-to-plane ICP between two fragments.

    The coarse pass starts from the identity; its result seeds the fine
    pass. Returns (transformation, information_matrix), the latter
    evaluated at the fine correspondence distance for pose-graph edges.
    """
    print("Apply point-to-plane ICP")
    estimation = o3d.pipelines.registration.TransformationEstimationPointToPlane()
    coarse = o3d.pipelines.registration.registration_icp(
        source, target, max_correspondence_distance_coarse, np.identity(4),
        estimation)
    fine = o3d.pipelines.registration.registration_icp(
        source, target, max_correspondence_distance_fine,
        coarse.transformation, estimation)
    information_icp = o3d.pipelines.registration.get_information_matrix_from_point_clouds(
        source, target, max_correspondence_distance_fine, fine.transformation)
    return fine.transformation, information_icp
38
39
def full_registration(pcds, max_correspondence_distance_coarse,
                      max_correspondence_distance_fine):
    """Build a pose graph over every ordered fragment pair.

    Consecutive pairs are treated as odometry (a new node is added and the
    edge is marked certain); all other pairs become loop-closure edges
    marked uncertain.
    """
    pose_graph = o3d.pipelines.registration.PoseGraph()
    odometry = np.identity(4)
    pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(odometry))
    n_pcds = len(pcds)
    for source_id in range(n_pcds):
        for target_id in range(source_id + 1, n_pcds):
            transformation_icp, information_icp = pairwise_registration(
                pcds[source_id], pcds[target_id],
                max_correspondence_distance_coarse,
                max_correspondence_distance_fine)
            print("Build o3d.pipelines.registration.PoseGraph")
            is_odometry = target_id == source_id + 1
            if is_odometry:
                # Chain the relative transform and store the inverse as the
                # new node's global pose.
                odometry = np.dot(transformation_icp, odometry)
                pose_graph.nodes.append(
                    o3d.pipelines.registration.PoseGraphNode(
                        np.linalg.inv(odometry)))
            pose_graph.edges.append(
                o3d.pipelines.registration.PoseGraphEdge(
                    source_id,
                    target_id,
                    transformation_icp,
                    information_icp,
                    uncertain=not is_odometry))
    return pose_graph
72
73
if __name__ == "__main__":
    voxel_size = 0.02
    pcds_down = load_point_clouds(voxel_size)
    o3d.visualization.draw(pcds_down)

    print("Full registration ...")
    max_correspondence_distance_coarse = voxel_size * 15
    max_correspondence_distance_fine = voxel_size * 1.5
    with o3d.utility.VerbosityContextManager(
            o3d.utility.VerbosityLevel.Debug):
        pose_graph = full_registration(pcds_down,
                                       max_correspondence_distance_coarse,
                                       max_correspondence_distance_fine)

    print("Optimizing PoseGraph ...")
    option = o3d.pipelines.registration.GlobalOptimizationOption(
        max_correspondence_distance=max_correspondence_distance_fine,
        edge_prune_threshold=0.25,
        reference_node=0)
    with o3d.utility.VerbosityContextManager(
            o3d.utility.VerbosityLevel.Debug):
        o3d.pipelines.registration.global_optimization(
            pose_graph,
            o3d.pipelines.registration.GlobalOptimizationLevenbergMarquardt(),
            o3d.pipelines.registration.GlobalOptimizationConvergenceCriteria(),
            option)

    print("Transform points and display")
    # Apply each optimized node pose to its fragment; `enumerate` replaces
    # the range(len(...)) anti-pattern.
    for point_id, pcd in enumerate(pcds_down):
        print(pose_graph.nodes[point_id].pose)
        pcd.transform(pose_graph.nodes[point_id].pose)
    o3d.visualization.draw(pcds_down)
pose_graph_optimization.py
1# ----------------------------------------------------------------------------
2# - Open3D: www.open3d.org -
3# ----------------------------------------------------------------------------
4# Copyright (c) 2018-2023 www.open3d.org
5# SPDX-License-Identifier: MIT
6# ----------------------------------------------------------------------------
7
8import open3d as o3d
9import numpy as np
10import os
11
if __name__ == "__main__":

    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)

    # Default optimization parameters; printed so the example is self-describing.
    print("")
    print(
        "Parameters for o3d.pipelines.registration.PoseGraph optimization ...")
    method = o3d.pipelines.registration.GlobalOptimizationLevenbergMarquardt()
    criteria = o3d.pipelines.registration.GlobalOptimizationConvergenceCriteria()
    option = o3d.pipelines.registration.GlobalOptimizationOption()
    print("")
    print(method)
    print(criteria)
    print(option)
    print("")

    print(
        "Optimizing Fragment o3d.pipelines.registration.PoseGraph using open3d ..."
    )
    pose_graph_data = o3d.data.DemoPoseGraphOptimization()
    pose_graph_fragment = o3d.io.read_pose_graph(
        pose_graph_data.pose_graph_fragment_path)
    print(pose_graph_fragment)
    o3d.pipelines.registration.global_optimization(pose_graph_fragment, method,
                                                  criteria, option)
    # A single-argument os.path.join() is a no-op, so write to the plain
    # relative filename directly.
    o3d.io.write_pose_graph('pose_graph_example_fragment_optimized.json',
                            pose_graph_fragment)
    print("")

    print(
        "Optimizing Global o3d.pipelines.registration.PoseGraph using open3d ..."
    )
    pose_graph_global = o3d.io.read_pose_graph(
        pose_graph_data.pose_graph_global_path)
    print(pose_graph_global)
    o3d.pipelines.registration.global_optimization(pose_graph_global, method,
                                                  criteria, option)
    o3d.io.write_pose_graph('pose_graph_example_global_optimized.json',
                            pose_graph_global)
registration_fgr.py
1# ----------------------------------------------------------------------------
2# - Open3D: www.open3d.org -
3# ----------------------------------------------------------------------------
4# Copyright (c) 2018-2023 www.open3d.org
5# SPDX-License-Identifier: MIT
6# ----------------------------------------------------------------------------
7
8import open3d as o3d
9
10import argparse
11
12
def preprocess_point_cloud(pcd, voxel_size):
    """Downsample *pcd* and compute normals plus FPFH features.

    Normals use a 2x-voxel search radius, FPFH a 5x radius. Returns the
    pair (downsampled_cloud, fpfh_feature).
    """
    downsampled = pcd.voxel_down_sample(voxel_size)
    normal_param = o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2.0,
                                                        max_nn=30)
    downsampled.estimate_normals(normal_param)
    feature_param = o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5.0,
                                                         max_nn=100)
    fpfh = o3d.pipelines.registration.compute_fpfh_feature(
        downsampled, feature_param)
    return (downsampled, fpfh)
23
24
if __name__ == '__main__':
    pcd_data = o3d.data.DemoICPPointClouds()
    # Description fixed: this example runs Fast Global Registration (FGR);
    # "with RANSAC" was a copy-paste from the RANSAC example.
    parser = argparse.ArgumentParser(
        'Global point cloud registration example with FGR')
    parser.add_argument('src',
                        type=str,
                        default=pcd_data.paths[0],
                        nargs='?',
                        help='path to src point cloud')
    parser.add_argument('dst',
                        type=str,
                        default=pcd_data.paths[1],
                        nargs='?',
                        help='path to dst point cloud')
    parser.add_argument('--voxel_size',
                        type=float,
                        default=0.05,
                        help='voxel size in meter used to downsample inputs')
    parser.add_argument(
        '--distance_multiplier',
        type=float,
        default=1.5,
        help='multiplier used to compute the distance threshold between '
        'correspondences. Threshold is computed by '
        'voxel_size * distance_multiplier.')
    parser.add_argument('--max_iterations',
                        type=int,
                        default=64,
                        help='number of max FGR iterations')
    parser.add_argument(
        '--max_tuples',
        type=int,
        default=1000,
        help='max number of accepted tuples for correspondence filtering')

    args = parser.parse_args()

    voxel_size = args.voxel_size
    distance_threshold = args.distance_multiplier * voxel_size

    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
    print('Reading inputs')
    src = o3d.io.read_point_cloud(args.src)
    dst = o3d.io.read_point_cloud(args.dst)

    print('Downsampling inputs')
    src_down, src_fpfh = preprocess_point_cloud(src, voxel_size)
    dst_down, dst_fpfh = preprocess_point_cloud(dst, voxel_size)

    print('Running FGR')
    result = o3d.pipelines.registration.registration_fgr_based_on_feature_matching(
        src_down, dst_down, src_fpfh, dst_fpfh,
        o3d.pipelines.registration.FastGlobalRegistrationOption(
            maximum_correspondence_distance=distance_threshold,
            iteration_number=args.max_iterations,
            maximum_tuple_count=args.max_tuples))

    # Color the full-resolution clouds (red = source, green = target) and
    # show the estimated alignment.
    src.paint_uniform_color([1, 0, 0])
    dst.paint_uniform_color([0, 1, 0])
    o3d.visualization.draw([src.transform(result.transformation), dst])
registration_ransac.py
1# ----------------------------------------------------------------------------
2# - Open3D: www.open3d.org -
3# ----------------------------------------------------------------------------
4# Copyright (c) 2018-2023 www.open3d.org
5# SPDX-License-Identifier: MIT
6# ----------------------------------------------------------------------------
7
8import open3d as o3d
9
10import argparse
11
12
def preprocess_point_cloud(pcd, voxel_size):
    """Voxel-downsample *pcd*, then estimate normals and FPFH features.

    Returns a (downsampled_cloud, fpfh_feature) pair.
    """
    down = pcd.voxel_down_sample(voxel_size)
    down.estimate_normals(
        o3d.geometry.KDTreeSearchParamHybrid(radius=2.0 * voxel_size,
                                             max_nn=30))
    fpfh = o3d.pipelines.registration.compute_fpfh_feature(
        down,
        o3d.geometry.KDTreeSearchParamHybrid(radius=5.0 * voxel_size,
                                             max_nn=100))
    return (down, fpfh)
23
24
if __name__ == '__main__':
    pcd_data = o3d.data.DemoICPPointClouds()
    parser = argparse.ArgumentParser(
        'Global point cloud registration example with RANSAC')
    parser.add_argument('src',
                        type=str,
                        default=pcd_data.paths[0],
                        nargs='?',
                        help='path to src point cloud')
    parser.add_argument('dst',
                        type=str,
                        default=pcd_data.paths[1],
                        nargs='?',
                        help='path to dst point cloud')
    parser.add_argument('--voxel_size',
                        type=float,
                        default=0.05,
                        help='voxel size in meter used to downsample inputs')
    # Help text fixed: "multipler" typo and missing spaces between the
    # implicitly concatenated string fragments.
    parser.add_argument(
        '--distance_multiplier',
        type=float,
        default=1.5,
        help='multiplier used to compute the distance threshold between '
        'correspondences. Threshold is computed by '
        'voxel_size * distance_multiplier.')
    parser.add_argument('--max_iterations',
                        type=int,
                        default=1000000,
                        help='number of max RANSAC iterations')
    parser.add_argument('--confidence',
                        type=float,
                        default=0.999,
                        help='RANSAC confidence')
    parser.add_argument(
        '--mutual_filter',
        action='store_true',
        help='whether to use mutual filter for putative correspondences')

    args = parser.parse_args()

    voxel_size = args.voxel_size
    distance_threshold = args.distance_multiplier * voxel_size

    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
    print('Reading inputs')
    src = o3d.io.read_point_cloud(args.src)
    dst = o3d.io.read_point_cloud(args.dst)

    print('Downsampling inputs')
    src_down, src_fpfh = preprocess_point_cloud(src, voxel_size)
    dst_down, dst_fpfh = preprocess_point_cloud(dst, voxel_size)

    print('Running RANSAC')
    result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
        src_down,
        dst_down,
        src_fpfh,
        dst_fpfh,
        mutual_filter=args.mutual_filter,
        max_correspondence_distance=distance_threshold,
        estimation_method=o3d.pipelines.registration.
        TransformationEstimationPointToPoint(False),
        ransac_n=3,
        checkers=[
            o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(
                0.9),
            o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(
                distance_threshold)
        ],
        criteria=o3d.pipelines.registration.RANSACConvergenceCriteria(
            args.max_iterations, args.confidence))

    # Color the full-resolution clouds (red = source, green = target) and
    # show the estimated alignment.
    src.paint_uniform_color([1, 0, 0])
    dst.paint_uniform_color([0, 1, 0])
    o3d.visualization.draw([src.transform(result.transformation), dst])
rgbd_integration_uniform.py
1# ----------------------------------------------------------------------------
2# - Open3D: www.open3d.org -
3# ----------------------------------------------------------------------------
4# Copyright (c) 2018-2023 www.open3d.org
5# SPDX-License-Identifier: MIT
6# ----------------------------------------------------------------------------
7
8import open3d as o3d
9import numpy as np
10
11import os, sys
12
13pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
14sys.path.append(pyexample_path)
15
16from open3d_example import read_trajectory
17
if __name__ == "__main__":
    rgbd_data = o3d.data.SampleRedwoodRGBDImages()
    camera_poses = read_trajectory(rgbd_data.odometry_log_path)
    camera_intrinsics = o3d.camera.PinholeCameraIntrinsic(
        o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
    volume = o3d.pipelines.integration.UniformTSDFVolume(
        length=4.0,
        resolution=512,
        sdf_trunc=0.04,
        color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8,
    )

    # `enumerate` replaces range(len(...)); each trajectory entry pairs with
    # the color/depth image of the same index.
    for i, camera_pose in enumerate(camera_poses):
        print("Integrate {:d}-th image into the volume.".format(i))
        color = o3d.io.read_image(rgbd_data.color_paths[i])
        depth = o3d.io.read_image(rgbd_data.depth_paths[i])

        rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
            color, depth, depth_trunc=4.0, convert_rgb_to_intensity=False)
        # integrate() takes the inverse of the trajectory pose, as before.
        volume.integrate(
            rgbd,
            camera_intrinsics,
            np.linalg.inv(camera_pose.pose),
        )

    print("Extract triangle mesh")
    mesh = volume.extract_triangle_mesh()
    mesh.compute_vertex_normals()
    o3d.visualization.draw_geometries([mesh])

    print("Extract voxel-aligned debugging point cloud")
    voxel_pcd = volume.extract_voxel_point_cloud()
    o3d.visualization.draw_geometries([voxel_pcd])

    print("Extract voxel-aligned debugging voxel grid")
    voxel_grid = volume.extract_voxel_grid()
    # o3d.visualization.draw_geometries([voxel_grid])

    # print("Extract point cloud")
    # pcd = volume.extract_point_cloud()
    # o3d.visualization.draw_geometries([pcd])
rgbd_odometry.py
1# ----------------------------------------------------------------------------
2# - Open3D: www.open3d.org -
3# ----------------------------------------------------------------------------
4# Copyright (c) 2018-2023 www.open3d.org
5# SPDX-License-Identifier: MIT
6# ----------------------------------------------------------------------------
7"""Find camera movement between two consecutive RGBD image pairs"""
8
9import open3d as o3d
10import numpy as np
11
if __name__ == "__main__":
    pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(
        o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
    rgbd_data = o3d.data.SampleRedwoodRGBDImages()
    source_color = o3d.io.read_image(rgbd_data.color_paths[0])
    source_depth = o3d.io.read_image(rgbd_data.depth_paths[0])
    target_color = o3d.io.read_image(rgbd_data.color_paths[1])
    target_depth = o3d.io.read_image(rgbd_data.depth_paths[1])

    source_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
        source_color, source_depth)
    target_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
        target_color, target_depth)
    target_pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
        target_rgbd_image, pinhole_camera_intrinsic)

    option = o3d.pipelines.odometry.OdometryOption()
    odo_init = np.identity(4)
    print(option)

    # Odometry using the color-term Jacobian.
    success_color_term, trans_color_term, info = \
        o3d.pipelines.odometry.compute_rgbd_odometry(
            source_rgbd_image, target_rgbd_image, pinhole_camera_intrinsic,
            odo_init,
            o3d.pipelines.odometry.RGBDOdometryJacobianFromColorTerm(), option)
    # Odometry using the hybrid-term Jacobian.
    success_hybrid_term, trans_hybrid_term, info = \
        o3d.pipelines.odometry.compute_rgbd_odometry(
            source_rgbd_image, target_rgbd_image, pinhole_camera_intrinsic,
            odo_init,
            o3d.pipelines.odometry.RGBDOdometryJacobianFromHybridTerm(), option)

    if success_color_term:
        print("Using RGB-D Odometry")
        print(trans_color_term)
        source_pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
            source_rgbd_image, pinhole_camera_intrinsic)
        source_pcd.transform(trans_color_term)
        o3d.visualization.draw([target_pcd, source_pcd])

    if success_hybrid_term:
        print("Using Hybrid RGB-D Odometry")
        print(trans_hybrid_term)
        source_pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
            source_rgbd_image, pinhole_camera_intrinsic)
        source_pcd.transform(trans_hybrid_term)
        o3d.visualization.draw([target_pcd, source_pcd])
robust_icp.py
1# ----------------------------------------------------------------------------
2# - Open3D: www.open3d.org -
3# ----------------------------------------------------------------------------
4# Copyright (c) 2018-2023 www.open3d.org
5# SPDX-License-Identifier: MIT
6# ----------------------------------------------------------------------------
7"""Outlier rejection using robust kernels for ICP"""
8
9import open3d as o3d
10import numpy as np
11import copy
12
13
def draw_registration_result(source, target, transformation):
    """Show *source* (yellow) and *target* (blue) with *transformation*
    applied to the source.

    Operates on deep copies so the inputs keep their geometry and colors.
    """
    src_copy = copy.deepcopy(source)
    tgt_copy = copy.deepcopy(target)
    src_copy.paint_uniform_color([1, 0.706, 0])
    tgt_copy.paint_uniform_color([0, 0.651, 0.929])
    src_copy.transform(transformation)
    o3d.visualization.draw([src_copy, tgt_copy])
21
22
def apply_noise(pcd, mu, sigma):
    """Return a copy of *pcd* with Gaussian noise drawn from N(mu, sigma)
    added independently to every point coordinate. The input cloud is not
    modified."""
    perturbed = copy.deepcopy(pcd)
    coords = np.asarray(perturbed.points)
    noisy = coords + np.random.normal(mu, sigma, size=coords.shape)
    perturbed.points = o3d.utility.Vector3dVector(noisy)
    return perturbed
29
30
if __name__ == "__main__":
    pcd_data = o3d.data.DemoICPPointClouds()
    source = o3d.io.read_point_cloud(pcd_data.paths[0])
    target = o3d.io.read_point_cloud(pcd_data.paths[1])
    # Rough initial alignment between the two fragments.
    trans_init = np.asarray([
        [0.862, 0.011, -0.507, 0.5],
        [-0.139, 0.967, -0.215, 0.7],
        [0.487, 0.255, 0.835, -1.4],
        [0.0, 0.0, 0.0, 1.0],
    ])

    # Corrupt the source cloud with Gaussian noise (mean mu, stddev sigma).
    mu, sigma = 0, 0.1
    source_noisy = apply_noise(source, mu, sigma)

    print("Displaying source point cloud + noise:")
    o3d.visualization.draw([source_noisy])

    print(
        "Displaying original source and target point cloud with initial transformation:"
    )
    draw_registration_result(source, target, trans_init)

    threshold = 1.0
    print("Using the noisy source pointcloud to perform robust ICP.\n")
    print("Robust point-to-plane ICP, threshold={}:".format(threshold))
    loss = o3d.pipelines.registration.TukeyLoss(k=sigma)
    print("Using robust loss:", loss)
    estimation = o3d.pipelines.registration.TransformationEstimationPointToPlane(
        loss)
    reg_p2l = o3d.pipelines.registration.registration_icp(
        source_noisy, target, threshold, trans_init, estimation)
    print(reg_p2l)
    print("Transformation is:")
    print(reg_p2l.transformation)
    draw_registration_result(source, target, reg_p2l.transformation)