Open3D (C++ API)  0.16.1
RGBDOdometryJacobianImpl.h
Go to the documentation of this file.
1// ----------------------------------------------------------------------------
2// - Open3D: www.open3d.org -
3// ----------------------------------------------------------------------------
4// The MIT License (MIT)
5//
6// Copyright (c) 2018-2021 www.open3d.org
7//
8// Permission is hereby granted, free of charge, to any person obtaining a copy
9// of this software and associated documentation files (the "Software"), to deal
10// in the Software without restriction, including without limitation the rights
11// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12// copies of the Software, and to permit persons to whom the Software is
13// furnished to do so, subject to the following conditions:
14//
15// The above copyright notice and this permission notice shall be included in
16// all copies or substantial portions of the Software.
17//
18// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24// IN THE SOFTWARE.
25// ----------------------------------------------------------------------------
26
27// Private header. Do not include in Open3d.h.
28
29#include "open3d/core/Tensor.h"
30#include "open3d/t/geometry/kernel/GeometryIndexer.h"
32
33namespace open3d {
34namespace t {
35namespace pipelines {
36namespace kernel {
37namespace odometry {
38
39using t::geometry::kernel::NDArrayIndexer;
40using t::geometry::kernel::TransformIndexer;
41
42#ifndef __CUDACC__
43using std::abs;
44using std::isnan;
45using std::max;
46#endif
47
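// Derivative of the Huber penalty with respect to the residual r:
// r for |r| < delta, delta * sign(r) otherwise.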
48inline OPEN3D_HOST_DEVICE float HuberDeriv(float r, float delta) {
49 float abs_r = abs(r);
50 return abs_r < delta ? r : delta * Sign(r);
51}
52
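// Huber penalty: 0.5 * r^2 for |r| < delta, delta * |r| - 0.5 * delta^2 otherwise.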
53inline OPEN3D_HOST_DEVICE float HuberLoss(float r, float delta) {
54 float abs_r = abs(r);
55 return abs_r < delta ? 0.5 * r * r : delta * abs_r - 0.5 * delta * delta;
56}
57
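// Point-to-plane term for source pixel (x, y): transforms the source vertex
// into the target frame, projects it, and computes the residual
// r = dot(n_target, T * v_source - v_target) together with its 6-element
// Jacobian J_ij w.r.t. the pose increment (3 rotation, then 3 translation).
// Returns false if the correspondence is invalid (NaN vertex or normal,
// negative depth, projection out of bounds, or |r| > depth_outlier_trunc).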
58inline OPEN3D_HOST_DEVICE bool GetJacobianPointToPlane(
59 int x,
60 int y,
61 const float depth_outlier_trunc,
62 const NDArrayIndexer& source_vertex_indexer,
63 const NDArrayIndexer& target_vertex_indexer,
64 const NDArrayIndexer& target_normal_indexer,
65 const TransformIndexer& ti,
66 float* J_ij,
67 float& r) {
68 float* source_v = source_vertex_indexer.GetDataPtr<float>(x, y);
69 if (isnan(source_v[0])) {
70 return false;
71 }
72
73 // Transform source points to the target camera's coordinate space.
74 float T_source_to_target_v[3], u, v;
75 ti.RigidTransform(source_v[0], source_v[1], source_v[2],
76 &T_source_to_target_v[0], &T_source_to_target_v[1],
77 &T_source_to_target_v[2]);
78 ti.Project(T_source_to_target_v[0], T_source_to_target_v[1],
79 T_source_to_target_v[2], &u, &v);
80 u = roundf(u);
81 v = roundf(v);
82
83 if (T_source_to_target_v[2] < 0 ||
84 !target_vertex_indexer.InBoundary(u, v)) {
85 return false;
86 }
87
88 int ui = static_cast<int>(u);
89 int vi = static_cast<int>(v);
90 float* target_v = target_vertex_indexer.GetDataPtr<float>(ui, vi);
91 float* target_n = target_normal_indexer.GetDataPtr<float>(ui, vi);
92 if (isnan(target_v[0]) || isnan(target_n[0])) {
93 return false;
94 }
95
96 r = (T_source_to_target_v[0] - target_v[0]) * target_n[0] +
97 (T_source_to_target_v[1] - target_v[1]) * target_n[1] +
98 (T_source_to_target_v[2] - target_v[2]) * target_n[2];
99 if (abs(r) > depth_outlier_trunc) {
100 return false;
101 }
102
103 J_ij[0] = -T_source_to_target_v[2] * target_n[1] +
104 T_source_to_target_v[1] * target_n[2];
105 J_ij[1] = T_source_to_target_v[2] * target_n[0] -
106 T_source_to_target_v[0] * target_n[2];
107 J_ij[2] = -T_source_to_target_v[1] * target_n[0] +
108 T_source_to_target_v[0] * target_n[1];
109 J_ij[3] = target_n[0];
110 J_ij[4] = target_n[1];
111 J_ij[5] = target_n[2];
112
113 return true;
114}
115
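// Intensity (photometric) term for source pixel (x, y): the residual is
// r_I = I_target(u, v) - I_source(x, y) at the projected location, and J_I
// chains the Sobel image gradients (scaled by sobel_scale) with the
// projection and the pose increment. Returns false for invalid vertices,
// out-of-bounds projections, or depth differences above depth_outlier_trunc.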
116inline OPEN3D_HOST_DEVICE bool GetJacobianIntensity(
117 int x,
118 int y,
119 const float depth_outlier_trunc,
120 const NDArrayIndexer& source_depth_indexer,
121 const NDArrayIndexer& target_depth_indexer,
122 const NDArrayIndexer& source_intensity_indexer,
123 const NDArrayIndexer& target_intensity_indexer,
124 const NDArrayIndexer& target_intensity_dx_indexer,
125 const NDArrayIndexer& target_intensity_dy_indexer,
126 const NDArrayIndexer& source_vertex_indexer,
127 const TransformIndexer& ti,
128 float* J_I,
129 float& r_I) {
130 const float sobel_scale = 0.125;
131
132 float* source_v = source_vertex_indexer.GetDataPtr<float>(x, y);
133 if (isnan(source_v[0])) {
134 return false;
135 }
136
137 // Transform source points to the target camera's coordinate space.
138 float T_source_to_target_v[3], u_tf, v_tf;
139 ti.RigidTransform(source_v[0], source_v[1], source_v[2],
140 &T_source_to_target_v[0], &T_source_to_target_v[1],
141 &T_source_to_target_v[2]);
142 ti.Project(T_source_to_target_v[0], T_source_to_target_v[1],
143 T_source_to_target_v[2], &u_tf, &v_tf);
144 int u_t = int(roundf(u_tf));
145 int v_t = int(roundf(v_tf));
146
147 if (T_source_to_target_v[2] < 0 ||
148 !target_depth_indexer.InBoundary(u_t, v_t)) {
149 return false;
150 }
151
152 float fx, fy;
153 ti.GetFocalLength(&fx, &fy);
154
155 float depth_t = *target_depth_indexer.GetDataPtr<float>(u_t, v_t);
156 float diff_D = depth_t - T_source_to_target_v[2];
157 if (isnan(depth_t) || abs(diff_D) > depth_outlier_trunc) {
158 return false;
159 }
160
161 float diff_I = *target_intensity_indexer.GetDataPtr<float>(u_t, v_t) -
162 *source_intensity_indexer.GetDataPtr<float>(x, y);
163 float dIdx = sobel_scale *
164 (*target_intensity_dx_indexer.GetDataPtr<float>(u_t, v_t));
165 float dIdy = sobel_scale *
166 (*target_intensity_dy_indexer.GetDataPtr<float>(u_t, v_t));
167
168 float invz = 1 / T_source_to_target_v[2];
169 float c0 = dIdx * fx * invz;
170 float c1 = dIdy * fy * invz;
171 float c2 = -(c0 * T_source_to_target_v[0] + c1 * T_source_to_target_v[1]) *
172 invz;
173
174 J_I[0] = (-T_source_to_target_v[2] * c1 + T_source_to_target_v[1] * c2);
175 J_I[1] = (T_source_to_target_v[2] * c0 - T_source_to_target_v[0] * c2);
176 J_I[2] = (-T_source_to_target_v[1] * c0 + T_source_to_target_v[0] * c1);
177 J_I[3] = (c0);
178 J_I[4] = (c1);
179 J_I[5] = (c2);
180 r_I = diff_I;
181
182 return true;
183}
184
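// Hybrid term for source pixel (x, y): computes both the intensity residual
// r_I and the depth residual r_D, with their Jacobians J_I and J_D. J_D
// combines the target depth-image gradient with the derivative of the
// predicted depth (the z-coordinate of the transformed source vertex). Both
// terms are pre-scaled by sqrt(0.5) (sqrt_lambda_*) so that intensity and
// depth contribute equally to the joint least-squares objective.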
185inline OPEN3D_HOST_DEVICE bool GetJacobianHybrid(
186 int x,
187 int y,
188 const float depth_outlier_trunc,
189 const NDArrayIndexer& source_depth_indexer,
190 const NDArrayIndexer& target_depth_indexer,
191 const NDArrayIndexer& source_intensity_indexer,
192 const NDArrayIndexer& target_intensity_indexer,
193 const NDArrayIndexer& target_depth_dx_indexer,
194 const NDArrayIndexer& target_depth_dy_indexer,
195 const NDArrayIndexer& target_intensity_dx_indexer,
196 const NDArrayIndexer& target_intensity_dy_indexer,
197 const NDArrayIndexer& source_vertex_indexer,
198 const TransformIndexer& ti,
199 float* J_I,
200 float* J_D,
201 float& r_I,
202 float& r_D) {
203 // sqrt 0.5, according to
204 // http://redwood-data.org/indoor_lidar_rgbd/supp.pdf
205 const float sqrt_lambda_intensity = 0.707;
206 const float sqrt_lambda_depth = 0.707;
207 const float sobel_scale = 0.125;
208
209 float* source_v = source_vertex_indexer.GetDataPtr<float>(x, y);
210 if (isnan(source_v[0])) {
211 return false;
212 }
213
214 // Transform source points to the target camera coordinate space.
215 float T_source_to_target_v[3], u_tf, v_tf;
216 ti.RigidTransform(source_v[0], source_v[1], source_v[2],
217 &T_source_to_target_v[0], &T_source_to_target_v[1],
218 &T_source_to_target_v[2]);
219 ti.Project(T_source_to_target_v[0], T_source_to_target_v[1],
220 T_source_to_target_v[2], &u_tf, &v_tf);
221 int u_t = int(roundf(u_tf));
222 int v_t = int(roundf(v_tf));
223
224 if (T_source_to_target_v[2] < 0 ||
225 !target_depth_indexer.InBoundary(u_t, v_t)) {
226 return false;
227 }
228
229 float fx, fy;
230 ti.GetFocalLength(&fx, &fy);
231
232 float depth_t = *target_depth_indexer.GetDataPtr<float>(u_t, v_t);
233 float diff_D = depth_t - T_source_to_target_v[2];
234 if (isnan(depth_t) || abs(diff_D) > depth_outlier_trunc) {
235 return false;
236 }
237
238 float dDdx = sobel_scale *
239 (*target_depth_dx_indexer.GetDataPtr<float>(u_t, v_t));
240 float dDdy = sobel_scale *
241 (*target_depth_dy_indexer.GetDataPtr<float>(u_t, v_t));
242 if (isnan(dDdx) || isnan(dDdy)) {
243 return false;
244 }
245
246 float diff_I = *target_intensity_indexer.GetDataPtr<float>(u_t, v_t) -
247 *source_intensity_indexer.GetDataPtr<float>(x, y);
248 float dIdx = sobel_scale *
249 (*target_intensity_dx_indexer.GetDataPtr<float>(u_t, v_t));
250 float dIdy = sobel_scale *
251 (*target_intensity_dy_indexer.GetDataPtr<float>(u_t, v_t));
252
253 float invz = 1 / T_source_to_target_v[2];
254 float c0 = dIdx * fx * invz;
255 float c1 = dIdy * fy * invz;
256 float c2 = -(c0 * T_source_to_target_v[0] + c1 * T_source_to_target_v[1]) *
257 invz;
258 float d0 = dDdx * fx * invz;
259 float d1 = dDdy * fy * invz;
260 float d2 = -(d0 * T_source_to_target_v[0] + d1 * T_source_to_target_v[1]) *
261 invz;
262
263 J_I[0] = sqrt_lambda_intensity *
264 (-T_source_to_target_v[2] * c1 + T_source_to_target_v[1] * c2);
265 J_I[1] = sqrt_lambda_intensity *
266 (T_source_to_target_v[2] * c0 - T_source_to_target_v[0] * c2);
267 J_I[2] = sqrt_lambda_intensity *
268 (-T_source_to_target_v[1] * c0 + T_source_to_target_v[0] * c1);
269 J_I[3] = sqrt_lambda_intensity * (c0);
270 J_I[4] = sqrt_lambda_intensity * (c1);
271 J_I[5] = sqrt_lambda_intensity * (c2);
272 r_I = sqrt_lambda_intensity * diff_I;
273
274 J_D[0] = sqrt_lambda_depth *
275 ((-T_source_to_target_v[2] * d1 + T_source_to_target_v[1] * d2) -
276 T_source_to_target_v[1]);
277 J_D[1] = sqrt_lambda_depth *
278 ((T_source_to_target_v[2] * d0 - T_source_to_target_v[0] * d2) +
279 T_source_to_target_v[0]);
280 J_D[2] = sqrt_lambda_depth *
281 ((-T_source_to_target_v[1] * d0 + T_source_to_target_v[0] * d1));
282 J_D[3] = sqrt_lambda_depth * (d0);
283 J_D[4] = sqrt_lambda_depth * (d1);
284 J_D[5] = sqrt_lambda_depth * (d2 - 1.0f);
285
286 r_D = sqrt_lambda_depth * diff_D;
287
288 return true;
289}
290
291} // namespace odometry
292} // namespace kernel
293} // namespace pipelines
294} // namespace t
295} // namespace open3d
#define OPEN3D_HOST_DEVICE
Definition: CUDAUtils.h:63
OPEN3D_HOST_DEVICE int Sign(int x)
Definition: GeometryMacros.h:96
Helper class for converting coordinates/indices between 3D/3D, 3D/2D, 2D/3D.
Definition: GeometryIndexer.h:44
OPEN3D_HOST_DEVICE void Project(float x_in, float y_in, float z_in, float *u_out, float *v_out) const
Project a 3D coordinate in camera coordinate to a 2D uv coordinate.
Definition: GeometryIndexer.h:119
OPEN3D_HOST_DEVICE void RigidTransform(float x_in, float y_in, float z_in, float *x_out, float *y_out, float *z_out) const
Transform a 3D coordinate in camera coordinate to world coordinate.
Definition: GeometryIndexer.h:81
OPEN3D_HOST_DEVICE void GetFocalLength(float *fx, float *fy) const
Definition: GeometryIndexer.h:141
TArrayIndexer< int64_t > NDArrayIndexer
Definition: GeometryIndexer.h:379
OPEN3D_HOST_DEVICE bool GetJacobianHybrid(int x, int y, const float depth_outlier_trunc, const NDArrayIndexer &source_depth_indexer, const NDArrayIndexer &target_depth_indexer, const NDArrayIndexer &source_intensity_indexer, const NDArrayIndexer &target_intensity_indexer, const NDArrayIndexer &target_depth_dx_indexer, const NDArrayIndexer &target_depth_dy_indexer, const NDArrayIndexer &target_intensity_dx_indexer, const NDArrayIndexer &target_intensity_dy_indexer, const NDArrayIndexer &source_vertex_indexer, const TransformIndexer &ti, float *J_I, float *J_D, float &r_I, float &r_D)
Definition: RGBDOdometryJacobianImpl.h:185
OPEN3D_HOST_DEVICE float HuberDeriv(float r, float delta)
Definition: RGBDOdometryJacobianImpl.h:48
OPEN3D_HOST_DEVICE bool GetJacobianPointToPlane(int x, int y, const float depth_outlier_trunc, const NDArrayIndexer &source_vertex_indexer, const NDArrayIndexer &target_vertex_indexer, const NDArrayIndexer &target_normal_indexer, const TransformIndexer &ti, float *J_ij, float &r)
Definition: RGBDOdometryJacobianImpl.h:58
OPEN3D_HOST_DEVICE bool GetJacobianIntensity(int x, int y, const float depth_outlier_trunc, const NDArrayIndexer &source_depth_indexer, const NDArrayIndexer &target_depth_indexer, const NDArrayIndexer &source_intensity_indexer, const NDArrayIndexer &target_intensity_indexer, const NDArrayIndexer &target_intensity_dx_indexer, const NDArrayIndexer &target_intensity_dy_indexer, const NDArrayIndexer &source_vertex_indexer, const TransformIndexer &ti, float *J_I, float &r_I)
Definition: RGBDOdometryJacobianImpl.h:116
OPEN3D_HOST_DEVICE float HuberLoss(float r, float delta)
Definition: RGBDOdometryJacobianImpl.h:53
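Each GetJacobian* helper above emits, per valid pixel, one 6-element row J and a scalar residual r. A caller typically folds these rows into the 6x6 normal equations of a Gauss-Newton step and solves for the 6-dof pose update. The following sketch is illustrative only and uses plain C++ containers; the NormalEquations struct and AccumulateRow function are assumptions for exposition, not part of this header or the Open3D API.

// Illustrative sketch (not Open3D API): accumulate per-pixel (J, r) rows,
// as produced by the GetJacobian* helpers, into A * delta = b.
#include <array>

struct NormalEquations {
    std::array<double, 36> A{};  // 6x6 row-major, sum of J * J^T
    std::array<double, 6> b{};   // sum of -r * J
    double residual = 0.0;       // sum of r^2
    int count = 0;               // number of valid correspondences
};

inline void AccumulateRow(const float* J, float r, NormalEquations& ne) {
    for (int i = 0; i < 6; ++i) {
        for (int j = 0; j < 6; ++j) {
            ne.A[i * 6 + j] += static_cast<double>(J[i]) * J[j];
        }
        ne.b[i] -= static_cast<double>(r) * J[i];
    }
    ne.residual += static_cast<double>(r) * r;
    ne.count += 1;
}

// Per pixel: if GetJacobianPointToPlane(x, y, ..., J, r) returns true, call
// AccumulateRow(J, r, ne). Afterwards solve ne.A * delta = ne.b (e.g. via an
// LDLT factorization) and apply the resulting se(3) increment to the pose.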