OrthancStone/Sources/Toolbox/FiniteProjectiveCamera.cpp @ changeset 1512:244ad1e4e76a

Commit message: reorganization of folders
Author: Sebastien Jodogne <s.jodogne@gmail.com>
Date: Tue, 07 Jul 2020 16:21:02 +0200
Parent: Framework/Toolbox/FiniteProjectiveCamera.cpp @ 30deba7bc8e2
Child: 4fb8fdf03314
/**
 * Stone of Orthanc
 * Copyright (C) 2012-2016 Sebastien Jodogne, Medical Physics
 * Department, University Hospital of Liege, Belgium
 * Copyright (C) 2017-2020 Osimis S.A., Belgium
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU Affero General Public License
 * as published by the Free Software Foundation, either version 3 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 **/


#include "FiniteProjectiveCamera.h"

#include "GeometryToolbox.h"
#include "SubpixelReader.h"

#include <Logging.h>
#include <OrthancException.h>
#include <Images/Image.h>
#include <Images/ImageProcessing.h>

namespace OrthancStone
{
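  // The finite projective camera is modeled by the 3x4 matrix
  // P = K * R * [I | -C], where "K" is the upper-triangular matrix of
  // intrinsic parameters, "R" is a 3D rotation matrix, and "C" is the
  // camera center. The page numbers in the comments below presumably
  // refer to the Hartley & Zisserman textbook "Multiple View Geometry
  // in Computer Vision" (an assumption, as the source does not name it).
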
  void FiniteProjectiveCamera::ComputeMInverse()
  {
    using namespace boost::numeric::ublas;

    // inv(M) = inv(K * R) = inv(R) * inv(K) = R' * inv(K). This
    // matrix is always invertible, by definition of finite
    // projective cameras (page 157).
    Matrix kinv;
    LinearAlgebra::InvertUpperTriangularMatrix(kinv, k_);
    minv_ = prod(trans(r_), kinv);
  }


  void FiniteProjectiveCamera::Setup(const Matrix& k,
                                     const Matrix& r,
                                     const Vector& c)
  {
    if (k.size1() != 3 ||
        k.size2() != 3 ||
        !LinearAlgebra::IsCloseToZero(k(1, 0)) ||
        !LinearAlgebra::IsCloseToZero(k(2, 0)) ||
        !LinearAlgebra::IsCloseToZero(k(2, 1)))
    {
      LOG(ERROR) << "Invalid intrinsic parameters";
      throw Orthanc::OrthancException(Orthanc::ErrorCode_ParameterOutOfRange);
    }

    if (r.size1() != 3 ||
        r.size2() != 3)
    {
      LOG(ERROR) << "Invalid size for a 3D rotation matrix";
      throw Orthanc::OrthancException(Orthanc::ErrorCode_ParameterOutOfRange);
    }

    if (!LinearAlgebra::IsRotationMatrix(r, 100.0 * std::numeric_limits<float>::epsilon()))
    {
      LOG(ERROR) << "Invalid rotation matrix";
      throw Orthanc::OrthancException(Orthanc::ErrorCode_ParameterOutOfRange);
    }

    if (c.size() != 3)
    {
      LOG(ERROR) << "Invalid camera center";
      throw Orthanc::OrthancException(Orthanc::ErrorCode_ParameterOutOfRange);
    }

    k_ = k;
    r_ = r;
    c_ = c;

    ComputeMInverse();

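    // Assemble P = K * R * [I | -C]: start from the 3x4 block [I | -C],
    // then multiply it by the rotation and intrinsic matrices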
    Matrix tmp = LinearAlgebra::IdentityMatrix(3);
    tmp.resize(3, 4);
    tmp(0, 3) = -c[0];
    tmp(1, 3) = -c[1];
    tmp(2, 3) = -c[2];

    p_ = LinearAlgebra::Product(k, r, tmp);

    assert(p_.size1() == 3 &&
           p_.size2() == 4);
  }


  void FiniteProjectiveCamera::Setup(const Matrix& p)
  {
    if (p.size1() != 3 ||
        p.size2() != 4)
    {
      LOG(ERROR) << "Invalid camera matrix";
      throw Orthanc::OrthancException(Orthanc::ErrorCode_ParameterOutOfRange);
    }

    p_ = p;

    // M is the left 3x3 submatrix of "P"
    Matrix m = p;
    m.resize(3, 3);

    // p4 is the last column of "P"
    Vector p4(3);
    p4[0] = p(0, 3);
    p4[1] = p(1, 3);
    p4[2] = p(2, 3);

    // The RQ decomposition is explained on page 157
    LinearAlgebra::RQDecomposition3x3(k_, r_, m);
    ComputeMInverse();

    c_ = LinearAlgebra::Product(-minv_, p4);
  }


  FiniteProjectiveCamera::FiniteProjectiveCamera(const double k[9],
                                                 const double r[9],
                                                 const double c[3])
  {
    Matrix kk, rr;
    Vector cc;

    LinearAlgebra::FillMatrix(kk, 3, 3, k);
    LinearAlgebra::FillMatrix(rr, 3, 3, r);
    LinearAlgebra::FillVector(cc, 3, c);

    Setup(kk, rr, cc);
  }


  FiniteProjectiveCamera::FiniteProjectiveCamera(const double p[12])
  {
    Matrix pp;
    LinearAlgebra::FillMatrix(pp, 3, 4, p);
    Setup(pp);
  }


  Vector FiniteProjectiveCamera::GetRayDirection(double x,
                                                 double y) const
  {
    // This derives from Equation (6.14) on page 162, taking "mu = 1"
    // and noticing that "-inv(M)*p4" corresponds to the camera
    // center in finite projective cameras

    // The (x,y) coordinates on the image plane, as a homogeneous vector
    Vector xx(3);
    xx[0] = x;
    xx[1] = y;
    xx[2] = 1.0;

    return boost::numeric::ublas::prod(minv_, xx);
  }


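  // Convert an input 3D point to homogeneous coordinates (4 components),
  // so that it can be multiplied by the 3x4 camera matrix "P"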
  static Vector SetupApply(const Vector& v,
                           bool infinityAllowed)
  {
    if (v.size() == 3)
    {
      // Vector "v" is in non-homogeneous coordinates: add the homogeneous component
      Vector vv;
      LinearAlgebra::AssignVector(vv, v[0], v[1], v[2], 1.0);
      return vv;
    }
    else if (v.size() == 4)
    {
      // Vector "v" is already in homogeneous coordinates

      if (!infinityAllowed &&
          LinearAlgebra::IsCloseToZero(v[3]))
      {
        LOG(ERROR) << "Cannot apply a finite projective camera to a "
                   << "point at infinity with this method";
        throw Orthanc::OrthancException(Orthanc::ErrorCode_ParameterOutOfRange);
      }

      return v;
    }
    else
    {
      LOG(ERROR) << "The input vector must represent a point in 3D";
      throw Orthanc::OrthancException(Orthanc::ErrorCode_ParameterOutOfRange);
    }
  }


  void FiniteProjectiveCamera::ApplyFinite(double& x,
                                           double& y,
                                           const Vector& v) const
  {
    Vector p = boost::numeric::ublas::prod(p_, SetupApply(v, false));

    if (LinearAlgebra::IsCloseToZero(p[2]))
    {
      // Point at infinity: should not happen with a finite input point
      throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError);
    }
    else
    {
      x = p[0] / p[2];
      y = p[1] / p[2];
    }
  }


  Vector FiniteProjectiveCamera::ApplyGeneral(const Vector& v) const
  {
    return boost::numeric::ublas::prod(p_, SetupApply(v, true));
  }


  static Vector AddHomogeneousCoordinate(const Vector& p)
  {
    assert(p.size() == 3);
    return LinearAlgebra::CreateVector(p[0], p[1], p[2], 1);
  }


  FiniteProjectiveCamera::FiniteProjectiveCamera(const Vector& camera,
                                                 const Vector& principalPoint,
                                                 double angle,
                                                 unsigned int imageWidth,
                                                 unsigned int imageHeight,
                                                 double pixelSpacingX,
                                                 double pixelSpacingY)
  {
    if (camera.size() != 3 ||
        principalPoint.size() != 3 ||
        LinearAlgebra::IsCloseToZero(pixelSpacingX) ||
        LinearAlgebra::IsCloseToZero(pixelSpacingY))
    {
      throw Orthanc::OrthancException(Orthanc::ErrorCode_ParameterOutOfRange);
    }

    const double focal = boost::numeric::ublas::norm_2(camera - principalPoint);

    if (LinearAlgebra::IsCloseToZero(focal))
    {
      LOG(ERROR) << "Camera lies on the image plane";
      throw Orthanc::OrthancException(Orthanc::ErrorCode_ParameterOutOfRange);
    }

    Matrix a;
    GeometryToolbox::AlignVectorsWithRotation(a, camera - principalPoint,
                                              LinearAlgebra::CreateVector(0, 0, -1));

    Matrix r = LinearAlgebra::Product(GeometryToolbox::CreateRotationMatrixAlongZ(angle), a);

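    // Intrinsic matrix: the focal length is expressed in pixels along
    // each axis, and the principal point is mapped to the center of the image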
    Matrix k = LinearAlgebra::ZeroMatrix(3, 3);
    k(0,0) = focal / pixelSpacingX;
    k(1,1) = focal / pixelSpacingY;
    k(0,2) = static_cast<double>(imageWidth) / 2.0;
    k(1,2) = static_cast<double>(imageHeight) / 2.0;
    k(2,2) = 1;

    Setup(k, r, camera);

    {
      // Sanity checks
      Vector v1 = LinearAlgebra::Product(p_, AddHomogeneousCoordinate(camera));
      Vector v2 = LinearAlgebra::Product(p_, AddHomogeneousCoordinate(principalPoint));

      if (!LinearAlgebra::IsCloseToZero(v1[2]) ||  // Camera is mapped to singularity
          LinearAlgebra::IsCloseToZero(v2[2]))
      {
        throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError);
      }

      // The principal point must be mapped to the center of the image
      v2 /= v2[2];

      if (!LinearAlgebra::IsNear(v2[0], static_cast<double>(imageWidth) / 2.0) ||
          !LinearAlgebra::IsNear(v2[1], static_cast<double>(imageHeight) / 2.0))
      {
        throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError);
      }
    }
  }


  template <Orthanc::PixelFormat TargetFormat,
            Orthanc::PixelFormat SourceFormat,
            bool MIP>
  static void ApplyRaytracerInternal(Orthanc::ImageAccessor& target,
                                     const FiniteProjectiveCamera& camera,
                                     const ImageBuffer3D& source,
                                     const VolumeImageGeometry& geometry,
                                     VolumeProjection projection)
  {
    if (source.GetFormat() != SourceFormat ||
        target.GetFormat() != TargetFormat ||
        !std::numeric_limits<float>::is_iec559 ||
        sizeof(float) != 4)
    {
      throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError);
    }

    LOG(WARNING) << "Input volume size: " << source.GetWidth() << "x"
                 << source.GetHeight() << "x" << source.GetDepth();
    LOG(WARNING) << "Input pixel format: " << Orthanc::EnumerationToString(source.GetFormat());
    LOG(WARNING) << "Output image size: " << target.GetWidth() << "x" << target.GetHeight();
    LOG(WARNING) << "Output pixel format: " << Orthanc::EnumerationToString(target.GetFormat());

    const unsigned int slicesCount = geometry.GetProjectionDepth(projection);
    const OrthancStone::Vector pixelSpacing = geometry.GetVoxelDimensions(projection);
    const unsigned int targetWidth = target.GetWidth();
    const unsigned int targetHeight = target.GetHeight();

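    // "accumulator" sums the sampled values of each target pixel (or
    // stores their maximum in MIP mode), while "counter" keeps track of
    // the number of slices that contributed to each pixel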
    Orthanc::Image accumulator(Orthanc::PixelFormat_Float32, targetWidth, targetHeight, false);
    Orthanc::Image counter(Orthanc::PixelFormat_Grayscale16, targetWidth, targetHeight, false);
    Orthanc::ImageProcessing::Set(accumulator, 0);
    Orthanc::ImageProcessing::Set(counter, 0);

    typedef SubpixelReader<SourceFormat, ImageInterpolation_Nearest> SourceReader;

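    // For each slice of the volume, backproject the ray of every target
    // pixel, intersect it with the slice plane, and accumulate the
    // sampled voxel value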
    for (unsigned int z = 0; z < slicesCount; z++)
    {
      LOG(INFO) << "Applying raytracer on slice: " << z << "/" << slicesCount;

      OrthancStone::CoordinateSystem3D slice = geometry.GetProjectionSlice(projection, z);
      OrthancStone::ImageBuffer3D::SliceReader sliceReader(source, projection, static_cast<unsigned int>(z));

      SourceReader pixelReader(sliceReader.GetAccessor());

      for (unsigned int y = 0; y < targetHeight; y++)
      {
        float *qacc = reinterpret_cast<float*>(accumulator.GetRow(y));
        uint16_t *qcount = reinterpret_cast<uint16_t*>(counter.GetRow(y));

        for (unsigned int x = 0; x < targetWidth; x++)
        {
          // Backproject the ray originating from the center of the target pixel
          OrthancStone::Vector direction = camera.GetRayDirection(static_cast<double>(x + 0.5),
                                                                  static_cast<double>(y + 0.5));

          // Compute the 3D intersection of the ray with the slice plane
          OrthancStone::Vector p;
          if (slice.IntersectLine(p, camera.GetCenter(), direction))
          {
            // Compute the 2D coordinates of the intersection, in slice coordinates
            double ix, iy;
            slice.ProjectPoint(ix, iy, p);

            ix /= pixelSpacing[0];
            iy /= pixelSpacing[1];

            // Read and accumulate the value of the pixel
            float pixel;
            if (pixelReader.GetFloatValue(
                  pixel, static_cast<float>(ix), static_cast<float>(iy)))
            {
              if (MIP)
              {
                // MIP rendering
                if (*qcount == 0)
                {
                  (*qacc) = pixel;
                  (*qcount) = 1;
                }
                else if (pixel > *qacc)
                {
                  (*qacc) = pixel;
                }
              }
              else
              {
                // Mean intensity
                (*qacc) += pixel;
                (*qcount) ++;
              }
            }
          }

          qacc++;
          qcount++;
        }
      }
    }


    typedef Orthanc::PixelTraits<TargetFormat> TargetTraits;

    // "Flatten" the accumulator image to create the target image
    for (unsigned int y = 0; y < targetHeight; y++)
    {
      const float *qacc = reinterpret_cast<const float*>(accumulator.GetConstRow(y));
      const uint16_t *qcount = reinterpret_cast<const uint16_t*>(counter.GetConstRow(y));
      typename TargetTraits::PixelType *p = reinterpret_cast<typename TargetTraits::PixelType*>(target.GetRow(y));

      for (unsigned int x = 0; x < targetWidth; x++)
      {
        if (*qcount == 0)
        {
          TargetTraits::SetZero(*p);
        }
        else
        {
          TargetTraits::FloatToPixel(*p, *qacc / static_cast<float>(*qcount));
        }

        p++;
        qacc++;
        qcount++;
      }
    }
  }


  Orthanc::ImageAccessor*
  FiniteProjectiveCamera::ApplyRaytracer(const ImageBuffer3D& source,
                                         const VolumeImageGeometry& geometry,
                                         Orthanc::PixelFormat targetFormat,
                                         unsigned int targetWidth,
                                         unsigned int targetHeight,
                                         bool mip) const
  {
    // TODO - We consider the axial projection of the volume, but we
    // should choose the projection that is the "most perpendicular"
    // to the line joining the camera center and the principal point
    const VolumeProjection projection = VolumeProjection_Axial;

    std::unique_ptr<Orthanc::ImageAccessor> target
      (new Orthanc::Image(targetFormat, targetWidth, targetHeight, false));

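    // Dispatch to the proper template instantiation; only the
    // Grayscale16 to Grayscale16 combination is implemented so far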
    if (targetFormat == Orthanc::PixelFormat_Grayscale16 &&
        source.GetFormat() == Orthanc::PixelFormat_Grayscale16 && mip)
    {
      ApplyRaytracerInternal<Orthanc::PixelFormat_Grayscale16,
                             Orthanc::PixelFormat_Grayscale16, true>
        (*target, *this, source, geometry, projection);
    }
    else if (targetFormat == Orthanc::PixelFormat_Grayscale16 &&
             source.GetFormat() == Orthanc::PixelFormat_Grayscale16 && !mip)
    {
      ApplyRaytracerInternal<Orthanc::PixelFormat_Grayscale16,
                             Orthanc::PixelFormat_Grayscale16, false>
        (*target, *this, source, geometry, projection);
    }
    else
    {
      throw Orthanc::OrthancException(Orthanc::ErrorCode_NotImplemented);
    }

    return target.release();
  }
}