Point Cloud Library (PCL)  1.9.1-dev
texture_mapping.hpp
1 /*
2  * Software License Agreement (BSD License)
3  *
4  * Copyright (c) 2010, Willow Garage, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above
14  * copyright notice, this list of conditions and the following
15  * disclaimer in the documentation and/or other materials provided
16  * with the distribution.
17  * * Neither the name of Willow Garage, Inc. nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $Id$
35  *
36  */
37 
38 #ifndef PCL_SURFACE_IMPL_TEXTURE_MAPPING_HPP_
39 #define PCL_SURFACE_IMPL_TEXTURE_MAPPING_HPP_
40 
#include <pcl/common/distances.h>
#include <pcl/surface/texture_mapping.h>

#include <algorithm>
#include <cmath>
#include <limits>
43 
44 ///////////////////////////////////////////////////////////////////////////////////////////////
45 template<typename PointInT> std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> >
47  const Eigen::Vector3f &p1,
48  const Eigen::Vector3f &p2,
49  const Eigen::Vector3f &p3)
50 {
51  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > tex_coordinates;
52  // process for each face
53  Eigen::Vector3f p1p2 (p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]);
54  Eigen::Vector3f p1p3 (p3[0] - p1[0], p3[1] - p1[1], p3[2] - p1[2]);
55  Eigen::Vector3f p2p3 (p3[0] - p2[0], p3[1] - p2[1], p3[2] - p2[2]);
56 
57  // Normalize
58  p1p2 = p1p2 / std::sqrt (p1p2.dot (p1p2));
59  p1p3 = p1p3 / std::sqrt (p1p3.dot (p1p3));
60  p2p3 = p2p3 / std::sqrt (p2p3.dot (p2p3));
61 
62  // compute vector normal of a face
63  Eigen::Vector3f f_normal = p1p2.cross (p1p3);
64  f_normal = f_normal / std::sqrt (f_normal.dot (f_normal));
65 
66  // project vector field onto the face: vector v1_projected = v1 - Dot(v1, n) * n;
67  Eigen::Vector3f f_vector_field = vector_field_ - vector_field_.dot (f_normal) * f_normal;
68 
69  // Normalize
70  f_vector_field = f_vector_field / std::sqrt (f_vector_field.dot (f_vector_field));
71 
72  // texture coordinates
73  Eigen::Vector2f tp1, tp2, tp3;
74 
75  double alpha = std::acos (f_vector_field.dot (p1p2));
76 
77  // distance between 3 vertices of triangles
78  double e1 = (p2 - p3).norm () / f_;
79  double e2 = (p1 - p3).norm () / f_;
80  double e3 = (p1 - p2).norm () / f_;
81 
82  // initialize
83  tp1[0] = 0.0;
84  tp1[1] = 0.0;
85 
86  tp2[0] = static_cast<float> (e3);
87  tp2[1] = 0.0;
88 
89  // determine texture coordinate tp3;
90  double cos_p1 = (e2 * e2 + e3 * e3 - e1 * e1) / (2 * e2 * e3);
91  double sin_p1 = sqrt (1 - (cos_p1 * cos_p1));
92 
93  tp3[0] = static_cast<float> (cos_p1 * e2);
94  tp3[1] = static_cast<float> (sin_p1 * e2);
95 
96  // rotating by alpha (angle between V and pp1 & pp2)
97  Eigen::Vector2f r_tp2, r_tp3;
98  r_tp2[0] = static_cast<float> (tp2[0] * std::cos (alpha) - tp2[1] * std::sin (alpha));
99  r_tp2[1] = static_cast<float> (tp2[0] * std::sin (alpha) + tp2[1] * std::cos (alpha));
100 
101  r_tp3[0] = static_cast<float> (tp3[0] * std::cos (alpha) - tp3[1] * std::sin (alpha));
102  r_tp3[1] = static_cast<float> (tp3[0] * std::sin (alpha) + tp3[1] * std::cos (alpha));
103 
104  // shifting
105  tp1[0] = tp1[0];
106  tp2[0] = r_tp2[0];
107  tp3[0] = r_tp3[0];
108  tp1[1] = tp1[1];
109  tp2[1] = r_tp2[1];
110  tp3[1] = r_tp3[1];
111 
112  float min_x = tp1[0];
113  float min_y = tp1[1];
114  if (min_x > tp2[0])
115  min_x = tp2[0];
116  if (min_x > tp3[0])
117  min_x = tp3[0];
118  if (min_y > tp2[1])
119  min_y = tp2[1];
120  if (min_y > tp3[1])
121  min_y = tp3[1];
122 
123  if (min_x < 0)
124  {
125  tp1[0] = tp1[0] - min_x;
126  tp2[0] = tp2[0] - min_x;
127  tp3[0] = tp3[0] - min_x;
128  }
129  if (min_y < 0)
130  {
131  tp1[1] = tp1[1] - min_y;
132  tp2[1] = tp2[1] - min_y;
133  tp3[1] = tp3[1] - min_y;
134  }
135 
136  tex_coordinates.push_back (tp1);
137  tex_coordinates.push_back (tp2);
138  tex_coordinates.push_back (tp3);
139  return (tex_coordinates);
140 }
141 
142 ///////////////////////////////////////////////////////////////////////////////////////////////
143 template<typename PointInT> void
145 {
146  // mesh information
147  int nr_points = tex_mesh.cloud.width * tex_mesh.cloud.height;
148  int point_size = static_cast<int> (tex_mesh.cloud.data.size ()) / nr_points;
149 
150  // temporary PointXYZ
151  float x, y, z;
152  // temporary face
153  Eigen::Vector3f facet[3];
154 
155  // texture coordinates for each mesh
156  std::vector<std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > >texture_map;
157 
158  for (size_t m = 0; m < tex_mesh.tex_polygons.size (); ++m)
159  {
160  // texture coordinates for each mesh
161  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
162 
163  // processing for each face
164  for (size_t i = 0; i < tex_mesh.tex_polygons[m].size (); ++i)
165  {
166  size_t idx;
167 
168  // get facet information
169  for (size_t j = 0; j < tex_mesh.tex_polygons[m][i].vertices.size (); ++j)
170  {
171  idx = tex_mesh.tex_polygons[m][i].vertices[j];
172  memcpy (&x, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[0].offset], sizeof(float));
173  memcpy (&y, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[1].offset], sizeof(float));
174  memcpy (&z, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[2].offset], sizeof(float));
175  facet[j][0] = x;
176  facet[j][1] = y;
177  facet[j][2] = z;
178  }
179 
180  // get texture coordinates of each face
181  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > tex_coordinates = mapTexture2Face (facet[0], facet[1], facet[2]);
182  for (size_t n = 0; n < tex_coordinates.size (); ++n)
183  texture_map_tmp.push_back (tex_coordinates[n]);
184  }// end faces
185 
186  // texture materials
187  std::stringstream tex_name;
188  tex_name << "material_" << m;
189  tex_name >> tex_material_.tex_name;
190  tex_material_.tex_file = tex_files_[m];
191  tex_mesh.tex_materials.push_back (tex_material_);
192 
193  // texture coordinates
194  tex_mesh.tex_coordinates.push_back (texture_map_tmp);
195  }// end meshes
196 }
197 
198 ///////////////////////////////////////////////////////////////////////////////////////////////
199 template<typename PointInT> void
201 {
202  // mesh information
203  int nr_points = tex_mesh.cloud.width * tex_mesh.cloud.height;
204  int point_size = static_cast<int> (tex_mesh.cloud.data.size ()) / nr_points;
205 
206  float x_lowest = 100000;
207  float x_highest = 0;
208  float y_lowest = 100000;
209  //float y_highest = 0 ;
210  float z_lowest = 100000;
211  float z_highest = 0;
212  float x_, y_, z_;
213 
214  for (int i = 0; i < nr_points; ++i)
215  {
216  memcpy (&x_, &tex_mesh.cloud.data[i * point_size + tex_mesh.cloud.fields[0].offset], sizeof(float));
217  memcpy (&y_, &tex_mesh.cloud.data[i * point_size + tex_mesh.cloud.fields[1].offset], sizeof(float));
218  memcpy (&z_, &tex_mesh.cloud.data[i * point_size + tex_mesh.cloud.fields[2].offset], sizeof(float));
219  // x
220  if (x_ <= x_lowest)
221  x_lowest = x_;
222  if (x_ > x_lowest)
223  x_highest = x_;
224 
225  // y
226  if (y_ <= y_lowest)
227  y_lowest = y_;
228  //if (y_ > y_lowest) y_highest = y_;
229 
230  // z
231  if (z_ <= z_lowest)
232  z_lowest = z_;
233  if (z_ > z_lowest)
234  z_highest = z_;
235  }
236  // x
237  float x_range = (x_lowest - x_highest) * -1;
238  float x_offset = 0 - x_lowest;
239  // x
240  // float y_range = (y_lowest - y_highest)*-1;
241  // float y_offset = 0 - y_lowest;
242  // z
243  float z_range = (z_lowest - z_highest) * -1;
244  float z_offset = 0 - z_lowest;
245 
246  // texture coordinates for each mesh
247  std::vector<std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > >texture_map;
248 
249  for (size_t m = 0; m < tex_mesh.tex_polygons.size (); ++m)
250  {
251  // texture coordinates for each mesh
252  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
253 
254  // processing for each face
255  for (size_t i = 0; i < tex_mesh.tex_polygons[m].size (); ++i)
256  {
257  size_t idx;
258  Eigen::Vector2f tmp_VT;
259  for (size_t j = 0; j < tex_mesh.tex_polygons[m][i].vertices.size (); ++j)
260  {
261  idx = tex_mesh.tex_polygons[m][i].vertices[j];
262  memcpy (&x_, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[0].offset], sizeof(float));
263  memcpy (&y_, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[1].offset], sizeof(float));
264  memcpy (&z_, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[2].offset], sizeof(float));
265 
266  // calculate uv coordinates
267  tmp_VT[0] = (x_ + x_offset) / x_range;
268  tmp_VT[1] = (z_ + z_offset) / z_range;
269  texture_map_tmp.push_back (tmp_VT);
270  }
271  }// end faces
272 
273  // texture materials
274  std::stringstream tex_name;
275  tex_name << "material_" << m;
276  tex_name >> tex_material_.tex_name;
277  tex_material_.tex_file = tex_files_[m];
278  tex_mesh.tex_materials.push_back (tex_material_);
279 
280  // texture coordinates
281  tex_mesh.tex_coordinates.push_back (texture_map_tmp);
282  }// end meshes
283 }
284 
285 ///////////////////////////////////////////////////////////////////////////////////////////////
286 template<typename PointInT> void
288 {
289 
290  if (tex_mesh.tex_polygons.size () != cams.size () + 1)
291  {
292  PCL_ERROR ("The mesh should be divided into nbCamera+1 sub-meshes.\n");
293  PCL_ERROR ("You provided %d cameras and a mesh containing %d sub-meshes.\n", cams.size (), tex_mesh.tex_polygons.size ());
294  return;
295  }
296 
297  PCL_INFO ("You provided %d cameras and a mesh containing %d sub-meshes.\n", cams.size (), tex_mesh.tex_polygons.size ());
298 
299  typename pcl::PointCloud<PointInT>::Ptr originalCloud (new pcl::PointCloud<PointInT>);
300  typename pcl::PointCloud<PointInT>::Ptr camera_transformed_cloud (new pcl::PointCloud<PointInT>);
301 
302  // convert mesh's cloud to pcl format for ease
303  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *originalCloud);
304 
305  // texture coordinates for each mesh
306  std::vector<std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > > texture_map;
307 
308  for (size_t m = 0; m < cams.size (); ++m)
309  {
310  // get current camera parameters
311  Camera current_cam = cams[m];
312 
313  // get camera transform
314  Eigen::Affine3f cam_trans = current_cam.pose;
315 
316  // transform cloud into current camera frame
317  pcl::transformPointCloud (*originalCloud, *camera_transformed_cloud, cam_trans.inverse ());
318 
319  // vector of texture coordinates for each face
320  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
321 
322  // processing each face visible by this camera
323  PointInT pt;
324  size_t idx;
325  for (size_t i = 0; i < tex_mesh.tex_polygons[m].size (); ++i)
326  {
327  Eigen::Vector2f tmp_VT;
328  // for each point of this face
329  for (size_t j = 0; j < tex_mesh.tex_polygons[m][i].vertices.size (); ++j)
330  {
331  // get point
332  idx = tex_mesh.tex_polygons[m][i].vertices[j];
333  pt = camera_transformed_cloud->points[idx];
334 
335  // compute UV coordinates for this point
336  getPointUVCoordinates (pt, current_cam, tmp_VT);
337  texture_map_tmp.push_back (tmp_VT);
338 
339  }// end points
340  }// end faces
341 
342  // texture materials
343  std::stringstream tex_name;
344  tex_name << "material_" << m;
345  tex_name >> tex_material_.tex_name;
346  tex_material_.tex_file = current_cam.texture_file;
347  tex_mesh.tex_materials.push_back (tex_material_);
348 
349  // texture coordinates
350  tex_mesh.tex_coordinates.push_back (texture_map_tmp);
351  }// end cameras
352 
353  // push on extra empty UV map (for unseen faces) so that obj writer does not crash!
354  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
355  for (size_t i = 0; i < tex_mesh.tex_polygons[cams.size ()].size (); ++i)
356  for (size_t j = 0; j < tex_mesh.tex_polygons[cams.size ()][i].vertices.size (); ++j)
357  {
358  Eigen::Vector2f tmp_VT;
359  tmp_VT[0] = -1;
360  tmp_VT[1] = -1;
361  texture_map_tmp.push_back (tmp_VT);
362  }
363 
364  tex_mesh.tex_coordinates.push_back (texture_map_tmp);
365 
366  // push on an extra dummy material for the same reason
367  std::stringstream tex_name;
368  tex_name << "material_" << cams.size ();
369  tex_name >> tex_material_.tex_name;
370  tex_material_.tex_file = "occluded.jpg";
371  tex_mesh.tex_materials.push_back (tex_material_);
372 
373 }
374 
375 ///////////////////////////////////////////////////////////////////////////////////////////////
376 template<typename PointInT> bool
378 {
379  Eigen::Vector3f direction;
380  direction (0) = pt.x;
381  direction (1) = pt.y;
382  direction (2) = pt.z;
383 
384  std::vector<int> indices;
385 
386  PointCloudConstPtr cloud (new PointCloud());
387  cloud = octree->getInputCloud();
388 
389  double distance_threshold = octree->getResolution();
390 
391  // raytrace
392  octree->getIntersectedVoxelIndices(direction, -direction, indices);
393 
394  int nbocc = static_cast<int> (indices.size ());
395  for (size_t j = 0; j < indices.size (); j++)
396  {
397  // if intersected point is on the over side of the camera
398  if (pt.z * cloud->points[indices[j]].z < 0)
399  {
400  nbocc--;
401  continue;
402  }
403 
404  if (fabs (cloud->points[indices[j]].z - pt.z) <= distance_threshold)
405  {
406  // points are very close to each-other, we do not consider the occlusion
407  nbocc--;
408  }
409  }
410 
411  if (nbocc == 0)
412  return (false);
413  else
414  return (true);
415 }
416 
417 ///////////////////////////////////////////////////////////////////////////////////////////////
418 template<typename PointInT> void
420  PointCloudPtr &filtered_cloud,
421  const double octree_voxel_size, std::vector<int> &visible_indices,
422  std::vector<int> &occluded_indices)
423 {
424  // variable used to filter occluded points by depth
425  double maxDeltaZ = octree_voxel_size;
426 
427  // create an octree to perform rayTracing
428  OctreePtr octree (new Octree (octree_voxel_size));
429  // create octree structure
430  octree->setInputCloud (input_cloud);
431  // update bounding box automatically
432  octree->defineBoundingBox ();
433  // add points in the tree
434  octree->addPointsFromInputCloud ();
435 
436  visible_indices.clear ();
437 
438  // for each point of the cloud, raycast toward camera and check intersected voxels.
439  Eigen::Vector3f direction;
440  std::vector<int> indices;
441  for (size_t i = 0; i < input_cloud->points.size (); ++i)
442  {
443  direction (0) = input_cloud->points[i].x;
444  direction (1) = input_cloud->points[i].y;
445  direction (2) = input_cloud->points[i].z;
446 
447  // if point is not occluded
448  octree->getIntersectedVoxelIndices (direction, -direction, indices);
449 
450  int nbocc = static_cast<int> (indices.size ());
451  for (size_t j = 0; j < indices.size (); j++)
452  {
453  // if intersected point is on the over side of the camera
454  if (input_cloud->points[i].z * input_cloud->points[indices[j]].z < 0)
455  {
456  nbocc--;
457  continue;
458  }
459 
460  if (fabs (input_cloud->points[indices[j]].z - input_cloud->points[i].z) <= maxDeltaZ)
461  {
462  // points are very close to each-other, we do not consider the occlusion
463  nbocc--;
464  }
465  }
466 
467  if (nbocc == 0)
468  {
469  // point is added in the filtered mesh
470  filtered_cloud->points.push_back (input_cloud->points[i]);
471  visible_indices.push_back (static_cast<int> (i));
472  }
473  else
474  {
475  occluded_indices.push_back (static_cast<int> (i));
476  }
477  }
478 
479 }
480 
481 ///////////////////////////////////////////////////////////////////////////////////////////////
482 template<typename PointInT> void
483 pcl::TextureMapping<PointInT>::removeOccludedPoints (const pcl::TextureMesh &tex_mesh, pcl::TextureMesh &cleaned_mesh, const double octree_voxel_size)
484 {
485  // copy mesh
486  cleaned_mesh = tex_mesh;
487 
489  typename pcl::PointCloud<PointInT>::Ptr filtered_cloud (new pcl::PointCloud<PointInT>);
490 
491  // load points into a PCL format
492  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *cloud);
493 
494  std::vector<int> visible, occluded;
495  removeOccludedPoints (cloud, filtered_cloud, octree_voxel_size, visible, occluded);
496 
497  // Now that we know which points are visible, let's iterate over each face.
498  // if the face has one invisible point => out!
499  for (size_t polygons = 0; polygons < cleaned_mesh.tex_polygons.size (); ++polygons)
500  {
501  // remove all faces from cleaned mesh
502  cleaned_mesh.tex_polygons[polygons].clear ();
503  // iterate over faces
504  for (size_t faces = 0; faces < tex_mesh.tex_polygons[polygons].size (); ++faces)
505  {
506  // check if all the face's points are visible
507  bool faceIsVisible = true;
508  std::vector<int>::iterator it;
509 
510  // iterate over face's vertex
511  for (size_t points = 0; points < tex_mesh.tex_polygons[polygons][faces].vertices.size (); ++points)
512  {
513  it = find (occluded.begin (), occluded.end (), tex_mesh.tex_polygons[polygons][faces].vertices[points]);
514 
515  if (it == occluded.end ())
516  {
517  // point is not in the occluded vector
518  // PCL_INFO (" VISIBLE!\n");
519  }
520  else
521  {
522  // point was occluded
523  // PCL_INFO(" OCCLUDED!\n");
524  faceIsVisible = false;
525  }
526  }
527 
528  if (faceIsVisible)
529  {
530  cleaned_mesh.tex_polygons[polygons].push_back (tex_mesh.tex_polygons[polygons][faces]);
531  }
532 
533  }
534  }
535 }
536 
537 ///////////////////////////////////////////////////////////////////////////////////////////////
538 template<typename PointInT> void
540  const double octree_voxel_size)
541 {
542  PointCloudPtr cloud (new PointCloud);
543 
544  // load points into a PCL format
545  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *cloud);
546 
547  std::vector<int> visible, occluded;
548  removeOccludedPoints (cloud, filtered_cloud, octree_voxel_size, visible, occluded);
549 
550 }
551 
552 ///////////////////////////////////////////////////////////////////////////////////////////////
553 template<typename PointInT> int
555  const pcl::texture_mapping::CameraVector &cameras, const double octree_voxel_size,
556  PointCloud &visible_pts)
557 {
558  if (tex_mesh.tex_polygons.size () != 1)
559  {
560  PCL_ERROR ("The mesh must contain only 1 sub-mesh!\n");
561  return (-1);
562  }
563 
564  if (cameras.size () == 0)
565  {
566  PCL_ERROR ("Must provide at least one camera info!\n");
567  return (-1);
568  }
569 
570  // copy mesh
571  sorted_mesh = tex_mesh;
572  // clear polygons from cleaned_mesh
573  sorted_mesh.tex_polygons.clear ();
574 
575  typename pcl::PointCloud<PointInT>::Ptr original_cloud (new pcl::PointCloud<PointInT>);
576  typename pcl::PointCloud<PointInT>::Ptr transformed_cloud (new pcl::PointCloud<PointInT>);
577  typename pcl::PointCloud<PointInT>::Ptr filtered_cloud (new pcl::PointCloud<PointInT>);
578 
579  // load points into a PCL format
580  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *original_cloud);
581 
582  // for each camera
583  for (size_t cam = 0; cam < cameras.size (); ++cam)
584  {
585  // get camera pose as transform
586  Eigen::Affine3f cam_trans = cameras[cam].pose;
587 
588  // transform original cloud in camera coordinates
589  pcl::transformPointCloud (*original_cloud, *transformed_cloud, cam_trans.inverse ());
590 
591  // find occlusions on transformed cloud
592  std::vector<int> visible, occluded;
593  removeOccludedPoints (transformed_cloud, filtered_cloud, octree_voxel_size, visible, occluded);
594  visible_pts = *filtered_cloud;
595 
596  // find visible faces => add them to polygon N for camera N
597  // add polygon group for current camera in clean
598  std::vector<pcl::Vertices> visibleFaces_currentCam;
599  // iterate over the faces of the current mesh
600  for (size_t faces = 0; faces < tex_mesh.tex_polygons[0].size (); ++faces)
601  {
602  // check if all the face's points are visible
603  bool faceIsVisible = true;
604  std::vector<int>::iterator it;
605 
606  // iterate over face's vertex
607  for (size_t current_pt_indice = 0; faceIsVisible && current_pt_indice < tex_mesh.tex_polygons[0][faces].vertices.size (); ++current_pt_indice)
608  {
609  // TODO this is far too long! Better create an helper function that raycasts here.
610  it = find (occluded.begin (), occluded.end (), tex_mesh.tex_polygons[0][faces].vertices[current_pt_indice]);
611 
612  if (it == occluded.end ())
613  {
614  // point is not occluded
615  // does it land on the camera's image plane?
616  PointInT pt = transformed_cloud->points[tex_mesh.tex_polygons[0][faces].vertices[current_pt_indice]];
617  Eigen::Vector2f dummy_UV;
618  if (!getPointUVCoordinates (pt, cameras[cam], dummy_UV))
619  {
620  // point is not visible by the camera
621  faceIsVisible = false;
622  }
623  }
624  else
625  {
626  faceIsVisible = false;
627  }
628  }
629 
630  if (faceIsVisible)
631  {
632  // push current visible face into the sorted mesh
633  visibleFaces_currentCam.push_back (tex_mesh.tex_polygons[0][faces]);
634  // remove it from the unsorted mesh
635  tex_mesh.tex_polygons[0].erase (tex_mesh.tex_polygons[0].begin () + faces);
636  faces--;
637  }
638 
639  }
640  sorted_mesh.tex_polygons.push_back (visibleFaces_currentCam);
641  }
642 
643  // we should only have occluded and non-visible faces left in tex_mesh.tex_polygons[0]
644  // we need to add them as an extra polygon in the sorted mesh
645  sorted_mesh.tex_polygons.push_back (tex_mesh.tex_polygons[0]);
646  return (0);
647 }
648 
649 ///////////////////////////////////////////////////////////////////////////////////////////////
650 template<typename PointInT> void
653  const double octree_voxel_size, const bool show_nb_occlusions,
654  const int max_occlusions)
655  {
656  // variable used to filter occluded points by depth
657  double maxDeltaZ = octree_voxel_size * 2.0;
658 
659  // create an octree to perform rayTracing
661  octree = new pcl::octree::OctreePointCloudSearch<PointInT> (octree_voxel_size);
662  // create octree structure
663  octree->setInputCloud (input_cloud);
664  // update bounding box automatically
665  octree->defineBoundingBox ();
666  // add points in the tree
667  octree->addPointsFromInputCloud ();
668 
669  // ray direction
670  Eigen::Vector3f direction;
671 
672  std::vector<int> indices;
673  // point from where we ray-trace
674  pcl::PointXYZI pt;
675 
676  std::vector<double> zDist;
677  std::vector<double> ptDist;
678  // for each point of the cloud, ray-trace toward the camera and check intersected voxels.
679  for (size_t i = 0; i < input_cloud->points.size (); ++i)
680  {
681  direction (0) = input_cloud->points[i].x;
682  pt.x = input_cloud->points[i].x;
683  direction (1) = input_cloud->points[i].y;
684  pt.y = input_cloud->points[i].y;
685  direction (2) = input_cloud->points[i].z;
686  pt.z = input_cloud->points[i].z;
687 
688  // get number of occlusions for that point
689  indices.clear ();
690  int nbocc = octree->getIntersectedVoxelIndices (direction, -direction, indices);
691 
692  nbocc = static_cast<int> (indices.size ());
693 
694  // TODO need to clean this up and find tricks to get remove aliasaing effect on planes
695  for (size_t j = 0; j < indices.size (); j++)
696  {
697  // if intersected point is on the over side of the camera
698  if (pt.z * input_cloud->points[indices[j]].z < 0)
699  {
700  nbocc--;
701  }
702  else if (fabs (input_cloud->points[indices[j]].z - pt.z) <= maxDeltaZ)
703  {
704  // points are very close to each-other, we do not consider the occlusion
705  nbocc--;
706  }
707  else
708  {
709  zDist.push_back (fabs (input_cloud->points[indices[j]].z - pt.z));
710  ptDist.push_back (pcl::euclideanDistance (input_cloud->points[indices[j]], pt));
711  }
712  }
713 
714  if (show_nb_occlusions)
715  (nbocc <= max_occlusions) ? (pt.intensity = static_cast<float> (nbocc)) : (pt.intensity = static_cast<float> (max_occlusions));
716  else
717  (nbocc == 0) ? (pt.intensity = 0) : (pt.intensity = 1);
718 
719  colored_cloud->points.push_back (pt);
720  }
721 
722  if (zDist.size () >= 2)
723  {
724  std::sort (zDist.begin (), zDist.end ());
725  std::sort (ptDist.begin (), ptDist.end ());
726  }
727 }
728 
729 ///////////////////////////////////////////////////////////////////////////////////////////////
730 template<typename PointInT> void
732  double octree_voxel_size, bool show_nb_occlusions, int max_occlusions)
733 {
734  // load points into a PCL format
736  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *cloud);
737 
738  showOcclusions (cloud, colored_cloud, octree_voxel_size, show_nb_occlusions, max_occlusions);
739 }
740 
741 ///////////////////////////////////////////////////////////////////////////////////////////////
742 template<typename PointInT> void
744 {
745 
746  if (mesh.tex_polygons.size () != 1)
747  return;
748 
750 
751  pcl::fromPCLPointCloud2 (mesh.cloud, *mesh_cloud);
752 
753  std::vector<pcl::Vertices> faces;
754 
755  for (int current_cam = 0; current_cam < static_cast<int> (cameras.size ()); ++current_cam)
756  {
757  PCL_INFO ("Processing camera %d of %d.\n", current_cam+1, cameras.size ());
758 
759  // transform mesh into camera's frame
760  typename pcl::PointCloud<PointInT>::Ptr camera_cloud (new pcl::PointCloud<PointInT>);
761  pcl::transformPointCloud (*mesh_cloud, *camera_cloud, cameras[current_cam].pose.inverse ());
762 
763  // CREATE UV MAP FOR CURRENT FACES
765  std::vector<pcl::Vertices>::iterator current_face;
766  std::vector<bool> visibility;
767  visibility.resize (mesh.tex_polygons[current_cam].size ());
768  std::vector<UvIndex> indexes_uv_to_points;
769  // for each current face
770 
771  //TODO change this
772  pcl::PointXY nan_point;
773  nan_point.x = std::numeric_limits<float>::quiet_NaN ();
774  nan_point.y = std::numeric_limits<float>::quiet_NaN ();
775  UvIndex u_null;
776  u_null.idx_cloud = -1;
777  u_null.idx_face = -1;
778 
779  int cpt_invisible=0;
780  for (int idx_face = 0; idx_face < static_cast<int> (mesh.tex_polygons[current_cam].size ()); ++idx_face)
781  {
782  //project each vertice, if one is out of view, stop
783  pcl::PointXY uv_coord1;
784  pcl::PointXY uv_coord2;
785  pcl::PointXY uv_coord3;
786 
787  if (isFaceProjected (cameras[current_cam],
788  camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[0]],
789  camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[1]],
790  camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[2]],
791  uv_coord1,
792  uv_coord2,
793  uv_coord3))
794  {
795  // face is in the camera's FOV
796 
797  // add UV coordinates
798  projections->points.push_back (uv_coord1);
799  projections->points.push_back (uv_coord2);
800  projections->points.push_back (uv_coord3);
801 
802  // remember corresponding face
803  UvIndex u1, u2, u3;
804  u1.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[0];
805  u2.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[1];
806  u3.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[2];
807  u1.idx_face = idx_face; u2.idx_face = idx_face; u3.idx_face = idx_face;
808  indexes_uv_to_points.push_back (u1);
809  indexes_uv_to_points.push_back (u2);
810  indexes_uv_to_points.push_back (u3);
811 
812  //keep track of visibility
813  visibility[idx_face] = true;
814  }
815  else
816  {
817  projections->points.push_back (nan_point);
818  projections->points.push_back (nan_point);
819  projections->points.push_back (nan_point);
820  indexes_uv_to_points.push_back (u_null);
821  indexes_uv_to_points.push_back (u_null);
822  indexes_uv_to_points.push_back (u_null);
823  //keep track of visibility
824  visibility[idx_face] = false;
825  cpt_invisible++;
826  }
827  }
828 
829  // projections contains all UV points of the current faces
830  // indexes_uv_to_points links a uv point to its point in the camera cloud
831  // visibility contains tells if a face was in the camera FOV (false = skip)
832 
833  // TODO handle case were no face could be projected
834  if (visibility.size () - cpt_invisible !=0)
835  {
836  //create kdtree
838  kdtree.setInputCloud (projections);
839 
840  std::vector<int> idxNeighbors;
841  std::vector<float> neighborsSquaredDistance;
842  // af first (idx_pcan < current_cam), check if some of the faces attached to previous cameras occlude the current faces
843  // then (idx_pcam == current_cam), check for self occlusions. At this stage, we skip faces that were already marked as occluded
844  cpt_invisible = 0;
845  for (int idx_pcam = 0 ; idx_pcam <= current_cam ; ++idx_pcam)
846  {
847  // project all faces
848  for (int idx_face = 0; idx_face < static_cast<int> (mesh.tex_polygons[idx_pcam].size ()); ++idx_face)
849  {
850 
851  if (idx_pcam == current_cam && !visibility[idx_face])
852  {
853  // we are now checking for self occlusions within the current faces
854  // the current face was already declared as occluded.
855  // therefore, it cannot occlude another face anymore => we skip it
856  continue;
857  }
858 
859  // project each vertice, if one is out of view, stop
860  pcl::PointXY uv_coord1;
861  pcl::PointXY uv_coord2;
862  pcl::PointXY uv_coord3;
863 
864  if (isFaceProjected (cameras[current_cam],
865  camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]],
866  camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]],
867  camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]],
868  uv_coord1,
869  uv_coord2,
870  uv_coord3))
871  {
872  // face is in the camera's FOV
873  //get its circumsribed circle
874  double radius;
875  pcl::PointXY center;
876  // getTriangleCircumcenterAndSize (uv_coord1, uv_coord2, uv_coord3, center, radius);
877  getTriangleCircumcscribedCircleCentroid(uv_coord1, uv_coord2, uv_coord3, center, radius); // this function yields faster results than getTriangleCircumcenterAndSize
878 
879  // get points inside circ.circle
880  if (kdtree.radiusSearch (center, radius, idxNeighbors, neighborsSquaredDistance) > 0 )
881  {
882  // for each neighbor
883  for (size_t i = 0; i < idxNeighbors.size (); ++i)
884  {
885  if (std::max (camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]].z,
886  std::max (camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]].z,
887  camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]].z))
888  < camera_cloud->points[indexes_uv_to_points[idxNeighbors[i]].idx_cloud].z)
889  {
890  // neighbor is farther than all the face's points. Check if it falls into the triangle
891  if (checkPointInsideTriangle(uv_coord1, uv_coord2, uv_coord3, projections->points[idxNeighbors[i]]))
892  {
893  // current neighbor is inside triangle and is closer => the corresponding face
894  visibility[indexes_uv_to_points[idxNeighbors[i]].idx_face] = false;
895  cpt_invisible++;
896  //TODO we could remove the projections of this face from the kd-tree cloud, but I fond it slower, and I need the point to keep ordered to querry UV coordinates later
897  }
898  }
899  }
900  }
901  }
902  }
903  }
904  }
905 
906  // now, visibility is true for each face that belongs to the current camera
907  // if a face is not visible, we push it into the next one.
908 
909  if (static_cast<int> (mesh.tex_coordinates.size ()) <= current_cam)
910  {
911  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > dummy_container;
912  mesh.tex_coordinates.push_back (dummy_container);
913  }
914  mesh.tex_coordinates[current_cam].resize (3 * visibility.size ());
915 
916  std::vector<pcl::Vertices> occluded_faces;
917  occluded_faces.resize (visibility.size ());
918  std::vector<pcl::Vertices> visible_faces;
919  visible_faces.resize (visibility.size ());
920 
921  int cpt_occluded_faces = 0;
922  int cpt_visible_faces = 0;
923 
924  for (size_t idx_face = 0 ; idx_face < visibility.size () ; ++idx_face)
925  {
926  if (visibility[idx_face])
927  {
928  // face is visible by the current camera copy UV coordinates
929  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](0) = projections->points[idx_face*3].x;
930  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](1) = projections->points[idx_face*3].y;
931 
932  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](0) = projections->points[idx_face*3 + 1].x;
933  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](1) = projections->points[idx_face*3 + 1].y;
934 
935  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](0) = projections->points[idx_face*3 + 2].x;
936  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](1) = projections->points[idx_face*3 + 2].y;
937 
938  visible_faces[cpt_visible_faces] = mesh.tex_polygons[current_cam][idx_face];
939 
940  cpt_visible_faces++;
941  }
942  else
943  {
944  // face is occluded copy face into temp vector
945  occluded_faces[cpt_occluded_faces] = mesh.tex_polygons[current_cam][idx_face];
946  cpt_occluded_faces++;
947  }
948  }
949  mesh.tex_coordinates[current_cam].resize (cpt_visible_faces*3);
950 
951  occluded_faces.resize (cpt_occluded_faces);
952  mesh.tex_polygons.push_back (occluded_faces);
953 
954  visible_faces.resize (cpt_visible_faces);
955  mesh.tex_polygons[current_cam].clear ();
956  mesh.tex_polygons[current_cam] = visible_faces;
957 
958  int nb_faces = 0;
959  for (int i = 0; i < static_cast<int> (mesh.tex_polygons.size ()); i++)
960  nb_faces += static_cast<int> (mesh.tex_polygons[i].size ());
961  }
962 
963  // we have been through all the cameras.
964  // if any faces are left, they were not visible by any camera
965  // we still need to produce uv coordinates for them
966 
967  if (mesh.tex_coordinates.size() <= cameras.size ())
968  {
969  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > dummy_container;
970  mesh.tex_coordinates.push_back(dummy_container);
971  }
972 
973 
974  for(size_t idx_face = 0 ; idx_face < mesh.tex_polygons[cameras.size()].size() ; ++idx_face)
975  {
976  Eigen::Vector2f UV1, UV2, UV3;
977  UV1(0) = -1.0; UV1(1) = -1.0;
978  UV2(0) = -1.0; UV2(1) = -1.0;
979  UV3(0) = -1.0; UV3(1) = -1.0;
980  mesh.tex_coordinates[cameras.size()].push_back(UV1);
981  mesh.tex_coordinates[cameras.size()].push_back(UV2);
982  mesh.tex_coordinates[cameras.size()].push_back(UV3);
983  }
984 
985 }
986 
987 ///////////////////////////////////////////////////////////////////////////////////////////////
988 template<typename PointInT> inline void
990 {
991  // we simplify the problem by translating the triangle's origin to its first point
992  pcl::PointXY ptB, ptC;
993  ptB.x = p2.x - p1.x; ptB.y = p2.y - p1.y; // B'=B-A
994  ptC.x = p3.x - p1.x; ptC.y = p3.y - p1.y; // C'=C-A
995 
996  double D = 2.0*(ptB.x*ptC.y - ptB.y*ptC.x); // D'=2(B'x*C'y - B'y*C'x)
997 
998  // Safety check to avoid division by zero
999  if(D == 0)
1000  {
1001  circomcenter.x = p1.x;
1002  circomcenter.y = p1.y;
1003  }
1004  else
1005  {
1006  // compute squares once
1007  double bx2 = ptB.x * ptB.x; // B'x^2
1008  double by2 = ptB.y * ptB.y; // B'y^2
1009  double cx2 = ptC.x * ptC.x; // C'x^2
1010  double cy2 = ptC.y * ptC.y; // C'y^2
1011 
1012  // compute circomcenter's coordinates (translate back to original coordinates)
1013  circomcenter.x = static_cast<float> (p1.x + (ptC.y*(bx2 + by2) - ptB.y*(cx2 + cy2)) / D);
1014  circomcenter.y = static_cast<float> (p1.y + (ptB.x*(cx2 + cy2) - ptC.x*(bx2 + by2)) / D);
1015  }
1016 
1017  radius = sqrt( (circomcenter.x - p1.x)*(circomcenter.x - p1.x) + (circomcenter.y - p1.y)*(circomcenter.y - p1.y));//2.0* (p1.x*(p2.y - p3.y) + p2.x*(p3.y - p1.y) + p3.x*(p1.y - p2.y));
1018 }
1019 
1020 ///////////////////////////////////////////////////////////////////////////////////////////////
1021 template<typename PointInT> inline void
1023 {
1024  // compute centroid's coordinates (translate back to original coordinates)
1025  circumcenter.x = static_cast<float> (p1.x + p2.x + p3.x ) / 3;
1026  circumcenter.y = static_cast<float> (p1.y + p2.y + p3.y ) / 3;
1027  double r1 = (circumcenter.x - p1.x) * (circumcenter.x - p1.x) + (circumcenter.y - p1.y) * (circumcenter.y - p1.y) ;
1028  double r2 = (circumcenter.x - p2.x) * (circumcenter.x - p2.x) + (circumcenter.y - p2.y) * (circumcenter.y - p2.y) ;
1029  double r3 = (circumcenter.x - p3.x) * (circumcenter.x - p3.x) + (circumcenter.y - p3.y) * (circumcenter.y - p3.y) ;
1030 
1031  // radius
1032  radius = std::sqrt( std::max( r1, std::max( r2, r3) )) ;
1033 }
1034 
1035 
1036 ///////////////////////////////////////////////////////////////////////////////////////////////
1037 template<typename PointInT> inline bool
1038 pcl::TextureMapping<PointInT>::getPointUVCoordinates(const PointInT &pt, const Camera &cam, pcl::PointXY &UV_coordinates)
1039 {
1040  if (pt.z > 0)
1041  {
1042  // compute image center and dimension
1043  double sizeX = cam.width;
1044  double sizeY = cam.height;
1045  double cx, cy;
1046  if (cam.center_w > 0)
1047  cx = cam.center_w;
1048  else
1049  cx = sizeX / 2.0;
1050  if (cam.center_h > 0)
1051  cy = cam.center_h;
1052  else
1053  cy = sizeY / 2.0;
1054 
1055  double focal_x, focal_y;
1056  if (cam.focal_length_w > 0)
1057  focal_x = cam.focal_length_w;
1058  else
1059  focal_x = cam.focal_length;
1060  if (cam.focal_length_h > 0)
1061  focal_y = cam.focal_length_h;
1062  else
1063  focal_y = cam.focal_length;
1064 
1065  // project point on camera's image plane
1066  UV_coordinates.x = static_cast<float> ((focal_x * (pt.x / pt.z) + cx) / sizeX); //horizontal
1067  UV_coordinates.y = 1.0f - static_cast<float> ((focal_y * (pt.y / pt.z) + cy) / sizeY); //vertical
1068 
1069  // point is visible!
1070  if (UV_coordinates.x >= 0.0 && UV_coordinates.x <= 1.0 && UV_coordinates.y >= 0.0 && UV_coordinates.y <= 1.0)
1071  return (true); // point was visible by the camera
1072  }
1073 
1074  // point is NOT visible by the camera
1075  UV_coordinates.x = -1.0f;
1076  UV_coordinates.y = -1.0f;
1077  return (false); // point was not visible by the camera
1078 }
1079 
1080 ///////////////////////////////////////////////////////////////////////////////////////////////
1081 template<typename PointInT> inline bool
1083 {
1084  // Compute vectors
1085  Eigen::Vector2d v0, v1, v2;
1086  v0(0) = p3.x - p1.x; v0(1) = p3.y - p1.y; // v0= C - A
1087  v1(0) = p2.x - p1.x; v1(1) = p2.y - p1.y; // v1= B - A
1088  v2(0) = pt.x - p1.x; v2(1) = pt.y - p1.y; // v2= P - A
1089 
1090  // Compute dot products
1091  double dot00 = v0.dot(v0); // dot00 = dot(v0, v0)
1092  double dot01 = v0.dot(v1); // dot01 = dot(v0, v1)
1093  double dot02 = v0.dot(v2); // dot02 = dot(v0, v2)
1094  double dot11 = v1.dot(v1); // dot11 = dot(v1, v1)
1095  double dot12 = v1.dot(v2); // dot12 = dot(v1, v2)
1096 
1097  // Compute barycentric coordinates
1098  double invDenom = 1.0 / (dot00*dot11 - dot01*dot01);
1099  double u = (dot11*dot02 - dot01*dot12) * invDenom;
1100  double v = (dot00*dot12 - dot01*dot02) * invDenom;
1101 
1102  // Check if point is in triangle
1103  return ((u >= 0) && (v >= 0) && (u + v < 1));
1104 }
1105 
1106 ///////////////////////////////////////////////////////////////////////////////////////////////
1107 template<typename PointInT> inline bool
1108 pcl::TextureMapping<PointInT>::isFaceProjected (const Camera &camera, const PointInT &p1, const PointInT &p2, const PointInT &p3, pcl::PointXY &proj1, pcl::PointXY &proj2, pcl::PointXY &proj3)
1109 {
1110  return (getPointUVCoordinates(p1, camera, proj1)
1111  &&
1112  getPointUVCoordinates(p2, camera, proj2)
1113  &&
1114  getPointUVCoordinates(p3, camera, proj3)
1115  );
1116 }
1117 
1118 #define PCL_INSTANTIATE_TextureMapping(T) \
1119  template class PCL_EXPORTS pcl::TextureMapping<T>;
1120 
1121 #endif /* TEXTURE_MAPPING_HPP_ */
1122 
int radiusSearch(const PointT &point, double radius, std::vector< int > &k_indices, std::vector< float > &k_sqr_distances, unsigned int max_nn=0) const
Search for all the nearest neighbors of the query point in a given radius.
void mapMultipleTexturesToMeshUV(pcl::TextureMesh &tex_mesh, pcl::texture_mapping::CameraVector &cams)
Map textures acquired from a set of cameras onto a mesh.
void getTriangleCircumcscribedCircleCentroid(const pcl::PointXY &p1, const pcl::PointXY &p2, const pcl::PointXY &p3, pcl::PointXY &circumcenter, double &radius)
Returns the centroid of a triangle and the corresponding circumscribed circle's radius.
void fromPCLPointCloud2(const pcl::PCLPointCloud2 &msg, pcl::PointCloud< PointT > &cloud, const MsgFieldMap &field_map)
Convert a PCLPointCloud2 binary data blob into a pcl::PointCloud<T> object using a field_map...
Definition: conversions.h:168
KdTreeFLANN is a generic type of 3D spatial locator using kD-tree structures.
Definition: kdtree_flann.h:68
std::vector< std::vector< Eigen::Vector2f, Eigen::aligned_allocator< Eigen::Vector2f > > > tex_coordinates
Definition: TextureMesh.h:100
void addPointsFromInputCloud()
Add points from input point cloud to octree.
std::vector< PointT, Eigen::aligned_allocator< PointT > > points
The point data.
Definition: point_cloud.h:409
void setInputCloud(const PointCloudConstPtr &cloud, const IndicesConstPtr &indices=IndicesConstPtr())
Provide a pointer to the input dataset.
std::vector< Camera, Eigen::aligned_allocator< Camera > > CameraVector
void mapTexture2MeshUV(pcl::TextureMesh &tex_mesh)
Map texture to a mesh UV mapping.
PointCloud::ConstPtr PointCloudConstPtr
std::vector< pcl::TexMaterial > tex_materials
Definition: TextureMesh.h:101
void setInputCloud(const PointCloudConstPtr &cloud_arg, const IndicesConstPtr &indices_arg=IndicesConstPtr())
Provide a pointer to the input data set.
bool getPointUVCoordinates(const PointInT &pt, const Camera &cam, Eigen::Vector2f &UV_coordinates)
computes UV coordinates of point, observed by one particular camera
void textureMeshwithMultipleCameras(pcl::TextureMesh &mesh, const pcl::texture_mapping::CameraVector &cameras)
Segment and texture faces by camera visibility.
void removeOccludedPoints(const PointCloudPtr &input_cloud, PointCloudPtr &filtered_cloud, const double octree_voxel_size, std::vector< int > &visible_indices, std::vector< int > &occluded_indices)
Remove occluded points from a point cloud.
A 2D point structure representing Euclidean xy coordinates.
float euclideanDistance(const PointType1 &p1, const PointType2 &p2)
Calculate the euclidean distance between the two given points.
Definition: distances.h:196
boost::shared_ptr< PointCloud< PointT > > Ptr
Definition: point_cloud.h:427
void mapTexture2Mesh(pcl::TextureMesh &tex_mesh)
Map texture to a mesh synthesis algorithm.
void defineBoundingBox()
Investigate dimensions of pointcloud data set and define corresponding bounding box for octree...
bool isFaceProjected(const Camera &camera, const PointInT &p1, const PointInT &p2, const PointInT &p3, pcl::PointXY &proj1, pcl::PointXY &proj2, pcl::PointXY &proj3)
Returns true if all the vertices of one face are projected on the camera's image plane.
void transformPointCloud(const pcl::PointCloud< PointT > &cloud_in, pcl::PointCloud< PointT > &cloud_out, const Eigen::Transform< Scalar, 3, Eigen::Affine > &transform, bool copy_all_fields=true)
Apply an affine transform defined by an Eigen Transform.
Definition: transforms.hpp:215
pcl::uint32_t width
bool isPointOccluded(const PointInT &pt, const OctreePtr octree)
Check if a point is occluded using raycasting on octree.
Structure to store camera pose and focal length.
Structure that links a uv coordinate to its 3D point and face.
int getIntersectedVoxelIndices(Eigen::Vector3f origin, Eigen::Vector3f direction, std::vector< int > &k_indices, int max_voxel_count=0) const
Get indices of all voxels that are intersected by a ray (origin, direction).
std::vector< Eigen::Vector2f, Eigen::aligned_allocator< Eigen::Vector2f > > mapTexture2Face(const Eigen::Vector3f &p1, const Eigen::Vector3f &p2, const Eigen::Vector3f &p3)
Map texture to a face.
std::vector< ::pcl::PCLPointField > fields
pcl::uint32_t height
Octree pointcloud search class
Definition: octree_search.h:56
pcl::PCLPointCloud2 cloud
Definition: TextureMesh.h:95
void showOcclusions(const PointCloudPtr &input_cloud, pcl::PointCloud< pcl::PointXYZI >::Ptr &colored_cloud, const double octree_voxel_size, const bool show_nb_occlusions=true, const int max_occlusions=4)
Colors a point cloud, depending on its occlusions.
PointCloud::Ptr PointCloudPtr
void getTriangleCircumcenterAndSize(const pcl::PointXY &p1, const pcl::PointXY &p2, const pcl::PointXY &p3, pcl::PointXY &circumcenter, double &radius)
Returns the circumcenter of a triangle and the circle&#39;s radius.
int sortFacesByCamera(pcl::TextureMesh &tex_mesh, pcl::TextureMesh &sorted_mesh, const pcl::texture_mapping::CameraVector &cameras, const double octree_voxel_size, PointCloud &visible_pts)
Segment faces by camera visibility.
bool checkPointInsideTriangle(const pcl::PointXY &p1, const pcl::PointXY &p2, const pcl::PointXY &p3, const pcl::PointXY &pt)
Returns True if a point lays within a triangle.
std::vector< pcl::uint8_t > data
std::vector< std::vector< pcl::Vertices > > tex_polygons
Definition: TextureMesh.h:99