Point Cloud Library (PCL)  1.9.1-dev
utils.hpp
/*
 * Software License Agreement (BSD License)
 *
 * Point Cloud Library (PCL) - www.pointclouds.org
 * Copyright (c) 2011, Willow Garage, Inc.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *  * Neither the name of Willow Garage, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */


#ifndef PCL_GPU_KINFU_CUDA_UTILS_HPP_
#define PCL_GPU_KINFU_CUDA_UTILS_HPP_

#include <limits>

#include <cuda.h>

namespace pcl
{
  namespace device
  {
    template <class T>
    __device__ __host__ __forceinline__ void swap (T& a, T& b)
    {
      T c(a); a = b; b = c;
    }

    __device__ __forceinline__ float
    dot (const float3& v1, const float3& v2)
    {
      return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z;
    }

    __device__ __forceinline__ float3&
    operator+= (float3& vec, const float& v)
    {
      vec.x += v; vec.y += v; vec.z += v; return vec;
    }

    __device__ __forceinline__ float3
    operator+ (const float3& v1, const float3& v2)
    {
      return make_float3 (v1.x + v2.x, v1.y + v2.y, v1.z + v2.z);
    }

    __device__ __forceinline__ float3&
    operator*= (float3& vec, const float& v)
    {
      vec.x *= v; vec.y *= v; vec.z *= v; return vec;
    }

    __device__ __forceinline__ float3
    operator- (const float3& v1, const float3& v2)
    {
      return make_float3 (v1.x - v2.x, v1.y - v2.y, v1.z - v2.z);
    }

    __device__ __forceinline__ float3
    operator* (const float3& v1, const float& v)
    {
      return make_float3 (v1.x * v, v1.y * v, v1.z * v);
    }

    __device__ __forceinline__ float
    norm (const float3& v)
    {
      return sqrt (dot (v, v));
    }

    __device__ __forceinline__ float3
    normalized (const float3& v)
    {
      return v * rsqrt (dot (v, v));
    }

    __device__ __host__ __forceinline__ float3
    cross (const float3& v1, const float3& v2)
    {
      return make_float3 (v1.y * v2.z - v1.z * v2.y, v1.z * v2.x - v1.x * v2.z, v1.x * v2.y - v1.y * v2.x);
    }

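    /** A minimal usage sketch for the float3 helpers above: computing the
      * unit normal of a triangle from its vertices. The kernel and names
      * below are illustrative only, not part of this header.
      * \code
      * __global__ void
      * triangleNormalsKernel (const float3* a, const float3* b, const float3* c, float3* n)
      * {
      *   int i = blockIdx.x * blockDim.x + threadIdx.x;
      *   float3 e1 = b[i] - a[i]; // two edge vectors of the triangle
      *   float3 e2 = c[i] - a[i];
      *   n[i] = normalized (cross (e1, e2)); // unit normal of the triangle plane
      * }
      * \endcode
      */
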
    __device__ __forceinline__ void
    computeRoots2 (const float& b, const float& c, float3& roots)
    {
      // Solves x^2 - b*x + c = 0; roots.x is fixed at 0 (see computeRoots3).
      roots.x = 0.f;
      float d = b * b - 4.f * c;
      if (d < 0.f) // no real roots (should not happen for a symmetric matrix); clamp
        d = 0.f;

      float sd = sqrtf (d);

      roots.z = 0.5f * (b + sd);
      roots.y = 0.5f * (b - sd);
    }

    __device__ __forceinline__ void
    computeRoots3 (float c0, float c1, float c2, float3& roots)
    {
      if (std::abs (c0) < std::numeric_limits<float>::epsilon ()) // one root is 0 -> quadratic equation
      {
        computeRoots2 (c2, c1, roots);
      }
      else
      {
        const float s_inv3 = 1.f / 3.f;
        const float s_sqrt3 = sqrtf (3.f);
        // Construct the parameters used in classifying the roots of the equation
        // and in solving the equation for the roots in closed form.
        float c2_over_3 = c2 * s_inv3;
        float a_over_3 = (c1 - c2 * c2_over_3) * s_inv3;
        if (a_over_3 > 0.f)
          a_over_3 = 0.f;

        float half_b = 0.5f * (c0 + c2_over_3 * (2.f * c2_over_3 * c2_over_3 - c1));

        float q = half_b * half_b + a_over_3 * a_over_3 * a_over_3;
        if (q > 0.f)
          q = 0.f;

        // Compute the eigenvalues by solving for the roots of the polynomial.
        float rho = sqrtf (-a_over_3);
        float theta = std::atan2 (sqrtf (-q), half_b) * s_inv3;
        float cos_theta = __cosf (theta);
        float sin_theta = __sinf (theta);
        roots.x = c2_over_3 + 2.f * rho * cos_theta;
        roots.y = c2_over_3 - rho * (cos_theta + s_sqrt3 * sin_theta);
        roots.z = c2_over_3 - rho * (cos_theta - s_sqrt3 * sin_theta);

        // Sort in increasing order.
        if (roots.x >= roots.y)
          swap (roots.x, roots.y);

        if (roots.y >= roots.z)
        {
          swap (roots.y, roots.z);

          if (roots.x >= roots.y)
            swap (roots.x, roots.y);
        }
        if (roots.x <= 0) // eigenvalues of a symmetric positive semi-definite matrix cannot be negative; fall back to the quadratic
          computeRoots2 (c2, c1, roots);
      }
    }

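    /** computeRoots3 above solves x^3 - c2*x^2 + c1*x - c0 = 0, the
      * characteristic polynomial of a symmetric 3x3 matrix with c2 = trace,
      * c1 = sum of the principal 2x2 minors, and c0 = determinant. As a
      * concrete check, for diag(1,2,3) these are c2 = 6, c1 = 11, c0 = 6:
      * \code
      * float3 roots;
      * computeRoots3 (6.f, 11.f, 6.f, roots); // roots = (1, 2, 3), ascending
      * \endcode
      */
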
    struct Eigen33
    {
    public:
      template<int Rows>
      struct MiniMat
      {
        float3 data[Rows];
        __device__ __host__ __forceinline__ float3& operator[] (int i) { return data[i]; }
        __device__ __host__ __forceinline__ const float3& operator[] (int i) const { return data[i]; }
      };
      using Mat33 = MiniMat<3>;
      using Mat43 = MiniMat<4>;


      static __forceinline__ __device__ float3
      unitOrthogonal (const float3& src)
      {
        float3 perp;
        /* Compute the cross product of src with a vector that is not too
         * close to being collinear to src.
         */

        /* Unless the x and y coords are both close to zero, we can simply
         * take (-y, x, 0) and normalize it.
         */
        if (!isMuchSmallerThan (src.x, src.z) || !isMuchSmallerThan (src.y, src.z))
        {
          float invnm = rsqrtf (src.x * src.x + src.y * src.y);
          perp.x = -src.y * invnm;
          perp.y =  src.x * invnm;
          perp.z = 0.0f;
        }
        /* If both x and y are close to zero, then the vector is close to
         * the z-axis, so it is far from collinear to the x-axis, for
         * instance. Hence we take the cross product with (1,0,0) and
         * normalize it.
         */
        else
        {
          float invnm = rsqrtf (src.z * src.z + src.y * src.y);
          perp.x = 0.0f;
          perp.y = -src.z * invnm;
          perp.z =  src.y * invnm;
        }

        return perp;
      }

      __device__ __forceinline__
      Eigen33 (volatile float* mat_pkg_arg) : mat_pkg (mat_pkg_arg) {}

      __device__ __forceinline__ void
      compute (Mat33& tmp, Mat33& vec_tmp, Mat33& evecs, float3& evals)
      {
        // Scale the matrix so its entries are in [-1,1], dividing by the
        // entry of largest magnitude (clamped away from zero below).
        float max01 = fmaxf (std::abs (mat_pkg[0]), std::abs (mat_pkg[1]));
        float max23 = fmaxf (std::abs (mat_pkg[2]), std::abs (mat_pkg[3]));
        float max45 = fmaxf (std::abs (mat_pkg[4]), std::abs (mat_pkg[5]));
        float m0123 = fmaxf (max01, max23);
        float scale = fmaxf (max45, m0123);

        if (scale <= std::numeric_limits<float>::min ())
          scale = 1.f;

        mat_pkg[0] /= scale;
        mat_pkg[1] /= scale;
        mat_pkg[2] /= scale;
        mat_pkg[3] /= scale;
        mat_pkg[4] /= scale;
        mat_pkg[5] /= scale;

        // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The
        // eigenvalues are the roots to this equation, all guaranteed to be
        // real-valued, because the matrix is symmetric.
        float c0 = m00 () * m11 () * m22 ()
                 + 2.f * m01 () * m02 () * m12 ()
                 - m00 () * m12 () * m12 ()
                 - m11 () * m02 () * m02 ()
                 - m22 () * m01 () * m01 ();
        float c1 = m00 () * m11 () -
                   m01 () * m01 () +
                   m00 () * m22 () -
                   m02 () * m02 () +
                   m11 () * m22 () -
                   m12 () * m12 ();
        float c2 = m00 () + m11 () + m22 ();

        computeRoots3 (c0, c1, c2, evals);

        if (evals.z - evals.x <= std::numeric_limits<float>::epsilon ())
        {
          // All three eigenvalues equal: any orthonormal basis works.
          evecs[0] = make_float3 (1.f, 0.f, 0.f);
          evecs[1] = make_float3 (0.f, 1.f, 0.f);
          evecs[2] = make_float3 (0.f, 0.f, 1.f);
        }
        else if (evals.y - evals.x <= std::numeric_limits<float>::epsilon ())
        {
          // first and second equal
          tmp[0] = row0 ();  tmp[1] = row1 ();  tmp[2] = row2 ();
          tmp[0].x -= evals.z;  tmp[1].y -= evals.z;  tmp[2].z -= evals.z;

          vec_tmp[0] = cross (tmp[0], tmp[1]);
          vec_tmp[1] = cross (tmp[0], tmp[2]);
          vec_tmp[2] = cross (tmp[1], tmp[2]);

          float len1 = dot (vec_tmp[0], vec_tmp[0]);
          float len2 = dot (vec_tmp[1], vec_tmp[1]);
          float len3 = dot (vec_tmp[2], vec_tmp[2]);

          if (len1 >= len2 && len1 >= len3)
          {
            evecs[2] = vec_tmp[0] * rsqrtf (len1);
          }
          else if (len2 >= len1 && len2 >= len3)
          {
            evecs[2] = vec_tmp[1] * rsqrtf (len2);
          }
          else
          {
            evecs[2] = vec_tmp[2] * rsqrtf (len3);
          }

          evecs[1] = unitOrthogonal (evecs[2]);
          evecs[0] = cross (evecs[1], evecs[2]);
        }
        else if (evals.z - evals.y <= std::numeric_limits<float>::epsilon ())
        {
          // second and third equal
          tmp[0] = row0 ();  tmp[1] = row1 ();  tmp[2] = row2 ();
          tmp[0].x -= evals.x;  tmp[1].y -= evals.x;  tmp[2].z -= evals.x;

          vec_tmp[0] = cross (tmp[0], tmp[1]);
          vec_tmp[1] = cross (tmp[0], tmp[2]);
          vec_tmp[2] = cross (tmp[1], tmp[2]);

          float len1 = dot (vec_tmp[0], vec_tmp[0]);
          float len2 = dot (vec_tmp[1], vec_tmp[1]);
          float len3 = dot (vec_tmp[2], vec_tmp[2]);

          if (len1 >= len2 && len1 >= len3)
          {
            evecs[0] = vec_tmp[0] * rsqrtf (len1);
          }
          else if (len2 >= len1 && len2 >= len3)
          {
            evecs[0] = vec_tmp[1] * rsqrtf (len2);
          }
          else
          {
            evecs[0] = vec_tmp[2] * rsqrtf (len3);
          }

          evecs[1] = unitOrthogonal (evecs[0]);
          evecs[2] = cross (evecs[0], evecs[1]);
        }
        else
        {
          // All three eigenvalues distinct: extract each eigenvector from
          // the null space of (A - eval*I), tracking the least and most
          // reliable estimates.
          tmp[0] = row0 ();  tmp[1] = row1 ();  tmp[2] = row2 ();
          tmp[0].x -= evals.z;  tmp[1].y -= evals.z;  tmp[2].z -= evals.z;

          vec_tmp[0] = cross (tmp[0], tmp[1]);
          vec_tmp[1] = cross (tmp[0], tmp[2]);
          vec_tmp[2] = cross (tmp[1], tmp[2]);

          float len1 = dot (vec_tmp[0], vec_tmp[0]);
          float len2 = dot (vec_tmp[1], vec_tmp[1]);
          float len3 = dot (vec_tmp[2], vec_tmp[2]);

          float mmax[3];

          unsigned int min_el = 2;
          unsigned int max_el = 2;
          if (len1 >= len2 && len1 >= len3)
          {
            mmax[2] = len1;
            evecs[2] = vec_tmp[0] * rsqrtf (len1);
          }
          else if (len2 >= len1 && len2 >= len3)
          {
            mmax[2] = len2;
            evecs[2] = vec_tmp[1] * rsqrtf (len2);
          }
          else
          {
            mmax[2] = len3;
            evecs[2] = vec_tmp[2] * rsqrtf (len3);
          }

          tmp[0] = row0 ();  tmp[1] = row1 ();  tmp[2] = row2 ();
          tmp[0].x -= evals.y;  tmp[1].y -= evals.y;  tmp[2].z -= evals.y;

          vec_tmp[0] = cross (tmp[0], tmp[1]);
          vec_tmp[1] = cross (tmp[0], tmp[2]);
          vec_tmp[2] = cross (tmp[1], tmp[2]);

          len1 = dot (vec_tmp[0], vec_tmp[0]);
          len2 = dot (vec_tmp[1], vec_tmp[1]);
          len3 = dot (vec_tmp[2], vec_tmp[2]);

          if (len1 >= len2 && len1 >= len3)
          {
            mmax[1] = len1;
            evecs[1] = vec_tmp[0] * rsqrtf (len1);
            min_el = len1 <= mmax[min_el] ? 1 : min_el;
            max_el = len1 > mmax[max_el] ? 1 : max_el;
          }
          else if (len2 >= len1 && len2 >= len3)
          {
            mmax[1] = len2;
            evecs[1] = vec_tmp[1] * rsqrtf (len2);
            min_el = len2 <= mmax[min_el] ? 1 : min_el;
            max_el = len2 > mmax[max_el] ? 1 : max_el;
          }
          else
          {
            mmax[1] = len3;
            evecs[1] = vec_tmp[2] * rsqrtf (len3);
            min_el = len3 <= mmax[min_el] ? 1 : min_el;
            max_el = len3 > mmax[max_el] ? 1 : max_el;
          }

          tmp[0] = row0 ();  tmp[1] = row1 ();  tmp[2] = row2 ();
          tmp[0].x -= evals.x;  tmp[1].y -= evals.x;  tmp[2].z -= evals.x;

          vec_tmp[0] = cross (tmp[0], tmp[1]);
          vec_tmp[1] = cross (tmp[0], tmp[2]);
          vec_tmp[2] = cross (tmp[1], tmp[2]);

          len1 = dot (vec_tmp[0], vec_tmp[0]);
          len2 = dot (vec_tmp[1], vec_tmp[1]);
          len3 = dot (vec_tmp[2], vec_tmp[2]);

          if (len1 >= len2 && len1 >= len3)
          {
            mmax[0] = len1;
            evecs[0] = vec_tmp[0] * rsqrtf (len1);
            min_el = len1 <= mmax[min_el] ? 0 : min_el;
            max_el = len1 > mmax[max_el] ? 0 : max_el;
          }
          else if (len2 >= len1 && len2 >= len3)
          {
            mmax[0] = len2;
            evecs[0] = vec_tmp[1] * rsqrtf (len2);
            min_el = len2 <= mmax[min_el] ? 0 : min_el;
            max_el = len2 > mmax[max_el] ? 0 : max_el;
          }
          else
          {
            mmax[0] = len3;
            evecs[0] = vec_tmp[2] * rsqrtf (len3);
            min_el = len3 <= mmax[min_el] ? 0 : min_el;
            max_el = len3 > mmax[max_el] ? 0 : max_el;
          }

          unsigned mid_el = 3 - min_el - max_el;
          // Re-derive the two least reliable eigenvectors from the others
          // to keep the basis orthonormal.
          evecs[min_el] = normalized (cross (evecs[(min_el + 1) % 3], evecs[(min_el + 2) % 3]));
          evecs[mid_el] = normalized (cross (evecs[(mid_el + 1) % 3], evecs[(mid_el + 2) % 3]));
        }
        // Rescale back to the original size.
        evals *= scale;
      }
    private:
      volatile float* mat_pkg;

      __device__ __forceinline__ float m00 () const { return mat_pkg[0]; }
      __device__ __forceinline__ float m01 () const { return mat_pkg[1]; }
      __device__ __forceinline__ float m02 () const { return mat_pkg[2]; }
      __device__ __forceinline__ float m10 () const { return mat_pkg[1]; }
      __device__ __forceinline__ float m11 () const { return mat_pkg[3]; }
      __device__ __forceinline__ float m12 () const { return mat_pkg[4]; }
      __device__ __forceinline__ float m20 () const { return mat_pkg[2]; }
      __device__ __forceinline__ float m21 () const { return mat_pkg[4]; }
      __device__ __forceinline__ float m22 () const { return mat_pkg[5]; }

      __device__ __forceinline__ float3 row0 () const { return make_float3 (m00 (), m01 (), m02 ()); }
      __device__ __forceinline__ float3 row1 () const { return make_float3 (m10 (), m11 (), m12 ()); }
      __device__ __forceinline__ float3 row2 () const { return make_float3 (m20 (), m21 (), m22 ()); }

      __device__ __forceinline__ static bool isMuchSmallerThan (float x, float y)
      {
        // copied from <eigen>/include/Eigen/src/Core/NumTraits.h
        const float prec_sqr = std::numeric_limits<float>::epsilon () * std::numeric_limits<float>::epsilon ();
        return x * x <= prec_sqr * y * y;
      }
    };

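    /** A usage sketch for Eigen33: the constructor takes the six unique
      * entries of a symmetric 3x3 matrix packed as [m00, m01, m02, m11,
      * m12, m22]. Note that compute() rescales mat_pkg in place. The kernel
      * below (names and layout are illustrative only) extracts the
      * eigenvector of the smallest eigenvalue, e.g. as a surface normal
      * from a local covariance matrix.
      * \code
      * __global__ void
      * normalsFromCovarianceKernel (float* cov6, float3* normals)
      * {
      *   int i = blockIdx.x * blockDim.x + threadIdx.x;
      *   Eigen33 eigen33 (cov6 + 6 * i);    // one 6-float package per point
      *   Eigen33::Mat33 tmp, vec_tmp, evecs;
      *   float3 evals;
      *   eigen33.compute (tmp, vec_tmp, evecs, evals);
      *   // evals is sorted ascending, so evecs[0] pairs with the smallest
      *   // eigenvalue: the normal direction of the local surface patch.
      *   normals[i] = evecs[0];
      * }
      * \endcode
      */
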
    struct Block
    {
      static __device__ __forceinline__ unsigned int stride ()
      {
        return blockDim.x * blockDim.y * blockDim.z;
      }

      static __device__ __forceinline__ int
      flattenedThreadId ()
      {
        return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
      }

      template<int CTA_SIZE, typename T, class BinOp>
      static __device__ __forceinline__ void reduce (volatile T* buffer, BinOp op)
      {
        int tid = flattenedThreadId ();
        T val = buffer[tid];

        if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
        if (CTA_SIZE >=  512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
        if (CTA_SIZE >=  256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
        if (CTA_SIZE >=  128) { if (tid <  64) buffer[tid] = val = op(val, buffer[tid +  64]); __syncthreads(); }

        if (tid < 32)
        {
          // The final steps run within a single warp, without explicit syncs.
          if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
          if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
          if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid +  8]); }
          if (CTA_SIZE >=  8) { buffer[tid] = val = op(val, buffer[tid +  4]); }
          if (CTA_SIZE >=  4) { buffer[tid] = val = op(val, buffer[tid +  2]); }
          if (CTA_SIZE >=  2) { buffer[tid] = val = op(val, buffer[tid +  1]); }
        }
      }

      template<int CTA_SIZE, typename T, class BinOp>
      static __device__ __forceinline__ T reduce (volatile T* buffer, T init, BinOp op)
      {
        int tid = flattenedThreadId ();
        T val = buffer[tid] = init;
        __syncthreads ();

        if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
        if (CTA_SIZE >=  512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
        if (CTA_SIZE >=  256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
        if (CTA_SIZE >=  128) { if (tid <  64) buffer[tid] = val = op(val, buffer[tid +  64]); __syncthreads(); }

        if (tid < 32)
        {
          if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
          if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
          if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid +  8]); }
          if (CTA_SIZE >=  8) { buffer[tid] = val = op(val, buffer[tid +  4]); }
          if (CTA_SIZE >=  4) { buffer[tid] = val = op(val, buffer[tid +  2]); }
          if (CTA_SIZE >=  2) { buffer[tid] = val = op(val, buffer[tid +  1]); }
        }
        __syncthreads ();
        return buffer[0];
      }
    };

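    /** A usage sketch for Block::reduce: a block-wide sum with one value
      * per thread, staged through shared memory. Illustrative only; the
      * functor and kernel names are not part of this header, and CTA_SIZE
      * must match the number of threads per block at launch.
      * \code
      * struct Plus
      * {
      *   __device__ __forceinline__ float operator() (float a, float b) const { return a + b; }
      * };
      *
      * template<int CTA_SIZE> __global__ void
      * blockSumKernel (const float* in, float* out)
      * {
      *   __shared__ float buffer[CTA_SIZE];
      *   int tid = Block::flattenedThreadId ();
      *   // reduce() stores init into buffer, runs the tree, returns buffer[0].
      *   float sum = Block::reduce<CTA_SIZE> (buffer, in[blockIdx.x * CTA_SIZE + tid], Plus ());
      *   if (tid == 0)
      *     out[blockIdx.x] = sum;
      * }
      * \endcode
      */
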
    struct Warp
    {
      enum
      {
        LOG_WARP_SIZE = 5,
        WARP_SIZE = 1 << LOG_WARP_SIZE,
        STRIDE = WARP_SIZE
      };

      /** \brief Returns the warp lane ID of the calling thread. */
      static __device__ __forceinline__ unsigned int
      laneId ()
      {
        unsigned int ret;
        asm("mov.u32 %0, %laneid;" : "=r"(ret));
        return ret;
      }

      static __device__ __forceinline__ unsigned int id ()
      {
        int tid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
        return tid >> LOG_WARP_SIZE;
      }

      static __device__ __forceinline__ int
      laneMaskLt ()
      {
        unsigned int ret;
        asm("mov.u32 %0, %lanemask_lt;" : "=r"(ret));
        return ret;
      }

      static __device__ __forceinline__ int binaryExclScan (int ballot_mask)
      {
        return __popc (Warp::laneMaskLt () & ballot_mask);
      }
    };


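    /** A usage sketch combining Warp::binaryExclScan with Emulation::Ballot
      * (defined below): warp-level stream compaction, where each thread
      * holding a valid item derives its output slot from the lower lanes
      * that also hold one. Names are illustrative only.
      * \code
      * __device__ __forceinline__ int
      * compactionSlot (int has_item, volatile int* cta_buffer)
      * {
      *   int mask = Emulation::Ballot (has_item, cta_buffer); // one bit per lane
      *   return Warp::binaryExclScan (mask); // set bits in lower lanes = my slot
      * }
      * \endcode
      */
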
    struct Emulation
    {
      static __device__ __forceinline__ int
      warp_reduce (volatile int *ptr, const unsigned int tid)
      {
        const unsigned int lane = tid & 31; // index of thread in warp (0..31)

        if (lane < 16)
        {
          int partial = ptr[tid];

          ptr[tid] = partial = partial + ptr[tid + 16];
          ptr[tid] = partial = partial + ptr[tid +  8];
          ptr[tid] = partial = partial + ptr[tid +  4];
          ptr[tid] = partial = partial + ptr[tid +  2];
          ptr[tid] = partial = partial + ptr[tid +  1];
        }
        return ptr[tid - lane];
      }

      static __forceinline__ __device__ int
      Ballot (int predicate, volatile int* cta_buffer)
      {
#if CUDA_VERSION >= 9000
        (void)cta_buffer;
        return __ballot_sync (__activemask (), predicate);
#else
        (void)cta_buffer;
        return __ballot (predicate);
#endif
      }

      static __forceinline__ __device__ bool
      All (int predicate, volatile int* cta_buffer)
      {
#if CUDA_VERSION >= 9000
        (void)cta_buffer;
        return __all_sync (__activemask (), predicate);
#else
        (void)cta_buffer;
        return __all (predicate);
#endif
      }
    };
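
    /** A usage sketch for Emulation::warp_reduce: per-warp sums over a
      * shared buffer with one slot per thread. Illustrative only; it
      * assumes a 1-D block of 256 threads and, like warp_reduce itself,
      * relies on legacy warp-synchronous execution within the warp.
      * \code
      * __global__ void
      * warpSumsKernel (const int* in, int* out)
      * {
      *   __shared__ volatile int smem[256];
      *   int tid = Block::flattenedThreadId ();
      *   smem[tid] = in[blockIdx.x * blockDim.x + tid];
      *   int sum = Emulation::warp_reduce (smem, tid); // sum of this warp's 32 slots
      *   if (Warp::laneId () == 0)
      *     out[blockIdx.x * (blockDim.x >> Warp::LOG_WARP_SIZE) + Warp::id ()] = sum;
      * }
      * \endcode
      */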
  }
}

#endif /* PCL_GPU_KINFU_CUDA_UTILS_HPP_ */