// Caffe -- math_functions.hpp
1 #ifndef CAFFE_UTIL_MATH_FUNCTIONS_H_
2 #define CAFFE_UTIL_MATH_FUNCTIONS_H_
3 
#include <stdint.h>

#include <cmath>    // for std::fabs and std::signbit
#include <cstring>  // for memset (used by caffe_memset)

#include "glog/logging.h"

#include "caffe/common.hpp"
#include "caffe/util/device_alternate.hpp"
#include "caffe/util/mkl_alternate.hpp"
12 
13 namespace caffe {
14 
// Caffe gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
// BLAS-style C = alpha * op(A) * op(B) + beta * C, where op() is chosen by
// TransA/TransB, A is M x K, B is K x N, and C is M x N.
template <typename Dtype>
void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const Dtype alpha, const Dtype* A, const Dtype* B, const Dtype beta,
    Dtype* C);

// BLAS-style matrix-vector product: y = alpha * op(A) * x + beta * y,
// with A an M x N matrix.
template <typename Dtype>
void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N,
    const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta,
    Dtype* y);

// BLAS-style axpy: Y += alpha * X, over N elements.
template <typename Dtype>
void caffe_axpy(const int N, const Dtype alpha, const Dtype* X,
    Dtype* Y);

// Scaled blend: Y = alpha * X + beta * Y, over N elements.
template <typename Dtype>
void caffe_cpu_axpby(const int N, const Dtype alpha, const Dtype* X,
    const Dtype beta, Dtype* Y);

// Copies N elements from X to Y.
template <typename Dtype>
void caffe_copy(const int N, const Dtype *X, Dtype *Y);

// Sets all N elements of X to the value alpha.
template <typename Dtype>
void caffe_set(const int N, const Dtype alpha, Dtype *X);
// Fills the first N *bytes* (not elements, unlike most functions here) of
// the buffer X with the byte value alpha.  Thin wrapper over memset; the
// NOLINT suppresses Caffe's lint rule that flags raw memset calls.
inline void caffe_memset(const size_t N, const int alpha, void* X) {
  memset(X, alpha, N);  // NOLINT(caffe/alt_fn)
}
45 
// X[i] += alpha for all N elements.
template <typename Dtype>
void caffe_add_scalar(const int N, const Dtype alpha, Dtype *X);

// BLAS-style scal: X *= alpha, over N elements.
template <typename Dtype>
void caffe_scal(const int N, const Dtype alpha, Dtype *X);

// Element-wise square: y[i] = a[i] * a[i].
template <typename Dtype>
void caffe_sqr(const int N, const Dtype* a, Dtype* y);

// Element-wise binary arithmetic over N elements: y = a (op) b.
template <typename Dtype>
void caffe_add(const int N, const Dtype* a, const Dtype* b, Dtype* y);

template <typename Dtype>
void caffe_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y);

template <typename Dtype>
void caffe_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y);

template <typename Dtype>
void caffe_div(const int N, const Dtype* a, const Dtype* b, Dtype* y);

// Element-wise power: y[i] = a[i] raised to the (scalar) exponent b.
template <typename Dtype>
void caffe_powx(const int n, const Dtype* a, const Dtype b, Dtype* y);

// Returns one random unsigned integer from Caffe's RNG.
unsigned int caffe_rng_rand();

// Returns a floating-point value adjacent to b (direction is fixed in the
// definition -- see math_functions.cpp); used by the uniform RNG helpers.
template <typename Dtype>
Dtype caffe_nextafter(const Dtype b);

// Fills r with n uniform samples from the interval between a and b
// (endpoint inclusivity determined by the definition -- confirm in
// math_functions.cpp).
template <typename Dtype>
void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r);

// Fills r with n Gaussian samples with mean mu and standard deviation sigma.
template <typename Dtype>
void caffe_rng_gaussian(const int n, const Dtype mu, const Dtype sigma,
    Dtype* r);

// Fills r with n Bernoulli(p) samples (two overloads for the output type).
template <typename Dtype>
void caffe_rng_bernoulli(const int n, const Dtype p, int* r);

template <typename Dtype>
void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r);

// Element-wise unary functions: y[i] = f(a[i]).
template <typename Dtype>
void caffe_exp(const int n, const Dtype* a, Dtype* y);

template <typename Dtype>
void caffe_log(const int n, const Dtype* a, Dtype* y);

template <typename Dtype>
void caffe_abs(const int n, const Dtype* a, Dtype* y);

// Returns the dot product of the contiguous n-vectors x and y.
template <typename Dtype>
Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y);

// Dot product with explicit element strides incx / incy (BLAS *dot style).
template <typename Dtype>
Dtype caffe_cpu_strided_dot(const int n, const Dtype* x, const int incx,
    const Dtype* y, const int incy);

// Returns the sum of the absolute values of the elements of vector x
// (BLAS asum).
template <typename Dtype>
Dtype caffe_cpu_asum(const int n, const Dtype* x);
107 
// Branchless, type-safe signum, adapted from
// http://stackoverflow.com/questions/1903954/is-there-a-standard-sign-function-signum-sgn-in-c-c
// Returns +1 for positive values, -1 for negative values, and 0 otherwise
// (both comparisons are false for NaN, so NaN also maps to 0).
template<typename Dtype>
inline int8_t caffe_sign(Dtype val) {
  const bool is_positive = Dtype(0) < val;
  const bool is_negative = val < Dtype(0);
  return static_cast<int8_t>(is_positive) - static_cast<int8_t>(is_negative);
}
114 
// The following two macros are modifications of DEFINE_VSL_UNARY_FUNC
// in include/caffe/util/mkl_alternate.hpp authored by @Rowland Depp.
// Please refer to commit 7e8ef25c7 of the boost-eigen branch.
// Git cherry picking that commit caused a conflict hard to resolve and
// copying that file is inconvenient for code reviewing.
// So they have to be pasted here temporarily.
//
// Expands to a template function caffe_cpu_<name>(n, x, y) that applies
// `operation` (an expression written in terms of x[i] and y[i]) at every
// index i in [0, n).  Uses glog checks to require n > 0 and non-null
// input/output pointers.
#define DEFINE_CAFFE_CPU_UNARY_FUNC(name, operation) \
  template<typename Dtype> \
  void caffe_cpu_##name(const int n, const Dtype* x, Dtype* y) { \
    CHECK_GT(n, 0); CHECK(x); CHECK(y); \
    for (int i = 0; i < n; ++i) { \
      operation; \
    } \
  }
129 
// caffe_cpu_sign: output is 1 for the positives, 0 for zero, and -1 for the
// negatives.
DEFINE_CAFFE_CPU_UNARY_FUNC(sign, y[i] = caffe_sign<Dtype>(x[i]));

// caffe_cpu_sgnbit: returns a nonzero value if the input has its sign bit
// set.  The name sgnbit is meant to avoid conflicts with std::signbit in
// the macro.
// The extra parens are needed because CUDA < 6.5 defines signbit as a macro,
// and we don't want that to expand here when CUDA headers are also included.
DEFINE_CAFFE_CPU_UNARY_FUNC(sgnbit, \
    y[i] = static_cast<bool>((std::signbit)(x[i])));

// caffe_cpu_fabs: element-wise absolute value, y[i] = |x[i]|.
DEFINE_CAFFE_CPU_UNARY_FUNC(fabs, y[i] = std::fabs(x[i]));
141 
// Out-of-place scaling: y = alpha * x over n elements (x is not modified).
template <typename Dtype>
void caffe_cpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y);
144 
#ifndef CPU_ONLY  // GPU

// Decaf gpu gemm provides an interface that is almost the same as the cpu
// gemm function - following the c convention and calling the fortran-order
// gpu code under the hood.
// C = alpha * op(A) * op(B) + beta * C on the device.
template <typename Dtype>
void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const Dtype alpha, const Dtype* A, const Dtype* B, const Dtype beta,
    Dtype* C);

// GPU counterpart of caffe_cpu_gemv: y = alpha * op(A) * x + beta * y.
template <typename Dtype>
void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N,
    const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta,
    Dtype* y);

// BLAS-style axpy on the device: Y += alpha * X, over N elements.
template <typename Dtype>
void caffe_gpu_axpy(const int N, const Dtype alpha, const Dtype* X,
    Dtype* Y);

// Scaled blend on the device: Y = alpha * X + beta * Y.
template <typename Dtype>
void caffe_gpu_axpby(const int N, const Dtype alpha, const Dtype* X,
    const Dtype beta, Dtype* Y);

// Copies N *bytes* from X to Y.  NOTE(review): host/device direction is not
// expressed in this signature -- presumably resolved in the definition
// (math_functions.cu); confirm before relying on a particular direction.
void caffe_gpu_memcpy(const size_t N, const void *X, void *Y);

// Sets all N elements of the device buffer X to alpha.
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype *X);
173 
// Fills the first N *bytes* of device memory X with the byte value alpha
// via cudaMemset (CUDA_CHECK aborts on a CUDA error).
// NOTE(review): this function already lies inside the file-level
// `#ifndef CPU_ONLY` region opened above, so the #else / NO_GPU branch
// below can never be compiled; the inner guard is redundant but harmless.
inline void caffe_gpu_memset(const size_t N, const int alpha, void* X) {
#ifndef CPU_ONLY
  CUDA_CHECK(cudaMemset(X, alpha, N));  // NOLINT(caffe/alt_fn)
#else
  NO_GPU;
#endif
}
181 
// X[i] += alpha on the device, over N elements.
template <typename Dtype>
void caffe_gpu_add_scalar(const int N, const Dtype alpha, Dtype *X);

// BLAS-style scal on the device: X *= alpha.
template <typename Dtype>
void caffe_gpu_scal(const int N, const Dtype alpha, Dtype *X);

// Overload taking an explicit CUDA stream for the launch.  NOTE(review):
// the inner guard is redundant inside the enclosing #ifndef CPU_ONLY
// region; kept as-is.
#ifndef CPU_ONLY
template <typename Dtype>
void caffe_gpu_scal(const int N, const Dtype alpha, Dtype* X, cudaStream_t str);
#endif

// Element-wise binary arithmetic on the device: y = a (op) b.
template <typename Dtype>
void caffe_gpu_add(const int N, const Dtype* a, const Dtype* b, Dtype* y);

template <typename Dtype>
void caffe_gpu_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y);

template <typename Dtype>
void caffe_gpu_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y);

template <typename Dtype>
void caffe_gpu_div(const int N, const Dtype* a, const Dtype* b, Dtype* y);

// Element-wise unary functions on the device: y[i] = f(a[i]).
template <typename Dtype>
void caffe_gpu_abs(const int n, const Dtype* a, Dtype* y);

template <typename Dtype>
void caffe_gpu_exp(const int n, const Dtype* a, Dtype* y);

template <typename Dtype>
void caffe_gpu_log(const int n, const Dtype* a, Dtype* y);

// Element-wise power on the device: y[i] = a[i] raised to the scalar b.
template <typename Dtype>
void caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y);

// caffe_gpu_rng_uniform with two arguments generates integers in the range
// [0, UINT_MAX].
void caffe_gpu_rng_uniform(const int n, unsigned int* r);

// caffe_gpu_rng_uniform with four arguments generates floats in the range
// (a, b] (strictly greater than a, less than or equal to b) due to the
// specification of curandGenerateUniform. With a = 0, b = 1, just calls
// curandGenerateUniform; with other limits will shift and scale the outputs
// appropriately after calling curandGenerateUniform.
template <typename Dtype>
void caffe_gpu_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r);

// Fills r with n Gaussian samples (mean mu, std dev sigma) on the device.
template <typename Dtype>
void caffe_gpu_rng_gaussian(const int n, const Dtype mu, const Dtype sigma,
    Dtype* r);

// Fills r with n Bernoulli(p) samples on the device.
template <typename Dtype>
void caffe_gpu_rng_bernoulli(const int n, const Dtype p, int* r);

// Dot product of x and y; the scalar result is written through `out`
// (host vs device residency of `out` is fixed by the definition --
// confirm in math_functions.cu).
template <typename Dtype>
void caffe_gpu_dot(const int n, const Dtype* x, const Dtype* y, Dtype* out);

// Sum of absolute values (BLAS asum); result written through `y`.
template <typename Dtype>
void caffe_gpu_asum(const int n, const Dtype* x, Dtype* y);

// Element-wise sign / sign-bit / absolute value on the device --
// presumably instantiated in math_functions.cu via
// DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC below.
template<typename Dtype>
void caffe_gpu_sign(const int n, const Dtype* x, Dtype* y);

template<typename Dtype>
void caffe_gpu_sgnbit(const int n, const Dtype* x, Dtype* y);

template <typename Dtype>
void caffe_gpu_fabs(const int n, const Dtype* x, Dtype* y);

// Out-of-place scaling on the device: y = alpha * x.
template <typename Dtype>
void caffe_gpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y);
253 
// Defines a templated __global__ kernel that applies `operation` (written
// in terms of x[index] and y[index]) to every element, plus explicit
// float/double specializations of caffe_gpu_<name> that launch it.
// NOTE(review): the specializations are non-template function definitions,
// so this macro must be invoked from exactly one translation unit
// (presumably math_functions.cu) -- invoking it from multiple .cu files
// would violate the one-definition rule.
#define DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(name, operation) \
template<typename Dtype> \
__global__ void name##_kernel(const int n, const Dtype* x, Dtype* y) { \
  CUDA_KERNEL_LOOP(index, n) { \
    operation; \
  } \
} \
template <> \
void caffe_gpu_##name<float>(const int n, const float* x, float* y) { \
  /* NOLINT_NEXT_LINE(whitespace/operators) */ \
  name##_kernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( \
      n, x, y); \
} \
template <> \
void caffe_gpu_##name<double>(const int n, const double* x, double* y) { \
  /* NOLINT_NEXT_LINE(whitespace/operators) */ \
  name##_kernel<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( \
      n, x, y); \
}
273 
274 #endif // !CPU_ONLY
275 
276 } // namespace caffe
277 
278 #endif // CAFFE_UTIL_MATH_FUNCTIONS_H_
// (Documentation-extraction residue, unrelated to this header:
// "A layer factory that allows one to register layers..." -- Definition: blob.hpp:14)