Logo ROOT   6.30.04
Reference Guide
 All Namespaces Files Pages
ActivationFunctions.hxx
Go to the documentation of this file.
1 // @(#)root/tmva/tmva/dnn:$Id$
2 // Author: Simon Pfreundschuh 19/07/16
3 
4 /*************************************************************************
5  * Copyright (C) 2016, Simon Pfreundschuh *
6  * All rights reserved. *
7  * *
8  * For the licensing terms see $ROOTSYS/LICENSE. *
9  * For the list of contributors see $ROOTSYS/README/CREDITS. *
10  *************************************************************************/
11 
12  ///////////////////////////////////////////////////////////////////
13  // Implementation of the activation functions for multi-threaded //
 14  // CPU architectures using ROOT's TThreadExecutor and BLAS. //
15  ///////////////////////////////////////////////////////////////////
16 
18 #include <math.h>
19 
20 #ifdef R__HAS_VDT
21 #include "vdt/tanh.h"
22 #define TANH_IMPL_X vdt::fast_tanhf(x)
23 #else
24 #define TANH_IMPL_X tanh(x)
25 #endif
26 
27 
28 namespace TMVA
29 {
30 namespace DNN
31 {
32 
33 //______________________________________________________________________________
template<typename AFloat>
void TCpu<AFloat>::ActivationFunctionForward(Tensor_t & X, EActivationFunction activFunct,
                                             const ActivationDescriptor_t /* activationDescr */,
                                             const double /* coef */, const AFloat /*alpha */, const AFloat /*beta*/)
{
   // Forward pass of an activation layer: apply the selected activation function
   // element-wise, in place, to the tensor X.
   // The descriptor/coef/alpha/beta parameters mirror the GPU-style interface but
   // are ignored here: scaling and translation is not yet implemented
   TMVA::DNN::evaluate<TCpu<AFloat>>( X, activFunct);
}
42 //______________________________________________________________________________
template<typename AFloat>
void TCpu<AFloat>::ActivationFunctionBackward(Tensor_t & dX, const Tensor_t & /* Y */,
                                              const Tensor_t & dY, const Tensor_t & X,
                                              EActivationFunction activFunct,
                                              const ActivationDescriptor_t /* activationDescr */,
                                              const AFloat /* alpha */, const AFloat /* beta */)
{
   // Backward pass of an activation layer: computes dX = f'(X) * dY element-wise,
   // where f is the activation selected by activFunct and X is the layer input.
   // scaling and translation not yet implemented (alpha/beta are ignored)
   // output tensor (Y) could also be used to speed up derivative calculation
   // (e.g. for sigmoid/tanh), but is unused here.
   // compute dx = f'(x)
   TMVA::DNN::evaluateDerivative<TCpu<AFloat>>(dX, activFunct, X);
   // Compute element-wise product. dx = f'(x) * dY
   Hadamard(dX, dY);
}
57 //______________________________________________________________________________
58 template<typename AFloat>
59 void TCpu<AFloat>::IdentityDerivative(TCpuTensor<AFloat> & B,
60  const TCpuTensor<AFloat> &/*A*/)
61 {
62  auto f = [](AFloat) {return 1.0;};
63  B.Map(f);
64 }
65 
66 //______________________________________________________________________________
67 template<typename AFloat>
68 void TCpu<AFloat>::Relu(TCpuTensor<AFloat> & B)
69 {
70  auto f = [](AFloat x) {return (x < 0.0) ? 0.0 : x;};
71  B.Map(f);
72 }
73 
74 //______________________________________________________________________________
75 template<typename AFloat>
76 void TCpu<AFloat>::ReluDerivative(TCpuTensor<AFloat> & B,
77  const TCpuTensor<AFloat> &A)
78 {
79  auto f = [](AFloat x) {return (x < 0.0) ? 0.0 : 1.0;};
80  B.MapFrom(f, A);
81 }
82 
83 //______________________________________________________________________________
84 template<typename AFloat>
85 void TCpu<AFloat>::Sigmoid(TCpuTensor<AFloat> & B)
86 {
87  auto f = [](AFloat x) {return 1.0 / (1.0 + exp(-x));};
88  B.Map(f);
89 }
90 
91 //______________________________________________________________________________
92 template<typename AFloat>
93 void TCpu<AFloat>::SigmoidDerivative(TCpuTensor<AFloat> & B,
94  const TCpuTensor<AFloat> &A)
95 {
96  auto f = [](AFloat x) {
97  AFloat sig = 1.0 / (1.0 + exp(-x));
98  return sig * (1.0 - sig);
99  };
100  B.MapFrom(f, A);
101 }
102 
103 //______________________________________________________________________________
104 template<typename AFloat>
105 void TCpu<AFloat>::Tanh(TCpuTensor<AFloat> & B)
106 {
107  auto f = [](AFloat x) {return TANH_IMPL_X;};
108  B.Map(f);
109 }
110 
111 //______________________________________________________________________________
112 template<typename AFloat>
113 void TCpu<AFloat>::TanhDerivative(TCpuTensor<AFloat> & B,
114  const TCpuTensor<AFloat> &A)
115 {
116  auto f = [](AFloat x) {
117  AFloat t = TANH_IMPL_X;
118  return 1 - t * t;
119  };
120  B.MapFrom(f, A);
121 }
122 
123 //______________________________________________________________________________
124 template<typename AFloat>
125 void TCpu<AFloat>::SymmetricRelu(TCpuTensor<AFloat> & B)
126 {
127  auto f = [](AFloat x) {return fabs(x);};
128  B.Map(f);
129 }
130 
131 //______________________________________________________________________________
132 template<typename AFloat>
133 void TCpu<AFloat>::SymmetricReluDerivative(TCpuTensor<AFloat> & B,
134  const TCpuTensor<AFloat> &A)
135 {
136  auto f = [](AFloat x) {
137  return (x < 0.0) ? -1.0 : 1.0;
138  };
139  B.MapFrom(f, A);
140 }
141 
142 //______________________________________________________________________________
143 template<typename AFloat>
144 void TCpu<AFloat>::SoftSign(TCpuTensor<AFloat> & B)
145 {
146  auto f = [](AFloat x) {return x / (1 + fabs(x));};
147  B.Map(f);
148 }
149 
150 //______________________________________________________________________________
151 template<typename AFloat>
152 void TCpu<AFloat>::SoftSignDerivative(TCpuTensor<AFloat> & B,
153  const TCpuTensor<AFloat> &A)
154 {
155  auto f = [](AFloat x) {
156  x = 1.0 + fabs(x);
157  x = 1.0 / (x * x);
158  return x;
159  };
160  B.MapFrom(f, A);
161 }
162 
163 //______________________________________________________________________________
164 template<typename AFloat>
165 void TCpu<AFloat>::Gauss(TCpuTensor<AFloat> & B)
166 {
167  auto f = [](AFloat x) {return exp(- x * x);};
168  B.Map(f);
169 }
170 
171 //______________________________________________________________________________
172 template<typename AFloat>
173 void TCpu<AFloat>::GaussDerivative(TCpuTensor<AFloat> & B,
174  const TCpuTensor<AFloat> &A)
175 {
176  auto f = [](AFloat x) {return - 2.0 * x * exp(- x * x);};
177  B.MapFrom(f, A);
178 }
179 
180 } // namespace DNN
181 } // namespace TMVA