Jpp 20.0.0-rc.9-29-gccc23c492-D
the software that should make you happy
Loading...
Searching...
No Matches
JGradient.hh
Go to the documentation of this file.
1#ifndef __JFIT__JGRADIENT__
2#define __JFIT__JGRADIENT__
3
4#include <limits>
5#include <vector>
6#include <cmath>
7#include <memory>
8#include <ostream>
9#include <iomanip>
10
11#include "JLang/JManip.hh"
12#include "JLang/JException.hh"
13#include "Jeep/JMessage.hh"
14
15
/**
 * Conjugate gradient fit utilities.
 *
 * \author mdejong
 */

namespace JFIT {}
// Convenience alias: every JFIT symbol is also reachable through namespace JPP.
namespace JPP { using namespace JFIT; }
22
23namespace JFIT {
24
25 /**
26 * Auxiliary data structure for fit parameter.
27 */
28 struct JParameter_t {
29 /**
30 * Virtual destructor.
31 */
32 virtual ~JParameter_t()
33 {}
34
35 /**
36 * Apply step.
37 *
38 * \param step step
39 */
40 virtual void apply(const double step) = 0;
41 };
42
43
44 /**
45 * Auxiliary data structure for editable parameter.
46 */
47 struct JModifier_t :
48 public std::shared_ptr<JParameter_t>
49 {
50 /**
51 * Constructor.
52 *
53 * \param name name
54 * \param parameter parameter
55 * \param value value
56 */
57 JModifier_t(const std::string& name,
58 JParameter_t* parameter,
59 const double value) :
60 std::shared_ptr<JParameter_t>(parameter),
61 name (name),
63 {}
64
65 std::string name;
66 double value;
67 };
68
69
70 /**
71 * Conjugate gradient fit.
72 */
73 struct JGradient :
74 public std::vector<JModifier_t>
75 {
76 /**
77 * Constructor.
78 *
79 * The number of iterations and epsilon refer to the number of steps and
80 * the distance to the minimum, respectively.\n
81 * The number of extra steps can be used to overcome a possible hurdle on the way.
82 *
83 * \param Nmax maximum number of iterations
84 * \param Nextra maximum number of extra steps
85 * \param epsilon epsilon
86 * \param debug debug
87 */
88 JGradient(const size_t Nmax = std::numeric_limits<size_t>::max(),
89 const size_t Nextra = 0,
90 const double epsilon = 1.0e-4,
91 const int debug = 3) :
92 Nmax (Nmax),
93 Nextra (Nextra),
95 debug (debug)
96 {}
97
98
99 /**
100 * Fit.
101 *
102 * The template parameter should provide for the following function operator.
103 * <pre>
104 * double operator()(int option);
105 * </pre>
106 * The value of the option corresponds to the following cases.
107 * - 0 => step wise improvement of the chi2;
108 * - 1 => evaluation of the chi2 before the determination of the gradient of the chi2; and
109 * - 2 => evaluation of the derivative of the chi2 to each fit parameter.
110 *
111 * \param getChi2 chi2 function
112 * \return chi2
113 */
114 template<class T>
115 double operator()(const T& getChi2)
116 {
117 using namespace std;
118 using namespace JPP;
119
120 vector<double> chi2(5, numeric_limits<double>::max());
121
122 if (this->empty()) {
123 return numeric_limits<double>::max();
124 }
125
126 chi2[0] = this->evaluate(getChi2);
127
128 const size_t N = this->size();
129
130 vector<double> H(N);
131 vector<double> G(N);
132
133 for (size_t i = 0; i != N; ++i) {
134 G[i] = -1.0 * gradient[i];
135 H[i] = G[i];
136 gradient[i] = H[i];
137 }
138
140
142
143 DEBUG("chi2[0] " << setw(4) << numberOfIterations << ' ' << FIXED(12,5) << chi2[0] << endl);
144
145 // minimise chi2 in direction of gradient
146
147 chi2[1] = chi2[0];
148 chi2[2] = chi2[1];
149
150 size_t m = 0;
151
152 for (double ds = 1.0; ds > 1.0e-3; ) {
153
154 this->move(+1.0 * ds);
155
156 chi2[3] = getChi2(0);
157
158 DEBUG("chi2[3] " << setw(4) << m << ' ' << FIXED(12,5) << chi2[3] << ' ' << FIXED(12,5) << ds << endl);
159
160 if (chi2[3] < chi2[2]) {
161
162 chi2[1] = chi2[2];
163 chi2[2] = chi2[3];
164
165 m = 0;
166
167 continue;
168 }
169
170 if (ds == 1.0) {
171
172 if (m == 0) {
173 chi2[4] = chi2[3];
174 }
175
176 if (m != Nextra) {
177
178 ++m;
179
180 continue;
181
182 } else {
183
184 for ( ; m != 0; --m) {
185 this->move(-1.0 * ds);
186 }
187
188 chi2[3] = chi2[4];
189 }
190 }
191
192 this->move(-1.0 * ds);
193
194 if (chi2[2] < chi2[3]) {
195
196 // final step based on parabolic interpolation through following points
197 //
198 // x1 = -1 * ds -> chi2[1]
199 // x2 = 0 * ds -> chi2[2]
200 // x3 = +1 * ds -> chi2[3]
201
202 const double f21 = chi2[2] - chi2[1]; // f(x2) - f(x1)
203 const double f23 = chi2[2] - chi2[3]; // f(x2) - f(x3)
204
205 const double xs = 0.5 * (f21 - f23) / (f23 + f21);
206
207 this->move(+1.0 * xs * ds);
208
209 chi2[3] = getChi2(0);
210
211 if (chi2[3] < chi2[2]) {
212
213 chi2[2] = chi2[3];
214
215 } else {
216
217 this->move(-1.0 * xs * ds);
218
219 chi2[2] = getChi2(0);
220 }
221
222 DEBUG("chi2[2] " << setw(4) << numberOfIterations << ' ' << FIXED(12,5) << chi2[2] << ' ' << SCIENTIFIC(12,5) << ds << endl);
223
224 break;
225
226 } else {
227
228 ds *= 0.5;
229 }
230 }
231
232 if (fabs(chi2[2] - chi2[0]) < epsilon * 0.5 * (fabs(chi2[0]) + fabs(chi2[2]))) {
233
234 chi2[0] = chi2[2];
235
236 break;
237 }
238
239 chi2[0] = this->evaluate(getChi2);
240
241 double gg = 0.0;
242 double dgg = 0.0;
243
244 for (size_t i = 0; i != N; ++i){
245 gg += G[i]*G[i];
246 dgg += (gradient[i] + G[i]) * gradient[i];
247 }
248
249 if (gg == 0.0) {
250 break;
251 }
252
253 dgg /= gg;
254
255 for (size_t i = 0; i != N; ++i){
256 G[i] = -1.0 * gradient[i];
257 H[i] = G[i] + dgg * H[i];
258 gradient[i] = H[i];
259 }
260 }
261
262 DEBUG("chi2[0] " << setw(4) << numberOfIterations << ' ' << FIXED(12,5) << chi2[0] << endl);
263
264 return chi2[0];
265 }
266
267
268 size_t Nmax; //!< maximum number of iterations
269 size_t Nextra; //!< maximum number of extra steps
270 double epsilon; //!< epsilon
271 int debug; //!< debug
272
274
275 private:
276 /**
277 * Evaluate gradient.
278 *
279 * \return chi2
280 */
281 template<class T>
282 double evaluate(const T& getChi2)
283 {
284 using namespace std;
285 using namespace JPP;
286
287 const size_t N = this->size();
288
289 gradient.resize(N);
290
291 for (std::vector<double>::iterator i = gradient.begin(); i != gradient.end(); ++i) {
292 *i = 0.0;
293 }
294
295 const double x0 = getChi2(1);
296
297 size_t width = 1;
298
299 for (size_t i = 0; i != N; ++i) {
300 if ((*this)[i].name.size() > width) {
301 width = (*this)[i].name.size();
302 }
303 }
304
305 double V = 0.0;
306
307 for (size_t i = 0; i != N; ++i) {
308
309 if ((*this)[i].value != 0.0) {
310
311 (*this)[i]->apply(+0.5 * (*this)[i].value);
312
313 const double x1 = getChi2(2);
314
315 (*this)[i]->apply(-0.5 * (*this)[i].value);
316 (*this)[i]->apply(-0.5 * (*this)[i].value);
317
318 const double x2 = getChi2(2);
319
320 gradient[i] = x1 - x2;
321
322 (*this)[i]->apply(+0.5 * (*this)[i].value);
323
324 DEBUG(setw(width) << left << (*this)[i].name << right << ' ' << FIXED(12,5) << (*this)[i].value << ' ' << FIXED(12,5) << gradient[i] << endl);
325
326 } else {
327
328 gradient[i] = 0.0;
329 }
330
331 V += gradient[i] * gradient[i];
332 }
333
334 DEBUG(setw(width) << left << "|gradient|" << right << ' ' << FIXED(12,5) << sqrt(V) << endl);
335
336 return x0;
337 }
338
339
340 /**
341 * Move.
342 *
343 * \param factor factor
344 */
345 void move(const double factor)
346 {
347 if (factor > 0.0) {
348 for (size_t i = 0; i != this->size(); ++i) {
349 (*this)[ i ]->apply((*this)[ i ].value * gradient[ i ] * factor);
350 }
351 } else if (factor < 0.0) {
352 for (size_t i = this->size(); i != 0; --i) {
353 (*this)[i-1]->apply((*this)[i-1].value * gradient[i-1] * factor);
354 }
355 }
356 }
357
358 std::vector<double> gradient;
359 };
360}
361
362#endif
Auxiliary classes and methods for linear and iterative data regression.
Definition JEnergy.hh:15
double getChi2(const double P)
Get chi2 corresponding to given probability.
Conjugate gradient fit.
Definition JGradient.hh:75
size_t Nmax
maximum number of iterations
Definition JGradient.hh:268
void move(const double factor)
Move.
Definition JGradient.hh:345
std::vector< double > gradient
Definition JGradient.hh:358
double evaluate(const T &getChi2)
Evaluate gradient.
Definition JGradient.hh:282
double operator()(const T &getChi2)
Fit.
Definition JGradient.hh:115
double epsilon
epsilon
Definition JGradient.hh:270
JGradient(const size_t Nmax=std::numeric_limits< size_t >::max(), const size_t Nextra=0, const double epsilon=1.0e-4, const int debug=3)
Constructor.
Definition JGradient.hh:88
size_t numberOfIterations
Definition JGradient.hh:273
size_t Nextra
maximum number of extra steps
Definition JGradient.hh:269
Auxiliary data structure for editable parameter.
Definition JGradient.hh:49
std::string name
Definition JGradient.hh:65
JModifier_t(const std::string &name, JParameter_t *parameter, const double value)
Constructor.
Definition JGradient.hh:57
Auxiliary data structure for fit parameter.
Definition JGradient.hh:28
virtual void apply(const double step)=0
Apply step.
virtual ~JParameter_t()
Virtual destructor.
Definition JGradient.hh:32