GeneralFunctionMinimizer_implementation.h

//===========================================================================
// GoTools - SINTEF Geometry Tools version 1.1
//
// GoTools module: CORE
//
// Copyright (C) 2000-2007 SINTEF ICT, Applied Mathematics, Norway.
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation version 2 of the License.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc.,
// 59 Temple Place - Suite 330,
// Boston, MA  02111-1307, USA.
//
// Contact information: E-mail: tor.dokken@sintef.no
// SINTEF ICT, Department of Applied Mathematics,
// P.O. Box 124 Blindern,
// 0314 Oslo, Norway.
//
// Other licenses are also available for this software, notably licenses
// for:
// - Building commercial software.
// - Building software whose source code you wish to keep private.
//===========================================================================
#ifndef _GENERAL_FUNCTIONMINIMIZER_IMPLEMENTATION_H
#define _GENERAL_FUNCTIONMINIMIZER_IMPLEMENTATION_H

#include <math.h>   // sqrt, fabs (used unqualified below)
#include <limits>
#include <assert.h>
#include "errormacros.h"
#include <algorithm>
#include <vector>
#include <iostream> // @@debug

namespace Go {

//===========================================================================
template<class Functor>
const double FunctionMinimizer<Functor>::root_machine_precision_ = 
    sqrt(numerical_tolerance(1));

template<class Functor>
const double FunctionMinimizer<Functor>::rel_tol_ = 1.0e-6;

template<class Functor>
const double FunctionMinimizer<Functor>::perturbation_ = 1.0e-10;

template<class Functor>
const double FunctionMinimizer<Functor>::golden_ratio_ = 0.61803399; // 1/phi = phi - 1

template<class Functor>
const double FunctionMinimizer<Functor>::default_partition_ = 
    1.0 / 20.0; // default partition ratio of an interval

template<class Functor>
const double FunctionMinimizer<Functor>::tiny_ = 1.0e-20;  // a really tiny number

template<class Functor>
const int FunctionMinimizer<Functor>::max_iter_ = 100;
//===========================================================================

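// NOTE (editorial): the 'Functor' template argument is not defined in this file.
// From the calls made below (fun_(iter), fun_.grad(iter, out_iter), fun_.minPar(i),
// fun_.maxPar(i)) one can infer the interface it is assumed to provide.  The class
// below is only an illustrative sketch of such a functor (a quadratic bowl on a box
// domain); it is not part of GoTools, and the name 'QuadraticBowl' is made up here.
#if 0 // illustrative sketch only
class QuadraticBowl
{
public:
    explicit QuadraticBowl(int n) : n_(n) {}

    // function value; 'par' iterates over the n parameter values
    template<class Iter>
    double operator()(Iter par) const
    {
        double sum = 0.0;
        for (int i = 0; i < n_; ++i, ++par) {
            const double d = *par - 0.3;
            sum += d * d;
        }
        return sum;
    }

    // gradient; reads n parameters from 'par', writes n partial derivatives to 'g'
    template<class InIter, class OutIter>
    void grad(InIter par, OutIter g) const
    {
        for (int i = 0; i < n_; ++i, ++par, ++g) {
            *g = 2.0 * (*par - 0.3);
        }
    }

    // box-shaped parameter domain [0,1]^n
    double minPar(int) const { return 0.0; }
    double maxPar(int) const { return 1.0; }

private:
    int n_;
};
#endif
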
//===========================================================================
template<class Functor>
void minimise_conjugated_gradient(FunctionMinimizer<Functor>& dfmin)
//===========================================================================
{
    const double TOL = std::numeric_limits<double>::epsilon(); //1.0e-8;
    const double EPS = 1.0e-10;
    // minimising the 'dfmin' function using conjugated gradients.
    const int N = dfmin.numPars();
    Point gradient(N), old_gradient(N), dir(N);
    dfmin.grad(old_gradient);
    dir = -old_gradient;
    double old_val = dfmin.fval();
    while(true) {

        // make sure the direction is not uphill (is this already guaranteed??)
        // and truncate it if we are at the border of the domain
        if (dir * old_gradient > 0) {
            dir *= -1;
        }
        for (int i = 0; i < N; ++i) {
            if ((dfmin.atMin(i) && dir[i] < 0) || (dfmin.atMax(i) && dir[i] > 0)) {
                dir[i] = 0;
                // the conjugate gradient cycle is broken since we modified 'dir'.
                // Restarting the cycle.
            }
        }

        // minimize along this direction
        bool hit_domain_edge = false;
        double new_val = dfmin.minimize(dir, hit_domain_edge); 
        if (2.0 * fabs(new_val - old_val) <= TOL * (fabs(new_val) + fabs(old_val) + EPS)) {
            // we have reached a minimum
            break;
        } else {
            old_val = new_val;
        }

        // choose a new direction
        dfmin.grad(gradient);
        if (!hit_domain_edge) {
            // we are still in a conjugated gradient cycle.  Choose the new direction
            // using conjugated gradients (Polak-Ribiere variant):
            //   dir <- -gradient + factor * dir, with
            //   factor = gradient * (gradient - old_gradient) / |old_gradient|^2

            Point diff = gradient - old_gradient;
            double factor = gradient * diff / old_gradient.length2();
            Point dir_saved = dir;
            dir *= factor;
            dir -= gradient;

            if (dir * old_gradient > 0) {
                dir *= -1;
            }

            bool on_boundary = false;
            for (int i = 0; i != N; ++i) {
                if ((dfmin.atMin(i) && dir[i] < 0) || (dfmin.atMax(i) && dir[i] > 0)) {
                    on_boundary = true;
                    gradient[i] = old_gradient[i] = 0;
                    dir_saved[i] = 0; // we will have to recalculate 'dir' based on this vector
                }
            }

            if (on_boundary) {
                // the direction we chose would take us out of the domain.  Reduce the problem
                // to conjugated gradients in a lower dimension.
                dir = dir_saved;
                diff = gradient - old_gradient;
                factor = gradient * diff / old_gradient.length2();
                dir *= factor;
                dir -= gradient;
            } 
            old_gradient = gradient;
        } else {
            // We ran into an edge of the domain.  Re-initialising the conjugate
            // gradient method using steepest descent.
            dir = - gradient;
            for (int i = 0; i != N; ++i) {
                if ((dfmin.atMin(i) && dir[i] < 0) || (dfmin.atMax(i) && dir[i] > 0)) {
                    dir[i] = 0;
                    gradient[i] = 0;
                }
            }
            old_gradient = gradient;
        }
    }
}

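// NOTE (editorial): a minimal usage sketch of the free function above, assuming the
// hypothetical 'QuadraticBowl' functor sketched earlier in this file.  Accessors for
// the final parameter values are declared elsewhere in the class and are not shown.
#if 0 // illustrative sketch only
inline void example_minimise_quadratic_bowl()
{
    const int N = 2;
    QuadraticBowl fun(N);
    double seed[N] = { 0.9, 0.1 };   // start point inside the [0,1]^2 domain
    FunctionMinimizer<QuadraticBowl> fmin(N, fun, seed, 1.0e-8);
    minimise_conjugated_gradient(fmin);
    double minimum_value = fmin.fval();  // should be close to 0, reached near (0.3, 0.3)
    (void)minimum_value;
}
#endif
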
//===========================================================================
template<class Functor>
inline FunctionMinimizer<Functor>::
FunctionMinimizer(int num_param, const Functor& fun, const double* const seed, double tol)
    : fun_(fun), par_(seed, seed + num_param), 
      at_min_(num_param), at_max_(num_param), cached_value_updated_(false), 
      cached_grad_updated_(false), cached_grad_(num_param), 
      minimization_tol_(tol > root_machine_precision_ ? tol : root_machine_precision_), 
      param_tol_(num_param)
//===========================================================================
{
    for (int i = 0; i < num_param; ++i) {
        param_tol_[i] = rel_tol_ * (fun_.maxPar(i) - fun_.minPar(i));
    }
    
    checkBorder(); // determine at_min_ and at_max_
    resetCache();
}

//===========================================================================
template<class Functor>
inline void FunctionMinimizer<Functor>::resetCache() const
//===========================================================================
{
    cached_value_updated_ = false;
    cached_grad_updated_ = false;
}

//===========================================================================
template<class Functor>
inline void FunctionMinimizer<Functor>::checkBorder()
//===========================================================================
{
    int n = numPars();
    for (int i = 0; i < n; ++i) {
        at_min_[i] = par_[i] < (fun_.minPar(i) + param_tol_[i]);
        at_max_[i] = par_[i] > (fun_.maxPar(i) - param_tol_[i]);
    }
}

//===========================================================================
template<class Functor>
inline double FunctionMinimizer<Functor>::minimize(const Point& dir, 
                                                   bool& hit_domain_edge)
//===========================================================================
{
    static bool rerun = false; // 'true' if we have recursively entered this function
    // flipping dir if it conflicts with the gradient of the function (will this ever happen??)
    Point oriented_dir(numPars());
    if (!orientDirection(dir, oriented_dir)) {
        // the gradient of the function was exactly perpendicular to 'dir'.  We are already
        // at a minimum along this direction.
        return fval();
    }
    // deciding the maximum possible steplength in the given direction
    double max_step = determineMaxSteplength(oriented_dir);
    if (max_step < tiny_) {
        // already on the boundary in this direction.
        return fval();
    }
    // bracketing the minimum in the given direction
    double bracket[3];   // startpoint, midpoint and endpoint of the bracketing interval
    double fval_brak[3]; // function value at the three bracketing points
    
    if (bracketInterval(oriented_dir, max_step, bracket, fval_brak)) {
        // bracketing of the interval was successful.  Applying Brent's method.
        hit_domain_edge = false;
        return linminBrent(oriented_dir, bracket, fval_brak);
    } 
    // We were not able to bracket the interval.  This may either be because the search
    // interval grew all the way to 'max_step' without being able to bracket a minimum,
    // or because the search interval shrank and collapsed into a singularity, indicating 
    // that although we know this to be a direction of descent, numerically the function
    // is not able to produce a smaller value in the neighbourhood of the departing point.
    if (fval_brak[1] >= fval_brak[0]) {
        ASSERT(bracket[2] < max_step);
        // the interval has collapsed onto the starting point.  We consider this to be 
        // a minimum (its directional derivative, although not zero, is so small that 
        // it does not modify the function within machine precision).
        return fval();
    }
    ASSERT(bracket[2] == max_step);
    // If we got here, we have maximally expanded the interval without being able to
    // verify that we have bracketed a minimum.  The conclusion is that the minimum
    // is located very close to max_step.  Our strategy now is to find out what happens
    // at the end of the interval.  If the gradient is positive, we know that we have 
    // just passed the minimum, and we can search backwards from the endpoint.  If not,
    // we conclude that this is a minimum.  We check if this point gives a smaller 
    // function value than where we stand now, and if that is the case, we move there
    // before returning.
    Point end_grad(numPars());
    Point temp = par_ + max_step * oriented_dir;
    grad(temp, end_grad); 
    double max_step_val = fval(temp);

    if (scalarProductSign(end_grad, oriented_dir) == 1) {
        // the derivative is POSITIVE at 'max_step'.  There MUST be a minimum that we 
        // missed.  Let us search backwards for it.
        if (!rerun) {
            moveUV(oriented_dir, max_step); // moving to the end of the interval
            rerun = true;
            double tmp = minimize(dir, hit_domain_edge); // search backwards
            rerun = false;
            return tmp;
        }
        MESSAGE("Warning: unable to pinpoint exact minimum in FunctionMinimizer::minimize()");
    }
    // the derivative is NEGATIVE at 'max_step', and we conclude that this is a minimum
    // as good as any.  If it gives a smaller function value than where we stand now,
    // we will move there.  Otherwise, we stay put and consider ourselves already at
    // a minimum.
    if (max_step_val < fval()) {
        hit_domain_edge = true;
        moveUV(oriented_dir, max_step);
        return max_step_val;
    }
    // unable to find any point better than where we are currently standing
    hit_domain_edge = false;
    return fval();
}

//===========================================================================
template<class Functor> inline 
int FunctionMinimizer<Functor>::scalarProductSign(const Point& p1, const Point& p2)
//===========================================================================
{
    double prod = p1 * p2;
    double l1 = *std::max_element(p1.begin(), p1.end());
    double l2 = *std::max_element(p2.begin(), p2.end());
    // find the numerical tolerance related to the point coordinate of the largest
    // magnitude (its error dwarfs the errors of the others).
    double tol = 10 * numerical_tolerance(l1 > l2 ? l1 : l2);

    if (prod < -tol) return -1;
    if (prod > tol) return 1;

    // indiscernible from 0
    return 0;
}

//===========================================================================
template<class Functor> inline bool 
FunctionMinimizer<Functor>::orientDirection(const Point& dir, Point& result)
//===========================================================================
{
    Point start_grad(numPars());
    grad(start_grad);

    result = dir / dir.lengthInf();

    int sign = scalarProductSign(start_grad, result);

    if (sign == 1) {
        result *= -1;
    }
    return (sign != 0);
}

//===========================================================================
template<class Functor>
inline double FunctionMinimizer<Functor>::linminBrent(const Point& dir, 
                                                      const double* bracket,
                                                      const double* fval_brak)
//===========================================================================
{
    const int MAXITER = 100;
    double a = bracket[0];  double fa = fval_brak[0];
    double b = bracket[1];  double fb = fval_brak[1];
    double c = bracket[2];  double fc = fval_brak[2];

    ASSERT(a <= b && b <= c);
    ASSERT(fa >= fb && fb <= fc);

    double b2, fb2, b3, fb3;
    if (fa < fc) {
        b2 = a; fb2 = fa;
        b3 = c; fb3 = fc;
    } else {
        b2 = c; fb2 = fc;
        b3 = a; fb3 = fa;
    }
    
    double u = b;
    double last_step = std::numeric_limits<double>::max();
    double before_last_step = last_step;
    double fu;
    int iter;
    // a step of 'delta' changes parameter i by delta * dir[i].  Therefore, if we want a
    // precision of (minimization_tol_ * central bracket value) in each parameter, we must
    // use the tolerance below:
    const double central_value = fabs(b) > perturbation_ ? fabs(b) : perturbation_;
    double tol_1 = minimization_tol_ * central_value;
    // Since a, b, c and u are "fictive" function arguments (the real argument in R^n being 
    // 'par_ + u * dir'), we must check whether the minimum tolerance of this last expression
    // exceeds the tolerance we have chosen.  In that case, increase the tolerance to this
    // new value.
    double tol_2 = numerical_tolerance(par_, dir); 
    const double tol = tol_1 > tol_2 ? tol_1 : tol_2;

    // the three variables below are used to speed up convergence when we suspect that we
    // are already very close to the function minimum
    bool last_step_was_tol = false;
    bool last_step_was_positive = false;
    double btemp = b2;
    for (iter = 0; iter < MAXITER; ++iter) {
        // -------------------------------------
        // STEP 1: coming up with a point to try
        // -------------------------------------
        
        // trying parabolic interpolation
        bool use_parabolic = parabolicFit(b, fb, b2, fb2, b3, fb3, 0.5 * before_last_step, u);
        before_last_step = last_step;

        if (use_parabolic) {
            // so far, it seems like we can use a parabolic fit.  But is it inside the interval?
            use_parabolic = (u > a + tol && u < c - tol); // useful only if inside the interval
        }
        // here, we should know whether the parabolic fit is useful or not
        if (!use_parabolic) {
            // we must come up with another point to try.  Resorting to golden-section search.
            const double rdiff = c - b;
            const double ldiff = b - a;
            if (rdiff < ldiff) {
                u = a + golden_ratio_ * ldiff;
            } else {
                u = c - golden_ratio_ * rdiff;
            }
        }
        // assuring a minimum steplength
        if (fabs(u - b) < tol) {
            if (last_step_was_tol && btemp == b) { // exact equality is justified here!
                u = last_step_was_positive ? b - tol : b + tol;
            } else {
                u = (u > b) ? b + tol : b - tol;
            }
            btemp = b;
            last_step_was_positive = (u > b);
            last_step_was_tol = true;

        } else {
            last_step_was_tol = false;
        }
        last_step = fabs(b - u);
        
        // --------------------------------------------------------
        // STEP 2: trying the point and deciding what to do with it
        // --------------------------------------------------------
        
        // evaluating the function in u (and evaluating the gradient if premature end is allowed)
        
        fu = fval(par_ + u * dir);
        adjustBrackets(a, fa, b, fb, c, fc, b2, fb2, b3, fb3, u, fu);

        // end criterion: since max(c - b, b - a) = 0.5 * (c - a) + |b - 0.5 * (a + c)|,
        // this test is equivalent to requiring that 'b' lies within 2 * tol of both
        // bracket ends.
        if (fabs(b - (0.5 * (c + a))) < 2 * tol - 0.5 * (c - a)) {
            break;
        }
    }
    if (iter == MAXITER) {
        MESSAGE("Failed to converge properly in linminBrent.");
        //throw runtime_error("Failed to converge in linminBrent.");
    }

    // setting the current point at the found minimum
    moveUV(dir, b);

    return fb;
}

//===========================================================================
template<class Functor>
inline void FunctionMinimizer<Functor>::adjustBrackets(double& a, double& fa,
                                                       double& b, double& fb,
                                                       double& c, double& fc,
                                                       double& b2, double& fb2,
                                                       double& b3, double& fb3,
                                                       double u, double fu)
//===========================================================================
{
    if (fu < fb) {
        // narrowing down the brackets
        if (u < b) { 
            c = b; fc = fb;
        } else {
            a = b; fa = fb;
        }
        // updating the best points found so far
        b3 = b2; fb3 = fb2;
        b2 = b; fb2 = fb;
        b = u; fb = fu;
    } else {
        // we did not find a better minimum, but we can still narrow down the brackets
        if (u < b) {
            a = u; fa = fu;
        } else {
            c = u; fc = fu;
        }

        // checking whether the second- and third-best points should be updated
        if (fu < fb2) {
            b3 = b2; fb3 = fb2;
            b2 = u; fb2 = fu;
        } else if (fu < fb3) {
            b3 = u; fb3 = fu;
        }
    }
}

//===========================================================================
template<class Functor> inline double 
FunctionMinimizer<Functor>::determineMaxSteplength(const Point& dir) const
//===========================================================================
{
    const int n = numPars();
    std::vector<double> max_uv(n);

    for (int i = 0; i < n; ++i) {
        if (dir[i] > param_tol_[i]) {
            double delta = fun_.maxPar(i) - par_[i];
            max_uv[i] = delta / dir[i]; // a positive value
        } else if (dir[i] < -param_tol_[i]) {
            double delta = fun_.minPar(i) - par_[i];
            max_uv[i] = delta / dir[i]; // a positive value
        } else {
            max_uv[i] = std::numeric_limits<double>::max(); // largest machine-representable number
        }
    }
    return *(std::min_element(max_uv.begin(), max_uv.end()));
}

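// NOTE (editorial): a small worked example of the bound computed above, with made-up
// numbers; it only restates the per-coordinate computation on plain doubles.
#if 0 // illustrative sketch only
inline void example_max_steplength()
{
    // two parameters on [0,1] x [0,1], current point (0.75, 0.4), direction (-1, 0.5)
    double par[2]     = { 0.75, 0.4 };
    double dir[2]     = { -1.0, 0.5 };
    double min_par[2] = { 0.0, 0.0 };
    double max_par[2] = { 1.0, 1.0 };
    double max_step = std::numeric_limits<double>::max();
    for (int i = 0; i < 2; ++i) {
        double bound = max_step;
        if (dir[i] > 0) {
            bound = (max_par[i] - par[i]) / dir[i]; // 1.2 for i == 1
        } else if (dir[i] < 0) {
            bound = (min_par[i] - par[i]) / dir[i]; // 0.75 for i == 0
        }
        max_step = std::min(max_step, bound);
    }
    // max_step == 0.75: a longer step along 'dir' would leave the domain in the first parameter
}
#endif
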
//===========================================================================
template<class Functor> inline double 
FunctionMinimizer<Functor>::parabolicEstimate(double p2, double fp2, const Point& dir)
//===========================================================================
{
    // fits a parabola through the current point (using its directional derivative along
    // 'dir') and the point at parameter 'p2', and returns the abscissa of its minimum
    Point gradient(numPars());
    grad(gradient);
    double directional_deriv = gradient * dir;
    return -directional_deriv * p2 * p2 / (2 * (fp2 - fval() - p2 * directional_deriv));
}

//===========================================================================
template<class Functor> inline bool
FunctionMinimizer<Functor>::bracketInterval(const Point& dir,  // direction of descent
                                            const double max_step,
                                            //const double oper_tol, // operating tolerance
                                            double* bracket,   // points to 3-array
                                            double* fval_brak) // points to 3-array
//===========================================================================
{
    // We suppose that 'dir' is a direction of descent - anything else is an error!
    // We want to find three collinear points (a, b, c) on the line with direction 'dir'
    // passing through the current point 'par_' such that f(a) > f(b) < f(c). 

    ASSERT(max_step > 0);
    //ASSERT(oper_tol > 0);
    //ASSERT(max_step > oper_tol);

    double& a = bracket[0]; // the three points we want to determine
    double& b = bracket[1];
    double& c = bracket[2];
    double& fa = fval_brak[0];
    double& fb = fval_brak[1];
    double& fc = fval_brak[2]; // function values at these points

    a = 0;
    fa = fval(); // value at the current point 'par_'

    b = default_partition_ * max_step;
    fb = fval(par_ + b * dir);

    // determining a tolerance based on the numerical precision of the machine
    Point g;  grad(g);
    double num_tolerance = numerical_tolerance(par_, dir, fa, dir * g);

    if (fb >= fa) {
        // we know there is a minimum between a and b, since 'dir' is assumed to be a 
        // direction of descent.  We now only have to bisect the interval sufficiently
        // many times towards 'a' in order to bracket the minimum
        //std::cout << "Fallen into slow bracketing." << std::endl;
        do {
            c = b; fc = fb;
            b = parabolicEstimate(c, fc, dir);
            fb = fval(par_ + b * dir);
            ASSERT(b > a && b < c);
        } while (fb >= fa && fabs(b - a) > num_tolerance);

        if (fb >= fa) {
            // we did not succeed in bracketing a minimum between a and c, due to
            // numerical issues (although 'dir' is a direction of descent, we were
            // unable to find a smaller function value within a distance from the
            // startpoint bigger than the numerical tolerance).
            return false;
        }
        ASSERT(fb < fa && fb <= fc);
        return true;
    }

    // if we got here, (fb < fa) -- we have (probably) not yet arrived at the minimum value.
    // The code below is based on ideas from the book "Numerical Recipes in C" for bracketing
    // a minimum.
    c = b + (1 + golden_ratio_) * (b - a);
    c = (c < max_step) ? c : max_step;
    fc = fval(par_ + c * dir);

    //std::cout << "Entering normal bracketing." << std::endl;
    // we here know that a < b < c <= max_step, and that fb < fa
    while (fb > fc) {
        // coming up with a suggestion for a new point, based on parabolic interpolation of a, b and c
        double fu, u;
        parabolicFit(a, fa, b, fb, c, fc, max_step - b, u);

        double interval_length = c - a;
        if (fabs(u - b) > interval_length) {
            u = (u > b) ? b + interval_length : b - interval_length;
        }
        u = (u < max_step) ? u : max_step; // not really necessary, but a safeguard against roundoff
        
        // deciding what to do with the new, estimated point
        if (b < u && u < c) {
            fu = fval(par_ + u * dir);
            if (fu < fc) { // minimum between b and c
                a = b; fa = fb;
                b = u; fb = fu;
                return true;
            } else if (fu > fb) { // minimum between a and u
                c = u; fc = fu;
                return true;
            }
            // no help in the parabolic fit
            u = c + (1 + golden_ratio_) * (c - b);
            u = (u < max_step) ? u : max_step;
        } else if (c < u) {
            fu = fval(par_ + u * dir);
            if (fu < fc) {
                shift3(b, c, u, u + (1 + golden_ratio_) * (u - c));
                u = (u < max_step) ? u : max_step;
                shift3(fb, fc, fu, fval(par_ + u * dir));
            }
        } else { // reject the parabolic u
            u = c + (1 + golden_ratio_) * (c - b);
            u = (u < max_step) ? u : max_step;
            fu = fval(par_ + u * dir);
        }
        shift3(a, b, c, u);
        shift3(fa, fb, fc, fu);
        if (c == max_step && fc <= fb) { // yes, equality check of doubles is justified here
            return false;
        }
//      if (fabs(c - max_step) < minimization_tol_ && fc <= fb) {
//              return false;
//      }
    }
    // if we got here, then fa > fb < fc
    ASSERT(fb < fa && fb <= fc);
    return true;
}

//===========================================================================
template<class Functor>
inline bool FunctionMinimizer<Functor>::parabolicFit(double a, double fa, 
                                                     double b, double fb, 
                                                     double c, double fc,
                                                     double max_from_b, double& u)
//===========================================================================
{
    // returns 'true' if it could determine a minimum on the parabola containing 
    // the points (a, fa), (b, fb) and (c, fc).  If no such minimum could be found
    // (i.e. the function is linear), then it returns false.
    ASSERT(max_from_b > 0);

    double tmp1 = (b - a) * (fb - fc);
    double tmp2 = (b - c) * (fb - fa);
    double denom = 2 * (tmp1 - tmp2); // denominator
    u = (b - a) * tmp1 - (b - c) * tmp2; // numerator
    
    if (fabs(u) < fabs(denom) * max_from_b) {
        u /= denom;
        u = b - u;
        return true;
    } else {
        // we would have to step farther than allowed.  Set to max and return false.
        u = (u * denom > 0) ? b - max_from_b : b + max_from_b;
        return false;
    }
    // never get here, but keep the compiler happy
    return true;
}

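// NOTE (editorial): a quick numerical check of the formula above.  For an exact
// parabola f(x) = (x - m)^2 the fitted abscissa should recover the vertex m.  The
// numbers below are made up for illustration.
#if 0 // illustrative sketch only
inline void example_parabolic_fit()
{
    const double m = 0.4;               // vertex of the test parabola
    double a = 0.0, b = 0.3, c = 1.0;
    double fa = (a - m) * (a - m);      // 0.16
    double fb = (b - m) * (b - m);      // 0.01
    double fc = (c - m) * (c - m);      // 0.36
    double tmp1 = (b - a) * (fb - fc);  // -0.105
    double tmp2 = (b - c) * (fb - fa);  //  0.105
    double u = b - ((b - a) * tmp1 - (b - c) * tmp2) / (2 * (tmp1 - tmp2));
    // u == 0.4 (up to roundoff), i.e. the vertex m is recovered exactly
    (void)u;
}
#endif
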
//===========================================================================
template<class Functor>
inline double FunctionMinimizer<Functor>::numerical_tolerance(double c)
//===========================================================================
{
    if (c == double(0)) {
        return tiny_;
    }
    static const double eps = std::numeric_limits<double>::epsilon();
    return fabs(c) * eps;
}

//===========================================================================
template<class Functor>
inline double FunctionMinimizer<Functor>::numerical_tolerance(const Point& x, 
                                                              const Point& dir)
//===========================================================================
{
    // we want to determine the smallest positive double value 'a' such that 
    // (x + a * dir) != x.  If we define tol(x_i) to be the numerical precision
    // around the i-th component of x, and dir_i to be the i-th component of dir, 
    // then we search for the value:
    // minimize_over_i (tol(x_i) / |dir_i|)

    double res = std::numeric_limits<double>::max();
    for (int i = 0; i < x.size(); ++i) {
        if (dir[i] != double(0.0)) {
            double temp = numerical_tolerance(x[i]) / fabs(dir[i]);
            res = res < temp ? res : temp;
            //double temp = fabs(x[i]) * std::numeric_limits<double>::epsilon(); //tol(x_i)
            //temp /= fabs(dir[i]); // |dir_i|
            //res = res < temp ? res : temp;
        }
    }
    return res;
}

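// NOTE (editorial): a small illustration of the quantity computed above, with made-up
// numbers; it only restates the formula on plain doubles.
#if 0 // illustrative sketch only
inline void example_smallest_visible_step()
{
    // for x = 1000 and dir = 0.01, steps much smaller than
    // eps * |x| / |dir| (about 2.2e-11) leave x + a * dir unchanged in double
    // precision, so a line-search tolerance below that value is meaningless
    double x = 1000.0, dir = 0.01;
    double eps = std::numeric_limits<double>::epsilon();
    double a = eps * fabs(x) / fabs(dir);
    bool unchanged = (x + 0.1 * a * dir == x); // true: the step is rounded away
    (void)unchanged;
}
#endif
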
//===========================================================================
template<class Functor>
inline double FunctionMinimizer<Functor>::numerical_tolerance(const Point& x, 
                                                              const Point& dir,
                                                              const double fx,
                                                              const double dfx)
//===========================================================================
{
    //    we want to determine the smallest positive double value 'a' such that 
    // -> (x + a * dir) != x
    //    and such that 
    // -> f(x + a * dir) != f(x).  
    //    To satisfy the last criterion, we use the function value in x, given by
    //    'fx', as well as the directional derivative, which is given by 'dfx'. 

    double a1 = numerical_tolerance(x, dir);

    double a2 = (fabs(fx) + perturbation_) * std::numeric_limits<double>::epsilon();
    a2 /= fabs(dfx);

    return a1 > a2 ? a1 : a2;
}

//===========================================================================
template<class Functor>
inline double FunctionMinimizer<Functor>::fval() const
//===========================================================================
{
    if (!cached_value_updated_) {
        cached_value_ = fun_(par_.begin());
        cached_value_updated_ = true;
    }
    return cached_value_;
}

//===========================================================================
template<class Functor>
inline double FunctionMinimizer<Functor>::fval(const Point& par) const
//===========================================================================
{
    return fun_(par.begin());
}

//===========================================================================
template<class Functor>
inline void FunctionMinimizer<Functor>::grad(Point& result) const
//===========================================================================
{
    if (!cached_grad_updated_) {
        fun_.grad(par_.begin(), cached_grad_.begin());
        cached_grad_updated_ = true;
    }
    result = cached_grad_;
}

//===========================================================================
template<class Functor>
inline void FunctionMinimizer<Functor>::grad(const Point& param, Point& result) const
//===========================================================================
{
    fun_.grad(param.begin(), result.begin());
}

//===========================================================================
template<class Functor>
inline void FunctionMinimizer<Functor>::moveUV(const Point& dir, double multiplier)
//===========================================================================
{
    par_ += dir * multiplier;
    checkBorder();
    resetCache();
}

} // end namespace Go

#endif // _GENERAL_FUNCTIONMINIMIZER_IMPLEMENTATION_H

