#ifndef OPTIMIZERS_H
#define OPTIMIZERS_H

#include <fastlib/fastlib.h>

const fx_entry_doc opt_entries[] = {
  {"method", FX_PARAM, FX_STR, NULL,
   " The method used to optimize.\n"},
  {"param_space_dim", FX_RESERVED, FX_INT, NULL,
   " The dimension of the parameter space.\n"},
  {"init_opt", FX_TIMER, FX_CUSTOM, NULL,
   " The time taken to initialize the optimizer.\n"},
  {"get_init_pt", FX_TIMER, FX_CUSTOM, NULL,
   " The time taken to generate the initial point.\n"},
  {"get_init_pts", FX_TIMER, FX_CUSTOM, NULL,
   " The time taken to obtain the 'dim+1' points required"
   " to use the NelderMead optimizer.\n"},
  {"optimizing", FX_TIMER, FX_CUSTOM, NULL,
   " The time taken to get to the optimal value.\n"},
  {"tolerance", FX_PARAM, FX_DOUBLE, NULL,
   " The tolerance value for the parameters"
   " (defaults to 1.0e-5).\n"},
  {"MAX_FUNC_EVAL", FX_PARAM, FX_INT, NULL,
   " The maximum number of function evaluations"
   " allowed for the NelderMead optimizer (defaults"
   " to 50000).\n"},
  {"func_evals", FX_RESULT, FX_INT, NULL,
   " The number of function evaluations taken by the algorithm.\n"},
  {"EPSILON", FX_PARAM, FX_DOUBLE, NULL,
   " Value of epsilon.\n"},
  {"TOLERANCE", FX_PARAM, FX_DOUBLE, NULL,
   " Tolerance on the minimum movement of the parameter value.\n"},
  {"gtol", FX_PARAM, FX_DOUBLE, NULL,
   " Tolerance value for the gradient of the function.\n"},
  {"MAX_STEP_SIZE", FX_PARAM, FX_DOUBLE, NULL,
   " The maximum step size in the direction of the gradient.\n"},
  {"MAX_ITERS", FX_PARAM, FX_INT, NULL,
   " The maximum number of iterations allowed for the optimizer.\n"},
  {"iters", FX_RESULT, FX_INT, NULL,
   " The number of iterations the algorithm actually went through"
   " before reaching the apparent optimum.\n"},
  FX_ENTRY_DOC_DONE
};

const fx_module_doc opt_doc = {
  opt_entries, NULL,
  " This file contains two optimizers: NelderMead and QuasiNewton.\n"
};
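
// The entries above document every parameter, timer, and result this module
// reads or writes. Under the usual fastlib fx conventions, a driver program
// hands a module doc such as this one to the fx system at startup, roughly:
//
//   fx_module *root = fx_init(argc, argv, &opt_doc);
//
// This is only a sketch of how such docs are typically registered; the exact
// call is an assumption and is not required by this header.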

// Nelder-Mead (downhill simplex) optimizer: minimizes a function of the
// parameter vector over a fixed data matrix, using only function values
// (no gradients).
class NelderMead {

 private:
  index_t dimension_;   // dimension of the parameter space
  Matrix data_;         // data the objective is evaluated on
  // Objective: maps a parameter vector and the data to a function value.
  long double (*func_ptr_)(Vector&, const Matrix&);
  datanode *opt_module_;  // fx module holding the optimizer's parameters

 public:

  NelderMead() {
  }

  ~NelderMead() {
  }

  // Stores a copy of the data, the objective, and the fx module, and reads
  // the parameter-space dimension from the module.
  void Init(long double (*fun)(Vector&, const Matrix&),
            Matrix& data, datanode *opt_module) {
    data_.Copy(data);
    func_ptr_ = fun;
    opt_module_ = opt_module;
    dimension_ = fx_param_int_req(opt_module_, "param_space_dim");
  }

  const Matrix& data() {
    return data_;
  }

  index_t dimension() {
    return dimension_;
  }

  // Runs the simplex search from the 'dim+1' starting points in 'pts'.
  void Eval(double **pts);
  // Helper that moves the highest-valued simplex point ('ihi') by the
  // factor 'fac'.
  long double ModSimplex_(double **pts, long double *y,
                          double *psum, index_t ihi, float fac);
};
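
// A minimal usage sketch for NelderMead. The objective, data, and module
// below are hypothetical, caller-supplied names, not part of this header:
//
//   long double my_objective(Vector& params, const Matrix& data);
//
//   Matrix data;             // filled in by the caller
//   datanode *module = ...;  // fx module providing "param_space_dim", etc.
//
//   NelderMead opt;
//   opt.Init(my_objective, data, module);
//
//   // Eval() expects the 'dim+1' starting points of the simplex in 'pts'
//   // (presumably each of length dimension()), allocated and seeded by the
//   // caller before the search is run.
//   double **pts = ...;
//   opt.Eval(pts);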

// Quasi-Newton (gradient-based) optimizer: minimizes a function of the
// parameter vector over a fixed data matrix, using an objective that also
// returns its gradient.
class QuasiNewton {

 private:
  index_t dimension_;   // dimension of the parameter space
  Matrix data_;         // data the objective is evaluated on
  // Objective: maps a parameter vector and the data to a function value and
  // writes the gradient into the third argument.
  long double (*func_ptr_)(Vector&, const Matrix&, Vector*);
  datanode *opt_module_;  // fx module holding the optimizer's parameters

 public:

  QuasiNewton() {
  }

  ~QuasiNewton() {
  }

  // Stores a copy of the data, the objective, and the fx module, and reads
  // the parameter-space dimension from the module.
  void Init(long double (*fun)(Vector&, const Matrix&, Vector*),
            Matrix& data, datanode *opt_module) {
    data_.Copy(data);
    func_ptr_ = fun;
    opt_module_ = opt_module;
    dimension_ = fx_param_int_req(opt_module_, "param_space_dim");
  }

  const Matrix& data() {
    return data_;
  }

  index_t dimension() {
    return dimension_;
  }

  // Runs the quasi-Newton search starting from the point in 'pt'.
  void Eval(double *pt);
  // Line search along direction 'xi' from 'pold' (with function value 'fold'
  // and gradient 'grad'); writes the new point into 'pnew' and its function
  // value into 'f_min', taking steps no longer than 'maximum_step_length'.
  void LineSearch_(Vector pold, long double fold, Vector *grad,
                   Vector *xi, Vector *pnew, long double *f_min,
                   long double maximum_step_length);
};
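
// A minimal usage sketch for QuasiNewton, again with hypothetical,
// caller-supplied names. Note that this objective must also report its
// gradient through the third argument:
//
//   long double my_objective(Vector& params, const Matrix& data,
//                            Vector *gradient);
//
//   QuasiNewton opt;
//   opt.Init(my_objective, data, module);
//
//   // Eval() starts the search from the point stored in 'pt' (presumably of
//   // length dimension()).
//   double *pt = ...;
//   opt.Eval(pt);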

#endif