#ifndef OptBCNewtonLike_h
#define OptBCNewtonLike_h

#include <cfloat>        // FLT_MAX, used by computeMaxStep()

#include "Opt.h"
#include "NLF.h"

using std::ostream;

namespace OPTPP {

//----------------------------------------------------------------------
// OptBCNewtonLike: abstract base class for bound-constrained
// Newton-like optimization methods.  Concrete solvers supply the
// nonlinear problem via nlprob() and the Hessian update via updateH().
//----------------------------------------------------------------------

class OptBCNewtonLike: public OptimizeClass {
protected:
  virtual NLP1 *nlprob() const = 0;  // Nonlinear problem being solved

  NEWMAT::ColumnVector gprev;        // Gradient at the previous iteration
  NEWMAT::SymmetricMatrix Hessian;   // Current Hessian (or approximation)
  int grad_evals;                    // Number of gradient evaluations
  SearchStrategy strategy;           // Globalization strategy (line search, trust region, trust-PDS)
  DerivOption finitediff;            // Finite-difference scheme for derivatives
  real TR_size;                      // Trust-region radius
  real gradMult;                     // Gradient multiplier used to initialize the trust-region radius
  int searchSize;                    // Number of points in the trust-PDS search scheme
  int m_nconvgd;                     // Convergence status
  bool WarmStart;                    // True once a Hessian has been supplied via useWarmStart()

  void defaultAcceptStep(int, int);
  NEWMAT::ColumnVector defaultComputeSearch(NEWMAT::SymmetricMatrix&);

public:

  // Default constructor
  OptBCNewtonLike() {}

  // Constructor for a problem of dimension n
  OptBCNewtonLike(int n):
    OptimizeClass(n), gprev(n), Hessian(n), grad_evals(0),
    strategy(LineSearch), finitediff(ForwardDiff), TR_size(0.0),
    gradMult(0.1), searchSize(64), WarmStart(false) {}

  // Constructor for a problem of dimension n with a user-supplied update function
  OptBCNewtonLike(int n, UPDATEFCN u):
    OptimizeClass(n), gprev(n), Hessian(n), grad_evals(0),
    strategy(LineSearch), finitediff(ForwardDiff), TR_size(0.0),
    gradMult(0.1), searchSize(64), WarmStart(false)
    {update_fcn = u;}

  // Constructor for a problem of dimension n with tolerances t
  OptBCNewtonLike(int n, TOLS t):
    OptimizeClass(n,t), gprev(n), Hessian(n), grad_evals(0),
    strategy(LineSearch), finitediff(ForwardDiff), TR_size(0.0),
    gradMult(0.1), searchSize(64), WarmStart(false) {}

  // Destructor
  virtual ~OptBCNewtonLike() {}

  // Accept the step computed at iteration k (default implementation)
  virtual void acceptStep(int k, int step_type)
    {defaultAcceptStep(k, step_type);}

  // Compute the search direction from the Hessian H (default implementation)
  virtual NEWMAT::ColumnVector computeSearch(NEWMAT::SymmetricMatrix& H)
    {return defaultComputeSearch(H);}

  // Update the model at iteration k with the current point x
  virtual void updateModel(int k, int ndim, NEWMAT::ColumnVector x)
    {OptimizeClass::defaultUpdateModel(k, ndim, x);}

  // Reset the optimizer to its initial state
  virtual void reset()
    {int ndim = nlprob()->getDim(); OptimizeClass::defaultReset(ndim);}

  // Update the constraint set (no-op in this base class)
  virtual int updateConstraints(int) {return 0;}

  // Check convergence; returns nonzero if the convergence criteria are met
  virtual int checkConvg();

  // Check the analytic derivatives against finite-difference approximations
  virtual int checkDeriv();

  // Maximum allowable steplength along the given direction
  virtual double computeMaxStep(NEWMAT::ColumnVector&) {return FLT_MAX;}

  // Compute a step along the direction sk
  virtual int computeStep(NEWMAT::ColumnVector sk);

  // Initialize the Hessian
  virtual void initHessian();

  // Initialize the optimization
  virtual void initOpt();

  // Compute the initial trust-region radius
  virtual double initTrustRegionSize() const;

  // Run the optimization method
  virtual void optimize();

  // Read optimization parameters from an input file
  virtual void readOptInput();

  // Update the Hessian at iteration k; must be supplied by derived classes
  virtual NEWMAT::SymmetricMatrix updateH(NEWMAT::SymmetricMatrix& H, int k) = 0;

  // Compare the analytic gradient with a finite-difference gradient
  int checkAnalyticFDGrad();

  // Print status information preceded by the given message
  void printStatus(char *);

  // Number of function evaluations taken so far
  int getFevals() const {return fcn_evals;}

  // Number of gradient evaluations taken so far
  int getGevals() const {return grad_evals;}

  // Finite-difference option used for derivatives
  DerivOption getDerivOption() const {return finitediff;}
  void setDerivOption(DerivOption d) {finitediff = d;}

  // Trust-region radius
  real getTRSize() const {return TR_size;}
  void setTRSize(real delta) {TR_size = delta;}

  // Gradient multiplier used to initialize the trust-region radius
  real getGradMult() const {return gradMult;}
  void setGradMult(real tau) {gradMult = tau;}

  // Number of points in the trust-PDS search scheme
  int getSearchSize() const {return searchSize;}
  void setSearchSize(int sss) {searchSize = sss;}

  // Warm start: reuse a previously computed Hessian
  bool getWarmStart() const {return WarmStart;}
  void useWarmStart(NEWMAT::SymmetricMatrix& H) {Hessian = H; WarmStart = true;}

  // Globalization strategy (line search, trust region, or trust-PDS)
  SearchStrategy getSearchStrategy() const {return strategy;}
  void setSearchStrategy(SearchStrategy s) {strategy = s;}

  // Hessian matrix
  NEWMAT::SymmetricMatrix getHessian() const {return Hessian;}
  void setHessian(NEWMAT::SymmetricMatrix& H) {Hessian = H;}

  friend int trustregion(NLP1*, ostream*, NEWMAT::SymmetricMatrix&,
                         NEWMAT::ColumnVector&, NEWMAT::ColumnVector&,
                         real&, real&, real stpmax, real stpmin);

  friend int trustpds(NLP1*, ostream*, NEWMAT::SymmetricMatrix&,
                      NEWMAT::ColumnVector&, NEWMAT::ColumnVector&,
                      real&, real&, real stpmax, real stpmin, int);
};
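
/*
  Subclassing sketch (illustrative comment only, not part of the library):
  a concrete bound-constrained solver must supply the problem accessor
  nlprob() and the Hessian update updateH().  "MyBCSolver" below is a
  hypothetical name used purely for illustration.

      class MyBCSolver: public OptBCNewtonLike {
      public:
        MyBCSolver(NLP1* p): OptBCNewtonLike(p->getDim()), prob(p) {}
        NEWMAT::SymmetricMatrix updateH(NEWMAT::SymmetricMatrix& H, int k);
      protected:
        NLP1* nlprob() const { return prob; }
      private:
        NLP1* prob;
      };
*/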

//----------------------------------------------------------------------
// OptBCNewton1Deriv: base class for bound-constrained Newton-like
// methods that require only first derivatives (an NLP1 problem).
//----------------------------------------------------------------------

class OptBCNewton1Deriv: public OptBCNewtonLike {
public:
  OptBCNewton1Deriv():
    OptBCNewtonLike(), mem_nlp(0) {}

  OptBCNewton1Deriv(NLP1* p):
    OptBCNewtonLike(p->getDim()), mem_nlp(p) {}

  OptBCNewton1Deriv(NLP1* p, UPDATEFCN u):
    OptBCNewtonLike(p->getDim(),u), mem_nlp(p)
    {update_fcn = u;}

  OptBCNewton1Deriv(NLP1* p, TOLS t):
    OptBCNewtonLike(p->getDim(),t), mem_nlp(p) {}

  virtual ~OptBCNewton1Deriv() {}

protected:
  NLP1* nlprob() const { return mem_nlp; }

private:
  NLP1* mem_nlp;
};
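
/*
  Usage sketch (illustrative comment only): OptBCNewton1Deriv is the base
  for methods that build the Hessian from first-derivative information.
  Assuming a concrete OPT++ subclass such as OptBCQNewton (declared in a
  separate header) and a user-defined NLF1 problem -- typically one
  constructed with a BoundConstraint attached -- a driver might look like:

      NLF1 nlp(n, my_fcn, my_init);      // my_fcn/my_init are user-supplied
      OptBCQNewton solver(&nlp);
      solver.setSearchStrategy(LineSearch);
      solver.optimize();
      solver.printStatus("BC quasi-Newton solution");
*/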

//----------------------------------------------------------------------
// OptBCNewton2Deriv: base class for bound-constrained Newton-like
// methods that use analytic second derivatives (an NLP2 problem).
//----------------------------------------------------------------------

class OptBCNewton2Deriv: public OptBCNewtonLike {
public:
  OptBCNewton2Deriv():
    OptBCNewtonLike(), mem_nlp(0) {}

  OptBCNewton2Deriv(NLP2* p):
    OptBCNewtonLike(p->getDim()), mem_nlp(p) {}

  OptBCNewton2Deriv(NLP2* p, UPDATEFCN u):
    OptBCNewtonLike(p->getDim(),u), mem_nlp(p) { update_fcn = u; }

  OptBCNewton2Deriv(NLP2* p, TOLS t):
    OptBCNewtonLike(p->getDim(),t), mem_nlp(p) {}

protected:
  // The problem viewed through the base-class NLP1 interface and as an NLP2
  NLP1* nlprob() const {return mem_nlp;}
  NLP2* nlprob2() const {return mem_nlp;}

private:
  NLP2* mem_nlp;
};
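
/*
  Usage sketch (illustrative comment only): assuming a concrete OPT++
  subclass such as OptBCNewton (declared in a separate header) and a
  user-defined NLF2 problem with an analytic Hessian, a trust-region
  driver might look like:

      NLF2 nlp(n, my_fcn2, my_init);          // my_fcn2/my_init are user-supplied
      OptBCNewton solver(&nlp);
      solver.setSearchStrategy(TrustRegion);  // globalize with trustregion()
      solver.setTRSize(100.0);                // optional: initial trust-region radius
      solver.optimize();
      solver.printStatus("BC Newton solution");
      int nfev = solver.getFevals(), ngev = solver.getGevals();
*/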

} // namespace OPTPP

#endif