OptNewtonLike.h

#ifndef OptNewtonLike_h
#define OptNewtonLike_h

/*--------------------------------------------------------------------
 Copyright (c) 2001, Sandia Corporation.
 J.C. Meza, Sandia National Laboratories, meza@ca.sandia.gov
 ---------------------------------------------------------------------*/

#include "Opt.h"

using std::ostream;

namespace OPTPP {

/**
 * OptNewtonLike is the base class for the family of Newton-like
 * optimization methods (Newton, quasi-Newton, finite-difference Newton).
 */
class OptNewtonLike: public OptimizeClass {
protected:
  virtual NLP1 *nlprob() const = 0;     ///< Return a pointer to the nonlinear problem
  NEWMAT::ColumnVector gprev;           ///< Gradient computed at the previous iteration
  NEWMAT::SymmetricMatrix Hessian;      ///< Current Hessian or Hessian approximation
  int grad_evals;                       ///< Number of gradient evaluations
  SearchStrategy strategy;              ///< Globalization strategy (TrustRegion, LineSearch, ...)
  DerivOption finitediff;               ///< Finite-difference option for derivatives
  real TR_size;                         ///< Trust-region radius
  real gradMult;                        ///< Gradient multiplier used to compute the initial TR_size
  int searchSize;                       ///< Search pattern size for the PDS-based strategy
  void defaultAcceptStep(int, int);
  NEWMAT::ColumnVector defaultComputeSearch(NEWMAT::SymmetricMatrix& );
  bool WarmStart;                       ///< True if an initial Hessian was supplied via UseWarmStart

public:

  /// Default constructor
  OptNewtonLike() {}

  /// Standard constructor: n is the dimension of the problem
  OptNewtonLike(int n):
    OptimizeClass(n), gprev(n), Hessian(n), grad_evals(0),
    strategy(TrustRegion), finitediff(ForwardDiff), TR_size(0.0),
    gradMult(0.1), searchSize(64), WarmStart(false){}

  /// Constructor with a user-supplied update function
  OptNewtonLike(int n, UPDATEFCN u):
    OptimizeClass(n), gprev(n), Hessian(n), grad_evals(0),
    strategy(TrustRegion), finitediff(ForwardDiff), TR_size(0.0),
    gradMult(0.1), searchSize(64), WarmStart(false){update_fcn = u;}

  /// Constructor with user-supplied tolerances
  OptNewtonLike(int n, TOLS t):
    OptimizeClass(n,t), gprev(n), Hessian(n), grad_evals(0),
    strategy(TrustRegion), finitediff(ForwardDiff), TR_size(0.0),
    gradMult(0.1), searchSize(64), WarmStart(false){}

  /// Destructor
  virtual ~OptNewtonLike(){}

//-----------------------------------------------------------------
// Virtual functions
//-----------------------------------------------------------------

//-----------------------------------------------------------------
// These have default implementations
//-----------------------------------------------------------------

  /// Accept the step computed at iteration k
  virtual void acceptStep(int k, int step_type)
    {defaultAcceptStep(k, step_type);}

  /// Compute the search direction from the Hessian H
  virtual NEWMAT::ColumnVector computeSearch(NEWMAT::SymmetricMatrix& H)
    {return defaultComputeSearch(H);}

  /// Update the model at iteration k
  virtual void updateModel(int k, int ndim, NEWMAT::ColumnVector x)
    {OptimizeClass::defaultUpdateModel(k, ndim, x);}

//-----------------------------------------------------------------
// These must be defined by derived classes
//-----------------------------------------------------------------

  /// Check convergence
  virtual int  checkConvg();

  /// Check the analytic derivatives against finite differences
  virtual int  checkDeriv();

  /// Compute a step along the direction sk
  virtual int  computeStep(NEWMAT::ColumnVector sk);

  /// Initialize the optimization method
  virtual void initOpt();

  /// Initialize the Hessian
  virtual void initHessian();

  /// Compute the initial trust-region radius
  virtual double initTrustRegionSize() const;

  /// Run the optimization method
  virtual void optimize();

  /// Read user-specified input options
  virtual void readOptInput();

  /// Reset parameters in order to perform a subsequent optimization
  virtual void reset();

//-----------------------------------------------------------------
// These are used by all derived classes
//-----------------------------------------------------------------

  /// Compare the analytic gradient with a finite-difference gradient
  int checkAnalyticFDGrad();

  /// Return the number of function evaluations
  int getFevals() const {return fcn_evals;}

  /// Return the number of gradient evaluations
  int getGevals() const {return grad_evals;}

  /// Return the trust-region radius
  real getTRSize() const {return TR_size;}
  /// Set the trust-region radius
  void setTRSize(real delta) {TR_size = delta;}

  /// Return the gradient multiplier used to compute the initial trust-region radius
  real getGradMult() const {return gradMult;}
  /// Set the gradient multiplier
  void setGradMult(real tau) {gradMult = tau;}

  /// Return the search pattern size
  int getSearchSize() const {return searchSize;}
  /// Set the search pattern size
  void setSearchSize(int sss) {searchSize = sss;}

  /// Return true if a warm start was requested
  bool getWarmStart() const {return WarmStart;}
  /// Supply an initial Hessian and request a warm start
  void UseWarmStart(NEWMAT::SymmetricMatrix& H) {Hessian = H; WarmStart = true;}

  /// Set the globalization strategy
  void setSearchStrategy(SearchStrategy s) {strategy = s;}
  /// Return the globalization strategy
  SearchStrategy getSearchStrategy() const {return strategy;}

  /// Set the finite-difference option
  void setDerivOption(DerivOption d) {finitediff = d;}
  /// Return the finite-difference option
  DerivOption getDerivOption() const {return finitediff;}

  /// Return the current Hessian
  NEWMAT::SymmetricMatrix getHessian() const {return Hessian;}
  /// Set the Hessian
  void setHessian(NEWMAT::SymmetricMatrix& H) {Hessian = H;}

  /// Update the Hessian (or Hessian approximation) at iteration k
  virtual NEWMAT::SymmetricMatrix updateH(NEWMAT::SymmetricMatrix& H, int k) = 0;

  /// Print status information
  void printStatus(char *);

  friend int trustregion(NLP1*, ostream*, NEWMAT::SymmetricMatrix&,
                         NEWMAT::ColumnVector&, NEWMAT::ColumnVector&,
                         real&, real&, real stpmax, real stpmin);

  friend int trustpds(NLP1*, ostream*, NEWMAT::SymmetricMatrix&,
                      NEWMAT::ColumnVector&, NEWMAT::ColumnVector&,
                      real&, real&, real stpmax, real stpmin, int);

};
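
// Illustrative sketch (not part of OptNewtonLike.h): a concrete subclass
// of OptNewtonLike must at minimum supply nlprob() and updateH().  The
// hypothetical class below simply keeps a user-supplied Hessian fixed
// across iterations; the library's own OptNewton, OptQNewton, and
// OptFDNewton classes provide the real update rules.
//
//   class FixedHessianNewton: public OptNewtonLike {
//   public:
//     FixedHessianNewton(NLP1* p): OptNewtonLike(p->getDim()), prob(p) {}
//     NEWMAT::SymmetricMatrix updateH(NEWMAT::SymmetricMatrix& H, int)
//       { return H; }                  // no update: reuse the stored Hessian
//   protected:
//     NLP1* nlprob() const { return prob; }
//   private:
//     NLP1* prob;
//   };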

/**
 * OptNewton1Deriv is the base class for Newton-like methods that
 * require only first derivatives of the objective function.
 */
class OptNewton1Deriv: public OptNewtonLike {
public:
  OptNewton1Deriv() {}

  OptNewton1Deriv(NLP1* p):
    OptNewtonLike(p->getDim()), mem_nlp(p){}

  OptNewton1Deriv(NLP1* p, UPDATEFCN u):
    OptNewtonLike(p->getDim(),u), mem_nlp(p){}

  OptNewton1Deriv(NLP1* p, TOLS t):
    OptNewtonLike(p->getDim(),t), mem_nlp(p){}

  virtual ~OptNewton1Deriv(){}

private:
  NLP1* mem_nlp;

protected:
  /// Return a pointer to the nonlinear problem
  NLP1* nlprob() const { return mem_nlp; }
};

/**
 * OptNewton2Deriv is the base class for Newton-like methods that
 * use analytic second derivatives of the objective function.
 */
class OptNewton2Deriv: public OptNewtonLike {
public:
  OptNewton2Deriv() {}

  OptNewton2Deriv(NLP2* p):
    OptNewtonLike(p->getDim()), mem_nlp(p){}

  OptNewton2Deriv(NLP2* p, UPDATEFCN u):
    OptNewtonLike(p->getDim(),u), mem_nlp(p){}

  OptNewton2Deriv(NLP2* p, TOLS t):
    OptNewtonLike(p->getDim(),t), mem_nlp(p){}

  virtual ~OptNewton2Deriv(){}

private:
  NLP2* mem_nlp;

protected:
  /// Return a pointer to the problem viewed as an NLP1
  NLP1* nlprob() const { return mem_nlp; }
  /// Return a pointer to the problem with second derivatives
  NLP2* nlprob2() const { return mem_nlp; }
};

} // namespace OPTPP

#endif
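
A minimal usage sketch follows.  It assumes the concrete OptNewton class
declared in OptNewton.h and the NLF2 problem wrapper declared in NLF.h,
neither of which is part of this header; the routines init_x and my_func
are user-written placeholders that follow the usual OPT++ NLF2 calling
conventions, and the NLPFunction/NLPGradient/NLPHessian mode flags come
from the OPT++ globals.

#include "NLF.h"
#include "OptNewton.h"

using namespace OPTPP;
using NEWMAT::ColumnVector;
using NEWMAT::SymmetricMatrix;

// Initial guess for the iterate.
void init_x(int ndim, ColumnVector& x)
{ for (int i = 1; i <= ndim; ++i) x(i) = 1.0; }

// Simple quadratic test problem f(x) = 0.5*x'x with analytic derivatives.
void my_func(int mode, int ndim, const ColumnVector& x, double& fx,
             ColumnVector& g, SymmetricMatrix& H, int& result)
{
  if (mode & NLPFunction) { fx = 0.5*NEWMAT::DotProduct(x, x); result = NLPFunction; }
  if (mode & NLPGradient) { g = x;                             result = NLPGradient; }
  if (mode & NLPHessian)  { H = 0.0;
                            for (int i = 1; i <= ndim; ++i) H(i,i) = 1.0;
                            result = NLPHessian; }
}

int main()
{
  int ndim = 2;
  NLF2 nlp(ndim, my_func, init_x);   // problem with analytic first and second derivatives

  OptNewton objfcn(&nlp);            // full Newton method built on OptNewton2Deriv
  objfcn.setSearchStrategy(TrustRegion);
  objfcn.optimize();
  objfcn.printStatus("Solution from OptNewton");
  objfcn.cleanup();

  return 0;
}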