23 #ifndef O2SCL_MMIN_CONP_H 24 #define O2SCL_MMIN_CONP_H 29 #include <gsl/gsl_blas.h> 30 #include <gsl/gsl_multimin.h> 31 #include <o2scl/mmin_conf.h> 33 #ifndef DOXYGEN_NO_O2NS 52 class def_auto_grad_t =
53 gradient_gsl<multi_funct11,boost::numeric::ublas::vector<double> > >
55 public mmin_conf<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t> {
74 double fa = this->
it_min, fb, fc;
76 double stepa = 0.0, stepb, stepc=this->
step;
82 for(
size_t i=0;i<this->
dim;i++) dx[i]=0.0;
84 "in mmin_conp::iterate().",
93 dir = (pg >= 0.0) ? +1.0 : -1.0;
107 this->
step = stepc * 2.0;
109 for(
size_t i=0;i<this->
dim;i++) {
114 (*this->
grad)(this->dim,this->
x1,gradient);
116 (*this->
agrad)(this->dim,this->
x1,gradient);
127 stepa, stepc, fa, fc, this->
x1, this->
dx1,
128 gradient, &stepb, &fb);
135 this->
min(x,this->
p,dir / this->
pnorm,stepa,stepb,stepc, fa,
136 fb, fc, this->
tol, this->
x1, this->
dx1, this->
x2,
137 dx, gradient, &(this->
step), &(this->
it_min), &g1norm);
139 for(
size_t i=0;i<this->
dim;i++) x[i]=this->
x2[i];
143 this->
iter = (this->
iter + 1) % this->dim;
145 if (this->
iter == 0) {
146 for(
size_t i=0;i<this->
dim;i++) this->
p[i]=gradient[i];
147 this->
pnorm = g1norm;
165 for(
size_t i=0;i<this->
dim;i++) {
166 this->
g0[i]=gradient[i];
173 virtual const char *
type() {
return "mmin_conp";}
175 #ifndef DOXYGEN_INTERNAL 182 (
const mmin_conp<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t>&);
188 #ifndef DOXYGEN_NO_O2NS Class for automatically computing gradients [abstract base].
The main O$_2$scl names...
bool grad_given
If true, a gradient has been specified.
void min(const vec_t &x, const vec_t &xp, double lambda, double stepa, double stepb, double stepc, double fa, double fb, double fc, double xtol, vec_t &x1x, vec_t &dx1x, vec_t &x2x, vec_t &dx2x, vec_t &gradient, double *xstep, double *f, double *gnorm_u)
Perform the minimization.
void intermediate_point(const vec_t &x, const vec_t &px, double lambda, double pg, double stepa, double stepc, double fa, double fc, vec_t &x1x, vec_t &dx, vec_t &gradient, double *stepx, double *f)
Line minimization.
func_t * func
User-specified function.
#define O2SCL_CONV2_RET(d, d2, n, b)
Set an error and return the error value, two-string version.
int iter
Iteration number.
double dnrm2(const size_t N, const vec_t &X)
Compute the norm of the vector X.
bool err_nonconv
If true, call the error handler if the routine does not "converge".
virtual int iterate()
Perform an iteration.
iteration is not making progress toward solution
auto_grad_t * agrad
Automatic gradient object.
double ddot(const size_t N, const vec_t &X, const vec2_t &Y)
Compute the dot product $X \cdot Y = \sum_i X_i Y_i$.
#define O2SCL_ERR(d, n)
Set an error with message d and code n.
vec_t ugx
Proposed minimum.
void dscal(const double alpha, const size_t N, vec_t &X)
Compute $X \leftarrow \alpha X$.
std::function< int(size_t, boost::numeric::ublas::vector< double > &, boost::numeric::ublas::vector< double > &)> grad_funct11
Array of multi-dimensional functions typedef.
void daxpy(const double alpha, const size_t N, const vec_t &X, vec2_t &Y)
Compute $Y \leftarrow \alpha X + Y$.
Multidimensional minimization by the Polak-Ribiere conjugate gradient algorithm (GSL) ...
virtual const char * type()
Return string denoting type ("mmin_conp")
std::function< double(size_t, const boost::numeric::ublas::vector< double > &)> multi_funct11
Multi-dimensional function typedef.
Multidimensional minimization by the Fletcher-Reeves conjugate gradient algorithm (GSL) ...
dfunc_t * grad
User-specified gradient.
void take_step(const vec_t &x, const vec_t &px, double stepx, double lambda, vec_t &x1x, vec_t &dx)
Take a step.