27 #include "casadi/core/core.hpp"
38 int CASADI_NLPSOL_SCPGEN_EXPORT
41 plugin->name =
"scpgen";
43 plugin->version = CASADI_VERSION;
54 casadi_warning(
"SCPgen is under development");
65 "The QP solver to be used by the SQP method"}},
68 "Options to be passed to the QP solver"}},
69 {
"hessian_approximation",
71 "gauss-newton|exact"}},
74 "Maximum number of SQP iterations"}},
77 "Maximum number of linesearch iterations"}},
80 "Stopping criterion for primal infeasibility"}},
83 "Stopping criterion for dual infeasability"}},
86 "Stopping criterion for regularization"}},
89 "Stopping criterion for the step size"}},
92 "Armijo condition, coefficient of decrease in merit"}},
95 "Line-search parameter, restoration factor of stepsize"}},
98 "Size of memory to store history of merit function values"}},
101 "Lower bound for the merit function parameter"}},
104 "Size of L-BFGS memory."}},
107 "Automatic regularization of Lagrange Hessian."}},
110 "Print the header with problem statistics"}},
113 "C-code generation"}},
116 "Threshold for the regularization."}},
119 "Names of the variables."}},
122 "Which variables to print."}}
145 std::string hessian_approximation =
"exact";
146 std::string qpsol_plugin;
151 for (
auto&& op : opts) {
152 if (op.first==
"max_iter") {
154 }
else if (op.first==
"max_iter_ls") {
156 }
else if (op.first==
"c1") {
158 }
else if (op.first==
"beta") {
160 }
else if (op.first==
"lbfgs_memory") {
162 }
else if (op.first==
"tol_pr") {
164 }
else if (op.first==
"tol_du") {
166 }
else if (op.first==
"tol_reg") {
168 }
else if (op.first==
"regularize") {
170 }
else if (op.first==
"codegen") {
172 }
else if (op.first==
"reg_threshold") {
174 }
else if (op.first==
"tol_pr_step") {
176 }
else if (op.first==
"merit_memsize") {
178 }
else if (op.first==
"merit_start") {
180 }
else if (op.first==
"hessian_approximation") {
181 hessian_approximation = op.second.to_string();
182 }
else if (op.first==
"name_x") {
184 }
else if (op.first==
"print_x") {
186 }
else if (op.first==
"qpsol") {
187 qpsol_plugin = op.second.to_string();
188 }
else if (op.first==
"qpsol_options") {
189 qpsol_options = op.second;
190 }
else if (op.first==
"print_header") {
201 for (casadi_int i=0; i<
nx_; ++i) {
202 std::stringstream ss;
218 std::vector<MX> vdef_in = vdef_fcn.
mx_in();
219 std::vector<MX> vdef_out = vdef_fcn(vdef_in);
222 MX x = vdef_in.at(0);
223 MX p = vdef_in.at(1);
224 v_.resize(vdef_in.size()-2);
225 for (casadi_int i=0; i<
v_.size(); ++i) {
226 v_[i].v = vdef_in.at(i+2);
227 v_[i].v_def = vdef_out.at(i+2);
228 v_[i].n =
v_[i].v.nnz();
242 f =
dot(vdef_out[0], vdef_out[0])/2;
243 gL_defL = vdef_out[0];
250 std::stringstream ss;
252 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
253 ss.str(std::string());
254 ss <<
"lam_x" << i++;
255 it->v_lam =
MX::sym(ss.str(), it->v.sparsity());
262 uout() <<
"Allocated intermediate variables." << std::endl;
267 std::vector<std::vector<MX> > aseed(1), asens(1);
268 aseed[0].push_back(1.0);
269 aseed[0].push_back(g_lam);
270 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
271 aseed[0].push_back(it->v_lam);
273 vdef_fcn->
call_reverse(vdef_in, vdef_out, aseed, asens,
true,
false);
276 gL_defL = asens[0].at(i++);
279 p_defL = asens[0].at(i++);
282 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
283 it->v_defL = asens[0].at(i++);
284 if (it->v_defL.is_null()) {
285 it->v_defL =
MX::zeros(it->v.sparsity());
290 uout() <<
"Generated the gradient of the Lagrangian." << std::endl;
297 std::vector<MX> res_fcn_in;
299 res_fcn_in.push_back(x);
res_x_ = n++;
300 res_fcn_in.push_back(p);
res_p_ = n++;
301 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
302 res_fcn_in.push_back(it->v); it->res_var = n++;
305 res_fcn_in.push_back(g_lam);
res_g_lam_ = n++;
306 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
307 res_fcn_in.push_back(it->v_lam); it->res_lam = n++;
312 std::vector<MX> res_fcn_out;
314 res_fcn_out.push_back(f);
res_f_ = n++;
315 res_fcn_out.push_back(gL_defL);
res_gl_ = n++;
316 res_fcn_out.push_back(vdef_out[1]);
res_g_ = n++;
317 res_fcn_out.push_back(p_defL);
res_p_d_ = n++;
318 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
319 res_fcn_out.push_back(it->v_def - it->v); it->res_d = n++;
323 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
324 res_fcn_out.push_back(it->v_defL - it->v_lam); it->res_lam_d = n++;
329 Function res_fcn(
"res_fcn", res_fcn_in, res_fcn_out);
331 uout() <<
"Generated residual function ( " << res_fcn.
n_nodes() <<
" nodes)." << std::endl;
335 std::stringstream ss;
337 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
338 ss.str(std::string());
340 it->d =
MX::sym(ss.str(), it->v.sparsity());
341 it->d_def = it->v_def - it->d;
347 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
348 ss.str(std::string());
349 ss <<
"d_lam" << i++;
350 it->d_lam =
MX::sym(ss.str(), it->v.sparsity());
351 it->d_defL = it->v_defL - it->d_lam;
356 std::vector<MX> svar, sdef;
357 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
358 svar.push_back(it->v);
359 sdef.push_back(it->d_def);
362 for (std::vector<Var>::reverse_iterator it=
v_.rbegin(); it!=
v_.rend(); ++it) {
363 svar.push_back(it->v_lam);
364 sdef.push_back(it->d_defL);
368 std::vector<MX> ex(4);
374 substitute_inplace(svar, sdef, ex,
false);
376 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
377 it->d_def = sdef[i++];
380 for (std::vector<Var>::reverse_iterator it=
v_.rbegin(); it!=
v_.rend(); ++it) {
381 it->d_defL = sdef[i++];
391 std::vector<MX> mfcn_in;
393 mfcn_in.push_back(p);
mod_p_ = n++;
394 mfcn_in.push_back(x);
mod_x_ = n++;
395 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
396 mfcn_in.push_back(it->d); it->mod_var = n++;
401 std::vector<MX> mfcn_out;
402 mfcn_out.push_back(g_z);
mod_g_ = n++;
408 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
409 mfcn_in.push_back(it->d_lam); it->mod_lam = n++;
415 mfcn_out.push_back(f_z);
mod_f_ = n++;
416 mfcn_out.push_back(gL_z);
mod_gl_ = n++;
420 if (
verbose_) casadi_message(
"Formed Jacobian of the constraints.");
425 if (
verbose_) casadi_message(
"Formed square root of Gauss-Newton Hessian.");
427 if (
verbose_) casadi_message(
"Formed Hessian of the Lagrangian.");
432 std::vector<MX> mat_out;
433 mat_out.push_back(jac);
mat_jac_ = n++;
434 mat_out.push_back(hes);
mat_hes_ = n++;
435 Function mat_fcn(
"mat_fcn", mfcn_in, mat_out);
439 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
440 mfcn_out.push_back(it->d_def); it->mod_def = n++;
442 mfcn_out.push_back(it->d_defL); it->mod_defL = n++;
447 Function mfcn(
"mfcn", mfcn_in, mfcn_out);
450 std::vector<std::vector<MX> > mfcn_fwdSeed(1, mfcn_in), mfcn_fwdSens(1, mfcn_out);
453 std::fill(mfcn_fwdSeed[0].begin(), mfcn_fwdSeed[0].end(),
MX());
454 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
455 mfcn_fwdSeed[0][it->mod_var] = it->d;
457 mfcn_fwdSeed[0][it->mod_lam] = it->d_lam;
460 mfcn->
call_forward(mfcn_in, mfcn_out, mfcn_fwdSeed, mfcn_fwdSens,
true,
false);
463 MX b_gf = densify(mfcn_fwdSens[0][
mod_gl_]);
464 MX b_g = densify(mfcn_fwdSens[0][
mod_g_]);
467 std::vector<MX> vec_fcn_out;
469 vec_fcn_out.push_back(b_gf);
vec_gf_ = n++;
470 vec_fcn_out.push_back(b_g);
vec_g_ = n++;
471 casadi_assert_dev(n==vec_fcn_out.size());
473 Function vec_fcn(
"vec_fcn", mfcn_in, vec_fcn_out);
475 uout() <<
"Generated linearization function ( " << vec_fcn.
n_nodes()
476 <<
" nodes)." << std::endl;
487 std::fill(mfcn_fwdSeed[0].begin(), mfcn_fwdSeed[0].end(),
MX());
488 mfcn_fwdSeed[0][
mod_x_] = du;
489 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
490 mfcn_fwdSeed[0][it->mod_var] = -it->d;
494 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
495 mfcn_fwdSeed[0][it->mod_lam] = -it->d_lam;
498 mfcn->
call_forward(mfcn_in, mfcn_out, mfcn_fwdSeed, mfcn_fwdSens,
true,
false);
502 mfcn_in.push_back(du);
mod_du_ = n++;
508 std::vector<MX> exp_fcn_out;
510 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
511 exp_fcn_out.push_back(mfcn_fwdSens[0][it->mod_def]); it->exp_def = n++;
515 for (std::vector<Var>::iterator it=
v_.begin(); it!=
v_.end(); ++it) {
516 exp_fcn_out.push_back(mfcn_fwdSens[0][it->mod_defL]); it->exp_defL = n++;
521 Function exp_fcn(
"exp_fcn", mfcn_in, exp_fcn_out);
523 uout() <<
"Generated step expansion function ( " << exp_fcn.
n_nodes() <<
" nodes)."
541 uout() <<
"Generating \"" << cname <<
"\"" << std::endl;
543 std::string name = cname.substr(0, cname.find_first_of(
'.'));
548 uout() <<
"Starting compilation" << std::endl;
550 time_t time1 = time(
nullptr);
552 time_t time2 = time(
nullptr);
553 double comp_time = difftime(time2, time1);
555 uout() <<
"Compilation completed after " << comp_time <<
" s." << std::endl;
574 casadi_assert(!qpsol_plugin.empty(),
"'qpsol' option has not been set");
578 uout() <<
"Allocated QP solver." << std::endl;
582 uout() <<
"NLP preparation completed" << std::endl;
587 uout() <<
"-------------------------------------------" << std::endl;
588 uout() <<
"This is casadi::SCPgen." << std::endl;
590 uout() <<
"Using Gauss-Newton Hessian" << std::endl;
592 uout() <<
"Using exact Hessian" << std::endl;
596 casadi_int n_lifted = 0;
597 for (std::vector<Var>::const_iterator i=
v_.begin(); i!=
v_.end(); ++i) {
603 <<
"Number of reduced variables: "
604 << std::setw(9) <<
nx_ << std::endl
605 <<
"Number of reduced constraints: "
606 << std::setw(9) <<
ng_ << std::endl
607 <<
"Number of lifted variables/constraints: "
608 << std::setw(9) << n_lifted << std::endl
609 <<
"Number of parameters: "
610 << std::setw(9) <<
np_ << std::endl
611 <<
"Total number of variables: "
612 << std::setw(9) << (
nx_+n_lifted) << std::endl
613 <<
"Total number of constraints: "
614 << std::setw(9) << (
ng_+n_lifted) << std::endl
618 <<
"Iteration options:" << std::endl
619 <<
"{ \"max_iter\":" <<
max_iter_ <<
", "
621 <<
"\"c1\":" <<
c1_ <<
", "
622 <<
"\"beta\":" <<
beta_ <<
", "
627 <<
"\"tol_pr\":" <<
tol_pr_ <<
", "
628 <<
"\"tol_du\":" <<
tol_du_ <<
", "
629 <<
"\"tol_reg\":" <<
tol_reg_ <<
", "
644 for (casadi_int i=0; i<
v_.size(); ++i) {
645 casadi_int n =
v_[i].n;
693 for (casadi_int i=0; i<
v_.size(); ++i) {
694 m->lifted_mem[i].n =
v_[i].n;
701 casadi_int*& iw,
double*& w)
const {
708 m->dxk = w; w +=
nx_;
709 m->dlam = w; w +=
nx_ +
ng_;
710 m->gfk = w; w +=
nx_;
713 m->b_gn = w; w +=
ngn_;
717 for (
auto&& v : m->lifted_mem) {
724 v.dlam = w; w += v.n;
725 v.resL = w; w += v.n;
732 m->qpB = w; w +=
ng_;
735 m->qpG = w; w +=
ngn_;
737 m->qpG = w; w +=
nx_;
739 m->qpH_times_du = w; w +=
nx_;
742 m->lbdz = w; w +=
nx_+
ng_;
743 m->ubdz = w; w +=
nx_+
ng_;
749 for (
auto&& v : m->lifted_mem)
casadi_clear(v.res, v.n);
751 for (
auto&& v : m->lifted_mem)
casadi_clear(v.resL, v.n);
757 auto d_nlp = &m->
d_nlp;
762 m->arg[0] = d_nlp->z;
763 m->arg[1] = d_nlp->p;
765 for (casadi_int i=0; i<
v_.size(); ++i) {
766 m->res[i] = m->lifted_mem[i].x0;
771 uout() <<
"Passed initial guess" << std::endl;
777 for (
auto&& v : m->lifted_mem) {
787 for (
auto&& v : m->lifted_mem) {
792 double time1 = clock();
793 m->t_eval_mat = m->t_eval_res = m->t_eval_vec = m->t_eval_exp = m->t_solve_qp = 0;
806 casadi_int ls_iter = 0;
807 bool ls_success =
true;
813 m->iteration_note =
nullptr;
835 ls_iter, ls_success);
839 converged = converged && du_inf <=
tol_du_;
841 uout() << std::endl <<
"casadi::SCPgen: Convergence achieved after "
842 << m->iter_count <<
" iterations." << std::endl;
848 uout() <<
"casadi::SCPgen: Maximum number of iterations reached." << std::endl;
853 if (d_nlp->objective!=d_nlp->objective || m->pr_step != m->pr_step || pr_inf != pr_inf) {
854 uout() <<
"casadi::SCPgen: Aborted, nan detected" << std::endl;
876 double time2 = clock();
877 m->t_mainloop = (time2-time1)/CLOCKS_PER_SEC;
880 uout() <<
"optimal cost = " << d_nlp->objective << std::endl;
885 uout() <<
"time spent in eval_mat: "
886 << std::setw(9) << m->t_eval_mat <<
" s." << std::endl;
887 uout() <<
"time spent in eval_res: "
888 << std::setw(9) << m->t_eval_res <<
" s." << std::endl;
889 uout() <<
"time spent in eval_vec: "
890 << std::setw(9) << m->t_eval_vec <<
" s." << std::endl;
891 uout() <<
"time spent in eval_exp: "
892 << std::setw(9) << m->t_eval_exp <<
" s." << std::endl;
893 uout() <<
"time spent in solve_qp: "
894 << std::setw(9) << m->t_solve_qp <<
" s." << std::endl;
895 uout() <<
"time spent in main loop: "
896 << std::setw(9) << m->t_mainloop <<
" s." << std::endl;
904 auto d_nlp = &m->
d_nlp;
920 stream << std::setw(4) <<
"iter";
921 stream << std::setw(14) <<
"objective";
922 stream << std::setw(11) <<
"inf_pr";
923 stream << std::setw(11) <<
"inf_du";
924 stream << std::setw(11) <<
"pr_step";
925 stream << std::setw(11) <<
"du_step";
926 stream << std::setw(8) <<
"lg(rg)";
927 stream << std::setw(3) <<
"ls";
931 for (std::vector<casadi_int>::const_iterator i=
print_x_.begin(); i!=
print_x_.end(); ++i) {
932 stream << std::setw(9) <<
name_x_.at(*i);
936 stream.unsetf(std::ios::floatfield);
940 double pr_inf,
double du_inf,
double rg, casadi_int ls_trials,
941 bool ls_success)
const {
942 auto d_nlp = &m->
d_nlp;
943 stream << std::setw(4) << iter;
944 stream << std::scientific;
945 stream << std::setw(14) << std::setprecision(6) << obj;
946 stream << std::setw(11) << std::setprecision(2) << pr_inf;
947 stream << std::setw(11);
948 stream << std::setprecision(2) << du_inf;
949 stream << std::setw(11) << std::setprecision(2) << m->
pr_step;
950 stream << std::setw(11);
951 stream << std::setprecision(2) << m->
du_step;
952 stream << std::fixed;
954 stream << std::setw(8) << std::setprecision(2) << log10(rg);
956 stream << std::setw(8) <<
"-";
958 stream << std::setw(3) << ls_trials;
959 stream << (ls_success ?
' ' :
'F');
962 for (std::vector<casadi_int>::const_iterator i=
print_x_.begin(); i!=
print_x_.end(); ++i) {
963 stream << std::setw(9) << std::setprecision(4) << d_nlp->z[*i];
972 stream.unsetf(std::ios::floatfield);
977 auto d_nlp = &m->
d_nlp;
979 double time1 = clock();
985 for (
size_t i=0; i<
v_.size(); ++i) {
990 for (
size_t i=0; i<
v_.size(); ++i) {
1018 double time2 = clock();
1019 m->
t_eval_mat += (time2-time1)/CLOCKS_PER_SEC;
1023 auto d_nlp = &m->
d_nlp;
1025 double time1 = clock();
1031 for (
size_t i=0; i<
v_.size(); ++i) {
1036 for (
size_t i=0; i<
v_.size(); ++i) {
1046 for (
size_t i=0; i<
v_.size(); ++i) {
1057 double time2 = clock();
1058 m->
t_eval_res += (time2-time1)/CLOCKS_PER_SEC;
1062 auto d_nlp = &m->
d_nlp;
1064 double time1 = clock();
1070 for (
size_t i=0; i<
v_.size(); ++i) {
1075 for (
size_t i=0; i<
v_.size(); ++i) {
1099 double time2 = clock();
1100 m->
t_eval_vec += (time2-time1)/CLOCKS_PER_SEC;
1110 double a = m->
qpH[0];
1111 double b = m->
qpH[2];
1112 double c = m->
qpH[1];
1113 double d = m->
qpH[3];
1116 casadi_assert_dev(a==a && b==b && c==c && d==d);
1120 if (fabs(b-c)>=1e-10) casadi_warning(
"Hessian is not symmetric: "
1121 +
str(b) +
" != " +
str(c));
1125 double eig_smallest = (a+d)/2 - std::sqrt(4*b*c + (a-d)*(a-d))/2;
1135 auto d_nlp = &m->
d_nlp;
1137 double time1 = clock();
1172 double time2 = clock();
1173 m->
t_solve_qp += (time2-time1)/CLOCKS_PER_SEC;
1177 auto d_nlp = &m->
d_nlp;
1183 m->
iteration_note =
"Hessian indefinite in the search direction";
1192 double L1dir = F_sens - m->
sigma * l1_infeas;
1193 double L1merit = d_nlp->objective + m->
sigma * l1_infeas;
1202 if (meritmax < m->merit_mem[i]) meritmax = m->
merit_mem[i];
1206 double t = 1.0, t_prev = 0.0;
1209 double L1merit_cand = 0;
1221 double dt = t-t_prev;
1238 L1merit_cand = d_nlp->objective + m->
sigma * l1_infeas;
1239 if (L1merit_cand <= meritmax + t *
c1_ * L1dir) {
1270 auto d_nlp = &m->
d_nlp;
1272 double time1 = clock();
1279 for (
size_t i=0; i<
v_.size(); ++i) {
1285 for (
size_t i=0; i<
v_.size(); ++i) {
1292 for (casadi_int i=0; i<
v_.size(); ++ i) {
1302 double time2 = clock();
1303 m->
t_eval_exp += (time2-time1)/CLOCKS_PER_SEC;
1310 stats[
"t_eval_res"] = m->t_eval_res;
1311 stats[
"t_eval_vec"] = m->t_eval_vec;
1312 stats[
"t_eval_exp"] = m->t_eval_exp;
1313 stats[
"t_solve_qp"] = m->t_solve_qp;
1314 stats[
"t_mainloop"] = m->t_mainloop;
1315 stats[
"iter_count"] = m->iter_count;
Helper class for C code generation.
void add(const Function &f, bool with_jac_sparsity=false)
Add a function (name generated)
std::string generate(const std::string &prefix="")
Generate file(s)
virtual void call_forward(const std::vector< MX > &arg, const std::vector< MX > &res, const std::vector< std::vector< MX > > &fseed, std::vector< std::vector< MX > > &fsens, bool always_inline, bool never_inline) const
Forward mode AD, virtual functions overloaded in derived classes.
std::string compiler_plugin_
Just-in-time compiler.
virtual void call_reverse(const std::vector< MX > &arg, const std::vector< MX > &res, const std::vector< std::vector< MX > > &aseed, std::vector< std::vector< MX > > &asens, bool always_inline, bool never_inline) const
Reverse mode, virtual functions overloaded in derived classes.
void alloc_w(size_t sz_w, bool persistent=false)
Ensure required length of w field.
void alloc(const Function &f, bool persistent=false, int num_threads=1)
Ensure work vectors long enough to evaluate function.
const MX mx_in(casadi_int ind) const
Get symbolic primitives equivalent to the input expressions.
const Sparsity & sparsity_out(casadi_int ind) const
Get sparsity of a given output.
casadi_int n_nodes() const
Number of nodes in the algorithm.
casadi_int n_out() const
Get the number of function outputs.
casadi_int n_in() const
Get the number of function inputs.
void generate_lifted(Function &vdef_fcn, Function &vinit_fcn) const
Extract the functions needed for the Lifted Newton method.
casadi_int nnz() const
Get the number of (structural) non-zero elements.
static MX sym(const std::string &name, casadi_int nrow=1, casadi_int ncol=1)
Create an nrow-by-ncol symbolic primitive.
static MX zeros(casadi_int nrow=1, casadi_int ncol=1)
Create a dense matrix or a matrix with specified sparsity with all entries zero.
bool is_null() const
Is a null pointer?
const Sparsity & sparsity() const
Get the sparsity pattern.
static MX jacobian(const MX &f, const MX &x, const Dict &opts=Dict())
NLP solver storage class.
Dict get_stats(void *mem) const override
Get all statistics.
static const Options options_
Options.
void init(const Dict &opts) override
Initialize.
casadi_int ng_
Number of constraints.
int init_mem(void *mem) const override
Initialize memory block.
casadi_int np_
Number of parameters.
casadi_int nx_
Number of variables.
void set_work(void *mem, const double **&arg, double **&res, casadi_int *&iw, double *&w) const override
Set the (persistent) work vectors.
const Function & oracle() const override
Get oracle.
static void registerPlugin(const Plugin &plugin, bool needs_lock=true)
Register an integrator in the factory.
bool verbose_
Verbose printout.
void clear_mem()
Clear all memory (called from destructor)
static const std::string meta_doc
A documentation string.
void eval_mat(ScpgenMemory *m) const
Function exp_fcn_
Step expansion.
void eval_vec(ScpgenMemory *m) const
int init_mem(void *mem) const override
Initialize memory block.
static const Options options_
Options.
Function res_fcn_
Residual function.
Scpgen(const std::string &name, const Function &nlp)
bool gauss_newton_
use Gauss-Newton Hessian
void solve_qp(ScpgenMemory *m) const
bool codegen_
Enable Code generation.
double tol_du_
Tolerance on dual infeasibility.
std::vector< std::string > name_x_
void line_search(ScpgenMemory *m, casadi_int &ls_iter, bool &ls_success) const
bool regularize_
Regularization.
casadi_int merit_memsize_
void regularize(ScpgenMemory *m) const
Function qpsol_
QP solver for the subproblems.
Function vinit_fcn_
Generate initial guess for lifted variables.
double tol_pr_step_
stopping criterion for the stepsize
double tol_pr_
Tolerance on primal infeasibility.
void eval_exp(ScpgenMemory *m) const
int solve(void *mem) const override
casadi_int max_iter_
maximum number of sqp iterations
double primalInfeasibility(ScpgenMemory *m) const
std::vector< casadi_int > print_x_
void printIteration(ScpgenMemory *m, std::ostream &stream) const
void eval_res(ScpgenMemory *m) const
casadi_int lbfgs_memory_
Memory size of L-BFGS method.
Function vec_fcn_
Quadratic approximation.
void set_work(void *mem, const double **&arg, double **&res, casadi_int *&iw, double *&w) const override
Set the (persistent) work vectors.
bool print_time_
Print timers.
static Nlpsol * creator(const std::string &name, const Function &nlp)
Create a new NLP Solver.
Dict get_stats(void *mem) const override
Get all statistics.
double dualInfeasibility(ScpgenMemory *m) const
double tol_reg_
Tolerance on regularization.
void init(const Dict &opts) override
Initialize.
Sparsity T() const
Transpose the matrix.
casadi_int nnz() const
Get the number of (structural) non-zeros.
bool is_dense() const
Is dense?
Function conic(const std::string &name, const std::string &solver, const SpDict &qp, const Dict &opts)
int CASADI_NLPSOL_SCPGEN_EXPORT casadi_register_nlpsol_scpgen(Nlpsol::Plugin *plugin)
T1 casadi_norm_1(casadi_int n, const T1 *x)
NORM_1: ||x||_1 -> return.
@ CONIC_UBA
dense, (nc x 1)
@ CONIC_A
The matrix A: sparse, (nc x n) - product with x must be dense.
@ CONIC_G
The vector g: dense, (n x 1)
@ CONIC_LBA
dense, (nc x 1)
@ CONIC_UBX
dense, (n x 1)
@ CONIC_LBX
dense, (n x 1)
T1 casadi_bilin(const T1 *A, const casadi_int *sp_A, const T1 *x, const T1 *y)
std::string temporary_file(const std::string &prefix, const std::string &suffix)
T1 casadi_sum_viol(casadi_int n, const T1 *x, const T1 *lb, const T1 *ub)
Sum of bound violations.
void casadi_copy(const T1 *x, casadi_int n, T1 *y)
COPY: y <-x.
std::string str(const T &v)
String representation, any type.
GenericType::Dict Dict
C++ equivalent of Python's dict or MATLAB's struct.
void casadi_mtimes(const T1 *x, const casadi_int *sp_x, const T1 *y, const casadi_int *sp_y, T1 *z, const casadi_int *sp_z, T1 *w, casadi_int tr)
Sparse matrix-matrix multiplication: z <- z + x*y.
T1 casadi_dot(casadi_int n, const T1 *x, const T1 *y)
Inner product.
T dot(const std::vector< T > &a, const std::vector< T > &b)
void casadi_scal(casadi_int n, T1 alpha, T1 *x)
SCAL: x <- alpha*x.
void casadi_axpy(casadi_int n, T1 alpha, const T1 *x, T1 *y)
AXPY: y <- a*x + y.
void CASADI_NLPSOL_SCPGEN_EXPORT casadi_load_nlpsol_scpgen()
T1 casadi_norm_inf(casadi_int n, const T1 *x)
void casadi_clear(T1 *x, casadi_int n)
CLEAR: x <- 0.
void casadi_mv(const T1 *x, const casadi_int *sp_x, const T1 *y, T1 *z, casadi_int tr)
Sparse matrix-vector multiplication: z <- z + x*y.
Function external(const std::string &name, const Importer &li, const Dict &opts)
Load a just-in-time compiled external function.
@ CONIC_X
The primal solution.
@ CONIC_LAM_A
The dual solution corresponding to linear bounds.
@ CONIC_LAM_X
The dual solution corresponding to simple bounds.
casadi_nlpsol_data< double > d_nlp
Options metadata for a class.
const char * iteration_note
std::vector< VarMem > lifted_mem