#include "sqpmethod.hpp"

#include "casadi/core/casadi_misc.hpp"
#include "casadi/core/calculus.hpp"
#include "casadi/core/conic.hpp"
#include "casadi/core/conic_impl.hpp"
#include "casadi/core/convexify.hpp"
int CASADI_NLPSOL_SQPMETHOD_EXPORT
casadi_register_nlpsol_sqpmethod(Nlpsol::Plugin* plugin) {
  plugin->name = "sqpmethod";
  plugin->version = CASADI_VERSION;
70 "The QP solver to be used by the SQP method [qpoases]"}},
73 "Options to be passed to the QP solver"}},
74 {
"hessian_approximation",
76 "limited-memory|exact"}},
79 "Maximum number of SQP iterations"}},
82 "Minimum number of SQP iterations"}},
85 "Maximum number of linesearch iterations"}},
88 "Stopping criterion for primal infeasibility"}},
91 "Stopping criterion for dual infeasability"}},
94 "Armijo condition, coefficient of decrease in merit"}},
97 "Line-search parameter, restoration factor of stepsize"}},
100 "Size of memory to store history of merit function values"}},
103 "Size of L-BFGS memory."}},
106 "Print the header with problem statistics"}},
109 "Print the iterations"}},
112 "Print a status message after solving"}},
115 "The size (inf-norm) of the step size should not become smaller than this."}},
118 "Function for calculating the Hessian of the Lagrangian (autogenerated by default)"}},
121 "Function for calculating the gradient of the objective and Jacobian of the constraints "
122 "(autogenerated by default)"}},
123 {
"convexify_strategy",
125 "NONE|regularize|eigen-reflect|eigen-clip. "
126 "Strategy to convexify the Lagrange Hessian before passing it to the solver."}},
129 "When using a convexification strategy, make sure that "
130 "the smallest eigenvalue is at least this (default: 1e-7)."}},
133 "Maximum number of iterations to compute an eigenvalue decomposition (default: 50)."}},
136 "Enable the elastic mode which is used when the QP is infeasible (default: false)."}},
139 "Starting value for the penalty parameter of elastic mode (default: 1)."}},
142 "Maximum value for the penalty parameter of elastic mode (default: 1e20)."}},
145 "Minimum value for gamma_1 (default: 1e-5)."}},
146 {
"second_order_corrections",
148 "Enable second order corrections. "
149 "These are used when a step is considered bad by the merit function and constraint norm "
150 "(default: false)."}},
153 "Initialize the QP subproblems with a feasible initial value (default: false)."}}
std::string hessian_approximation = "exact";
std::string qpsol_plugin = "qpoases";
Dict qpsol_options;

std::string convexify_strategy = "none";
double convexify_margin = 1e-7;
casadi_int max_iter_eig = 200;
for (auto&& op : opts) {
  if (op.first=="max_iter") {
    max_iter_ = op.second;
  } else if (op.first=="min_iter") {
    min_iter_ = op.second;
  } else if (op.first=="max_iter_ls") {
    max_iter_ls_ = op.second;
  } else if (op.first=="c1") {
    c1_ = op.second;
  } else if (op.first=="beta") {
    beta_ = op.second;
  } else if (op.first=="merit_memory") {
    merit_memsize_ = op.second;
  } else if (op.first=="lbfgs_memory") {
    lbfgs_memory_ = op.second;
  } else if (op.first=="tol_pr") {
    tol_pr_ = op.second;
  } else if (op.first=="tol_du") {
    tol_du_ = op.second;
  } else if (op.first=="hessian_approximation") {
    hessian_approximation = op.second.to_string();
  } else if (op.first=="min_step_size") {
    min_step_size_ = op.second;
  } else if (op.first=="qpsol") {
    qpsol_plugin = op.second.to_string();
  } else if (op.first=="qpsol_options") {
    qpsol_options = op.second;
  } else if (op.first=="print_header") {
    print_header_ = op.second;
  } else if (op.first=="print_iteration") {
    print_iteration_ = op.second;
  } else if (op.first=="print_status") {
    print_status_ = op.second;
  } else if (op.first=="hess_lag") {
    Function f = op.second;
    casadi_assert_dev(f.n_in()==4);
    casadi_assert_dev(f.n_out()==1);
    set_function(f, "nlp_hess_l");
  } else if (op.first=="jac_fg") {
    Function f = op.second;
    casadi_assert_dev(f.n_in()==2);
    casadi_assert_dev(f.n_out()==4);
    set_function(f, "nlp_jac_fg");
  } else if (op.first=="convexify_strategy") {
    convexify_strategy = op.second.to_string();
  } else if (op.first=="convexify_margin") {
    convexify_margin = op.second;
  } else if (op.first=="max_iter_eig") {
    max_iter_eig = op.second;
  } else if (op.first=="elastic_mode") {
    elastic_mode_ = op.second;
  } else if (op.first=="gamma_0") {
    gamma_0_ = op.second;
  } else if (op.first=="gamma_max") {
    gamma_max_ = op.second;
  } else if (op.first=="gamma_1_min") {
    gamma_1_min_ = op.second;
  } else if (op.first=="second_order_corrections") {
    so_corr_ = op.second;
  } else if (op.first=="init_feasible") {
    init_feasible_ = op.second;
  }
}

if (elastic_mode_) {
  // In elastic mode the QP solver must report infeasibility instead of erroring,
  // since a failed QP is the trigger for re-solving the relaxed (elastic) QP
  auto it = qpsol_options.find("error_on_fail");
  if (it==qpsol_options.end()) {
    qpsol_options["error_on_fail"] = false;
  } else {
    casadi_assert(!it->second,
      "QP solver with setting error_on_fail is incompatible with elastic mode sqpmethod.");
  }
}
// Generate functions for the oracle (call heads reconstructed from the
// input/output name lists that survive in this excerpt)
create_function("nlp_jac_fg", {"x", "p"},
                {"f", "grad:f:x", "g", "jac:g:x"});

create_function("nlp_hess_l", {"x", "p", "lam:f", "lam:g"},
                {"hess:gamma:x:x"}, {{"gamma", {"f", "g"}}});

if (convexify_strategy!="none") {
  convexify_ = true;
  Dict opts;
  opts["strategy"] = convexify_strategy;
  opts["margin"] = convexify_margin;
  opts["max_iter_eig"] = max_iter_eig;
  Hsp_ = Convexify::setup(convexify_data_, Hsp_, opts);
}
casadi_assert(!qpsol_plugin.empty(),
  "'qpsol' option has not been set");

std::vector<casadi_int> n_v = range(nx_);

Dict qpsol_ela_options = Dict(qpsol_options);

casadi_assert(!qpsol_plugin.empty(),
  "'qpsol' option has not been set");
qpsol_ela_ = conic("qpsol_ela", qpsol_plugin, {{"h", Hsp_ela}, {"a", Asp_ela}},
                   qpsol_ela_options);
if (print_header_) {
  print("-------------------------------------------\n");
  print("This is casadi::Sqpmethod.\n");
  if (exact_hessian_) {
    print("Using exact Hessian\n");
  } else {
    print("Using limited memory BFGS Hessian approximation\n");
  }
  print("Number of variables:                       %9d\n", nx_);
  print("Number of constraints:                     %9d\n", ng_);
  print("Number of nonzeros in constraint Jacobian: %9d\n", Asp_.nnz());
  print("Number of nonzeros in Lagrangian Hessian:  %9d\n", Hsp_.nnz());
}

set_sqpmethod_prob();
void Sqpmethod::set_sqpmethod_prob() {

void Sqpmethod::set_work(void* mem, const double**& arg, double**& res,
                         casadi_int*& iw, double*& w) const {
m->add_stat("linesearch");

auto d_nlp = &m->d_nlp;

casadi_int ls_iter = 0;

bool ls_success = true;

bool so_succes = false;

const double one = 1.;

std::string info = "";

double gamma_1 = 0.0;

casadi_int ela_it = -1;
m->arg[0] = d_nlp->z;
m->arg[1] = d_nlp->p;
m->res[0] = &d_nlp->objective;

m->res[2] = d_nlp->z + nx_;

m->return_status = "Non_Regular_Sensitivities";

print("MESSAGE(sqpmethod): No regularity of sensitivities at current point.\n");
print_iteration(m->iter_count, d_nlp->objective, pr_inf, du_inf, dx_norminf,
                m->reg, ls_iter, ls_success, so_succes, info);

m->return_status = "User_Requested_Stop";

print("MESSAGE(sqpmethod): Convergence achieved after %d iterations\n", m->iter_count);
m->return_status = "Solve_Succeeded";

if (print_status_)
  print("MESSAGE(sqpmethod): Maximum number of iterations reached.\n");
m->return_status = "Maximum_Iterations_Exceeded";

if (print_status_)
  print("MESSAGE(sqpmethod): Search direction becomes too small without "
        "convergence criteria being met.\n");
m->return_status = "Search_Direction_Becomes_Too_Small";
m->arg[0] = d_nlp->z;
m->arg[1] = d_nlp->p;

m->arg[3] = d_nlp->lam + nx_;

} else if (m->iter_count==0) {
  // Initialize BFGS
  casadi_bfgs_reset(Hsp_, d->Bk);
} else {
  // Update the Hessian approximation
  casadi_bfgs(Hsp_, d->Bk, d->dx, d->gLag, d->gLag_old, m->w);
}

int ret = solve_QP(m, d->Bk, d->gf, d->lbdz, d->ubdz, d->Jk, d->dx, d->dlam, 0);

ret = solve_elastic_mode(m, &ela_it, gamma_1, ls_iter, ls_success, so_succes,
                         pr_inf, du_inf, dx_norminf, &info, 0);

} else if (ela_it == -1) {

if (pi_inf > gamma_1) {
  ret = solve_elastic_mode(m, &ela_it, gamma_1, ls_iter, ls_success, so_succes,
                           pr_inf, du_inf, dx_norminf, &info, 0);
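// Elastic-mode entry points (sketch of the control flow around these calls):
// the relaxed QP is solved either when the regular QP reports infeasibility,
// or, before that, when the primal infeasibility pi_inf already exceeds the
// threshold gamma_1 computed from the current iterate.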
double l1_infeas, l1;

l1 = d_nlp->objective + m->sigma * l1_infeas;

double l1_infeas_cand, l1_cand, fk_cand;

m->arg[0] = d->z_cand;
m->arg[1] = d_nlp->p;
m->res[0] = &fk_cand;
m->res[1] = d->z_cand + nx_;

l1_cand = fk_cand + m->sigma*l1_infeas_cand;

if (so_corr_ && l1_cand > l1 && l1_infeas_cand > l1_infeas) {
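// Second order correction (SOC): the candidate step increased both the merit
// function and the constraint violation, so the QP is re-solved with the
// constraints re-linearized around the candidate point. This is the standard
// remedy for the Maratos effect; the corrected and uncorrected steps are
// compared on the merit function further below.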
for (casadi_int i = 0; i < nx_; ++i) {
  if (d->lbdz[i] > 0) d->dx[i] = d->lbdz[i];
  else if (d->ubdz[i] < 0) d->dx[i] = d->ubdz[i];
}

ret = solve_QP(m, d->Bk, d->gf, d->lbdz, d->ubdz, d->Jk,
               d->dx, d->dlam, 1);

ret = solve_elastic_mode(m, &ela_it, gamma_1, ls_iter, ls_success, so_succes,
                         pr_inf, du_inf, dx_norminf, &info, 1);

double l1_cand_norm = l1_cand;

m->arg[0] = d->z_cand;
m->arg[1] = d_nlp->p;
m->res[0] = &fk_cand;
m->res[1] = d->z_cand + nx_;

l1_cand_soc = fk_cand + m->sigma*l1_infeas_cand;

if (l1_cand_norm < l1_cand_soc) {
double tl1 = casadi_dot(nx_, d->dx, d->gf) - m->sigma*l1_infeas;

d->merit_mem[m->merit_ind] = l1;

m->arg[0] = d->z_cand;
m->arg[1] = d_nlp->p;
m->res[0] = &fk_cand;
m->res[1] = d->z_cand + nx_;

if (l1_cand <= l1 + t * c1_ * tl1) {
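// Armijo sufficient-decrease test on the L1 merit function: accept step
// length t when
//   l1(z + t*dx) <= l1 + t * c1 * tl1,
// where tl1 = gf'*dx - sigma*l1_infeas (computed above) approximates the
// directional derivative of the merit function; otherwise t is shrunk by
// the restoration factor beta.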
print("%4s %14s %9s %9s %9s %7s %2s %7s\n", "iter", "objective", "inf_pr",
      "inf_du", "||d||", "lg(rg)", "ls", "info");
void Sqpmethod::print_iteration(casadi_int iter, double obj,
                                double pr_inf, double du_inf,
                                double dx_norm, double rg,
                                casadi_int ls_trials, bool ls_success,
                                bool so_succes, std::string info) const {
  print("%4d %14.6e %9.2e %9.2e %9.2e ", iter, obj, pr_inf, du_inf, dx_norm);

  print("%7.2f ", log10(rg));

  print("%2d", ls_trials);
int Sqpmethod::solve_QP(SqpmethodMemory* m, const double* H, const double* g,
                        const double* lbdz, const double* ubdz, const double* A,
                        double* x_opt, double* dlam, int mode) const {

  if (!m_qpsol->d_qp.success) {
int Sqpmethod::solve_ela_QP(SqpmethodMemory* m, const double* H, const double* g,
                            const double* lbdz, const double* ubdz, const double* A,
                            double* x_opt, double* dlam) const {

  if (!m_qpsol_ela->d_qp.success) {
int Sqpmethod::solve_elastic_mode(SqpmethodMemory* m, casadi_int* ela_it, double gamma_1,
                                  casadi_int ls_iter, bool ls_success, bool so_succes,
                                  double pr_inf, double du_inf, double dx_norminf,
                                  std::string* info, int mode) const {
  auto d_nlp = &m->d_nlp;
  auto d = &m->d;

  if (mode != 0 && mode != 1) casadi_error("Wrong mode provided to solve_elastic_mode.");

  double gamma = 0.;
  if (mode == 0) (*ela_it)++;

  double *temp_1, *temp_2;

  temp_1 = d->lbdz + nx_;
  temp_2 = d->lbdz + nx_ + 2*ng_;

  temp_1 = d->ubdz + nx_;
  temp_2 = d->ubdz + nx_ + 2*ng_;

  gamma = pow(10, *ela_it * (*ela_it - 1) / 2) * gamma_1;

  casadi_error("Error in elastic mode of QP solver. "
               "Gamma became larger than gamma_max.");

  print_iteration(m->iter_count, d_nlp->objective, pr_inf, du_inf, dx_norminf,
                  m->reg, ls_iter, ls_success, so_succes, *info);
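// Penalty growth schedule: with k = *ela_it, gamma = 10^(k*(k-1)/2) * gamma_1
// gives gamma_1, 10*gamma_1, 1000*gamma_1, ... for k = 1, 2, 3, so each retry
// raises the slack penalty increasingly aggressively until it would exceed
// gamma_max, at which point the error above is raised.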
temp_1 = d->gf + nx_;

for (casadi_int i = 0; i < nx_; ++i) {
  if (d->lbdz[i] > 0) d->dx[i] = d->lbdz[i];
  else if (d->ubdz[i] < 0) d->dx[i] = d->ubdz[i];
}

for (casadi_int i = 0; i < ng_; ++i) {
  if (d->ubdz[nx_+2*ng_+i]-d->temp_mem[i] < 0) {
    d->dx[nx_+i] = -d->ubdz[nx_+2*ng_+i]+d->temp_mem[i];
  }
  if (d->lbdz[nx_+2*ng_+i]-d->temp_mem[i] > 0) {
    d->dx[nx_+ng_+i] = d->lbdz[nx_+2*ng_+i]-d->temp_mem[i];
  }
}

int ret = solve_ela_QP(m, d->Bk, d->gf, d->lbdz, d->ubdz, d->Jk, d->dx, d->dlam);

if (mode == 0) *info = "Elastic mode QP (gamma = " + str(gamma) + ")";
g.local("d", "struct casadi_sqpmethod_data*");

g.local("p", "struct casadi_sqpmethod_prob");

g << "d->prob = &p;\n";

g << "p.nlp = &p_nlp;\n";
g << "casadi_sqpmethod_init(d, &arg, &res, &iw, &w, "

g.local("gamma_1", "double");
g.local("ela_it", "casadi_int");

g.local("temp_norm", "double");

g.local("ret", "int");

g.local("iter_count", "casadi_int");

g.local("sigma", "casadi_real");

g.local("ls_iter", "casadi_int");

g.local("t", "casadi_real");

g.local("ls_success", "casadi_int");

g << g.clear("d->dx", nx_) << "\n";
g.comment("MAIN OPTIMIZATION LOOP");
g << "while (1) {\n";
g.comment("Evaluate f, g and first order derivative information");
g << "d->arg[0] = d_nlp.z;\n";
g << "d->arg[1] = d_nlp.p;\n";
g << "d->res[0] = &d_nlp.objective;\n";
g << "d->res[1] = d->gf;\n";
g << "d->res[2] = d_nlp.z+" + str(nx_) + ";\n";
g << "d->res[3] = d->Jk;\n";
std::string nlp_jac_fg = g(get_function("nlp_jac_fg"), "d->arg", "d->res", "d->iw", "d->w");
g << "if (" + nlp_jac_fg + ") return 1;\n";
g.comment("Evaluate the gradient of the Lagrangian");
g << g.copy("d->gf", nx_, "d->gLag") << "\n";
g << g.mv("d->Jk", Asp_, "d_nlp.lam+"+str(nx_), "d->gLag", true) << "\n";
g << g.axpy(nx_, "1.0", "d_nlp.lam", "d->gLag") << "\n";
g.comment("Primal infeasibility");
g.local("pr_inf", "casadi_real");
g << "pr_inf = " << g.max_viol(nx_+ng_, "d_nlp.z", "d_nlp.lbz", "d_nlp.ubz") << ";\n";
g.comment("Inf-norm of Lagrange gradient");
g.local("du_inf", "casadi_real");
g << "du_inf = " << g.norm_inf(nx_, "d->gLag") << ";\n";
g.comment("Inf-norm of step");
g.local("dx_norminf", "casadi_real");
g << "dx_norminf = " << g.norm_inf(nx_, "d->dx") << ";\n";
g.comment("Checking convergence criteria");
g << "if (iter_count >= " << min_iter_ << " && pr_inf < " << tol_pr_ <<
     " && du_inf < " << tol_du_ << ") break;\n";
g << "if (iter_count >= " << max_iter_ << ") break;\n";
g << "if (iter_count >= 1 && iter_count >= " << min_iter_ << " && dx_norminf <= " <<
     min_step_size_ << ") break;\n";
g.comment("Update/reset exact Hessian");
g << "d->arg[0] = d_nlp.z;\n";
g << "d->arg[1] = d_nlp.p;\n";
g.local("one", "const casadi_real");
g.init_local("one", "1");
g << "d->arg[2] = &one;\n";
g << "d->arg[3] = d_nlp.lam+" + str(nx_) + ";\n";
g << "d->res[0] = d->Bk;\n";
std::string nlp_hess_l = g(get_function("nlp_hess_l"), "d->arg", "d->res", "d->iw", "d->w");
g << "if (" + nlp_hess_l + ") return 1;\n";

std::string ret = g.convexify_eval(convexify_data_, "d->Bk", "d->Bk", "d->iw", "d->w");
g << "if (" << ret << ") return 1;\n";

g << "if (iter_count==0) {\n";

g << "casadi_bfgs_reset(p.sp_h, d->Bk);\n";

g << "casadi_bfgs_reset(p.sp_h, d->Bk);\n";
g.comment("Update the Hessian approximation");
g << "casadi_bfgs(p.sp_h, d->Bk, d->dx, d->gLag, d->gLag_old, d->w);\n";
g.comment("Formulate the QP");
g << g.copy("d_nlp.lbz", nx_+ng_, "d->lbdz") << "\n";
g << g.axpy(nx_+ng_, "-1.0", "d_nlp.z", "d->lbdz") << "\n";
g << g.copy("d_nlp.ubz", nx_+ng_, "d->ubdz") << "\n";
g << g.axpy(nx_+ng_, "-1.0", "d_nlp.z", "d->ubdz") << "\n";

g << g.copy("d_nlp.lam", nx_+ng_, "d->dlam") << "\n";
g << g.clear("d->dx", nx_) << "\n";

g.comment("Make initial guess feasible");
g << "for (casadi_int i = 0; i < " << nx_ << "; ++i) {\n";
g << "if (d->lbdz[i] > 0) d->dx[i] = d->lbdz[i];\n";
g << "else if (d->ubdz[i] < 0) d->dx[i] = d->ubdz[i];\n";

g.comment("Increase counter");
g << "iter_count++;\n";

codegen_qp_solve(g, "d->Bk", "d->gf", "d->lbdz", "d->ubdz", "d->Jk", "d->dx", "d->dlam", 0);
g.comment("Elastic mode calculations");
g << "if (ret == " << 0 << ") {\n";
g << "ela_it = -1;\n";

g << "if (ela_it == -1) {\n";
g << "ela_it = 0;\n";

g << "} else if (ela_it == -1) {\n";

g << "if (pi_inf > gamma_1) {\n";
g << "ela_it = 0;\n";

g.comment("Calculate penalty parameter of merit function");
g << "sigma = " << g.fmax("sigma", "(1.01*" + g.norm_inf(nx_+ng_, "d->dlam") + ")") << ";\n";
g.comment("Calculate L1-merit function in the actual iterate");
g.local("l1_infeas", "casadi_real");
g << "l1_infeas = " << g.sum_viol(nx_+ng_, "d_nlp.z", "d_nlp.lbz", "d_nlp.ubz") << ";\n";
g.local("l1", "casadi_real");
g << "l1 = d_nlp.objective + sigma * l1_infeas;\n";
g.local("l1_infeas_cand", "casadi_real");
g.local("l1_cand", "casadi_real");
g.local("fk_cand", "casadi_real");
g.comment("Take candidate step");
g << g.copy("d_nlp.z", nx_, "d->z_cand") << ";\n";
g << g.axpy(nx_, "1.", "d->dx", "d->z_cand") << ";\n";

g.comment("Evaluate objective and constraints");
g << "d->arg[0] = d->z_cand;\n";
g << "d->arg[1] = d_nlp.p;\n";
g << "d->res[0] = &fk_cand;\n";
g << "d->res[1] = d->z_cand+" + str(nx_) + ";\n";
std::string nlp_fg = g(get_function("nlp_fg"), "d->arg", "d->res", "d->iw", "d->w");
g << "if (" << nlp_fg << ") {\n";
g << "l1_cand = -casadi_inf;\n";

g << "l1_infeas_cand = " << g.sum_viol(nx_+ng_, "d->z_cand", "d_nlp.lbz", "d_nlp.ubz") << ";\n";
g << "l1_cand = fk_cand + sigma*l1_infeas_cand;\n";

g << "if (l1_cand > l1 && l1_infeas_cand > l1_infeas) {\n";
g.comment("Copy in case of fail");
g << g.copy("d->dx", nx_, "d->temp_sol") << "\n";

g.comment("Add gradient times proposal step to bounds");
g << g.mv("d->Jk", Asp_, "d->dx", "d->lbdz+" + str(nx_), false) << ";\n";
g << g.copy("d->lbdz", nx_+ng_, "d->ubdz") << ";\n";

g << g.axpy(nx_+ng_, "1.", "d_nlp.lbz", "d->lbdz") << ";\n";
g << g.axpy(nx_+ng_, "1.", "d_nlp.ubz", "d->ubdz") << ";\n";

g.comment("Subtract constraints in candidate step from bounds");
g << g.axpy(nx_, "-1.", "d_nlp.z", "d->lbdz") << ";\n";
g << g.axpy(nx_, "-1.", "d_nlp.z", "d->ubdz") << ";\n";

g << g.copy("d_nlp.lam", nx_+ng_, "d->dlam") << "\n";

g.comment("Make initial guess feasible");
g << "for (casadi_int i = 0; i < " << nx_ << "; ++i) {\n";
g << "if (d->lbdz[i] > 0) d->dx[i] = d->lbdz[i];\n";
g << "else if (d->ubdz[i] < 0) d->dx[i] = d->ubdz[i];\n";

codegen_qp_solve(g, "d->Bk", "d->gf", "d->lbdz", "d->ubdz", "d->Jk", "d->dx", "d->dlam", 1);

g.comment("Second order corrections without elastic mode");
g << "if (ela_it == -1) {\n";

g << g.copy("d_nlp.lam", nx_+ng_, "d->dlam") << "\n";
g.comment("Make initial guess feasible");
g << "for (casadi_int i = 0; i < " << nx_ << "; ++i) {\n";
g << "if (d->lbdz[i] > 0) d->dx[i] = d->lbdz[i];\n";
g << "else if (d->ubdz[i] < 0) d->dx[i] = d->ubdz[i];\n";

codegen_qp_solve(g, "d->Bk", "d->gf", "d->lbdz", "d->ubdz", "d->Jk", "d->dx", "d->dlam", 1);

g.comment("Second order corrections in elastic mode");

g << "if (ela_it == -1) {\n";
g << "ela_it = 0;\n";

g.comment("Fallback on previous solution if the second order correction failed");
g << "if (ret != " << 0 << ") {\n";
g << g.copy("d->temp_sol", nx_, "d->dx") << "\n";

g.comment("Check if corrected step is better than the original one using the merit function");
g << "double l1_cand_norm = l1_cand;\n";
g << "double l1_cand_soc;\n";

g.comment("Take candidate step");
g << g.copy("d_nlp.z", nx_, "d->z_cand") << "\n";
g << g.axpy(nx_, "1.", "d->dx", "d->z_cand") << "\n";

g.comment("Evaluate objective and constraints");
g << "d->arg[0] = d->z_cand;\n";
g << "d->arg[1] = d_nlp.p;\n";
g << "d->res[0] = &fk_cand;\n";
g << "d->res[1] = d->z_cand+" + str(nx_) + ";\n";
nlp_fg = g(get_function("nlp_fg"), "d->arg", "d->res", "d->iw", "d->w");
g << "if (" << nlp_fg << ") {\n";
g << "l1_cand_soc = casadi_inf;\n";

g << "l1_infeas_cand = " << g.sum_viol(nx_+ng_, "d->z_cand", "d_nlp.lbz", "d_nlp.ubz") << ";\n";
g << "l1_cand_soc = fk_cand + sigma*l1_infeas_cand;\n";

g << "if (l1_cand_norm < l1_cand_soc) {\n";
g.comment("Copy normal step if merit function increases");
g << g.copy("d->temp_sol", nx_, "d->dx") << "\n";

g.comment("Detecting indefiniteness");
g.comment("Right-hand side of Armijo condition");
g.local("F_sens", "casadi_real");
g << "F_sens = " << g.dot(nx_, "d->dx", "d->gf") << ";\n";
g.local("tl1", "casadi_real");
g << "tl1 = F_sens - sigma * l1_infeas;\n";

g.local("fk_cand", "casadi_real");
g.comment("Merit function value in candidate");
g.local("l1_cand", "casadi_real");
g << "l1_cand = 0.0;\n";
g.comment("Reset line-search counter, success marker");
g << "ls_iter = 0;\n";
g.comment("Line-search loop");
g << "while (1) {\n";
g.comment("Increase counter");
g << "ls_iter++;\n";

g << g.copy("d_nlp.z", nx_, "d->z_cand") << "\n";
g << g.axpy(nx_, "t", "d->dx", "d->z_cand") << "\n";
g.comment("Evaluating objective and constraints");
g << "d->arg[0] = d->z_cand;\n";
g << "d->arg[1] = d_nlp.p;\n";
g << "d->res[0] = &fk_cand;\n";
g << "d->res[1] = d->z_cand+" + str(nx_) + ";\n";
std::string nlp_fg = g(get_function("nlp_fg"), "d->arg", "d->res", "d->iw", "d->w");
g << "if (" << nlp_fg << ") {\n";
g.comment("Avoid infinite recursion");

g.comment("Line-search failed, skip iteration");
g << "t = " << beta_ << "* t;\n";

g.comment("Calculating merit-function in candidate");
g << "l1_cand = fk_cand + sigma * "
  << g.sum_viol(nx_+ng_, "d->z_cand", "d_nlp.lbz", "d_nlp.ubz") << ";\n";
g << "if (l1_cand <= l1 + t * " << c1_ << "* tl1) {\n";

g.comment("Line-search not successful, but we accept it.");

g << "t = " << beta_ << "* t;\n";

g.comment("Candidate accepted, update dual variables");
g << g.scal(nx_+ng_, "1-t", "d_nlp.lam") << "\n";
g << g.axpy(nx_+ng_, "t", "d->dlam", "d_nlp.lam") << "\n";
g << g.scal(nx_, "t", "d->dx") << "\n";

g << g.copy("d->dlam", nx_ + ng_, "d_nlp.lam") << "\n";

g << g.axpy(nx_, "1.0", "d->dx", "d_nlp.z") << "\n";

g.comment("Evaluate the gradient of the Lagrangian with the old x but new lam (for BFGS)");
g << g.copy("d->gf", nx_, "d->gLag_old") << "\n";
g << g.mv("d->Jk", Asp_, "d_nlp.lam+"+str(nx_), "d->gLag_old", true) << "\n";
g << g.axpy(nx_, "1.0", "d_nlp.lam", "d->gLag_old") << "\n";

g.comment("If linesearch failed enter elastic mode");
g << "if (ls_success == 0 && ela_it == -1) {\n";
g << "ela_it = 0;\n";
void Sqpmethod::codegen_qp_solve(CodeGenerator& cg, const std::string& H, const std::string& g,
                                 const std::string& lbdz, const std::string& ubdz,
                                 const std::string& A, const std::string& x_opt,
                                 const std::string& dlam, int mode) const {
  for (casadi_int i=0; i<qpsol_.n_in(); ++i) cg << "d->arg[" << i << "] = 0;\n";
  cg << "d->arg[" << CONIC_H << "] = " << H << ";\n";
  cg << "d->arg[" << CONIC_G << "] = " << g << ";\n";
  cg << "d->arg[" << CONIC_X0 << "] = " << x_opt << ";\n";
  cg << "d->arg[" << CONIC_LAM_X0 << "] = " << dlam << ";\n";
  cg << "d->arg[" << CONIC_LAM_A0 << "] = " << dlam << "+" << nx_ << ";\n";
  cg << "d->arg[" << CONIC_LBX << "] = " << lbdz << ";\n";
  cg << "d->arg[" << CONIC_UBX << "] = " << ubdz << ";\n";
  cg << "d->arg[" << CONIC_A << "] = " << A << ";\n";
  cg << "d->arg[" << CONIC_LBA << "] = " << lbdz << "+" << nx_ << ";\n";
  cg << "d->arg[" << CONIC_UBA << "] = " << ubdz << "+" << nx_ << ";\n";
  for (casadi_int i=0; i<qpsol_.n_out(); ++i) cg << "d->res[" << i << "] = 0;\n";
  cg << "d->res[" << CONIC_X << "] = " << x_opt << ";\n";
  cg << "d->res[" << CONIC_LAM_X << "] = " << dlam << ";\n";
  cg << "d->res[" << CONIC_LAM_A << "] = " << dlam << "+" << nx_ << ";\n";
  std::string flag = cg(qpsol_, "d->arg", "d->res", "d->iw", "d->w");
  cg << "ret = " << flag << ";\n";
  cg << "if (ret == -1000) return -1000;\n";
void Sqpmethod::codegen_qp_ela_solve(CodeGenerator& cg, const std::string& H,
                                     const std::string& g, const std::string& lbdz,
                                     const std::string& ubdz, const std::string& A,
                                     const std::string& x_opt, const std::string& dlam) const {
  for (casadi_int i=0; i<qpsol_ela_.n_in(); ++i) cg << "d->arg[" << i << "] = 0;\n";
  cg << "d->arg[" << CONIC_H << "] = " << H << ";\n";
  cg << "d->arg[" << CONIC_G << "] = " << g << ";\n";
  cg << "d->arg[" << CONIC_X0 << "] = " << x_opt << ";\n";
  cg << "d->arg[" << CONIC_LAM_X0 << "] = " << dlam << ";\n";

  cg << "d->arg[" << CONIC_LBX << "] = " << lbdz << ";\n";
  cg << "d->arg[" << CONIC_UBX << "] = " << ubdz << ";\n";
  cg << "d->arg[" << CONIC_A << "] = " << A << ";\n";
  cg << "d->arg[" << CONIC_LBA << "] = " << lbdz << "+" << nx_+2*ng_ << ";\n";
  cg << "d->arg[" << CONIC_UBA << "] = " << ubdz << "+" << nx_+2*ng_ << ";\n";
  for (casadi_int i=0; i<qpsol_ela_.n_out(); ++i) cg << "d->res[" << i << "] = 0;\n";
  cg << "d->res[" << CONIC_X << "] = " << x_opt << ";\n";
  cg << "d->res[" << CONIC_LAM_X << "] = " << dlam << ";\n";

  std::string flag = cg(qpsol_ela_, "d->arg", "d->res", "d->iw", "d->w");
  cg << "ret = " << flag << ";\n";
  cg << "if (ret == -1000) return -1000;\n";
cg << "double gamma = 0.;\n";

if (mode == 0) cg << "ela_it++;\n";

cg.comment("Temp data structures for data copy");
cg << "double *temp_1, *temp_2;\n";

cg.comment("Make larger Jacobian (has 2 extra diagonal matrices with -1 and 1 respectively)");
cg << "temp_1 = d->Jk + " << Asp_.nnz() << ";\n";
cg << cg.fill("temp_1", ng_, "-1.") << ";\n";
cg << "temp_1 += " << ng_ << ";\n";
cg << cg.fill("temp_1", ng_, "1.") << ";\n";

cg.comment("Initialize bounds");
cg << "temp_1 = d->lbdz + " << nx_ << ";\n";
cg << "temp_2 = d->lbdz + " << nx_ + 2*ng_ << ";\n";
cg << cg.copy("temp_1", ng_, "temp_2") << ";\n";
cg << cg.clear("temp_1", 2*ng_) << ";\n";
cg << "temp_1 = d->ubdz + " << nx_ << ";\n";
cg << "temp_2 = d->ubdz + " << nx_ + 2*ng_ << ";\n";
cg << cg.copy("temp_1", ng_, "temp_2") << ";\n";

cg << "if (ela_it > 1) {\n";
cg << "gamma = pow(10, ela_it*(ela_it-1)/2)*gamma_1;\n";

cg << "gamma = gamma_1;\n";

cg << "if (gamma > " << gamma_max_ << ") " << "return -1" << ";\n";

cg.comment("Make larger gradient (has gamma for slack variables)");
cg << "temp_1 = d->gf + " << nx_ << ";\n";
cg << cg.fill("temp_1", 2*ng_, "gamma") << ";\n";

cg << cg.copy("d_nlp.lam", nx_, "d->dlam") << "\n";

cg.comment("Make initial guess feasible on x values");
cg << "for (casadi_int i = 0; i < " << nx_ << "; ++i) {\n";
cg << "if (d->lbdz[i] > 0) d->dx[i] = d->lbdz[i];\n";
cg << "else if (d->ubdz[i] < 0) d->dx[i] = d->ubdz[i];\n";

cg.comment("Make initial guess feasible on constraints by altering slack variables");
cg << cg.mv("d->Jk", Asp_, "d->dx", "d->temp_mem", false) << "\n";
cg << "for (casadi_int i = 0; i < " << ng_ << "; ++i) {\n";
cg << "if (d->ubdz[" << nx_+2*ng_ << "+i]-d->temp_mem[i] < 0) {\n";
cg << "d->dx[" << nx_ << "+i] = -d->ubdz[" << nx_+2*ng_ << "+i]+d->temp_mem[i];\n";

cg << "if (d->lbdz[" << nx_+2*ng_ << "+i]-d->temp_mem[i] > 0) {\n";
cg << "d->dx[" << nx_+ng_ << "+i] = d->lbdz[" << nx_+2*ng_ << "+i]-d->temp_mem[i];\n";

cg.comment("Copy constraint dlam to the right place");
stats["iter_count"] = m->iter_count;

int version = s.version("Sqpmethod", 1, 3);
s.unpack("Sqpmethod::Hrsp", Hrsp);

double convexify_margin;
s.unpack("Sqpmethod::convexify_margin", convexify_margin);

char convexify_strategy;
s.unpack("Sqpmethod::convexify_strategy", convexify_strategy);
casadi_assert(convexify_strategy==0, "deserialization failed.");

s.unpack("Sqpmethod::Hsp_project", Hsp_project);

s.unpack("Sqpmethod::scc_transform", scc_transform);
std::vector<casadi_int> scc_offset;
s.unpack("Sqpmethod::scc_offset", scc_offset);
std::vector<casadi_int> scc_mapping;
s.unpack("Sqpmethod::scc_mapping", scc_mapping);
casadi_int max_iter_eig;
s.unpack("Sqpmethod::max_iter_eig", max_iter_eig);
casadi_int block_size;
s.unpack("Sqpmethod::block_size", block_size);

s.unpack("Sqpmethod::scc_sp", scc_sp);

set_sqpmethod_prob();