// feasiblesqpmethod.cpp
1 /*
2  * This file is part of CasADi.
3  *
4  * CasADi -- A symbolic framework for dynamic optimization.
5  * Copyright (C) 2010-2023 Joel Andersson, Joris Gillis, Moritz Diehl,
6  * KU Leuven. All rights reserved.
7  * Copyright (C) 2011-2014 Greg Horn
8  * Copyright (C) 2022-2023 David Kiessling
9  *
10  * CasADi is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 3 of the License, or (at your option) any later version.
14  *
15  * CasADi is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with CasADi; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  *
24  */
25 
26 #include "feasiblesqpmethod.hpp"
27 
28 #include "casadi/core/casadi_misc.hpp"
29 #include "casadi/core/calculus.hpp"
30 #include "casadi/core/conic.hpp"
31 #include "casadi/core/conic_impl.hpp"
32 #include "casadi/core/convexify.hpp"
33 
34 #include <ctime>
35 #include <iomanip>
36 #include <fstream>
37 #include <cmath>
38 #include <cfloat>
39 #include <signal.h>
40 
41 namespace casadi {
42 
43 
44  extern "C"
45  int CASADI_NLPSOL_FEASIBLESQPMETHOD_EXPORT
46  casadi_register_nlpsol_feasiblesqpmethod(Nlpsol::Plugin* plugin) {
47  plugin->creator = Feasiblesqpmethod::creator;
48  plugin->name = "feasiblesqpmethod";
49  plugin->doc = Feasiblesqpmethod::meta_doc.c_str();
50  plugin->version = CASADI_VERSION;
51  plugin->options = &Feasiblesqpmethod::options_;
52  plugin->deserialize = &Feasiblesqpmethod::deserialize;
53  return 0;
54  }
55 
56  extern "C"
57  void CASADI_NLPSOL_FEASIBLESQPMETHOD_EXPORT casadi_load_nlpsol_feasiblesqpmethod() {
59  }
60 
// Constructor: forward the solver instance name and the NLP function to the
// generic Nlpsol base class. All option processing happens later in init().
Feasiblesqpmethod::Feasiblesqpmethod(const std::string& name, const Function& nlp)
  : Nlpsol(name, nlp) {
}
64 
66  clear_mem();
67  }
68 
70  = {{&Nlpsol::options_},
71  {{"solve_type",
72  {OT_STRING,
73  "The solver type: Either SQP or SLP. Defaults to SQP"}},
74  {"qpsol",
75  {OT_STRING,
76  "The QP solver to be used by the SQP method [qpoases]"}},
77  {"qpsol_options",
78  {OT_DICT,
79  "Options to be passed to the QP solver"}},
80  {"hessian_approximation",
81  {OT_STRING,
82  "limited-memory|exact"}},
83  {"max_iter",
84  {OT_INT,
85  "Maximum number of SQP iterations"}},
86  {"min_iter",
87  {OT_INT,
88  "Minimum number of SQP iterations"}},
89  {"tol_pr",
90  {OT_DOUBLE,
91  "Stopping criterion for primal infeasibility"}},
92  {"tol_du",
93  {OT_DOUBLE,
94  "Stopping criterion for dual infeasability"}},
95  {"merit_memory",
96  {OT_INT,
97  "Size of memory to store history of merit function values"}},
98  {"lbfgs_memory",
99  {OT_INT,
100  "Size of L-BFGS memory."}},
101  {"print_header",
102  {OT_BOOL,
103  "Print the header with problem statistics"}},
104  {"print_iteration",
105  {OT_BOOL,
106  "Print the iterations"}},
107  {"print_status",
108  {OT_BOOL,
109  "Print a status message after solving"}},
110  {"f",
111  {OT_FUNCTION,
112  "Function for calculating the objective function (autogenerated by default)"}},
113  {"g",
114  {OT_FUNCTION,
115  "Function for calculating the constraints (autogenerated by default)"}},
116  {"grad_f",
117  {OT_FUNCTION,
118  "Function for calculating the gradient of the objective (autogenerated by default)"}},
119  {"jac_g",
120  {OT_FUNCTION,
121  "Function for calculating the Jacobian of the constraints (autogenerated by default)"}},
122  {"hess_lag",
123  {OT_FUNCTION,
124  "Function for calculating the Hessian of the Lagrangian (autogenerated by default)"}},
125  {"convexify_strategy",
126  {OT_STRING,
127  "NONE|regularize|eigen-reflect|eigen-clip. "
128  "Strategy to convexify the Lagrange Hessian before passing it to the solver."}},
129  {"convexify_margin",
130  {OT_DOUBLE,
131  "When using a convexification strategy, make sure that "
132  "the smallest eigenvalue4 is at least this (default: 1e-7)."}},
133  {"max_iter_eig",
134  {OT_DOUBLE,
135  "Maximum number of iterations to compute an eigenvalue decomposition (default: 50)."}},
136  {"init_feasible",
137  {OT_BOOL,
138  "Initialize the QP subproblems with a feasible initial value (default: false)."}},
139  {"optim_tol",
140  {OT_DOUBLE,
141  "Optimality tolerance. Below this value an iterate is considered to be optimal."}},
142  {"feas_tol",
143  {OT_DOUBLE,
144  "Feasibility tolerance. Below this tolerance an iterate is considered to be feasible."}},
145  {"tr_rad0",
146  {OT_DOUBLE,
147  "Initial trust-region radius."}},
148  {"tr_eta1",
149  {OT_DOUBLE,
150  "Lower eta in trust-region acceptance criterion."}},
151  {"tr_eta2",
152  {OT_DOUBLE,
153  "Upper eta in trust-region acceptance criterion."}},
154  {"tr_alpha1",
155  {OT_DOUBLE,
156  "Lower alpha in trust-region size criterion."}},
157  {"tr_alpha2",
158  {OT_DOUBLE,
159  "Upper alpha in trust-region size criterion."}},
160  {"tr_tol",
161  {OT_DOUBLE,
162  "Trust-region tolerance. "
163  "Below this value another scalar is equal to the trust region radius."}},
164  {"tr_acceptance",
165  {OT_DOUBLE,
166  "Is the trust-region ratio above this value, the step is accepted."}},
167  {"tr_rad_min",
168  {OT_DOUBLE,
169  "Minimum trust-region radius."}},
170  {"tr_rad_max",
171  {OT_DOUBLE,
172  "Maximum trust-region radius."}},
173  {"tr_scale_vector",
175  "Vector that tells where trust-region is applied."}},
176  {"contraction_acceptance_value",
177  {OT_DOUBLE,
178  "If the empirical contraction rate in the feasibility iterations "
179  "is above this value in the heuristics the iterations are aborted."}},
180  {"watchdog",
181  {OT_INT,
182  "Number of watchdog iterations in feasibility iterations. "
183  "After this amount of iterations, it is checked with the contraction acceptance value, "
184  "if iterations are converging."}},
185  {"max_inner_iter",
186  {OT_DOUBLE,
187  "Maximum number of inner iterations."}},
188  {"use_anderson",
189  {OT_BOOL,
190  "Use Anderson Acceleration. (default false)"}},
191  {"anderson_memory",
192  {OT_INT,
193  "Anderson memory. If Anderson is used default is 1, else default is 0."}},
194  }
195  };
196 
// Initialize the solver: set option defaults, process user options, generate
// or collect the required NLP derivative functions, construct the QP (SQP) or
// LP (SLP) subproblem solver, and allocate the work memory.
// Called exactly once, before any solve().
void Feasiblesqpmethod::init(const Dict& opts) {
  // Call the init method of the base class
  Nlpsol::init(opts);

  // Default options
  min_iter_ = 0;
  max_iter_ = 50;
  lbfgs_memory_ = 10;
  tol_pr_ = 1e-6;
  tol_du_ = 1e-6;
  std::string hessian_approximation = "exact";
  // min_step_size_ = 1e-10;
  std::string solve_type = "SQP";
  std::string qpsol_plugin = "qpoases";
  Dict qpsol_options;
  print_header_ = true;
  print_iteration_ = true;
  print_status_ = true;
  // so_corr_ = false;
  init_feasible_ = false;

  // parameters and options for FP-SQP solver
  optim_tol_ = 1e-8;
  feas_tol_ = 1e-8;
  tr_eta1_ = 0.25;
  tr_eta2_ = 0.75;
  tr_alpha1_ = 0.5;
  tr_alpha2_ = 2.0;
  tr_tol_ = 1e-8;
  tr_acceptance_ = 1e-8;
  tr_rad_min_ = 1e-10; //is this valid??
  tr_rad_max_ = 10.0;
  tr_rad0_ = 1.0;
  tr_scale_vector_ = std::vector<double>(nx_, 1.0);
  // NOTE(review): defaults for contraction_acceptance_value_ and
  // sz_anderson_memory_ are not assigned in this listing — confirm against
  // upstream that they are initialized when the corresponding options are
  // not set by the user.
  watchdog_ = 5;
  max_inner_iter_ = 50;
  use_anderson_ = false;


  std::string convexify_strategy = "none";
  double convexify_margin = 1e-7;
  casadi_int max_iter_eig = 200;

  // Read user options
  for (auto&& op : opts) {
    if (op.first=="max_iter") {
      max_iter_ = op.second;
    } else if (op.first=="min_iter") {
      min_iter_ = op.second;

    } else if (op.first=="use_anderson") {
      use_anderson_ = op.second;
    } else if (op.first=="anderson_memory") {
      sz_anderson_memory_ = op.second;

    } else if (op.first=="lbfgs_memory") {
      lbfgs_memory_ = op.second;
    } else if (op.first=="tol_pr") {
      tol_pr_ = op.second;
    } else if (op.first=="tol_du") {
      tol_du_ = op.second;
    } else if (op.first=="hessian_approximation") {
      hessian_approximation = op.second.to_string();
    } else if (op.first=="solve_type") {
      solve_type = op.second.to_string();
    } else if (op.first=="qpsol") {
      qpsol_plugin = op.second.to_string();
    } else if (op.first=="qpsol_options") {
      qpsol_options = op.second;
    } else if (op.first=="print_header") {
      print_header_ = op.second;
    } else if (op.first=="print_iteration") {
      print_iteration_ = op.second;
    } else if (op.first=="print_status") {
      print_status_ = op.second;
    } else if (op.first=="hess_lag") {
      // User-supplied Hessian of the Lagrangian
      Function f = op.second;
      casadi_assert_dev(f.n_in()==4);
      casadi_assert_dev(f.n_out()==1);
      set_function(f, "nlp_hess_l");
    } else if (op.first=="jac_g") {
      // User-supplied constraint Jacobian
      Function f = op.second;
      casadi_assert_dev(f.n_in()==2);
      casadi_assert_dev(f.n_out()==1);
      set_function(f, "nlp_jac_g");
    } else if (op.first=="grad_f") {
      // User-supplied objective gradient
      Function f = op.second;
      casadi_assert_dev(f.n_in()==2);
      casadi_assert_dev(f.n_out()==1);
      set_function(f, "nlp_grad_f");
    } else if (op.first=="f") {
      // User-supplied objective function
      Function f = op.second;
      casadi_assert_dev(f.n_in()==2);
      casadi_assert_dev(f.n_out()==1);
      set_function(f, "nlp_f");
    } else if (op.first=="g") {
      // User-supplied constraint function
      Function f = op.second;
      casadi_assert_dev(f.n_in()==2);
      casadi_assert_dev(f.n_out()==1);
      set_function(f, "nlp_g");
    /*
    else if (op.first=="nlp_jac_fg") {
      Function f = op.second;
      casadi_assert_dev(f.n_in()==2);
      casadi_assert_dev(f.n_out()==4);
      set_function(f, "nlp_jac_fg");
    }*/
    } else if (op.first=="convexify_strategy") {
      convexify_strategy = op.second.to_string();
    } else if (op.first=="convexify_margin") {
      convexify_margin = op.second;
    } else if (op.first=="max_iter_eig") {
      max_iter_eig = op.second;
    } else if (op.first=="init_feasible") {
      init_feasible_ = op.second;

    // from here FP-SQP
    } else if (op.first == "optim_tol") {
      optim_tol_ = op.second;
    } else if (op.first == "feas_tol") {
      feas_tol_ = op.second;
    } else if (op.first == "tr_rad0") {
      tr_rad0_ = op.second;
    } else if (op.first == "tr_eta1") {
      tr_eta1_ = op.second;
    } else if (op.first == "tr_eta2") {
      tr_eta2_ = op.second;
    } else if (op.first == "tr_alpha1") {
      tr_alpha1_ = op.second;
    } else if (op.first == "tr_alpha2") {
      tr_alpha2_ = op.second;
    } else if (op.first == "tr_tol") {
      tr_tol_ = op.second;
    } else if (op.first == "tr_acceptance") {
      tr_acceptance_ = op.second;
    } else if (op.first == "tr_rad_min") {
      tr_rad_min_ = op.second;
    } else if (op.first == "tr_rad_max") {
      tr_rad_max_ = op.second;
    } else if (op.first == "tr_scale_vector") {
      tr_scale_vector_ = op.second;
    } else if (op.first == "contraction_acceptance_value") {
      contraction_acceptance_value_ = op.second;
    } else if (op.first == "watchdog") {
      watchdog_ = op.second;
    } else if (op.first == "max_inner_iter") {
      max_inner_iter_ = op.second;
    }
  }

  // Use exact Hessian?
  exact_hessian_ = hessian_approximation =="exact";
  uout() << "print solve type" << solve_type << std::endl;
  use_sqp_ = solve_type=="SQP";

  convexify_ = false;

  // Get/generate required functions
  //if (max_iter_ls_ || so_corr_) create_function("nlp_fg", {"x", "p"}, {"f", "g"});
  // First order derivative information

  if (!has_function("nlp_f")) {
    create_function("nlp_f", {"x", "p"},
      {"f"});
  }
  if (!has_function("nlp_g")) {
    create_function("nlp_g", {"x", "p"},
      {"g"});
  }
  if (!has_function("nlp_jac_g")) {
    create_function("nlp_jac_g", {"x", "p"},
      {"jac:g:x"});
  }
  if (!has_function("nlp_grad_f")) {
    create_function("nlp_grad_f", {"x", "p"},
      {"grad:f:x"});
  }
  // Constraint Jacobian sparsity, reused for the subproblem solver below
  Asp_ = get_function("nlp_jac_g").sparsity_out(0);

  /*
  if (!has_function("nlp_jac_fg")) {
    create_function("nlp_jac_fg", {"x", "p"},
      {"f", "grad:f:x", "g", "jac:g:x"});
  }
  Asp_ = get_function("nlp_jac_fg").sparsity_out(3);*/
  if (use_sqp_) {
    if (exact_hessian_) {
      if (!has_function("nlp_hess_l")) {
        create_function("nlp_hess_l", {"x", "p", "lam:f", "lam:g"},
          {"hess:gamma:x:x"}, {{"gamma", {"f", "g"}}});
      }
      Hsp_ = get_function("nlp_hess_l").sparsity_out(0);
      uout() << "Sparsity pattern: " << Hsp_ << std::endl;
      casadi_assert(Hsp_.is_symmetric(), "Hessian must be symmetric");
      if (convexify_strategy!="none") {
        convexify_ = true;
        Dict opts;
        opts["strategy"] = convexify_strategy;
        opts["margin"] = convexify_margin;
        opts["max_iter_eig"] = max_iter_eig;
        opts["verbose"] = verbose_;
        // NOTE(review): the call that consumes these convexification options
        // (e.g. Convexify::setup) is not visible in this listing — confirm it
        // has not been lost, otherwise `opts` is built but unused.
      }
    } else {
      // NOTE(review): the limited-memory (BFGS) Hessian sparsity setup is not
      // visible in this listing — confirm against upstream.
    }
  }

  casadi_assert(!qpsol_plugin.empty(), "'qpsol' option has not been set");
  // qpsol_options["dump_in"] = true;
  // qpsol_options["dump_out"] = true;
  // qpsol_options["dump"] = true;
  // qpsol_options["print_out"] = true;
  // qpsol_options["error_on_fail"] = false;

  // Hsp_.to_file("h.mtx");
  // Asp_.to_file("a.mtx");
  // uout() << qpsol_options << std::endl;
  if (use_sqp_) {
    // SQP mode: quadratic subproblems with Hessian Hsp_ and Jacobian Asp_
    qpsol_ = conic("qpsol", qpsol_plugin, {{"h", Hsp_}, {"a", Asp_}},
      qpsol_options);
    // cout << qpsol_ <<std::endl;
  } else {
    // SLP mode: linear subproblems; pass an empty (all-zero) Hessian pattern
    Hsp_ = Sparsity(nx_, nx_);
    uout() << "Sparsity pattern: " << Hsp_ << std::endl;
    uout() << "Sparsity pattern: " << Asp_ << std::endl;
    // uout() << "Nonzeros: " << Hsp_.nnz() << std::endl;
    // qpsol_ = conic("qpsol", qpsol_plugin, {{"h", Hsp_}, {"a", Asp_}},
    //   qpsol_options);
    qpsol_ = conic("qpsol", qpsol_plugin, {{"a", Asp_}},
      qpsol_options);
    // qpsol_ = Function::load("/home/david/testproblems_feasible_casadi/qpsol.casadi");
    // cout << qpsol_ <<std::endl;
  }

  alloc(qpsol_);

  // BFGS?
  if (!exact_hessian_) {
    alloc_w(2*nx_); // casadi_bfgs
  }

  // Header
  if (print_header_) {
    print("-------------------------------------------\n");
    print("This is casadi::Feasiblesqpmethod.\n");
    if (exact_hessian_) {
      print("Using exact Hessian\n");
    } else {
      print("Using limited memory BFGS Hessian approximation\n");
    }
    print("Number of variables: %9d\n", nx_);
    print("Number of constraints: %9d\n", ng_);
    print("Number of nonzeros in constraint Jacobian: %9d\n", Asp_.nnz());
    print("Number of nonzeros in Lagrangian Hessian: %9d\n", Hsp_.nnz());
    print("\n");
  }


  set_feasiblesqpmethod_prob();
  // Allocate memory
  casadi_int sz_w, sz_iw;
  casadi_feasiblesqpmethod_work(&p_, &sz_iw, &sz_w, sz_anderson_memory_);
  alloc_iw(sz_iw, true);
  alloc_w(sz_w, true);
  if (convexify_) {
    // NOTE(review): the convexification workspace allocation is not visible
    // in this listing — confirm against upstream.
  }
}
469 
470  void Feasiblesqpmethod::set_feasiblesqpmethod_prob() {
471 
472  p_.sp_h = Hsp_;
473 
474  p_.sp_a = Asp_;
475  // p_.merit_memsize = merit_memsize_;
476  // p_.max_iter_ls = max_iter_ls_;
477  p_.nlp = &p_nlp_;
478  }
479 
// Distribute the persistent work vectors over the solver data structures.
// The base class consumes its share of arg/res/iw/w first; the low-level
// init then maps the remaining space onto m->d and advances iw/w past the
// consumed region (the Anderson memory size affects how much is needed).
void Feasiblesqpmethod::set_work(void* mem, const double**& arg, double**& res,
    casadi_int*& iw, double*& w) const {
  auto m = static_cast<FeasiblesqpmethodMemory*>(mem);

  // Set work in base classes
  Nlpsol::set_work(mem, arg, res, iw, w);

  m->d.prob = &p_;
  casadi_feasiblesqpmethod_init(&m->d, &iw, &w, sz_anderson_memory_);

  // No iteration has been performed yet
  m->iter_count = -1;
}
492 
493  int Feasiblesqpmethod::init_mem(void* mem) const {
494  if (Nlpsol::init_mem(mem)) return 1;
495  auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
496 
497  if (convexify_) m->add_stat("convexify");
498  m->add_stat("BFGS");
499  m->add_stat("QP");
500  return 0;
501  }
502 
503 double Feasiblesqpmethod::eval_m_k(void* mem) const {
504  auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
505  auto d = &m->d;
506  if (use_sqp_) {
507  return 0.5*casadi_bilin(d->Bk, Hsp_, d->dx, d->dx) + casadi_dot(nx_, d->gf, d->dx);
508  } else {
509  return casadi_dot(nx_, d->gf, d->dx);
510  }
511 }
512 
513 double Feasiblesqpmethod::eval_tr_ratio(double val_f, double val_f_corr, double val_m_k) const {
514  return (val_f - val_f_corr) / (-val_m_k);
515 }
516 
517 void Feasiblesqpmethod::tr_update(void* mem, double& tr_rad, double tr_ratio) const {
518  auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
519  auto d = &m->d;
520 
521  if (tr_ratio < tr_eta1_) {
522  tr_rad = tr_alpha1_ * casadi_masked_norm_inf(nx_, d->dx, d->tr_mask);
523  } else if (tr_ratio > tr_eta2_ &&
524  abs(casadi_masked_norm_inf(nx_, d->dx, d->tr_mask) - tr_rad) < optim_tol_) {
525  tr_rad = fmin(tr_alpha2_*tr_rad, tr_rad_max_);
526  }
527  // else: keep trust-region as it is....
528 }
529 
530 int Feasiblesqpmethod::step_update(void* mem, double tr_ratio) const {
531  auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
532  auto d_nlp = &m->d_nlp;
533  auto d = &m->d;
534 
535  if (tr_ratio > tr_acceptance_) {
536  // This is not properly implemented yet: d_nlp->z_old = d_mlp->z;
537  casadi_copy(d->z_feas, nx_ + ng_, d_nlp->z);
538  d_nlp->objective = d->f_feas;
539  casadi_copy(d->dlam_feas, nx_ + ng_, d_nlp->lam);
540 
541  uout() << "ACCEPTED" << std::endl;
542  return 0;
543  } else {
544  uout() << "REJECTED" << std::endl;
545  return -1;
546  }
547 }
548 
549 
/*
Do the Anderson step update here and also update the memory here
*/
// Apply one Anderson-accelerated update to the feasibility iterate d->z_feas,
// using the new inner step d->dx_feas and the stored (step, iterate) history.
// iter_index is the current inner-iteration counter (bounds the active memory).
void Feasiblesqpmethod::anderson_acc_step_update(void* mem, casadi_int iter_index) const {
  auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
  auto d = &m->d;

  if (sz_anderson_memory_ == 1) {
    // Calculate gamma: scalar least-squares coefficient for memory size 1,
    // gamma = <dx, dx - dx_prev> / ||dx - dx_prev||^2
    casadi_copy(d->dx_feas, nx_, d->z_tmp);
    casadi_axpy(nx_, -1.0, d->anderson_memory_step, d->z_tmp);
    *d->gamma = casadi_dot(nx_, d->dx_feas, d->z_tmp) / casadi_dot(nx_, d->z_tmp, d->z_tmp);
    // DM(gamma).to_file("gamma.mtx");


    // Prepare the step update: z_tmp = (z - z_prev) + (dx - dx_prev)
    casadi_copy(d->z_feas, nx_, d->z_tmp);
    casadi_axpy(nx_, -1.0, d->anderson_memory_iterate, d->z_tmp);
    casadi_axpy(nx_, 1.0, d->dx_feas, d->z_tmp);
    casadi_axpy(nx_, -1.0, d->anderson_memory_step, d->z_tmp);

    // Update the Anderson memory with the current (step, iterate) pair
    anderson_acc_update_memory(mem, d->dx_feas, d->z_feas);
    // casadi_copy(d->dx_feas, nx_, d->anderson_memory_step);
    // casadi_copy(d->z_feas, nx_, d->anderson_memory_iterate);

    // Do the step update: z += beta*dx - gamma*z_tmp
    double beta = 1.0;
    casadi_axpy(nx_, beta, d->dx_feas, d->z_feas);
    casadi_axpy(nx_, -*d->gamma, d->z_tmp, d->z_feas);
    // DM(std::vector<double>(d->z_feas,d->z_feas+nx_)).to_file("dx_anderson2.mtx");

  } else {
    // Multi-vector Anderson memory; flagged as work in progress upstream.
    print("This is not implemented yet!!!");
    // Number of history columns usable at this iteration
    casadi_int curr_stage = fmin(iter_index+1, sz_anderson_memory_);
    // Create matrix F_k (differences of successive steps)
    casadi_copy(d->dx_feas, nx_, d->z_tmp);
    casadi_copy(d->anderson_memory_step, (curr_stage-1)*nx_, d->z_tmp+nx_);
    casadi_axpy(curr_stage*nx_, -1.0, d->anderson_memory_step+nx_, d->z_tmp);

    // Solve the least-squares problem for the combination coefficients gamma
    casadi_dense_lsqr_solve(d->z_tmp, d->dx_feas, 1, 1, curr_stage, nx_, d->gamma);

    // Update the Anderson memory
    anderson_acc_update_memory(mem, d->dx_feas, d->z_feas);

    double beta = 1.0;
    // Calculate E_k + beta*F_k (iterate differences plus damped steps)
    casadi_axpy(nx_, 1.0, d->z_feas, d->z_tmp);
    casadi_axpy((curr_stage-1)*nx_, 1.0, d->anderson_memory_iterate, d->z_tmp+nx_);
    casadi_axpy(curr_stage*nx_, -1.0, d->anderson_memory_iterate, d->z_tmp);
    // Do the final update
    casadi_axpy(nx_, beta, d->dx_feas, d->z_feas);
    casadi_axpy(nx_, -*d->gamma, d->z_tmp, d->z_feas);

  }
}
608 
609 /*
610 Initialize the memory of the Anderson acceleration
611 */
612 void Feasiblesqpmethod::anderson_acc_init_memory(void* mem, double* step, double* iterate) const {
613  auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
614  auto d = &m->d;
615 
616  casadi_clear(d->anderson_memory_step, sz_anderson_memory_*nx_);
617  casadi_clear(d->anderson_memory_iterate, sz_anderson_memory_*nx_);
618 
619  // if (sz_anderson_memory_ == 1) {
620  // casadi_copy(step, nx_, d->anderson_memory_step);
621  // casadi_copy(x, nx_, d->anderson_memory_iterate);
622  // } else {
623  // print("This is not implemented yet!!!");
624  // }
625 
626  casadi_copy(step, nx_, d->anderson_memory_step);
627  casadi_copy(iterate, nx_, d->anderson_memory_iterate);
628 
629 }
630 
/*
Update the memory of the Anderson acceleration
*/
// Push a new (step, iterate) pair into the history: newest entry lives in
// slot 0, older entries are shifted one slot towards the end.
void Feasiblesqpmethod::anderson_acc_update_memory(void* mem, double* step, double* iterate) const {
  auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
  auto d = &m->d;

  if (sz_anderson_memory_ == 1) {
    // Memory of one: simply overwrite the stored pair
    casadi_copy(step, nx_, d->anderson_memory_step);
    casadi_copy(iterate, nx_, d->anderson_memory_iterate);
  } else {
    // Shift old values further
    // NOTE(review): source and destination overlap here (dst = src + nx_).
    // If casadi_copy copies forward element-by-element like memcpy, entries
    // beyond the first shifted block are corrupted for sz_anderson_memory_ > 2
    // — verify that casadi_copy has memmove-like semantics for this use.
    casadi_copy(d->anderson_memory_step,
      (sz_anderson_memory_-1)*nx_, d->anderson_memory_step + nx_);
    casadi_copy(d->anderson_memory_iterate,
      (sz_anderson_memory_-1)*nx_, d->anderson_memory_iterate + nx_);
    // Insert new values
    casadi_copy(step, nx_, d->anderson_memory_step);
    casadi_copy(iterate, nx_, d->anderson_memory_iterate);
  }
}
652 
653 
/*
Calculates the feasibility_iterations. If iterations are accepted return 0.
If iterations are aborted return -1.
*/

// Inner "feasibility" loop of the FSLP/FSQP method: starting from the outer
// trial step d->dx, repeatedly solve correction QPs (SQP mode) or LPs (SLP
// mode) that pull the trial iterate d->z_feas back towards feasibility while
// staying inside the trust region of radius tr_rad. Terminates with 0 when
// the iterate is feasible (within feas_tol_) and asymptotically exact
// (as_exac < 0.5); returns -1 when the iterations diverge, the watchdog
// detects insufficient contraction, or max_inner_iter_ is exhausted.
int Feasiblesqpmethod::feasibility_iterations(void* mem, double tr_rad) const {
  auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
  auto d_nlp = &m->d_nlp;
  auto d = &m->d;

  // p_tmp = p
  casadi_copy(d->dx, nx_, d->dx_feas);

  // lam_p_g_tmp = self.lam_p_g_k
  // lam_p_x_tmp = self.lam_p_x_k
  casadi_copy(d->dlam, nx_ + ng_, d->dlam_feas);

  // Why do we do this at the moment??
  // NOTE(review): z_tmp computed here is overwritten below before being
  // read — this difference appears to be unused.
  casadi_copy(d->dlam, nx_+ng_, d->z_tmp);
  casadi_axpy(nx_+ng_, -1.0, d_nlp->lam, d->z_tmp);

  // this is in solve in fslp.py
  double step_inf_norm = casadi_masked_norm_inf(nx_, d->dx, d->tr_mask);
  double prev_step_inf_norm = step_inf_norm;

  // bool kappa_acceptance = false;

  // self.x_tmp = self.x_k + p_tmp
  casadi_copy(d_nlp->z, nx_+ng_, d->z_feas);
  casadi_axpy(nx_, 1., d->dx_feas, d->z_feas);

  if (use_anderson_) {
    // anderson_acc_init_memory(mem, d->dx_feas, d->z_feas);
    anderson_acc_init_memory(mem, d->dx_feas, d_nlp->z);
  }

  // DM(std::vector<double>(d->z_feas,d->z_feas+nx_)).to_file("dx_anderson1.mtx");
  // Evaluate g
  // self.g_tmp = self.__eval_g(self.x_tmp)
  m->arg[0] = d->z_feas;
  m->arg[1] = d_nlp->p;
  m->res[0] = d->z_feas + nx_;
  if (calc_function(m, "nlp_g")) {
    uout() << "What does it mean that calc_function fails here??" << std::endl;
  }
  int inner_iter = 0;

  // asymptotic_exactness = []
  // double asymptotic_exactness = 0.0;
  // self.prev_infeas = self.feasibility_measure(self.x_tmp, self.g_tmp)
  // self.curr_infeas = self.feasibility_measure(self.x_tmp, self.g_tmp)

  // Maximum bound violation of the trial iterate (variables + constraints)
  double prev_infeas = casadi_max_viol(nx_+ng_, d->z_feas, d_nlp->lbz, d_nlp->ubz);
  double curr_infeas = prev_infeas;
  // feasibilities = [self.prev_infeas]
  // step_norms = []
  // kappas = []

  // watchdog_prev_inf_norm = self.prev_step_inf_norm
  // accumulated_as_ex = 0

  // Calculate asymptotic exactness of current step:
  // ||dx - (z_feas - z)|| / ||dx||
  casadi_copy(d->dx, nx_, d->z_tmp);
  casadi_axpy(nx_, -1., d->z_feas, d->z_tmp);
  casadi_axpy(nx_, 1., d_nlp->z, d->z_tmp);
  double as_exac = casadi_norm_2(nx_, d->z_tmp) / casadi_norm_2(nx_, d->dx);

  double kappa_watchdog = 0.0;
  double kappa = 0.0;
  double acc_as_exac = 0.0;


  double watchdog_prev_inf_norm = prev_step_inf_norm; // until here everything is correct!

  for (int j=0; j<max_inner_iter_; ++j) {
    // Success test: feasible iterate that did not drift too far from dx
    if (curr_infeas < feas_tol_) {
      inner_iter = j;
      // kappa_acceptance = true;
      if (as_exac < 0.5) {
        return 0;
      } else {
        return -1;
      }
    } else if (j>0 && (curr_infeas > 1.0 || as_exac > 1.0)) {
      // Divergence test after the first iteration
      // kappa_acceptance = false;
      return -1;
    }
    inner_iter = j+1;

    // self.lam_tmp_g = self.lam_p_g_k
    // self.lam_tmp_x = self.lam_p_x_k

    // create corrected gradient here -----------------------------
    casadi_copy(d->z_feas, nx_, d->z_tmp);
    casadi_axpy(nx_, -1., d_nlp->z, d->z_tmp);
    casadi_copy(d->gf, nx_, d->gf_feas);
    // In case of SQP we need to multiply with
    // the Hessian: gf_feas += Bk'*(z_feas - z)
    if (use_sqp_) {
      casadi_mv(d->Bk, Hsp_, d->z_tmp, d->gf_feas, true);
    }

    // create bounds of correction QP -----------------------------
    // upper bounds of constraints
    casadi_copy(d_nlp->ubz + nx_, ng_, d->ubdz_feas + nx_);
    casadi_axpy(ng_, -1., d->z_feas + nx_, d->ubdz_feas + nx_);

    // lower bounds of constraints
    casadi_copy(d_nlp->lbz + nx_, ng_, d->lbdz_feas + nx_);
    casadi_axpy(ng_, -1., d->z_feas + nx_, d->lbdz_feas + nx_);

    // lower bounds of variables
    // lbp = cs.fmax(-self.tr_rad_k*self.tr_scale_mat_inv_k @
    //   cs.DM.ones(self.nx, 1) - (self.x_tmp-self.x_k),
    //   self.lbx - self.x_tmp)

    casadi_copy(d_nlp->lbz, nx_, d->lbdz_feas);
    casadi_clip_min(d->lbdz_feas, nx_, -tr_rad, d->tr_mask);
    // DM(std::vector<double>(d->lbdz_feas,d->lbdz_feas+nx_)).to_file("lbx_feas_part1_part1.mtx");
    // casadi_fill(d->lbdz_feas, nx_, -tr_rad);
    casadi_axpy(nx_, -1., d->z_feas, d->lbdz_feas);
    casadi_axpy(nx_, 1., d_nlp->z, d->lbdz_feas);
    // DM(std::vector<double>(d->lbdz_feas,d->lbdz_feas+nx_)).to_file("lbx_feas_part1.mtx");


    casadi_copy(d_nlp->lbz, nx_, d->z_tmp);
    casadi_axpy(nx_, -1., d->z_feas, d->z_tmp);
    // DM(std::vector<double>(d->z_tmp,d->z_tmp+nx_)).to_file("lbx_feas_part2.mtx");

    // comparison of both vectors
    casadi_vector_fmax(nx_, d->z_tmp, d->lbdz_feas, d->lbdz_feas);
    // DM(std::vector<double>(d->lbdz_feas,d->lbdz_feas+nx_)).to_file("lbx_feas_end.mtx");


    // upper bounds of variables
    // ubp = cs.fmin(self.tr_rad_k*self.tr_scale_mat_inv_k @
    //   cs.DM.ones(self.nx, 1) - (self.x_tmp-self.x_k),
    //   self.ubx - self.x_tmp)


    casadi_copy(d_nlp->ubz, nx_, d->ubdz_feas);
    casadi_clip_max(d->ubdz_feas, nx_, tr_rad, d->tr_mask);
    // DM(std::vector<double>(d->ubdz_feas,d->ubdz_feas+nx_)).to_file("ubx_feas_part1_part1.mtx");
    // casadi_fill(d->ubdz_feas, nx_, tr_rad);
    casadi_axpy(nx_, -1., d->z_feas, d->ubdz_feas);
    // DM(std::vector<double>(d->z_feas,d->z_feas+nx_)).to_file("ubx_feas_part1_zfeas.mtx");
    casadi_axpy(nx_, 1., d_nlp->z, d->ubdz_feas);

    // DM(std::vector<double>(d->ubdz_feas,d->ubdz_feas+nx_)).to_file("ubx_feas_part1.mtx");

    casadi_copy(d_nlp->ubz, nx_, d->z_tmp);
    casadi_axpy(nx_, -1., d->z_feas, d->z_tmp);
    // DM(std::vector<double>(d->z_tmp,d->z_tmp+nx_)).to_file("ubx_feas_part2.mtx");
    // comparison of both vectors
    casadi_vector_fmin(nx_, d->z_tmp, d->ubdz_feas, d->ubdz_feas);
    // DM(std::vector<double>(d->ubdz_feas,d->ubdz_feas+nx_)).to_file("ubx_feas_end.mtx");


    // std::string suffix = str(j);
    // DM(std::vector<double>(d_nlp->lbz,d_nlp->lbz+nx_+ng_)).to_file("nlp_lbz"+suffix+".mtx");
    // DM(std::vector<double>(d_nlp->ubz,d_nlp->ubz+nx_+ng_)).to_file("nlp_ubz"+suffix+".mtx");
    // DM(std::vector<double>(d->lbdz_feas,d->lbdz_feas+nx_+ng_)).to_file("lbz"+suffix+".mtx");
    // DM(std::vector<double>(d->ubdz_feas,d->ubdz_feas+nx_+ng_)).to_file("ubz"+suffix+".mtx");
    // DM(std::vector<double>(d->z_feas,d->z_feas+nx_+ng_)).to_file("z_feas"+suffix+".mtx");
    // DM(std::vector<double>(d->dx_feas,d->dx_feas+nx_)).to_file("dx_feas"+suffix+".mtx");
    // copy back d->dx back to d->dx_feas
    // casadi_copy(d->dx, nx_, d->x_tmp);

    //prepare step_inf_norm
    // DM(std::vector<double>(d->dx_feas,d->dx_feas+nx_)).to_file("dx_input.mtx");
    // DM(std::vector<double>(d->gf_feas,d->gf_feas+nx_)).to_file("gf_feas.mtx");
    // DM(std::vector<double>(d->lbdz_feas, d->lbdz_feas+nx_)).to_file("lb_var_correction.mtx");
    // DM(std::vector<double>(d->ubdz_feas, d->ubdz_feas+nx_)).to_file("ub_var_correction.mtx");
    // DM(std::vector<double>(d->lbdz_feas+nx_,
    //   d->lbdz_feas+nx_+ng_)).to_file("lba_correction.mtx");
    // DM(std::vector<double>(d->ubdz_feas+nx_,
    //   d->ubdz_feas+nx_+ng_)).to_file("uba_correction.mtx");
    // DM(std::vector<double>(d->dlam_feas, d->dlam_feas+nx_)).to_file("lam_x.mtx");
    // DM(std::vector<double>(d->dlam_feas+nx_, d->dlam_feas+nx_+ng_)).to_file("lam_g.mtx");

    // Solve the correction subproblem for a new feasibility step dx_feas
    if (use_sqp_) {
      solve_QP(m, d->Bk, d->gf_feas, d->lbdz_feas, d->ubdz_feas,
        d->Jk, d->dx_feas, d->dlam_feas, 0);
    } else {
      solve_LP(m, d->gf_feas, d->lbdz_feas, d->ubdz_feas,
        d->Jk, d->dx_feas, d->dlam_feas, 0);
    }

    // put definition of ret out of loop

    // DM(std::vector<double>(d->dx_feas,d->dx_feas+nx_)).to_file("dx_feas.mtx");
    // uout() << "Feas QP step: " << std::vector<double>(d->dx_feas, d->dx_feas+nx_) << std::endl;
    //MISSING: Depending on the result terminate program

    //MISSING: Calculate the step_inf_norm
    // self.step_inf_norm = cs.fmax(
    //   cs.norm_inf(self.p_k),
    //   cs.fmax(
    //     cs.norm_inf(self.lam_tmp_g-self.lam_p_g_k),
    //     cs.norm_inf(self.lam_tmp_x-self.lam_p_x_k)
    //   )
    // )

    // TODO(david) the python code has a bug and does not consider the step in lambda
    // here!!
    // casadi_copy(d->dlam_feas, nx_+ng_, d->z_tmp);
    // casadi_axpy(nx_+ng_, -1.0, d->dlam, d->z_tmp);
    step_inf_norm = casadi_masked_norm_inf(nx_, d->dx_feas, d->tr_mask);

    // self.x_tmp = self.x_tmp + p_tmp
    // self.g_tmp = self.__eval_g(self.x_tmp) # x_tmp = x_{tmp-1} + p_tmp

    // Advance the trial iterate, optionally with Anderson acceleration
    if (use_anderson_) {
      anderson_acc_step_update(mem, j);
    } else {
      casadi_axpy(nx_, 1., d->dx_feas, d->z_feas);
    }

    // Evaluate g
    m->arg[0] = d->z_feas;
    m->arg[1] = d_nlp->p;
    m->res[0] = d->z_feas + nx_;
    if (calc_function(m, "nlp_g")) {
      uout() << "What does it mean that calc_function fails here??" << std::endl;
    }

    // self.curr_infeas = self.feasibility_measure(self.x_tmp, self.g_tmp)
    // self.prev_infeas = self.curr_infeas
    prev_infeas = casadi_max_viol(nx_+ng_, d->z_feas, d_nlp->lbz, d_nlp->ubz);
    curr_infeas = prev_infeas;
    // kappa = self.step_inf_norm/self.prev_step_inf_norm
    // kappas.append(kappa)
    // Empirical contraction rate of the inner steps
    kappa = step_inf_norm/prev_step_inf_norm;
    //MISSING: as_exac:

    // DM(std::vector<double>(d_nlp->z, d_nlp->z+nx_)).to_file("x_k.mtx");
    // DM(std::vector<double>(d->z_feas,d->z_feas+nx_)).to_file("x_tmp.mtx");
    // DM(std::vector<double>(d->dx_feas,d->dx_feas+nx_)).to_file("dx_feas"+suffix+".mtx");
    // DM(std::vector<double>(d->dx,d->dx+nx_)).to_file("p_k.mtx");


    // Recompute asymptotic exactness for the advanced iterate
    casadi_copy(d->dx, nx_, d->z_tmp);
    casadi_axpy(nx_, -1., d->z_feas, d->z_tmp);
    casadi_axpy(nx_, 1., d_nlp->z, d->z_tmp);
    as_exac = casadi_norm_2(nx_, d->z_tmp) / casadi_norm_2(nx_, d->dx);


    // as_exac = cs.norm_2(
    //   self.p_k - (self.x_tmp - self.x_k)) / cs.norm_2(self.p_k)

    // if self.verbose:
    //   print("Kappa: ", kappa,
    //     "Infeasibility", self.feasibility_measure(
    //       self.x_tmp, self.g_tmp),
    //     "Asymptotic Exactness: ", as_exac)
    print("%6s %9.10f %14s %9.10f %20s %9.10f\n", "Kappa:", kappa,
      "Infeasibility:", curr_infeas, "AsymptoticExactness:", as_exac);

    // accumulated_as_ex += as_exac
    acc_as_exac += as_exac;
    // if inner_iter % self.watchdog == 0:
    //   kappa_watch = self.step_inf_norm/watchdog_prev_inf_norm
    //   watchdog_prev_inf_norm = self.step_inf_norm
    //   if self.verbose:
    //     print("kappa watchdog: ", kappa_watch)
    //   if self.curr_infeas < self.feas_tol and as_exac < 0.5:
    //     self.kappa_acceptance = True
    //     break
    // Watchdog: every watchdog_ iterations, check overall contraction
    if (inner_iter % watchdog_ == 0) {
      kappa_watchdog = step_inf_norm / watchdog_prev_inf_norm;
      watchdog_prev_inf_norm = step_inf_norm;
      print("Kappa watchdog: %9.10f\n", kappa_watchdog);
      if (curr_infeas < feas_tol_ && as_exac < 0.5) {
        // kappa_acceptance = true;
        return 0;
      }
      // if kappa_watch > self.contraction_acceptance or
      //   accumulated_as_ex/self.watchdog > 0.5:
      //   self.kappa_acceptance = False
      //   break
      if (kappa_watchdog > contraction_acceptance_value_ || acc_as_exac/watchdog_ > 0.5) {
        // kappa_acceptance = false;
        return -1;
      }
      // accumulated_as_ex = 0
      acc_as_exac = 0.0;
    }

    // Do some saving here??
    // feasibilities.append(
    //   self.feasibility_measure(self.x_tmp, self.g_tmp))
    // asymptotic_exactness.append(as_exac)
    // step_norms.append(cs.norm_inf(p_tmp))

    // self.prev_step_inf_norm = self.step_inf_norm
    // self.lam_tmp_g = self.lam_p_g_k
    // self.lam_tmp_x = self.lam_p_x_k
    prev_step_inf_norm = step_inf_norm;
  }
  //maximum iterations reached
  // kappa_acceptance = false;
  return -1;
}
956 
// Main FSQP driver loop.
//
// Per iteration: evaluate the NLP functions and first/second-order
// derivative information, formulate a trust-region QP (or LP when
// use_sqp_ is false) around the current feasible iterate, run the
// feasibility iterations on the predicted step, and update the
// trust-region radius from the ratio of actual to predicted reduction.
// Terminates when the quadratic model value m_k is numerically zero
// (success), the iteration limit is hit, the trust-region radius falls
// below the feasibility tolerance, the initial point is infeasible, or
// the user callback requests a stop. Returns 0 on normal termination
// and 1 on a hard derivative-evaluation failure.
int Feasiblesqpmethod::solve(void* mem) const {
  auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
  auto d_nlp = &m->d_nlp;
  auto d = &m->d;

  // DM(std::vector<double>(d_nlp->z, d_nlp->z+nx_)).to_file("x0.mtx");
  // Number of SQP iterations
  m->iter_count = 0;

  // Reset
  // m->merit_ind = 0;
  // m->sigma = 0.; // NOTE: Move this into the main optimization loop
  // m->reg = 0;
  // step_accepted != 0 lets the loop skip the re-evaluation of the
  // first-order information in subsequent iterations (see below).
  int step_accepted = 0;

  // Default quadratic model value of objective
  double m_k = -1.0;

  // Ratio of actual vs. predicted reduction, used by tr_update()
  double tr_ratio = 0.0;

  // Trust-region radius, initialized from the tr_rad0 option
  double tr_rad = tr_rad0_;
  double tr_rad_prev = tr_rad0_;

  // transfer the scale vector to the problem
  casadi_copy(get_ptr(tr_scale_vector_), nx_, d->tr_scale_vector);

  // tr_mask flags the variables that take part in the trust region:
  // entries with a zero scale factor are excluded from the radius bound
  // (see the masked clip/norm calls further down).
  for (casadi_int i=0;i<nx_;++i) {
    d->tr_mask[i] = d->tr_scale_vector[i]!=0;
  }

  // For seeds ---- This is so far needed!!! ------------------
  // Unit multiplier for the objective part of nlp_hess_l below.
  const double one = 1.;

  // Info for printing
  std::string info = "";

  // Start from a zero step
  casadi_clear(d->dx, nx_);

  // ------------------------------------------------------------------------
  // MAIN OPTIMIZATION LOOP
  // ------------------------------------------------------------------------
  while (true) {
    // Evaluate f, g and first order derivative information
    /*m->arg[0] = d_nlp->z;
    m->arg[1] = d_nlp->p;
    m->res[0] = &d_nlp->objective;
    m->res[1] = d->gf;
    m->res[2] = d_nlp->z + nx_;
    m->res[3] = d->Jk;
    switch (calc_function(m, "nlp_jac_fg")) {
      case -1:
        m->return_status = "Non_Regular_Sensitivities";
        m->unified_return_status = SOLVER_RET_NAN;
        if (print_status_)
          print("MESSAGE(feasiblesqpmethod): No regularity of sensitivities at current point.\n");
        return 1;
      case 0:
        break;
      default:
        return 1;
    }*/
    if (m->iter_count == 0) {
      // First iteration: evaluate everything at the initial point.
      // Evaluate the sensitivities -------------------------------------------
      // Evaluate f
      m->arg[0] = d_nlp->z;
      m->arg[1] = d_nlp->p;
      m->res[0] = &d_nlp->objective;
      if (calc_function(m, "nlp_f")) {
        uout() << "What does it mean that calc_function fails here??" << std::endl;
      }
      // Evaluate g (constraint values are stored after x in d_nlp->z)
      m->arg[0] = d_nlp->z;
      m->arg[1] = d_nlp->p;
      m->res[0] = d_nlp->z + nx_;
      if (calc_function(m, "nlp_g")) {
        uout() << "What does it mean that calc_function fails here??" << std::endl;
      }
      // Evaluate grad_f
      m->arg[0] = d_nlp->z;
      m->arg[1] = d_nlp->p;
      m->res[0] = d->gf;
      if (calc_function(m, "nlp_grad_f")) {
        uout() << "What does it mean that calc_function fails here??" << std::endl;
      }
      // Evaluate jac_g
      m->arg[0] = d_nlp->z;
      m->arg[1] = d_nlp->p;
      m->res[0] = d->Jk;
      switch (calc_function(m, "nlp_jac_g")) {
        case -1:
          m->return_status = "Non_Regular_Sensitivities";
          m->unified_return_status = SOLVER_RET_NAN;
          if (print_status_)
            print("MESSAGE(feasiblesqpmethod): "
                  "No regularity of sensitivities at current point.\n");
          return 1;
        case 0:
          break;
        default:
          return 1;
      }
      // uout() << "x0: " << *d_nlp->z << std::endl;
      // uout() << "nlp_f: " << d_nlp->objective << std::endl;
      // uout() << "nlp_g: " << *(d_nlp->z + nx_) << std::endl;

      if (use_sqp_) {
        if (exact_hessian_) {
          // Update/reset exact Hessian of the Lagrangian
          m->arg[0] = d_nlp->z;
          m->arg[1] = d_nlp->p;
          m->arg[2] = &one;                // objective multiplier
          m->arg[3] = d_nlp->lam + nx_;    // constraint multipliers
          m->res[0] = d->Bk;
          if (calc_function(m, "nlp_hess_l")) return 1;
          if (convexify_) {
            ScopedTiming tic(m->fstats.at("convexify"));
            if (convexify_eval(&convexify_data_.config, d->Bk, d->Bk, m->iw, m->w)) return 1;
          }
        } else if (m->iter_count==0) {
          ScopedTiming tic(m->fstats.at("BFGS"));
          // Initialize BFGS
          casadi_fill(d->Bk, Hsp_.nnz(), 1.);
          casadi_bfgs_reset(Hsp_, d->Bk);
        } else {
          ScopedTiming tic(m->fstats.at("BFGS"));
          // Update BFGS
          if (m->iter_count % lbfgs_memory_ == 0) casadi_bfgs_reset(Hsp_, d->Bk);
          // Update the Hessian approximation
          casadi_bfgs(Hsp_, d->Bk, d->dx, d->gLag, d->gLag_old, m->w);
        }

        // Test if initialization is feasible: FSQP requires a feasible
        // starting point, so abort otherwise.
        if (casadi_max_viol(nx_ + ng_, d_nlp->z, d_nlp->lbz, d_nlp->ubz) > feas_tol_) {
          if (print_status_) print("MESSAGE(feasiblesqpmethod): "
            "No feasible initialization given! "
            "Find feasible initialization.\n");
          m->return_status = "No_Feasible_Initialization";
          break;
        }
      }

    } else if (step_accepted == 0) {
      // Subsequent iteration after an accepted step: refresh first-order
      // information at the new point.
      // NOTE(review): f and g are presumably kept current by
      // step_update()/the feasibility iterations — confirm.
      // Evaluate grad_f
      m->arg[0] = d_nlp->z;
      m->arg[1] = d_nlp->p;
      m->res[0] = d->gf;
      if (calc_function(m, "nlp_grad_f")) {
        uout() << "What does it mean that calc_function fails here??" << std::endl;
      }
      // Evaluate jac_g
      m->arg[0] = d_nlp->z;
      m->arg[1] = d_nlp->p;
      m->res[0] = d->Jk;
      switch (calc_function(m, "nlp_jac_g")) {
        case -1:
          m->return_status = "Non_Regular_Sensitivities";
          m->unified_return_status = SOLVER_RET_NAN;
          if (print_status_)
            print("MESSAGE(feasiblesqpmethod): "
                  "No regularity of sensitivities at current point.\n");
          return 1;
        case 0:
          break;
        default:
          return 1;
      }
      // uout() << "x0: " << *d_nlp->z << std::endl;
      // uout() << "nlp_f: " << d_nlp->objective << std::endl;
      // uout() << "nlp_g: " << *(d_nlp->z + nx_) << std::endl;

      if (use_sqp_) {
        if (exact_hessian_) {
          // Update/reset exact Hessian
          m->arg[0] = d_nlp->z;
          m->arg[1] = d_nlp->p;
          m->arg[2] = &one;
          m->arg[3] = d_nlp->lam + nx_;
          m->res[0] = d->Bk;
          if (calc_function(m, "nlp_hess_l")) return 1;
          if (convexify_) {
            ScopedTiming tic(m->fstats.at("convexify"));
            if (convexify_eval(&convexify_data_.config, d->Bk, d->Bk, m->iw, m->w)) return 1;
          }
        } else if (m->iter_count==0) {
          ScopedTiming tic(m->fstats.at("BFGS"));
          // Initialize BFGS
          casadi_fill(d->Bk, Hsp_.nnz(), 1.);
          casadi_bfgs_reset(Hsp_, d->Bk);
        } else {
          ScopedTiming tic(m->fstats.at("BFGS"));
          // Update BFGS
          if (m->iter_count % lbfgs_memory_ == 0) casadi_bfgs_reset(Hsp_, d->Bk);
          // Update the Hessian approximation
          casadi_bfgs(Hsp_, d->Bk, d->dx, d->gLag, d->gLag_old, m->w);
        }
      }
    }

    // Evaluate the gradient of the Lagrangian:
    // gLag = gf + Jk^T * lam_g + lam_x
    casadi_copy(d->gf, nx_, d->gLag);
    casadi_mv(d->Jk, Asp_, d_nlp->lam+nx_, d->gLag, true);
    casadi_axpy(nx_, 1., d_nlp->lam, d->gLag);

    // Primal infeasability
    double pr_inf = casadi_max_viol(nx_+ng_, d_nlp->z, d_nlp->lbz, d_nlp->ubz);
    // uout() << "pr_inf: " << pr_inf << std::endl;
    // inf-norm of Lagrange gradient (dual infeasibility)
    // uout() << "grad Lag: " << std::vector<double>(*d->gLag,0,nx_) << std::endl;
    double du_inf = casadi_norm_inf(nx_, d->gLag);
    // uout() << "du_inf: " << du_inf << std::endl;

    // inf-norm of step, d->dx is a nullptr???
    // uout() << "HERE!!!!" << *d->dx << std::endl;
    double dx_norminf = casadi_norm_inf(nx_, d->dx);

    // uout() << "objective value: " << d_nlp->objective << std::endl;
    // Printing information about the actual iterate
    if (print_iteration_) {
      // if (m->iter_count % 10 == 0) print_iteration();
      print_iteration();
      print_iteration(m->iter_count, d_nlp->objective, m_k, tr_ratio,
                      pr_inf, du_inf, dx_norminf, m->reg, tr_rad_prev, info);
      info = "";
    }
    tr_rad_prev = tr_rad;

    // Callback function
    if (callback(m)) {
      if (print_status_) print("WARNING(feasiblesqpmethod): Aborted by callback...\n");
      m->return_status = "User_Requested_Stop";
      break;
    }

    // Checking convergence criteria
    // Where is the complementarity condition??
    // if (m->iter_count >= min_iter_ && pr_inf < tol_pr_ && du_inf < tol_du_) {
    //   if (print_status_)
    //     print("MESSAGE(feasiblesqpmethod): "
    //           "Convergence achieved after %d iterations\n", m->iter_count);
    //   m->return_status = "Solve_Succeeded";
    //   m->success = true;
    //   break;
    // }

    if (m->iter_count >= max_iter_) {
      if (print_status_) {
        print("MESSAGE(feasiblesqpmethod): Maximum number of iterations reached.\n");
      }
      m->return_status = "Maximum_Iterations_Exceeded";
      m->unified_return_status = SOLVER_RET_LIMITED;
      break;
    }

    // Formulate the QP in step space (d = z - z_k).
    // Define lower bounds, intersected with the trust region on the
    // masked variables.
    casadi_copy(d_nlp->lbz, nx_+ng_, d->lbdz);
    casadi_axpy(nx_+ng_, -1., d_nlp->z, d->lbdz);
    casadi_clip_min(d->lbdz, nx_, -tr_rad, d->tr_mask);
    // uout() << "lbdz: " << std::vector<double>(d->lbdz, d->lbdz+nx_) << std::endl;


    // Define upper bounds
    casadi_copy(d_nlp->ubz, nx_+ng_, d->ubdz);
    casadi_axpy(nx_+ng_, -1., d_nlp->z, d->ubdz);
    casadi_clip_max(d->ubdz, nx_, tr_rad, d->tr_mask);
    // uout() << "ubdz: " << std::vector<double>(d->ubdz, d->ubdz+nx_) << std::endl;

    // Initial guess for the QP multipliers
    casadi_copy(d_nlp->lam, nx_+ng_, d->dlam);
    //casadi_clear(d->dx, nx_);

    // Increase counter
    m->iter_count++;

    // DM(std::vector<double>(d->gf,d->gf+nx_)).to_file("gf.mtx");
    // DM(std::vector<double>(d->lbdz, d->lbdz+nx_)).to_file("lb_var.mtx");
    // DM(std::vector<double>(d->ubdz, d->ubdz+nx_)).to_file("ub_var.mtx");
    // DM(std::vector<double>(d->lbdz+nx_, d->lbdz+nx_+ng_)).to_file("lba.mtx");
    // DM(std::vector<double>(d->ubdz+nx_, d->ubdz+nx_+ng_)).to_file("uba.mtx");
    // // DM(std::vector<double>(d->Bk, d->Bk+Hsp_.nnz())).to_file("Bk.mtx");
    // DM(std::vector<double>(d->Jk, d->Jk+Asp_.nnz())).to_file("Jk.mtx");
    // DM(std::vector<double>(d->dlam, d->dlam+nx_)).to_file("lam_x.mtx");
    // DM(std::vector<double>(d->dx, d->dx+nx_)).to_file("dx_input.mtx");
    // DM(std::vector<double>(d->dlam+nx_, d->dlam+nx_+ng_)).to_file("lam_g.mtx");

    // NOTE(review): the return value of solve_QP/solve_LP is overwritten
    // by feasibility_iterations() below without being checked.
    int ret = 0;
    // Solve the QP (or an LP when running without a Hessian)
    if (use_sqp_) {
      ret = solve_QP(m, d->Bk, d->gf, d->lbdz, d->ubdz, d->Jk,
                     d->dx, d->dlam, 0);
    } else {
      ret = solve_LP(m, d->gf, d->lbdz, d->ubdz, d->Jk,
                     d->dx, d->dlam, 0);
    }

    // DM(std::vector<double>(d->dx,d->dx+nx_)).to_file("dx_out.mtx");
    // Eval quadratic model and check for convergence
    m_k = eval_m_k(mem);
    if (fabs(m_k) < optim_tol_) {
      if (print_status_)
        print("MESSAGE(feasiblesqpmethod): "
              "Optimal Point Found? Quadratic model is zero. "
              "After %d iterations\n", m->iter_count-1);
      m->return_status = "Solve_Succeeded";
      m->success = true;
      break;
    }

    // uout() << "QP step: " << std::vector<double>(d->dx, d->dx+nx_) << std::endl;
    // Detecting indefiniteness via the curvature along the step
    if (use_sqp_) {
      double gain = casadi_bilin(d->Bk, Hsp_, d->dx, d->dx);
      if (gain < 0) {
        if (print_status_) print("WARNING(feasiblesqpmethod): Indefinite Hessian detected\n");
      }
    }

    // Do the feasibility iterations here
    ret = feasibility_iterations(mem, tr_rad);

    // Check if step was accepted or not
    if (ret < 0) {
      // Inner iterations rejected: shrink the trust region to half of
      // the masked inf-norm of the proposed step and retry.
      uout() << "Rejected inner iterates" << std::endl;
      // uout() << casadi_masked_norm_inf(nx_, d->dx, d->tr_mask) << std::endl;

      tr_rad = 0.5 * casadi_masked_norm_inf(nx_, d->dx, d->tr_mask);
    } else {
      // Evaluate f at the feasible trial point
      m->arg[0] = d->z_feas;
      m->arg[1] = d_nlp->p;
      m->res[0] = &d->f_feas;
      if (calc_function(m, "nlp_f")) {
        uout() << "What does it mean that calc_function fails here??" << std::endl;
      }
      // Actual vs. predicted reduction, then trust-region update
      tr_ratio = eval_tr_ratio(d_nlp->objective, d->f_feas, m_k);
      tr_update(mem, tr_rad, tr_ratio);
      if (tr_rad < feas_tol_) {
        if (print_status_) print("MESSAGE(feasiblesqpmethod): "
          "Trust-region radius smaller than feasibility!! "
          "Abort!!.\n");
        m->return_status = "Trust_Region_Radius_Becomes_Too_Small";
        break;
      }

      step_accepted = step_update(mem, tr_ratio);
    }

    if (!exact_hessian_) {
      // Evaluate the gradient of the Lagrangian with the old x but new lam (for BFGS)
      casadi_copy(d->gf, nx_, d->gLag_old);
      casadi_mv(d->Jk, Asp_, d_nlp->lam+nx_, d->gLag_old, true);
      casadi_axpy(nx_, 1., d_nlp->lam, d->gLag_old);
    }
  }

  return 0;
}
1315 
1317  print("%4s %9s %14s %9s %9s %9s %9s %7s %5s %7s\n",
1318  "iter", "m_k", "objective", "tr_ratio", "inf_pr",
1319  "inf_du", "||d||", "lg(rg)", "tr_rad", "info");
1320  }
1321 
1322  void Feasiblesqpmethod::print_iteration(casadi_int iter, double obj,
1323  double m_k, double tr_ratio,
1324  double pr_inf, double du_inf,
1325  double dx_norm, double rg,
1326  double tr_rad,
1327  std::string info) const {
1328  print("%4d %9.2e %14.6e %9.2e %9.2e %9.2e %9.2e ",
1329  iter, m_k, obj, tr_ratio, pr_inf, du_inf, dx_norm);
1330  if (rg>0) {
1331  print("%7.2f ", log10(rg));
1332  } else {
1333  print("%7s ", "-");
1334  }
1335 
1336  print("%9.5e", tr_rad);
1337  // if (!ls_success) {
1338  // print("F");
1339  // } else {
1340  // print (" ");
1341  // }
1342 
1343  print(" - ");
1344  print(info.c_str());
1345  print("\n");
1346  }
1347 
1349  const double* lbdz, const double* ubdz, const double* A,
1350  double* x_opt, double* dlam, int mode) const {
1351  ScopedTiming tic(m->fstats.at("QP"));
1352  // Inputs
1353  std::fill_n(m->arg, qpsol_.n_in(), nullptr);
1354  // double lol;
1355  m->arg[CONIC_H] = nullptr;
1356  m->arg[CONIC_G] = g;
1357  m->arg[CONIC_X0] = x_opt;
1358  m->arg[CONIC_LAM_X0] = dlam;
1359  m->arg[CONIC_LAM_A0] = dlam + nx_;
1360  m->arg[CONIC_LBX] = lbdz;
1361  m->arg[CONIC_UBX] = ubdz;
1362  m->arg[CONIC_A] = A;
1363  m->arg[CONIC_LBA] = lbdz+nx_;
1364  m->arg[CONIC_UBA] = ubdz+nx_;
1365 
1366  // Outputs
1367  std::fill_n(m->res, qpsol_.n_out(), nullptr);
1368  m->res[CONIC_X] = x_opt;
1369  m->res[CONIC_LAM_X] = dlam;
1370  m->res[CONIC_LAM_A] = dlam + nx_;
1371  double obj;
1372  m->res[CONIC_COST] = &obj;
1373  // m->res[CONIC_COST] = nullptr;
1374 
1375 
1376  // Solve the QP
1377  qpsol_(m->arg, m->res, m->iw, m->w);
1378 
1379  if (verbose_) print("QP solved\n");
1380  return 0;
1381  }
1382 
1383  int Feasiblesqpmethod::solve_QP(FeasiblesqpmethodMemory* m, const double* H, const double* g,
1384  const double* lbdz, const double* ubdz, const double* A,
1385  double* x_opt, double* dlam, int mode) const {
1386  ScopedTiming tic(m->fstats.at("QP"));
1387  // Inputs
1388  std::fill_n(m->arg, qpsol_.n_in(), nullptr);
1389  m->arg[CONIC_H] = H;
1390  m->arg[CONIC_G] = g;
1391  m->arg[CONIC_X0] = x_opt;
1392  m->arg[CONIC_LAM_X0] = dlam;
1393  m->arg[CONIC_LAM_A0] = dlam + nx_;
1394  m->arg[CONIC_LBX] = lbdz;
1395  m->arg[CONIC_UBX] = ubdz;
1396  m->arg[CONIC_A] = A;
1397  m->arg[CONIC_LBA] = lbdz+nx_;
1398  m->arg[CONIC_UBA] = ubdz+nx_;
1399 
1400  // Outputs
1401  std::fill_n(m->res, qpsol_.n_out(), nullptr);
1402  m->res[CONIC_X] = x_opt;
1403  m->res[CONIC_LAM_X] = dlam;
1404  m->res[CONIC_LAM_A] = dlam + nx_;
1405  double obj;
1406  m->res[CONIC_COST] = &obj;
1407  // m->res[CONIC_COST] = nullptr;
1408 
1409 
1410  // Solve the QP
1411  qpsol_(m->arg, m->res, m->iw, m->w);
1412 
1413  if (verbose_) print("QP solved\n");
1414  return 0;
1415  }
1416 
1419  g.add_dependency(get_function("nlp_grad_f"));
1420  g.add_dependency(get_function("nlp_jac_g"));
1421  g.add_dependency(get_function("nlp_g"));
1422  g.add_dependency(get_function("nlp_f"));
1423  if (exact_hessian_) g.add_dependency(get_function("nlp_hess_l"));
1425  }
1426 
1429  codegen_body_enter(g);
1430  // From nlpsol
1431  g.local("m_p", "const casadi_real", "*");
1432  g.init_local("m_p", g.arg(NLPSOL_P));
1433  g.local("m_f", "casadi_real");
1434  g.local("m_f_feas", "casadi_real");
1435  g.copy_default(g.arg(NLPSOL_X0), nx_, "d_nlp.z", "0", false);
1436  g.copy_default(g.arg(NLPSOL_LAM_X0), nx_, "d_nlp.lam", "0", false);
1437  g.copy_default(g.arg(NLPSOL_LAM_G0), ng_, "d_nlp.lam+"+str(nx_), "0", false);
1438  g.copy_default(g.arg(NLPSOL_LBX), nx_, "d_nlp.lbz", "-casadi_inf", false);
1439  g.copy_default(g.arg(NLPSOL_UBX), nx_, "d_nlp.ubz", "casadi_inf", false);
1440  g.copy_default(g.arg(NLPSOL_LBG), ng_, "d_nlp.lbz+"+str(nx_),
1441  "-casadi_inf", false);
1442  g.copy_default("d_nlp.ubg", ng_, "d_nlp.ubz+"+str(nx_),
1443  "casadi_inf", false);
1444  casadi_assert(exact_hessian_, "Codegen implemented for exact Hessian only.", false);
1445 
1446  // auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
1447  // auto d_nlp = &m->d_nlp;
1448  // auto d = &m->d;
1449  // Define the problem struct and problem data struct here
1450  g.local("d", "struct casadi_feasiblesqpmethod_data");
1451  g.local("p", "struct casadi_feasiblesqpmethod_prob");
1452 
1453  g << "d.prob = &p;\n";
1454  g << "p.sp_h = " << g.sparsity(Hsp_) << ";\n";
1455  g << "p.sp_a = " << g.sparsity(Asp_) << ";\n";
1456  g << "p.nlp = &p_nlp;\n";
1457  g << "casadi_feasiblesqpmethod_init(&d, &iw, &w, " << sz_anderson_memory_ << ");\n";
1458 
1459  g.local("m_w", "casadi_real", "*");
1460  g << "m_w = w;\n";
1461  g.local("m_iw", "casadi_int", "*");
1462  g << "m_iw = iw;\n";
1463  g.local("m_arg", "const casadi_real", "**");
1464  g.init_local("m_arg", "arg+" + str(NLPSOL_NUM_IN));
1465  g.local("m_res", "casadi_real", "**");
1466  g.init_local("m_res", "res+" + str(NLPSOL_NUM_OUT));
1467 
1468  // g.local("ret", "int");
1469  g.local("ret", "casadi_int");
1470 
1471  // Number of SQP iterations
1472  // m->iter_count = 0;
1473  g.local("iter_count", "casadi_int");
1474  g.init_local("iter_count", "0");
1475 
1476  // Reset
1477  // int step_accepted = 0;
1478  g.local("step_accepted", "casadi_int");
1479  g.init_local("step_accepted", "0");
1480 
1481  // Default quadratic model value of objective
1482  // double m_k = -1.0;
1483  g.local("m_k", "casadi_real");
1484  g.init_local("m_k", "-1.0");
1485 
1486  // double tr_ratio = 0.0;
1487  g.local("tr_ratio", "casadi_real");
1488  g.init_local("tr_ratio", "0.0");
1489 
1490  // double tr_rad = tr_rad0_;
1491  // double tr_rad_prev = tr_rad0_;
1492  g.local("tr_rad", "casadi_real");
1493  g << "tr_rad = " << tr_rad0_ << ";\n";
1494  g.local("tr_rad_prev", "casadi_real");
1495  g << "tr_rad_prev = " << tr_rad0_ << ";\n";
1496 
1497  // transfer the scale vector to the problem
1498  // casadi_copy(get_ptr(tr_scale_vector_), nx_, d->tr_scale_vector);
1499  //transfer the scale vector to the problem
1500  g << g.copy(g.constant(tr_scale_vector_), nx_, "d.tr_scale_vector") << "\n";
1501  // g << g.copy("casadi_tr_scale_vector_", nx_, "d.tr_scale_vector") << "\n";
1502 
1503  // for (casadi_int i=0;i<nx_;++i) {
1504  // d->tr_mask[i] = d->tr_scale_vector[i]!=0;
1505  // }
1506  g << "for (casadi_int i = 0; i < " << nx_ << "; ++i) {\n";
1507  g << "d.tr_mask[i] = d.tr_scale_vector != 0;\n";
1508  g << "}\n";
1509 
1510 
1511  // HERE STARTS THE MAIN OPTIMIZATION LOOP ---------------------------------
1512  // For seeds ---- This is so far needed!!! ------------------
1513  // const double one = 1.;
1514  g.local("one", "const casadi_real");
1515  g.init_local("one", "1");
1516 
1517  // Info for printing
1518  // string info = "";
1519 
1520  // casadi_clear(d->dx, nx_);
1521  g << g.clear("d.dx", nx_) << "\n";
1522 
1523  // ------------------------------------------------------------------------
1524  // MAIN OPTIMIZATION LOOP
1525  // ------------------------------------------------------------------------
1526  // while (true) {
1527  g.comment("MAIN OPTIMIZATION LOOP");
1528  g << "while (1) {\n";
1529  // if(m->iter_count == 0) {
1530  g << "if (iter_count == 0) {;\n";
1531  // Evaluate the sensitivities -------------------------------------------
1532  // Evaluate f
1533  // m->arg[0] = d_nlp->z;
1534  // m->arg[1] = d_nlp->p;
1535  // m->res[0] = &d_nlp->f;
1536  // if (calc_function(m, "nlp_f")) {
1537  // uout() << "What does it mean that calc_function fails here??" << std::endl;
1538  // }
1539  g.comment("Evaluate f");
1540  g << "m_arg[0] = d_nlp.z;\n";
1541  g << "m_arg[1] = m_p;\n";
1542  g << "m_res[0] = &m_f;\n";
1543  std::string nlp_f = g(get_function("nlp_f"), "m_arg", "m_res", "m_iw", "m_w");
1544  // g << "if (" + nlp_f + ") return 1;\n";
1545  g << "if (" + nlp_f + ") return 10;\n";
1546 
1547  // Evaluate g
1548  // m->arg[0] = d_nlp->z;
1549  // m->arg[1] = d_nlp->p;
1550  // m->res[0] = d_nlp->z + nx_;
1551  // if (calc_function(m, "nlp_g")) {
1552  // uout() << "What does it mean that calc_function fails here??" << std::endl;
1553  // }
1554  g.comment("Evaluate g");
1555  g << "m_arg[0] = d_nlp.z;\n";
1556  g << "m_arg[1] = m_p;\n";
1557  g << "m_res[0] = d_nlp.z+" + str(nx_) + ";\n";
1558  std::string nlp_g = g(get_function("nlp_g"), "m_arg", "m_res", "m_iw", "m_w");
1559  // g << "if (" + nlp_g + ") return 1;\n";
1560  g << "if (" + nlp_g + ") return 20;\n";
1561 
1562  // Evaluate grad_f
1563  // m->arg[0] = d_nlp->z;
1564  // m->arg[1] = d_nlp->p;
1565  // m->res[0] = d->gf;
1566  // if (calc_function(m, "nlp_grad_f")) {
1567  // uout() << "What does it mean that calc_function fails here??" << std::endl;
1568  // }
1569  g.comment("Evaluate grad f");
1570  g << "m_arg[0] = d_nlp.z;\n";
1571  g << "m_arg[1] = m_p;\n";
1572  g << "m_res[0] = d.gf;\n";
1573  std::string nlp_grad_f = g(get_function("nlp_grad_f"), "m_arg", "m_res", "m_iw", "m_w");
1574  // g << "if (" + nlp_grad_f + ") return 1;\n";
1575  g << "if (" + nlp_grad_f + ") return 30;\n";
1576 
1577  // Evaluate jac_g
1578  // m->arg[0] = d_nlp->z;
1579  // m->arg[1] = d_nlp->p;
1580  // m->res[0] = d->Jk;
1581  // switch (calc_function(m, "nlp_jac_g")) {
1582  // case -1:
1583  // m->return_status = "Non_Regular_Sensitivities";
1584  // m->unified_return_status = SOLVER_RET_NAN;
1585  // if (print_status_)
1586  // print("MESSAGE(feasiblesqpmethod): "
1587  // "No regularity of sensitivities at current point.\n");
1588  // return 1;
1589  // case 0:
1590  // break;
1591  // default:
1592  // return 1;
1593 
1594  // }
1595  g.comment("Evaluate jac g");
1596  g << "m_arg[0] = d_nlp.z;\n";
1597  g << "m_arg[1] = m_p;\n";
1598  g << "m_res[0] = d.Jk;\n";
1599  std::string nlp_jac_g = g(get_function("nlp_jac_g"), "m_arg", "m_res", "m_iw", "m_w");
1600  // g << "if (" + nlp_jac_g + ") return 1;\n";
1601  g << "if (" + nlp_jac_g + ") return 40;\n";
1602 
1603  // if (use_sqp_) {
1604  // if (exact_hessian_) {
1605  // // Update/reset exact Hessian
1606  // m->arg[0] = d_nlp->z;
1607  // m->arg[1] = d_nlp->p;
1608  // m->arg[2] = &one;
1609  // m->arg[3] = d_nlp->lam + nx_;
1610  // m->res[0] = d->Bk;
1611  // if (calc_function(m, "nlp_hess_l")) return 1;
1612  // if (convexify_) {
1613  // ScopedTiming tic(m->fstats.at("convexify"));
1614  // if (convexify_eval(&convexify_data_.config, d->Bk, d->Bk, m->iw, m->w)) return 1;
1615  // }
1616  // } else if (m->iter_count==0) {
1617  // ScopedTiming tic(m->fstats.at("BFGS"));
1618  // // Initialize BFGS
1619  // casadi_fill(d->Bk, Hsp_.nnz(), 1.);
1620  // casadi_bfgs_reset(Hsp_, d->Bk);
1621  // } else {
1622  // ScopedTiming tic(m->fstats.at("BFGS"));
1623  // // Update BFGS
1624  // if (m->iter_count % lbfgs_memory_ == 0) casadi_bfgs_reset(Hsp_, d->Bk);
1625  // // Update the Hessian approximation
1626  // casadi_bfgs(Hsp_, d->Bk, d->dx, d->gLag, d->gLag_old, m->w);
1627  // }
1628  g.comment("Just exact Hessian implemented, GN would be possible!");
1629  g << "m_arg[0] = d_nlp.z;\n";
1630  g << "m_arg[1] = m_p;\n";
1631  g << "m_arg[2] = &one;\n";
1632  g << "m_arg[3] = d_nlp.lam+" + str(nx_) + ";\n";
1633  g << "m_res[0] = d.Bk;\n";
1634  std::string nlp_hess_l = g(get_function("nlp_hess_l"), "m_arg", "m_res", "m_iw", "m_w");
1635  // g << "if (" + nlp_hess_l + ") return 1;\n";
1636  g << "if (" + nlp_hess_l + ") return 70;\n";
1637 
1638  // }
1639  // test if initialization is feasible
1640  // if (casadi_max_viol(nx_ + ng_, d_nlp->z, d_nlp->lbz, d_nlp->ubz) > feas_tol_) {
1641  // if (print_status_) print("MESSAGE(feasiblesqpmethod): "
1642  // "No feasible initialization given! "
1643  // "Find feasible initialization.\n");
1644  // m->return_status = "No_Feasible_Initialization";
1645  // break;
1646  // }
1647  std::string viol = g.max_viol(nx_+ ng_, "d_nlp.z", "d_nlp.lbz", "d_nlp.ubz");
1648  g << "if (" << viol << "> " << feas_tol_ << ") {\n";
1649  g << "printf(\"MESSAGE(feasiblesqpmethod): "
1650  "No feasible initialization given! Find feasible initialization.\\n\");\n";
1651  g << "break;\n";
1652  g << "}\n";
1653 
1654  // } else if (step_accepted == 0) {
1655  g << "} else if (step_accepted == 0) {\n";
1656  // Evaluate grad_f
1657  // m->arg[0] = d_nlp->z;
1658  // m->arg[1] = d_nlp->p;
1659  // m->res[0] = d->gf;
1660  // if (calc_function(m, "nlp_grad_f")) {
1661  // uout() << "What does it mean that calc_function fails here??" << std::endl;
1662  // }
1663  g.comment("Evaluate grad f");
1664  g << "m_arg[0] = d_nlp.z;\n";
1665  g << "m_arg[1] = m_p;\n";
1666  g << "m_res[0] = d.gf;\n";
1667  nlp_grad_f = g(get_function("nlp_grad_f"), "m_arg", "m_res", "m_iw", "m_w");
1668  // g << "if (" + nlp_grad_f + ") return 1;\n";
1669  g << "if (" + nlp_grad_f + ") return 50;\n";
1670 
1671  // Evaluate jac_g
1672  // m->arg[0] = d_nlp->z;
1673  // m->arg[1] = d_nlp->p;
1674  // m->res[0] = d->Jk;
1675  // switch (calc_function(m, "nlp_jac_g")) {
1676  // case -1:
1677  // m->return_status = "Non_Regular_Sensitivities";
1678  // m->unified_return_status = SOLVER_RET_NAN;
1679  // if (print_status_)
1680  // print("MESSAGE(feasiblesqpmethod): "
1681  // "No regularity of sensitivities at current point.\n");
1682  // return 1;
1683  // case 0:
1684  // break;
1685  // default:
1686  // return 1;
1687  // }
1688  g.comment("Evaluate jac g");
1689  g << "m_arg[0] = d_nlp.z;\n";
1690  g << "m_arg[1] = m_p;\n";
1691  g << "m_res[0] = d.Jk;\n";
1692  nlp_jac_g = g(get_function("nlp_jac_g"), "m_arg", "m_res", "m_iw", "m_w");
1693  // g << "if (" + nlp_jac_g + ") return 1;\n";
1694  g << "if (" + nlp_jac_g + ") return 60;\n";
1695 
1696  // if (use_sqp_) {
1697  // if (exact_hessian_) {
1698  // // Update/reset exact Hessian
1699  // m->arg[0] = d_nlp->z;
1700  // m->arg[1] = d_nlp->p;
1701  // m->arg[2] = &one;
1702  // m->arg[3] = d_nlp->lam + nx_;
1703  // m->res[0] = d->Bk;
1704  // if (calc_function(m, "nlp_hess_l")) return 1;
1705  // if (convexify_) {
1706  // ScopedTiming tic(m->fstats.at("convexify"));
1707  // if (convexify_eval(&convexify_data_.config, d->Bk, d->Bk, m->iw, m->w)) return 1;
1708  // }
1709  // } else if (m->iter_count==0) {
1710  // ScopedTiming tic(m->fstats.at("BFGS"));
1711  // // Initialize BFGS
1712  // casadi_fill(d->Bk, Hsp_.nnz(), 1.);
1713  // casadi_bfgs_reset(Hsp_, d->Bk);
1714  // } else {
1715  // ScopedTiming tic(m->fstats.at("BFGS"));
1716  // // Update BFGS
1717  // if (m->iter_count % lbfgs_memory_ == 0) casadi_bfgs_reset(Hsp_, d->Bk);
1718  // // Update the Hessian approximation
1719  // casadi_bfgs(Hsp_, d->Bk, d->dx, d->gLag, d->gLag_old, m->w);
1720  // }
1721  // }
1722  g.comment("Just exact Hessian implemented, GN would be possible!");
1723  g << "m_arg[0] = d_nlp.z;\n";
1724  g << "m_arg[1] = m_p;\n";
1725  g << "m_arg[2] = &one;\n";
1726  g << "m_arg[3] = d_nlp.lam+" + str(nx_) + ";\n";
1727  g << "m_res[0] = d.Bk;\n";
1728  nlp_hess_l = g(get_function("nlp_hess_l"), "m_arg", "m_res", "m_iw", "m_w");
1729  // g << "if (" + nlp_hess_l + ") return 1;\n";
1730  g << "if (" + nlp_hess_l + ") return 70;\n";
1731 
1732  // }
1733  g << "}\n";
1734 
1735  // // Evaluate the gradient of the Lagrangian
1736  // casadi_copy(d->gf, nx_, d->gLag);
1737  // casadi_mv(d->Jk, Asp_, d_nlp->lam+nx_, d->gLag, true);
1738  // casadi_axpy(nx_, 1., d_nlp->lam, d->gLag);
1739  g.comment("Evaluate the gradient of the Lagrangian");
1740  g << g.copy("d.gf", nx_, "d.gLag") << "\n";
1741  g << g.mv("d.Jk", Asp_, "d_nlp.lam+"+str(nx_), "d.gLag", true) << "\n";
1742  g << g.axpy(nx_, "1.0", "d_nlp.lam", "d.gLag") << "\n";
1743 
1744  // Primal infeasability
1745  // double pr_inf = casadi_max_viol(nx_+ng_, d_nlp->z, d_nlp->lbz, d_nlp->ubz);
1746  g.comment("Primal infeasability");
1747  g.local("pr_inf", "casadi_real");
1748  g << "pr_inf = " << g.max_viol(nx_+ng_, "d_nlp.z", "d_nlp.lbz", "d_nlp.ubz") << ";\n";
1749 
1750  // inf-norm of Lagrange gradient
1751  // double du_inf = casadi_norm_inf(nx_, d->gLag);
1752  g.comment("inf-norm of lagrange gradient");
1753  g.local("du_inf", "casadi_real");
1754  g << "du_inf = " << g.norm_inf(nx_, "d.gLag") << ";\n";
1755 
1756  // inf-norm of step, d->dx is a nullptr???
1757  // double dx_norminf = casadi_norm_inf(nx_, d->dx);
1758  g.comment("inf-norm of step");
1759  g.local("dx_norminf", "casadi_real");
1760  g << "dx_norminf = " << g.norm_inf(nx_, "d.dx") << ";\n";
1761 
1762  // Printing information about the actual iterate
1763  // if (print_iteration_) {
1764  // // if (m->iter_count % 10 == 0) print_iteration();
1765  // print_iteration();
1766  // print_iteration(m->iter_count, d_nlp->f, m_k, tr_ratio,
1767  // pr_inf, du_inf, dx_norminf, m->reg, tr_rad_prev, info);
1768  // info = "";
1769  // }
1770  g << "if (" << print_iteration_ << ") {\n";
1771  g << "printf(\"%4s %9s %14s %9s %9s %9s %9s %5s\\n\", "
1772  "\"iter\", \"m_k\", \"objective\", \"tr_ratio\", "
1773  "\"inf_pr\",\"inf_du\", \"||d||\", \"tr_rad\");\n";
1774  g << "printf(\"%4lld %9.2e %14.6e %9.2e %9.2e %9.2e %9.2e %5.2e\\n\", "
1775  "iter_count, m_k, m_f, tr_ratio, pr_inf, du_inf, dx_norminf, tr_rad_prev);";
1776  g << "}\n";
1777 
1778  // tr_rad_prev = tr_rad;
1779  g << "tr_rad_prev = tr_rad;\n";
1780 
1781  // // Callback function NOT IMPLEMENTED IN CODEGEN
1782  // if (callback(m)) {
1783  // if (print_status_) print("WARNING(feasiblesqpmethod): Aborted by callback...\n");
1784  // m->return_status = "User_Requested_Stop";
1785  // break;
1786  // }
1787 
1788  // Checking convergence criteria
1789  // Where is the complementarity condition??
1790  // if (m->iter_count >= min_iter_ && pr_inf < tol_pr_ && du_inf < tol_du_) {
1791  // if (print_status_)
1792  // print("MESSAGE(feasiblesqpmethod): "
1793  // "Convergence achieved after %d iterations\n", m->iter_count);
1794  // m->return_status = "Solve_Succeeded";
1795  // m->success = true;
1796  // break;
1797  // }
1798 
1799 
1800  g << "if (iter_count >= " << max_iter_ << ") {\n";
1801  g << "if (" << print_status_ << ") {\n";
1802  g << g.printf("MESSAGE(feasiblesqpmethod): "
1803  "Maximum number of iterations reached.\\n") << "\n";
1804  g << "break;\n";
1805  g << "}\n";
1806  g << "}\n";
1807 
1808  // Formulate the QP
1809  // Define lower bounds
1810  // casadi_copy(d_nlp->lbz, nx_+ng_, d->lbdz);
1811  // casadi_axpy(nx_+ng_, -1., d_nlp->z, d->lbdz);
1812  // casadi_clip_min(d->lbdz, nx_, -tr_rad, d->tr_mask);
1813  g.comment("Formulate the QP");
1814  g.comment("Define the lower bounds");
1815  g << g.copy("d_nlp.lbz", nx_+ng_, "d.lbdz") << "\n";
1816  g << g.axpy(nx_+ng_, "-1.0", "d_nlp.z", "d.lbdz") << "\n";
1817  g << g.clip_min("d.lbdz", nx_, "-tr_rad", "d.tr_mask") << "\n";
1818 
1819 
1820  // Define upper bounds
1821  // casadi_copy(d_nlp->ubz, nx_+ng_, d->ubdz);
1822  // casadi_axpy(nx_+ng_, -1., d_nlp->z, d->ubdz);
1823  // casadi_clip_max(d->ubdz, nx_, tr_rad, d->tr_mask);
1824  g.comment("Define the upper bounds");
1825  g << g.copy("d_nlp.ubz", nx_+ng_, "d.ubdz") << "\n";
1826  g << g.axpy(nx_+ng_, "-1.0", "d_nlp.z", "d.ubdz") << "\n";
1827  g << g.clip_max("d.ubdz", nx_, "tr_rad", "d.tr_mask") << "\n";
1828 
1829  // // Initial guess
1830  // casadi_copy(d_nlp->lam, nx_+ng_, d->dlam);
1831  g.comment("Initial guess");
1832  g << g.copy("d_nlp.lam", nx_+ng_, "d.dlam") << "\n";
1833 
1834  // Increase counter
1835  // m->iter_count++;
1836  g.comment("Increase counter");
1837  g << "++iter_count;\n";
1838 
1839  // int ret = 0;
1840  // g << "ret = 0;\n";
1841 
1842  // Solve the QP
1843  // if (use_sqp_) {
1844  // ret = solve_QP(m, d->Bk, d->gf, d->lbdz, d->ubdz, d->Jk,
1845  // d->dx, d->dlam, 0);
1846  // } else {
1847  // ret = solve_LP(m, d->gf, d->lbdz, d->ubdz, d->Jk,
1848  // d->dx, d->dlam, 0);
1849  // }
1850  g.comment("Solve the QP");
1851  codegen_qp_solve(g, "d.Bk", "d.gf", "d.lbdz", "d.ubdz", "d.Jk", "d.dx", "d.dlam", 0);
1852 
1853  // // Eval quadratic model and check for convergence
1854  // m_k = eval_m_k(mem);
1855 
1856  g.comment("Eval quadratic model and check for convergence");
1857  codegen_eval_m_k(g);
1858 
1859  g.comment("Checking convergence criteria");
1860  g << "if (fabs(m_k) < " << optim_tol_ << ") {\n";
1861  g << "printf(\"MESSAGE(feasiblesqpmethod): Optimal Point Found? "
1862  "Quadratic model is zero. After %lld iterations.\\n\", iter_count-1);\n";
1863  g << "break;\n";
1864  g << "}\n";
1865 
1866  // uout() << "QP step: " << std::vector<double>(d->dx, d->dx+nx_) << std::endl;
1867  // Detecting indefiniteness
1868  // if (use_sqp_) {
1869  // double gain = casadi_bilin(d->Bk, Hsp_, d->dx, d->dx);
1870  // if (gain < 0) {
1871  // if (print_status_) print("WARNING(feasiblesqpmethod): Indefinite Hessian detected\n");
1872  // }
1873  // }
1874  g.comment("Detecting indefiniteness");
1875  g.comment("TBD");
1876 
1877  // Do the feasibility iterations here
1878  // ret = feasibility_iterations(mem, tr_rad);
1879  g.comment("Do the feasibility iterations here");
1880  codegen_feasibility_iterations(g, "tr_rad");
1881 
1882  g << "if (ret < 0) {\n";
1883  g << "printf(\"Rejected inner iterates\\n\");\n";
1884  g << "tr_rad = 0.5*" << g.masked_norm_inf(nx_, "d.dx", "d.tr_mask") << ";\n";
1885  g << "} else {\n";
1886  g.comment("Evaluate f");
1887  g << "m_arg[0] = d.z_feas;\n";
1888  g << "m_arg[1] = m_p;\n";
1889  g << "m_res[0] = &m_f_feas;\n";
1890  nlp_f = g(get_function("nlp_f"), "m_arg", "m_res", "m_iw", "m_w");
1891  g << "if (" + nlp_f + ") return 1;\n";
1892 
1893  codegen_eval_tr_ratio(g, "m_f", "m_f_feas", "m_k");
1894  codegen_tr_update(g, "tr_rad", "tr_ratio");
1895 
1896  g << "if (tr_rad < "<< feas_tol_ << ") {\n";
1897  g << "if (" << print_status_ << ") {\n";
1898  g << "printf(\"MESSAGE: Trust-Region radius smaller than feasibilty!!\\n\");\n";
1899  g << "}\n";
1900  g << "break;";
1901  g << "}\n";
1902 
1903  codegen_step_update(g, "tr_ratio");
1904  g.comment("Close the step acceptance loop");
1905  g << "}\n";
1906 
1907  // if (!exact_hessian_) {
1908  // // Evaluate the gradient of the Lagrangian with the old x but new lam (for BFGS)
1909  // casadi_copy(d->gf, nx_, d->gLag_old);
1910  // casadi_mv(d->Jk, Asp_, d_nlp->lam+nx_, d->gLag_old, true);
1911  // casadi_axpy(nx_, 1., d_nlp->lam, d->gLag_old);
1912  // }
1913  // }
1914  // g << "}\n";
1915  // g << "if (!" << exact_hessian_ << ") {\n";
1916  // g << g.copy("d.gf", nx_, "d.gLag_old") << "\n";
1917  // g << g.mv("d.Jk", Asp_, "d_nlp.lam+" + str(nx_), "d.gLag_old", true) << ";\n";
1918  // g << g.axpy(nx_, "1.", "d_nlp.lam", "d.gLag_old") << "\n";
1919  // g << "}\n";
1920 
1921  // return 0;
1922  // }
1923  //Close next loop
1924  // g << "}\n";
1925 
1926  // g << "return 0;\n"; // Do we need this??
1927  // Close the loop optimization problem
1928  g.comment("Close the loop optimization problem");
1929  g << "}\n";
1930 
1931  if (bound_consistency_) {
1932  g << g.bound_consistency(nx_+ng_, "d_nlp.z", "d_nlp.lam", "d_nlp.lbz", "d_nlp.ubz") << ";\n";
1933  }
1934  g.copy_check("d_nlp.z", nx_, g.res(NLPSOL_X), false, true);
1935  g.copy_check("d_nlp.z+" + str(nx_), ng_, g.res(NLPSOL_G), false, true);
1936  g.copy_check("d_nlp.lam", nx_, g.res(NLPSOL_LAM_X), false, true);
1937  g.copy_check("d_nlp.lam+"+str(nx_), ng_, g.res(NLPSOL_LAM_G), false, true);
1938  g.copy_check("d_nlp.lam_p", np_, g.res(NLPSOL_LAM_P), false, true);
1939  g.copy_check("&m_f", 1, g.res(NLPSOL_F), false, true);
1940  }
1941 
1942 
// Codegen helper: emit the C code that wires up the argument/result arrays
// for the QP solver (conic plugin) and invokes it. Mirrors the runtime
// solve_QP() path; the solver's return flag is stored in the generated
// local `ret`.
//   H, g       - names of the Hessian and gradient buffers in generated code
//   lbdz, ubdz - stacked bounds: the first nx_ entries are variable bounds,
//                the entries from offset nx_ are constraint bounds
//   A          - constraint Jacobian buffer name
//   x_opt      - primal solution buffer (also passed as initial guess X0)
//   dlam       - multiplier buffer; constraint multipliers start at offset nx_
//   mode       - unused in this body; kept for symmetry with solve_QP()
1944  const std::string& H, const std::string& g,
1945  const std::string& lbdz, const std::string& ubdz,
1946  const std::string& A, const std::string& x_opt,
1947  const std::string& dlam, int mode) const {
// Clear all QP inputs first, then assign only the ones used
1948  for (casadi_int i=0;i<qpsol_.n_in();++i) cg << "m_arg[" << i << "] = 0;\n";
1949  cg << "m_arg[" << CONIC_H << "] = " << H << ";\n";
1950  cg << "m_arg[" << CONIC_G << "] = " << g << ";\n";
1951  cg << "m_arg[" << CONIC_X0 << "] = " << x_opt << ";\n";
1952  cg << "m_arg[" << CONIC_LAM_X0 << "] = " << dlam << ";\n";
1953  cg << "m_arg[" << CONIC_LAM_A0 << "] = " << dlam << "+" << nx_ << ";\n";
1954  cg << "m_arg[" << CONIC_LBX << "] = " << lbdz << ";\n";
1955  cg << "m_arg[" << CONIC_UBX << "] = " << ubdz << ";\n";
1956  cg << "m_arg[" << CONIC_A << "] = " << A << ";\n";
1957  cg << "m_arg[" << CONIC_LBA << "] = " << lbdz << "+" << nx_ << ";\n";
1958  cg << "m_arg[" << CONIC_UBA << "] = " << ubdz << "+" << nx_ << ";\n";
// Clear all QP outputs, then wire up the primal and dual results
1959  for (casadi_int i=0;i<qpsol_.n_out();++i) cg << "m_res[" << i << "] = 0;\n";
1960  cg << "m_res[" << CONIC_X << "] = " << x_opt << ";\n";
1961  cg << "m_res[" << CONIC_LAM_X << "] = " << dlam << ";\n";
1962  cg << "m_res[" << CONIC_LAM_A << "] = " << dlam << "+" << nx_ << ";\n";
// Emit the actual call to the QP solver and capture its return flag
1963  std::string flag = cg(qpsol_, "m_arg", "m_res", "m_iw", "m_w");
1964  cg << "ret = " << flag << ";\n";
1965  cg << "if (ret == -1000) return -1000;\n"; // equivalent to raise Exception
1966  }
1967 
// Codegen counterpart of tr_update(): emit the trust-region radius update
// driven by the ratio `tr_ratio` of actual to predicted reduction.
//   tr_ratio < tr_eta1_  : shrink radius to tr_alpha1_ * masked ||dx||_inf
//   tr_ratio > tr_eta2_ and the step lies on the trust-region boundary
//                        : enlarge by tr_alpha2_, capped at tr_rad_max_
//   otherwise            : leave the radius unchanged
1969  const std::string& tr_rad, const std::string& tr_ratio) const {
1970  cg << "if (tr_ratio < " << tr_eta1_ << ") {\n";
1971  cg << "tr_rad = " << tr_alpha1_ <<"*" << cg.masked_norm_inf(nx_, "d.dx", "d.tr_mask") << ";\n";
// The step is "on the boundary" when its masked inf-norm matches tr_rad
// to within optim_tol_
1972  std::string tol = "fabs(" + cg.masked_norm_inf(nx_, "d.dx", "d.tr_mask") + " - tr_rad)";
1973  cg << "} else if (tr_ratio > " << tr_eta2_ << " && " << tol << " < " << optim_tol_ << " ) {\n";
1974  cg << "tr_rad = " << cg.fmin(str(tr_alpha2_)+"*tr_rad", str(tr_rad_max_)) << ";\n";
1975  cg << "}\n";
1976  cg.comment("else: keep trust-region as it is....");
1977  }
1978 
// Codegen counterpart of eval_m_k(): emit the evaluation of the quadratic
// model of the objective at the QP step,
//   m_k = 0.5*dx'*Bk*dx + gf'*dx.
// NOTE(review): only the SQP variant is emitted; the LP branch of the
// reference implementation below (gf'*dx alone when !use_sqp_) is not
// generated -- verify that codegen is only used with use_sqp_.
1980  // auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
1981  // auto d = &m->d;
1982  // if (use_sqp_) {
1983  // return 0.5*casadi_bilin(d->Bk, Hsp_, d->dx, d->dx) + casadi_dot(nx_, d->gf, d->dx);
1984  // } else {
1985  // return casadi_dot(nx_, d->gf, d->dx);
1986  // }
1987  cg << "m_k = 0.5*" << cg.bilin("d.Bk", Hsp_, "d.dx", "d.dx")
1988  << "+" << cg.dot(nx_, "d.gf", "d.dx") << ";\n";
1989 }
1990 
// Codegen counterpart of eval_tr_ratio(): emit the trust-region ratio,
// i.e. the actual reduction (val_f - val_f_corr) divided by the predicted
// reduction (-val_m_k) of the quadratic model.
1992  const std::string& val_f, const std::string& val_f_corr, const std::string& val_m_k) const {
1993  // return (val_f - val_f_corr) / (-val_m_k);
1994  cg << "tr_ratio = (" + val_f + "-" + val_f_corr + ") / (-" + val_m_k + ");\n";
1995  }
1996 
// Codegen counterpart of step_update(): emit the accept/reject decision for
// the corrected (feasible) iterate based on the trust-region ratio.
// On acceptance (tr_ratio > tr_acceptance_): the feasible iterate, its
// objective value and multipliers are copied into the NLP data and the
// generated `ret` is set to 0. On rejection, `ret` is set to -1 and the
// current iterate is left untouched.
1998  const std::string& tr_ratio) const {
1999  // auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
2000  // auto d_nlp = &m->d_nlp;
2001  // auto d = &m->d;
2002
2003  // if (tr_ratio > tr_acceptance_) {
2004  // // This is not properly implemented yet: d_nlp->z_old = d_mlp->z;
2005  // casadi_copy(d->z_feas, nx_ + ng_, d_nlp->z);
2006  // d_nlp->f = d->f_feas;
2007  // casadi_copy(d->dlam_feas, nx_ + ng_, d_nlp->lam);
2008
2009  // uout() << "ACCEPTED" << std::endl;
2010  // return 0;
2011  // } else {
2012  // uout() << "REJECTED" << std::endl;
2013  // return -1;
2014  // }
2015  cg << "if(" + tr_ratio + ">" << tr_acceptance_ << ") {\n";
2016  cg << cg.copy("d.z_feas", nx_ + ng_, "d_nlp.z") << "\n";
2017  cg << "m_f = m_f_feas;\n";
2018  cg << cg.copy("d.dlam_feas", nx_ + ng_, "d_nlp.lam") << "\n";
2019  cg << "printf(\"ACCEPTED\\n\");\n";
2020  cg << "ret = 0;\n";
2021  cg << "} else {\n";
2022  cg << "printf(\"REJECTED\\n\");\n";
2023  cg << "ret = -1;\n";
2024  cg << "}\n";
2025 }
2026 
2028  const std::string& tr_rad) const {
2029  // cg.local("ret", "casadi_int");
2030  cg.init_local("ret", "0");
2031 
2032  // casadi_copy(d->dx, nx_, d->dx_feas);
2033  cg << cg.copy("d.dx", nx_, "d.dx_feas") << "\n";
2034 
2035  // casadi_copy(d->dlam, nx_ + ng_, d->dlam_feas);
2036  cg << cg.copy("d.dlam", nx_, "d.dlam_feas") << "\n";
2037 
2038  // Why do we do this at the moment??
2039  // casadi_copy(d->dlam, nx_+ng_, d->z_tmp);
2040  // casadi_axpy(nx_+ng_, -1.0, d_nlp->lam, d->z_tmp);
2041  cg << cg.copy("d.dlam", nx_+ng_, "d.z_tmp") << "\n";
2042  cg << cg.axpy(nx_+ng_, "-1.0", "d_nlp.lam", "d.z_tmp") << "\n";
2043 
2044  // this is in solve in fslp.py
2045  // double step_inf_norm = casadi_masked_norm_inf(nx_, d->dx, d->tr_mask);
2046  // double prev_step_inf_norm = step_inf_norm;
2047  cg.local("step_inf_norm", "casadi_real");
2048  cg << "step_inf_norm = " << cg.masked_norm_inf(nx_, "d.dx", "d.tr_mask") << ";\n";
2049  cg.local("prev_step_inf_norm", "casadi_real");
2050  cg << "prev_step_inf_norm = step_inf_norm;\n";
2051  // cg.init_local("prev_step_inf_norm", "step_inf_norm");
2052 
2053  // self.x_tmp = self.x_k + p_tmp
2054  // casadi_copy(d_nlp->z, nx_+ng_, d->z_feas);
2055  // casadi_axpy(nx_, 1., d->dx_feas, d->z_feas);
2056  cg << cg.copy("d_nlp.z", nx_+ng_, "d.z_feas") << "\n";
2057  cg << cg.axpy(nx_, "1.0", "d.dx_feas", "d.z_feas") << "\n";
2058 
2059 
2060  // if (use_anderson_) {
2061  // // anderson_acc_init_memory(mem, d->dx_feas, d->z_feas);
2062  // anderson_acc_init_memory(mem, d->dx_feas, d_nlp->z);
2063  // }
2064  // cg << "if (" << use_anderson_ << ") {\n";
2065  // cg << cg.codegen_anderson_acc_init_memory(cg, "d.dx_feas", "d_nlp.z");
2066  // cg << "}\n";
2067 
2068  // Evaluate g
2069  // self.g_tmp = self.__eval_g(self.x_tmp)
2070  // m->arg[0] = d->z_feas;
2071  // m->arg[1] = d_nlp->p;
2072  // m->res[0] = d->z_feas + nx_;
2073  // if (calc_function(m, "nlp_g")) {
2074  // uout() << "What does it mean that calc_function fails here??" << std::endl;
2075  // }
2076  cg.comment("Evaluate g");
2077  cg << "m_arg[0] = d.z_feas;\n";
2078  cg << "m_arg[1] = m_p;\n";
2079  cg << "m_res[0] = d.z_feas+" + str(nx_) + ";\n";
2080  std::string nlp_g = cg(get_function("nlp_g"), "m_arg", "m_res", "m_iw", "m_w");
2081  // cg << "if (" + nlp_g + ") return 1;\n";
2082  cg << "if (" + nlp_g + ") return 100;\n";
2083 
2084 
2085  // int inner_iter = 0;
2086  cg.local("inner_iter", "casadi_int");
2087  cg.init_local("inner_iter", "0");
2088 
2089  // double prev_infeas = casadi_max_viol(nx_+ng_, d->z_feas, d_nlp->lbz, d_nlp->ubz);
2090  // double curr_infeas = prev_infeas;
2091  cg.local("prev_infeas", "casadi_real");
2092  cg << "prev_infeas =" << cg.max_viol(nx_+ng_, "d.z_feas", "d_nlp.lbz", "d_nlp.ubz") << ";\n";
2093  cg.local("curr_infeas", "casadi_real");
2094  cg << "curr_infeas = prev_infeas;\n";
2095 
2096 
2097  // Calculate asymptotic exactness of current step
2098  // casadi_copy(d->dx, nx_, d->z_tmp);
2099  // casadi_axpy(nx_, -1., d->z_feas, d->z_tmp);
2100  // casadi_axpy(nx_, 1., d_nlp->z, d->z_tmp);
2101  // double as_exac = casadi_norm_2(nx_, d->z_tmp) / casadi_norm_2(nx_, d->dx);
2102  cg << cg.copy("d.dx", nx_, "d.z_tmp") << "\n";
2103  cg << cg.axpy(nx_, "-1.0", "d.z_feas", "d.z_tmp") << "\n";
2104  cg << cg.axpy(nx_, "1.0", "d_nlp.z", "d.z_tmp") << "\n";
2105  cg.local("as_exac", "casadi_real");
2106  cg << "as_exac =" << cg.norm_2(nx_, "d.z_tmp") << "/" << cg.norm_2(nx_, "d.dx") << ";\n";
2107 
2108  // double kappa_watchdog = 0.0;
2109  // double kappa = 0.0;
2110  // double acc_as_exac = 0.0;
2111  cg.local("kappa_watchdog", "casadi_real");
2112  cg.init_local("kappa_watchdog", "0.0");
2113  cg.local("kappa", "casadi_real");
2114  cg.init_local("kappa", "0.0");
2115  cg.local("acc_as_exac", "casadi_real");
2116  cg << "acc_as_exac = 0.0;\n";
2117 
2118  // double watchdog_prev_inf_norm = prev_step_inf_norm; // until here everything is correct!
2119  cg.local("watchdog_prev_inf_norm", "casadi_real");
2120  // cg.init_local("watchdog_prev_inf_norm", "prev_step_inf_norm");
2121  cg << "watchdog_prev_inf_norm = prev_step_inf_norm;\n";
2122 
2123  // for (int j=0; j<max_inner_iter_; ++j) {
2124  // if (curr_infeas < feas_tol_) {
2125  // inner_iter = j;
2126  // // kappa_acceptance = true;
2127  // if (as_exac < 0.5) {
2128  // return 0;
2129  // } else {
2130  // return -1;
2131  // }
2132  // } else if (j>0 && (curr_infeas > 1.0 || as_exac > 1.0)) {
2133  // // kappa_acceptance = false;
2134  // return -1;
2135  // }
2136  cg << "for (int j=0;j<" << max_inner_iter_ << "; ++j) {\n";
2137  cg << "if (curr_infeas < " << feas_tol_ << ") {\n";
2138  cg << "inner_iter = j;\n";
2139  cg << "if (as_exac < 0.5) {\n";
2140  cg << "ret = 0; \n";
2141  cg << "break; \n";
2142  cg << "} else {\n";
2143  cg << "ret = -1;\n";
2144  cg << "break; \n";
2145  cg << "}\n";
2146  cg << "} else if (j>0 && (curr_infeas > 1.0 || as_exac > 1.0)) {\n";
2147  cg << "ret = -1;\n";
2148  cg << "break; \n";
2149  cg << "}\n";
2150 
2151 
2152  // inner_iter = j+1;
2153  cg << "inner_iter =j+1;\n";
2154 
2155  // create corrected gradient here -----------------------------
2156  // casadi_copy(d->z_feas, nx_, d->z_tmp);
2157  // casadi_axpy(nx_, -1., d_nlp->z, d->z_tmp);
2158  // casadi_copy(d->gf, nx_, d->gf_feas);
2159  cg << cg.copy("d.z_feas", nx_, "d.z_tmp") << "\n";
2160  cg << cg.axpy(nx_, "-1.0", "d_nlp.z", "d.z_tmp") << "\n";
2161  cg << cg.copy("d.gf", nx_, "d.gf_feas") << "\n";
2162  // In case of SQP we need to multiply with
2163  // if (use_sqp_) {
2164  // casadi_mv(d->Bk, Hsp_, d->z_tmp, d->gf_feas, true);
2165  // }
2166  cg.comment("Just SQP implemented so far!");
2167  // cg << "if (" << use_sqp_ << ") {\n";
2168  cg << cg.mv("d.Bk", Hsp_, "d.z_tmp", "d.gf_feas", true) << "\n";
2169  // cg << "}\n";
2170 
2171  // create bounds of correction QP -----------------------------
2172  // upper bounds of constraints
2173  // casadi_copy(d_nlp->ubz + nx_, ng_, d->ubdz_feas + nx_);
2174  // casadi_axpy(ng_, -1., d->z_feas + nx_, d->ubdz_feas + nx_);
2175  cg << cg.copy("d_nlp.ubz+"+str(nx_), ng_, "d.ubdz_feas+"+str(nx_)) << "\n";
2176  cg << cg.axpy(ng_, "-1.0", "d.z_feas+"+str(nx_), "d.ubdz_feas+"+str(nx_)) << "\n";
2177 
2178  // lower bounds of constraints
2179  // casadi_copy(d_nlp->lbz + nx_, ng_, d->lbdz_feas + nx_);
2180  // casadi_axpy(ng_, -1., d->z_feas + nx_, d->lbdz_feas + nx_);
2181  cg << cg.copy("d_nlp.lbz+"+str(nx_), ng_, "d.lbdz_feas+"+str(nx_)) << "\n";
2182  cg << cg.axpy(ng_, "-1.0", "d.z_feas+"+str(nx_), "d.lbdz_feas+"+str(nx_)) << "\n";
2183 
2184 
2185  // casadi_copy(d_nlp->lbz, nx_, d->lbdz_feas);
2186  // casadi_clip_min(d->lbdz_feas, nx_, -tr_rad, d->tr_mask);
2187  cg << cg.copy("d_nlp.lbz", nx_, "d.lbdz_feas") << "\n";
2188  cg << cg.clip_min("d.lbdz_feas", nx_, "-tr_rad", "d.tr_mask") << "\n";
2189 
2190 
2191  // casadi_axpy(nx_, -1., d->z_feas, d->lbdz_feas);
2192  // casadi_axpy(nx_, 1., d_nlp->z, d->lbdz_feas);
2193  cg << cg.axpy(nx_, "-1.0", "d.z_feas", "d.lbdz_feas") << "\n";
2194  cg << cg.axpy(nx_, "1.0", "d_nlp.z", "d.lbdz_feas") << "\n";
2195 
2196 
2197  // casadi_copy(d_nlp->lbz, nx_, d->z_tmp);
2198  // casadi_axpy(nx_, -1., d->z_feas, d->z_tmp);
2199  cg << cg.copy("d_nlp.lbz", nx_, "d.z_tmp") << "\n";
2200  cg << cg.axpy(nx_, "-1.0", "d.z_feas", "d.z_tmp") << "\n";
2201 
2202  // comparison of both vectors
2203  // casadi_vector_fmax(nx_, d->z_tmp, d->lbdz_feas, d->lbdz_feas);
2204  cg << cg.vector_fmax(nx_, "d.z_tmp", "d.lbdz_feas", "d.lbdz_feas");
2205 
2206  // casadi_copy(d_nlp->ubz, nx_, d->ubdz_feas);
2207  // casadi_clip_max(d->ubdz_feas, nx_, tr_rad, d->tr_mask);
2208  cg << cg.copy("d_nlp.ubz", nx_, "d.ubdz_feas") << "\n";
2209  cg << cg.clip_max("d.ubdz_feas", nx_, "tr_rad", "d.tr_mask") << ";\n";
2210 
2211  // casadi_axpy(nx_, -1., d->z_feas, d->ubdz_feas);
2212  // casadi_axpy(nx_, 1., d_nlp->z, d->ubdz_feas);
2213  cg << cg.axpy(nx_, "-1.0", "d.z_feas", "d.ubdz_feas") << "\n";
2214  cg << cg.axpy(nx_, "1.0", "d_nlp.z", "d.ubdz_feas") << "\n";
2215 
2216 
2217  // casadi_copy(d_nlp->ubz, nx_, d->z_tmp);
2218  // casadi_axpy(nx_, -1., d->z_feas, d->z_tmp);
2219  // casadi_vector_fmin(nx_, d->z_tmp, d->ubdz_feas, d->ubdz_feas);
2220  cg << cg.copy("d_nlp.ubz", nx_, "d.z_tmp") << "\n";
2221  cg << cg.axpy(nx_, "-1.0", "d.z_feas", "d.z_tmp") << "\n";
2222  cg << cg.vector_fmin(nx_, "d.z_tmp", "d.ubdz_feas", "d.ubdz_feas");
2223 
2224  // if (use_sqp_) {
2225  // int ret = solve_QP(m, d->Bk, d->gf_feas, d->lbdz_feas, d->ubdz_feas,
2226  // d->Jk, d->dx_feas, d->dlam_feas, 0);
2227  // } else {
2228  // int ret = solve_LP(m, d->gf_feas, d->lbdz_feas, d->ubdz_feas,
2229  // d->Jk, d->dx_feas, d->dlam_feas, 0);
2230  // }
2231  cg.comment("Just SQP implemented. Solve the feasible QP");
2232  codegen_qp_solve(cg, "d.Bk", "d.gf_feas", "d.lbdz_feas", "d.ubdz_feas",
2233  "d.Jk", "d.dx_feas", "d.dlam_feas", 0);
2234 
2235 
2236  // step_inf_norm = casadi_masked_norm_inf(nx_, d->dx_feas, d->tr_mask);
2237  cg << "step_inf_norm = " << cg.masked_norm_inf(nx_, "d.dx_feas", "d.tr_mask") << ";\n";
2238 
2239  // if (use_anderson_) {
2240  // anderson_acc_step_update(mem, j);
2241  // } else {
2242  // casadi_axpy(nx_, 1., d->dx_feas, d->z_feas);
2243  // }
2244  cg.comment("No Anderson Acceleration implemented yet.");
2245  cg << cg.axpy(nx_, "1.0", "d.dx_feas", "d.z_feas") << "\n";
2246 
2247  // Evaluate g
2248  // m->arg[0] = d->z_feas;
2249  // m->arg[1] = d_nlp->p;
2250  // m->res[0] = d->z_feas + nx_;
2251  // if (calc_function(m, "nlp_g")) {
2252  // uout() << "What does it mean that calc_function fails here??" << std::endl;
2253  // }
2254  cg.comment("Evaluate g");
2255  cg << "m_arg[0] = d.z_feas;\n";
2256  cg << "m_arg[1] = m_p;\n";
2257  cg << "m_res[0] = d.z_feas+" + str(nx_) + ";\n";
2258  nlp_g = cg(get_function("nlp_g"), "m_arg", "m_res", "m_iw", "m_w");
2259  // cg << "if (" + nlp_g + ") return 1;\n";
2260  cg << "if (" + nlp_g + ") return 100;\n";
2261 
2262  // prev_infeas = casadi_max_viol(nx_+ng_, d->z_feas, d_nlp->lbz, d_nlp->ubz);
2263  // curr_infeas = prev_infeas;
2264  // kappa = step_inf_norm/prev_step_inf_norm;
2265  cg << "prev_infeas =" << cg.max_viol(nx_+ng_, "d.z_feas", "d_nlp.lbz", "d_nlp.ubz") << ";\n";
2266  cg << "curr_infeas = prev_infeas;\n";
2267  cg << "kappa = step_inf_norm/prev_step_inf_norm;";
2268 
2269  // casadi_copy(d->dx, nx_, d->z_tmp);
2270  // casadi_axpy(nx_, -1., d->z_feas, d->z_tmp);
2271  // casadi_axpy(nx_, 1., d_nlp->z, d->z_tmp);
2272  // as_exac = casadi_norm_2(nx_, d->z_tmp) / casadi_norm_2(nx_, d->dx);
2273  cg << cg.copy("d.dx", nx_, "d.z_tmp") << "\n";
2274  cg << cg.axpy(nx_, "-1.0", "d.z_feas", "d.z_tmp") << "\n";
2275  cg << cg.axpy(nx_, "1.0", "d_nlp.z", "d.z_tmp") << "\n";
2276  cg.local("as_exac", "casadi_real");
2277  cg << "as_exac =" << cg.norm_2(nx_, "d.z_tmp") << "/" << cg.norm_2(nx_, "d.dx") << ";\n";
2278 
2279  cg << "printf(\"Kappa: %9.10f, Infeasibility: %9.10f, "
2280  "AsymptoticExctness: %9.10f\\n\", kappa, curr_infeas, as_exac);\n";
2281 
2282  // acc_as_exac += as_exac;
2283  cg << "acc_as_exac += as_exac;\n";
2284 
2285  // if (inner_iter % watchdog_ == 0) {
2286  // kappa_watchdog = step_inf_norm / watchdog_prev_inf_norm;
2287  // watchdog_prev_inf_norm = step_inf_norm;
2288  // print("Kappa watchdog: %9.10f\n", kappa_watchdog);
2289  // if (curr_infeas < feas_tol_ && as_exac < 0.5) {
2290  // // kappa_acceptance = true;
2291  // return 0;
2292  // }
2293 
2294  // if (kappa_watchdog > contraction_acceptance_value_ || acc_as_exac/watchdog_ > 0.5) {
2295  // // kappa_acceptance = false;
2296  // return -1;
2297  // }
2298  // // accumulated_as_ex = 0
2299  // acc_as_exac = 0.0;
2300  // }
2301  cg << "if (inner_iter % " << watchdog_ << "== 0) {\n";
2302  cg << "kappa_watchdog = step_inf_norm / watchdog_prev_inf_norm;\n";
2303  cg << "watchdog_prev_inf_norm = step_inf_norm;\n";
2304  cg << "printf(\"Kappa watchdog: %9.10f\\n\", kappa_watchdog);\n";
2305  cg << "if (curr_infeas < "<< feas_tol_ << "&& as_exac < 0.5) {\n";
2306  cg << "ret = 0;\n";
2307  cg << "break; \n";
2308  cg << "}\n";
2309  cg << "if (kappa_watchdog > " << contraction_acceptance_value_ << " || "
2310  << "acc_as_exac/" << watchdog_ << "> 0.5) {\n";
2311  cg << "ret = -1;\n";
2312  cg << "break;\n";
2313  cg << "}\n";
2314 
2315  cg << "acc_as_exac = 0.0;\n";
2316  cg << "}\n"; //Added
2317 
2318  // prev_step_inf_norm = step_inf_norm;
2319  cg << "prev_step_inf_norm = step_inf_norm;\n";
2320  // }
2321  cg << "}\n";
2322  //maximum iterations reached
2323  // kappa_acceptance = false;
2324 // return -1;
2325 // }
2326  cg << "if (inner_iter >=" << max_inner_iter_ << ") {\n";
2327  cg << "ret = -1;\n";
2328  cg << "}\n";
2329 
2330  }
2331 
// Get all solver statistics: extend the base-class stats with the return
// status string and the SQP iteration count from this solver's memory block.
2333  Dict stats = Nlpsol::get_stats(mem);
2334  auto m = static_cast<FeasiblesqpmethodMemory*>(mem);
2335  stats["return_status"] = m->return_status;
2336  stats["iter_count"] = m->iter_count;
2337  return stats;
2338  }
2339 
2341  int version = s.version("Feasiblesqpmethod", 1, 3);
2342  s.unpack("Feasiblesqpmethod::qpsol", qpsol_);
2343  if (version>=3) {
2344  s.unpack("Feasiblesqpmethod::qpsol_ela", qpsol_ela_);
2345  }
2346  s.unpack("Feasiblesqpmethod::exact_hessian", exact_hessian_);
2347  s.unpack("Feasiblesqpmethod::max_iter", max_iter_);
2348  s.unpack("Feasiblesqpmethod::min_iter", min_iter_);
2349  s.unpack("Feasiblesqpmethod::lbfgs_memory", lbfgs_memory_);
2350  s.unpack("Feasiblesqpmethod::tol_pr_", tol_pr_);
2351  s.unpack("Feasiblesqpmethod::tol_du_", tol_du_);
2352  s.unpack("Feasiblesqpmethod::print_header", print_header_);
2353  s.unpack("Feasiblesqpmethod::print_iteration", print_iteration_);
2354  s.unpack("Feasiblesqpmethod::print_status", print_status_);
2355 
2356  // if (version>=3) {
2357  // s.unpack("Feasiblesqpmethod::elastic_mode", elastic_mode_);
2358  // s.unpack("Feasiblesqpmethod::gamma_0", gamma_0_);
2359  // s.unpack("Feasiblesqpmethod::gamma_max", gamma_max_);
2360  // s.unpack("Feasiblesqpmethod::gamma_1_min", gamma_1_min_);
2361  // s.unpack("Feasiblesqpmethod::init_feasible", init_feasible_);
2362  // s.unpack("Feasiblesqpmethod::so_corr", so_corr_);
2363  // } else {
2364  // // elastic_mode_ = false;
2365  // // gamma_0_ = 0;
2366  // // gamma_max_ = 0;
2367  // // gamma_1_min_ = 0;
2368  // init_feasible_ = false;
2369  // // so_corr_ = false;
2370  // }
2371 
2372  s.unpack("Feasiblesqpmethod::Hsp", Hsp_);
2373  if (version==1) {
2374  Sparsity Hrsp;
2375  s.unpack("Feasiblesqpmethod::Hrsp", Hrsp);
2376  }
2377  s.unpack("Feasiblesqpmethod::Asp", Asp_);
2378  if (version==1) {
2379  double convexify_margin;
2380  s.unpack("Feasiblesqpmethod::convexify_margin", convexify_margin);
2381  char convexify_strategy;
2382  s.unpack("Feasiblesqpmethod::convexify_strategy", convexify_strategy);
2383  casadi_assert(convexify_strategy==0, "deserializtion failed.");
2384  bool Hsp_project;
2385  s.unpack("Feasiblesqpmethod::Hsp_project", Hsp_project);
2386  bool scc_transform;
2387  s.unpack("Feasiblesqpmethod::scc_transform", scc_transform);
2388  std::vector<casadi_int> scc_offset;
2389  s.unpack("Feasiblesqpmethod::scc_offset", scc_offset);
2390  std::vector<casadi_int> scc_mapping;
2391  s.unpack("Feasiblesqpmethod::scc_mapping", scc_mapping);
2392  casadi_int max_iter_eig;
2393  s.unpack("Feasiblesqpmethod::max_iter_eig", max_iter_eig);
2394  casadi_int block_size;
2395  s.unpack("Feasiblesqpmethod::block_size", block_size);
2396  Sparsity scc_sp;
2397  s.unpack("Feasiblesqpmethod::scc_sp", scc_sp);
2398  convexify_ = false;
2399  }
2400  if (version>=2) {
2401  s.unpack("Feasiblesqpmethod::convexify", convexify_);
2402  if (convexify_) Convexify::deserialize(s, "Feasiblesqpmethod::", convexify_data_);
2403  }
2404  set_feasiblesqpmethod_prob();
2405  }
2406 
// Serialize the solver state (stream version 3).
// NOTE(review): unlike the version>=3 path of the deserializing
// constructor, qpsol_ela_ is NOT packed here (commented out below), while
// init_feasible_ IS packed but never unpacked there. A version-3 stream
// produced by this function will therefore not round-trip through the
// deserializing constructor -- verify and align both sides.
2409  s.version("Feasiblesqpmethod", 3);
2410  s.pack("Feasiblesqpmethod::qpsol", qpsol_);
2411  // s.pack("Feasiblesqpmethod::qpsol_ela", qpsol_ela_);
2412  s.pack("Feasiblesqpmethod::exact_hessian", exact_hessian_);
2413  s.pack("Feasiblesqpmethod::max_iter", max_iter_);
2414  s.pack("Feasiblesqpmethod::min_iter", min_iter_);
2415  s.pack("Feasiblesqpmethod::lbfgs_memory", lbfgs_memory_);
2416  s.pack("Feasiblesqpmethod::tol_pr_", tol_pr_);
2417  s.pack("Feasiblesqpmethod::tol_du_", tol_du_);
2418  s.pack("Feasiblesqpmethod::print_header", print_header_);
2419  s.pack("Feasiblesqpmethod::print_iteration", print_iteration_);
2420  s.pack("Feasiblesqpmethod::print_status", print_status_);
2421
2422  s.pack("Feasiblesqpmethod::init_feasible", init_feasible_);
2423  s.pack("Feasiblesqpmethod::Hsp", Hsp_);
2424  s.pack("Feasiblesqpmethod::Asp", Asp_);
2425  s.pack("Feasiblesqpmethod::convexify", convexify_);
2426  if (convexify_) Convexify::serialize(s, "Feasiblesqpmethod::", convexify_data_);
2427  }
2428 } // namespace casadi
Helper class for C code generation.
std::string axpy(casadi_int n, const std::string &a, const std::string &x, const std::string &y)
Codegen axpy: y += a*x.
std::string clip_min(const std::string &x, casadi_int n, const std::string &min, const std::string &mask)
Codegen clip_min: Clips the smaller entries in a vector than min to the min.
std::string add_dependency(const Function &f)
Add a function dependency.
std::string arg(casadi_int i) const
Refer to argument.
std::string norm_2(casadi_int n, const std::string &x)
norm_2
std::string copy(const std::string &arg, std::size_t n, const std::string &res)
Create a copy operation.
void comment(const std::string &s)
Write a comment line (ignored if not verbose)
std::string masked_norm_inf(casadi_int n, const std::string &x, const std::string &mask)
codegen masked_norm_inf: The mask tells what entry is used in the inf-norm.
std::string constant(const std::vector< casadi_int > &v)
Represent an array constant; adding it when new.
std::string fmin(const std::string &x, const std::string &y)
fmin
std::string printf(const std::string &str, const std::vector< std::string > &arg=std::vector< std::string >())
Printf.
std::string bilin(const std::string &A, const Sparsity &sp_A, const std::string &x, const std::string &y)
Codegen bilinear form.
std::string bound_consistency(casadi_int n, const std::string &x, const std::string &lam, const std::string &lbx, const std::string &ubx)
bound_consistency
std::string vector_fmax(casadi_int n, const std::string &x, const std::string &y, const std::string &z)
Codegen vector_fmax: Takes vectorwise max of a vector and writes the result to second vector.
std::string mv(const std::string &x, const Sparsity &sp_x, const std::string &y, const std::string &z, bool tr)
Codegen sparse matrix-vector multiplication.
void local(const std::string &name, const std::string &type, const std::string &ref="")
Declare a local variable.
std::string res(casadi_int i) const
Refer to result.
std::string norm_inf(casadi_int n, const std::string &x)
norm_inf
std::string vector_fmin(casadi_int n, const std::string &x, const std::string &y, const std::string &z)
Codegen vector_fmin: Takes vectorwise min of a vector and writes the result to second vector.
void init_local(const std::string &name, const std::string &def)
Specify the default value for a local variable.
std::string dot(casadi_int n, const std::string &x, const std::string &y)
Codegen inner product.
std::string clip_max(const std::string &x, casadi_int n, const std::string &min, const std::string &mask)
Codegen clip_max: Clips the larger entries in a vector than max to the max.
void copy_check(const std::string &arg, std::size_t n, const std::string &res, bool check_lhs=true, bool check_rhs=true)
std::string max_viol(casadi_int n, const std::string &x, const std::string &lb, const std::string &ub)
max_viol
std::string sparsity(const Sparsity &sp, bool canonical=true)
void copy_default(const std::string &arg, std::size_t n, const std::string &res, const std::string &def, bool check_rhs=true)
std::string clear(const std::string &res, std::size_t n)
Create a fill operation.
void add_auxiliary(Auxiliary f, const std::vector< std::string > &inst={"casadi_real"})
Add a built-in auxiliary function.
static void serialize(SerializingStream &s, const std::string &prefix, const ConvexifyData &d)
Definition: convexify.cpp:111
static Sparsity setup(ConvexifyData &d, const Sparsity &H, const Dict &opts=Dict(), bool inplace=true)
Definition: convexify.cpp:166
static MXNode * deserialize(DeserializingStream &s)
Deserialize without type information.
Definition: convexify.hpp:104
Helper class for Serialization.
void unpack(Sparsity &e)
Reconstruct an object from the input stream.
void version(const std::string &name, int v)
void codegen_eval_tr_ratio(CodeGenerator &cg, const std::string &val_f, const std::string &val_f_corr, const std::string &val_m_k) const
void codegen_body(CodeGenerator &g) const override
Generate code for the function body.
Feasiblesqpmethod(const std::string &name, const Function &nlp)
static const std::string meta_doc
A documentation string.
void codegen_eval_m_k(CodeGenerator &cg) const
int step_update(void *mem, double tr_ratio) const
static const Options options_
Options.
casadi_int lbfgs_memory_
Memory size of L-BFGS method.
void codegen_step_update(CodeGenerator &cg, const std::string &tr_ratio) const
int init_mem(void *mem) const override
Initalize memory block.
void print_iteration() const
Print iteration header.
bool init_feasible_
Initialize feasible qp's.
ConvexifyData convexify_data_
Data for convexification.
void codegen_declarations(CodeGenerator &g) const override
Generate code for the declarations of the C function.
void codegen_feasibility_iterations(CodeGenerator &cg, const std::string &tr_rad) const
bool use_anderson_
Use Anderson Acceleration.
Dict get_stats(void *mem) const override
Get all statistics.
void init(const Dict &opts) override
Initialize.
int feasibility_iterations(void *mem, double tr_rad) const
int solve(void *mem) const override
virtual int solve_QP(FeasiblesqpmethodMemory *m, const double *H, const double *g, const double *lbdz, const double *ubdz, const double *A, double *x_opt, double *dlam, int mode) const
casadi_int max_iter_
Maximum, minimum number of SQP iterations.
virtual int solve_LP(FeasiblesqpmethodMemory *m, const double *g, const double *lbdz, const double *ubdz, const double *A, double *x_opt, double *dlam, int mode) const
casadi_feasiblesqpmethod_prob< double > p_
void tr_update(void *mem, double &tr_rad, double tr_ratio) const
std::vector< double > tr_scale_vector_
static Nlpsol * creator(const std::string &name, const Function &nlp)
Create a new NLP Solver.
double eval_tr_ratio(double val_f, double val_f_corr, double val_m_k) const
void anderson_acc_step_update(void *mem, casadi_int iter_index) const
Function qpsol_ela_
QP solver for elastic mode subproblems.
Function qpsol_
QP solver for the subproblems.
void codegen_qp_solve(CodeGenerator &cg, const std::string &H, const std::string &g, const std::string &lbdz, const std::string &ubdz, const std::string &A, const std::string &x_opt, const std::string &dlam, int mode) const
static ProtoFunction * deserialize(DeserializingStream &s)
Deserialize into MX.
bool exact_hessian_
Exact Hessian?
double eval_m_k(void *mem) const
void anderson_acc_init_memory(void *mem, double *step, double *iterate) const
void set_work(void *mem, const double **&arg, double **&res, casadi_int *&iw, double *&w) const override
Set the (persistent) work vectors.
void anderson_acc_update_memory(void *mem, double *step, double *iterate) const
double tol_pr_
Tolerance of primal and dual infeasibility.
void codegen_tr_update(CodeGenerator &cg, const std::string &tr_rad, const std::string &tr_ratio) const
void serialize_body(SerializingStream &s) const override
Serialize an object without type information.
void alloc_iw(size_t sz_iw, bool persistent=false)
Ensure required length of iw field.
size_t sz_w() const
Get required length of w field.
void alloc_w(size_t sz_w, bool persistent=false)
Ensure required length of w field.
void alloc(const Function &f, bool persistent=false, int num_threads=1)
Ensure work vectors long enough to evaluate function.
size_t sz_iw() const
Get required length of iw field.
Function object.
Definition: function.hpp:60
casadi_int n_out() const
Get the number of function outputs.
Definition: function.cpp:823
casadi_int n_in() const
Get the number of function inputs.
Definition: function.cpp:819
NLP solver storage class.
Definition: nlpsol_impl.hpp:59
Dict get_stats(void *mem) const override
Get all statistics.
Definition: nlpsol.cpp:1162
static const Options options_
Options.
void codegen_body_enter(CodeGenerator &g) const override
Generate code for the function body.
Definition: nlpsol.cpp:1179
void codegen_declarations(CodeGenerator &g) const override
Generate code for the declarations of the C function.
Definition: nlpsol.cpp:1250
void init(const Dict &opts) override
Initialize.
Definition: nlpsol.cpp:420
casadi_int ng_
Number of constraints.
Definition: nlpsol_impl.hpp:69
int init_mem(void *mem) const override
Initalize memory block.
Definition: nlpsol.cpp:603
casadi_nlpsol_prob< double > p_nlp_
Definition: nlpsol_impl.hpp:63
void serialize_body(SerializingStream &s) const override
Serialize an object without type information.
Definition: nlpsol.cpp:1306
casadi_int np_
Number of parameters.
Definition: nlpsol_impl.hpp:72
int callback(NlpsolMemory *m) const
Definition: nlpsol.cpp:1121
casadi_int nx_
Number of variables.
Definition: nlpsol_impl.hpp:66
void set_work(void *mem, const double **&arg, double **&res, casadi_int *&iw, double *&w) const override
Set the (persistent) work vectors.
Definition: nlpsol.cpp:795
bool bound_consistency_
Options.
Definition: nlpsol_impl.hpp:98
void set_function(const Function &fcn, const std::string &fname, bool jit=false)
Function create_function(const Function &oracle, const std::string &fname, const std::vector< std::string > &s_in, const std::vector< std::string > &s_out, const Function::AuxOut &aux=Function::AuxOut(), const Dict &opts=Dict())
int calc_function(OracleMemory *m, const std::string &fcn, const double *const *arg=nullptr, int thread_id=0) const
std::vector< std::string > get_function() const override
Get list of dependency functions.
bool has_function(const std::string &fname) const override
static void registerPlugin(const Plugin &plugin, bool needs_lock=true)
Register an integrator in the factory.
void print(const char *fmt,...) const
C-style formatted printing during evaluation.
bool verbose_
Verbose printout.
void clear_mem()
Clear all memory (called from destructor)
Helper class for Serialization.
void version(const std::string &name, int v)
void pack(const Sparsity &e)
Serializes an object to the output stream.
General sparsity class.
Definition: sparsity.hpp:106
static Sparsity dense(casadi_int nrow, casadi_int ncol=1)
Create a dense rectangular sparsity pattern *.
Definition: sparsity.cpp:1012
casadi_int nnz() const
Get the number of (structural) non-zeros.
Definition: sparsity.cpp:148
bool is_symmetric() const
Is symmetric?
Definition: sparsity.cpp:317
Function conic(const std::string &name, const std::string &solver, const SpDict &qp, const Dict &opts)
Definition: conic.cpp:44
The casadi namespace.
Definition: archiver.cpp:28
@ NLPSOL_P
Value of fixed parameters (np x 1)
Definition: nlpsol.hpp:198
@ NLPSOL_UBX
Decision variables upper bound (nx x 1), default +inf.
Definition: nlpsol.hpp:202
@ NLPSOL_X0
Decision variables, initial guess (nx x 1)
Definition: nlpsol.hpp:196
@ NLPSOL_LAM_G0
Lagrange multipliers for bounds on G, initial guess (ng x 1)
Definition: nlpsol.hpp:210
@ NLPSOL_LAM_X0
Lagrange multipliers for bounds on X, initial guess (nx x 1)
Definition: nlpsol.hpp:208
@ NLPSOL_NUM_IN
Definition: nlpsol.hpp:211
@ NLPSOL_LBG
Constraints lower bound (ng x 1), default -inf.
Definition: nlpsol.hpp:204
@ NLPSOL_LBX
Decision variables lower bound (nx x 1), default -inf.
Definition: nlpsol.hpp:200
T1 casadi_max_viol(casadi_int n, const T1 *x, const T1 *lb, const T1 *ub)
Largest bound violation.
@ NLPSOL_G
Constraints function at the optimal solution (ng x 1)
Definition: nlpsol.hpp:221
@ NLPSOL_X
Decision variables at the optimal solution (nx x 1)
Definition: nlpsol.hpp:217
@ NLPSOL_NUM_OUT
Definition: nlpsol.hpp:228
@ NLPSOL_LAM_P
Lagrange multipliers for bounds on P at the solution (np x 1)
Definition: nlpsol.hpp:227
@ NLPSOL_F
Cost function value at the optimal solution (1 x 1)
Definition: nlpsol.hpp:219
@ NLPSOL_LAM_G
Lagrange multipliers for bounds on G at the solution (ng x 1)
Definition: nlpsol.hpp:225
@ NLPSOL_LAM_X
Lagrange multipliers for bounds on X at the solution (nx x 1)
Definition: nlpsol.hpp:223
@ CONIC_UBA
dense, (nc x 1)
Definition: conic.hpp:181
@ CONIC_X0
dense, (n x 1)
Definition: conic.hpp:187
@ CONIC_A
The matrix A: sparse, (nc x n) - product with x must be dense.
Definition: conic.hpp:177
@ CONIC_G
The vector g: dense, (n x 1)
Definition: conic.hpp:175
@ CONIC_LBA
dense, (nc x 1)
Definition: conic.hpp:179
@ CONIC_UBX
dense, (n x 1)
Definition: conic.hpp:185
@ CONIC_H
Definition: conic.hpp:173
@ CONIC_LAM_A0
dense
Definition: conic.hpp:191
@ CONIC_LBX
dense, (n x 1)
Definition: conic.hpp:183
@ CONIC_LAM_X0
dense
Definition: conic.hpp:189
T1 casadi_bilin(const T1 *A, const casadi_int *sp_A, const T1 *x, const T1 *y)
void casadi_copy(const T1 *x, casadi_int n, T1 *y)
COPY: y <-x.
void casadi_fill(T1 *x, casadi_int n, T1 alpha)
FILL: x <- alpha.
@ OT_DOUBLEVECTOR
T1 casadi_norm_2(casadi_int n, const T1 *x)
NORM_2: ||x||_2 -> return.
int CASADI_NLPSOL_FEASIBLESQPMETHOD_EXPORT casadi_register_nlpsol_feasiblesqpmethod(Nlpsol::Plugin *plugin)
std::string str(const T &v)
String representation, any type.
GenericType::Dict Dict
C++ equivalent of Python's dict or MATLAB's struct.
T1 casadi_dot(casadi_int n, const T1 *x, const T1 *y)
Inner product.
void CASADI_NLPSOL_FEASIBLESQPMETHOD_EXPORT casadi_load_nlpsol_feasiblesqpmethod()
T * get_ptr(std::vector< T > &v)
Get a pointer to the data contained in the vector.
void casadi_axpy(casadi_int n, T1 alpha, const T1 *x, T1 *y)
AXPY: y <- a*x + y.
T1 casadi_norm_inf(casadi_int n, const T1 *x)
void casadi_clear(T1 *x, casadi_int n)
CLEAR: x <- 0.
void casadi_mv(const T1 *x, const casadi_int *sp_x, const T1 *y, T1 *z, casadi_int tr)
Sparse matrix-vector multiplication: z <- z + x*y.
std::ostream & uout()
@ SOLVER_RET_NAN
@ SOLVER_RET_LIMITED
@ CONIC_X
The primal solution.
Definition: conic.hpp:201
@ CONIC_LAM_A
The dual solution corresponding to linear bounds.
Definition: conic.hpp:205
@ CONIC_COST
The optimal cost.
Definition: conic.hpp:203
@ CONIC_LAM_X
The dual solution corresponding to simple bounds.
Definition: conic.hpp:207
casadi_convexify_config< double > config
Definition: mx.hpp:61
casadi_int sz_iw
Definition: mx.hpp:62
casadi_int sz_w
Definition: mx.hpp:63
const char * return_status
Last return status.
casadi_feasiblesqpmethod_data< double > d
casadi_nlpsol_data< double > d_nlp
Definition: nlpsol_impl.hpp:42
Options metadata for a class.
Definition: options.hpp:40
std::map< std::string, FStats > fstats
void add_stat(const std::string &s)
const casadi_nlpsol_prob< T1 > * nlp