blocksqp.cpp
/*
 * This file is part of CasADi.
 *
 * CasADi -- A symbolic framework for dynamic optimization.
 * Copyright (C) 2010-2023 Joel Andersson, Joris Gillis, Moritz Diehl,
 * KU Leuven. All rights reserved.
 * Copyright (C) 2011-2014 Greg Horn
 *
 * CasADi is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * CasADi is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with CasADi; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */


#include "blocksqp.hpp"
#include "casadi/core/casadi_misc.hpp"
#include "casadi/core/conic.hpp"

namespace casadi {

  extern "C"
  int CASADI_NLPSOL_BLOCKSQP_EXPORT
  casadi_register_nlpsol_blocksqp(Nlpsol::Plugin* plugin) {
    plugin->creator = Blocksqp::creator;
    plugin->name = "blocksqp";
    plugin->doc = Blocksqp::meta_doc.c_str();
    plugin->version = CASADI_VERSION;
    plugin->options = &Blocksqp::options_;
    plugin->deserialize = &Blocksqp::deserialize;
    return 0;
  }

  extern "C"
  void CASADI_NLPSOL_BLOCKSQP_EXPORT casadi_load_nlpsol_blocksqp() {
    Nlpsol::registerPlugin(casadi_register_nlpsol_blocksqp);
  }
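
  // Standard CasADi plugin pattern: casadi_register_nlpsol_blocksqp fills the
  // plugin table and casadi_load_nlpsol_blocksqp registers it when the shared
  // library is loaded. From user code the solver is then obtained through the
  // generic factory, e.g. (illustrative):
  //   Function solver = nlpsol("solver", "blocksqp", nlp);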

  Blocksqp::Blocksqp(const std::string& name, const Function& nlp)
    : Nlpsol(name, nlp) {
  }

  Blocksqp::~Blocksqp() {
    clear_mem();
  }

  const Options Blocksqp::options_
  = {{&Nlpsol::options_},
     {{"qpsol",
       {OT_STRING,
        "The QP solver to be used by the SQP method"}},
      {"qpsol_options",
       {OT_DICT,
        "Options to be passed to the QP solver"}},
      {"linsol",
       {OT_STRING,
        "The linear solver to be used by the QP method"}},
      {"print_header",
       {OT_BOOL,
        "Print solver header at startup"}},
      {"print_iteration",
       {OT_BOOL,
        "Print SQP iterations"}},
      {"eps",
       {OT_DOUBLE,
        "Values smaller than this are regarded as numerically zero"}},
      {"opttol",
       {OT_DOUBLE,
        "Optimality tolerance"}},
      {"nlinfeastol",
       {OT_DOUBLE,
        "Nonlinear feasibility tolerance"}},
      {"schur",
       {OT_BOOL,
        "Use qpOASES Schur complement approach"}},
      {"globalization",
       {OT_BOOL,
        "Enable globalization"}},
      {"restore_feas",
       {OT_BOOL,
        "Use feasibility restoration phase"}},
      {"max_line_search",
       {OT_INT,
        "Maximum number of steps in line search"}},
      {"max_consec_reduced_steps",
       {OT_INT,
        "Maximum number of consecutive reduced steps"}},
      {"max_consec_skipped_updates",
       {OT_INT,
        "Maximum number of consecutive skipped updates"}},
      {"max_iter",
       {OT_INT,
        "Maximum number of SQP iterations"}},
      {"warmstart",
       {OT_BOOL,
        "Use warmstarting"}},
      {"qp_init",
       {OT_BOOL,
        "Re-initialize the qpOASES QP object for every solve "
        "(no warmstarting from the previous solution)"}},
      {"max_it_qp",
       {OT_INT,
        "Maximum number of QP iterations per SQP iteration"}},
      {"block_hess",
       {OT_INT,
        "Blockwise Hessian approximation?"}},
      {"hess_scaling",
       {OT_INT,
        "Scaling strategy for Hessian approximation"}},
      {"fallback_scaling",
       {OT_INT,
        "If indefinite update is used, the scaling strategy for the fallback Hessian"}},
      {"max_time_qp",
       {OT_DOUBLE,
        "Maximum time in seconds per QP solve per SQP iteration"}},
      {"ini_hess_diag",
       {OT_DOUBLE,
        "Initial Hessian guess: diagonal matrix diag(iniHessDiag)"}},
      {"col_eps",
       {OT_DOUBLE,
        "Epsilon for COL scaling strategy"}},
      {"col_tau1",
       {OT_DOUBLE,
        "tau1 for COL scaling strategy"}},
      {"col_tau2",
       {OT_DOUBLE,
        "tau2 for COL scaling strategy"}},
      {"hess_damp",
       {OT_INT,
        "Activate Powell damping for BFGS"}},
      {"hess_damp_fac",
       {OT_DOUBLE,
        "Damping factor for BFGS Powell modification"}},
      {"hess_update",
       {OT_INT,
        "Type of Hessian approximation"}},
      {"fallback_update",
       {OT_INT,
        "If indefinite update is used, the type of fallback strategy"}},
      {"hess_lim_mem",
       {OT_INT,
        "Full or limited memory"}},
      {"hess_memsize",
       {OT_INT,
        "Memory size for L-BFGS updates"}},
      {"which_second_derv",
       {OT_INT,
        "For which block should second derivatives be provided by the user"}},
      {"skip_first_globalization",
       {OT_BOOL,
        "No globalization strategy in first iteration"}},
      {"conv_strategy",
       {OT_INT,
        "Convexification strategy"}},
      {"max_conv_qp",
       {OT_INT,
        "How many additional QPs may be solved for convexification per iteration?"}},
      {"max_soc_iter",
       {OT_INT,
        "Maximum number of SOC line search iterations"}},
      {"gamma_theta",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"gamma_f",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"kappa_soc",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"kappa_f",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"theta_max",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"theta_min",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"delta",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"s_theta",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"s_f",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"kappa_minus",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"kappa_plus",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"kappa_plus_max",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"delta_h0",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"eta",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"obj_lo",
       {OT_DOUBLE,
        "Lower bound on objective function [-inf]"}},
      {"obj_up",
       {OT_DOUBLE,
        "Upper bound on objective function [inf]"}},
      {"rho",
       {OT_DOUBLE,
        "Feasibility restoration phase parameter"}},
      {"zeta",
       {OT_DOUBLE,
        "Feasibility restoration phase parameter"}},
      {"print_maxit_reached",
       {OT_BOOL,
        "Print error when maximum number of SQP iterations reached"}}
     }
  };
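
  // Illustrative option usage (keys as listed above; the values shown are
  // only examples):
  //   Dict opts;
  //   opts["hess_update"] = 2;   // BFGS updates
  //   opts["max_iter"] = 200;
  //   Function solver = nlpsol("solver", "blocksqp", nlp, opts);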

  void Blocksqp::init(const Dict& opts) {
    // Call the init method of the base class
    Nlpsol::init(opts);

    // Set default options
    //string qpsol_plugin = "qpoases";
    //Dict qpsol_options;
    linsol_plugin_ = "ma27";
    print_header_ = true;
    print_iteration_ = true;
    eps_ = 1.0e-16;
    opttol_ = 1.0e-6;
    nlinfeastol_ = 1.0e-6;
    schur_ = true;
    globalization_ = true;
    restore_feas_ = true;
    max_line_search_ = 20;
    max_consec_reduced_steps_ = 100;
    max_consec_skipped_updates_ = 100;
    max_iter_ = 100;
    warmstart_ = false;
    max_it_qp_ = 5000;
    block_hess_ = true;
    hess_scaling_ = 2;
    fallback_scaling_ = 4;
    max_time_qp_ = 10000.0;
    ini_hess_diag_ = 1.0;
    col_eps_ = 0.1;
    col_tau1_ = 0.5;
    col_tau2_ = 1.0e4;
    hess_damp_ = 1;
    hess_damp_fac_ = 0.2;
    hess_update_ = 1;
    fallback_update_ = 2;
    hess_lim_mem_ = 1;
    hess_memsize_ = 20;
    which_second_derv_ = 0;
    skip_first_globalization_ = false;
    conv_strategy_ = 0;
    max_conv_qp_ = 1;
    max_soc_iter_ = 3;
    gamma_theta_ = 1.0e-5;
    gamma_f_ = 1.0e-5;
    kappa_soc_ = 0.99;
    kappa_f_ = 0.999;
    theta_max_ = 1.0e7;
    theta_min_ = 1.0e-5;
    delta_ = 1.0;
    s_theta_ = 1.1;
    s_f_ = 2.3;
    kappa_minus_ = 0.333;
    kappa_plus_ = 8.0;
    kappa_plus_max_ = 100.0;
    delta_h0_ = 1.0e-4;
    eta_ = 1.0e-4;
    obj_lo_ = -inf;
    obj_up_ = inf;
    rho_ = 1.0e3;
    zeta_ = 1.0e-3;
    print_maxit_reached_ = true;
    qp_init_ = true;

    // Read user options
    for (auto&& op : opts) {
      if (op.first=="qpsol") {
        //qpsol_plugin = op.second.to_string();
        casadi_warning("Option 'qpsol' currently not supported, ignored");
      } else if (op.first=="qpsol_options") {
        //qpsol_options = op.second;
        casadi_warning("Option 'qpsol_options' currently not supported, ignored");
      } else if (op.first=="linsol") {
        linsol_plugin_ = std::string(op.second);
      } else if (op.first=="print_header") {
        print_header_ = op.second;
      } else if (op.first=="print_iteration") {
        print_iteration_ = op.second;
      } else if (op.first=="eps") {
        eps_ = op.second;
      } else if (op.first=="opttol") {
        opttol_ = op.second;
      } else if (op.first=="nlinfeastol") {
        nlinfeastol_ = op.second;
      } else if (op.first=="schur") {
        schur_ = op.second;
      } else if (op.first=="globalization") {
        globalization_ = op.second;
      } else if (op.first=="restore_feas") {
        restore_feas_ = op.second;
      } else if (op.first=="max_line_search") {
        max_line_search_ = op.second;
      } else if (op.first=="max_consec_reduced_steps") {
        max_consec_reduced_steps_ = op.second;
      } else if (op.first=="max_consec_skipped_updates") {
        max_consec_skipped_updates_ = op.second;
      } else if (op.first=="max_iter") {
        max_iter_ = op.second;
      } else if (op.first=="warmstart") {
        warmstart_ = op.second;
      } else if (op.first=="qp_init") {
        qp_init_ = op.second;
      } else if (op.first=="max_it_qp") {
        max_it_qp_ = op.second;
      } else if (op.first=="block_hess") {
        block_hess_ = op.second;
      } else if (op.first=="hess_scaling") {
        hess_scaling_ = op.second;
      } else if (op.first=="fallback_scaling") {
        fallback_scaling_ = op.second;
      } else if (op.first=="max_time_qp") {
        max_time_qp_ = op.second;
      } else if (op.first=="ini_hess_diag") {
        ini_hess_diag_ = op.second;
      } else if (op.first=="col_eps") {
        col_eps_ = op.second;
      } else if (op.first=="col_tau1") {
        col_tau1_ = op.second;
      } else if (op.first=="col_tau2") {
        col_tau2_ = op.second;
      } else if (op.first=="hess_damp") {
        hess_damp_ = op.second;
      } else if (op.first=="hess_damp_fac") {
        hess_damp_fac_ = op.second;
      } else if (op.first=="hess_update") {
        hess_update_ = op.second;
      } else if (op.first=="fallback_update") {
        fallback_update_ = op.second;
      } else if (op.first=="hess_lim_mem") {
        hess_lim_mem_ = op.second;
      } else if (op.first=="hess_memsize") {
        hess_memsize_ = op.second;
      } else if (op.first=="which_second_derv") {
        which_second_derv_ = op.second;
      } else if (op.first=="skip_first_globalization") {
        skip_first_globalization_ = op.second;
      } else if (op.first=="conv_strategy") {
        conv_strategy_ = op.second;
      } else if (op.first=="max_conv_qp") {
        max_conv_qp_ = op.second;
      } else if (op.first=="max_soc_iter") {
        max_soc_iter_ = op.second;
      } else if (op.first=="gamma_theta") {
        gamma_theta_ = op.second;
      } else if (op.first=="gamma_f") {
        gamma_f_ = op.second;
      } else if (op.first=="kappa_soc") {
        kappa_soc_ = op.second;
      } else if (op.first=="kappa_f") {
        kappa_f_ = op.second;
      } else if (op.first=="theta_max") {
        theta_max_ = op.second;
      } else if (op.first=="theta_min") {
        theta_min_ = op.second;
      } else if (op.first=="delta") {
        delta_ = op.second;
      } else if (op.first=="s_theta") {
        s_theta_ = op.second;
      } else if (op.first=="s_f") {
        s_f_ = op.second;
      } else if (op.first=="kappa_minus") {
        kappa_minus_ = op.second;
      } else if (op.first=="kappa_plus") {
        kappa_plus_ = op.second;
      } else if (op.first=="kappa_plus_max") {
        kappa_plus_max_ = op.second;
      } else if (op.first=="delta_h0") {
        delta_h0_ = op.second;
      } else if (op.first=="eta") {
        eta_ = op.second;
      } else if (op.first=="obj_lo") {
        obj_lo_ = op.second;
      } else if (op.first=="obj_up") {
        obj_up_ = op.second;
      } else if (op.first=="rho") {
        rho_ = op.second;
      } else if (op.first=="zeta") {
        zeta_ = op.second;
      } else if (op.first=="print_maxit_reached") {
        print_maxit_reached_ = op.second;
      }
    }

    // If second derivatives are provided for the constraints, switch to
    // the finite-difference Hessian approximation (convenience)
    if (which_second_derv_ == 2) {
      hess_update_ = 4;
      block_hess_ = true;
    }

    // If we don't use limited memory BFGS, only one vector needs to be stored.
    if (!hess_lim_mem_) hess_memsize_ = 1;
    if (!schur_ && hess_update_ == 1) {
      print("***WARNING: SR1 update only works with qpOASES Schur complement version. "
            "Using BFGS updates instead.\n");
      hess_update_ = 2;
      hess_scaling_ = fallback_scaling_;
    }

    // Setup feasibility restoration phase
    if (restore_feas_) {
      // Get the original NLP
      Function nlp = oracle_;
      std::vector<MX> resv;
      std::vector<MX> argv = nlp.mx_in();

      // Build the feasibility restoration phase NLP
      MX p = MX::sym("p", nlp.size1_in("x"));
      MX s = MX::sym("s", nlp.size1_out("g"));

      MX d = fmin(1.0, 1.0/abs(p)) * (argv.at(0) - p);
      MX f_rp = 0.5 * rho_ * dot(s, s) + zeta_/2.0 * dot(d, d);
      MX g_rp = nlp(argv).at(1) - s;

      MXDict nlp_rp = {{"x", MX::vertcat({argv.at(0), s})},
                       {"p", MX::vertcat({argv.at(1), p})},
                       {"f", f_rp},
                       {"g", g_rp}};
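
      // In symbols, the restoration NLP built above reads (cf. an Ipopt-style
      // restoration phase):
      //   minimize    rho/2*||s||^2 + zeta/2*||D*(x - x_ref)||^2
      //   subject to  g(x) - s within the original bounds on g,
      // where D = diag(fmin(1, 1/|x_ref|)) and the reference point x_ref is
      // passed in through the appended parameter vector.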

      // Set options for the SQP method for the restoration problem
      Dict solver_options;
      solver_options["globalization"] = true;
      solver_options["which_second_derv"] = 0;
      solver_options["restore_feas"] = false;
      solver_options["hess_update"] = 2;
      solver_options["hess_lim_mem"] = 1;
      solver_options["hess_scaling"] = 2;
      solver_options["opttol"] = opttol_;
      solver_options["nlinfeastol"] = nlinfeastol_;
      solver_options["max_iter"] = 1;
      solver_options["print_time"] = false;
      solver_options["print_header"] = false;
      solver_options["print_iteration"] = false;
      solver_options["print_maxit_reached"] = false;

      // Create and initialize solver for the restoration problem
      rp_solver_ = nlpsol("rpsolver", "blocksqp", nlp_rp, solver_options);
    }

    // Setup NLP functions
    create_function("nlp_fg", {"x", "p"}, {"f", "g"});
    Function gf_jg = create_function("nlp_gf_jg", {"x", "p"},
                                     {"f", "g", "grad:f:x", "jac:g:x"});
    Asp_ = gf_jg.sparsity_out("jac_g_x");

    if (!block_hess_) {
      // No block-structured Hessian
      blocks_ = {0, nx_};
      which_second_derv_ = 0;
      Hsp_ = Sparsity::dense(nx_, nx_);
    } else {
      // Detect block structure

      // Get the sparsity pattern for the Hessian of the Lagrangian
      Function grad_lag = oracle_.factory("grad_lag",
                                          {"x", "p", "lam:f", "lam:g"}, {"grad:gamma:x"},
                                          {{"gamma", {"f", "g"}}});
      Hsp_ = grad_lag.sparsity_jac("x", "grad_gamma_x", false, true);

      // Make sure diagonal exists
      Hsp_ = Hsp_ + Sparsity::diag(nx_);

      // Find the strongly connected components of the Hessian
      // Unlike Sparsity::scc, assume ordered
      const casadi_int* colind = Hsp_.colind();
      const casadi_int* row = Hsp_.row();
      blocks_.push_back(0);
      casadi_int ind = 0;
      while (ind < nx_) {
        // Find the next cutoff
        casadi_int next = ind+1;
        while (ind<next && ind<nx_) {
          for (casadi_int k=colind[ind]; k<colind[ind+1]; ++k) next = std::max(next, 1+row[k]);
          ind++;
        }
        blocks_.push_back(next);
      }
    }
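
    // Example: for nx_ = 3 with Hessian coupling only between the first two
    // variables, blocks_ = {0, 2, 3}; block i covers the index range
    // [blocks_[i], blocks_[i+1]) on the Hessian diagonal.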

    // Number of blocks
    nblocks_ = blocks_.size()-1;

    // Blocksizes
    dim_.resize(nblocks_);
    casadi_int max_size = 0;
    nnz_H_ = 0;
    for (casadi_int i=0; i<nblocks_; ++i) {
      dim_[i] = blocks_[i+1]-blocks_[i];
      max_size = std::max(max_size, dim_[i]);
      nnz_H_ += dim_[i]*dim_[i];
    }

    create_function("nlp_hess_l", {"x", "p", "lam:f", "lam:g"},
                    {"triu:hess:gamma:x:x"}, {{"gamma", {"f", "g"}}});
    exact_hess_lag_sp_ = get_function("nlp_hess_l").sparsity_out(0);

    if (verbose_) casadi_message(str(nblocks_) + " blocks of max size " + str(max_size) + ".");

    // Allocate a QP solver
    //casadi_assert(!qpsol_plugin.empty(), "'qpsol' option has not been set");
    //qpsol_ = conic("qpsol", qpsol_plugin, {{"h", Hsp_}, {"a", Asp_}},
    //               qpsol_options);
    //alloc(qpsol_);

    // Allocate memory
    alloc_w(Asp_.nnz(), true); // jac
    alloc_w(nx_, true); // lam_xk
    alloc_w(ng_, true); // lam_gk
    alloc_w(ng_, true); // gk
    alloc_w(nx_, true); // grad_fk
    alloc_w(nx_, true); // grad_lagk
    alloc_w(nx_+ng_, true); // lam_qp
    alloc_w(nblocks_, true); // delta_norm
    alloc_w(nblocks_, true); // delta_norm_old
    alloc_w(nblocks_, true); // delta_gamma
    alloc_w(nblocks_, true); // delta_gamma_old
    alloc_w(nblocks_, true); // delta_h
    alloc_w(nx_, true); // trial_xk
    alloc_w(nx_, true); // lbx_qp
    alloc_w(nx_, true); // ubx_qp
    alloc_w(ng_, true); // lba_qp
    alloc_w(ng_, true); // uba_qp
    alloc_w(ng_, true); // jac_times_dxk
    alloc_w(nx_*hess_memsize_, true); // deltaMat
    alloc_w(nx_*hess_memsize_, true); // gammaMat
    alloc_w(Asp_.nnz(), true); // jac_g
    alloc_w(nnz_H_, true); // hess_lag
    alloc_iw(nblocks_, true); // noUpdateCounter

    // Allocate block diagonal Hessian(s)
    casadi_int n_hess = hess_update_==1 || hess_update_==4 ? 2 : 1;
    alloc_res(nblocks_*n_hess, true);
    alloc_w(n_hess*nnz_H_, true);
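    // A second Hessian copy is kept when SR1 (hess_update_==1) or finite
    // differences (hess_update_==4) is selected, so that a fallback
    // approximation can be maintained alongside; cf. set_work() below.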
    alloc_iw(nnz_H_ + (nx_+1) + nx_, true); // hessIndRow
    alloc_w(exact_hess_lag_sp_.nnz(), true); // exact_hess_lag
  }

  int Blocksqp::init_mem(void* mem) const {
    if (Nlpsol::init_mem(mem)) return 1;
    auto m = static_cast<BlocksqpMemory*>(mem);

    // Create qpOASES memory
    if (schur_) {
      m->qpoases_mem = new QpoasesMemory();
      m->qpoases_mem->linsol_plugin = linsol_plugin_;
    }

    m->qp = nullptr;
    m->colind.resize(Asp_.size2()+1);
    m->row.resize(Asp_.nnz());
    return 0;
  }

  void Blocksqp::set_work(void* mem, const double**& arg, double**& res,
                          casadi_int*& iw, double*& w) const {
    auto m = static_cast<BlocksqpMemory*>(mem);

    // Set work in base classes
    Nlpsol::set_work(mem, arg, res, iw, w);

    // Temporary memory
    m->jac = w; w += Asp_.nnz();
    m->lam_xk = w; w += nx_;
    m->lam_gk = w; w += ng_;
    m->gk = w; w += ng_;
    m->grad_fk = w; w += nx_;
    m->grad_lagk = w; w += nx_;
    m->lam_qp = w; w += nx_+ng_;
    m->delta_norm = w; w += nblocks_;
    m->delta_norm_old = w; w += nblocks_;
    m->delta_gamma = w; w += nblocks_;
    m->delta_gamma_old = w; w += nblocks_;
    m->delta_h = w; w += nblocks_;
    m->trial_xk = w; w += nx_;
    m->lbx_qp = w; w += nx_;
    m->ubx_qp = w; w += nx_;
    m->lba_qp = w; w += ng_;
    m->uba_qp = w; w += ng_;
    m->jac_times_dxk = w; w += ng_;
    m->deltaMat = w; w += nx_*hess_memsize_;
    m->gammaMat = w; w += nx_*hess_memsize_;
    m->jac_g = w; w += Asp_.nnz();
    m->hess_lag = w; w += nnz_H_;
    m->hessIndRow = reinterpret_cast<int*>(iw); iw += nnz_H_ + (nx_+1) + nx_;
    m->noUpdateCounter = iw; iw += nblocks_;

    // First Hessian
    m->hess1 = res; res += nblocks_;
    for (casadi_int b=0; b<nblocks_; b++) {
      m->hess1[b] = w; w += dim_[b]*dim_[b];
    }

    // Second Hessian, for SR1 or finite differences
    if (hess_update_ == 1 || hess_update_ == 4) {
      m->hess2 = res; res += nblocks_;
      for (casadi_int b=0; b<nblocks_; b++) {
        m->hess2[b] = w; w += dim_[b]*dim_[b];
      }
    } else {
      m->hess2 = nullptr;
    }

    m->exact_hess_lag = w; w += exact_hess_lag_sp_.nnz();
  }

  int Blocksqp::solve(void* mem) const {
    auto m = static_cast<BlocksqpMemory*>(mem);
    auto d_nlp = &m->d_nlp;

    casadi_int ret = 0;

    // Create problem evaluation object
    std::vector<casadi_int> blocks = blocks_;

    /*-------------------------------------------------*/
    /* Create blockSQP method object and run algorithm */
    /*-------------------------------------------------*/
    m->itCount = 0;
    m->qpItTotal = 0;
    m->qpIterations = 0;
    m->qpIterations2 = 0;
    m->qpResolve = 0;
    m->rejectedSR1 = 0;
    m->hessSkipped = 0;
    m->hessDamped = 0;
    m->averageSizingFactor = 0.0;
    m->nFunCalls = 0;
    m->nDerCalls = 0;
    m->nRestHeurCalls = 0;
    m->nRestPhaseCalls = 0;

    m->nTotalUpdates = 0;
    m->nTotalSkippedUpdates = 0;

    casadi_int maxblocksize = 1;

    for (casadi_int k=0; k<nblocks_+1; k++) {
      if (k > 0)
        if (blocks_[k] - blocks_[k-1] > maxblocksize)
          maxblocksize = blocks_[k] - blocks_[k-1];
    }

    if (hess_lim_mem_ && hess_memsize_ == 0)
      const_cast<Blocksqp*>(this)->hess_memsize_ = maxblocksize;

    // Reset the SQP method
    reset_sqp(m);

    // Free existing memory, if any
    if (qp_init_) {
      delete m->qp;
      m->qp = nullptr;
    }
    if (!m->qp) {
      if (schur_) {
        m->qp = new qpOASES::SQProblemSchur(nx_, ng_, qpOASES::HST_UNKNOWN, 50,
                                            m->qpoases_mem,
                                            QpoasesInterface::qpoases_init,
                                            QpoasesInterface::qpoases_sfact,
                                            QpoasesInterface::qpoases_nfact,
                                            QpoasesInterface::qpoases_solve);
      } else {
        m->qp = new qpOASES::SQProblem(nx_, ng_);
      }
    }

    // Print header and information about the algorithmic parameters
    if (print_header_) printInfo(m);

    // Open output files
    initStats(m);
    initIterate(m);

    // Initialize filter with pair (maxConstrViolation, objLowerBound)
    initializeFilter(m);

    // Primal-dual initial guess
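    // Note the sign flips below: blockSQP works with the Lagrangian
    // L = f - lam^T g (see calcLagrangeGradient), while CasADi multipliers
    // correspond to L = f + lam^T g.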
    casadi_copy(d_nlp->lam, nx_, m->lam_xk);
    casadi_scal(nx_, -1., m->lam_xk);
    casadi_copy(d_nlp->lam + nx_, ng_, m->lam_gk);
    casadi_scal(ng_, -1., m->lam_gk);

    casadi_copy(m->lam_xk, nx_, m->lam_qp);
    casadi_copy(m->lam_gk, ng_, m->lam_qp+nx_);

    ret = run(m, max_iter_, warmstart_);

    m->success = ret==0;
    m->ret_ = ret;

    if (ret==1) {
      if (print_maxit_reached_) print("***WARNING: Maximum number of iterations reached\n");
      m->unified_return_status = SOLVER_RET_LIMITED;
    }


    // Get optimal cost
    d_nlp->objective = m->obj;
    // Get constraints at solution
    casadi_copy(m->gk, ng_, d_nlp->z + nx_);
    // Get dual solution (simple bounds)
    if (d_nlp->lam) {
      casadi_copy(m->lam_xk, nx_, d_nlp->lam);
      casadi_scal(nx_, -1., d_nlp->lam);
    }
    // Get dual solution (nonlinear bounds)
    casadi_copy(m->lam_gk, ng_, d_nlp->lam + nx_);
    casadi_scal(ng_, -1., d_nlp->lam + nx_);
    return 0;
  }

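  // Main SQP loop. Return codes, as interpreted by solve() above and by the
  // feasibility restoration phase below: 0 = converged, 1 = iteration limit
  // reached, -1 = unrecoverable error.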
  casadi_int Blocksqp::run(BlocksqpMemory* m, casadi_int maxIt, casadi_int warmStart) const {
    casadi_int it, infoQP = 0;
    bool skipLineSearch = false;
    bool hasConverged = false;

    if (warmStart == 0 || m->itCount == 0) {
      // SQP iteration 0

      // Set initial Hessian approximation
      calcInitialHessian(m);

      // Evaluate all functions and gradients for the starting point
      switch (evaluate(m, &m->obj, m->gk, m->grad_fk, m->jac_g)) {
        case -1:
          return -1;
        case 0:
          break;
        default:
          return 1;
      }
      m->nDerCalls++;

      // Check if converged
      hasConverged = calcOptTol(m);
      updateStats(m);
      if (hasConverged) {
        if (print_iteration_ && m->steptype < 2) {
          print("\n***CONVERGENCE ACHIEVED!***\n");
        }
        return 0;
      }
      m->itCount++;
    }

    /*
     * SQP Loop: during first iteration, m->itCount = 1
     */
    for (it=0; it<maxIt; it++) {
      // Solve the QP subproblem
      updateStepBounds(m, 0);
      infoQP = solveQP(m, m->dxk, m->lam_qp);

      if (infoQP == 1) {
        // 1.) Maximum number of iterations reached
        print("***WARNING: Maximum number of QP iterations exceeded.***\n");
      } else if (infoQP == 2 || infoQP > 3) {
        // 2.) QP error (e.g., unbounded), solve again with pos.def. diagonal matrix (identity)
        print("***WARNING: QP error. Solve again with identity matrix.***\n");
        resetHessian(m);
        infoQP = solveQP(m, m->dxk, m->lam_qp);
        if (infoQP) {
          // If there is still an error, terminate.
          print("***WARNING: QP error. Stop.***\n");
          return -1;
        } else {
          m->steptype = 1;
        }
      } else if (infoQP == 3) {
        // 3.) QP infeasible, try to restore feasibility
        bool qpError = true;
        skipLineSearch = true; // don't do line search with restoration step

        // Try to reduce constraint violation by heuristic
        if (m->steptype < 2) {
          print("***WARNING: QP infeasible. Trying to reduce constraint violation ...");
          qpError = feasibilityRestorationHeuristic(m);
          if (!qpError) {
            m->steptype = 2;
            print("Success.***\n");
          } else {
            print("Failure.***\n");
          }
        }

        // Invoke feasibility restoration phase
        //if (qpError && m->steptype < 3 && restore_feas_)
        if (qpError && restore_feas_ && m->cNorm > 0.01 * nlinfeastol_) {
          print("***Start feasibility restoration phase.***\n");
          m->steptype = 3;
          qpError = feasibilityRestorationPhase(m);
        }

        // If everything failed, abort.
        if (qpError) {
          print("***WARNING: QP error. Stop.\n");
          return -1;
        }
      }

      // Determine step length alpha
      if (!globalization_ || (skip_first_globalization_ && m->itCount == 1)) {
        // No globalization strategy, but reduce step if function cannot be evaluated
        if (fullstep(m)) {
          print("***WARNING: Constraint or objective could "
                "not be evaluated at new point. Stop.***\n");
          return -1;
        }
        m->steptype = 0;
      } else if (globalization_ && !skipLineSearch) {
        // Filter line search based on Waechter et al., 2006 (Ipopt paper)
        if (filterLineSearch(m)) {
          // Filter line search did not produce a step. Now there are a few things we can try ...
          bool lsError = true;

          // Heuristic 1: Check if the full step reduces the KKT error by at
          // least kappaF, if so, accept the step.
          lsError = kktErrorReduction(m);
          if (!lsError)
            m->steptype = -1;

          // Heuristic 2: Try to reduce constraint violation by closing
          // continuity gaps to produce an admissible iterate
          if (lsError && m->cNorm > 0.01 * nlinfeastol_ && m->steptype < 2) {
            // Don't do this twice in a row!
            print("***WARNING: Steplength too short. "
                  "Trying to reduce constraint violation...");

            // Integration over whole time interval
            lsError = feasibilityRestorationHeuristic(m);
            if (!lsError) {
              m->steptype = 2;
              print("Success.***\n");
            } else {
              print("***WARNING: Failed.***\n");
            }
          }

          // Heuristic 3: Recompute step with a diagonal Hessian
          if (lsError && m->steptype != 1 && m->steptype != 2) {
            // After closing continuity gaps, we already take a step with initial Hessian.
            // If this step is not accepted then this will cause an infinite loop!

            print("***WARNING: Steplength too short. "
                  "Trying to find a new step with identity Hessian.***\n");
            m->steptype = 1;

            resetHessian(m);
            continue;
          }

          // If this does not yield a successful step, start restoration phase
          if (lsError && m->cNorm > 0.01 * nlinfeastol_ && restore_feas_) {
            print("***WARNING: Steplength too short. "
                  "Start feasibility restoration phase.***\n");
            m->steptype = 3;

            // Solve NLP with minimum norm objective
            lsError = feasibilityRestorationPhase(m);
          }

          // If everything failed, abort.
          if (lsError) {
            print("***WARNING: Line search error. Stop.***\n");
            return -1;
          }
        } else {
          m->steptype = 0;
        }
      }

      // Calculate "old" Lagrange gradient: gamma = dL(xi_k, lambda_k)
      calcLagrangeGradient(m, m->gamma, 0);

      // Evaluate functions and gradients at the new iterate
      (void)evaluate(m, &m->obj, m->gk, m->grad_fk, m->jac_g);
      m->nDerCalls++;

      // Check if converged
      hasConverged = calcOptTol(m);

      // Print one line of output for the current iteration
      updateStats(m);
      if (hasConverged && m->steptype < 2) {
        if (print_iteration_) print("\n***CONVERGENCE ACHIEVED!***\n");
        m->itCount++;
        return 0; // Convergence achieved!
      }

      // Calculate difference of old and new Lagrange gradient:
      // gamma = -gamma + dL(xi_k+1, lambda_k+1)
      calcLagrangeGradient(m, m->gamma, 1);

      // Revise Hessian approximation
      if (hess_update_ < 4 && !hess_lim_mem_) {
        calcHessianUpdate(m, hess_update_, hess_scaling_);
      } else if (hess_update_ < 4 && hess_lim_mem_) {
        calcHessianUpdateLimitedMemory(m, hess_update_, hess_scaling_);
      } else if (hess_update_ == 4) {
        calcHessianUpdateExact(m);
      }

      // If limited memory updates are used, set pointers deltaXi and
      // gamma to the next column in deltaMat and gammaMat
      updateDeltaGamma(m);

      m->itCount++;
      skipLineSearch = false;
    }

    return 1;
  }

  /**
   * Compute the gradient of the Lagrangian (sparse Jacobian version).
   * Depending on flag:
   *   flag == 0: grad_lag  = grad_f - jacNz^T*lam_g - lam_x
   *   flag == 1: grad_lag := -grad_lag + grad_f - jacNz^T*lam_g - lam_x
   *              (difference of two Lagrangian gradients)
   *   otherwise: grad_lag  =        - jacNz^T*lam_g - lam_x
   */
  void Blocksqp::
  calcLagrangeGradient(BlocksqpMemory* m,
                       const double* lam_x, const double* lam_g,
                       const double* grad_f, const double *jacNz,
                       double* grad_lag, casadi_int flag) const {

    // Objective gradient
    if (flag == 0) {
      casadi_copy(grad_f, nx_, grad_lag);
    } else if (flag == 1) {
      casadi_scal(nx_, -1., grad_lag);
      casadi_axpy(nx_, 1., grad_f, grad_lag);
    } else {
      casadi_fill(grad_lag, nx_, 0.);
    }

    // - lambdaT * constrJac
    const casadi_int* jacIndRow = Asp_.row();
    const casadi_int* jacIndCol = Asp_.colind();
    for (casadi_int iVar=0; iVar<nx_; iVar++) {
      for (casadi_int iCon=jacIndCol[iVar]; iCon<jacIndCol[iVar+1]; iCon++) {
        grad_lag[iVar] -= lam_g[jacIndRow[iCon]] * jacNz[iCon];
      }
    }

    // - lambdaT * simpleBounds
    casadi_axpy(nx_, -1., lam_x, grad_lag);
  }

  // Convenience overload using the current iterate's multipliers and Jacobian
  void Blocksqp::
  calcLagrangeGradient(BlocksqpMemory* m, double* grad_lag, casadi_int flag) const {
    calcLagrangeGradient(m, m->lam_xk, m->lam_gk, m->grad_fk, m->jac_g,
                         grad_lag, flag);
  }


  /**
   * Compute the KKT residuals: the scaled inf-norm of the Lagrangian
   * gradient (optimality) and the scaled inf-norm of the constraint
   * violation (feasibility). Returns true if both are below tolerance.
   */
  bool Blocksqp::calcOptTol(BlocksqpMemory* m) const {
    auto d_nlp = &m->d_nlp;
    // scaled norm of Lagrangian gradient
    calcLagrangeGradient(m, m->grad_lagk, 0);
    m->gradNorm = casadi_norm_inf(nx_, m->grad_lagk);
    m->tol = m->gradNorm/(1.0+fmax(casadi_norm_inf(nx_, m->lam_xk),
                                   casadi_norm_inf(ng_, m->lam_gk)));

    // norm of constraint violation
    m->cNorm = lInfConstraintNorm(m, d_nlp->z, m->gk);
    m->cNormS = m->cNorm /(1.0 + casadi_norm_inf(nx_, d_nlp->z));

    return m->tol <= opttol_ && m->cNormS <= nlinfeastol_;
  }

  void Blocksqp::printInfo(BlocksqpMemory* m) const {
    char hessString1[100];
    char hessString2[100];
    char globString[100];
    char qpString[100];

    /* QP Solver */
    if (schur_)
      strcpy(qpString, "sparse, Schur complement approach");
    else
      strcpy(qpString, "sparse, reduced Hessian factorization");

    /* Globalization */
    if (globalization_) {
      strcpy(globString, "filter line search");
    } else {
      strcpy(globString, "none (full step)");
    }

    /* Hessian approximation */
    if (block_hess_ && (hess_update_ == 1 || hess_update_ == 2))
      strcpy(hessString1, "block ");
    else
      strcpy(hessString1, "");

    if (hess_lim_mem_ && (hess_update_ == 1 || hess_update_ == 2))
      strcat(hessString1, "L-");

    /* Fallback Hessian */
    if (hess_update_ == 1 || hess_update_ == 4
        || (hess_update_ == 2 && !hess_damp_)) {
      strcpy(hessString2, hessString1);

      /* Fallback Hessian update type */
      if (fallback_update_ == 0) {
        strcat(hessString2, "Id");
      } else if (fallback_update_ == 1) {
        strcat(hessString2, "SR1");
      } else if (fallback_update_ == 2) {
        strcat(hessString2, "BFGS");
      } else if (fallback_update_ == 4) {
        strcat(hessString2, "Finite differences");
      }

      /* Fallback Hessian scaling */
      if (fallback_scaling_ == 1) {
        strcat(hessString2, ", SP");
      } else if (fallback_scaling_ == 2) {
        strcat(hessString2, ", OL");
      } else if (fallback_scaling_ == 3) {
        strcat(hessString2, ", mean");
      } else if (fallback_scaling_ == 4) {
        strcat(hessString2, ", selective sizing");
      }
    } else {
      strcpy(hessString2, "-");
    }

    /* First Hessian update type */
    if (hess_update_ == 0) {
      strcat(hessString1, "Id");
    } else if (hess_update_ == 1) {
      strcat(hessString1, "SR1");
    } else if (hess_update_ == 2) {
      strcat(hessString1, "BFGS");
    } else if (hess_update_ == 4) {
      strcat(hessString1, "Finite differences");
    }

    /* First Hessian scaling */
    if (hess_scaling_ == 1) {
      strcat(hessString1, ", SP");
    } else if (hess_scaling_ == 2) {
      strcat(hessString1, ", OL");
    } else if (hess_scaling_ == 3) {
      strcat(hessString1, ", mean");
    } else if (hess_scaling_ == 4) {
      strcat(hessString1, ", selective sizing");
    }

    print("\n+---------------------------------------------------------------+\n");
    print("| Starting blockSQP with the following algorithmic settings:   |\n");
    print("+---------------------------------------------------------------+\n");
    print("| qpOASES flavor            | %-34s|\n", qpString);
    print("| Globalization             | %-34s|\n", globString);
    print("| 1st Hessian approximation | %-34s|\n", hessString1);
    print("| 2nd Hessian approximation | %-34s|\n", hessString2);
    print("+---------------------------------------------------------------+\n\n");
  }

  void Blocksqp::
  acceptStep(BlocksqpMemory* m, double alpha) const {
    acceptStep(m, m->dxk, m->lam_qp, alpha, 0);
  }

  void Blocksqp::
  acceptStep(BlocksqpMemory* m, const double* deltaXi,
             const double* lambdaQP, double alpha, casadi_int nSOCS) const {
    auto d_nlp = &m->d_nlp;
    double lStpNorm;

    // Current alpha
    m->alpha = alpha;
    m->nSOCS = nSOCS;

    // Set new x by accepting the current trial step
    for (casadi_int k=0; k<nx_; k++) {
      d_nlp->z[k] = m->trial_xk[k];
      m->dxk[k] = alpha * deltaXi[k];
    }

    // Store the infinity norm of the multiplier step
    m->lambdaStepNorm = 0.0;
    for (casadi_int k=0; k<nx_; k++)
      if ((lStpNorm = fabs(alpha*lambdaQP[k] - alpha*m->lam_xk[k])) > m->lambdaStepNorm)
        m->lambdaStepNorm = lStpNorm;
    for (casadi_int k=0; k<ng_; k++)
      if ((lStpNorm = fabs(alpha*lambdaQP[nx_+k] - alpha*m->lam_gk[k])) > m->lambdaStepNorm)
        m->lambdaStepNorm = lStpNorm;

    // Set new multipliers
    for (casadi_int k=0; k<nx_; k++)
      m->lam_xk[k] = (1.0 - alpha)*m->lam_xk[k] + alpha*lambdaQP[k];
    for (casadi_int k=0; k<ng_; k++)
      m->lam_gk[k] = (1.0 - alpha)*m->lam_gk[k] + alpha*lambdaQP[nx_+k];

    // Count consecutive reduced steps
    if (m->alpha < 1.0)
      m->reducedStepCount++;
    else
      m->reducedStepCount = 0;
  }

  void Blocksqp::
  reduceStepsize(BlocksqpMemory* m, double *alpha) const {
    *alpha = (*alpha) * 0.5;
  }

  void Blocksqp::
  reduceSOCStepsize(BlocksqpMemory* m, double *alphaSOC) const {
    auto d_nlp = &m->d_nlp;
    // Update bounds on linearized constraints for the next SOC QP:
    // That is different from the update for the first SOC QP!
    for (casadi_int i=0; i<ng_; i++) {
      double lbg = d_nlp->lbz[i + nx_];
      double ubg = d_nlp->ubz[i + nx_];
      if (lbg != -inf) {
        m->lba_qp[i] = *alphaSOC * m->lba_qp[i] - m->gk[i];
      } else {
        m->lba_qp[i] = -inf;
      }

      if (ubg != inf) {
        m->uba_qp[i] = *alphaSOC * m->uba_qp[i] - m->gk[i];
      } else {
        m->uba_qp[i] = inf;
      }
    }

    *alphaSOC *= 0.5;
  }

  /**
   * Take a full step (alpha = 1); if the problem functions cannot be
   * evaluated there, or the objective leaves [obj_lo_, obj_up_],
   * halve alpha and try again.
   */
  casadi_int Blocksqp::fullstep(BlocksqpMemory* m) const {
    auto d_nlp = &m->d_nlp;
    double alpha;
    double objTrial, cNormTrial;

    // Backtracking line search
    alpha = 1.0;
    for (casadi_int k=0; k<10; k++) {
      // Compute new trial point
      for (casadi_int i=0; i<nx_; i++)
        m->trial_xk[i] = d_nlp->z[i] + alpha * m->dxk[i];

      // Compute problem functions at trial point
      casadi_int info = evaluate(m, m->trial_xk, &objTrial, m->gk);
      m->nFunCalls++;
      cNormTrial = lInfConstraintNorm(m, m->trial_xk, m->gk);
      // Reduce step if evaluation fails, if lower bound is violated
      // or if objective or a constraint is NaN
      if (info != 0 || objTrial < obj_lo_ || objTrial > obj_up_
          || !(objTrial == objTrial) || !(cNormTrial == cNormTrial)) {
        print("info=%i, objTrial=%g\n", info, objTrial);
        // evaluation error, reduce stepsize
        reduceStepsize(m, &alpha);
        continue;
      } else {
        acceptStep(m, alpha);
        return 0;
      }
    }
    return 1;
  }

  /**
   * Backtracking line search based on a filter,
   * cf. Waechter and Biegler, 2006 (Ipopt paper).
   * Returns 0 if a step was accepted, 1 otherwise.
   */
  casadi_int Blocksqp::filterLineSearch(BlocksqpMemory* m) const {
    auto d_nlp = &m->d_nlp;
    double alpha = 1.0;
    double cNormTrial=0, objTrial, dfTdeltaXi=0;

    // Compute ||constr(xi)|| at old point
    double cNorm = lInfConstraintNorm(m, d_nlp->z, m->gk);

    // Backtracking line search
    casadi_int k;
    for (k=0; k<max_line_search_; k++) {
      // Compute new trial point
      for (casadi_int i=0; i<nx_; i++)
        m->trial_xk[i] = d_nlp->z[i] + alpha * m->dxk[i];

      // Compute grad(f)^T * deltaXi
      dfTdeltaXi = 0.0;
      for (casadi_int i=0; i<nx_; i++)
        dfTdeltaXi += m->grad_fk[i] * m->dxk[i];

      // Compute objective and ||constr(trial_xk)||_1 at trial point
      casadi_int info = evaluate(m, m->trial_xk, &objTrial, m->gk);
      m->nFunCalls++;
      cNormTrial = lInfConstraintNorm(m, m->trial_xk, m->gk);
      // Reduce step if evaluation fails, if lower bound is violated or if objective is NaN
      if (info != 0 || objTrial < obj_lo_ || objTrial > obj_up_
          || !(objTrial == objTrial) || !(cNormTrial == cNormTrial)) {
        // evaluation error, reduce stepsize
        reduceStepsize(m, &alpha);
        continue;
      }

      // Check acceptability to the filter
      if (pairInFilter(m, cNormTrial, objTrial)) {
        // Trial point is in the prohibited region defined by
        // the filter, try second order correction
        if (secondOrderCorrection(m, cNorm, cNormTrial, dfTdeltaXi, 0, k)) {
          break; // SOC yielded suitable alpha, stop
        } else {
          reduceStepsize(m, &alpha);
          continue;
        }
      }

      // Check sufficient decrease, case I:
      // If we are (almost) feasible and a "switching condition" is satisfied
      // require sufficient progress in the objective instead of bi-objective condition
      if (cNorm <= theta_min_) {
        // Switching condition, part 1: grad(f)^T * deltaXi < 0 ?
        if (dfTdeltaXi < 0)
          // Switching condition, part 2: alpha * (- grad(f)^T * deltaXi)**sF
          // > delta * cNorm**sTheta ?
          if (alpha * pow((-dfTdeltaXi), s_f_)
              > delta_ * pow(cNorm, s_theta_)) {
            // Switching conditions hold: Require satisfaction of Armijo condition for objective
            if (objTrial > m->obj + eta_*alpha*dfTdeltaXi) {
              // Armijo condition violated, try second order correction
              if (secondOrderCorrection(m, cNorm, cNormTrial, dfTdeltaXi, 1, k)) {
                break; // SOC yielded suitable alpha, stop
              } else {
                reduceStepsize(m, &alpha);
                continue;
              }
            } else {
              // found suitable alpha, stop
              acceptStep(m, alpha);
              break;
            }
          }
      }

      // Check sufficient decrease, case II:
      // Bi-objective (filter) condition
      if (cNormTrial < (1.0 - gamma_theta_) * cNorm
          || objTrial < m->obj - gamma_f_ * cNorm) {
        // found suitable alpha, stop
        acceptStep(m, alpha);
        break;
      } else {
        // Trial point is dominated by current point, try second order correction
        if (secondOrderCorrection(m, cNorm, cNormTrial, dfTdeltaXi, 0, k)) {
          break; // SOC yielded suitable alpha, stop
        } else {
          reduceStepsize(m, &alpha);
          continue;
        }
      }
    }

    // No step could be found by the line search
    if (k == max_line_search_) return 1;

    // Augment the filter if switching condition or Armijo condition does not hold
    if (dfTdeltaXi >= 0) {
      augmentFilter(m, cNormTrial, objTrial);
    } else if (alpha * pow((-dfTdeltaXi), s_f_) > delta_ * pow(cNorm, s_theta_)) {
      // careful with neg. exponents!
      augmentFilter(m, cNormTrial, objTrial);
    } else if (objTrial <= m->obj + eta_*alpha*dfTdeltaXi) {
      augmentFilter(m, cNormTrial, objTrial);
    }

    return 0;
  }


  /**
   * Perform a second order correction step: solve the SOC QP with updated
   * (shifted) constraint bounds and check the result against the filter.
   */
  bool Blocksqp::
  secondOrderCorrection(BlocksqpMemory* m, double cNorm, double cNormTrial,
                        double dfTdeltaXi, bool swCond, casadi_int it) const {
    auto d_nlp = &m->d_nlp;

    // Perform SOC only on the first iteration of backtracking line search
    if (it > 0) return false;
    // If constraint violation of the trial step is lower than the current one, skip SOC
    if (cNormTrial < cNorm) return false;

    casadi_int nSOCS = 0;
    double cNormTrialSOC, cNormOld, objTrialSOC;

    // m->gk contains result at first trial point: c(xi+deltaXi)
    // m->jac_times_dxk and m->grad_fk are unchanged so far.

    // First SOC step
    std::vector<double> deltaXiSOC(nx_, 0.);
    std::vector<double> lambdaQPSOC(nx_+ng_, 0.);

    // Second order correction loop
    cNormOld = cNorm;
    for (casadi_int k=0; k<max_soc_iter_; k++) {
      nSOCS++;

      // Update bounds for SOC QP
      updateStepBounds(m, 1);

      // Solve SOC QP to obtain new, corrected deltaXi
      // (store in separate vector to avoid conflict with original deltaXi
      // -> need it in linesearch!)
      casadi_int info = solveQP(m, get_ptr(deltaXiSOC), get_ptr(lambdaQPSOC), false);
      if (info != 0) return false; // Could not solve QP, abort SOC

      // Set new SOC trial point
      for (casadi_int i=0; i<nx_; i++) {
        m->trial_xk[i] = d_nlp->z[i] + deltaXiSOC[i];
      }

      // Compute objective and ||constr(trialXiSOC)||_1 at SOC trial point
      info = evaluate(m, m->trial_xk, &objTrialSOC, m->gk);
      m->nFunCalls++;
      cNormTrialSOC = lInfConstraintNorm(m, m->trial_xk, m->gk);
      if (info != 0 || objTrialSOC < obj_lo_ || objTrialSOC > obj_up_
          || !(objTrialSOC == objTrialSOC) || !(cNormTrialSOC == cNormTrialSOC)) {
        return false; // evaluation error, abort SOC
      }

      // Check acceptability to the filter (in SOC)
      if (pairInFilter(m, cNormTrialSOC, objTrialSOC)) {
        // Trial point is in the prohibited region defined by the filter, abort SOC
        return false;
      }

      // Check sufficient decrease, case I (in SOC)
      // (Almost feasible and switching condition holds for line search alpha)
      if (cNorm <= theta_min_ && swCond) {
        if (objTrialSOC > m->obj + eta_*dfTdeltaXi) {
          // Armijo condition does not hold for SOC step, next SOC step

          // If constraint violation gets worse by SOC, abort
          if (cNormTrialSOC > kappa_soc_ * cNormOld) {
            return false;
          } else {
            cNormOld = cNormTrialSOC;
          }
          continue;
        } else {
          // found suitable alpha during SOC, stop
          acceptStep(m, get_ptr(deltaXiSOC), get_ptr(lambdaQPSOC), 1.0, nSOCS);
          return true;
        }
      }

      // Check sufficient decrease, case II (in SOC)
      if (cNorm > theta_min_ || !swCond) {
        if (cNormTrialSOC < (1.0 - gamma_theta_) * cNorm
            || objTrialSOC < m->obj - gamma_f_ * cNorm) {
          // found suitable alpha during SOC, stop
          acceptStep(m, get_ptr(deltaXiSOC), get_ptr(lambdaQPSOC), 1.0, nSOCS);
          return true;
        } else {
          // Trial point is dominated by current point, next SOC step

          // If constraint violation gets worse by SOC, abort
          if (cNormTrialSOC > kappa_soc_ * cNormOld) {
            return false;
          } else {
            cNormOld = cNormTrialSOC;
          }
          continue;
        }
      }
    }

    return false;
  }

  /**
   * Minimize constraint violation by solving an NLP with a minimum norm
   * objective (feasibility restoration phase)
   */
  casadi_int Blocksqp::feasibilityRestorationPhase(BlocksqpMemory* m) const {
    auto d_nlp = &m->d_nlp;
    // No feasibility restoration phase
    if (!restore_feas_) return -1;

    m->nRestPhaseCalls++;

    casadi_int ret, info;
    casadi_int maxRestIt = 100;
    casadi_int warmStart;
    double cNormTrial, objTrial, lStpNorm;


    // Create input for the minimum norm NLP
    DMDict solver_in;

    // The reference point is the starting value for the restoration phase
    std::vector<double> in_x0(d_nlp->z, d_nlp->z+nx_);

    // Initialize slack variables such that the constraints are feasible
    for (casadi_int i=0; i<ng_; i++) {
      if (m->gk[i] <= d_nlp->lbz[i+nx_])
        in_x0.push_back(m->gk[i] - d_nlp->lbz[i+nx_]);
      else if (m->gk[i] > d_nlp->ubz[i+nx_])
        in_x0.push_back(m->gk[i] - d_nlp->ubz[i+nx_]);
      else
        in_x0.push_back(0.0);
    }

    // Add current iterate xk to parameter p
    std::vector<double> in_p(d_nlp->p, d_nlp->p+np_);
    std::vector<double> in_p2(d_nlp->z, d_nlp->z+nx_);
    in_p.insert(in_p.end(), in_p2.begin(), in_p2.end());

    // Set bounds for variables
    std::vector<double> in_lbx(d_nlp->lbz, d_nlp->lbz+nx_);
    std::vector<double> in_ubx(d_nlp->ubz, d_nlp->ubz+nx_);
    for (casadi_int i=0; i<ng_; i++) {
      in_lbx.push_back(-inf);
      in_ubx.push_back(inf);
    }

    // Set bounds for constraints
    std::vector<double> in_lbg(d_nlp->lbz+nx_, d_nlp->lbz+nx_+ng_);
    std::vector<double> in_ubg(d_nlp->ubz+nx_, d_nlp->ubz+nx_+ng_);

    solver_in["x0"] = in_x0;
    solver_in["p"] = in_p;
    solver_in["lbx"] = in_lbx;
    solver_in["ubx"] = in_ubx;
    solver_in["lbg"] = in_lbg;
    solver_in["ubg"] = in_ubg;

    /*

    Consider the following simple call:
    auto solver_out = rp_solver_(solver_in);

    This call in fact allocates memory,
    calls a memory-less eval(),
    and cleans up the memory again.

    Since we want to access the memory later on,
    we need to unravel the simple call into its parts,
    and avoid the memory cleanup.

    */

    // Perform the first iteration for the minimum norm NLP

    // Get the number of inputs and outputs
    int n_in = rp_solver_.n_in();
    int n_out = rp_solver_.n_out();

    // Get default inputs
    std::vector<DM> arg_v(n_in);
    for (casadi_int i=0; i<arg_v.size(); i++)
      arg_v[i] = DM::repmat(rp_solver_.default_in(i), rp_solver_.size1_in(i), 1);

    // Assign provided inputs
    for (auto&& e : solver_in) arg_v.at(rp_solver_.index_in(e.first)) = e.second;

    // Check sparsities
    for (casadi_int i=0; i<arg_v.size(); i++)
      casadi_assert_dev(arg_v[i].sparsity()==rp_solver_.sparsity_in(i));

    // Allocate results
    std::vector<DM> res(n_out);
    for (casadi_int i=0; i<n_out; i++) {
      if (res[i].sparsity()!=rp_solver_.sparsity_out(i))
        res[i] = DM::zeros(rp_solver_.sparsity_out(i));
    }

    // Allocate temporary memory if needed
    std::vector<casadi_int> iw_tmp(rp_solver_.sz_iw());
    std::vector<double> w_tmp(rp_solver_.sz_w());

    // Get pointers to input arguments
    std::vector<const double*> argp(rp_solver_.sz_arg());
    for (casadi_int i=0; i<n_in; ++i) argp[i] = get_ptr(arg_v[i]);

    // Get pointers to output arguments
    std::vector<double*> resp(rp_solver_.sz_res());
    for (casadi_int i=0; i<n_out; i++) resp[i] = get_ptr(res[i]);

    void* mem2 = rp_solver_.memory(0);

    // Evaluate the restoration solver; this performs the first iteration
    // of the minimum norm NLP
    rp_solver_->eval(get_ptr(argp), get_ptr(resp), get_ptr(iw_tmp), get_ptr(w_tmp), mem2);

    // Get BlocksqpMemory and Blocksqp from restoration phase
    auto m2 = static_cast<BlocksqpMemory*>(mem2);
    const Blocksqp* bp = static_cast<const Blocksqp*>(rp_solver_.get());
    ret = m2->ret_;

    warmStart = 1;
    for (casadi_int it=0; it<maxRestIt; it++) {
      // One iteration for minimum norm NLP
      if (it > 0)
        ret = bp->run(m2, 1, warmStart);

      // If restMethod yields error, stop restoration phase
      if (ret == -1)
        break;

      // Get new xi from the restoration phase
      for (casadi_int i=0; i<nx_; i++)
        m->trial_xk[i] = m2->d_nlp.z[i];

      // Compute objective at trial point
      info = evaluate(m, m->trial_xk, &objTrial, m->gk);
      m->nFunCalls++;
      cNormTrial = lInfConstraintNorm(m, m->trial_xk, m->gk);
      if ( info != 0 || objTrial < obj_lo_ || objTrial > obj_up_ ||
          !(objTrial == objTrial) || !(cNormTrial == cNormTrial) )
        continue;

      // Is this iterate acceptable for the filter?
      if (!pairInFilter(m, cNormTrial, objTrial)) {
        // success
        print("Found a point acceptable for the filter.\n");
        ret = 0;
        break;
      }

      // If minimum norm NLP has converged, declare local infeasibility
      if (m2->tol < opttol_ && m2->cNormS < nlinfeastol_) {
        ret = 1;
        break;
      }
    }

    // Success or locally infeasible
    if (ret == 0 || ret == 1) {
      // Store the infinity norm of the multiplier step
      m->lambdaStepNorm = 0.0;
      // Compute restoration step
      for (casadi_int k=0; k<nx_; k++) {
        m->dxk[k] = d_nlp->z[k];

        d_nlp->z[k] = m->trial_xk[k];

        // Store lInf norm of dual step
        if ((lStpNorm = fabs(m2->lam_xk[k] - m->lam_xk[k])) > m->lambdaStepNorm)
          m->lambdaStepNorm = lStpNorm;
        m->lam_xk[k] = m2->lam_xk[k];
        m->lam_qp[k] = m2->lam_qp[k];

        m->dxk[k] -= d_nlp->z[k];
      }
      for (casadi_int k=0; k<ng_; k++) {
        // skip the dual variables for the slack variables in the restoration problem
        if ((lStpNorm = fabs(m2->lam_gk[k] - m->lam_gk[k])) > m->lambdaStepNorm)
          m->lambdaStepNorm = lStpNorm;
        m->lam_gk[k] = m2->lam_gk[k];
        m->lam_qp[nx_+k] = m2->lam_qp[nx_+ng_+k];
      }
      m->alpha = 1.0;
      m->nSOCS = 0;

      // reset reduced step counter
      m->reducedStepCount = 0;

      // reset Hessian and limited memory information
      resetHessian(m);
    }

    if (ret == 1) {
      updateStats(m);
      print("The problem seems to be locally infeasible. Infeasibilities minimized.\n");
    }

    return ret;
  }


  /**
   * Problem-specific heuristic to reduce constraint violation
   */
  casadi_int Blocksqp::feasibilityRestorationHeuristic(BlocksqpMemory* m) const {
    auto d_nlp = &m->d_nlp;
    m->nRestHeurCalls++;

    // Call problem specific heuristic to reduce constraint violation.
    // For shooting methods that means setting consistent values for
    // shooting nodes by one forward integration.
    for (casadi_int k=0; k<nx_; k++) // input: last successful step
      m->trial_xk[k] = d_nlp->z[k];

    // FIXME(@jaeandersson) Not implemented
    return -1;
  }


  /**
   * If the line search fails, check if the full step reduces the KKT error
   * by at least a factor kappa_f; if so, accept the full step.
   */
  casadi_int Blocksqp::kktErrorReduction(BlocksqpMemory* m) const {
    auto d_nlp = &m->d_nlp;
    casadi_int info = 0;
    double objTrial, cNormTrial, trialGradNorm, trialTol;

    // Compute new trial point
    for (casadi_int i=0; i<nx_; i++)
      m->trial_xk[i] = d_nlp->z[i] + m->dxk[i];

    // Compute objective and ||constr(trial_xk)|| at trial point
    std::vector<double> trialConstr(ng_, 0.);
    info = evaluate(m, m->trial_xk, &objTrial, get_ptr(trialConstr));
    m->nFunCalls++;
    cNormTrial = lInfConstraintNorm(m, m->trial_xk, get_ptr(trialConstr));
    if (info != 0 || objTrial < obj_lo_ || objTrial > obj_up_
        || !(objTrial == objTrial) || !(cNormTrial == cNormTrial)) {
      // evaluation error
      return 1;
    }

    // Compute KKT error of the new point

    // scaled norm of Lagrangian gradient
    std::vector<double> trialGradLagrange(nx_, 0.);
    calcLagrangeGradient(m, m->lam_qp, m->lam_qp+nx_, m->grad_fk,
                         m->jac_g,
                         get_ptr(trialGradLagrange), 0);

    trialGradNorm = casadi_norm_inf(nx_, get_ptr(trialGradLagrange));
    trialTol = trialGradNorm/(1.0+casadi_norm_inf(nx_+ng_, m->lam_qp));

    if (fmax(cNormTrial, trialTol) < kappa_f_ * fmax(m->cNorm, m->tol)) {
      acceptStep(m, 1.0);
      return 0;
    } else {
      return 1;
    }
  }

  /**
   * Check if the pair (cNorm, obj) is dominated by (i.e. inside) the filter
   */
  bool Blocksqp::
  pairInFilter(BlocksqpMemory* m, double cNorm, double obj) const {
    /*
     * A pair is in the filter if:
     * - it increases the objective and
     * - it also increases the constraint violation
     * The second expression in the if-clause states that we exclude
     * entries that are within the feasibility tolerance, e.g.
     * if an entry improves the constraint violation from 1e-16 to 1e-17,
     * but increases the objective considerably, we also think of this entry
     * as dominated.
     */

    for (auto&& f : m->filter) {
      if ((cNorm >= (1.0 - gamma_theta_) * f.first ||
          (cNorm < 0.01 * nlinfeastol_ && f.first < 0.01 * nlinfeastol_)) &&
          obj >= f.second - gamma_f_ * f.first) {
        return 1;
      }
    }

    return 0;
  }


  void Blocksqp::initializeFilter(BlocksqpMemory* m) const {
    std::pair<double, double> initPair(theta_max_, obj_lo_);

    // Remove all elements
    auto iter=m->filter.begin();
    while (iter != m->filter.end()) {
      std::set< std::pair<double, double> >::iterator iterToRemove = iter;
      iter++;
      m->filter.erase(iterToRemove);
    }

    // Initialize with pair (maxConstrViolation, objLowerBound);
    m->filter.insert(initPair);
  }

  /**
   * Augment the filter with the pair (cNorm, obj), including a small margin
   */
  void Blocksqp::
  augmentFilter(BlocksqpMemory* m, double cNorm, double obj) const {
    std::pair<double, double> entry((1-gamma_theta_)*cNorm,
                                    obj-gamma_f_*cNorm);

    // Augment filter by current element
    m->filter.insert(entry);

    // Remove dominated elements
    auto iter=m->filter.begin();
    while (iter != m->filter.end()) {
      if (iter->first > entry.first && iter->second > entry.second) {
        auto iterToRemove = iter;
        iter++;
        m->filter.erase(iterToRemove);
      } else {
        iter++;
      }
    }
  }

  /**
   * Set the initial Hessian blocks to diag(ini_hess_diag_)
   */
  void Blocksqp::calcInitialHessian(BlocksqpMemory* m) const {
    for (casadi_int b=0; b<nblocks_; b++)
      // if objective derv is computed exactly, don't set the last block!
      if (!(which_second_derv_ == 1 && block_hess_
            && b == nblocks_-1))
        calcInitialHessian(m, b);
  }


  void Blocksqp::calcInitialHessian(BlocksqpMemory* m, casadi_int b) const {
    casadi_int dim = dim_[b];
    casadi_fill(m->hess[b], dim*dim, 0.);

    // Each block is a diagonal matrix
    for (casadi_int i=0; i<dim; i++)
      m->hess[b][i+i*dim] = ini_hess_diag_;

    // If we maintain 2 Hessians, also reset the second one
    if (m->hess2 != nullptr) {
      casadi_fill(m->hess2[b], dim*dim, 0.);
      for (casadi_int i=0; i<dim; i++)
        m->hess2[b][i+i*dim] = ini_hess_diag_;
    }
  }


  void Blocksqp::resetHessian(BlocksqpMemory* m) const {
    for (casadi_int b=0; b<nblocks_; b++) {
      if (!(which_second_derv_ == 1 && block_hess_ && b == nblocks_ - 1)) {
        // if objective derv is computed exactly, don't set the last block!
        resetHessian(m, b);
      }
    }
  }


  void Blocksqp::resetHessian(BlocksqpMemory* m, casadi_int b) const {
    casadi_int dim = dim_[b];

    // smallGamma and smallDelta are either subvectors of gamma and delta
    // or submatrices of gammaMat, deltaMat, i.e. subvectors of gamma and delta
    // from m prev. iterations (for L-BFGS)
    double *smallGamma = m->gammaMat + blocks_[b];
    double *smallDelta = m->deltaMat + blocks_[b];

    for (casadi_int i=0; i<hess_memsize_; ++i) {
      // Remove past information on Lagrangian gradient difference
      casadi_fill(smallGamma, dim, 0.);
      smallGamma += nx_;

      // Remove past information on steps
      casadi_fill(smallDelta, dim, 0.);
      smallDelta += nx_;
    }

    // Remove information on old scalars (used for COL sizing)
    m->delta_norm[b] = 1.0;
    m->delta_gamma[b] = 0.0;
    m->delta_norm_old[b] = 1.0;
    m->delta_gamma_old[b] = 0.0;

    m->noUpdateCounter[b] = -1;

    calcInitialHessian(m, b);
  }

  void Blocksqp::
  sizeInitialHessian(BlocksqpMemory* m, const double* gamma,
                     const double* delta, casadi_int b, casadi_int option) const {
    casadi_int dim = dim_[b];
    double scale;
    double myEps = 1.0e3 * eps_;

    if (option == 1) {
      // Shanno-Phua
      scale = casadi_dot(dim, gamma, gamma)
        / fmax(casadi_dot(dim, delta, gamma), myEps);
    } else if (option == 2) {
      // Oren-Luenberger
      scale = casadi_dot(dim, delta, gamma)
        / fmax(casadi_dot(dim, delta, delta), myEps);
      scale = fmin(scale, 1.0);
    } else if (option == 3) {
      // Geometric mean of 1 and 2
      scale = sqrt(casadi_dot(dim, gamma, gamma)
        / fmax(casadi_dot(dim, delta, delta), myEps));
    } else {
      // Invalid option, ignore
      return;
    }

    if (scale > 0.0) {
      scale = fmax(scale, myEps);
      for (casadi_int i=0; i<dim; i++)
        for (casadi_int j=0; j<dim; j++)
          m->hess[b][i+j*dim] *= scale;
    } else {
      scale = 1.0;
    }

    // statistics: average sizing factor
    m->averageSizingFactor += scale;
  }


  void Blocksqp::
  sizeHessianCOL(BlocksqpMemory* m, const double* gamma,
                 const double* delta, casadi_int b) const {
    casadi_int dim = dim_[b];
    double theta, scale, myEps = 1.0e3 * eps_;
    double deltaNorm, deltaNormOld, deltaGamma, deltaGammaOld, deltaBdelta;

    // Get sTs, sTs_, sTy, sTy_, sTBs
    deltaNorm = m->delta_norm[b];
    deltaGamma = m->delta_gamma[b];
    deltaNormOld = m->delta_norm_old[b];
    deltaGammaOld = m->delta_gamma_old[b];
    deltaBdelta = 0.0;
    for (casadi_int i=0; i<dim; i++)
      for (casadi_int j=0; j<dim; j++)
        deltaBdelta += delta[i] * m->hess[b][i+j*dim] * delta[j];

    // Centered Oren-Luenberger factor
    if (m->noUpdateCounter[b] == -1) {
      // in the first iteration, this should equal the OL factor
      theta = 1.0;
    } else {
      theta = fmin(col_tau1_, col_tau2_ * deltaNorm);
    }
1897  if (deltaNorm > myEps && deltaNormOld > myEps) {
1898  scale = (1.0 - theta)*deltaGammaOld / deltaNormOld + theta*deltaBdelta / deltaNorm;
1899  if (scale > eps_)
1900  scale = ((1.0 - theta)*deltaGammaOld / deltaNormOld + theta*deltaGamma / deltaNorm) / scale;
1901  } else {
1902  scale = 1.0;
1903  }
1904 
1905  // Size only if factor is between zero and one
1906  if (scale < 1.0 && scale > 0.0) {
1907  scale = fmax(col_eps_, scale);
1908  //print("Sizing value (COL) block %i = %g\n", b, scale);
1909  for (casadi_int i=0; i<dim; i++)
1910  for (casadi_int j=0; j<dim; j++)
1911  m->hess[b][i+j*dim] *= scale;
1912 
1913  // statistics: average sizing factor
1914  m->averageSizingFactor += scale;
1915  } else {
1916  m->averageSizingFactor += 1.0;
1917  }
1918  }
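  /* The centered Oren-Luenberger (COL) factor computed above is
   *   sigma = [(1-theta)*s_'y_/s_'s_ + theta*s'y/s's]
   *         / [(1-theta)*s_'y_/s_'s_ + theta*s'Bs/s's]
   * where (s_, y_) is the previous step/gradient-difference pair. Note that
   * delta_norm stores the squared norm s's, not ||s||, despite its name. */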
1919 
1923  void Blocksqp::
1924  calcHessianUpdate(BlocksqpMemory* m, casadi_int updateType, casadi_int hessScaling) const {
1925  casadi_int nBlocks;
1926  bool firstIter;
1927 
1928  // if the objective derivative is computed exactly, don't set the last block!
1929  if (which_second_derv_ == 1 && block_hess_)
1930  nBlocks = nblocks_ - 1;
1931  else
1932  nBlocks = nblocks_;
1933 
1934  // Statistics: how often is damping active, what is the average COL sizing factor?
1935  m->hessDamped = 0;
1936  m->averageSizingFactor = 0.0;
1937 
1938  for (casadi_int b=0; b<nBlocks; b++) {
1939  casadi_int dim = dim_[b];
1940 
1941  // smallGamma and smallDelta are subvectors of gamma and delta,
1942  // corresponding to partial separability
1943  double* smallGamma = m->gammaMat + blocks_[b];
1944  double* smallDelta = m->deltaMat + blocks_[b];
1945 
1946  // Is this the first iteration or the first after a Hessian reset?
1947  firstIter = (m->noUpdateCounter[b] == -1);
1948 
1949  // Update sTs, sTs_ and sTy, sTy_
1950  m->delta_norm_old[b] = m->delta_norm[b];
1951  m->delta_gamma_old[b] = m->delta_gamma[b];
1952  m->delta_norm[b] = casadi_dot(dim, smallDelta, smallDelta);
1953  m->delta_gamma[b] = casadi_dot(dim, smallDelta, smallGamma);
1954 
1955  // Sizing before the update
1956  if (hessScaling < 4 && firstIter)
1957  sizeInitialHessian(m, smallGamma, smallDelta, b, hessScaling);
1958  else if (hessScaling == 4)
1959  sizeHessianCOL(m, smallGamma, smallDelta, b);
1960 
1961  // Compute the new update
1962  if (updateType == 1) {
1963  calcSR1(m, smallGamma, smallDelta, b);
1964 
1965  // Prepare to compute fallback update as well
1966  m->hess = m->hess2;
1967 
1968  // Sizing the fallback update
1969  if (fallback_scaling_ < 4 && firstIter)
1970  sizeInitialHessian(m, smallGamma, smallDelta, b, fallback_scaling_);
1971  else if (fallback_scaling_ == 4)
1972  sizeHessianCOL(m, smallGamma, smallDelta, b);
1973 
1974  // Compute fallback update
1975  if (fallback_update_ == 2)
1976  calcBFGS(m, smallGamma, smallDelta, b);
1977 
1978  // Reset pointer
1979  m->hess = m->hess1;
1980  } else if (updateType == 2) {
1981  calcBFGS(m, smallGamma, smallDelta, b);
1982  }
1983 
1984  // If an update is skipped too often, reset the Hessian block
1985  if (m->noUpdateCounter[b] > max_consec_skipped_updates_) {
1986  resetHessian(m, b);
1987  }
1988  }
1989 
1990  // statistics: average sizing factor
1991  m->averageSizingFactor /= nBlocks;
1992  }
1993 
1994 
1995  void Blocksqp::
1996  calcHessianUpdateLimitedMemory(BlocksqpMemory* m,
1997  casadi_int updateType, casadi_int hessScaling) const {
1998  casadi_int nBlocks;
1999  casadi_int m2, pos, posOldest, posNewest;
2000  casadi_int hessDamped, hessSkipped;
2001  double averageSizingFactor;
2002 
2003  // if the objective derivative is computed exactly, don't set the last block!
2004  if (which_second_derv_ == 1 && block_hess_) {
2005  nBlocks = nblocks_ - 1;
2006  } else {
2007  nBlocks = nblocks_;
2008  }
2009 
2010  // Statistics: how often is damping active, what is the average COL sizing factor?
2011  m->hessDamped = 0;
2012  m->hessSkipped = 0;
2013  m->averageSizingFactor = 0.0;
2014 
2015  for (casadi_int b=0; b<nBlocks; b++) {
2016  casadi_int dim = dim_[b];
2017 
2018  // smallGamma and smallDelta are submatrices of gammaMat, deltaMat,
2019  // i.e. subvectors of gamma and delta from the previous hess_memsize_ iterations
2020  double *smallGamma = m->gammaMat + blocks_[b];
2021  double *smallDelta = m->deltaMat + blocks_[b];
2022 
2023  // Memory structure
2024  if (m->itCount > hess_memsize_) {
2025  m2 = hess_memsize_;
2026  posOldest = m->itCount % m2;
2027  posNewest = (m->itCount-1) % m2;
2028  } else {
2029  m2 = m->itCount;
2030  posOldest = 0;
2031  posNewest = m2-1;
2032  }
2033 
2034  // Set B_0 (pretend it's the first step)
2035  calcInitialHessian(m, b);
2036  m->delta_norm[b] = 1.0;
2037  m->delta_norm_old[b] = 1.0;
2038  m->delta_gamma[b] = 0.0;
2039  m->delta_gamma_old[b] = 0.0;
2040  m->noUpdateCounter[b] = -1;
2041 
2042  // Size the initial update, but with the most recent delta/gamma-pair
2043  double *gammai = smallGamma + nx_*posNewest;
2044  double *deltai = smallDelta + nx_*posNewest;
2045  sizeInitialHessian(m, gammai, deltai, b, hessScaling);
2046 
2047  for (casadi_int i=0; i<m2; i++) {
2048  pos = (posOldest+i) % m2;
2049 
2050  // Get new vector from list
2051  gammai = smallGamma + nx_*pos;
2052  deltai = smallDelta + nx_*pos;
2053 
2054  // Update sTs, sTs_ and sTy, sTy_
2055  m->delta_norm_old[b] = m->delta_norm[b];
2056  m->delta_gamma_old[b] = m->delta_gamma[b];
2057  m->delta_norm[b] = casadi_dot(dim, deltai, deltai);
2058  m->delta_gamma[b] = casadi_dot(dim, gammai, deltai);
2059 
2060  // Save statistics, we want to record them only for the most recent update
2061  averageSizingFactor = m->averageSizingFactor;
2062  hessDamped = m->hessDamped;
2063  hessSkipped = m->hessSkipped;
2064 
2065  // Selective sizing before the update
2066  if (hessScaling == 4) sizeHessianCOL(m, gammai, deltai, b);
2067 
2068  // Compute the new update
2069  if (updateType == 1) {
2070  calcSR1(m, gammai, deltai, b);
2071  } else if (updateType == 2) {
2072  calcBFGS(m, gammai, deltai, b);
2073  }
2074 
2075  m->nTotalUpdates++;
2076 
2077  // Count damping statistics only for the most recent update
2078  if (pos != posNewest) {
2079  m->hessDamped = hessDamped;
2080  m->hessSkipped = hessSkipped;
2081  if (hessScaling == 4)
2082  m->averageSizingFactor = averageSizingFactor;
2083  }
2084  }
2085 
2086  // If an update is skipped too often, reset the Hessian block
2087  if (m->noUpdateCounter[b] > max_consec_skipped_updates_) {
2088  resetHessian(m, b);
2089  }
2090  }
2091  // statistics: average sizing factor over all blocks
2092  m->averageSizingFactor /= nBlocks;
2093  }
2094 
2095 
2096  void Blocksqp::
2097  calcHessianUpdateExact(BlocksqpMemory* m) const {
2098  // compute exact hessian
2099  (void)evaluate(m, m->exact_hess_lag);
2100 
2101  // assign exact hessian to blocks
2102  const casadi_int* col = exact_hess_lag_sp_.colind();
2103  const casadi_int* row = exact_hess_lag_sp_.row();
2104  casadi_int s, dim;
2105 
2106  for (casadi_int k=0; k<nblocks_; k++) {
2107  s = blocks_[k];
2108  dim = dim_[k];
2109  for (casadi_int i=0; i<dim; i++)
2110  // Set diagonal to 0 (may have been set to ini_hess_diag_ by calcInitialHessian)
2111  m->hess[k][i + i*dim] = 0.0;
2112  for (casadi_int j=0; j<dim; j++) {
2113  for (casadi_int i=col[j+s]; i<col[j+1+s]; i++) {
2114  m->hess[k][row[i]-row[col[s]] + j*dim] = m->exact_hess_lag[i];
2115  if (row[i]-row[col[s]] < j)
2116  m->hess[k][j + (row[i]-row[col[s]])*dim] = m->exact_hess_lag[i];
2117  }
2118  }
2119  }
2120 
2121  // Prepare to compute fallback update as well
2122  m->hess = m->hess2;
2123  if (fallback_update_ == 2 && !hess_lim_mem_)
2124  calcHessianUpdate(m, fallback_update_, fallback_scaling_);
2125  else if (fallback_update_ == 0)
2126  calcInitialHessian(m); // Set fallback as Identity
2127 
2128  // Reset pointer
2129  m->hess = m->hess1;
2130  }
2131 
2132 
2134  calcBFGS(BlocksqpMemory* m, const double* gamma,
2135  const double* delta, casadi_int b) const {
2136  casadi_int dim = dim_[b];
2137  double h1 = 0.0;
2138  double h2 = 0.0;
2139  double thetaPowell = 0.0;
2140  casadi_int damped;
2141 
2142  /* Work with a local copy of gamma because damping may need to change gamma.
2143  * Note that m->gamma needs to remain unchanged!
2144  * This may be important in a limited memory context:
2145  * When information is "forgotten", B_i-1 is different and the
2146  * original gamma might lead to an undamped update with the new B_i-1! */
2147  std::vector<double> gamma2(gamma, gamma+dim);
2148 
2149  double *B = m->hess[b];
2150 
2151  // Bdelta = B*delta (if sizing is enabled, B is the sized B!)
2152  // h1 = delta^T * B * delta
2153  // h2 = delta^T * gamma
2154  std::vector<double> Bdelta(dim, 0.0);
2155  for (casadi_int i=0; i<dim; i++) {
2156  for (casadi_int k=0; k<dim; k++)
2157  Bdelta[i] += B[i+k*dim] * delta[k];
2158 
2159  h1 += delta[i] * Bdelta[i];
2160  //h2 += delta[i] * gamma[i];
2161  }
2162  h2 = m->delta_gamma[b];
2163 
2164  /* Powell's damping strategy to maintain pos. def. (Nocedal/Wright p.537; SNOPT paper)
2165  * Interpolates between current approximation and unmodified BFGS */
2166  damped = 0;
2167  if (hess_damp_)
2168  if (h2 < hess_damp_fac_ * h1 / m->alpha && fabs(h1 - h2) > 1.0e-12) {
2169  // At the first iteration h1 and h2 are equal due to COL scaling
2170 
2171  thetaPowell = (1.0 - hess_damp_fac_)*h1 / (h1 - h2);
2172 
2173  // Redefine gamma and h2 = delta^T * gamma
2174  h2 = 0.0;
2175  for (casadi_int i=0; i<dim; i++) {
2176  gamma2[i] = thetaPowell*gamma2[i] + (1.0 - thetaPowell)*Bdelta[i];
2177  h2 += delta[i] * gamma2[i];
2178  }
2179 
2180  // Also redefine deltaGamma for computation of sizing factor in the next iteration
2181  m->delta_gamma[b] = h2;
2182 
2183  damped = 1;
2184  }
2185 
2186  // For statistics: count number of damped blocks
2187  m->hessDamped += damped;
2188 
2189  // B_k+1 = B_k - Bdelta * (Bdelta)^T / h1 + gamma * gamma^T / h2
2190  double myEps = 1.0e2 * eps_;
2191  if (fabs(h1) < myEps || fabs(h2) < myEps) {
2192  // don't perform update because of bad condition, might introduce negative eigenvalues
2193  m->noUpdateCounter[b]++;
2194  m->hessDamped -= damped;
2195  m->hessSkipped++;
2196  m->nTotalSkippedUpdates++;
2197  } else {
2198  for (casadi_int i=0; i<dim; i++)
2199  for (casadi_int j=0; j<dim; j++)
2200  B[i+j*dim] += - Bdelta[i]*Bdelta[j]/h1 + gamma2[i]*gamma2[j]/h2;
2201 
2202  m->noUpdateCounter[b] = 0;
2203  }
2204  }
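  /* Powell damping in a nutshell (as implemented above): whenever
   * s'y < hess_damp_fac_ * s'Bs / alpha, gamma is replaced by
   *   y_damped = theta*y + (1-theta)*B*s,
   *   theta = (1 - hess_damp_fac_)*s'Bs / (s'Bs - s'y),
   * so that s'y_damped = hess_damp_fac_ * s'Bs > 0, which keeps the
   * damped BFGS update positive definite. */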
2205 
2206 
2208  calcSR1(BlocksqpMemory* m, const double* gamma,
2209  const double* delta, casadi_int b) const {
2210  casadi_int dim = dim_[b];
2211  double *B = m->hess[b];
2212  double myEps = 1.0e2 * eps_;
2213  double r = 1.0e-8;
2214  double h = 0.0;
2215 
2216  // gmBdelta = gamma - B*delta
2217  // h = (gamma - B*delta)^T * delta
2218  std::vector<double> gmBdelta(dim);
2219  for (casadi_int i=0; i<dim; i++) {
2220  gmBdelta[i] = gamma[i];
2221  for (casadi_int k=0; k<dim; k++)
2222  gmBdelta[i] -= B[i+k*dim] * delta[k];
2223 
2224  h += (gmBdelta[i] * delta[i]);
2225  }
2226 
2227  // B_k+1 = B_k + gmBdelta * gmBdelta^T / h
2228  if (fabs(h) < r * casadi_norm_2(dim, delta)
2229  *casadi_norm_2(dim, get_ptr(gmBdelta)) || fabs(h) < myEps) {
2230  // Skip update if denominator is too small
2231  m->noUpdateCounter[b]++;
2232  m->hessSkipped++;
2233  m->nTotalSkippedUpdates++;
2234  } else {
2235  for (casadi_int i=0; i<dim; i++)
2236  for (casadi_int j=0; j<dim; j++)
2237  B[i+j*dim] += gmBdelta[i]*gmBdelta[j]/h;
2238  m->noUpdateCounter[b] = 0;
2239  }
2240  }
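  /* The guard above is the standard SR1 safeguard (cf. Nocedal & Wright):
   * apply the rank-1 update only if |s'(y - Bs)| >= r*||s||*||y - Bs||,
   * since a near-zero denominator h would blow up the correction term. */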
2241 
2242 
2247  void Blocksqp::updateDeltaGamma(BlocksqpMemory* m) const {
2248  if (hess_memsize_ == 1) return;
2249 
2250  m->dxk = m->deltaMat + nx_*(m->itCount % hess_memsize_);
2251  m->gamma = m->gammaMat + nx_*(m->itCount % hess_memsize_);
2252  }
2253 
2254  void Blocksqp::
2255  computeNextHessian(BlocksqpMemory* m, casadi_int idx, casadi_int maxQP) const {
2256  // Compute fallback update only once
2257  if (idx == 1) {
2258  // Switch storage
2259  m->hess = m->hess2;
2260 
2261  // If last block contains exact Hessian, we need to copy it
2262  if (which_second_derv_ == 1) {
2263  casadi_int dim = dim_[nblocks_-1];
2264  casadi_copy(m->hess1[nblocks_-1], dim*dim, m->hess2[nblocks_-1]);
2265  }
2266 
2267  // Limited memory: compute fallback update only when needed
2268  if (hess_lim_mem_) {
2269  m->itCount--;
2270  casadi_int hessDampSave = hess_damp_;
2271  const_cast<Blocksqp*>(this)->hess_damp_ = 1;
2272  calcHessianUpdateLimitedMemory(m, fallback_update_, fallback_scaling_);
2273  const_cast<Blocksqp*>(this)->hess_damp_ = hessDampSave;
2274  m->itCount++;
2275  }
2276  /* Full memory: both updates must be computed in every iteration
2277  * so switching storage is enough */
2278  }
2279 
2280  // 'Nontrivial' convex combinations
2281  if (maxQP > 2) {
2282  /* Convexification parameter: mu_l = l / (maxQP-1).
2283  * Compute it only in the first iteration, afterwards update
2284  * by recursion: mu_l/mu_(l-1) */
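      /* Why the recursion is valid: if hess2 currently holds
       * mu_{l-1}*B_fallback + (1 - mu_{l-1})*B_SR1, then scaling it by
       * r = mu_l/mu_{l-1} = l/(l-1) and adding (1-r)*hess1 yields
       * mu_l*B_fallback + (1 - mu_l)*B_SR1, the next convex combination. */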
2285  double idxF = idx;
2286  double mu = (idx==1) ? 1.0 / (maxQP-1) : idxF / (idxF - 1.0);
2287  double mu1 = 1.0 - mu;
2288  for (casadi_int b=0; b<nblocks_; b++) {
2289  casadi_int dim = dim_[b];
2290  for (casadi_int i=0; i<dim; i++) {
2291  for (casadi_int j=0; j<dim; j++) {
2292  m->hess2[b][i+j*dim] *= mu;
2293  m->hess2[b][i+j*dim] += mu1 * m->hess1[b][i+j*dim];
2294  }
2295  }
2296  }
2297  }
2298  }
2299 
2300 
2305  casadi_int Blocksqp::
2306  solveQP(BlocksqpMemory* m, double* deltaXi, double* lambdaQP,
2307  bool matricesChanged) const {
2308  casadi_int maxQP, l;
2309  if (globalization_ &&
2310  (hess_update_ == 1 || hess_update_ == 4) &&
2311  matricesChanged &&
2312  m->itCount > 1) {
2313  maxQP = max_conv_qp_ + 1;
2314  } else {
2315  maxQP = 1;
2316  }
2317 
2318  /*
2319  * Prepare for qpOASES
2320  */
2321 
2322  // Setup QProblem data
2323  if (matricesChanged) {
2324  delete m->A;
2325  m->A = nullptr;
2326  copy_vector(Asp_.colind(), m->colind);
2327  copy_vector(Asp_.row(), m->row);
2328  int* jacIndRow = get_ptr(m->row);
2329  int* jacIndCol = get_ptr(m->colind);
2330  m->A = new qpOASES::SparseMatrix(ng_, nx_,
2331  jacIndRow, jacIndCol, m->jac_g);
2332  }
2333  double *g = m->grad_fk;
2334  double *lb = m->lbx_qp;
2335  double *lu = m->ubx_qp;
2336  double *lbA = m->lba_qp;
2337  double *luA = m->uba_qp;
2338 
2339  // qpOASES options
2340  qpOASES::Options opts;
2341  if (matricesChanged && maxQP > 1)
2342  opts.enableInertiaCorrection = qpOASES::BT_FALSE;
2343  else
2344  opts.enableInertiaCorrection = qpOASES::BT_TRUE;
2345  opts.enableEqualities = qpOASES::BT_TRUE;
2346  opts.initialStatusBounds = qpOASES::ST_INACTIVE;
2347  opts.printLevel = qpOASES::PL_NONE;
2348  opts.numRefinementSteps = 2;
2349  opts.epsLITests = 2.2204e-08;
2350  m->qp->setOptions(opts);
2351 
2352  // Other variables for qpOASES
2353  double cpuTime = matricesChanged ? max_time_qp_ : 0.1*max_time_qp_;
2354  int maxIt = matricesChanged ? max_it_qp_ : static_cast<int>(0.1*max_it_qp_);
2355  qpOASES::SolutionAnalysis solAna;
2356  qpOASES::returnValue ret = qpOASES::RET_INFO_UNDEFINED;
2357 
2358  /*
2359  * QP solving loop for convex combinations (sequential)
2360  */
2361  for (l=0; l<maxQP; l++) {
2362  /*
2363  * Compute a new Hessian
2364  */
2365  if (l > 0) {
2366  // If the solution of the first QP was rejected, consider second Hessian
2367  m->qpResolve++;
2368  computeNextHessian(m, l, maxQP);
2369  }
2370 
2371  if (l == maxQP-1) {
2372  // Enable inertia correction for supposedly convex QPs, just in case
2373  opts.enableInertiaCorrection = qpOASES::BT_TRUE;
2374  m->qp->setOptions(opts);
2375  }
2376 
2377  /*
2378  * Prepare the current Hessian for qpOASES
2379  */
2380  if (matricesChanged) {
2381  // Convert block-Hessian to sparse format
2382  convertHessian(m);
2383  delete m->H;
2384  m->H = nullptr;
2385  m->H = new qpOASES::SymSparseMat(nx_, nx_,
2386  m->hessIndRow, m->hessIndCol,
2387  m->hess_lag);
2388  m->H->createDiagInfo();
2389  }
2390 
2391  /*
2392  * Call qpOASES
2393  */
2394  if (matricesChanged) {
2395  maxIt = max_it_qp_;
2396  cpuTime = max_time_qp_;
2397  if (m->qp->getStatus() == qpOASES::QPS_HOMOTOPYQPSOLVED ||
2398  m->qp->getStatus() == qpOASES::QPS_SOLVED) {
2399  ret = m->qp->hotstart(m->H, g, m->A, lb, lu, lbA, luA, maxIt, &cpuTime);
2400  } else {
2401  if (warmstart_) {
2402  ret = m->qp->init(m->H, g, m->A, lb, lu, lbA, luA, maxIt, &cpuTime,
2403  deltaXi, lambdaQP);
2404  } else {
2405  ret = m->qp->init(m->H, g, m->A, lb, lu, lbA, luA, maxIt, &cpuTime);
2406  }
2407  }
2408  } else {
2409  // Second order correction: H and A do not change
2410  maxIt = static_cast<int>(0.1*max_it_qp_);
2411  cpuTime = 0.1*max_time_qp_;
2412  ret = m->qp->hotstart(g, lb, lu, lbA, luA, maxIt, &cpuTime);
2413  }
2414 
2415  /*
2416  * Check assumption (G3*) if nonconvex QP was solved
2417  */
2418  if (l < maxQP-1 && matricesChanged) {
2419  if (ret == qpOASES::SUCCESSFUL_RETURN) {
2420  if (schur_) {
2421  ret = solAna.checkCurvatureOnStronglyActiveConstraints(
2422  dynamic_cast<qpOASES::SQProblemSchur*>(m->qp));
2423  } else {
2424  ret = solAna.checkCurvatureOnStronglyActiveConstraints(m->qp);
2425  }
2426  }
2427 
2428  if (ret == qpOASES::SUCCESSFUL_RETURN) {
2429  // QP was solved successfully and curvature is positive after removing bounds
2430  m->qpIterations = maxIt + 1;
2431  break; // Success!
2432  } else {
2433  // QP solution is rejected, save statistics
2434  if (ret == qpOASES::RET_SETUP_AUXILIARYQP_FAILED)
2435  m->qpIterations2++;
2436  else
2437  m->qpIterations2 += maxIt + 1;
2438  m->rejectedSR1++;
2439  }
2440  } else {
2441  // Convex QP was solved, no need to check assumption (G3*)
2442  m->qpIterations += maxIt + 1;
2443  }
2444 
2445  } // End of QP solving loop
2446 
2447  /*
2448  * Post-processing
2449  */
2450 
2451  // Get solution from qpOASES
2452  m->qp->getPrimalSolution(deltaXi);
2453  m->qp->getDualSolution(lambdaQP);
2454  m->qpObj = m->qp->getObjVal();
2455 
2456  // Compute constrJac*deltaXi, need this for second order correction step
2457  casadi_fill(m->jac_times_dxk, ng_, 0.);
2458  casadi_mv(m->jac_g, Asp_, deltaXi, m->jac_times_dxk, 0);
2459 
2460  // Print qpOASES error code, if any
2461  if (ret != qpOASES::SUCCESSFUL_RETURN && matricesChanged)
2462  print("***WARNING: qpOASES error message: \"%s\"\n",
2463  qpOASES::MessageHandling::getErrorCodeMessage(ret));
2464 
2465  // Point Hessian again to the first Hessian
2466  m->hess = m->hess1;
2467 
2468  /* For full-memory Hessian: Restore fallback Hessian if convex combinations
2469  * were used during the loop */
2470  if (!hess_lim_mem_ && maxQP > 2 && matricesChanged) {
2471  double mu = 1.0 / l;
2472  double mu1 = 1.0 - mu;
2473  casadi_int nBlocks = (which_second_derv_ == 1) ? nblocks_-1 : nblocks_;
2474  for (casadi_int b=0; b<nBlocks; b++) {
2475  casadi_int dim = dim_[b];
2476  for (casadi_int i=0; i<dim; i++) {
2477  for (casadi_int j=0; j<dim; j++) {
2478  m->hess2[b][i+j*dim] *= mu;
2479  m->hess2[b][i+j*dim] += mu1 * m->hess1[b][i+j*dim];
2480  }
2481  }
2482  }
2483  }
2484 
2485  /* Return code depending on the qpOASES return value
2486  * 0: Success
2487  * 1: Maximum number of iterations reached
2488  * 2: Unbounded
2489  * 3: Infeasible
2490  * 4: Other error */
2491  switch (ret) {
2492  case qpOASES::SUCCESSFUL_RETURN:
2493  return 0;
2494  case qpOASES::RET_MAX_NWSR_REACHED:
2495  return 1;
2496  case qpOASES::RET_HESSIAN_NOT_SPD:
2497  case qpOASES::RET_HESSIAN_INDEFINITE:
2498  case qpOASES::RET_INIT_FAILED_UNBOUNDEDNESS:
2499  case qpOASES::RET_QP_UNBOUNDED:
2500  case qpOASES::RET_HOTSTART_STOPPED_UNBOUNDEDNESS:
2501  return 2;
2502  case qpOASES::RET_INIT_FAILED_INFEASIBILITY:
2503  case qpOASES::RET_QP_INFEASIBLE:
2504  case qpOASES::RET_HOTSTART_STOPPED_INFEASIBILITY:
2505  return 3;
2506  default:
2507  return 4;
2508  }
2509  }
2510 
2516  void Blocksqp::updateStepBounds(BlocksqpMemory* m, bool soc) const {
2517  auto d_nlp = &m->d_nlp;
2518  // Bounds on step
2519  for (casadi_int i=0; i<nx_; i++) {
2520  double lbx = d_nlp->lbz[i];
2521  if (lbx != inf) {
2522  m->lbx_qp[i] = lbx - d_nlp->z[i];
2523  } else {
2524  m->lbx_qp[i] = inf;
2525  }
2526 
2527  double ubx = d_nlp->ubz[i];
2528  if (ubx != inf) {
2529  m->ubx_qp[i] = ubx - d_nlp->z[i];
2530  } else {
2531  m->ubx_qp[i] = inf;
2532  }
2533  }
2534 
2535  // Bounds on linearized constraints
2536  for (casadi_int i=0; i<ng_; i++) {
2537  double lbg = d_nlp->lbz[i+nx_];
2538  if (lbg != inf) {
2539  m->lba_qp[i] = lbg - m->gk[i];
2540  if (soc) m->lba_qp[i] += m->jac_times_dxk[i];
2541  } else {
2542  m->lba_qp[i] = inf;
2543  }
2544 
2545  double ubg = d_nlp->ubz[i+nx_];
2546  if (ubg != inf) {
2547  m->uba_qp[i] = ubg - m->gk[i];
2548  if (soc) m->uba_qp[i] += m->jac_times_dxk[i];
2549  } else {
2550  m->uba_qp[i] = inf;
2551  }
2552  }
2553  }
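  /* Worked example (illustrative): for a variable with value z = 3 and
   * bounds 1 <= x <= 5, the step d is constrained to -2 <= d <= 2.
   * Each linearized constraint row is handled the same way:
   * lbg - g(x) <= a'd <= ubg - g(x), shifted by jac_times_dxk for a
   * second-order correction step. */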
2554 
2555  void Blocksqp::printProgress(BlocksqpMemory* m) const {
2556  /*
2557  * m->steptype:
2558  *-1: full step was accepted because it reduces the KKT error although line search failed
2559  * 0: standard line search step
2560  * 1: Hessian has been reset to identity
2561  * 2: feasibility restoration heuristic has been called
2562  * 3: feasibility restoration phase has been called
2563  */
2564 
2565  // Print headline every twenty iterations
2566  if (m->itCount % 20 == 0) {
2567  print("%-8s", " it");
2568  print("%-21s", " qpIt");
2569  print("%-9s", "obj");
2570  print("%-11s", "feas");
2571  print("%-7s", "opt");
2572  print("%-11s", "|lgrd|");
2573  print("%-9s", "|stp|");
2574  print("%-10s", "|lstp|");
2575  print("%-8s", "alpha");
2576  print("%-6s", "nSOCS");
2577  print("%-18s", "sk, da, sca");
2578  print("%-6s", "QPr,mu");
2579  print("\n");
2580  }
2581 
2582  if (m->itCount == 0) {
2583  // Values for first iteration
2584  print("%5i ", m->itCount);
2585  print("%11i ", 0);
2586  print("% 10e ", m->obj);
2587  print("%-10.2e", m->cNormS);
2588  print("%-10.2e", m->tol);
2589  print("\n");
2590  } else {
2591  // All values
2592  print("%5i ", m->itCount);
2593  print("%5i+%5i ", m->qpIterations, m->qpIterations2);
2594  print("% 10e ", m->obj);
2595  print("%-10.2e", m->cNormS);
2596  print("%-10.2e", m->tol);
2597  print("%-10.2e", m->gradNorm);
2598  print("%-10.2e", casadi_norm_inf(nx_, m->dxk));
2599  print("%-10.2e", m->lambdaStepNorm);
2600  print("%-9.1e", m->alpha);
2601  print("%5i", m->nSOCS);
2602  print("%3i, %3i, %-9.1e", m->hessSkipped, m->hessDamped, m->averageSizingFactor);
2603  print("%i, %-9.1e", m->qpResolve, casadi_norm_1(nblocks_, m->delta_h)/nblocks_);
2604  print("\n");
2605  }
2606  }
2607 
2608  void Blocksqp::initStats(BlocksqpMemory* m) const {
2609  m->itCount = 0;
2610  m->qpItTotal = 0;
2611  m->qpIterations = 0;
2612  m->hessSkipped = 0;
2613  m->hessDamped = 0;
2614  m->averageSizingFactor = 0.0;
2615  }
2616 
2617  void Blocksqp::updateStats(BlocksqpMemory* m) const {
2618  // Do not accidentally print hessSkipped in the next iteration
2619  m->hessSkipped = 0;
2620  m->hessDamped = 0;
2621 
2622  // qpIterations = number of iterations for the QP that determines the step,
2623  // can be a resolve (+SOC)
2624  // qpIterations2 = number of iterations for a QP whose solution was discarded
2625  m->qpItTotal += m->qpIterations;
2626  m->qpItTotal += m->qpIterations2;
2627  m->qpIterations = 0;
2628  m->qpIterations2 = 0;
2629  m->qpResolve = 0;
2630  }
2631 
2637  void Blocksqp::reset_sqp(BlocksqpMemory* m) const {
2638  // dual variables (for general constraints and variable bounds)
2639  casadi_fill(m->lam_xk, nx_, 0.);
2640  casadi_fill(m->lam_gk, ng_, 0.);
2641 
2642  // constraint vector with lower and upper bounds
2643  // (Box constraints are not included in the constraint list)
2644  casadi_fill(m->gk, ng_, 0.);
2645 
2646  // gradient of objective
2647  casadi_fill(m->grad_fk, nx_, 0.);
2648 
2649  // gradient of Lagrangian
2650  casadi_fill(m->grad_lagk, nx_, 0.);
2651 
2652  // current step
2653  casadi_fill(m->deltaMat, nx_*hess_memsize_, 0.);
2654  m->dxk = m->deltaMat;
2655 
2656  // trial step (temporary variable, for line search)
2657  casadi_fill(m->trial_xk, nx_, 0.);
2658 
2659  // bounds for step (QP subproblem)
2660  casadi_fill(m->lbx_qp, nx_, 0.);
2661  casadi_fill(m->ubx_qp, nx_, 0.);
2662  casadi_fill(m->lba_qp, ng_, 0.);
2663  casadi_fill(m->uba_qp, ng_, 0.);
2664 
2665  // product of constraint Jacobian with step (deltaXi)
2666  casadi_fill(m->jac_times_dxk, ng_, 0.);
2667 
2668  // dual variables of QP (simple bounds and general constraints)
2669  casadi_fill(m->lam_qp, nx_+ng_, 0.);
2670 
2671  // line search parameters
2672  casadi_fill(m->delta_h, nblocks_, 0.);
2673 
2674  // filter as a set of pairs
2675  m->filter.clear();
2676 
2677  // difference of Lagrangian gradients
2678  casadi_fill(m->gammaMat, nx_*hess_memsize_, 0.);
2679  m->gamma = m->gammaMat;
2680 
2681  // Scalars that are used in various Hessian update procedures
2682  casadi_fill(m->noUpdateCounter, nblocks_, casadi_int(-1));
2683 
2684  // For selective sizing: for each block save sTs, sTs_, sTy, sTy_
2685  casadi_fill(m->delta_norm, nblocks_, 1.);
2686  casadi_fill(m->delta_norm_old, nblocks_, 1.);
2687  casadi_fill(m->delta_gamma, nblocks_, 0.);
2688  casadi_fill(m->delta_gamma_old, nblocks_, 0.);
2689 
2690  // Create one Matrix for one diagonal block in the Hessian
2691  for (casadi_int b=0; b<nblocks_; b++) {
2692  casadi_int dim = dim_[b];
2693  casadi_fill(m->hess1[b], dim*dim, 0.);
2694  }
2695 
2696  // For SR1 or finite differences, maintain two Hessians
2697  if (hess_update_ == 1 || hess_update_ == 4) {
2698  for (casadi_int b=0; b<nblocks_; b++) {
2699  casadi_int dim = dim_[b];
2700  casadi_fill(m->hess2[b], dim*dim, 0.);
2701  }
2702  }
2703 
2704  // Set Hessian pointer
2705  m->hess = m->hess1;
2706  }
2707 
2711  // Convert *hess to column compressed sparse format
2712  void Blocksqp::
2713  convertHessian(BlocksqpMemory* m) const {
2714  casadi_int count, colCountTotal, rowOffset;
2715  casadi_int nnz;
2716 
2717  // 1) count nonzero elements
2718  nnz = 0;
2719  for (casadi_int b=0; b<nblocks_; b++) {
2720  casadi_int dim = dim_[b];
2721  for (casadi_int i=0; i<dim; i++) {
2722  for (casadi_int j=0; j<dim; j++) {
2723  if (fabs(m->hess[b][i+j*dim]) > eps_) {
2724  nnz++;
2725  }
2726  }
2727  }
2728  }
2729 
2730  m->hessIndCol = m->hessIndRow + nnz;
2731  m->hessIndLo = m->hessIndCol + (nx_+1);
2732 
2733  // 2) store matrix entries columnwise in hessNz
2734  count = 0; // runs over all nonzero elements
2735  colCountTotal = 0; // keep track of position in large matrix
2736  rowOffset = 0;
2737  for (casadi_int b=0; b<nblocks_; b++) {
2738  casadi_int dim = dim_[b];
2739 
2740  for (casadi_int i=0; i<dim; i++) {
2741  // column 'colCountTotal' starts at element 'count'
2742  m->hessIndCol[colCountTotal] = count;
2743 
2744  for (casadi_int j=0; j<dim; j++) {
2745  if (fabs(m->hess[b][i+j*dim]) > eps_) {
2746  m->hess_lag[count] = m->hess[b][i+j*dim];
2747  m->hessIndRow[count] = j + rowOffset;
2748  count++;
2749  }
2750  }
2751  colCountTotal++;
2752  }
2753  rowOffset += dim;
2754  }
2755  m->hessIndCol[colCountTotal] = count;
2756 
2757  // 3) Set reference to lower triangular matrix
2758  for (casadi_int j=0; j<nx_; j++) {
2759  casadi_int i;
2760  for (i=m->hessIndCol[j]; i<m->hessIndCol[j+1] && m->hessIndRow[i]<j; i++) {}
2761  m->hessIndLo[j] = i;
2762  }
2763 
2764  if (count != nnz)
2765  print("***WARNING: Error in convertHessian: %i elements processed, "
2766  "should be %i elements!\n", count, nnz);
2767  }
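  /* Illustrative example of the compressed column storage built above:
   * for two 1x1 blocks diag(2, 3) we get hess_lag = {2, 3},
   * hessIndRow = {0, 1}, hessIndCol = {0, 1, 2}, and hessIndLo = {0, 1},
   * i.e. hessIndLo[j] marks where rows >= j (the lower triangle) begin
   * within column j. */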
2768 
2769  void Blocksqp::initIterate(BlocksqpMemory* m) const {
2770  m->alpha = 1.0;
2771  m->nSOCS = 0;
2772  m->reducedStepCount = 0;
2773  m->steptype = 0;
2774 
2775  m->obj = inf;
2776  m->tol = inf;
2777  m->cNorm = theta_max_;
2778  m->gradNorm = inf;
2779  m->lambdaStepNorm = 0.0;
2780  }
2781 
2782  casadi_int Blocksqp::
2783  evaluate(BlocksqpMemory* m,
2784  double *f, double *g,
2785  double *grad_f, double *jac_g) const {
2786  auto d_nlp = &m->d_nlp;
2787  m->arg[0] = d_nlp->z; // x
2788  m->arg[1] = d_nlp->p; // p
2789  m->res[0] = f; // f
2790  m->res[1] = g; // g
2791  m->res[2] = grad_f; // grad:f:x
2792  m->res[3] = jac_g; // jac:g:x
2793  return calc_function(m, "nlp_gf_jg");
2794  }
2795 
2796  casadi_int Blocksqp::
2797  evaluate(BlocksqpMemory* m, const double *xk, double *f,
2798  double *g) const {
2799  auto d_nlp = &m->d_nlp;
2800  m->arg[0] = xk; // x
2801  m->arg[1] = d_nlp->p; // p
2802  m->res[0] = f; // f
2803  m->res[1] = g; // g
2804  return calc_function(m, "nlp_fg");
2805  }
2806 
2807  casadi_int Blocksqp::
2808  evaluate(BlocksqpMemory* m,
2809  double *exact_hess_lag) const {
2810  auto d_nlp = &m->d_nlp;
2811  static std::vector<double> ones;
2812  ones.resize(nx_);
2813  for (casadi_int i=0; i<nx_; ++i) ones[i] = 1.0;
2814  static std::vector<double> minus_lam_gk;
2815  minus_lam_gk.resize(ng_);
2816  // The Lagrangian in blockSQP is L = f - lambda^T * g, whereas CasADi uses L = f + lambda^T * g
2817  for (casadi_int i=0; i<ng_; ++i) minus_lam_gk[i] = -m->lam_gk[i];
2818  m->arg[0] = d_nlp->z; // x
2819  m->arg[1] = d_nlp->p; // p
2820  m->arg[2] = get_ptr(ones); // lam:f
2821  m->arg[3] = get_ptr(minus_lam_gk); // lam:g
2822  m->res[0] = exact_hess_lag; // hess:gamma:x:x
2823  return calc_function(m, "nlp_hess_l");
2824  }
2825 
2826  BlocksqpMemory::BlocksqpMemory() {
2827  qpoases_mem = nullptr;
2828  H = nullptr;
2829  A = nullptr;
2830  qp = nullptr;
2831  }
2832 
2833  BlocksqpMemory::~BlocksqpMemory() {
2834  delete qpoases_mem;
2835  delete H;
2836  delete A;
2837  delete qp;
2838  }
2839 
2840  double Blocksqp::
2841  lInfConstraintNorm(BlocksqpMemory* m, const double* xk, const double* g) const {
2842  auto d_nlp = &m->d_nlp;
2843  return fmax(casadi_max_viol(nx_, xk, d_nlp->lbz, d_nlp->ubz),
2844  casadi_max_viol(ng_, g, d_nlp->lbz+nx_, d_nlp->ubz+nx_));
2845  }
2846 
2847 
2848  Blocksqp::Blocksqp(DeserializingStream& s) : Nlpsol(s) {
2849  s.version("Blocksqp", 1);
2850  s.unpack("Blocksqp::nblocks", nblocks_);
2851  s.unpack("Blocksqp::blocks", blocks_);
2852  s.unpack("Blocksqp::dim", dim_);
2853  s.unpack("Blocksqp::nnz_H", nnz_H_);
2854  s.unpack("Blocksqp::Asp", Asp_);
2855  s.unpack("Blocksqp::Hsp", Hsp_);
2856  s.unpack("Blocksqp::exact_hess_lag_sp_", exact_hess_lag_sp_);
2857  s.unpack("Blocksqp::linsol_plugin", linsol_plugin_);
2858  s.unpack("Blocksqp::print_header", print_header_);
2859  s.unpack("Blocksqp::print_iteration", print_iteration_);
2860  s.unpack("Blocksqp::eps", eps_);
2861  s.unpack("Blocksqp::opttol", opttol_);
2862  s.unpack("Blocksqp::nlinfeastol", nlinfeastol_);
2863  s.unpack("Blocksqp::schur", schur_);
2864  s.unpack("Blocksqp::globalization", globalization_);
2865  s.unpack("Blocksqp::restore_feas", restore_feas_);
2866  s.unpack("Blocksqp::max_line_search", max_line_search_);
2867  s.unpack("Blocksqp::max_consec_reduced_steps", max_consec_reduced_steps_);
2868  s.unpack("Blocksqp::max_consec_skipped_updates", max_consec_skipped_updates_);
2869  s.unpack("Blocksqp::max_it_qp", max_it_qp_);
2870  s.unpack("Blocksqp::max_iter", max_iter_);
2871  s.unpack("Blocksqp::warmstart", warmstart_);
2872  s.unpack("Blocksqp::qp_init", qp_init_);
2873  s.unpack("Blocksqp::block_hess", block_hess_);
2874  s.unpack("Blocksqp::hess_scaling", hess_scaling_);
2875  s.unpack("Blocksqp::fallback_scaling", fallback_scaling_);
2876  s.unpack("Blocksqp::max_time_qp", max_time_qp_);
2877  s.unpack("Blocksqp::ini_hess_diag", ini_hess_diag_);
2878  s.unpack("Blocksqp::col_eps", col_eps_);
2879  s.unpack("Blocksqp::col_tau1", col_tau1_);
2880  s.unpack("Blocksqp::col_tau2", col_tau2_);
2881  s.unpack("Blocksqp::hess_damp", hess_damp_);
2882  s.unpack("Blocksqp::hess_damp_fac", hess_damp_fac_);
2883  s.unpack("Blocksqp::hess_update", hess_update_);
2884  s.unpack("Blocksqp::fallback_update", fallback_update_);
2885  s.unpack("Blocksqp::hess_lim_mem", hess_lim_mem_);
2886  s.unpack("Blocksqp::hess_memsize", hess_memsize_);
2887  s.unpack("Blocksqp::which_second_derv", which_second_derv_);
2888  s.unpack("Blocksqp::skip_first_globalization", skip_first_globalization_);
2889  s.unpack("Blocksqp::conv_strategy", conv_strategy_);
2890  s.unpack("Blocksqp::max_conv_qp", max_conv_qp_);
2891  s.unpack("Blocksqp::max_soc_iter", max_soc_iter_);
2892  s.unpack("Blocksqp::gamma_theta", gamma_theta_);
2893  s.unpack("Blocksqp::gamma_f", gamma_f_);
2894  s.unpack("Blocksqp::kappa_soc", kappa_soc_);
2895  s.unpack("Blocksqp::kappa_f", kappa_f_);
2896  s.unpack("Blocksqp::theta_max", theta_max_);
2897  s.unpack("Blocksqp::theta_min", theta_min_);
2898  s.unpack("Blocksqp::delta", delta_);
2899  s.unpack("Blocksqp::s_theta", s_theta_);
2900  s.unpack("Blocksqp::s_f", s_f_);
2901  s.unpack("Blocksqp::kappa_minus", kappa_minus_);
2902  s.unpack("Blocksqp::kappa_plus", kappa_plus_);
2903  s.unpack("Blocksqp::kappa_plus_max", kappa_plus_max_);
2904  s.unpack("Blocksqp::delta_h0", delta_h0_);
2905  s.unpack("Blocksqp::eta", eta_);
2906  s.unpack("Blocksqp::obj_lo", obj_lo_);
2907  s.unpack("Blocksqp::obj_up", obj_up_);
2908  s.unpack("Blocksqp::rho", rho_);
2909  s.unpack("Blocksqp::zeta", zeta_);
2910  s.unpack("Blocksqp::rp_solver", rp_solver_);
2911  s.unpack("Blocksqp::print_maxit_reached", print_maxit_reached_);
2912 
2913  }
2914 
2915  void Blocksqp::serialize_body(SerializingStream &s) const {
2916  Nlpsol::serialize_body(s);
2917  s.version("Blocksqp", 1);
2918  s.pack("Blocksqp::nblocks", nblocks_);
2919  s.pack("Blocksqp::blocks", blocks_);
2920  s.pack("Blocksqp::dim", dim_);
2921  s.pack("Blocksqp::nnz_H", nnz_H_);
2922  s.pack("Blocksqp::Asp", Asp_);
2923  s.pack("Blocksqp::Hsp", Hsp_);
2924  s.pack("Blocksqp::exact_hess_lag_sp_", exact_hess_lag_sp_);
2925  s.pack("Blocksqp::linsol_plugin", linsol_plugin_);
2926  s.pack("Blocksqp::print_header", print_header_);
2927  s.pack("Blocksqp::print_iteration", print_iteration_);
2928  s.pack("Blocksqp::eps", eps_);
2929  s.pack("Blocksqp::opttol", opttol_);
2930  s.pack("Blocksqp::nlinfeastol", nlinfeastol_);
2931  s.pack("Blocksqp::schur", schur_);
2932  s.pack("Blocksqp::globalization", globalization_);
2933  s.pack("Blocksqp::restore_feas", restore_feas_);
2934  s.pack("Blocksqp::max_line_search", max_line_search_);
2935  s.pack("Blocksqp::max_consec_reduced_steps", max_consec_reduced_steps_);
2936  s.pack("Blocksqp::max_consec_skipped_updates", max_consec_skipped_updates_);
2937  s.pack("Blocksqp::max_it_qp", max_it_qp_);
2938  s.pack("Blocksqp::max_iter", max_iter_);
2939  s.pack("Blocksqp::warmstart", warmstart_);
2940  s.pack("Blocksqp::qp_init", qp_init_);
2941  s.pack("Blocksqp::block_hess", block_hess_);
2942  s.pack("Blocksqp::hess_scaling", hess_scaling_);
2943  s.pack("Blocksqp::fallback_scaling", fallback_scaling_);
2944  s.pack("Blocksqp::max_time_qp", max_time_qp_);
2945  s.pack("Blocksqp::ini_hess_diag", ini_hess_diag_);
2946  s.pack("Blocksqp::col_eps", col_eps_);
2947  s.pack("Blocksqp::col_tau1", col_tau1_);
2948  s.pack("Blocksqp::col_tau2", col_tau2_);
2949  s.pack("Blocksqp::hess_damp", hess_damp_);
2950  s.pack("Blocksqp::hess_damp_fac", hess_damp_fac_);
2951  s.pack("Blocksqp::hess_update", hess_update_);
2952  s.pack("Blocksqp::fallback_update", fallback_update_);
2953  s.pack("Blocksqp::hess_lim_mem", hess_lim_mem_);
2954  s.pack("Blocksqp::hess_memsize", hess_memsize_);
2955  s.pack("Blocksqp::which_second_derv", which_second_derv_);
2956  s.pack("Blocksqp::skip_first_globalization", skip_first_globalization_);
2957  s.pack("Blocksqp::conv_strategy", conv_strategy_);
2958  s.pack("Blocksqp::max_conv_qp", max_conv_qp_);
2959  s.pack("Blocksqp::max_soc_iter", max_soc_iter_);
2960  s.pack("Blocksqp::gamma_theta", gamma_theta_);
2961  s.pack("Blocksqp::gamma_f", gamma_f_);
2962  s.pack("Blocksqp::kappa_soc", kappa_soc_);
2963  s.pack("Blocksqp::kappa_f", kappa_f_);
2964  s.pack("Blocksqp::theta_max", theta_max_);
2965  s.pack("Blocksqp::theta_min", theta_min_);
2966  s.pack("Blocksqp::delta", delta_);
2967  s.pack("Blocksqp::s_theta", s_theta_);
2968  s.pack("Blocksqp::s_f", s_f_);
2969  s.pack("Blocksqp::kappa_minus", kappa_minus_);
2970  s.pack("Blocksqp::kappa_plus", kappa_plus_);
2971  s.pack("Blocksqp::kappa_plus_max", kappa_plus_max_);
2972  s.pack("Blocksqp::delta_h0", delta_h0_);
2973  s.pack("Blocksqp::eta", eta_);
2974  s.pack("Blocksqp::obj_lo", obj_lo_);
2975  s.pack("Blocksqp::obj_up", obj_up_);
2976  s.pack("Blocksqp::rho", rho_);
2977  s.pack("Blocksqp::zeta", zeta_);
2978  s.pack("Blocksqp::rp_solver", rp_solver_);
2979  s.pack("Blocksqp::print_maxit_reached", print_maxit_reached_);
2980  }
2981 
2982 } // namespace casadi