fmu_function.cpp
1 /*
2  * This file is part of CasADi.
3  *
4  * CasADi -- A symbolic framework for dynamic optimization.
5  * Copyright (C) 2010-2023 Joel Andersson, Joris Gillis, Moritz Diehl,
6  * KU Leuven. All rights reserved.
7  * Copyright (C) 2011-2014 Greg Horn
8  *
9  * CasADi is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 3 of the License, or (at your option) any later version.
13  *
14  * CasADi is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with CasADi; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  *
23  */
24 
25 
26 #include "fmu_function.hpp"
27 #include "casadi_misc.hpp"
28 #include "serializing_stream.hpp"
29 #include "dae_builder_internal.hpp"
30 #include "filesystem_impl.hpp"
31 
32 #include <fstream>
33 #include <iostream>
34 #include <sstream>
35 #include <algorithm>
36 
37 #ifdef WITH_OPENMP
38 #include <omp.h>
39 #endif // WITH_OPENMP
40 
41 #ifdef CASADI_WITH_THREAD
42 #ifdef CASADI_WITH_THREAD_MINGW
43 #include <mingw.thread.h>
44 #else // CASADI_WITH_THREAD_MINGW
45 #include <thread>
46 #endif // CASADI_WITH_THREAD_MINGW
47 #endif // CASADI_WITH_THREAD
48 
49 namespace casadi {
50 
// Rejects requests for multiple memory instances when the FMU only permits a
// single instantiation per process.
// NOTE(review): inner line 52 is missing from this extract — presumably a
// guard (an `if` testing n against the FMU's instantiation capability) that
// opens the brace closed on line 56. Confirm against the upstream file.
51 void FmuFunction::check_mem_count(casadi_int n) const {
53  casadi_error("FMU '" + fmu_.instance_name() + "' [" + fmu_.class_name() + "] "
54  "declares 'canBeInstantiatedOnlyOncePerProcess' to be true. "
55  "Regenerate your FMU with this option set to false.");
56  }
57 }
58 
59 int FmuFunction::init_mem(void* mem) const {
60  casadi_assert(mem != 0, "Memory is null");
61  // Instantiate base classes
62  if (FunctionInternal::init_mem(mem)) return 1;
63  // Number of memory instances needed
64  casadi_int n_mem = std::max(static_cast<casadi_int>(1),
65  std::max(max_jac_tasks_, max_hess_tasks_));
66  // Initialize master and all slaves
67  FmuMemory* m = static_cast<FmuMemory*>(mem);
68  for (casadi_int i = 0; i < n_mem; ++i) {
69  // Initialize the memory object itself or a slave
70  FmuMemory* m1 = i == 0 ? m : m->slaves.at(i - 1);
71  if (fmu_.init_mem(m1)) return 1;
72  }
73  return 0;
74 }
75 
76 void* FmuFunction::alloc_mem() const {
77  // Create (master) memory object
78  FmuMemory* m = new FmuMemory(*this);
79  // Attach additional (slave) memory objects
80  for (casadi_int i = 1; i < max_jac_tasks_; ++i) {
81  m->slaves.push_back(new FmuMemory(*this));
82  }
83  return m;
84 }
85 
// Free a master memory object, its slaves, and their FMU instances.
86 void FmuFunction::free_mem(void *mem) const {
 87  // Consistency check
 88  casadi_assert(mem != nullptr, "Memory is null");
 89  FmuMemory* m = static_cast<FmuMemory*>(mem);
 90  // Free slave memory
 91  for (FmuMemory*& s : m->slaves) {
 92  if (!s) continue;
 93  // Free FMU memory
 94  if (s->instance) {
// NOTE(review): inner line 95 is missing from this extract — presumably the
// call that releases the FMU instance (e.g. fmu_.free_instance(s->instance);)
// before the pointer is cleared. Confirm against the upstream file.
 96  s->instance = nullptr;
 97  }
 98  // Free the slave
 99  delete s;
 100  }
 101  // Free FMI memory
 102  if (m->instance) {
// NOTE(review): inner line 103 is missing here as well — the matching
// instance-release call for the master object.
 104  m->instance = nullptr;
 105  }
 106  // Free the memory object
 107  delete m;
 108 }
109 
// Constructor: parses the requested input/output scheme entries into
// InputStruct/OutputStruct descriptors, records which derivative blocks are
// requested (forward, adjoint, Jacobian, Hessian) and sets option defaults.
// NOTE(review): inner lines 137, 157-158, 166, 175 and 178 are missing from
// this extract (e.g. the OutputType::JAC_TRANS case label and several option
// defaults) — confirm against the upstream file.
110 FmuFunction::FmuFunction(const std::string& name, const Fmu& fmu,
 111  const std::vector<std::string>& name_in,
 112  const std::vector<std::string>& name_out)
 113  : FunctionInternal(name), fmu_(fmu) {
 114  // Parse input IDs
 115  in_.resize(name_in.size());
 116  for (size_t k = 0; k < name_in.size(); ++k) {
 117  try {
 118  in_[k] = InputStruct::parse(name_in[k], &fmu);
 119  } catch (std::exception& e) {
 120  casadi_error("Cannot process input " + name_in[k] + ": " + std::string(e.what()));
 121  }
 122  }
 123  // Parse output IDs
 124  out_.resize(name_out.size());
 125  for (size_t k = 0; k < name_out.size(); ++k) {
 126  try {
 127  out_[k] = OutputStruct::parse(name_out[k], &fmu);
 128  } catch (std::exception& e) {
 129  casadi_error("Cannot process output " + name_out[k] + ": " + std::string(e.what()));
 130  }
 131  }
 132  // Which inputs and outputs exist
 133  has_fwd_ = has_adj_ = has_jac_ = has_hess_ = false;
 134  for (auto&& i : out_) {
 135  switch (i.type) {
 136  case OutputType::JAC:
 138  has_jac_ = true;
 139  break;
 140  case OutputType::FWD:
 141  has_fwd_ = true;
 142  break;
 143  case OutputType::ADJ:
 144  has_adj_ = true;
 145  break;
 146  case OutputType::HESS:
 147  has_adj_ = true;
 148  has_hess_ = true;
// Intentional fall-through into default (which only breaks) — harmless here.
 149  default:
 150  break;
 151  }
 152  }
 153  // Set input/output names
 154  name_in_ = name_in;
 155  name_out_ = name_out;
 156  // Default options
 159  validate_forward_ = false;
 160  validate_hessian_ = false;
 161  validate_ad_file_ = "";
 162  make_symmetric_ = true;
 163  nfwd_ = has_fwd_ ? 1 : 0;
 164  nadj_ = has_adj_ ? 1 : 0;
 165  // Use FD for second and higher order derivatives
 167  step_ = 1e-6;
 168  abstol_ = 1e-3;
 169  reltol_ = 1e-3;
 170  print_progress_ = false;
 171  new_jacobian_ = true;
 172  new_forward_ = true;
 173  new_hessian_ = true;
 174  hessian_coloring_ = true;
 176  // Number of parallel tasks, by default
 177  max_n_tasks_ = 1;
 179 }
180 
181 void FmuFunction::change_option(const std::string& option_name,
182  const GenericType& option_value) {
183  if (option_name == "print_progress") {
184  print_progress_ = option_value;
185  } else {
186  // Option not found - continue to base classes
187  FunctionInternal::change_option(option_name, option_value);
188  }
189 }
190 
// Destructor body: releases all allocated memory objects via clear_mem().
// NOTE(review): the signature line (inner line 191, presumably
// `FmuFunction::~FmuFunction() {`) is missing from this extract.
192  // Free memory
193  clear_mem();
194 }
195 
// Static option metadata table (name -> {type, description}) consumed by the
// option-parsing machinery in init().
// NOTE(review): the declaration lines (inner 196-197, presumably
// `const Options FmuFunction::options_ = {{&FunctionInternal::options_},`)
// and several `{OT_...,` type lines (199, 202, 208) are missing from this
// extract. Also note the typo "Auxilliary" in the 'aux' description string —
// a runtime string, so left untouched here; fix upstream.
198  {{"scheme_in",
 200  "Names of the inputs in the scheme"}},
 201  {"scheme_out",
 203  "Names of the outputs in the scheme"}},
 204  {"scheme",
 205  {OT_DICT,
 206  "Definitions of the scheme variables"}},
 207  {"aux",
 209  "Auxilliary variables"}},
 210  {"enable_ad",
 211  {OT_BOOL,
 212  "[DEPRECATED] Renamed uses_directional_derivatives"}},
 213  {"nfwd",
 214  {OT_INT,
 215  "Number of forward sensitivities to be calculated [1]"}},
 216  {"nadj",
 217  {OT_INT,
 218  "Number of adjoint sensitivities to be calculated [1]"}},
 219  {"uses_directional_derivatives",
 220  {OT_BOOL,
 221  "Use the analytic forward directional derivative support in the FMU"}},
 222  {"validate_forward",
 223  {OT_BOOL,
 224  "Compare forward derivatives with finite differences for validation"}},
 225  {"validate_hessian",
 226  {OT_BOOL,
 227  "Validate entries of the Hessian for self-consistency"}},
 228  {"validate_ad",
 229  {OT_BOOL,
 230  "[DEPRECATED] Renamed 'validate_forward'"}},
 231  {"validate_ad_file",
 232  {OT_STRING,
 233  "Redirect results of Hessian validation to a file instead of generating a warning"}},
 234  {"check_hessian",
 235  {OT_BOOL,
 236  "[DEPRECATED] Renamed 'validate_hessian'"}},
 237  {"make_symmetric",
 238  {OT_BOOL,
 239  "Ensure Hessian is symmetric"}},
 240  {"step",
 241  {OT_DOUBLE,
 242  "Step size, scaled by nominal value"}},
 243  {"abstol",
 244  {OT_DOUBLE,
 245  "Absolute error tolerance, scaled by nominal value"}},
 246  {"reltol",
 247  {OT_DOUBLE,
 248  "Relative error tolerance"}},
 249  {"parallelization",
 250  {OT_STRING,
 251  "Parallelization [SERIAL|openmp|thread]"}},
 252  {"print_progress",
 253  {OT_BOOL,
 254  "Print progress during Jacobian/Hessian evaluation"}},
 255  {"new_forward",
 256  {OT_BOOL,
 257  "Use forward AD implementation in class (conversion option, to be removed)"}},
 258  {"new_jacobian",
 259  {OT_BOOL,
 260  "Use Jacobian implementation in class (conversion option, to be removed)"}},
 261  {"new_hessian",
 262  {OT_BOOL,
 263  "Use Hessian implementation in class (conversion option, to be removed)"}},
 264  {"hessian_coloring",
 265  {OT_BOOL,
 266  "Enable the use of graph coloring (star coloring) for Hessian calculation. "
 267  "Note that disabling the coloring can improve symmetry check diagnostics."}}
 268  }
 269 };
270 
// Initialization: reads user options, selects the parallelization backend,
// builds the index sets for the extended Jacobian/Hessian (inputs in jac_in_,
// outputs in jac_out_), computes graph colorings, and reserves the shared and
// per-thread work vectors used during evaluation.
// NOTE(review): numerous inner lines are missing from this extract (321: base
// class init; 327/330: consistency-check conditions; 336: validation-file
// open; 345/349/355: switch case labels; 363; 478: jac_sp_ retrieval; 481:
// Jacobian coloring; 487-489; 492: max_jac_tasks_; 508: hess_sp_ retrieval;
// 524: star coloring; 533: row zeroing; 541: max_hess_tasks_; 553; 558:
// max_n_tasks_ update). Confirm any change against the upstream file.
271 void FmuFunction::init(const Dict& opts) {
 272  // Read options
 273  for (auto&& op : opts) {
 274  if (op.first=="enable_ad") {
 275  casadi_warning("Option 'enable_ad' has been renamed 'uses_directional_derivatives'");
 276  uses_directional_derivatives_ = op.second;
 277  } else if (op.first=="uses_directional_derivatives") {
 278  uses_directional_derivatives_ = op.second;
 279  } else if (op.first=="nfwd") {
 280  nfwd_ = op.second;
 281  } else if (op.first=="nadj") {
 282  nadj_ = op.second;
 283  } else if (op.first=="uses_adjoint_derivatives") {
 284  uses_adjoint_derivatives_ = op.second;
 285  } else if (op.first=="validate_forward") {
 286  validate_forward_ = op.second;
 287  } else if (op.first=="validate_hessian") {
 288  validate_hessian_ = op.second;
 289  } else if (op.first=="validate_ad") {
 290  casadi_warning("Option 'validate_ad' has been renamed 'validate_forward'");
 291  validate_forward_ = op.second;
 292  } else if (op.first=="check_hessian") {
 293  casadi_warning("Option 'check_hessian' has been renamed 'validate_hessian'");
 294  validate_hessian_ = op.second;
 295  } else if (op.first=="validate_ad_file") {
 296  validate_ad_file_ = op.second.to_string();
 297  } else if (op.first=="make_symmetric") {
 298  make_symmetric_ = op.second;
 299  } else if (op.first=="step") {
 300  step_ = op.second;
 301  } else if (op.first=="abstol") {
 302  abstol_ = op.second;
 303  } else if (op.first=="reltol") {
 304  reltol_ = op.second;
 305  } else if (op.first=="parallelization") {
 306  parallelization_ = to_enum<Parallelization>(op.second, "serial");
 307  } else if (op.first=="print_progress") {
 308  print_progress_ = op.second;
 309  } else if (op.first=="new_forward") {
 310  new_forward_ = op.second;
 311  } else if (op.first=="new_jacobian") {
 312  new_jacobian_ = op.second;
 313  } else if (op.first=="new_hessian") {
 314  new_hessian_ = op.second;
 315  } else if (op.first=="hessian_coloring") {
 316  hessian_coloring_ = op.second;
 317  }
 318  }
 319 
 320  // Call the initialization method of the base class
 322 
 323  // Read FD mode
 324  fd_ = to_enum<FdMode>(fd_method_, "forward");
 325 
 326  // Consistency checks
 328  "FMU does not provide support for analytic derivatives");
 329  if (validate_forward_ && !uses_directional_derivatives_) casadi_error("Inconsistent options");
 331  "FMU does not provide support for adjoint derivatives");
 332 
 333  // New AD validation file, if any
 334  if (!validate_ad_file_.empty()) {
 335  std::ofstream valfile;
 337  valfile << "Output Input Value Nominal Min Max AD FD Step Offset Stencil" << std::endl;
 338  }
 339 
 340  // Quick return if no Jacobian calculation
 341  if (!has_jac_ && !has_adj_ && !has_hess_) return;
 342 
 343  // Parallelization
 344  switch (parallelization_) {
 346  if (verbose_) casadi_message("Serial evaluation");
 347  break;
 348 #ifdef WITH_OPENMP
 350  max_n_tasks_ = omp_get_max_threads();
 351  if (verbose_) casadi_message("OpenMP using at most " + str(max_n_tasks_) + " threads");
 352  break;
 353 #endif // WITH_OPENMP
 354 #ifdef CASADI_WITH_THREAD
 356  max_n_tasks_ = std::thread::hardware_concurrency();
 357  if (verbose_) casadi_message("std::thread using at most " + str(max_n_tasks_) + " threads");
 358  break;
 359 #endif // CASADI_WITH_THREAD
 360  default:
 361  casadi_warning("Parallelization " + to_string(parallelization_)
 362  + " not enabled during compilation. Falling back to serial evaluation");
 364  break;
 365  }
 366 
 367  // Collect all inputs in any Jacobian, Hessian or adjoint block
 368  std::vector<size_t> in_jac(fmu_.n_in(), 0);
 369  jac_in_.clear();
 370  jac_nom_in_.clear();
 371  for (auto&& i : out_) {
 372  if (i.type == OutputType::JAC || i.type == OutputType::JAC_TRANS
 373  || i.type == OutputType::ADJ || i.type == OutputType::HESS) {
 374  // Get input indices
 375  const std::vector<size_t>& iind = fmu_.ired(i.wrt);
 376  // Skip if no entries
 377  if (iind.empty()) continue;
 378  // Consistency check
 379  bool exists = in_jac[iind.front()] > 0;
 380  for (size_t j : iind) casadi_assert((in_jac[j] > 0) == exists, "Jacobian not a block");
 381  // Add selection
 382  if (!exists) {
 383  for (size_t j : iind) {
 384  jac_in_.push_back(j);
 385  jac_nom_in_.push_back(fmu_.nominal_in(j));
 386  in_jac[j] = jac_in_.size();
 387  }
 388  }
 389  // Add column interval
 390  i.cbegin = in_jac[iind.front()] - 1;
 391  i.cend = i.cbegin + iind.size();
 392  // Also rows for Hessian blocks
 393  if (i.type == OutputType::HESS) {
 394  // Get input indices
 395  const std::vector<size_t>& iind = fmu_.ired(i.ind);
 396  // Skip if no entries
 397  if (iind.empty()) continue;
 398  // Consistency check
 399  bool exists = in_jac[iind.front()] > 0;
 400  for (size_t j : iind) casadi_assert((in_jac[j] > 0) == exists, "Hessian not a block");
 401  // Add selection
 402  if (!exists) {
 403  for (size_t j : iind) {
 404  jac_in_.push_back(j);
 405  jac_nom_in_.push_back(fmu_.nominal_in(j));
 406  in_jac[j] = jac_in_.size();
 407  }
 408  }
 409  // Add column interval
 410  i.rbegin = in_jac[iind.front()] - 1;
 411  i.rend = i.rbegin + iind.size();
 412  }
 413  }
 414  }
 415 
 416  // Transpose of Jacobian sparsity
 417  sp_trans_map_.resize(out_.size(), -1);
 418  sp_trans_.clear();
 419 
 420  // Collect all outputs in any Jacobian or adjoint block
 421  in_jac.resize(fmu_.n_out());
 422  std::fill(in_jac.begin(), in_jac.end(), 0);
 423  jac_out_.clear();
 424  for (size_t k = 0; k < out_.size(); ++k) {
 425  OutputStruct& i = out_[k];
 426  if (i.type == OutputType::JAC || i.type == OutputType::JAC_TRANS) {
 427  // Get output indices
 428  const std::vector<size_t>& oind = fmu_.ored(i.ind);
 429  // Skip if no entries
 430  if (oind.empty()) continue;
 431  // Consistency check
 432  bool exists = in_jac[oind.front()] > 0;
 433  for (size_t j : oind) casadi_assert((in_jac[j] > 0) == exists, "Jacobian not a block");
 434  // Add selection
 435  if (!exists) {
 436  for (size_t j : oind) {
 437  jac_out_.push_back(j);
 438  in_jac[j] = jac_out_.size();
 439  }
 440  }
 441  // Add row interval
 442  i.rbegin = in_jac[oind.front()] - 1;
 443  i.rend = i.rbegin + oind.size();
 444  // Additional memory for transpose
 445  if (i.type == OutputType::JAC_TRANS) {
 446  // Retrieve the sparsity pattern
 447  const Sparsity& sp = sparsity_out(k);
 448  // Save transpose of sparsity pattern
 449  sp_trans_map_.at(k) = sp_trans_.size();
 450  sp_trans_.push_back(sp.T());
 451  // Work vectors for casadi_trans
 452  alloc_w(sp.nnz());
 453  alloc_iw(sp.size2());
 454  }
 455  }
 456  }
 457  // NOTE(@jaeandersson): Make conditional !need_jac && uses_adjoint_derivatives_?
 458  for (auto&& i : in_) {
 459  if (i.type == InputType::ADJ) {
 460  // Get output indices
 461  const std::vector<size_t>& oind = fmu_.ored(i.ind);
 462  // Skip if no entries
 463  if (oind.empty()) continue;
 464  // Consistency check
 465  bool exists = in_jac[oind.front()] > 0;
 466  for (size_t j : oind) casadi_assert((in_jac[j] > 0) == exists, "Jacobian not a block");
 467  // Add selection
 468  if (!exists) {
 469  for (size_t j : oind) {
 470  jac_out_.push_back(j);
 471  in_jac[j] = jac_out_.size();
 472  }
 473  }
 474  }
 475  }
 476 
 477  // Get sparsity pattern for extended Jacobian
 479 
 480  // Calculate graph coloring
 482  if (verbose_) casadi_message("Jacobian graph coloring: " + str(jac_sp_.size2())
 483  + " -> " + str(jac_colors_.size2()) + " directions");
 484 
 485  // Setup Jacobian memory
 486  casadi_jac_setup(&p_, jac_sp_, jac_colors_);
 490 
 491  // Do not use more threads than there are colors in the Jacobian
 493 
 494  // Work vector for storing extended Jacobian, shared between threads
 495  if (has_jac_) {
 496  alloc_w(jac_sp_.nnz(), true); // jac_nz
 497  }
 498 
 499  // Work vectors for adjoint derivative calculation, shared between threads
 500  if (has_adj_) {
 501  alloc_w(nadj_ * fmu_.n_out(), true); // aseed
 502  alloc_w(nadj_ * fmu_.n_in(), true); // asens
 503  }
 504 
 505  // If Hessian calculation is needed
 506  if (has_hess_) {
 507  // Get sparsity pattern for extended Hessian
 509  casadi_assert(hess_sp_.size1() == jac_in_.size(), "Inconsistent Hessian dimensions");
 510  casadi_assert(hess_sp_.size2() == jac_in_.size(), "Inconsistent Hessian dimensions");
 511  const casadi_int *hess_row = hess_sp_.row();
 512  casadi_int hess_nnz = hess_sp_.nnz();
 513 
 514  // Get nonlinearly entering variables
 515  std::vector<bool> is_nonlin(jac_in_.size(), false);
 516  for (casadi_int k = 0; k < hess_nnz; ++k) is_nonlin[hess_row[k]] = true;
 517  nonlin_.clear();
 518  for (casadi_int c = 0; c < jac_in_.size(); ++c) {
 519  if (is_nonlin[c]) nonlin_.push_back(c);
 520  }
 521  // Calculate graph coloring
 522  if (hessian_coloring_) {
 523  // Star coloring
 525  if (verbose_) casadi_message("Hessian graph coloring: " + str(nonlin_.size())
 526  + " -> " + str(hess_colors_.size2()) + " directions");
 527  // Indices of non-nonlinearly entering variables
 528  std::vector<casadi_int> zind;
 529  for (casadi_int c = 0; c < jac_in_.size(); ++c) {
 530  if (!is_nonlin[c]) zind.push_back(c);
 531  }
 532  // Zero out corresponding rows
 534  } else {
 535  // One color for each nonlinear variable
 536  hess_colors_ = Sparsity(jac_in_.size(), nonlin_.size(), range(nonlin_.size() + 1), nonlin_);
 537  if (verbose_) casadi_message("Hessian calculation for " + str(nonlin_.size()) + " variables");
 538  }
 539 
 540  // Number of threads to be used for Hessian calculation
 542 
 543  // Work vector for storing extended Hessian, shared between threads
 544  alloc_w(hess_sp_.nnz(), true); // hess_nz
 545 
 546  // Work vector for perturbed adjoint sensitivities
 547  alloc_w(max_hess_tasks_ * fmu_.n_in(), true); // pert_asens
 548 
 549  // Work vector for avoiding conflicting assignments for star coloring
 550  alloc_iw(max_hess_tasks_ * fmu_.n_in(), true); // star_iw
 551 
 552  // Work vector for making symmetric or checking symmetry
 554  }
 555 
 556  // Total number of threads used for Jacobian/adjoint calculation
 557  // Note: Adjoint calculation also used for Hessian
 559  if (verbose_) casadi_message("Jacobian calculation with " + str(max_n_tasks_) + " threads");
 560 
 561  // Work vectors for Jacobian/adjoint/Hessian calculation, for each thread
 562  casadi_int jac_iw, jac_w;
 563  casadi_jac_work(&p_, &jac_iw, &jac_w);
 564  alloc_iw(max_n_tasks_ * jac_iw, true);
 565  alloc_w(max_n_tasks_ * jac_w, true);
 566 }
567 
// Static helper: classifies scheme entry names into the underlying FMU input
// and output schemes (deduplicated and sorted). Either output pointer may be
// null, in which case that scheme is not collected.
// NOTE(review): the signature line (inner 568, presumably
// `void FmuFunction::identify_io(`) is missing from this extract.
569  std::vector<std::string>* scheme_in,
 570  std::vector<std::string>* scheme_out,
 571  const std::vector<std::string>& name_in,
 572  const std::vector<std::string>& name_out) {
 573  // Clear returns
 574  if (scheme_in) scheme_in->clear();
 575  if (scheme_out) scheme_out->clear();
 576  // Parse FmuFunction inputs
 577  for (const std::string& n : name_in) {
 578  try {
 579  (void)InputStruct::parse(n, 0, scheme_in, scheme_out);
 580  } catch (std::exception& e) {
 581  casadi_error("Cannot process input " + n + ": " + std::string(e.what()));
 582  }
 583  }
 584  // Parse FmuFunction outputs
 585  for (const std::string& n : name_out) {
 586  try {
 587  (void)OutputStruct::parse(n, 0, scheme_in, scheme_out);
 588  } catch (std::exception& e) {
 589  casadi_error("Cannot process output " + n + ": " + std::string(e.what()));
 590  }
 591  }
 592  // Remove duplicates in scheme_in, also sorts alphabetically
 593  if (scheme_in) {
 594  std::set<std::string> s(scheme_in->begin(), scheme_in->end());
 595  scheme_in->assign(s.begin(), s.end());
 596  }
 597  // Remove duplicates in scheme_out, also sorts alphabetically
 598  if (scheme_out) {
 599  std::set<std::string> s(scheme_out->begin(), scheme_out->end());
 600  scheme_out->assign(s.begin(), s.end());
 601  }
 602 }
603 
// Parse an input scheme entry name ("x", "fwd:x", "adj:y", "out:y",
// "out:adj:x") into an InputStruct. When fmu is non-null the corresponding
// variable index is resolved; the referenced names are optionally appended
// to name_in/name_out.
604 InputStruct InputStruct::parse(const std::string& n, const Fmu* fmu,
 605  std::vector<std::string>* name_in, std::vector<std::string>* name_out) {
 606  // Return value
 607  InputStruct s;
 608  // Look for a prefix
 609  if (has_prefix(n)) {
 610  // Get the prefix
 611  std::string pref, rem;
 612  pref = pop_prefix(n, &rem);
 613  if (pref == "out") {
 614  if (has_prefix(rem)) {
 615  // Second order function output (unused): Get the prefix
 616  pref = pop_prefix(rem, &rem);
 617  if (pref == "adj") {
// NOTE(review): inner line 618 is missing from this extract — presumably
// `s.type = InputType::ADJ_OUT;`. Confirm against the upstream file.
 619  s.ind = fmu ? fmu->index_in(rem) : -1;
 620  if (name_in) name_in->push_back(rem);
 621  } else {
 622  casadi_error("Cannot process: " + n);
 623  }
 624  } else {
 625  // Nondifferentiated function output (unused)
 626  s.type = InputType::OUT;
 627  s.ind = fmu ? fmu->index_out(rem) : -1;
 628  if (name_out) name_out->push_back(rem);
 629  }
 630  } else if (pref == "fwd") {
 631  // Forward seed
 632  s.type = InputType::FWD;
 633  s.ind = fmu ? fmu->index_in(rem) : 0;
 634  if (name_in) name_in->push_back(rem);
 635  } else if (pref == "adj") {
 636  // Adjoint seed
 637  s.type = InputType::ADJ;
 638  s.ind = fmu ? fmu->index_out(rem) : 0;
 639  if (name_out) name_out->push_back(rem);
 640  } else {
 641  // No such prefix
 642  casadi_error("No such prefix: " + pref);
 643  }
 644  } else {
 645  // No prefix - regular input
 646  s.type = InputType::REG;
 647  s.ind = fmu ? fmu->index_in(n) : 0;
 648  if (name_in) name_in->push_back(n);
 649  }
 650  // Return input struct
 651  return s;
 652 }
653 
// Parse an output scheme entry name ("y", "fwd:y", "adj:x", and the
// "jac:..." family including transposed-Jacobian and Hessian blocks) into an
// OutputStruct, optionally resolving indices against fmu and collecting the
// referenced names into name_in/name_out.
654 OutputStruct OutputStruct::parse(const std::string& n, const Fmu* fmu,
 655  std::vector<std::string>* name_in, std::vector<std::string>* name_out) {
 656  // Return value
 657  OutputStruct s;
 658  // Look for prefix
 659  if (has_prefix(n)) {
 660  // Get the prefix
 661  std::string pref, rem;
 662  pref = pop_prefix(n, &rem);
 663  if (pref == "jac") {
 664  // Jacobian block
 665  casadi_assert(has_prefix(rem), "Two arguments expected for Jacobian block");
 666  pref = pop_prefix(rem, &rem);
 667  if (pref == "adj") {
 668  // Jacobian of adjoint sensitivity
 669  casadi_assert(has_prefix(rem), "Two arguments expected for Jacobian block");
 670  pref = pop_prefix(rem, &rem);
 671  if (has_prefix(rem)) {
 672  // Jacobian with respect to a sensitivity seed
 673  std::string sens = pref;
 674  pref = pop_prefix(rem, &rem);
 675  if (pref == "adj") {
 676  // Jacobian of adjoint sensitivity w.r.t. adjoint seed -> Transpose of Jacobian
// NOTE(review): inner line 677 (presumably `s.type = OutputType::JAC_TRANS;`)
// is missing from this extract.
 678  s.ind = fmu ? fmu->index_out(rem) : -1;
 679  if (name_out) name_out->push_back(rem);
 680  s.wrt = fmu ? fmu->index_in(sens) : -1;
 681  if (name_in) name_in->push_back(sens);
 682  } else if (pref == "out") {
 683  // Jacobian w.r.t. to dummy output
// NOTE(review): inner line 684 (presumably `s.type = OutputType::JAC_ADJ_OUT;`)
// is missing from this extract.
 685  s.ind = fmu ? fmu->index_in(sens) : -1;
 686  if (name_in) name_in->push_back(sens);
 687  s.wrt = fmu ? fmu->index_out(rem) : -1;
// NOTE(review): likely bug — the guard tests name_in but dereferences
// name_out; if name_out is null while name_in is not, this dereferences a
// null pointer. Should presumably read `if (name_out) ...`. Flagged only
// (this extract is incomplete); fix upstream.
 688  if (name_in) name_out->push_back(rem);
 689  } else {
 690  casadi_error("No such prefix: " + pref);
 691  }
 692  } else {
 693  // Hessian output
// NOTE(review): inner line 694 (presumably `s.type = OutputType::HESS;`) is
// missing from this extract.
 695  s.ind = fmu ? fmu->index_in(pref) : -1;
 696  if (name_in) name_in->push_back(pref);
 697  s.wrt = fmu ? fmu->index_in(rem) : -1;
 698  if (name_in) name_in->push_back(rem);
 699  }
 700  } else {
 701  if (has_prefix(rem)) {
 702  std::string out = pref;
 703  pref = pop_prefix(rem, &rem);
 704  if (pref == "adj") {
 705  // Jacobian of regular output w.r.t. adjoint sensitivity seed
// NOTE(review): inner line 706 (presumably `s.type = OutputType::JAC_REG_ADJ;`)
// is missing from this extract.
 707  s.ind = fmu ? fmu->index_out(out) : -1;
 708  if (name_out) name_out->push_back(out);
 709  s.wrt = fmu ? fmu->index_out(rem) : -1;
 710  if (name_out) name_out->push_back(rem);
 711  } else {
 712  casadi_error("No such prefix: " + pref);
 713  }
 714  } else {
 715  // Regular Jacobian
 716  s.type = OutputType::JAC;
 717  s.ind = fmu ? fmu->index_out(pref) : -1;
 718  if (name_out) name_out->push_back(pref);
 719  s.wrt = fmu ? fmu->index_in(rem) : -1;
 720  if (name_in) name_in->push_back(rem);
 721  }
 722  }
 723  } else if (pref == "fwd") {
 724  // Forward sensitivity
 725  s.type = OutputType::FWD;
 726  s.ind = fmu ? fmu->index_out(rem) : -1;
 727  if (name_out) name_out->push_back(rem);
 728  } else if (pref == "adj") {
 729  // Adjoint sensitivity
 730  s.type = OutputType::ADJ;
 731  s.wrt = fmu ? fmu->index_in(rem) : -1;
 732  if (name_in) name_in->push_back(rem);
 733  } else {
 734  // No such prefix
 735  casadi_error("No such prefix: " + pref);
 736  }
 737  } else {
 738  // No prefix - regular output
 739  s.type = OutputType::REG;
 740  s.ind = fmu ? fmu->index_out(n) : -1;
 741  if (name_out) name_out->push_back(n);
 742  }
 743  // Return output struct
 744  return s;
 745 }
746 
// Sparsity of input i: dense columns for regular inputs and seeds (one column
// per forward/adjoint direction), all-zero patterns for the unused
// OUT/ADJ_OUT placeholder inputs.
// NOTE(review): the signature line (inner 747, presumably
// `Sparsity FmuFunction::get_sparsity_in(casadi_int i) {`) is missing from
// this extract.
748  switch (in_.at(i).type) {
 749  case InputType::REG:
 750  return Sparsity::dense(fmu_.ired(in_.at(i).ind).size(), 1);
 751  case InputType::FWD:
 752  return Sparsity::dense(fmu_.ired(in_.at(i).ind).size(), nfwd_);
 753  case InputType::ADJ:
 754  return Sparsity::dense(fmu_.ored(in_.at(i).ind).size(), nadj_);
 755  case InputType::OUT:
 756  return Sparsity(fmu_.ored(in_.at(i).ind).size(), 1);
 757  case InputType::ADJ_OUT:
 758  return Sparsity(fmu_.ired(in_.at(i).ind).size(), 1);
 759  }
 760  return Sparsity();
 761 }
762 
// Sparsity of output i: dense for regular/forward/adjoint outputs, the FMU's
// Jacobian/Hessian sparsity for derivative blocks.
// NOTE(review): the signature line (inner 763) and the case labels on inner
// lines 774/776/778 (presumably JAC_TRANS, JAC_ADJ_OUT and JAC_REG_ADJ) are
// missing from this extract.
764  const OutputStruct& s = out_.at(i);
 765  switch (out_.at(i).type) {
 766  case OutputType::REG:
 767  return Sparsity::dense(fmu_.ored(s.ind).size(), 1);
 768  case OutputType::FWD:
 769  return Sparsity::dense(fmu_.ored(s.ind).size(), nfwd_);
 770  case OutputType::ADJ:
 771  return Sparsity::dense(fmu_.ired(s.wrt).size(), nadj_);
 772  case OutputType::JAC:
 773  return fmu_.jac_sparsity(s.ind, s.wrt);
 775  return fmu_.jac_sparsity(s.ind, s.wrt).T();
 777  return Sparsity(fmu_.ired(s.ind).size(), fmu_.ored(s.wrt).size());
 779  return Sparsity(fmu_.ored(s.ind).size(), fmu_.ored(s.wrt).size());
 780  case OutputType::HESS:
 781  return fmu_.hess_sparsity(s.ind, s.wrt);
 782  }
 783  return Sparsity();
 784 }
785 
// Nominal values for input i: the FMU's nominal values for regular inputs
// and dummy outputs; all other input kinds fall through to the base class.
786 std::vector<double> FmuFunction::get_nominal_in(casadi_int i) const {
 787  switch (in_.at(i).type) {
 788  case InputType::REG:
 789  return fmu_.all_nominal_in(in_.at(i).ind);
 790  case InputType::FWD:
 791  break;
 792  case InputType::ADJ:
 793  break;
 794  case InputType::OUT:
 795  return fmu_.all_nominal_out(in_.at(i).ind);
 796  case InputType::ADJ_OUT:
 797  break;
 798  }
 799  // Default: Base class
// NOTE(review): inner line 800 (presumably
// `return FunctionInternal::get_nominal_in(i);`) is missing from this extract.
 801 }
802 
// Nominal values for output i: the FMU's nominal values for regular outputs;
// derivative block types are not implemented (warning only) and fall through
// to the base class.
// NOTE(review): inner lines 814/817/820 (the JAC_TRANS/JAC_ADJ_OUT/
// JAC_REG_ADJ case labels) and 828 (presumably
// `return FunctionInternal::get_nominal_out(i);`) are missing from this
// extract.
803 std::vector<double> FmuFunction::get_nominal_out(casadi_int i) const {
 804  switch (out_.at(i).type) {
 805  case OutputType::REG:
 806  return fmu_.all_nominal_out(out_.at(i).ind);
 807  case OutputType::FWD:
 808  break;
 809  case OutputType::ADJ:
 810  break;
 811  case OutputType::JAC:
 812  casadi_warning("FmuFunction::get_nominal_out not implemented for OutputType::JAC");
 813  break;
 815  casadi_warning("FmuFunction::get_nominal_out not implemented for OutputType::JAC_TRANS");
 816  break;
 818  casadi_warning("FmuFunction::get_nominal_out not implemented for OutputType::JAC_ADJ_OUT");
 819  break;
 821  casadi_warning("FmuFunction::get_nominal_out not implemented for OutputType::JAC_REG_ADJ");
 822  break;
 823  case OutputType::HESS:
 824  casadi_warning("FmuFunction::get_nominal_out not implemented for OutputType::HESS");
 825  break;
 826  }
 827  // Default: Base class
 829 }
830 
// Numerical evaluation entry point: determines which derivative blocks the
// caller requested (non-null res entries), lays out the shared work vectors
// (extended Jacobian/Hessian nonzeros, adjoint seeds/sensitivities),
// distributes per-thread memory, evaluates everything via eval_all, and
// finally copies the requested sub-blocks into the caller's output buffers.
// Returns 0 on success, 1 on failure.
// NOTE(review): inner lines 845 and 938 (presumably the
// `case OutputType::JAC_TRANS:` labels) and 941 (presumably the casadi_trans
// call writing the transposed block into r) are missing from this extract.
831 int FmuFunction::eval(const double** arg, double** res, casadi_int* iw, double* w,
 832  void* mem) const {
 833  // Get memory struct
 834  FmuMemory* m = static_cast<FmuMemory*>(mem);
 835  casadi_assert(m != 0, "Memory is null");
 836 
 837  setup(mem, arg, res, iw, w);
 838 
 839  // What blocks are there?
 840  bool need_jac = false, need_fwd = false, need_adj = false, need_hess = false;
 841  for (size_t k = 0; k < out_.size(); ++k) {
 842  if (res[k]) {
 843  switch (out_[k].type) {
 844  case OutputType::JAC:
 846  need_jac = true;
 847  break;
 848  case OutputType::FWD:
 849  need_fwd = true;
 850  break;
 851  case OutputType::ADJ:
 852  need_adj = true;
 853  break;
 854  case OutputType::HESS:
 855  need_adj = true;
 856  need_hess = true;
 857  break;
 858  default:
 859  break;
 860  }
 861  }
 862  }
 863  // Work vectors, shared between threads
 864  double *aseed = 0, *asens = 0, *jac_nz = 0, *hess_nz = 0;
 865  if (need_jac) {
 866  // Jacobian nonzeros, initialize to NaN
 867  jac_nz = w; w += jac_sp_.nnz();
 868  std::fill(jac_nz, jac_nz + jac_sp_.nnz(), casadi::nan);
 869  }
 870  if (need_adj) {
 871  // Set up vectors
 872  aseed = w; w += nadj_ * fmu_.n_out();
 873  asens = w; w += nadj_ * fmu_.n_in();
 874  // Clear seed/sensitivity vectors
 875  std::fill(aseed, aseed + nadj_ * fmu_.n_out(), 0);
 876  std::fill(asens, asens + nadj_ * fmu_.n_in(), 0);
 877  // Copy adjoint seeds to aseed
 878  for (size_t i = 0; i < in_.size(); ++i) {
 879  if (arg[i] && in_[i].type == InputType::ADJ) {
 880  const std::vector<size_t>& oind = fmu_.ored(in_[i].ind);
 881  for (casadi_int d = 0; d < nadj_; ++d) {
 882  size_t aseed_off = d * fmu_.n_out();
 883  size_t off = d * size1_in(i);
 884  for (size_t k = 0; k < oind.size(); ++k) aseed[oind[k] + aseed_off] = arg[i][k + off];
 885  }
 886  }
 887  }
 888  }
 889  if (need_hess) {
 890  // Hessian nonzeros, initialize to NaN
 891  hess_nz = w; w += hess_sp_.nnz();
 892  std::fill(hess_nz, hess_nz + hess_sp_.nnz(), casadi::nan);
 893  }
 894  // Setup memory for threads
 895  for (casadi_int task = 0; task < max_n_tasks_; ++task) {
 896  FmuMemory* s = task == 0 ? m : m->slaves.at(task - 1);
 897  // Shared memory
 898  s->arg = arg;
 899  s->res = res;
 900  s->aseed = aseed;
 901  s->asens = asens;
 902  s->jac_nz = jac_nz;
 903  s->hess_nz = hess_nz;
 904  // Thread specific memory
 905  casadi_jac_init(&p_, &s->d, &iw, &w);
 906  if (task < max_hess_tasks_) {
 907  // Perturbed adjoint sensitivities
 908  s->pert_asens = w;
 909  w += fmu_.n_in();
 910  // Work vector for avoiding assignment of illegal nonzeros
 911  s->star_iw = iw;
 912  iw += fmu_.n_in();
 913  }
 914  }
 915  // Evaluate everything except Hessian, possibly in parallel
 916  if (verbose_) casadi_message("Evaluating regular outputs, forward sens, extended Jacobian");
 917  if (eval_all(m, max_jac_tasks_, true, need_jac, need_fwd, need_adj, false)) return 1;
 918  // Evaluate Hessian
 919  if (need_hess) {
 920  if (verbose_) casadi_message("Evaluating extended Hessian");
 921  if (eval_all(m, max_hess_tasks_, false, false, false, false, true)) return 1;
 922  // Post-process Hessian
 923  remove_nans(hess_nz, iw);
 924  if (validate_hessian_) check_hessian(m, hess_nz, iw);
 925  if (make_symmetric_) make_symmetric(hess_nz, iw);
 926  }
 927  // Fetch calculated blocks
 928  for (size_t k = 0; k < out_.size(); ++k) {
 929  // Get nonzeros, skip if not needed
 930  double* r = res[k];
 931  if (!r) continue;
 932  // Get by type
 933  switch (out_[k].type) {
 934  case OutputType::JAC:
 935  casadi_get_sub(r, jac_sp_, jac_nz,
 936  out_[k].rbegin, out_[k].rend, out_[k].cbegin, out_[k].cend);
 937  break;
 939  casadi_get_sub(w, jac_sp_, jac_nz,
 940  out_[k].rbegin, out_[k].rend, out_[k].cbegin, out_[k].cend);
 942  break;
 943  case OutputType::ADJ:
 944  // If adjoint sensitivities have not already been set
 945  if (need_jac || !uses_adjoint_derivatives_) {
 946  for (casadi_int d = 0; d < nadj_; ++d) {
 947  size_t asens_off = d * fmu_.n_in();
 948  for (size_t id : fmu_.ired(out_[k].wrt)) *r++ = asens[id + asens_off];
 949  }
 950  }
 951  break;
 952  case OutputType::HESS:
 953  casadi_get_sub(r, hess_sp_, hess_nz,
 954  out_[k].rbegin, out_[k].rend, out_[k].cbegin, out_[k].cend);
 955  break;
 956  default:
 957  break;
 958  }
 959  }
 960  // Successful return
 961  return 0;
 962 }
963 
// Dispatch evaluation over n_task tasks: serial when only one task (or no
// derivative block requires parallelism), otherwise OpenMP or std::thread
// depending on the selected parallelization and compile-time support.
// Returns the OR of the per-task error flags.
// NOTE(review): inner lines 973 and 997 are missing from this extract —
// presumably the `} else if (parallelization_ == Parallelization::OPENMP) {`
// and `} else if (parallelization_ == Parallelization::THREAD) {` branch
// headers. Confirm against the upstream file.
964 int FmuFunction::eval_all(FmuMemory* m, casadi_int n_task,
 965  bool need_nondiff, bool need_jac, bool need_fwd, bool need_adj, bool need_hess) const {
 966  // Return flag
 967  int flag = 0;
 968  // Evaluate, serially or in parallel
 969  if (parallelization_ == Parallelization::SERIAL || n_task == 1
 970  || (!need_jac && !need_adj && !need_hess)) {
 971  // Evaluate serially
 972  flag = eval_task(m, 0, 1, need_nondiff, need_jac, need_fwd, need_adj, need_hess);
 974  #ifdef WITH_OPENMP
 975  // Parallel region
 976  #pragma omp parallel reduction(||:flag)
 977  {
 978  // Get thread number
 979  casadi_int task = omp_get_thread_num();
 980  // Get number of threads in region
 981  casadi_int num_threads = omp_get_num_threads();
 982  // Number of threads that are actually used
 983  casadi_int num_used_threads = std::min(num_threads, n_task);
 984  // Evaluate in parallel
 985  if (task < num_used_threads) {
 986  FmuMemory* s = task == 0 ? m : m->slaves.at(task - 1);
 987  flag = eval_task(s, task, num_used_threads, need_nondiff && task == 0,
 988  need_jac, need_fwd && task < nfwd_, need_adj, need_hess);
 989  } else {
 990  // Nothing to do for thread
 991  flag = 0;
 992  }
 993  }
 994  #else // WITH_OPENMP
 995  flag = 1;
 996  #endif // WITH_OPENMP
 998  #ifdef CASADI_WITH_THREAD
 999  // Return value for each thread
 1000  std::vector<int> flag_task(n_task);
 1001  // Spawn threads
 1002  std::vector<std::thread> threads;
 1003  for (casadi_int task = 0; task < n_task; ++task) {
 1004  threads.emplace_back(
 1005  [&, task](int* fl) {
 1006  FmuMemory* s = task == 0 ? m : m->slaves.at(task - 1);
 1007  *fl = eval_task(s, task, n_task, need_nondiff && task == 0,
 1008  need_jac, need_fwd && task < nfwd_, need_adj, need_hess);
 1009  }, &flag_task[task]);
 1010  }
 1011  // Join threads
 1012  for (auto&& th : threads) th.join();
 1013  // Join return flags
 1014  for (int fl : flag_task) flag = flag || fl;
 1015  #else // CASADI_WITH_THREAD
 1016  flag = 1;
 1017  #endif // CASADI_WITH_THREAD
 1018  } else {
 1019  casadi_error("Unknown parallelization: " + to_string(parallelization_));
 1020  }
 1021  // Return combined error flag
 1022  return flag;
 1023 }
1024 
// Evaluate one worker's slice of the requested work: nondifferentiated
// outputs (master task only), forward sensitivities, the extended
// Jacobian and/or adjoint sensitivities, and the finite-difference
// approximation of the extended Hessian. Directions/colors are split
// evenly over the n_task workers. Returns 0 on success, 1 on failure.
int FmuFunction::eval_task(FmuMemory* m, casadi_int task, casadi_int n_task,
    bool need_nondiff, bool need_jac, bool need_fwd, bool need_adj, bool need_hess) const {
  // Pass all regular inputs
  // NOTE(review): unlike outputs below, m->arg[k] is not null-checked here;
  // presumably fmu_.set tolerates a null value pointer - confirm in Fmu::set
  for (size_t k = 0; k < in_.size(); ++k) {
    if (in_[k].type == InputType::REG) {
      fmu_.set(m, in_[k].ind, m->arg[k]);
    }
  }
  // Request all regular outputs to be evaluated (skip unrequested results)
  for (size_t k = 0; k < out_.size(); ++k) {
    if (m->res[k] && out_[k].type == OutputType::REG) {
      fmu_.request(m, out_[k].ind);
    }
  }
  // Evaluate the FMU
  if (fmu_.eval(m)) return 1;
  // Get regular outputs (master thread only)
  if (need_nondiff) {
    for (size_t k = 0; k < out_.size(); ++k) {
      if (m->res[k] && out_[k].type == OutputType::REG) {
        fmu_.get(m, out_[k].ind, m->res[k]);
      }
    }
  }
  // Forward derivatives
  if (need_fwd) {
    // Selection of forward derivatives to be evaluated for the thread
    casadi_int d_begin = (task * nfwd_) / n_task;
    casadi_int d_end = ((task + 1) * nfwd_) / n_task;
    // Loop over forward derivatives
    for (casadi_int d = d_begin; d < d_end; ++d) {
      // Print progress
      if (print_progress_) print("Forward sensitivities, thread %d/%d: Direction %d/%d\n",
        task + 1, n_task, d - d_begin + 1, d_end - d_begin);
      // Pass all forward seeds for direction d (columns are stacked in m->arg[k])
      for (size_t k = 0; k < in_.size(); ++k) {
        if (m->arg[k] && in_[k].type == InputType::FWD) {
          fmu_.set_fwd(m, in_[k].ind, m->arg[k] + d * size1_in(k));
        }
      }
      // Request forward sensitivities
      for (size_t k = 0; k < out_.size(); ++k) {
        if (m->res[k] && out_[k].type == OutputType::FWD) {
          fmu_.request_fwd(m, out_[k].ind);
        }
      }
      // Calculate derivatives
      if (fmu_.eval_fwd(m, false)) return 1;
      // Collect forward sensitivities for direction d
      for (size_t k = 0; k < out_.size(); ++k) {
        if (m->res[k] && out_[k].type == OutputType::FWD) {
          fmu_.get_fwd(m, out_[k].ind, m->res[k] + d * size1_out(k));
        }
      }
    }
  }
  // Evaluate extended Jacobian and/or adjoint derivatives.
  // Adjoints are computed via the Jacobian colors when the FMU does not
  // provide adjoint directional derivatives (or the Jacobian is needed anyway).
  if (need_jac || (need_adj && !uses_adjoint_derivatives_)) {
    // Selection of colors to be evaluated for the thread
    casadi_int c_begin = (task * jac_colors_.size2()) / n_task;
    casadi_int c_end = ((task + 1) * jac_colors_.size2()) / n_task;
    // Loop over colors
    for (casadi_int c = c_begin; c < c_end; ++c) {
      // Print progress
      if (print_progress_) print("Jacobian calculation, thread %d/%d: Seeding variable %d/%d\n",
        task + 1, n_task, c - c_begin + 1, c_end - c_begin);
      // Get derivative directions for this color
      casadi_jac_pre(&p_, &m->d, c);
      // Calculate derivatives: seed, request, evaluate, retrieve
      fmu_.set_fwd(m, m->d.nseed, m->d.iseed, m->d.seed);
      fmu_.request_fwd(m, m->d.nsens, m->d.isens, m->d.wrt);
      if (fmu_.eval_fwd(m, true)) return 1;
      fmu_.get_fwd(m, m->d.nsens, m->d.isens, m->d.sens);
      // Scale derivatives (undo seed scaling by nominal values)
      casadi_jac_scale(&p_, &m->d);
      // Collect Jacobian nonzeros
      if (need_jac) {
        for (casadi_int i = 0; i < m->d.nsens; ++i) {
          m->jac_nz[m->d.nzind[i]] = m->d.sens[i];
        }
      }
      // Propagate adjoint sensitivities: asens += J^T * aseed, per direction
      if (need_adj) {
        for (casadi_int d = 0; d < nadj_; ++d) {
          size_t aseed_off = d * fmu_.n_out();
          size_t asens_off = d * fmu_.n_in();
          for (casadi_int i = 0; i < m->d.nsens; ++i) {
            m->asens[m->d.wrt[i] + asens_off] += m->aseed[m->d.isens[i] + aseed_off]
              * m->d.sens[i];
          }
        }
      }
    }
  } else if (need_adj) { // Adjoint derivatives, without forming the extended Jacobian
    // Selection of adjoint directions to be evaluated for the thread
    casadi_int d_begin = (task * nadj_) / n_task;
    casadi_int d_end = ((task + 1) * nadj_) / n_task;
    // Loop over adjoint directions
    for (casadi_int d = d_begin; d < d_end; ++d) {
      // Print progress
      if (print_progress_) print("Adjoint sensitivities, thread %d/%d: Direction %d/%d\n",
        task + 1, n_task, d - d_begin + 1, d_end - d_begin);
      // Pass all adjoint seeds for direction d
      for (size_t k = 0; k < in_.size(); ++k) {
        if (m->arg[k] && in_[k].type == InputType::ADJ) {
          fmu_.set_adj(m, in_[k].ind, m->arg[k] + d * size1_in(k));
        }
      }
      // Request adjoint sensitivities
      for (size_t k = 0; k < out_.size(); ++k) {
        if (m->res[k] && out_[k].type == OutputType::ADJ) {
          fmu_.request_adj(m, out_[k].wrt);
        }
      }
      // Calculate derivatives
      if (fmu_.eval_adj(m)) return 1;
      // Collect adjoint sensitivities
      for (size_t k = 0; k < out_.size(); ++k) {
        if (m->res[k] && out_[k].type == OutputType::ADJ) {
          fmu_.get_adj(m, out_[k].wrt, m->res[k] + d * size1_out(k));
        }
      }
    }
  }
  // Evaluate extended Hessian by finite differences of adjoint sensitivities
  if (need_hess) {
    // Hessian coloring
    casadi_int n_hc = hess_colors_.size2();
    const casadi_int *hc_colind = hess_colors_.colind(), *hc_row = hess_colors_.row();
    // Hessian sparsity
    const casadi_int *hess_colind = hess_sp_.colind(), *hess_row = hess_sp_.row();
    // Selection of colors to be evaluated for the thread
    casadi_int c_begin = (task * n_hc) / n_task;
    casadi_int c_end = ((task + 1) * n_hc) / n_task;
    // Unperturbed values, step size (becomes inverse step size below)
    std::vector<double> x, h;
    // Loop over colors
    for (casadi_int c = c_begin; c < c_end; ++c) {
      // Print progress
      if (print_progress_) print("Hessian calculation, thread %d/%d: Seeding variable %d/%d\n",
        task + 1, n_task, c - c_begin + 1, c_end - c_begin);
      // Variables being seeded simultaneously for this color
      casadi_int v_begin = hc_colind[c];
      casadi_int v_end = hc_colind[c + 1];
      casadi_int nv = v_end - v_begin;
      // Loop over variables being seeded for color
      x.resize(nv);
      h.resize(nv);
      for (casadi_int v = 0; v < nv; ++v) {
        // Corresponding input in Fmu
        casadi_int ind1 = hc_row[v_begin + v];
        casadi_int id = jac_in_.at(ind1);
        // Get unperturbed value
        x[v] = m->ibuf_.at(id);
        // Step size, scaled by the nominal value of the input
        h[v] = m->self.step_ * fmu_.nominal_in(id);
        // Make sure a forward step remains in bounds
        if (x[v] + h[v] > fmu_.max_in(id)) {
          // Ensure a negative step is possible
          if (m->ibuf_.at(id) - h[v] < fmu_.min_in(id)) {
            std::stringstream ss;
            ss << "Cannot perturb " << fmu_.desc_in(m, id) << " at " << x[v]
              << " with step size " << m->self.step_;
            casadi_warning(ss.str());
            return 1;
          }
          // Take reverse step instead
          h[v] = -h[v];
        }
        // Perturb the input
        m->ibuf_.at(id) += h[v];
        m->imarked_.at(id) = true;
        // Store inverse of step size (used as FD scaling factor below)
        h[v] = 1. / h[v];
      }
      // Request all outputs
      for (size_t i : jac_out_) {
        m->omarked_.at(i) = true;
        m->wrt_.at(i) = -1;
      }
      // Evaluate the FMU with the perturbed inputs
      if (fmu_.eval(m)) return 1;
      // Clear perturbed adjoint sensitivities
      std::fill(m->pert_asens, m->pert_asens + fmu_.n_in(), 0);
      // Loop over colors of the Jacobian to get J^T*aseed at the perturbed point
      for (casadi_int c1 = 0; c1 < jac_colors_.size2(); ++c1) {
        // Get derivative directions
        casadi_jac_pre(&p_, &m->d, c1);
        // Calculate derivatives
        fmu_.set_fwd(m, m->d.nseed, m->d.iseed, m->d.seed);
        fmu_.request_fwd(m, m->d.nsens, m->d.isens, m->d.wrt);
        if (fmu_.eval_fwd(m, true)) return 1;
        fmu_.get_fwd(m, m->d.nsens, m->d.isens, m->d.sens);
        // Scale derivatives
        casadi_jac_scale(&p_, &m->d);
        // Propagate adjoint sensitivities
        for (casadi_int i = 0; i < m->d.nsens; ++i)
          m->pert_asens[m->d.wrt[i]] += m->aseed[m->d.isens[i]] * m->d.sens[i];
      }
      // Count how many seeded variables of this color touch each input;
      // entries touched more than once cannot be attributed to one variable
      std::fill(m->star_iw, m->star_iw + fmu_.n_in(), 0);
      for (casadi_int v = 0; v < nv; ++v) {
        casadi_int ind1 = hc_row[v_begin + v];
        for (casadi_int k = hess_colind[ind1]; k < hess_colind[ind1 + 1]; ++k) {
          casadi_int id2 = jac_in_.at(hess_row[k]);
          m->star_iw[id2]++;
        }
      }
      // Loop over variables being seeded for color
      for (casadi_int v = 0; v < nv; ++v) {
        // Corresponding input in Fmu
        casadi_int ind1 = hc_row[v_begin + v];
        casadi_int id = jac_in_.at(ind1);
        // Restore input to its unperturbed value
        m->ibuf_.at(id) = x[v];
        m->imarked_.at(id) = true;
        // Get column in Hessian
        for (casadi_int k = hess_colind[ind1]; k < hess_colind[ind1 + 1]; ++k) {
          casadi_int id2 = jac_in_.at(hess_row[k]);
          // Save
          if (m->star_iw[id2] > 1) {
            // Input gets perturbed by multiple variables for the same color:
            // Replace with NaN to use mirror element instead (see remove_nans)
            m->hess_nz[k] = casadi::nan;
          } else {
            // Finite difference approximation: (asens_pert - asens) / step
            m->hess_nz[k] = h[v] * (m->pert_asens[id2] - m->asens[id2]);
          }
        }
      }
    }
  }
  // Successful return
  return 0;
}
1260 
1261 void FmuFunction::check_hessian(FmuMemory* m, const double *hess_nz, casadi_int* iw) const {
1262  // Get Hessian sparsity pattern
1263  casadi_int n = hess_sp_.size1();
1264  const casadi_int *colind = hess_sp_.colind(), *row = hess_sp_.row();
1265  // Nonzero counters for transpose
1266  casadi_copy(colind, n, iw);
1267  // Loop over Hessian columns
1268  for (casadi_int c = 0; c < n; ++c) {
1269  // Loop over nonzeros for the column
1270  for (casadi_int k = colind[c]; k < colind[c + 1]; ++k) {
1271  // Get row of Hessian
1272  casadi_int r = row[k];
1273  // Get nonzero of transpose
1274  casadi_int k_tr = iw[r]++;
1275  // Get indices
1276  casadi_int id_c = jac_in_[c], id_r = jac_in_[r];
1277  // Nonzero
1278  double nz = hess_nz[k], nz_tr = hess_nz[k_tr];
1279  // Check if entry is NaN of inf
1280  if (std::isnan(nz) || std::isinf(nz)) {
1281  std::stringstream ss;
1282  ss << "Second derivative w.r.t. " << fmu_.desc_in(m, id_r) << " and "
1283  << fmu_.desc_in(m, id_r) << " is " << nz;
1284  casadi_warning(ss.str());
1285  // Further checks not needed for entry
1286  continue;
1287  }
1288  // Symmetry check (strict upper triangular part only)
1289  if (r < c) {
1290  // Normaliation factor to be used for relative tolerance
1291  double nz_max = std::fmax(std::fabs(nz), std::fabs(nz_tr));
1292  // Check if above absolute and relative tolerance bounds
1293  if (nz_max > abstol_ && std::fabs(nz - nz_tr) > nz_max * reltol_) {
1294  std::stringstream ss;
1295  ss << "Hessian appears nonsymmetric. Got " << nz << " vs. " << nz_tr
1296  << " for second derivative w.r.t. " << fmu_.desc_in(m, id_r) << " and "
1297  << fmu_.desc_in(m, id_c) << ", hess_nz = " << k << "/" << k_tr;
1298  casadi_warning(ss.str());
1299  }
1300  }
1301  }
1302  }
1303 }
1304 
1305 void FmuFunction::remove_nans(double *hess_nz, casadi_int* iw) const {
1306  // Get Hessian sparsity pattern
1307  casadi_int n = hess_sp_.size1();
1308  const casadi_int *colind = hess_sp_.colind(), *row = hess_sp_.row();
1309  // Mark variables that have been perturbed
1310  std::fill(iw, iw + n, 0);
1311  casadi_int hess_colors_nnz = hess_colors_.nnz();
1312  const casadi_int* hess_colors_row = hess_colors_.row();
1313  for (casadi_int k = 0; k < hess_colors_nnz; ++k)
1314  iw[hess_colors_row[k]] = 1;
1315  // Nonzero counters for transpose
1316  casadi_copy(colind, n, iw);
1317  // Loop over Hessian columns
1318  for (casadi_int c = 0; c < n; ++c) {
1319  // Loop over nonzeros for the column
1320  for (casadi_int k = colind[c]; k < colind[c + 1]; ++k) {
1321  // Get row of Hessian
1322  casadi_int r = row[k];
1323  // Get nonzero of transpose
1324  casadi_int k_tr = iw[r]++;
1325  // If NaN, use element from transpose
1326  if (std::isnan(hess_nz[k])) {
1327  hess_nz[k] = hess_nz[k_tr];
1328  }
1329  }
1330  }
1331 }
1332 
1333 void FmuFunction::make_symmetric(double *hess_nz, casadi_int* iw) const {
1334  // Get Hessian sparsity pattern
1335  casadi_int n = hess_sp_.size1();
1336  const casadi_int *colind = hess_sp_.colind(), *row = hess_sp_.row();
1337  // Nonzero counters for transpose
1338  casadi_copy(colind, n, iw);
1339  // Loop over Hessian columns
1340  for (casadi_int c = 0; c < n; ++c) {
1341  // Loop over nonzeros for the column
1342  for (casadi_int k = colind[c]; k < colind[c + 1]; ++k) {
1343  // Get row of Hessian
1344  casadi_int r = row[k];
1345  // Get nonzero of transpose
1346  casadi_int k_tr = iw[r]++;
1347  // Make symmetric
1348  if (r < c) hess_nz[k] = hess_nz[k_tr] = 0.5 * (hess_nz[k] + hess_nz[k_tr]);
1349  }
1350  }
1351 }
1352 
1353 std::string to_string(Parallelization v) {
1354  switch (v) {
1355  case Parallelization::SERIAL: return "serial";
1356  case Parallelization::OPENMP: return "openmp";
1357  case Parallelization::THREAD: return "thread";
1358  default: break;
1359  }
1360  return "";
1361 }
1362 
// Check whether a name contains an underscore-separated prefix
bool has_prefix(const std::string& s) {
  return s.find('_') != std::string::npos;
}
1366 
1367 std::string pop_prefix(const std::string& s, std::string* rem) {
1368  // Get prefix
1369  casadi_assert_dev(!s.empty());
1370  size_t pos = s.find('_');
1371  casadi_assert(pos < s.size(), "Cannot process \"" + s + "\"");
1372  // Get prefix
1373  std::string r = s.substr(0, pos);
1374  // Remainder, if requested (note that rem == &s is possible)
1375  if (rem) *rem = s.substr(pos+1, std::string::npos);
1376  // Return prefix
1377  return r;
1378 }
1379 
1381  // Look for any non-regular input
1382  for (auto&& e : in_) if (e.type != InputType::REG) return false;
1383  // Look for any non-regular output
1384  for (auto&& e : out_) if (e.type != OutputType::REG) return false;
1385  // Only regular inputs and outputs
1386  return true;
1387 }
1388 
1390  // Check inputs
1391  for (auto&& e : in_) {
1392  switch (e.type) {
1393  // Supported for derivative calculations
1394  case InputType::REG:
1395  case InputType::OUT:
1396  break;
1397  // Supported if one derivative
1398  case InputType::FWD:
1399  if (nfwd_ > 1) return false;
1400  break;
1401  case InputType::ADJ:
1402  if (nadj_ > 1) return false;
1403  break;
1404  // Not supported
1405  default:
1406  return false;
1407  }
1408  }
1409  // Check outputs
1410  for (auto&& e : out_) {
1411  // Supported for derivative calculations
1412  switch (e.type) {
1413  case OutputType::REG:
1414  case OutputType::ADJ:
1415  break;
1416  // Not supported
1417  default:
1418  return false;
1419  }
1420  }
1421  // OK if reached this point
1422  return true;
1423 }
1424 
1425 Function FmuFunction::factory(const std::string& name,
1426  const std::vector<std::string>& s_in,
1427  const std::vector<std::string>& s_out,
1428  const Function::AuxOut& aux,
1429  const Dict& opts) const {
1430  // Assume we can call constructor directly
1431  try {
1432  // Hack: Inherit parallelization, verbosity option
1433  Dict opts1 = opts;
1434  opts1["parallelization"] = to_string(parallelization_);
1435  opts1["verbose"] = verbose_;
1436  opts1["print_progress"] = print_progress_;
1437  // Replace ':' with '_' in s_in and s_out
1438  std::vector<std::string> s_in_mod = s_in, s_out_mod = s_out;
1439  for (std::string& s : s_in_mod) std::replace(s.begin(), s.end(), ':', '_');
1440  for (std::string& s : s_out_mod) std::replace(s.begin(), s.end(), ':', '_');
1441  // New instance of the same class, using the same Fmu instance
1442  Function ret;
1443  ret.own(new FmuFunction(name, fmu_, s_in_mod, s_out_mod));
1444  ret->construct(opts1);
1445  return ret;
1446  } catch (std::exception& e) {
1447  casadi_warning("FmuFunction::factory call for constructing " + name + " from " + name_
1448  + " failed:\n" + std::string(e.what()) + "\nFalling back to base class implementation");
1449  }
1450  // Fall back to base class
1451  return FunctionInternal::factory(name, s_in, s_out, aux, opts);
1452 }
1453 
1455  // Calculation of Hessian inside FmuFunction (in development)
1456  if (new_jacobian_ && all_vectors()) return true;
1457  // Only first order
1458  return all_regular();
1459 }
1460 
1461 Function FmuFunction::get_jacobian(const std::string& name, const std::vector<std::string>& inames,
1462  const std::vector<std::string>& onames, const Dict& opts) const {
1463  // Hack: Inherit parallelization, verbosity option
1464  Dict opts1 = opts;
1465  opts1["parallelization"] = to_string(parallelization_);
1466  opts1["verbose"] = verbose_;
1467  opts1["print_progress"] = print_progress_;
1468  // Return new instance of class
1469  Function ret;
1470  ret.own(new FmuFunction(name, fmu_, inames, onames));
1471  ret->construct(opts1);
1472  return ret;
1473 }
1474 
1475 bool FmuFunction::has_forward(casadi_int nfwd) const {
1476  // Only implemented if "new_forward" is enabled
1477  if (!new_forward_) return FunctionInternal::has_forward(nfwd);
1478  // Only first order analytic derivative possible
1479  if (!all_regular()) return false;
1480  // Use analytic forward derivatives
1481  return true;
1482 }
1483 
1484 Function FmuFunction::get_forward(casadi_int nfwd, const std::string& name,
1485  const std::vector<std::string>& inames,
1486  const std::vector<std::string>& onames,
1487  const Dict& opts) const {
1488  // Only implemented if "new_forward" is enabled
1489  if (!new_forward_) return FunctionInternal::get_forward(nfwd, name, inames, onames, opts);
1490  // Pass options
1491  Dict opts1 = opts;
1492  opts1["parallelization"] = to_string(parallelization_);
1493  opts1["verbose"] = verbose_;
1494  opts1["print_progress"] = print_progress_;
1495  opts1["nfwd"] = nfwd;
1496  // Return new instance of class
1497  Function ret;
1498  ret.own(new FmuFunction(name, fmu_, inames, onames));
1499  ret->construct(opts1);
1500  return ret;
1501 }
1502 
1503 bool FmuFunction::has_reverse(casadi_int nadj) const {
1504  // Only first order analytic derivative possible
1505  if (!all_regular()) return false;
1506  // Use analytic adjoint derivatives
1507  return true;
1508 }
1509 
1510 Function FmuFunction::get_reverse(casadi_int nadj, const std::string& name,
1511  const std::vector<std::string>& inames,
1512  const std::vector<std::string>& onames,
1513  const Dict& opts) const {
1514  // Hack: Inherit parallelization option
1515  Dict opts1 = opts;
1516  opts1["parallelization"] = to_string(parallelization_);
1517  opts1["verbose"] = verbose_;
1518  opts1["new_jacobian"] = new_hessian_;
1519  opts1["print_progress"] = print_progress_;
1520  opts1["nadj"] = nadj;
1521  // Return new instance of class
1522  Function ret;
1523  ret.own(new FmuFunction(name, fmu_, inames, onames));
1524  ret->construct(opts1);
1525  return ret;
1526 }
1527 
1528 bool FmuFunction::has_jac_sparsity(casadi_int oind, casadi_int iind) const {
1529  // Available in the FMU meta information
1530  if (out_.at(oind).type == OutputType::REG) {
1531  if (in_.at(iind).type == InputType::REG) {
1532  return true;
1533  } else if (in_.at(iind).type == InputType::ADJ) {
1534  return true;
1535  }
1536  } else if (out_.at(oind).type == OutputType::ADJ) {
1537  if (in_.at(iind).type == InputType::REG) {
1538  return true;
1539  } else if (in_.at(iind).type == InputType::ADJ) {
1540  return true;
1541  }
1542  }
1543  // Not available
1544  return false;
1545 }
1546 
1547 Sparsity FmuFunction::get_jac_sparsity(casadi_int oind, casadi_int iind,
1548  bool symmetric) const {
1549  // Available in the FMU meta information
1550  if (out_.at(oind).type == OutputType::REG) {
1551  if (in_.at(iind).type == InputType::REG) {
1552  return fmu_.jac_sparsity(out_.at(oind).ind, in_.at(iind).ind);
1553  } else if (in_.at(iind).type == InputType::ADJ) {
1554  return Sparsity(nnz_out(oind), nnz_in(iind));
1555  }
1556  } else if (out_.at(oind).type == OutputType::ADJ) {
1557  if (in_.at(iind).type == InputType::REG) {
1558  return fmu_.hess_sparsity(out_.at(oind).wrt, in_.at(iind).ind);
1559  } else if (in_.at(iind).type == InputType::ADJ) {
1560  return fmu_.jac_sparsity(in_.at(iind).ind, out_.at(oind).wrt).T();
1561  }
1562  }
1563  // Not available
1564  casadi_error("Implementation error");
1565  return Sparsity();
1566 }
1567 
1568 Dict FmuFunction::get_stats(void *mem) const {
1569  // Get the stats from the base classes
1570  Dict stats = FunctionInternal::get_stats(mem);
1571  // Get memory object
1572  FmuMemory* m = static_cast<FmuMemory*>(mem);
1573  // Get auxilliary variables from Fmu
1574  fmu_.get_stats(m, &stats, name_in_, get_ptr(in_));
1575  // Return stats
1576  return stats;
1577 }
1578 
1581  s.version("FmuFunction", 3);
1582 
1583  s.pack("FmuFunction::Fmu", fmu_);
1584 
1585  casadi_assert_dev(in_.size()==n_in_);
1586  for (const InputStruct& e : in_) {
1587  s.pack("FmuFunction::in::type", static_cast<int>(e.type));
1588  s.pack("FmuFunction::in::ind", e.ind);
1589  }
1590  casadi_assert_dev(out_.size()==n_out_);
1591  for (const OutputStruct& e : out_) {
1592  s.pack("FmuFunction::out::type", static_cast<int>(e.type));
1593  s.pack("FmuFunction::out::ind", e.ind);
1594  s.pack("FmuFunction::out::wrt", e.wrt);
1595  s.pack("FmuFunction::out::rbegin", e.rbegin);
1596  s.pack("FmuFunction::out::rend", e.rend);
1597  s.pack("FmuFunction::out::cbegin", e.cbegin);
1598  s.pack("FmuFunction::out::cend", e.cend);
1599  }
1600  s.pack("FmuFunction::jac_in", jac_in_);
1601  s.pack("FmuFunction::jac_out", jac_out_);
1602  s.pack("FmuFunction::jac_nom_in", jac_nom_in_);
1603  s.pack("FmuFunction::sp_trans", sp_trans_);
1604  s.pack("FmuFunction::sp_trans_map", sp_trans_map_);
1605 
1606  s.pack("FmuFunction::has_jac", has_jac_);
1607  s.pack("FmuFunction::has_fwd", has_fwd_);
1608  s.pack("FmuFunction::has_adj", has_adj_);
1609  s.pack("FmuFunction::has_hess", has_hess_);
1610 
1611  s.pack("FmuFunction::uses_directional_derivatives", uses_directional_derivatives_);
1612  s.pack("FmuFunction::uses_adjoint_derivatives", uses_adjoint_derivatives_);
1613  s.pack("FmuFunction::validate_forward", validate_forward_);
1614  s.pack("FmuFunction::validate_hessian", validate_hessian_);
1615  s.pack("FmuFunction::make_symmetric", make_symmetric_);
1616  s.pack("FmuFunction::step", step_);
1617  s.pack("FmuFunction::abstol", abstol_);
1618  s.pack("FmuFunction::reltol", reltol_);
1619  s.pack("FmuFunction::print_progress", print_progress_);
1620  s.pack("FmuFunction::new_jacobian", new_jacobian_);
1621  s.pack("FmuFunction::new_forward", new_forward_);
1622  s.pack("FmuFunction::new_hessian", new_hessian_);
1623  s.pack("FmuFunction::hessian_coloring", hessian_coloring_);
1624  s.pack("FmuFunction::validate_ad_file", validate_ad_file_);
1625 
1626  s.pack("FmuFunction::fd", static_cast<int>(fd_));
1627  s.pack("FmuFunction::parallelization", static_cast<int>(parallelization_));
1628  s.pack("FmuFunction::init_stats", init_stats_);
1629 
1630  s.pack("FmuFunction::jac_sp", jac_sp_);
1631  s.pack("FmuFunction::hess_sp", hess_sp_);
1632  s.pack("FmuFunction::jac_colors", jac_colors_);
1633  s.pack("FmuFunction::hess_colors", hess_colors_);
1634  s.pack("FmuFunction::nonlin", nonlin_);
1635 
1636 
1637  s.pack("FmuFunction::max_jac_tasks", max_jac_tasks_);
1638  s.pack("FmuFunction::max_hess_tasks", max_hess_tasks_);
1639  s.pack("FmuFunction::max_n_tasks", max_n_tasks_);
1640 
1641 }
1642 
1644  s.version("FmuFunction", 3);
1645 
1646  s.unpack("FmuFunction::Fmu", fmu_);
1647 
1648  in_.resize(n_in_);
1649  for (InputStruct& e : in_) {
1650  int t = 0;
1651  s.unpack("FmuFunction::in::type", t);
1652  e.type = static_cast<InputType>(t);
1653  s.unpack("FmuFunction::in::ind", e.ind);
1654  }
1655  out_.resize(n_out_);
1656  for (OutputStruct& e : out_) {
1657  int t = 0;
1658  s.unpack("FmuFunction::out::type", t);
1659  e.type = static_cast<OutputType>(t);
1660  s.unpack("FmuFunction::out::ind", e.ind);
1661  s.unpack("FmuFunction::out::wrt", e.wrt);
1662  s.unpack("FmuFunction::out::rbegin", e.rbegin);
1663  s.unpack("FmuFunction::out::rend", e.rend);
1664  s.unpack("FmuFunction::out::cbegin", e.cbegin);
1665  s.unpack("FmuFunction::out::cend", e.cend);
1666  }
1667 
1668  s.unpack("FmuFunction::jac_in", jac_in_);
1669  s.unpack("FmuFunction::jac_out", jac_out_);
1670 
1671  s.unpack("FmuFunction::jac_nom_in", jac_nom_in_);
1672  s.unpack("FmuFunction::sp_trans", sp_trans_);
1673  s.unpack("FmuFunction::sp_trans_map", sp_trans_map_);
1674 
1675  s.unpack("FmuFunction::has_jac", has_jac_);
1676  s.unpack("FmuFunction::has_fwd", has_fwd_);
1677  s.unpack("FmuFunction::has_adj", has_adj_);
1678  s.unpack("FmuFunction::has_hess", has_hess_);
1679 
1680  s.unpack("FmuFunction::uses_directional_derivatives", uses_directional_derivatives_);
1681  s.unpack("FmuFunction::uses_adjoint_derivatives", uses_adjoint_derivatives_);
1682  s.unpack("FmuFunction::validate_forward", validate_forward_);
1683  s.unpack("FmuFunction::validate_hessian", validate_hessian_);
1684  s.unpack("FmuFunction::make_symmetric", make_symmetric_);
1685  s.unpack("FmuFunction::step", step_);
1686  s.unpack("FmuFunction::abstol", abstol_);
1687  s.unpack("FmuFunction::reltol", reltol_);
1688  s.unpack("FmuFunction::print_progress", print_progress_);
1689  s.unpack("FmuFunction::new_jacobian", new_jacobian_);
1690  s.unpack("FmuFunction::new_forward", new_forward_);
1691  s.unpack("FmuFunction::new_hessian", new_hessian_);
1692  s.unpack("FmuFunction::hessian_coloring", hessian_coloring_);
1693  s.unpack("FmuFunction::validate_ad_file", validate_ad_file_);
1694 
1695  int fd = 0;
1696  s.unpack("FmuFunction::fd", fd);
1697  fd_ = static_cast<FdMode>(fd);
1698  int parallelization = 0;
1699  s.unpack("FmuFunction::parallelization", parallelization);
1700  parallelization_ = static_cast<Parallelization>(parallelization);
1701 
1702  s.unpack("FmuFunction::init_stats", init_stats_);
1703 
1704  s.unpack("FmuFunction::jac_sp", jac_sp_);
1705  s.unpack("FmuFunction::hess_sp", hess_sp_);
1706  s.unpack("FmuFunction::jac_colors", jac_colors_);
1707  s.unpack("FmuFunction::hess_colors", hess_colors_);
1708  s.unpack("FmuFunction::nonlin", nonlin_);
1709 
1710  s.unpack("FmuFunction::max_jac_tasks", max_jac_tasks_);
1711  s.unpack("FmuFunction::max_hess_tasks", max_hess_tasks_);
1712  s.unpack("FmuFunction::max_n_tasks", max_n_tasks_);
1713 
1714  if (has_jac_ || has_adj_ || has_hess_) {
1715  // Setup Jacobian memory
1716  casadi_jac_setup(&p_, jac_sp_, jac_colors_);
1719  p_.map_in = get_ptr(jac_in_);
1720  }
1721 
1722 }
1723 
1724 //void pack(SerializingStream&s, );
1725 
1726 
1727 } // namespace casadi
Helper class for Serialization.
void unpack(Sparsity &e)
Reconstruct an object from the input stream.
void version(const std::string &name, int v)
static void open(std::ofstream &, const std::string &path, std::ios_base::openmode mode=std::ios_base::out)
Definition: filesystem.cpp:115
bool has_jacobian() const override
Full Jacobian.
Function get_forward(casadi_int nfwd, const std::string &name, const std::vector< std::string > &inames, const std::vector< std::string > &onames, const Dict &opts) const override
Return function that calculates forward derivatives.
bool all_vectors() const
std::vector< InputStruct > in_
~FmuFunction() override
Destructor.
Function get_jacobian(const std::string &name, const std::vector< std::string > &inames, const std::vector< std::string > &onames, const Dict &opts) const override
Full Jacobian.
std::vector< casadi_int > sp_trans_map_
Function factory(const std::string &name, const std::vector< std::string > &s_in, const std::vector< std::string > &s_out, const Function::AuxOut &aux, const Dict &opts) const override
casadi_jac_prob< double > p_
Function get_reverse(casadi_int nadj, const std::string &name, const std::vector< std::string > &inames, const std::vector< std::string > &onames, const Dict &opts) const override
Reverse mode AD.
int eval(const double **arg, double **res, casadi_int *iw, double *w, void *mem) const override
Evaluate numerically.
void init(const Dict &opts) override
Initialize.
Sparsity get_jac_sparsity(casadi_int oind, casadi_int iind, bool symmetric) const override
Return sparsity of Jacobian of an output respect to an input.
bool has_forward(casadi_int nfwd) const override
Return function that calculates forward derivatives.
std::vector< casadi_int > nonlin_
Dict get_stats(void *mem) const override
Get all statistics.
Parallelization parallelization_
casadi_int nfwd_
Number of sensitivities.
std::string validate_ad_file_
void check_mem_count(casadi_int n) const override
Check for validatity of memory object count.
bool has_reverse(casadi_int nadj) const override
Reverse mode AD.
std::vector< Sparsity > sp_trans_
bool all_regular() const
static void identify_io(std::vector< std::string > *scheme_in, std::vector< std::string > *scheme_out, const std::vector< std::string > &name_in, const std::vector< std::string > &name_out)
bool has_jac_sparsity(casadi_int oind, casadi_int iind) const override
Return sparsity of Jacobian of an output respect to an input.
Sparsity get_sparsity_in(casadi_int i) override
Retreive sparsities.
int eval_task(FmuMemory *m, casadi_int task, casadi_int n_task, bool need_nondiff, bool need_jac, bool need_fwd, bool need_adj, bool need_hess) const
static const Options options_
Options.
Sparsity get_sparsity_out(casadi_int i) override
Retreive sparsities.
std::vector< OutputStruct > out_
std::vector< double > get_nominal_in(casadi_int i) const override
Retreive nominal values.
casadi_int max_jac_tasks_
casadi_int max_hess_tasks_
FmuFunction(const std::string &name, const Fmu &fmu, const std::vector< std::string > &name_in, const std::vector< std::string > &name_out)
Constructor.
std::vector< size_t > jac_in_
void free_mem(void *mem) const override
Free memory block.
int eval_all(FmuMemory *m, casadi_int n_task, bool need_nondiff, bool need_jac, bool need_fwd, bool need_adj, bool need_hess) const
void make_symmetric(double *hess_nz, casadi_int *iw) const
void change_option(const std::string &option_name, const GenericType &option_value) override
Change option after object creation for debugging.
int init_mem(void *mem) const override
Initalize memory block.
void remove_nans(double *hess_nz, casadi_int *iw) const
std::vector< double > get_nominal_out(casadi_int i) const override
Retreive nominal values.
std::vector< size_t > jac_out_
std::vector< double > jac_nom_in_
void * alloc_mem() const override
Create memory block.
void check_hessian(FmuMemory *m, const double *hess_nz, casadi_int *iw) const
void serialize_body(SerializingStream &s) const override
Serialize an object without type information.
size_t index_out(const std::string &n) const
Definition: fmu.cpp:633
size_t index_in(const std::string &n) const
Definition: fmu.cpp:623
Interface to binary FMU.
Definition: fmu.hpp:61
void set(FmuMemory *m, size_t ind, const double *value) const
Definition: fmu.cpp:277
void get_fwd(FmuMemory *m, casadi_int nsens, const casadi_int *id, double *v) const
Definition: fmu.cpp:350
const std::vector< size_t > & ored(size_t ind) const
Definition: fmu.cpp:155
int eval_adj(FmuMemory *m) const
Definition: fmu.cpp:399
void get_stats(FmuMemory *m, Dict *stats, const std::vector< std::string > &name_in, const InputStruct *in) const
Get stats.
Definition: fmu.cpp:423
Sparsity hess_sparsity(const std::vector< size_t > &r, const std::vector< size_t > &c) const
Definition: fmu.cpp:252
bool can_be_instantiated_only_once_per_process() const
Does the FMU declare restrictions on instantiation?
Definition: fmu.cpp:235
std::vector< double > all_nominal_out(size_t ind) const
Definition: fmu.cpp:203
bool provides_adjoint_derivatives() const
Does the FMU provide support for adjoint directional derivatives.
Definition: fmu.cpp:227
Sparsity jac_sparsity(const std::vector< size_t > &osub, const std::vector< size_t > &isub) const
Definition: fmu.cpp:243
int eval_fwd(FmuMemory *m, bool independent_seeds) const
Definition: fmu.cpp:342
double nominal_in(size_t ind) const
Definition: fmu.cpp:163
void get_adj(FmuMemory *m, casadi_int nsens, const casadi_int *id, double *v) const
Definition: fmu.cpp:407
double max_in(size_t ind) const
Definition: fmu.cpp:187
void set_fwd(FmuMemory *m, casadi_int nseed, const casadi_int *id, const double *v) const
Definition: fmu.cpp:309
size_t n_out() const
Get the number of scheme outputs.
Definition: fmu.cpp:123
const std::string & instance_name() const
Name of the FMU.
Definition: fmu.cpp:106
std::vector< double > all_nominal_in(size_t ind) const
Definition: fmu.cpp:195
double min_in(size_t ind) const
Definition: fmu.cpp:179
FmuInternal * get() const
Definition: fmu.cpp:93
void set_adj(FmuMemory *m, casadi_int nseed, const casadi_int *id, const double *v) const
Definition: fmu.cpp:366
int init_mem(FmuMemory *m) const
Initialize memory block.
Definition: fmu.cpp:260
bool provides_directional_derivatives() const
Does the FMU provide support for forward directional derivatives.
Definition: fmu.cpp:219
int eval(FmuMemory *m) const
Definition: fmu.cpp:293
const std::vector< size_t > & ired(size_t ind) const
Definition: fmu.cpp:147
void request_adj(FmuMemory *m, casadi_int nsens, const casadi_int *id, const casadi_int *wrt_id) const
Definition: fmu.cpp:382
size_t n_in() const
Get the number of scheme inputs.
Definition: fmu.cpp:115
void free_instance(void *instance) const
Definition: fmu.cpp:269
void request(FmuMemory *m, size_t ind) const
Definition: fmu.cpp:285
void request_fwd(FmuMemory *m, casadi_int nsens, const casadi_int *id, const casadi_int *wrt_id) const
Definition: fmu.cpp:325
std::string desc_in(FmuMemory *m, size_t id, bool more=true) const
Definition: fmu.cpp:211
Internal class for Function.
casadi_int size1_in(casadi_int ind) const
Input/output dimensions.
void alloc_iw(size_t sz_iw, bool persistent=false)
Ensure required length of iw field.
Dict get_stats(void *mem) const override
Get all statistics.
void init(const Dict &opts) override
Initialize.
virtual bool has_forward(casadi_int nfwd) const
Return function that calculates forward derivatives.
void serialize_body(SerializingStream &s) const override
Serialize an object without type information.
virtual Function factory(const std::string &name, const std::vector< std::string > &s_in, const std::vector< std::string > &s_out, const Function::AuxOut &aux, const Dict &opts) const
virtual std::vector< double > get_nominal_out(casadi_int ind) const
size_t n_in_
Number of inputs and outputs.
casadi_int size1_out(casadi_int ind) const
Input/output dimensions.
casadi_int nnz_in() const
Number of input/output nonzeros.
static const Options options_
Options.
virtual std::vector< double > get_nominal_in(casadi_int ind) const
const Sparsity & sparsity_out(casadi_int ind) const
Input/output sparsity.
void alloc_w(size_t sz_w, bool persistent=false)
Ensure required length of w field.
casadi_int nnz_out() const
Number of input/output nonzeros.
void setup(void *mem, const double **arg, double **res, casadi_int *iw, double *w) const
Set the (persistent and temporary) work vectors.
void change_option(const std::string &option_name, const GenericType &option_value) override
Change option after object creation for debugging.
std::vector< std::string > name_out_
virtual Function get_forward(casadi_int nfwd, const std::string &name, const std::vector< std::string > &inames, const std::vector< std::string > &onames, const Dict &opts) const
Return function that calculates forward derivatives.
std::vector< std::string > name_in_
Input and output scheme.
Function object.
Definition: function.hpp:60
std::map< std::string, std::vector< std::string > > AuxOut
Definition: function.hpp:404
void own(Internal *node)
Generic data type, can hold different types such as bool, casadi_int, std::string etc.
void construct(const Dict &opts)
Construct.
virtual int init_mem(void *mem) const
Initialize memory block.
void print(const char *fmt,...) const
C-style formatted printing during evaluation.
bool verbose_
Verbose printout.
void clear_mem()
Clear all memory (called from destructor)
Helper class for Serialization.
void version(const std::string &name, int v)
void pack(const Sparsity &e)
Serializes an object to the output stream.
std::string class_name() const
Get class name.
General sparsity class.
Definition: sparsity.hpp:106
std::vector< casadi_int > erase(const std::vector< casadi_int > &rr, const std::vector< casadi_int > &cc, bool ind1=false)
Erase rows and/or columns of a matrix.
Definition: sparsity.cpp:339
casadi_int size1() const
Get the number of rows.
Definition: sparsity.cpp:124
Sparsity star_coloring(casadi_int ordering=1, casadi_int cutoff=std::numeric_limits< casadi_int >::max()) const
Perform a star coloring of a symmetric matrix:
Definition: sparsity.cpp:757
static Sparsity dense(casadi_int nrow, casadi_int ncol=1)
Create a dense rectangular sparsity pattern *.
Definition: sparsity.cpp:1012
Sparsity T() const
Transpose the matrix.
Definition: sparsity.cpp:394
casadi_int nnz() const
Get the number of (structural) non-zeros.
Definition: sparsity.cpp:148
casadi_int size2() const
Get the number of columns.
Definition: sparsity.cpp:128
const casadi_int * row() const
Get a reference to row-vector,.
Definition: sparsity.cpp:164
Sparsity uni_coloring(const Sparsity &AT=Sparsity(), casadi_int cutoff=std::numeric_limits< casadi_int >::max()) const
Perform a unidirectional coloring: A greedy distance-2 coloring algorithm.
Definition: sparsity.cpp:749
const casadi_int * colind() const
Get a reference to the colindex of all column element (see class description)
Definition: sparsity.cpp:168
The casadi namespace.
Definition: archiver.cpp:28
std::vector< casadi_int > range(casadi_int start, casadi_int stop, casadi_int step, casadi_int len)
Range function.
FdMode
Variable type.
bool has_prefix(const std::string &s)
void casadi_copy(const T1 *x, casadi_int n, T1 *y)
COPY: y <-x.
@ OT_STRINGVECTOR
std::string str(const T &v)
String representation, any type.
GenericType::Dict Dict
C++ equivalent of Python's dict or MATLAB's struct.
Parallelization
Type of parallelization.
std::string to_string(TypeFmi2 v)
const double nan
Not a number.
Definition: calculus.hpp:53
T * get_ptr(std::vector< T > &v)
Get a pointer to the data contained in the vector.
void casadi_trans(const T1 *x, const casadi_int *sp_x, T1 *y, const casadi_int *sp_y, casadi_int *tmp)
TRANS: y <- trans(x) , w work vector (length >= rows x)
std::string pop_prefix(const std::string &s, std::string *rem)
const FmuFunction & self
std::vector< size_t > wrt_
const double ** arg
std::vector< bool > omarked_
casadi_jac_data< double > d
std::vector< double > ibuf_
std::vector< FmuMemory * > slaves
std::vector< bool > imarked_
casadi_int * star_iw
static InputStruct parse(const std::string &n, const Fmu *fmu, std::vector< std::string > *name_in=0, std::vector< std::string > *name_out=0)
Options metadata for a class.
Definition: options.hpp:40
static OutputStruct parse(const std::string &n, const Fmu *fmu, std::vector< std::string > *name_in=0, std::vector< std::string > *name_out=0)
casadi_int nseed
Definition: casadi_jac.hpp:79
casadi_int * nzind
Definition: casadi_jac.hpp:93
casadi_int * isens
Definition: casadi_jac.hpp:85
casadi_int * iseed
Definition: casadi_jac.hpp:81
casadi_int * wrt
Definition: casadi_jac.hpp:91
casadi_int nsens
Definition: casadi_jac.hpp:79
const size_t * map_in
Definition: casadi_jac.hpp:39
const T1 * nom_in
Definition: casadi_jac.hpp:35
const size_t * map_out
Definition: casadi_jac.hpp:37