fmu_function.cpp
1 /*
2  * This file is part of CasADi.
3  *
4  * CasADi -- A symbolic framework for dynamic optimization.
5  * Copyright (C) 2010-2023 Joel Andersson, Joris Gillis, Moritz Diehl,
6  * KU Leuven. All rights reserved.
7  * Copyright (C) 2011-2014 Greg Horn
8  *
9  * CasADi is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 3 of the License, or (at your option) any later version.
13  *
14  * CasADi is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with CasADi; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  *
23  */
24 
25 
26 #include "fmu_function.hpp"
27 #include "casadi_misc.hpp"
28 #include "serializing_stream.hpp"
29 #include "dae_builder_internal.hpp"
30 #include "filesystem_impl.hpp"
31 
32 #include <fstream>
33 #include <iostream>
34 #include <sstream>
35 #include <algorithm>
36 
37 #ifdef WITH_OPENMP
38 #include <omp.h>
39 #endif // WITH_OPENMP
40 
41 #ifdef CASADI_WITH_THREAD
42 #ifdef CASADI_WITH_THREAD_MINGW
43 #include <mingw.thread.h>
44 #else // CASADI_WITH_THREAD_MINGW
45 #include <thread>
46 #endif // CASADI_WITH_THREAD_MINGW
47 #endif // CASADI_WITH_THREAD
48 
49 namespace casadi {
50 
// Refuse multi-instance memory allocation for FMUs that declare they can only
// be instantiated once per process.
// NOTE(review): the opening guard (original line 52) is missing from this
// extract -- presumably an `if (n > 1 && <single-instance flag>)` check;
// verify against the upstream CasADi source before editing.
51 void FmuFunction::check_mem_count(casadi_int n) const {
53  casadi_error("FMU '" + fmu_.instance_name() + "' [" + fmu_.class_name() + "] "
54  "declares 'canBeInstantiatedOnlyOncePerProcess' to be true. "
55  "Regenerate your FMU with this option set to false.");
56  }
57 }
58 
59 int FmuFunction::init_mem(void* mem) const {
60  casadi_assert(mem != 0, "Memory is null");
61  // Instantiate base classes
62  if (FunctionInternal::init_mem(mem)) return 1;
63  // Number of memory instances needed
64  casadi_int n_mem = std::max(static_cast<casadi_int>(1),
65  std::max(max_jac_tasks_, max_hess_tasks_));
66  // Initialize master and all slaves
67  FmuMemory* m = static_cast<FmuMemory*>(mem);
68  for (casadi_int i = 0; i < n_mem; ++i) {
69  // Initialize the memory object itself or a slave
70  FmuMemory* m1 = i == 0 ? m : m->slaves.at(i - 1);
71  if (fmu_.init_mem(m1)) return 1;
72  }
73  // Make sure we can query stats, even before numerical evaluation
74  m->stats_available = true;
75  return 0;
76 }
77 
78 void* FmuFunction::alloc_mem() const {
79  // Create (master) memory object
80  FmuMemory* m = fmu_.alloc_mem(*this);
81  // Attach additional (slave) memory objects
82  for (casadi_int i = 1; i < max_jac_tasks_; ++i) {
83  m->slaves.push_back(fmu_.alloc_mem(*this));
84  }
85  return m;
86 }
87 
// Release the master memory object and all attached slave memory objects.
// NOTE(review): original lines 97 and 105 are missing from this extract --
// presumably the calls that release the FMI instance handles before the
// pointers are reset to nullptr below; verify against upstream CasADi.
88 void FmuFunction::free_mem(void *mem) const {
89  // Consistency check
90  casadi_assert(mem != nullptr, "Memory is null");
91  FmuMemory* m = static_cast<FmuMemory*>(mem);
92  // Free slave memory
93  for (FmuMemory*& s : m->slaves) {
94  if (!s) continue;
95  // Free FMU memory
96  if (s->instance) {
98  s->instance = nullptr;
99  }
100  // Free the slave
101  fmu_.free_mem(s);
102  }
103  // Free FMI memory
104  if (m->instance) {
106  m->instance = nullptr;
107  }
108  // Free the memory object
109  fmu_.free_mem(m);
110 }
111 
// Constructor: parse the requested input/output scheme against the FMU and
// set option defaults. Options are finalized later in init().
// NOTE(review): original lines 139, 159-160, 168, 177 and 180 are missing
// from this extract (likely a JAC_TRANS case label, the defaults for
// uses_directional_derivatives_/uses_adjoint_derivatives_, the fd_method_
// default and related comments); verify against upstream CasADi.
112 FmuFunction::FmuFunction(const std::string& name, const Fmu& fmu,
113  const std::vector<std::string>& name_in,
114  const std::vector<std::string>& name_out)
115  : FunctionInternal(name), fmu_(fmu) {
116  // Parse input IDs
117  in_.resize(name_in.size());
118  for (size_t k = 0; k < name_in.size(); ++k) {
119  try {
120  in_[k] = InputStruct::parse(name_in[k], &fmu);
121  } catch (std::exception& e) {
122  casadi_error("Cannot process input " + name_in[k] + ": " + std::string(e.what()));
123  }
124  }
125  // Parse output IDs
126  out_.resize(name_out.size());
127  for (size_t k = 0; k < name_out.size(); ++k) {
128  try {
129  out_[k] = OutputStruct::parse(name_out[k], &fmu);
130  } catch (std::exception& e) {
131  casadi_error("Cannot process output " + name_out[k] + ": " + std::string(e.what()));
132  }
133  }
134  // Which inputs and outputs exist
135  has_fwd_ = has_adj_ = has_jac_ = has_hess_ = false;
136  for (auto&& i : out_) {
137  switch (i.type) {
138  case OutputType::JAC:
140  has_jac_ = true;
141  break;
142  case OutputType::FWD:
143  has_fwd_ = true;
144  break;
145  case OutputType::ADJ:
146  has_adj_ = true;
147  break;
148  case OutputType::HESS:
149  has_adj_ = true;
// HESS falls through into default below (no break); default only breaks,
// so behavior is unaffected.
150  has_hess_ = true;
151  default:
152  break;
153  }
154  }
155  // Set input/output names
156  name_in_ = name_in;
157  name_out_ = name_out;
158  // Default options
161  validate_forward_ = false;
162  validate_hessian_ = false;
163  validate_ad_file_ = "";
164  make_symmetric_ = true;
// One derivative direction by default, only when the scheme requests them
165  nfwd_ = has_fwd_ ? 1 : 0;
166  nadj_ = has_adj_ ? 1 : 0;
167  // Use FD for second and higher order derivatives
169  step_ = 1e-6;
170  abstol_ = 1e-3;
171  reltol_ = 1e-3;
172  print_progress_ = false;
173  new_jacobian_ = true;
174  new_forward_ = true;
175  new_hessian_ = true;
176  hessian_coloring_ = true;
178  // Number of parallel tasks, by default
179  max_n_tasks_ = 1;
181 }
182 
183 void FmuFunction::change_option(const std::string& option_name,
184  const GenericType& option_value) {
185  if (option_name == "print_progress") {
186  print_progress_ = option_value;
187  } else {
188  // Option not found - continue to base classes
189  FunctionInternal::change_option(option_name, option_value);
190  }
191 }
192 
// Destructor body: releases all allocated memory objects via clear_mem().
// NOTE(review): the destructor's opening line (original line 193,
// presumably 'FmuFunction::~FmuFunction() {') is missing from this extract;
// verify against upstream CasADi.
194  // Free memory
195  clear_mem();
196 }
197 
// Option table for FmuFunction: name -> {type, description}.
// NOTE(review): the declaration head (original lines 198-199, presumably
// 'const Options FmuFunction::options_ = {{&FunctionInternal::options_},')
// and several type-tag lines (201, 204, 210 -- presumably OT_STRINGVECTOR /
// OT_DICT tags for scheme_in, scheme_out and aux) are missing from this
// extract; verify against upstream CasADi.
200  {{"scheme_in",
202  "Names of the inputs in the scheme"}},
203  {"scheme_out",
205  "Names of the outputs in the scheme"}},
206  {"scheme",
207  {OT_DICT,
208  "Definitions of the scheme variables"}},
209  {"aux",
211  "Auxilliary variables"}},
212  {"enable_ad",
213  {OT_BOOL,
214  "[DEPRECATED] Renamed uses_directional_derivatives"}},
215  {"nfwd",
216  {OT_INT,
217  "Number of forward sensitivities to be calculated [1]"}},
218  {"nadj",
219  {OT_INT,
220  "Number of adjoint sensitivities to be calculated [1]"}},
221  {"uses_directional_derivatives",
222  {OT_BOOL,
223  "Use the analytic forward directional derivative support in the FMU"}},
224  {"validate_forward",
225  {OT_BOOL,
226  "Compare forward derivatives with finite differences for validation"}},
227  {"validate_hessian",
228  {OT_BOOL,
229  "Validate entries of the Hessian for self-consistency"}},
230  {"validate_ad",
231  {OT_BOOL,
232  "[DEPRECATED] Renamed 'validate_forward'"}},
233  {"validate_ad_file",
234  {OT_STRING,
235  "Redirect results of Hessian validation to a file instead of generating a warning"}},
236  {"check_hessian",
237  {OT_BOOL,
238  "[DEPRECATED] Renamed 'validate_hessian'"}},
239  {"make_symmetric",
240  {OT_BOOL,
241  "Ensure Hessian is symmetric"}},
242  {"step",
243  {OT_DOUBLE,
244  "Step size, scaled by nominal value"}},
245  {"abstol",
246  {OT_DOUBLE,
247  "Absolute error tolerance, scaled by nominal value"}},
248  {"reltol",
249  {OT_DOUBLE,
250  "Relative error tolerance"}},
251  {"parallelization",
252  {OT_STRING,
253  "Parallelization [SERIAL|openmp|thread]"}},
254  {"print_progress",
255  {OT_BOOL,
256  "Print progress during Jacobian/Hessian evaluation"}},
257  {"new_forward",
258  {OT_BOOL,
259  "Use forward AD implementation in class (conversion option, to be removed)"}},
260  {"new_jacobian",
261  {OT_BOOL,
262  "Use Jacobian implementation in class (conversion option, to be removed)"}},
263  {"new_hessian",
264  {OT_BOOL,
265  "Use Hessian implementation in class (conversion option, to be removed)"}},
266  {"hessian_coloring",
267  {OT_BOOL,
268  "Enable the use of graph coloring (star coloring) for Hessian calculation. "
269  "Note that disabling the coloring can improve symmetry check diagnostics."}}
270  }
271 };
272 
// Finalize construction: read user options, set up derivative bookkeeping
// (extended Jacobian/Hessian sparsity, graph coloring, parallel task counts)
// and allocate persistent work vectors.
// NOTE(review): this extract is missing several original lines; each gap is
// flagged inline below. Verify all of them against upstream CasADi.
273 void FmuFunction::init(const Dict& opts) {
274  // Read options
275  for (auto&& op : opts) {
276  if (op.first=="enable_ad") {
277  casadi_warning("Option 'enable_ad' has been renamed 'uses_directional_derivatives'");
278  uses_directional_derivatives_ = op.second;
279  } else if (op.first=="uses_directional_derivatives") {
280  uses_directional_derivatives_ = op.second;
281  } else if (op.first=="nfwd") {
282  nfwd_ = op.second;
283  } else if (op.first=="nadj") {
284  nadj_ = op.second;
285  } else if (op.first=="uses_adjoint_derivatives") {
286  uses_adjoint_derivatives_ = op.second;
287  } else if (op.first=="validate_forward") {
288  validate_forward_ = op.second;
289  } else if (op.first=="validate_hessian") {
290  validate_hessian_ = op.second;
291  } else if (op.first=="validate_ad") {
292  casadi_warning("Option 'validate_ad' has been renamed 'validate_forward'");
293  validate_forward_ = op.second;
294  } else if (op.first=="check_hessian") {
295  casadi_warning("Option 'check_hessian' has been renamed 'validate_hessian'");
296  validate_hessian_ = op.second;
297  } else if (op.first=="validate_ad_file") {
298  validate_ad_file_ = op.second.to_string();
299  } else if (op.first=="make_symmetric") {
300  make_symmetric_ = op.second;
301  } else if (op.first=="step") {
302  step_ = op.second;
303  } else if (op.first=="abstol") {
304  abstol_ = op.second;
305  } else if (op.first=="reltol") {
306  reltol_ = op.second;
307  } else if (op.first=="parallelization") {
308  parallelization_ = to_enum<Parallelization>(op.second, "serial");
309  } else if (op.first=="print_progress") {
310  print_progress_ = op.second;
311  } else if (op.first=="new_forward") {
312  new_forward_ = op.second;
313  } else if (op.first=="new_jacobian") {
314  new_jacobian_ = op.second;
315  } else if (op.first=="new_hessian") {
316  new_hessian_ = op.second;
317  } else if (op.first=="hessian_coloring") {
318  hessian_coloring_ = op.second;
319  }
320  }
321 
322  // Call the initialization method of the base class
// NOTE(review): original line 323 (presumably 'FunctionInternal::init(opts);')
// is missing from this extract.
324 
325  // Read FD mode
326  fd_ = to_enum<FdMode>(fd_method_, "forward");
327 
328  // Consistency checks
// NOTE(review): original lines 329 and 332 (the casadi_assert heads matching
// the two message continuations below) are missing from this extract.
330  "FMU does not provide support for analytic derivatives");
331  if (validate_forward_ && !uses_directional_derivatives_) casadi_error("Inconsistent options");
333  "FMU does not provide support for adjoint derivatives");
334 
335  // New AD validation file, if any
336  if (!validate_ad_file_.empty()) {
337  auto valfile_ptr = Filesystem::ofstream_ptr(validate_ad_file_);
338  std::ostream& valfile = *valfile_ptr;
// Write the column header; the file is reopened for appending later on.
339  valfile << "Output Input Value Nominal Min Max AD FD Step Offset Stencil" << std::endl;
340  }
341 
342  // Quick return if no Jacobian calculation
343  if (!has_jac_ && !has_adj_ && !has_hess_) return;
344 
345  // Parallelization
// NOTE(review): the case labels (original lines 347, 351, 357 -- presumably
// SERIAL, OPENMP, THREAD) and line 365 (serial fallback assignment) are
// missing from this extract.
346  switch (parallelization_) {
348  if (verbose_) casadi_message("Serial evaluation");
349  break;
350 #ifdef WITH_OPENMP
352  max_n_tasks_ = omp_get_max_threads();
353  if (verbose_) casadi_message("OpenMP using at most " + str(max_n_tasks_) + " threads");
354  break;
355 #endif // WITH_OPENMP
356 #ifdef CASADI_WITH_THREAD
358  max_n_tasks_ = std::thread::hardware_concurrency();
359  if (verbose_) casadi_message("std::thread using at most " + str(max_n_tasks_) + " threads");
360  break;
361 #endif // CASADI_WITH_THREAD
362  default:
363  casadi_warning("Parallelization " + to_string(parallelization_)
364  + " not enabled during compilation. Falling back to serial evaluation");
366  break;
367  }
368 
369  // Collect all inputs in any Jacobian, Hessian or adjoint block
// in_jac maps an FMU input index to 1 + its position in jac_in_ (0 = absent)
370  std::vector<size_t> in_jac(fmu_.n_in(), 0);
371  jac_in_.clear();
372  jac_nom_in_.clear();
373  for (auto&& i : out_) {
374  if (i.type == OutputType::JAC || i.type == OutputType::JAC_TRANS
375  || i.type == OutputType::ADJ || i.type == OutputType::HESS) {
376  // Get input indices
377  const std::vector<size_t>& iind = fmu_.ired(i.wrt);
378  // Skip if no entries
379  if (iind.empty()) continue;
380  // Consistency check
381  bool exists = in_jac[iind.front()] > 0;
382  for (size_t j : iind) casadi_assert((in_jac[j] > 0) == exists, "Jacobian not a block");
383  // Add selection
384  if (!exists) {
385  for (size_t j : iind) {
386  jac_in_.push_back(j);
387  jac_nom_in_.push_back(fmu_.nominal_in(j));
388  in_jac[j] = jac_in_.size();
389  }
390  }
391  // Add column interval
392  i.cbegin = in_jac[iind.front()] - 1;
393  i.cend = i.cbegin + iind.size();
394  // Also rows for Hessian blocks
395  if (i.type == OutputType::HESS) {
396  // Get input indices
397  const std::vector<size_t>& iind = fmu_.ired(i.ind);
398  // Skip if no entries
399  if (iind.empty()) continue;
400  // Consistency check
401  bool exists = in_jac[iind.front()] > 0;
402  for (size_t j : iind) casadi_assert((in_jac[j] > 0) == exists, "Hessian not a block");
403  // Add selection
404  if (!exists) {
405  for (size_t j : iind) {
406  jac_in_.push_back(j);
407  jac_nom_in_.push_back(fmu_.nominal_in(j));
408  in_jac[j] = jac_in_.size();
409  }
410  }
411  // Add column interval
412  i.rbegin = in_jac[iind.front()] - 1;
413  i.rend = i.rbegin + iind.size();
414  }
415  }
416  }
417 
418  // Transpose of Jacobian sparsity
419  sp_trans_map_.resize(out_.size(), -1);
420  sp_trans_.clear();
421 
422  // Collect all outputs in any Jacobian or adjoint block
// in_jac is reused here for FMU *output* indices (same 1-based convention)
423  in_jac.resize(fmu_.n_out());
424  std::fill(in_jac.begin(), in_jac.end(), 0);
425  jac_out_.clear();
426  for (size_t k = 0; k < out_.size(); ++k) {
427  OutputStruct& i = out_[k];
428  if (i.type == OutputType::JAC || i.type == OutputType::JAC_TRANS) {
429  // Get output indices
430  const std::vector<size_t>& oind = fmu_.ored(i.ind);
431  // Skip if no entries
432  if (oind.empty()) continue;
433  // Consistency check
434  bool exists = in_jac[oind.front()] > 0;
435  for (size_t j : oind) casadi_assert((in_jac[j] > 0) == exists, "Jacobian not a block");
436  // Add selection
437  if (!exists) {
438  for (size_t j : oind) {
439  jac_out_.push_back(j);
440  in_jac[j] = jac_out_.size();
441  }
442  }
443  // Add row interval
444  i.rbegin = in_jac[oind.front()] - 1;
445  i.rend = i.rbegin + oind.size();
446  // Additional memory for transpose
447  if (i.type == OutputType::JAC_TRANS) {
448  // Retrieve the sparsity pattern
449  const Sparsity& sp = sparsity_out(k);
450  // Save transpose of sparsity pattern
451  sp_trans_map_.at(k) = sp_trans_.size();
452  sp_trans_.push_back(sp.T());
453  // Work vectors for casadi_trans
454  alloc_w(sp.nnz());
455  alloc_iw(sp.size2());
456  }
457  }
458  }
459  // NOTE(@jaeandersson): Make conditional !need_jac && uses_adjoint_derivatives_?
460  for (auto&& i : in_) {
461  if (i.type == InputType::ADJ) {
462  // Get output indices
463  const std::vector<size_t>& oind = fmu_.ored(i.ind);
464  // Skip if no entries
465  if (oind.empty()) continue;
466  // Consistency check
467  bool exists = in_jac[oind.front()] > 0;
468  for (size_t j : oind) casadi_assert((in_jac[j] > 0) == exists, "Jacobian not a block");
469  // Add selection
470  if (!exists) {
471  for (size_t j : oind) {
472  jac_out_.push_back(j);
473  in_jac[j] = jac_out_.size();
474  }
475  }
476  }
477  }
478 
479  // Get sparsity pattern for extended Jacobian
// NOTE(review): original line 480 (presumably the jac_sp_ assignment) is
// missing from this extract.
481 
482  // Calculate graph coloring
// NOTE(review): original line 483 (presumably the jac_colors_ assignment,
// e.g. unidirectional coloring of jac_sp_) is missing from this extract.
484  if (verbose_) casadi_message("Jacobian graph coloring: " + str(jac_sp_.size2())
485  + " -> " + str(jac_colors_.size2()) + " directions");
486 
487  // Setup Jacobian memory
488  casadi_jac_setup(&p_, jac_sp_, jac_colors_);
// NOTE(review): original lines 489-491 are missing from this extract
// (presumably further members of p_ being set, e.g. nominals/tolerances).
492 
493  // Do not use more threads than there are colors in the Jacobian
// NOTE(review): original line 494 (presumably the max_jac_tasks_ assignment)
// is missing from this extract.
495 
496  // Work vector for storing extended Jacobian, shared between threads
497  if (has_jac_) {
498  alloc_w(jac_sp_.nnz(), true); // jac_nz
499  }
500 
501  // Work vectors for adjoint derivative calculation, shared between threads
502  if (has_adj_) {
503  alloc_w(nadj_ * fmu_.n_out(), true); // aseed
504  alloc_w(nadj_ * fmu_.n_in(), true); // asens
505  }
506 
507  // If Hessian calculation is needed
508  if (has_hess_) {
509  // Get sparsity pattern for extended Hessian
// NOTE(review): original line 510 (presumably the hess_sp_ assignment) is
// missing from this extract.
511  casadi_assert(hess_sp_.size1() == jac_in_.size(), "Inconsistent Hessian dimensions");
512  casadi_assert(hess_sp_.size2() == jac_in_.size(), "Inconsistent Hessian dimensions");
513  const casadi_int *hess_row = hess_sp_.row();
514  casadi_int hess_nnz = hess_sp_.nnz();
515 
516  // Get nonlinearly entering variables
517  std::vector<bool> is_nonlin(jac_in_.size(), false);
518  for (casadi_int k = 0; k < hess_nnz; ++k) is_nonlin[hess_row[k]] = true;
519  nonlin_.clear();
520  for (casadi_int c = 0; c < jac_in_.size(); ++c) {
521  if (is_nonlin[c]) nonlin_.push_back(c);
522  }
523  // Calculate graph coloring
524  if (hessian_coloring_) {
525  // Star coloring
// NOTE(review): original line 526 (presumably the hess_colors_ star-coloring
// assignment) is missing from this extract.
527  if (verbose_) casadi_message("Hessian graph coloring: " + str(nonlin_.size())
528  + " -> " + str(hess_colors_.size2()) + " directions");
529  // Indices of non-nonlinearly entering variables
530  std::vector<casadi_int> zind;
531  for (casadi_int c = 0; c < jac_in_.size(); ++c) {
532  if (!is_nonlin[c]) zind.push_back(c);
533  }
534  // Zero out corresponding rows
// NOTE(review): original line 535 (presumably zeroing rows zind of
// hess_colors_) is missing from this extract.
536  } else {
537  // One color for each nonlinear variable
538  hess_colors_ = Sparsity(jac_in_.size(), nonlin_.size(), range(nonlin_.size() + 1), nonlin_);
539  if (verbose_) casadi_message("Hessian calculation for " + str(nonlin_.size()) + " variables");
540  }
541 
542  // Number of threads to be used for Hessian calculation
// NOTE(review): original line 543 (presumably the max_hess_tasks_ assignment)
// is missing from this extract.
544 
545  // Work vector for storing extended Hessian, shared between threads
546  alloc_w(hess_sp_.nnz(), true); // hess_nz
547 
548  // Work vector for perturbed adjoint sensitivities
549  alloc_w(max_hess_tasks_ * fmu_.n_in(), true); // pert_asens
550 
551  // Work vector for avoiding conflicting assignments for star coloring
552  alloc_iw(max_hess_tasks_ * fmu_.n_in(), true); // star_iw
553 
554  // Work vector for making symmetric or checking symmetry
// NOTE(review): original line 555 (presumably an alloc_iw for the symmetry
// work vector) is missing from this extract.
556  }
557 
558  // Total number of threads used for Jacobian/adjoint calculation
559  // Note: Adjoint calculation also used for Hessian
// NOTE(review): original line 560 (presumably the max_n_tasks_ update from
// max_jac_tasks_/max_hess_tasks_) is missing from this extract.
561  if (verbose_) casadi_message("Jacobian calculation with " + str(max_n_tasks_) + " threads");
562 
563  // Work vectors for Jacobian/adjoint/Hessian calculation, for each thread
564  casadi_int jac_iw, jac_w;
565  casadi_jac_work(&p_, &jac_iw, &jac_w);
566  alloc_iw(max_n_tasks_ * jac_iw, true);
567  alloc_w(max_n_tasks_ * jac_w, true);
568 }
569 
571  std::vector<std::string>* scheme_in,
572  std::vector<std::string>* scheme_out,
573  const std::vector<std::string>& name_in,
574  const std::vector<std::string>& name_out) {
575  // Clear returns
576  if (scheme_in) scheme_in->clear();
577  if (scheme_out) scheme_out->clear();
578  // Parse FmuFunction inputs
579  for (const std::string& n : name_in) {
580  try {
581  (void)InputStruct::parse(n, 0, scheme_in, scheme_out);
582  } catch (std::exception& e) {
583  casadi_error("Cannot process input " + n + ": " + std::string(e.what()));
584  }
585  }
586  // Parse FmuFunction outputs
587  for (const std::string& n : name_out) {
588  try {
589  (void)OutputStruct::parse(n, 0, scheme_in, scheme_out);
590  } catch (std::exception& e) {
591  casadi_error("Cannot process output " + n + ": " + std::string(e.what()));
592  }
593  }
594  // Remove duplicates in scheme_in, also sorts alphabetically
595  if (scheme_in) {
596  std::set<std::string> s(scheme_in->begin(), scheme_in->end());
597  scheme_in->assign(s.begin(), s.end());
598  }
599  // Remove duplicates in scheme_out, also sorts alphabetically
600  if (scheme_out) {
601  std::set<std::string> s(scheme_out->begin(), scheme_out->end());
602  scheme_out->assign(s.begin(), s.end());
603  }
604 }
605 
// Parse one FmuFunction input name ("x", "fwd_x", "adj_y", "out_y",
// "out_adj_x") into an InputStruct. If fmu is given, indices are resolved
// against it; scheme names encountered are appended to name_in/name_out.
// NOTE(review): original line 620 is missing from this extract -- presumably
// the type assignment for the "out adj" branch (s.type = InputType::ADJ_OUT);
// verify against upstream CasADi.
606 InputStruct InputStruct::parse(const std::string& n, const Fmu* fmu,
607  std::vector<std::string>* name_in, std::vector<std::string>* name_out) {
608  // Return value
609  InputStruct s;
610  // Look for a prefix
611  if (has_prefix(n)) {
612  // Get the prefix
613  std::string pref, rem;
614  pref = pop_prefix(n, &rem);
615  if (pref == "out") {
616  if (has_prefix(rem)) {
617  // Second order function output (unused): Get the prefix
618  pref = pop_prefix(rem, &rem);
619  if (pref == "adj") {
621  s.ind = fmu ? fmu->index_in(rem) : -1;
622  if (name_in) name_in->push_back(rem);
623  } else {
624  casadi_error("Cannot process: " + n);
625  }
626  } else {
627  // Nondifferentiated function output (unused)
628  s.type = InputType::OUT;
629  s.ind = fmu ? fmu->index_out(rem) : -1;
630  if (name_out) name_out->push_back(rem);
631  }
632  } else if (pref == "fwd") {
633  // Forward seed
634  s.type = InputType::FWD;
635  s.ind = fmu ? fmu->index_in(rem) : 0;
636  if (name_in) name_in->push_back(rem);
637  } else if (pref == "adj") {
638  // Adjoint seed
639  s.type = InputType::ADJ;
640  s.ind = fmu ? fmu->index_out(rem) : 0;
641  if (name_out) name_out->push_back(rem);
642  } else {
643  // No such prefix
644  casadi_error("No such prefix: " + pref);
645  }
646  } else {
647  // No prefix - regular input
648  s.type = InputType::REG;
649  s.ind = fmu ? fmu->index_in(n) : 0;
650  if (name_in) name_in->push_back(n);
651  }
652  // Return input struct
653  return s;
654 }
655 
// Parse one FmuFunction output name ("y", "fwd_y", "adj_x", "jac_y_x",
// "jac_adj_x_adj_y", "jac_adj_x_out_y", "jac_adj_x1_x2", "jac_y_adj_x")
// into an OutputStruct, resolving indices against fmu when given.
// NOTE(review): original lines 679, 686, 696 and 708 are missing from this
// extract -- presumably the s.type assignments for the JAC_TRANS, JAC_ADJ_OUT,
// HESS and JAC_REG_ADJ branches respectively; verify against upstream CasADi.
656 OutputStruct OutputStruct::parse(const std::string& n, const Fmu* fmu,
657  std::vector<std::string>* name_in, std::vector<std::string>* name_out) {
658  // Return value
659  OutputStruct s;
660  // Look for prefix
661  if (has_prefix(n)) {
662  // Get the prefix
663  std::string pref, rem;
664  pref = pop_prefix(n, &rem);
665  if (pref == "jac") {
666  // Jacobian block
667  casadi_assert(has_prefix(rem), "Two arguments expected for Jacobian block");
668  pref = pop_prefix(rem, &rem);
669  if (pref == "adj") {
670  // Jacobian of adjoint sensitivity
671  casadi_assert(has_prefix(rem), "Two arguments expected for Jacobian block");
672  pref = pop_prefix(rem, &rem);
673  if (has_prefix(rem)) {
674  // Jacobian with respect to a sensitivity seed
675  std::string sens = pref;
676  pref = pop_prefix(rem, &rem);
677  if (pref == "adj") {
678  // Jacobian of adjoint sensitivity w.r.t. adjoint seed -> Transpose of Jacobian
680  s.ind = fmu ? fmu->index_out(rem) : -1;
681  if (name_out) name_out->push_back(rem);
682  s.wrt = fmu ? fmu->index_in(sens) : -1;
683  if (name_in) name_in->push_back(sens);
684  } else if (pref == "out") {
685  // Jacobian w.r.t. to dummy output
687  s.ind = fmu ? fmu->index_in(sens) : -1;
688  if (name_in) name_in->push_back(sens);
689  s.wrt = fmu ? fmu->index_out(rem) : -1;
// NOTE(review): guard checks name_in but dereferences name_out -- if name_in
// is non-null while name_out is null this is a null-pointer dereference;
// likely should read 'if (name_out)'. Confirm against upstream CasADi.
690  if (name_in) name_out->push_back(rem);
691  } else {
692  casadi_error("No such prefix: " + pref);
693  }
694  } else {
695  // Hessian output
697  s.ind = fmu ? fmu->index_in(pref) : -1;
698  if (name_in) name_in->push_back(pref);
699  s.wrt = fmu ? fmu->index_in(rem) : -1;
700  if (name_in) name_in->push_back(rem);
701  }
702  } else {
703  if (has_prefix(rem)) {
704  std::string out = pref;
705  pref = pop_prefix(rem, &rem);
706  if (pref == "adj") {
707  // Jacobian of regular output w.r.t. adjoint sensitivity seed
709  s.ind = fmu ? fmu->index_out(out) : -1;
710  if (name_out) name_out->push_back(out);
711  s.wrt = fmu ? fmu->index_out(rem) : -1;
712  if (name_out) name_out->push_back(rem);
713  } else {
714  casadi_error("No such prefix: " + pref);
715  }
716  } else {
717  // Regular Jacobian
718  s.type = OutputType::JAC;
719  s.ind = fmu ? fmu->index_out(pref) : -1;
720  if (name_out) name_out->push_back(pref);
721  s.wrt = fmu ? fmu->index_in(rem) : -1;
722  if (name_in) name_in->push_back(rem);
723  }
724  }
725  } else if (pref == "fwd") {
726  // Forward sensitivity
727  s.type = OutputType::FWD;
728  s.ind = fmu ? fmu->index_out(rem) : -1;
729  if (name_out) name_out->push_back(rem);
730  } else if (pref == "adj") {
731  // Adjoint sensitivity
732  s.type = OutputType::ADJ;
733  s.wrt = fmu ? fmu->index_in(rem) : -1;
734  if (name_in) name_in->push_back(rem);
735  } else {
736  // No such prefix
737  casadi_error("No such prefix: " + pref);
738  }
739  } else {
740  // No prefix - regular output
741  s.type = OutputType::REG;
742  s.ind = fmu ? fmu->index_out(n) : -1;
743  if (name_out) name_out->push_back(n);
744  }
745  // Return output struct
746  return s;
747 }
748 
750  switch (in_.at(i).type) {
751  case InputType::REG:
752  return Sparsity::dense(fmu_.ired(in_.at(i).ind).size(), 1);
753  case InputType::FWD:
754  return Sparsity::dense(fmu_.ired(in_.at(i).ind).size(), nfwd_);
755  case InputType::ADJ:
756  return Sparsity::dense(fmu_.ored(in_.at(i).ind).size(), nadj_);
757  case InputType::OUT:
758  return Sparsity(fmu_.ored(in_.at(i).ind).size(), 1);
759  case InputType::ADJ_OUT:
760  return Sparsity(fmu_.ired(in_.at(i).ind).size(), 1);
761  }
762  return Sparsity();
763 }
764 
766  const OutputStruct& s = out_.at(i);
767  switch (out_.at(i).type) {
768  case OutputType::REG:
769  return Sparsity::dense(fmu_.ored(s.ind).size(), 1);
770  case OutputType::FWD:
771  return Sparsity::dense(fmu_.ored(s.ind).size(), nfwd_);
772  case OutputType::ADJ:
773  return Sparsity::dense(fmu_.ired(s.wrt).size(), nadj_);
774  case OutputType::JAC:
775  return fmu_.jac_sparsity(s.ind, s.wrt);
777  return fmu_.jac_sparsity(s.ind, s.wrt).T();
779  return Sparsity(fmu_.ired(s.ind).size(), fmu_.ored(s.wrt).size());
781  return Sparsity(fmu_.ored(s.ind).size(), fmu_.ored(s.wrt).size());
782  case OutputType::HESS:
783  return fmu_.hess_sparsity(s.ind, s.wrt);
784  }
785  return Sparsity();
786 }
787 
// Nominal values for input i: taken from the FMU for regular and dummy-output
// inputs; all other input kinds fall through to the base-class default.
// NOTE(review): original line 802 (presumably
// 'return FunctionInternal::get_nominal_in(i);') is missing from this
// extract; verify against upstream CasADi.
788 std::vector<double> FmuFunction::get_nominal_in(casadi_int i) const {
789  switch (in_.at(i).type) {
790  case InputType::REG:
791  return fmu_.all_nominal_in(in_.at(i).ind);
792  case InputType::FWD:
793  break;
794  case InputType::ADJ:
795  break;
796  case InputType::OUT:
797  return fmu_.all_nominal_out(in_.at(i).ind);
798  case InputType::ADJ_OUT:
799  break;
800  }
801  // Default: Base class
803 }
804 
// Nominal values for output i: taken from the FMU for regular outputs;
// derivative blocks are not implemented (warn and fall through to the
// base-class default).
// NOTE(review): the case labels on original lines 816, 819 and 822
// (presumably JAC_TRANS, JAC_ADJ_OUT and JAC_REG_ADJ, matching the warning
// messages below) and original line 830 (presumably
// 'return FunctionInternal::get_nominal_out(i);') are missing from this
// extract; verify against upstream CasADi.
805 std::vector<double> FmuFunction::get_nominal_out(casadi_int i) const {
806  switch (out_.at(i).type) {
807  case OutputType::REG:
808  return fmu_.all_nominal_out(out_.at(i).ind);
809  case OutputType::FWD:
810  break;
811  case OutputType::ADJ:
812  break;
813  case OutputType::JAC:
814  casadi_warning("FmuFunction::get_nominal_out not implemented for OutputType::JAC");
815  break;
817  casadi_warning("FmuFunction::get_nominal_out not implemented for OutputType::JAC_TRANS");
818  break;
820  casadi_warning("FmuFunction::get_nominal_out not implemented for OutputType::JAC_ADJ_OUT");
821  break;
823  casadi_warning("FmuFunction::get_nominal_out not implemented for OutputType::JAC_REG_ADJ");
824  break;
825  case OutputType::HESS:
826  casadi_warning("FmuFunction::get_nominal_out not implemented for OutputType::HESS");
827  break;
828  }
829  // Default: Base class
831 }
832 
// Numerical evaluation entry point: determine which derivative blocks are
// requested, carve shared work vectors out of w/iw, distribute memory to the
// task threads, evaluate (Jacobian/forward/adjoint first, then Hessian) and
// copy the requested sub-blocks into the result buffers. Returns 0 on
// success, 1 on failure.
// NOTE(review): original lines 847, 940 and 943 are missing from this
// extract -- presumably the JAC_TRANS case labels in both switches and the
// casadi_trans call that writes the transposed block; verify against
// upstream CasADi.
833 int FmuFunction::eval(const double** arg, double** res, casadi_int* iw, double* w,
834  void* mem) const {
835  // Get memory struct
836  FmuMemory* m = static_cast<FmuMemory*>(mem);
837  casadi_assert(m != 0, "Memory is null");
838 
839  setup(mem, arg, res, iw, w);
840 
841  // What blocks are there?
842  bool need_jac = false, need_fwd = false, need_adj = false, need_hess = false;
843  for (size_t k = 0; k < out_.size(); ++k) {
844  if (res[k]) {
845  switch (out_[k].type) {
846  case OutputType::JAC:
848  need_jac = true;
849  break;
850  case OutputType::FWD:
851  need_fwd = true;
852  break;
853  case OutputType::ADJ:
854  need_adj = true;
855  break;
856  case OutputType::HESS:
857  need_adj = true;
858  need_hess = true;
859  break;
860  default:
861  break;
862  }
863  }
864  }
865  // Work vectors, shared between threads
866  double *aseed = 0, *asens = 0, *jac_nz = 0, *hess_nz = 0;
867  if (need_jac) {
868  // Jacobian nonzeros, initialize to NaN
869  jac_nz = w; w += jac_sp_.nnz();
870  std::fill(jac_nz, jac_nz + jac_sp_.nnz(), casadi::nan);
871  }
872  if (need_adj) {
873  // Set up vectors
874  aseed = w; w += nadj_ * fmu_.n_out();
875  asens = w; w += nadj_ * fmu_.n_in();
876  // Clear seed/sensitivity vectors
877  std::fill(aseed, aseed + nadj_ * fmu_.n_out(), 0);
878  std::fill(asens, asens + nadj_ * fmu_.n_in(), 0);
879  // Copy adjoint seeds to aseed
880  for (size_t i = 0; i < in_.size(); ++i) {
881  if (arg[i] && in_[i].type == InputType::ADJ) {
882  const std::vector<size_t>& oind = fmu_.ored(in_[i].ind);
883  for (casadi_int d = 0; d < nadj_; ++d) {
884  size_t aseed_off = d * fmu_.n_out();
885  size_t off = d * size1_in(i);
886  for (size_t k = 0; k < oind.size(); ++k) aseed[oind[k] + aseed_off] = arg[i][k + off];
887  }
888  }
889  }
890  }
891  if (need_hess) {
892  // Hessian nonzeros, initialize to NaN
893  hess_nz = w; w += hess_sp_.nnz();
894  std::fill(hess_nz, hess_nz + hess_sp_.nnz(), casadi::nan);
895  }
896  // Setup memory for threads
897  for (casadi_int task = 0; task < max_n_tasks_; ++task) {
898  FmuMemory* s = task == 0 ? m : m->slaves.at(task - 1);
899  // Shared memory
900  s->arg = arg;
901  s->res = res;
902  s->aseed = aseed;
903  s->asens = asens;
904  s->jac_nz = jac_nz;
905  s->hess_nz = hess_nz;
906  // Thread specific memory
907  casadi_jac_init(&p_, &s->d, &iw, &w);
908  if (task < max_hess_tasks_) {
909  // Perturbed adjoint sensitivities
910  s->pert_asens = w;
911  w += fmu_.n_in();
912  // Work vector for avoiding assignment of illegal nonzeros
913  s->star_iw = iw;
914  iw += fmu_.n_in();
915  }
916  }
917  // Evaluate everything except Hessian, possibly in parallel
918  if (verbose_) casadi_message("Evaluating regular outputs, forward sens, extended Jacobian");
919  if (eval_all(m, max_jac_tasks_, true, need_jac, need_fwd, need_adj, false)) return 1;
920  // Evaluate Hessian
921  if (need_hess) {
922  if (verbose_) casadi_message("Evaluating extended Hessian");
923  if (eval_all(m, max_hess_tasks_, false, false, false, false, true)) return 1;
924  // Post-process Hessian
925  remove_nans(hess_nz, iw);
926  if (validate_hessian_) check_hessian(m, hess_nz, iw);
927  if (make_symmetric_) make_symmetric(hess_nz, iw);
928  }
929  // Fetch calculated blocks
930  for (size_t k = 0; k < out_.size(); ++k) {
931  // Get nonzeros, skip if not needed
932  double* r = res[k];
933  if (!r) continue;
934  // Get by type
935  switch (out_[k].type) {
936  case OutputType::JAC:
937  casadi_get_sub(r, jac_sp_, jac_nz,
938  out_[k].rbegin, out_[k].rend, out_[k].cbegin, out_[k].cend);
939  break;
941  casadi_get_sub(w, jac_sp_, jac_nz,
942  out_[k].rbegin, out_[k].rend, out_[k].cbegin, out_[k].cend);
944  break;
945  case OutputType::ADJ:
946  // If adjoint sensitivities have not already been set
947  if (need_jac || !uses_adjoint_derivatives_) {
948  for (casadi_int d = 0; d < nadj_; ++d) {
949  size_t asens_off = d * fmu_.n_in();
950  for (size_t id : fmu_.ired(out_[k].wrt)) *r++ = asens[id + asens_off];
951  }
952  }
953  break;
954  case OutputType::HESS:
955  casadi_get_sub(r, hess_sp_, hess_nz,
956  out_[k].rbegin, out_[k].rend, out_[k].cbegin, out_[k].cend);
957  break;
958  default:
959  break;
960  }
961  }
962  // Successful return
963  return 0;
964 }
965 
// Dispatch evaluation over n_task tasks: serially, via OpenMP, or via
// std::thread, depending on parallelization_ and compile-time support.
// Returns the OR of the per-task error flags.
// NOTE(review): original lines 975 and 999 are missing from this extract --
// presumably the '} else if (parallelization_ == Parallelization::OPENMP) {'
// and '} else if (parallelization_ == Parallelization::THREAD) {' branch
// heads; verify against upstream CasADi.
966 int FmuFunction::eval_all(FmuMemory* m, casadi_int n_task,
967  bool need_nondiff, bool need_jac, bool need_fwd, bool need_adj, bool need_hess) const {
968  // Return flag
969  int flag = 0;
970  // Evaluate, serially or in parallel
971  if (parallelization_ == Parallelization::SERIAL || n_task == 1
972  || (!need_jac && !need_adj && !need_hess)) {
973  // Evaluate serially
974  flag = eval_task(m, 0, 1, need_nondiff, need_jac, need_fwd, need_adj, need_hess);
976  #ifdef WITH_OPENMP
977  // Parallel region
978  #pragma omp parallel reduction(||:flag)
979  {
980  // Get thread number
981  casadi_int task = omp_get_thread_num();
982  // Get number of threads in region
983  casadi_int num_threads = omp_get_num_threads();
984  // Number of threads that are actually used
985  casadi_int num_used_threads = std::min(num_threads, n_task);
986  // Evaluate in parallel
987  if (task < num_used_threads) {
988  FmuMemory* s = task == 0 ? m : m->slaves.at(task - 1);
989  flag = eval_task(s, task, num_used_threads, need_nondiff && task == 0,
990  need_jac, need_fwd && task < nfwd_, need_adj, need_hess);
991  } else {
992  // Nothing to do for thread
993  flag = 0;
994  }
995  }
996  #else // WITH_OPENMP
997  flag = 1;
998  #endif // WITH_OPENMP
1000  #ifdef CASADI_WITH_THREAD
1001  // Return value for each thread
1002  std::vector<int> flag_task(n_task);
1003  // Spawn threads
1004  std::vector<std::thread> threads;
1005  for (casadi_int task = 0; task < n_task; ++task) {
1006  threads.emplace_back(
1007  [&, task](int* fl) {
1008  FmuMemory* s = task == 0 ? m : m->slaves.at(task - 1);
1009  *fl = eval_task(s, task, n_task, need_nondiff && task == 0,
1010  need_jac, need_fwd && task < nfwd_, need_adj, need_hess);
1011  }, &flag_task[task]);
1012  }
1013  // Join threads
1014  for (auto&& th : threads) th.join();
1015  // Join return flags
1016  for (int fl : flag_task) flag = flag || fl;
1017  #else // CASADI_WITH_THREAD
1018  flag = 1;
1019  #endif // CASADI_WITH_THREAD
1020  } else {
1021  casadi_error("Unknown parallelization: " + to_string(parallelization_));
1022  }
1023  // Return combined error flag
1024  return flag;
1025 }
1026 
1027 int FmuFunction::eval_task(FmuMemory* m, casadi_int task, casadi_int n_task,
1028  bool need_nondiff, bool need_jac, bool need_fwd, bool need_adj, bool need_hess) const {
1029  // Pass all regular inputs
1030  for (size_t k = 0; k < in_.size(); ++k) {
1031  if (in_[k].type == InputType::REG) {
1032  fmu_.set(m, in_[k].ind, m->arg[k]);
1033  }
1034  }
1035  // Request all regular outputs to be evaluated
1036  for (size_t k = 0; k < out_.size(); ++k) {
1037  if (m->res[k] && out_[k].type == OutputType::REG) {
1038  fmu_.request(m, out_[k].ind);
1039  }
1040  }
1041  // Evaluate
1042  if (fmu_.eval(m)) return 1;
1043  // Get regular outputs (master thread only)
1044  if (need_nondiff) {
1045  for (size_t k = 0; k < out_.size(); ++k) {
1046  if (m->res[k] && out_[k].type == OutputType::REG) {
1047  fmu_.get(m, out_[k].ind, m->res[k]);
1048  }
1049  }
1050  }
1051  // Forward derivatives
1052  if (need_fwd) {
1053  // Selection of forward derivatives to be evaluated for the thread
1054  casadi_int d_begin = (task * nfwd_) / n_task;
1055  casadi_int d_end = ((task + 1) * nfwd_) / n_task;
1056  // Loop over forward derivatives
1057  for (casadi_int d = d_begin; d < d_end; ++d) {
1058  // Print progress
1059  if (print_progress_) print("Forward sensitivities, thread %d/%d: Direction %d/%d\n",
1060  task + 1, n_task, d - d_begin + 1, d_end - d_begin);
1061  // Pass all forward seeds
1062  for (size_t k = 0; k < in_.size(); ++k) {
1063  if (m->arg[k] && in_[k].type == InputType::FWD) {
1064  fmu_.set_fwd(m, in_[k].ind, m->arg[k] + d * size1_in(k));
1065  }
1066  }
1067  // Request forward sensitivities
1068  for (size_t k = 0; k < out_.size(); ++k) {
1069  if (m->res[k] && out_[k].type == OutputType::FWD) {
1070  fmu_.request_fwd(m, out_[k].ind);
1071  }
1072  }
1073  // Calculate derivatives
1074  if (fmu_.eval_fwd(m, false)) return 1;
1075  // Collect forward sensitivities
1076  for (size_t k = 0; k < out_.size(); ++k) {
1077  if (m->res[k] && out_[k].type == OutputType::FWD) {
1078  fmu_.get_fwd(m, out_[k].ind, m->res[k] + d * size1_out(k));
1079  }
1080  }
1081  }
1082  }
1083  // Evalute extended Jacobian and/or adjoint derivatives
1084  if (need_jac || (need_adj && !uses_adjoint_derivatives_)) {
1085  // Selection of colors to be evaluated for the thread
1086  casadi_int c_begin = (task * jac_colors_.size2()) / n_task;
1087  casadi_int c_end = ((task + 1) * jac_colors_.size2()) / n_task;
1088  // Loop over colors
1089  for (casadi_int c = c_begin; c < c_end; ++c) {
1090  // Print progress
1091  if (print_progress_) print("Jacobian calculation, thread %d/%d: Seeding variable %d/%d\n",
1092  task + 1, n_task, c - c_begin + 1, c_end - c_begin);
1093  // Get derivative directions
1094  casadi_jac_pre(&p_, &m->d, c);
1095  // Calculate derivatives
1096  fmu_.set_fwd(m, m->d.nseed, m->d.iseed, m->d.seed);
1097  fmu_.request_fwd(m, m->d.nsens, m->d.isens, m->d.wrt);
1098  if (fmu_.eval_fwd(m, true)) return 1;
1099  fmu_.get_fwd(m, m->d.nsens, m->d.isens, m->d.sens);
1100  // Scale derivatives
1101  casadi_jac_scale(&p_, &m->d);
1102  // Collect Jacobian nonzeros
1103  if (need_jac) {
1104  for (casadi_int i = 0; i < m->d.nsens; ++i) {
1105  m->jac_nz[m->d.nzind[i]] = m->d.sens[i];
1106  }
1107  }
1108  // Propagate adjoint sensitivities
1109  if (need_adj) {
1110  for (casadi_int d = 0; d < nadj_; ++d) {
1111  size_t aseed_off = d * fmu_.n_out();
1112  size_t asens_off = d * fmu_.n_in();
1113  for (casadi_int i = 0; i < m->d.nsens; ++i) {
1114  m->asens[m->d.wrt[i] + asens_off] += m->aseed[m->d.isens[i] + aseed_off]
1115  * m->d.sens[i];
1116  }
1117  }
1118  }
1119  }
1120  } else if (need_adj) { // Adjoint derivatives, without forming the extended Jacobian
1121  // Selection of forward derivatives to be evaluated for the thread
1122  casadi_int d_begin = (task * nadj_) / n_task;
1123  casadi_int d_end = ((task + 1) * nadj_) / n_task;
1124  // Loop over forward derivatives
1125  for (casadi_int d = d_begin; d < d_end; ++d) {
1126  // Print progress
1127  if (print_progress_) print("Adjoint sensitivities, thread %d/%d: Direction %d/%d\n",
1128  task + 1, n_task, d - d_begin + 1, d_end - d_begin);
1129  // Pass all adjoint seeds
1130  for (size_t k = 0; k < in_.size(); ++k) {
1131  if (m->arg[k] && in_[k].type == InputType::ADJ) {
1132  fmu_.set_adj(m, in_[k].ind, m->arg[k] + d * size1_in(k));
1133  }
1134  }
1135  // Request adjoint sensitivities
1136  for (size_t k = 0; k < out_.size(); ++k) {
1137  if (m->res[k] && out_[k].type == OutputType::ADJ) {
1138  fmu_.request_adj(m, out_[k].wrt);
1139  }
1140  }
1141  // Calculate derivatives
1142  if (fmu_.eval_adj(m)) return 1;
1143  // Collect adjoint sensitivities
1144  for (size_t k = 0; k < out_.size(); ++k) {
1145  if (m->res[k] && out_[k].type == OutputType::ADJ) {
1146  fmu_.get_adj(m, out_[k].wrt, m->res[k] + d * size1_out(k));
1147  }
1148  }
1149  }
1150  }
1151  // Evaluate extended Hessian
1152  if (need_hess) {
1153  // Hessian coloring
1154  casadi_int n_hc = hess_colors_.size2();
1155  const casadi_int *hc_colind = hess_colors_.colind(), *hc_row = hess_colors_.row();
1156  // Hessian sparsity
1157  const casadi_int *hess_colind = hess_sp_.colind(), *hess_row = hess_sp_.row();
1158  // Selection of colors to be evaluated for the thread
1159  casadi_int c_begin = (task * n_hc) / n_task;
1160  casadi_int c_end = ((task + 1) * n_hc) / n_task;
1161  // Unperturbed values, step size
1162  std::vector<double> x, h;
1163  // Loop over colors
1164  for (casadi_int c = c_begin; c < c_end; ++c) {
1165  // Print progress
1166  if (print_progress_) print("Hessian calculation, thread %d/%d: Seeding variable %d/%d\n",
1167  task + 1, n_task, c - c_begin + 1, c_end - c_begin);
1168  // Variables being seeded
1169  casadi_int v_begin = hc_colind[c];
1170  casadi_int v_end = hc_colind[c + 1];
1171  casadi_int nv = v_end - v_begin;
1172  // Loop over variables being seeded for color
1173  x.resize(nv);
1174  h.resize(nv);
1175  for (casadi_int v = 0; v < nv; ++v) {
1176  // Corresponding input in Fmu
1177  casadi_int ind1 = hc_row[v_begin + v];
1178  casadi_int id = jac_in_.at(ind1);
1179  // Get unperturbed value
1180  x[v] = m->ibuf_.at(id);
1181  // Step size
1182  h[v] = m->self.step_ * fmu_.nominal_in(id);
1183  // Make sure a a forward step remains in bounds
1184  if (x[v] + h[v] > fmu_.max_in(id)) {
1185  // Ensure a negative step is possible
1186  if (m->ibuf_.at(id) - h[v] < fmu_.min_in(id)) {
1187  std::stringstream ss;
1188  ss << "Cannot perturb " << fmu_.desc_in(m, id) << " at " << x[v]
1189  << " with step size " << m->self.step_;
1190  casadi_warning(ss.str());
1191  return 1;
1192  }
1193  // Take reverse step instead
1194  h[v] = -h[v];
1195  }
1196  // Perturb the input
1197  m->ibuf_.at(id) += h[v];
1198  m->imarked_.at(id) = true;
1199  // Inverse of step size
1200  h[v] = 1. / h[v];
1201  }
1202  // Request all outputs
1203  for (size_t i : jac_out_) {
1204  m->omarked_.at(i) = true;
1205  m->wrt_.at(i) = -1;
1206  }
1207  // Calculate perturbed inputs
1208  if (fmu_.eval(m)) return 1;
1209  // Clear perturbed adjoint sensitivities
1210  std::fill(m->pert_asens, m->pert_asens + fmu_.n_in(), 0);
1211  // Loop over colors of the Jacobian
1212  for (casadi_int c1 = 0; c1 < jac_colors_.size2(); ++c1) {
1213  // Get derivative directions
1214  casadi_jac_pre(&p_, &m->d, c1);
1215  // Calculate derivatives
1216  fmu_.set_fwd(m, m->d.nseed, m->d.iseed, m->d.seed);
1217  fmu_.request_fwd(m, m->d.nsens, m->d.isens, m->d.wrt);
1218  if (fmu_.eval_fwd(m, true)) return 1;
1219  fmu_.get_fwd(m, m->d.nsens, m->d.isens, m->d.sens);
1220  // Scale derivatives
1221  casadi_jac_scale(&p_, &m->d);
1222  // Propagate adjoint sensitivities
1223  for (casadi_int i = 0; i < m->d.nsens; ++i)
1224  m->pert_asens[m->d.wrt[i]] += m->aseed[m->d.isens[i]] * m->d.sens[i];
1225  }
1226  // Count how many times each input is calculated
1227  std::fill(m->star_iw, m->star_iw + fmu_.n_in(), 0);
1228  for (casadi_int v = 0; v < nv; ++v) {
1229  casadi_int ind1 = hc_row[v_begin + v];
1230  for (casadi_int k = hess_colind[ind1]; k < hess_colind[ind1 + 1]; ++k) {
1231  casadi_int id2 = jac_in_.at(hess_row[k]);
1232  m->star_iw[id2]++;
1233  }
1234  }
1235  // Loop over variables being seeded for color
1236  for (casadi_int v = 0; v < nv; ++v) {
1237  // Corresponding input in Fmu
1238  casadi_int ind1 = hc_row[v_begin + v];
1239  casadi_int id = jac_in_.at(ind1);
1240  // Restore input
1241  m->ibuf_.at(id) = x[v];
1242  m->imarked_.at(id) = true;
1243  // Get column in Hessian
1244  for (casadi_int k = hess_colind[ind1]; k < hess_colind[ind1 + 1]; ++k) {
1245  casadi_int id2 = jac_in_.at(hess_row[k]);
1246  // Save
1247  if (m->star_iw[id2] > 1) {
1248  // Input gets perturbed by multiple variables for the same color:
1249  // Replace with NaN to use mirror element instead
1250  m->hess_nz[k] = casadi::nan;
1251  } else {
1252  // Finite difference approximation
1253  m->hess_nz[k] = h[v] * (m->pert_asens[id2] - m->asens[id2]);
1254  }
1255  }
1256  }
1257  }
1258  }
1259  // Successful return
1260  return 0;
1261 }
1262 
1263 void FmuFunction::check_hessian(FmuMemory* m, const double *hess_nz, casadi_int* iw) const {
1264  // Get Hessian sparsity pattern
1265  casadi_int n = hess_sp_.size1();
1266  const casadi_int *colind = hess_sp_.colind(), *row = hess_sp_.row();
1267  // Nonzero counters for transpose
1268  casadi_copy(colind, n, iw);
1269  // Loop over Hessian columns
1270  for (casadi_int c = 0; c < n; ++c) {
1271  // Loop over nonzeros for the column
1272  for (casadi_int k = colind[c]; k < colind[c + 1]; ++k) {
1273  // Get row of Hessian
1274  casadi_int r = row[k];
1275  // Get nonzero of transpose
1276  casadi_int k_tr = iw[r]++;
1277  // Get indices
1278  casadi_int id_c = jac_in_[c], id_r = jac_in_[r];
1279  // Nonzero
1280  double nz = hess_nz[k], nz_tr = hess_nz[k_tr];
1281  // Check if entry is NaN of inf
1282  if (std::isnan(nz) || std::isinf(nz)) {
1283  std::stringstream ss;
1284  ss << "Second derivative w.r.t. " << fmu_.desc_in(m, id_r) << " and "
1285  << fmu_.desc_in(m, id_r) << " is " << nz;
1286  casadi_warning(ss.str());
1287  // Further checks not needed for entry
1288  continue;
1289  }
1290  // Symmetry check (strict upper triangular part only)
1291  if (r < c) {
1292  // Normaliation factor to be used for relative tolerance
1293  double nz_max = std::fmax(std::fabs(nz), std::fabs(nz_tr));
1294  // Check if above absolute and relative tolerance bounds
1295  if (nz_max > abstol_ && std::fabs(nz - nz_tr) > nz_max * reltol_) {
1296  std::stringstream ss;
1297  ss << "Hessian appears nonsymmetric. Got " << nz << " vs. " << nz_tr
1298  << " for second derivative w.r.t. " << fmu_.desc_in(m, id_r) << " and "
1299  << fmu_.desc_in(m, id_c) << ", hess_nz = " << k << "/" << k_tr;
1300  casadi_warning(ss.str());
1301  }
1302  }
1303  }
1304  }
1305 }
1306 
1307 void FmuFunction::remove_nans(double *hess_nz, casadi_int* iw) const {
1308  // Get Hessian sparsity pattern
1309  casadi_int n = hess_sp_.size1();
1310  const casadi_int *colind = hess_sp_.colind(), *row = hess_sp_.row();
1311  // Mark variables that have been perturbed
1312  std::fill(iw, iw + n, 0);
1313  casadi_int hess_colors_nnz = hess_colors_.nnz();
1314  const casadi_int* hess_colors_row = hess_colors_.row();
1315  for (casadi_int k = 0; k < hess_colors_nnz; ++k)
1316  iw[hess_colors_row[k]] = 1;
1317  // Nonzero counters for transpose
1318  casadi_copy(colind, n, iw);
1319  // Loop over Hessian columns
1320  for (casadi_int c = 0; c < n; ++c) {
1321  // Loop over nonzeros for the column
1322  for (casadi_int k = colind[c]; k < colind[c + 1]; ++k) {
1323  // Get row of Hessian
1324  casadi_int r = row[k];
1325  // Get nonzero of transpose
1326  casadi_int k_tr = iw[r]++;
1327  // If NaN, use element from transpose
1328  if (std::isnan(hess_nz[k])) {
1329  hess_nz[k] = hess_nz[k_tr];
1330  }
1331  }
1332  }
1333 }
1334 
1335 void FmuFunction::make_symmetric(double *hess_nz, casadi_int* iw) const {
1336  // Get Hessian sparsity pattern
1337  casadi_int n = hess_sp_.size1();
1338  const casadi_int *colind = hess_sp_.colind(), *row = hess_sp_.row();
1339  // Nonzero counters for transpose
1340  casadi_copy(colind, n, iw);
1341  // Loop over Hessian columns
1342  for (casadi_int c = 0; c < n; ++c) {
1343  // Loop over nonzeros for the column
1344  for (casadi_int k = colind[c]; k < colind[c + 1]; ++k) {
1345  // Get row of Hessian
1346  casadi_int r = row[k];
1347  // Get nonzero of transpose
1348  casadi_int k_tr = iw[r]++;
1349  // Make symmetric
1350  if (r < c) hess_nz[k] = hess_nz[k_tr] = 0.5 * (hess_nz[k] + hess_nz[k_tr]);
1351  }
1352  }
1353 }
1354 
1355 std::string to_string(Parallelization v) {
1356  switch (v) {
1357  case Parallelization::SERIAL: return "serial";
1358  case Parallelization::OPENMP: return "openmp";
1359  case Parallelization::THREAD: return "thread";
1360  default: break;
1361  }
1362  return "";
1363 }
1364 
// Check whether a name contains an underscore separator, i.e. has the form
// "prefix_remainder" expected by pop_prefix.
bool has_prefix(const std::string& s) {
  // Idiomatic spelling of the original `find('_') < s.size()` check
  return s.find('_') != std::string::npos;
}
1368 
1369 std::string pop_prefix(const std::string& s, std::string* rem) {
1370  // Get prefix
1371  casadi_assert_dev(!s.empty());
1372  size_t pos = s.find('_');
1373  casadi_assert(pos < s.size(), "Cannot process \"" + s + "\"");
1374  // Get prefix
1375  std::string r = s.substr(0, pos);
1376  // Remainder, if requested (note that rem == &s is possible)
1377  if (rem) *rem = s.substr(pos+1, std::string::npos);
1378  // Return prefix
1379  return r;
1380 }
1381 
1383  // Look for any non-regular input
1384  for (auto&& e : in_) if (e.type != InputType::REG) return false;
1385  // Look for any non-regular output
1386  for (auto&& e : out_) if (e.type != OutputType::REG) return false;
1387  // Only regular inputs and outputs
1388  return true;
1389 }
1390 
1392  // Check inputs
1393  for (auto&& e : in_) {
1394  switch (e.type) {
1395  // Supported for derivative calculations
1396  case InputType::REG:
1397  case InputType::OUT:
1398  break;
1399  // Supported if one derivative
1400  case InputType::FWD:
1401  if (nfwd_ > 1) return false;
1402  break;
1403  case InputType::ADJ:
1404  if (nadj_ > 1) return false;
1405  break;
1406  // Not supported
1407  default:
1408  return false;
1409  }
1410  }
1411  // Check outputs
1412  for (auto&& e : out_) {
1413  // Supported for derivative calculations
1414  switch (e.type) {
1415  case OutputType::REG:
1416  case OutputType::ADJ:
1417  break;
1418  // Not supported
1419  default:
1420  return false;
1421  }
1422  }
1423  // OK if reached this point
1424  return true;
1425 }
1426 
1427 Function FmuFunction::factory(const std::string& name,
1428  const std::vector<std::string>& s_in,
1429  const std::vector<std::string>& s_out,
1430  const Function::AuxOut& aux,
1431  const Dict& opts) const {
1432  // Assume we can call constructor directly
1433  try {
1434  // Hack: Inherit parallelization, verbosity option
1435  Dict opts1 = opts;
1436  opts1["parallelization"] = to_string(parallelization_);
1437  opts1["verbose"] = verbose_;
1438  opts1["print_progress"] = print_progress_;
1439  // Replace ':' with '_' in s_in and s_out
1440  std::vector<std::string> s_in_mod = s_in, s_out_mod = s_out;
1441  for (std::string& s : s_in_mod) std::replace(s.begin(), s.end(), ':', '_');
1442  for (std::string& s : s_out_mod) std::replace(s.begin(), s.end(), ':', '_');
1443  // New instance of the same class, using the same Fmu instance
1444  Function ret;
1445  ret.own(new FmuFunction(name, fmu_, s_in_mod, s_out_mod));
1446  ret->construct(opts1);
1447  return ret;
1448  } catch (std::exception& e) {
1449  casadi_warning("FmuFunction::factory call for constructing " + name + " from " + name_
1450  + " failed:\n" + std::string(e.what()) + "\nFalling back to base class implementation");
1451  }
1452  // Fall back to base class
1453  return FunctionInternal::factory(name, s_in, s_out, aux, opts);
1454 }
1455 
1457  // Calculation of Hessian inside FmuFunction (in development)
1458  if (new_jacobian_ && all_vectors()) return true;
1459  // Only first order
1460  return all_regular();
1461 }
1462 
1463 Function FmuFunction::get_jacobian(const std::string& name, const std::vector<std::string>& inames,
1464  const std::vector<std::string>& onames, const Dict& opts) const {
1465  // Hack: Inherit parallelization, verbosity option
1466  Dict opts1 = opts;
1467  opts1["parallelization"] = to_string(parallelization_);
1468  opts1["verbose"] = verbose_;
1469  opts1["print_progress"] = print_progress_;
1470  // Return new instance of class
1471  Function ret;
1472  ret.own(new FmuFunction(name, fmu_, inames, onames));
1473  ret->construct(opts1);
1474  return ret;
1475 }
1476 
1477 bool FmuFunction::has_forward(casadi_int nfwd) const {
1478  // Only implemented if "new_forward" is enabled
1479  if (!new_forward_) return FunctionInternal::has_forward(nfwd);
1480  // Only first order analytic derivative possible
1481  if (!all_regular()) return false;
1482  // Use analytic forward derivatives
1483  return true;
1484 }
1485 
1486 Function FmuFunction::get_forward(casadi_int nfwd, const std::string& name,
1487  const std::vector<std::string>& inames,
1488  const std::vector<std::string>& onames,
1489  const Dict& opts) const {
1490  // Only implemented if "new_forward" is enabled
1491  if (!new_forward_) return FunctionInternal::get_forward(nfwd, name, inames, onames, opts);
1492  // Pass options
1493  Dict opts1 = opts;
1494  opts1["parallelization"] = to_string(parallelization_);
1495  opts1["verbose"] = verbose_;
1496  opts1["print_progress"] = print_progress_;
1497  opts1["nfwd"] = nfwd;
1498  // Return new instance of class
1499  Function ret;
1500  ret.own(new FmuFunction(name, fmu_, inames, onames));
1501  ret->construct(opts1);
1502  return ret;
1503 }
1504 
1505 bool FmuFunction::has_reverse(casadi_int nadj) const {
1506  // Only first order analytic derivative possible
1507  if (!all_regular()) return false;
1508  // Use analytic adjoint derivatives
1509  return true;
1510 }
1511 
1512 Function FmuFunction::get_reverse(casadi_int nadj, const std::string& name,
1513  const std::vector<std::string>& inames,
1514  const std::vector<std::string>& onames,
1515  const Dict& opts) const {
1516  // Hack: Inherit parallelization option
1517  Dict opts1 = opts;
1518  opts1["parallelization"] = to_string(parallelization_);
1519  opts1["verbose"] = verbose_;
1520  opts1["new_jacobian"] = new_hessian_;
1521  opts1["print_progress"] = print_progress_;
1522  opts1["nadj"] = nadj;
1523  // Return new instance of class
1524  Function ret;
1525  ret.own(new FmuFunction(name, fmu_, inames, onames));
1526  ret->construct(opts1);
1527  return ret;
1528 }
1529 
1530 bool FmuFunction::has_jac_sparsity(casadi_int oind, casadi_int iind) const {
1531  // Available in the FMU meta information
1532  if (out_.at(oind).type == OutputType::REG) {
1533  if (in_.at(iind).type == InputType::REG) {
1534  return true;
1535  } else if (in_.at(iind).type == InputType::ADJ) {
1536  return true;
1537  }
1538  } else if (out_.at(oind).type == OutputType::ADJ) {
1539  if (in_.at(iind).type == InputType::REG) {
1540  return true;
1541  } else if (in_.at(iind).type == InputType::ADJ) {
1542  return true;
1543  }
1544  }
1545  // Not available
1546  return false;
1547 }
1548 
1549 Sparsity FmuFunction::get_jac_sparsity(casadi_int oind, casadi_int iind,
1550  bool symmetric) const {
1551  // Available in the FMU meta information
1552  if (out_.at(oind).type == OutputType::REG) {
1553  if (in_.at(iind).type == InputType::REG) {
1554  return fmu_.jac_sparsity(out_.at(oind).ind, in_.at(iind).ind);
1555  } else if (in_.at(iind).type == InputType::ADJ) {
1556  return Sparsity(nnz_out(oind), nnz_in(iind));
1557  }
1558  } else if (out_.at(oind).type == OutputType::ADJ) {
1559  if (in_.at(iind).type == InputType::REG) {
1560  return fmu_.hess_sparsity(out_.at(oind).wrt, in_.at(iind).ind);
1561  } else if (in_.at(iind).type == InputType::ADJ) {
1562  return fmu_.jac_sparsity(in_.at(iind).ind, out_.at(oind).wrt).T();
1563  }
1564  }
1565  // Not available
1566  casadi_error("Implementation error");
1567  return Sparsity();
1568 }
1569 
1570 Dict FmuFunction::get_stats(void *mem) const {
1571  // Get the stats from the base classes
1572  Dict stats = FunctionInternal::get_stats(mem);
1573  // Get memory object
1574  FmuMemory* m = static_cast<FmuMemory*>(mem);
1575  // Get auxilliary variables from Fmu
1576  fmu_.get_stats(m, &stats, name_in_, get_ptr(in_));
1577  // Return stats
1578  return stats;
1579 }
1580 
1583  s.version("FmuFunction", 4);
1584 
1585  s.pack("FmuFunction::Fmu", fmu_);
1586 
1587  casadi_assert_dev(in_.size()==n_in_);
1588  for (const InputStruct& e : in_) {
1589  s.pack("FmuFunction::in::type", static_cast<int>(e.type));
1590  s.pack("FmuFunction::in::ind", e.ind);
1591  }
1592  casadi_assert_dev(out_.size()==n_out_);
1593  for (const OutputStruct& e : out_) {
1594  s.pack("FmuFunction::out::type", static_cast<int>(e.type));
1595  s.pack("FmuFunction::out::ind", e.ind);
1596  s.pack("FmuFunction::out::wrt", e.wrt);
1597  s.pack("FmuFunction::out::rbegin", e.rbegin);
1598  s.pack("FmuFunction::out::rend", e.rend);
1599  s.pack("FmuFunction::out::cbegin", e.cbegin);
1600  s.pack("FmuFunction::out::cend", e.cend);
1601  }
1602  s.pack("FmuFunction::jac_in", jac_in_);
1603  s.pack("FmuFunction::jac_out", jac_out_);
1604  s.pack("FmuFunction::jac_nom_in", jac_nom_in_);
1605  s.pack("FmuFunction::sp_trans", sp_trans_);
1606  s.pack("FmuFunction::sp_trans_map", sp_trans_map_);
1607 
1608  s.pack("FmuFunction::has_jac", has_jac_);
1609  s.pack("FmuFunction::has_fwd", has_fwd_);
1610  s.pack("FmuFunction::has_adj", has_adj_);
1611  s.pack("FmuFunction::has_hess", has_hess_);
1612 
1613  s.pack("FmuFunction::uses_directional_derivatives", uses_directional_derivatives_);
1614  s.pack("FmuFunction::uses_adjoint_derivatives", uses_adjoint_derivatives_);
1615  s.pack("FmuFunction::nfwd", nfwd_);
1616  s.pack("FmuFunction::nadj", nadj_);
1617  s.pack("FmuFunction::validate_forward", validate_forward_);
1618  s.pack("FmuFunction::validate_hessian", validate_hessian_);
1619  s.pack("FmuFunction::make_symmetric", make_symmetric_);
1620  s.pack("FmuFunction::step", step_);
1621  s.pack("FmuFunction::abstol", abstol_);
1622  s.pack("FmuFunction::reltol", reltol_);
1623  s.pack("FmuFunction::print_progress", print_progress_);
1624  s.pack("FmuFunction::new_jacobian", new_jacobian_);
1625  s.pack("FmuFunction::new_forward", new_forward_);
1626  s.pack("FmuFunction::new_hessian", new_hessian_);
1627  s.pack("FmuFunction::hessian_coloring", hessian_coloring_);
1628  s.pack("FmuFunction::validate_ad_file", validate_ad_file_);
1629 
1630  s.pack("FmuFunction::fd", static_cast<int>(fd_));
1631  s.pack("FmuFunction::parallelization", static_cast<int>(parallelization_));
1632  s.pack("FmuFunction::init_stats", init_stats_);
1633 
1634  s.pack("FmuFunction::jac_sp", jac_sp_);
1635  s.pack("FmuFunction::hess_sp", hess_sp_);
1636  s.pack("FmuFunction::jac_colors", jac_colors_);
1637  s.pack("FmuFunction::hess_colors", hess_colors_);
1638  s.pack("FmuFunction::nonlin", nonlin_);
1639 
1640 
1641  s.pack("FmuFunction::max_jac_tasks", max_jac_tasks_);
1642  s.pack("FmuFunction::max_hess_tasks", max_hess_tasks_);
1643  s.pack("FmuFunction::max_n_tasks", max_n_tasks_);
1644 
1645 }
1646 
1648  s.version("FmuFunction", 3, 4);
1649 
1650  s.unpack("FmuFunction::Fmu", fmu_);
1651 
1652  in_.resize(n_in_);
1653  for (InputStruct& e : in_) {
1654  int t = 0;
1655  s.unpack("FmuFunction::in::type", t);
1656  e.type = static_cast<InputType>(t);
1657  s.unpack("FmuFunction::in::ind", e.ind);
1658  }
1659  out_.resize(n_out_);
1660  for (OutputStruct& e : out_) {
1661  int t = 0;
1662  s.unpack("FmuFunction::out::type", t);
1663  e.type = static_cast<OutputType>(t);
1664  s.unpack("FmuFunction::out::ind", e.ind);
1665  s.unpack("FmuFunction::out::wrt", e.wrt);
1666  s.unpack("FmuFunction::out::rbegin", e.rbegin);
1667  s.unpack("FmuFunction::out::rend", e.rend);
1668  s.unpack("FmuFunction::out::cbegin", e.cbegin);
1669  s.unpack("FmuFunction::out::cend", e.cend);
1670  }
1671 
1672  s.unpack("FmuFunction::jac_in", jac_in_);
1673  s.unpack("FmuFunction::jac_out", jac_out_);
1674 
1675  s.unpack("FmuFunction::jac_nom_in", jac_nom_in_);
1676  s.unpack("FmuFunction::sp_trans", sp_trans_);
1677  s.unpack("FmuFunction::sp_trans_map", sp_trans_map_);
1678 
1679  s.unpack("FmuFunction::has_jac", has_jac_);
1680  s.unpack("FmuFunction::has_fwd", has_fwd_);
1681  s.unpack("FmuFunction::has_adj", has_adj_);
1682  s.unpack("FmuFunction::has_hess", has_hess_);
1683 
1684  s.unpack("FmuFunction::uses_directional_derivatives", uses_directional_derivatives_);
1685  s.unpack("FmuFunction::uses_adjoint_derivatives", uses_adjoint_derivatives_);
1686  s.unpack("FmuFunction::nfwd", nfwd_);
1687  s.unpack("FmuFunction::nadj", nadj_);
1688  s.unpack("FmuFunction::validate_forward", validate_forward_);
1689  s.unpack("FmuFunction::validate_hessian", validate_hessian_);
1690  s.unpack("FmuFunction::make_symmetric", make_symmetric_);
1691  s.unpack("FmuFunction::step", step_);
1692  s.unpack("FmuFunction::abstol", abstol_);
1693  s.unpack("FmuFunction::reltol", reltol_);
1694  s.unpack("FmuFunction::print_progress", print_progress_);
1695  s.unpack("FmuFunction::new_jacobian", new_jacobian_);
1696  s.unpack("FmuFunction::new_forward", new_forward_);
1697  s.unpack("FmuFunction::new_hessian", new_hessian_);
1698  s.unpack("FmuFunction::hessian_coloring", hessian_coloring_);
1699  s.unpack("FmuFunction::validate_ad_file", validate_ad_file_);
1700 
1701  int fd = 0;
1702  s.unpack("FmuFunction::fd", fd);
1703  fd_ = static_cast<FdMode>(fd);
1704  int parallelization = 0;
1705  s.unpack("FmuFunction::parallelization", parallelization);
1706  parallelization_ = static_cast<Parallelization>(parallelization);
1707 
1708  s.unpack("FmuFunction::init_stats", init_stats_);
1709 
1710  s.unpack("FmuFunction::jac_sp", jac_sp_);
1711  s.unpack("FmuFunction::hess_sp", hess_sp_);
1712  s.unpack("FmuFunction::jac_colors", jac_colors_);
1713  s.unpack("FmuFunction::hess_colors", hess_colors_);
1714  s.unpack("FmuFunction::nonlin", nonlin_);
1715 
1716  s.unpack("FmuFunction::max_jac_tasks", max_jac_tasks_);
1717  s.unpack("FmuFunction::max_hess_tasks", max_hess_tasks_);
1718  s.unpack("FmuFunction::max_n_tasks", max_n_tasks_);
1719 
1720  if (has_jac_ || has_adj_ || has_hess_) {
1721  // Setup Jacobian memory
1722  casadi_jac_setup(&p_, jac_sp_, jac_colors_);
1725  p_.map_in = get_ptr(jac_in_);
1726  }
1727 
1728 }
1729 
1730 //void pack(SerializingStream&s, );
1731 
1732 
1733 } // namespace casadi
Helper class for Serialization.
void unpack(Sparsity &e)
Reconstruct an object from the input stream.
void version(const std::string &name, int v)
static std::unique_ptr< std::ostream > ofstream_ptr(const std::string &path, std::ios_base::openmode mode=std::ios_base::out)
Definition: filesystem.cpp:115
bool has_jacobian() const override
Full Jacobian.
Function get_forward(casadi_int nfwd, const std::string &name, const std::vector< std::string > &inames, const std::vector< std::string > &onames, const Dict &opts) const override
Return function that calculates forward derivatives.
bool all_vectors() const
std::vector< InputStruct > in_
~FmuFunction() override
Destructor.
Function get_jacobian(const std::string &name, const std::vector< std::string > &inames, const std::vector< std::string > &onames, const Dict &opts) const override
Full Jacobian.
std::vector< casadi_int > sp_trans_map_
Function factory(const std::string &name, const std::vector< std::string > &s_in, const std::vector< std::string > &s_out, const Function::AuxOut &aux, const Dict &opts) const override
casadi_jac_prob< double > p_
Function get_reverse(casadi_int nadj, const std::string &name, const std::vector< std::string > &inames, const std::vector< std::string > &onames, const Dict &opts) const override
Reverse mode AD.
int eval(const double **arg, double **res, casadi_int *iw, double *w, void *mem) const override
Evaluate numerically.
void init(const Dict &opts) override
Initialize.
Sparsity get_jac_sparsity(casadi_int oind, casadi_int iind, bool symmetric) const override
Return sparsity of Jacobian of an output respect to an input.
bool has_forward(casadi_int nfwd) const override
Return function that calculates forward derivatives.
std::vector< casadi_int > nonlin_
Dict get_stats(void *mem) const override
Get all statistics.
Parallelization parallelization_
casadi_int nfwd_
Number of sensitivities.
std::string validate_ad_file_
void check_mem_count(casadi_int n) const override
Check for validatity of memory object count.
bool has_reverse(casadi_int nadj) const override
Reverse mode AD.
std::vector< Sparsity > sp_trans_
bool all_regular() const
static void identify_io(std::vector< std::string > *scheme_in, std::vector< std::string > *scheme_out, const std::vector< std::string > &name_in, const std::vector< std::string > &name_out)
bool has_jac_sparsity(casadi_int oind, casadi_int iind) const override
Return sparsity of Jacobian of an output respect to an input.
Sparsity get_sparsity_in(casadi_int i) override
Retreive sparsities.
int eval_task(FmuMemory *m, casadi_int task, casadi_int n_task, bool need_nondiff, bool need_jac, bool need_fwd, bool need_adj, bool need_hess) const
static const Options options_
Options.
Sparsity get_sparsity_out(casadi_int i) override
Retreive sparsities.
std::vector< OutputStruct > out_
std::vector< double > get_nominal_in(casadi_int i) const override
Retreive nominal values.
casadi_int max_jac_tasks_
casadi_int max_hess_tasks_
FmuFunction(const std::string &name, const Fmu &fmu, const std::vector< std::string > &name_in, const std::vector< std::string > &name_out)
Constructor.
std::vector< size_t > jac_in_
void free_mem(void *mem) const override
Free memory block.
int eval_all(FmuMemory *m, casadi_int n_task, bool need_nondiff, bool need_jac, bool need_fwd, bool need_adj, bool need_hess) const
void make_symmetric(double *hess_nz, casadi_int *iw) const
void change_option(const std::string &option_name, const GenericType &option_value) override
Change option after object creation for debugging.
int init_mem(void *mem) const override
Initalize memory block.
void remove_nans(double *hess_nz, casadi_int *iw) const
std::vector< double > get_nominal_out(casadi_int i) const override
Retreive nominal values.
std::vector< size_t > jac_out_
std::vector< double > jac_nom_in_
void * alloc_mem() const override
Create memory block.
void check_hessian(FmuMemory *m, const double *hess_nz, casadi_int *iw) const
void serialize_body(SerializingStream &s) const override
Serialize an object without type information.
size_t index_out(const std::string &n) const
Definition: fmu.cpp:655
size_t index_in(const std::string &n) const
Definition: fmu.cpp:645
Interface to binary FMU.
Definition: fmu.hpp:62
void set(FmuMemory *m, size_t ind, const double *value) const
Definition: fmu.cpp:285
void get_fwd(FmuMemory *m, casadi_int nsens, const casadi_int *id, double *v) const
Definition: fmu.cpp:358
const std::vector< size_t > & ored(size_t ind) const
Definition: fmu.cpp:163
int eval_adj(FmuMemory *m) const
Definition: fmu.cpp:407
void get_stats(FmuMemory *m, Dict *stats, const std::vector< std::string > &name_in, const InputStruct *in) const
Get stats.
Definition: fmu.cpp:431
Sparsity hess_sparsity(const std::vector< size_t > &r, const std::vector< size_t > &c) const
Definition: fmu.cpp:260
bool can_be_instantiated_only_once_per_process() const
Does the FMU declare restrictions on instantiation?
Definition: fmu.cpp:243
std::vector< double > all_nominal_out(size_t ind) const
Definition: fmu.cpp:211
bool provides_adjoint_derivatives() const
Does the FMU provide support for adjoint directional derivatives.
Definition: fmu.cpp:235
Sparsity jac_sparsity(const std::vector< size_t > &osub, const std::vector< size_t > &isub) const
Definition: fmu.cpp:251
int eval_fwd(FmuMemory *m, bool independent_seeds) const
Definition: fmu.cpp:350
double nominal_in(size_t ind) const
Definition: fmu.cpp:171
FmuMemory * alloc_mem(const FmuFunction &f) const
Create memory block.
Definition: fmu.cpp:93
void get_adj(FmuMemory *m, casadi_int nsens, const casadi_int *id, double *v) const
Definition: fmu.cpp:415
double max_in(size_t ind) const
Definition: fmu.cpp:195
void set_fwd(FmuMemory *m, casadi_int nseed, const casadi_int *id, const double *v) const
Definition: fmu.cpp:317
size_t n_out() const
Get the number of scheme outputs.
Definition: fmu.cpp:131
const std::string & instance_name() const
Name of the FMU.
Definition: fmu.cpp:114
std::vector< double > all_nominal_in(size_t ind) const
Definition: fmu.cpp:203
double min_in(size_t ind) const
Definition: fmu.cpp:187
void free_mem(void *mem) const
Free memory block.
Definition: fmu.cpp:97
FmuInternal * get() const
Definition: fmu.cpp:101
void set_adj(FmuMemory *m, casadi_int nseed, const casadi_int *id, const double *v) const
Definition: fmu.cpp:374
int init_mem(FmuMemory *m) const
Initialize memory block.
Definition: fmu.cpp:268
bool provides_directional_derivatives() const
Does the FMU provide support for forward directional derivatives.
Definition: fmu.cpp:227
int eval(FmuMemory *m) const
Definition: fmu.cpp:301
const std::vector< size_t > & ired(size_t ind) const
Definition: fmu.cpp:155
void request_adj(FmuMemory *m, casadi_int nsens, const casadi_int *id, const casadi_int *wrt_id) const
Definition: fmu.cpp:390
size_t n_in() const
Get the number of scheme inputs.
Definition: fmu.cpp:123
void free_instance(void *instance) const
Definition: fmu.cpp:277
void request(FmuMemory *m, size_t ind) const
Definition: fmu.cpp:293
void request_fwd(FmuMemory *m, casadi_int nsens, const casadi_int *id, const casadi_int *wrt_id) const
Definition: fmu.cpp:333
std::string desc_in(FmuMemory *m, size_t id, bool more=true) const
Definition: fmu.cpp:219
Internal class for Function.
casadi_int size1_in(casadi_int ind) const
Input/output dimensions.
void alloc_iw(size_t sz_iw, bool persistent=false)
Ensure required length of iw field.
Dict get_stats(void *mem) const override
Get all statistics.
void init(const Dict &opts) override
Initialize.
virtual bool has_forward(casadi_int nfwd) const
Return function that calculates forward derivatives.
void serialize_body(SerializingStream &s) const override
Serialize an object without type information.
virtual Function factory(const std::string &name, const std::vector< std::string > &s_in, const std::vector< std::string > &s_out, const Function::AuxOut &aux, const Dict &opts) const
virtual std::vector< double > get_nominal_out(casadi_int ind) const
size_t n_in_
Number of inputs and outputs.
casadi_int size1_out(casadi_int ind) const
Input/output dimensions.
casadi_int nnz_in() const
Number of input/output nonzeros.
static const Options options_
Options.
virtual std::vector< double > get_nominal_in(casadi_int ind) const
const Sparsity & sparsity_out(casadi_int ind) const
Input/output sparsity.
void alloc_w(size_t sz_w, bool persistent=false)
Ensure required length of w field.
casadi_int nnz_out() const
Number of input/output nonzeros.
void setup(void *mem, const double **arg, double **res, casadi_int *iw, double *w) const
Set the (persistent and temporary) work vectors.
void change_option(const std::string &option_name, const GenericType &option_value) override
Change option after object creation for debugging.
std::vector< std::string > name_out_
virtual Function get_forward(casadi_int nfwd, const std::string &name, const std::vector< std::string > &inames, const std::vector< std::string > &onames, const Dict &opts) const
Return function that calculates forward derivatives.
std::vector< std::string > name_in_
Input and output scheme.
Function object.
Definition: function.hpp:60
std::map< std::string, std::vector< std::string > > AuxOut
Definition: function.hpp:404
void own(Internal *node)
Generic data type, can hold different types such as bool, casadi_int, std::string etc.
void construct(const Dict &opts)
Construct.
virtual int init_mem(void *mem) const
Initialize memory block.
void print(const char *fmt,...) const
C-style formatted printing during evaluation.
bool verbose_
Verbose printout.
void clear_mem()
Clear all memory (called from destructor)
Helper class for Serialization.
void version(const std::string &name, int v)
void pack(const Sparsity &e)
Serializes an object to the output stream.
std::string class_name() const
Get class name.
General sparsity class.
Definition: sparsity.hpp:106
std::vector< casadi_int > erase(const std::vector< casadi_int > &rr, const std::vector< casadi_int > &cc, bool ind1=false)
Erase rows and/or columns of a matrix.
Definition: sparsity.cpp:339
casadi_int size1() const
Get the number of rows.
Definition: sparsity.cpp:124
Sparsity star_coloring(casadi_int ordering=1, casadi_int cutoff=std::numeric_limits< casadi_int >::max()) const
Perform a star coloring of a symmetric matrix:
Definition: sparsity.cpp:757
static Sparsity dense(casadi_int nrow, casadi_int ncol=1)
Create a dense rectangular sparsity pattern *.
Definition: sparsity.cpp:1012
Sparsity T() const
Transpose the matrix.
Definition: sparsity.cpp:394
casadi_int nnz() const
Get the number of (structural) non-zeros.
Definition: sparsity.cpp:148
casadi_int size2() const
Get the number of columns.
Definition: sparsity.cpp:128
const casadi_int * row() const
Get a reference to row-vector,.
Definition: sparsity.cpp:164
Sparsity uni_coloring(const Sparsity &AT=Sparsity(), casadi_int cutoff=std::numeric_limits< casadi_int >::max()) const
Perform a unidirectional coloring: A greedy distance-2 coloring algorithm.
Definition: sparsity.cpp:749
const casadi_int * colind() const
Get a reference to the colindex of all column element (see class description)
Definition: sparsity.cpp:168
The casadi namespace.
Definition: archiver.cpp:28
std::vector< casadi_int > range(casadi_int start, casadi_int stop, casadi_int step, casadi_int len)
Range function.
FdMode
Variable type.
bool has_prefix(const std::string &s)
void casadi_copy(const T1 *x, casadi_int n, T1 *y)
COPY: y <- x.
@ OT_STRINGVECTOR
std::string str(const T &v)
String representation, any type.
GenericType::Dict Dict
C++ equivalent of Python's dict or MATLAB's struct.
Parallelization
Type of parallelization.
std::string to_string(TypeFmi2 v)
const double nan
Not a number.
Definition: calculus.hpp:53
T * get_ptr(std::vector< T > &v)
Get a pointer to the data contained in the vector.
void casadi_trans(const T1 *x, const casadi_int *sp_x, T1 *y, const casadi_int *sp_y, casadi_int *tmp)
TRANS: y <- trans(x), where tmp is a work vector (length >= number of rows of x)
std::string pop_prefix(const std::string &s, std::string *rem)
const FmuFunction & self
std::vector< size_t > wrt_
const double ** arg
std::vector< bool > omarked_
casadi_jac_data< double > d
std::vector< double > ibuf_
std::vector< FmuMemory * > slaves
std::vector< bool > imarked_
casadi_int * star_iw
static InputStruct parse(const std::string &n, const Fmu *fmu, std::vector< std::string > *name_in=0, std::vector< std::string > *name_out=0)
Options metadata for a class.
Definition: options.hpp:40
static OutputStruct parse(const std::string &n, const Fmu *fmu, std::vector< std::string > *name_in=0, std::vector< std::string > *name_out=0)
casadi_int nseed
Definition: casadi_jac.hpp:79
casadi_int * nzind
Definition: casadi_jac.hpp:93
casadi_int * isens
Definition: casadi_jac.hpp:85
casadi_int * iseed
Definition: casadi_jac.hpp:81
casadi_int * wrt
Definition: casadi_jac.hpp:91
casadi_int nsens
Definition: casadi_jac.hpp:79
const size_t * map_in
Definition: casadi_jac.hpp:39
const T1 * nom_in
Definition: casadi_jac.hpp:35
const size_t * map_out
Definition: casadi_jac.hpp:37