// Copyright (c) 2010-2023, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.

#ifndef ADMFEM_HPP
#define ADMFEM_HPP

#include "mfem.hpp"
#include "../../linalg/dual.hpp"
#include "tadvector.hpp"
#include "taddensemat.hpp"

#ifdef MFEM_USE_CODIPACK
#include <codi.hpp>

namespace mfem
{

namespace ad
{
#ifdef MFEM_USE_ADFORWARD
/// Forward AD type declaration
typedef codi::RealForward ADFloatType;
/// Vector type for AD-numbers
typedef TAutoDiffVector<ADFloatType> ADVectorType;
/// Matrix type for AD-numbers
typedef TAutoDiffDenseMatrix<ADFloatType> ADMatrixType;
#else
/// Reverse AD type declaration
typedef codi::RealReverse ADFloatType;
/// Vector type for AD-numbers
typedef TAutoDiffVector<ADFloatType> ADVectorType;
/// Matrix type for AD-numbers
typedef TAutoDiffDenseMatrix<ADFloatType> ADMatrixType;
#endif
}

/// The class provides an evaluation of the Jacobian of a templated vector
/// function provided in the constructor. The Jacobian is evaluated with the
/// help of automatic differentiation (AD). The template parameters specify the
/// size of the return vector (vector_size), the size of the input vector
/// (state_size), and the size of the parameters (param_size) supplied to the
/// function.
template<int vector_size=1, int state_size=1, int param_size=0>
class VectorFuncAutoDiff
{
public:
   /// F_ is a user implemented function to be differentiated by
   /// VectorFuncAutoDiff. The signature of the function is: F_(mfem::Vector&
   /// parameters, ad::ADVectorType& state_vector, ad::ADVectorType& result).
   /// The parameters vector should have size param_size. The state_vector
   /// should have size state_size, and the result vector should have size
   /// vector_size. All size parameters are template parameters in
   /// VectorFuncAutoDiff.
   VectorFuncAutoDiff(
      std::function<void(mfem::Vector&, ad::ADVectorType&, ad::ADVectorType&)> F_)
   {
      F = F_;
   }

   /// Evaluates the Jacobian of the vector function F_ for a set of parameters
   /// (vparam) and state vector vstate. The Jacobian (jac) has dimensions
   /// [vector_size x state_size].
   void Jacobian(mfem::Vector &vparam, mfem::Vector &vstate, mfem::DenseMatrix &jac)
   {
#ifdef MFEM_USE_ADFORWARD
      // use forward mode
      jac.SetSize(vector_size, state_size);
      jac = 0.0;
      {
         ad::ADVectorType ad_state(state_size);
         ad::ADVectorType ad_result(vector_size);
         // initialize the active input vector
         for (int i = 0; i < state_size; i++)
         {
            ad_state[i].setValue(vstate[i]);
            ad_state[i].setGradient(0.0);
         }
         // seed one direction at a time and extract a column of the Jacobian
         for (int ii = 0; ii < state_size; ii++)
         {
            ad_state[ii].setGradient(1.0);
            F(vparam, ad_state, ad_result);
            for (int jj = 0; jj < vector_size; jj++)
            {
               jac(jj, ii) = ad_result[jj].getGradient();
            }
            ad_state[ii].setGradient(0.0);
         }
      }
#else // use reverse mode
      jac.SetSize(vector_size, state_size);
      jac = 0.0;
      {
         ad::ADVectorType ad_state(state_size);
         ad::ADVectorType ad_result(vector_size);
         for (int i = 0; i < state_size; i++)
         {
            ad_state[i] = vstate[i];
         }
         // record the evaluation of F on the global tape
         ad::ADFloatType::TapeType &tape = ad::ADFloatType::getGlobalTape();
         typename ad::ADFloatType::TapeType::Position pos = tape.getPosition();
         tape.setActive();
         for (int ii = 0; ii < state_size; ii++) { tape.registerInput(ad_state[ii]); }
         F(vparam, ad_state, ad_result);
         for (int ii = 0; ii < vector_size; ii++) { tape.registerOutput(ad_result[ii]); }
         tape.setPassive();
         // one reverse sweep per output recovers a row of the Jacobian
         for (int jj = 0; jj < vector_size; jj++)
         {
            ad_result[jj].setGradient(1.0);
            tape.evaluate();
            for (int ii = 0; ii < state_size; ii++)
            {
               jac(jj, ii) = ad_state[ii].getGradient();
            }
            tape.clearAdjoints();
            ad_result[jj].setGradient(0.0);
         }
         tape.reset(pos);
      }
#endif
   }

private:
   std::function<void(mfem::Vector&, ad::ADVectorType&, ad::ADVectorType&)> F;
}; // VectorFuncAutoDiff

/// The class provides an evaluation of the Jacobian of a templated vector
/// function provided as a functor TFunctor. The Jacobian is evaluated with the
/// help of automatic differentiation (AD). The template parameters specify the
/// size of the return vector (vector_size), the size of the input vector
/// (state_size), and the size of the parameters supplied to the function. The
/// TFunctor functor is a template class with parameters [Float data type],
/// [Vector type for the additional parameters], [Vector type for the state
/// vector and the return residual]. The integer template parameters are the
/// same ones passed to QVectorFuncAutoDiff.
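/// A minimal usage sketch; the functor MyResidual, the sizes, and the function
/// itself are placeholders for illustration and are not part of the library:
/// \code{.cpp}
/// template<typename TDataType, typename TParamVector, typename TStateVector,
///          int vector_size, int state_size, int param_size>
/// class MyResidual
/// {
/// public:
///    void operator()(TParamVector &vparam, TStateVector &uu, TStateVector &rr)
///    {
///       auto a = vparam[0];
///       rr[0] = a*uu[0]*uu[1];
///       rr[1] = uu[0] + uu[1];
///    }
/// };
///
/// mfem::QVectorFuncAutoDiff<MyResidual,2,2,1> adf;
/// mfem::Vector param(1), state(2);
/// mfem::DenseMatrix jac;
/// param(0) = 3.0; state(0) = 1.0; state(1) = 2.0;
/// adf.Jacobian(param, state, jac); // jac is the 2x2 matrix d(rr)/d(uu)
/// \endcode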
template<template<typename, typename, typename, int, int, int> class TFunctor,
         int vector_size=1, int state_size=1, int param_size=0>
class QVectorFuncAutoDiff
{
public:
   /// Evaluates the vector function for a given set of parameters and state
   /// values in vector uu. The result is returned in vector rr.
   void VectorFunc(const mfem::Vector &vparam, mfem::Vector &uu, mfem::Vector &rr)
   {
      rf(vparam, uu, rr);
   }

   /// Returns the gradient of TFunctor(...) in the dense matrix jac. The
   /// dimensions of jac are vector_size x state_size, where state_size is the
   /// length of vector uu.
   void Jacobian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
   {
#ifdef MFEM_USE_ADFORWARD
      // use forward mode
      jac.SetSize(vector_size, state_size);
      jac = 0.0;
      {
         ad::ADVectorType aduu(state_size);
         ad::ADVectorType rr(vector_size);
         // initialize the active input vector
         for (int i = 0; i < state_size; i++)
         {
            aduu[i].setValue(uu[i]);
            aduu[i].setGradient(0.0);
         }
         // seed one direction at a time and extract a column of the Jacobian
         for (int ii = 0; ii < state_size; ii++)
         {
            aduu[ii].setGradient(1.0);
            tf(vparam, aduu, rr);
            for (int jj = 0; jj < vector_size; jj++)
            {
               jac(jj, ii) = rr[jj].getGradient();
            }
            aduu[ii].setGradient(0.0);
         }
      }
#else // use reverse mode
      jac.SetSize(vector_size, state_size);
      jac = 0.0;
      {
         ad::ADVectorType aduu(state_size);
         ad::ADVectorType rr(vector_size);
         for (int i = 0; i < state_size; i++)
         {
            aduu[i] = uu[i];
         }
         // record the evaluation of the residual on the global tape
         ad::ADFloatType::TapeType &tape = ad::ADFloatType::getGlobalTape();
         typename ad::ADFloatType::TapeType::Position pos = tape.getPosition();
         tape.setActive();
         for (int ii = 0; ii < state_size; ii++) { tape.registerInput(aduu[ii]); }
         tf(vparam, aduu, rr);
         for (int ii = 0; ii < vector_size; ii++) { tape.registerOutput(rr[ii]); }
         tape.setPassive();
         // one reverse sweep per output recovers a row of the Jacobian
         for (int jj = 0; jj < vector_size; jj++)
         {
            rr[jj].setGradient(1.0);
            tape.evaluate();
            for (int ii = 0; ii < state_size; ii++)
            {
               jac(jj, ii) = aduu[ii].getGradient();
            }
            tape.clearAdjoints();
            rr[jj].setGradient(0.0);
         }
         tape.reset(pos);
      }
#endif
   }

private:
   TFunctor<ad::ADFloatType, const mfem::Vector, ad::ADVectorType,
            vector_size, state_size, param_size> tf;
   TFunctor<double, const mfem::Vector, mfem::Vector,
            vector_size, state_size, param_size> rf;
};

/// The class provides an evaluation of the first derivatives and the Hessian
/// of a templated scalar function provided as a functor TFunctor. Both the
/// first and the second derivatives are evaluated with the help of automatic
/// differentiation (AD). The template parameters specify the size of the input
/// vector (state_size) and the size of the parameters supplied to the
/// function. The TFunctor functor is a template class with parameters [Float
/// data type], [Vector type for the additional parameters], [Vector type for
/// the state vector and the return residual]. The integer template parameters
/// are the same ones passed to QFunctionAutoDiff.
template<template<typename, typename, typename, int, int> class TFunctor,
         int state_size=1, int param_size=0>
class QFunctionAutoDiff
{
public:
   /// Evaluates a function for arguments vparam and uu. The evaluation is
   /// based on the operator() in the user provided functor TFunctor.
   double Eval(const mfem::Vector &vparam, mfem::Vector &uu)
   {
      return rf(vparam, uu);
   }

   /// Provides the same functionality as Grad.
   void VectorFunc(const mfem::Vector &vparam, mfem::Vector &uu, mfem::Vector &rr)
   {
      Grad(vparam, uu, rr);
   }

   /// Returns the first derivative of TFunctor(...) with respect to the active
   /// arguments provided in vector uu. The length of rr is the same as for uu.
   void Grad(const mfem::Vector &vparam, mfem::Vector &uu, mfem::Vector &rr)
   {
#ifdef MFEM_USE_ADFORWARD
      // use forward mode
      rr.SetSize(state_size);
      {
         ad::ADVectorType aduu(state_size);
         // initialize the active input vector
         for (int i = 0; i < state_size; i++)
         {
            aduu[i].setValue(uu[i]);
            aduu[i].setGradient(0.0);
         }
         // one directional derivative per input recovers the gradient
         for (int ii = 0; ii < state_size; ii++)
         {
            aduu[ii].setGradient(1.0);
            ad::ADFloatType rez = tf(vparam, aduu);
            rr[ii] = rez.getGradient();
            aduu[ii].setGradient(0.0);
         }
      }
#else // use reverse mode
      rr.SetSize(state_size);
      {
         ad::ADVectorType aduu(state_size);
         for (int i = 0; i < state_size; i++)
         {
            aduu[i] = uu[i];
         }
         // record the evaluation of the function on the global tape
         ad::ADFloatType rez;
         ad::ADFloatType::TapeType &tape = ad::ADFloatType::getGlobalTape();
         typename ad::ADFloatType::TapeType::Position pos = tape.getPosition();
         tape.setActive();
         for (int ii = 0; ii < state_size; ii++) { tape.registerInput(aduu[ii]); }
         rez = tf(vparam, aduu);
         tape.registerOutput(rez);
         tape.setPassive();
         // a single reverse sweep recovers the full gradient
         rez.setGradient(1.0);
         tape.evaluate();
         for (int ii = 0; ii < state_size; ii++)
         {
            rr[ii] = aduu[ii].getGradient();
         }
         tape.reset(pos);
      }
#endif
   }

   /// Provides the same functionality as Hessian.
   void Jacobian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
   {
      Hessian(vparam, uu, jac);
   }

#ifdef MFEM_USE_ADFORWARD
   // use forward-forward mode
   typedef codi::RealForwardGen<double>   ADFType;
   typedef TAutoDiffVector<ADFType>       ADFVector;
   typedef TAutoDiffDenseMatrix<ADFType>  ADFDenseMatrix;
   typedef codi::RealForwardGen<ADFType>  ADSType;
   typedef TAutoDiffVector<ADSType>       ADSVector;
   typedef TAutoDiffDenseMatrix<ADSType>  ADSDenseMatrix;
#else
   // use mixed forward and reverse mode
   typedef codi::RealForwardGen<double>   ADFType;
   typedef TAutoDiffVector<ADFType>       ADFVector;
   typedef TAutoDiffDenseMatrix<ADFType>  ADFDenseMatrix;
   typedef codi::RealReverseGen<ADFType>  ADSType;
   typedef TAutoDiffVector<ADSType>       ADSVector;
   typedef TAutoDiffDenseMatrix<ADSType>  ADSDenseMatrix;
#endif

   /// Returns the Hessian of TFunctor(...) in the dense matrix jac. The
   /// dimensions of jac are state_size x state_size, where state_size is the
   /// length of vector uu.
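   /// A minimal usage sketch; the functor MyEnergy, the sizes, and the values
   /// are placeholders for illustration and are not part of the library:
   /// \code{.cpp}
   /// mfem::QFunctionAutoDiff<MyEnergy,2,1> qf;
   /// mfem::Vector param(1), state(2);
   /// mfem::Vector grad;
   /// mfem::DenseMatrix hess;
   /// double e = qf.Eval(param, state);  // scalar value of MyEnergy
   /// qf.Grad(param, state, grad);       // first derivatives, size 2
   /// qf.Hessian(param, state, hess);    // second derivatives, 2x2
   /// \endcode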
   void Hessian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
   {
#ifdef MFEM_USE_ADFORWARD
      // use forward-forward mode
      jac.SetSize(state_size);
      jac = 0.0;
      {
         ADSVector aduu(state_size);
         for (int ii = 0; ii < state_size; ii++)
         {
            aduu[ii].value().value() = uu[ii];
            aduu[ii].value().gradient() = 0.0;
            aduu[ii].gradient().value() = 0.0;
            aduu[ii].gradient().gradient() = 0.0;
         }

         for (int ii = 0; ii < state_size; ii++)
         {
            aduu[ii].value().gradient() = 1.0;
            for (int jj = 0; jj < (ii + 1); jj++)
            {
               aduu[jj].gradient().value() = 1.0;
               ADSType rez = sf(vparam, aduu);
               jac(ii, jj) = rez.gradient().gradient();
               jac(jj, ii) = jac(ii, jj);
               aduu[jj].gradient().value() = 0.0;
            }
            aduu[ii].value().gradient() = 0.0;
         }
      }
#else // use mixed forward and reverse mode
      jac.SetSize(state_size);
      jac = 0.0;
      {
         ADSVector aduu(state_size);
         for (int ii = 0; ii < state_size; ii++)
         {
            aduu[ii].value().value() = uu[ii];
         }

         ADSType rez;
         ADSType::TapeType &tape = ADSType::getGlobalTape();
         typename ADSType::TapeType::Position pos;
         for (int ii = 0; ii < state_size; ii++)
         {
            pos = tape.getPosition();
            tape.setActive();

            for (int jj = 0; jj < state_size; jj++)
            {
               if (jj == ii) { aduu[jj].value().gradient() = 1.0; }
               else { aduu[jj].value().gradient() = 0.0; }
               tape.registerInput(aduu[jj]);
            }

            rez = sf(vparam, aduu);
            tape.registerOutput(rez);
            tape.setPassive();

            rez.gradient().value() = 1.0;
            tape.evaluate();

            for (int jj = 0; jj < (ii + 1); jj++)
            {
               jac(ii, jj) = aduu[jj].gradient().gradient();
               jac(jj, ii) = jac(ii, jj);
            }
            tape.reset(pos);
         }
      }
#endif
   }

private:
   TFunctor<double, const mfem::Vector, mfem::Vector,
            state_size, param_size> rf;
   TFunctor<ad::ADFloatType, const mfem::Vector, ad::ADVectorType,
            state_size, param_size> tf;
   TFunctor<ADSType, const mfem::Vector, ADSVector,
            state_size, param_size> sf;
};

} // end namespace mfem

#else // end MFEM_USE_CODIPACK

// USE NATIVE IMPLEMENTATION
namespace mfem
{

namespace ad
{
/// MFEM native forward AD-type
typedef internal::dual<double, double> ADFloatType;
/// Vector type for AD-type numbers
typedef TAutoDiffVector<ADFloatType> ADVectorType;
/// Matrix type for AD-type numbers
typedef TAutoDiffDenseMatrix<ADFloatType> ADMatrixType;
}

/// The class provides an evaluation of the Jacobian of a templated vector
/// function provided in the constructor. The Jacobian is evaluated with the
/// help of automatic differentiation (AD). The template parameters specify the
/// size of the return vector (vector_size), the size of the input vector
/// (state_size), and the size of the parameters (param_size) supplied to the
/// function.
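/// A minimal usage sketch; the lambda, the sizes, and the values are
/// placeholders for illustration and are not part of the library:
/// \code{.cpp}
/// // f(u) = [a*u0*u1, u0+u1] with one parameter a
/// mfem::VectorFuncAutoDiff<2,2,1> fdiff(
///    [](mfem::Vector &vparam, mfem::ad::ADVectorType &uu, mfem::ad::ADVectorType &rr)
/// {
///    rr[0] = vparam[0]*uu[0]*uu[1];
///    rr[1] = uu[0] + uu[1];
/// });
/// mfem::Vector param(1), state(2);
/// mfem::DenseMatrix jac;
/// param(0) = 3.0; state(0) = 1.0; state(1) = 2.0;
/// fdiff.Jacobian(param, state, jac); // jac is the 2x2 matrix d(rr)/d(uu)
/// \endcode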
template<int vector_size=1, int state_size=1, int param_size=0>
class VectorFuncAutoDiff
{
public:
   /// F_ is a user implemented function to be differentiated by
   /// VectorFuncAutoDiff. The signature of the function is: F_(mfem::Vector&
   /// parameters, ad::ADVectorType& state_vector, ad::ADVectorType& result).
   /// The parameters vector should have size param_size. The state_vector
   /// should have size state_size, and the result vector should have size
   /// vector_size. All size parameters are template parameters in
   /// VectorFuncAutoDiff.
   VectorFuncAutoDiff(
      std::function<void(mfem::Vector&, ad::ADVectorType&, ad::ADVectorType&)> F_)
   {
      F = F_;
   }

   /// Evaluates the Jacobian of the vector function F_ for a set of parameters
   /// (vparam) and state vector uu. The Jacobian (jac) has dimensions
   /// [vector_size x state_size].
   void Jacobian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
   {
      jac.SetSize(vector_size, state_size);
      jac = 0.0;
      {
         ad::ADVectorType aduu(uu); // all dual numbers are initialized to zero
         ad::ADVectorType rr(vector_size);
         for (int ii = 0; ii < state_size; ii++)
         {
            aduu[ii].gradient = 1.0;
            F(vparam, aduu, rr);
            for (int jj = 0; jj < vector_size; jj++)
            {
               jac(jj, ii) = rr[jj].gradient;
            }
            aduu[ii].gradient = 0.0;
         }
      }
   }

private:
   std::function<void(mfem::Vector&, ad::ADVectorType&, ad::ADVectorType&)> F;
};

/// The class provides an evaluation of the Jacobian of a templated vector
/// function provided as a functor TFunctor. The Jacobian is evaluated with the
/// help of automatic differentiation (AD). The template parameters specify the
/// size of the return vector (vector_size), the size of the input vector
/// (state_size), and the size of the parameters supplied to the function. The
/// TFunctor functor is a template class with parameters [Float data type],
/// [Vector type for the additional parameters], [Vector type for the state
/// vector and the return residual]. The integer template parameters are the
/// same ones passed to QVectorFuncAutoDiff. \n
/// Example: f={sin(a*x*y), cos(b*x*y*z), x*x+y*x} \n
/// The vector function has vector_size=3 and state_size=3, i.e., it has three
/// arguments [x,y,z]. The parameter vector [a,b] has size 2.
/// The functor class will have the following form:
/// \code{.cpp}
/// template<typename TDataType, typename TParamVector, typename TStateVector,
///          int vector_size, int state_size, int param_size>
/// class MyVectorFunction{
/// public:
///    void operator()(TParamVector &vparam, TStateVector &uu, TStateVector &rr)
///    {
///       auto a = vparam[0];
///       auto b = vparam[1];
///       rr[0] = sin(a*uu[0]*uu[1]);
///       rr[1] = cos(b*uu[0]*uu[1]*uu[2]);
///       rr[2] = uu[0]*uu[0] + uu[0]*uu[1];
///    }
/// };
/// \endcode
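/// With such a functor, a possible (illustrative) use of the class is:
/// \code{.cpp}
/// mfem::QVectorFuncAutoDiff<MyVectorFunction,3,3,2> adf;
/// mfem::Vector param(2), state(3);
/// mfem::DenseMatrix jac;
/// // ... fill param and state ...
/// adf.Jacobian(param, state, jac); // jac is the 3x3 matrix d(rr)/d(uu)
/// \endcode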
template<template<typename, typename, typename, int, int, int> class TFunctor,
         int vector_size=1, int state_size=1, int param_size=0>
class QVectorFuncAutoDiff
{
private:
   /// MFEM native forward AD-type
   typedef internal::dual<double, double> ADFType;
   /// Vector type for AD-type numbers
   typedef TAutoDiffVector<ADFType> ADFVector;
   /// Matrix type for AD-type numbers
   typedef TAutoDiffDenseMatrix<ADFType> ADFDenseMatrix;

public:
   /// Evaluates the vector valued function rr for supplied passive arguments
   /// vparam and active arguments uu. The evaluation is based on the user
   /// supplied TFunctor template class.
   void VectorFunc(const Vector &vparam, Vector &uu, Vector &rr)
   {
      func(vparam, uu, rr);
   }

   /// Returns the gradient of TFunctor(...) residual in the dense matrix jac.
   /// The dimensions of jac are vector_size x state_size, where state_size is
   /// the length of vector uu.
   void Jacobian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
   {
      // use native AD package
      jac.SetSize(vector_size, state_size);
      jac = 0.0;
      {
         ADFVector aduu(uu); // all dual numbers are initialized to zero
         ADFVector rr(vector_size);

         for (int ii = 0; ii < state_size; ii++)
         {
            aduu[ii].gradient = 1.0;
            Eval(vparam, aduu, rr);
            for (int jj = 0; jj < vector_size; jj++)
            {
               jac(jj, ii) = rr[jj].gradient;
            }
            aduu[ii].gradient = 0.0;
         }
      }
   }

private:
   /// Evaluates the residual from TFunctor(...).
   /// Intended for internal use only.
   void Eval(const Vector &vparam, ADFVector &uu, ADFVector &rr)
   {
      tf(vparam, uu, rr);
   }

   TFunctor<double, const Vector, Vector,
            vector_size, state_size, param_size> func;
   TFunctor<ADFType, const Vector, ADFVector,
            vector_size, state_size, param_size> tf;
};

/// The class provides an evaluation of the first derivatives and the Hessian
/// of a templated scalar function provided as a functor TFunctor. Both the
/// first and the second derivatives are evaluated with the help of automatic
/// differentiation (AD). The template parameters specify the size of the input
/// vector (state_size) and the size of the parameters supplied to the
/// function. The TFunctor functor is a template class with parameters [Float
/// data type], [Vector type for the additional parameters], [Vector type for
/// the state vector and the return residual]. The integer template parameters
/// are the same ones passed to QFunctionAutoDiff. The class duplicates Grad
/// and Hessian, i.e., VectorFunc calls Grad, and Jacobian calls Hessian. The
/// main reason is to provide the same interface as the QVectorFuncAutoDiff
/// class used to differentiate vector functions. Such compatibility allows
/// users to start the implementation of their problem based only on an energy
/// or a weak form. The gradients of the function, computed with
/// Grad/VectorFunc, will contribute to the FE residual. The Hessian, computed
/// with Hessian/Jacobian, will contribute to the tangent matrix in Newton's
/// iterations. Once the implementation is complete and tested, the users can
/// start improving the performance by replacing Grad/VectorFunc with a
/// hand-coded version. The gradient is a vector function and can be
/// differentiated with the functionality implemented in QVectorFuncAutoDiff.
/// Thus, the user can directly employ AD for computing the contributions to
/// the global tangent matrix. The main code will not require changes as the
/// names Grad/VectorFunc and Hessian/Jacobian are mirrored.
template<template<typename, typename, typename, int, int> class TFunctor,
         int state_size=1, int param_size=0>
class QFunctionAutoDiff
{
private:
   /// MFEM native AD-type for first derivatives
   typedef internal::dual<double, double> ADFType;
   /// Vector type for AD-numbers (first derivatives)
   typedef TAutoDiffVector<ADFType> ADFVector;
   /// Matrix type for AD-numbers (first derivatives)
   typedef TAutoDiffDenseMatrix<ADFType> ADFDenseMatrix;
   /// MFEM native AD-type for second derivatives
   typedef internal::dual<ADFType, ADFType> ADSType;
   /// Vector type for AD-numbers (second derivatives)
   typedef TAutoDiffVector<ADSType> ADSVector;
   /// Matrix type for AD-numbers (second derivatives)
   typedef TAutoDiffDenseMatrix<ADSType> ADSDenseMatrix;

public:
   /// Evaluates a function for arguments vparam and uu. The evaluation is
   /// based on the operator() in the user provided functor TFunctor.
   double Eval(const Vector &vparam, Vector &uu)
   {
      return tf(vparam, uu);
   }

   /// Provides the same functionality as Grad.
   void VectorFunc(const Vector &vparam, Vector &uu, Vector &rr)
   {
      Grad(vparam, uu, rr);
   }

   /// Returns the first derivative of TFunctor(...) with respect to the active
   /// arguments provided in vector uu. The length of rr is the same as for uu.
   void Grad(const Vector &vparam, Vector &uu, Vector &rr)
   {
      int n = uu.Size();
      rr.SetSize(n);
      ADFVector aduu(uu);
      ADFType rez;
      for (int ii = 0; ii < n; ii++)
      {
         aduu[ii].gradient = 1.0;
         rez = ff(vparam, aduu);
         rr[ii] = rez.gradient;
         aduu[ii].gradient = 0.0;
      }
   }

   /// Provides the same functionality as Hessian.
   void Jacobian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
   {
      Hessian(vparam, uu, jac);
   }

   /// Returns the Hessian of TFunctor(...) in the dense matrix jac. The
   /// dimensions of jac are state_size x state_size, where state_size is the
   /// length of vector uu.
   void Hessian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
   {
      int n = uu.Size();
      jac.SetSize(n);
      jac = 0.0;
      {
         ADSVector aduu(n);
         for (int ii = 0; ii < n; ii++)
         {
            aduu[ii].value = ADFType{uu[ii], 0.0};
            aduu[ii].gradient = ADFType{0.0, 0.0};
         }

         for (int ii = 0; ii < n; ii++)
         {
            aduu[ii].value = ADFType{uu[ii], 1.0};
            for (int jj = 0; jj < (ii + 1); jj++)
            {
               aduu[jj].gradient = ADFType{1.0, 0.0};
               ADSType rez = sf(vparam, aduu);
               jac(ii, jj) = rez.gradient.gradient;
               jac(jj, ii) = rez.gradient.gradient;
               aduu[jj].gradient = ADFType{0.0, 0.0};
            }
            aduu[ii].value = ADFType{uu[ii], 0.0};
         }
      }
   }

private:
   TFunctor<double, const Vector, Vector, state_size, param_size> tf;
   TFunctor<ADFType, const Vector, ADFVector, state_size, param_size> ff;
   TFunctor<ADSType, const Vector, ADSVector, state_size, param_size> sf;
};

} // end namespace mfem

#endif // NATIVE

#endif // ADMFEM_HPP