#ifndef PARSPARSEMATRIX_H
#define PARSPARSEMATRIX_H

#include "mpi.h"
#include "MPI_Wrappers.h"
#include "SparseMatrix.h"
#include "DiagonalMatrix.h"
#include <algorithm>

namespace ATC_matrix {

  /**
   *  @class  ParSparseMatrix
   *  @brief  Parallelized version of the SparseMatrix class.
   *
   *  ParSparseMatrix::MultMv is used in LinearSolver, which is in turn used
   *  by NonLinearSolver, PoissonSolver, and SchrodingerSolver. These
   *  parallelized solvers are used in the following locations:
   *
   *  - LinearSolver
   *    - ExtrinsicModelDriftDiffusion.cpp (lines 511 and 522)
   *    - AtomicRegulator.cpp (line 926)
   *    - TransferLibrary.cpp (lines 72 and 260)
   *  - PoissonSolver
   *    - ExtrinsicModelDriftDiffusion.cpp (line 232)
   *  - SchrodingerSolver
   *    - ExtrinsicModelDriftDiffusion.cpp (line 251)
   *  - SliceSchrodingerSolver
   *    - ExtrinsicModelDriftDiffusion.cpp (line 246)
   */
  template<typename T>
  class ParSparseMatrix : public SparseMatrix<T> {
  public:
    ParSparseMatrix(MPI_Comm comm, INDEX rows = 0, INDEX cols = 0)
      : SparseMatrix<T>(rows, cols), _comm(comm) {}

    ParSparseMatrix(MPI_Comm comm, const SparseMatrix<T> &c)
      : SparseMatrix<T>(c), _comm(comm) {}

    ParSparseMatrix(MPI_Comm comm, INDEX *rows, INDEX *cols, T *vals,
                    INDEX size, INDEX nRows, INDEX nCols, INDEX nRowsCRS)
      : SparseMatrix<T>(rows, cols, vals, size, nRows, nCols, nRowsCRS),
        _comm(comm) {}

    ParSparseMatrix(MPI_Comm comm)
      : SparseMatrix<T>(), _comm(comm) {}

    virtual void operator=(const SparseMatrix<T> &source)
    {
      this->copy(source);  // this-> is needed: copy() is in the dependent base
    }

    template<typename U>
    friend void ParMultAB(MPI_Comm comm, const SparseMatrix<U> &A,
                          const Matrix<U> &B, DenseMatrix<U> &C);

  private:
    MPI_Comm _comm;
  };

  /** Specialization for double, the only type for which the parallel
   *  multiplication kernels are implemented. */
  template<>
  class ParSparseMatrix<double> : public SparseMatrix<double> {
  public:
    // All the same constructors as for SparseMatrix
    ParSparseMatrix(MPI_Comm comm, INDEX rows = 0, INDEX cols = 0);
    ParSparseMatrix(MPI_Comm comm, const SparseMatrix<double> &c);
    ParSparseMatrix(MPI_Comm comm, INDEX *rows, INDEX *cols, double *vals,
                    INDEX size, INDEX nRows, INDEX nCols, INDEX nRowsCRS);

    // Parallel sparse matrix multiplication functions
    void MultMv(const Vector<double> &v, DenseVector<double> &c) const;
    DenseVector<double> transMat(const Vector<double> &v) const;
    void MultAB(const Matrix<double> &B, DenseMatrix<double> &C) const;
    DenseMatrix<double> transMat(const DenseMatrix<double> &B) const;
    DenseMatrix<double> transMat(const SparseMatrix<double> &B) const;

    virtual void operator=(const SparseMatrix<double> &source);

    template<typename U>
    friend void ParMultAB(MPI_Comm comm, const SparseMatrix<U> &A,
                          const Matrix<U> &B, DenseMatrix<U> &C);

  private:
    void partition(ParSparseMatrix<double> &A_local) const;
    void finalize();

    MPI_Comm _comm;
  };

  // The SparseMatrix versions of these functions will call the correct
  // MultMv/MultAB:
  //   DenseVector<double> operator*(const ParSparseMatrix<double> &A, const Vector<double> &v);
  //   DenseVector<double> operator*(const Vector<double> &v, const ParSparseMatrix<double> &A);
  //   DenseMatrix<double> operator*(const ParSparseMatrix<double> &A, const Matrix<double> &B);

  template<typename T>
  void ParMultAB(MPI_Comm comm, const SparseMatrix<T> &A, const Matrix<T> &B,
                 DenseMatrix<T> &C)
  {
    SparseMatrix<T>::compress(A);

    INDEX M = A.nRows(), N = B.nCols();
    if (!C.is_size(M, N)) {
      C.resize(M, N);
      C.zero();
    }

    // Temporarily move A's fields into a ParSparseMatrix so the
    // multiplication is distributed across the ranks of comm.
    ParSparseMatrix<T> Ap(comm);
    Ap._nRows = A._nRows;
    Ap._nCols = A._nCols;
    Ap._size = A._size;
    Ap._nRowsCRS = A._nRowsCRS;
    Ap._val = A._val;
    Ap._ja = A._ja;
    Ap._ia = A._ia;
    Ap.hasTemplate_ = A.hasTemplate_;

    // MultAB calls compress(), but we expect that to be a no-op because we
    // just compressed A. If it did do something, it could corrupt other
    // members (e.g. _tri).
    Ap.MultAB(B, C);

    // We aren't changing the matrix's values, so casting away A's constness
    // is justified.
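    // Copy every field back: if the compress() inside MultAB did rebuild the
    // CRS arrays, Ap now owns the current buffers and A's old pointers would
    // be stale.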
    SparseMatrix<T> &Avar = const_cast<SparseMatrix<T> &>(A);
    Avar._nRows = Ap._nRows;
    Avar._nCols = Ap._nCols;
    Avar._size = Ap._size;
    Avar._nRowsCRS = Ap._nRowsCRS;
    Avar._val = Ap._val;
    Avar._ja = Ap._ja;
    Avar._ia = Ap._ia;
    Avar.hasTemplate_ = Ap.hasTemplate_;

    // Avoid catastrophe: Ap shares A's heap arrays, so null out its pointers
    // before it is destroyed, lest its destructor free A's data.
    Ap._val = NULL;
    Ap._ja = NULL;
    Ap._ia = NULL;
  }

  /*
  SparseMatrix<double> operator*(const ParSparseMatrix<double> &A, const SparseMatrix<double> &B);
  SparseMatrix<double> operator*(const ParSparseMatrix<double> &A, const DiagonalMatrix<double> &B);
  */

} // end namespace ATC_matrix

#endif
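
/* Usage sketch: a minimal, illustrative example of driving the double
 * specialization. It assumes MPI has already been initialized by the caller
 * and that entries are assembled through the SparseMatrix base-class
 * interface; set() and compress() here are assumptions about that interface,
 * not functions declared in this header.
 *
 *   MPI_Comm comm = MPI_COMM_WORLD;
 *   ATC_matrix::ParSparseMatrix<double> A(comm, 3, 3);
 *   A.set(0, 0, 2.0);        // assemble a few entries...
 *   A.set(1, 1, 1.0);
 *   A.set(2, 0, -1.0);
 *   A.compress();            // ...and finalize CRS storage
 *
 *   ATC_matrix::DenseVector<double> v(3), c;
 *   v(0) = v(1) = v(2) = 1.0;
 *   A.MultMv(v, c);          // distributed c = A * v over comm
 */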