// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for MAGMA tensor basis gradient in 2D
#ifndef CEED_MAGMA_BASIS_GRAD_2D_H
#define CEED_MAGMA_BASIS_GRAD_2D_H

#include "magma-common-tensor.h"

// macros to abstract access of shared memory and reg. file
#define sT(i, j) sT[(j)*P + (i)]
#define sTmp(i, j, ldw) sTmp[(j) * (ldw) + (i)]

//////////////////////////////////////////////////////////////////////////////////////////
// grad basis action (2D)
// This function is called twice at a higher level for 2D
// DIM_U   -- for the size of rU[DIM_U * NUM_COMP * MAX_P_Q]
// DIM_V   -- for the size of rV[DIM_V * NUM_COMP * MAX_P_Q]
// i_DIM   -- the index of the outermost loop over dimensions in grad
// i_DIM_U -- which dim index of rU is accessed (always 0 for notrans, 0 or 1 for trans)
// i_DIM_V -- which dim index of rV is accessed (0 or 1 for notrans, always 0 for trans)
// the scalar beta specifies whether to accumulate into rV or overwrite it
template <typename T, int DIM_U, int DIM_V, int NUM_COMP, int P, int Q, int rU_SIZE, int rV_SIZE, int i_DIM, int i_DIM_U, int i_DIM_V>
static __device__ __inline__ void magma_grad_2d_device(const T *sTinterp, const T *sTgrad, T rU[DIM_U][NUM_COMP][rU_SIZE],
                                                       T rV[DIM_V][NUM_COMP][rV_SIZE], T beta, const int tx, T rTmp, T *swork) {
  // Assumptions
  // 0. This device routine applies grad for one dim only (i_DIM), so it should be called twice for 2D
  // 1. 1D threads of size max(P,Q)
  // 2. input:  rU[DIM_U x NUM_COMP x P] in registers (per thread)
  // 3. output: rV[DIM_V x NUM_COMP x Q] in registers (per thread)
  // 4. Two products for each (dim, component) pair
  //    4.1 Batch P of (1xP) matrices times (PxQ) matrix => Batch P of (1xQ) matrices
  //    4.2 Batch 1 of (QxP) matrix   times (PxQ) matrix => (QxQ) matrix
  // 5. Each thread computes one row of the output of each product
  // 6. Sync is recommended before and after the call

  for (int comp = 0; comp < NUM_COMP; comp++) {
    // 1st product -- Batch P of (1xP) matrices [reg] x (PxQ) [shmem] => Batch P of (1xQ) matrices
    // the batch output P x (1xQ) is written on the fly to shmem
    if (tx < P) {
      const int batchid = tx;
      const int sld     = 1;
      const T  *sT      = (i_DIM == 0) ? sTgrad : sTinterp;
      T        *sTmp    = swork + batchid * (1 * Q);
      for (int j = 0; j < Q; j++) {
        rTmp = 0.0;
        for (int i = 0; i < P; i++) {
          rTmp += rU[i_DIM_U][comp][i] * sT(i, j);
        }
        sTmp(0, j, sld) = rTmp;
      }
    }  // end of: if (tx < P)
    __syncthreads();

    // 2nd product -- Batch 1 of a (QxP) matrix [shmem] x (PxQ) [shmem] => (QxQ) matrix [reg]
    if (tx < Q) {
      const int batchid = 0;
      const int sld     = Q;
      const T  *sT      = (i_DIM == 1) ? sTgrad : sTinterp;
      T        *sTmp    = swork + batchid * (Q * P);
      for (int j = 0; j < Q; j++) {
        rTmp = 0.0;
        for (int i = 0; i < P; i++) {
          rTmp += sTmp(tx, i, sld) * sT(i, j);
        }
        rV[i_DIM_V][comp][j] *= beta;
        rV[i_DIM_V][comp][j] += rTmp;
      }
    }
    __syncthreads();
  }  // loop over NUM_COMP
}
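
//////////////////////////////////////////////////////////////////////////////////////////
// Reading the template logic above: the first (1xP)x(PxQ) pass multiplies by sTgrad when
// i_DIM == 0 and by sTinterp otherwise, while the second (QxP)x(PxQ) pass does the
// opposite, so each call differentiates along exactly one tensor dimension. A minimal
// sketch of a standalone instantiation (illustrative only -- the real call sites are the
// kernels below; the values assume P = Q = 4 and a single component):
//
//   __syncthreads();  // sync recommended before the call (assumption 6)
//   magma_grad_2d_device<double, 1, 1, /*NUM_COMP=*/1, /*P=*/4, /*Q=*/4, /*rU_SIZE=*/4,
//                        /*rV_SIZE=*/4, /*i_DIM=*/0, /*i_DIM_U=*/0, /*i_DIM_V=*/0>(
//       sTinterp, sTgrad, rU, rV, /*beta=*/0.0, tx, rTmp, swork);  // derivative in dim 0
//   // a second call with i_DIM = 1 produces the derivative along the other dimension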

//////////////////////////////////////////////////////////////////////////////////////////
extern "C" __launch_bounds__(MAGMA_BASIS_BOUNDS(BASIS_MAX_P_Q, MAGMA_MAXTHREADS_2D)) __global__
    void magma_gradn_2d_kernel(const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, const CeedScalar *dU, const int estrdU, const int cstrdU,
                               const int dstrdU, CeedScalar *dV, const int estrdV, const int cstrdV, const int dstrdV, const int nelem) {
  MAGMA_DEVICE_SHARED(CeedScalar, shared_data)

  const int     tx      = threadIdx.x;
  const int     ty      = threadIdx.y;
  const int     elem_id = (blockIdx.x * blockDim.y) + ty;
  magma_trans_t transT  = MagmaNoTrans;

  if (elem_id >= nelem) return;

  CeedScalar rU[1][BASIS_NUM_COMP][BASIS_P] = {0.0};  // here DIM_U = 1, but might be different for a fused operator
  CeedScalar rV[1][BASIS_NUM_COMP][BASIS_Q] = {0.0};  // here DIM_V = 1, but might be different for a fused operator
  CeedScalar rTmp                           = 0.0;

  // shift global memory pointers by elem stride
  dU += elem_id * estrdU;
  dV += elem_id * estrdV;

  // assign shared memory pointers
  CeedScalar *sTinterp = (CeedScalar *)shared_data;
  CeedScalar *sTgrad   = sTinterp + BASIS_P * BASIS_Q;
  CeedScalar *sTmp     = sTgrad + BASIS_P * BASIS_Q;
  sTmp += ty * (BASIS_P * BASIS_MAX_P_Q);

  // read T
  if (ty == 0) {
    dread_T_gm2sm<BASIS_P, BASIS_Q>(tx, transT, dinterp1d, sTinterp);
    dread_T_gm2sm<BASIS_P, BASIS_Q>(tx, transT, dgrad1d, sTgrad);
  }

  // No need to read V (required only in transposed grad)
  const CeedScalar beta = 0.0;

  /* read U (idim = 0 for dU, i_DIM = 0 for rU) -- there is a sync at the end of this function */
  readU_2d<CeedScalar, BASIS_P, 1, BASIS_NUM_COMP, BASIS_P, 0>(dU + (0 * dstrdU), cstrdU, rU, sTmp, tx);

  /* first call (i_DIM = 0, i_DIM_U = 0, i_DIM_V = 0) -- output from rV[0][][] into dV (idim = 0) */
  magma_grad_2d_device<CeedScalar, 1, 1, BASIS_NUM_COMP, BASIS_P, BASIS_Q, BASIS_P, BASIS_Q, 0, 0, 0>(sTinterp, sTgrad, rU, rV, beta, tx, rTmp, sTmp);
  /* there is a sync at the end of magma_grad_2d_device */
  writeV_2d<CeedScalar, BASIS_Q, 1, BASIS_NUM_COMP, BASIS_Q, 0>(dV + (0 * dstrdV), cstrdV, rV, tx);

  /* second call (i_DIM = 1, i_DIM_U = 0, i_DIM_V = 0) -- output from rV[0][][] into dV (idim = 1) */
  magma_grad_2d_device<CeedScalar, 1, 1, BASIS_NUM_COMP, BASIS_P, BASIS_Q, BASIS_P, BASIS_Q, 1, 0, 0>(sTinterp, sTgrad, rU, rV, beta, tx, rTmp, sTmp);
  /* there is a sync at the end of magma_grad_2d_device */
  writeV_2d<CeedScalar, BASIS_Q, 1, BASIS_NUM_COMP, BASIS_Q, 0>(dV + (1 * dstrdV), cstrdV, rV, tx);
}
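
//////////////////////////////////////////////////////////////////////////////////////////
// Shared-memory footprint of magma_gradn_2d_kernel, derived from the pointer arithmetic
// above: two PxQ operator copies (sTinterp, sTgrad) shared by the whole thread block,
// plus one P x max(P,Q) scratch slab per element slot in the block. A hypothetical launch
// sketch (ntcol, queue, and the grid shape are illustrative assumptions; the backend's
// actual driver code is not part of this header):
//
//   dim3   threads(BASIS_MAX_P_Q, ntcol, 1);
//   dim3   grid((nelem + ntcol - 1) / ntcol, 1, 1);
//   size_t shmem = (2 * BASIS_P * BASIS_Q + ntcol * BASIS_P * BASIS_MAX_P_Q) * sizeof(CeedScalar);
//   magma_gradn_2d_kernel<<<grid, threads, shmem, queue>>>(dinterp1d, dgrad1d, dU, estrdU, cstrdU, dstrdU,
//                                                          dV, estrdV, cstrdV, dstrdV, nelem);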

//////////////////////////////////////////////////////////////////////////////////////////
extern "C" __launch_bounds__(MAGMA_BASIS_BOUNDS(BASIS_MAX_P_Q, MAGMA_MAXTHREADS_2D)) __global__
    void magma_gradt_2d_kernel(const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, const CeedScalar *dU, const int estrdU, const int cstrdU,
                               const int dstrdU, CeedScalar *dV, const int estrdV, const int cstrdV, const int dstrdV, const int nelem) {
  MAGMA_DEVICE_SHARED(CeedScalar, shared_data)

  const int     tx      = threadIdx.x;
  const int     ty      = threadIdx.y;
  const int     elem_id = (blockIdx.x * blockDim.y) + ty;
  magma_trans_t transT  = MagmaTrans;

  if (elem_id >= nelem) return;

  CeedScalar rU[1][BASIS_NUM_COMP][BASIS_Q] = {0.0};  // here DIM_U = 1, but might be different for a fused operator
  CeedScalar rV[1][BASIS_NUM_COMP][BASIS_P] = {0.0};  // here DIM_V = 1, but might be different for a fused operator
  CeedScalar rTmp                           = 0.0;

  // shift global memory pointers by elem stride
  dU += elem_id * estrdU;
  dV += elem_id * estrdV;

  // assign shared memory pointers
  CeedScalar *sTinterp = (CeedScalar *)shared_data;
  CeedScalar *sTgrad   = sTinterp + BASIS_Q * BASIS_P;
  CeedScalar *sTmp     = sTgrad + BASIS_Q * BASIS_P;
  sTmp += ty * (BASIS_Q * BASIS_MAX_P_Q);

  // read T
  if (ty == 0) {
    dread_T_gm2sm<BASIS_Q, BASIS_P>(tx, transT, dinterp1d, sTinterp);
    dread_T_gm2sm<BASIS_Q, BASIS_P>(tx, transT, dgrad1d, sTgrad);
  }
  __syncthreads();

  /* read V (since this is transposed mode -- idim = 0 for dV, i_DIM = 0 for rV) */
  const CeedScalar beta = 1.0;
  readV_2d<CeedScalar, BASIS_P, 1, BASIS_NUM_COMP, BASIS_P, 0>(dV + (0 * dstrdV), cstrdV, rV, tx);

  /* read U (idim = 0 for dU, i_DIM = 0 for rU) -- there is a sync at the end of this function */
  readU_2d<CeedScalar, BASIS_Q, 1, BASIS_NUM_COMP, BASIS_Q, 0>(dU + (0 * dstrdU), cstrdU, rU, sTmp, tx);
  /* first call (i_DIM = 0, i_DIM_U = 0, i_DIM_V = 0) */
  magma_grad_2d_device<CeedScalar, 1, 1, BASIS_NUM_COMP, BASIS_Q, BASIS_P, BASIS_Q, BASIS_P, 0, 0, 0>(sTinterp, sTgrad, rU, rV, beta, tx, rTmp, sTmp);
  /* there is a sync at the end of magma_grad_2d_device */

  /* read U (idim = 1 for dU, i_DIM = 0 for rU) -- there is a sync at the end of this function */
  readU_2d<CeedScalar, BASIS_Q, 1, BASIS_NUM_COMP, BASIS_Q, 0>(dU + (1 * dstrdU), cstrdU, rU, sTmp, tx);
  /* second call (i_DIM = 1, i_DIM_U = 0, i_DIM_V = 0) */
  magma_grad_2d_device<CeedScalar, 1, 1, BASIS_NUM_COMP, BASIS_Q, BASIS_P, BASIS_Q, BASIS_P, 1, 0, 0>(sTinterp, sTgrad, rU, rV, beta, tx, rTmp, sTmp);
  /* there is a sync at the end of magma_grad_2d_device */

  // write V
  writeV_2d<CeedScalar, BASIS_P, 1, BASIS_NUM_COMP, BASIS_P, 0>(dV + (0 * dstrdV), cstrdV, rV, tx);
}

#endif  // CEED_MAGMA_BASIS_GRAD_2D_H
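
//////////////////////////////////////////////////////////////////////////////////////////
// Note on magma_gradt_2d_kernel (illustrative summary): with beta = 1.0 the device
// routine computes rV = beta * rV + contribution, so per component the kernel
// accumulates
//
//   V_out = V_in + G_0^T(U_0) + G_1^T(U_1)
//
// where U_d is the slice at dU + d * dstrdU and G_d^T denotes the transposed gradient
// action in dimension d (notation introduced here for clarity). This is why dV is read
// once (readV_2d) before the first call and written back once (writeV_2d) after the
// second.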