/***************************************************************************************************
 * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
#pragma once

#include <cute/arch/copy_sm80.hpp>
#include <cute/atom/copy_traits.hpp>
#include <cute/layout.hpp>

namespace cute
{

template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS<S,D>>
{
  // Logical thread id to thread idx (one-thread)
  using ThrID = Layout<_1>;

  // Map from (src-thr,src-val) to bit
  using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
  // Map from (dst-thr,dst-val) to bit
  using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;

  // Reference map from (thr,val) to bit
  using RefLayout = SrcLayout;

  // Construct a zfill variant with a given predicate value
  CUTE_HOST_DEVICE constexpr
  Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>>
  with(bool pred) const {
    return {pred};
  }
};

template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL<S,D>>
{
  // Logical thread id to thread idx (one-thread)
  using ThrID = Layout<_1>;

  // Map from (src-thr,src-val) to bit
  using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
  // Map from (dst-thr,dst-val) to bit
  using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;

  // Reference map from (thr,val) to bit
  using RefLayout = SrcLayout;

  // Construct a zfill variant with a given predicate value
  CUTE_HOST_DEVICE constexpr
  Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>>
  with(bool pred) const {
    return {pred};
  }
};

template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>>
{
  // Logical thread id to thread idx (one-thread)
  using ThrID = Layout<_1>;

  // Map from (src-thr,src-val) to bit
  using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
  // Map from (dst-thr,dst-val) to bit
  using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;

  // Reference map from (thr,val) to bit
  using RefLayout = SrcLayout;

  // Predicate value that determines whether to load or zfill
  bool pred = false;

  // Overload copy_unpack for zfill variant to pass the predicate into the op
  template <class TS, class SLayout,
            class TD, class DLayout>
  CUTE_HOST_DEVICE friend constexpr
  void
  copy_unpack(Copy_Traits        const& traits,
              Tensor<TS,SLayout> const& src,
              Tensor<TD,DLayout>      & dst)
  {
    static_assert(is_gmem<TS>::value, "Expected gmem source for cp.async.");
    static_assert(is_smem<TD>::value, "Expected smem destination for cp.async.");

    Tensor rS = recast<S>(src);
    Tensor rD = recast<D>(dst);

    CUTE_STATIC_ASSERT_V(size(rS) == Int<1>{},
      "In CopyAtom, src layout doesn't vectorize into registers. This src layout is incompatible with this tiled copy.");
    CUTE_STATIC_ASSERT_V(size(rD) == Int<1>{},
      "In CopyAtom, dst layout doesn't vectorize into registers. This dst layout is incompatible with this tiled copy.");

    SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>::copy(rS[0], rD[0], traits.pred);
  }
};

template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>>
{
  // Logical thread id to thread idx (one-thread)
  using ThrID = Layout<_1>;

  // Map from (src-thr,src-val) to bit
  using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
  // Map from (dst-thr,dst-val) to bit
  using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;

  // Reference map from (thr,val) to bit
  using RefLayout = SrcLayout;

  // Predicate value that determines whether to load or zfill
  bool pred = false;

  // Overload copy_unpack for zfill variant to pass the predicate into the op
  template <class TS, class SLayout,
            class TD, class DLayout>
  CUTE_HOST_DEVICE friend constexpr
  void
  copy_unpack(Copy_Traits        const& traits,
              Tensor<TS,SLayout> const& src,
              Tensor<TD,DLayout>      & dst)
  {
    static_assert(is_gmem<TS>::value, "Expected gmem source for cp.async.");
    static_assert(is_smem<TD>::value, "Expected smem destination for cp.async.");

    Tensor rS = recast<S>(src);
    Tensor rD = recast<D>(dst);

    CUTE_STATIC_ASSERT_V(size(rS) == Int<1>{},
      "In CopyAtom, src layout doesn't vectorize into registers. This src layout is incompatible with this tiled copy.");
    CUTE_STATIC_ASSERT_V(size(rD) == Int<1>{},
      "In CopyAtom, dst layout doesn't vectorize into registers. This dst layout is incompatible with this tiled copy.");

    SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>::copy(rS[0], rD[0], traits.pred);
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// Element copy selector
template <class SrcTensor, class DstTensor>
CUTE_HOST_DEVICE constexpr
auto
select_elementwise_copy(SrcTensor const&, DstTensor const&)
{
  using SrcType = typename SrcTensor::value_type;
  using DstType = typename DstTensor::value_type;

#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
  if constexpr (is_gmem<SrcTensor>::value && is_smem<DstTensor>::value &&
                sizeof(SrcType) == sizeof(DstType) &&
                (sizeof(SrcType) == 4 || sizeof(SrcType) == 8 || sizeof(SrcType) == 16))
  {
    return SM80_CP_ASYNC_CACHEALWAYS<SrcType,DstType>{};
  } else {
    return UniversalCopy<SrcType,DstType>{};
  }

  CUTE_GCC_UNREACHABLE;
#else
  return UniversalCopy<SrcType,DstType>{};
#endif
}

} // end namespace cute
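
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only, not part of this header): the non-zfill traits above expose
// with(pred), which rebinds an atom to the corresponding *_ZFILL op so that copy_unpack forwards
// the predicate into cp.async -- pred == true performs the load, pred == false zero-fills the
// smem destination. The pointers and the `in_bounds` flag below are hypothetical placeholders.
//
//   using namespace cute;
//
//   Tensor gA = make_tensor(make_gmem_ptr(gmem_ptr_f32),          // gmem source, 4 floats
//                           make_layout(make_shape(Int<4>{})));
//   Tensor sA = make_tensor(make_smem_ptr(smem_ptr_f32),          // smem destination, 4 floats
//                           make_layout(make_shape(Int<4>{})));
//
//   // 16B cp.async atom; with(in_bounds) yields the CACHEALWAYS_ZFILL variant of the same atom
//   Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, float> atom{};
//   copy(atom.with(in_bounds), gA, sA);  // zero-fills sA when in_bounds is false
//
//   cp_async_fence();                    // cp.async still requires the usual fence/wait pairing
//   cp_async_wait<0>();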
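
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Selector sketch (illustrative only): select_elementwise_copy returns SM80_CP_ASYNC_CACHEALWAYS
// only for a gmem -> smem copy whose source and destination elements have equal size of 4, 8, or
// 16 bytes (the transfer sizes cp.async supports); every other combination falls back to
// UniversalCopy. `gA` and `sA` are the hypothetical tensors from the sketch above.
//
//   auto op = select_elementwise_copy(gA, sA);
//   // decltype(op) is SM80_CP_ASYNC_CACHEALWAYS<float,float> when
//   // CUTE_ARCH_CP_ASYNC_SM80_ENABLED is defined, and UniversalCopy<float,float> otherwise.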