/***************************************************************************************************
 * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

/*! \file
  \brief Additional permutation information for the example.
*/

#include "cutlass/layout/permute.h"
#include "cutlass/gemm/gemm.h"

namespace example
{

using namespace cute;

// This struct is specialized below for different CUTLASS 2.x permutation ops
// to describe the operation in terms of target CuTe shape and stride order.
template<class Permute>
struct PermuteTraits {};

// Use X as a placeholder for shape division result
using X = Underscore;

// Reshape a rank-2 shape into a multidimensional shape.
// Input:
//   shape = (A, B, ...)
//   target_shape = ((A1, ..., X, ..., Am), (B1, ..., X, ..., Bn), ...)
// Output:
//   ((A1, ..., A/prod(A1..Am), ..., Am), (B1, ..., B/prod(B1..Bn), ..., Bn), ...)
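//
// Illustrative example (the concrete extents are hypothetical, chosen only to show the rule):
//   shape        = (16, 8)
//   target_shape = ((X, 4), (2, X))
//   result       = ((4, 4), (2, 4))    // each X is replaced by 16/4 and 8/2, respectively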
template<class Shape, class TargetShape>
constexpr auto
reshape(Shape const& shape, TargetShape const& target_shape)
{
  if constexpr (is_tuple<Shape>::value) {
    return cute::transform(shape, target_shape, [](auto && s, auto && t){ return reshape(s, t); });
  }
  else {
    auto idx = find_if(target_shape, [](auto x){ return is_underscore<decltype(x)>{}; });
    constexpr int I = decltype(idx)::value;
    static_assert(I < tuple_size_v<TargetShape>, "Each mode of TargetShape must contain a placeholder X");
    auto divisors = remove<I>(target_shape);
    assert(shape % product(divisors) == 0);
    return replace<I>(target_shape, shape / product(divisors));
  }
}

// Given a tensor layout, compute a permutation layout consisting of:
// - sub-modes corresponding to the implied multidimensional shape of the source tensor
// - strides accounting for the permutation operation being performed
template<class Permute, bool Transpose, class Shape, class Stride>
constexpr auto
make_permute_layout(Layout<Shape,Stride> const& layout) {
  static_assert(cute::rank(Shape{}) == 3, "Only rank-3 layouts are supported");
  if constexpr (Transpose) {
    // Deal with tensor B by transposing appropriately before and after computing the permute layout.
    // Its CuTe-canonical mode order is [N,K,L], while permute operations expect [row,col,batch].
    return select<1,0,2>(make_permute_layout<Permute, false>(select<1,0,2>(layout)));
  }
  else {
    if constexpr (cutlass::layout::is_trivial_permute<Permute>) {
      // Special case for NoPermute. Use a depth-2 layout for consistency with other permutations.
      using ShapeProfile = tuple<tuple<X>, tuple<X>, tuple<X>>;
      return unflatten(layout, ShapeProfile{});
    }
    else {
      // Here's where the permutation layout is actually built
      using ShapeProfile = typename PermuteTraits<Permute>::ShapeProfile;
      using StrideOrder  = typename PermuteTraits<Permute>::StrideOrder;
      return make_ordered_layout(reshape(layout.shape(), ShapeProfile{}), StrideOrder{});
    }
  }
}

namespace detail
{

template<int I>
struct is_constant_pred {
  template<class T>
  constexpr auto operator()(T) {
    return is_constant<I, T>{};
  }
};

template<class Permutation, int... I>
constexpr auto
inverse_impl(Permutation const & perm, seq<I...>) {
  return cute::make_tuple(Int<find_if(Permutation{}, is_constant_pred<I>{})>{}...);
}

} // namespace detail

// Compute an inverse of a permutation represented as a tuple of cute::Int<>
template<class Permutation>
constexpr auto
inverse(Permutation const & perm) {
  auto flat_perm = flatten(perm);
  return unflatten(detail::inverse_impl(flat_perm, tuple_seq<decltype(flat_perm)>{}), perm);
}

template<class T>
using inverse_t = decltype(inverse(T{}));
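// Illustrative example: the inverse of the permutation (1, 2, 0) is (2, 0, 1),
// since value 0 is found at position 2, value 1 at position 0, and value 2 at position 1.
//
//   using Perm    = cute::tuple<_1, _2, _0>;
//   using InvPerm = inverse_t<Perm>;    // cute::tuple<_2, _0, _1>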
// Given a rank-2 layout of tensor that is assumed to have been permuted,
// compute the original rank-2 layout of the tensor prior to the permutation.
// This is needed to form the correct input to the standalone permutation kernel.
template<class Permute, bool Transpose, class Shape, class Stride>
constexpr auto
make_original_layout(Layout<Shape,Stride> const& layout) {
  static_assert(cute::rank(Shape{}) == 3, "Only rank-3 layouts are supported");
  if constexpr (Transpose) {
    // Deal with tensor B by transposing appropriately before and after computing the permute layout.
    // Its CuTe-canonical mode order is [N,K,L], while permute operations expect [row,col,batch].
    return select<1,0,2>(make_original_layout<Permute, false>(select<1,0,2>(layout)));
  }
  else {
    using ShapeProfile = typename PermuteTraits<Permute>::ShapeProfile;
    using IndexOrder   = typename PermuteTraits<Permute>::IndexOrder;
    using OrigOrder    = conditional_t<cutlass::gemm::detail::is_major<0,Stride>(), seq<0,1,2>, seq<1,0,2>>;
    auto orig_shape = select(flatten(reshape(layout.shape(), ShapeProfile{})), IndexOrder{});
    // print("Permuted shape: "); print(reshape(layout.shape(), ShapeProfile{})); print("\n");
    // print("Original shape: "); print(orig_shape); print("\n");
    return make_ordered_layout(product_each(orig_shape), OrigOrder{});
  }
}
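// Usage sketch (illustrative; the permutation choice, extents, and variable names below are
// assumptions made for demonstration, not definitions from this header). Roughly: ShapeProfile
// says how each [row,col,batch] mode is subdivided (X marks the extent inferred by reshape()),
// IndexOrder regroups those sub-modes into the modes of the tensor prior to permutation
// (consumed by make_original_layout), and StrideOrder gives the relative stride ordering of
// the sub-modes after permutation (consumed by make_permute_layout).
//
//   using PermuteD = cutlass::layout::Tensor4DPermute0213RowMajor<8, 8>;
//   // Canonical row-major view of an M x N x L tensor D (M and N assumed to be multiples of 8)
//   auto layout_D        = make_layout(make_shape(M, N, L), make_stride(N, Int<1>{}, M * N));
//   // Hierarchical layout whose strides account for the permutation being performed
//   auto permute_layout  = make_permute_layout<PermuteD, false>(layout_D);
//   // Plain rank-3 layout of the tensor prior to the permutation
//   auto original_layout = make_original_layout<PermuteD, false>(layout_D);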
///////////////   Tensor4DPermute0213   ////////////////////

template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213ColumnMajor<D1, D2>>
{
  static constexpr bool kBatched = false;
  using ShapeProfile = Shape<Shape<X,Int<D1>>, Shape<Int<D2>,X>, Shape<X>>;
  using IndexOrder   = Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
  using StrideOrder  = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
};

template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213ColumnMajorInverse<D1, D2>>
{
  static constexpr bool kBatched = false;
  using ShapeProfile = Shape<Shape<X,Int<D2>>, Shape<Int<D1>,X>, Shape<X>>;
  using IndexOrder   = Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
  using StrideOrder  = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
};

template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>>
{
  static constexpr bool kBatched = false;
  using ShapeProfile = Shape<Shape<Int<D1>,X>, Shape<X,Int<D2>>, Shape<X>>;
  using IndexOrder   = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
  using StrideOrder  = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
};

template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213RowMajorInverse<D1, D2>>
{
  static constexpr bool kBatched = false;
  using ShapeProfile = Shape<Shape<Int<D2>,X>, Shape<X,Int<D1>>, Shape<X>>;
  using IndexOrder   = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
  using StrideOrder  = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
};

///////////////   Tensor4DPermuteBMM0321   ////////////////////

template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D>>
{
  static constexpr bool kBatched = true;
  using ShapeProfile = Shape<Shape<X>, Shape<X>, Shape<Int<D>,X>>;
  using IndexOrder   = Step<Step<_0,_2>, Step<_1>, Step<_3>>;
  using StrideOrder  = Step<Step<_0>, Step<_2>, Step<_1,_3>>;
};

template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajorInverse<D>>
{
  static constexpr bool kBatched = true;
  using ShapeProfile = Shape<Shape<X,Int<D>>, Shape<X>, Shape<X>>;
  using IndexOrder   = Step<Step<_0>, Step<_2>, Step<_1,_3>>;
  using StrideOrder  = Step<Step<_0,_2>, Step<_1>, Step<_3>>;
};

///////////////   Tensor4DPermuteBMM0213   ////////////////////

template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D>>
{
  static constexpr bool kBatched = true;
  using ShapeProfile = Shape<Shape<X>, Shape<X>, Shape<Int<D>,X>>;
  using IndexOrder   = Step<Step<_0>, Step<_1,_2>, Step<_3>>;
  using StrideOrder  = Step<Step<_2>, Step<_0>, Step<_1,_3>>;
};

template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0213RowMajorInverse<D>>
{
  static constexpr bool kBatched = true;
  using ShapeProfile = Shape<Shape<X>, Shape<X,Int<D>>, Shape<X>>;
  using IndexOrder   = Step<Step<_0>, Step<_1>, Step<_2,_3>>;
  using StrideOrder  = Step<Step<_1>, Step<_0,_2>, Step<_3>>;
};

///////////////   Tensor5DPermute02413   ////////////////////

template<int T1, int T2, int T3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>>
{
  static constexpr bool kBatched = false;
  using ShapeProfile = Shape<Shape<X,Int<T1>>, Shape<Int<T2>,Int<T3>,X>, Shape<X>>;
  using IndexOrder   = Step<Step<_0,_2>, Step<_4,_1,_3>, Step<_5>>;
  using StrideOrder  = inverse_t<IndexOrder>; // Step<Step<_0,_3>, Step<_1,_4,_2>, Step<_5>>;
};

template<int T1, int T2, int T3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute02413ColumnMajorInverse<T1, T2, T3>>
{
  static constexpr bool kBatched = false;
  using ShapeProfile = Shape<Shape<X,Int<T2>>, Shape<X,Int<T1>,Int<T3>>, Shape<X>>;
  using IndexOrder   = Step<Step<_0,_3>, Step<_1,_4,_2>, Step<_5>>;
  using StrideOrder  = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_4,_1,_3>, Step<_5>>;
};

///////////////   Tensor5DPermute20314   ////////////////////

template<int T1, int T2, int T3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>>
{
  static constexpr bool kBatched = false;
  using ShapeProfile = Shape<Shape<Int<T1>,X>, Shape<X,Int<T3>,Int<T2>>, Shape<X>>;
  using IndexOrder   = Step<Step<_2,_0>, Step<_3,_1,_4>, Step<_5>>;
  using StrideOrder  = Step<Step<_1,_3>, Step<_0,_2,_4>, Step<_5>>;
};

template<int T1, int T2, int T3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute20314RowMajorInverse<T1, T2, T3>>
{
  static constexpr bool kBatched = false;
  using ShapeProfile = Shape<Shape<X,Int<T2>>, Shape<X,Int<T1>,Int<T3>>, Shape<X>>;
  using IndexOrder   = Step<Step<_3,_0>, Step<_2,_4,_1>, Step<_5>>;
  using StrideOrder  = Step<Step<_4,_2>, Step<_0,_3,_1>, Step<_5>>;
};

} // namespace example