/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
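// Grid-wide reduction in three stages:
//   1. Each thread privately accumulates the values of a PATCH_X x PATCH_Y set of
//      pixels assigned to it (see the reduce() kernel near the bottom of this file).
//   2. blockReduce() combines the per-thread partial results within a block through
//      shared memory.
//   3. Thread 0 of every block merges its block's result into the global output with
//      atomic operations (AtomicUnroll / Op::atomic).
// Note (an inference from the code, not stated in the original header): the kernels
// only *merge* into *result, so the caller is expected to have initialized the output
// buffer beforehand (e.g. zero for sum, +/-max for min/max).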
#pragma once

#ifndef __OPENCV_CUDEV_GRID_REDUCE_DETAIL_HPP__
#define __OPENCV_CUDEV_GRID_REDUCE_DETAIL_HPP__

#include "../../common.hpp"
#include "../../util/tuple.hpp"
#include "../../util/saturate_cast.hpp"
#include "../../util/atomic.hpp"
#include "../../util/vec_traits.hpp"
#include "../../util/type_traits.hpp"
#include "../../util/limits.hpp"
#include "../../block/reduce.hpp"
#include "../../functional/functional.hpp"
#include "../../ptr2d/traits.hpp"

namespace cv { namespace cudev {

namespace grid_reduce_detail
{
    // Unroll
    //
    // Adapts a cn-channel work type to the channel-wise interface of blockReduce():
    // smem() splits one shared array into cn banks of BLOCK_SIZE elements, res()
    // ties the vector's channels into a tuple of references, and op() replicates
    // the reduction operator once per channel.

    template <int cn> struct Unroll;

    template <> struct Unroll<1>
    {
        template <int BLOCK_SIZE, typename R>
        __device__ __forceinline__ static volatile R* smem(R* ptr)
        {
            return ptr;
        }

        template <typename R>
        __device__ __forceinline__ static R& res(R& val)
        {
            return val;
        }

        template <class Op>
        __device__ __forceinline__ static const Op& op(const Op& aop)
        {
            return aop;
        }
    };

    template <> struct Unroll<2>
    {
        template <int BLOCK_SIZE, typename R>
        __device__ __forceinline__ static tuple<volatile R*, volatile R*> smem(R* ptr)
        {
            return smem_tuple(ptr, ptr + BLOCK_SIZE);
        }

        template <typename R>
        __device__ __forceinline__ static tuple<typename VecTraits<R>::elem_type&,
                                                typename VecTraits<R>::elem_type&> res(R& val)
        {
            return tie(val.x, val.y);
        }

        template <class Op>
        __device__ __forceinline__ static tuple<Op, Op> op(const Op& aop)
        {
            return make_tuple(aop, aop);
        }
    };

    template <> struct Unroll<3>
    {
        template <int BLOCK_SIZE, typename R>
        __device__ __forceinline__ static tuple<volatile R*, volatile R*, volatile R*> smem(R* ptr)
        {
            return smem_tuple(ptr, ptr + BLOCK_SIZE, ptr + 2 * BLOCK_SIZE);
        }

        template <typename R>
        __device__ __forceinline__ static tuple<typename VecTraits<R>::elem_type&,
                                                typename VecTraits<R>::elem_type&,
                                                typename VecTraits<R>::elem_type&> res(R& val)
        {
            return tie(val.x, val.y, val.z);
        }

        template <class Op>
        __device__ __forceinline__ static tuple<Op, Op, Op> op(const Op& aop)
        {
            return make_tuple(aop, aop, aop);
        }
    };

    template <> struct Unroll<4>
    {
        template <int BLOCK_SIZE, typename R>
        __device__ __forceinline__ static tuple<volatile R*, volatile R*, volatile R*, volatile R*> smem(R* ptr)
        {
            return smem_tuple(ptr, ptr + BLOCK_SIZE, ptr + 2 * BLOCK_SIZE, ptr + 3 * BLOCK_SIZE);
        }

        template <typename R>
        __device__ __forceinline__ static tuple<typename VecTraits<R>::elem_type&,
                                                typename VecTraits<R>::elem_type&,
                                                typename VecTraits<R>::elem_type&,
                                                typename VecTraits<R>::elem_type&> res(R& val)
        {
            return tie(val.x, val.y, val.z, val.w);
        }

        template <class Op>
        __device__ __forceinline__ static tuple<Op, Op, Op, Op> op(const Op& aop)
        {
            return make_tuple(aop, aop, aop, aop);
        }
    };
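    // For example (values chosen only for illustration): with cn == 3 and
    // BLOCK_SIZE == 256, Unroll<3>::smem(ptr) carves a single __shared__ array of
    // 3 * 256 elements into the banks (ptr, ptr + 256, ptr + 512), so a 3-channel
    // sum is reduced as three independent scalar reductions in one blockReduce()
    // call.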
    // AtomicUnroll
    //
    // Merges a block's partial result into the global accumulator with one atomic
    // operation per channel.

    template <typename R, int cn> struct AtomicUnroll;

    template <typename R> struct AtomicUnroll<R, 1>
    {
        __device__ __forceinline__ static void add(R* ptr, R val)
        {
            atomicAdd(ptr, val);
        }

        __device__ __forceinline__ static void min(R* ptr, R val)
        {
            atomicMin(ptr, val);
        }

        __device__ __forceinline__ static void max(R* ptr, R val)
        {
            atomicMax(ptr, val);
        }
    };

    template <typename R> struct AtomicUnroll<R, 2>
    {
        typedef typename MakeVec<R, 2>::type val_type;

        __device__ __forceinline__ static void add(R* ptr, val_type val)
        {
            atomicAdd(ptr, val.x);
            atomicAdd(ptr + 1, val.y);
        }

        __device__ __forceinline__ static void min(R* ptr, val_type val)
        {
            atomicMin(ptr, val.x);
            atomicMin(ptr + 1, val.y);
        }

        __device__ __forceinline__ static void max(R* ptr, val_type val)
        {
            atomicMax(ptr, val.x);
            atomicMax(ptr + 1, val.y);
        }
    };

    template <typename R> struct AtomicUnroll<R, 3>
    {
        typedef typename MakeVec<R, 3>::type val_type;

        __device__ __forceinline__ static void add(R* ptr, val_type val)
        {
            atomicAdd(ptr, val.x);
            atomicAdd(ptr + 1, val.y);
            atomicAdd(ptr + 2, val.z);
        }

        __device__ __forceinline__ static void min(R* ptr, val_type val)
        {
            atomicMin(ptr, val.x);
            atomicMin(ptr + 1, val.y);
            atomicMin(ptr + 2, val.z);
        }

        __device__ __forceinline__ static void max(R* ptr, val_type val)
        {
            atomicMax(ptr, val.x);
            atomicMax(ptr + 1, val.y);
            atomicMax(ptr + 2, val.z);
        }
    };

    template <typename R> struct AtomicUnroll<R, 4>
    {
        typedef typename MakeVec<R, 4>::type val_type;

        __device__ __forceinline__ static void add(R* ptr, val_type val)
        {
            atomicAdd(ptr, val.x);
            atomicAdd(ptr + 1, val.y);
            atomicAdd(ptr + 2, val.z);
            atomicAdd(ptr + 3, val.w);
        }

        __device__ __forceinline__ static void min(R* ptr, val_type val)
        {
            atomicMin(ptr, val.x);
            atomicMin(ptr + 1, val.y);
            atomicMin(ptr + 2, val.z);
            atomicMin(ptr + 3, val.w);
        }

        __device__ __forceinline__ static void max(R* ptr, val_type val)
        {
            atomicMax(ptr, val.x);
            atomicMax(ptr + 1, val.y);
            atomicMax(ptr + 2, val.z);
            atomicMax(ptr + 3, val.w);
        }
    };

    // SumReductor
    //
    // Per-thread accumulator for sum(): reduceVal() adds incoming values, while
    // reduceGrid() folds the per-thread sums across the block and atomically adds
    // the block total into the global result, channel by channel.

    template <typename src_type, typename work_type> struct SumReductor
    {
        typedef typename VecTraits<work_type>::elem_type work_elem_type;
        enum { cn = VecTraits<src_type>::cn };

        work_type sum;

        __device__ __forceinline__ SumReductor()
        {
            sum = VecTraits<work_type>::all(0);
        }

        __device__ __forceinline__ void reduceVal(typename TypeTraits<src_type>::parameter_type srcVal)
        {
            sum = sum + saturate_cast<work_type>(srcVal);
        }

        template <int BLOCK_SIZE>
        __device__ void reduceGrid(work_elem_type* result, int tid)
        {
            __shared__ work_elem_type smem[BLOCK_SIZE * cn];

            blockReduce<BLOCK_SIZE>(Unroll<cn>::template smem<BLOCK_SIZE>(smem), Unroll<cn>::res(sum), tid, Unroll<cn>::op(plus<work_elem_type>()));

            if (tid == 0)
                AtomicUnroll<work_elem_type, cn>::add(result, sum);
        }
    };

    // MinMaxReductor
    //
    // minop/maxop pair the reduction operator with its identity value and the
    // matching global atomic. The `both` tag selects the specialization that tracks
    // the minimum and maximum simultaneously and stores them to result[0] and
    // result[1].

    template <typename T> struct minop : minimum<T>
    {
        __device__ __forceinline__ static T initial()
        {
            return numeric_limits<T>::max();
        }

        __device__ __forceinline__ static void atomic(T* result, T myval)
        {
            atomicMin(result, myval);
        }
    };

    template <typename T> struct maxop : maximum<T>
    {
        __device__ __forceinline__ static T initial()
        {
            return -numeric_limits<T>::max();
        }

        __device__ __forceinline__ static void atomic(T* result, T myval)
        {
            atomicMax(result, myval);
        }
    };

    struct both { };

    template <class Op, typename src_type, typename work_type> struct MinMaxReductor
    {
        work_type myval;

        __device__ __forceinline__ MinMaxReductor()
        {
            myval = Op::initial();
        }

        __device__ __forceinline__ void reduceVal(typename TypeTraits<src_type>::parameter_type srcVal)
        {
            Op op;
            myval = op(myval, srcVal);
        }

        template <int BLOCK_SIZE>
        __device__ void reduceGrid(work_type* result, int tid)
        {
            __shared__ work_type smem[BLOCK_SIZE];

            Op op;
            blockReduce<BLOCK_SIZE>(smem, myval, tid, op);

            if (tid == 0)
                Op::atomic(result, myval);
        }
    };

    template <typename src_type, typename work_type> struct MinMaxReductor<both, src_type, work_type>
    {
        work_type mymin;
        work_type mymax;

        __device__ __forceinline__ MinMaxReductor()
        {
            mymin = numeric_limits<work_type>::max();
            mymax = -numeric_limits<work_type>::max();
        }

        __device__ __forceinline__ void reduceVal(typename TypeTraits<src_type>::parameter_type srcVal)
        {
            minimum<work_type> minOp;
            maximum<work_type> maxOp;
            mymin = minOp(mymin, srcVal);
            mymax = maxOp(mymax, srcVal);
        }

        template <int BLOCK_SIZE>
        __device__ void reduceGrid(work_type* result, int tid)
        {
            __shared__ work_type sminval[BLOCK_SIZE];
            __shared__ work_type smaxval[BLOCK_SIZE];

            minimum<work_type> minOp;
            maximum<work_type> maxOp;
            blockReduce<BLOCK_SIZE>(smem_tuple(sminval, smaxval), tie(mymin, mymax), tid, make_tuple(minOp, maxOp));

            if (tid == 0)
            {
                atomicMin(result, mymin);
                atomicMax(result + 1, mymax);
            }
        }
    };
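    // The reduce() kernel below assigns each thread a PATCH_X x PATCH_Y set of
    // pixels strided by the block dimensions, feeds every unmasked value to the
    // Reductor, and then lets the Reductor finish with a block-wide reduction plus
    // an atomic merge into the global result.
    //
    // A minimal sketch of the Policy type consumed by the host-side launcher (the
    // member names mirror what the launcher reads; the concrete values are only an
    // example, not part of this file):
    //
    //   struct ExamplePolicy
    //   {
    //       enum { block_size_x = 32, block_size_y = 8 };
    //       enum { patch_size_x = 4, patch_size_y = 4 };
    //   };
    //
    // With these values each 32x8 block covers a 128x32 patch of the image, and the
    // grid is sized with divUp() so all rows x cols pixels are visited.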
    // glob_reduce

    template <class Reductor, int BLOCK_SIZE, int PATCH_X, int PATCH_Y, class SrcPtr, typename ResType, class MaskPtr>
    __global__ void reduce(const SrcPtr src, ResType* result, const MaskPtr mask, const int rows, const int cols)
    {
        // Top-left coordinate of this thread's patch.
        const int x0 = blockIdx.x * blockDim.x * PATCH_X + threadIdx.x;
        const int y0 = blockIdx.y * blockDim.y * PATCH_Y + threadIdx.y;

        Reductor reductor;

        for (int i = 0, y = y0; i < PATCH_Y && y < rows; ++i, y += blockDim.y)
        {
            for (int j = 0, x = x0; j < PATCH_X && x < cols; ++j, x += blockDim.x)
            {
                if (mask(y, x))
                {
                    reductor.reduceVal(src(y, x));
                }
            }
        }

        const int tid = threadIdx.y * blockDim.x + threadIdx.x;

        reductor.template reduceGrid<BLOCK_SIZE>(result, tid);
    }

    template <class Reductor, class Policy, class SrcPtr, typename ResType, class MaskPtr>
    __host__ void reduce(const SrcPtr& src, ResType* result, const MaskPtr& mask, int rows, int cols, cudaStream_t stream)
    {
        const dim3 block(Policy::block_size_x, Policy::block_size_y);
        const dim3 grid(divUp(cols, block.x * Policy::patch_size_x), divUp(rows, block.y * Policy::patch_size_y));

        reduce<Reductor, Policy::block_size_x * Policy::block_size_y, Policy::patch_size_x, Policy::patch_size_y><<<grid, block, 0, stream>>>(src, result, mask, rows, cols);
        CV_CUDEV_SAFE_CALL( cudaGetLastError() );

        // The default stream is synchronized, so the result is ready on return.
        if (stream == 0)
            CV_CUDEV_SAFE_CALL( cudaDeviceSynchronize() );
    }

    // callers

    template <class Policy, class SrcPtr, typename ResType, class MaskPtr>
    __host__ void sum(const SrcPtr& src, ResType* result, const MaskPtr& mask, int rows, int cols, cudaStream_t stream)
    {
        typedef typename PtrTraits<SrcPtr>::value_type src_type;
        typedef typename VecTraits<ResType>::elem_type res_elem_type;

        reduce<SumReductor<src_type, ResType>, Policy>(src, (res_elem_type*) result, mask, rows, cols, stream);
    }

    template <class Policy, class SrcPtr, typename ResType, class MaskPtr>
    __host__ void minVal(const SrcPtr& src, ResType* result, const MaskPtr& mask, int rows, int cols, cudaStream_t stream)
    {
        typedef typename PtrTraits<SrcPtr>::value_type src_type;

        reduce<MinMaxReductor<minop<ResType>, src_type, ResType>, Policy>(src, result, mask, rows, cols, stream);
    }

    template <class Policy, class SrcPtr, typename ResType, class MaskPtr>
    __host__ void maxVal(const SrcPtr& src, ResType* result, const MaskPtr& mask, int rows, int cols, cudaStream_t stream)
    {
        typedef typename PtrTraits<SrcPtr>::value_type src_type;

        reduce<MinMaxReductor<maxop<ResType>, src_type, ResType>, Policy>(src, result, mask, rows, cols, stream);
    }

    template <class Policy, class SrcPtr, typename ResType, class MaskPtr>
    __host__ void minMaxVal(const SrcPtr& src, ResType* result, const MaskPtr& mask, int rows, int cols, cudaStream_t stream)
    {
        typedef typename PtrTraits<SrcPtr>::value_type src_type;

        reduce<MinMaxReductor<both, src_type, ResType>, Policy>(src, result, mask, rows, cols, stream);
    }
}

}}

#endif