/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#pragma once

#ifndef __OPENCV_CUDEV_PTR2D_GPUMAT_DETAIL_HPP__
#define __OPENCV_CUDEV_PTR2D_GPUMAT_DETAIL_HPP__

#include "../gpumat.hpp"

namespace cv { namespace cudev {

// Out-of-line definitions for the typed GpuMat_<T> wrapper declared in ../gpumat.hpp.

template <typename T>
__host__ GpuMat_<T>::GpuMat_(Allocator* allocator)
    : GpuMat(allocator)
{
    flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<T>::type;
}

template <typename T>
__host__ GpuMat_<T>::GpuMat_(int arows, int acols, Allocator* allocator)
    : GpuMat(arows, acols, DataType<T>::type, allocator)
{
}

template <typename T>
__host__ GpuMat_<T>::GpuMat_(Size asize, Allocator* allocator)
    : GpuMat(asize.height, asize.width, DataType<T>::type, allocator)
{
}

template <typename T>
__host__ GpuMat_<T>::GpuMat_(int arows, int acols, Scalar val, Allocator* allocator)
    : GpuMat(arows, acols, DataType<T>::type, val, allocator)
{
}

template <typename T>
__host__ GpuMat_<T>::GpuMat_(Size asize, Scalar val, Allocator* allocator)
    : GpuMat(asize.height, asize.width, DataType<T>::type, val, allocator)
{
}

template <typename T>
__host__ GpuMat_<T>::GpuMat_(const GpuMat_& m)
    : GpuMat(m)
{
}

// Conversion from an untyped GpuMat: share the data if the type already matches,
// reinterpret the channel count via reshape() if only the depth matches,
// otherwise convert element-wise with convertTo().
template <typename T>
__host__ GpuMat_<T>::GpuMat_(const GpuMat& m, Allocator* allocator)
    : GpuMat(allocator)
{
    flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<T>::type;

    if (DataType<T>::type == m.type())
    {
        GpuMat::operator =(m);
        return;
    }

    if (DataType<T>::depth == m.depth())
    {
        GpuMat::operator =(m.reshape(DataType<T>::channels, m.rows));
        return;
    }

    CV_Assert( DataType<T>::channels == m.channels() );
    m.convertTo(*this, type());
}

template <typename T>
__host__ GpuMat_<T>::GpuMat_(int arows, int acols, T* adata, size_t astep)
    : GpuMat(arows, acols, DataType<T>::type, adata, astep)
{
}

template <typename T>
__host__ GpuMat_<T>::GpuMat_(Size asize, T* adata, size_t astep)
    : GpuMat(asize.height, asize.width, DataType<T>::type, adata, astep)
{
}

template <typename T>
__host__ GpuMat_<T>::GpuMat_(const GpuMat_& m, Range arowRange, Range acolRange)
    : GpuMat(m, arowRange, acolRange)
{
}

template <typename T>
__host__ GpuMat_<T>::GpuMat_(const GpuMat_& m, Rect roi)
    : GpuMat(m, roi)
{
}

template <typename T>
__host__ GpuMat_<T>::GpuMat_(InputArray arr, Allocator* allocator)
    : GpuMat(allocator)
{
    flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<T>::type;
    upload(arr);
}

template <typename T>
__host__ GpuMat_<T>& GpuMat_<T>::operator =(const GpuMat_& m)
{
    GpuMat::operator =(m);
    return *this;
}

template <typename T>
__host__ void GpuMat_<T>::create(int arows, int acols)
{
    GpuMat::create(arows, acols, DataType<T>::type);
}

template <typename T>
__host__ void GpuMat_<T>::create(Size asize)
{
    GpuMat::create(asize, DataType<T>::type);
}

template <typename T>
__host__ void GpuMat_<T>::swap(GpuMat_& mat)
{
    GpuMat::swap(mat);
}

// upload() requires the host data to already have the matching type.
template <typename T>
__host__ void GpuMat_<T>::upload(InputArray arr)
{
    CV_Assert( arr.type() == DataType<T>::type );
    GpuMat::upload(arr);
}

template <typename T>
__host__ void GpuMat_<T>::upload(InputArray arr, Stream& stream)
{
    CV_Assert( arr.type() == DataType<T>::type );
    GpuMat::upload(arr, stream);
}

// Implicit conversions to the raw device-pointer wrappers consumed by device code.
template <typename T>
__host__ GpuMat_<T>::operator GlobPtrSz<T>() const
{
    return globPtr((T*) data, step, rows, cols);
}

template <typename T>
__host__ GpuMat_<T>::operator GlobPtr<T>() const
{
    return globPtr((T*) data, step);
}

template <typename T>
__host__ GpuMat_<T> GpuMat_<T>::clone() const
{
    return GpuMat_(GpuMat::clone());
}

template <typename T>
__host__ GpuMat_<T> GpuMat_<T>::row(int y) const
{
    return GpuMat_(*this, Range(y, y+1), Range::all());
}

template <typename T>
__host__ GpuMat_<T> GpuMat_<T>::col(int x) const
{
    return GpuMat_(*this, Range::all(), Range(x, x+1));
}

template <typename T>
__host__ GpuMat_<T> GpuMat_<T>::rowRange(int startrow, int endrow) const
{
    return GpuMat_(*this, Range(startrow, endrow), Range::all());
}

template <typename T>
__host__ GpuMat_<T> GpuMat_<T>::rowRange(Range r) const
{
    return GpuMat_(*this, r, Range::all());
}

template <typename T>
__host__ GpuMat_<T> GpuMat_<T>::colRange(int startcol, int endcol) const
{
    return GpuMat_(*this, Range::all(), Range(startcol, endcol));
}

template <typename T>
__host__ GpuMat_<T> GpuMat_<T>::colRange(Range r) const
{
    return GpuMat_(*this, Range::all(), r);
}

template <typename T>
__host__ GpuMat_<T> GpuMat_<T>::operator ()(Range _rowRange, Range _colRange) const
{
    return GpuMat_(*this, _rowRange, _colRange);
}

template <typename T>
__host__ GpuMat_<T> GpuMat_<T>::operator ()(Rect roi) const
{
    return GpuMat_(*this, roi);
}

template <typename T>
__host__ GpuMat_<T>& GpuMat_<T>::adjustROI(int dtop, int dbottom, int dleft, int dright)
{
    return (GpuMat_<T>&)(GpuMat::adjustROI(dtop, dbottom, dleft, dright));
}

template <typename T>
__host__ size_t GpuMat_<T>::elemSize() const
{
    CV_DbgAssert( GpuMat::elemSize() == sizeof(T) );
    return sizeof(T);
}

template <typename T>
__host__ size_t GpuMat_<T>::elemSize1() const
{
    CV_DbgAssert( GpuMat::elemSize1() == sizeof(T) / DataType<T>::channels );
    return sizeof(T) / DataType<T>::channels;
}

template <typename T>
__host__ int GpuMat_<T>::type() const
{
    CV_DbgAssert( GpuMat::type() == DataType<T>::type );
    return DataType<T>::type;
}

template <typename T>
__host__ int GpuMat_<T>::depth() const
{
    CV_DbgAssert( GpuMat::depth() == DataType<T>::depth );
    return DataType<T>::depth;
}

template <typename T>
__host__ int GpuMat_<T>::channels() const
{
    CV_DbgAssert( GpuMat::channels() == DataType<T>::channels );
    return DataType<T>::channels;
}

template <typename T>
__host__ size_t GpuMat_<T>::stepT() const
{
    return step / elemSize();
}

template <typename T>
__host__ size_t GpuMat_<T>::step1() const
{
    return step / elemSize1();
}

template <typename T>
__host__ T* GpuMat_<T>::operator [](int y)
{
    return (T*)ptr(y);
}

template <typename T>
__host__ const T* GpuMat_<T>::operator [](int y) const
{
    return (const T*)ptr(y);
}

// Construction and assignment from lazy cudev expressions (Expr<Body>).
template <typename T> template <class Body>
__host__ GpuMat_<T>::GpuMat_(const Expr<Body>& expr)
    : GpuMat()
{
    flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<T>::type;
    *this = expr;
}

template <typename T> template <class Body>
__host__ GpuMat_<T>& GpuMat_<T>::operator =(const Expr<Body>& expr)
{
    expr.body.assignTo(*this);
    return *this;
}

template <typename T> template <class Body>
__host__ GpuMat_<T>& GpuMat_<T>::assign(const Expr<Body>& expr, Stream& stream)
{
    expr.body.assignTo(*this, stream);
    return *this;
}

}}
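// ---------------------------------------------------------------------------
// Usage sketch (illustrative comment only, not part of the implementation):
// how the GpuMat_<T> members defined above are typically combined. The host
// matrix name is arbitrary, and the grid primitive shown (gridTransformUnary
// with sqr_func) is an assumption about what the cudev public headers provide
// in your build; any device functor and grid algorithm works the same way.
//
//     #include <opencv2/cudev.hpp>
//
//     void squareImage(const cv::Mat& hostSrc)        // expects a CV_32FC1 matrix
//     {
//         // The InputArray constructor uploads hostSrc and tags the matrix as float.
//         cv::cudev::GpuMat_<float> d_src(hostSrc);
//         cv::cudev::GpuMat_<float> d_dst(d_src.size());
//
//         // GpuMat_<T> converts implicitly to GlobPtrSz<T>, so it can be passed
//         // straight to cudev grid algorithms.
//         cv::cudev::gridTransformUnary(d_src, d_dst, cv::cudev::sqr_func<float>());
//
//         cv::Mat hostDst;
//         d_dst.download(hostDst);
//     }
// ---------------------------------------------------------------------------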
// Input / Output Arrays

namespace cv {

// A typed GpuMat_ participates in the InputArray / OutputArray mechanism with its
// element type fixed; a const reference additionally fixes the size.

template <typename _Tp>
__host__ _InputArray::_InputArray(const cudev::GpuMat_<_Tp>& m)
    : flags(FIXED_TYPE + CUDA_GPU_MAT + DataType<_Tp>::type), obj((void*)&m)
{}

template <typename _Tp>
__host__ _OutputArray::_OutputArray(cudev::GpuMat_<_Tp>& m)
    : _InputArray(m)
{}

template <typename _Tp>
__host__ _OutputArray::_OutputArray(const cudev::GpuMat_<_Tp>& m)
    : _InputArray(m)
{
    flags |= FIXED_SIZE;
}

}

#endif