/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_IMGPROC_TYPES_C_H__
#define __OPENCV_IMGPROC_TYPES_C_H__
#include "opencv2/core/core_c.h"
#ifdef __cplusplus
extern "C" {
#endif
/** @addtogroup imgproc_c
@{
*/
/** Connected component structure */
typedef struct CvConnectedComp
{
double area; /**< area of the connected component */
CvScalar value; /**< average color of the connected component */
CvRect rect; /**< ROI of the component */
CvSeq* contour; /**< optional component boundary
(the contour might have child contours corresponding to the holes) */
}
CvConnectedComp;
/** Spatial and central moments */
typedef struct CvMoments
{
double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /**< spatial moments */
double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /**< central moments */
double inv_sqrt_m00; /**< m00 != 0 ? 1/sqrt(m00) : 0 */
#ifdef __cplusplus
CvMoments() {}
CvMoments(const cv::Moments& m)
{
m00 = m.m00; m10 = m.m10; m01 = m.m01;
m20 = m.m20; m11 = m.m11; m02 = m.m02;
m30 = m.m30; m21 = m.m21; m12 = m.m12; m03 = m.m03;
mu20 = m.mu20; mu11 = m.mu11; mu02 = m.mu02;
mu30 = m.mu30; mu21 = m.mu21; mu12 = m.mu12; mu03 = m.mu03;
double am00 = std::abs(m.m00);
inv_sqrt_m00 = am00 > DBL_EPSILON ? 1./std::sqrt(am00) : 0;
}
operator cv::Moments() const
{
return cv::Moments(m00, m10, m01, m20, m11, m02, m30, m21, m12, m03);
}
#endif
}
CvMoments;
/** Hu invariants */
typedef struct CvHuMoments
{
double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /**< Hu invariants */
}
CvHuMoments;
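/* Example (illustrative sketch, not part of the original header): computing the Hu
   invariants of a contour with the C API. Assumes cvMoments() and cvGetHuMoments()
   from imgproc_c.h are available; "contour" is a placeholder CvSeq* of points.

    CvMoments m;
    CvHuMoments hu;
    cvMoments(contour, &m, 0);      // spatial and central moments of the contour
    cvGetHuMoments(&m, &hu);        // fills hu.hu1 ... hu.hu7
*/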
/** Template matching methods */
enum
{
CV_TM_SQDIFF =0,
CV_TM_SQDIFF_NORMED =1,
CV_TM_CCORR =2,
CV_TM_CCORR_NORMED =3,
CV_TM_CCOEFF =4,
CV_TM_CCOEFF_NORMED =5
};
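/* Example (illustrative sketch, not part of the original header): using the template
   matching constants with cvMatchTemplate() from imgproc_c.h. "img" and "templ" are
   placeholder 8-bit single-channel IplImage* inputs; the chosen method is an assumption.

    CvMat* result = cvCreateMat(img->height - templ->height + 1,
                                img->width  - templ->width  + 1, CV_32FC1);
    cvMatchTemplate(img, templ, result, CV_TM_CCOEFF_NORMED);
    double min_val, max_val;
    CvPoint min_loc, max_loc;
    cvMinMaxLoc(result, &min_val, &max_val, &min_loc, &max_loc, NULL);
    // for CV_TM_CCOEFF_NORMED the best match is at max_loc;
    // for CV_TM_SQDIFF / CV_TM_SQDIFF_NORMED it is at min_loc
*/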
typedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param );
/** Contour retrieval modes */
enum
{
CV_RETR_EXTERNAL=0,
CV_RETR_LIST=1,
CV_RETR_CCOMP=2,
CV_RETR_TREE=3,
CV_RETR_FLOODFILL=4
};
/** Contour approximation methods */
enum
{
CV_CHAIN_CODE=0,
CV_CHAIN_APPROX_NONE=1,
CV_CHAIN_APPROX_SIMPLE=2,
CV_CHAIN_APPROX_TC89_L1=3,
CV_CHAIN_APPROX_TC89_KCOS=4,
CV_LINK_RUNS=5
};
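/* Example (illustrative sketch, not part of the original header): combining a retrieval
   mode and an approximation method in cvFindContours() from imgproc_c.h. "binary" is a
   placeholder 8-bit single-channel IplImage* (the image is modified by the call).

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contours = NULL;
    int n = cvFindContours(binary, storage, &contours, sizeof(CvContour),
                           CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
    for( CvSeq* c = contours; c != NULL; c = c->h_next )
    {
        // top-level contours; holes/children are reachable through c->v_next
    }
    cvReleaseMemStorage(&storage);
*/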
/*
Internal structure that is used for sequential retrieval of contours from the image.
It supports both hierarchical and plane variants of the Suzuki algorithm.
*/
typedef struct _CvContourScanner* CvContourScanner;
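/* Example (illustrative sketch, not part of the original header): retrieving contours one
   at a time through a CvContourScanner, using cvStartFindContours(), cvFindNextContour()
   and cvEndFindContours() from imgproc_c.h. "binary" and "storage" are placeholders.

    CvContourScanner scanner = cvStartFindContours(binary, storage, sizeof(CvContour),
                                                   CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE,
                                                   cvPoint(0,0));
    CvSeq* contour;
    while( (contour = cvFindNextContour(scanner)) != NULL )
    {
        // inspect, filter or substitute the current contour here
    }
    CvSeq* first_contour = cvEndFindContours(&scanner);
*/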
/** Freeman chain reader state */
typedef struct CvChainPtReader
{
CV_SEQ_READER_FIELDS()
char code;
CvPoint pt;
schar deltas[8][2];
}
CvChainPtReader;
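/* Example (illustrative sketch, not part of the original header): walking a Freeman chain
   with a CvChainPtReader. Assumes cvStartReadChainPoints() and cvReadChainPoint() from
   imgproc_c.h; "chain" is a placeholder CvChain* produced by contour retrieval with
   CV_CHAIN_CODE.

    CvChainPtReader reader;
    cvStartReadChainPoints(chain, &reader);
    for( int i = 0; i < chain->total; i++ )
    {
        CvPoint pt = cvReadChainPoint(&reader);  // next point decoded from the chain codes
    }
*/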
/** initializes 8-element array for fast access to 3x3 neighborhood of a pixel */
#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \
((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \
(deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \
(deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \
(deltas)[6] = (step), (deltas)[7] = (step) + (nch))
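/* Example (illustrative sketch, not part of the original header): using CV_INIT_3X3_DELTAS
   to address the 8 neighbours of a pixel in an 8-bit single-channel IplImage. "img", "x"
   and "y" are placeholders.

    int deltas[8];
    CV_INIT_3X3_DELTAS(deltas, img->widthStep, 1);   // row step in bytes, 1 channel
    uchar* center = &CV_IMAGE_ELEM(img, uchar, y, x);
    uchar right = center[deltas[0]];                 // pixel at (x+1, y)
    uchar up    = center[deltas[2]];                 // pixel at (x, y-1)
*/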
/** Contour approximation algorithms */
enum
{
CV_POLY_APPROX_DP = 0
};
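/* Example (illustrative sketch, not part of the original header): polygonal approximation
   of a contour with cvApproxPoly() from imgproc_c.h using the Douglas-Peucker method.
   "contour" and "storage" are placeholders; the 2% accuracy factor is an assumption.

    CvSeq* approx = cvApproxPoly(contour, sizeof(CvContour), storage,
                                 CV_POLY_APPROX_DP,
                                 cvContourPerimeter(contour) * 0.02, 0);
*/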
/** @brief Shape matching methods
\f$A\f$ denotes object1, \f$B\f$ denotes object2,
\f$\begin{array}{l} m^A_i = \mathrm{sign} (h^A_i) \cdot \log{h^A_i} \\ m^B_i = \mathrm{sign} (h^B_i) \cdot \log{h^B_i} \end{array}\f$
and \f$h^A_i, h^B_i\f$ are the Hu moments of \f$A\f$ and \f$B\f$, respectively.
*/
enum ShapeMatchModes
{
CV_CONTOURS_MATCH_I1 =1, //!< \f[I_1(A,B) = \sum _{i=1...7} \left | \frac{1}{m^A_i} - \frac{1}{m^B_i} \right |\f]
CV_CONTOURS_MATCH_I2 =2, //!< \f[I_2(A,B) = \sum _{i=1...7} \left | m^A_i - m^B_i \right |\f]
CV_CONTOURS_MATCH_I3 =3 //!< \f[I_3(A,B) = \max _{i=1...7} \frac{ \left| m^A_i - m^B_i \right| }{ \left| m^A_i \right| }\f]
};
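/* Example (illustrative sketch, not part of the original header): comparing two contours
   with cvMatchShapes() from imgproc_c.h using one of the metrics above. "contourA" and
   "contourB" are placeholder CvSeq* contours.

    double dissimilarity = cvMatchShapes(contourA, contourB, CV_CONTOURS_MATCH_I1, 0);
    // smaller values mean more similar shapes under the chosen metric
*/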
/** Shape orientation */
enum
{
CV_CLOCKWISE =1,
CV_COUNTER_CLOCKWISE =2
};
/** Convexity defect */
typedef struct CvConvexityDefect
{
CvPoint* start; /**< point of the contour where the defect begins */
CvPoint* end; /**< point of the contour where the defect ends */
CvPoint* depth_point; /**< the point within the defect that is farthest from the convex hull */
float depth; /**< distance between the farthest point and the convex hull */
} CvConvexityDefect;
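/* Example (illustrative sketch, not part of the original header): computing convexity
   defects of a contour with cvConvexHull2() and cvConvexityDefects() from imgproc_c.h.
   "contour" and "storage" are placeholders; the hull is requested as pointers/indices
   (return_points = 0), which is what cvConvexityDefects() expects.

    CvSeq* hull    = cvConvexHull2(contour, storage, CV_CLOCKWISE, 0);
    CvSeq* defects = cvConvexityDefects(contour, hull, storage);
    for( int i = 0; i < defects->total; i++ )
    {
        CvConvexityDefect* d = (CvConvexityDefect*)cvGetSeqElem(defects, i);
        // d->start, d->end, d->depth_point and d->depth describe one defect
    }
*/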
/** Histogram comparison methods */
enum
{
CV_COMP_CORREL =0,
CV_COMP_CHISQR =1,
CV_COMP_INTERSECT =2,
CV_COMP_BHATTACHARYYA =3,
CV_COMP_HELLINGER =CV_COMP_BHATTACHARYYA,
CV_COMP_CHISQR_ALT =4,
CV_COMP_KL_DIV =5
};
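/* Example (illustrative sketch, not part of the original header): comparing two histograms
   with cvCompareHist() from imgproc_c.h. "hist1" and "hist2" are placeholder CvHistogram*
   built and normalized elsewhere.

    double corr  = cvCompareHist(hist1, hist2, CV_COMP_CORREL);        // 1.0 for identical
    double bhatt = cvCompareHist(hist1, hist2, CV_COMP_BHATTACHARYYA); // 0.0 for identical
*/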
/** Mask size for distance transform */
enum
{
CV_DIST_MASK_3 =3,
CV_DIST_MASK_5 =5,
CV_DIST_MASK_PRECISE =0
};
/** Content of output label array: connected components or pixels */
enum
{
CV_DIST_LABEL_CCOMP = 0,
CV_DIST_LABEL_PIXEL = 1
};
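/* Example (illustrative sketch, not part of the original header): computing a distance map
   and a label map with cvDistTransform() from imgproc_c.h. "binary" is a placeholder 8-bit
   single-channel IplImage*; CV_DIST_L2 comes from the distance-type enum just below.

    IplImage* dist   = cvCreateImage(cvGetSize(binary), IPL_DEPTH_32F, 1);
    IplImage* labels = cvCreateImage(cvGetSize(binary), IPL_DEPTH_32S, 1);
    cvDistTransform(binary, dist, CV_DIST_L2, CV_DIST_MASK_5,
                    NULL, labels, CV_DIST_LABEL_CCOMP);
*/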
/** Distance types for Distance Transform and M-estimators */
enum
{
CV_DIST_USER =-1, /**< User defined distance */
CV_DIST_L1 =1, /**< distance = |x1-x2| + |y1-y2| */
CV_DIST_L2 =2, /**< the simple Euclidean distance */
CV_DIST_C =3, /**< distance = max(|x1-x2|,|y1-y2|) */
CV_DIST_L12 =4, /**< L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1) */
CV_DIST_FAIR =5, /**< distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */
CV_DIST_WELSCH =6, /**< distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */
CV_DIST_HUBER =7 /**< distance = |x|<c ? x^2/2 : c(|x|-c/2), c = 1.345 */
};
/** Threshold types */
enum
{
CV_THRESH_BINARY =0, /**< value = value > threshold ? max_value : 0 */
CV_THRESH_BINARY_INV =1, /**< value = value > threshold ? 0 : max_value */
CV_THRESH_TRUNC =2, /**< value = value > threshold ? threshold : value */
CV_THRESH_TOZERO =3, /**< value = value > threshold ? value : 0 */
CV_THRESH_TOZERO_INV =4, /**< value = value > threshold ? 0 : value */
CV_THRESH_MASK =7,
CV_THRESH_OTSU =8, /**< use Otsu algorithm to choose the optimal threshold value;
combine the flag with one of the above CV_THRESH_* values */
CV_THRESH_TRIANGLE =16 /**< use Triangle algorithm to choose the optimal threshold value;
combine the flag with one of the above CV_THRESH_* values, but not
with CV_THRESH_OTSU */
};
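/* Example (illustrative sketch, not part of the original header): global thresholding with
   an automatically chosen threshold, using cvThreshold() from imgproc_c.h. "gray" is a
   placeholder 8-bit single-channel IplImage*.

    IplImage* bw = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);
    double t = cvThreshold(gray, bw, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
    // the 3rd argument is ignored with CV_THRESH_OTSU; t receives the chosen threshold
*/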
/** Adaptive threshold methods */
enum
{
CV_ADAPTIVE_THRESH_MEAN_C =0,
CV_ADAPTIVE_THRESH_GAUSSIAN_C =1
};
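/* Example (illustrative sketch, not part of the original header): adaptive thresholding
   with cvAdaptiveThreshold() from imgproc_c.h. "gray" and "bw" are placeholder 8-bit
   single-channel images; block size 11 and offset 2 are assumptions.

    cvAdaptiveThreshold(gray, bw, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C,
                        CV_THRESH_BINARY, 11, 2);
    // block size must be odd; only CV_THRESH_BINARY / CV_THRESH_BINARY_INV are valid here
*/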
/** FloodFill flags */
enum
{
CV_FLOODFILL_FIXED_RANGE =(1 << 16),
CV_FLOODFILL_MASK_ONLY =(1 << 17)
};
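/* Example (illustrative sketch, not part of the original header): flood filling a region
   and collecting its statistics in a CvConnectedComp, using cvFloodFill() from
   imgproc_c.h. "img" and the seed point / tolerances are placeholders.

    CvConnectedComp comp;
    cvFloodFill(img, cvPoint(50, 50), cvScalarAll(255),
                cvScalarAll(10), cvScalarAll(10), &comp,
                4 | CV_FLOODFILL_FIXED_RANGE, NULL);
    // comp.area, comp.rect and comp.value describe the filled region
*/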
/** Canny edge detector flags */
enum
{
CV_CANNY_L2_GRADIENT =(1 << 31)
};
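/* Example (illustrative sketch, not part of the original header): a sketch assuming the
   flag is combined with the aperture_size argument of cvCanny() from imgproc_c.h to
   request the more accurate L2 gradient magnitude. "gray" and "edges" are placeholder
   8-bit single-channel images.

    cvCanny(gray, edges, 50, 150, 3 | CV_CANNY_L2_GRADIENT);
*/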
/** Variants of a Hough transform */
enum
{
CV_HOUGH_STANDARD =0,
CV_HOUGH_PROBABILISTIC =1,
CV_HOUGH_MULTI_SCALE =2,
CV_HOUGH_GRADIENT =3
};
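/* Example (illustrative sketch, not part of the original header): probabilistic Hough
   transform with cvHoughLines2() from imgproc_c.h. "edges" is a placeholder 8-bit binary
   image and "storage" a CvMemStorage*; the threshold, minimum line length and maximum gap
   values are assumptions.

    CvSeq* lines = cvHoughLines2(edges, storage, CV_HOUGH_PROBABILISTIC,
                                 1, CV_PI/180, 80, 30, 10);
    for( int i = 0; i < lines->total; i++ )
    {
        CvPoint* pts = (CvPoint*)cvGetSeqElem(lines, i);
        // pts[0] and pts[1] are the endpoints of the i-th detected segment
    }
*/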
/* Fast search data structures */
struct CvFeatureTree;
struct CvLSH;
struct CvLSHOperations;
/** @} */
#ifdef __cplusplus
}
#endif
#endif