python: 'cv2.' -> 'cv.' via 'import cv2 as cv'

Alexander Alekhin
2017-12-11 12:55:03 +03:00
parent 9665dde678
commit 5560db73bf
162 changed files with 2083 additions and 2084 deletions
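
The change is mechanical: a Python import alias binds the same module object under a second name, so every cv2.X lookup is interchangeable with cv.X. A minimal sketch of that equivalence, assuming any OpenCV-Python installation:

import cv2
import cv2 as cv

assert cv is cv2                   # one module object, two names
assert cv.norm is cv2.norm         # functions are shared
assert cv.NORM_L1 == cv2.NORM_L1   # so are constants
print(cv.__version__)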

View File

@@ -3,18 +3,18 @@
'''
Algorithm serialization test
'''
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
class algorithm_rw_test(NewOpenCVTests):
def test_algorithm_rw(self):
# some arbitrary non-default parameters
gold = cv2.AKAZE_create(descriptor_size=1, descriptor_channels=2, nOctaves=3, threshold=4.0)
gold.write(cv2.FileStorage("params.yml", 1), "AKAZE")
gold = cv.AKAZE_create(descriptor_size=1, descriptor_channels=2, nOctaves=3, threshold=4.0)
gold.write(cv.FileStorage("params.yml", 1), "AKAZE")
fs = cv2.FileStorage("params.yml", 0)
algorithm = cv2.AKAZE_create()
fs = cv.FileStorage("params.yml", 0)
algorithm = cv.AKAZE_create()
algorithm.read(fs.getNode("AKAZE"))
self.assertEqual(algorithm.getDescriptorSize(), 1)
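
The numeric second argument to cv.FileStorage above is a mode flag (1 to write, 0 to read); the same round trip with the symbolic names, which I believe map to those values (cv.FILE_STORAGE_WRITE == 1, cv.FILE_STORAGE_READ == 0), would look like this sketch:

import cv2 as cv

fs_out = cv.FileStorage("params.yml", cv.FILE_STORAGE_WRITE)  # mode flag 1 above
detector = cv.AKAZE_create(descriptor_size=1)
detector.write(fs_out, "AKAZE")
fs_out.release()                                              # flush to disk

fs_in = cv.FileStorage("params.yml", cv.FILE_STORAGE_READ)    # mode flag 0 above
restored = cv.AKAZE_create()
restored.read(fs_in.getNode("AKAZE"))
assert restored.getDescriptorSize() == 1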

View File

@@ -9,7 +9,7 @@ reads distorted images, calculates the calibration and writes undistorted images
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@@ -38,10 +38,10 @@ class calibration_test(NewOpenCVTests):
continue
h, w = img.shape[:2]
found, corners = cv2.findChessboardCorners(img, pattern_size)
found, corners = cv.findChessboardCorners(img, pattern_size)
if found:
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)
cv.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
if not found:
continue
@@ -50,7 +50,7 @@ class calibration_test(NewOpenCVTests):
obj_points.append(pattern_points)
# calculate camera distortion
rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None, flags = 0)
rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), None, None, flags = 0)
eps = 0.01
normCamEps = 10.0
@@ -64,8 +64,8 @@ class calibration_test(NewOpenCVTests):
1.21234330e-03, -1.40825372e-04, 1.54865844e-01]
self.assertLess(abs(rms - 0.196334638034), eps)
self.assertLess(cv2.norm(camera_matrix - cameraMatrixTest, cv2.NORM_L1), normCamEps)
self.assertLess(cv2.norm(dist_coefs - distCoeffsTest, cv2.NORM_L1), normDistEps)
self.assertLess(cv.norm(camera_matrix - cameraMatrixTest, cv.NORM_L1), normCamEps)
self.assertLess(cv.norm(dist_coefs - distCoeffsTest, cv.NORM_L1), normDistEps)
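
For context, the two matrices this test checks are exactly what gets fed back into undistortion; a minimal sketch with hypothetical calibration values and a hypothetical file path:

import numpy as np
import cv2 as cv

# Hypothetical outputs shaped like calibrateCamera's camera_matrix / dist_coefs
camera_matrix = np.array([[532.8, 0., 342.5],
                          [0., 532.9, 233.9],
                          [0., 0., 1.]])
dist_coefs = np.array([-0.28, 0.07, 1.2e-3, -1.4e-4, 0.15])

img = cv.imread("distorted.png")  # hypothetical input image
if img is not None:
    cv.imwrite("undistorted.png", cv.undistort(img, camera_matrix, dist_coefs))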

View File

@@ -21,7 +21,7 @@ if PY3:
xrange = range
import numpy as np
import cv2
import cv2 as cv
from tst_scene_render import TestSceneRender
from tests_common import NewOpenCVTests, intersectionRate
@@ -53,8 +53,8 @@ class camshift_test(NewOpenCVTests):
while True:
framesCounter += 1
self.frame = self.render.getNextFrame()
hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
if self.selection:
x0, y0, x1, y1 = self.render.getCurrentRect() + 50
@@ -63,17 +63,17 @@ class camshift_test(NewOpenCVTests):
hsv_roi = hsv[y0:y1, x0:x1]
mask_roi = mask[y0:y1, x0:x1]
hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
hist = cv.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
self.hist = hist.reshape(-1)
self.selection = False
if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
self.selection = None
prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
prob &= mask
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
_track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
_track_box, self.track_window = cv.CamShift(prob, self.track_window, term_crit)
trackingRect = np.array(self.track_window)
trackingRect[2] += trackingRect[0]

View File

@@ -7,7 +7,7 @@ Test for discrete Fourier transform (DFT)
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import cv2 as cv
import numpy as np
import sys
@@ -24,26 +24,26 @@ class dft_test(NewOpenCVTests):
refDftShift = np.fft.fftshift(refDft)
refMagnitide = np.log(1.0 + np.abs(refDftShift))
testDft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)
testDft = cv.dft(np.float32(img),flags = cv.DFT_COMPLEX_OUTPUT)
testDftShift = np.fft.fftshift(testDft)
testMagnitude = np.log(1.0 + cv2.magnitude(testDftShift[:,:,0], testDftShift[:,:,1]))
testMagnitude = np.log(1.0 + cv.magnitude(testDftShift[:,:,0], testDftShift[:,:,1]))
refMagnitide = cv2.normalize(refMagnitide, 0.0, 1.0, cv2.NORM_MINMAX)
testMagnitude = cv2.normalize(testMagnitude, 0.0, 1.0, cv2.NORM_MINMAX)
refMagnitide = cv.normalize(refMagnitide, 0.0, 1.0, cv.NORM_MINMAX)
testMagnitude = cv.normalize(testMagnitude, 0.0, 1.0, cv.NORM_MINMAX)
self.assertLess(cv2.norm(refMagnitide - testMagnitude), eps)
self.assertLess(cv.norm(refMagnitide - testMagnitude), eps)
#test inverse transform
img_back = np.fft.ifft2(refDft)
img_back = np.abs(img_back)
img_backTest = cv2.idft(testDft)
img_backTest = cv2.magnitude(img_backTest[:,:,0], img_backTest[:,:,1])
img_backTest = cv.idft(testDft)
img_backTest = cv.magnitude(img_backTest[:,:,0], img_backTest[:,:,1])
img_backTest = cv2.normalize(img_backTest, 0.0, 1.0, cv2.NORM_MINMAX)
img_back = cv2.normalize(img_back, 0.0, 1.0, cv2.NORM_MINMAX)
img_backTest = cv.normalize(img_backTest, 0.0, 1.0, cv.NORM_MINMAX)
img_back = cv.normalize(img_back, 0.0, 1.0, cv.NORM_MINMAX)
self.assertLess(cv2.norm(img_back - img_backTest), eps)
self.assertLess(cv.norm(img_back - img_backTest), eps)
if __name__ == '__main__':

View File

@@ -28,7 +28,7 @@ from __future__ import print_function
# built-in modules
from multiprocessing.pool import ThreadPool
import cv2
import cv2 as cv
import numpy as np
from numpy.linalg import norm
@@ -48,12 +48,12 @@ def split2d(img, cell_size, flatten=True):
return cells
def deskew(img):
m = cv2.moments(img)
m = cv.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)
return img
class StatModel(object):
@@ -65,10 +65,10 @@ class StatModel(object):
class KNearest(StatModel):
def __init__(self, k = 3):
self.k = k
self.model = cv2.ml.KNearest_create()
self.model = cv.ml.KNearest_create()
def train(self, samples, responses):
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
_retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k)
@@ -76,14 +76,14 @@ class KNearest(StatModel):
class SVM(StatModel):
def __init__(self, C = 1, gamma = 0.5):
self.model = cv2.ml.SVM_create()
self.model = cv.ml.SVM_create()
self.model.setGamma(gamma)
self.model.setC(C)
self.model.setKernel(cv2.ml.SVM_RBF)
self.model.setType(cv2.ml.SVM_C_SVC)
self.model.setKernel(cv.ml.SVM_RBF)
self.model.setType(cv.ml.SVM_C_SVC)
def train(self, samples, responses):
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
return self.model.predict(samples)[1].ravel()
@@ -105,9 +105,9 @@ def preprocess_simple(digits):
def preprocess_hog(digits):
samples = []
for img in digits:
gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy)
gx = cv.Sobel(img, cv.CV_32F, 1, 0)
gy = cv.Sobel(img, cv.CV_32F, 0, 1)
mag, ang = cv.cartToPolar(gx, gy)
bin_n = 16
bin = np.int32(bin_n*ang/(2*np.pi))
bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
@@ -190,8 +190,8 @@ class digits_test(NewOpenCVTests):
[ 0, 0, 0, 0, 0, 0, 0, 0, 47, 0],
[ 0, 1, 0, 1, 0, 0, 0, 0, 1, 45]]
self.assertLess(cv2.norm(confusionMatrixes[0] - confusionKNN, cv2.NORM_L1), normEps)
self.assertLess(cv2.norm(confusionMatrixes[1] - confusionSVM, cv2.NORM_L1), normEps)
self.assertLess(cv.norm(confusionMatrixes[0] - confusionKNN, cv.NORM_L1), normEps)
self.assertLess(cv.norm(confusionMatrixes[1] - confusionSVM, cv.NORM_L1), normEps)
self.assertLess(errors[0] - 0.034, eps)
self.assertLess(errors[1] - 0.018, eps)
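
The KNearest and SVM wrappers above reduce to the same cv.ml train/predict calls; a self-contained sketch on synthetic two-class data (not the digit images the test uses):

import numpy as np
import cv2 as cv

rng = np.random.RandomState(0)
samples = np.float32(rng.randn(100, 2))    # 100 points, 2 features
responses = np.float32(samples[:, 0] > 0)  # label = sign of the first feature

knn = cv.ml.KNearest_create()
knn.train(samples, cv.ml.ROW_SAMPLE, responses)
_ret, results, _neigh, _dists = knn.findNearest(samples, k = 3)
print('train accuracy:', np.mean(results.ravel() == responses))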

View File

@@ -8,11 +8,11 @@ face detection using haar cascades
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
def detect(img, cascade):
rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE)
flags=cv.CASCADE_SCALE_IMAGE)
if len(rects) == 0:
return []
rects[:,2:] += rects[:,:2]
@@ -26,8 +26,8 @@ class facedetect_test(NewOpenCVTests):
cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml'
nested_fn = self.repoPath + '/data/haarcascades/haarcascade_eye.xml'
cascade = cv2.CascadeClassifier(cascade_fn)
nested = cv2.CascadeClassifier(nested_fn)
cascade = cv.CascadeClassifier(cascade_fn)
nested = cv.CascadeClassifier(nested_fn)
samples = ['samples/data/lena.jpg', 'cv/cascadeandhog/images/mona-lisa.png']
@@ -49,8 +49,8 @@ class facedetect_test(NewOpenCVTests):
for sample in samples:
img = self.get_sample( sample)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 5.1)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray = cv.GaussianBlur(gray, (5, 5), 5.1)
rects = detect(gray, cascade)
faces.append(rects)

View File

@@ -13,7 +13,7 @@ PlaneTracker class in plane_tracker.py
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
import sys
PY3 = sys.version_info[0] == 3
@@ -28,8 +28,8 @@ def intersectionRate(s1, s2):
x1, y1, x2, y2 = s1
s1 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]])
area, _intersection = cv2.intersectConvexConvex(s1, np.array(s2))
return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(np.array(s2)))
area, _intersection = cv.intersectConvexConvex(s1, np.array(s2))
return 2 * area / (cv.contourArea(s1) + cv.contourArea(np.array(s2)))
from tests_common import NewOpenCVTests
@@ -92,8 +92,8 @@ TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad')
class PlaneTracker:
def __init__(self):
self.detector = cv2.AKAZE_create(threshold = 0.003)
self.matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
self.detector = cv.AKAZE_create(threshold = 0.003)
self.matcher = cv.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
self.targets = []
self.frame_points = []
@@ -137,7 +137,7 @@ class PlaneTracker:
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [self.frame_points[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
H, status = cv.findHomography(p0, p1, cv.RANSAC, 3.0)
status = status.ravel() != 0
if status.sum() < MIN_MATCH_COUNT:
continue
@@ -145,7 +145,7 @@ class PlaneTracker:
x0, y0, x1, y1 = target.rect
quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)
quad = cv.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)
track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
tracked.append(track)

View File

@@ -4,7 +4,7 @@
Robust line fitting.
==================
Example of using cv2.fitLine function for fitting line
Example of using cv.fitLine function for fitting line
to points in presence of outliers.
Switch through different M-estimator functions and see,
@@ -19,7 +19,7 @@ import sys
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@@ -53,17 +53,17 @@ class fitline_test(NewOpenCVTests):
lines = []
for name in dist_func_names:
func = getattr(cv2, name)
vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)
func = getattr(cv, name)
vx, vy, cx, cy = cv.fitLine(np.float32(points), func, 0, 0.01, 0.01)
line = [float(vx), float(vy), float(cx), float(cy)]
lines.append(line)
eps = 0.05
refVec = (np.float32(p1) - p0) / cv2.norm(np.float32(p1) - p0)
refVec = (np.float32(p1) - p0) / cv.norm(np.float32(p1) - p0)
for i in range(len(lines)):
self.assertLessEqual(cv2.norm(refVec - lines[i][0:2], cv2.NORM_L2), eps)
self.assertLessEqual(cv.norm(refVec - lines[i][0:2], cv.NORM_L2), eps)
if __name__ == '__main__':
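
The dist_func_names the test iterates over are, I believe, the cv.DIST_* M-estimators the docstring refers to; a sketch fitting one outlier-contaminated point set with each of them, using the same 0.01 accuracy parameters as above:

import numpy as np
import cv2 as cv

pts = np.float32([[0, 0], [1, 1], [2, 2], [3, 3], [50, 0]])  # one gross outlier
for dist in (cv.DIST_L2, cv.DIST_L1, cv.DIST_L12,
             cv.DIST_FAIR, cv.DIST_WELSCH, cv.DIST_HUBER):
    vx, vy, cx, cy = cv.fitLine(pts, dist, 0, 0.01, 0.01)
    print(dist, float(vx), float(vy))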

View File

@@ -10,7 +10,7 @@ if PY3:
import numpy as np
from numpy import random
import cv2
import cv2 as cv
def make_gaussians(cluster_n, img_size):
points = []
@@ -38,9 +38,9 @@ class gaussian_mix_test(NewOpenCVTests):
points, ref_distrs = make_gaussians(cluster_n, img_size)
em = cv2.ml.EM_create()
em = cv.ml.EM_create()
em.setClustersNumber(cluster_n)
em.setCovarianceMatrixType(cv2.ml.EM_COV_MAT_GENERIC)
em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC)
em.trainEM(points)
means = em.getMeans()
covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232
@@ -53,8 +53,8 @@ class gaussian_mix_test(NewOpenCVTests):
for i in range(cluster_n):
for j in range(cluster_n):
if (cv2.norm(means[i] - ref_distrs[j][0], cv2.NORM_L2) / cv2.norm(ref_distrs[j][0], cv2.NORM_L2) < meanEps and
cv2.norm(covs[i] - ref_distrs[j][1], cv2.NORM_L2) / cv2.norm(ref_distrs[j][1], cv2.NORM_L2) < covEps):
if (cv.norm(means[i] - ref_distrs[j][0], cv.NORM_L2) / cv.norm(ref_distrs[j][0], cv.NORM_L2) < meanEps and
cv.norm(covs[i] - ref_distrs[j][1], cv.NORM_L2) / cv.norm(ref_distrs[j][1], cv.NORM_L2) < covEps):
matches_count += 1
self.assertEqual(matches_count, cluster_n)

View File

@@ -3,7 +3,7 @@
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests
@@ -15,16 +15,16 @@ class TestGoodFeaturesToTrack_test(NewOpenCVTests):
threshes = [ x / 100. for x in range(1,10) ]
numPoints = 20000
results = dict([(t, cv2.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
results = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
# Check that GoodFeaturesToTrack has not modified input image
self.assertTrue(arr.tostring() == original.tostring())
# Check for repeatability
for i in range(1):
results2 = dict([(t, cv2.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
results2 = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
for t in threshes:
self.assertTrue(len(results2[t]) == len(results[t]))
for i in range(len(results[t])):
self.assertTrue(cv2.norm(results[t][i][0] - results2[t][i][0]) == 0)
self.assertTrue(cv.norm(results[t][i][0] - results2[t][i][0]) == 0)
for t0,t1 in zip(threshes, threshes[1:]):
r0 = results[t0]
@@ -33,7 +33,7 @@ class TestGoodFeaturesToTrack_test(NewOpenCVTests):
self.assertTrue(len(r0) > len(r1))
# Increasing thresh should only truncate the result list
for i in range(len(r1)):
self.assertTrue(cv2.norm(r1[i][0] - r0[i][0])==0)
self.assertTrue(cv.norm(r1[i][0] - r0[i][0])==0)
if __name__ == '__main__':

View File

@@ -9,7 +9,7 @@ Interactive Image Segmentation using GrabCut algorithm.
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
import sys
from tests_common import NewOpenCVTests
@@ -26,7 +26,7 @@ class grabcut_test(NewOpenCVTests):
def scaleMask(self, mask):
return np.where((mask==cv2.GC_FGD) + (mask==cv2.GC_PR_FGD),255,0).astype('uint8')
return np.where((mask==cv.GC_FGD) + (mask==cv.GC_PR_FGD),255,0).astype('uint8')
def test_grabcut(self):
@@ -42,27 +42,27 @@ class grabcut_test(NewOpenCVTests):
mask = np.zeros(img.shape[:2], dtype = np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv2.GC_INIT_WITH_RECT)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 2, cv2.GC_EVAL)
cv.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv.GC_INIT_WITH_RECT)
cv.grabCut(img, mask, rect, bgdModel, fgdModel, 2, cv.GC_EVAL)
if mask_prob is None:
mask_prob = mask.copy()
cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/mask_probpy.png', mask_prob)
cv.imwrite(self.extraTestDataPath + '/cv/grabcut/mask_probpy.png', mask_prob)
if exp_mask1 is None:
exp_mask1 = self.scaleMask(mask)
cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask1py.png', exp_mask1)
cv.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask1py.png', exp_mask1)
self.assertEqual(self.verify(self.scaleMask(mask), exp_mask1), True)
mask = mask_prob
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv2.GC_INIT_WITH_MASK)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 1, cv2.GC_EVAL)
cv.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv.GC_INIT_WITH_MASK)
cv.grabCut(img, mask, rect, bgdModel, fgdModel, 1, cv.GC_EVAL)
if exp_mask2 is None:
exp_mask2 = self.scaleMask(mask)
cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask2py.png', exp_mask2)
cv.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask2py.png', exp_mask2)
self.assertEqual(self.verify(self.scaleMask(mask), exp_mask2), True)
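
scaleMask above keeps the two foreground labels out of the four values grabCut writes into the mask. For reference, the label values as I recall them are GC_BGD=0, GC_FGD=1, GC_PR_BGD=2, GC_PR_FGD=3; a quick way to verify against your build:

import cv2 as cv

for name in ('GC_BGD', 'GC_FGD', 'GC_PR_BGD', 'GC_PR_FGD'):
    print(name, getattr(cv, name))  # assumed: 0, 1, 2, 3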

View File

@@ -1,13 +1,13 @@
#!/usr/bin/python
'''
This example illustrates how to use cv2.HoughCircles() function.
This example illustrates how to use cv.HoughCircles() function.
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import cv2 as cv
import numpy as np
import sys
from numpy import pi, sin, cos
@@ -27,10 +27,10 @@ def circleApproximation(circle):
def convContoursIntersectiponRate(c1, c2):
s1 = cv2.contourArea(c1)
s2 = cv2.contourArea(c2)
s1 = cv.contourArea(c1)
s2 = cv.contourArea(c2)
s, _ = cv2.intersectConvexConvex(c1, c2)
s, _ = cv.intersectConvexConvex(c1, c2)
return 2*s/(s1+s2)
@@ -41,10 +41,10 @@ class houghcircles_test(NewOpenCVTests):
fn = "samples/data/board.jpg"
src = self.get_sample(fn, 1)
img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(img, 5)
img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
img = cv.medianBlur(img, 5)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)[0]
circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)[0]
testCircles = [[38, 181, 17.6],
[99.7, 166, 13.12],
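
The positional HoughCircles call above is easier to follow with the arguments annotated; a sketch with the same values (the placeholder image is hypothetical; in the test it is a blurred grayscale board image):

import numpy as np
import cv2 as cv

img = np.zeros((128, 128), np.uint8)  # placeholder for a blurred grayscale image
circles = cv.HoughCircles(img,
                          cv.HOUGH_GRADIENT,  # detection method
                          1,                  # dp: inverse accumulator resolution
                          10,                 # minDist between detected centers
                          np.array([]),       # optional output placeholder
                          100,                # param1: upper Canny threshold
                          30,                 # param2: accumulator vote threshold
                          1,                  # minRadius
                          30)                 # maxRadius
# returns None when nothing is found, so the [0] indexing above is safe
# only once a detection is expected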

View File

@@ -7,7 +7,7 @@ This example illustrates how to use Hough Transform to find lines
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import cv2 as cv
import numpy as np
import sys
import math
@@ -16,9 +16,9 @@ from tests_common import NewOpenCVTests
def linesDiff(line1, line2):
norm1 = cv2.norm(line1 - line2, cv2.NORM_L2)
norm1 = cv.norm(line1 - line2, cv.NORM_L2)
line3 = line1[2:4] + line1[0:2]
norm2 = cv2.norm(line3 - line2, cv2.NORM_L2)
norm2 = cv.norm(line3 - line2, cv.NORM_L2)
return min(norm1, norm2)
@@ -29,9 +29,9 @@ class houghlines_test(NewOpenCVTests):
fn = "/samples/data/pic1.png"
src = self.get_sample(fn)
dst = cv2.Canny(src, 50, 200)
dst = cv.Canny(src, 50, 200)
lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)[:,0,:]
lines = cv.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)[:,0,:]
eps = 5
testLines = [

View File

@@ -8,7 +8,7 @@ K-means clustering test
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from numpy import random
import sys
PY3 = sys.version_info[0] == 3
@@ -58,8 +58,8 @@ class kmeans_test(NewOpenCVTests):
points, _, clusterSizes = make_gaussians(cluster_n, img_size)
term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1)
_ret, labels, centers = cv2.kmeans(points, cluster_n, None, term_crit, 10, 0)
term_crit = (cv.TERM_CRITERIA_EPS, 30, 0.1)
_ret, labels, centers = cv.kmeans(points, cluster_n, None, term_crit, 10, 0)
self.assertEqual(len(centers), cluster_n)

View File

@@ -2,7 +2,7 @@
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@@ -11,13 +11,13 @@ class Hackathon244Tests(NewOpenCVTests):
def test_int_array(self):
a = np.array([-1, 2, -3, 4, -5])
absa0 = np.abs(a)
self.assertTrue(cv2.norm(a, cv2.NORM_L1) == 15)
absa1 = cv2.absdiff(a, 0)
self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0)
self.assertTrue(cv.norm(a, cv.NORM_L1) == 15)
absa1 = cv.absdiff(a, 0)
self.assertEqual(cv.norm(absa1, absa0, cv.NORM_INF), 0)
def test_imencode(self):
a = np.zeros((480, 640), dtype=np.uint8)
flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90])
flag, ajpg = cv.imencode("img_q90.jpg", a, [cv.IMWRITE_JPEG_QUALITY, 90])
self.assertEqual(flag, True)
self.assertEqual(ajpg.dtype, np.uint8)
self.assertGreater(ajpg.shape[0], 1)
@@ -25,8 +25,8 @@ class Hackathon244Tests(NewOpenCVTests):
def test_projectPoints(self):
objpt = np.float64([[1,2,3]])
imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
imgpt0, jac0 = cv.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
imgpt1, jac1 = cv.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2))
self.assertEqual(imgpt1.shape, imgpt0.shape)
self.assertEqual(jac0.shape, jac1.shape)
@@ -37,17 +37,17 @@ class Hackathon244Tests(NewOpenCVTests):
pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= 10
(retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points)
(retval, out, inliers) = cv.estimateAffine3D(pattern_points, pattern_points)
self.assertEqual(retval, 1)
if cv2.norm(out[2,:]) < 1e-3:
if cv.norm(out[2,:]) < 1e-3:
out[2,2]=1
self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1])
self.assertLess(cv.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
self.assertEqual(cv.countNonZero(inliers), pattern_size[0]*pattern_size[1])
def test_fast(self):
fd = cv2.FastFeatureDetector_create(30, True)
fd = cv.FastFeatureDetector_create(30, True)
img = self.get_sample("samples/data/right02.jpg", 0)
img = cv2.medianBlur(img, 3)
img = cv.medianBlur(img, 3)
keypoints = fd.detect(img)
self.assertTrue(600 <= len(keypoints) <= 700)
for kpt in keypoints:
@@ -71,9 +71,9 @@ class Hackathon244Tests(NewOpenCVTests):
np.random.seed(244)
a = np.random.randn(npt,2).astype('float32')*50 + 150
be = cv2.fitEllipse(a)
br = cv2.minAreaRect(a)
mc, mr = cv2.minEnclosingCircle(a)
be = cv.fitEllipse(a)
br = cv.minAreaRect(a)
mc, mr = cv.minEnclosingCircle(a)
be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742)
br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582)

View File

@@ -24,7 +24,7 @@ and the remaining 10000 to test the classifier.
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
def load_base(fn):
a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') })
@@ -56,12 +56,12 @@ class LetterStatModel(object):
class RTrees(LetterStatModel):
def __init__(self):
self.model = cv2.ml.RTrees_create()
self.model = cv.ml.RTrees_create()
def train(self, samples, responses):
#sample_n, var_n = samples.shape
self.model.setMaxDepth(20)
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int))
self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
def predict(self, samples):
_ret, resp = self.model.predict(samples)
@@ -70,10 +70,10 @@ class RTrees(LetterStatModel):
class KNearest(LetterStatModel):
def __init__(self):
self.model = cv2.ml.KNearest_create()
self.model = cv.ml.KNearest_create()
def train(self, samples, responses):
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
_retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10)
@@ -82,17 +82,17 @@ class KNearest(LetterStatModel):
class Boost(LetterStatModel):
def __init__(self):
self.model = cv2.ml.Boost_create()
self.model = cv.ml.Boost_create()
def train(self, samples, responses):
_sample_n, var_n = samples.shape
new_samples = self.unroll_samples(samples)
new_responses = self.unroll_responses(responses)
var_types = np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL, cv2.ml.VAR_CATEGORICAL], np.uint8)
var_types = np.array([cv.ml.VAR_NUMERICAL] * var_n + [cv.ml.VAR_CATEGORICAL, cv.ml.VAR_CATEGORICAL], np.uint8)
self.model.setWeakCount(15)
self.model.setMaxDepth(10)
self.model.train(cv2.ml.TrainData_create(new_samples, cv2.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types))
self.model.train(cv.ml.TrainData_create(new_samples, cv.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types))
def predict(self, samples):
new_samples = self.unroll_samples(samples)
@@ -103,14 +103,14 @@ class Boost(LetterStatModel):
class SVM(LetterStatModel):
def __init__(self):
self.model = cv2.ml.SVM_create()
self.model = cv.ml.SVM_create()
def train(self, samples, responses):
self.model.setType(cv2.ml.SVM_C_SVC)
self.model.setType(cv.ml.SVM_C_SVC)
self.model.setC(1)
self.model.setKernel(cv2.ml.SVM_RBF)
self.model.setKernel(cv.ml.SVM_RBF)
self.model.setGamma(.1)
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int))
self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
def predict(self, samples):
_ret, resp = self.model.predict(samples)
@@ -119,7 +119,7 @@ class SVM(LetterStatModel):
class MLP(LetterStatModel):
def __init__(self):
self.model = cv2.ml.ANN_MLP_create()
self.model = cv.ml.ANN_MLP_create()
def train(self, samples, responses):
_sample_n, var_n = samples.shape
@@ -127,13 +127,13 @@ class MLP(LetterStatModel):
layer_sizes = np.int32([var_n, 100, 100, self.class_n])
self.model.setLayerSizes(layer_sizes)
self.model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP)
self.model.setTrainMethod(cv.ml.ANN_MLP_BACKPROP)
self.model.setBackpropMomentumScale(0)
self.model.setBackpropWeightScale(0.001)
self.model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 20, 0.01))
self.model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2, 1)
self.model.setTermCriteria((cv.TERM_CRITERIA_COUNT, 20, 0.01))
self.model.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1)
self.model.train(samples, cv2.ml.ROW_SAMPLE, np.float32(new_responses))
self.model.train(samples, cv.ml.ROW_SAMPLE, np.float32(new_responses))
def predict(self, samples):
_ret, resp = self.model.predict(samples)

View File

@@ -11,7 +11,7 @@ between frames. Finds homography between reference and current views.
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
#local modules
from tst_scene_render import TestSceneRender
@@ -19,7 +19,7 @@ from tests_common import NewOpenCVTests, isPointInRect
lk_params = dict( winSize = (19, 19),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 1000,
qualityLevel = 0.01,
@@ -27,8 +27,8 @@ feature_params = dict( maxCorners = 1000,
blockSize = 19 )
def checkedTrace(img0, img1, p0, back_threshold = 1.0):
p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
d = abs(p0-p0r).reshape(-1, 2).max(-1)
status = d < back_threshold
return p1, status
@@ -48,9 +48,9 @@ class lk_homography_test(NewOpenCVTests):
self.get_sample('samples/data/box.png'), noise = 0.1, speed = 1.0)
frame = self.render.getNextFrame()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
self.frame0 = frame.copy()
self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params)
isForegroundHomographyFound = False
@@ -66,7 +66,7 @@ class lk_homography_test(NewOpenCVTests):
while self.framesCounter < 200:
self.framesCounter += 1
frame = self.render.getNextFrame()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
if self.p0 is not None:
p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1)
@@ -77,7 +77,7 @@ class lk_homography_test(NewOpenCVTests):
if len(self.p0) < 4:
self.p0 = None
continue
_H, status = cv2.findHomography(self.p0, self.p1, cv2.RANSAC, 5.0)
_H, status = cv.findHomography(self.p0, self.p1, cv.RANSAC, 5.0)
goodPointsInRect = 0
goodPointsOutsideRect = 0
@@ -91,7 +91,7 @@ class lk_homography_test(NewOpenCVTests):
isForegroundHomographyFound = True
self.assertGreater(float(goodPointsInRect) / (self.numFeaturesInRectOnStart + 1), 0.6)
else:
self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params)
self.assertEqual(isForegroundHomographyFound, True)

View File

@@ -13,7 +13,7 @@ between frames.
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
#local modules
from tst_scene_render import TestSceneRender
@@ -21,7 +21,7 @@ from tests_common import NewOpenCVTests, intersectionRate, isPointInRect
lk_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 500,
qualityLevel = 0.3,
@@ -32,7 +32,7 @@ def getRectFromPoints(points):
distances = []
for point in points:
distances.append(cv2.norm(point, cv2.NORM_L2))
distances.append(cv.norm(point, cv.NORM_L2))
x0, y0 = points[np.argmin(distances)]
x1, y1 = points[np.argmax(distances)]
@@ -58,13 +58,13 @@ class lk_track_test(NewOpenCVTests):
while True:
frame = self.render.getNextFrame()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
if len(self.tracks) > 0:
img0, img1 = self.prev_gray, frame_gray
p0 = np.float32([tr[-1][0] for tr in self.tracks]).reshape(-1, 1, 2)
p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
d = abs(p0-p0r).reshape(-1, 2).max(-1)
good = d < 1
new_tracks = []
@@ -98,8 +98,8 @@ class lk_track_test(NewOpenCVTests):
mask = np.zeros_like(frame_gray)
mask[:] = 255
for x, y in [np.int32(tr[-1][0]) for tr in self.tracks]:
cv2.circle(mask, (x, y), 5, 0, -1)
p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
cv.circle(mask, (x, y), 5, 0, -1)
p = cv.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
if p is not None:
for x, y in np.float32(p).reshape(-1, 2):
self.tracks.append([[(x, y), self.frame_idx]])

View File

@@ -2,18 +2,18 @@
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
class Bindings(NewOpenCVTests):
def test_inheritance(self):
bm = cv2.StereoBM_create()
bm = cv.StereoBM_create()
bm.getPreFilterCap() # from StereoBM
bm.getBlockSize() # from StereoMatcher
boost = cv2.ml.Boost_create()
boost = cv.ml.Boost_create()
boost.getBoostType() # from ml::Boost
boost.getMaxDepth() # from ml::DTrees
boost.isClassifier() # from ml::StatModel

View File

@@ -10,7 +10,7 @@ import sys
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@@ -43,8 +43,8 @@ class morphology_test(NewOpenCVTests):
str_name = 'MORPH_' + cur_str_mode.upper()
oper_name = 'MORPH_' + op.upper()
st = cv2.getStructuringElement(getattr(cv2, str_name), (sz, sz))
return cv2.morphologyEx(img, getattr(cv2, oper_name), st, iterations=iters)
st = cv.getStructuringElement(getattr(cv, str_name), (sz, sz))
return cv.morphologyEx(img, getattr(cv, oper_name), st, iterations=iters)
for mode in modes:
res = update(mode)
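
The getattr lookups above work because OpenCV exposes every MORPH_* constant as a module attribute, so string-built names resolve identically under cv and cv2; for example:

import cv2 as cv

assert getattr(cv, 'MORPH_RECT') == cv.MORPH_RECT
st = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))  # 3x3 rectangular kernel
print(st)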

View File

@@ -7,7 +7,7 @@ MSER detector test
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@@ -33,7 +33,7 @@ class mser_test(NewOpenCVTests):
]
thresharr = [ 0, 70, 120, 180, 255 ]
kDelta = 5
mserExtractor = cv2.MSER_create()
mserExtractor = cv.MSER_create()
mserExtractor.setDelta(kDelta)
np.random.seed(10)
@@ -53,11 +53,11 @@ class mser_test(NewOpenCVTests):
mserExtractor.setMinArea(kMinArea)
mserExtractor.setMaxArea(kMaxArea)
if invert:
cv2.bitwise_not(src, src)
cv.bitwise_not(src, src)
if binarize:
_, src = cv2.threshold(src, thresh, 255, cv2.THRESH_BINARY)
_, src = cv.threshold(src, thresh, 255, cv.THRESH_BINARY)
if blur:
src = cv2.GaussianBlur(src, (5, 5), 1.5, 1.5)
src = cv.GaussianBlur(src, (5, 5), 1.5, 1.5)
minRegs = 7 if use_big_image else 2
maxRegs = 1000 if use_big_image else 20
if binarize and (thresh == 0 or thresh == 255):

View File

@@ -8,7 +8,7 @@ example to detect upright people in images using HOG features
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
def inside(r, q):
@@ -21,8 +21,8 @@ from tests_common import NewOpenCVTests, intersectionRate
class peopledetect_test(NewOpenCVTests):
def test_peopledetect(self):
hog = cv2.HOGDescriptor()
hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() )
hog = cv.HOGDescriptor()
hog.setSVMDetector( cv.HOGDescriptor_getDefaultPeopleDetector() )
dirPath = 'samples/data/'
samples = ['basketball1.png', 'basketball2.png']

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env python
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@@ -7,14 +7,14 @@ class shape_test(NewOpenCVTests):
def test_computeDistance(self):
a = self.get_sample('samples/data/shape_sample/1.png', cv2.IMREAD_GRAYSCALE)
b = self.get_sample('samples/data/shape_sample/2.png', cv2.IMREAD_GRAYSCALE)
a = self.get_sample('samples/data/shape_sample/1.png', cv.IMREAD_GRAYSCALE)
b = self.get_sample('samples/data/shape_sample/2.png', cv.IMREAD_GRAYSCALE)
_, ca, _ = cv2.findContours(a, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
_, cb, _ = cv2.findContours(b, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
_, ca, _ = cv.findContours(a, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)
_, cb, _ = cv.findContours(b, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)
hd = cv2.createHausdorffDistanceExtractor()
sd = cv2.createShapeContextDistanceExtractor()
hd = cv.createHausdorffDistanceExtractor()
sd = cv.createShapeContextDistanceExtractor()
d1 = hd.computeDistance(ca[0], cb[0])
d2 = sd.computeDistance(ca[0], cb[0])

View File

@@ -14,7 +14,7 @@ if PY3:
xrange = range
import numpy as np
import cv2
import cv2 as cv
def angle_cos(p0, p1, p2):
@@ -22,20 +22,20 @@ def angle_cos(p0, p1, p2):
return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )
def find_squares(img):
img = cv2.GaussianBlur(img, (5, 5), 0)
img = cv.GaussianBlur(img, (5, 5), 0)
squares = []
for gray in cv2.split(img):
for gray in cv.split(img):
for thrs in xrange(0, 255, 26):
if thrs == 0:
bin = cv2.Canny(gray, 0, 50, apertureSize=5)
bin = cv2.dilate(bin, None)
bin = cv.Canny(gray, 0, 50, apertureSize=5)
bin = cv.dilate(bin, None)
else:
_retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
bin, contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
_retval, bin = cv.threshold(gray, thrs, 255, cv.THRESH_BINARY)
bin, contours, _hierarchy = cv.findContours(bin, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
for cnt in contours:
cnt_len = cv2.arcLength(cnt, True)
cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
cnt_len = cv.arcLength(cnt, True)
cnt = cv.approxPolyDP(cnt, 0.02*cnt_len, True)
if len(cnt) == 4 and cv.contourArea(cnt) > 1000 and cv.isContourConvex(cnt):
cnt = cnt.reshape(-1, 2)
max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
if max_cos < 0.1 and filterSquares(squares, cnt):
@@ -44,8 +44,8 @@ def find_squares(img):
return squares
def intersectionRate(s1, s2):
area, _intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2))
return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2)))
area, _intersection = cv.intersectConvexConvex(np.array(s1), np.array(s2))
return 2 * area / (cv.contourArea(np.array(s1)) + cv.contourArea(np.array(s2)))
def filterSquares(squares, square):

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env python
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@@ -10,11 +10,11 @@ class stitching_test(NewOpenCVTests):
img1 = self.get_sample('stitching/a1.png')
img2 = self.get_sample('stitching/a2.png')
stitcher = cv2.createStitcher(False)
stitcher = cv.createStitcher(False)
(_result, pano) = stitcher.stitch((img1, img2))
#cv2.imshow("pano", pano)
#cv2.waitKey()
#cv.imshow("pano", pano)
#cv.waitKey()
self.assertAlmostEqual(pano.shape[0], 685, delta=100, msg="rows: %r" % list(pano.shape))
self.assertAlmostEqual(pano.shape[1], 1025, delta=100, msg="cols: %r" % list(pano.shape))

View File

@@ -3,7 +3,7 @@
'''
Texture flow direction estimation.
Sample shows how cv2.cornerEigenValsAndVecs function can be used
Sample shows how cv.cornerEigenValsAndVecs function can be used
to estimate image texture flow direction.
'''
@@ -11,7 +11,7 @@ to estimate image texture flow direction.
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
import sys
from tests_common import NewOpenCVTests
@@ -23,10 +23,10 @@ class texture_flow_test(NewOpenCVTests):
img = self.get_sample('samples/data/chessboard.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
h, w = img.shape[:2]
eigen = cv2.cornerEigenValsAndVecs(gray, 5, 3)
eigen = cv.cornerEigenValsAndVecs(gray, 5, 3)
eigen = eigen.reshape(h, w, 3, 2) # [[e1, e2], v1, v2]
flow = eigen[:,:,2]
@@ -40,8 +40,8 @@ class texture_flow_test(NewOpenCVTests):
textureVectors.append(np.int32(flow[y, x]*d))
for i in range(len(textureVectors)):
self.assertTrue(cv2.norm(textureVectors[i], cv2.NORM_L2) < eps
or abs(cv2.norm(textureVectors[i], cv2.NORM_L2) - d) < eps)
self.assertTrue(cv.norm(textureVectors[i], cv.NORM_L2) < eps
or abs(cv.norm(textureVectors[i], cv.NORM_L2) - d) < eps)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@@ -2,7 +2,7 @@
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@@ -11,39 +11,39 @@ class UMat(NewOpenCVTests):
def test_umat_construct(self):
data = np.random.random([512, 512])
# UMat constructors
data_um = cv2.UMat(data) # from ndarray
data_sub_um = cv2.UMat(data_um, [128, 256], [128, 256]) # from UMat
data_dst_um = cv2.UMat(128, 128, cv2.CV_64F) # from size/type
data_um = cv.UMat(data) # from ndarray
data_sub_um = cv.UMat(data_um, [128, 256], [128, 256]) # from UMat
data_dst_um = cv.UMat(128, 128, cv.CV_64F) # from size/type
# test continuous and submatrix flags
assert data_um.isContinuous() and not data_um.isSubmatrix()
assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix()
# test operation on submatrix
cv2.multiply(data_sub_um, 2., dst=data_dst_um)
cv.multiply(data_sub_um, 2., dst=data_dst_um)
assert np.allclose(2. * data[128:256, 128:256], data_dst_um.get())
def test_umat_handle(self):
a_um = cv2.UMat(256, 256, cv2.CV_32F)
_ctx_handle = cv2.UMat.context() # obtain context handle
_queue_handle = cv2.UMat.queue() # obtain queue handle
_a_handle = a_um.handle(cv2.ACCESS_READ) # obtain buffer handle
a_um = cv.UMat(256, 256, cv.CV_32F)
_ctx_handle = cv.UMat.context() # obtain context handle
_queue_handle = cv.UMat.queue() # obtain queue handle
_a_handle = a_um.handle(cv.ACCESS_READ) # obtain buffer handle
_offset = a_um.offset # obtain buffer offset
def test_umat_matching(self):
img1 = self.get_sample("samples/data/right01.jpg")
img2 = self.get_sample("samples/data/right02.jpg")
orb = cv2.ORB_create()
orb = cv.ORB_create()
img1, img2 = cv2.UMat(img1), cv2.UMat(img2)
img1, img2 = cv.UMat(img1), cv.UMat(img2)
ps1, descs_umat1 = orb.detectAndCompute(img1, None)
ps2, descs_umat2 = orb.detectAndCompute(img2, None)
self.assertIsInstance(descs_umat1, cv2.UMat)
self.assertIsInstance(descs_umat2, cv2.UMat)
self.assertIsInstance(descs_umat1, cv.UMat)
self.assertIsInstance(descs_umat2, cv.UMat)
self.assertGreater(len(ps1), 0)
self.assertGreater(len(ps2), 0)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
res_umats = bf.match(descs_umat1, descs_umat2)
res = bf.match(descs_umat1.get(), descs_umat2.get())
@@ -52,8 +52,8 @@ class UMat(NewOpenCVTests):
self.assertEqual(len(res_umats), len(res))
def test_umat_optical_flow(self):
img1 = self.get_sample("samples/data/right01.jpg", cv2.IMREAD_GRAYSCALE)
img2 = self.get_sample("samples/data/right02.jpg", cv2.IMREAD_GRAYSCALE)
img1 = self.get_sample("samples/data/right01.jpg", cv.IMREAD_GRAYSCALE)
img2 = self.get_sample("samples/data/right02.jpg", cv.IMREAD_GRAYSCALE)
# Note that if you want to see a performance boost from the OCL implementation, you need enough data
# For example you can increase maxCorners param to 10000 and increase img1 and img2 in such way:
# img = np.hstack([np.vstack([img] * 6)] * 6)
@@ -63,19 +63,19 @@ class UMat(NewOpenCVTests):
minDistance=7,
blockSize=7)
p0 = cv2.goodFeaturesToTrack(img1, mask=None, **feature_params)
p0_umat = cv2.goodFeaturesToTrack(cv2.UMat(img1), mask=None, **feature_params)
p0 = cv.goodFeaturesToTrack(img1, mask=None, **feature_params)
p0_umat = cv.goodFeaturesToTrack(cv.UMat(img1), mask=None, **feature_params)
self.assertEqual(p0_umat.get().shape, p0.shape)
p0 = np.array(sorted(p0, key=lambda p: tuple(p[0])))
p0_umat = cv2.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
p0_umat = cv.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
self.assertTrue(np.allclose(p0_umat.get(), p0))
_p1_mask_err = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None)
_p1_mask_err = cv.calcOpticalFlowPyrLK(img1, img2, p0, None)
_p1_mask_err_umat0 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, img2, p0_umat, None))
_p1_mask_err_umat1 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(cv2.UMat(img1), img2, p0_umat, None))
_p1_mask_err_umat2 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, cv2.UMat(img2), p0_umat, None))
_p1_mask_err_umat0 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(img1, img2, p0_umat, None))
_p1_mask_err_umat1 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(cv.UMat(img1), img2, p0_umat, None))
_p1_mask_err_umat2 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(img1, cv.UMat(img2), p0_umat, None))
# # results of OCL optical flow differs from CPU implementation, so result can not be easily compared
# for p1_mask_err_umat in [p1_mask_err_umat0, p1_mask_err_umat1, p1_mask_err_umat2]:

View File

@@ -8,7 +8,7 @@ Watershed segmentation test
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@@ -23,14 +23,14 @@ class watershed_test(NewOpenCVTests):
self.assertEqual(0, 1, 'Missing test data')
colors = np.int32( list(np.ndindex(3, 3, 3)) ) * 122
cv2.watershed(img, np.int32(markers))
cv.watershed(img, np.int32(markers))
segments = colors[np.maximum(markers, 0)]
if refSegments is None:
refSegments = segments.copy()
cv2.imwrite(self.extraTestDataPath + '/cv/watershed/wshed_segments.png', refSegments)
cv.imwrite(self.extraTestDataPath + '/cv/watershed/wshed_segments.png', refSegments)
self.assertLess(cv2.norm(segments - refSegments, cv2.NORM_L1) / 255.0, 50)
self.assertLess(cv.norm(segments - refSegments, cv.NORM_L1) / 255.0, 50)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@@ -10,7 +10,7 @@ import random
import argparse
import numpy as np
import cv2
import cv2 as cv
# Python 3 moved urlopen to urllib.request
try:
@@ -26,7 +26,7 @@ class NewOpenCVTests(unittest.TestCase):
# github repository url
repoUrl = 'https://raw.github.com/opencv/opencv/master'
def get_sample(self, filename, iscolor = cv2.IMREAD_COLOR):
def get_sample(self, filename, iscolor = cv.IMREAD_COLOR):
if not filename in self.image_cache:
filedata = None
if NewOpenCVTests.repoPath is not None:
@@ -41,11 +41,11 @@ class NewOpenCVTests(unittest.TestCase):
filedata = f.read()
if filedata is None:
return None#filedata = urlopen(NewOpenCVTests.repoUrl + '/' + filename).read()
self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor)
self.image_cache[filename] = cv.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor)
return self.image_cache[filename]
def setUp(self):
cv2.setRNGSeed(10)
cv.setRNGSeed(10)
self.image_cache = {}
def hashimg(self, im):
@@ -73,7 +73,7 @@ class NewOpenCVTests(unittest.TestCase):
parser.add_argument('--data', help='<not used> use data files from local folder (path to folder), '
'if not set, data files will be downloaded from docs.opencv.org')
args, other = parser.parse_known_args()
print("Testing OpenCV", cv2.__version__)
print("Testing OpenCV", cv.__version__)
print("Local repo path:", args.repo)
NewOpenCVTests.repoPath = args.repo
try:
@@ -93,8 +93,8 @@ def intersectionRate(s1, s2):
x1, y1, x2, y2 = s2
s2 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]])
area, _intersection = cv2.intersectConvexConvex(s1, s2)
return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(s2))
area, _intersection = cv.intersectConvexConvex(s1, s2)
return 2 * area / (cv.contourArea(s1) + cv.contourArea(s2))
def isPointInRect(p, rect):
if rect[0] <= p[0] and rect[1] <=p[1] and p[0] <= rect[2] and p[1] <= rect[3]:

View File

@@ -7,7 +7,7 @@ from __future__ import print_function
import numpy as np
from numpy import pi, sin, cos
import cv2
import cv2 as cv
defaultSize = 512
@@ -88,14 +88,14 @@ class TestSceneRender():
self.currentRect = self.initialRect + np.int( 30*cos(self.time) + 50*sin(self.time/3))
if self.deformation:
self.currentRect[1:3] += int(self.h/20*cos(self.time))
cv2.fillConvexPoly(img, self.currentRect, (0, 0, 255))
cv.fillConvexPoly(img, self.currentRect, (0, 0, 255))
self.time += self.timeStep
if self.noise:
noise = np.zeros(self.sceneBg.shape, np.int8)
cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
img = cv2.add(img, noise, dtype=cv2.CV_8UC3)
cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
img = cv.add(img, noise, dtype=cv.CV_8UC3)
return img
def resetTime(self):
@@ -104,16 +104,16 @@ class TestSceneRender():
if __name__ == '__main__':
backGr = cv2.imread('../../../samples/data/lena.jpg')
backGr = cv.imread('../../../samples/data/lena.jpg')
render = TestSceneRender(backGr, noise = 0.5)
while True:
img = render.getNextFrame()
cv2.imshow('img', img)
cv.imshow('img', img)
ch = cv2.waitKey(3)
ch = cv.waitKey(3)
if ch == 27:
break
cv2.destroyAllWindows()
cv.destroyAllWindows()