diff --git a/testdata/dnn/darknet/generate_darknet_models.py b/testdata/dnn/darknet/generate_darknet_models.py
index 532daea7b..eeaf67dc7 100644
--- a/testdata/dnn/darknet/generate_darknet_models.py
+++ b/testdata/dnn/darknet/generate_darknet_models.py
@@ -1,5 +1,6 @@
 # export PYTHONPATH=/path/to/darknet/python/:$PYTHONPATH
 # export LD_LIBRARY_PATH=/path/to/darknet/:$LD_LIBRARY_PATH
+from builtins import range
 import darknet as dn
 import numpy as np
 
diff --git a/testdata/dnn/download_models.py b/testdata/dnn/download_models.py
index cd3258641..862e92544 100755
--- a/testdata/dnn/download_models.py
+++ b/testdata/dnn/download_models.py
@@ -1,18 +1,23 @@
 #!/usr/bin/env python
 from __future__ import print_function
+from __future__ import division
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
+from past.utils import old_div
 import hashlib
 import sys
 import tarfile
 import requests
 
 if sys.version_info[0] < 3:
-    from urllib2 import urlopen
+    from urllib.request import urlopen
 else:
     from urllib.request import urlopen
 
-class Model:
+class Model(object):
     MB = 1024*1024
     BUFSIZE = 10*MB
@@ -33,7 +38,7 @@ def getMB(r):
             d = dict(r.info())
             for c in ['content-length', 'Content-Length']:
                 if c in d:
-                    return int(d[c]) / self.MB
+                    return old_div(int(d[c]), self.MB)
             return ''
         print(' {} {} [{} Mb]'.format(r.getcode(), r.msg, getMB(r)))
@@ -72,7 +77,7 @@ def get(self):
             assert self.downloader
             print(' hash check failed - downloading')
             sz = self.downloader(self.filename)
-            print(' size = %.2f Mb' % (sz / (1024.0 * 1024)))
+            print(' size = %.2f Mb' % (old_div(sz, (1024.0 * 1024))))
             print(' done')
         print(' file {}'.format(self.filename))
@@ -115,7 +120,7 @@ def download_gdrive(dst):
     response = session.get(URL, params = { 'id' : gid }, stream = True)
 
     def get_confirm_token(response):  # in case of large files
-        for key, value in response.cookies.items():
+        for key, value in list(response.cookies.items()):
             if key.startswith('download_warning'):
                 return value
         return None
diff --git a/testdata/dnn/layers/run.py b/testdata/dnn/layers/run.py
index 23d4117c9..88fea48c2 100644
--- a/testdata/dnn/layers/run.py
+++ b/testdata/dnn/layers/run.py
@@ -1,5 +1,6 @@
 # coding: utf-8
 
+from __future__ import print_function
 import sys, os, glob
 
 CAFFE_ROOT = "/home/vitaliy/opencv/caffe/"
@@ -20,7 +21,7 @@ def get_cafe_output(inp_blob, proto_name, caffemodel_name):
     out_blob = net.blobs['output'].data[...];
 
     if net.params.get('output'):
-        print "Params count:", len(net.params['output'])
+        print("Params count:", len(net.params['output']))
 
     net.save(caffemodel_name)
     return out_blob
@@ -37,9 +38,9 @@ def get_cafe_output(inp_blob, proto_name, caffemodel_name):
     inp_blob_name = proto_basename + ".input.npy"
     inp_blob = np.load(inp_blob_name) if os.path.exists(inp_blob_name) else np.load('blob.npy')
 
-    print "\nGenerate data for:"
-    print cfmod_basename, inp_blob.shape
+    print("\nGenerate data for:")
+    print(cfmod_basename, inp_blob.shape)
 
     out_blob = get_cafe_output(inp_blob, proto_filename, cfmod_basename)
-    print out_blob.shape
+    print(out_blob.shape)
     np.save(npy_filename, out_blob)
diff --git a/testdata/dnn/onnx/generate_onnx_models.py b/testdata/dnn/onnx/generate_onnx_models.py
index 9fe187b8d..9a512f849 100644
--- a/testdata/dnn/onnx/generate_onnx_models.py
+++ b/testdata/dnn/onnx/generate_onnx_models.py
@@ -1,4 +1,7 @@
 from __future__ import print_function
+from __future__ import division
+from builtins import str
+from past.utils import old_div
 import torch
 from torch.autograd import Variable
 import torch.nn.init as init
@@ -12,7 +15,7 @@ def assertExpected(s):
-    if not (isinstance(s, str) or (sys.version_info[0] == 2 and isinstance(s, unicode))):
+    if not (isinstance(s, str) or (sys.version_info[0] == 2 and isinstance(s, str))):
         raise TypeError("assertExpected is strings only")
 
 def assertONNXExpected(binary_pb):
@@ -556,7 +559,7 @@ def __init__(self):
     def forward(self, x):
         exp = torch.exp(x)
         sum = torch.sum(exp, dim=2, keepdim=True)
-        return exp / sum
+        return old_div(exp, sum)
 
 input = Variable(torch.randn(1, 2, 4, 3))
 model = SoftMaxUnfused()
@@ -587,7 +590,7 @@ def forward(self, image):
         channels = image.size(1)
         h = image.size(2)
         w = image.size(3)
-        image = image.view(batch_size, channels*h* (w / 2), -1)
+        image = image.view(batch_size, channels*h* (old_div(w, 2)), -1)
         return image
 
 input = Variable(torch.randn(1, 2, 3, 4))
@@ -600,7 +603,7 @@ def __init__(self):
         super(Broadcast, self).__init__()
 
     def forward(self, x, y):
-        return x * y + (x - x) / y - y
+        return x * y + old_div((x - x), y) - y
 
 input1 = Variable(torch.randn(1, 4, 1, 2))
 input2 = Variable(torch.randn(1, 4, 1, 1))
@@ -746,7 +749,7 @@ def forward(self, x):
         norm = torch.norm(x, p=2, dim=1, keepdim=True)
         clip = torch.clamp(norm, min=0)
         expand = clip.expand_as(x)
-        return x / expand
+        return old_div(x, expand)
 
 model = NormL2()
 x = Variable(torch.randn(1, 2, 3, 4))
@@ -813,7 +816,7 @@ def forward(self, x, y):
 save_data_and_model_multy_inputs("upsample_unfused_two_inputs_opset9_torch1.4", UpsampleUnfusedTwoInput(), input_0, input_1, version=9)
 save_data_and_model_multy_inputs("upsample_unfused_two_inputs_opset11_torch1.4", UpsampleUnfusedTwoInput(), input_0, input_1, version=11)
 
-    class FrozenBatchNorm2d(nn.Module):
+class FrozenBatchNorm2d(nn.Module):
     def __init__(self, n):
         super(FrozenBatchNorm2d, self).__init__()
         self.register_buffer("weight", torch.ones(n))
@@ -832,7 +835,7 @@ def forward(self, x):
 
 model = FrozenBatchNorm2d(2)
 save_data_and_model("batch_norm_subgraph", x, model)
 
-    class GatherScalar(nn.Module):
+class GatherScalar(nn.Module):
     def forward(self, x):
         return x[1]
@@ -840,7 +843,7 @@ def forward(self, x):
 
 model = GatherScalar()
 save_data_and_model("gather_scalar", x, model)
 
-    class Gather(nn.Module):
+class Gather(nn.Module):
     def forward(self, x):
         return x[..., 1]
diff --git a/testdata/dnn/tensorflow/generate_tf2_models.py b/testdata/dnn/tensorflow/generate_tf2_models.py
index c157ff185..627011051 100644
--- a/testdata/dnn/tensorflow/generate_tf2_models.py
+++ b/testdata/dnn/tensorflow/generate_tf2_models.py
@@ -1,4 +1,5 @@
 # This script is used to generate test data for OpenCV deep learning module.
+from builtins import next
 import numpy as np
 import tensorflow as tf
 import shutil
@@ -37,7 +38,7 @@ def save(model, name, **kwargs):
     assert(len(kwargs) == 1)
 
-    inputData = gen_data(next(iter(kwargs.values())))
+    inputData = gen_data(next(iter(list(kwargs.values()))))
     outputData = model(inputData)
 
     writeBlob(inputData, name + '_in')
diff --git a/testdata/dnn/tensorflow/generate_tf_models.py b/testdata/dnn/tensorflow/generate_tf_models.py
index 3897d96de..cad89e4eb 100644
--- a/testdata/dnn/tensorflow/generate_tf_models.py
+++ b/testdata/dnn/tensorflow/generate_tf_models.py
@@ -1,4 +1,7 @@
+from __future__ import division
 # This script is used to generate test data for OpenCV deep learning module.
+from builtins import zip
+from past.utils import old_div
 import numpy as np
 import tensorflow as tf
 import os
@@ -869,8 +872,8 @@ def pad_depth(x, desired_channels):
 inp = tf.placeholder(tf.float32, [1, 9, 6, 2], 'input')
 conv = tf.layers.conv2d(inp, filters=2, kernel_size=[1, 1])
 shape_input = tf.shape(inp)
-hi = shape_input[1] / 3
-wi = shape_input[2] / 2
+hi = old_div(shape_input[1], 3)
+wi = old_div(shape_input[2], 2)
 input_down = tf.image.resize(conv, size=[hi,wi], method=0, name='resize_down')
 save(inp, input_down, 'resize_bilinear_down')
 ################################################################################
diff --git a/testdata/perf/append.py b/testdata/perf/append.py
index 391cc100c..ddbb1d6fe 100644
--- a/testdata/perf/append.py
+++ b/testdata/perf/append.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import sys, re, os.path
 from xml.dom.minidom import parse
 
@@ -22,7 +23,7 @@ def processLogFile(outname, inname):
         if case.nodeName in tests:
             del tests[case.nodeName]
 
-    for case in tests.items():
+    for case in list(tests.items()):
         fstorage.appendChild(case[1])
 
     if tests:
@@ -39,7 +40,7 @@
 
 if __name__ == "__main__":
     if len(sys.argv) < 3:
-        print "Usage:\n", os.path.basename(sys.argv[0]), "<output>.xml <input>.xml"
+        print("Usage:\n", os.path.basename(sys.argv[0]), "<output>.xml <input>.xml")
         exit(0)
 
     processLogFile(sys.argv[1], sys.argv[2])
diff --git a/testdata/perf/clean_regex.py b/testdata/perf/clean_regex.py
index c27eba3dc..c0c697754 100644
--- a/testdata/perf/clean_regex.py
+++ b/testdata/perf/clean_regex.py
@@ -1,43 +1,44 @@
-import sys, re, os.path
-from xml.dom.minidom import parse
-
-def parseLogFile(filename):
-    tests = []
-    log = parse(open(filename, 'rb'))
-    fstorage = log.firstChild
-    #print help(log)
-    for case in fstorage.childNodes:
-        if case.nodeName == "#text":
-            continue
-        #print case.nodeName
-        tests.append(case.nodeName)
-    return tests
-
-def process(filename, expr, save_results):
-    log = parse(open(filename, 'rb'))
-    fstorage = log.firstChild
-    for case in fstorage.childNodes:
-        if case.nodeName == "#text":
-            continue
-        if expr.search(case.nodeName):
-            print case.nodeName
-            fstorage.removeChild(case)
-
-    if save_results:
-        xmlstr = log.toxml()
-        xmlstr = re.sub(r"(\s*\n)+", "\n", xmlstr)
-        xmlstr = re.sub(r"(\s*\r\n)+", "\r\n", xmlstr)
-        xmlstr = re.sub(r"<(\w*)/>", "<\\1>", xmlstr)
-        xmlstr = xmlstr.replace("&quot;", "\"")
-        f = open(filename, 'wb')
-        f.write(xmlstr)
-        f.close()
-
-if __name__ == "__main__":
-    if len(sys.argv) < 3:
-        print "This script is used to remove entries from sanity xml"
-        print "  Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml <regex>"
-        exit(0)
-
-    process(sys.argv[1], re.compile(sys.argv[2]), len(sys.argv) == 4)
-
+from __future__ import print_function
+import sys, re, os.path
+from xml.dom.minidom import parse
+
+def parseLogFile(filename):
+    tests = []
+    log = parse(open(filename, 'rb'))
+    fstorage = log.firstChild
+    #print help(log)
+    for case in fstorage.childNodes:
+        if case.nodeName == "#text":
+            continue
+        #print case.nodeName
+        tests.append(case.nodeName)
+    return tests
+
+def process(filename, expr, save_results):
+    log = parse(open(filename, 'rb'))
+    fstorage = log.firstChild
+    for case in fstorage.childNodes:
+        if case.nodeName == "#text":
+            continue
+        if expr.search(case.nodeName):
+            print(case.nodeName)
+            fstorage.removeChild(case)
+
+    if save_results:
+        xmlstr = log.toxml()
+        xmlstr = re.sub(r"(\s*\n)+", "\n", xmlstr)
+        xmlstr = re.sub(r"(\s*\r\n)+", "\r\n", xmlstr)
+        xmlstr = re.sub(r"<(\w*)/>", "<\\1>", xmlstr)
+        xmlstr = xmlstr.replace("&quot;", "\"")
+        f = open(filename, 'wb')
+        f.write(xmlstr)
+        f.close()
+
+if __name__ == "__main__":
+    if len(sys.argv) < 3:
+        print("This script is used to remove entries from sanity xml")
+        print("  Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml <regex>")
+        exit(0)
+
+    process(sys.argv[1], re.compile(sys.argv[2]), len(sys.argv) == 4)
+
diff --git a/testdata/perf/clean_unused.py b/testdata/perf/clean_unused.py
index a52df253e..98638094b 100644
--- a/testdata/perf/clean_unused.py
+++ b/testdata/perf/clean_unused.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import sys, re, os.path
 from xml.dom.minidom import parse
 
@@ -33,7 +34,7 @@ def processLogFile(outname, inname, tests):
 
 if __name__ == "__main__":
     if len(sys.argv) < 3:
-        print "Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml <log_name>.backup.xml"
+        print("Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml <log_name>.backup.xml")
        exit(0)

    tests = parseLogFile(sys.argv[1])
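
Note on the recurring `old_div` rewrites above: once `from __future__ import division` is in effect, `/` always performs true division, so futurize wraps existing divisions in `past.utils.old_div` to preserve the original Python 2 semantics (floor division for two integers, true division otherwise). A minimal standalone sketch of that behavior, not part of the patch, assuming only that the `future` package is installed:

    from __future__ import division
    from past.utils import old_div

    # Two integer operands: Python 2 semantics, i.e. floor division.
    assert old_div(7, 2) == 3        # same result as 7 // 2
    # Any float operand: true division, unaffected by the __future__ import.
    assert old_div(7.0, 2) == 3.5    # same result as 7.0 / 2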