Diffstat (limited to 'lib/oeqa')
 18 files changed, 183 insertions(+), 570 deletions(-)
diff --git a/lib/oeqa/runtime/cases/cyclictest.py b/lib/oeqa/runtime/cases/cyclictest.py
new file mode 100644
index 00000000..8890651a
--- /dev/null
+++ b/lib/oeqa/runtime/cases/cyclictest.py
@@ -0,0 +1,39 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+import os
+import subprocess
+import datetime
+
+class CyclicTest(OERuntimeTestCase):
+
+    @OEHasPackage(['rt-tests'])
+    @OETestDepends(['ssh.SSHTest.test_ssh'])
+    def test_cyclic(self):
+        # Cyclictest command and arguments based on the public setup for an Intel(R) Core(TM) i7-6700
+        # https://www.osadl.org/Latency-plot-of-system-in-rack-9-slot.qa-latencyplot-r9s5.0.html?shadow=1
+        # Command line: cyclictest -l100000000 -m -Sp99 -i200 -h400 -q
+        status, output = self.target.run('cyclictest -l100000000 -m -Sp99 -i200 -h400')
+        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
+        test_log_dir = self.td.get('TEST_LOG_DIR', '')
+
+        if not test_log_dir:
+            test_log_dir = os.path.join(self.td.get('WORKDIR', ''), 'testimage')
+
+        cyclic_log_dir = os.path.join(test_log_dir, '%s.%s' % ('cyclic_test', datetime.datetime.now().strftime('%Y%m%d%H%M%S')))
+        os.makedirs(cyclic_log_dir)
+        log_path = os.path.join(cyclic_log_dir, 'cyclic_log')
+        with open(log_path, 'w') as f:
+            f.write(output)
+
+        max_latency = subprocess.check_output(('grep "Max Latencies" %s | tr " " "\n" | sort -n | tail -1 | sed s/^0*//') % log_path, shell=True).strip()
+        max_latency = int(max_latency)
+
+        # Default target latency is the max latency (24us) captured in the public execution, multiplied by 1.2 (20% buffer)
+        # https://www.osadl.org/Latency-plot-of-system-in-rack-9-slot.qa-latencyplot-r9s5.0.html?shadow=1
+        target_latency = 1.2*24
+        user_defined_target_latency = self.tc.td.get("RTKERNEL_TARGET_LATENCY")
+        if user_defined_target_latency:
+            target_latency = int(user_defined_target_latency)
+        self.assertTrue(max_latency < target_latency,
+                        msg="Max latency (%sus) is greater than target (%sus)." % (max_latency, target_latency))
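Note: the grep/tr/sort/sed pipeline above extracts the largest value from cyclictest's "Max Latencies" summary line. For readers less familiar with that pipeline, the same extraction can be sketched in pure Python. This is illustrative only and not part of the patch; it assumes cyclictest's usual summary format (e.g. "# Max Latencies: 00012 00018 00024").

    import re

    def max_latency_us(cyclictest_output):
        """Return the largest value on the '# Max Latencies' summary line, in microseconds."""
        for line in cyclictest_output.splitlines():
            if 'Max Latencies' in line:
                # Collect every integer on the line and keep the biggest one.
                values = [int(v) for v in re.findall(r'\d+', line)]
                if values:
                    return max(values)
        return None

    # Example with a captured log:
    # with open('cyclic_log') as f:
    #     print(max_latency_us(f.read()))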
diff --git a/lib/oeqa/runtime/cases/dldt_inference_engine.py b/lib/oeqa/runtime/cases/dldt_inference_engine.py
deleted file mode 100644
index 2e969975..00000000
--- a/lib/oeqa/runtime/cases/dldt_inference_engine.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from oeqa.runtime.case import OERuntimeTestCase
-from oeqa.runtime.decorator.package import OEHasPackage
-from oeqa.core.decorator.depends import OETestDepends
-from oeqa.runtime.miutils.targets.oeqatarget import OEQATarget
-from oeqa.runtime.miutils.tests.squeezenet_model_download_test import SqueezenetModelDownloadTest
-from oeqa.runtime.miutils.tests.dldt_model_optimizer_test import DldtModelOptimizerTest
-from oeqa.runtime.miutils.tests.dldt_inference_engine_test import DldtInferenceEngineTest
-from oeqa.runtime.miutils.dldtutils import get_testdata_config
-
-class DldtInferenceEngine(OERuntimeTestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.sqn_download = SqueezenetModelDownloadTest(OEQATarget(cls.tc.target), '/tmp/ie/md')
-        cls.sqn_download.setup()
-        cls.dldt_mo = DldtModelOptimizerTest(OEQATarget(cls.tc.target), '/tmp/ie/ir')
-        cls.dldt_mo.setup()
-        cls.dldt_ie = DldtInferenceEngineTest(OEQATarget(cls.tc.target), '/tmp/ie/inputs')
-        cls.dldt_ie.setup()
-        cls.ir_files_dir = cls.dldt_mo.work_dir
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.dldt_ie.tear_down()
-        cls.dldt_mo.tear_down()
-        cls.sqn_download.tear_down()
-
-    @OEHasPackage(['dldt-model-optimizer'])
-    @OEHasPackage(['wget'])
-    def test_dldt_ie_can_create_ir_and_download_input(self):
-        proxy_port = get_testdata_config(self.tc.td, 'DLDT_PIP_PROXY')
-        if not proxy_port:
-            self.skipTest('Need to configure bitbake configuration (DLDT_PIP_PROXY="proxy.server:port").')
-        (status, output) = self.sqn_download.test_can_download_squeezenet_model(proxy_port)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
-        (status, output) = self.sqn_download.test_can_download_squeezenet_prototxt(proxy_port)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
-
-        mo_exe_dir = get_testdata_config(self.tc.td, 'DLDT_MO_EXE_DIR')
-        if not mo_exe_dir:
-            self.skipTest('Need to configure bitbake configuration (DLDT_MO_EXE_DIR="directory_to_mo.py").')
-        mo_files_dir = self.sqn_download.work_dir
-        (status, output) = self.dldt_mo.test_dldt_mo_can_create_ir(mo_exe_dir, mo_files_dir)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
-
-        (status, output) = self.dldt_ie.test_can_download_input_file(proxy_port)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
-
-    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
-    @OEHasPackage(['dldt-inference-engine'])
-    @OEHasPackage(['dldt-inference-engine-samples'])
-    def test_dldt_ie_classification_with_cpu(self):
-        (status, output) = self.dldt_ie.test_dldt_ie_classification_with_device('CPU', self.ir_files_dir)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
-
-    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
-    @OEHasPackage(['dldt-inference-engine'])
-    @OEHasPackage(['dldt-inference-engine-samples'])
-    @OEHasPackage(['intel-compute-runtime'])
-    @OEHasPackage(['opencl-icd-loader'])
-    def test_dldt_ie_classification_with_gpu(self):
-        (status, output) = self.dldt_ie.test_dldt_ie_classification_with_device('GPU', self.ir_files_dir)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
-
-    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
-    @OEHasPackage(['dldt-inference-engine'])
-    @OEHasPackage(['dldt-inference-engine-samples'])
-    @OEHasPackage(['dldt-inference-engine-vpu-firmware'])
-    def test_dldt_ie_classification_with_myriad(self):
-        (status, output) = self.dldt_ie.test_dldt_ie_classification_with_device('MYRIAD', self.ir_files_dir)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
-
-    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
-    @OEHasPackage(['dldt-inference-engine'])
-    @OEHasPackage(['dldt-inference-engine-python3'])
-    @OEHasPackage(['python3-opencv'])
-    @OEHasPackage(['python3-numpy'])
-    def test_dldt_ie_classification_python_api_with_cpu(self):
-        (status, output) = self.dldt_ie.test_dldt_ie_classification_python_api_with_device('CPU', self.ir_files_dir, 'libcpu_extension.so')
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
-
-    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
-    @OEHasPackage(['dldt-inference-engine'])
-    @OEHasPackage(['dldt-inference-engine-python3'])
-    @OEHasPackage(['intel-compute-runtime'])
-    @OEHasPackage(['opencl-icd-loader'])
-    @OEHasPackage(['python3-opencv'])
-    @OEHasPackage(['python3-numpy'])
-    def test_dldt_ie_classification_python_api_with_gpu(self):
-        (status, output) = self.dldt_ie.test_dldt_ie_classification_python_api_with_device('GPU', self.ir_files_dir)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
-
-    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
-    @OEHasPackage(['dldt-inference-engine'])
-    @OEHasPackage(['dldt-inference-engine-python3'])
-    @OEHasPackage(['dldt-inference-engine-vpu-firmware'])
-    @OEHasPackage(['python3-opencv'])
-    @OEHasPackage(['python3-numpy'])
-    def test_dldt_ie_classification_python_api_with_myriad(self):
-        (status, output) = self.dldt_ie.test_dldt_ie_classification_python_api_with_device('MYRIAD', self.ir_files_dir)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
diff --git a/lib/oeqa/runtime/cases/dldt_model_optimizer.py b/lib/oeqa/runtime/cases/dldt_model_optimizer.py
deleted file mode 100644
index 736ea661..00000000
--- a/lib/oeqa/runtime/cases/dldt_model_optimizer.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from oeqa.runtime.case import OERuntimeTestCase
-from oeqa.runtime.decorator.package import OEHasPackage
-from oeqa.runtime.miutils.targets.oeqatarget import OEQATarget
-from oeqa.runtime.miutils.tests.squeezenet_model_download_test import SqueezenetModelDownloadTest
-from oeqa.runtime.miutils.tests.dldt_model_optimizer_test import DldtModelOptimizerTest
-from oeqa.runtime.miutils.dldtutils import get_testdata_config
-
-class DldtModelOptimizer(OERuntimeTestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.sqn_download = SqueezenetModelDownloadTest(OEQATarget(cls.tc.target), '/tmp/mo/md')
-        cls.sqn_download.setup()
-        cls.dldt_mo = DldtModelOptimizerTest(OEQATarget(cls.tc.target), '/tmp/mo/ir')
-        cls.dldt_mo.setup()
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.dldt_mo.tear_down()
-        cls.sqn_download.tear_down()
-
-    @OEHasPackage(['dldt-model-optimizer'])
-    @OEHasPackage(['wget'])
-    def test_dldt_mo_can_create_ir(self):
-        proxy_port = get_testdata_config(self.tc.td, 'DLDT_PIP_PROXY')
-        if not proxy_port:
-            self.skipTest('Need to configure bitbake configuration (DLDT_PIP_PROXY="proxy.server:port").')
-        (status, output) = self.sqn_download.test_can_download_squeezenet_model(proxy_port)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
-        (status, output) = self.sqn_download.test_can_download_squeezenet_prototxt(proxy_port)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
-
-        mo_exe_dir = get_testdata_config(self.tc.td, 'DLDT_MO_EXE_DIR')
-        if not mo_exe_dir:
-            self.skipTest('Need to configure bitbake configuration (DLDT_MO_EXE_DIR="directory_to_mo.py").')
-        mo_files_dir = self.sqn_download.work_dir
-        (status, output) = self.dldt_mo.test_dldt_mo_can_create_ir(mo_exe_dir, mo_files_dir)
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
diff --git a/lib/oeqa/runtime/cases/intel_vaapi_driver.py b/lib/oeqa/runtime/cases/intel_vaapi_driver.py
index d5989044..31e11a81 100644
--- a/lib/oeqa/runtime/cases/intel_vaapi_driver.py
+++ b/lib/oeqa/runtime/cases/intel_vaapi_driver.py
@@ -17,7 +17,7 @@ class VaapiDriverTest(OERuntimeTestCase):
         self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

         (status, output) = self.target.run('gst-launch-1.0 -ev videotestsrc num-buffers=60 ! '
-            'timeoverlay ! vaapih264enc ! mp4mux ! filesink location=/tmp/vtest_h264.mp4')
+            'timeoverlay ! vaapih264enc ! h264parse ! mp4mux ! filesink location=/tmp/vtest_h264.mp4')
         self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

     @OETestDepends(['intel_vaapi_driver.VaapiDriverTest.test_gstreamer_can_encode_with_intel_vaapi_driver'])
diff --git a/lib/oeqa/runtime/cases/libipt.py b/lib/oeqa/runtime/cases/libipt.py
new file mode 100644
index 00000000..4adb13f0
--- /dev/null
+++ b/lib/oeqa/runtime/cases/libipt.py
@@ -0,0 +1,23 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.core.decorator.depends import OETestDepends
+
+class LibiptTest(OERuntimeTestCase):
+    libipt_bin_dir = '/usr/bin/libipt/'
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.tc.target.run('rm /tmp/loop-tnt*')
+
+    @OEHasPackage(['libipt', 'libipt2'])
+    @OEHasPackage(['libipt-test'])
+    @OEHasPackage(['yasm'])
+    def test_libipt_can_generate_trace_packet(self):
+        (status, output) = self.target.run('cd /tmp; %spttc %s/tests/loop-tnt.ptt' %
+                                           (self.libipt_bin_dir, self.libipt_bin_dir))
+        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
+
+    @OETestDepends(['libipt.LibiptTest.test_libipt_can_generate_trace_packet'])
+    def test_libipt_can_perform_trace_packet_dump(self):
+        (status, output) = self.target.run('cd /tmp; %sptdump loop-tnt.pt' % self.libipt_bin_dir)
+        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
diff --git a/lib/oeqa/runtime/cases/libxcam.py b/lib/oeqa/runtime/cases/libxcam.py
new file mode 100644
index 00000000..57192f07
--- /dev/null
+++ b/lib/oeqa/runtime/cases/libxcam.py
@@ -0,0 +1,37 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.core.decorator.depends import OETestDepends
+
+class LibxcamTest(OERuntimeTestCase):
+    yuv_file = 'vtest.yuv'
+    soft_test_app_file = 'test-soft-image'
+    libxcam_test_app_dir = '/usr/bin/libxcam/'
+    libxcam_file_dir = '/tmp/'
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.tc.target.run("rm %s%s" % (cls.libxcam_file_dir, cls.yuv_file))
+
+    @OEHasPackage(['gstreamer1.0-plugins-base'])
+    @OEHasPackage(['gstreamer1.0-plugins-good'])
+    @OEHasPackage(['gstreamer1.0-vaapi'])
+    @OEHasPackage(['intel-vaapi-driver'])
+    def test_libxcam_can_generate_yuv_file_with_gstreamer(self):
+        (status, output) = self.target.run('gst-inspect-1.0 vaapi')
+        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
+
+        (status, output) = self.target.run('gst-launch-1.0 -ev videotestsrc num-buffers=60 ! '
+                                           'timeoverlay ! filesink location=%s%s' %
+                                           (self.libxcam_file_dir, self.yuv_file))
+        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
+
+    @OEHasPackage(['libxcam'])
+    @OEHasPackage(['libxcam-test'])
+    @OETestDepends(['libxcam.LibxcamTest.test_libxcam_can_generate_yuv_file_with_gstreamer'])
+    def test_libxcam_can_execute_soft_image_sample_app(self):
+        (status, output) = self.target.run('%s%s --type remap --input0 %s%s --output soft_out.nv12 --save false' %
+                                           (self.libxcam_test_app_dir,
+                                            self.soft_test_app_file,
+                                            self.libxcam_file_dir,
+                                            self.yuv_file))
+        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
diff --git a/lib/oeqa/runtime/cases/microcode.py b/lib/oeqa/runtime/cases/microcode.py
index 6ce36a6f..52c1cdb4 100644
--- a/lib/oeqa/runtime/cases/microcode.py
+++ b/lib/oeqa/runtime/cases/microcode.py
@@ -16,20 +16,15 @@ class MicrocodeTest(OERuntimeTestCase):

     @OEHasPackage(["iucode-tool"])
     def test_microcode_update(self):
-        (status, output) = self.target.run('iucode_tool /lib/firmware/intel-ucode/ -tb -lS | grep rev')
-        if status:
-            self.skipTest("The iucode_tool detected no microcode for update.")
+        (status, output) = self.target.run('iucode_tool /lib/firmware/intel-ucode/ -tb -l --scan-system=2 | grep rev')

         selected_microcodes = output.splitlines()
         selected_rev_list = self.get_revision_from_microcode_string_list(selected_microcodes, "rev (\w*)")
-        self.assertTrue(selected_rev_list, msg="Could not find any rev from iucode_tool selected microcode.")

-        (status, output) = self.target.run('dmesg | grep microcode')
-        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
+        (status, output) = self.target.run("dmesg | grep 'microcode updated early'")

         updated_microcodes = output.splitlines()
-        updated_rev_list = self.get_revision_from_microcode_string_list(updated_microcodes, "revision=(\w*)")
-        self.assertTrue(updated_rev_list, msg="Could not find any updated revision from microcode dmesg.")
+        updated_rev_list = self.get_revision_from_microcode_string_list(updated_microcodes, "revision (\w*)")

         for ul in updated_rev_list:
             self.assertTrue(ul in selected_rev_list, msg="Updated revision, %s, not in selected revision list (%s)" %
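The helper get_revision_from_microcode_string_list() referenced in this hunk is defined earlier in microcode.py and is unchanged here. As a rough sketch of the regex extraction it is expected to perform (assumed behaviour, not the verbatim upstream helper):

    import re

    def get_revision_from_microcode_string_list(string_list, regex_pattern):
        # Pull the first capture group (the revision) out of each line that matches,
        # e.g. "rev 0xca" from iucode_tool or "microcode updated early to revision 0xca" from dmesg.
        revisions = []
        for line in string_list:
            match = re.search(regex_pattern, line)
            if match:
                revisions.append(match.group(1))
        return revisions

    # Example (assumed input): get_revision_from_microcode_string_list(['... rev 0xca ...'], r"rev (\w*)") -> ['0xca']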
diff --git a/lib/oeqa/runtime/cases/mkl_dnn.py b/lib/oeqa/runtime/cases/mkl_dnn.py
index c7994b13..8d50df54 100644
--- a/lib/oeqa/runtime/cases/mkl_dnn.py
+++ b/lib/oeqa/runtime/cases/mkl_dnn.py
@@ -14,9 +14,9 @@ class MklDnn(OERuntimeTestCase):
     def tearDownClass(cls):
         cls.mkldnntest.tear_down()

-    @OEHasPackage(['libdnnl', 'libdnnl1'])
-    @OEHasPackage(['libdnnl-src'])
-    @OEHasPackage(['libdnnl-dev'])
+    @OEHasPackage(['onednn', 'libdnnl2'])
+    @OEHasPackage(['onednn-src', 'libdnnl-src'])
+    @OEHasPackage(['onednn-dev', 'libdnnl-dev'])
     @OEHasPackage(['gcc'])
     @OEHasPackage(['gcc-symlinks'])
     @OEHasPackage(['libstdc++-dev'])
@@ -25,8 +25,8 @@ class MklDnn(OERuntimeTestCase):
         (status, output) = self.mkldnntest.test_mkldnn_can_compile_and_execute()
         self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

-    @OEHasPackage(['mkl-dnn', 'libdnnl1'])
-    @OEHasPackage(['mkl-dnn-test', 'libdnnl-test'])
+    @OEHasPackage(['onednn', 'libdnnl2'])
+    @OEHasPackage(['onednn-test', 'libdnnl-test'])
     def test_mkldnn_benchdnn_package_available(self):
         (status, output) = self.mkldnntest.test_mkldnn_benchdnn_package_available()
         self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
diff --git a/lib/oeqa/runtime/cases/parselogs-ignores-intel-core2-32.txt b/lib/oeqa/runtime/cases/parselogs-ignores-intel-core2-32.txt
new file mode 100644
index 00000000..84ce8168
--- /dev/null
+++ b/lib/oeqa/runtime/cases/parselogs-ignores-intel-core2-32.txt
@@ -0,0 +1,9 @@
+# These should be reviewed to see if they are still needed
+ACPI: No _BQC method, cannot determine initial brightness
+[Firmware Bug]: ACPI: No _BQC method, cannot determine initial brightness
+(EE) Failed to load module "psb"
+(EE) Failed to load module "psbdrv"
+(EE) open /dev/fb0: No such file or directory
+(EE) AIGLX: reverting to software rendering
+dmi: Firmware registration failed.
+ioremap error for 0x78
diff --git a/lib/oeqa/runtime/cases/parselogs-ignores-intel-corei7-64.txt b/lib/oeqa/runtime/cases/parselogs-ignores-intel-corei7-64.txt
new file mode 100644
index 00000000..5c9b4bc7
--- /dev/null
+++ b/lib/oeqa/runtime/cases/parselogs-ignores-intel-corei7-64.txt
@@ -0,0 +1,14 @@
+# These should be reviewed to see if they are still needed
+can't set Max Payload Size to 256
+intel_punit_ipc: can't request region for resource
+[drm] parse error at position 4 in video mode 'efifb'
+ACPI Error: Could not enable RealTimeClock event
+ACPI Warning: Could not enable fixed event - RealTimeClock
+hci_intel INT33E1:00: Unable to retrieve gpio
+hci_intel: probe of INT33E1:00 failed
+can't derive routing for PCI INT A
+failed to read out thermal zone
+Bluetooth: hci0: Setting Intel event mask failed
+ttyS2 - failed to request DMA
+Bluetooth: hci0: Failed to send firmware data (-38)
+atkbd serio0: Failed to enable keyboard on isa0060/serio0
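Both ignore files above are plain pattern lists for the machine-specific overrides of the generic parselogs runtime test: lines starting with '#' are comments, and any log message containing one of the remaining substrings is not reported as an error. The snippet below only illustrates that matching model; it is not the actual oe-core parselogs implementation.

    def load_ignores(path):
        # One pattern per line; '#' lines are comments.
        with open(path) as f:
            return [line.strip() for line in f
                    if line.strip() and not line.startswith('#')]

    def filter_errors(log_lines, ignore_patterns):
        # Keep only the error lines that are not covered by an ignore pattern.
        return [line for line in log_lines
                if not any(pattern in line for pattern in ignore_patterns)]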
diff --git a/lib/oeqa/runtime/cases/thermald.py b/lib/oeqa/runtime/cases/thermald.py
new file mode 100644
index 00000000..a0b6a92b
--- /dev/null
+++ b/lib/oeqa/runtime/cases/thermald.py
@@ -0,0 +1,47 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+import threading
+import time
+import re
+
+class ThermaldTest(OERuntimeTestCase):
+    def get_thermal_zone_with_target_type(self, target_type):
+        i = 0
+        while True:
+            status, output = self.target.run('cat /sys/class/thermal/thermal_zone%s/type' % i)
+            if status:
+                return -1
+            if output == target_type:
+                return i
+            i = i + 1
+
+    def run_thermald_emulation_to_exceed_setpoint_then_end_thermald_process(self, run_args):
+        time.sleep(2)
+        self.target.run('echo 106000 > /sys/class/thermal/thermal_zone%s/emul_temp' % run_args)
+        time.sleep(5)
+        __, output = self.target.run('pidof thermald')
+        self.target.run('kill -9 %s' % output)
+
+    def test_thermald_emulation_mode(self):
+        # The thermald tests depend on thermal emulation, which requires CONFIG_THERMAL_EMULATION=y
+        # To enable thermal emulation, refer to https://github.com/intel/thermal_daemon/blob/master/test/readme_test.txt
+        (status, output) = self.target.run('gzip -dc /proc/config.gz | grep CONFIG_THERMAL_EMULATION=y')
+        if status:
+            self.skipTest("CONFIG_THERMAL_EMULATION is not set")
+
+    @OEHasPackage(['thermald'])
+    @OETestDepends(['thermald.ThermaldTest.test_thermald_emulation_mode'])
+    def test_thermald_can_track_thermal_exceed_setpoint(self):
+        x86_thermal_zone_index = self.get_thermal_zone_with_target_type('x86_pkg_temp')
+        if x86_thermal_zone_index < 0:
+            self.skipTest('Could not get the thermal zone index for target type (%s)' % 'x86_pkg_temp')
+        td_thread = threading.Thread(target=self.run_thermald_emulation_to_exceed_setpoint_then_end_thermald_process,
+                                     args=(x86_thermal_zone_index,))
+        td_thread.start()
+        td_thread.join()
+        status, output = self.target.run('timeout 3s thermald --no-daemon --loglevel=info')
+        regex_search = ".*thd_cdev_set_state.*106000"
+        regex_comp = re.compile(regex_search)
+        m = regex_comp.search(output)
+        self.assertTrue(m, msg='status and output: %s and %s' % (status, output))
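The test above resolves the x86_pkg_temp zone index by probing /sys/class/thermal over the target connection, then writes an emulated temperature above the setpoint so thermald's cooling-device handling shows up in its log. Run directly on a device, the same lookup could be sketched as follows; this is illustrative only and assumes the usual sysfs thermal layout, it is not part of the patch.

    import glob
    import os

    def find_thermal_zone(target_type='x86_pkg_temp'):
        # Return the zone index whose 'type' attribute matches, or -1 if none does.
        for zone in sorted(glob.glob('/sys/class/thermal/thermal_zone*')):
            try:
                with open(os.path.join(zone, 'type')) as f:
                    if f.read().strip() == target_type:
                        return int(zone.rsplit('thermal_zone', 1)[1])
            except OSError:
                continue
        return -1

    # Writing e.g. 106000 (106 degrees C, in millidegrees) to <zone>/emul_temp then lets
    # thermald react as if the package temperature really exceeded its setpoint.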
diff --git a/lib/oeqa/runtime/files/dldt-inference-engine/classification_sample.py b/lib/oeqa/runtime/files/dldt-inference-engine/classification_sample.py
deleted file mode 100644
index 1906e9fe..00000000
--- a/lib/oeqa/runtime/files/dldt-inference-engine/classification_sample.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env python3
-"""
-Copyright (C) 2018-2019 Intel Corporation
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import print_function
-import sys
-import os
-from argparse import ArgumentParser, SUPPRESS
-import cv2
-import numpy as np
-import logging as log
-from time import time
-from openvino.inference_engine import IENetwork, IECore
-
-
-def build_argparser():
-    parser = ArgumentParser(add_help=False)
-    args = parser.add_argument_group('Options')
-    args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
-    args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.", required=True,
-                      type=str)
-    args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files",
-                      required=True,
-                      type=str, nargs="+")
-    args.add_argument("-l", "--cpu_extension",
-                      help="Optional. Required for CPU custom layers. "
-                           "MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the"
-                           " kernels implementations.", type=str, default=None)
-    args.add_argument("-d", "--device",
-                      help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL, MYRIAD or HETERO: is "
-                           "acceptable. The sample will look for a suitable plugin for device specified. Default "
-                           "value is CPU",
-                      default="CPU", type=str)
-    args.add_argument("--labels", help="Optional. Path to a labels mapping file", default=None, type=str)
-    args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)
-
-    return parser
-
-
-def main():
-    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
-    args = build_argparser().parse_args()
-    model_xml = args.model
-    model_bin = os.path.splitext(model_xml)[0] + ".bin"
-
-    # Plugin initialization for specified device and load extensions library if specified
-    log.info("Creating Inference Engine")
-    ie = IECore()
-    if args.cpu_extension and 'CPU' in args.device:
-        ie.add_extension(args.cpu_extension, "CPU")
-    # Read IR
-    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
-    net = IENetwork(model=model_xml, weights=model_bin)
-
-    if "CPU" in args.device:
-        supported_layers = ie.query_network(net, "CPU")
-        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
-        if len(not_supported_layers) != 0:
-            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
-                      format(args.device, ', '.join(not_supported_layers)))
-            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
-                      "or --cpu_extension command line argument")
-            sys.exit(1)
-
-    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
-    assert len(net.outputs) == 1, "Sample supports only single output topologies"
-
-    log.info("Preparing input blobs")
-    input_blob = next(iter(net.inputs))
-    out_blob = next(iter(net.outputs))
-    net.batch_size = len(args.input)
-
-    # Read and pre-process input images
-    n, c, h, w = net.inputs[input_blob].shape
-    images = np.ndarray(shape=(n, c, h, w))
-    for i in range(n):
-        image = cv2.imread(args.input[i])
-        if image.shape[:-1] != (h, w):
-            log.warning("Image {} is resized from {} to {}".format(args.input[i], image.shape[:-1], (h, w)))
-            image = cv2.resize(image, (w, h))
-        image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
-        images[i] = image
-    log.info("Batch size is {}".format(n))
-
-    # Loading model to the plugin
-    log.info("Loading model to the plugin")
-    exec_net = ie.load_network(network=net, device_name=args.device)
-
-    # Start sync inference
-    log.info("Starting inference in synchronous mode")
-    res = exec_net.infer(inputs={input_blob: images})
-
-    # Processing output blob
-    log.info("Processing output blob")
-    res = res[out_blob]
-    log.info("Top {} results: ".format(args.number_top))
-    if args.labels:
-        with open(args.labels, 'r') as f:
-            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
-    else:
-        labels_map = None
-    classid_str = "classid"
-    probability_str = "probability"
-    for i, probs in enumerate(res):
-        probs = np.squeeze(probs)
-        top_ind = np.argsort(probs)[-args.number_top:][::-1]
-        print("Image {}\n".format(args.input[i]))
-        print(classid_str, probability_str)
-        print("{} {}".format('-' * len(classid_str), '-' * len(probability_str)))
-        for id in top_ind:
-            det_label = labels_map[id] if labels_map else "{}".format(id)
-            label_length = len(det_label)
-            space_num_before = (len(classid_str) - label_length) // 2
-            space_num_after = len(classid_str) - (space_num_before + label_length) + 2
-            space_num_before_prob = (len(probability_str) - len(str(probs[id]))) // 2
-            print("{}{}{}{}{:.7f}".format(' ' * space_num_before, det_label,
-                                          ' ' * space_num_after, ' ' * space_num_before_prob,
-                                          probs[id]))
-        print("\n")
-    log.info("This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n")
-
-if __name__ == '__main__':
-    sys.exit(main() or 0)
diff --git a/lib/oeqa/runtime/miutils/dldtutils.py b/lib/oeqa/runtime/miutils/dldtutils.py
deleted file mode 100644
index 45bf2e12..00000000
--- a/lib/oeqa/runtime/miutils/dldtutils.py
+++ /dev/null
@@ -1,3 +0,0 @@
-
-def get_testdata_config(testdata, config):
-    return testdata.get(config)
diff --git a/lib/oeqa/runtime/miutils/tests/dldt_inference_engine_test.py b/lib/oeqa/runtime/miutils/tests/dldt_inference_engine_test.py
deleted file mode 100644
index a44f9027..00000000
--- a/lib/oeqa/runtime/miutils/tests/dldt_inference_engine_test.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import os
-script_path = os.path.dirname(os.path.realpath(__file__))
-files_path = os.path.join(script_path, '../../files/')
-
-class DldtInferenceEngineTest(object):
-    ie_input_files = {'ie_python_sample': 'classification_sample.py',
-                      'input': 'chicky_512.png',
-                      'input_download': 'https://raw.githubusercontent.com/opencv/opencv/master/samples/data/chicky_512.png',
-                      'model': 'squeezenet_v1.1.xml'}
-
-    def __init__(self, target, work_dir):
-        self.target = target
-        self.work_dir = work_dir
-
-    def setup(self):
-        self.target.run('mkdir -p %s' % self.work_dir)
-        self.target.copy_to(os.path.join(files_path, 'dldt-inference-engine', self.ie_input_files['ie_python_sample']),
-                            self.work_dir)
-
-    def tear_down(self):
-        self.target.run('rm -rf %s' % self.work_dir)
-
-    def test_can_download_input_file(self, proxy_port):
-        return self.target.run('cd %s; wget %s -e https_proxy=%s' %
-                               (self.work_dir,
-                                self.ie_input_files['input_download'],
-                                proxy_port))
-
-    def test_dldt_ie_classification_with_device(self, device, ir_files_dir):
-        return self.target.run('classification_sample_async -d %s -i %s -m %s' %
-                               (device,
-                                os.path.join(self.work_dir, self.ie_input_files['input']),
-                                os.path.join(ir_files_dir, self.ie_input_files['model'])))
-
-    def test_dldt_ie_classification_python_api_with_device(self, device, ir_files_dir, extension=''):
-        if extension:
-            return self.target.run('python3 %s -d %s -i %s -m %s -l %s' %
-                                   (os.path.join(self.work_dir, self.ie_input_files['ie_python_sample']),
-                                    device,
-                                    os.path.join(self.work_dir, self.ie_input_files['input']),
-                                    os.path.join(ir_files_dir, self.ie_input_files['model']),
-                                    extension))
-        else:
-            return self.target.run('python3 %s -d %s -i %s -m %s' %
-                                   (os.path.join(self.work_dir, self.ie_input_files['ie_python_sample']),
-                                    device,
-                                    os.path.join(self.work_dir, self.ie_input_files['input']),
-                                    os.path.join(ir_files_dir, self.ie_input_files['model'])))
diff --git a/lib/oeqa/runtime/miutils/tests/dldt_model_optimizer_test.py b/lib/oeqa/runtime/miutils/tests/dldt_model_optimizer_test.py
deleted file mode 100644
index 7d3db15b..00000000
--- a/lib/oeqa/runtime/miutils/tests/dldt_model_optimizer_test.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import os
-
-class DldtModelOptimizerTest(object):
-    mo_input_files = {'model': 'squeezenet_v1.1.caffemodel',
-                      'prototxt': 'deploy.prototxt'}
-    mo_exe = 'mo.py'
-
-    def __init__(self, target, work_dir):
-        self.target = target
-        self.work_dir = work_dir
-
-    def setup(self):
-        self.target.run('mkdir -p %s' % self.work_dir)
-
-    def tear_down(self):
-        self.target.run('rm -rf %s' % self.work_dir)
-
-    def test_dldt_mo_can_create_ir(self, mo_exe_dir, mo_files_dir):
-        return self.target.run('python3 %s --input_model %s --input_proto %s --output_dir %s --data_type FP16' %
-                               (os.path.join(mo_exe_dir, self.mo_exe),
-                                os.path.join(mo_files_dir, self.mo_input_files['model']),
-                                os.path.join(mo_files_dir, self.mo_input_files['prototxt']),
-                                self.work_dir))
diff --git a/lib/oeqa/runtime/miutils/tests/mkl_dnn_test.py b/lib/oeqa/runtime/miutils/tests/mkl_dnn_test.py
index 13afd1a4..869a4cbe 100644
--- a/lib/oeqa/runtime/miutils/tests/mkl_dnn_test.py
+++ b/lib/oeqa/runtime/miutils/tests/mkl_dnn_test.py
@@ -9,13 +9,13 @@ class MkldnnTest(object):
         self.target.run('rm /tmp/%s' % self.mkldnn_target_test_filename)

     def test_mkldnn_can_compile_and_execute(self):
-        mkldnn_src_dir = '/usr/src/debug/mkl-dnn/'
+        mkldnn_src_dir = '/usr/src/debug/onednn/'
         mkldnn_src_test_filename = 'api.c'
         mkldnn_src_test_file = ''

-        (status, output) = self.target.run('cd %s; find -name %s' % (mkldnn_src_dir, mkldnn_src_test_filename))
-        if status:
-            return status, output
+        (__, output) = self.target.run('cd %s; find -name %s' % (mkldnn_src_dir, mkldnn_src_test_filename))
+        if 'No such file or directory' in output:
+            return -1, output

         mkldnn_src_test_file = os.path.join(mkldnn_src_dir, output)
         (status, output) = self.target.run('gcc %s -o /tmp/%s -ldnnl' % (mkldnn_src_test_file, self.mkldnn_target_test_filename))
@@ -49,9 +49,7 @@ class MkldnnTest(object):
         return self._run_mkldnn_benchdnn_test('./benchdnn --reorder --batch=inputs/reorder/test_reorder_bfloat16')

     def test_mkldnn_rnn_api(self):
-        # test_rnn_inference was not yet ready and was expected to fail
-        # while waiting it to be ready, use test_rnn_small for now
-        return self._run_mkldnn_benchdnn_test('./benchdnn --rnn --batch=inputs/rnn/test_rnn_small')
+        return self._run_mkldnn_benchdnn_test('./benchdnn --rnn --batch=inputs/rnn/test_rnn_all')

     def test_mkldnn_shuffle_api(self):
         return self._run_mkldnn_benchdnn_test('./benchdnn --shuffle --batch=inputs/shuffle/test_shuffle_bfloat16')
\ No newline at end of file
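The updated check in the first hunk inspects the combined output for the shell's "No such file or directory" text because, with `cd %s; find -name %s`, a failed `cd` does not necessarily make the whole command list return a non-zero status (find still runs in the current directory). As a hedged sketch only, against the same target.run() interface and with a hypothetical helper name, the lookup could instead let find search the directory itself and treat empty output as failure:

    def locate_src_test_file(target, src_dir, filename):
        # Return (0, path) if filename was found under src_dir on the target, (-1, message) otherwise.
        # Illustrative only; the helper name and return convention are assumptions, not upstream code.
        status, output = target.run('find %s -name %s -print -quit 2>/dev/null' % (src_dir, filename))
        if status or not output.strip():
            return -1, 'could not find %s under %s' % (filename, src_dir)
        return 0, output.strip()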
diff --git a/lib/oeqa/runtime/miutils/tests/squeezenet_model_download_test.py b/lib/oeqa/runtime/miutils/tests/squeezenet_model_download_test.py
deleted file mode 100644
index a3e46a0a..00000000
--- a/lib/oeqa/runtime/miutils/tests/squeezenet_model_download_test.py
+++ /dev/null
@@ -1,25 +0,0 @@
-class SqueezenetModelDownloadTest(object):
-    download_files = {'squeezenet1.1.prototxt': 'https://raw.githubusercontent.com/DeepScale/SqueezeNet/a47b6f13d30985279789d08053d37013d67d131b/SqueezeNet_v1.1/deploy.prototxt',
-                      'squeezenet1.1.caffemodel': 'https://github.com/DeepScale/SqueezeNet/raw/a47b6f13d30985279789d08053d37013d67d131b/SqueezeNet_v1.1/squeezenet_v1.1.caffemodel'}
-
-    def __init__(self, target, work_dir):
-        self.target = target
-        self.work_dir = work_dir
-
-    def setup(self):
-        self.target.run('mkdir -p %s' % self.work_dir)
-
-    def tear_down(self):
-        self.target.run('rm -rf %s' % self.work_dir)
-
-    def test_can_download_squeezenet_model(self, proxy_port):
-        return self.target.run('cd %s; wget %s -e https_proxy=%s' %
-                               (self.work_dir,
-                                self.download_files['squeezenet1.1.caffemodel'],
-                                proxy_port))
-
-    def test_can_download_squeezenet_prototxt(self, proxy_port):
-        return self.target.run('cd %s; wget %s -e https_proxy=%s' %
-                               (self.work_dir,
-                                self.download_files['squeezenet1.1.prototxt'],
-                                proxy_port))
diff --git a/lib/oeqa/selftest/cases/secureboot.py b/lib/oeqa/selftest/cases/secureboot.py
deleted file mode 100644
index 4c059e25..00000000
--- a/lib/oeqa/selftest/cases/secureboot.py
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/usr/bin/env python
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# Copyright (c) 2017, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# AUTHORS
-# Mikko Ylinen <mikko.ylinen@linux.intel.com>
-#
-# Based on meta/lib/oeqa/selftest/* and meta-refkit/lib/oeqa/selftest/*
-
-"""Test cases for secure boot with QEMU running OVMF."""
-
-import os
-import unittest
-import re
-import glob
-from shutil import rmtree, copy
-
-from oeqa.core.decorator.depends import OETestDepends
-from oeqa.selftest.case import OESelftestTestCase
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars, runqemu
-
-class SecureBootTests(OESelftestTestCase):
-    """Secure Boot test class."""
-
-    ovmf_keys_enrolled = False
-    ovmf_qemuparams = ''
-    ovmf_dir = ''
-    test_image_unsigned = 'secureboot-selftest-image-unsigned'
-    test_image_signed = 'secureboot-selftest-image-signed'
-    correct_key = 'refkit-db'
-    incorrect_key = 'incorrect'
-
-    @classmethod
-    def setUpLocal(self):
-
-        if not SecureBootTests.ovmf_keys_enrolled:
-            bitbake('ovmf ovmf-shell-image-enrollkeys', output_log=self.logger)
-
-            bb_vars = get_bb_vars(['TMPDIR', 'DEPLOY_DIR_IMAGE'])
-
-            SecureBootTests.ovmf_dir = os.path.join(bb_vars['TMPDIR'], 'oeselftest', 'secureboot', 'ovmf')
-            bb.utils.mkdirhier(SecureBootTests.ovmf_dir)
-
-            # Copy (all) OVMF in a temporary location
-            for src in glob.glob('%s/ovmf.*' % bb_vars['DEPLOY_DIR_IMAGE']):
-                copy(src, SecureBootTests.ovmf_dir)
-
-            SecureBootTests.ovmf_qemuparams = '-drive if=pflash,format=qcow2,file=%s/ovmf.secboot.qcow2' % SecureBootTests.ovmf_dir
-
-            cmd = ("runqemu "
-                   "qemuparams='%s' "
-                   "ovmf-shell-image-enrollkeys wic intel-corei7-64 "
-                   "nographic slirp") % SecureBootTests.ovmf_qemuparams
-            print('Running "%s"' % cmd)
-            status = runCmd(cmd)
-
-            if not re.search('info: success', status.output, re.M):
-                self.fail('Failed to enroll keys. EFI shell log:\n%s' % status.output)
-            else:
-                # keys enrolled in ovmf.secboot.vars
-                SecureBootTests.ovmf_keys_enrolled = True
-
-    @classmethod
-    def tearDownLocal(self):
-        # Seems this is mandatory between the tests (a signed image is booted
-        # when running test_boot_unsigned_image after test_boot_signed_image).
-        # bitbake('-c clean %s' % test_image, output_log=self.logger)
-        #
-        # Whatever the problem was, it no longer seems to be necessary, so
-        # we can skip the time-consuming clean + full rebuild (5:04 min instead
-        # of 6:55min here).
-        pass
-
-    @classmethod
-    def tearDownClass(self):
-        bitbake('ovmf-shell-image-enrollkeys:do_cleanall', output_log=self.logger)
-        rmtree(self.ovmf_dir, ignore_errors=True)
-
-    def secureboot_with_image(self, boot_timeout=300, signing_key=None):
-        """Boot the image with UEFI SecureBoot enabled and see the result. """
-
-        config = ""
-
-        if signing_key:
-            test_image = self.test_image_signed
-            config += 'SECURE_BOOT_SIGNING_KEY = "${THISDIR}/files/%s.key"\n' % signing_key
-            config += 'SECURE_BOOT_SIGNING_CERT = "${THISDIR}/files/%s.crt"\n' % signing_key
-        else:
-            test_image = self.test_image_unsigned
-
-        self.write_config(config)
-        bitbake(test_image, output_log=self.logger)
-        self.remove_config(config)
-
-        # Some of the cases depend on the timeout to expire. Allow overrides
-        # so that we don't have to wait 1000s which is the default.
-        overrides = {
-            'TEST_QEMUBOOT_TIMEOUT': boot_timeout,
-        }
-
-        print('Booting %s' % test_image)
-
-        try:
-            with runqemu(test_image, ssh=False,
-                         runqemuparams='nographic slirp',
-                         qemuparams=self.ovmf_qemuparams,
-                         overrides=overrides,
-                         image_fstype='wic') as qemu:
-
-                cmd = 'uname -a'
-
-                status, output = qemu.run_serial(cmd)
-
-                self.assertTrue(status, 'Could not run \'uname -a\' (status=%s):\n%s' % (status, output))
-
-                # if we got this far without a correctly signed image, something went wrong
-                if signing_key != self.correct_key:
-                    self.fail('The image not give a Security violation when expected. Boot log:\n%s' % output)
-
-
-        except Exception:
-
-            # Currently runqemu() fails if 'login:' prompt is not seen and it's
-            # not possible to login as 'root'. Those conditions aren't met when
-            # booting to EFI shell (See [YOCTO #11438]). We catch the failure
-            # and parse the boot log to determine the success. Note: the
-            # timeout triggers verbose bb.error() but that's normal with some
-            # of the test cases.
-
-            workdir = get_bb_var('WORKDIR', test_image)
-            bootlog = "%s/testimage/qemu_boot_log" % workdir
-
-            with open(bootlog, "r") as log:
-
-                # This isn't right but all we can do at this point. The right
-                # approach would run commands in the EFI shell to determine
-                # the BIOS rejects unsigned and/or images signed with keys in
-                # dbx key store but that needs changes in oeqa framework.
-
-                output = log.read()
-
-                # PASS if we see a security violation on unsigned or incorrectly signed images, otherwise fail
-                if signing_key == self.correct_key:
-                    self.fail('Correctly signed image failed to boot. Boot log:\n%s' % output)
-                elif not re.search('Security Violation', output):
-                    self.fail('The image not give a Security violation when expected. Boot log:\n%s' % output)
-
-    def test_boot_unsigned_image(self):
-        """ Boot unsigned image with secureboot enabled in UEFI."""
-        self.secureboot_with_image(boot_timeout=120, signing_key=None)
-
-    @OETestDepends(['secureboot.SecureBootTests.test_boot_unsigned_image'])
-    def test_boot_incorrectly_signed_image(self):
-        """ Boot (correctly) signed image with secureboot enabled in UEFI."""
-        self.secureboot_with_image(boot_timeout=120, signing_key=self.incorrect_key)
-
-    @OETestDepends(['secureboot.SecureBootTests.test_boot_incorrectly_signed_image'])
-    def test_boot_correctly_signed_image(self):
-        """ Boot (correctly) signed image with secureboot enabled in UEFI."""
-        self.secureboot_with_image(boot_timeout=150, signing_key=self.correct_key)
