author    Yeoh Ee Peng <ee.peng.yeoh@intel.com>    2019-12-04 11:17:42 +0800
committer Anuj Mittal <anuj.mittal@intel.com>      2019-12-10 13:31:24 +0800
commit    1e514838dfad1d2339f896f850425bc33621d297 (patch)
tree      f93a016de883f4af953592e7c635cae6e6f56308 /lib/oeqa
parent    f39ad91524768c729cd68ce367dacf9b4dfc4cf7 (diff)
oeqa/runtime/cases/dldt: Enable inference engine and model optimizer tests
Add sanity tests for inference engine:
- test inference engine c/cpp shared library
- test inference engine python api
- test inference engine cpu, gpu, myriad plugin

Add sanity tests for model optimizer:
- test model optimizer can generate ir

Licenses:
- classification_sample.py license: Apache 2.0
  source: <install_root>/deployment_tools/inference_engine/samples/*

Signed-off-by: Yeoh Ee Peng <ee.peng.yeoh@intel.com>
Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
Diffstat (limited to 'lib/oeqa')
-rw-r--r--  lib/oeqa/runtime/cases/dldt_inference_engine.py                        | 101
-rw-r--r--  lib/oeqa/runtime/cases/dldt_model_optimizer.py                         |  38
-rw-r--r--  lib/oeqa/runtime/files/dldt-inference-engine/classification_sample.py  | 135
-rw-r--r--  lib/oeqa/runtime/miutils/dldtutils.py                                  |   3
-rw-r--r--  lib/oeqa/runtime/miutils/targets/oeqatarget.py                         |  11
-rw-r--r--  lib/oeqa/runtime/miutils/tests/dldt_inference_engine_test.py           |  48
-rw-r--r--  lib/oeqa/runtime/miutils/tests/dldt_model_optimizer_test.py            |  23
-rw-r--r--  lib/oeqa/runtime/miutils/tests/squeezenet_model_download_test.py       |  25
8 files changed, 384 insertions, 0 deletions
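
A note on configuration before the file diffs: the new cases read their settings from the testdata dictionary that testimage exports as self.tc.td, so get_testdata_config() is a plain dict lookup and the tests skip when a key is absent. A minimal sketch of that flow, with hypothetical values for the two variables the commit message names:

# Sketch only: the values below are hypothetical examples of what the
# bitbake configuration would provide via testdata (self.tc.td).
td = {'DLDT_PIP_PROXY': 'proxy.example.com:911',
      'DLDT_MO_EXE_DIR': '/usr/share/dldt/model-optimizer'}

def get_testdata_config(testdata, config):
    # Same helper as lib/oeqa/runtime/miutils/dldtutils.py below.
    return testdata.get(config)

assert get_testdata_config(td, 'DLDT_PIP_PROXY') == 'proxy.example.com:911'
assert get_testdata_config(td, 'UNSET_KEY') is None   # triggers skipTest in the cases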
diff --git a/lib/oeqa/runtime/cases/dldt_inference_engine.py b/lib/oeqa/runtime/cases/dldt_inference_engine.py
new file mode 100644
index 00000000..2e969975
--- /dev/null
+++ b/lib/oeqa/runtime/cases/dldt_inference_engine.py
@@ -0,0 +1,101 @@
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.runtime.decorator.package import OEHasPackage
from oeqa.core.decorator.depends import OETestDepends
from oeqa.runtime.miutils.targets.oeqatarget import OEQATarget
from oeqa.runtime.miutils.tests.squeezenet_model_download_test import SqueezenetModelDownloadTest
from oeqa.runtime.miutils.tests.dldt_model_optimizer_test import DldtModelOptimizerTest
from oeqa.runtime.miutils.tests.dldt_inference_engine_test import DldtInferenceEngineTest
from oeqa.runtime.miutils.dldtutils import get_testdata_config

class DldtInferenceEngine(OERuntimeTestCase):

    @classmethod
    def setUpClass(cls):
        cls.sqn_download = SqueezenetModelDownloadTest(OEQATarget(cls.tc.target), '/tmp/ie/md')
        cls.sqn_download.setup()
        cls.dldt_mo = DldtModelOptimizerTest(OEQATarget(cls.tc.target), '/tmp/ie/ir')
        cls.dldt_mo.setup()
        cls.dldt_ie = DldtInferenceEngineTest(OEQATarget(cls.tc.target), '/tmp/ie/inputs')
        cls.dldt_ie.setup()
        cls.ir_files_dir = cls.dldt_mo.work_dir

    @classmethod
    def tearDownClass(cls):
        cls.dldt_ie.tear_down()
        cls.dldt_mo.tear_down()
        cls.sqn_download.tear_down()

    @OEHasPackage(['dldt-model-optimizer'])
    @OEHasPackage(['wget'])
    def test_dldt_ie_can_create_ir_and_download_input(self):
        proxy_port = get_testdata_config(self.tc.td, 'DLDT_PIP_PROXY')
        if not proxy_port:
            self.skipTest('Need to configure bitbake configuration (DLDT_PIP_PROXY="proxy.server:port").')
        (status, output) = self.sqn_download.test_can_download_squeezenet_model(proxy_port)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
        (status, output) = self.sqn_download.test_can_download_squeezenet_prototxt(proxy_port)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

        mo_exe_dir = get_testdata_config(self.tc.td, 'DLDT_MO_EXE_DIR')
        if not mo_exe_dir:
            self.skipTest('Need to configure bitbake configuration (DLDT_MO_EXE_DIR="directory_to_mo.py").')
        mo_files_dir = self.sqn_download.work_dir
        (status, output) = self.dldt_mo.test_dldt_mo_can_create_ir(mo_exe_dir, mo_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

        (status, output) = self.dldt_ie.test_can_download_input_file(proxy_port)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-samples'])
    def test_dldt_ie_classification_with_cpu(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_with_device('CPU', self.ir_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-samples'])
    @OEHasPackage(['intel-compute-runtime'])
    @OEHasPackage(['opencl-icd-loader'])
    def test_dldt_ie_classification_with_gpu(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_with_device('GPU', self.ir_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-samples'])
    @OEHasPackage(['dldt-inference-engine-vpu-firmware'])
    def test_dldt_ie_classification_with_myriad(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_with_device('MYRIAD', self.ir_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-python3'])
    @OEHasPackage(['python3-opencv'])
    @OEHasPackage(['python3-numpy'])
    def test_dldt_ie_classification_python_api_with_cpu(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_python_api_with_device('CPU', self.ir_files_dir, 'libcpu_extension.so')
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-python3'])
    @OEHasPackage(['intel-compute-runtime'])
    @OEHasPackage(['opencl-icd-loader'])
    @OEHasPackage(['python3-opencv'])
    @OEHasPackage(['python3-numpy'])
    def test_dldt_ie_classification_python_api_with_gpu(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_python_api_with_device('GPU', self.ir_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-python3'])
    @OEHasPackage(['dldt-inference-engine-vpu-firmware'])
    @OEHasPackage(['python3-opencv'])
    @OEHasPackage(['python3-numpy'])
    def test_dldt_ie_classification_python_api_with_myriad(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_python_api_with_device('MYRIAD', self.ir_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
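
Each device test above reduces to a single classification_sample_async run against the IR produced by the producer test; only the -d argument changes. A sketch of the resulting command lines, with paths following from the setUpClass work directories:

import os
work_dir, ir_dir = '/tmp/ie/inputs', '/tmp/ie/ir'   # from setUpClass above
for device in ('CPU', 'GPU', 'MYRIAD'):
    print('classification_sample_async -d %s -i %s -m %s' % (
        device,
        os.path.join(work_dir, 'chicky_512.png'),
        os.path.join(ir_dir, 'squeezenet_v1.1.xml')))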
diff --git a/lib/oeqa/runtime/cases/dldt_model_optimizer.py b/lib/oeqa/runtime/cases/dldt_model_optimizer.py
new file mode 100644
index 00000000..736ea661
--- /dev/null
+++ b/lib/oeqa/runtime/cases/dldt_model_optimizer.py
@@ -0,0 +1,38 @@
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.runtime.decorator.package import OEHasPackage
from oeqa.runtime.miutils.targets.oeqatarget import OEQATarget
from oeqa.runtime.miutils.tests.squeezenet_model_download_test import SqueezenetModelDownloadTest
from oeqa.runtime.miutils.tests.dldt_model_optimizer_test import DldtModelOptimizerTest
from oeqa.runtime.miutils.dldtutils import get_testdata_config

class DldtModelOptimizer(OERuntimeTestCase):

    @classmethod
    def setUpClass(cls):
        cls.sqn_download = SqueezenetModelDownloadTest(OEQATarget(cls.tc.target), '/tmp/mo/md')
        cls.sqn_download.setup()
        cls.dldt_mo = DldtModelOptimizerTest(OEQATarget(cls.tc.target), '/tmp/mo/ir')
        cls.dldt_mo.setup()

    @classmethod
    def tearDownClass(cls):
        cls.dldt_mo.tear_down()
        cls.sqn_download.tear_down()

    @OEHasPackage(['dldt-model-optimizer'])
    @OEHasPackage(['wget'])
    def test_dldt_mo_can_create_ir(self):
        proxy_port = get_testdata_config(self.tc.td, 'DLDT_PIP_PROXY')
        if not proxy_port:
            self.skipTest('Need to configure bitbake configuration (DLDT_PIP_PROXY="proxy.server:port").')
        (status, output) = self.sqn_download.test_can_download_squeezenet_model(proxy_port)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
        (status, output) = self.sqn_download.test_can_download_squeezenet_prototxt(proxy_port)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

        mo_exe_dir = get_testdata_config(self.tc.td, 'DLDT_MO_EXE_DIR')
        if not mo_exe_dir:
            self.skipTest('Need to configure bitbake configuration (DLDT_MO_EXE_DIR="directory_to_mo.py").')
        mo_files_dir = self.sqn_download.work_dir
        (status, output) = self.dldt_mo.test_dldt_mo_can_create_ir(mo_exe_dir, mo_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
diff --git a/lib/oeqa/runtime/files/dldt-inference-engine/classification_sample.py b/lib/oeqa/runtime/files/dldt-inference-engine/classification_sample.py
new file mode 100644
index 00000000..1906e9fe
--- /dev/null
+++ b/lib/oeqa/runtime/files/dldt-inference-engine/classification_sample.py
@@ -0,0 +1,135 @@
#!/usr/bin/env python3
"""
 Copyright (C) 2018-2019 Intel Corporation

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
"""
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IECore


def build_argparser():
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
    args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.", required=True,
                      type=str)
    args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files",
                      required=True,
                      type=str, nargs="+")
    args.add_argument("-l", "--cpu_extension",
                      help="Optional. Required for CPU custom layers. "
                           "MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the"
                           " kernels implementations.", type=str, default=None)
    args.add_argument("-d", "--device",
                      help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL, MYRIAD or HETERO: is "
                           "acceptable. The sample will look for a suitable plugin for device specified. Default "
                           "value is CPU",
                      default="CPU", type=str)
    args.add_argument("--labels", help="Optional. Path to a labels mapping file", default=None, type=str)
    args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)

    return parser


def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    log.info("Creating Inference Engine")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(args.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(args.input)

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    log.info("Batch size is {}".format(n))

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = ie.load_network(network=net, device_name=args.device)

    # Start sync inference
    log.info("Starting inference in synchronous mode")
    res = exec_net.infer(inputs={input_blob: images})

    # Processing output blob
    log.info("Processing output blob")
    res = res[out_blob]
    log.info("Top {} results: ".format(args.number_top))
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
    else:
        labels_map = None
    classid_str = "classid"
    probability_str = "probability"
    for i, probs in enumerate(res):
        probs = np.squeeze(probs)
        top_ind = np.argsort(probs)[-args.number_top:][::-1]
        print("Image {}\n".format(args.input[i]))
        print(classid_str, probability_str)
        print("{} {}".format('-' * len(classid_str), '-' * len(probability_str)))
        for id in top_ind:
            det_label = labels_map[id] if labels_map else "{}".format(id)
            label_length = len(det_label)
            space_num_before = (len(classid_str) - label_length) // 2
            space_num_after = len(classid_str) - (space_num_before + label_length) + 2
            space_num_before_prob = (len(probability_str) - len(str(probs[id]))) // 2
            print("{}{}{}{}{:.7f}".format(' ' * space_num_before, det_label,
                                          ' ' * space_num_after, ' ' * space_num_before_prob,
                                          probs[id]))
        print("\n")
    log.info("This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n")

if __name__ == '__main__':
    sys.exit(main() or 0)
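
The only non-obvious data handling in the sample is the pre-processing loop: images are resized to the network's input height and width, then the layout changes from HWC (as cv2.imread returns) to CHW (as the Inference Engine expects). A standalone numpy illustration, assuming SqueezeNet v1.1's 227x227 input shape:

import numpy as np
n, c, h, w = 1, 3, 227, 227          # assumed SqueezeNet v1.1 input shape
image = np.zeros((512, 512, 3))      # stand-in for cv2.imread('chicky_512.png')
image = image[:h, :w, :]             # shape-only stand-in for cv2.resize(image, (w, h))
chw = image.transpose((2, 0, 1))     # HWC -> CHW, exactly as in the sample
assert chw.shape == (c, h, w)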
diff --git a/lib/oeqa/runtime/miutils/dldtutils.py b/lib/oeqa/runtime/miutils/dldtutils.py
new file mode 100644
index 00000000..45bf2e12
--- /dev/null
+++ b/lib/oeqa/runtime/miutils/dldtutils.py
@@ -0,0 +1,3 @@
def get_testdata_config(testdata, config):
    return testdata.get(config)
diff --git a/lib/oeqa/runtime/miutils/targets/oeqatarget.py b/lib/oeqa/runtime/miutils/targets/oeqatarget.py
new file mode 100644
index 00000000..a9f7f1b4
--- /dev/null
+++ b/lib/oeqa/runtime/miutils/targets/oeqatarget.py
@@ -0,0 +1,11 @@
class OEQATarget(object):

    def __init__(self, target):
        self.target = target

    def run(self, cmd):
        return self.target.run(cmd)

    def copy_to(self, source, destination_dir):
        self.target.copyTo(source, destination_dir)
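
OEQATarget narrows the OEQA runtime target to the two calls the helpers need (run, and copy_to mapped onto the target's copyTo). Any object of the same shape can stand in for it off-target; a hypothetical stub for dry-running the helper classes below:

class EchoTarget:
    """Hypothetical stand-in for OEQATarget: prints instead of executing."""
    def run(self, cmd):
        print('would run: %s' % cmd)
        return (0, '')                 # pretend every command succeeds
    def copy_to(self, source, destination_dir):
        print('would copy: %s -> %s' % (source, destination_dir))

# e.g. DldtModelOptimizerTest(EchoTarget(), '/tmp/mo/ir').setup()
# prints: would run: mkdir -p /tmp/mo/ir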
diff --git a/lib/oeqa/runtime/miutils/tests/dldt_inference_engine_test.py b/lib/oeqa/runtime/miutils/tests/dldt_inference_engine_test.py
new file mode 100644
index 00000000..a44f9027
--- /dev/null
+++ b/lib/oeqa/runtime/miutils/tests/dldt_inference_engine_test.py
@@ -0,0 +1,48 @@
import os
script_path = os.path.dirname(os.path.realpath(__file__))
files_path = os.path.join(script_path, '../../files/')

class DldtInferenceEngineTest(object):
    ie_input_files = {'ie_python_sample': 'classification_sample.py',
                      'input': 'chicky_512.png',
                      'input_download': 'https://raw.githubusercontent.com/opencv/opencv/master/samples/data/chicky_512.png',
                      'model': 'squeezenet_v1.1.xml'}

    def __init__(self, target, work_dir):
        self.target = target
        self.work_dir = work_dir

    def setup(self):
        self.target.run('mkdir -p %s' % self.work_dir)
        self.target.copy_to(os.path.join(files_path, 'dldt-inference-engine', self.ie_input_files['ie_python_sample']),
                            self.work_dir)

    def tear_down(self):
        self.target.run('rm -rf %s' % self.work_dir)

    def test_can_download_input_file(self, proxy_port):
        return self.target.run('cd %s; wget %s -e https_proxy=%s' %
                               (self.work_dir,
                                self.ie_input_files['input_download'],
                                proxy_port))

    def test_dldt_ie_classification_with_device(self, device, ir_files_dir):
        return self.target.run('classification_sample_async -d %s -i %s -m %s' %
                               (device,
                                os.path.join(self.work_dir, self.ie_input_files['input']),
                                os.path.join(ir_files_dir, self.ie_input_files['model'])))

    def test_dldt_ie_classification_python_api_with_device(self, device, ir_files_dir, extension=''):
        if extension:
            return self.target.run('python3 %s -d %s -i %s -m %s -l %s' %
                                   (os.path.join(self.work_dir, self.ie_input_files['ie_python_sample']),
                                    device,
                                    os.path.join(self.work_dir, self.ie_input_files['input']),
                                    os.path.join(ir_files_dir, self.ie_input_files['model']),
                                    extension))
        else:
            return self.target.run('python3 %s -d %s -i %s -m %s' %
                                   (os.path.join(self.work_dir, self.ie_input_files['ie_python_sample']),
                                    device,
                                    os.path.join(self.work_dir, self.ie_input_files['input']),
                                    os.path.join(ir_files_dir, self.ie_input_files['model'])))
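
The extension argument only matters for the CPU plugin (the sample's --cpu_extension help text says as much), which is why the test cases pass 'libcpu_extension.so' for CPU and omit it for GPU and MYRIAD. A sketch of the two command shapes the branch above produces, using the same work directories:

sample = '/tmp/ie/inputs/classification_sample.py'
base = ('python3 %s -d %%s -i /tmp/ie/inputs/chicky_512.png'
        ' -m /tmp/ie/ir/squeezenet_v1.1.xml' % sample)
print(base % 'CPU' + ' -l libcpu_extension.so')   # extension branch
print(base % 'GPU')                               # no-extension branch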
diff --git a/lib/oeqa/runtime/miutils/tests/dldt_model_optimizer_test.py b/lib/oeqa/runtime/miutils/tests/dldt_model_optimizer_test.py
new file mode 100644
index 00000000..7d3db15b
--- /dev/null
+++ b/lib/oeqa/runtime/miutils/tests/dldt_model_optimizer_test.py
@@ -0,0 +1,23 @@
import os

class DldtModelOptimizerTest(object):
    mo_input_files = {'model': 'squeezenet_v1.1.caffemodel',
                      'prototxt': 'deploy.prototxt'}
    mo_exe = 'mo.py'

    def __init__(self, target, work_dir):
        self.target = target
        self.work_dir = work_dir

    def setup(self):
        self.target.run('mkdir -p %s' % self.work_dir)

    def tear_down(self):
        self.target.run('rm -rf %s' % self.work_dir)

    def test_dldt_mo_can_create_ir(self, mo_exe_dir, mo_files_dir):
        return self.target.run('python3 %s --input_model %s --input_proto %s --output_dir %s --data_type FP16' %
                               (os.path.join(mo_exe_dir, self.mo_exe),
                                os.path.join(mo_files_dir, self.mo_input_files['model']),
                                os.path.join(mo_files_dir, self.mo_input_files['prototxt']),
                                self.work_dir))
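
By default mo.py derives the IR file names from the input model name, so converting squeezenet_v1.1.caffemodel into the work dir yields the squeezenet_v1.1.xml that DldtInferenceEngineTest looks up; the samples then resolve the .bin weights the same way. A sketch of that naming, under that assumption about the Model Optimizer default:

import os
model = 'squeezenet_v1.1.caffemodel'
ir_base = os.path.splitext(model)[0]
print(ir_base + '.xml', ir_base + '.bin')   # squeezenet_v1.1.xml squeezenet_v1.1.bin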
diff --git a/lib/oeqa/runtime/miutils/tests/squeezenet_model_download_test.py b/lib/oeqa/runtime/miutils/tests/squeezenet_model_download_test.py
new file mode 100644
index 00000000..a3e46a0a
--- /dev/null
+++ b/lib/oeqa/runtime/miutils/tests/squeezenet_model_download_test.py
@@ -0,0 +1,25 @@
class SqueezenetModelDownloadTest(object):
    download_files = {'squeezenet1.1.prototxt': 'https://raw.githubusercontent.com/DeepScale/SqueezeNet/a47b6f13d30985279789d08053d37013d67d131b/SqueezeNet_v1.1/deploy.prototxt',
                      'squeezenet1.1.caffemodel': 'https://github.com/DeepScale/SqueezeNet/raw/a47b6f13d30985279789d08053d37013d67d131b/SqueezeNet_v1.1/squeezenet_v1.1.caffemodel'}

    def __init__(self, target, work_dir):
        self.target = target
        self.work_dir = work_dir

    def setup(self):
        self.target.run('mkdir -p %s' % self.work_dir)

    def tear_down(self):
        self.target.run('rm -rf %s' % self.work_dir)

    def test_can_download_squeezenet_model(self, proxy_port):
        return self.target.run('cd %s; wget %s -e https_proxy=%s' %
                               (self.work_dir,
                                self.download_files['squeezenet1.1.caffemodel'],
                                proxy_port))

    def test_can_download_squeezenet_prototxt(self, proxy_port):
        return self.target.run('cd %s; wget %s -e https_proxy=%s' %
                               (self.work_dir,
                                self.download_files['squeezenet1.1.prototxt'],
                                proxy_port))
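
Both download helpers shell out to wget with -e, which executes a wgetrc-style command for that invocation only, so the https_proxy setting never leaks into the image's environment. With the hypothetical proxy value from the earlier sketch, the model fetch becomes:

url = ('https://github.com/DeepScale/SqueezeNet/raw/'
       'a47b6f13d30985279789d08053d37013d67d131b/'
       'SqueezeNet_v1.1/squeezenet_v1.1.caffemodel')
print('cd /tmp/mo/md; wget %s -e https_proxy=%s' % (url, 'proxy.example.com:911'))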