summaryrefslogtreecommitdiffstats
path: root/recipes-support/opencv/opencv/OpenCV_DNN_examples.patch
diff options
context:
space:
mode:
Diffstat (limited to 'recipes-support/opencv/opencv/OpenCV_DNN_examples.patch')
-rw-r--r--recipes-support/opencv/opencv/OpenCV_DNN_examples.patch148
1 files changed, 148 insertions, 0 deletions
diff --git a/recipes-support/opencv/opencv/OpenCV_DNN_examples.patch b/recipes-support/opencv/opencv/OpenCV_DNN_examples.patch
new file mode 100644
index 00000000..569d5b02
--- /dev/null
+++ b/recipes-support/opencv/opencv/OpenCV_DNN_examples.patch
@@ -0,0 +1,148 @@
1From 3c4daafb54f961e376104a461ca7ec114ff0331a Mon Sep 17 00:00:00 2001
2From: Ludek Slosarcik <ludek.slosarcik@nxp.com>
3Date: Fri, 14 Feb 2020 15:46:50 +0100
4Subject: [PATCH] opencv_dnn: added video device for 2 examples, and changed text labels
5
6Signed-off-by: Ludek Slosarcik <ludek.slosarcik@nxp.com>
7
8Upstream-Status: Pending
9---
10 samples/cpp/logistic_regression.cpp | 2 +-
11 samples/dnn/classification.cpp | 7 ++++---
12 samples/dnn/object_detection.cpp | 10 +++++-----
13 samples/dnn/segmentation.cpp | 2 +-
14 samples/dnn/text_detection.cpp | 5 +++--
15 5 files changed, 14 insertions(+), 12 deletions(-)
16
17Index: git/samples/cpp/logistic_regression.cpp
18===================================================================
19--- git.orig/samples/cpp/logistic_regression.cpp
20+++ git/samples/cpp/logistic_regression.cpp
21@@ -83,7 +83,7 @@ static float calculateAccuracyPercent(co
22
23 int main()
24 {
25- const String filename = samples::findFile("data01.xml");
26+ const String filename = samples::findFile("../data/data01.xml");
27 cout << "**********************************************************************" << endl;
28 cout << filename
29 << " contains digits 0 and 1 of 20 samples each, collected on an Android device" << endl;
30Index: git/samples/dnn/classification.cpp
31===================================================================
32--- git.orig/samples/dnn/classification.cpp
33+++ git/samples/dnn/classification.cpp
34@@ -11,6 +11,7 @@ std::string keys =
35 "{ help h | | Print help message. }"
36 "{ @alias | | An alias name of model to extract preprocessing parameters from models.yml file. }"
37 "{ zoo | models.yml | An optional path to file with preprocessing parameters }"
38+ "{ device | 0 | camera device number. }"
39 "{ input i | | Path to input image or video file. Skip this argument to capture frames from a camera.}"
40 "{ framework f | | Optional name of an origin framework of the model. Detect it automatically if it does not set. }"
41 "{ classes | | Optional path to a text file with names of classes. }"
42@@ -94,7 +95,7 @@ int main(int argc, char** argv)
43 if (parser.has("input"))
44 cap.open(parser.get<String>("input"));
45 else
46- cap.open(0);
47+ cap.open(parser.get<int>("device"));
48 //! [Open a video file or an image file or a camera stream]
49
50 // Process frames.
51@@ -131,13 +132,13 @@ int main(int argc, char** argv)
52 double freq = getTickFrequency() / 1000;
53 double t = net.getPerfProfile(layersTimes) / freq;
54 std::string label = format("Inference time: %.2f ms", t);
55- putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
56+ putText(frame, label, Point(0, 20), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2, 8, false);
57
58 // Print predicted class.
59 label = format("%s: %.4f", (classes.empty() ? format("Class #%d", classId).c_str() :
60 classes[classId].c_str()),
61 confidence);
62- putText(frame, label, Point(0, 40), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
63+ putText(frame, label, Point(0, 45), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2, 8, false);
64
65 imshow(kWinName, frame);
66 }
67Index: git/samples/dnn/object_detection.cpp
68===================================================================
69--- git.orig/samples/dnn/object_detection.cpp
70+++ git/samples/dnn/object_detection.cpp
71@@ -251,13 +251,13 @@ int main(int argc, char** argv)
72 if (predictionsQueue.counter > 1)
73 {
74 std::string label = format("Camera: %.2f FPS", framesQueue.getFPS());
75- putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
76+ putText(frame, label, Point(0, 20), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2, 8, false);
77
78 label = format("Network: %.2f FPS", predictionsQueue.getFPS());
79- putText(frame, label, Point(0, 30), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
80+ putText(frame, label, Point(0, 45), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2, 8, false);
81
82 label = format("Skipped frames: %d", framesQueue.counter - predictionsQueue.counter);
83- putText(frame, label, Point(0, 45), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
84+ putText(frame, label, Point(0, 70), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2, 8, false);
85 }
86 imshow(kWinName, frame);
87 }
88@@ -293,7 +293,7 @@ int main(int argc, char** argv)
89 double freq = getTickFrequency() / 1000;
90 double t = net.getPerfProfile(layersTimes) / freq;
91 std::string label = format("Inference time: %.2f ms", t);
92- putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
93+ putText(frame, label, Point(0, 20), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2, 8, false);
94
95 imshow(kWinName, frame);
96 }
97@@ -462,7 +462,7 @@ void drawPred(int classId, float conf, i
98 top = max(top, labelSize.height);
99 rectangle(frame, Point(left, top - labelSize.height),
100 Point(left + labelSize.width, top + baseLine), Scalar::all(255), FILLED);
101- putText(frame, label, Point(left, top), FONT_HERSHEY_SIMPLEX, 0.5, Scalar());
102+ putText(frame, label, Point(left, top), FONT_HERSHEY_SIMPLEX, 0.8, Scalar());
103 }
104
105 void callback(int pos, void*)
106Index: git/samples/dnn/segmentation.cpp
107===================================================================
108--- git.orig/samples/dnn/segmentation.cpp
109+++ git/samples/dnn/segmentation.cpp
110@@ -157,7 +157,7 @@ int main(int argc, char** argv)
111 double freq = getTickFrequency() / 1000;
112 double t = net.getPerfProfile(layersTimes) / freq;
113 std::string label = format("Inference time: %.2f ms", t);
114- putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
115+ putText(frame, label, Point(0, 20), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2, 8, false);
116
117 imshow(kWinName, frame);
118 if (!classes.empty())
119Index: git/samples/dnn/text_detection.cpp
120===================================================================
121--- git.orig/samples/dnn/text_detection.cpp
122+++ git/samples/dnn/text_detection.cpp
123@@ -25,6 +25,7 @@ using namespace cv::dnn;
124 const char* keys =
125 "{ help h | | Print help message. }"
126 "{ input i | | Path to input image or video file. Skip this argument to capture frames from a camera.}"
127+ "{ device | 0 | camera device number. }"
128 "{ model m | | Path to a binary .pb file contains trained detector network.}"
129 "{ ocr | | Path to a binary .pb or .onnx file contains trained recognition network.}"
130 "{ width | 320 | Preprocess input image by resizing to a specific width. It should be multiple by 32. }"
131@@ -75,7 +76,7 @@ int main(int argc, char** argv)
132
133 // Open a video file or an image file or a camera stream.
134 VideoCapture cap;
135- bool openSuccess = parser.has("input") ? cap.open(parser.get<String>("input")) : cap.open(0);
136+ bool openSuccess = parser.has("input") ? cap.open(parser.get<String>("input")) : cap.open(parser.get<int>("device"));
137 CV_Assert(openSuccess);
138
139 static const std::string kWinName = "EAST: An Efficient and Accurate Scene Text Detector";
140@@ -156,7 +157,7 @@ int main(int argc, char** argv)
141
142 // Put efficiency information.
143 std::string label = format("Inference time: %.2f ms", tickMeter.getTimeMilli());
144- putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
145+ putText(frame, label, Point(0, 20), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2, 8, false);
146
147 imshow(kWinName, frame);
148