author	Michael Wood <michael.g.wood@intel.com>	2016-05-19 13:59:32 +0100
committer	Richard Purdie <richard.purdie@linuxfoundation.org>	2016-05-20 10:09:03 +0100
commit	0664a89ef02e2ec98892e4d9227fca3d4fadd40c (patch)
tree	fc759964c3913a6cd758b500d9b671b12c3d9d20 /bitbake/lib/toaster/tests/builds
parent	085688c8ed2a95ff47ca10275726ae7cbbab0902 (diff)
download	poky-0664a89ef02e2ec98892e4d9227fca3d4fadd40c.tar.gz
bitbake: toaster: tests/builds: Add a test for a build of core-image-minimal
This is a port of the oe-core self test meta/lib/oeqa/selftest/_toaster.py
to the Django test framework.
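The ported tests run under the standard Django test runner; an invocation
along the lines of "./bitbake/lib/toaster/manage.py test tests.builds"
should work, although the exact test label depends on the local setup.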
(Bitbake rev: 94418b1b2f9466d35461acdb982fd6b130b2331c)
Signed-off-by: Michael Wood <michael.g.wood@intel.com>
Signed-off-by: Elliot Smith <elliot.smith@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'bitbake/lib/toaster/tests/builds')
-rw-r--r--	bitbake/lib/toaster/tests/builds/test_core_image_min.py	389
1 file changed, 389 insertions, 0 deletions
diff --git a/bitbake/lib/toaster/tests/builds/test_core_image_min.py b/bitbake/lib/toaster/tests/builds/test_core_image_min.py
new file mode 100644
index 0000000000..87d29c72aa
--- /dev/null
+++ b/bitbake/lib/toaster/tests/builds/test_core_image_min.py
@@ -0,0 +1,389 @@
#! /usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Toaster Implementation
#
# Copyright (C) 2016 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

# These tests were part of the openembedded-core oe-selftest suite.
# Authored by: Lucian Musat, Ionut Chisanovici, Paul Eggleton
# and Cristian Iorga

import os

from django.db.models import Q

from orm.models import Target_Image_File, Target_Installed_Package, Task
from orm.models import Package_Dependency, Recipe_Dependency, Build
from orm.models import Task_Dependency, Package, Target, Recipe
from orm.models import CustomImagePackage

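# BuildTest (buildtest.py, alongside this file) is assumed to provide the
# build() harness that runs a real bitbake build and records its results
# in the Toaster ORM; the tests below only query that database state.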
from buildtest import BuildTest


class BuildCoreImageMinimal(BuildTest):
    """Build core-image-minimal and test the results"""

    def setUp(self):
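        # setUp runs before each test case; the assumption here is that
        # BuildTest.build() blocks until the bitbake build of the given
        # target has finished and its results are in the Toaster database.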
        self.build("core-image-minimal")

    # Check if build name is unique - tc_id=795
    def test_Build_Unique_Name(self):
        all_builds = Build.objects.all().count()
        distinct_builds = Build.objects.values('id').distinct().count()
        self.assertEqual(distinct_builds,
                         all_builds,
                         msg='Build name is not unique')

    # Check if build cooker log path is unique - tc_id=819
    def test_Build_Unique_Cooker_Log_Path(self):
        distinct_path = Build.objects.values(
            'cooker_log_path').distinct().count()
        total_builds = Build.objects.values('id').count()
        self.assertEqual(distinct_path,
                         total_builds,
                         msg='Build cooker log path is not unique')

    # Check if task order is unique for one build - tc=824
    def test_Task_Unique_Order(self):
        builds = Build.objects.values('id')
        cnt_err = []

        for build in builds:
            total_task_order = Task.objects.filter(
                build=build['id']).values('order').count()
            distinct_task_order = Task.objects.filter(
                build=build['id']).values('order').distinct().count()

            if total_task_order != distinct_task_order:
                cnt_err.append(build['id'])

        self.assertEqual(len(cnt_err),
                         0,
                         msg='Errors for build id: %s' % cnt_err)

    # Check task order sequence for one build - tc=825
    def test_Task_Order_Sequence(self):
        builds = Build.objects.values('id')
        cnt_err = []
        for build in builds:
            tasks = Task.objects.filter(
                Q(build=build['id']),
                ~Q(order=None),
                ~Q(task_name__contains='_setscene')
            ).values('id', 'order').order_by("order")

            cnt_tasks = 0
            for task in tasks:
                cnt_tasks += 1
                if task['order'] != cnt_tasks:
                    cnt_err.append(task['id'])
        self.assertEqual(
            len(cnt_err), 0, msg='Errors for task id: %s' % cnt_err)

    # Check if disk_io matches the difference between EndTimeIO and
    # StartTimeIO in build stats - tc=828
    # def test_Task_Disk_IO_TC828(self):

    # Check if outcome = 2 (SSTATE) then sstate_result must be 3 (RESTORED) -
    # tc=832
    def test_Task_If_Outcome_2_Sstate_Result_Must_Be_3(self):
        tasks = Task.objects.filter(outcome=2).values('id', 'sstate_result')
        cnt_err = []
        for task in tasks:
            if task['sstate_result'] != 3:
                cnt_err.append(task['id'])

        self.assertEqual(len(cnt_err),
                         0,
                         msg='Errors for task id: %s' % cnt_err)

    # Check if outcome = 1 (COVERED) or 3 (EXISTING) then sstate_result must
    # be 0 (SSTATE_NA) - tc=833
    def test_Task_If_Outcome_1_3_Sstate_Result_Must_Be_0(self):
        tasks = Task.objects.filter(
            outcome__in=(1, 3)).values('id', 'sstate_result')
        cnt_err = []

        for task in tasks:
            if task['sstate_result'] != 0:
                cnt_err.append(task['id'])

        self.assertEqual(len(cnt_err),
                         0,
                         msg='Errors for task id: %s' % cnt_err)

    # Check if outcome is 0 (SUCCESS) or 4 (FAILED) then sstate_result must be
    # 0 (NA), 1 (MISS) or 2 (FAILED) - tc=834
    def test_Task_If_Outcome_0_4_Sstate_Result_Must_Be_0_1_2(self):
        tasks = Task.objects.filter(
            outcome__in=(0, 4)).values('id', 'sstate_result')
        cnt_err = []

        for task in tasks:
            if task['sstate_result'] not in [0, 1, 2]:
                cnt_err.append(task['id'])

        self.assertEqual(len(cnt_err),
                         0,
                         msg='Errors for task id: %s' % cnt_err)

    # Check if task_executed = TRUE (1), script_type must be 0 (CODING_NA), 2
    # (CODING_PYTHON), 3 (CODING_SHELL) - tc=891
    def test_Task_If_Task_Executed_True_Script_Type_0_2_3(self):
        tasks = Task.objects.filter(
            task_executed=1).values('id', 'script_type')
        cnt_err = []

        for task in tasks:
            if task['script_type'] not in [0, 2, 3]:
                cnt_err.append(task['id'])
        self.assertEqual(len(cnt_err),
                         0,
                         msg='Errors for task id: %s' % cnt_err)

    # Check if task_executed = TRUE (1), outcome must be 0 (SUCCESS) or 4
    # (FAILED) - tc=836
    def test_Task_If_Task_Executed_True_Outcome_0_4(self):
        tasks = Task.objects.filter(task_executed=1).values('id', 'outcome')
        cnt_err = []

        for task in tasks:
            if task['outcome'] not in [0, 4]:
                cnt_err.append(task['id'])

        self.assertEqual(len(cnt_err),
                         0,
                         msg='Errors for task id: %s' % cnt_err)

    # Check if task_executed = FALSE (0), script_type must be 0 - tc=890
    def test_Task_If_Task_Executed_False_Script_Type_0(self):
        tasks = Task.objects.filter(
            task_executed=0).values('id', 'script_type')
        cnt_err = []

        for task in tasks:
            if task['script_type'] != 0:
                cnt_err.append(task['id'])

        self.assertEqual(len(cnt_err),
                         0,
                         msg='Errors for task id: %s' % cnt_err)

    # Check if task_executed = FALSE (0) and build outcome = SUCCEEDED (0),
    # task outcome must be 1 (COVERED), 2 (CACHED), 3 (PREBUILT), 5 (EMPTY) -
    # tc=837
    def test_Task_If_Task_Executed_False_Outcome_1_2_3_5(self):
        builds = Build.objects.filter(outcome=0).values('id')
        cnt_err = []
        for build in builds:
            tasks = Task.objects.filter(
                build=build['id'], task_executed=0).values('id', 'outcome')
            for task in tasks:
                if task['outcome'] not in [1, 2, 3, 5]:
                    cnt_err.append(task['id'])

        self.assertEqual(len(cnt_err),
                         0,
                         msg='Errors for task id: %s' % cnt_err)

    # Key verification: every Target_Installed_Package row must reference an
    # existing Target and an existing Package - tc=888
    def test_Target_Installed_Package(self):
        rows = Target_Installed_Package.objects.values('id',
                                                       'target_id',
                                                       'package_id')
        cnt_err = []

        for row in rows:
            target = Target.objects.filter(id=row['target_id']).values('id')
            package = Package.objects.filter(id=row['package_id']).values('id')
            if not target or not package:
                cnt_err.append(row['id'])
        self.assertEqual(len(cnt_err),
                         0,
                         msg='Errors for target installed package id: %s' %
                         cnt_err)

    # Key verification: every Task_Dependency row must reference two existing
    # Task rows - tc=889
    def test_Task_Dependency(self):
        rows = Task_Dependency.objects.values('id',
                                              'task_id',
                                              'depends_on_id')
        cnt_err = []
        for row in rows:
            task_id = Task.objects.filter(id=row['task_id']).values('id')
            depends_on_id = Task.objects.filter(
                id=row['depends_on_id']).values('id')
            if not task_id or not depends_on_id:
                cnt_err.append(row['id'])
        self.assertEqual(len(cnt_err),
                         0,
                         msg='Errors for task dependency id: %s' % cnt_err)

    # For each successful build (orm_build.outcome=0), check that every image
    # target (is_image=true) has file_name populated, that the file exists,
    # and that its size matches the recorded file_size value.
    # TC must be added in the test run
    def test_Target_File_Name_Populated(self):
        builds = Build.objects.filter(outcome=0).values('id')
        for build in builds:
            targets = Target.objects.filter(
                build_id=build['id'], is_image=1).values('id')
            for target in targets:
                target_files = Target_Image_File.objects.filter(
                    target_id=target['id']).values('id',
                                                   'file_name',
                                                   'file_size')
                cnt_err = []
                for file_info in target_files:
                    target_id = file_info['id']
                    target_file_name = file_info['file_name']
                    target_file_size = file_info['file_size']
                    if not target_file_name or not target_file_size:
                        cnt_err.append(target_id)
                    elif not os.path.exists(target_file_name):
                        cnt_err.append(target_id)
                    elif (os.path.getsize(target_file_name) !=
                          target_file_size):
                        cnt_err.append(target_id)
                self.assertEqual(len(cnt_err), 0,
                                 msg='Errors for target image file id: %s' %
                                 cnt_err)

    # Key verification: a package must not depend on itself - tc=884
    def test_Package_Dependency(self):
        cnt_err = []
        deps = Package_Dependency.objects.values(
            'id', 'package_id', 'depends_on_id')
        for dep in deps:
            if dep['package_id'] == dep['depends_on_id']:
                cnt_err.append(dep['id'])
        self.assertEqual(len(cnt_err), 0,
                         msg='Errors for package dependency id: %s' % cnt_err)

    # Recipe key verification: a recipe must not depend on a recipe with the
    # same name - tc=883
    def test_Recipe_Dependency(self):
        deps = Recipe_Dependency.objects.values(
            'id', 'recipe_id', 'depends_on_id')
        cnt_err = []
        for dep in deps:
            if not dep['recipe_id'] or not dep['depends_on_id']:
                cnt_err.append(dep['id'])
            else:
                # Compare the recipe names themselves, not the QuerySets
                # that wrap them
                name = Recipe.objects.filter(
                    id=dep['recipe_id']).values_list(
                        'name', flat=True).first()
                dep_name = Recipe.objects.filter(
                    id=dep['depends_on_id']).values_list(
                        'name', flat=True).first()
                if name == dep_name:
                    cnt_err.append(dep['id'])
        self.assertEqual(len(cnt_err), 0,
                         msg='Errors for recipe dependency id: %s' % cnt_err)

    # Check if package name does not start with a number (0-9) - tc=846
    def test_Package_Name_For_Number(self):
        packages = Package.objects.filter(~Q(size=-1)).values('id', 'name')
        cnt_err = []
        for package in packages:
            if package['name'][0].isdigit():
                cnt_err.append(package['id'])
        self.assertEqual(
            len(cnt_err), 0, msg='Errors for package id: %s' % cnt_err)

    # Check if package version starts with a number (0-9) - tc=847
    def test_Package_Version_Starts_With_Number(self):
        packages = Package.objects.filter(
            ~Q(size=-1)).values('id', 'version')
        cnt_err = []
        for package in packages:
            if not package['version'][0].isdigit():
                cnt_err.append(package['id'])
        self.assertEqual(
            len(cnt_err), 0, msg='Errors for package id: %s' % cnt_err)

    # Check if package revision starts with 'r' - tc=848
    def test_Package_Revision_Starts_With_r(self):
        packages = Package.objects.filter(
            ~Q(size=-1)).values('id', 'revision')
        cnt_err = []
        for package in packages:
            if not package['revision'].startswith("r"):
                cnt_err.append(package['id'])
        self.assertEqual(
            len(cnt_err), 0, msg='Errors for package id: %s' % cnt_err)

    # Check the validity of the package build_id
    # TC must be added in the test run
    def test_Package_Build_Id(self):
        packages = Package.objects.filter(
            ~Q(size=-1)).values('id', 'build_id')
        cnt_err = []
        for package in packages:
            build_id = Build.objects.filter(
                id=package['build_id']).values('id')
            if not build_id:
                # The package has no build_id, but if it is a
                # CustomImagePackage that's expected
                try:
                    CustomImagePackage.objects.get(pk=package['id'])
                except CustomImagePackage.DoesNotExist:
                    cnt_err.append(package['id'])

        self.assertEqual(len(cnt_err),
                         0,
                         msg="Errors for package id: %s; they have no build "
                             "associated with them" % cnt_err)

    # Check the validity of package recipe_id
    # TC must be added in the test run
    def test_Package_Recipe_Id(self):
        packages = Package.objects.filter(
            ~Q(size=-1)).values('id', 'recipe_id')
        cnt_err = []
        for package in packages:
            recipe_id = Recipe.objects.filter(
                id=package['recipe_id']).values('id')
            if not recipe_id:
                cnt_err.append(package['id'])
        self.assertEqual(
            len(cnt_err), 0, msg='Errors for package id: %s' % cnt_err)

    # Check if package installed_size field is not null
    # TC must be added in the test run
    def test_Package_Installed_Size_Not_NULL(self):
        packages = Package.objects.filter(
            installed_size__isnull=True).values('id')
        cnt_err = []
        for package in packages:
            cnt_err.append(package['id'])
        self.assertEqual(
            len(cnt_err), 0, msg='Errors for package id: %s' % cnt_err)

    def test_custom_packages_generated(self):
        """Test that each package generated by the build has a
        corresponding generated CustomImagePackage"""
        missing_packages = []

        for package in Package.objects.all():
            try:
                CustomImagePackage.objects.get(name=package.name)
            except CustomImagePackage.DoesNotExist:
                missing_packages.append(package.name)

        self.assertEqual(len(missing_packages), 0,
                         "Some packages were created by the build but their"
                         " corresponding CustomImagePackage was not found")