diff options
author | Tudor Florea <tudor.florea@enea.com> | 2015-10-09 20:59:03 (GMT) |
---|---|---|
committer | Tudor Florea <tudor.florea@enea.com> | 2015-10-09 20:59:03 (GMT) |
commit | 972dcfcdbfe75dcfeb777150c136576cf1a71e99 (patch) | |
tree | 97a61cd7e293d7ae9d56ef7ed0f81253365bb026 /scripts | |
download | poky-972dcfcdbfe75dcfeb777150c136576cf1a71e99.tar.gz |
initial commit for Enea Linux 5.0 arm
Signed-off-by: Tudor Florea <tudor.florea@enea.com>
Diffstat (limited to 'scripts')
326 files changed, 50887 insertions, 0 deletions
diff --git a/scripts/README b/scripts/README new file mode 100644 index 0000000..1b8d127 --- /dev/null +++ b/scripts/README | |||
@@ -0,0 +1 @@ | |||
This directory contains various useful scripts for working with OE builds. | |||
diff --git a/scripts/bitbake-prserv-tool b/scripts/bitbake-prserv-tool new file mode 100755 index 0000000..28c2416 --- /dev/null +++ b/scripts/bitbake-prserv-tool | |||
@@ -0,0 +1,103 @@ | |||
1 | #!/usr/bin/env bash | ||
2 | |||
# Print usage information: the available sub-commands and the
# arguments they take.
help ()
{
	base=`basename $0`
	echo -e "Usage: $base command"
	# Fix user-visible typo: "Avaliable" -> "Available".
	echo "Available commands:"
	echo -e "\texport <file.conf>: export and lock down the AUTOPR values from the PR service into a file for release."
	echo -e "\timport <file.conf>: import the AUTOPR values from the exported file into the PR service."
}
11 | |||
# Remove bitbake's parse cache directory so the next bitbake run
# re-parses the metadata from scratch.  The CACHE path is taken from
# the "bitbake -e" environment dump rather than hard-coded.
clean_cache()
{
	s=`bitbake -e | grep ^CACHE= | cut -f2 -d\"`
	# Only remove something if CACHE was actually found.
	if [ "x${s}" != "x" ]; then
		rm -rf ${s}
	fi
}
19 | |||
# Export the AUTOPR values from the PR service into the file named by $1.
# conf/prexport.conf is pre-read (bitbake -R) so the export machinery
# writes PRSERV_DUMPFILE; that dump is then moved to the destination.
# Returns 0 on success, 1 on failure.
do_export ()
{
	file=$1
	# A destination file name is mandatory.
	[ "x${file}" == "x" ] && help && exit 1
	rm -f ${file}

	clean_cache
	bitbake -R conf/prexport.conf -p
	# Ask bitbake where the PR dump was written.
	s=`bitbake -R conf/prexport.conf -e | grep ^PRSERV_DUMPFILE= | cut -f2 -d\"`
	if [ "x${s}" != "x" ];
	then
		[ -e $s ] && mv -f $s $file && echo "Exporting to file $file succeeded!"
		return 0
	fi
	echo "Exporting to file $file failed!"
	return 1
}
37 | |||
# Import AUTOPR values from a previously exported file ($1) into the
# PR service, pre-reading conf/primport.conf plus the export file itself.
# Returns bitbake's exit status.
do_import ()
{
	file=$1
	# A source file name is mandatory.
	[ "x${file}" == "x" ] && help && exit 1

	clean_cache
	bitbake -R conf/primport.conf -R $file -p
	ret=$?
	[ $ret -eq 0 ] && echo "Importing from file $file succeeded!" || echo "Importing from file $file failed!"
	return $ret
}
49 | |||
# Migrate from the old LOCALCOUNT-based PR scheme to AUTOINC values:
# dump LOCALCOUNTs to LOCALCOUNT_DUMPFILE via conf/migrate_localcount.conf,
# then import that file into the PR service.  Returns 0 on success;
# exits the script on export failure.
do_migrate_localcount ()
{
	df=`bitbake -R conf/migrate_localcount.conf -e | \
		grep ^LOCALCOUNT_DUMPFILE= | cut -f2 -d\"`
	if [ "x${df}" == "x" ];
	then
		echo "LOCALCOUNT_DUMPFILE is not defined!"
		return 1
	fi

	rm -rf $df
	clean_cache
	echo "Exporting LOCALCOUNT to AUTOINCs..."
	bitbake -R conf/migrate_localcount.conf -p
	[ ! $? -eq 0 ] && echo "Exporting to file $df failed!" && exit 1

	# The parse step above should have produced the dump file.
	if [ -e $df ];
	then
		echo "Exporting to file $df succeeded!"
	else
		echo "Exporting to file $df failed!"
		exit 1
	fi

	echo "Importing generated AUTOINC entries..."
	[ -e $df ] && do_import $df

	if [ ! $? -eq 0 ]
	then
		echo "Migration from LOCALCOUNT to AUTOINCs failed!"
		return 1
	fi

	echo "Migration from LOCALCOUNT to AUTOINCs succeeded!"
	return 0
}
86 | |||
# No arguments at all: show usage and fail.
[ $# -eq 0 ] && help && exit 1

# Dispatch on the sub-command given as the first argument.
case $1 in
export)
	do_export $2
	;;
import)
	do_import $2
	;;
migrate_localcount)
	do_migrate_localcount
	;;
*)
	help
	exit 1
	;;
esac
diff --git a/scripts/bitbake-whatchanged b/scripts/bitbake-whatchanged new file mode 100755 index 0000000..55cfe4b --- /dev/null +++ b/scripts/bitbake-whatchanged | |||
@@ -0,0 +1,339 @@ | |||
1 | #!/usr/bin/env python | ||
2 | # ex:ts=4:sw=4:sts=4:et | ||
3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
4 | |||
5 | # Copyright (c) 2013 Wind River Systems, Inc. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
14 | # See the GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License | ||
17 | # along with this program; if not, write to the Free Software | ||
18 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | |||
20 | from __future__ import print_function | ||
21 | import os | ||
22 | import sys | ||
23 | import getopt | ||
24 | import shutil | ||
25 | import re | ||
26 | import warnings | ||
27 | import subprocess | ||
28 | from optparse import OptionParser | ||
29 | |||
# Make this script's lib/ directory importable so we can use scriptpath.
scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])))
lib_path = scripts_path + '/lib'
sys.path = sys.path + [lib_path]

import scriptpath

# Figure out where is the bitbake/lib/bb since we need bb.siggen and bb.process
bitbakepath = scriptpath.add_bitbake_lib_path()
if not bitbakepath:
    sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
    sys.exit(1)

import bb.siggen
import bb.process

# Match the stamp's filename.  Named groups:
#   pv:   PE_PV (the PE part may be absent)
#   pr:   PR (e.g. "r0")
#   task: task name including the "do_" prefix
#   hash: the task signature hash
stamp_re = re.compile("(?P<pv>.*)-(?P<pr>r\d+)\.(?P<task>do_\w+)\.(?P<hash>[^\.]*)")
# Matches the ".sigdata" stamp files produced by "bitbake -S".
sigdata_re = re.compile(".*\.sigdata\..*")
52 | |||
def gen_dict(stamps):
    """
    Generate the dict from the stamps dir.
    The output dict format is:
        {fake_f: {pn: PN, pv: PV, pr: PR, task: TASK, path: PATH}}
    Where:
        fake_f: pv + task + hash (keys identical tasks from the old and
                new stamp trees to the same entry)
        path: the path to the stamp file
    """
    # The member of the sub dict (A "path" will be appended below)
    sub_mem = ("pv", "pr", "task")
    d = {}
    for dirpath, _, files in os.walk(stamps):
        for f in files:
            # The "bitbake -S" would generate ".sigdata", but no "_setscene".
            # Normalize both stamp flavours to a comparable name.
            fake_f = re.sub('_setscene.', '.', f)
            fake_f = re.sub('.sigdata', '', fake_f)
            subdict = {}
            tmp = stamp_re.match(fake_f)
            if tmp:
                for i in sub_mem:
                    subdict[i] = tmp.group(i)
            if len(subdict) != 0:
                # The recipe name is the stamp's parent directory name.
                pn = os.path.basename(dirpath)
                subdict['pn'] = pn
                # The path will be used by os.stat() and bb.siggen
                subdict['path'] = dirpath + "/" + f
                fake_f = tmp.group('pv') + tmp.group('task') + tmp.group('hash')
                d[fake_f] = subdict
    return d
83 | |||
# Re-key the stamp dict by "pn_task".
def recon_dict(dict_in):
    """
    Re-construct the dict produced by gen_dict().

    The output dict format is:
        {pn_task: {pv: PV, pr: PR, path: PATH}}

    When several stamps map to the same pn_task, the stamp file with
    the most recent mtime wins.
    """
    dict_out = {}
    for entry in dict_in.values():
        key = "%s_%s" % (entry.get('pn'), entry.get('task'))
        if key in dict_out:
            # Already saw a stamp for this pn_task: keep whichever
            # stamp file is newer on disk.
            prev_path = dict_out[key].get('path')
            cur_path = entry.get('path')
            if os.stat(prev_path).st_mtime > os.stat(cur_path).st_mtime:
                continue
        dict_out[key] = {
            'pv': entry.get('pv'),
            'pr': entry.get('pr'),
            'path': entry.get('path'),
        }
    return dict_out
107 | |||
def split_pntask(s):
    """
    Split a "pn_task" key into a (pn, task) tuple.

    The pn part is matched greedily, so the split happens at the last
    "_do_" boundary; the returned task keeps its "do_" prefix.
    """
    matched = re.match(r"(?P<pn>.*)_(?P<task>do_.*)", s)
    return (matched.group('pn'), matched.group('task'))
114 | |||
115 | |||
def print_added(d_new = None, d_old = None):
    """
    Print the newly added tasks (present in d_new but not in d_old).

    Side effect: the added entries are removed from d_new, so the
    remaining entries in d_new are the changed ones.
    Returns the number of newly added tasks.
    """
    added = {}
    # Iterate over a snapshot of the keys: entries are deleted from
    # d_new inside the loop, and mutating a dict while iterating its
    # live key view raises RuntimeError on Python 3 (a list() copy is
    # also correct, and free, on Python 2).
    for k in list(d_new.keys()):
        if k not in d_old:
            # Add the new one to added dict, and remove it from
            # d_new, so the remaining ones are the changed ones
            added[k] = d_new.get(k)
            del(d_new[k])

    if not added:
        return 0

    # Format the output, the dict format is:
    # {pn: task1, task2 ...}
    added_format = {}
    counter = 0
    for k in added.keys():
        pn, task = split_pntask(k)
        if pn in added_format:
            # Append the value
            added_format[pn] = "%s %s" % (added_format.get(pn), task)
        else:
            added_format[pn] = task
        counter += 1
    print("=== Newly added tasks: (%s tasks)" % counter)
    for k in added_format.keys():
        print("  %s: %s" % (k, added_format.get(k)))

    return counter
148 | |||
def print_vrchanged(d_new = None, d_old = None, vr = None):
    """
    Print the pv or pr changed tasks.
    The arg "vr" is "pv" or "pr".

    Side effect: changed entries are removed from d_new.
    Returns the number of changed tasks.
    """
    pvchanged = {}
    counter = 0
    # Snapshot the keys: entries are deleted from d_new inside the loop,
    # which would raise RuntimeError on a live key view under Python 3.
    for k in list(d_new.keys()):
        if d_new.get(k).get(vr) != d_old.get(k).get(vr):
            counter += 1
            pn, task = split_pntask(k)
            if pn not in pvchanged:
                # Format the output, we only print pn (no task) since
                # all the tasks would be changed when pv or pr changed,
                # the dict format is:
                # {pn: pv/pr_old -> pv/pr_new}
                pvchanged[pn] = "%s -> %s" % (d_old.get(k).get(vr), d_new.get(k).get(vr))
            del(d_new[k])

    if not pvchanged:
        return 0

    print("\n=== %s changed: (%s tasks)" % (vr.upper(), counter))
    for k in pvchanged.keys():
        print("  %s: %s" % (k, pvchanged.get(k)))

    return counter
176 | |||
def print_depchanged(d_new = None, d_old = None, verbose = False):
    """
    Print the tasks whose dependencies (signatures) changed.

    When verbose is True, use bb.siggen.compare_sigfiles() to print the
    detailed signature differences; otherwise print a per-recipe summary
    of changed tasks.
    Returns the number of changed tasks.
    """
    depchanged = {}
    counter = 0
    for k in d_new.keys():
        counter += 1
        pn, task = split_pntask(k)
        if (verbose):
            full_path_old = d_old.get(k).get("path")
            full_path_new = d_new.get(k).get("path")
            # No counter since it is not ready here
            if sigdata_re.match(full_path_old) and sigdata_re.match(full_path_new):
                output = bb.siggen.compare_sigfiles(full_path_old, full_path_new)
                if output:
                    # Bug fix: "task" already carries the "do_" prefix
                    # (it comes from split_pntask), so the old format
                    # string "%s.do_%s" printed "pn.do_do_task".
                    print("\n=== The verbose changes of %s.%s:" % (pn, task))
                    print('\n'.join(output))
        else:
            # Format the output, the format is:
            # {pn: task1, task2, ...}
            if pn in depchanged:
                depchanged[pn] = "%s %s" % (depchanged.get(pn), task)
            else:
                depchanged[pn] = task

    if len(depchanged) > 0:
        print("\n=== Dependencies changed: (%s tasks)" % counter)
        for k in depchanged.keys():
            print("  %s: %s" % (k, depchanged[k]))

    return counter
209 | |||
210 | |||
def main():
    """
    Print what will be done between the current and last builds:
    1) Run "STAMPS_DIR=<path> bitbake -S recipe" to re-generate the stamps
    2) Figure out what are newly added and changed, can't figure out
       what are removed since we can't know the previous stamps
       clearly, for example, if there are several builds, we can't know
       which stamps the last build has used exactly.
    3) Use bb.siggen.compare_sigfiles to diff the old and new stamps

    Returns 0 on success, 2 on setup errors (exit code of the script).
    """

    parser = OptionParser(
        version = "1.0",
        usage = """%prog [options] [package ...]
print what will be done between the current and last builds, for example:

    $ bitbake core-image-sato
    # Edit the recipes
    $ bitbake-whatchanged core-image-sato

The changes will be printed"

Note:
    The amount of tasks is not accurate when the task is "do_build" since
    it usually depends on other tasks.
    The "nostamp" task is not included.
"""
)
    parser.add_option("-v", "--verbose", help = "print the verbose changes",
            action = "store_true", dest = "verbose")

    options, args = parser.parse_args(sys.argv)

    verbose = options.verbose

    # Exactly one recipe/target must be given.
    if len(args) != 2:
        parser.error("Incorrect number of arguments")
    else:
        recipe = args[1]

    # Get the STAMPS_DIR
    print("Figuring out the STAMPS_DIR ...")
    cmdline = "bitbake -e | sed -ne 's/^STAMPS_DIR=\"\(.*\)\"/\\1/p'"
    try:
        stampsdir, err = bb.process.run(cmdline)
    except:
        raise
    if not stampsdir:
        print("ERROR: No STAMPS_DIR found for '%s'" % recipe, file=sys.stderr)
        return 2
    stampsdir = stampsdir.rstrip("\n")
    if not os.path.isdir(stampsdir):
        print("ERROR: stamps directory \"%s\" not found!" % stampsdir, file=sys.stderr)
        return 2

    # The new stamps dir (a sibling of the real one, removed on exit).
    new_stampsdir = stampsdir + ".bbs"
    if os.path.exists(new_stampsdir):
        print("ERROR: %s already exists!" % new_stampsdir, file=sys.stderr)
        return 2

    try:
        # Generate the new stamps dir
        print("Generating the new stamps ... (need several minutes)")
        cmdline = "STAMPS_DIR=%s bitbake -S none %s" % (new_stampsdir, recipe)
        # FIXME
        # The "bitbake -S" may fail, not fatal error, the stamps will still
        # be generated, this might be a bug of "bitbake -S".
        try:
            bb.process.run(cmdline)
        except Exception as exc:
            print(exc)

        # The dict for the new and old stamps.
        old_dict = gen_dict(stampsdir)
        new_dict = gen_dict(new_stampsdir)

        # Remove the same one from both stamps.
        # NOTE(review): deleting while iterating .keys() relies on
        # Python 2 returning a list here; would need list(new_dict.keys())
        # under Python 3.
        cnt_unchanged = 0
        for k in new_dict.keys():
            if k in old_dict:
                cnt_unchanged += 1
                del(new_dict[k])
                del(old_dict[k])

        # Re-construct the dict to easily find out what is added or changed.
        # The dict format is:
        # {pn_task: {pv: PV, pr: PR, path: PATH}}
        new_recon = recon_dict(new_dict)
        old_recon = recon_dict(old_dict)

        del new_dict
        del old_dict

        # Figure out what are changed, the new_recon would be changed
        # by the print_xxx function.
        # Newly added
        cnt_added = print_added(new_recon, old_recon)

        # PV (including PE) and PR changed
        # Let the bb.siggen handle them if verbose
        cnt_rv = {}
        if not verbose:
            for i in ('pv', 'pr'):
                cnt_rv[i] = print_vrchanged(new_recon, old_recon, i)

        # Dependencies changed (use bitbake-diffsigs)
        cnt_dep = print_depchanged(new_recon, old_recon, verbose)

        total_changed = cnt_added + (cnt_rv.get('pv') or 0) + (cnt_rv.get('pr') or 0) + cnt_dep

        print("\n=== Summary: (%s changed, %s unchanged)" % (total_changed, cnt_unchanged))
        if verbose:
            print("Newly added: %s\nDependencies changed: %s\n" % \
                (cnt_added, cnt_dep))
        else:
            print("Newly added: %s\nPV changed: %s\nPR changed: %s\nDependencies changed: %s\n" % \
                (cnt_added, cnt_rv.get('pv') or 0, cnt_rv.get('pr') or 0, cnt_dep))
    except:
        print("ERROR occurred!")
        raise
    finally:
        # Remove the newly generated stamps dir
        if os.path.exists(new_stampsdir):
            print("Removing the newly generated stamps dir ...")
            shutil.rmtree(new_stampsdir)
337 | |||
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == "__main__":
    sys.exit(main())
diff --git a/scripts/buildhistory-collect-srcrevs b/scripts/buildhistory-collect-srcrevs new file mode 100755 index 0000000..58a2708 --- /dev/null +++ b/scripts/buildhistory-collect-srcrevs | |||
@@ -0,0 +1,109 @@ | |||
1 | #!/usr/bin/env python | ||
2 | # | ||
3 | # Collects the recorded SRCREV values from buildhistory and reports on them | ||
4 | # | ||
5 | # Copyright 2013 Intel Corporation | ||
6 | # Authored-by: Paul Eggleton <paul.eggleton@intel.com> | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or modify | ||
9 | # it under the terms of the GNU General Public License version 2 as | ||
10 | # published by the Free Software Foundation. | ||
11 | # | ||
12 | # This program is distributed in the hope that it will be useful, | ||
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | # GNU General Public License for more details. | ||
16 | # | ||
17 | # You should have received a copy of the GNU General Public License along | ||
18 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | |||
21 | import os, sys | ||
22 | import optparse | ||
23 | import logging | ||
24 | |||
def logger_create():
    """Create and return the "buildhistory" logger, emitting
    "LEVEL: message" lines at INFO level."""
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    log = logging.getLogger("buildhistory")
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    return log
32 | |||
33 | logger = logger_create() | ||
34 | |||
def main():
    """Parse the command line, walk the buildhistory tree looking for
    latest_srcrev files, and print SRCREV assignments suitable for
    pasting into a conf file."""
    parser = optparse.OptionParser(
        description = "Collects the recorded SRCREV values from buildhistory and reports on them.",
        usage = """
%prog [options]""")

    parser.add_option("-a", "--report-all",
            help = "Report all SRCREV values, not just ones where AUTOREV has been used",
            action="store_true", dest="reportall")
    parser.add_option("-f", "--forcevariable",
            help = "Use forcevariable override for all output lines",
            action="store_true", dest="forcevariable")
    parser.add_option("-p", "--buildhistory-dir",
            help = "Specify path to buildhistory directory (defaults to buildhistory/ under cwd)",
            action="store", dest="buildhistory_dir", default='buildhistory/')

    options, args = parser.parse_args(sys.argv)

    # No positional arguments are expected.
    if len(args) > 1:
        sys.stderr.write('Invalid argument(s) specified: %s\n\n' % ' '.join(args[1:]))
        parser.print_help()
        sys.exit(1)

    if not os.path.exists(options.buildhistory_dir):
        sys.stderr.write('Buildhistory directory "%s" does not exist\n\n' % options.buildhistory_dir)
        parser.print_help()
        sys.exit(1)

    # Optional "_forcevariable" override suffix appended to each line.
    if options.forcevariable:
        forcevariable = '_forcevariable'
    else:
        forcevariable = ''

    lastdir = ''
    for root, dirs, files in os.walk(options.buildhistory_dir):
        # Don't descend into the buildhistory git metadata.
        if '.git' in dirs:
            dirs.remove('.git')
        for fn in files:
            if fn == 'latest_srcrev':
                # Emit a "# <dir>" header when moving to a new parent dir.
                curdir = os.path.basename(os.path.dirname(root))
                if lastdir != curdir:
                    print('# %s' % curdir)
                    lastdir = curdir
                fullpath = os.path.join(root, fn)
                pn = os.path.basename(root)
                srcrev = None
                orig_srcrev = None
                orig_srcrevs = {}
                srcrevs = {}
                with open(fullpath) as f:
                    for line in f:
                        if '=' in line:
                            splitval = line.split('=')
                            value = splitval[1].strip('" \t\n\r')
                            # "# SRCREV..." comment lines record the values
                            # as originally configured; uncommented lines
                            # record the resolved values.
                            if line.startswith('# SRCREV = '):
                                orig_srcrev = value
                            elif line.startswith('# SRCREV_'):
                                splitval = line.split('=')
                                # Name is the token between "SRCREV_" and "=".
                                name = splitval[0].split('_')[1].strip()
                                orig_srcrevs[name] = value
                            elif line.startswith('SRCREV ='):
                                srcrev = value
                            elif line.startswith('SRCREV_'):
                                name = splitval[0].split('_')[1].strip()
                                srcrevs[name] = value
                # Only report values that differ from the originally
                # configured ones, unless --report-all was given.
                if srcrev and (options.reportall or srcrev != orig_srcrev):
                    print('SRCREV_pn-%s%s = "%s"' % (pn, forcevariable, srcrev))
                for name, value in srcrevs.items():
                    orig = orig_srcrevs.get(name, orig_srcrev)
                    if options.reportall or value != orig:
                        print('SRCREV_%s_pn-%s%s = "%s"' % (name, pn, forcevariable, value))
106 | |||
107 | |||
# Script entry point.
if __name__ == "__main__":
    main()
diff --git a/scripts/buildhistory-diff b/scripts/buildhistory-diff new file mode 100755 index 0000000..dfebcdd --- /dev/null +++ b/scripts/buildhistory-diff | |||
@@ -0,0 +1,100 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # Report significant differences in the buildhistory repository since a specific revision | ||
4 | # | ||
5 | # Copyright (C) 2013 Intel Corporation | ||
6 | # Author: Paul Eggleton <paul.eggleton@linux.intel.com> | ||
7 | |||
8 | import sys | ||
9 | import os | ||
10 | import optparse | ||
11 | from distutils.version import LooseVersion | ||
12 | |||
# Ensure GitPython is installed (buildhistory_analysis needs it);
# fail early with a helpful message otherwise.
try:
    import git
except ImportError:
    print("Please install GitPython (python-git) 0.3.1 or later in order to use this script")
    sys.exit(1)
19 | |||
def main():
    """Parse options and revision arguments, import
    oe.buildhistory_analysis, and print the significant changes between
    the two buildhistory revisions.  Exits the process directly."""
    parser = optparse.OptionParser(
        description = "Reports significant differences in the buildhistory repository.",
        usage = """
%prog [options] [from-revision [to-revision]]
(if not specified, from-revision defaults to build-minus-1, and to-revision defaults to HEAD)""")

    parser.add_option("-p", "--buildhistory-dir",
            help = "Specify path to buildhistory directory (defaults to buildhistory/ under cwd)",
            action="store", dest="buildhistory_dir", default='buildhistory/')
    parser.add_option("-v", "--report-version",
            help = "Report changes in PKGE/PKGV/PKGR even when the values are still the default (PE/PV/PR)",
            action="store_true", dest="report_ver", default=False)
    parser.add_option("-a", "--report-all",
            help = "Report all changes, not just the default significant ones",
            action="store_true", dest="report_all", default=False)

    options, args = parser.parse_args(sys.argv)

    # At most two positional revision arguments are accepted.
    if len(args) > 3:
        sys.stderr.write('Invalid argument(s) specified: %s\n\n' % ' '.join(args[3:]))
        parser.print_help()
        sys.exit(1)

    # buildhistory_analysis needs the GitPython 0.3.x API.
    if LooseVersion(git.__version__) < '0.3.1':
        sys.stderr.write("Version of GitPython is too old, please install GitPython (python-git) 0.3.1 or later in order to use this script\n")
        sys.exit(1)

    if not os.path.exists(options.buildhistory_dir):
        sys.stderr.write('Buildhistory directory "%s" does not exist\n\n' % options.buildhistory_dir)
        parser.print_help()
        sys.exit(1)

    # Make scripts/lib importable so we can use scriptpath.
    scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])))
    lib_path = scripts_path + '/lib'
    sys.path = sys.path + [lib_path]

    import scriptpath

    # Set path to OE lib dir so we can import the buildhistory_analysis module
    scriptpath.add_oe_lib_path()
    # Set path to bitbake lib dir so the buildhistory_analysis module can load bb.utils
    bitbakepath = scriptpath.add_bitbake_lib_path()
    if not bitbakepath:
        sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
        sys.exit(1)

    import oe.buildhistory_analysis

    # Work out the revision range: either a single "from..to" argument,
    # or separate from/to positional arguments.
    fromrev = 'build-minus-1'
    torev = 'HEAD'
    if len(args) > 1:
        if len(args) == 2 and '..' in args[1]:
            revs = args[1].split('..')
            fromrev = revs[0]
            if revs[1]:
                torev = revs[1]
        else:
            fromrev = args[1]
            if len(args) > 2:
                torev = args[2]

    import gitdb
    try:
        changes = oe.buildhistory_analysis.process_changes(options.buildhistory_dir, fromrev, torev, options.report_all, options.report_ver)
    except gitdb.exc.BadObject as e:
        if len(args) == 1:
            # Default revisions were used, so the failure means the
            # previous-build marker doesn't exist yet.
            sys.stderr.write("Unable to find previous build revision in buildhistory repository\n\n")
            parser.print_help()
        else:
            sys.stderr.write('Specified git revision "%s" is not valid\n' % e.args[0])
        sys.exit(1)

    for chg in changes:
        print('%s' % chg)

    sys.exit(0)
97 | |||
98 | |||
# Script entry point (main() exits the process itself).
if __name__ == "__main__":
    main()
diff --git a/scripts/cleanup-workdir b/scripts/cleanup-workdir new file mode 100755 index 0000000..a7f5a3a --- /dev/null +++ b/scripts/cleanup-workdir | |||
@@ -0,0 +1,198 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # Copyright (c) 2012 Wind River Systems, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify | ||
6 | # it under the terms of the GNU General Public License version 2 as | ||
7 | # published by the Free Software Foundation. | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, | ||
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | # See the GNU General Public License for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License | ||
15 | # along with this program; if not, write to the Free Software | ||
16 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | |||
18 | import os | ||
19 | import sys | ||
20 | import optparse | ||
21 | import re | ||
22 | import subprocess | ||
23 | import shutil | ||
24 | |||
25 | pkg_cur_dirs = {} | ||
26 | obsolete_dirs = [] | ||
27 | parser = None | ||
28 | |||
def err_quit(msg):
    """Print an error message and the usage text, then exit with status 1."""
    print msg
    parser.print_usage()
    sys.exit(1)
33 | |||
def parse_version(verstr):
    """
    Convert an "epoch:version" string (as printed by "bitbake -s") into
    the directory-name form used under WORKDIR: "epoch_version", or just
    "version" when the epoch part is empty.
    """
    parts = verstr.split(':')
    epoch = parts[0]
    return parts[1] if not epoch else epoch + '_' + parts[1]
41 | |||
def run_command(cmd):
    """Run a shell command and return its stdout; exit the whole script
    with status 1 if the command fails."""
    pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    output = pipe.communicate()[0]
    if pipe.returncode != 0:
        print "Execute command '%s' failed." % cmd
        sys.exit(1)
    return output
49 | |||
def get_cur_arch_dirs(workdir, arch_dirs):
    """Append to arch_dirs the first-level WORKDIR subdirectory names
    used by the current architecture, discovered by querying
    IMAGE_ROOTFS for a representative set of packages."""
    pattern = workdir + '/(.*?)/'

    cmd = "bitbake -e | grep ^SDK_ARCH="
    output = run_command(cmd)
    sdk_arch = output.split('"')[1]

    # select these 5 packages to get the dirs of current arch
    pkgs = ['hicolor-icon-theme', 'base-files', 'acl-native', 'binutils-crosssdk-' + sdk_arch, 'nativesdk-autoconf']

    for pkg in pkgs:
        cmd = "bitbake -e " + pkg + " | grep ^IMAGE_ROOTFS="
        output = run_command(cmd)
        output = output.split('"')[1]
        m = re.match(pattern, output)
        arch_dirs.append(m.group(1))
66 | |||
67 | def main(): | ||
68 | global parser | ||
69 | parser = optparse.OptionParser( | ||
70 | usage = """%prog | ||
71 | |||
72 | %prog removes the obsolete packages' build directories in WORKDIR. | ||
73 | This script must be ran under BUILDDIR after source file \"oe-init-build-env\". | ||
74 | |||
75 | Any file or directory under WORKDIR which is not created by Yocto | ||
76 | will be deleted. Be CAUTIOUS.""") | ||
77 | |||
78 | options, args = parser.parse_args(sys.argv) | ||
79 | |||
80 | builddir = run_command('echo $BUILDDIR').strip() | ||
81 | if len(builddir) == 0: | ||
82 | err_quit("Please source file \"oe-init-build-env\" first.\n") | ||
83 | |||
84 | if os.getcwd() != builddir: | ||
85 | err_quit("Please run %s under: %s\n" % (os.path.basename(args[0]), builddir)) | ||
86 | |||
87 | print 'Updating bitbake caches...' | ||
88 | cmd = "bitbake -s" | ||
89 | output = run_command(cmd) | ||
90 | |||
91 | output = output.split('\n') | ||
92 | index = 0 | ||
93 | while len(output[index]) > 0: | ||
94 | index += 1 | ||
95 | alllines = output[index+1:] | ||
96 | |||
97 | for line in alllines: | ||
98 | # empty again means end of the versions output | ||
99 | if len(line) == 0: | ||
100 | break | ||
101 | line = line.strip() | ||
102 | line = re.sub('\s+', ' ', line) | ||
103 | elems = line.split(' ') | ||
104 | if len(elems) == 2: | ||
105 | version = parse_version(elems[1]) | ||
106 | else: | ||
107 | version = parse_version(elems[2]) | ||
108 | pkg_cur_dirs[elems[0]] = version | ||
109 | |||
110 | cmd = "bitbake -e" | ||
111 | output = run_command(cmd) | ||
112 | |||
113 | tmpdir = None | ||
114 | image_rootfs = None | ||
115 | output = output.split('\n') | ||
116 | for line in output: | ||
117 | if tmpdir and image_rootfs: | ||
118 | break | ||
119 | |||
120 | if not tmpdir: | ||
121 | m = re.match('TMPDIR="(.*)"', line) | ||
122 | if m: | ||
123 | tmpdir = m.group(1) | ||
124 | |||
125 | if not image_rootfs: | ||
126 | m = re.match('IMAGE_ROOTFS="(.*)"', line) | ||
127 | if m: | ||
128 | image_rootfs = m.group(1) | ||
129 | |||
130 | # won't fail just in case | ||
131 | if not tmpdir or not image_rootfs: | ||
132 | print "Can't get TMPDIR or IMAGE_ROOTFS." | ||
133 | return 1 | ||
134 | |||
135 | pattern = tmpdir + '/(.*?)/(.*?)/' | ||
136 | m = re.match(pattern, image_rootfs) | ||
137 | if not m: | ||
138 | print "Can't get WORKDIR." | ||
139 | return 1 | ||
140 | |||
141 | workdir = os.path.join(tmpdir, m.group(1)) | ||
142 | |||
143 | # we only deal the dirs of current arch, total numbers of dirs are 6 | ||
144 | cur_arch_dirs = [m.group(2)] | ||
145 | get_cur_arch_dirs(workdir, cur_arch_dirs) | ||
146 | |||
147 | for workroot, dirs, files in os.walk(workdir): | ||
148 | # For the files, they should NOT exist in WORKDIR. Remove them. | ||
149 | for f in files: | ||
150 | obsolete_dirs.append(os.path.join(workroot, f)) | ||
151 | |||
152 | for d in dirs: | ||
153 | if d not in cur_arch_dirs: | ||
154 | continue | ||
155 | |||
156 | for pkgroot, pkgdirs, filenames in os.walk(os.path.join(workroot, d)): | ||
157 | for f in filenames: | ||
158 | obsolete_dirs.append(os.path.join(pkgroot, f)) | ||
159 | |||
160 | for pkgdir in sorted(pkgdirs): | ||
161 | if pkgdir not in pkg_cur_dirs: | ||
162 | obsolete_dirs.append(os.path.join(pkgroot, pkgdir)) | ||
163 | else: | ||
164 | for verroot, verdirs, verfiles in os.walk(os.path.join(pkgroot, pkgdir)): | ||
165 | for f in verfiles: | ||
166 | obsolete_dirs.append(os.path.join(pkgroot, f)) | ||
167 | for v in sorted(verdirs): | ||
168 | if v not in pkg_cur_dirs[pkgdir]: | ||
169 | obsolete_dirs.append(os.path.join(pkgroot, pkgdir, v)) | ||
170 | break | ||
171 | |||
172 | # just process the top dir of every package under tmp/work/*/, | ||
173 | # then jump out of the above os.walk() | ||
174 | break | ||
175 | |||
176 | # it is convenient to use os.walk() to get dirs and files at same time | ||
177 | # both of them have been dealed in the loop, so jump out | ||
178 | break | ||
179 | |||
180 | for d in obsolete_dirs: | ||
181 | print "Deleting %s" % d | ||
182 | shutil.rmtree(d, True) | ||
183 | |||
184 | if len(obsolete_dirs): | ||
185 | print '\nTotal %d items.' % len(obsolete_dirs) | ||
186 | else: | ||
187 | print '\nNo obsolete directory found under %s.' % workdir | ||
188 | |||
189 | return 0 | ||
190 | |||
# Script entry point: run main(), report any unexpected traceback and
# exit 2 on failure, otherwise exit with main()'s return value.
if __name__ == '__main__':
    try:
        ret = main()
    except Exception:
        ret = 2
        import traceback
        traceback.print_exc(3)
    sys.exit(ret)
diff --git a/scripts/combo-layer b/scripts/combo-layer new file mode 100755 index 0000000..19d64e6 --- /dev/null +++ b/scripts/combo-layer | |||
@@ -0,0 +1,600 @@ | |||
1 | #!/usr/bin/env python | ||
2 | # ex:ts=4:sw=4:sts=4:et | ||
3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
4 | # | ||
5 | # Copyright 2011 Intel Corporation | ||
6 | # Authored-by: Yu Ke <ke.yu@intel.com> | ||
7 | # Paul Eggleton <paul.eggleton@intel.com> | ||
8 | # Richard Purdie <richard.purdie@intel.com> | ||
9 | # | ||
10 | # This program is free software; you can redistribute it and/or modify | ||
11 | # it under the terms of the GNU General Public License version 2 as | ||
12 | # published by the Free Software Foundation. | ||
13 | # | ||
14 | # This program is distributed in the hope that it will be useful, | ||
15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | # GNU General Public License for more details. | ||
18 | # | ||
19 | # You should have received a copy of the GNU General Public License along | ||
20 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
22 | |||
import ConfigParser
import logging
import optparse
import os
import re
import subprocess
import sys
import tempfile
29 | |||
30 | __version__ = "0.2.1" | ||
31 | |||
def logger_create():
    """Configure and return the root logger with a timestamped stderr handler."""
    log = logging.getLogger("")
    handler = logging.StreamHandler()
    fmt = logging.Formatter("[%(asctime)s] %(message)s", "%H:%M:%S")
    handler.setFormatter(fmt)
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    return log
39 | |||
40 | logger = logger_create() | ||
41 | |||
def get_current_branch(repodir=None):
    """
    Return the name of the branch currently checked out in repodir (or the
    current directory when repodir is None).  Returns "master" when the repo
    has not been created yet, and "" when git cannot resolve HEAD.
    """
    gitdir = os.path.join(repodir if repodir else '', ".git")
    if not os.path.exists(gitdir):
        # Repo not created yet (i.e. during init) so just assume master
        return "master"
    try:
        ref = runcmd("git symbolic-ref HEAD 2>/dev/null", repodir).strip()
    except subprocess.CalledProcessError:
        return ""
    prefix = "refs/heads/"
    return ref[len(prefix):] if ref.startswith(prefix) else ref
53 | |||
class Configuration(object):
    """
    Manages the configuration

    For an example config file, see combo-layer.conf.example

    """
    def __init__(self, options):
        # Mirror every command-line option (conffile, interactive, nopull, ...)
        # as an attribute so the rest of the code can use self.<option>.
        for key, val in options.__dict__.items():
            setattr(self, key, val)

        def readsection(parser, section, repo):
            # Copy every option of `section` into self.repos[repo].  Values
            # starting with "@" are evaluated as python expressions (same
            # convention as bitbake), e.g. @os.getenv("LOCAL_REPO_DIR").
            # NOTE(review): eval() on config values — only use trusted configs.
            for (name, value) in parser.items(section):
                if value.startswith("@"):
                    self.repos[repo][name] = eval(value.strip("@"))
                else:
                    self.repos[repo][name] = value

        logger.debug("Loading config file %s" % self.conffile)
        self.parser = ConfigParser.ConfigParser()
        with open(self.conffile) as f:
            self.parser.readfp(f)

        # One config section per component repository
        self.repos = {}
        for repo in self.parser.sections():
            self.repos[repo] = {}
            readsection(self.parser, repo, repo)

        # Load local configuration, if available
        self.localconffile = None
        self.localparser = None
        self.combobranch = None
        if self.conffile.endswith('.conf'):
            lcfile = self.conffile.replace('.conf', '-local.conf')
            if os.path.exists(lcfile):
                # Read combo layer branch
                self.combobranch = get_current_branch()
                logger.debug("Combo layer branch is %s" % self.combobranch)

                self.localconffile = lcfile
                logger.debug("Loading local config file %s" % self.localconffile)
                self.localparser = ConfigParser.ConfigParser()
                with open(self.localconffile) as f:
                    self.localparser.readfp(f)

                # Local sections may be branch-specific ("repo|branch");
                # only apply those matching the current combo branch, and
                # only for repos already declared in the main config.
                for section in self.localparser.sections():
                    if '|' in section:
                        sectionvals = section.split('|')
                        repo = sectionvals[0]
                        if sectionvals[1] != self.combobranch:
                            continue
                    else:
                        repo = section
                    if repo in self.repos:
                        readsection(self.localparser, section, repo)

    def update(self, repo, option, value, initmode=False):
        """
        Persist a single option for `repo` back to disk — to the local config
        file (keyed by combo branch) when one exists, otherwise to the main
        config file.
        """
        if self.localparser:
            parser = self.localparser
            section = "%s|%s" % (repo, self.combobranch)
            conffile = self.localconffile
            # During init the branch-specific section may not exist yet
            if initmode and not parser.has_section(section):
                parser.add_section(section)
        else:
            parser = self.parser
            section = repo
            conffile = self.conffile
        parser.set(section, option, value)
        with open(conffile, "w") as f:
            parser.write(f)

    def sanity_check(self, initmode=False):
        """
        Verify that all mandatory options are set for every component and
        that the external `filterdiff` tool is installed; exits(1) on error.
        """
        required_options=["src_uri", "local_repo_dir", "dest_dir", "last_revision"]
        if initmode:
            # "init" computes last_revision itself, so don't require it there
            required_options.remove("last_revision")
        msg = ""
        missing_options = []
        for name in self.repos:
            for option in required_options:
                if option not in self.repos[name]:
                    msg = "%s\nOption %s is not defined for component %s" %(msg, option, name)
                    missing_options.append(option)
        if msg != "":
            logger.error("configuration file %s has the following error: %s" % (self.conffile,msg))
            if self.localconffile and 'last_revision' in missing_options:
                logger.error("local configuration file %s may be missing configuration for combo branch %s" % (self.localconffile, self.combobranch))
            sys.exit(1)

        # filterdiff is required by action_splitpatch, so check its availability
        if subprocess.call("which filterdiff > /dev/null 2>&1", shell=True) != 0:
            logger.error("ERROR: patchutils package is missing, please install it (e.g. # apt-get install patchutils)")
            sys.exit(1)
146 | |||
def runcmd(cmd, destdir=None, printerr=True):
    """
    Execute a shell command, raising subprocess.CalledProcessError on failure
    and returning its combined stdout/stderr output on success.

    cmd      -- command string, run through the shell
    destdir  -- working directory for the command (None = current directory)
    printerr -- when True, log the command's output at error level on failure
    """
    logger.debug("run cmd '%s' in %s" % (cmd, os.getcwd() if destdir is None else destdir))
    # Use tempfile.TemporaryFile() instead of os.tmpfile(): os.tmpfile() is
    # deprecated (and removed in Python 3), while TemporaryFile behaves the
    # same here and is portable.
    out = tempfile.TemporaryFile()
    try:
        try:
            subprocess.check_call(cmd, stdout=out, stderr=out, cwd=destdir, shell=True)
        except subprocess.CalledProcessError:
            out.seek(0)
            if printerr:
                logger.error("%s" % out.read())
            # Bare raise preserves the original traceback (unlike "raise e")
            raise
        out.seek(0)
        output = out.read()
        logger.debug("output: %s" % output)
        return output
    finally:
        # Close (and thereby delete) the temp file even on failure; the
        # original implementation leaked the open file object.
        out.close()
166 | |||
def action_init(conf, args):
    """
    Clone component repositories
    Check git is initialised; if not, copy initial data from component repos
    """
    # First make sure every component repo exists locally, cloning as needed
    for name in conf.repos:
        ldir = conf.repos[name]['local_repo_dir']
        if not os.path.exists(ldir):
            logger.info("cloning %s to %s" %(conf.repos[name]['src_uri'], ldir))
            subprocess.check_call("git clone %s %s" % (conf.repos[name]['src_uri'], ldir), shell=True)
    if not os.path.exists(".git"):
        # Combo repo does not exist yet: create it and seed it with a
        # snapshot of each component at its configured starting revision.
        runcmd("git init")
        for name in conf.repos:
            repo = conf.repos[name]
            ldir = repo['local_repo_dir']
            branch = repo.get('branch', "master")
            lastrev = repo.get('last_revision', None)
            if lastrev and lastrev != "HEAD":
                # Start from the revision pinned in the config
                initialrev = lastrev
                if branch:
                    if not check_rev_branch(name, ldir, lastrev, branch):
                        sys.exit(1)
                logger.info("Copying data from %s at specified revision %s..." % (name, lastrev))
            else:
                # No usable pin: start from the tip of the configured branch
                lastrev = None
                initialrev = branch
                logger.info("Copying data from %s..." % name)
            dest_dir = repo['dest_dir']
            if dest_dir and dest_dir != ".":
                extract_dir = os.path.join(os.getcwd(), dest_dir)
                if not os.path.exists(extract_dir):
                    os.makedirs(extract_dir)
            else:
                extract_dir = os.getcwd()
            file_filter = repo.get('file_filter', "")
            # Export the chosen revision straight into the combo tree;
            # git archive | tar preserves paths and permissions
            runcmd("git archive %s | tar -x -C %s %s" % (initialrev, extract_dir, file_filter), ldir)
            if not lastrev:
                # Resolve the symbolic starting point to a concrete SHA
                lastrev = runcmd("git rev-parse %s" % initialrev, ldir).strip()
            conf.update(name, "last_revision", lastrev, initmode=True)
        runcmd("git add .")
        if conf.localconffile:
            # The local config must never be committed: untrack it if
            # "git add ." swept it up, and add it to .gitignore
            localadded = True
            try:
                runcmd("git rm --cached %s" % conf.localconffile, printerr=False)
            except subprocess.CalledProcessError:
                # git rm fails when the file wasn't tracked — nothing to do
                localadded = False
            if localadded:
                localrelpath = os.path.relpath(conf.localconffile)
                runcmd("grep -q %s .gitignore || echo %s >> .gitignore" % (localrelpath, localrelpath))
                runcmd("git add .gitignore")
                logger.info("Added local configuration file %s to .gitignore", localrelpath)
        logger.info("Initial combo layer repository data has been created; please make any changes if desired and then use 'git commit' to make the initial commit.")
    else:
        logger.info("Repository already initialised, nothing to do.")
221 | |||
222 | |||
def check_repo_clean(repodir):
    """
    Exit with an error unless the git repo at repodir has a clean working
    tree.  Untracked patch-*/ directories (our own scratch dirs) are ignored.
    """
    status = runcmd("git status --porcelain", repodir)
    scratch = re.compile('\?\? patch-.*/')
    dirty = [entry for entry in status.splitlines() if not scratch.match(entry)]
    if dirty:
        logger.error("git repo %s is dirty, please fix it first", repodir)
        sys.exit(1)
234 | |||
def check_patch(patchfile):
    """
    Guard a format-patch file against diffs embedded in its commit message.

    If a "--- " diff appears before the real "---" separator, git am would
    try to apply it; indent the whole message body by one space to defuse it,
    rewriting patchfile in place.  Files without such a diff are untouched.
    """
    src = open(patchfile)
    outfile = None
    inside_diff = False
    past_message = False
    buffered = ''
    for line in src:
        if not past_message:
            if line == '---\n':
                if not outfile:
                    # No embedded diff seen: nothing to fix, leave file alone
                    break
                inside_diff = False
                past_message = True
            elif line.startswith('--- '):
                # We have a diff in the commit message
                inside_diff = True
                if not outfile:
                    print('WARNING: %s contains a diff in its commit message, indenting to avoid failure during apply' % patchfile)
                    outfile = open(patchfile + '.tmp', 'w')
                    outfile.write(buffered)
                    buffered = ''
            elif inside_diff and not line[0] in '+-@ \n\r':
                inside_diff = False
        if outfile:
            if inside_diff:
                outfile.write(' ' + line)
            else:
                outfile.write(line)
        else:
            # Keep the message prefix until we know a rewrite is needed
            buffered += line
    src.close()
    if outfile:
        outfile.close()
        os.rename(patchfile + '.tmp', patchfile)
271 | |||
272 | def drop_to_shell(workdir=None): | ||
273 | shell = os.environ.get('SHELL', 'bash') | ||
274 | print('Dropping to shell "%s"\n' \ | ||
275 | 'When you are finished, run the following to continue:\n' \ | ||
276 | ' exit -- continue to apply the patches\n' \ | ||
277 | ' exit 1 -- abort\n' % shell); | ||
278 | ret = subprocess.call([shell], cwd=workdir) | ||
279 | if ret != 0: | ||
280 | print "Aborting" | ||
281 | return False | ||
282 | else: | ||
283 | return True | ||
284 | |||
def check_rev_branch(component, repodir, rev, branch):
    """
    Verify that revision rev exists in the repo at repodir and lives on the
    given branch.  Logs an error and returns False otherwise; True on success.
    """
    try:
        containing = runcmd("git branch --contains %s" % rev, repodir, printerr=False)
    except subprocess.CalledProcessError as e:
        # git exits with 129 for an unknown revision; treat that as
        # "contained in no branch" and report it below
        if e.returncode != 129:
            raise
        containing = ""

    if not containing:
        logger.error("%s: specified revision %s is invalid!" % (component, rev))
        return False

    # Each output line looks like "* master" or "  devel"; keep the name only
    names = [entry.strip().split(' ')[-1] for entry in containing.split("\n")]

    if branch not in names:
        logger.error("%s: specified revision %s is not on specified branch %s!" % (component, rev, branch))
        return False
    return True
307 | |||
def get_repos(conf, args):
    """
    Return the component repos selected on the command line (the non-option
    arguments after the action), or all configured repos when none are named.
    Exits if an unknown component is requested.
    """
    selected = []
    for candidate in args[1:]:
        # Stop at the first option-style argument
        if candidate.startswith('-'):
            break
        selected.append(candidate)
    for name in selected:
        if name not in conf.repos:
            logger.error("Specified component '%s' not found in configuration" % name)
            sys.exit(0)

    return selected if selected else conf.repos
325 | |||
def action_pull(conf, args):
    """
    Pull the latest upstream changes into each selected component repo
    (checking out the configured branch first).  Combo repo is untouched.
    """
    repos = get_repos(conf, args)

    # Refuse to pull into a dirty checkout
    for name in repos:
        check_repo_clean(conf.repos[name]['local_repo_dir'])

    for name in repos:
        settings = conf.repos[name]
        ldir = settings['local_repo_dir']
        runcmd("git checkout %s" % settings.get('branch', "master"), ldir)
        logger.info("git pull for component repo %s in %s ..." % (name, ldir))
        logger.info(runcmd("git pull", ldir))
344 | |||
def action_update(conf, args):
    """
    update the component repos
    generate the patch list
    apply the generated patches

    This is the main workflow: pull each component, format-patch the commits
    since last_revision, optionally let the user edit the patch list, then
    apply everything to the combo repo via apply_patchlist().
    """
    repos = get_repos(conf, args)

    # make sure combo repo is clean
    check_repo_clean(os.getcwd())

    # Scratch directory for the generated patches; the patch-*/ name is
    # what check_repo_clean() tolerates as untracked
    import uuid
    patch_dir = "patch-%s" % uuid.uuid4()
    if not os.path.exists(patch_dir):
        os.mkdir(patch_dir)

    # Step 1: update the component repos
    if conf.nopull:
        logger.info("Skipping pull (-n)")
    else:
        action_pull(conf, args)

    for name in repos:
        repo = conf.repos[name]
        ldir = repo['local_repo_dir']
        dest_dir = repo['dest_dir']
        branch = repo.get('branch', "master")
        repo_patch_dir = os.path.join(os.getcwd(), patch_dir, name)

        # Step 2: generate the patch list and store to patch dir
        logger.info("Generating patches from %s..." % name)
        if dest_dir != ".":
            # Rewrite diff paths so patches apply inside dest_dir
            prefix = "--src-prefix=a/%s/ --dst-prefix=b/%s/" % (dest_dir, dest_dir)
        else:
            prefix = ""
        if repo['last_revision'] == "":
            logger.info("Warning: last_revision of component %s is not set, starting from the first commit" % name)
            patch_cmd_range = "--root %s" % branch
            rev_cmd_range = branch
        else:
            if not check_rev_branch(name, ldir, repo['last_revision'], branch):
                sys.exit(1)
            patch_cmd_range = "%s..%s" % (repo['last_revision'], branch)
            rev_cmd_range = patch_cmd_range

        file_filter = repo.get('file_filter',"")

        patch_cmd = "git format-patch -N %s --output-directory %s %s -- %s" % \
            (prefix,repo_patch_dir, patch_cmd_range, file_filter)
        output = runcmd(patch_cmd, ldir)
        logger.debug("generated patch set:\n%s" % output)
        patchlist = output.splitlines()

        rev_cmd = "git rev-list --no-merges %s -- %s" % (rev_cmd_range, file_filter)
        revlist = runcmd(rev_cmd, ldir).splitlines()

        # Step 3: Call repo specific hook to adjust patch
        if 'hook' in repo:
            # hook parameter is: ./hook patchpath revision reponame
            # revlist is newest-first, patchlist oldest-first, hence the
            # backwards-walking count index
            count=len(revlist)-1
            for patch in patchlist:
                runcmd("%s %s %s %s" % (repo['hook'], patch, revlist[count], name))
                count=count-1

        # Step 4: write patch list and revision list to file, for user to edit later
        patchlist_file = os.path.join(os.getcwd(), patch_dir, "patchlist-%s" % name)
        repo['patchlist'] = patchlist_file
        f = open(patchlist_file, 'w')
        count=len(revlist)-1
        for patch in patchlist:
            f.write("%s %s\n" % (patch, revlist[count]))
            check_patch(os.path.join(patch_dir, patch))
            count=count-1
        f.close()

    # Step 5: invoke bash for user to edit patch and patch list
    if conf.interactive:
        print('You may now edit the patch and patch list in %s\n' \
              'For example, you can remove unwanted patch entries from patchlist-*, so that they will be not applied later' % patch_dir);
        if not drop_to_shell(patch_dir):
            sys.exit(0)

    # Step 6: apply the generated and revised patch
    apply_patchlist(conf, repos)
    runcmd("rm -rf %s" % patch_dir)

    # Step 7: commit the updated config file if it's being tracked
    relpath = os.path.relpath(conf.conffile)
    try:
        output = runcmd("git status --porcelain %s" % relpath, printerr=False)
    except:
        # Outside the repository
        output = None
    if output:
        logger.info("Committing updated configuration file")
        if output.lstrip().startswith("M"):
            runcmd('git commit -m "Automatic commit to update last_revision" %s' % relpath)
442 | |||
def apply_patchlist(conf, repos):
    """
    apply the generated patch list to combo repo

    For each component, read its patchlist file ("<patchfile> <revision>" per
    line), git-am each non-empty patch, and track last_revision so that the
    config can be updated to the newest successfully-applied revision.
    """
    for name in repos:
        repo = conf.repos[name]
        lastrev = repo["last_revision"]
        # prevrev trails lastrev by one patch so that on abort we can record
        # the last revision that actually made it in
        prevrev = lastrev

        # Get non-blank lines from patch list file
        patchlist = []
        if os.path.exists(repo['patchlist']) or not conf.interactive:
            # Note: we want this to fail here if the file doesn't exist and we're not in
            # interactive mode since the file should exist in this case
            with open(repo['patchlist']) as f:
                for line in f:
                    line = line.rstrip()
                    if line:
                        patchlist.append(line)

        if patchlist:
            logger.info("Applying patches from %s..." % name)
            linecount = len(patchlist)
            i = 1
            for line in patchlist:
                patchfile = line.split()[0]
                lastrev = line.split()[1]
                patchdisp = os.path.relpath(patchfile)
                if os.path.getsize(patchfile) == 0:
                    # check_patch()/hooks can leave empty files for commits
                    # filtered out entirely
                    logger.info("(skipping %d/%d %s - no changes)" % (i, linecount, patchdisp))
                else:
                    cmd = "git am --keep-cr -s -p1 %s" % patchfile
                    logger.info("Applying %d/%d: %s" % (i, linecount, patchdisp))
                    try:
                        runcmd(cmd)
                    except subprocess.CalledProcessError:
                        logger.info('Running "git am --abort" to cleanup repo')
                        runcmd("git am --abort")
                        logger.error('"%s" failed' % cmd)
                        logger.info("Please manually apply patch %s" % patchdisp)
                        logger.info("Note: if you exit and continue applying without manually applying the patch, it will be skipped")
                        if not drop_to_shell():
                            # User aborted: persist how far we actually got
                            if prevrev != repo['last_revision']:
                                conf.update(name, "last_revision", prevrev)
                            sys.exit(0)
                prevrev = lastrev
                i += 1
        else:
            logger.info("No patches to apply from %s" % name)
            # Even with no patches, fast-forward last_revision to the
            # component branch tip
            ldir = conf.repos[name]['local_repo_dir']
            branch = conf.repos[name].get('branch', "master")
            lastrev = runcmd("git rev-parse %s" % branch, ldir).strip()

        if lastrev != repo['last_revision']:
            conf.update(name, "last_revision", lastrev)
498 | |||
def action_splitpatch(conf, args):
    """
    Generate the patch for a single combo-repo commit (default HEAD) and
    split it into one patch file per component under splitpatch-<commit>/.
    """
    logger.debug("action_splitpatch")
    commit = args[1] if len(args) > 1 else "HEAD"
    patchdir = "splitpatch-%s" % commit
    if not os.path.exists(patchdir):
        os.mkdir(patchdir)

    # The component whose dest_dir is the root "." gets everything that is
    # NOT owned by another component, expressed as filterdiff exclusions
    # "-x repo1 -x repo2 ..."
    filerange_root = ""
    for name in conf.repos:
        component_dir = conf.repos[name]['dest_dir']
        if component_dir != ".":
            filerange_root = '%s -x "%s/*"' % (filerange_root, component_dir)

    for name in conf.repos:
        component_dir = conf.repos[name]['dest_dir']
        patch_filename = "%s/%s.patch" % (patchdir, name)
        if component_dir == ".":
            cmd = "git format-patch -n1 --stdout %s^..%s | filterdiff -p1 %s > %s" % (commit, commit, filerange_root, patch_filename)
        else:
            cmd = "git format-patch --no-prefix -n1 --stdout %s^..%s -- %s > %s" % (commit, commit, component_dir, patch_filename)
        runcmd(cmd)
        # Drop patches that are empty or that filterdiff reduced to
        # preamble-only text
        if os.path.getsize(patch_filename) == 0 or runcmd("filterdiff %s" % patch_filename) == "":
            os.remove(patch_filename)
            logger.info("(skipping %s - no changes)", name)
        else:
            logger.info(patch_filename)
537 | |||
def action_error(conf, args):
    """Fallback handler invoked for an unrecognized action name."""
    logger.info("invalid action %s" % args[0])
540 | |||
# Dispatch table: action name (first CLI argument) -> handler function
actions = {
    "init": action_init,
    "update": action_update,
    "pull": action_pull,
    "splitpatch": action_splitpatch,
}
547 | |||
def main():
    """
    Parse command-line options, validate the action and config file, then
    dispatch to the matching action handler.
    """
    parser = optparse.OptionParser(
        version = "Combo Layer Repo Tool version %s" % __version__,
        usage = """%prog [options] action

Create and update a combination layer repository from multiple component repositories.

Action:
  init                 initialise the combo layer repo
  update [components]  get patches from component repos and apply them to the combo repo
  pull [components]    just pull component repos only
  splitpatch [commit]  generate commit patch and split per component, default commit is HEAD""")

    parser.add_option("-c", "--conf", help = "specify the config file (conf/combo-layer.conf is the default).",
               action = "store", dest = "conffile", default = "conf/combo-layer.conf")

    parser.add_option("-i", "--interactive", help = "interactive mode, user can edit the patch list and patches",
               action = "store_true", dest = "interactive", default = False)

    parser.add_option("-D", "--debug", help = "output debug information",
               action = "store_true", dest = "debug", default = False)

    parser.add_option("-n", "--no-pull", help = "skip pulling component repos during update",
               action = "store_true", dest = "nopull", default = False)

    # args[0] is the program name, args[1] the action, the rest components
    options, args = parser.parse_args(sys.argv)

    # Dispatch to action handler
    if len(args) == 1:
        logger.error("No action specified, exiting")
        parser.print_help()
    elif args[1] not in actions:
        logger.error("Unsupported action %s, exiting\n" % (args[1]))
        parser.print_help()
    elif not os.path.exists(options.conffile):
        logger.error("No valid config file, exiting\n")
        parser.print_help()
    else:
        if options.debug:
            logger.setLevel(logging.DEBUG)
        confdata = Configuration(options)
        # init is allowed to run without last_revision being set
        initmode = (args[1] == 'init')
        confdata.sanity_check(initmode)
        actions.get(args[1], action_error)(confdata, args[1:])
592 | |||
if __name__ == "__main__":
    # Entry point: convert any unexpected failure into exit code 1 so shell
    # callers can detect it.
    try:
        ret = main()
    except Exception:
        ret = 1
        import traceback
        # Limit the printed traceback to 5 stack entries to keep output short
        traceback.print_exc(5)
    sys.exit(ret)
diff --git a/scripts/combo-layer-hook-default.sh b/scripts/combo-layer-hook-default.sh new file mode 100755 index 0000000..8b148ac --- /dev/null +++ b/scripts/combo-layer-hook-default.sh | |||
@@ -0,0 +1,13 @@ | |||
#!/bin/sh
# Hook to add source component/revision info to commit message
# Parameter:
#   $1 patch-file
#   $2 revision
#   $3 reponame

patchfile=$1
rev=$2
reponame=$3

# Prefix the subject with the component name:
#   "[PATCH] fix foo" -> "[PATCH] <reponame>: fix foo"
sed -i -e "s#^Subject: \[PATCH\] \(.*\)#Subject: \[PATCH\] $reponame: \1#" $patchfile
# Insert "(<reponame> rev: <rev>)" above the first Signed-off-by line only
# (the "0,/.../" address limits the substitution to the first match)
sed -i -e "0,/^Signed-off-by:/s#\(^Signed-off-by:.*\)#\($reponame rev: $rev\)\n\n\1#" $patchfile
diff --git a/scripts/combo-layer.conf.example b/scripts/combo-layer.conf.example new file mode 100644 index 0000000..010a692 --- /dev/null +++ b/scripts/combo-layer.conf.example | |||
@@ -0,0 +1,56 @@ | |||
1 | # combo-layer example configuration file | ||
2 | |||
3 | # component name | ||
4 | [bitbake] | ||
5 | # mandatory options | ||
6 | # git upstream uri | ||
7 | src_uri = git://git.openembedded.org/bitbake | ||
8 | |||
9 | # the directory to clone the component repo | ||
10 | local_repo_dir = /home/kyu3/src/test/bitbake | ||
11 | |||
12 | # the relative dir within the combo repo to put the component files | ||
13 | # use "." if the files should be in the root dir | ||
14 | dest_dir = bitbake | ||
15 | |||
16 | # the last update revision. | ||
17 | # "init" will set this to the latest revision automatically, however if it | ||
18 | # is empty when "update" is run, the tool will start from the first commit. | ||
19 | # Note that this value will get updated by "update" if the component repo's | ||
20 | # latest revision changed and the operation completes successfully. | ||
21 | last_revision = | ||
22 | |||
23 | # optional options: | ||
24 | |||
25 | # branch: specify the branch in the component repo to pull from | ||
26 | # (master if not specified) | ||
27 | |||
28 | # file_filter: only include the specified file(s) | ||
29 | # file_filter = [path] [path] ... | ||
30 | # example: | ||
31 | # file_filter = src/ : only include the subdir src | ||
32 | # file_filter = src/*.c : only include the src *.c file | ||
33 | # file_filter = src/main.c src/Makefile.am : only include these two files | ||
34 | |||
35 | # hook: if provided, the tool will call the hook to process the generated | ||
36 | # patch from upstream, and then apply the modified patch to the combo | ||
37 | # repo. | ||
38 | # the hook script is called as follows: ./hook patchpath revision reponame | ||
39 | # example: | ||
40 | # hook = combo-layer-hook-default.sh | ||
41 | |||
42 | [oe-core] | ||
43 | src_uri = git://git.openembedded.org/openembedded-core | ||
44 | local_repo_dir = /home/kyu3/src/test/oecore | ||
45 | dest_dir = . | ||
46 | last_revision = | ||
47 | |||
48 | # It is also possible to embed python code in the config values. Similar | ||
49 | # to bitbake it considers every value starting with @ to be a python | ||
50 | # script. | ||
51 | # e.g. local_repo_dir could easily be configured using an environment | ||
52 | # variable: | ||
53 | # | ||
54 | # [bitbake] | ||
55 | # local_repo_dir = @os.getenv("LOCAL_REPO_DIR") + "/bitbake" | ||
56 | # | ||
diff --git a/scripts/contrib/bb-perf/bb-matrix-plot.sh b/scripts/contrib/bb-perf/bb-matrix-plot.sh new file mode 100755 index 0000000..136a255 --- /dev/null +++ b/scripts/contrib/bb-perf/bb-matrix-plot.sh | |||
@@ -0,0 +1,137 @@ | |||
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# DESCRIPTION
# This script operates on the .dat file generated by bb-matrix.sh. It tolerates
# the header by skipping the first line, but error messages and bad data records
# need to be removed first. It will generate three views of the plot, and leave
# an interactive view open for further analysis.
#
# AUTHORS
# Darren Hart <dvhart@linux.intel.com>
#

# Setup the defaults
DATFILE="bb-matrix.dat"
XLABEL="BB_NUMBER_THREADS"
YLABEL="PARALLEL_MAKE"
FIELD=3
DEF_TITLE="Elapsed Time (seconds)"
PM3D_FRAGMENT="unset surface; set pm3d at s hidden3d 100"
SIZE="640,480"

# Print usage information for this script
function usage {
CMD=$(basename $0)
cat <<EOM
Usage: $CMD [-d datfile] [-f field] [-h] [-t title] [-w]
  -d datfile    The data file generated by bb-matrix.sh (default: $DATFILE)
  -f field      The field index to plot as the Z axis from the data file
                (default: $FIELD, "$DEF_TITLE")
  -h            Display this help message
  -s W,H        PNG and window size in pixels (default: $SIZE)
  -t title      The title to display, should describe the field (-f) and units
                (default: "$DEF_TITLE")
  -w            Render the plot as wireframe with a 2D colormap projected on the
                XY plane rather than as the texture for the surface
EOM
}

# Parse and validate arguments
while getopts "d:f:hs:t:w" OPT; do
	case $OPT in
	d)
		DATFILE="$OPTARG"
		;;
	f)
		FIELD="$OPTARG"
		;;
	h)
		usage
		exit 0
		;;
	s)
		SIZE="$OPTARG"
		;;
	t)
		TITLE="$OPTARG"
		;;
	w)
		# Wireframe mode: colormap at the bottom, W suffix in output names
		PM3D_FRAGMENT="set pm3d at b"
		W="-w"
		;;
	*)
		usage
		exit 1
		;;
	esac
done

# Ensure the data file exists
if [ ! -f "$DATFILE" ]; then
	echo "ERROR: $DATFILE does not exist"
	usage
	exit 1
fi
PLOT_BASENAME=${DATFILE%.*}-f$FIELD$W

# Set a sane title
# TODO: parse the header and define titles for each format parameter for TIME(1)
if [ -z "$TITLE" ]; then
	if [ ! "$FIELD" == "3" ]; then
		TITLE="Field $FIELD"
	else
		TITLE="$DEF_TITLE"
	fi
fi

# Determine the dgrid3d mesh dimensions size
# (count of distinct values in columns 1 and 2; leading zeros stripped so
# sort -n treats them numerically)
MIN=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 1 | sed 's/^0*//' | sort -n | uniq | head -n1)
MAX=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 1 | sed 's/^0*//' | sort -n | uniq | tail -n1)
BB_CNT=$[${MAX} - $MIN + 1]
MIN=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 2 | sed 's/^0*//' | sort -n | uniq | head -n1)
MAX=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 2 | sed 's/^0*//' | sort -n | uniq | tail -n1)
PM_CNT=$[${MAX} - $MIN + 1]

# Drive gnuplot: render PNGs from three viewpoints (3D, BB-axis, PM-axis),
# then leave an interactive wxt window open (--persist) for analysis
(cat <<EOF
set title "$TITLE"
set xlabel "$XLABEL"
set ylabel "$YLABEL"
set style line 100 lt 5 lw 1.5
$PM3D_FRAGMENT
set dgrid3d $PM_CNT,$BB_CNT splines
set ticslevel 0.2

set term png size $SIZE
set output "$PLOT_BASENAME.png"
splot "$DATFILE" every ::1 using 1:2:$FIELD with lines ls 100

set view 90,0
set output "$PLOT_BASENAME-bb.png"
replot

set view 90,90
set output "$PLOT_BASENAME-pm.png"
replot

set view 60,30
set term wxt size $SIZE
replot
EOF
) | gnuplot --persist
diff --git a/scripts/contrib/bb-perf/bb-matrix.sh b/scripts/contrib/bb-perf/bb-matrix.sh new file mode 100755 index 0000000..1064565 --- /dev/null +++ b/scripts/contrib/bb-perf/bb-matrix.sh | |||
@@ -0,0 +1,79 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # Copyright (c) 2011, Intel Corporation. | ||
4 | # All rights reserved. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify | ||
7 | # it under the terms of the GNU General Public License as published by | ||
8 | # the Free Software Foundation; either version 2 of the License, or | ||
9 | # (at your option) any later version. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License | ||
17 | # along with this program; if not, write to the Free Software | ||
18 | # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This script runs BB_CMD (typically building core-image-sato) for all | ||
22 | # combincations of BB_RANGE and PM_RANGE values. It saves off all the console | ||
23 | # logs, the buildstats directories, and creates a bb-pm-runtime.dat file which | ||
24 | # can be used to postprocess the results with a plotting tool, spreadsheet, etc. | ||
25 | # Before running this script, it is recommended that you pre-download all the | ||
26 | # necessary sources by performing the BB_CMD once manually. It is also a good | ||
27 | # idea to disable cron to avoid runtime variations caused by things like the | ||
28 | # locate process. Be sure to sanitize the dat file prior to post-processing as | ||
29 | # it may contain error messages or bad runs that should be removed. | ||
30 | # | ||
31 | # AUTHORS | ||
32 | # Darren Hart <dvhart@linux.intel.com> | ||
33 | # | ||
34 | |||
35 | # The following ranges are appropriate for a 4 core system with 8 logical units | ||
36 | # Use leading 0s to ensure all digits are the same string length, this results | ||
37 | # in nice log file names and columnar dat files. | ||
38 | BB_RANGE="04 05 06 07 08 09 10 11 12 13 14 15 16" | ||
39 | PM_RANGE="04 05 06 07 08 09 10 11 12 13 14 15 16" | ||
40 | |||
41 | DATADIR="bb-matrix-$$" | ||
42 | BB_CMD="bitbake core-image-minimal" | ||
43 | RUNTIME_LOG="$DATADIR/bb-matrix.dat" | ||
44 | |||
45 | # See TIME(1) for a description of the time format parameters | ||
46 | # The following all report 0: W K r s t w | ||
47 | TIME_STR="%e %S %U %P %c %w %R %F %M %x" | ||
48 | |||
49 | # Prepare the DATADIR | ||
50 | mkdir $DATADIR | ||
51 | if [ $? -ne 0 ]; then | ||
52 | echo "Failed to create $DATADIR." | ||
53 | exit 1 | ||
54 | fi | ||
55 | |||
56 | # Add a simple header | ||
57 | echo "BB PM $TIME_STR" > $RUNTIME_LOG | ||
58 | for BB in $BB_RANGE; do | ||
59 | for PM in $PM_RANGE; do | ||
60 | RUNDIR="$DATADIR/$BB-$PM-build" | ||
61 | mkdir $RUNDIR | ||
62 | BB_LOG=$RUNDIR/$BB-$PM-bitbake.log | ||
63 | date | ||
64 | echo "BB=$BB PM=$PM Logging to $BB_LOG" | ||
65 | |||
66 | echo -n " Preparing the work directory... " | ||
67 | rm -rf pseudodone tmp sstate-cache tmp-eglibc &> /dev/null | ||
68 | echo "done" | ||
69 | |||
70 | # Export the variables under test and run the bitbake command | ||
71 | # Strip any leading zeroes before passing to bitbake | ||
72 | export BB_NUMBER_THREADS=$(echo $BB | sed 's/^0*//') | ||
73 | export PARALLEL_MAKE="-j $(echo $PM | sed 's/^0*//')" | ||
74 | /usr/bin/time -f "$BB $PM $TIME_STR" -a -o $RUNTIME_LOG $BB_CMD &> $BB_LOG | ||
75 | |||
76 | echo " $(tail -n1 $RUNTIME_LOG)" | ||
77 | cp -a tmp/buildstats $RUNDIR/$BB-$PM-buildstats | ||
78 | done | ||
79 | done | ||
diff --git a/scripts/contrib/bbvars.py b/scripts/contrib/bbvars.py new file mode 100755 index 0000000..0896d64 --- /dev/null +++ b/scripts/contrib/bbvars.py | |||
@@ -0,0 +1,186 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # This program is free software; you can redistribute it and/or modify | ||
4 | # it under the terms of the GNU General Public License as published by | ||
5 | # the Free Software Foundation; either version 2 of the License, or | ||
6 | # (at your option) any later version. | ||
7 | # | ||
8 | # This program is distributed in the hope that it will be useful, | ||
9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | # GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License | ||
14 | # along with this program; if not, write to the Free Software | ||
15 | # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
16 | # | ||
17 | # Copyright (C) Darren Hart <dvhart@linux.intel.com>, 2010 | ||
18 | |||
19 | |||
20 | import sys | ||
21 | import getopt | ||
22 | import os | ||
23 | import os.path | ||
24 | import re | ||
25 | |||
26 | def usage(): | ||
27 | print 'Usage: %s -d FILENAME [-d FILENAME]* -m METADIR [-m MATADIR]*' % os.path.basename(sys.argv[0]) | ||
28 | print ' -d FILENAME documentation file to search' | ||
29 | print ' -h, --help display this help and exit' | ||
30 | print ' -m METADIR meta directory to search for recipes' | ||
31 | print ' -t FILENAME documentation config file (for doc tags)' | ||
32 | print ' -T Only display variables with doc tags (requires -t)' | ||
33 | |||
34 | def recipe_bbvars(recipe): | ||
35 | ''' Return a unique set of every bbvar encountered in the recipe ''' | ||
36 | prog = re.compile("[A-Z_]+") | ||
37 | vset = set() | ||
38 | try: | ||
39 | r = open(recipe) | ||
40 | except IOError as (errno, strerror): | ||
41 | print 'WARNING: Failed to open recipe ', recipe | ||
42 | print strerror | ||
43 | |||
44 | for line in r: | ||
45 | # Strip any comments from the line | ||
46 | line = line.rsplit('#')[0] | ||
47 | vset = vset.union(set(prog.findall(line))) | ||
48 | r.close() | ||
49 | |||
50 | bbvars = {} | ||
51 | for v in vset: | ||
52 | bbvars[v] = 1 | ||
53 | |||
54 | return bbvars | ||
55 | |||
56 | def collect_bbvars(metadir): | ||
57 | ''' Walk the metadir and collect the bbvars from each recipe found ''' | ||
58 | bbvars = {} | ||
59 | for root,dirs,files in os.walk(metadir): | ||
60 | for name in files: | ||
61 | if name.find(".bb") >= 0: | ||
62 | for key in recipe_bbvars(os.path.join(root,name)).iterkeys(): | ||
63 | if bbvars.has_key(key): | ||
64 | bbvars[key] = bbvars[key] + 1 | ||
65 | else: | ||
66 | bbvars[key] = 1 | ||
67 | return bbvars | ||
68 | |||
69 | def bbvar_is_documented(var, docfiles): | ||
70 | prog = re.compile(".*($|[^A-Z_])%s([^A-Z_]|$)" % (var)) | ||
71 | for doc in docfiles: | ||
72 | try: | ||
73 | f = open(doc) | ||
74 | except IOError as (errno, strerror): | ||
75 | print 'WARNING: Failed to open doc ', doc | ||
76 | print strerror | ||
77 | for line in f: | ||
78 | if prog.match(line): | ||
79 | return True | ||
80 | f.close() | ||
81 | return False | ||
82 | |||
83 | def bbvar_doctag(var, docconf): | ||
84 | prog = re.compile('^%s\[doc\] *= *"(.*)"' % (var)) | ||
85 | if docconf == "": | ||
86 | return "?" | ||
87 | |||
88 | try: | ||
89 | f = open(docconf) | ||
90 | except IOError as (errno, strerror): | ||
91 | return strerror | ||
92 | |||
93 | for line in f: | ||
94 | m = prog.search(line) | ||
95 | if m: | ||
96 | return m.group(1) | ||
97 | |||
98 | f.close() | ||
99 | return "" | ||
100 | |||
101 | def main(): | ||
102 | docfiles = [] | ||
103 | metadirs = [] | ||
104 | bbvars = {} | ||
105 | undocumented = [] | ||
106 | docconf = "" | ||
107 | onlydoctags = False | ||
108 | |||
109 | # Collect and validate input | ||
110 | try: | ||
111 | opts, args = getopt.getopt(sys.argv[1:], "d:hm:t:T", ["help"]) | ||
112 | except getopt.GetoptError, err: | ||
113 | print '%s' % str(err) | ||
114 | usage() | ||
115 | sys.exit(2) | ||
116 | |||
117 | for o, a in opts: | ||
118 | if o in ('-h', '--help'): | ||
119 | usage() | ||
120 | sys.exit(0) | ||
121 | elif o == '-d': | ||
122 | if os.path.isfile(a): | ||
123 | docfiles.append(a) | ||
124 | else: | ||
125 | print 'ERROR: documentation file %s is not a regular file' % (a) | ||
126 | sys.exit(3) | ||
127 | elif o == '-m': | ||
128 | if os.path.isdir(a): | ||
129 | metadirs.append(a) | ||
130 | else: | ||
131 | print 'ERROR: meta directory %s is not a directory' % (a) | ||
132 | sys.exit(4) | ||
133 | elif o == "-t": | ||
134 | if os.path.isfile(a): | ||
135 | docconf = a | ||
136 | elif o == "-T": | ||
137 | onlydoctags = True | ||
138 | else: | ||
139 | assert False, "unhandled option" | ||
140 | |||
141 | if len(docfiles) == 0: | ||
142 | print 'ERROR: no docfile specified' | ||
143 | usage() | ||
144 | sys.exit(5) | ||
145 | |||
146 | if len(metadirs) == 0: | ||
147 | print 'ERROR: no metadir specified' | ||
148 | usage() | ||
149 | sys.exit(6) | ||
150 | |||
151 | if onlydoctags and docconf == "": | ||
152 | print 'ERROR: no docconf specified' | ||
153 | usage() | ||
154 | sys.exit(7) | ||
155 | |||
156 | # Collect all the variable names from the recipes in the metadirs | ||
157 | for m in metadirs: | ||
158 | for key,cnt in collect_bbvars(m).iteritems(): | ||
159 | if bbvars.has_key(key): | ||
160 | bbvars[key] = bbvars[key] + cnt | ||
161 | else: | ||
162 | bbvars[key] = cnt | ||
163 | |||
164 | # Check each var for documentation | ||
165 | varlen = 0 | ||
166 | for v in bbvars.iterkeys(): | ||
167 | if len(v) > varlen: | ||
168 | varlen = len(v) | ||
169 | if not bbvar_is_documented(v, docfiles): | ||
170 | undocumented.append(v) | ||
171 | undocumented.sort() | ||
172 | varlen = varlen + 1 | ||
173 | |||
174 | # Report all undocumented variables | ||
175 | print 'Found %d undocumented bb variables (out of %d):' % (len(undocumented), len(bbvars)) | ||
176 | header = '%s%s%s' % (str("VARIABLE").ljust(varlen), str("COUNT").ljust(6), str("DOCTAG").ljust(7)) | ||
177 | print header | ||
178 | print str("").ljust(len(header), '=') | ||
179 | for v in undocumented: | ||
180 | doctag = bbvar_doctag(v, docconf) | ||
181 | if not onlydoctags or not doctag == "": | ||
182 | print '%s%s%s' % (v.ljust(varlen), str(bbvars[v]).ljust(6), doctag) | ||
183 | |||
184 | |||
185 | if __name__ == "__main__": | ||
186 | main() | ||
diff --git a/scripts/contrib/build-perf-test.sh b/scripts/contrib/build-perf-test.sh new file mode 100755 index 0000000..cdd7885 --- /dev/null +++ b/scripts/contrib/build-perf-test.sh | |||
@@ -0,0 +1,369 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # This script runs a series of tests (with and without sstate) and reports build time (and tmp/ size) | ||
4 | # | ||
5 | # Build performance test script | ||
6 | # | ||
7 | # Copyright 2013 Intel Corporation | ||
8 | # | ||
9 | # This program is free software; you can redistribute it and/or modify | ||
10 | # it under the terms of the GNU General Public License as published by | ||
11 | # the Free Software Foundation; either version 2 of the License, or | ||
12 | # (at your option) any later version. | ||
13 | # | ||
14 | # This program is distributed in the hope that it will be useful, | ||
15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | # GNU General Public License for more details. | ||
18 | # | ||
19 | # You should have received a copy of the GNU General Public License | ||
20 | # along with this program; if not, write to the Free Software | ||
21 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | # | ||
23 | # | ||
24 | # AUTHORS: | ||
25 | # Stefan Stanacar <stefanx.stanacar@intel.com> | ||
26 | |||
27 | |||
28 | ME=$(basename $0) | ||
29 | |||
30 | # | ||
31 | # usage and setup | ||
32 | # | ||
33 | |||
34 | usage () { | ||
35 | cat << EOT | ||
36 | Usage: $ME [-h] | ||
37 | $ME [-c <commit>] [-v] [-m <val>] [-j <val>] [-t <val>] [-i <image-name>] [-d <path>] | ||
38 | Options: | ||
39 | -h | ||
40 | Display this help and exit. | ||
41 | -c <commit> | ||
42 | git checkout <commit> before anything else | ||
43 | -v | ||
44 | Show bitbake output, don't redirect it to a log. | ||
45 | -m <machine> | ||
46 | Value for MACHINE. Default is qemux86. | ||
47 | -j <val> | ||
48 | Value for PARALLEL_MAKE. Default is 8. | ||
49 | -t <val> | ||
50 | Value for BB_NUMBER_THREADS. Default is 8. | ||
51 | -i <image-name> | ||
52 | Instead of timing against core-image-sato, use <image-name> | ||
53 | -d <path> | ||
54 | Use <path> as DL_DIR | ||
55 | -p <githash> | ||
56 | Cherry pick githash onto the commit | ||
57 | |||
58 | Note: current working directory must be inside a poky git clone. | ||
59 | |||
60 | EOT | ||
61 | } | ||
62 | |||
63 | |||
64 | if clonedir=$(git rev-parse --show-toplevel); then | ||
65 | cd $clonedir | ||
66 | else | ||
67 | echo "The current working dir doesn't seem to be a poky git clone. Please cd there before running $ME" | ||
68 | exit 1 | ||
69 | fi | ||
70 | |||
71 | IMAGE="core-image-sato" | ||
72 | verbose=0 | ||
73 | dldir= | ||
74 | commit= | ||
75 | pmake= | ||
76 | cherrypicks= | ||
77 | while getopts "hvc:m:j:t:i:d:p:" opt; do | ||
78 | case $opt in | ||
79 | h) usage | ||
80 | exit 0 | ||
81 | ;; | ||
82 | v) verbose=1 | ||
83 | ;; | ||
84 | c) commit=$OPTARG | ||
85 | ;; | ||
86 | m) export MACHINE=$OPTARG | ||
87 | ;; | ||
88 | j) pmake=$OPTARG | ||
89 | ;; | ||
90 | t) export BB_NUMBER_THREADS=$OPTARG | ||
91 | ;; | ||
92 | i) IMAGE=$OPTARG | ||
93 | ;; | ||
94 | d) dldir=$OPTARG | ||
95 | ;; | ||
96 | p) cherrypicks="$cherrypicks $OPTARG" | ||
97 | ;; | ||
98 | *) usage | ||
99 | exit 1 | ||
100 | ;; | ||
101 | esac | ||
102 | done | ||
103 | |||
104 | |||
105 | #drop cached credentials and test for sudo access without a password | ||
106 | sudo -k -n ls > /dev/null 2>&1 | ||
107 | reqpass=$? | ||
108 | if [ $reqpass -ne 0 ]; then | ||
109 | echo "The script requires sudo access to drop caches between builds (echo 3 > /proc/sys/vm/drop_caches)" | ||
110 | read -s -p "Please enter your sudo password: " pass | ||
111 | echo | ||
112 | fi | ||
113 | |||
114 | if [ -n "$commit" ]; then | ||
115 | echo "git checkout -f $commit" | ||
116 | git pull > /dev/null 2>&1 | ||
117 | git checkout -f $commit || exit 1 | ||
118 | git pull > /dev/null 2>&1 | ||
119 | fi | ||
120 | |||
121 | if [ -n "$cherrypicks" ]; then | ||
122 | for c in $cherrypicks; do | ||
123 | git cherry-pick $c | ||
124 | done | ||
125 | fi | ||
126 | |||
127 | rev=$(git rev-parse --short HEAD) || exit 1 | ||
128 | OUTDIR="$clonedir/build-perf-test/results-$rev-`date "+%Y%m%d%H%M%S"`" | ||
129 | BUILDDIR="$OUTDIR/build" | ||
130 | resultsfile="$OUTDIR/results.log" | ||
131 | bboutput="$OUTDIR/bitbake.log" | ||
132 | myoutput="$OUTDIR/output.log" | ||
133 | globalres="$clonedir/build-perf-test/globalres.log" | ||
134 | |||
135 | mkdir -p $OUTDIR || exit 1 | ||
136 | |||
137 | log () { | ||
138 | local msg="$1" | ||
139 | echo "`date`: $msg" | tee -a $myoutput | ||
140 | } | ||
141 | |||
142 | |||
143 | # | ||
144 | # Config stuff | ||
145 | # | ||
146 | |||
147 | branch=`git branch 2>&1 | grep "^* " | tr -d "* "` | ||
148 | gitcommit=$(git rev-parse HEAD) || exit 1 | ||
149 | log "Running on $branch:$gitcommit" | ||
150 | |||
151 | source ./oe-init-build-env $OUTDIR/build >/dev/null || exit 1 | ||
152 | cd $OUTDIR/build | ||
153 | |||
154 | [ -n "$MACHINE" ] || export MACHINE="qemux86" | ||
155 | [ -n "$BB_NUMBER_THREADS" ] || export BB_NUMBER_THREADS="8" | ||
156 | |||
157 | if [ -n "$pmake" ]; then | ||
158 | export PARALLEL_MAKE="-j $pmake" | ||
159 | else | ||
160 | export PARALLEL_MAKE="-j 8" | ||
161 | fi | ||
162 | |||
163 | if [ -n "$dldir" ]; then | ||
164 | echo "DL_DIR = \"$dldir\"" >> conf/local.conf | ||
165 | else | ||
166 | echo "DL_DIR = \"$clonedir/build-perf-test/downloads\"" >> conf/local.conf | ||
167 | fi | ||
168 | |||
169 | # Sometimes I've noticed big differences in timings for the same commit, on the same machine | ||
170 | # Disabling the network sanity check helps a bit (because of my crappy network connection and/or proxy) | ||
171 | echo "CONNECTIVITY_CHECK_URIS =\"\"" >> conf/local.conf | ||
172 | |||
173 | |||
174 | # | ||
175 | # Functions | ||
176 | # | ||
177 | |||
178 | declare -a TIMES | ||
179 | time_count=0 | ||
180 | declare -a SIZES | ||
181 | size_count=0 | ||
182 | |||
183 | bbtime () { | ||
184 | local arg="$@" | ||
185 | log " Timing: bitbake ${arg}" | ||
186 | |||
187 | if [ $verbose -eq 0 ]; then | ||
188 | /usr/bin/time -v -o $resultsfile bitbake ${arg} >> $bboutput | ||
189 | else | ||
190 | /usr/bin/time -v -o $resultsfile bitbake ${arg} | ||
191 | fi | ||
192 | ret=$? | ||
193 | if [ $ret -eq 0 ]; then | ||
194 | t=`grep wall $resultsfile | sed 's/.*m:ss): //'` | ||
195 | log " TIME: $t" | ||
196 | TIMES[(( time_count++ ))]="$t" | ||
197 | else | ||
198 | log "ERROR: exit status was non-zero, will report time as 0." | ||
199 | TIMES[(( time_count++ ))]="0" | ||
200 | fi | ||
201 | |||
202 | #time by default overwrites the output file and we want to keep the results | ||
203 | #it has an append option but I don't want to clobber the results in the same file | ||
204 | i=`ls $OUTDIR/results.log* |wc -l` | ||
205 | mv $resultsfile "${resultsfile}.${i}" | ||
206 | log "More stats can be found in ${resultsfile}.${i}" | ||
207 | } | ||
208 | |||
209 | #we don't time bitbake here | ||
210 | bbnotime () { | ||
211 | local arg="$@" | ||
212 | log " Running: bitbake ${arg}" | ||
213 | if [ $verbose -eq 0 ]; then | ||
214 | bitbake ${arg} >> $bboutput | ||
215 | else | ||
216 | bitbake ${arg} | ||
217 | fi | ||
218 | ret=$? | ||
219 | if [ $ret -eq 0 ]; then | ||
220 | log " Finished bitbake ${arg}" | ||
221 | else | ||
222 | log "ERROR: exit status was non-zero. Exit.." | ||
223 | exit $ret | ||
224 | fi | ||
225 | |||
226 | } | ||
227 | |||
228 | do_rmtmp() { | ||
229 | log " Removing tmp" | ||
230 | rm -rf bitbake.lock pseudodone conf/sanity_info cache tmp | ||
231 | } | ||
232 | do_rmsstate () { | ||
233 | log " Removing sstate-cache" | ||
234 | rm -rf sstate-cache | ||
235 | } | ||
236 | do_sync () { | ||
237 | log " Syncing and dropping caches" | ||
238 | sync; sync | ||
239 | if [ $reqpass -eq 0 ]; then | ||
240 | sudo sh -c "echo 3 > /proc/sys/vm/drop_caches" | ||
241 | else | ||
242 | echo "$pass" | sudo -S sh -c "echo 3 > /proc/sys/vm/drop_caches" | ||
243 | echo | ||
244 | fi | ||
245 | sleep 3 | ||
246 | } | ||
247 | |||
248 | write_results() { | ||
249 | echo -n "`uname -n`,$branch:$gitcommit,`git describe`," >> $globalres | ||
250 | for i in "${TIMES[@]}"; do | ||
251 | echo -n "$i," >> $globalres | ||
252 | done | ||
253 | for i in "${SIZES[@]}"; do | ||
254 | echo -n "$i," >> $globalres | ||
255 | done | ||
256 | echo >> $globalres | ||
257 | sed -i '$ s/,$//' $globalres | ||
258 | } | ||
259 | |||
260 | #### | ||
261 | |||
262 | # | ||
263 | # Test 1 | ||
264 | # Measure: Wall clock of "bitbake core-image-sato" and size of tmp/dir (w/o rm_work and w/ rm_work) | ||
265 | # Pre: Downloaded sources, no sstate | ||
266 | # Steps: | ||
267 | # Part1: | ||
268 | # - fetchall | ||
269 | # - clean build dir | ||
270 | # - time bitbake core-image-sato | ||
271 | # - collect data | ||
272 | # Part2: | ||
273 | # - bitbake virtual/kernel -c cleansstate | ||
274 | # - time bitbake virtual/kernel | ||
275 | # Part3: | ||
276 | # - add INHERIT to local.conf | ||
277 | # - clean build dir | ||
278 | # - build | ||
279 | # - report size, remove INHERIT | ||
280 | |||
281 | test1_p1 () { | ||
282 | log "Running Test 1, part 1/3: Measure wall clock of bitbake $IMAGE and size of tmp/ dir" | ||
283 | bbnotime $IMAGE -c fetchall | ||
284 | do_rmtmp | ||
285 | do_rmsstate | ||
286 | do_sync | ||
287 | bbtime $IMAGE | ||
288 | s=`du -s tmp | sed 's/tmp//' | sed 's/[ \t]*$//'` | ||
289 | SIZES[(( size_count++ ))]="$s" | ||
290 | log "SIZE of tmp dir is: $s" | ||
291 | log "Buildstats are saved in $OUTDIR/buildstats-test1" | ||
292 | mv tmp/buildstats $OUTDIR/buildstats-test1 | ||
293 | } | ||
294 | |||
295 | |||
296 | test1_p2 () { | ||
297 | log "Running Test 1, part 2/3: bitbake virtual/kernel -c cleansstate and time bitbake virtual/kernel" | ||
298 | bbnotime virtual/kernel -c cleansstate | ||
299 | do_sync | ||
300 | bbtime virtual/kernel | ||
301 | } | ||
302 | |||
303 | test1_p3 () { | ||
304 | log "Running Test 1, part 3/3: Build $IMAGE w/o sstate and report size of tmp/dir with rm_work enabled" | ||
305 | echo "INHERIT += \"rm_work\"" >> conf/local.conf | ||
306 | do_rmtmp | ||
307 | do_rmsstate | ||
308 | do_sync | ||
309 | bbtime $IMAGE | ||
310 | sed -i 's/INHERIT += \"rm_work\"//' conf/local.conf | ||
311 | s=`du -s tmp | sed 's/tmp//' | sed 's/[ \t]*$//'` | ||
312 | SIZES[(( size_count++ ))]="$s" | ||
313 | log "SIZE of tmp dir is: $s" | ||
314 | log "Buildstats are saved in $OUTDIR/buildstats-test13" | ||
315 | mv tmp/buildstats $OUTDIR/buildstats-test13 | ||
316 | } | ||
317 | |||
318 | |||
319 | # | ||
320 | # Test 2 | ||
321 | # Measure: Wall clock of "bitbake core-image-sato" and size of tmp/dir | ||
322 | # Pre: populated sstate cache | ||
323 | |||
324 | test2 () { | ||
325 | # Assuming test 1 has run | ||
326 | log "Running Test 2: Measure wall clock of bitbake $IMAGE -c rootfs with sstate" | ||
327 | do_rmtmp | ||
328 | do_sync | ||
329 | bbtime $IMAGE -c rootfs | ||
330 | } | ||
331 | |||
332 | |||
333 | # Test 3 | ||
334 | # parsing time metrics | ||
335 | # | ||
336 | # Start with | ||
337 | # i) "rm -rf tmp/cache; time bitbake -p" | ||
338 | # ii) "rm -rf tmp/cache/default-glibc/; time bitbake -p" | ||
339 | # iii) "time bitbake -p" | ||
340 | |||
341 | |||
342 | test3 () { | ||
343 | log "Running Test 3: Parsing time metrics (bitbake -p)" | ||
344 | log " Removing tmp/cache && cache" | ||
345 | rm -rf tmp/cache cache | ||
346 | bbtime -p | ||
347 | log " Removing tmp/cache/default-glibc/" | ||
348 | rm -rf tmp/cache/default-glibc/ | ||
349 | bbtime -p | ||
350 | bbtime -p | ||
351 | } | ||
352 | |||
353 | |||
354 | |||
355 | # RUN! | ||
356 | |||
357 | test1_p1 | ||
358 | test1_p2 | ||
359 | test1_p3 | ||
360 | test2 | ||
361 | test3 | ||
362 | |||
363 | # if we got til here write to global results | ||
364 | write_results | ||
365 | |||
366 | log "All done, cleaning up..." | ||
367 | |||
368 | do_rmtmp | ||
369 | do_rmsstate | ||
diff --git a/scripts/contrib/ddimage b/scripts/contrib/ddimage new file mode 100755 index 0000000..a503f11 --- /dev/null +++ b/scripts/contrib/ddimage | |||
@@ -0,0 +1,104 @@ | |||
1 | #!/bin/sh | ||
2 | |||
3 | # Default to avoiding the first two disks on typical Linux and Mac OS installs | ||
4 | # Better safe than sorry :-) | ||
5 | BLACKLIST_DEVICES="/dev/sda /dev/sdb /dev/disk1 /dev/disk2" | ||
6 | |||
7 | # 1MB blocksize | ||
8 | BLOCKSIZE=1048576 | ||
9 | |||
10 | usage() { | ||
11 | echo "Usage: $(basename $0) IMAGE DEVICE" | ||
12 | } | ||
13 | |||
14 | image_details() { | ||
15 | IMG=$1 | ||
16 | echo "Image details" | ||
17 | echo "=============" | ||
18 | echo " image: $(basename $IMG)" | ||
19 | # stat format is different on Mac OS and Linux | ||
20 | if [ "$(uname)" = "Darwin" ]; then | ||
21 | echo " size: $(stat -L -f '%z bytes' $IMG)" | ||
22 | echo " modified: $(stat -L -f '%Sm' $IMG)" | ||
23 | else | ||
24 | echo " size: $(stat -L -c '%s bytes' $IMG)" | ||
25 | echo " modified: $(stat -L -c '%y' $IMG)" | ||
26 | fi | ||
27 | echo " type: $(file -L -b $IMG)" | ||
28 | echo "" | ||
29 | } | ||
30 | |||
31 | device_details() { | ||
32 | DEV=$1 | ||
33 | BLOCK_SIZE=512 | ||
34 | |||
35 | echo "Device details" | ||
36 | echo "==============" | ||
37 | |||
38 | # Collect disk info using diskutil on Mac OS | ||
39 | if [ "$(uname)" = "Darwin" ]; then | ||
40 | diskutil info $DEVICE | egrep "(Device Node|Media Name|Total Size)" | ||
41 | return | ||
42 | fi | ||
43 | |||
44 | # Default / Linux information collection | ||
45 | echo " device: $DEVICE" | ||
46 | if [ -f "/sys/class/block/$DEV/device/vendor" ]; then | ||
47 | echo " vendor: $(cat /sys/class/block/$DEV/device/vendor)" | ||
48 | else | ||
49 | echo " vendor: UNKOWN" | ||
50 | fi | ||
51 | if [ -f "/sys/class/block/$DEV/device/model" ]; then | ||
52 | echo " model: $(cat /sys/class/block/$DEV/device/model)" | ||
53 | else | ||
54 | echo " model: UNKNOWN" | ||
55 | fi | ||
56 | if [ -f "/sys/class/block/$DEV/size" ]; then | ||
57 | echo " size: $(($(cat /sys/class/block/$DEV/size) * $BLOCK_SIZE)) bytes" | ||
58 | else | ||
59 | echo " size: UNKNOWN" | ||
60 | fi | ||
61 | echo "" | ||
62 | } | ||
63 | |||
64 | if [ $# -ne 2 ]; then | ||
65 | usage | ||
66 | exit 1 | ||
67 | fi | ||
68 | |||
69 | IMAGE=$1 | ||
70 | DEVICE=$2 | ||
71 | |||
72 | if [ ! -e "$IMAGE" ]; then | ||
73 | echo "ERROR: Image $IMAGE does not exist" | ||
74 | usage | ||
75 | exit 1 | ||
76 | fi | ||
77 | |||
78 | |||
79 | for i in ${BLACKLIST_DEVICES}; do | ||
80 | if [ "$i" = "$DEVICE" ]; then | ||
81 | echo "ERROR: Device $DEVICE is blacklisted" | ||
82 | exit 1 | ||
83 | fi | ||
84 | done | ||
85 | |||
86 | if [ ! -w "$DEVICE" ]; then | ||
87 | echo "ERROR: Device $DEVICE does not exist or is not writable" | ||
88 | usage | ||
89 | exit 1 | ||
90 | fi | ||
91 | |||
92 | image_details $IMAGE | ||
93 | device_details $(basename $DEVICE) | ||
94 | |||
95 | printf "Write $IMAGE to $DEVICE [y/N]? " | ||
96 | read RESPONSE | ||
97 | if [ "$RESPONSE" != "y" ]; then | ||
98 | echo "Write aborted" | ||
99 | exit 0 | ||
100 | fi | ||
101 | |||
102 | echo "Writing image..." | ||
103 | dd if="$IMAGE" of="$DEVICE" bs="$BLOCKSIZE" | ||
104 | sync | ||
diff --git a/scripts/contrib/dialog-power-control b/scripts/contrib/dialog-power-control new file mode 100755 index 0000000..7550ea5 --- /dev/null +++ b/scripts/contrib/dialog-power-control | |||
@@ -0,0 +1,53 @@ | |||
1 | #!/bin/sh | ||
2 | # | ||
3 | # Simple script to show a manual power prompt for when you want to use | ||
4 | # automated hardware testing with testimage.bbclass but you don't have a | ||
5 | # web-enabled power strip or similar to do the power on/off/cycle. | ||
6 | # | ||
7 | # You can enable it by enabling testimage (see the Yocto Project | ||
8 | # Development manual "Performing Automated Runtime Testing" section) | ||
9 | # and setting the following in your local.conf: | ||
10 | # | ||
11 | # TEST_POWERCONTROL_CMD = "${COREBASE}/scripts/contrib/dialog-power-control" | ||
12 | # | ||
13 | |||
14 | PROMPT="" | ||
15 | while true; do | ||
16 | case $1 in | ||
17 | on) | ||
18 | PROMPT="Please turn device power on";; | ||
19 | off) | ||
20 | PROMPT="Please turn device power off";; | ||
21 | cycle) | ||
22 | PROMPT="Please click Done, then turn the device power off then on";; | ||
23 | "") | ||
24 | break;; | ||
25 | esac | ||
26 | shift | ||
27 | done | ||
28 | |||
29 | if [ "$PROMPT" = "" ] ; then | ||
30 | echo "ERROR: no power action specified on command line" | ||
31 | exit 2 | ||
32 | fi | ||
33 | |||
34 | if [ "`which kdialog 2>/dev/null`" != "" ] ; then | ||
35 | DIALOGUTIL="kdialog" | ||
36 | elif [ "`which zenity 2>/dev/null`" != "" ] ; then | ||
37 | DIALOGUTIL="zenity" | ||
38 | else | ||
39 | echo "ERROR: couldn't find program to display a message, install kdialog or zenity" | ||
40 | exit 3 | ||
41 | fi | ||
42 | |||
43 | if [ "$DIALOGUTIL" = "kdialog" ] ; then | ||
44 | kdialog --yesno "$PROMPT" --title "TestImage Power Control" --yes-label "Done" --no-label "Cancel test" | ||
45 | elif [ "$DIALOGUTIL" = "zenity" ] ; then | ||
46 | zenity --question --text="$PROMPT" --title="TestImage Power Control" --ok-label="Done" --cancel-label="Cancel test" | ||
47 | fi | ||
48 | |||
49 | if [ "$?" != "0" ] ; then | ||
50 | echo "User cancelled test at power prompt" | ||
51 | exit 1 | ||
52 | fi | ||
53 | |||
diff --git a/scripts/contrib/documentation-audit.sh b/scripts/contrib/documentation-audit.sh new file mode 100755 index 0000000..2144aac --- /dev/null +++ b/scripts/contrib/documentation-audit.sh | |||
@@ -0,0 +1,94 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # Perform an audit of which packages provide documentation and which | ||
4 | # are missing -doc packages. | ||
5 | # | ||
6 | # Setup requirements: be sure to be building for MACHINE=qemux86. Run | ||
7 | # this script after source'ing the build environment script, so you're | ||
8 | # running it from build/ directory. | ||
9 | # | ||
10 | # Maintainer: Scott Garman <scott.a.garman@intel.com> | ||
11 | |||
12 | REPORT_DOC_SIMPLE="documentation_exists.txt" | ||
13 | REPORT_DOC_DETAIL="documentation_exists_detail.txt" | ||
14 | REPORT_MISSING_SIMPLE="documentation_missing.txt" | ||
15 | REPORT_MISSING_DETAIL="documentation_missing_detail.txt" | ||
16 | REPORT_BUILD_ERRORS="build_errors.txt" | ||
17 | |||
18 | rm -rf $REPORT_DOC_SIMPLE $REPORT_DOC_DETAIL $REPORT_MISSING_SIMPLE $REPORT_MISSING_DETAIL | ||
19 | |||
20 | BITBAKE=`which bitbake` | ||
21 | if [ -z "$BITBAKE" ]; then | ||
22 | echo "Error: bitbake command not found." | ||
23 | echo "Did you forget to source the build environment script?" | ||
24 | exit 1 | ||
25 | fi | ||
26 | |||
27 | echo "REMINDER: you need to build for MACHINE=qemux86 or you won't get useful results" | ||
28 | echo "REMINDER: you need to set LICENSE_FLAGS_WHITELIST appropriately in local.conf or " | ||
29 | echo " you'll get false positives. For example, LICENSE_FLAGS_WHITELIST = \"Commercial\"" | ||
30 | |||
31 | for pkg in `bitbake -s | awk '{ print \$1 }'`; do | ||
32 | if [[ "$pkg" == "Loading" || "$pkg" == "Loaded" || | ||
33 | "$pkg" == "Recipe" || | ||
34 | "$pkg" == "Parsing" || "$pkg" == "Package" || | ||
35 | "$pkg" == "NOTE:" || "$pkg" == "WARNING:" || | ||
36 | "$pkg" == "done." || "$pkg" == "===========" ]] | ||
37 | then | ||
38 | # Skip initial bitbake output | ||
39 | continue | ||
40 | fi | ||
41 | if [[ "$pkg" =~ -native$ || "$pkg" =~ -nativesdk$ || | ||
42 | "$pkg" =~ -cross-canadian ]]; then | ||
43 | # Skip native/nativesdk/cross-canadian recipes | ||
44 | continue | ||
45 | fi | ||
46 | if [[ "$pkg" =~ ^meta- || "$pkg" =~ ^packagegroup- || "$pkg" =~ -image ]]; then | ||
47 | # Skip meta, task and image recipes | ||
48 | continue | ||
49 | fi | ||
50 | if [[ "$pkg" =~ ^glibc- || "$pkg" =~ ^libiconv$ || | ||
51 | "$pkg" =~ -toolchain$ || "$pkg" =~ ^package-index$ || | ||
52 | "$pkg" =~ ^linux- || "$pkg" =~ ^adt-installer$ || | ||
53 | "$pkg" =~ ^eds-tools$ || "$pkg" =~ ^external-python-tarball$ || | ||
54 | "$pkg" =~ ^qt4-embedded$ || "$pkg" =~ ^qt-mobility ]]; then | ||
55 | # Skip glibc, libiconv, -toolchain, and other recipes known | ||
56 | # to cause build conflicts or trigger false positives. | ||
57 | continue | ||
58 | fi | ||
59 | |||
60 | echo "Building package $pkg..." | ||
61 | bitbake $pkg > /dev/null | ||
62 | if [ $? -ne 0 ]; then | ||
63 | echo "There was an error building package $pkg" >> "$REPORT_MISSING_DETAIL" | ||
64 | echo "$pkg" >> $REPORT_BUILD_ERRORS | ||
65 | |||
66 | # Do not skip the remaining tests, as sometimes the | ||
67 | # exit status is 1 due to QA errors, and we can still | ||
68 | # perform the -doc checks. | ||
69 | fi | ||
70 | |||
71 | echo "$pkg built successfully, checking for a documentation package..." | ||
72 | WORKDIR=`bitbake -e $pkg | grep ^WORKDIR | awk -F '=' '{ print \$2 }' | awk -F '"' '{ print \$2 }'` | ||
73 | FIND_DOC_PKG=`find $WORKDIR/packages-split/*-doc -maxdepth 0 -type d` | ||
74 | if [ -z "$FIND_DOC_PKG" ]; then | ||
75 | # No -doc package was generated: | ||
76 | echo "No -doc package: $pkg" >> "$REPORT_MISSING_DETAIL" | ||
77 | echo "$pkg" >> $REPORT_MISSING_SIMPLE | ||
78 | continue | ||
79 | fi | ||
80 | |||
81 | FIND_DOC_FILES=`find $FIND_DOC_PKG -type f` | ||
82 | if [ -z "$FIND_DOC_FILES" ]; then | ||
83 | # No files shipped with the -doc package: | ||
84 | echo "No files shipped with the -doc package: $pkg" >> "$REPORT_MISSING_DETAIL" | ||
85 | echo "$pkg" >> $REPORT_MISSING_SIMPLE | ||
86 | continue | ||
87 | fi | ||
88 | |||
89 | echo "Documentation shipped with $pkg:" >> "$REPORT_DOC_DETAIL" | ||
90 | echo "$FIND_DOC_FILES" >> "$REPORT_DOC_DETAIL" | ||
91 | echo "" >> "$REPORT_DOC_DETAIL" | ||
92 | |||
93 | echo "$pkg" >> "$REPORT_DOC_SIMPLE" | ||
94 | done | ||
diff --git a/scripts/contrib/graph-tool b/scripts/contrib/graph-tool new file mode 100755 index 0000000..6dc7d33 --- /dev/null +++ b/scripts/contrib/graph-tool | |||
@@ -0,0 +1,92 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # Simple graph query utility | ||
4 | # useful for getting answers from .dot files produced by bitbake -g | ||
5 | # | ||
6 | # Written by: Paul Eggleton <paul.eggleton@linux.intel.com> | ||
7 | # | ||
8 | # Copyright 2013 Intel Corporation | ||
9 | # | ||
10 | # This program is free software; you can redistribute it and/or modify | ||
11 | # it under the terms of the GNU General Public License version 2 as | ||
12 | # published by the Free Software Foundation. | ||
13 | # | ||
14 | # This program is distributed in the hope that it will be useful, | ||
15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | # GNU General Public License for more details. | ||
18 | # | ||
19 | # You should have received a copy of the GNU General Public License along | ||
20 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
22 | # | ||
23 | |||
24 | import sys | ||
25 | |||
def get_path_networkx(dotfile, fromnode, tonode):
    """Return an iterator over every simple path from fromnode to tonode.

    The graph is loaded from *dotfile* (a .dot file as produced by
    "bitbake -g") via networkx.  Exits with an error message if networkx
    is not installed, or if either endpoint is not a node of the graph
    (suggesting close name matches in that case).
    """
    try:
        import networkx
    except ImportError:
        print('ERROR: Please install the networkx python module')
        sys.exit(1)

    digraph = networkx.DiGraph(networkx.read_dot(dotfile))

    def report_unknown(name):
        # Help the user recover from a typo by listing similar node names.
        import difflib
        candidates = difflib.get_close_matches(name, digraph.nodes(), cutoff=0.7)
        if candidates:
            print('ERROR: no node "%s" in graph. Close matches:\n  %s' % (name, '\n  '.join(candidates)))
        sys.exit(1)

    for endpoint in (fromnode, tonode):
        if endpoint not in digraph:
            report_unknown(endpoint)
    return networkx.all_simple_paths(digraph, source=fromnode, target=tonode)
47 | |||
48 | |||
def find_paths(args, usage):
    """Print every simple path between two nodes of a .dot graph.

    args: command arguments [dotfile, fromnode, tonode].
    usage: callable printing the usage text (invoked on bad arguments).
    Exits with status 1 on bad arguments or when no path exists.
    """
    if len(args) < 3:
        usage()
        sys.exit(1)

    fromnode = args[1]
    tonode = args[2]
    paths = list(get_path_networkx(args[0], fromnode, tonode))
    if paths:
        for path in paths:
            # Fixed: was the Python-2-only "print x" statement; use the
            # print() function for consistency with the rest of the script
            # (and so the script can run under Python 3).
            print(' -> '.join(path))
    else:
        print("ERROR: no path from %s to %s in graph" % (fromnode, tonode))
        sys.exit(1)
63 | |||
def main():
    """Parse the command line and dispatch to the requested sub-command."""
    import optparse
    parser = optparse.OptionParser(
        usage = '''%prog [options] <command> <arguments>

Available commands:
  find-paths <dotfile> <from> <to>
    Find all of the paths between two nodes in a dot graph''')

    #parser.add_option("-d", "--debug",
    #        help = "Report all SRCREV values, not just ones where AUTOREV has been used",
    #        action="store_true", dest="debug", default=False)

    options, args = parser.parse_args(sys.argv)
    args = args[1:]

    # Dispatch table of sub-commands; unknown or missing commands fall
    # through to the help text.
    handlers = {"find-paths": find_paths}
    handler = handlers.get(args[0]) if args else None
    if handler is None:
        parser.print_help()
        sys.exit(1)
    handler(args[1:], parser.print_help)


if __name__ == "__main__":
    main()
diff --git a/scripts/contrib/list-packageconfig-flags.py b/scripts/contrib/list-packageconfig-flags.py new file mode 100755 index 0000000..598b5c3 --- /dev/null +++ b/scripts/contrib/list-packageconfig-flags.py | |||
@@ -0,0 +1,181 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # This program is free software; you can redistribute it and/or modify | ||
4 | # it under the terms of the GNU General Public License as published by | ||
5 | # the Free Software Foundation; either version 2 of the License, or | ||
6 | # (at your option) any later version. | ||
7 | # | ||
8 | # This program is distributed in the hope that it will be useful, | ||
9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | # GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License | ||
14 | # along with this program; if not, write to the Free Software Foundation. | ||
15 | # | ||
16 | # Copyright (C) 2013 Wind River Systems, Inc. | ||
17 | # Copyright (C) 2014 Intel Corporation | ||
18 | # | ||
19 | # - list available recipes which have PACKAGECONFIG flags | ||
20 | # - list available PACKAGECONFIG flags and all affected recipes | ||
21 | # - list all recipes and PACKAGECONFIG information | ||
22 | |||
23 | import sys | ||
24 | import optparse | ||
25 | import os | ||
26 | |||
27 | |||
28 | scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0]))) | ||
29 | lib_path = os.path.abspath(scripts_path + '/../lib') | ||
30 | sys.path = sys.path + [lib_path] | ||
31 | |||
32 | import scriptpath | ||
33 | |||
34 | # For importing the following modules | ||
35 | bitbakepath = scriptpath.add_bitbake_lib_path() | ||
36 | if not bitbakepath: | ||
37 | sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n") | ||
38 | sys.exit(1) | ||
39 | |||
40 | import bb.cache | ||
41 | import bb.cooker | ||
42 | import bb.providers | ||
43 | import bb.tinfoil | ||
44 | |||
def get_fnlist(bbhandler, pkg_pn, preferred):
    ''' Get all recipe file names '''
    # With 'preferred' set, ask bitbake which provider is preferred for
    # each PN and return just that recipe file; otherwise return every
    # recipe file known for each PN.  PNs are walked in sorted order so
    # the output is deterministic.
    if preferred:
        latest_versions, preferred_versions = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecache, pkg_pn)
        return [preferred_versions[pn][1] for pn in sorted(pkg_pn)]

    recipe_files = []
    for pn in sorted(pkg_pn):
        recipe_files.extend(pkg_pn[pn])
    return recipe_files
58 | |||
def get_recipesdata(bbhandler, preferred):
    ''' Get data of all available recipes which have PACKAGECONFIG flags '''
    pkg_pn = bbhandler.cooker.recipecache.pkg_pn

    recipes = {}
    for fn in get_fnlist(bbhandler, pkg_pn, preferred):
        # Fully parse the recipe (including its bbappends) so that its
        # PACKAGECONFIG flags can be inspected.
        parsed = bb.cache.Cache.loadDataFull(fn, bbhandler.cooker.collection.get_file_appends(fn), bbhandler.config_data)
        flags = parsed.getVarFlags("PACKAGECONFIG")
        # 'doc' and 'defaultval' are bookkeeping flags, not real options;
        # only keep recipes that define at least one genuine flag.
        flags.pop('doc', None)
        flags.pop('defaultval', None)
        if flags:
            recipes[fn] = parsed

    return recipes
73 | |||
def collect_pkgs(data_dict):
    ''' Collect available pkgs in which have PACKAGECONFIG flags '''
    # Maps package name -> sorted flag names,
    # e.g. {'pkg1': ['flag1', 'flag2', ...]}
    pkg_dict = {}
    for fn, data in data_dict.items():
        flags = data.getVarFlags("PACKAGECONFIG")
        # Drop the bookkeeping flags; they are not user-selectable options.
        flags.pop('doc', None)
        flags.pop('defaultval', None)
        pkg_dict[data.getVar("P", True)] = sorted(flags.keys())

    return pkg_dict
86 | |||
def collect_flags(pkg_dict):
    ''' Collect available PACKAGECONFIG flags and all affected pkgs '''
    # flag_dict = {'flag': ['pkg1', 'pkg2',...]}
    flag_dict = {}
    # Fixed: iterate with items() rather than the Python-2-only
    # iteritems(), so the script also works when run under Python 3
    # (behaviour is identical under Python 2).
    for pkgname, flaglist in pkg_dict.items():
        for flag in flaglist:
            # setdefault() replaces the manual "in dict" check/append.
            flag_dict.setdefault(flag, []).append(pkgname)

    return flag_dict
99 | |||
def display_pkgs(pkg_dict):
    ''' Display available pkgs which have PACKAGECONFIG flags '''
    # Size the first column to the longest recipe name (plus padding).
    pkgname_len = len("RECIPE NAME") + 1
    for pkgname in pkg_dict:
        if pkgname_len < len(pkgname):
            pkgname_len = len(pkgname)
    pkgname_len += 1

    header = '%-*s%s' % (pkgname_len, str("RECIPE NAME"), str("PACKAGECONFIG FLAGS"))
    # Fixed: use the print() function instead of the Python-2-only print
    # statement, so the script also runs under Python 3 (same output).
    print(header)
    print(str("").ljust(len(header), '='))
    for pkgname in sorted(pkg_dict):
        print('%-*s%s' % (pkgname_len, pkgname, ' '.join(pkg_dict[pkgname])))
113 | |||
114 | |||
def display_flags(flag_dict):
    ''' Display available PACKAGECONFIG flags and all affected pkgs '''
    flag_len = len("PACKAGECONFIG FLAG") + 5

    header = '%-*s%s' % (flag_len, str("PACKAGECONFIG FLAG"), str("RECIPE NAMES"))
    # Fixed: use the print() function instead of the Python-2-only print
    # statement, so the script also runs under Python 3 (same output).
    print(header)
    print(str("").ljust(len(header), '='))

    for flag in sorted(flag_dict):
        print('%-*s%s' % (flag_len, flag, ' '.join(sorted(flag_dict[flag]))))
125 | |||
def display_all(data_dict):
    ''' Display all pkgs and PACKAGECONFIG information '''
    # Fixed: print() function and dict.items() instead of the
    # Python-2-only print statement and iteritems(), so the script also
    # runs under Python 3 (same output).
    print(str("").ljust(50, '='))
    for fn in data_dict:
        print('%s' % data_dict[fn].getVar("P", True))
        print(fn)
        packageconfig = data_dict[fn].getVar("PACKAGECONFIG", True) or ''
        if packageconfig.strip() == '':
            packageconfig = 'None'
        print('PACKAGECONFIG %s' % packageconfig)

        for flag, flag_val in data_dict[fn].getVarFlags("PACKAGECONFIG").items():
            # Skip the bookkeeping flags; they are not real options.
            if flag in ["defaultval", "doc"]:
                continue
            print('PACKAGECONFIG[%s] %s' % (flag, flag_val))
        print('')
142 | |||
def main():
    ''' Parse options, query bitbake and print the requested report. '''
    # Collect and validate input
    parser = optparse.OptionParser(
        description = "Lists recipes and PACKAGECONFIG flags. Without -a or -f, recipes and their available PACKAGECONFIG flags are listed.",
        usage = """
    %prog [options]""")

    parser.add_option("-f", "--flags",
            help = "list available PACKAGECONFIG flags and affected recipes",
            action="store_const", dest="listtype", const="flags", default="recipes")
    parser.add_option("-a", "--all",
            help = "list all recipes and PACKAGECONFIG information",
            action="store_const", dest="listtype", const="all")
    parser.add_option("-p", "--preferred-only",
            help = "where multiple recipe versions are available, list only the preferred version",
            action="store_true", dest="preferred", default=False)

    options, args = parser.parse_args(sys.argv)

    # Spin up a tinfoil instance so recipe metadata can be queried.
    bbhandler = bb.tinfoil.Tinfoil()
    bbhandler.prepare()
    print("Gathering recipe data...")
    data_dict = get_recipesdata(bbhandler, options.preferred)

    if options.listtype == 'all':
        display_all(data_dict)
    elif options.listtype == 'flags':
        display_flags(collect_flags(collect_pkgs(data_dict)))
    elif options.listtype == 'recipes':
        display_pkgs(collect_pkgs(data_dict))

if __name__ == "__main__":
    main()
diff --git a/scripts/contrib/mkefidisk.sh b/scripts/contrib/mkefidisk.sh new file mode 100755 index 0000000..b96b7d4 --- /dev/null +++ b/scripts/contrib/mkefidisk.sh | |||
@@ -0,0 +1,396 @@ | |||
1 | #!/bin/sh | ||
2 | # | ||
3 | # Copyright (c) 2012, Intel Corporation. | ||
4 | # All rights reserved. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify | ||
7 | # it under the terms of the GNU General Public License as published by | ||
8 | # the Free Software Foundation; either version 2 of the License, or | ||
9 | # (at your option) any later version. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
14 | # the GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License | ||
17 | # along with this program; if not, write to the Free Software | ||
18 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | # | ||
20 | |||
# Force the C locale so the output of parted/grep parsed below is stable.
LANG=C

# Set to 1 to enable additional output
DEBUG=0
# Command output sink; redirected to stdout ("1") when -v is given.
OUT="/dev/null"

#
# Defaults
#
# 20 Mb for the boot partition
BOOT_SIZE=20
# 5% for swap
SWAP_RATIO=5
34 | |||
# Cleanup after die(): unmount everything this script mounted (most
# deeply nested mounts first) and delete the temporary directory.
# Failures are reported via error() but do not abort the cleanup.
cleanup() {
	debug "Syncing and unmounting devices"
	# Unmount anything we mounted
	unmount $ROOTFS_MNT || error "Failed to unmount $ROOTFS_MNT"
	unmount $BOOTFS_MNT || error "Failed to unmount $BOOTFS_MNT"
	unmount $HDDIMG_ROOTFS_MNT || error "Failed to unmount $HDDIMG_ROOTFS_MNT"
	unmount $HDDIMG_MNT || error "Failed to unmount $HDDIMG_MNT"

	# Remove the TMPDIR
	debug "Removing temporary files"
	if [ -d "$TMPDIR" ]; then
		rm -rf $TMPDIR || error "Failed to remove $TMPDIR"
	fi
}
50 | |||
# Abort cleanly (unmount + remove tmpdir) if the user interrupts us.
trap 'die "Signal Received, Aborting..."' HUP INT TERM

# Logging routines
WARNINGS=0
ERRORS=0
CLEAR="$(tput sgr0)"
INFO="$(tput bold)"
RED="$(tput setaf 1)$(tput bold)"
GREEN="$(tput setaf 2)$(tput bold)"
YELLOW="$(tput setaf 3)$(tput bold)"
# Emphasised informational message.
info() {
	echo "${INFO}$1${CLEAR}"
}
# Red error message; counted for the final summary.
error() {
	ERRORS=$((ERRORS+1))
	echo "${RED}$1${CLEAR}"
}
# Yellow warning message; counted for the final summary.
warn() {
	WARNINGS=$((WARNINGS+1))
	echo "${YELLOW}$1${CLEAR}"
}
# Green success message.
success() {
	echo "${GREEN}$1${CLEAR}"
}
# Report a fatal error, clean up all mounts/tempfiles and exit non-zero.
die() {
	error "$1"
	cleanup
	exit 1
}
# Print only when -v / DEBUG=1 is in effect.
debug() {
	if [ $DEBUG -eq 1 ]; then
		echo "$1"
	fi
}
85 | |||
# Print command-line usage to stdout.
usage() {
	cat <<EOF
Usage: $(basename $0) [-v] DEVICE HDDIMG TARGET_DEVICE
  -v: Verbose debug
  DEVICE: The device to write the image to, e.g. /dev/sdh
  HDDIMG: The hddimg file to generate the efi disk from
  TARGET_DEVICE: The device the target will boot from, e.g. /dev/mmcblk0
EOF
}
93 | |||
# Print name, size, mtime and file type of the hddimg passed as $1 so the
# user can confirm the right image before the device is wiped.
image_details() {
	IMG=$1
	info "Image details"
	echo "  image: $(stat --printf '%N\n' $IMG)"
	echo "   size: $(stat -L --printf '%s bytes\n' $IMG)"
	echo "   modified: $(stat -L --printf '%y\n' $IMG)"
	echo "   type: $(file -L -b $IMG)"
	echo ""
}
103 | |||
# Print vendor/model/size details (read from sysfs when available) for the
# block device whose basename is passed as $1, so the user can confirm the
# target before it is wiped.
device_details() {
	DEV=$1
	BLOCK_SIZE=512

	info "Device details"
	# NOTE(review): prints the full $DEVICE path while the sysfs lookups
	# below use the basename in $DEV.
	echo "  device: $DEVICE"
	if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
		echo "  vendor: $(cat /sys/class/block/$DEV/device/vendor)"
	else
		# Fixed typo: was "UNKOWN"; now consistent with the other branches.
		echo "  vendor: UNKNOWN"
	fi
	if [ -f "/sys/class/block/$DEV/device/model" ]; then
		echo "  model: $(cat /sys/class/block/$DEV/device/model)"
	else
		echo "  model: UNKNOWN"
	fi
	if [ -f "/sys/class/block/$DEV/size" ]; then
		# /sys size is in 512-byte sectors; convert to bytes.
		echo "  size: $(($(cat /sys/class/block/$DEV/size) * $BLOCK_SIZE)) bytes"
	else
		echo "  size: UNKNOWN"
	fi
	echo ""
}
127 | |||
# Unmount every mounted partition of $DEVICE (if any appear in
# /proc/mounts).  Returns umount's status, or 0 if nothing was mounted.
unmount_device() {
	grep -q $DEVICE /proc/mounts
	if [ $? -eq 0 ]; then
		warn "$DEVICE listed in /proc/mounts, attempting to unmount"
		# Glob covers all partitions, e.g. /dev/sdh1 /dev/sdh2 ...
		umount $DEVICE* 2>/dev/null
		return $?
	fi
	return 0
}
137 | |||
# Unmount the mount point or device given as $1 if it is currently
# mounted.  Returns umount's status, or 0 if it was not mounted.
unmount() {
	grep -q $1 /proc/mounts
	if [ $? -eq 0 ]; then
		debug "Unmounting $1"
		umount $1
		return $?
	fi
	return 0
}
147 | |||
#
# Parse and validate arguments
#
if [ $# -lt 3 ] || [ $# -gt 4 ]; then
	usage
	exit 1
fi

# Optional -v must be the first argument; it enables debug() output and
# redirects command output to stdout instead of /dev/null.
if [ "$1" = "-v" ]; then
	DEBUG=1
	OUT="1"
	shift
fi

DEVICE=$1
HDDIMG=$2
TARGET_DEVICE=$3

# Resolve a symlinked device path (e.g. /dev/disk/by-id/...) to the real node.
LINK=$(readlink $DEVICE)
if [ $? -eq 0 ]; then
	DEVICE="$LINK"
fi

if [ ! -w "$DEVICE" ]; then
	usage
	die "Device $DEVICE does not exist or is not writable"
fi

if [ ! -e "$HDDIMG" ]; then
	usage
	die "HDDIMG $HDDIMG does not exist"
fi

#
# Ensure the hddimg is not mounted
#
unmount "$HDDIMG" || die "Failed to unmount $HDDIMG"

#
# Check if any $DEVICE partitions are mounted
#
unmount_device || die "Failed to unmount $DEVICE"

#
# Confirm device with user
#
image_details $HDDIMG
device_details $(basename $DEVICE)
echo -n "${INFO}Prepare EFI image on $DEVICE [y/N]?${CLEAR} "
read RESPONSE
if [ "$RESPONSE" != "y" ]; then
	echo "Image creation aborted"
	exit 0
fi
202 | |||
203 | |||
#
# Prepare the temporary working space
#
TMPDIR=$(mktemp -d mkefidisk-XXX) || die "Failed to create temporary mounting directory."
HDDIMG_MNT=$TMPDIR/hddimg
HDDIMG_ROOTFS_MNT=$TMPDIR/hddimg_rootfs
ROOTFS_MNT=$TMPDIR/rootfs
BOOTFS_MNT=$TMPDIR/bootfs
mkdir $HDDIMG_MNT || die "Failed to create $HDDIMG_MNT"
mkdir $HDDIMG_ROOTFS_MNT || die "Failed to create $HDDIMG_ROOTFS_MNT"
mkdir $ROOTFS_MNT || die "Failed to create $ROOTFS_MNT"
mkdir $BOOTFS_MNT || die "Failed to create $BOOTFS_MNT"


#
# Partition $DEVICE
#
DEVICE_SIZE=$(parted $DEVICE unit mb print | grep ^Disk | cut -d" " -f 3 | sed -e "s/MB//")
# If the device size is not reported there may not be a valid label
if [ "$DEVICE_SIZE" = "" ] ; then
	parted $DEVICE mklabel msdos || die "Failed to create MSDOS partition table"
	DEVICE_SIZE=$(parted $DEVICE unit mb print | grep ^Disk | cut -d" " -f 3 | sed -e "s/MB//")
fi
# Partition layout (in MB): [boot][rootfs][swap], with swap sized as a
# percentage of the whole device.
SWAP_SIZE=$((DEVICE_SIZE*SWAP_RATIO/100))
ROOTFS_SIZE=$((DEVICE_SIZE-BOOT_SIZE-SWAP_SIZE))
ROOTFS_START=$((BOOT_SIZE))
ROOTFS_END=$((ROOTFS_START+ROOTFS_SIZE))
SWAP_START=$((ROOTFS_END))

# MMC devices use a partition prefix character 'p'
PART_PREFIX=""
if [ ! "${DEVICE#/dev/mmcblk}" = "${DEVICE}" ] || [ ! "${DEVICE#/dev/loop}" = "${DEVICE}" ]; then
	PART_PREFIX="p"
fi
BOOTFS=$DEVICE${PART_PREFIX}1
ROOTFS=$DEVICE${PART_PREFIX}2
SWAP=$DEVICE${PART_PREFIX}3

# The device the installed system boots from may differ from the one we
# are writing to, so compute its partition names separately.
TARGET_PART_PREFIX=""
if [ ! "${TARGET_DEVICE#/dev/mmcblk}" = "${TARGET_DEVICE}" ]; then
	TARGET_PART_PREFIX="p"
fi
TARGET_ROOTFS=$TARGET_DEVICE${TARGET_PART_PREFIX}2
TARGET_SWAP=$TARGET_DEVICE${TARGET_PART_PREFIX}3

echo ""
info "Boot partition size: $BOOT_SIZE MB ($BOOTFS)"
info "ROOTFS partition size: $ROOTFS_SIZE MB ($ROOTFS)"
info "Swap partition size: $SWAP_SIZE MB ($SWAP)"
echo ""

# Use MSDOS by default as GPT cannot be reliably distributed in disk image form
# as it requires the backup table to be on the last block of the device, which
# of course varies from device to device.

info "Partitioning installation media ($DEVICE)"

debug "Deleting partition table on $DEVICE"
dd if=/dev/zero of=$DEVICE bs=512 count=2 >$OUT 2>&1 || die "Failed to zero beginning of $DEVICE"

debug "Creating new partition table (MSDOS) on $DEVICE"
parted $DEVICE mklabel msdos >$OUT 2>&1 || die "Failed to create MSDOS partition table"

debug "Creating boot partition on $BOOTFS"
parted $DEVICE mkpart primary 0% $BOOT_SIZE >$OUT 2>&1 || die "Failed to create BOOT partition"

debug "Enabling boot flag on $BOOTFS"
parted $DEVICE set 1 boot on >$OUT 2>&1 || die "Failed to enable boot flag"

debug "Creating ROOTFS partition on $ROOTFS"
parted $DEVICE mkpart primary $ROOTFS_START $ROOTFS_END >$OUT 2>&1 || die "Failed to create ROOTFS partition"

debug "Creating swap partition on $SWAP"
parted $DEVICE mkpart primary $SWAP_START 100% >$OUT 2>&1 || die "Failed to create SWAP partition"

if [ $DEBUG -eq 1 ]; then
	parted $DEVICE print
fi
282 | |||
283 | |||
#
# Check if any $DEVICE partitions are mounted after partitioning
#
unmount_device || die "Failed to unmount $DEVICE partitions"


#
# Format $DEVICE partitions
#
info "Formatting partitions"
debug "Formatting $BOOTFS as vfat"
# Loop devices need -I to force formatting the "whole disk" node.
if [ ! "${DEVICE#/dev/loop}" = "${DEVICE}" ]; then
	mkfs.vfat -I $BOOTFS -n "EFI" >$OUT 2>&1 || die "Failed to format $BOOTFS"
else
	mkfs.vfat $BOOTFS -n "EFI" >$OUT 2>&1 || die "Failed to format $BOOTFS"
fi

debug "Formatting $ROOTFS as ext3"
mkfs.ext3 -F $ROOTFS -L "ROOT" >$OUT 2>&1 || die "Failed to format $ROOTFS"

debug "Formatting swap partition ($SWAP)"
mkswap $SWAP >$OUT 2>&1 || die "Failed to prepare swap"


#
# Installing to $DEVICE
#
debug "Mounting images and device in preparation for installation"
mount -o loop $HDDIMG $HDDIMG_MNT >$OUT 2>&1 || error "Failed to mount $HDDIMG"
mount -o loop $HDDIMG_MNT/rootfs.img $HDDIMG_ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount rootfs.img"
mount $ROOTFS $ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount $ROOTFS on $ROOTFS_MNT"
mount $BOOTFS $BOOTFS_MNT >$OUT 2>&1 || error "Failed to mount $BOOTFS on $BOOTFS_MNT"

info "Preparing boot partition"
EFIDIR="$BOOTFS_MNT/EFI/BOOT"
cp $HDDIMG_MNT/vmlinuz $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy vmlinuz"
# Copy the efi loader and configs (booti*.efi and grub.cfg if it exists)
cp -r $HDDIMG_MNT/EFI $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy EFI dir"
# Silently ignore a missing gummiboot loader dir (we might just be a GRUB image)
cp -r $HDDIMG_MNT/loader $BOOTFS_MNT >$OUT 2>&1

# Update the boot loaders configurations for an installed image
# Remove any existing root= kernel parameters and:
# o Add a root= parameter with the target rootfs
# o Specify ro so fsck can be run during boot
# o Specify rootwait in case the target media is an asyncronous block device
#   such as MMC or USB disks
# o Specify "quiet" to minimize boot time when using slow serial consoles

# Look for a GRUB installation
GRUB_CFG="$EFIDIR/grub.cfg"
if [ -e "$GRUB_CFG" ]; then
	info "Configuring GRUB"
	# Delete the install entry
	sed -i "/menuentry 'install'/,/^}/d" $GRUB_CFG
	# Delete the initrd lines
	sed -i "/initrd /d" $GRUB_CFG
	# Delete any LABEL= strings
	sed -i "s/ LABEL=[^ ]*/ /" $GRUB_CFG

	sed -i "s@ root=[^ ]*@ @" $GRUB_CFG
	sed -i "s@vmlinuz @vmlinuz root=$TARGET_ROOTFS ro rootwait quiet @" $GRUB_CFG
fi

# Look for a gummiboot installation
GUMMI_ENTRIES="$BOOTFS_MNT/loader/entries"
GUMMI_CFG="$GUMMI_ENTRIES/boot.conf"
if [ -d "$GUMMI_ENTRIES" ]; then
	info "Configuring Gummiboot"
	# remove the install target if it exists
	rm $GUMMI_ENTRIES/install.conf >$OUT 2>&1

	if [ ! -e "$GUMMI_CFG" ]; then
		echo "ERROR: $GUMMI_CFG not found"
	fi

	sed -i "/initrd /d" $GUMMI_CFG
	sed -i "s@ root=[^ ]*@ @" $GUMMI_CFG
	sed -i "s@options *LABEL=boot @options LABEL=Boot root=$TARGET_ROOTFS ro rootwait quiet @" $GUMMI_CFG
fi

# Ensure we have at least one EFI bootloader configured
if [ ! -e $GRUB_CFG ] && [ ! -e $GUMMI_CFG ]; then
	die "No EFI bootloader configuration found"
fi


info "Copying ROOTFS files (this may take a while)"
cp -a $HDDIMG_ROOTFS_MNT/* $ROOTFS_MNT >$OUT 2>&1 || die "Root FS copy failed"

# Add the swap partition of the *target* device to the installed fstab.
echo "$TARGET_SWAP swap swap defaults 0 0" >> $ROOTFS_MNT/etc/fstab

# We dont want udev to mount our root device while we're booting...
if [ -d $ROOTFS_MNT/etc/udev/ ] ; then
	echo "$TARGET_DEVICE" >> $ROOTFS_MNT/etc/udev/mount.blacklist
fi


# Call cleanup to unmount devices and images and remove the TMPDIR
cleanup

# Final summary: colour-coded by whether error()/warn() were ever called.
echo ""
if [ $WARNINGS -ne 0 ] && [ $ERRORS -eq 0 ]; then
	echo "${YELLOW}Installation completed with warnings${CLEAR}"
	echo "${YELLOW}Warnings: $WARNINGS${CLEAR}"
elif [ $ERRORS -ne 0 ]; then
	echo "${RED}Installation encountered errors${CLEAR}"
	echo "${RED}Errors: $ERRORS${CLEAR}"
	echo "${YELLOW}Warnings: $WARNINGS${CLEAR}"
else
	success "Installation completed successfully"
fi
echo ""
diff --git a/scripts/contrib/python/generate-manifest-2.7.py b/scripts/contrib/python/generate-manifest-2.7.py new file mode 100755 index 0000000..68c42b0 --- /dev/null +++ b/scripts/contrib/python/generate-manifest-2.7.py | |||
@@ -0,0 +1,391 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # generate Python Manifest for the OpenEmbedded build system | ||
4 | # (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de> | ||
5 | # (C) 2007 Jeremy Laine | ||
6 | # licensed under MIT, see COPYING.MIT | ||
7 | # | ||
8 | # June 22, 2011 -- Mark Hatle <mark.hatle@windriver.com> | ||
9 | # * Updated to no longer generate special -dbg package, instead use the | ||
10 | # single system -dbg | ||
11 | # * Update version with ".1" to indicate this change | ||
12 | |||