diff options
Diffstat (limited to 'scripts/contrib')
-rwxr-xr-x | scripts/contrib/bb-perf/bb-matrix-plot.sh | 137 | ||||
-rwxr-xr-x | scripts/contrib/bb-perf/bb-matrix.sh | 79 | ||||
-rwxr-xr-x | scripts/contrib/bbvars.py | 186 | ||||
-rwxr-xr-x | scripts/contrib/build-perf-test.sh | 369 | ||||
-rwxr-xr-x | scripts/contrib/ddimage | 104 | ||||
-rwxr-xr-x | scripts/contrib/dialog-power-control | 53 | ||||
-rwxr-xr-x | scripts/contrib/documentation-audit.sh | 94 | ||||
-rwxr-xr-x | scripts/contrib/graph-tool | 92 | ||||
-rwxr-xr-x | scripts/contrib/list-packageconfig-flags.py | 181 | ||||
-rwxr-xr-x | scripts/contrib/mkefidisk.sh | 396 | ||||
-rwxr-xr-x | scripts/contrib/python/generate-manifest-2.7.py | 391 | ||||
-rwxr-xr-x | scripts/contrib/python/generate-manifest-3.3.py | 386 | ||||
-rwxr-xr-x | scripts/contrib/serdevtry | 60 | ||||
-rwxr-xr-x | scripts/contrib/test_build_time.sh | 237 | ||||
-rwxr-xr-x | scripts/contrib/test_build_time_worker.sh | 37 | ||||
-rwxr-xr-x | scripts/contrib/verify-homepage.py | 63 |
16 files changed, 2865 insertions, 0 deletions
diff --git a/scripts/contrib/bb-perf/bb-matrix-plot.sh b/scripts/contrib/bb-perf/bb-matrix-plot.sh new file mode 100755 index 0000000000..136a25570d --- /dev/null +++ b/scripts/contrib/bb-perf/bb-matrix-plot.sh | |||
@@ -0,0 +1,137 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # Copyright (c) 2011, Intel Corporation. | ||
4 | # All rights reserved. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify | ||
7 | # it under the terms of the GNU General Public License as published by | ||
8 | # the Free Software Foundation; either version 2 of the License, or | ||
9 | # (at your option) any later version. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License | ||
17 | # along with this program; if not, write to the Free Software | ||
18 | # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This script operates on the .dat file generated by bb-matrix.sh. It tolerates | ||
22 | # the header by skipping the first line, but error messages and bad data records | ||
23 | # need to be removed first. It will generate three views of the plot, and leave | ||
24 | # an interactive view open for further analysis. | ||
25 | # | ||
26 | # AUTHORS | ||
27 | # Darren Hart <dvhart@linux.intel.com> | ||
28 | # | ||
29 | |||
# Setup the defaults
DATFILE="bb-matrix.dat"
XLABEL="BB_NUMBER_THREADS"
YLABEL="PARALLEL_MAKE"
FIELD=3
DEF_TITLE="Elapsed Time (seconds)"
PM3D_FRAGMENT="unset surface; set pm3d at s hidden3d 100"
SIZE="640,480"

function usage {
	CMD=$(basename "$0")
	cat <<EOM
Usage: $CMD [-d datfile] [-f field] [-h] [-s W,H] [-t title] [-w]
  -d datfile    The data file generated by bb-matrix.sh (default: $DATFILE)
  -f field      The field index to plot as the Z axis from the data file
                (default: $FIELD, "$DEF_TITLE")
  -h            Display this help message
  -s W,H        PNG and window size in pixels (default: $SIZE)
  -t title      The title to display, should describe the field (-f) and units
                (default: "$DEF_TITLE")
  -w            Render the plot as wireframe with a 2D colormap projected on the
                XY plane rather than as the texture for the surface
EOM
}

# Parse and validate arguments
while getopts "d:f:hs:t:w" OPT; do
	case $OPT in
	d)
		DATFILE="$OPTARG"
		;;
	f)
		FIELD="$OPTARG"
		;;
	h)
		usage
		exit 0
		;;
	s)
		SIZE="$OPTARG"
		;;
	t)
		TITLE="$OPTARG"
		;;
	w)
		PM3D_FRAGMENT="set pm3d at b"
		W="-w"
		;;
	*)
		usage
		exit 1
		;;
	esac
done

# Ensure the data file exists
if [ ! -f "$DATFILE" ]; then
	echo "ERROR: $DATFILE does not exist"
	usage
	exit 1
fi
PLOT_BASENAME=${DATFILE%.*}-f$FIELD$W

# Set a sane title
# TODO: parse the header and define titles for each format parameter for TIME(1)
if [ -z "$TITLE" ]; then
	if [ "$FIELD" != "3" ]; then
		TITLE="Field $FIELD"
	else
		TITLE="$DEF_TITLE"
	fi
fi

# Determine the dgrid3d mesh dimensions size.
# The leading zeros used for nice column alignment in the dat file are
# stripped so the values sort and subtract as plain decimal numbers.
MIN=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 1 | sed 's/^0*//' | sort -nu | head -n1)
MAX=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 1 | sed 's/^0*//' | sort -nu | tail -n1)
BB_CNT=$((MAX - MIN + 1))
MIN=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 2 | sed 's/^0*//' | sort -nu | head -n1)
MAX=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 2 | sed 's/^0*//' | sort -nu | tail -n1)
PM_CNT=$((MAX - MIN + 1))


# Generate three PNG views (default, BB-axis, PM-axis) and leave an
# interactive wxt window open for further analysis.
(cat <<EOF
set title "$TITLE"
set xlabel "$XLABEL"
set ylabel "$YLABEL"
set style line 100 lt 5 lw 1.5
$PM3D_FRAGMENT
set dgrid3d $PM_CNT,$BB_CNT splines
set ticslevel 0.2

set term png size $SIZE
set output "$PLOT_BASENAME.png"
splot "$DATFILE" every ::1 using 1:2:$FIELD with lines ls 100

set view 90,0
set output "$PLOT_BASENAME-bb.png"
replot

set view 90,90
set output "$PLOT_BASENAME-pm.png"
replot

set view 60,30
set term wxt size $SIZE
replot
EOF
) | gnuplot --persist
diff --git a/scripts/contrib/bb-perf/bb-matrix.sh b/scripts/contrib/bb-perf/bb-matrix.sh new file mode 100755 index 0000000000..106456584d --- /dev/null +++ b/scripts/contrib/bb-perf/bb-matrix.sh | |||
@@ -0,0 +1,79 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # Copyright (c) 2011, Intel Corporation. | ||
4 | # All rights reserved. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify | ||
7 | # it under the terms of the GNU General Public License as published by | ||
8 | # the Free Software Foundation; either version 2 of the License, or | ||
9 | # (at your option) any later version. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License | ||
17 | # along with this program; if not, write to the Free Software | ||
18 | # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This script runs BB_CMD (typically building core-image-sato) for all | ||
22 | # combincations of BB_RANGE and PM_RANGE values. It saves off all the console | ||
23 | # logs, the buildstats directories, and creates a bb-pm-runtime.dat file which | ||
24 | # can be used to postprocess the results with a plotting tool, spreadsheet, etc. | ||
25 | # Before running this script, it is recommended that you pre-download all the | ||
26 | # necessary sources by performing the BB_CMD once manually. It is also a good | ||
27 | # idea to disable cron to avoid runtime variations caused by things like the | ||
28 | # locate process. Be sure to sanitize the dat file prior to post-processing as | ||
29 | # it may contain error messages or bad runs that should be removed. | ||
30 | # | ||
31 | # AUTHORS | ||
32 | # Darren Hart <dvhart@linux.intel.com> | ||
33 | # | ||
34 | |||
# The following ranges are appropriate for a 4 core system with 8 logical units
# Use leading 0s to ensure all digits are the same string length, this results
# in nice log file names and columnar dat files.
BB_RANGE="04 05 06 07 08 09 10 11 12 13 14 15 16"
PM_RANGE="04 05 06 07 08 09 10 11 12 13 14 15 16"

DATADIR="bb-matrix-$$"
BB_CMD="bitbake core-image-minimal"
RUNTIME_LOG="$DATADIR/bb-matrix.dat"

# See TIME(1) for a description of the time format parameters
# The following all report 0: W K r s t w
TIME_STR="%e %S %U %P %c %w %R %F %M %x"

# Prepare the DATADIR
if ! mkdir "$DATADIR"; then
	echo "Failed to create $DATADIR."
	exit 1
fi

# Add a simple header
echo "BB PM $TIME_STR" > "$RUNTIME_LOG"
for BB in $BB_RANGE; do
	for PM in $PM_RANGE; do
		RUNDIR="$DATADIR/$BB-$PM-build"
		mkdir "$RUNDIR" || exit 1
		BB_LOG="$RUNDIR/$BB-$PM-bitbake.log"
		date
		echo "BB=$BB PM=$PM Logging to $BB_LOG"

		echo -n "  Preparing the work directory... "
		rm -rf pseudodone tmp sstate-cache tmp-eglibc &> /dev/null
		echo "done"

		# Export the variables under test and run the bitbake command.
		# Force base-10 interpretation so the leading zeros used for
		# column alignment are not parsed as octal (e.g. "08", "09").
		export BB_NUMBER_THREADS=$((10#$BB))
		export PARALLEL_MAKE="-j $((10#$PM))"
		/usr/bin/time -f "$BB $PM $TIME_STR" -a -o "$RUNTIME_LOG" $BB_CMD &> "$BB_LOG"

		echo "  $(tail -n1 "$RUNTIME_LOG")"
		cp -a tmp/buildstats "$RUNDIR/$BB-$PM-buildstats"
	done
done
diff --git a/scripts/contrib/bbvars.py b/scripts/contrib/bbvars.py new file mode 100755 index 0000000000..0896d64445 --- /dev/null +++ b/scripts/contrib/bbvars.py | |||
@@ -0,0 +1,186 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # This program is free software; you can redistribute it and/or modify | ||
4 | # it under the terms of the GNU General Public License as published by | ||
5 | # the Free Software Foundation; either version 2 of the License, or | ||
6 | # (at your option) any later version. | ||
7 | # | ||
8 | # This program is distributed in the hope that it will be useful, | ||
9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | # GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License | ||
14 | # along with this program; if not, write to the Free Software | ||
15 | # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
16 | # | ||
17 | # Copyright (C) Darren Hart <dvhart@linux.intel.com>, 2010 | ||
18 | |||
19 | |||
20 | import sys | ||
21 | import getopt | ||
22 | import os | ||
23 | import os.path | ||
24 | import re | ||
25 | |||
def usage():
    """Print the command-line usage summary to stdout."""
    # print(...) with a single argument is valid in both Python 2 and 3;
    # the original used Python-2-only print statements.
    print('Usage: %s -d FILENAME [-d FILENAME]* -m METADIR [-m METADIR]*' % os.path.basename(sys.argv[0]))
    print('  -d FILENAME         documentation file to search')
    print('  -h, --help          display this help and exit')
    print('  -m METADIR          meta directory to search for recipes')
    print('  -t FILENAME         documentation config file (for doc tags)')
    print('  -T                  Only display variables with doc tags (requires -t)')
33 | |||
def recipe_bbvars(recipe):
    """Return a dict mapping each bb variable name found in *recipe* to 1.

    A "variable" is any run of uppercase letters/underscores outside of a
    '#' comment.  On I/O error a warning is printed and an empty dict is
    returned (the original left ``r`` undefined and crashed with NameError).
    """
    prog = re.compile(r"[A-Z_]+")
    vset = set()
    try:
        # 'with' guarantees the file is closed even if iteration fails
        with open(recipe) as r:
            for line in r:
                # Strip any comments from the line
                line = line.rsplit('#')[0]
                vset.update(prog.findall(line))
    except IOError as err:
        print('WARNING: Failed to open recipe %s: %s' % (recipe, err))

    # Each variable counts once per recipe
    return dict.fromkeys(vset, 1)
55 | |||
def collect_bbvars(metadir):
    """Walk *metadir* and tally, per variable name, how many recipes use it.

    Any file whose name contains ".bb" (recipes, appends, includes) is
    scanned with recipe_bbvars().
    """
    bbvars = {}
    for root, dirs, files in os.walk(metadir):
        for name in files:
            if name.find(".bb") >= 0:
                # dict.get replaces the Python-2-only has_key/iterkeys idioms
                for key in recipe_bbvars(os.path.join(root, name)):
                    bbvars[key] = bbvars.get(key, 0) + 1
    return bbvars
68 | |||
def bbvar_is_documented(var, docfiles):
    """Return True if *var* appears as a whole word in any of *docfiles*.

    The variable must not be immediately preceded or followed by another
    [A-Z_] character, so e.g. FOO does not match FOO_BAR.
    """
    # The original pattern used ($|[^A-Z_]) before the name; '$' can never
    # match *before* more text, so a variable at the start of a line was
    # never found.  Anchor with '^' instead.
    prog = re.compile(r".*(^|[^A-Z_])%s([^A-Z_]|$)" % var)
    for doc in docfiles:
        try:
            f = open(doc)
        except IOError as err:
            # Skip unreadable docs instead of crashing on an undefined 'f'
            print('WARNING: Failed to open doc %s: %s' % (doc, err))
            continue
        try:
            for line in f:
                if prog.match(line):
                    return True
        finally:
            # The original leaked the handle when returning True mid-file
            f.close()
    return False
82 | |||
def bbvar_doctag(var, docconf):
    """Return the [doc] tag value for *var* from the *docconf* file.

    Returns "?" when no docconf was supplied, an error string when the
    file cannot be opened, and "" when the variable has no doc tag.
    """
    # Raw string: \[ and \] are regex escapes, not string escapes
    prog = re.compile(r'^%s\[doc\] *= *"(.*)"' % var)
    if docconf == "":
        return "?"

    try:
        f = open(docconf)
    except IOError as err:
        return str(err)

    try:
        for line in f:
            m = prog.search(line)
            if m:
                return m.group(1)
    finally:
        # Original leaked the handle when returning from inside the loop
        f.close()
    return ""
100 | |||
def main():
    """Parse options, scan the metadirs, and report undocumented variables."""
    docfiles = []
    metadirs = []
    bbvars = {}
    undocumented = []
    docconf = ""
    onlydoctags = False

    # Collect and validate input
    try:
        opts, args = getopt.getopt(sys.argv[1:], "d:hm:t:T", ["help"])
    except getopt.GetoptError as err:
        # 'as err' replaces the Python-2-only 'except E, err' syntax
        print('%s' % str(err))
        usage()
        sys.exit(2)

    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit(0)
        elif o == '-d':
            if os.path.isfile(a):
                docfiles.append(a)
            else:
                print('ERROR: documentation file %s is not a regular file' % a)
                sys.exit(3)
        elif o == '-m':
            if os.path.isdir(a):
                metadirs.append(a)
            else:
                print('ERROR: meta directory %s is not a directory' % a)
                sys.exit(4)
        elif o == "-t":
            if os.path.isfile(a):
                docconf = a
        elif o == "-T":
            onlydoctags = True
        else:
            assert False, "unhandled option"

    if len(docfiles) == 0:
        print('ERROR: no docfile specified')
        usage()
        sys.exit(5)

    if len(metadirs) == 0:
        print('ERROR: no metadir specified')
        usage()
        sys.exit(6)

    if onlydoctags and docconf == "":
        print('ERROR: no docconf specified')
        usage()
        sys.exit(7)

    # Collect all the variable names from the recipes in the metadirs
    for m in metadirs:
        # .items()/.get replace the py2-only iteritems/has_key idioms
        for key, cnt in collect_bbvars(m).items():
            bbvars[key] = bbvars.get(key, 0) + cnt

    # Check each var for documentation, tracking the longest name so the
    # report columns line up
    varlen = 0
    for v in bbvars:
        if len(v) > varlen:
            varlen = len(v)
        if not bbvar_is_documented(v, docfiles):
            undocumented.append(v)
    undocumented.sort()
    varlen = varlen + 1

    # Report all undocumented variables
    print('Found %d undocumented bb variables (out of %d):' % (len(undocumented), len(bbvars)))
    header = '%s%s%s' % (str("VARIABLE").ljust(varlen), str("COUNT").ljust(6), str("DOCTAG").ljust(7))
    print(header)
    print(str("").ljust(len(header), '='))
    for v in undocumented:
        doctag = bbvar_doctag(v, docconf)
        if not onlydoctags or not doctag == "":
            print('%s%s%s' % (v.ljust(varlen), str(bbvars[v]).ljust(6), doctag))


if __name__ == "__main__":
    main()
diff --git a/scripts/contrib/build-perf-test.sh b/scripts/contrib/build-perf-test.sh new file mode 100755 index 0000000000..cdd7885dca --- /dev/null +++ b/scripts/contrib/build-perf-test.sh | |||
@@ -0,0 +1,369 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # This script runs a series of tests (with and without sstate) and reports build time (and tmp/ size) | ||
4 | # | ||
5 | # Build performance test script | ||
6 | # | ||
7 | # Copyright 2013 Intel Corporation | ||
8 | # | ||
9 | # This program is free software; you can redistribute it and/or modify | ||
10 | # it under the terms of the GNU General Public License as published by | ||
11 | # the Free Software Foundation; either version 2 of the License, or | ||
12 | # (at your option) any later version. | ||
13 | # | ||
14 | # This program is distributed in the hope that it will be useful, | ||
15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | # GNU General Public License for more details. | ||
18 | # | ||
19 | # You should have received a copy of the GNU General Public License | ||
20 | # along with this program; if not, write to the Free Software | ||
21 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | # | ||
23 | # | ||
24 | # AUTHORS: | ||
25 | # Stefan Stanacar <stefanx.stanacar@intel.com> | ||
26 | |||
27 | |||
ME=$(basename "$0")

#
# usage and setup
#

usage () {
	cat << EOT
Usage: $ME [-h]
       $ME [-c <commit>] [-v] [-m <val>] [-j <val>] [-t <val>] [-i <image-name>] [-d <path>]
Options:
	-h
		Display this help and exit.
	-c <commit>
		git checkout <commit> before anything else
	-v
		Show bitbake output, don't redirect it to a log.
	-m <machine>
		Value for MACHINE. Default is qemux86.
	-j <val>
		Value for PARALLEL_MAKE. Default is 8.
	-t <val>
		Value for BB_NUMBER_THREADS. Default is 8.
	-i <image-name>
		Instead of timing against core-image-sato, use <image-name>
	-d <path>
		Use <path> as DL_DIR
	-p <githash>
		Cherry pick githash onto the commit

Note: current working directory must be inside a poky git clone.

EOT
}


# All paths below are relative to the top of the poky clone
if clonedir=$(git rev-parse --show-toplevel); then
	cd "$clonedir"
else
	echo "The current working dir doesn't seem to be a poky git clone. Please cd there before running $ME"
	exit 1
fi

IMAGE="core-image-sato"
verbose=0
dldir=
commit=
pmake=
cherrypicks=
while getopts "hvc:m:j:t:i:d:p:" opt; do
	case $opt in
	h)	usage
		exit 0
		;;
	v)	verbose=1
		;;
	c)	commit=$OPTARG
		;;
	m)	export MACHINE=$OPTARG
		;;
	j)	pmake=$OPTARG
		;;
	t)	export BB_NUMBER_THREADS=$OPTARG
		;;
	i)	IMAGE=$OPTARG
		;;
	d)	dldir=$OPTARG
		;;
	p)	cherrypicks="$cherrypicks $OPTARG"
		;;
	*)	usage
		exit 1
		;;
	esac
done


# Drop cached credentials and test for sudo access without a password
sudo -k -n ls > /dev/null 2>&1
reqpass=$?
if [ $reqpass -ne 0 ]; then
	echo "The script requires sudo access to drop caches between builds (echo 3 > /proc/sys/vm/drop_caches)"
	read -s -p "Please enter your sudo password: " pass
	echo
fi

if [ -n "$commit" ]; then
	echo "git checkout -f $commit"
	git pull > /dev/null 2>&1
	git checkout -f "$commit" || exit 1
	git pull > /dev/null 2>&1
fi

if [ -n "$cherrypicks" ]; then
	for c in $cherrypicks; do
		# Abort on a failed pick: otherwise we would silently time the
		# wrong tree (the original ignored cherry-pick failures)
		git cherry-pick "$c" || exit 1
	done
fi

rev=$(git rev-parse --short HEAD) || exit 1
OUTDIR="$clonedir/build-perf-test/results-$rev-$(date "+%Y%m%d%H%M%S")"
BUILDDIR="$OUTDIR/build"
resultsfile="$OUTDIR/results.log"
bboutput="$OUTDIR/bitbake.log"
myoutput="$OUTDIR/output.log"
globalres="$clonedir/build-perf-test/globalres.log"

mkdir -p "$OUTDIR" || exit 1

log () {
	local msg="$1"
	echo "$(date): $msg" | tee -a "$myoutput"
}


#
# Config stuff
#

branch=$(git branch 2>&1 | grep "^* " | tr -d "* ")
gitcommit=$(git rev-parse HEAD) || exit 1
log "Running on $branch:$gitcommit"

source ./oe-init-build-env "$OUTDIR/build" >/dev/null || exit 1
cd "$OUTDIR/build"

[ -n "$MACHINE" ] || export MACHINE="qemux86"
[ -n "$BB_NUMBER_THREADS" ] || export BB_NUMBER_THREADS="8"

if [ -n "$pmake" ]; then
	export PARALLEL_MAKE="-j $pmake"
else
	export PARALLEL_MAKE="-j 8"
fi

if [ -n "$dldir" ]; then
	echo "DL_DIR = \"$dldir\"" >> conf/local.conf
else
	echo "DL_DIR = \"$clonedir/build-perf-test/downloads\"" >> conf/local.conf
fi

# Sometimes I've noticed big differences in timings for the same commit, on the same machine
# Disabling the network sanity check helps a bit (because of my crappy network connection and/or proxy)
echo "CONNECTIVITY_CHECK_URIS =\"\"" >> conf/local.conf


#
# Functions
#

declare -a TIMES
time_count=0
declare -a SIZES
size_count=0

# Run "bitbake <args>" under time(1) and record the wall-clock result
bbtime () {
	local arg="$@"
	log "   Timing: bitbake ${arg}"

	if [ $verbose -eq 0 ]; then
		/usr/bin/time -v -o "$resultsfile" bitbake ${arg} >> "$bboutput"
	else
		/usr/bin/time -v -o "$resultsfile" bitbake ${arg}
	fi
	ret=$?
	if [ $ret -eq 0 ]; then
		t=$(grep wall "$resultsfile" | sed 's/.*m:ss): //')
		log "   TIME: $t"
		TIMES[(( time_count++ ))]="$t"
	else
		log "ERROR: exit status was non-zero, will report time as 0."
		TIMES[(( time_count++ ))]="0"
	fi

	# time by default overwrites the output file and we want to keep the
	# results, so archive each run's stats under a numbered suffix
	i=$(ls "$OUTDIR"/results.log* | wc -l)
	mv "$resultsfile" "${resultsfile}.${i}"
	log "More stats can be found in ${resultsfile}.${i}"
}

# Run "bitbake <args>" without timing it; abort the whole test on failure
bbnotime () {
	local arg="$@"
	log "   Running: bitbake ${arg}"
	if [ $verbose -eq 0 ]; then
		bitbake ${arg} >> "$bboutput"
	else
		bitbake ${arg}
	fi
	ret=$?
	if [ $ret -eq 0 ]; then
		log "   Finished bitbake ${arg}"
	else
		log "ERROR: exit status was non-zero. Exit.."
		exit $ret
	fi

}

do_rmtmp() {
	log "   Removing tmp"
	rm -rf bitbake.lock pseudodone conf/sanity_info cache tmp
}
do_rmsstate () {
	log "   Removing sstate-cache"
	rm -rf sstate-cache
}
do_sync () {
	log "   Syncing and dropping caches"
	sync; sync
	if [ $reqpass -eq 0 ]; then
		sudo sh -c "echo 3 > /proc/sys/vm/drop_caches"
	else
		echo "$pass" | sudo -S sh -c "echo 3 > /proc/sys/vm/drop_caches"
		echo
	fi
	sleep 3
}

# Append one CSV record (host, commit, times, sizes) to the global results
write_results() {
	echo -n "$(uname -n),$branch:$gitcommit,$(git describe)," >> "$globalres"
	for i in "${TIMES[@]}"; do
		echo -n "$i," >> "$globalres"
	done
	for i in "${SIZES[@]}"; do
		echo -n "$i," >> "$globalres"
	done
	echo >> "$globalres"
	# strip the trailing comma from the record just written
	sed -i '$ s/,$//' "$globalres"
}

####

#
# Test 1
# Measure: Wall clock of "bitbake core-image-sato" and size of tmp/dir (w/o rm_work and w/ rm_work)
# Pre: Downloaded sources, no sstate
# Steps:
#     Part1:
#        - fetchall
#        - clean build dir
#        - time bitbake core-image-sato
#        - collect data
#     Part2:
#        - bitbake virtual/kernel -c cleansstate
#        - time bitbake virtual/kernel
#     Part3:
#        - add INHERIT to local.conf
#        - clean build dir
#        - build
#        - report size, remove INHERIT

test1_p1 () {
	log "Running Test 1, part 1/3: Measure wall clock of bitbake $IMAGE and size of tmp/ dir"
	bbnotime $IMAGE -c fetchall
	do_rmtmp
	do_rmsstate
	do_sync
	bbtime $IMAGE
	s=$(du -s tmp | sed 's/tmp//' | sed 's/[ \t]*$//')
	SIZES[(( size_count++ ))]="$s"
	log "SIZE of tmp dir is: $s"
	log "Buildstats are saved in $OUTDIR/buildstats-test1"
	mv tmp/buildstats "$OUTDIR/buildstats-test1"
}


test1_p2 () {
	log "Running Test 1, part 2/3: bitbake virtual/kernel -c cleansstate and time bitbake virtual/kernel"
	bbnotime virtual/kernel -c cleansstate
	do_sync
	bbtime virtual/kernel
}

test1_p3 () {
	log "Running Test 1, part 3/3: Build $IMAGE w/o sstate and report size of tmp/dir with rm_work enabled"
	echo "INHERIT += \"rm_work\"" >> conf/local.conf
	do_rmtmp
	do_rmsstate
	do_sync
	bbtime $IMAGE
	sed -i 's/INHERIT += \"rm_work\"//' conf/local.conf
	s=$(du -s tmp | sed 's/tmp//' | sed 's/[ \t]*$//')
	SIZES[(( size_count++ ))]="$s"
	log "SIZE of tmp dir is: $s"
	log "Buildstats are saved in $OUTDIR/buildstats-test13"
	mv tmp/buildstats "$OUTDIR/buildstats-test13"
}


#
# Test 2
# Measure: Wall clock of "bitbake core-image-sato" and size of tmp/dir
# Pre: populated sstate cache

test2 () {
	# Assuming test 1 has run
	log "Running Test 2: Measure wall clock of bitbake $IMAGE -c rootfs with sstate"
	do_rmtmp
	do_sync
	bbtime $IMAGE -c rootfs
}


# Test 3
# parsing time metrics
#
#  Start with
#  i) "rm -rf tmp/cache; time bitbake -p"
#  ii) "rm -rf tmp/cache/default-glibc/; time bitbake -p"
#  iii) "time bitbake -p"


test3 () {
	log "Running Test 3: Parsing time metrics (bitbake -p)"
	log "   Removing tmp/cache && cache"
	rm -rf tmp/cache cache
	bbtime -p
	log "   Removing tmp/cache/default-glibc/"
	rm -rf tmp/cache/default-glibc/
	bbtime -p
	bbtime -p
}



# RUN!

test1_p1
test1_p2
test1_p3
test2
test3

# if we got til here write to global results
write_results

log "All done, cleaning up..."

do_rmtmp
do_rmsstate
diff --git a/scripts/contrib/ddimage b/scripts/contrib/ddimage new file mode 100755 index 0000000000..a503f11d0d --- /dev/null +++ b/scripts/contrib/ddimage | |||
@@ -0,0 +1,104 @@ | |||
1 | #!/bin/sh | ||
2 | |||
3 | # Default to avoiding the first two disks on typical Linux and Mac OS installs | ||
4 | # Better safe than sorry :-) | ||
# Default to avoiding the first two disks on typical Linux and Mac OS installs
# Better safe than sorry :-)
BLACKLIST_DEVICES="/dev/sda /dev/sdb /dev/disk1 /dev/disk2"

# 1MB blocksize
BLOCKSIZE=1048576

usage() {
	echo "Usage: $(basename "$0") IMAGE DEVICE"
}

# Print name, size, mtime, and file type of the image file given as $1
image_details() {
	IMG=$1
	echo "Image details"
	echo "============="
	echo " image: $(basename "$IMG")"
	# stat format is different on Mac OS (BSD stat) and Linux (GNU stat)
	if [ "$(uname)" = "Darwin" ]; then
		echo " size: $(stat -L -f '%z bytes' "$IMG")"
		echo " modified: $(stat -L -f '%Sm' "$IMG")"
	else
		echo " size: $(stat -L -c '%s bytes' "$IMG")"
		echo " modified: $(stat -L -c '%y' "$IMG")"
	fi
	echo " type: $(file -L -b "$IMG")"
	echo ""
}

# Print vendor/model/size for the device; $1 is the basename of $DEVICE
device_details() {
	DEV=$1
	BLOCK_SIZE=512

	echo "Device details"
	echo "=============="

	# Collect disk info using diskutil on Mac OS
	# (diskutil needs the full device path, hence the global $DEVICE)
	if [ "$(uname)" = "Darwin" ]; then
		diskutil info "$DEVICE" | grep -E "(Device Node|Media Name|Total Size)"
		return
	fi

	# Default / Linux information collection via sysfs
	echo " device: $DEVICE"
	if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
		echo " vendor: $(cat "/sys/class/block/$DEV/device/vendor")"
	else
		# fix: original printed the misspelling "UNKOWN" here
		echo " vendor: UNKNOWN"
	fi
	if [ -f "/sys/class/block/$DEV/device/model" ]; then
		echo " model: $(cat "/sys/class/block/$DEV/device/model")"
	else
		echo " model: UNKNOWN"
	fi
	if [ -f "/sys/class/block/$DEV/size" ]; then
		echo " size: $(($(cat "/sys/class/block/$DEV/size") * $BLOCK_SIZE)) bytes"
	else
		echo " size: UNKNOWN"
	fi
	echo ""
}

if [ $# -ne 2 ]; then
	usage
	exit 1
fi

IMAGE=$1
DEVICE=$2

if [ ! -e "$IMAGE" ]; then
	echo "ERROR: Image $IMAGE does not exist"
	usage
	exit 1
fi


# Refuse to write to the devices most likely to hold the running OS
for i in ${BLACKLIST_DEVICES}; do
	if [ "$i" = "$DEVICE" ]; then
		echo "ERROR: Device $DEVICE is blacklisted"
		exit 1
	fi
done

if [ ! -w "$DEVICE" ]; then
	echo "ERROR: Device $DEVICE does not exist or is not writable"
	usage
	exit 1
fi

image_details "$IMAGE"
device_details "$(basename "$DEVICE")"

# Final confirmation before anything destructive happens
printf "Write $IMAGE to $DEVICE [y/N]? "
read RESPONSE
if [ "$RESPONSE" != "y" ]; then
	echo "Write aborted"
	exit 0
fi

echo "Writing image..."
dd if="$IMAGE" of="$DEVICE" bs="$BLOCKSIZE"
sync
diff --git a/scripts/contrib/dialog-power-control b/scripts/contrib/dialog-power-control new file mode 100755 index 0000000000..7550ea53be --- /dev/null +++ b/scripts/contrib/dialog-power-control | |||
@@ -0,0 +1,53 @@ | |||
#!/bin/sh
#
# Simple script to show a manual power prompt for when you want to use
# automated hardware testing with testimage.bbclass but you don't have a
# web-enabled power strip or similar to do the power on/off/cycle.
#
# You can enable it by enabling testimage (see the Yocto Project
# Development manual "Performing Automated Runtime Testing" section)
# and setting the following in your local.conf:
#
# TEST_POWERCONTROL_CMD = "${COREBASE}/scripts/contrib/dialog-power-control"
#

# Map the requested power action (the last recognised argument wins) to
# the message shown to the operator.
PROMPT=""
while true; do
    case $1 in
        on)
            PROMPT="Please turn device power on";;
        off)
            PROMPT="Please turn device power off";;
        cycle)
            PROMPT="Please click Done, then turn the device power off then on";;
        "")
            break;;
    esac
    shift
done

if [ "$PROMPT" = "" ] ; then
    echo "ERROR: no power action specified on command line"
    exit 2
fi

# Use the POSIX builtin 'command -v' rather than the external 'which'
# utility to detect an available dialog program.
if command -v kdialog > /dev/null 2>&1 ; then
    DIALOGUTIL="kdialog"
elif command -v zenity > /dev/null 2>&1 ; then
    DIALOGUTIL="zenity"
else
    echo "ERROR: couldn't find program to display a message, install kdialog or zenity"
    exit 3
fi

if [ "$DIALOGUTIL" = "kdialog" ] ; then
    kdialog --yesno "$PROMPT" --title "TestImage Power Control" --yes-label "Done" --no-label "Cancel test"
elif [ "$DIALOGUTIL" = "zenity" ] ; then
    zenity --question --text="$PROMPT" --title="TestImage Power Control" --ok-label="Done" --cancel-label="Cancel test"
fi
# Capture the dialog's exit status immediately so nothing can clobber $?
# before we test it ("Cancel test" / closing the dialog returns non-zero).
RET=$?
if [ "$RET" != "0" ] ; then
    echo "User cancelled test at power prompt"
    exit 1
fi
diff --git a/scripts/contrib/documentation-audit.sh b/scripts/contrib/documentation-audit.sh new file mode 100755 index 0000000000..2144aac936 --- /dev/null +++ b/scripts/contrib/documentation-audit.sh | |||
@@ -0,0 +1,94 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # Perform an audit of which packages provide documentation and which | ||
4 | # are missing -doc packages. | ||
5 | # | ||
6 | # Setup requirements: be sure to be building for MACHINE=qemux86. Run | ||
7 | # this script after source'ing the build environment script, so you're | ||
8 | # running it from build/ directory. | ||
9 | # | ||
10 | # Maintainer: Scott Garman <scott.a.garman@intel.com> | ||
11 | |||
REPORT_DOC_SIMPLE="documentation_exists.txt"
REPORT_DOC_DETAIL="documentation_exists_detail.txt"
REPORT_MISSING_SIMPLE="documentation_missing.txt"
REPORT_MISSING_DETAIL="documentation_missing_detail.txt"
REPORT_BUILD_ERRORS="build_errors.txt"

# Start each run from a clean slate. Also remove the build-errors report:
# the loop below appends to it, so leftovers from a previous run would
# accumulate stale entries.
rm -rf $REPORT_DOC_SIMPLE $REPORT_DOC_DETAIL $REPORT_MISSING_SIMPLE $REPORT_MISSING_DETAIL $REPORT_BUILD_ERRORS

BITBAKE=`which bitbake`
if [ -z "$BITBAKE" ]; then
    echo "Error: bitbake command not found."
    echo "Did you forget to source the build environment script?"
    exit 1
fi

echo "REMINDER: you need to build for MACHINE=qemux86 or you won't get useful results"
echo "REMINDER: you need to set LICENSE_FLAGS_WHITELIST appropriately in local.conf or "
echo " you'll get false positives. For example, LICENSE_FLAGS_WHITELIST = \"Commercial\""

for pkg in `bitbake -s | awk '{ print \$1 }'`; do
    if [[ "$pkg" == "Loading" || "$pkg" == "Loaded" ||
          "$pkg" == "Recipe"  ||
          "$pkg" == "Parsing" || "$pkg" == "Package" ||
          "$pkg" == "NOTE:"   || "$pkg" == "WARNING:" ||
          "$pkg" == "done."   || "$pkg" == "===========" ]]
    then
        # Skip initial bitbake output
        continue
    fi
    if [[ "$pkg" =~ -native$ || "$pkg" =~ -nativesdk$ ||
          "$pkg" =~ -cross-canadian ]]; then
        # Skip native/nativesdk/cross-canadian recipes
        continue
    fi
    if [[ "$pkg" =~ ^meta- || "$pkg" =~ ^packagegroup- || "$pkg" =~ -image ]]; then
        # Skip meta, task and image recipes
        continue
    fi
    if [[ "$pkg" =~ ^glibc- || "$pkg" =~ ^libiconv$ ||
          "$pkg" =~ -toolchain$ || "$pkg" =~ ^package-index$ ||
          "$pkg" =~ ^linux- || "$pkg" =~ ^adt-installer$ ||
          "$pkg" =~ ^eds-tools$ || "$pkg" =~ ^external-python-tarball$ ||
          "$pkg" =~ ^qt4-embedded$ || "$pkg" =~ ^qt-mobility ]]; then
        # Skip glibc, libiconv, -toolchain, and other recipes known
        # to cause build conflicts or trigger false positives.
        continue
    fi

    echo "Building package $pkg..."
    bitbake $pkg > /dev/null
    if [ $? -ne 0 ]; then
        echo "There was an error building package $pkg" >> "$REPORT_MISSING_DETAIL"
        echo "$pkg" >> $REPORT_BUILD_ERRORS

        # Do not skip the remaining tests, as sometimes the
        # exit status is 1 due to QA errors, and we can still
        # perform the -doc checks.
    fi

    # We can reach this point even when the build failed (see above), so
    # don't claim the build succeeded -- just report what we're checking.
    echo "Checking for a documentation package for $pkg..."
    WORKDIR=`bitbake -e $pkg | grep ^WORKDIR | awk -F '=' '{ print \$2 }' | awk -F '"' '{ print \$2 }'`
    FIND_DOC_PKG=`find $WORKDIR/packages-split/*-doc -maxdepth 0 -type d`
    if [ -z "$FIND_DOC_PKG" ]; then
        # No -doc package was generated:
        echo "No -doc package: $pkg" >> "$REPORT_MISSING_DETAIL"
        echo "$pkg" >> $REPORT_MISSING_SIMPLE
        continue
    fi

    FIND_DOC_FILES=`find $FIND_DOC_PKG -type f`
    if [ -z "$FIND_DOC_FILES" ]; then
        # No files shipped with the -doc package:
        echo "No files shipped with the -doc package: $pkg" >> "$REPORT_MISSING_DETAIL"
        echo "$pkg" >> $REPORT_MISSING_SIMPLE
        continue
    fi

    echo "Documentation shipped with $pkg:" >> "$REPORT_DOC_DETAIL"
    echo "$FIND_DOC_FILES" >> "$REPORT_DOC_DETAIL"
    echo "" >> "$REPORT_DOC_DETAIL"

    echo "$pkg" >> "$REPORT_DOC_SIMPLE"
done
diff --git a/scripts/contrib/graph-tool b/scripts/contrib/graph-tool new file mode 100755 index 0000000000..6dc7d337f8 --- /dev/null +++ b/scripts/contrib/graph-tool | |||
@@ -0,0 +1,92 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # Simple graph query utility | ||
4 | # useful for getting answers from .dot files produced by bitbake -g | ||
5 | # | ||
6 | # Written by: Paul Eggleton <paul.eggleton@linux.intel.com> | ||
7 | # | ||
8 | # Copyright 2013 Intel Corporation | ||
9 | # | ||
10 | # This program is free software; you can redistribute it and/or modify | ||
11 | # it under the terms of the GNU General Public License version 2 as | ||
12 | # published by the Free Software Foundation. | ||
13 | # | ||
14 | # This program is distributed in the hope that it will be useful, | ||
15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | # GNU General Public License for more details. | ||
18 | # | ||
19 | # You should have received a copy of the GNU General Public License along | ||
20 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
22 | # | ||
23 | |||
24 | import sys | ||
25 | |||
def get_path_networkx(dotfile, fromnode, tonode):
    """Return an iterator over all simple paths between two nodes of the
    dependency graph read from *dotfile* (as produced by bitbake -g).

    Exits with status 1 if networkx is unavailable or a node is missing
    but has close name matches.
    """
    try:
        import networkx
    except ImportError:
        print('ERROR: Please install the networkx python module')
        sys.exit(1)

    graph = networkx.DiGraph(networkx.read_dot(dotfile))

    def node_missing(node):
        # Suggest likely alternatives for a mistyped node name; only exit
        # when a close match exists (otherwise let networkx report it).
        import difflib
        close_matches = difflib.get_close_matches(node, graph.nodes(), cutoff=0.7)
        if close_matches:
            print('ERROR: no node "%s" in graph. Close matches:\n %s' % (node, '\n '.join(close_matches)))
            sys.exit(1)

    if fromnode not in graph:
        node_missing(fromnode)
    if tonode not in graph:
        node_missing(tonode)
    return networkx.all_simple_paths(graph, source=fromnode, target=tonode)
47 | |||
48 | |||
def find_paths(args, usage):
    """Print every path between two graph nodes, one per line.

    args: [dotfile, fromnode, tonode]; usage: callable that prints help.
    Exits with status 1 when arguments are missing or no path exists.
    """
    if len(args) < 3:
        usage()
        sys.exit(1)

    fromnode = args[1]
    tonode = args[2]
    paths = list(get_path_networkx(args[0], fromnode, tonode))
    if paths:
        for path in paths:
            # Use print() -- valid as a single-argument call on Python 2
            # and 3 alike -- instead of the py2-only print statement, for
            # consistency with the print() calls elsewhere in this file.
            print(' -> '.join(path))
    else:
        print("ERROR: no path from %s to %s in graph" % (fromnode, tonode))
        sys.exit(1)
63 | |||
def main():
    """Parse the command line and dispatch to the requested sub-command."""
    import optparse
    parser = optparse.OptionParser(
        usage = '''%prog [options] <command> <arguments>

Available commands:
  find-paths <dotfile> <from> <to>
    Find all of the paths between two nodes in a dot graph''')

    options, args = parser.parse_args(sys.argv)
    args = args[1:]

    # No sub-command at all: show help and fail.
    if not args:
        parser.print_help()
        sys.exit(1)

    command = args[0]
    if command == "find-paths":
        find_paths(args[1:], parser.print_help)
    else:
        parser.print_help()
        sys.exit(1)
89 | |||
90 | |||
# Script entry point.
if __name__ == "__main__":
    main()
diff --git a/scripts/contrib/list-packageconfig-flags.py b/scripts/contrib/list-packageconfig-flags.py new file mode 100755 index 0000000000..598b5c3fc6 --- /dev/null +++ b/scripts/contrib/list-packageconfig-flags.py | |||
@@ -0,0 +1,181 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # This program is free software; you can redistribute it and/or modify | ||
4 | # it under the terms of the GNU General Public License as published by | ||
5 | # the Free Software Foundation; either version 2 of the License, or | ||
6 | # (at your option) any later version. | ||
7 | # | ||
8 | # This program is distributed in the hope that it will be useful, | ||
9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | # GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License | ||
14 | # along with this program; if not, write to the Free Software Foundation. | ||
15 | # | ||
16 | # Copyright (C) 2013 Wind River Systems, Inc. | ||
17 | # Copyright (C) 2014 Intel Corporation | ||
18 | # | ||
19 | # - list available recipes which have PACKAGECONFIG flags | ||
20 | # - list available PACKAGECONFIG flags and all affected recipes | ||
21 | # - list all recipes and PACKAGECONFIG information | ||
22 | |||
23 | import sys | ||
24 | import optparse | ||
25 | import os | ||
26 | |||
27 | |||
28 | scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0]))) | ||
29 | lib_path = os.path.abspath(scripts_path + '/../lib') | ||
30 | sys.path = sys.path + [lib_path] | ||
31 | |||
32 | import scriptpath | ||
33 | |||
34 | # For importing the following modules | ||
35 | bitbakepath = scriptpath.add_bitbake_lib_path() | ||
36 | if not bitbakepath: | ||
37 | sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n") | ||
38 | sys.exit(1) | ||
39 | |||
40 | import bb.cache | ||
41 | import bb.cooker | ||
42 | import bb.providers | ||
43 | import bb.tinfoil | ||
44 | |||
def get_fnlist(bbhandler, pkg_pn, preferred):
    ''' Get all recipe file names '''
    # Resolve the preferred version of each provider once, up front,
    # when only preferred recipes are wanted.
    if preferred:
        (latest_versions, preferred_versions) = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecache, pkg_pn)

    filenames = []
    for recipe in sorted(pkg_pn):
        if preferred:
            filenames.append(preferred_versions[recipe][1])
        else:
            filenames.extend(pkg_pn[recipe])
    return filenames
58 | |||
def get_recipesdata(bbhandler, preferred):
    ''' Get data of all available recipes which have PACKAGECONFIG flags '''
    pkg_pn = bbhandler.cooker.recipecache.pkg_pn

    recipes = {}
    for fn in get_fnlist(bbhandler, pkg_pn, preferred):
        appends = bbhandler.cooker.collection.get_file_appends(fn)
        data = bb.cache.Cache.loadDataFull(fn, appends, bbhandler.config_data)
        # Drop the 'doc'/'defaultval' pseudo-flags: only recipes with at
        # least one real PACKAGECONFIG option are kept.
        flags = data.getVarFlags("PACKAGECONFIG")
        flags.pop('doc', None)
        flags.pop('defaultval', None)
        if flags:
            recipes[fn] = data
    return recipes
73 | |||
def collect_pkgs(data_dict):
    ''' Collect available pkgs in which have PACKAGECONFIG flags '''
    # Result maps recipe name -> sorted flag names,
    # e.g. {'pkg1': ['flag1', 'flag2', ...]}
    pkg_dict = {}
    for fn in data_dict:
        data = data_dict[fn]
        flags = data.getVarFlags("PACKAGECONFIG")
        flags.pop('doc', None)
        flags.pop('defaultval', None)
        pkg_dict[data.getVar("P", True)] = sorted(flags.keys())

    return pkg_dict
86 | |||
def collect_flags(pkg_dict):
    ''' Collect available PACKAGECONFIG flags and all affected pkgs '''
    # flag_dict = {'flag': ['pkg1', 'pkg2',...]}
    flag_dict = {}
    for pkgname, flaglist in pkg_dict.items():
        # items() (unlike the Python-2-only iteritems()) works on both
        # Python 2 and 3; setdefault replaces the explicit membership test.
        for flag in flaglist:
            flag_dict.setdefault(flag, []).append(pkgname)

    return flag_dict
99 | |||
def display_pkgs(pkg_dict):
    ''' Display available pkgs which have PACKAGECONFIG flags '''
    # First column width: the longest recipe name (or the header), plus one.
    pkgname_len = len("RECIPE NAME") + 1
    for pkgname in pkg_dict:
        if pkgname_len < len(pkgname):
            pkgname_len = len(pkgname)
    pkgname_len += 1

    header = '%-*s%s' % (pkgname_len, str("RECIPE NAME"), str("PACKAGECONFIG FLAGS"))
    # print() (not the py2-only print statement) keeps this consistent with
    # the print() calls elsewhere in this file and works on Python 3 too.
    print(header)
    print(str("").ljust(len(header), '='))
    for pkgname in sorted(pkg_dict):
        print('%-*s%s' % (pkgname_len, pkgname, ' '.join(pkg_dict[pkgname])))
113 | |||
114 | |||
def display_flags(flag_dict):
    ''' Display available PACKAGECONFIG flags and all affected pkgs '''
    # Fixed first-column width; flag names are typically short.
    flag_len = len("PACKAGECONFIG FLAG") + 5

    header = '%-*s%s' % (flag_len, str("PACKAGECONFIG FLAG"), str("RECIPE NAMES"))
    # print() (not the py2-only print statement) keeps this consistent with
    # the print() calls elsewhere in this file and works on Python 3 too.
    print(header)
    print(str("").ljust(len(header), '='))

    for flag in sorted(flag_dict):
        print('%-*s%s' % (flag_len, flag, ' '.join(sorted(flag_dict[flag]))))
125 | |||
def display_all(data_dict):
    ''' Display all pkgs and PACKAGECONFIG information '''
    print(str("").ljust(50, '='))
    for fn in data_dict:
        print('%s' % data_dict[fn].getVar("P", True))
        print(fn)
        packageconfig = data_dict[fn].getVar("PACKAGECONFIG", True) or ''
        if packageconfig.strip() == '':
            packageconfig = 'None'
        print('PACKAGECONFIG %s' % packageconfig)

        # items() instead of the Python-2-only iteritems(); skip the
        # 'doc'/'defaultval' pseudo-flags which are not real options.
        # print() is used throughout for py2/py3 consistency.
        for flag, flag_val in data_dict[fn].getVarFlags("PACKAGECONFIG").items():
            if flag in ["defaultval", "doc"]:
                continue
            print('PACKAGECONFIG[%s] %s' % (flag, flag_val))
        print('')
142 | |||
def main():
    """Parse options, load recipe metadata through tinfoil and print the
    requested PACKAGECONFIG report (recipes, flags or everything)."""
    parser = optparse.OptionParser(
        description = "Lists recipes and PACKAGECONFIG flags. Without -a or -f, recipes and their available PACKAGECONFIG flags are listed.",
        usage = """
%prog [options]""")

    parser.add_option("-f", "--flags",
            help = "list available PACKAGECONFIG flags and affected recipes",
            action="store_const", dest="listtype", const="flags", default="recipes")
    parser.add_option("-a", "--all",
            help = "list all recipes and PACKAGECONFIG information",
            action="store_const", dest="listtype", const="all")
    parser.add_option("-p", "--preferred-only",
            help = "where multiple recipe versions are available, list only the preferred version",
            action="store_true", dest="preferred", default=False)

    options, args = parser.parse_args(sys.argv)

    # Start a bitbake server and parse recipe metadata once.
    bbhandler = bb.tinfoil.Tinfoil()
    bbhandler.prepare()
    print("Gathering recipe data...")
    data_dict = get_recipesdata(bbhandler, options.preferred)

    # Dispatch on the requested report type.
    if options.listtype == 'flags':
        display_flags(collect_flags(collect_pkgs(data_dict)))
    elif options.listtype == 'recipes':
        display_pkgs(collect_pkgs(data_dict))
    elif options.listtype == 'all':
        display_all(data_dict)
179 | |||
# Script entry point.
if __name__ == "__main__":
    main()
diff --git a/scripts/contrib/mkefidisk.sh b/scripts/contrib/mkefidisk.sh new file mode 100755 index 0000000000..b96b7d4f7d --- /dev/null +++ b/scripts/contrib/mkefidisk.sh | |||
@@ -0,0 +1,396 @@ | |||
1 | #!/bin/sh | ||
2 | # | ||
3 | # Copyright (c) 2012, Intel Corporation. | ||
4 | # All rights reserved. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify | ||
7 | # it under the terms of the GNU General Public License as published by | ||
8 | # the Free Software Foundation; either version 2 of the License, or | ||
9 | # (at your option) any later version. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
14 | # the GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License | ||
17 | # along with this program; if not, write to the Free Software | ||
18 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | # | ||
20 | |||
# Force the C locale so the output of tools we parse (e.g. parted) is
# not localised.
LANG=C

# Set to 1 to enable additional output
DEBUG=0
# Where to send command output; switched to fd 1 (stdout) in -v mode.
OUT="/dev/null"

#
# Defaults
#
# 20 Mb for the boot partition
BOOT_SIZE=20
# 5% for swap
SWAP_RATIO=5
34 | |||
# Cleanup after die()
cleanup() {
    debug "Syncing and unmounting devices"
    # Unmount anything we mounted. Order matters: the target partitions
    # and the loop-mounted rootfs image are released before the hddimg
    # loop mount that contains it.
    unmount $ROOTFS_MNT || error "Failed to unmount $ROOTFS_MNT"
    unmount $BOOTFS_MNT || error "Failed to unmount $BOOTFS_MNT"
    unmount $HDDIMG_ROOTFS_MNT || error "Failed to unmount $HDDIMG_ROOTFS_MNT"
    unmount $HDDIMG_MNT || error "Failed to unmount $HDDIMG_MNT"

    # Remove the TMPDIR
    debug "Removing temporary files"
    if [ -d "$TMPDIR" ]; then
        rm -rf $TMPDIR || error "Failed to remove $TMPDIR"
    fi
}
50 | |||
# Unmount and clean up before exiting on a signal.
trap 'die "Signal Received, Aborting..."' HUP INT TERM

# Logging routines
WARNINGS=0
ERRORS=0
CLEAR="$(tput sgr0)"
INFO="$(tput bold)"
RED="$(tput setaf 1)$(tput bold)"
GREEN="$(tput setaf 2)$(tput bold)"
YELLOW="$(tput setaf 3)$(tput bold)"
# Print a bold informational message.
info() {
    echo "${INFO}$1${CLEAR}"
}
# Print a red message and count it for the final summary (non-fatal).
error() {
    ERRORS=$((ERRORS+1))
    echo "${RED}$1${CLEAR}"
}
# Print a yellow message and count it for the final summary.
warn() {
    WARNINGS=$((WARNINGS+1))
    echo "${YELLOW}$1${CLEAR}"
}
# Print a green message.
success() {
    echo "${GREEN}$1${CLEAR}"
}
# Fatal: report the error, unmount/clean up, and exit non-zero.
die() {
    error "$1"
    cleanup
    exit 1
}
# Print only when -v was given.
debug() {
    if [ $DEBUG -eq 1 ]; then
        echo "$1"
    fi
}
85 | |||
usage() {
    # Print command-line usage to stdout.
    cat << EOF
Usage: $(basename $0) [-v] DEVICE HDDIMG TARGET_DEVICE
 -v: Verbose debug
 DEVICE: The device to write the image to, e.g. /dev/sdh
 HDDIMG: The hddimg file to generate the efi disk from
 TARGET_DEVICE: The device the target will boot from, e.g. /dev/mmcblk0
EOF
}
93 | |||
# Report name, size, mtime and file type of the image file passed as $1.
image_details() {
    IMG=$1
    info "Image details"
    echo " image: $(stat --printf '%N\n' $IMG)"
    # -L: follow symlinks so a linked image reports its real size/date.
    echo " size: $(stat -L --printf '%s bytes\n' $IMG)"
    echo " modified: $(stat -L --printf '%y\n' $IMG)"
    echo " type: $(file -L -b $IMG)"
    echo ""
}
103 | |||
# Report vendor, model and size of the block device whose sysfs name
# (basename, e.g. "sdh") is passed as $1, via /sys/class/block.
device_details() {
    DEV=$1
    # sysfs reports size in 512-byte sectors.
    BLOCK_SIZE=512

    info "Device details"
    # NOTE(review): prints the global $DEVICE (full path) rather than the
    # $DEV basename argument -- presumably intentional; confirm.
    echo " device: $DEVICE"
    if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
        echo " vendor: $(cat /sys/class/block/$DEV/device/vendor)"
    else
        # Fix "UNKOWN" typo so the fallback matches the model/size branches.
        echo " vendor: UNKNOWN"
    fi
    if [ -f "/sys/class/block/$DEV/device/model" ]; then
        echo " model: $(cat /sys/class/block/$DEV/device/model)"
    else
        echo " model: UNKNOWN"
    fi
    if [ -f "/sys/class/block/$DEV/size" ]; then
        echo " size: $(($(cat /sys/class/block/$DEV/size) * $BLOCK_SIZE)) bytes"
    else
        echo " size: UNKNOWN"
    fi
    echo ""
}
127 | |||
# If any partition of $DEVICE appears in /proc/mounts, try to unmount
# them all. Returns umount's status, or 0 when nothing was mounted.
unmount_device() {
    if grep -q $DEVICE /proc/mounts; then
        warn "$DEVICE listed in /proc/mounts, attempting to unmount"
        umount $DEVICE* 2>/dev/null
        return $?
    fi
    return 0
}
137 | |||
# Unmount the mount point or device $1 if it is currently mounted.
# Returns umount's status, or 0 when it was not mounted at all.
unmount() {
    if grep -q $1 /proc/mounts; then
        debug "Unmounting $1"
        umount $1
        return $?
    fi
    return 0
}
147 | |||
#
# Parse and validate arguments
#
if [ $# -lt 3 ] || [ $# -gt 4 ]; then
    usage
    exit 1
fi

# -v: verbose mode -- echo debug messages and send command output to
# stdout (fd 1) instead of /dev/null.
if [ "$1" = "-v" ]; then
    DEBUG=1
    OUT="1"
    shift
fi

DEVICE=$1
HDDIMG=$2
TARGET_DEVICE=$3

# Resolve a symlinked device path (e.g. /dev/disk/by-id/...) to the
# real device node.
LINK=$(readlink $DEVICE)
if [ $? -eq 0 ]; then
    DEVICE="$LINK"
fi

if [ ! -w "$DEVICE" ]; then
    usage
    die "Device $DEVICE does not exist or is not writable"
fi

if [ ! -e "$HDDIMG" ]; then
    usage
    die "HDDIMG $HDDIMG does not exist"
fi

#
# Ensure the hddimg is not mounted
#
unmount "$HDDIMG" || die "Failed to unmount $HDDIMG"

#
# Check if any $DEVICE partitions are mounted
#
unmount_device || die "Failed to unmount $DEVICE"

#
# Confirm device with user
#
image_details $HDDIMG
device_details $(basename $DEVICE)
# NOTE(review): 'echo -n' is not portable in pure /bin/sh -- some shells
# print the "-n" literally; confirm target shells before relying on it.
echo -n "${INFO}Prepare EFI image on $DEVICE [y/N]?${CLEAR} "
read RESPONSE
if [ "$RESPONSE" != "y" ]; then
    echo "Image creation aborted"
    exit 0
fi


#
# Prepare the temporary working space
#
# Note: mktemp without --tmpdir creates the directory under the CWD.
TMPDIR=$(mktemp -d mkefidisk-XXX) || die "Failed to create temporary mounting directory."
HDDIMG_MNT=$TMPDIR/hddimg
HDDIMG_ROOTFS_MNT=$TMPDIR/hddimg_rootfs
ROOTFS_MNT=$TMPDIR/rootfs
BOOTFS_MNT=$TMPDIR/bootfs
mkdir $HDDIMG_MNT || die "Failed to create $HDDIMG_MNT"
mkdir $HDDIMG_ROOTFS_MNT || die "Failed to create $HDDIMG_ROOTFS_MNT"
mkdir $ROOTFS_MNT || die "Failed to create $ROOTFS_MNT"
mkdir $BOOTFS_MNT || die "Failed to create $BOOTFS_MNT"
216 | |||
217 | |||
#
# Partition $DEVICE
#
# Total size in MB: third field of parted's "Disk ..." line.
DEVICE_SIZE=$(parted $DEVICE unit mb print | grep ^Disk | cut -d" " -f 3 | sed -e "s/MB//")
# If the device size is not reported there may not be a valid label
if [ "$DEVICE_SIZE" = "" ] ; then
    parted $DEVICE mklabel msdos || die "Failed to create MSDOS partition table"
    DEVICE_SIZE=$(parted $DEVICE unit mb print | grep ^Disk | cut -d" " -f 3 | sed -e "s/MB//")
fi
# Layout in MB: [boot: BOOT_SIZE][rootfs: remainder][swap: SWAP_RATIO%]
SWAP_SIZE=$((DEVICE_SIZE*SWAP_RATIO/100))
ROOTFS_SIZE=$((DEVICE_SIZE-BOOT_SIZE-SWAP_SIZE))
ROOTFS_START=$((BOOT_SIZE))
ROOTFS_END=$((ROOTFS_START+ROOTFS_SIZE))
SWAP_START=$((ROOTFS_END))

# MMC devices use a partition prefix character 'p'
PART_PREFIX=""
if [ ! "${DEVICE#/dev/mmcblk}" = "${DEVICE}" ] || [ ! "${DEVICE#/dev/loop}" = "${DEVICE}" ]; then
    PART_PREFIX="p"
fi
BOOTFS=$DEVICE${PART_PREFIX}1
ROOTFS=$DEVICE${PART_PREFIX}2
SWAP=$DEVICE${PART_PREFIX}3

# The device the installed system boots from may be named differently
# (e.g. target boots /dev/mmcblk0 while we write via a USB reader).
TARGET_PART_PREFIX=""
if [ ! "${TARGET_DEVICE#/dev/mmcblk}" = "${TARGET_DEVICE}" ]; then
    TARGET_PART_PREFIX="p"
fi
TARGET_ROOTFS=$TARGET_DEVICE${TARGET_PART_PREFIX}2
TARGET_SWAP=$TARGET_DEVICE${TARGET_PART_PREFIX}3

echo ""
info "Boot partition size: $BOOT_SIZE MB ($BOOTFS)"
info "ROOTFS partition size: $ROOTFS_SIZE MB ($ROOTFS)"
info "Swap partition size: $SWAP_SIZE MB ($SWAP)"
echo ""

# Use MSDOS by default as GPT cannot be reliably distributed in disk image form
# as it requires the backup table to be on the last block of the device, which
# of course varies from device to device.

info "Partitioning installation media ($DEVICE)"

debug "Deleting partition table on $DEVICE"
dd if=/dev/zero of=$DEVICE bs=512 count=2 >$OUT 2>&1 || die "Failed to zero beginning of $DEVICE"

debug "Creating new partition table (MSDOS) on $DEVICE"
parted $DEVICE mklabel msdos >$OUT 2>&1 || die "Failed to create MSDOS partition table"

debug "Creating boot partition on $BOOTFS"
parted $DEVICE mkpart primary 0% $BOOT_SIZE >$OUT 2>&1 || die "Failed to create BOOT partition"

debug "Enabling boot flag on $BOOTFS"
parted $DEVICE set 1 boot on >$OUT 2>&1 || die "Failed to enable boot flag"

debug "Creating ROOTFS partition on $ROOTFS"
parted $DEVICE mkpart primary $ROOTFS_START $ROOTFS_END >$OUT 2>&1 || die "Failed to create ROOTFS partition"

debug "Creating swap partition on $SWAP"
parted $DEVICE mkpart primary $SWAP_START 100% >$OUT 2>&1 || die "Failed to create SWAP partition"

if [ $DEBUG -eq 1 ]; then
    parted $DEVICE print
fi
282 | |||
283 | |||
#
# Check if any $DEVICE partitions are mounted after partitioning
#
# (udev/automounters may have grabbed the fresh partitions.)
unmount_device || die "Failed to unmount $DEVICE partitions"


#
# Format $DEVICE partitions
#
info "Formatting partitions"
debug "Formatting $BOOTFS as vfat"
if [ ! "${DEVICE#/dev/loop}" = "${DEVICE}" ]; then
    # -I allows formatting the whole loop device even if it looks in use.
    mkfs.vfat -I $BOOTFS -n "EFI" >$OUT 2>&1 || die "Failed to format $BOOTFS"
else
    mkfs.vfat $BOOTFS -n "EFI" >$OUT 2>&1 || die "Failed to format $BOOTFS"
fi

debug "Formatting $ROOTFS as ext3"
mkfs.ext3 -F $ROOTFS -L "ROOT" >$OUT 2>&1 || die "Failed to format $ROOTFS"

debug "Formatting swap partition ($SWAP)"
mkswap $SWAP >$OUT 2>&1 || die "Failed to prepare swap"


#
# Installing to $DEVICE
#
debug "Mounting images and device in preparation for installation"
mount -o loop $HDDIMG $HDDIMG_MNT >$OUT 2>&1 || error "Failed to mount $HDDIMG"
mount -o loop $HDDIMG_MNT/rootfs.img $HDDIMG_ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount rootfs.img"
mount $ROOTFS $ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount $ROOTFS on $ROOTFS_MNT"
mount $BOOTFS $BOOTFS_MNT >$OUT 2>&1 || error "Failed to mount $BOOTFS on $BOOTFS_MNT"

info "Preparing boot partition"
EFIDIR="$BOOTFS_MNT/EFI/BOOT"
cp $HDDIMG_MNT/vmlinuz $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy vmlinuz"
# Copy the efi loader and configs (booti*.efi and grub.cfg if it exists)
cp -r $HDDIMG_MNT/EFI $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy EFI dir"
# Silently ignore a missing gummiboot loader dir (we might just be a GRUB image)
cp -r $HDDIMG_MNT/loader $BOOTFS_MNT >$OUT 2>&1
324 | |||
# Update the boot loaders configurations for an installed image
# Remove any existing root= kernel parameters and:
# o Add a root= parameter with the target rootfs
# o Specify ro so fsck can be run during boot
# o Specify rootwait in case the target media is an asyncronous block device
#   such as MMC or USB disks
# o Specify "quiet" to minimize boot time when using slow serial consoles

# Look for a GRUB installation
GRUB_CFG="$EFIDIR/grub.cfg"
if [ -e "$GRUB_CFG" ]; then
    info "Configuring GRUB"
    # Delete the install entry
    sed -i "/menuentry 'install'/,/^}/d" $GRUB_CFG
    # Delete the initrd lines
    sed -i "/initrd /d" $GRUB_CFG
    # Delete any LABEL= strings
    sed -i "s/ LABEL=[^ ]*/ /" $GRUB_CFG

    # Drop the old root= argument, then splice the installed root device
    # (plus ro/rootwait/quiet) into the kernel command line.
    sed -i "s@ root=[^ ]*@ @" $GRUB_CFG
    sed -i "s@vmlinuz @vmlinuz root=$TARGET_ROOTFS ro rootwait quiet @" $GRUB_CFG
fi
347 | |||
# Look for a gummiboot installation and point its boot entry at the
# installed root device (same edits as made to grub.cfg above).
GUMMI_ENTRIES="$BOOTFS_MNT/loader/entries"
GUMMI_CFG="$GUMMI_ENTRIES/boot.conf"
if [ -d "$GUMMI_ENTRIES" ]; then
    info "Configuring Gummiboot"
    # remove the install target if it exists
    rm $GUMMI_ENTRIES/install.conf >$OUT 2>&1

    # Only edit boot.conf when it actually exists; previously the sed -i
    # commands below ran (and failed noisily) on the missing file. Use
    # error() so the problem is counted in the final summary.
    if [ ! -e "$GUMMI_CFG" ]; then
        error "$GUMMI_CFG not found"
    else
        sed -i "/initrd /d" $GUMMI_CFG
        sed -i "s@ root=[^ ]*@ @" $GUMMI_CFG
        sed -i "s@options *LABEL=boot @options LABEL=Boot root=$TARGET_ROOTFS ro rootwait quiet @" $GUMMI_CFG
    fi
fi
364 | |||
# Ensure we have at least one EFI bootloader configured
if [ ! -e $GRUB_CFG ] && [ ! -e $GUMMI_CFG ]; then
    die "No EFI bootloader configuration found"
fi


info "Copying ROOTFS files (this may take a while)"
cp -a $HDDIMG_ROOTFS_MNT/* $ROOTFS_MNT >$OUT 2>&1 || die "Root FS copy failed"

# Enable the swap partition on the installed system.
echo "$TARGET_SWAP swap swap defaults 0 0" >> $ROOTFS_MNT/etc/fstab

# We dont want udev to mount our root device while we're booting...
if [ -d $ROOTFS_MNT/etc/udev/ ] ; then
    echo "$TARGET_DEVICE" >> $ROOTFS_MNT/etc/udev/mount.blacklist
fi


# Call cleanup to unmount devices and images and remove the TMPDIR
cleanup

# Summarize the outcome using the counters maintained by warn()/error().
echo ""
if [ $WARNINGS -ne 0 ] && [ $ERRORS -eq 0 ]; then
    echo "${YELLOW}Installation completed with warnings${CLEAR}"
    echo "${YELLOW}Warnings: $WARNINGS${CLEAR}"
elif [ $ERRORS -ne 0 ]; then
    echo "${RED}Installation encountered errors${CLEAR}"
    echo "${RED}Errors: $ERRORS${CLEAR}"
    echo "${YELLOW}Warnings: $WARNINGS${CLEAR}"
else
    success "Installation completed successfully"
fi
echo ""
diff --git a/scripts/contrib/python/generate-manifest-2.7.py b/scripts/contrib/python/generate-manifest-2.7.py new file mode 100755 index 0000000000..68c42b0a54 --- /dev/null +++ b/scripts/contrib/python/generate-manifest-2.7.py | |||
@@ -0,0 +1,391 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # generate Python Manifest for the OpenEmbedded build system | ||
4 | # (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de> | ||
5 | # (C) 2007 Jeremy Laine | ||
6 | # licensed under MIT, see COPYING.MIT | ||
7 | # | ||
8 | # June 22, 2011 -- Mark Hatle <mark.hatle@windriver.com> | ||
9 | # * Updated to no longer generate special -dbg package, instead use the | ||
10 | # single system -dbg | ||
11 | # * Update version with ".1" to indicate this change | ||
12 | |||
13 | import os | ||
14 | import sys | ||
15 | import time | ||
16 | |||
VERSION = "2.7.2"

__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
__version__ = "20110222.2"

class MakefileMaker:
    """Emit the auto-generated Python 2.7 packaging manifest (PROVIDES,
    PACKAGES, SUMMARY_*, RDEPENDS_*, FILES_* variables) consumed by the
    OpenEmbedded build system."""

    def __init__( self, outfile ):
        """Bind the output stream and write the manifest header."""
        # package name -> (description, dependencies, full file list)
        self.packages = {}
        # prefix prepended to relative filenames passed to addPackage()
        self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
        self.output = outfile
        self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )

    #
    # helper functions
    #

    def out( self, data ):
        """print a line to the output file"""
        self.output.write( "%s\n" % data )

    def setPrefix( self, targetPrefix ):
        """set a file prefix for addPackage files"""
        self.targetPrefix = targetPrefix

    def doProlog( self ):
        """Write the (currently empty) prolog section."""
        self.out( """ """ )
        self.out( "" )

    def addPackage( self, name, description, dependencies, filenames ):
        """Register a package.

        *filenames* may be a whitespace-separated string or a list.
        Relative names get self.targetPrefix prepended; names starting
        with '$' (BitBake variable references) are kept verbatim.
        Registering the same name twice overwrites the earlier entry.
        """
        if isinstance( filenames, str ):
            filenames = filenames.split()
        fullFilenames = []
        for filename in filenames:
            if filename[0] != "$":
                fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
            else:
                fullFilenames.append( filename )
        self.packages[name] = description, dependencies, fullFilenames

    def doBody( self ):
        """Generate the body of the manifest."""

        #
        # generate provides line
        #

        provideLine = 'PROVIDES+="'
        for name in sorted(self.packages):
            provideLine += "%s " % name
        provideLine += '"'

        self.out( provideLine )
        self.out( "" )

        #
        # generate package line
        #

        packageLine = 'PACKAGES="${PN}-dbg '
        for name in sorted(self.packages):
            # -distutils gets an extra -staticdev companion package; any
            # other -distutils* subpackage is deliberately left out here
            if name.startswith("${PN}-distutils"):
                if name == "${PN}-distutils":
                    packageLine += "%s-staticdev %s " % (name, name)
            elif name != '${PN}-dbg':
                packageLine += "%s " % name
        packageLine += '${PN}-modules"'

        self.out( packageLine )
        self.out( "" )

        #
        # generate package variables
        #

        # items() (not the Python-2-only iteritems()) keeps this runnable
        # under Python 3 as well; sorted() keeps the output deterministic
        for name, data in sorted(self.packages.items()):
            desc, deps, files = data

            #
            # write out the description and dependencies
            #
            self.out( 'SUMMARY_%s="%s"' % ( name, desc ) )
            self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )

            line = 'FILES_%s="' % name
            for target in files:
                line += "%s " % target
            line += '"'
            self.out( line )
            self.out( "" )

        self.out( 'SUMMARY_${PN}-modules="All Python modules"' )
        line = 'RDEPENDS_${PN}-modules="'

        # -modules pulls in every package except the development bits
        for name in sorted(self.packages):
            if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
                line += "%s " % name

        self.out( "%s \"" % line )
        self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )

    def doEpilog( self ):
        """Write the (currently empty) epilog section."""
        self.out( """""" )
        self.out( "" )

    def make( self ):
        """Write the complete manifest: prolog, body, epilog."""
        self.doProlog()
        self.doBody()
        self.doEpilog()
148 | |||
if __name__ == "__main__":

    # Write to the file named on the command line (replacing any previous
    # version of it), or to stdout when no argument is given.
    if len( sys.argv ) > 1:
        try:
            os.unlink( sys.argv[1] )
        except OSError:
            # file did not exist yet -- nothing to clean up
            pass
        # open() works on both Python 2 and 3; the file() builtin and
        # sys.exc_clear() used previously are Python-2-only
        outfile = open( sys.argv[1], "w" )
    else:
        outfile = sys.stdout

    m = MakefileMaker( outfile )
161 | |||
162 | # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies! | ||
163 | # Parameters: revision, name, description, dependencies, filenames | ||
164 | # | ||
165 | |||
166 | m.addPackage( "${PN}-core", "Python interpreter and core modules", "${PN}-lang ${PN}-re", | ||
167 | "__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " + | ||
168 | "genericpath.* getopt.* linecache.* new.* " + | ||
169 | "os.* posixpath.* struct.* " + | ||
170 | "warnings.* site.* stat.* " + | ||
171 | "UserDict.* UserList.* UserString.* " + | ||
172 | "lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " + | ||
173 | "lib-dynload/xreadlines.so types.* platform.* ${bindir}/python* " + | ||
174 | "_weakrefset.* sysconfig.* config/Makefile " + | ||
175 | "${includedir}/python${PYTHON_MAJMIN}/pyconfig*.h " + | ||
176 | "${libdir}/python${PYTHON_MAJMIN}/sitecustomize.py ") | ||
177 | |||
178 | m.addPackage( "${PN}-dev", "Python development package", "${PN}-core", | ||
179 | "${includedir} " + | ||
180 | "${libdir}/lib*${SOLIBSDEV} " + | ||
181 | "${libdir}/*.la " + | ||
182 | "${libdir}/*.a " + | ||
183 | "${libdir}/*.o " + | ||
184 | "${libdir}/pkgconfig " + | ||
185 | "${base_libdir}/*.a " + | ||
186 | "${base_libdir}/*.o " + | ||
187 | "${datadir}/aclocal " + | ||
188 | "${datadir}/pkgconfig " ) | ||
189 | |||
190 | m.addPackage( "${PN}-2to3", "Python automated Python 2 to 3 code translator", "${PN}-core", | ||
191 | "${bindir}/2to3 lib2to3" ) # package | ||
192 | |||
193 | m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter", | ||
194 | "${bindir}/idle idlelib" ) # package | ||
195 | |||
196 | m.addPackage( "${PN}-pydoc", "Python interactive help support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re", | ||
197 | "${bindir}/pydoc pydoc.* pydoc_data" ) | ||
198 | |||
199 | m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime", | ||
200 | "${bindir}/smtpd.* smtpd.*" ) | ||
201 | |||
202 | m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core", | ||
203 | "wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so audiodev.* sunaudio.* sunau.* toaiff.*" ) | ||
204 | |||
205 | m.addPackage( "${PN}-bsddb", "Python bindings for the Berkeley Database", "${PN}-core", | ||
206 | "bsddb lib-dynload/_bsddb.so" ) # package | ||
207 | |||
208 | m.addPackage( "${PN}-codecs", "Python codecs, encodings & i18n support", "${PN}-core ${PN}-lang", | ||
209 | "codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/_codecs* lib-dynload/_multibytecodec.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" ) | ||
210 | |||
211 | m.addPackage( "${PN}-compile", "Python bytecode compilation support", "${PN}-core", | ||
212 | "py_compile.* compileall.*" ) | ||
213 | |||
214 | m.addPackage( "${PN}-compiler", "Python compiler support", "${PN}-core", | ||
215 | "compiler" ) # package | ||
216 | |||
217 | m.addPackage( "${PN}-compression", "Python high-level compression support", "${PN}-core ${PN}-zlib", | ||
218 | "gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" ) | ||
219 | |||
220 | m.addPackage( "${PN}-crypt", "Python basic cryptographic and hashing support", "${PN}-core", | ||
221 | "hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" ) | ||
222 | |||
223 | m.addPackage( "${PN}-textutils", "Python option parsing, text wrapping and CSV support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold", | ||
224 | "lib-dynload/_csv.so csv.* optparse.* textwrap.*" ) | ||
225 | |||
226 | m.addPackage( "${PN}-curses", "Python curses support", "${PN}-core", | ||
227 | "curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module | ||
228 | |||
229 | m.addPackage( "${PN}-ctypes", "Python C types support", "${PN}-core", | ||
230 | "ctypes lib-dynload/_ctypes.so lib-dynload/_ctypes_test.so" ) # directory + low level module | ||
231 | |||
232 | m.addPackage( "${PN}-datetime", "Python calendar and time support", "${PN}-core ${PN}-codecs", | ||
233 | "_strptime.* calendar.* lib-dynload/datetime.so" ) | ||
234 | |||
235 | m.addPackage( "${PN}-db", "Python file-based database support", "${PN}-core", | ||
236 | "anydbm.* dumbdbm.* whichdb.* " ) | ||
237 | |||
238 | m.addPackage( "${PN}-debugger", "Python debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint", | ||
239 | "bdb.* pdb.*" ) | ||
240 | |||
241 | m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects", "${PN}-lang ${PN}-re", | ||
242 | "difflib.*" ) | ||
243 | |||
244 | m.addPackage( "${PN}-distutils-staticdev", "Python distribution utilities (static libraries)", "${PN}-distutils", | ||
245 | "config/lib*.a" ) # package | ||
246 | |||
247 | m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core", | ||
248 | "config distutils" ) # package | ||
249 | |||
250 | m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib", | ||
251 | "doctest.*" ) | ||
252 | |||
253 | # FIXME consider adding to some higher level package | ||
254 | m.addPackage( "${PN}-elementtree", "Python elementree", "${PN}-core", | ||
255 | "lib-dynload/_elementtree.so" ) | ||
256 | |||
257 | m.addPackage( "${PN}-email", "Python email support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient", | ||
258 | "imaplib.* email" ) # package | ||
259 | |||
260 | m.addPackage( "${PN}-fcntl", "Python's fcntl interface", "${PN}-core", | ||
261 | "lib-dynload/fcntl.so" ) | ||
262 | |||
263 | m.addPackage( "${PN}-hotshot", "Python hotshot performance profiler", "${PN}-core", | ||
264 | "hotshot lib-dynload/_hotshot.so" ) | ||
265 | |||
266 | m.addPackage( "${PN}-html", "Python HTML processing support", "${PN}-core", | ||
267 | "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " ) | ||
268 | |||
269 | m.addPackage( "${PN}-importlib", "Python import implementation library", "${PN}-core", | ||
270 | "importlib" ) | ||
271 | |||
272 | m.addPackage( "${PN}-gdbm", "Python GNU database support", "${PN}-core", | ||
273 | "lib-dynload/gdbm.so" ) | ||
274 | |||
275 | m.addPackage( "${PN}-image", "Python graphical image handling", "${PN}-core", | ||
276 | "colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" ) | ||
277 | |||
278 | m.addPackage( "${PN}-io", "Python low-level I/O", "${PN}-core ${PN}-math ${PN}-textutils ${PN}-netclient", | ||
279 | "lib-dynload/_socket.so lib-dynload/_io.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " + | ||
280 | "pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" ) | ||
281 | |||
282 | m.addPackage( "${PN}-json", "Python JSON support", "${PN}-core ${PN}-math ${PN}-re ${PN}-codecs", | ||
283 | "json lib-dynload/_json.so" ) # package | ||
284 | |||
285 | m.addPackage( "${PN}-lang", "Python low-level language support", "${PN}-core", | ||
286 | "lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " + | ||
287 | "lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " + | ||
288 | "atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " + | ||
289 | "tokenize.* traceback.* weakref.*" ) | ||
290 | |||
291 | m.addPackage( "${PN}-logging", "Python logging support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold", | ||
292 | "logging" ) # package | ||
293 | |||
294 | m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime", | ||
295 | "mailbox.*" ) | ||
296 | |||
297 | m.addPackage( "${PN}-math", "Python math support", "${PN}-core ${PN}-crypt", | ||
298 | "lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" ) | ||
299 | |||
300 | m.addPackage( "${PN}-mime", "Python MIME handling APIs", "${PN}-core ${PN}-io", | ||
301 | "mimetools.* uu.* quopri.* rfc822.* MimeWriter.*" ) | ||
302 | |||
303 | m.addPackage( "${PN}-mmap", "Python memory-mapped file support", "${PN}-core ${PN}-io", | ||
304 | "lib-dynload/mmap.so " ) | ||
305 | |||
306 | m.addPackage( "${PN}-multiprocessing", "Python multiprocessing support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-threading ${PN}-ctypes ${PN}-mmap", | ||
307 | "lib-dynload/_multiprocessing.so multiprocessing" ) # package | ||
308 | |||
309 | m.addPackage( "${PN}-netclient", "Python Internet Protocol clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime", | ||
310 | "*Cookie*.* " + | ||
311 | "base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" ) | ||
312 | |||
313 | m.addPackage( "${PN}-netserver", "Python Internet Protocol servers", "${PN}-core ${PN}-netclient", | ||
314 | "cgi.* *HTTPServer.* SocketServer.*" ) | ||
315 | |||
316 | m.addPackage( "${PN}-numbers", "Python number APIs", "${PN}-core ${PN}-lang ${PN}-re", | ||
317 | "decimal.* numbers.*" ) | ||
318 | |||
319 | m.addPackage( "${PN}-pickle", "Python serialisation/persistence support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re", | ||
320 | "pickle.* shelve.* lib-dynload/cPickle.so pickletools.*" ) | ||
321 | |||
322 | m.addPackage( "${PN}-pkgutil", "Python package extension utility support", "${PN}-core", | ||
323 | "pkgutil.*") | ||
324 | |||
325 | m.addPackage( "${PN}-pprint", "Python pretty-print support", "${PN}-core ${PN}-io", | ||
326 | "pprint.*" ) | ||
327 | |||
328 | m.addPackage( "${PN}-profile", "Python basic performance profiling support", "${PN}-core ${PN}-textutils", | ||
329 | "profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" ) | ||
330 | |||
331 | m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core", | ||
332 | "re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin | ||
333 | |||
334 | m.addPackage( "${PN}-readline", "Python readline support", "${PN}-core", | ||
335 | "lib-dynload/readline.so rlcompleter.*" ) | ||
336 | |||
337 | m.addPackage( "${PN}-resource", "Python resource control interface", "${PN}-core", | ||
338 | "lib-dynload/resource.so" ) | ||
339 | |||
340 | m.addPackage( "${PN}-shell", "Python shell-like functionality", "${PN}-core ${PN}-re", | ||
341 | "cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" ) | ||
342 | |||
343 | m.addPackage( "${PN}-robotparser", "Python robots.txt parser", "${PN}-core ${PN}-netclient", | ||
344 | "robotparser.*") | ||
345 | |||
346 | m.addPackage( "${PN}-subprocess", "Python subprocess support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle", | ||
347 | "subprocess.*" ) | ||
348 | |||
349 | m.addPackage( "${PN}-sqlite3", "Python Sqlite3 database support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading ${PN}-zlib", | ||
350 | "lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" ) | ||
351 | |||
352 | m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 database support tests", "${PN}-core ${PN}-sqlite3", | ||
353 | "sqlite3/test" ) | ||
354 | |||
355 | m.addPackage( "${PN}-stringold", "Python string APIs [deprecated]", "${PN}-core ${PN}-re", | ||
356 | "lib-dynload/strop.so string.* stringold.*" ) | ||
357 | |||
358 | m.addPackage( "${PN}-syslog", "Python syslog interface", "${PN}-core", | ||
359 | "lib-dynload/syslog.so" ) | ||
360 | |||
361 | m.addPackage( "${PN}-terminal", "Python terminal controlling support", "${PN}-core ${PN}-io", | ||
362 | "pty.* tty.*" ) | ||
363 | |||
364 | m.addPackage( "${PN}-tests", "Python tests", "${PN}-core", | ||
365 | "test" ) # package | ||
366 | |||
367 | m.addPackage( "${PN}-threading", "Python threading & synchronization support", "${PN}-core ${PN}-lang", | ||
368 | "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" ) | ||
369 | |||
370 | m.addPackage( "${PN}-tkinter", "Python Tcl/Tk bindings", "${PN}-core", | ||
371 | "lib-dynload/_tkinter.so lib-tk" ) # package | ||
372 | |||
373 | m.addPackage( "${PN}-unittest", "Python unit testing framework", "${PN}-core ${PN}-stringold ${PN}-lang ${PN}-io ${PN}-difflib ${PN}-pprint ${PN}-shell", | ||
374 | "unittest/" ) | ||
375 | |||
376 | m.addPackage( "${PN}-unixadmin", "Python Unix administration support", "${PN}-core", | ||
377 | "lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" ) | ||
378 | |||
379 | m.addPackage( "${PN}-xml", "Python basic XML support", "${PN}-core ${PN}-elementtree ${PN}-re", | ||
380 | "lib-dynload/pyexpat.so xml xmllib.*" ) # package | ||
381 | |||
382 | m.addPackage( "${PN}-xmlrpc", "Python XML-RPC support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang", | ||
383 | "xmlrpclib.* SimpleXMLRPCServer.* DocXMLRPCServer.*" ) | ||
384 | |||
385 | m.addPackage( "${PN}-zlib", "Python zlib compression support", "${PN}-core", | ||
386 | "lib-dynload/zlib.so" ) | ||
387 | |||
388 | m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime", | ||
389 | "mailbox.*" ) | ||
390 | |||
391 | m.make() | ||
diff --git a/scripts/contrib/python/generate-manifest-3.3.py b/scripts/contrib/python/generate-manifest-3.3.py new file mode 100755 index 0000000000..48cc84d4e0 --- /dev/null +++ b/scripts/contrib/python/generate-manifest-3.3.py | |||
@@ -0,0 +1,386 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # generate Python Manifest for the OpenEmbedded build system | ||
4 | # (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de> | ||
5 | # (C) 2007 Jeremy Laine | ||
6 | # licensed under MIT, see COPYING.MIT | ||
7 | # | ||
8 | # June 22, 2011 -- Mark Hatle <mark.hatle@windriver.com> | ||
9 | # * Updated to no longer generate special -dbg package, instead use the | ||
10 | # single system -dbg | ||
11 | # * Update version with ".1" to indicate this change | ||
12 | # | ||
13 | # 2014 Khem Raj <raj.khem@gmail.com> | ||
14 | # Added python3 support | ||
15 | # | ||
16 | import os | ||
17 | import sys | ||
18 | import time | ||
19 | |||
VERSION = "3.3.3"

__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
__version__ = "20140131"

class MakefileMaker:
    """Emit the auto-generated Python 3.3 packaging manifest (PROVIDES,
    PACKAGES, SUMMARY_*, RDEPENDS_*, FILES_* variables) consumed by the
    OpenEmbedded build system."""

    def __init__( self, outfile ):
        """Bind the output stream and write the manifest header."""
        # package name -> (description, dependencies, full file list)
        self.packages = {}
        # prefix prepended to relative filenames passed to addPackage()
        self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
        self.output = outfile
        self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )

    #
    # helper functions
    #

    def out( self, data ):
        """print a line to the output file"""
        self.output.write( "%s\n" % data )

    def setPrefix( self, targetPrefix ):
        """set a file prefix for addPackage files"""
        self.targetPrefix = targetPrefix

    def doProlog( self ):
        """Write the (currently empty) prolog section."""
        self.out( """ """ )
        self.out( "" )

    def addPackage( self, name, description, dependencies, filenames ):
        """Register a package.

        *filenames* may be a whitespace-separated string or a list.
        Relative names get self.targetPrefix prepended; names starting
        with '$' (BitBake variable references) are kept verbatim.
        Registering the same name twice overwrites the earlier entry.
        """
        if isinstance( filenames, str ):
            filenames = filenames.split()
        fullFilenames = []
        for filename in filenames:
            if filename[0] != "$":
                fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
            else:
                fullFilenames.append( filename )
        self.packages[name] = description, dependencies, fullFilenames

    def doBody( self ):
        """Generate the body of the manifest."""

        #
        # generate provides line
        #

        provideLine = 'PROVIDES+="'
        for name in sorted(self.packages):
            provideLine += "%s " % name
        provideLine += '"'

        self.out( provideLine )
        self.out( "" )

        #
        # generate package line
        #

        packageLine = 'PACKAGES="${PN}-dbg '
        for name in sorted(self.packages):
            # -distutils gets an extra -staticdev companion package; any
            # other -distutils* subpackage is deliberately left out here
            if name.startswith("${PN}-distutils"):
                if name == "${PN}-distutils":
                    packageLine += "%s-staticdev %s " % (name, name)
            elif name != '${PN}-dbg':
                packageLine += "%s " % name
        packageLine += '${PN}-modules"'

        self.out( packageLine )
        self.out( "" )

        #
        # generate package variables
        #

        # items() (not the Python-2-only iteritems(), which crashes when
        # this generator runs under Python 3); sorted() keeps the output
        # deterministic
        for name, data in sorted(self.packages.items()):
            desc, deps, files = data

            #
            # write out the description and dependencies
            #
            self.out( 'SUMMARY_%s="%s"' % ( name, desc ) )
            self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )

            line = 'FILES_%s="' % name
            for target in files:
                line += "%s " % target
            line += '"'
            self.out( line )
            self.out( "" )

        self.out( 'SUMMARY_${PN}-modules="All Python modules"' )
        line = 'RDEPENDS_${PN}-modules="'

        # -modules pulls in every package except the development bits
        for name in sorted(self.packages):
            if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
                line += "%s " % name

        self.out( "%s \"" % line )
        self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )

    def doEpilog( self ):
        """Write the (currently empty) epilog section."""
        self.out( """""" )
        self.out( "" )

    def make( self ):
        """Write the complete manifest: prolog, body, epilog."""
        self.doProlog()
        self.doBody()
        self.doEpilog()
151 | |||
152 | if __name__ == "__main__": | ||
153 | |||
154 | if len( sys.argv ) > 1: | ||
155 | try: | ||
156 | os.unlink(sys.argv[1]) | ||
157 | except Exception: | ||
158 | sys.exc_clear() | ||
159 | outfile = file( sys.argv[1], "w" ) | ||
160 | else: | ||
161 | outfile = sys.stdout | ||
162 | |||
163 | m = MakefileMaker( outfile ) | ||
164 | |||
165 | # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies! | ||
166 | # Parameters: revision, name, description, dependencies, filenames | ||
167 | # | ||
168 | |||
169 | m.addPackage( "${PN}-core", "Python interpreter and core modules", "${PN}-lang ${PN}-re ${PN}-reprlib ${PN}-codecs ${PN}-io ${PN}-math", | ||
170 | "__future__.* _abcoll.* abc.* copy.* copyreg.* ConfigParser.* " + | ||
171 | "genericpath.* getopt.* linecache.* new.* " + | ||
172 | "os.* posixpath.* struct.* " + | ||
173 | "warnings.* site.* stat.* " + | ||
174 | "UserDict.* UserList.* UserString.* " + | ||
175 | "lib-dynload/binascii.*.so lib-dynload/_struct.*.so lib-dynload/time.*.so " + | ||
176 | "lib-dynload/xreadlines.*.so types.* platform.* ${bindir}/python* " + | ||
177 | "_weakrefset.* sysconfig.* _sysconfigdata.* config/Makefile " + | ||
178 | "${includedir}/python${PYTHON_MAJMIN}/pyconfig*.h " + | ||
179 | "${libdir}/python${PYTHON_MAJMIN}/collections " + | ||
180 | "${libdir}/python${PYTHON_MAJMIN}/sitecustomize.py ") | ||
181 | |||
182 | m.addPackage( "${PN}-dev", "Python development package", "${PN}-core", | ||
183 | "${includedir} " + | ||
184 | "${libdir}/lib*${SOLIBSDEV} " + | ||
185 | "${libdir}/*.la " + | ||
186 | "${libdir}/*.a " + | ||
187 | "${libdir}/*.o " + | ||
188 | "${libdir}/pkgconfig " + | ||
189 | "${base_libdir}/*.a " + | ||
190 | "${base_libdir}/*.o " + | ||
191 | "${datadir}/aclocal " + | ||
192 | "${datadir}/pkgconfig " ) | ||
193 | |||
194 | m.addPackage( "${PN}-2to3", "Python automated Python 2 to 3 code translator", "${PN}-core", | ||
195 | "${bindir}/2to3 lib2to3" ) # package | ||
196 | |||
197 | m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter", | ||
198 | "${bindir}/idle idlelib" ) # package | ||
199 | |||
200 | m.addPackage( "${PN}-pydoc", "Python interactive help support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re", | ||
201 | "${bindir}/pydoc pydoc.* pydoc_data" ) | ||
202 | |||
203 | m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime", | ||
204 | "${bindir}/smtpd.* smtpd.*" ) | ||
205 | |||
206 | m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core", | ||
207 | "wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.*.so lib-dynload/audioop.*.so audiodev.* sunaudio.* sunau.* toaiff.*" ) | ||
208 | |||
209 | m.addPackage( "${PN}-codecs", "Python codecs, encodings & i18n support", "${PN}-core ${PN}-lang", | ||
210 | "codecs.* encodings gettext.* locale.* lib-dynload/_locale.*.so lib-dynload/_codecs* lib-dynload/_multibytecodec.*.so lib-dynload/unicodedata.*.so stringprep.* xdrlib.*" ) | ||
211 | |||
212 | m.addPackage( "${PN}-compile", "Python bytecode compilation support", "${PN}-core", | ||
213 | "py_compile.* compileall.*" ) | ||
214 | |||
215 | m.addPackage( "${PN}-compression", "Python high-level compression support", "${PN}-core ${PN}-codecs", | ||
216 | "gzip.* zipfile.* tarfile.* lib-dynload/bz2.*.so" ) | ||
217 | |||
218 | m.addPackage( "${PN}-crypt", "Python basic cryptographic and hashing support", "${PN}-core", | ||
219 | "hashlib.* md5.* sha.* lib-dynload/crypt.*.so lib-dynload/_hashlib.*.so lib-dynload/_sha256.*.so lib-dynload/_sha512.*.so" ) | ||
220 | |||
221 | m.addPackage( "${PN}-textutils", "Python option parsing, text wrapping and CSV support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold", | ||
222 | "lib-dynload/_csv.*.so csv.* optparse.* textwrap.*" ) | ||
223 | |||
224 | m.addPackage( "${PN}-curses", "Python curses support", "${PN}-core", | ||
225 | "curses lib-dynload/_curses.*.so lib-dynload/_curses_panel.*.so" ) # directory + low level module | ||
226 | |||
227 | m.addPackage( "${PN}-ctypes", "Python C types support", "${PN}-core", | ||
228 | "ctypes lib-dynload/_ctypes.*.so lib-dynload/_ctypes_test.*.so" ) # directory + low level module | ||
229 | |||
230 | m.addPackage( "${PN}-datetime", "Python calendar and time support", "${PN}-core ${PN}-codecs", | ||
231 | "_strptime.* calendar.* lib-dynload/datetime.*.so" ) | ||
232 | |||
233 | m.addPackage( "${PN}-db", "Python file-based database support", "${PN}-core", | ||
234 | "anydbm.* dumbdbm.* whichdb.* dbm lib-dynload/_dbm.*.so" ) | ||
235 | |||
236 | m.addPackage( "${PN}-debugger", "Python debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint", | ||
237 | "bdb.* pdb.*" ) | ||
238 | |||
239 | m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects", "${PN}-lang ${PN}-re", | ||
240 | "difflib.*" ) | ||
241 | |||
242 | m.addPackage( "${PN}-distutils-staticdev", "Python distribution utilities (static libraries)", "${PN}-distutils", | ||
243 | "config/lib*.a" ) # package | ||
244 | |||
245 | m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core", | ||
246 | "config distutils" ) # package | ||
247 | |||
248 | m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib", | ||
249 | "doctest.*" ) | ||
250 | |||
251 | # FIXME consider adding to some higher level package | ||
252 | m.addPackage( "${PN}-elementtree", "Python elementree", "${PN}-core", | ||
253 | "lib-dynload/_elementtree.*.so" ) | ||
254 | |||
255 | m.addPackage( "${PN}-email", "Python email support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient", | ||
256 | "imaplib.* email" ) # package | ||
257 | |||
258 | m.addPackage( "${PN}-fcntl", "Python's fcntl interface", "${PN}-core", | ||
259 | "lib-dynload/fcntl.*.so" ) | ||
260 | |||
261 | m.addPackage( "${PN}-html", "Python HTML processing support", "${PN}-core", | ||
262 | "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " ) | ||
263 | |||
264 | m.addPackage( "${PN}-importlib", "Python import implementation library", "${PN}-core", | ||
265 | "importlib" ) | ||
266 | |||
267 | m.addPackage( "${PN}-gdbm", "Python GNU database support", "${PN}-core", | ||
268 | "lib-dynload/_gdbm.*.so" ) | ||
269 | |||
270 | m.addPackage( "${PN}-image", "Python graphical image handling", "${PN}-core", | ||
271 | "colorsys.* imghdr.* lib-dynload/imageop.*.so lib-dynload/rgbimg.*.so" ) | ||
272 | |||
273 | m.addPackage( "${PN}-io", "Python low-level I/O", "${PN}-core ${PN}-math", | ||
274 | "lib-dynload/_socket.*.so lib-dynload/_io.*.so lib-dynload/_ssl.*.so lib-dynload/select.*.so lib-dynload/termios.*.so lib-dynload/cStringIO.*.so " + | ||
275 | "pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" ) | ||
276 | |||
277 | m.addPackage( "${PN}-json", "Python JSON support", "${PN}-core ${PN}-math ${PN}-re", | ||
278 | "json lib-dynload/_json.*.so" ) # package | ||
279 | |||
280 | m.addPackage( "${PN}-lang", "Python low-level language support", "${PN}-core", | ||
281 | "lib-dynload/_bisect.*.so lib-dynload/_collections.*.so lib-dynload/_heapq.*.so lib-dynload/_weakref.*.so lib-dynload/_functools.*.so " + | ||
282 | "lib-dynload/array.*.so lib-dynload/itertools.*.so lib-dynload/operator.*.so lib-dynload/parser.*.so " + | ||
283 | "atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " + | ||
284 | "tokenize.* traceback.* weakref.*" ) | ||
285 | |||
286 | m.addPackage( "${PN}-logging", "Python logging support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold", | ||
287 | "logging" ) # package | ||
288 | |||
289 | m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime", | ||
290 | "mailbox.*" ) | ||
291 | |||
292 | m.addPackage( "${PN}-math", "Python math support", "${PN}-core", | ||
293 | "lib-dynload/cmath.*.so lib-dynload/math.*.so lib-dynload/_random.*.so random.* sets.*" ) | ||
294 | |||
295 | m.addPackage( "${PN}-mime", "Python MIME handling APIs", "${PN}-core ${PN}-io", | ||
296 | "mimetools.* uu.* quopri.* rfc822.* MimeWriter.*" ) | ||
297 | |||
298 | m.addPackage( "${PN}-mmap", "Python memory-mapped file support", "${PN}-core ${PN}-io", | ||
299 | "lib-dynload/mmap.*.so " ) | ||
300 | |||
301 | m.addPackage( "${PN}-multiprocessing", "Python multiprocessing support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-threading ${PN}-ctypes ${PN}-mmap", | ||
302 | "lib-dynload/_multiprocessing.*.so multiprocessing" ) # package | ||
303 | |||
304 | m.addPackage( "${PN}-netclient", "Python Internet Protocol clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime", | ||
305 | "*Cookie*.* " + | ||
306 | "base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" ) | ||
307 | |||
308 | m.addPackage( "${PN}-netserver", "Python Internet Protocol servers", "${PN}-core ${PN}-netclient", | ||
309 | "cgi.* *HTTPServer.* SocketServer.*" ) | ||
310 | |||
311 | m.addPackage( "${PN}-numbers", "Python number APIs", "${PN}-core ${PN}-lang ${PN}-re", | ||
312 | "decimal.* numbers.*" ) | ||
313 | |||
314 | m.addPackage( "${PN}-pickle", "Python serialisation/persistence support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re", | ||
315 | "pickle.* shelve.* lib-dynload/cPickle.*.so pickletools.*" ) | ||
316 | |||
317 | m.addPackage( "${PN}-pkgutil", "Python package extension utility support", "${PN}-core", | ||
318 | "pkgutil.*") | ||
319 | |||
320 | m.addPackage( "${PN}-pprint", "Python pretty-print support", "${PN}-core ${PN}-io", | ||
321 | "pprint.*" ) | ||
322 | |||
323 | m.addPackage( "${PN}-profile", "Python basic performance profiling support", "${PN}-core ${PN}-textutils", | ||
324 | "profile.* pstats.* cProfile.* lib-dynload/_lsprof.*.so" ) | ||
325 | |||
326 | m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core", | ||
327 | "re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin | ||
328 | |||
329 | m.addPackage( "${PN}-readline", "Python readline support", "${PN}-core", | ||
330 | "lib-dynload/readline.*.so rlcompleter.*" ) | ||
331 | |||
332 | m.addPackage( "${PN}-reprlib", "Python alternate repr() implementation", "${PN}-core", | ||
333 | "${libdir}/python3.3/reprlib.py" ) | ||
334 | |||
335 | m.addPackage( "${PN}-resource", "Python resource control interface", "${PN}-core", | ||
336 | "lib-dynload/resource.*.so" ) | ||
337 | |||
338 | m.addPackage( "${PN}-shell", "Python shell-like functionality", "${PN}-core ${PN}-re", | ||
339 | "cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" ) | ||
340 | |||
341 | m.addPackage( "${PN}-robotparser", "Python robots.txt parser", "${PN}-core ${PN}-netclient", | ||
342 | "urllib/robotparser.*") | ||
343 | |||
344 | m.addPackage( "${PN}-subprocess", "Python subprocess support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle", | ||
345 | "subprocess.*" ) | ||
346 | |||
347 | m.addPackage( "${PN}-sqlite3", "Python Sqlite3 database support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading", | ||
348 | "lib-dynload/_sqlite3.*.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" ) | ||
349 | |||
350 | m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 database support tests", "${PN}-core ${PN}-sqlite3", | ||
351 | "sqlite3/test" ) | ||
352 | |||
353 | m.addPackage( "${PN}-stringold", "Python string APIs [deprecated]", "${PN}-core ${PN}-re", | ||
354 | "lib-dynload/strop.*.so string.* stringold.*" ) | ||
355 | |||
356 | m.addPackage( "${PN}-syslog", "Python syslog interface", "${PN}-core", | ||
357 | "lib-dynload/syslog.*.so" ) | ||
358 | |||
359 | m.addPackage( "${PN}-terminal", "Python terminal controlling support", "${PN}-core ${PN}-io", | ||
360 | "pty.* tty.*" ) | ||
361 | |||
362 | m.addPackage( "${PN}-tests", "Python tests", "${PN}-core", | ||
363 | "test" ) # package | ||
364 | |||
365 | m.addPackage( "${PN}-threading", "Python threading & synchronization support", "${PN}-core ${PN}-lang", | ||
366 | "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" ) | ||
367 | |||
368 | m.addPackage( "${PN}-tkinter", "Python Tcl/Tk bindings", "${PN}-core", | ||
369 | "lib-dynload/_tkinter.*.so lib-tk tkinter" ) # package | ||
370 | |||
371 | m.addPackage( "${PN}-unittest", "Python unit testing framework", "${PN}-core ${PN}-stringold ${PN}-lang ${PN}-io ${PN}-difflib ${PN}-pprint ${PN}-shell", | ||
372 | "unittest/" ) | ||
373 | |||
374 | m.addPackage( "${PN}-unixadmin", "Python Unix administration support", "${PN}-core", | ||
375 | "lib-dynload/nis.*.so lib-dynload/grp.*.so lib-dynload/pwd.*.so getpass.*" ) | ||
376 | |||
377 | m.addPackage( "${PN}-xml", "Python basic XML support", "${PN}-core ${PN}-elementtree ${PN}-re", | ||
378 | "lib-dynload/pyexpat.*.so xml xmllib.*" ) # package | ||
379 | |||
380 | m.addPackage( "${PN}-xmlrpc", "Python XML-RPC support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang", | ||
381 | "xmlrpclib.* SimpleXMLRPCServer.* DocXMLRPCServer.* xmlrpc" ) | ||
382 | |||
383 | m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime", | ||
384 | "mailbox.*" ) | ||
385 | |||
386 | m.make() | ||
diff --git a/scripts/contrib/serdevtry b/scripts/contrib/serdevtry new file mode 100755 index 0000000000..74bd7b7161 --- /dev/null +++ b/scripts/contrib/serdevtry | |||
@@ -0,0 +1,60 @@ | |||
#!/bin/sh

# Copyright (C) 2014 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)

# Keep re-running a serial terminal command for devices that disappear when
# the target is powered down or reset; waits for the device node to come
# back and restarts the terminal.

if [ "$1" = "" -o "$1" = "--help" ] ; then
    echo "Usage: $0 <serial terminal command>"
    echo
    echo "Simple script to handle maintaining a terminal for serial devices that"
    echo "disappear when a device is powered down or reset, such as the USB"
    echo "serial console on the original BeagleBone (white version)."
    echo
    echo "e.g. $0 picocom -b 115200 /dev/ttyUSB0"
    echo
    exit
fi

# Find the first /dev/* argument: assumed to be the serial device node.
# FIX: iterate over "$@" without shifting so the full, correctly-quoted
# command line stays intact for execution below -- the original flattened it
# into a plain string (args="$@"), which lost any quoting inside arguments.
DEVICE=""
for arg in "$@"; do
    case "$arg" in
        /dev/*)
            DEVICE="$arg"
            break;;
    esac
done

if [ "$DEVICE" != "" ] ; then
    while true; do
        if [ ! -e "$DEVICE" ] ; then
            echo "serdevtry: waiting for $DEVICE to exist..."
            while [ ! -e "$DEVICE" ]; do
                sleep 0.1
            done
        fi
        if [ ! -w "$DEVICE" ] ; then
            # Sometimes (presumably because of a race with udev) we get to
            # the device before its permissions have been set up
            RETRYNUM=0
            while [ ! -w "$DEVICE" ]; do
                if [ "$RETRYNUM" = "2" ] ; then
                    echo "Device $DEVICE exists but is not writable!"
                    exit 1
                fi
                RETRYNUM=$((RETRYNUM+1))
                sleep 0.1
            done
        fi
        # Run the terminal command with its original argument quoting.
        "$@"
        # If the device still exists the command exited normally (user quit),
        # so stop; if it vanished, loop and wait for it to reappear.
        if [ -e "$DEVICE" ] ; then
            break
        fi
    done
else
    echo "Unable to determine device node from command: $*"
    exit 1
fi
59 | fi | ||
60 | |||
diff --git a/scripts/contrib/test_build_time.sh b/scripts/contrib/test_build_time.sh new file mode 100755 index 0000000000..9e5725ae54 --- /dev/null +++ b/scripts/contrib/test_build_time.sh | |||
@@ -0,0 +1,237 @@ | |||
1 | #!/bin/bash | ||
2 | |||
3 | # Build performance regression test script | ||
4 | # | ||
5 | # Copyright 2011 Intel Corporation | ||
6 | # All rights reserved. | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or modify | ||
9 | # it under the terms of the GNU General Public License as published by | ||
10 | # the Free Software Foundation; either version 2 of the License, or | ||
11 | # (at your option) any later version. | ||
12 | # | ||
13 | # This program is distributed in the hope that it will be useful, | ||
14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | # GNU General Public License for more details. | ||
17 | # | ||
18 | # You should have received a copy of the GNU General Public License | ||
19 | # along with this program; if not, write to the Free Software | ||
20 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 | # | ||
22 | # | ||
23 | # DESCRIPTION | ||
24 | # This script is intended to be used in conjunction with "git bisect run" | ||
25 | # in order to find regressions in build time, however it can also be used | ||
26 | # independently. It cleans out the build output directories, runs a | ||
27 | # specified worker script (an example is test_build_time_worker.sh) under | ||
28 | # TIME(1), logs the results to TEST_LOGDIR (default /tmp) and returns a | ||
29 | # value telling "git bisect run" whether the build time is good (under | ||
30 | # the specified threshold) or bad (over it). There is also a tolerance | ||
31 | # option but it is not particularly useful as it only subtracts the | ||
32 | # tolerance from the given threshold and uses it as the actual threshold. | ||
33 | # | ||
34 | # It is also capable of taking a file listing git revision hashes to be | ||
35 | # test-applied to the repository in order to get past build failures that | ||
36 | # would otherwise cause certain revisions to have to be skipped; if a | ||
37 | # revision does not apply cleanly then the script assumes it does not | ||
38 | # need to be applied and ignores it. | ||
39 | # | ||
40 | # Please see the help output (syntax below) for some important setup | ||
41 | # instructions. | ||
42 | # | ||
43 | # AUTHORS | ||
44 | # Paul Eggleton <paul.eggleton@linux.intel.com> | ||
45 | |||
46 | |||
# Print usage/help text for this script.  (Rewritten as a single
# here-document instead of a chain of echo calls; the emitted text is
# unchanged -- $0 still expands inside the unquoted heredoc.)
syntax() {
    cat << EOF
syntax: $0 <script> <time> <tolerance> [patchrevlist]

 script - worker script file (if in current dir, prefix with ./)
 time - time threshold (in seconds, suffix m for minutes)
 tolerance - tolerance (in seconds, suffix m for minutes or % for
 percentage, can be 0)
 patchrevlist - optional file listing revisions to apply as patches on top

You must set TEST_BUILDDIR to point to a previously created build directory,
however please note that this script will wipe out the TMPDIR defined in
TEST_BUILDDIR/conf/local.conf as part of its initial setup (as well as your
~/.ccache)

To get rid of the sudo prompt, please add the following line to /etc/sudoers
(use 'visudo' to edit this; also it is assumed that the user you are running
as is a member of the 'wheel' group):

%wheel ALL=(ALL) NOPASSWD: /sbin/sysctl -w vm.drop_caches=[1-3]

Note: it is recommended that you disable crond and any other process that
may cause significant CPU or I/O usage during build performance tests.
EOF
}
70 | |||
# Validate arguments and environment, then normalise the threshold/tolerance
# values into plain seconds and extract TMPDIR/SSTATE_DIR from local.conf.
#
# Note - we exit with 250 here because that will tell git bisect run that
# something bad happened and stop
if [ "$1" = "" ] ; then
    syntax
    exit 250
fi

if [ "$2" = "" ] ; then
    syntax
    exit 250
fi

if [ "$3" = "" ] ; then
    syntax
    exit 250
fi

if ! [[ "$2" =~ ^[0-9][0-9m.]*$ ]] ; then
    echo "'$2' is not a valid number for threshold"
    exit 250
fi

if ! [[ "$3" =~ ^[0-9][0-9m.%]*$ ]] ; then
    echo "'$3' is not a valid number for tolerance"
    exit 250
fi

if [ "$TEST_BUILDDIR" = "" ] ; then
    echo "Please set TEST_BUILDDIR to a previously created build directory"
    exit 250
fi

if [ ! -d "$TEST_BUILDDIR" ] ; then
    echo "TEST_BUILDDIR $TEST_BUILDDIR not found"
    exit 250
fi

git diff --quiet
if [ $? != 0 ] ; then
    echo "Working tree is dirty, cannot proceed"
    exit 251
fi

if [ "$BB_ENV_EXTRAWHITE" != "" ] ; then
    echo "WARNING: you are running after sourcing the build environment script, this is not recommended"
fi

runscript=$1
timethreshold=$2
tolerance=$3

if [ "$4" != "" ] ; then
    patchrevlist=`cat $4`
else
    patchrevlist=""
fi

# Convert an "Nm" (minutes) threshold to seconds.
# BUGFIX: the original tested the literal word ([[ timethreshold == *m* ]]),
# which always matched because "timethreshold" contains an 'm', so every
# value was pointlessly piped through sed/bc.  Test the variable's value.
if [[ $timethreshold == *m* ]] ; then
    timethreshold=`echo $timethreshold | sed s/m/*60/ | bc`
fi

# Tolerance may be given in minutes ("Nm") or as a percentage of the
# threshold ("N%"); either way reduce it to seconds.
if [[ $tolerance == *m* ]] ; then
    tolerance=`echo $tolerance | sed s/m/*60/ | bc`
elif [[ $tolerance == *%* ]] ; then
    tolerance=`echo $tolerance | sed s/%//`
    tolerance=`echo "scale = 2; (($tolerance * $timethreshold) / 100)" | bc`
fi

# Pull TMPDIR / SSTATE_DIR out of local.conf (strip the assignment syntax
# and quotes) -- both are about to be deleted, so refuse to continue if
# either cannot be determined or looks suspiciously short.
tmpdir=`grep "^TMPDIR" $TEST_BUILDDIR/conf/local.conf | sed -e 's/TMPDIR[ \t]*=[ \t\?]*"//' -e 's/"//'`
if [ "x$tmpdir" = "x" ]; then
    echo "Unable to determine TMPDIR from $TEST_BUILDDIR/conf/local.conf, bailing out"
    exit 250
fi
sstatedir=`grep "^SSTATE_DIR" $TEST_BUILDDIR/conf/local.conf | sed -e 's/SSTATE_DIR[ \t\?]*=[ \t]*"//' -e 's/"//'`
if [ "x$sstatedir" = "x" ]; then
    echo "Unable to determine SSTATE_DIR from $TEST_BUILDDIR/conf/local.conf, bailing out"
    exit 250
fi

# Guard against rm -rf'ing something like "/" or "/a" below.
if [ `expr length $tmpdir` -lt 4 ] ; then
    echo "TMPDIR $tmpdir is less than 4 characters, bailing out"
    exit 250
fi

if [ `expr length $sstatedir` -lt 4 ] ; then
    echo "SSTATE_DIR $sstatedir is less than 4 characters, bailing out"
    exit 250
fi
159 | |||
160 | echo -n "About to wipe out TMPDIR $tmpdir, press Ctrl+C to break out... " | ||
161 | for i in 9 8 7 6 5 4 3 2 1 | ||
162 | do | ||
163 | echo -ne "\x08$i" | ||
164 | sleep 1 | ||
165 | done | ||
166 | echo | ||
167 | |||
168 | pushd . > /dev/null | ||
169 | |||
170 | rm -f pseudodone | ||
171 | echo "Removing TMPDIR $tmpdir..." | ||
172 | rm -rf $tmpdir | ||
173 | echo "Removing TMPDIR $tmpdir-*libc..." | ||
174 | rm -rf $tmpdir-*libc | ||
175 | echo "Removing SSTATE_DIR $sstatedir..." | ||
176 | rm -rf $sstatedir | ||
177 | echo "Removing ~/.ccache..." | ||
178 | rm -rf ~/.ccache | ||
179 | |||
180 | echo "Syncing..." | ||
181 | sync | ||
182 | sync | ||
183 | echo "Dropping VM cache..." | ||
184 | #echo 3 > /proc/sys/vm/drop_caches | ||
185 | sudo /sbin/sysctl -w vm.drop_caches=3 > /dev/null | ||
186 | |||
187 | if [ "$TEST_LOGDIR" = "" ] ; then | ||
188 | logdir="/tmp" | ||
189 | else | ||
190 | logdir="$TEST_LOGDIR" | ||
191 | fi | ||
192 | rev=`git rev-parse HEAD` | ||
193 | logfile="$logdir/timelog_$rev.log" | ||
194 | echo -n > $logfile | ||
195 | |||
196 | gitroot=`git rev-parse --show-toplevel` | ||
197 | cd $gitroot | ||
198 | for patchrev in $patchrevlist ; do | ||
199 | echo "Applying $patchrev" | ||
200 | patchfile=`mktemp` | ||
201 | git show $patchrev > $patchfile | ||
202 | git apply --check $patchfile &> /dev/null | ||
203 | if [ $? != 0 ] ; then | ||
204 | echo " ... patch does not apply without errors, ignoring" | ||
205 | else | ||
206 | echo "Applied $patchrev" >> $logfile | ||
207 | git apply $patchfile &> /dev/null | ||
208 | fi | ||
209 | rm $patchfile | ||
210 | done | ||
211 | |||
212 | sync | ||
213 | echo "Quiescing for 5s..." | ||
214 | sleep 5 | ||
215 | |||
216 | echo "Running $runscript at $rev..." | ||
217 | timeoutfile=`mktemp` | ||
218 | /usr/bin/time -o $timeoutfile -f "%e\nreal\t%E\nuser\t%Us\nsys\t%Ss\nmaxm\t%Mk" $runscript 2>&1 | tee -a $logfile | ||
219 | exitstatus=$PIPESTATUS | ||
220 | |||
221 | git reset --hard HEAD > /dev/null | ||
222 | popd > /dev/null | ||
223 | |||
224 | timeresult=`head -n1 $timeoutfile` | ||
225 | cat $timeoutfile | tee -a $logfile | ||
226 | rm $timeoutfile | ||
227 | |||
228 | if [ $exitstatus != 0 ] ; then | ||
229 | # Build failed, exit with 125 to tell git bisect run to skip this rev | ||
230 | echo "*** Build failed (exit code $exitstatus), skipping..." | tee -a $logfile | ||
231 | exit 125 | ||
232 | fi | ||
233 | |||
234 | ret=`echo "scale = 2; $timeresult > $timethreshold - $tolerance" | bc` | ||
235 | echo "Returning $ret" | tee -a $logfile | ||
236 | exit $ret | ||
237 | |||
diff --git a/scripts/contrib/test_build_time_worker.sh b/scripts/contrib/test_build_time_worker.sh new file mode 100755 index 0000000000..8e20a9ea7d --- /dev/null +++ b/scripts/contrib/test_build_time_worker.sh | |||
@@ -0,0 +1,37 @@ | |||
#!/bin/bash

# This is an example script to be used in conjunction with test_build_time.sh
#
# FIX: variable expansions are now quoted so the script works when
# TEST_BUILDDIR (or the current directory) contains spaces.

if [ "$TEST_BUILDDIR" = "" ] ; then
    echo "TEST_BUILDDIR is not set"
    exit 1
fi

# Only the basename is used: the build directory is expected to sit directly
# under the current (source tree) directory alongside the env setup script.
buildsubdir=`basename "$TEST_BUILDDIR"`
if [ ! -d "$buildsubdir" ] ; then
    echo "Unable to find build subdir $buildsubdir in current directory"
    exit 1
fi

# Source whichever build environment setup script this tree provides
# (oe-init-build-env for OE-Core/newer Poky, poky-init-build-env for older).
if [ -f oe-init-build-env ] ; then
    . ./oe-init-build-env "$buildsubdir"
elif [ -f poky-init-build-env ] ; then
    . ./poky-init-build-env "$buildsubdir"
else
    echo "Unable to find build environment setup script"
    exit 1
fi

# Pick the image target name appropriate to the age of the metadata.
if [ -f ../meta/recipes-sato/images/core-image-sato.bb ] ; then
    target="core-image-sato"
else
    target="poky-image-sato"
fi

echo "Build started at `date "+%Y-%m-%d %H:%M:%S"`"
echo "bitbake $target"
bitbake $target
ret=$?
echo "Build finished at `date "+%Y-%m-%d %H:%M:%S"`"
exit $ret
37 | |||
diff --git a/scripts/contrib/verify-homepage.py b/scripts/contrib/verify-homepage.py new file mode 100755 index 0000000000..86cc82bca3 --- /dev/null +++ b/scripts/contrib/verify-homepage.py | |||
@@ -0,0 +1,63 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | # This script is used to verify the HOMEPAGE value of recipes. | ||
4 | # The result is influenced by the network environment, since the URL connection timeout defaults to 5 seconds. | ||
5 | |||
6 | import sys | ||
7 | import os | ||
8 | import subprocess | ||
9 | import urllib2 | ||
10 | |||
def search_bitbakepath():
    """Return the absolute path to bitbake's 'lib' directory.

    First looks relative to this script's location (../../bitbake/lib/bb),
    then falls back to scanning each PATH entry for a sibling ../lib/bb.
    Exits with status 1 if bitbake cannot be located.
    """
    scriptdir = os.path.dirname(sys.argv[0])
    # Preferred: bitbake checked out next to this script's tree
    if os.path.exists(os.path.join(scriptdir, '../../bitbake/lib/bb')):
        return os.path.abspath(os.path.join(scriptdir, '../../bitbake/lib'))
    # Fallback: a bitbake/bin directory somewhere in PATH
    for pth in os.environ['PATH'].split(':'):
        if os.path.exists(os.path.join(pth, '../lib/bb')):
            return os.path.abspath(os.path.join(pth, '../lib'))
    sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
    sys.exit(1)
28 | |||
29 | # For importing the following modules | ||
30 | sys.path.insert(0, search_bitbakepath()) | ||
31 | import bb.tinfoil | ||
32 | |||
def wgetHomepage(pn, homepage):
    """Re-check *homepage* with wget --spider; return 1 on failure, 0 on success.

    Emits bb.warn() naming the recipe (*pn*) on failure, so the caller can
    simply sum the return values into a failure count.
    """
    # FIX: pass the command as an argument list with the default shell=False.
    # The original concatenated homepage into a single shell=True command
    # string, so shell metacharacters in a recipe's HOMEPAGE value
    # (&, ;, $(...), spaces, globs) would have been interpreted by the shell.
    result = subprocess.call(['wget', '-q', '-T', '5', '-t', '1', '--spider', homepage])
    if result:
        bb.warn("Failed to verify HOMEPAGE (%s) of %s" % (homepage, pn))
        return 1
    else:
        return 0
40 | |||
def verifyHomepage(bbhandler):
    # Check the HOMEPAGE of every recipe known to the cooker; return the
    # number of HOMEPAGEs that could not be reached.
    #
    # Only one providing recipe file is checked per PN (pkg_pn[pn].pop()).
    # NOTE(review): pop() also mutates the cooker's pkg_pn mapping --
    # presumably acceptable for this one-shot script, but confirm.
    pkg_pn = bbhandler.cooker.recipecache.pkg_pn
    pnlist = sorted(pkg_pn)
    count = 0
    for pn in pnlist:
        fn = pkg_pn[pn].pop()
        # Load the recipe's full datastore (including bbappends) so HOMEPAGE
        # reflects what a real parse would produce.
        data = bb.cache.Cache.loadDataFull(fn, bbhandler.cooker.collection.get_file_appends(fn), bbhandler.config_data)
        homepage = data.getVar("HOMEPAGE")
        if homepage:
            try:
                # First attempt: direct fetch with a 5 second timeout; on any
                # failure fall back to wget via wgetHomepage(), which warns
                # and contributes 1 to the failure count if it also fails.
                urllib2.urlopen(homepage, timeout=5)
            except Exception:
                count = count + wgetHomepage(pn, homepage)
    return count
55 | |||
if __name__=='__main__':
    # Entry point: initialise a tinfoil instance (loads the bitbake
    # configuration and recipe cache), verify every recipe's HOMEPAGE and
    # print a summary of how many failed.
    failcount = 0
    bbhandler = bb.tinfoil.Tinfoil()
    bbhandler.prepare()
    print "Start to verify HOMEPAGE:"
    failcount = verifyHomepage(bbhandler)
    print "finish to verify HOMEPAGE."
    print "Summary: %s failed" % failcount