author     Richard Purdie <richard@openedhand.com>  2006-11-16 15:02:15 +0000
committer  Richard Purdie <richard@openedhand.com>  2006-11-16 15:02:15 +0000
commit     306b7c7a9757ead077363074e7bbac2e5c03e7c5 (patch)
tree       6935017a9af749c46816881c86258f514384ba1c /bitbake/bin
parent     65930a38e415ae4a0182e1cea1be838e0ada50ee (diff)
download   poky-306b7c7a9757ead077363074e7bbac2e5c03e7c5.tar.gz
bitbake: Upgrade from 1.4 -> 1.7.4ish
git-svn-id: https://svn.o-hand.com/repos/poky/trunk@863 311d38ba-8fff-0310-9ca6-ca027cbcb966
Diffstat (limited to 'bitbake/bin')
-rwxr-xr-x  bitbake/bin/bbimage    5
-rwxr-xr-x  bitbake/bin/bitbake  892
-rwxr-xr-x  bitbake/bin/bitdoc     2
3 files changed, 311 insertions, 588 deletions
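
The bulk of the change is in bitbake/bin/bitbake: the old recursive dependency walker in BBCooker (BBParsingStatus, buildProvider, buildRProvider, addRunDeps and friends) is deleted, cook() now resolves targets through bb.taskdata and executes them with bb.runqueue, and logging moves from bare bb.error/bb.note/bb.debug calls to the domain-aware bb.msg API. As an orientation aid only (not part of the commit), here is a minimal sketch of the new build flow, using just the calls visible on the right-hand side of the cook() hunk further below; anything beyond those calls is an assumption:

    # Sketch of the new cook() build path (illustrative; mirrors the diff below)
    import bb
    from bb import data

    def run_build(cooker, pkgs_to_build):
        # Per-build copy of the configuration metadata
        localdata = data.createCopy(cooker.configuration.data)
        bb.data.update_data(localdata)
        bb.data.expandKeys(localdata)

        # Collect providers and the requested tasks into a TaskData store
        taskdata = bb.taskdata.TaskData(cooker.configuration.abort)
        runlist = []
        for k in pkgs_to_build:
            taskdata.add_provider(localdata, cooker.status, k)
            runlist.append([k, "do_%s" % cooker.configuration.cmd])
        taskdata.add_unresolved(localdata, cooker.status)

        # Build and execute the task queue (replaces the per-package recursion)
        rq = bb.runqueue.RunQueue()
        rq.prepare_runqueue(cooker.configuration.data, cooker.status, taskdata, runlist)
        return rq.execute_runqueue(cooker, cooker.configuration.data, cooker.status, taskdata, runlist)
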
diff --git a/bitbake/bin/bbimage b/bitbake/bin/bbimage
index df6caa28ed..9adedbfc63 100755
--- a/bitbake/bin/bbimage
+++ b/bitbake/bin/bbimage
@@ -18,15 +18,16 @@
18 | # Place, Suite 330, Boston, MA 02111-1307 USA. | 18 | # Place, Suite 330, Boston, MA 02111-1307 USA. |
19 | 19 | ||
20 | import sys, os | 20 | import sys, os |
21 | sys.path.append(os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib')) | 21 | sys.path.insert(0,os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib')) |
22 | import bb | 22 | import bb |
23 | from bb import * | 23 | from bb import * |
24 | 24 | ||
25 | __version__ = 1.0 | 25 | __version__ = 1.1 |
26 | type = "jffs2" | 26 | type = "jffs2" |
27 | cfg_bb = data.init() | 27 | cfg_bb = data.init() |
28 | cfg_oespawn = data.init() | 28 | cfg_oespawn = data.init() |
29 | 29 | ||
30 | bb.msg.set_debug_level(0) | ||
30 | 31 | ||
31 | def usage(): | 32 | def usage(): |
32 | print "Usage: bbimage [options ...]" | 33 | print "Usage: bbimage [options ...]" |
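
The new bb.msg.set_debug_level(0) call above is the first appearance of the logging rework that runs through the rest of this commit: direct bb.error/bb.note/bb.debug/fatal calls in the bitbake script are replaced with domain-aware bb.msg equivalents. A hedged sketch of that pattern, using only the calls and domains that occur in this diff (the full bb.msg API is not shown by the commit):

    import bb

    bb.msg.set_debug_level(0)   # replaces the old module-level bbdebug/debug level globals
    bb.msg.set_verbose(True)    # set when configuration.verbose is enabled
    bb.msg.note(1, bb.msg.domain.Collection, "collecting .bb files")
    bb.msg.debug(2, bb.msg.domain.Parsing, "skipping some.bb")
    bb.msg.error(bb.msg.domain.Build, "task stack execution failed")
    # bb.msg.fatal(bb.msg.domain.Parsing, "...") is used where the old code called fatal()
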
diff --git a/bitbake/bin/bitbake b/bitbake/bin/bitbake
index 7fbe7ed5eb..85a0cbc398 100755
--- a/bitbake/bin/bitbake
+++ b/bitbake/bin/bitbake
@@ -7,6 +7,7 @@
7 | # Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer | 7 | # Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer |
8 | # Copyright (C) 2005 Holger Hans Peter Freyther | 8 | # Copyright (C) 2005 Holger Hans Peter Freyther |
9 | # Copyright (C) 2005 ROAD GmbH | 9 | # Copyright (C) 2005 ROAD GmbH |
10 | # Copyright (C) 2006 Richard Purdie | ||
10 | # | 11 | # |
11 | # This program is free software; you can redistribute it and/or modify it under | 12 | # This program is free software; you can redistribute it and/or modify it under |
12 | # the terms of the GNU General Public License as published by the Free Software | 13 | # the terms of the GNU General Public License as published by the Free Software |
@@ -24,136 +25,13 @@
24 | import sys, os, getopt, glob, copy, os.path, re, time | 25 | import sys, os, getopt, glob, copy, os.path, re, time |
25 | sys.path.insert(0,os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib')) | 26 | sys.path.insert(0,os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib')) |
26 | import bb | 27 | import bb |
27 | from bb import utils, data, parse, debug, event, fatal, cache | 28 | from bb import utils, data, parse, event, cache, providers, taskdata, runqueue |
28 | from sets import Set | 29 | from sets import Set |
29 | import itertools, optparse | 30 | import itertools, optparse |
30 | 31 | ||
31 | parsespin = itertools.cycle( r'|/-\\' ) | 32 | parsespin = itertools.cycle( r'|/-\\' ) |
32 | bbdebug = 0 | ||
33 | |||
34 | __version__ = "1.4.3" | ||
35 | |||
36 | #============================================================================# | ||
37 | # BBParsingStatus | ||
38 | #============================================================================# | ||
39 | class BBParsingStatus: | ||
40 | """ | ||
41 | The initial idea for this status class is to use the data when it is | ||
42 | already loaded instead of loading it from various place over and over | ||
43 | again. | ||
44 | """ | ||
45 | |||
46 | def __init__(self): | ||
47 | self.providers = {} | ||
48 | self.rproviders = {} | ||
49 | self.packages = {} | ||
50 | self.packages_dynamic = {} | ||
51 | self.bbfile_priority = {} | ||
52 | self.bbfile_config_priorities = [] | ||
53 | self.ignored_dependencies = None | ||
54 | self.possible_world = [] | ||
55 | self.world_target = Set() | ||
56 | self.pkg_pn = {} | ||
57 | self.pkg_fn = {} | ||
58 | self.pkg_pvpr = {} | ||
59 | self.pkg_dp = {} | ||
60 | self.pn_provides = {} | ||
61 | self.all_depends = Set() | ||
62 | self.build_all = {} | ||
63 | self.rundeps = {} | ||
64 | self.runrecs = {} | ||
65 | self.stamp = {} | ||
66 | |||
67 | def handle_bb_data(self, file_name, bb_cache, cached): | ||
68 | """ | ||
69 | We will fill the dictionaries with the stuff we | ||
70 | need for building the tree more fast | ||
71 | """ | ||
72 | |||
73 | pn = bb_cache.getVar('PN', file_name, True) | ||
74 | pv = bb_cache.getVar('PV', file_name, True) | ||
75 | pr = bb_cache.getVar('PR', file_name, True) | ||
76 | dp = int(bb_cache.getVar('DEFAULT_PREFERENCE', file_name, True) or "0") | ||
77 | provides = Set([pn] + (bb_cache.getVar("PROVIDES", file_name, True) or "").split()) | ||
78 | depends = (bb_cache.getVar("DEPENDS", file_name, True) or "").split() | ||
79 | packages = (bb_cache.getVar('PACKAGES', file_name, True) or "").split() | ||
80 | packages_dynamic = (bb_cache.getVar('PACKAGES_DYNAMIC', file_name, True) or "").split() | ||
81 | rprovides = (bb_cache.getVar("RPROVIDES", file_name, True) or "").split() | ||
82 | |||
83 | # build PackageName to FileName lookup table | ||
84 | if pn not in self.pkg_pn: | ||
85 | self.pkg_pn[pn] = [] | ||
86 | self.pkg_pn[pn].append(file_name) | ||
87 | |||
88 | self.build_all[file_name] = int(bb_cache.getVar('BUILD_ALL_DEPS', file_name, True) or "0") | ||
89 | self.stamp[file_name] = bb_cache.getVar('STAMP', file_name, True) | ||
90 | |||
91 | # build FileName to PackageName lookup table | ||
92 | self.pkg_fn[file_name] = pn | ||
93 | self.pkg_pvpr[file_name] = (pv,pr) | ||
94 | self.pkg_dp[file_name] = dp | ||
95 | |||
96 | # Build forward and reverse provider hashes | ||
97 | # Forward: virtual -> [filenames] | ||
98 | # Reverse: PN -> [virtuals] | ||
99 | if pn not in self.pn_provides: | ||
100 | self.pn_provides[pn] = Set() | ||
101 | self.pn_provides[pn] |= provides | ||
102 | |||
103 | for provide in provides: | ||
104 | if provide not in self.providers: | ||
105 | self.providers[provide] = [] | ||
106 | self.providers[provide].append(file_name) | ||
107 | |||
108 | for dep in depends: | ||
109 | self.all_depends.add(dep) | ||
110 | |||
111 | # Build reverse hash for PACKAGES, so runtime dependencies | ||
112 | # can be be resolved (RDEPENDS, RRECOMMENDS etc.) | ||
113 | for package in packages: | ||
114 | if not package in self.packages: | ||
115 | self.packages[package] = [] | ||
116 | self.packages[package].append(file_name) | ||
117 | rprovides += (bb_cache.getVar("RPROVIDES_%s" % package, file_name, 1) or "").split() | ||
118 | |||
119 | for package in packages_dynamic: | ||
120 | if not package in self.packages_dynamic: | ||
121 | self.packages_dynamic[package] = [] | ||
122 | self.packages_dynamic[package].append(file_name) | ||
123 | |||
124 | for rprovide in rprovides: | ||
125 | if not rprovide in self.rproviders: | ||
126 | self.rproviders[rprovide] = [] | ||
127 | self.rproviders[rprovide].append(file_name) | ||
128 | |||
129 | # Build hash of runtime depeneds and rececommends | ||
130 | |||
131 | def add_dep(deplist, deps): | ||
132 | for dep in deps: | ||
133 | if not dep in deplist: | ||
134 | deplist[dep] = "" | ||
135 | |||
136 | if not file_name in self.rundeps: | ||
137 | self.rundeps[file_name] = {} | ||
138 | if not file_name in self.runrecs: | ||
139 | self.runrecs[file_name] = {} | ||
140 | |||
141 | for package in packages + [pn]: | ||
142 | if not package in self.rundeps[file_name]: | ||
143 | self.rundeps[file_name][package] = {} | ||
144 | if not package in self.runrecs[file_name]: | ||
145 | self.runrecs[file_name][package] = {} | ||
146 | |||
147 | add_dep(self.rundeps[file_name][package], bb.utils.explode_deps(bb_cache.getVar('RDEPENDS', file_name, True) or "")) | ||
148 | add_dep(self.runrecs[file_name][package], bb.utils.explode_deps(bb_cache.getVar('RRECOMMENDS', file_name, True) or "")) | ||
149 | add_dep(self.rundeps[file_name][package], bb.utils.explode_deps(bb_cache.getVar("RDEPENDS_%s" % package, file_name, True) or "")) | ||
150 | add_dep(self.runrecs[file_name][package], bb.utils.explode_deps(bb_cache.getVar("RRECOMMENDS_%s" % package, file_name, True) or "")) | ||
151 | |||
152 | # Collect files we may need for possible world-dep | ||
153 | # calculations | ||
154 | if not bb_cache.getVar('BROKEN', file_name, True) and not bb_cache.getVar('EXCLUDE_FROM_WORLD', file_name, True): | ||
155 | self.possible_world.append(file_name) | ||
156 | 33 | ||
34 | __version__ = "1.7.4" | ||
157 | 35 | ||
158 | #============================================================================# | 36 | #============================================================================# |
159 | # BBStatistics | 37 | # BBStatistics |
@@ -198,207 +76,63 @@ class BBCooker:
198 | Manages one bitbake build run | 76 | Manages one bitbake build run |
199 | """ | 77 | """ |
200 | 78 | ||
201 | ParsingStatus = BBParsingStatus # make it visible from the shell | ||
202 | Statistics = BBStatistics # make it visible from the shell | 79 | Statistics = BBStatistics # make it visible from the shell |
203 | 80 | ||
204 | def __init__( self ): | 81 | def __init__( self ): |
205 | self.build_cache_fail = [] | 82 | self.build_cache_fail = [] |
206 | self.build_cache = [] | 83 | self.build_cache = [] |
207 | self.rbuild_cache = [] | ||
208 | self.building_list = [] | ||
209 | self.build_path = [] | ||
210 | self.consider_msgs_cache = [] | ||
211 | self.preferred = {} | ||
212 | self.stats = BBStatistics() | 84 | self.stats = BBStatistics() |
213 | self.status = None | 85 | self.status = None |
214 | 86 | ||
215 | self.cache = None | 87 | self.cache = None |
216 | self.bb_cache = None | 88 | self.bb_cache = None |
217 | 89 | ||
218 | def tryBuildPackage( self, fn, item, the_data ): | 90 | def tryBuildPackage(self, fn, item, task, the_data, build_depends): |
219 | """Build one package""" | 91 | """ |
92 | Build one task of a package, optionally build following task depends | ||
93 | """ | ||
220 | bb.event.fire(bb.event.PkgStarted(item, the_data)) | 94 | bb.event.fire(bb.event.PkgStarted(item, the_data)) |
221 | try: | 95 | try: |
222 | self.stats.attempt += 1 | 96 | self.stats.attempt += 1 |
223 | if self.configuration.force: | 97 | if self.configuration.force: |
224 | bb.data.setVarFlag('do_%s' % self.configuration.cmd, 'force', 1, the_data) | 98 | bb.data.setVarFlag('do_%s' % task, 'force', 1, the_data) |
99 | if not build_depends: | ||
100 | bb.data.setVarFlag('do_%s' % task, 'dontrundeps', 1, the_data) | ||
225 | if not self.configuration.dry_run: | 101 | if not self.configuration.dry_run: |
226 | bb.build.exec_task('do_%s' % self.configuration.cmd, the_data) | 102 | bb.build.exec_task('do_%s' % task, the_data) |
227 | bb.event.fire(bb.event.PkgSucceeded(item, the_data)) | 103 | bb.event.fire(bb.event.PkgSucceeded(item, the_data)) |
228 | self.build_cache.append(fn) | 104 | self.build_cache.append(fn) |
229 | return True | 105 | return True |
230 | except bb.build.FuncFailed: | 106 | except bb.build.FuncFailed: |
231 | self.stats.fail += 1 | 107 | self.stats.fail += 1 |
232 | bb.error("task stack execution failed") | 108 | bb.msg.error(bb.msg.domain.Build, "task stack execution failed") |
233 | bb.event.fire(bb.event.PkgFailed(item, the_data)) | 109 | bb.event.fire(bb.event.PkgFailed(item, the_data)) |
234 | self.build_cache_fail.append(fn) | 110 | self.build_cache_fail.append(fn) |
235 | raise | 111 | raise |
236 | except bb.build.EventException, e: | 112 | except bb.build.EventException, e: |
237 | self.stats.fail += 1 | 113 | self.stats.fail += 1 |
238 | event = e.args[1] | 114 | event = e.args[1] |
239 | bb.error("%s event exception, aborting" % bb.event.getName(event)) | 115 | bb.msg.error(bb.msg.domain.Build, "%s event exception, aborting" % bb.event.getName(event)) |
240 | bb.event.fire(bb.event.PkgFailed(item, the_data)) | 116 | bb.event.fire(bb.event.PkgFailed(item, the_data)) |
241 | self.build_cache_fail.append(fn) | 117 | self.build_cache_fail.append(fn) |
242 | raise | 118 | raise |
243 | 119 | ||
244 | def tryBuild( self, fn, virtual , buildAllDeps , build_depends = []): | 120 | def tryBuild( self, fn, build_depends): |
245 | """ | 121 | """ |
246 | Build a provider and its dependencies. | 122 | Build a provider and its dependencies. |
247 | build_depends is a list of previous build dependencies (not runtime) | 123 | build_depends is a list of previous build dependencies (not runtime) |
248 | If build_depends is empty, we're dealing with a runtime depends | 124 | If build_depends is empty, we're dealing with a runtime depends |
249 | """ | 125 | """ |
250 | 126 | ||
251 | the_data = self.bb_cache.loadDataFull(fn, self) | 127 | the_data = self.bb_cache.loadDataFull(fn, self.configuration.data) |
252 | |||
253 | # Only follow all (runtime) dependencies if doing a build | ||
254 | if not buildAllDeps and self.configuration.cmd is "build": | ||
255 | buildAllDeps = self.status.build_all[fn] | ||
256 | |||
257 | # Error on build time dependency loops | ||
258 | if build_depends and build_depends.count(fn) > 1: | ||
259 | bb.error("%s depends on itself (eventually)" % fn) | ||
260 | bb.error("upwards chain is: %s" % (" -> ".join(self.build_path))) | ||
261 | return False | ||
262 | |||
263 | # See if this is a runtime dependency we've already built | ||
264 | # Or a build dependency being handled in a different build chain | ||
265 | if fn in self.building_list: | ||
266 | return self.addRunDeps(fn, virtual , buildAllDeps) | ||
267 | 128 | ||
268 | item = self.status.pkg_fn[fn] | 129 | item = self.status.pkg_fn[fn] |
269 | 130 | ||
270 | self.building_list.append(fn) | 131 | if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data) and not self.configuration.force: |
271 | 132 | self.build_cache.append(fn) | |
272 | pathstr = "%s (%s)" % (item, virtual) | 133 | return True |
273 | self.build_path.append(pathstr) | ||
274 | |||
275 | depends_list = (bb.data.getVar('DEPENDS', the_data, True) or "").split() | ||
276 | |||
277 | if self.configuration.verbose: | ||
278 | bb.note("current path: %s" % (" -> ".join(self.build_path))) | ||
279 | bb.note("dependencies for %s are: %s" % (item, " ".join(depends_list))) | ||
280 | |||
281 | try: | ||
282 | failed = False | ||
283 | |||
284 | depcmd = self.configuration.cmd | ||
285 | bbdepcmd = bb.data.getVarFlag('do_%s' % self.configuration.cmd, 'bbdepcmd', the_data) | ||
286 | if bbdepcmd is not None: | ||
287 | if bbdepcmd == "": | ||
288 | depcmd = None | ||
289 | else: | ||
290 | depcmd = bbdepcmd | ||
291 | |||
292 | if depcmd: | ||
293 | oldcmd = self.configuration.cmd | ||
294 | self.configuration.cmd = depcmd | ||
295 | |||
296 | for dependency in depends_list: | ||
297 | if dependency in self.status.ignored_dependencies: | ||
298 | continue | ||
299 | if not depcmd: | ||
300 | continue | ||
301 | if self.buildProvider( dependency , buildAllDeps , build_depends ) == 0: | ||
302 | bb.error("dependency %s (for %s) not satisfied" % (dependency,item)) | ||
303 | failed = True | ||
304 | if self.configuration.abort: | ||
305 | break | ||
306 | |||
307 | if depcmd: | ||
308 | self.configuration.cmd = oldcmd | ||
309 | |||
310 | if failed: | ||
311 | self.stats.deps += 1 | ||
312 | return False | ||
313 | |||
314 | if not self.addRunDeps(fn, virtual , buildAllDeps): | ||
315 | return False | ||
316 | |||
317 | if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data): | ||
318 | self.build_cache.append(fn) | ||
319 | return True | ||
320 | |||
321 | return self.tryBuildPackage( fn, item, the_data ) | ||
322 | |||
323 | finally: | ||
324 | self.building_list.remove(fn) | ||
325 | self.build_path.remove(pathstr) | ||
326 | |||
327 | def findBestProvider( self, pn, pkg_pn = None): | ||
328 | """ | ||
329 | If there is a PREFERRED_VERSION, find the highest-priority bbfile | ||
330 | providing that version. If not, find the latest version provided by | ||
331 | an bbfile in the highest-priority set. | ||
332 | """ | ||
333 | if not pkg_pn: | ||
334 | pkg_pn = self.status.pkg_pn | ||
335 | |||
336 | files = pkg_pn[pn] | ||
337 | priorities = {} | ||
338 | for f in files: | ||
339 | priority = self.status.bbfile_priority[f] | ||
340 | if priority not in priorities: | ||
341 | priorities[priority] = [] | ||
342 | priorities[priority].append(f) | ||
343 | p_list = priorities.keys() | ||
344 | p_list.sort(lambda a, b: a - b) | ||
345 | tmp_pn = [] | ||
346 | for p in p_list: | ||
347 | tmp_pn = [priorities[p]] + tmp_pn | ||
348 | |||
349 | preferred_file = None | ||
350 | |||
351 | localdata = data.createCopy(self.configuration.data) | ||
352 | bb.data.setVar('OVERRIDES', "%s:%s" % (pn, data.getVar('OVERRIDES', localdata)), localdata) | ||
353 | bb.data.update_data(localdata) | ||
354 | |||
355 | preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True) | ||
356 | if preferred_v: | ||
357 | m = re.match('(.*)_(.*)', preferred_v) | ||
358 | if m: | ||
359 | preferred_v = m.group(1) | ||
360 | preferred_r = m.group(2) | ||
361 | else: | ||
362 | preferred_r = None | ||
363 | |||
364 | for file_set in tmp_pn: | ||
365 | for f in file_set: | ||
366 | pv,pr = self.status.pkg_pvpr[f] | ||
367 | if preferred_v == pv and (preferred_r == pr or preferred_r == None): | ||
368 | preferred_file = f | ||
369 | preferred_ver = (pv, pr) | ||
370 | break | ||
371 | if preferred_file: | ||
372 | break; | ||
373 | if preferred_r: | ||
374 | pv_str = '%s-%s' % (preferred_v, preferred_r) | ||
375 | else: | ||
376 | pv_str = preferred_v | ||
377 | if preferred_file is None: | ||
378 | bb.note("preferred version %s of %s not available" % (pv_str, pn)) | ||
379 | else: | ||
380 | bb.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s" % (preferred_file, pv_str, pn)) | ||
381 | |||
382 | del localdata | ||
383 | |||
384 | # get highest priority file set | ||
385 | files = tmp_pn[0] | ||
386 | latest = None | ||
387 | latest_p = 0 | ||
388 | latest_f = None | ||
389 | for file_name in files: | ||
390 | pv,pr = self.status.pkg_pvpr[file_name] | ||
391 | dp = self.status.pkg_dp[file_name] | ||
392 | |||
393 | if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pv, pr)) < 0)) or (dp > latest_p): | ||
394 | latest = (pv, pr) | ||
395 | latest_f = file_name | ||
396 | latest_p = dp | ||
397 | if preferred_file is None: | ||
398 | preferred_file = latest_f | ||
399 | preferred_ver = latest | ||
400 | 134 | ||
401 | return (latest,latest_f,preferred_ver, preferred_file) | 135 | return self.tryBuildPackage(fn, item, self.configuration.cmd, the_data, build_depends) |
402 | 136 | ||
403 | def showVersions( self ): | 137 | def showVersions( self ): |
404 | pkg_pn = self.status.pkg_pn | 138 | pkg_pn = self.status.pkg_pn |
@@ -407,7 +141,7 @@ class BBCooker:
407 | 141 | ||
408 | # Sort by priority | 142 | # Sort by priority |
409 | for pn in pkg_pn.keys(): | 143 | for pn in pkg_pn.keys(): |
410 | (last_ver,last_file,pref_ver,pref_file) = self.findBestProvider(pn) | 144 | (last_ver,last_file,pref_ver,pref_file) = bb.providers.findBestProvider(pn, self.configuration.data, self.status) |
411 | preferred_versions[pn] = (pref_ver, pref_file) | 145 | preferred_versions[pn] = (pref_ver, pref_file) |
412 | latest_versions[pn] = (last_ver, last_file) | 146 | latest_versions[pn] = (last_ver, last_file) |
413 | 147 | ||
@@ -425,7 +159,7 @@ class BBCooker:
425 | 159 | ||
426 | print "%-30s %20s %20s" % (p, latest[0][0] + "-" + latest[0][1], | 160 | print "%-30s %20s %20s" % (p, latest[0][0] + "-" + latest[0][1], |
427 | prefstr) | 161 | prefstr) |
428 | 162 | ||
429 | 163 | ||
430 | def showEnvironment( self ): | 164 | def showEnvironment( self ): |
431 | """Show the outer or per-package environment""" | 165 | """Show the outer or per-package environment""" |
@@ -433,268 +167,190 @@ class BBCooker:
433 | self.cb = None | 167 | self.cb = None |
434 | self.bb_cache = bb.cache.init(self) | 168 | self.bb_cache = bb.cache.init(self) |
435 | try: | 169 | try: |
436 | self.configuration.data = self.bb_cache.loadDataFull(self.configuration.buildfile, self) | 170 | self.configuration.data = self.bb_cache.loadDataFull(self.configuration.buildfile, self.configuration.data) |
437 | except IOError, e: | 171 | except IOError, e: |
438 | fatal("Unable to read %s: %s" % ( self.configuration.buildfile, e )) | 172 | bb.msg.fatal(bb.msg.domain.Parsing, "Unable to read %s: %s" % ( self.configuration.buildfile, e )) |
439 | except Exception, e: | 173 | except Exception, e: |
440 | fatal("%s" % e) | 174 | bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e) |
441 | # emit variables and shell functions | 175 | # emit variables and shell functions |
442 | try: | 176 | try: |
443 | data.update_data( self.configuration.data ) | 177 | data.update_data( self.configuration.data ) |
444 | data.emit_env(sys.__stdout__, self.configuration.data, True) | 178 | data.emit_env(sys.__stdout__, self.configuration.data, True) |
445 | except Exception, e: | 179 | except Exception, e: |
446 | fatal("%s" % e) | 180 | bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e) |
447 | # emit the metadata which isnt valid shell | 181 | # emit the metadata which isnt valid shell |
182 | data.expandKeys( self.configuration.data ) | ||
448 | for e in self.configuration.data.keys(): | 183 | for e in self.configuration.data.keys(): |
449 | if data.getVarFlag( e, 'python', self.configuration.data ): | 184 | if data.getVarFlag( e, 'python', self.configuration.data ): |
450 | sys.__stdout__.write("\npython %s () {\n%s}\n" % (e, data.getVar(e, self.configuration.data, 1))) | 185 | sys.__stdout__.write("\npython %s () {\n%s}\n" % (e, data.getVar(e, self.configuration.data, 1))) |
451 | 186 | ||
452 | def filterProviders(self, providers, item): | 187 | def generateDotGraph( self, pkgs_to_build, ignore_deps ): |
453 | """ | 188 | """ |
454 | Take a list of providers and filter/reorder according to the | 189 | Generate two graphs one for the DEPENDS and RDEPENDS. The current |
455 | environment variables and previous build results | 190 | implementation creates crappy graphs ;) |
456 | """ | ||
457 | eligible = [] | ||
458 | preferred_versions = {} | ||
459 | |||
460 | # Collate providers by PN | ||
461 | pkg_pn = {} | ||
462 | for p in providers: | ||
463 | pn = self.status.pkg_fn[p] | ||
464 | if pn not in pkg_pn: | ||
465 | pkg_pn[pn] = [] | ||
466 | pkg_pn[pn].append(p) | ||
467 | |||
468 | bb.debug(1, "providers for %s are: %s" % (item, pkg_pn.keys())) | ||
469 | |||
470 | for pn in pkg_pn.keys(): | ||
471 | preferred_versions[pn] = self.findBestProvider(pn, pkg_pn)[2:4] | ||
472 | eligible.append(preferred_versions[pn][1]) | ||
473 | |||
474 | for p in eligible: | ||
475 | if p in self.build_cache_fail: | ||
476 | bb.debug(1, "rejecting already-failed %s" % p) | ||
477 | eligible.remove(p) | ||
478 | |||
479 | if len(eligible) == 0: | ||
480 | bb.error("no eligible providers for %s" % item) | ||
481 | return 0 | ||
482 | 191 | ||
483 | # look to see if one of them is already staged, or marked as preferred. | 192 | pkgs_to_build A list of packages that needs to be built |
484 | # if so, bump it to the head of the queue | 193 | ignore_deps A list of names where processing of dependencies |
485 | for p in providers: | 194 | should be stopped. e.g. dependencies that get |
486 | pn = self.status.pkg_fn[p] | ||
487 | pv, pr = self.status.pkg_pvpr[p] | ||
488 | |||
489 | stamp = '%s.do_populate_staging' % self.status.stamp[p] | ||
490 | if os.path.exists(stamp): | ||
491 | (newvers, fn) = preferred_versions[pn] | ||
492 | if not fn in eligible: | ||
493 | # package was made ineligible by already-failed check | ||
494 | continue | ||
495 | oldver = "%s-%s" % (pv, pr) | ||
496 | newver = '-'.join(newvers) | ||
497 | if (newver != oldver): | ||
498 | extra_chat = "%s (%s) already staged but upgrading to %s to satisfy %s" % (pn, oldver, newver, item) | ||
499 | else: | ||
500 | extra_chat = "Selecting already-staged %s (%s) to satisfy %s" % (pn, oldver, item) | ||
501 | if self.configuration.verbose: | ||
502 | bb.note("%s" % extra_chat) | ||
503 | eligible.remove(fn) | ||
504 | eligible = [fn] + eligible | ||
505 | discriminated = True | ||
506 | break | ||
507 | |||
508 | return eligible | ||
509 | |||
510 | def buildProvider( self, item , buildAllDeps , build_depends = [] ): | ||
511 | """ | 195 | """ |
512 | Build something to provide a named build requirement | ||
513 | (takes item names from DEPENDS namespace) | ||
514 | """ | ||
515 | |||
516 | fn = None | ||
517 | discriminated = False | ||
518 | |||
519 | if not item in self.status.providers: | ||
520 | bb.error("Nothing provides dependency %s" % item) | ||
521 | bb.event.fire(bb.event.NoProvider(item,self.configuration.data)) | ||
522 | return 0 | ||
523 | 196 | ||
524 | all_p = self.status.providers[item] | 197 | def myFilterProvider( providers, item): |
525 | 198 | """ | |
526 | for p in all_p: | 199 | Take a list of providers and filter according to environment |
527 | if p in self.build_cache: | 200 | variables. In contrast to filterProviders we do not discriminate |
528 | bb.debug(1, "already built %s in this run\n" % p) | 201 | and take PREFERRED_PROVIDER into account. |
529 | return 1 | 202 | """ |
530 | 203 | eligible = [] | |
531 | eligible = self.filterProviders(all_p, item) | 204 | preferred_versions = {} |
205 | |||
206 | # Collate providers by PN | ||
207 | pkg_pn = {} | ||
208 | for p in providers: | ||
209 | pn = self.status.pkg_fn[p] | ||
210 | if pn not in pkg_pn: | ||
211 | pkg_pn[pn] = [] | ||
212 | pkg_pn[pn].append(p) | ||
532 | 213 | ||
533 | if not eligible: | 214 | bb.msg.debug(1, bb.msg.domain.Provider, "providers for %s are: %s" % (item, pkg_pn.keys())) |
534 | return 0 | ||
535 | 215 | ||
536 | prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, self.configuration.data, 1) | 216 | for pn in pkg_pn.keys(): |
537 | if prefervar: | 217 | preferred_versions[pn] = bb.providers.findBestProvider(pn, self.configuration.data, self.status, pkg_pn)[2:4] |
538 | self.preferred[item] = prefervar | 218 | eligible.append(preferred_versions[pn][1]) |
539 | 219 | ||
540 | if item in self.preferred: | ||
541 | for p in eligible: | 220 | for p in eligible: |
542 | pn = self.status.pkg_fn[p] | 221 | if p in self.build_cache_fail: |
543 | if self.preferred[item] == pn: | 222 | bb.msg.debug(1, bb.msg.domain.Provider, "rejecting already-failed %s" % p) |
544 | if self.configuration.verbose: | ||
545 | bb.note("selecting %s to satisfy %s due to PREFERRED_PROVIDERS" % (pn, item)) | ||
546 | eligible.remove(p) | 223 | eligible.remove(p) |
547 | eligible = [p] + eligible | ||
548 | discriminated = True | ||
549 | break | ||
550 | 224 | ||
551 | if len(eligible) > 1 and discriminated == False: | 225 | if len(eligible) == 0: |
552 | if item not in self.consider_msgs_cache: | 226 | bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item) |
553 | providers_list = [] | 227 | return 0 |
554 | for fn in eligible: | ||
555 | providers_list.append(self.status.pkg_fn[fn]) | ||
556 | bb.note("multiple providers are available (%s);" % ", ".join(providers_list)) | ||
557 | bb.note("consider defining PREFERRED_PROVIDER_%s" % item) | ||
558 | bb.event.fire(bb.event.MultipleProviders(item,providers_list,self.configuration.data)) | ||
559 | self.consider_msgs_cache.append(item) | ||
560 | 228 | ||
229 | prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, self.configuration.data, 1) | ||
561 | 230 | ||
562 | # run through the list until we find one that we can build | 231 | # try the preferred provider first |
563 | for fn in eligible: | 232 | if prefervar: |
564 | bb.debug(2, "selecting %s to satisfy %s" % (fn, item)) | 233 | for p in eligible: |
565 | if self.tryBuild(fn, item, buildAllDeps, build_depends + [fn]): | 234 | if prefervar == self.status.pkg_fn[p]: |
566 | return 1 | 235 | bb.msg.note(1, bb.msg.domain.Provider, "Selecting PREFERRED_PROVIDER %s" % prefervar) |
236 | eligible.remove(p) | ||
237 | eligible = [p] + eligible | ||
567 | 238 | ||
568 | bb.note("no buildable providers for %s" % item) | 239 | return eligible |
569 | bb.event.fire(bb.event.NoProvider(item,self.configuration.data)) | ||
570 | return 0 | ||
571 | 240 | ||
572 | def buildRProvider( self, item , buildAllDeps ): | ||
573 | """ | ||
574 | Build something to provide a named runtime requirement | ||
575 | (takes item names from RDEPENDS/PACKAGES namespace) | ||
576 | """ | ||
577 | 241 | ||
578 | fn = None | 242 | # try to avoid adding the same rdepends over an over again |
579 | all_p = [] | 243 | seen_depends = [] |
580 | discriminated = False | 244 | seen_rdepends = [] |
581 | 245 | ||
582 | if not buildAllDeps: | ||
583 | return True | ||
584 | 246 | ||
585 | all_p = self.getProvidersRun(item) | 247 | def add_depends(package_list): |
248 | """ | ||
249 | Add all depends of all packages from this list | ||
250 | """ | ||
251 | for package in package_list: | ||
252 | if package in seen_depends or package in ignore_deps: | ||
253 | continue | ||
586 | 254 | ||
587 | if not all_p: | 255 | seen_depends.append( package ) |
588 | bb.error("Nothing provides runtime dependency %s" % (item)) | 256 | if not package in self.status.providers: |
589 | bb.event.fire(bb.event.NoProvider(item,self.configuration.data,runtime=True)) | 257 | """ |
590 | return False | 258 | We have not seen this name -> error in |
259 | dependency handling | ||
260 | """ | ||
261 | bb.msg.note(1, bb.msg.domain.Depends, "ERROR with provider: %(package)s" % vars() ) | ||
262 | print >> depends_file, '"%(package)s" -> ERROR' % vars() | ||
263 | continue | ||
591 | 264 | ||
592 | for p in all_p: | 265 | # get all providers for this package |
593 | if p in self.rbuild_cache: | 266 | providers = self.status.providers[package] |
594 | bb.debug(2, "Already built %s providing runtime %s\n" % (p,item)) | ||
595 | return True | ||
596 | if p in self.build_cache: | ||
597 | bb.debug(2, "Already built %s but adding any further RDEPENDS for %s\n" % (p, item)) | ||
598 | return self.addRunDeps(p, item , buildAllDeps) | ||
599 | 267 | ||
600 | eligible = self.filterProviders(all_p, item) | 268 | # now let us find the bestProvider for it |
601 | if not eligible: | 269 | fn = myFilterProvider(providers, package)[0] |
602 | return 0 | ||
603 | 270 | ||
604 | preferred = [] | 271 | depends = bb.utils.explode_deps(self.bb_cache.getVar('DEPENDS', fn, True) or "") |
605 | for p in eligible: | 272 | version = self.bb_cache.getVar('PV', fn, True ) + '-' + self.bb_cache.getVar('PR', fn, True) |
606 | pn = self.status.pkg_fn[p] | 273 | add_depends ( depends ) |
607 | provides = self.status.pn_provides[pn] | ||
608 | for provide in provides: | ||
609 | prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, self.configuration.data, 1) | ||
610 | if prefervar == pn: | ||
611 | if self.configuration.verbose: | ||
612 | bb.note("selecting %s to satisfy runtime %s due to PREFERRED_PROVIDERS" % (pn, item)) | ||
613 | eligible.remove(p) | ||
614 | eligible = [p] + eligible | ||
615 | preferred.append(p) | ||
616 | |||
617 | if len(eligible) > 1 and len(preferred) == 0: | ||
618 | if item not in self.consider_msgs_cache: | ||
619 | providers_list = [] | ||
620 | for fn in eligible: | ||
621 | providers_list.append(self.status.pkg_fn[fn]) | ||
622 | bb.note("multiple providers are available (%s);" % ", ".join(providers_list)) | ||
623 | bb.note("consider defining a PREFERRED_PROVIDER to match runtime %s" % item) | ||
624 | bb.event.fire(bb.event.MultipleProviders(item,providers_list,self.configuration.data,runtime=True)) | ||
625 | self.consider_msgs_cache.append(item) | ||
626 | |||
627 | if len(preferred) > 1: | ||
628 | if item not in self.consider_msgs_cache: | ||
629 | providers_list = [] | ||
630 | for fn in preferred: | ||
631 | providers_list.append(self.status.pkg_fn[fn]) | ||
632 | bb.note("multiple preferred providers are available (%s);" % ", ".join(providers_list)) | ||
633 | bb.note("consider defining only one PREFERRED_PROVIDER to match runtime %s" % item) | ||
634 | bb.event.fire(bb.event.MultipleProviders(item,providers_list,self.configuration.data,runtime=True)) | ||
635 | self.consider_msgs_cache.append(item) | ||
636 | |||
637 | # run through the list until we find one that we can build | ||
638 | for fn in eligible: | ||
639 | bb.debug(2, "selecting %s to satisfy runtime %s" % (fn, item)) | ||
640 | if self.tryBuild(fn, item, buildAllDeps): | ||
641 | return True | ||
642 | |||
643 | bb.error("No buildable providers for runtime %s" % item) | ||
644 | bb.event.fire(bb.event.NoProvider(item,self.configuration.data)) | ||
645 | return False | ||
646 | |||
647 | def getProvidersRun(self, rdepend): | ||
648 | """ | ||
649 | Return any potential providers of runtime rdepend | ||
650 | """ | ||
651 | rproviders = [] | ||
652 | 274 | ||
653 | if rdepend in self.status.rproviders: | 275 | # now create the node |
654 | rproviders += self.status.rproviders[rdepend] | 276 | print >> depends_file, '"%(package)s" [label="%(package)s\\n%(version)s"]' % vars() |
655 | 277 | ||
656 | if rdepend in self.status.packages: | 278 | depends = filter( (lambda x: x not in ignore_deps), depends ) |
657 | rproviders += self.status.packages[rdepend] | 279 | for depend in depends: |
280 | print >> depends_file, '"%(package)s" -> "%(depend)s"' % vars() | ||
658 | 281 | ||
659 | if rproviders: | ||
660 | return rproviders | ||
661 | 282 | ||
662 | # Only search dynamic packages if we can't find anything in other variables | 283 | def add_all_depends( the_depends, the_rdepends ): |
663 | for pattern in self.status.packages_dynamic: | 284 | """ |
664 | regexp = re.compile(pattern) | 285 | Add both DEPENDS and RDEPENDS. RDEPENDS will get dashed |
665 | if regexp.match(rdepend): | 286 | lines |
666 | rproviders += self.status.packages_dynamic[pattern] | 287 | """ |
288 | package_list = the_depends + the_rdepends | ||
289 | for package in package_list: | ||
290 | if package in seen_rdepends or package in ignore_deps: | ||
291 | continue | ||
667 | 292 | ||
668 | return rproviders | 293 | seen_rdepends.append( package ) |
294 | |||
295 | # Let us find out if the package is a DEPENDS or RDEPENDS | ||
296 | # and we will set 'providers' with the avilable providers | ||
297 | # for the package. | ||
298 | if package in the_depends: | ||
299 | if not package in self.status.providers: | ||
300 | bb.msg.note(1, bb.msg.domain.Depends, "ERROR with provider: %(package)s" % vars() ) | ||
301 | print >> alldepends_file, '"%(package)s" -> ERROR' % vars() | ||
302 | continue | ||
303 | |||
304 | providers = self.status.providers[package] | ||
305 | elif package in the_rdepends: | ||
306 | if len(bb.providers.getRuntimeProviders(self.status, package)) == 0: | ||
307 | bb.msg.note(1, bb.msg.domain.Depends, "ERROR with rprovider: %(package)s" % vars() ) | ||
308 | print >> alldepends_file, '"%(package)s" -> ERROR [style="dashed"]' % vars() | ||
309 | continue | ||
310 | |||
311 | providers = bb.providers.getRuntimeProviders(self.status, package) | ||
312 | else: | ||
313 | # something went wrong... | ||
314 | print "Complete ERROR! %s" % package | ||
315 | continue | ||
669 | 316 | ||
670 | def addRunDeps(self , fn, item , buildAllDeps): | 317 | # now let us find the bestProvider for it |
671 | """ | 318 | fn = myFilterProvider(providers, package)[0] |
672 | Add any runtime dependencies of runtime item provided by fn | ||
673 | as long as item has't previously been processed by this function. | ||
674 | """ | ||
675 | 319 | ||
676 | if item in self.rbuild_cache: | 320 | # Now we have a filename let us get the depends and RDEPENDS of it |
677 | return True | 321 | depends = bb.utils.explode_deps(self.bb_cache.getVar('DEPENDS', fn, True) or "") |
322 | if fn in self.status.rundeps and package in self.status.rundeps[fn]: | ||
323 | rdepends= self.status.rundeps[fn][package].keys() | ||
324 | else: | ||
325 | rdepends = [] | ||
326 | version = self.bb_cache.getVar('PV', fn, True ) + '-' + self.bb_cache.getVar('PR', fn, True) | ||
678 | 327 | ||
679 | if not buildAllDeps: | 328 | # handle all the depends and rdepends of package |
680 | return True | 329 | add_all_depends ( depends, rdepends ) |
681 | 330 | ||
682 | rdepends = [] | 331 | # now create the node using package name |
683 | self.rbuild_cache.append(item) | 332 | print >> alldepends_file, '"%(package)s" [label="%(package)s\\n%(version)s"]' % vars() |
684 | 333 | ||
685 | if fn in self.status.rundeps and item in self.status.rundeps[fn]: | 334 | # remove the stuff we want to ignore and add the edges |
686 | rdepends += self.status.rundeps[fn][item].keys() | 335 | depends = filter( (lambda x: x not in ignore_deps), depends ) |
687 | if fn in self.status.runrecs and item in self.status.runrecs[fn]: | 336 | rdepends = filter( (lambda x: x not in ignore_deps), rdepends ) |
688 | rdepends += self.status.runrecs[fn][item].keys() | 337 | for depend in depends: |
338 | print >> alldepends_file, '"%(package)s" -> "%(depend)s"' % vars() | ||
339 | for depend in rdepends: | ||
340 | print >> alldepends_file, '"%(package)s" -> "%(depend)s" [style=dashed]' % vars() | ||
689 | 341 | ||
690 | bb.debug(2, "Additional runtime dependencies for %s are: %s" % (item, " ".join(rdepends))) | ||
691 | 342 | ||
692 | for rdepend in rdepends: | 343 | # Add depends now |
693 | if rdepend in self.status.ignored_dependencies: | 344 | depends_file = file('depends.dot', 'w' ) |
694 | continue | 345 | print >> depends_file, "digraph depends {" |
695 | if not self.buildRProvider(rdepend, buildAllDeps): | 346 | add_depends( pkgs_to_build ) |
696 | return False | 347 | print >> depends_file, "}" |
697 | return True | 348 | |
349 | # Add all depends now | ||
350 | alldepends_file = file('alldepends.dot', 'w' ) | ||
351 | print >> alldepends_file, "digraph alldepends {" | ||
352 | add_all_depends( pkgs_to_build, [] ) | ||
353 | print >> alldepends_file, "}" | ||
698 | 354 | ||
699 | def buildDepgraph( self ): | 355 | def buildDepgraph( self ): |
700 | all_depends = self.status.all_depends | 356 | all_depends = self.status.all_depends |
@@ -702,6 +358,7 @@ class BBCooker:
702 | 358 | ||
703 | localdata = data.createCopy(self.configuration.data) | 359 | localdata = data.createCopy(self.configuration.data) |
704 | bb.data.update_data(localdata) | 360 | bb.data.update_data(localdata) |
361 | bb.data.expandKeys(localdata) | ||
705 | 362 | ||
706 | def calc_bbfile_priority(filename): | 363 | def calc_bbfile_priority(filename): |
707 | for (regex, pri) in self.status.bbfile_config_priorities: | 364 | for (regex, pri) in self.status.bbfile_config_priorities: |
@@ -712,9 +369,9 @@ class BBCooker:
712 | # Handle PREFERRED_PROVIDERS | 369 | # Handle PREFERRED_PROVIDERS |
713 | for p in (bb.data.getVar('PREFERRED_PROVIDERS', localdata, 1) or "").split(): | 370 | for p in (bb.data.getVar('PREFERRED_PROVIDERS', localdata, 1) or "").split(): |
714 | (providee, provider) = p.split(':') | 371 | (providee, provider) = p.split(':') |
715 | if providee in self.preferred and self.preferred[providee] != provider: | 372 | if providee in self.status.preferred and self.status.preferred[providee] != provider: |
716 | bb.error("conflicting preferences for %s: both %s and %s specified" % (providee, provider, self.preferred[providee])) | 373 | bb.msg.error(bb.msg.domain.Provider, "conflicting preferences for %s: both %s and %s specified" % (providee, provider, self.status.preferred[providee])) |
717 | self.preferred[providee] = provider | 374 | self.status.preferred[providee] = provider |
718 | 375 | ||
719 | # Calculate priorities for each file | 376 | # Calculate priorities for each file |
720 | for p in self.status.pkg_fn.keys(): | 377 | for p in self.status.pkg_fn.keys(): |
@@ -726,19 +383,19 @@ class BBCooker:
726 | """ | 383 | """ |
727 | all_depends = self.status.all_depends | 384 | all_depends = self.status.all_depends |
728 | pn_provides = self.status.pn_provides | 385 | pn_provides = self.status.pn_provides |
729 | bb.debug(1, "collating packages for \"world\"") | 386 | bb.msg.debug(1, bb.msg.domain.Parsing, "collating packages for \"world\"") |
730 | for f in self.status.possible_world: | 387 | for f in self.status.possible_world: |
731 | terminal = True | 388 | terminal = True |
732 | pn = self.status.pkg_fn[f] | 389 | pn = self.status.pkg_fn[f] |
733 | 390 | ||
734 | for p in pn_provides[pn]: | 391 | for p in pn_provides[pn]: |
735 | if p.startswith('virtual/'): | 392 | if p.startswith('virtual/'): |
736 | bb.debug(2, "skipping %s due to %s provider starting with virtual/" % (f, p)) | 393 | bb.msg.debug(2, bb.msg.domain.Parsing, "World build skipping %s due to %s provider starting with virtual/" % (f, p)) |
737 | terminal = False | 394 | terminal = False |
738 | break | 395 | break |
739 | for pf in self.status.providers[p]: | 396 | for pf in self.status.providers[p]: |
740 | if self.status.pkg_fn[pf] != pn: | 397 | if self.status.pkg_fn[pf] != pn: |
741 | bb.debug(2, "skipping %s due to both us and %s providing %s" % (f, pf, p)) | 398 | bb.msg.debug(2, bb.msg.domain.Parsing, "World build skipping %s due to both us and %s providing %s" % (f, pf, p)) |
742 | terminal = False | 399 | terminal = False |
743 | break | 400 | break |
744 | if terminal: | 401 | if terminal: |
@@ -748,13 +405,8 @@ class BBCooker:
748 | self.status.possible_world = None | 405 | self.status.possible_world = None |
749 | self.status.all_depends = None | 406 | self.status.all_depends = None |
750 | 407 | ||
751 | def myProgressCallback( self, x, y, f, bb_cache, from_cache ): | 408 | def myProgressCallback( self, x, y, f, from_cache ): |
752 | # feed the status with new input | 409 | """Update any tty with the progress change""" |
753 | |||
754 | self.status.handle_bb_data(f, bb_cache, from_cache) | ||
755 | |||
756 | if bbdebug > 0: | ||
757 | return | ||
758 | if os.isatty(sys.stdout.fileno()): | 410 | if os.isatty(sys.stdout.fileno()): |
759 | sys.stdout.write("\rNOTE: Handling BitBake files: %s (%04d/%04d) [%2d %%]" % ( parsespin.next(), x, y, x*100/y ) ) | 411 | sys.stdout.write("\rNOTE: Handling BitBake files: %s (%04d/%04d) [%2d %%]" % ( parsespin.next(), x, y, x*100/y ) ) |
760 | sys.stdout.flush() | 412 | sys.stdout.flush() |
@@ -771,9 +423,10 @@ class BBCooker:
771 | try: | 423 | try: |
772 | from bb import shell | 424 | from bb import shell |
773 | except ImportError, details: | 425 | except ImportError, details: |
774 | bb.fatal("Sorry, shell not available (%s)" % details ) | 426 | bb.msg.fatal(bb.msg.domain.Parsing, "Sorry, shell not available (%s)" % details ) |
775 | else: | 427 | else: |
776 | bb.data.update_data( self.configuration.data ) | 428 | bb.data.update_data( self.configuration.data ) |
429 | bb.data.expandKeys(localdata) | ||
777 | shell.start( self ) | 430 | shell.start( self ) |
778 | sys.exit( 0 ) | 431 | sys.exit( 0 ) |
779 | 432 | ||
@@ -796,9 +449,9 @@ class BBCooker:
796 | bb.event.register(var,bb.data.getVar(var, data)) | 449 | bb.event.register(var,bb.data.getVar(var, data)) |
797 | 450 | ||
798 | except IOError: | 451 | except IOError: |
799 | bb.fatal( "Unable to open %s" % afile ) | 452 | bb.msg.fatal(bb.msg.domain.Parsing, "Unable to open %s" % afile ) |
800 | except bb.parse.ParseError, details: | 453 | except bb.parse.ParseError, details: |
801 | bb.fatal( "Unable to parse %s (%s)" % (afile, details) ) | 454 | bb.msg.fatal(bb.msg.domain.Parsing, "Unable to parse %s (%s)" % (afile, details) ) |
802 | 455 | ||
803 | def handleCollections( self, collections ): | 456 | def handleCollections( self, collections ): |
804 | """Handle collections""" | 457 | """Handle collections""" |
@@ -807,22 +460,22 @@ class BBCooker:
807 | for c in collection_list: | 460 | for c in collection_list: |
808 | regex = bb.data.getVar("BBFILE_PATTERN_%s" % c, self.configuration.data, 1) | 461 | regex = bb.data.getVar("BBFILE_PATTERN_%s" % c, self.configuration.data, 1) |
809 | if regex == None: | 462 | if regex == None: |
810 | bb.error("BBFILE_PATTERN_%s not defined" % c) | 463 | bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PATTERN_%s not defined" % c) |
811 | continue | 464 | continue |
812 | priority = bb.data.getVar("BBFILE_PRIORITY_%s" % c, self.configuration.data, 1) | 465 | priority = bb.data.getVar("BBFILE_PRIORITY_%s" % c, self.configuration.data, 1) |
813 | if priority == None: | 466 | if priority == None: |
814 | bb.error("BBFILE_PRIORITY_%s not defined" % c) | 467 | bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PRIORITY_%s not defined" % c) |
815 | continue | 468 | continue |
816 | try: | 469 | try: |
817 | cre = re.compile(regex) | 470 | cre = re.compile(regex) |
818 | except re.error: | 471 | except re.error: |
819 | bb.error("BBFILE_PATTERN_%s \"%s\" is not a valid regular expression" % (c, regex)) | 472 | bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PATTERN_%s \"%s\" is not a valid regular expression" % (c, regex)) |
820 | continue | 473 | continue |
821 | try: | 474 | try: |
822 | pri = int(priority) | 475 | pri = int(priority) |
823 | self.status.bbfile_config_priorities.append((cre, pri)) | 476 | self.status.bbfile_config_priorities.append((cre, pri)) |
824 | except ValueError: | 477 | except ValueError: |
825 | bb.error("invalid value for BBFILE_PRIORITY_%s: \"%s\"" % (c, priority)) | 478 | bb.msg.error(bb.msg.domain.Parsing, "invalid value for BBFILE_PRIORITY_%s: \"%s\"" % (c, priority)) |
826 | 479 | ||
827 | 480 | ||
828 | def cook( self, configuration, args ): | 481 | def cook( self, configuration, args ): |
@@ -834,11 +487,16 @@ class BBCooker:
834 | 487 | ||
835 | self.configuration = configuration | 488 | self.configuration = configuration |
836 | 489 | ||
837 | if not self.configuration.cmd: | 490 | if self.configuration.verbose: |
838 | self.configuration.cmd = "build" | 491 | bb.msg.set_verbose(True) |
839 | 492 | ||
840 | if self.configuration.debug: | 493 | if self.configuration.debug: |
841 | bb.debug_level = self.configuration.debug | 494 | bb.msg.set_debug_level(self.configuration.debug) |
495 | else: | ||
496 | bb.msg.set_debug_level(0) | ||
497 | |||
498 | if self.configuration.debug_domains: | ||
499 | bb.msg.set_debug_domains(self.configuration.debug_domains) | ||
842 | 500 | ||
843 | self.configuration.data = bb.data.init() | 501 | self.configuration.data = bb.data.init() |
844 | 502 | ||
@@ -847,6 +505,12 @@ class BBCooker:
847 | 505 | ||
848 | self.parseConfigurationFile( os.path.join( "conf", "bitbake.conf" ) ) | 506 | self.parseConfigurationFile( os.path.join( "conf", "bitbake.conf" ) ) |
849 | 507 | ||
508 | if not self.configuration.cmd: | ||
509 | self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data) | ||
510 | |||
511 | # For backwards compatibility - REMOVE ME | ||
512 | if not self.configuration.cmd: | ||
513 | self.configuration.cmd = "build" | ||
850 | 514 | ||
851 | # | 515 | # |
852 | # Special updated configuration we use for firing events | 516 | # Special updated configuration we use for firing events |
@@ -871,20 +535,34 @@ class BBCooker:
871 | if self.configuration.buildfile is not None: | 535 | if self.configuration.buildfile is not None: |
872 | bf = os.path.abspath( self.configuration.buildfile ) | 536 | bf = os.path.abspath( self.configuration.buildfile ) |
873 | try: | 537 | try: |
874 | bbfile_data = bb.parse.handle(bf, self.configuration.data) | 538 | os.stat(bf) |
875 | except IOError: | 539 | except OSError: |
876 | bb.fatal("Unable to open %s" % bf) | 540 | (filelist, masked) = self.collect_bbfiles() |
541 | regexp = re.compile(self.configuration.buildfile) | ||
542 | matches = [] | ||
543 | for f in filelist: | ||
544 | if regexp.search(f) and os.path.isfile(f): | ||
545 | bf = f | ||
546 | matches.append(f) | ||
547 | if len(matches) != 1: | ||
548 | bb.msg.error(bb.msg.domain.Parsing, "Unable to match %s (%s matches found):" % (self.configuration.buildfile, len(matches))) | ||
549 | for f in matches: | ||
550 | bb.msg.error(bb.msg.domain.Parsing, " %s" % f) | ||
551 | sys.exit(1) | ||
552 | bf = matches[0] | ||
553 | |||
554 | bbfile_data = bb.parse.handle(bf, self.configuration.data) | ||
877 | 555 | ||
878 | item = bb.data.getVar('PN', bbfile_data, 1) | 556 | item = bb.data.getVar('PN', bbfile_data, 1) |
879 | try: | 557 | try: |
880 | self.tryBuildPackage( bf, item, bbfile_data ) | 558 | self.tryBuildPackage(bf, item, self.configuration.cmd, bbfile_data, True) |
881 | except bb.build.EventException: | 559 | except bb.build.EventException: |
882 | bb.error( "Build of '%s' failed" % item ) | 560 | bb.msg.error(bb.msg.domain.Build, "Build of '%s' failed" % item ) |
883 | 561 | ||
884 | sys.exit( self.stats.show() ) | 562 | sys.exit( self.stats.show() ) |
885 | 563 | ||
886 | # initialise the parsing status now we know we will need deps | 564 | # initialise the parsing status now we know we will need deps |
887 | self.status = BBParsingStatus() | 565 | self.status = bb.cache.CacheData() |
888 | 566 | ||
889 | ignore = bb.data.getVar("ASSUME_PROVIDED", self.configuration.data, 1) or "" | 567 | ignore = bb.data.getVar("ASSUME_PROVIDED", self.configuration.data, 1) or "" |
890 | self.status.ignored_dependencies = Set( ignore.split() ) | 568 | self.status.ignored_dependencies = Set( ignore.split() ) |
@@ -912,23 +590,23 @@ class BBCooker:
912 | try: | 590 | try: |
913 | import psyco | 591 | import psyco |
914 | except ImportError: | 592 | except ImportError: |
915 | if bbdebug == 0: | 593 | bb.msg.note(1, bb.msg.domain.Collection, "Psyco JIT Compiler (http://psyco.sf.net) not available. Install it to increase performance.") |
916 | bb.note("Psyco JIT Compiler (http://psyco.sf.net) not available. Install it to increase performance.") | ||
917 | else: | 594 | else: |
918 | psyco.bind( self.collect_bbfiles ) | 595 | psyco.bind( self.parse_bbfiles ) |
919 | else: | 596 | else: |
920 | bb.note("You have disabled Psyco. This decreases performance.") | 597 | bb.msg.note(1, bb.msg.domain.Collection, "You have disabled Psyco. This decreases performance.") |
921 | 598 | ||
922 | try: | 599 | try: |
923 | bb.debug(1, "collecting .bb files") | 600 | bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files") |
924 | self.collect_bbfiles( self.myProgressCallback ) | 601 | (filelist, masked) = self.collect_bbfiles() |
925 | bb.debug(1, "parsing complete") | 602 | self.parse_bbfiles(filelist, masked, self.myProgressCallback) |
926 | if bbdebug == 0: | 603 | bb.msg.debug(1, bb.msg.domain.Collection, "parsing complete") |
927 | 604 | ||
928 | if self.configuration.parse_only: | 605 | if self.configuration.parse_only: |
929 | print "Requested parsing .bb files only. Exiting." | 606 | bb.msg.note(1, bb.msg.domain.Collection, "Requested parsing .bb files only. Exiting.") |
930 | return | 607 | return |
931 | 608 | ||
609 | |||
932 | self.buildDepgraph() | 610 | self.buildDepgraph() |
933 | 611 | ||
934 | if self.configuration.show_versions: | 612 | if self.configuration.show_versions: |
@@ -940,30 +618,41 @@ class BBCooker:
940 | for t in self.status.world_target: | 618 | for t in self.status.world_target: |
941 | pkgs_to_build.append(t) | 619 | pkgs_to_build.append(t) |
942 | 620 | ||
621 | if self.configuration.dot_graph: | ||
622 | self.generateDotGraph( pkgs_to_build, self.configuration.ignored_dot_deps ) | ||
623 | sys.exit( 0 ) | ||
624 | |||
943 | bb.event.fire(bb.event.BuildStarted(buildname, pkgs_to_build, self.configuration.event_data)) | 625 | bb.event.fire(bb.event.BuildStarted(buildname, pkgs_to_build, self.configuration.event_data)) |
944 | 626 | ||
945 | failures = 0 | 627 | localdata = data.createCopy(self.configuration.data) |
946 | for k in pkgs_to_build: | 628 | bb.data.update_data(localdata) |
947 | failed = False | 629 | bb.data.expandKeys(localdata) |
948 | try: | 630 | |
949 | if self.buildProvider( k , False ) == 0: | 631 | taskdata = bb.taskdata.TaskData(self.configuration.abort) |
950 | # already diagnosed | 632 | |
951 | failed = True | 633 | runlist = [] |
952 | except bb.build.EventException: | 634 | try: |
953 | bb.error("Build of " + k + " failed") | 635 | for k in pkgs_to_build: |
954 | failed = True | 636 | taskdata.add_provider(localdata, self.status, k) |
955 | 637 | runlist.append([k, "do_%s" % self.configuration.cmd]) | |
956 | if failed: | 638 | taskdata.add_unresolved(localdata, self.status) |
957 | failures += failures | 639 | except bb.providers.NoProvider: |
958 | if self.configuration.abort: | 640 | sys.exit(1) |
959 | sys.exit(1) | 641 | |
642 | rq = bb.runqueue.RunQueue() | ||
643 | rq.prepare_runqueue(self.configuration.data, self.status, taskdata, runlist) | ||
644 | try: | ||
645 | failures = rq.execute_runqueue(self, self.configuration.data, self.status, taskdata, runlist) | ||
646 | except runqueue.TaskFailure, (fnid, fn, taskname): | ||
647 | bb.msg.error(bb.msg.domain.Build, "'%s, %s' failed" % (fn, taskname)) | ||
648 | sys.exit(1) | ||
960 | 649 | ||
961 | bb.event.fire(bb.event.BuildCompleted(buildname, pkgs_to_build, self.configuration.event_data, failures)) | 650 | bb.event.fire(bb.event.BuildCompleted(buildname, pkgs_to_build, self.configuration.event_data, failures)) |
962 | 651 | ||
963 | sys.exit( self.stats.show() ) | 652 | sys.exit( self.stats.show() ) |
964 | 653 | ||
965 | except KeyboardInterrupt: | 654 | except KeyboardInterrupt: |
966 | print "\nNOTE: KeyboardInterrupt - Build not completed." | 655 | bb.msg.note(1, bb.msg.domain.Collection, "KeyboardInterrupt - Build not completed.") |
967 | sys.exit(1) | 656 | sys.exit(1) |
968 | 657 | ||
969 | def get_bbfiles( self, path = os.getcwd() ): | 658 | def get_bbfiles( self, path = os.getcwd() ): |
@@ -985,9 +674,8 @@ class BBCooker:
985 | return [] | 674 | return [] |
986 | return finddata.readlines() | 675 | return finddata.readlines() |
987 | 676 | ||
988 | def collect_bbfiles( self, progressCallback ): | 677 | def collect_bbfiles( self ): |
989 | """Collect all available .bb build files""" | 678 | """Collect all available .bb build files""" |
990 | self.cb = progressCallback | ||
991 | parsed, cached, skipped, masked = 0, 0, 0, 0 | 679 | parsed, cached, skipped, masked = 0, 0, 0, 0 |
992 | self.bb_cache = bb.cache.init(self) | 680 | self.bb_cache = bb.cache.init(self) |
993 | 681 | ||
@@ -998,7 +686,7 @@ class BBCooker:
998 | files = self.get_bbfiles() | 686 | files = self.get_bbfiles() |
999 | 687 | ||
1000 | if not len(files): | 688 | if not len(files): |
1001 | bb.error("no files to build.") | 689 | bb.msg.error(bb.msg.domain.Collection, "no files to build.") |
1002 | 690 | ||
1003 | newfiles = [] | 691 | newfiles = [] |
1004 | for f in files: | 692 | for f in files: |
@@ -1009,62 +697,80 @@ class BBCooker:
1009 | continue | 697 | continue |
1010 | newfiles += glob.glob(f) or [ f ] | 698 | newfiles += glob.glob(f) or [ f ] |
1011 | 699 | ||
1012 | bbmask = bb.data.getVar('BBMASK', self.configuration.data, 1) or "" | 700 | bbmask = bb.data.getVar('BBMASK', self.configuration.data, 1) |
701 | |||
702 | if not bbmask: | ||
703 | return (newfiles, 0) | ||
704 | |||
1013 | try: | 705 | try: |
1014 | bbmask_compiled = re.compile(bbmask) | 706 | bbmask_compiled = re.compile(bbmask) |
1015 | except sre_constants.error: | 707 | except sre_constants.error: |
1016 | bb.fatal("BBMASK is not a valid regular expression.") | 708 | bb.msg.fatal(bb.msg.domain.Collection, "BBMASK is not a valid regular expression.") |
1017 | 709 | ||
710 | finalfiles = [] | ||
1018 | for i in xrange( len( newfiles ) ): | 711 | for i in xrange( len( newfiles ) ): |
1019 | f = newfiles[i] | 712 | f = newfiles[i] |
1020 | if bbmask and bbmask_compiled.search(f): | 713 | if bbmask and bbmask_compiled.search(f): |
1021 | bb.debug(1, "bbmake: skipping %s" % f) | 714 | bb.msg.debug(1, bb.msg.domain.Collection, "skipping masked file %s" % f) |
1022 | masked += 1 | 715 | masked += 1 |
1023 | continue | 716 | continue |
1024 | debug(1, "bbmake: parsing %s" % f) | 717 | finalfiles.append(f) |
718 | |||
719 | return (finalfiles, masked) | ||
720 | |||
721 | def parse_bbfiles(self, filelist, masked, progressCallback = None): | ||
722 | parsed, cached, skipped = 0, 0, 0 | ||
723 | for i in xrange( len( filelist ) ): | ||
724 | f = filelist[i] | ||
725 | |||
726 | bb.msg.debug(1, bb.msg.domain.Collection, "parsing %s" % f) | ||
1025 | 727 | ||
1026 | # read a file's metadata | 728 | # read a file's metadata |
1027 | try: | 729 | try: |
1028 | fromCache, skip = self.bb_cache.loadData(f, self) | 730 | fromCache, skip = self.bb_cache.loadData(f, self.configuration.data) |
1029 | if skip: | 731 | if skip: |
1030 | skipped += 1 | 732 | skipped += 1 |
1031 | #bb.note("Skipping %s" % f) | 733 | bb.msg.debug(2, bb.msg.domain.Collection, "skipping %s" % f) |
1032 | self.bb_cache.skip(f) | 734 | self.bb_cache.skip(f) |
1033 | continue | 735 | continue |
1034 | elif fromCache: cached += 1 | 736 | elif fromCache: cached += 1 |
1035 | else: parsed += 1 | 737 | else: parsed += 1 |
1036 | deps = None | 738 | deps = None |
1037 | 739 | ||
740 | # Disabled by RP as was no longer functional | ||
1038 | # allow metadata files to add items to BBFILES | 741 | # allow metadata files to add items to BBFILES |
1039 | #data.update_data(self.pkgdata[f]) | 742 | #data.update_data(self.pkgdata[f]) |
1040 | addbbfiles = self.bb_cache.getVar('BBFILES', f, False) or None | 743 | #addbbfiles = self.bb_cache.getVar('BBFILES', f, False) or None |
1041 | if addbbfiles: | 744 | #if addbbfiles: |
1042 | for aof in addbbfiles.split(): | 745 | # for aof in addbbfiles.split(): |
1043 | if not files.count(aof): | 746 | # if not files.count(aof): |
1044 | if not os.path.isabs(aof): | 747 | # if not os.path.isabs(aof): |
1045 | aof = os.path.join(os.path.dirname(f),aof) | 748 | # aof = os.path.join(os.path.dirname(f),aof) |
1046 | files.append(aof) | 749 | # files.append(aof) |
750 | |||
751 | self.bb_cache.handle_data(f, self.status) | ||
1047 | 752 | ||
1048 | # now inform the caller | 753 | # now inform the caller |
1049 | if self.cb is not None: | 754 | if progressCallback is not None: |
1050 | self.cb( i + 1, len( newfiles ), f, self.bb_cache, fromCache ) | 755 | progressCallback( i + 1, len( filelist ), f, fromCache ) |
1051 | 756 | ||
1052 | except IOError, e: | 757 | except IOError, e: |
1053 | self.bb_cache.remove(f) | 758 | self.bb_cache.remove(f) |
1054 | bb.error("opening %s: %s" % (f, e)) | 759 | bb.msg.error(bb.msg.domain.Collection, "opening %s: %s" % (f, e)) |
1055 | pass | 760 | pass |
1056 | except KeyboardInterrupt: | 761 | except KeyboardInterrupt: |
1057 | self.bb_cache.sync() | 762 | self.bb_cache.sync() |
1058 | raise | 763 | raise |
1059 | except Exception, e: | 764 | except Exception, e: |
1060 | self.bb_cache.remove(f) | 765 | self.bb_cache.remove(f) |
1061 | bb.error("%s while parsing %s" % (e, f)) | 766 | bb.msg.error(bb.msg.domain.Collection, "%s while parsing %s" % (e, f)) |
1062 | except: | 767 | except: |
1063 | self.bb_cache.remove(f) | 768 | self.bb_cache.remove(f) |
1064 | raise | 769 | raise |
1065 | 770 | ||
1066 | if self.cb is not None: | 771 | if progressCallback is not None: |
1067 | print "\rNOTE: Parsing finished. %d cached, %d parsed, %d skipped, %d masked." % ( cached, parsed, skipped, masked ), | 772 | print "\r" # need newline after Handling Bitbake files message |
773 | bb.msg.note(1, bb.msg.domain.Collection, "Parsing finished. %d cached, %d parsed, %d skipped, %d masked." % ( cached, parsed, skipped, masked )) | ||
1068 | 774 | ||
1069 | self.bb_cache.sync() | 775 | self.bb_cache.sync() |
1070 | 776 | ||
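parse_bbfiles() above walks the collected list, classifies each file as cached, parsed or skipped, keeps going on per-file errors, and reports progress through the optional callback. A rough sketch of that flow follows, with the cache lookup replaced by a caller-supplied load_data function (an assumption for illustration, standing in for bb_cache.loadData()).

    # Rough sketch of the parse_bbfiles() flow; load_data returns (from_cache, skip).
    def parse_files(filelist, load_data, progress_callback=None):
        parsed = cached = skipped = 0
        for i, f in enumerate(filelist):
            try:
                from_cache, skip = load_data(f)
                if skip:
                    skipped += 1
                    continue
                elif from_cache:
                    cached += 1
                else:
                    parsed += 1
                if progress_callback is not None:
                    progress_callback(i + 1, len(filelist), f, from_cache)
            except IOError as e:
                # one unreadable file should not abort the whole parse
                print("ERROR: opening %s: %s" % (f, e))
        print("Parsing finished. %d cached, %d parsed, %d skipped." % (cached, parsed, skipped))

    parse_files(["a.bb", "b.bb"], lambda f: (False, False))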
@@ -1090,11 +796,11 @@ Default BBFILES are the .bb files in the current directory.""" ) | |||
1090 | parser.add_option( "-f", "--force", help = "force run of specified cmd, regardless of stamp status", | 796 | parser.add_option( "-f", "--force", help = "force run of specified cmd, regardless of stamp status", |
1091 | action = "store_true", dest = "force", default = False ) | 797 | action = "store_true", dest = "force", default = False ) |
1092 | 798 | ||
1093 | parser.add_option( "-i", "--interactive", help = "drop into the interactive mode.", | 799 | parser.add_option( "-i", "--interactive", help = "drop into the interactive mode also called the BitBake shell.", |
1094 | action = "store_true", dest = "interactive", default = False ) | 800 | action = "store_true", dest = "interactive", default = False ) |
1095 | 801 | ||
1096 | parser.add_option( "-c", "--cmd", help = "Specify task to execute. Note that this only executes the specified task for the providee and the packages it depends on, i.e. 'compile' does not implicitly call stage for the dependencies (IOW: use only if you know what you are doing)", | 802 | parser.add_option( "-c", "--cmd", help = "Specify task to execute. Note that this only executes the specified task for the providee and the packages it depends on, i.e. 'compile' does not implicitly call stage for the dependencies (IOW: use only if you know what you are doing). Depending on the base.bbclass a 'listtasks' task is defined and will show available tasks", |
1097 | action = "store", dest = "cmd", default = "build" ) | 803 | action = "store", dest = "cmd" ) |
1098 | 804 | ||
1099 | parser.add_option( "-r", "--read", help = "read the specified file before bitbake.conf", | 805 | parser.add_option( "-r", "--read", help = "read the specified file before bitbake.conf", |
1100 | action = "append", dest = "file", default = [] ) | 806 | action = "append", dest = "file", default = [] ) |
@@ -1102,7 +808,7 @@ Default BBFILES are the .bb files in the current directory.""" ) | |||
1102 | parser.add_option( "-v", "--verbose", help = "output more chit-chat to the terminal", | 808 | parser.add_option( "-v", "--verbose", help = "output more chit-chat to the terminal", |
1103 | action = "store_true", dest = "verbose", default = False ) | 809 | action = "store_true", dest = "verbose", default = False ) |
1104 | 810 | ||
1105 | parser.add_option( "-D", "--debug", help = "Increase the debug level", | 811 | parser.add_option( "-D", "--debug", help = "Increase the debug level. You can specify this more than once.", |
1106 | action = "count", dest="debug", default = 0) | 812 | action = "count", dest="debug", default = 0) |
1107 | 813 | ||
1108 | parser.add_option( "-n", "--dry-run", help = "don't execute, just go through the motions", | 814 | parser.add_option( "-n", "--dry-run", help = "don't execute, just go through the motions", |
@@ -1120,6 +826,16 @@ Default BBFILES are the .bb files in the current directory.""" ) | |||
1120 | parser.add_option( "-e", "--environment", help = "show the global or per-package environment (this is what used to be bbread)", | 826 | parser.add_option( "-e", "--environment", help = "show the global or per-package environment (this is what used to be bbread)", |
1121 | action = "store_true", dest = "show_environment", default = False ) | 827 | action = "store_true", dest = "show_environment", default = False ) |
1122 | 828 | ||
829 | parser.add_option( "-g", "--graphviz", help = "emit the dependency trees of the specified packages in the dot syntax", | ||
830 | action = "store_true", dest = "dot_graph", default = False ) | ||
831 | |||
832 | parser.add_option( "-I", "--ignore-deps", help = """Stop processing at the given list of dependencies when generating dependency graphs. This can help to make the graph more appealing""", | ||
833 | action = "append", dest = "ignored_dot_deps", default = [] ) | ||
834 | |||
835 | parser.add_option( "-l", "--log-domains", help = """Show debug logging for the specified logging domains""", | ||
836 | action = "append", dest = "debug_domains", default = [] ) | ||
837 | |||
838 | |||
1123 | options, args = parser.parse_args( sys.argv ) | 839 | options, args = parser.parse_args( sys.argv ) |
1124 | 840 | ||
1125 | cooker = BBCooker() | 841 | cooker = BBCooker() |
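The new options rely on optparse's "append" action so -I and -l can be given several times, alongside the "count" action that lets repeated -D flags raise the debug level. A minimal sketch of just those two behaviours is below; the real parser defines many more options than this.

    # Minimal optparse sketch of the "count" and "append" actions used above.
    import optparse

    parser = optparse.OptionParser()
    parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
                      help="Increase the debug level. You can specify this more than once.")
    parser.add_option("-l", "--log-domains", action="append", dest="debug_domains", default=[],
                      help="Show debug logging for the specified logging domains")

    options, args = parser.parse_args(["-D", "-D", "-l", "Collection"])
    print("debug=%d domains=%s" % (options.debug, options.debug_domains))  # debug=2 domains=['Collection']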
@@ -1129,3 +845,9 @@ Default BBFILES are the .bb files in the current directory.""" ) | |||
1129 | 845 | ||
1130 | if __name__ == "__main__": | 846 | if __name__ == "__main__": |
1131 | main() | 847 | main() |
848 | sys.exit(0) | ||
849 | import profile | ||
850 | profile.run('main()', "profile.log") | ||
851 | import pstats | ||
852 | p = pstats.Stats('profile.log') | ||
853 | p.print_stats() | ||
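The profiling tail added after main() is normally unreachable because of the sys.exit(0) just above it; removing that line turns it into a quick way to profile a run. A self-contained version of the same profile/pstats pattern, with a trivial stand-in workload:

    # Same profile/pstats pattern as the (normally dead) tail above.
    import profile, pstats

    def main():
        return sum(i * i for i in range(100000))   # stand-in for the real work

    profile.run("main()", "profile.log")
    p = pstats.Stats("profile.log")
    p.sort_stats("cumulative").print_stats(10)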
diff --git a/bitbake/bin/bitdoc b/bitbake/bin/bitdoc index 84d2ee23ce..e865e1b998 100755 --- a/bitbake/bin/bitdoc +++ b/bitbake/bin/bitdoc | |||
@@ -442,7 +442,7 @@ Create a set of html pages (documentation) for a bitbake.conf.... | |||
442 | options, args = parser.parse_args( sys.argv ) | 442 | options, args = parser.parse_args( sys.argv ) |
443 | 443 | ||
444 | if options.debug: | 444 | if options.debug: |
445 | bb.debug_level = options.debug | 445 | bb.msg.set_debug_level(options.debug) |
446 | 446 | ||
447 | return options.config, options.output | 447 | return options.config, options.output |
448 | 448 | ||