| field | value | date |
|---|---|---|
| author | Richard Purdie <richard@openedhand.com> | 2006-11-29 22:52:37 +0000 |
| committer | Richard Purdie <richard@openedhand.com> | 2006-11-29 22:52:37 +0000 |
| commit | 681d6c18ad59dac9e53f769a568835241d7fa9b7 (patch) | |
| tree | 243418a546b89650d28580f7721b8324586146e4 | |
| parent | adabf6c0931af1282a7c75321cd8b050e8d05c95 (diff) | |
| download | poky-681d6c18ad59dac9e53f769a568835241d7fa9b7.tar.gz | |
bitbake: Sync with bitbake trunk for bugfixes and improved dot file generation code
git-svn-id: https://svn.o-hand.com/repos/poky/trunk@987 311d38ba-8fff-0310-9ca6-ca027cbcb966
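
The reworked generateDotGraph() in this patch writes two GraphViz files, depends.dot and task-depends.dot, in place of the old depends.dot/alldepends.dot pair. A minimal sketch of the text those print statements emit (Python 2, matching the code in this tree; the recipe name, version and file path below are invented for illustration only):

```python
# Minimal sketch (Python 2, as in the patch) of the GraphViz output format
# that the new generateDotGraph() writes; the recipe name, version and
# path are invented for illustration only.
depends_file = file('depends.dot', 'w')
print >> depends_file, "digraph depends {"
# one node per recipe, labelled with PN, PV-PR and the recipe file
print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (
    'example-app', 'example-app', '1.0-r0', '/recipes/example-app_1.0.bb')
# solid edges for build-time dependencies (DEPENDS)
print >> depends_file, '"%s" -> "%s"' % ('example-app', 'example-lib')
# dashed edges for runtime dependencies (RDEPENDS/RRECOMMENDS)
print >> depends_file, '"%s" -> "%s" [style=dashed]' % ('example-app', 'example-lib')
print >> depends_file, "}"
depends_file.close()
```

Either file can then be rendered with the standard GraphViz tools, for example `dot -Tpng depends.dot -o depends.png`.
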
| mode | file | changes |
|---|---|---|
| -rwxr-xr-x | bitbake/bin/bitbake | 223 |
| -rw-r--r-- | bitbake/lib/bb/data.py | 4 |
| -rw-r--r-- | bitbake/lib/bb/methodpool.py | 1 |
| -rw-r--r-- | bitbake/lib/bb/parse/parse_py/BBHandler.py | 77 |
| -rw-r--r-- | bitbake/lib/bb/runqueue.py | 72 |
| -rw-r--r-- | bitbake/lib/bb/shell.py | 5 |

6 files changed, 155 insertions, 227 deletions
diff --git a/bitbake/bin/bitbake b/bitbake/bin/bitbake
index 85a0cbc398..36322d2a0e 100755
--- a/bitbake/bin/bitbake
+++ b/bitbake/bin/bitbake

@@ -186,171 +186,82 @@ class BBCooker:
| 186 | 186 | ||
| 187 | def generateDotGraph( self, pkgs_to_build, ignore_deps ): | 187 | def generateDotGraph( self, pkgs_to_build, ignore_deps ): |
| 188 | """ | 188 | """ |
| 189 | Generate two graphs one for the DEPENDS and RDEPENDS. The current | 189 | Generate a task dependency graph. |
| 190 | implementation creates crappy graphs ;) | ||
| 191 | 190 | ||
| 192 | pkgs_to_build A list of packages that needs to be built | 191 | pkgs_to_build A list of packages that needs to be built |
| 193 | ignore_deps A list of names where processing of dependencies | 192 | ignore_deps A list of names where processing of dependencies |
| 194 | should be stopped. e.g. dependencies that get | 193 | should be stopped. e.g. dependencies that get |
| 195 | """ | 194 | """ |
| 196 | 195 | ||
| 197 | def myFilterProvider( providers, item): | 196 | for dep in ignore_deps: |
| 198 | """ | 197 | self.status.ignored_dependencies.add(dep) |
| 199 | Take a list of providers and filter according to environment | ||
| 200 | variables. In contrast to filterProviders we do not discriminate | ||
| 201 | and take PREFERRED_PROVIDER into account. | ||
| 202 | """ | ||
| 203 | eligible = [] | ||
| 204 | preferred_versions = {} | ||
| 205 | |||
| 206 | # Collate providers by PN | ||
| 207 | pkg_pn = {} | ||
| 208 | for p in providers: | ||
| 209 | pn = self.status.pkg_fn[p] | ||
| 210 | if pn not in pkg_pn: | ||
| 211 | pkg_pn[pn] = [] | ||
| 212 | pkg_pn[pn].append(p) | ||
| 213 | |||
| 214 | bb.msg.debug(1, bb.msg.domain.Provider, "providers for %s are: %s" % (item, pkg_pn.keys())) | ||
| 215 | |||
| 216 | for pn in pkg_pn.keys(): | ||
| 217 | preferred_versions[pn] = bb.providers.findBestProvider(pn, self.configuration.data, self.status, pkg_pn)[2:4] | ||
| 218 | eligible.append(preferred_versions[pn][1]) | ||
| 219 | |||
| 220 | for p in eligible: | ||
| 221 | if p in self.build_cache_fail: | ||
| 222 | bb.msg.debug(1, bb.msg.domain.Provider, "rejecting already-failed %s" % p) | ||
| 223 | eligible.remove(p) | ||
| 224 | |||
| 225 | if len(eligible) == 0: | ||
| 226 | bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item) | ||
| 227 | return 0 | ||
| 228 | |||
| 229 | prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, self.configuration.data, 1) | ||
| 230 | |||
| 231 | # try the preferred provider first | ||
| 232 | if prefervar: | ||
| 233 | for p in eligible: | ||
| 234 | if prefervar == self.status.pkg_fn[p]: | ||
| 235 | bb.msg.note(1, bb.msg.domain.Provider, "Selecting PREFERRED_PROVIDER %s" % prefervar) | ||
| 236 | eligible.remove(p) | ||
| 237 | eligible = [p] + eligible | ||
| 238 | |||
| 239 | return eligible | ||
| 240 | |||
| 241 | |||
| 242 | # try to avoid adding the same rdepends over an over again | ||
| 243 | seen_depends = [] | ||
| 244 | seen_rdepends = [] | ||
| 245 | |||
| 246 | |||
| 247 | def add_depends(package_list): | ||
| 248 | """ | ||
| 249 | Add all depends of all packages from this list | ||
| 250 | """ | ||
| 251 | for package in package_list: | ||
| 252 | if package in seen_depends or package in ignore_deps: | ||
| 253 | continue | ||
| 254 | |||
| 255 | seen_depends.append( package ) | ||
| 256 | if not package in self.status.providers: | ||
| 257 | """ | ||
| 258 | We have not seen this name -> error in | ||
| 259 | dependency handling | ||
| 260 | """ | ||
| 261 | bb.msg.note(1, bb.msg.domain.Depends, "ERROR with provider: %(package)s" % vars() ) | ||
| 262 | print >> depends_file, '"%(package)s" -> ERROR' % vars() | ||
| 263 | continue | ||
| 264 | |||
| 265 | # get all providers for this package | ||
| 266 | providers = self.status.providers[package] | ||
| 267 | |||
| 268 | # now let us find the bestProvider for it | ||
| 269 | fn = myFilterProvider(providers, package)[0] | ||
| 270 | |||
| 271 | depends = bb.utils.explode_deps(self.bb_cache.getVar('DEPENDS', fn, True) or "") | ||
| 272 | version = self.bb_cache.getVar('PV', fn, True ) + '-' + self.bb_cache.getVar('PR', fn, True) | ||
| 273 | add_depends ( depends ) | ||
| 274 | |||
| 275 | # now create the node | ||
| 276 | print >> depends_file, '"%(package)s" [label="%(package)s\\n%(version)s"]' % vars() | ||
| 277 | |||
| 278 | depends = filter( (lambda x: x not in ignore_deps), depends ) | ||
| 279 | for depend in depends: | ||
| 280 | print >> depends_file, '"%(package)s" -> "%(depend)s"' % vars() | ||
| 281 | |||
| 282 | |||
| 283 | def add_all_depends( the_depends, the_rdepends ): | ||
| 284 | """ | ||
| 285 | Add both DEPENDS and RDEPENDS. RDEPENDS will get dashed | ||
| 286 | lines | ||
| 287 | """ | ||
| 288 | package_list = the_depends + the_rdepends | ||
| 289 | for package in package_list: | ||
| 290 | if package in seen_rdepends or package in ignore_deps: | ||
| 291 | continue | ||
| 292 | |||
| 293 | seen_rdepends.append( package ) | ||
| 294 | |||
| 295 | # Let us find out if the package is a DEPENDS or RDEPENDS | ||
| 296 | # and we will set 'providers' with the avilable providers | ||
| 297 | # for the package. | ||
| 298 | if package in the_depends: | ||
| 299 | if not package in self.status.providers: | ||
| 300 | bb.msg.note(1, bb.msg.domain.Depends, "ERROR with provider: %(package)s" % vars() ) | ||
| 301 | print >> alldepends_file, '"%(package)s" -> ERROR' % vars() | ||
| 302 | continue | ||
| 303 | |||
| 304 | providers = self.status.providers[package] | ||
| 305 | elif package in the_rdepends: | ||
| 306 | if len(bb.providers.getRuntimeProviders(self.status, package)) == 0: | ||
| 307 | bb.msg.note(1, bb.msg.domain.Depends, "ERROR with rprovider: %(package)s" % vars() ) | ||
| 308 | print >> alldepends_file, '"%(package)s" -> ERROR [style="dashed"]' % vars() | ||
| 309 | continue | ||
| 310 | |||
| 311 | providers = bb.providers.getRuntimeProviders(self.status, package) | ||
| 312 | else: | ||
| 313 | # something went wrong... | ||
| 314 | print "Complete ERROR! %s" % package | ||
| 315 | continue | ||
| 316 | |||
| 317 | # now let us find the bestProvider for it | ||
| 318 | fn = myFilterProvider(providers, package)[0] | ||
| 319 | |||
| 320 | # Now we have a filename let us get the depends and RDEPENDS of it | ||
| 321 | depends = bb.utils.explode_deps(self.bb_cache.getVar('DEPENDS', fn, True) or "") | ||
| 322 | if fn in self.status.rundeps and package in self.status.rundeps[fn]: | ||
| 323 | rdepends= self.status.rundeps[fn][package].keys() | ||
| 324 | else: | ||
| 325 | rdepends = [] | ||
| 326 | version = self.bb_cache.getVar('PV', fn, True ) + '-' + self.bb_cache.getVar('PR', fn, True) | ||
| 327 | 198 | ||
| 328 | # handle all the depends and rdepends of package | 199 | localdata = data.createCopy(self.configuration.data) |
| 329 | add_all_depends ( depends, rdepends ) | 200 | bb.data.update_data(localdata) |
| 330 | 201 | bb.data.expandKeys(localdata) | |
| 331 | # now create the node using package name | 202 | taskdata = bb.taskdata.TaskData(self.configuration.abort) |
| 332 | print >> alldepends_file, '"%(package)s" [label="%(package)s\\n%(version)s"]' % vars() | ||
| 333 | |||
| 334 | # remove the stuff we want to ignore and add the edges | ||
| 335 | depends = filter( (lambda x: x not in ignore_deps), depends ) | ||
| 336 | rdepends = filter( (lambda x: x not in ignore_deps), rdepends ) | ||
| 337 | for depend in depends: | ||
| 338 | print >> alldepends_file, '"%(package)s" -> "%(depend)s"' % vars() | ||
| 339 | for depend in rdepends: | ||
| 340 | print >> alldepends_file, '"%(package)s" -> "%(depend)s" [style=dashed]' % vars() | ||
| 341 | 203 | ||
| 204 | runlist = [] | ||
| 205 | try: | ||
| 206 | for k in pkgs_to_build: | ||
| 207 | taskdata.add_provider(localdata, self.status, k) | ||
| 208 | runlist.append([k, "do_%s" % self.configuration.cmd]) | ||
| 209 | taskdata.add_unresolved(localdata, self.status) | ||
| 210 | except bb.providers.NoProvider: | ||
| 211 | sys.exit(1) | ||
| 212 | rq = bb.runqueue.RunQueue() | ||
| 213 | rq.prepare_runqueue(self.configuration.data, self.status, taskdata, runlist) | ||
| 342 | 214 | ||
| 343 | # Add depends now | 215 | seen_fnids = [] |
| 344 | depends_file = file('depends.dot', 'w' ) | 216 | depends_file = file('depends.dot', 'w' ) |
| 217 | tdepends_file = file('task-depends.dot', 'w' ) | ||
| 345 | print >> depends_file, "digraph depends {" | 218 | print >> depends_file, "digraph depends {" |
| 346 | add_depends( pkgs_to_build ) | 219 | print >> tdepends_file, "digraph depends {" |
| 220 | rq.prio_map.reverse() | ||
| 221 | for task1 in range(len(rq.runq_fnid)): | ||
| 222 | task = rq.prio_map[task1] | ||
| 223 | taskname = rq.runq_task[task] | ||
| 224 | fnid = rq.runq_fnid[task] | ||
| 225 | fn = taskdata.fn_index[fnid] | ||
| 226 | pn = self.status.pkg_fn[fn] | ||
| 227 | version = self.bb_cache.getVar('PV', fn, True ) + '-' + self.bb_cache.getVar('PR', fn, True) | ||
| 228 | print >> tdepends_file, '"%s.%s" [label="%s %s\\n%s\\n%s"]' % (pn, taskname, pn, taskname, version, fn) | ||
| 229 | for dep in rq.runq_depends[task]: | ||
| 230 | depfn = taskdata.fn_index[rq.runq_fnid[dep]] | ||
| 231 | deppn = self.status.pkg_fn[depfn] | ||
| 232 | print >> tdepends_file, '"%s.%s" -> "%s.%s"' % (pn, rq.runq_task[task], deppn, rq.runq_task[dep]) | ||
| 233 | if fnid not in seen_fnids: | ||
| 234 | seen_fnids.append(fnid) | ||
| 235 | packages = [] | ||
| 236 | print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn) | ||
| 237 | for depend in self.status.deps[fn]: | ||
| 238 | print >> depends_file, '"%s" -> "%s"' % (pn, depend) | ||
| 239 | rdepends = self.status.rundeps[fn] | ||
| 240 | for package in rdepends: | ||
| 241 | for rdepend in rdepends[package]: | ||
| 242 | print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend) | ||
| 243 | packages.append(package) | ||
| 244 | rrecs = self.status.runrecs[fn] | ||
| 245 | for package in rrecs: | ||
| 246 | for rdepend in rrecs[package]: | ||
| 247 | print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend) | ||
| 248 | if not package in packages: | ||
| 249 | packages.append(package) | ||
| 250 | for package in packages: | ||
| 251 | if package != pn: | ||
| 252 | print >> depends_file, '"%s" [label="%s(%s) %s\\n%s"]' % (package, package, pn, version, fn) | ||
| 253 | for depend in self.status.deps[fn]: | ||
| 254 | print >> depends_file, '"%s" -> "%s"' % (package, depend) | ||
| 255 | # Prints a flattened form of the above where subpackages of a package are merged into the main pn | ||
| 256 | #print >> depends_file, '"%s" [label="%s %s\\n%s\\n%s"]' % (pn, pn, taskname, version, fn) | ||
| 257 | #for rdep in taskdata.rdepids[fnid]: | ||
| 258 | # print >> depends_file, '"%s" -> "%s" [style=dashed]' % (pn, taskdata.run_names_index[rdep]) | ||
| 259 | #for dep in taskdata.depids[fnid]: | ||
| 260 | # print >> depends_file, '"%s" -> "%s"' % (pn, taskdata.build_names_index[dep]) | ||
| 347 | print >> depends_file, "}" | 261 | print >> depends_file, "}" |
| 348 | 262 | print >> tdepends_file, "}" | |
| 349 | # Add all depends now | 263 | bb.msg.note(1, bb.msg.domain.Collection, "Dependencies saved to 'depends.dot'") |
| 350 | alldepends_file = file('alldepends.dot', 'w' ) | 264 | bb.msg.note(1, bb.msg.domain.Collection, "Task dependencies saved to 'task-depends.dot'") |
| 351 | print >> alldepends_file, "digraph alldepends {" | ||
| 352 | add_all_depends( pkgs_to_build, [] ) | ||
| 353 | print >> alldepends_file, "}" | ||
| 354 | 265 | ||
| 355 | def buildDepgraph( self ): | 266 | def buildDepgraph( self ): |
| 356 | all_depends = self.status.all_depends | 267 | all_depends = self.status.all_depends |
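
Since the task graph above names its nodes "<pn>.<taskname>", a small consumer script can pull the task-level edges for a single recipe back out of task-depends.dot. A minimal sketch (Python 2; the recipe name passed on the command line is an assumption about your build, not something taken from this patch):

```python
# Sketch (Python 2): list the task-level edges for one recipe from the
# task-depends.dot file written by the code above.
import re, sys

def task_edges(dotfile, pn):
    # edge lines in the generated file look like: "pn.do_task" -> "dep.do_task"
    edge = re.compile(r'^"([^"]+)" -> "([^"]+)"')
    for line in file(dotfile):
        m = edge.match(line.strip())
        if m and m.group(1).startswith(pn + "."):
            print "%s depends on %s" % (m.group(1), m.group(2))

if __name__ == "__main__":
    # e.g. python list-task-deps.py busybox   (recipe name is hypothetical)
    task_edges("task-depends.dot", sys.argv[1])
```
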
@@ -643,10 +554,10 @@ class BBCooker:
| 643 | rq.prepare_runqueue(self.configuration.data, self.status, taskdata, runlist) | 554 | rq.prepare_runqueue(self.configuration.data, self.status, taskdata, runlist) |
| 644 | try: | 555 | try: |
| 645 | failures = rq.execute_runqueue(self, self.configuration.data, self.status, taskdata, runlist) | 556 | failures = rq.execute_runqueue(self, self.configuration.data, self.status, taskdata, runlist) |
| 646 | except runqueue.TaskFailure, (fnid, fn, taskname): | 557 | except runqueue.TaskFailure, fnids: |
| 647 | bb.msg.error(bb.msg.domain.Build, "'%s, %s' failed" % (fn, taskname)) | 558 | for fnid in fnids: |
| 559 | bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid]) | ||
| 648 | sys.exit(1) | 560 | sys.exit(1) |
| 649 | |||
| 650 | bb.event.fire(bb.event.BuildCompleted(buildname, pkgs_to_build, self.configuration.event_data, failures)) | 561 | bb.event.fire(bb.event.BuildCompleted(buildname, pkgs_to_build, self.configuration.event_data, failures)) |
| 651 | 562 | ||
| 652 | sys.exit( self.stats.show() ) | 563 | sys.exit( self.stats.show() ) |
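
The hunk above adjusts buildTargets() to the new shape of runqueue.TaskFailure, which now carries a list of failed fnids rather than a single (fnid, fn, taskname) tuple. A self-contained sketch of that exception shape and the caller-side loop, in the same Python 2 idiom as the code above (the filenames in fn_index are invented):

```python
# Sketch (Python 2) of the new TaskFailure shape: the exception wraps a
# list of failed fnids, and callers iterate it to look filenames up.
class TaskFailure(Exception):
    def __init__(self, x):
        self.args = x

fn_index = ["/recipes/alpha_1.0.bb", "/recipes/beta_2.0.bb"]  # invented paths

try:
    raise TaskFailure([0, 1])
except TaskFailure, fnids:
    for fnid in fnids:
        print "'%s' failed" % fn_index[fnid]
```
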
diff --git a/bitbake/lib/bb/data.py b/bitbake/lib/bb/data.py
index 19066c9adc..9f7e4be4c8 100644
--- a/bitbake/lib/bb/data.py
+++ b/bitbake/lib/bb/data.py

@@ -542,8 +542,8 @@ def update_data(d):
| 542 | 542 | ||
| 543 | 543 | ||
| 544 | def inherits_class(klass, d): | 544 | def inherits_class(klass, d): |
| 545 | val = getVar('__inherit_cache', d) or "" | 545 | val = getVar('__inherit_cache', d) or [] |
| 546 | if os.path.join('classes', '%s.bbclass' % klass) in val.split(): | 546 | if os.path.join('classes', '%s.bbclass' % klass) in val: |
| 547 | return True | 547 | return True |
| 548 | return False | 548 | return False |
| 549 | 549 | ||
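
With __inherit_cache now stored as a list of class paths rather than a space-separated string, inherits_class() tests list membership directly. A self-contained sketch of that check, minus the real datastore lookup:

```python
# Sketch (Python 2): the inherit cache is now a real list of paths, so the
# membership test no longer needs str.split().
import os

# new-style cache, as BBHandler.inherit() now builds it
inherit_cache = [os.path.join('classes', 'base.bbclass'),
                 os.path.join('classes', 'image.bbclass')]

def inherits_class(klass, cache):
    # mirrors bb.data.inherits_class(), minus the datastore lookup
    return os.path.join('classes', '%s.bbclass' % klass) in cache

print inherits_class('image', inherit_cache)   # True
print inherits_class('kernel', inherit_cache)  # False
```
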
diff --git a/bitbake/lib/bb/methodpool.py b/bitbake/lib/bb/methodpool.py
index e14986bc19..f0565ce790 100644
--- a/bitbake/lib/bb/methodpool.py
+++ b/bitbake/lib/bb/methodpool.py

@@ -83,6 +83,7 @@ def check_insert_method(modulename, code, fn):
| 83 | """ | 83 | """ |
| 84 | if not modulename in _parsed_methods: | 84 | if not modulename in _parsed_methods: |
| 85 | return insert_method(modulename, code, fn) | 85 | return insert_method(modulename, code, fn) |
| 86 | _parsed_methods[modulename] = 1 | ||
| 86 | 87 | ||
| 87 | def parsed_module(modulename): | 88 | def parsed_module(modulename): |
| 88 | """ | 89 | """ |
diff --git a/bitbake/lib/bb/parse/parse_py/BBHandler.py b/bitbake/lib/bb/parse/parse_py/BBHandler.py
index 34f4d25996..42b0369428 100644
--- a/bitbake/lib/bb/parse/parse_py/BBHandler.py
+++ b/bitbake/lib/bb/parse/parse_py/BBHandler.py

@@ -40,7 +40,6 @@ __word__ = re.compile(r"\S+")
| 40 | __infunc__ = "" | 40 | __infunc__ = "" |
| 41 | __inpython__ = False | 41 | __inpython__ = False |
| 42 | __body__ = [] | 42 | __body__ = [] |
| 43 | __bbpath_found__ = 0 | ||
| 44 | __classname__ = "" | 43 | __classname__ = "" |
| 45 | classes = [ None, ] | 44 | classes = [ None, ] |
| 46 | 45 | ||
@@ -58,25 +57,24 @@ def supports(fn, d):
| 58 | return localfn[-3:] == ".bb" or localfn[-8:] == ".bbclass" or localfn[-4:] == ".inc" | 57 | return localfn[-3:] == ".bb" or localfn[-8:] == ".bbclass" or localfn[-4:] == ".inc" |
| 59 | 58 | ||
| 60 | def inherit(files, d): | 59 | def inherit(files, d): |
| 61 | __inherit_cache = data.getVar('__inherit_cache', d) or "" | 60 | __inherit_cache = data.getVar('__inherit_cache', d) or [] |
| 62 | fn = "" | 61 | fn = "" |
| 63 | lineno = 0 | 62 | lineno = 0 |
| 64 | for f in files: | 63 | files = data.expand(files, d) |
| 65 | file = data.expand(f, d) | 64 | for file in files: |
| 66 | if file[0] != "/" and file[-8:] != ".bbclass": | 65 | if file[0] != "/" and file[-8:] != ".bbclass": |
| 67 | file = os.path.join('classes', '%s.bbclass' % file) | 66 | file = os.path.join('classes', '%s.bbclass' % file) |
| 68 | 67 | ||
| 69 | if not file in __inherit_cache.split(): | 68 | if not file in __inherit_cache: |
| 70 | bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s:%d: inheriting %s" % (fn, lineno, file)) | 69 | bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s:%d: inheriting %s" % (fn, lineno, file)) |
| 71 | __inherit_cache += " %s" % file | 70 | __inherit_cache.append( file ) |
| 72 | include(fn, file, d, "inherit") | 71 | include(fn, file, d, "inherit") |
| 73 | data.setVar('__inherit_cache', __inherit_cache, d) | 72 | data.setVar('__inherit_cache', __inherit_cache, d) |
| 74 | 73 | ||
| 75 | 74 | ||
| 76 | def handle(fn, d, include = 0): | 75 | def handle(fn, d, include = 0): |
| 77 | global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __bbpath_found__, __residue__ | 76 | global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__ |
| 78 | __body__ = [] | 77 | __body__ = [] |
| 79 | __bbpath_found__ = 0 | ||
| 80 | __infunc__ = "" | 78 | __infunc__ = "" |
| 81 | __classname__ = "" | 79 | __classname__ = "" |
| 82 | __residue__ = [] | 80 | __residue__ = [] |
@@ -104,7 +102,6 @@ def handle(fn, d, include = 0):
| 104 | if not os.path.isabs(fn): | 102 | if not os.path.isabs(fn): |
| 105 | f = None | 103 | f = None |
| 106 | for p in bbpath: | 104 | for p in bbpath: |
| 107 | p = data.expand(p, d) | ||
| 108 | j = os.path.join(p, fn) | 105 | j = os.path.join(p, fn) |
| 109 | if os.access(j, os.R_OK): | 106 | if os.access(j, os.R_OK): |
| 110 | abs_fn = j | 107 | abs_fn = j |
@@ -147,39 +144,35 @@ def handle(fn, d, include = 0):
| 147 | data.expandKeys(d) | 144 | data.expandKeys(d) |
| 148 | data.update_data(d) | 145 | data.update_data(d) |
| 149 | anonqueue = data.getVar("__anonqueue", d, 1) or [] | 146 | anonqueue = data.getVar("__anonqueue", d, 1) or [] |
| 150 | for anon in anonqueue: | 147 | body = [x['content'] for x in anonqueue] |
| 151 | data.setVar("__anonfunc", anon["content"], d) | 148 | flag = { 'python' : 1, 'func' : 1 } |
| 152 | data.setVarFlags("__anonfunc", anon["flags"], d) | 149 | data.setVar("__anonfunc", "\n".join(body), d) |
| 153 | from bb import build | 150 | data.setVarFlags("__anonfunc", flag, d) |
| 154 | try: | 151 | from bb import build |
| 155 | t = data.getVar('T', d) | 152 | try: |
| 156 | data.setVar('T', '${TMPDIR}/', d) | 153 | t = data.getVar('T', d) |
| 157 | build.exec_func("__anonfunc", d) | 154 | data.setVar('T', '${TMPDIR}/', d) |
| 158 | data.delVar('T', d) | 155 | build.exec_func("__anonfunc", d) |
| 159 | if t: | 156 | data.delVar('T', d) |
| 160 | data.setVar('T', t, d) | 157 | if t: |
| 161 | except Exception, e: | 158 | data.setVar('T', t, d) |
| 162 | bb.msg.debug(1, bb.msg.domain.Parsing, "executing anonymous function: %s" % e) | 159 | except Exception, e: |
| 163 | raise | 160 | bb.msg.debug(1, bb.msg.domain.Parsing, "executing anonymous function: %s" % e) |
| 161 | raise | ||
| 164 | data.delVar("__anonqueue", d) | 162 | data.delVar("__anonqueue", d) |
| 165 | data.delVar("__anonfunc", d) | 163 | data.delVar("__anonfunc", d) |
| 166 | set_additional_vars(fn, d, include) | 164 | set_additional_vars(fn, d, include) |
| 167 | data.update_data(d) | 165 | data.update_data(d) |
| 168 | 166 | ||
| 169 | all_handlers = {} | 167 | all_handlers = {} |
| 170 | for var in data.keys(d): | 168 | for var in data.getVar('__BBHANDLERS', d) or []: |
| 171 | # try to add the handler | 169 | # try to add the handler |
| 172 | # if we added it remember the choiche | 170 | # if we added it remember the choiche |
| 173 | if data.getVarFlag(var, 'handler', d): | 171 | handler = data.getVar(var,d) |
| 174 | handler = data.getVar(var,d) | 172 | if bb.event.register(var,handler) == bb.event.Registered: |
| 175 | if bb.event.register(var,handler) == bb.event.Registered: | 173 | all_handlers[var] = handler |
| 176 | all_handlers[var] = handler | ||
| 177 | |||
| 178 | continue | ||
| 179 | |||
| 180 | if not data.getVarFlag(var, 'task', d): | ||
| 181 | continue | ||
| 182 | 174 | ||
| 175 | for var in data.getVar('__BBTASKS', d) or []: | ||
| 183 | deps = data.getVarFlag(var, 'deps', d) or [] | 176 | deps = data.getVarFlag(var, 'deps', d) or [] |
| 184 | postdeps = data.getVarFlag(var, 'postdeps', d) or [] | 177 | postdeps = data.getVarFlag(var, 'postdeps', d) or [] |
| 185 | bb.build.add_task(var, deps, d) | 178 | bb.build.add_task(var, deps, d) |
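
This hunk switches handle() from scanning every datastore key for 'handler' and 'task' flags to walking the new __BBHANDLERS and __BBTASKS list variables that feeder() populates (see the later hunks in this file). A small self-contained sketch of that registry pattern, with a plain dict standing in for the real BitBake datastore:

```python
# Sketch (Python 2) of the new bookkeeping: the parser appends each
# addtask/addhandler name to a list variable and handle() later walks
# only those lists instead of every key.  A dict stands in for the real
# BitBake datastore.
d = {}

def setVar(var, value):
    d[var] = value

def getVar(var, default=None):
    return d.get(var, default)

# what feeder() now does for each "addtask <name>" line it sees
for taskname in ["do_fetch", "do_compile", "do_install"]:
    bbtasks = getVar('__BBTASKS') or []
    bbtasks.append(taskname)
    setVar('__BBTASKS', bbtasks)

# what handle() now does instead of iterating data.keys(d)
for var in getVar('__BBTASKS') or []:
    print "registering task %s" % var
```
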
@@ -204,7 +197,7 @@ def handle(fn, d, include = 0):
| 204 | return d | 197 | return d |
| 205 | 198 | ||
| 206 | def feeder(lineno, s, fn, root, d): | 199 | def feeder(lineno, s, fn, root, d): |
| 207 | global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, __bbpath_found__, classes, bb, __residue__ | 200 | global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, classes, bb, __residue__ |
| 208 | if __infunc__: | 201 | if __infunc__: |
| 209 | if s == '}': | 202 | if s == '}': |
| 210 | __body__.append('') | 203 | __body__.append('') |
@@ -336,6 +329,10 @@ def feeder(lineno, s, fn, root, d):
| 336 | 329 | ||
| 337 | data.setVarFlag(var, "task", 1, d) | 330 | data.setVarFlag(var, "task", 1, d) |
| 338 | 331 | ||
| 332 | bbtasks = data.getVar('__BBTASKS', d) or [] | ||
| 333 | bbtasks.append(var) | ||
| 334 | data.setVar('__BBTASKS', bbtasks, d) | ||
| 335 | |||
| 339 | if after is not None: | 336 | if after is not None: |
| 340 | # set up deps for function | 337 | # set up deps for function |
| 341 | data.setVarFlag(var, "deps", after.split(), d) | 338 | data.setVarFlag(var, "deps", after.split(), d) |
@@ -348,8 +345,11 @@ def feeder(lineno, s, fn, root, d):
| 348 | if m: | 345 | if m: |
| 349 | fns = m.group(1) | 346 | fns = m.group(1) |
| 350 | hs = __word__.findall(fns) | 347 | hs = __word__.findall(fns) |
| 348 | bbhands = data.getVar('__BBHANDLERS', d) or [] | ||
| 351 | for h in hs: | 349 | for h in hs: |
| 350 | bbhands.append(h) | ||
| 352 | data.setVarFlag(h, "handler", 1, d) | 351 | data.setVarFlag(h, "handler", 1, d) |
| 352 | data.setVar('__BBHANDLERS', bbhands, d) | ||
| 353 | return | 353 | return |
| 354 | 354 | ||
| 355 | m = __inherit_regexp__.match(s) | 355 | m = __inherit_regexp__.match(s) |
@@ -386,16 +386,11 @@ def set_additional_vars(file, d, include):
| 386 | 386 | ||
| 387 | bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s: set_additional_vars" % file) | 387 | bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s: set_additional_vars" % file) |
| 388 | 388 | ||
| 389 | src_uri = data.getVar('SRC_URI', d) | 389 | src_uri = data.getVar('SRC_URI', d, 1) |
| 390 | if not src_uri: | 390 | if not src_uri: |
| 391 | return | 391 | return |
| 392 | src_uri = data.expand(src_uri, d) | ||
| 393 | 392 | ||
| 394 | a = data.getVar('A', d) | 393 | a = (data.getVar('A', d, 1) or '').split() |
| 395 | if a: | ||
| 396 | a = data.expand(a, d).split() | ||
| 397 | else: | ||
| 398 | a = [] | ||
| 399 | 394 | ||
| 400 | from bb import fetch | 395 | from bb import fetch |
| 401 | try: | 396 | try: |
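
set_additional_vars() now asks getVar() for already-expanded values (the trailing 1) instead of expanding SRC_URI and A by hand. A toy sketch of that expand-on-get behaviour; the variable values and the expansion function here are simplified stand-ins, not the real bb.data implementation:

```python
# Toy sketch (Python 2) of getVar(var, d, 1) returning an expanded value,
# which is why the separate data.expand() calls above were dropped.
import re

d = {'PN': 'example-app', 'PV': '1.0',
     'SRC_URI': 'http://example.com/${PN}-${PV}.tar.gz'}   # invented recipe data

def expand(s, d):
    return re.sub(r'\$\{(\w+)\}', lambda m: d.get(m.group(1), m.group(0)), s)

def getVar(var, d, exp=0):
    val = d.get(var)
    if val and exp:
        return expand(val, d)
    return val

print getVar('SRC_URI', d)       # unexpanded: .../${PN}-${PV}.tar.gz
print getVar('SRC_URI', d, 1)    # expanded:   .../example-app-1.0.tar.gz
```
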
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 3dde9a9ffb..07821e23de 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py

@@ -1,4 +1,4 @@
| 1 | #!/usr/bin/env python | 1 | #!/usr/bin/env python |
| 2 | # ex:ts=4:sw=4:sts=4:et | 2 | # ex:ts=4:sw=4:sts=4:et |
| 3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | 3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- |
| 4 | """ | 4 | """ |
@@ -25,9 +25,8 @@ import bb, os, sys
| 25 | 25 | ||
| 26 | class TaskFailure(Exception): | 26 | class TaskFailure(Exception): |
| 27 | """Exception raised when a task in a runqueue fails""" | 27 | """Exception raised when a task in a runqueue fails""" |
| 28 | 28 | def __init__(self, x): | |
| 29 | def __init__(self, fnid, fn, taskname): | 29 | self.args = x |
| 30 | self.args = fnid, fn, taskname | ||
| 31 | 30 | ||
| 32 | class RunQueue: | 31 | class RunQueue: |
| 33 | """ | 32 | """ |
@@ -319,21 +318,23 @@ class RunQueue:
| 319 | 318 | ||
| 320 | failures = 0 | 319 | failures = 0 |
| 321 | while 1: | 320 | while 1: |
| 322 | try: | 321 | failed_fnids = self.execute_runqueue_internal(cooker, cfgData, dataCache, taskData) |
| 323 | self.execute_runqueue_internal(cooker, cfgData, dataCache, taskData) | 322 | if len(failed_fnids) == 0: |
| 324 | return failures | 323 | return failures |
| 325 | except bb.runqueue.TaskFailure, (fnid, taskData.fn_index[fnid], taskname): | 324 | if taskData.abort: |
| 326 | if taskData.abort: | 325 | raise bb.runqueue.TaskFailure(failed_fnids) |
| 327 | raise | 326 | for fnid in failed_fnids: |
| 327 | #print "Failure: %s %s %s" % (fnid, taskData.fn_index[fnid], self.runq_task[fnid]) | ||
| 328 | taskData.fail_fnid(fnid) | 328 | taskData.fail_fnid(fnid) |
| 329 | self.reset_runqueue() | ||
| 330 | self.prepare_runqueue(cfgData, dataCache, taskData, runlist) | ||
| 331 | failures = failures + 1 | 329 | failures = failures + 1 |
| 330 | self.reset_runqueue() | ||
| 331 | self.prepare_runqueue(cfgData, dataCache, taskData, runlist) | ||
| 332 | 332 | ||
| 333 | def execute_runqueue_internal(self, cooker, cfgData, dataCache, taskData): | 333 | def execute_runqueue_internal(self, cooker, cfgData, dataCache, taskData): |
| 334 | """ | 334 | """ |
| 335 | Run the tasks in a queue prepared by prepare_runqueue | 335 | Run the tasks in a queue prepared by prepare_runqueue |
| 336 | """ | 336 | """ |
| 337 | import signal | ||
| 337 | 338 | ||
| 338 | bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue") | 339 | bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue") |
| 339 | 340 | ||
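
execute_runqueue() now loops itself: it collects the failed fnids from each internal pass, marks them with taskData.fail_fnid(), rebuilds the runqueue and tries again unless taskData.abort is set. A condensed, self-contained sketch of that control flow, with a canned list of per-pass results standing in for the real queue:

```python
# Condensed sketch (Python 2) of the new retry loop; the canned 'results'
# list stands in for execute_runqueue_internal().
class TaskFailure(Exception):
    def __init__(self, x):
        self.args = x

results = [[3], []]          # first pass fails fnid 3, second pass is clean
abort = False
failures = 0

while 1:
    failed_fnids = results.pop(0)      # stands in for the internal pass
    if len(failed_fnids) == 0:
        break
    if abort:
        raise TaskFailure(failed_fnids)
    for fnid in failed_fnids:
        print "marking fnid %d as failed and re-resolving providers" % fnid
        failures = failures + 1
    # reset_runqueue() and prepare_runqueue() would run here

print "finished with %d earlier failure(s)" % failures
```
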
@@ -342,11 +343,15 @@ class RunQueue:
| 342 | runq_complete = [] | 343 | runq_complete = [] |
| 343 | active_builds = 0 | 344 | active_builds = 0 |
| 344 | build_pids = {} | 345 | build_pids = {} |
| 346 | failed_fnids = [] | ||
| 345 | 347 | ||
| 346 | if len(self.runq_fnid) == 0: | 348 | if len(self.runq_fnid) == 0: |
| 347 | # nothing to do | 349 | # nothing to do |
| 348 | return | 350 | return |
| 349 | 351 | ||
| 352 | def sigint_handler(signum, frame): | ||
| 353 | raise KeyboardInterrupt | ||
| 354 | |||
| 350 | def get_next_task(data): | 355 | def get_next_task(data): |
| 351 | """ | 356 | """ |
| 352 | Return the id of the highest priority task that is buildable | 357 | Return the id of the highest priority task that is buildable |
@@ -414,6 +419,11 @@ class RunQueue:
| 414 | except OSError, e: | 419 | except OSError, e: |
| 415 | bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror)) | 420 | bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror)) |
| 416 | if pid == 0: | 421 | if pid == 0: |
| 422 | # Bypass finally below | ||
| 423 | active_builds = 0 | ||
| 424 | # Stop Ctrl+C being sent to children | ||
| 425 | signal.signal(signal.SIGINT, signal.SIG_IGN) | ||
| 426 | sys.stdin = open('/dev/null', 'r') | ||
| 417 | cooker.configuration.cmd = taskname[3:] | 427 | cooker.configuration.cmd = taskname[3:] |
| 418 | try: | 428 | try: |
| 419 | cooker.tryBuild(fn, False) | 429 | cooker.tryBuild(fn, False) |
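
The forked child now ignores SIGINT and detaches stdin, so Ctrl+C is delivered only to the parent, which can then tidy up the remaining tasks. A self-contained sketch of that fork pattern (time.sleep() stands in for cooker.tryBuild()):

```python
# Sketch (Python 2) of the child-process setup added above: ignore SIGINT
# in the child and detach stdin, then do the work and exit without ever
# returning into the parent's control flow.
import os, sys, signal, time

pid = os.fork()
if pid == 0:
    # child: stop Ctrl+C being delivered here and detach stdin
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    sys.stdin = open('/dev/null', 'r')
    try:
        time.sleep(1)          # stands in for cooker.tryBuild()
    except:
        os._exit(1)
    os._exit(0)

# parent: wait for the child and inspect its exit status
result = os.waitpid(pid, 0)
print "child %d exited with status %d" % (result[0], result[1])
```
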
@@ -434,26 +444,36 @@ class RunQueue:
| 434 | active_builds = active_builds - 1 | 444 | active_builds = active_builds - 1 |
| 435 | task = build_pids[result[0]] | 445 | task = build_pids[result[0]] |
| 436 | if result[1] != 0: | 446 | if result[1] != 0: |
| 447 | del build_pids[result[0]] | ||
| 437 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task, taskData))) | 448 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task, taskData))) |
| 438 | raise bb.runqueue.TaskFailure(self.runq_fnid[task], taskData.fn_index[self.runq_fnid[task]], self.runq_task[task]) | 449 | failed_fnids.append(self.runq_fnid[task]) |
| 450 | break | ||
| 439 | task_complete(self, task) | 451 | task_complete(self, task) |
| 440 | del build_pids[result[0]] | 452 | del build_pids[result[0]] |
| 441 | continue | 453 | continue |
| 442 | break | 454 | break |
| 443 | except SystemExit: | 455 | finally: |
| 444 | raise | 456 | try: |
| 445 | except: | 457 | while active_builds > 0: |
| 446 | bb.msg.error(bb.msg.domain.RunQueue, "Exception received") | 458 | bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % active_builds) |
| 447 | while active_builds > 0: | 459 | tasknum = 1 |
| 448 | bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % active_builds) | 460 | for k, v in build_pids.iteritems(): |
| 449 | tasknum = 1 | 461 | bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v, taskData), k)) |
| 462 | tasknum = tasknum + 1 | ||
| 463 | result = os.waitpid(-1, 0) | ||
| 464 | task = build_pids[result[0]] | ||
| 465 | if result[1] != 0: | ||
| 466 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task, taskData))) | ||
| 467 | failed_fnids.append(self.runq_fnid[task]) | ||
| 468 | del build_pids[result[0]] | ||
| 469 | active_builds = active_builds - 1 | ||
| 470 | if len(failed_fnids) > 0: | ||
| 471 | return failed_fnids | ||
| 472 | except: | ||
| 473 | bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGTERM to remaining %s tasks" % active_builds) | ||
| 450 | for k, v in build_pids.iteritems(): | 474 | for k, v in build_pids.iteritems(): |
| 451 | bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v, taskData), k)) | 475 | os.kill(k, signal.SIGTERM) |
| 452 | tasknum = tasknum + 1 | 476 | raise |
| 453 | result = os.waitpid(-1, 0) | ||
| 454 | del build_pids[result[0]] | ||
| 455 | active_builds = active_builds - 1 | ||
| 456 | raise | ||
| 457 | 477 | ||
| 458 | # Sanity Checks | 478 | # Sanity Checks |
| 459 | for task in range(len(self.runq_fnid)): | 479 | for task in range(len(self.runq_fnid)): |
@@ -464,7 +484,7 @@ class RunQueue:
| 464 | if runq_complete[task] == 0: | 484 | if runq_complete[task] == 0: |
| 465 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task) | 485 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task) |
| 466 | 486 | ||
| 467 | return 0 | 487 | return failed_fnids |
| 468 | 488 | ||
| 469 | def dump_data(self, taskQueue): | 489 | def dump_data(self, taskQueue): |
| 470 | """ | 490 | """ |
diff --git a/bitbake/lib/bb/shell.py b/bitbake/lib/bb/shell.py
index 760c371d90..711cd4335f 100644
--- a/bitbake/lib/bb/shell.py
+++ b/bitbake/lib/bb/shell.py

@@ -179,8 +179,9 @@ class BitBakeShellCommands:
| 179 | global last_exception | 179 | global last_exception |
| 180 | last_exception = Providers.NoProvider | 180 | last_exception = Providers.NoProvider |
| 181 | 181 | ||
| 182 | except runqueue.TaskFailure, (fnid, fn, taskname): | 182 | except runqueue.TaskFailure, fnids: |
| 183 | print "ERROR: '%s, %s' failed" % (fn, taskname) | 183 | for fnid in fnids: |
| 184 | print "ERROR: '%s' failed" % td.fn_index[fnid]) | ||
| 184 | global last_exception | 185 | global last_exception |
| 185 | last_exception = runqueue.TaskFailure | 186 | last_exception = runqueue.TaskFailure |
| 186 | 187 | ||
