Diffstat (limited to 'bitbake')
-rw-r--r--   bitbake/ChangeLog             5
-rw-r--r--   bitbake/lib/bb/cache.py      78
-rw-r--r--   bitbake/lib/bb/cooker.py      7
-rw-r--r--   bitbake/lib/bb/runqueue.py  130
-rw-r--r--   bitbake/lib/bb/taskdata.py   14
5 files changed, 152 insertions, 82 deletions
diff --git a/bitbake/ChangeLog b/bitbake/ChangeLog
index d074a5e239..7bed88112e 100644
--- a/bitbake/ChangeLog
+++ b/bitbake/ChangeLog
@@ -1,4 +1,5 @@
 Changes in BitBake 1.8.x:
+    - Fix -f (force) in conjunction with -b
     - Fix exit code for build failures in --continue mode
     - Fix git branch tags fetching
     - Change parseConfigurationFile so it works on real data, not a copy
@@ -21,6 +22,10 @@ Changes in BitBake 1.8.x:
       the way bitbake schedules tasks
     - Add BB_STAMP_POLICY variable/option ("perfile" or "full") controlling
       how extensively stamps are looked at for validity
+    - When handling build target failures make sure idepends are checked and
+      failed where needed. Fixes --continue mode crashes.
+    - Fix problems with recrdeptask handling where some idepends weren't handled
+      correctly.
 
 Changes in BitBake 1.8.10:
     - Psyco is available only for x86 - do not use it on other architectures.
diff --git a/bitbake/lib/bb/cache.py b/bitbake/lib/bb/cache.py
index 60e863d52d..9acd301f52 100644
--- a/bitbake/lib/bb/cache.py
+++ b/bitbake/lib/bb/cache.py
@@ -80,7 +80,7 @@ class Cache:
             if old_mtime > newest_mtime:
                 newest_mtime = old_mtime
 
-        if self.mtime(self.cachefile) >= newest_mtime:
+        if bb.parse.cached_mtime_noerror(self.cachefile) >= newest_mtime:
             try:
                 p = pickle.Unpickler(file(self.cachefile, "rb"))
                 self.depends_cache, version_data = p.load()
@@ -91,7 +91,7 @@ class Cache:
            except EOFError:
                bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...")
                self.depends_cache = {}
-           except (ValueError, KeyError):
+           except:
                bb.msg.note(1, bb.msg.domain.Cache, "Invalid cache found, rebuilding...")
                self.depends_cache = {}
        else:
@@ -199,31 +199,34 @@ class Cache:
            self.remove(fn)
            return False
 
+       mtime = bb.parse.cached_mtime_noerror(fn)
+
        # Check file still exists
-       if self.mtime(fn) == 0:
+       if mtime == 0:
            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s not longer exists" % fn)
            self.remove(fn)
            return False
 
        # Check the file's timestamp
-       if bb.parse.cached_mtime(fn) > self.getVar("CACHETIMESTAMP", fn, True):
+       if mtime > self.getVar("CACHETIMESTAMP", fn, True):
            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s changed" % fn)
            self.remove(fn)
            return False
 
        # Check dependencies are still valid
        depends = self.getVar("__depends", fn, True)
-       for f,old_mtime in depends:
-           # Check if file still exists
-           if self.mtime(f) == 0:
-               self.remove(fn)
-               return False
-
-           new_mtime = bb.parse.cached_mtime(f)
-           if (new_mtime > old_mtime):
-               bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s's dependency %s changed" % (fn, f))
-               self.remove(fn)
-               return False
+       if depends:
+           for f,old_mtime in depends:
+               fmtime = bb.parse.cached_mtime_noerror(f)
+               # Check if file still exists
+               if fmtime == 0:
+                   self.remove(fn)
+                   return False
+
+               if (fmtime > old_mtime):
+                   bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s's dependency %s changed" % (fn, f))
+                   self.remove(fn)
+                   return False
 
        #bb.msg.debug(2, bb.msg.domain.Cache, "Depends Cache: %s is clean" % fn)
        if not fn in self.clean:
@@ -284,7 +287,6 @@ class Cache:
        pv = self.getVar('PV', file_name, True)
        pr = self.getVar('PR', file_name, True)
        dp = int(self.getVar('DEFAULT_PREFERENCE', file_name, True) or "0")
-       provides = Set([pn] + (self.getVar("PROVIDES", file_name, True) or "").split())
        depends = bb.utils.explode_deps(self.getVar("DEPENDS", file_name, True) or "")
        packages = (self.getVar('PACKAGES', file_name, True) or "").split()
        packages_dynamic = (self.getVar('PACKAGES_DYNAMIC', file_name, True) or "").split()
@@ -304,24 +306,31 @@ class Cache:
        cacheData.pkg_pepvpr[file_name] = (pe,pv,pr)
        cacheData.pkg_dp[file_name] = dp
 
+       provides = [pn]
+       for provide in (self.getVar("PROVIDES", file_name, True) or "").split():
+           if provide not in provides:
+               provides.append(provide)
+
        # Build forward and reverse provider hashes
        # Forward: virtual -> [filenames]
        # Reverse: PN -> [virtuals]
        if pn not in cacheData.pn_provides:
-           cacheData.pn_provides[pn] = Set()
-       cacheData.pn_provides[pn] |= provides
+           cacheData.pn_provides[pn] = []
 
-       cacheData.fn_provides[file_name] = Set()
+       cacheData.fn_provides[file_name] = provides
        for provide in provides:
            if provide not in cacheData.providers:
                cacheData.providers[provide] = []
            cacheData.providers[provide].append(file_name)
-           cacheData.fn_provides[file_name].add(provide)
+           if not provide in cacheData.pn_provides[pn]:
+               cacheData.pn_provides[pn].append(provide)
 
-       cacheData.deps[file_name] = Set()
+       cacheData.deps[file_name] = []
        for dep in depends:
-           cacheData.all_depends.add(dep)
-           cacheData.deps[file_name].add(dep)
+           if not dep in cacheData.deps[file_name]:
+               cacheData.deps[file_name].append(dep)
+           if not dep in cacheData.all_depends:
+               cacheData.all_depends.append(dep)
 
        # Build reverse hash for PACKAGES, so runtime dependencies
        # can be be resolved (RDEPENDS, RRECOMMENDS etc.)
@@ -343,28 +352,21 @@ class Cache:
 
        # Build hash of runtime depends and rececommends
 
-       def add_dep(deplist, deps):
-           for dep in deps:
-               if not dep in deplist:
-                   deplist[dep] = ""
-
        if not file_name in cacheData.rundeps:
            cacheData.rundeps[file_name] = {}
        if not file_name in cacheData.runrecs:
            cacheData.runrecs[file_name] = {}
 
-       rdepends = bb.utils.explode_deps(self.getVar('RDEPENDS', file_name, True) or "")
-       rrecommends = bb.utils.explode_deps(self.getVar('RRECOMMENDS', file_name, True) or "")
+       rdepends = self.getVar('RDEPENDS', file_name, True) or ""
+       rrecommends = self.getVar('RRECOMMENDS', file_name, True) or ""
        for package in packages + [pn]:
            if not package in cacheData.rundeps[file_name]:
-               cacheData.rundeps[file_name][package] = {}
+               cacheData.rundeps[file_name][package] = []
            if not package in cacheData.runrecs[file_name]:
-               cacheData.runrecs[file_name][package] = {}
+               cacheData.runrecs[file_name][package] = []
 
-           add_dep(cacheData.rundeps[file_name][package], rdepends)
-           add_dep(cacheData.runrecs[file_name][package], rrecommends)
-           add_dep(cacheData.rundeps[file_name][package], bb.utils.explode_deps(self.getVar("RDEPENDS_%s" % package, file_name, True) or ""))
-           add_dep(cacheData.runrecs[file_name][package], bb.utils.explode_deps(self.getVar("RRECOMMENDS_%s" % package, file_name, True) or ""))
+           cacheData.rundeps[file_name][package] = rdepends + " " + (self.getVar("RDEPENDS_%s" % package, file_name, True) or "")
+           cacheData.runrecs[file_name][package] = rrecommends + " " + (self.getVar("RRECOMMENDS_%s" % package, file_name, True) or "")
 
        # Collect files we may need for possible world-dep
        # calculations
@@ -385,7 +387,7 @@ class Cache:
        data.setVar('TMPDIR', data.getVar('TMPDIR', config, 1) or "", config)
        bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
        oldpath = os.path.abspath(os.getcwd())
-       if self.mtime(bbfile_loc):
+       if bb.parse.cached_mtime_noerror(bbfile_loc):
            os.chdir(bbfile_loc)
        bb_data = data.init_db(config)
        try:
@@ -444,7 +446,7 @@ class CacheData:
        self.pkg_dp = {}
        self.pn_provides = {}
        self.fn_provides = {}
-       self.all_depends = Set()
+       self.all_depends = []
        self.deps = {}
        self.rundeps = {}
        self.runrecs = {}
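The cache.py hunks above route every timestamp lookup through bb.parse.cached_mtime_noerror(), which hands back 0 for a missing file instead of raising, and they replace the Set-based provider/dependency stores with plain lists. As a rough illustration of the resulting validity test, here is a small self-contained sketch; cached_mtime_noerror() and recipe_is_clean() below are simplified stand-ins written for this note, not BitBake's actual implementations.

import os

def cached_mtime_noerror(path, _cache={}):
    # Return a file's mtime, memoised in the default-argument dict;
    # 0 means the file is missing.
    if path not in _cache:
        try:
            _cache[path] = os.stat(path).st_mtime
        except OSError:
            _cache[path] = 0
    return _cache[path]

def recipe_is_clean(recipe, cached_at, depends):
    """depends is a list of (path, mtime-at-parse-time) pairs, like __depends."""
    mtime = cached_mtime_noerror(recipe)
    if mtime == 0 or mtime > cached_at:
        return False          # recipe deleted or edited since it was cached
    for path, old_mtime in depends or []:
        fmtime = cached_mtime_noerror(path)
        if fmtime == 0 or fmtime > old_mtime:
            return False      # an included/required file changed or vanished
    return True

The validity check in the diff has the same shape: a cached recipe entry survives only if neither the recipe file nor any file recorded in its __depends list has been touched since the cache was written, and the new "if depends:" guard tolerates recipes with no recorded dependencies at all.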
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index 38a8209760..619791f174 100644
--- a/bitbake/lib/bb/cooker.py
+++ b/bitbake/lib/bb/cooker.py
@@ -473,11 +473,11 @@ class BBCooker:
 
        # Load data into the cache for fn
        self.bb_cache = bb.cache.init(self)
-       self.bb_cache.loadData(fn, self.configuration.data)
+       self.bb_cache.loadData(fn, self.configuration.data)
 
        # Parse the loaded cache data
        self.status = bb.cache.CacheData()
-       self.bb_cache.handle_data(fn, self.status)
+       self.bb_cache.handle_data(fn, self.status)
 
        # Tweak some variables
        item = self.bb_cache.getVar('PN', fn, True)
@@ -493,7 +493,7 @@ class BBCooker:
        # Remove stamp for target if force mode active
        if self.configuration.force:
            bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (self.configuration.cmd, fn))
-           bb.build.del_stamp('do_%s' % self.configuration.cmd, bbfile_data)
+           bb.build.del_stamp('do_%s' % self.configuration.cmd, self.configuration.data)
 
        # Setup taskdata structure
        taskdata = bb.taskdata.TaskData(self.configuration.abort)
@@ -573,6 +573,7 @@ class BBCooker:
 
        bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files")
        (filelist, masked) = self.collect_bbfiles()
+       bb.data.renameVar("__depends", "__base_depends", self.configuration.data)
        self.parse_bbfiles(filelist, masked, self.myProgressCallback)
        bb.msg.debug(1, bb.msg.domain.Collection, "parsing complete")
 
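The new renameVar() call stashes the base configuration's __depends list under __base_depends before any recipes are parsed, so each recipe's own __depends records only the files that recipe itself pulled in; that is also why the cache check earlier now tolerates an empty depends value. A minimal sketch of what renaming a variable amounts to, using a plain dict purely as an illustration of the idea rather than bb.data's real datastore:

def rename_var(store, old, new):
    # Move a value to a new key and drop the old one (a stand-in for
    # bb.data.renameVar operating on a real BitBake datastore).
    if old in store:
        store[new] = store.pop(old)

config = {"__depends": [("conf/bitbake.conf", 1199145600.0)]}  # hypothetical entry
rename_var(config, "__depends", "__base_depends")
# Recipes parsed after this point start with no inherited __depends,
# so the dependency list stays per-recipe.
assert "__depends" not in config and "__base_depends" in config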
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 2765343a3e..7b3defd343 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -317,6 +317,7 @@ class RunQueue:
 
        depends = []
        runq_build = []
+       recursive_tdepends = {}
 
        taskData = self.taskData
 
@@ -382,14 +383,45 @@ class RunQueue:
            # e.g. do_sometask[depends] = "targetname:do_someothertask"
            # (makes sure sometask runs after targetname's someothertask)
            idepends = taskData.tasks_idepends[task]
-           for idepend in idepends:
-               depid = int(idepend.split(":")[0])
+           for (depid, idependtask) in idepends:
                if depid in taskData.build_targets:
                    # Won't be in build_targets if ASSUME_PROVIDED
                    depdata = taskData.build_targets[depid][0]
                    if depdata is not None:
                        dep = taskData.fn_index[depdata]
-                       depends.append(taskData.gettask_id(dep, idepend.split(":")[1]))
+                       depends.append(taskData.gettask_id(dep, idependtask))
+
+           # Create a list of recursive dependent tasks (from tdepends) and cache
+           def get_recursive_tdepends(task):
+               if not task:
+                   return []
+               if task in recursive_tdepends:
+                   return recursive_tdepends[task]
+               rectdepends = [task]
+               nextdeps = [task]
+               while len(nextdeps) != 0:
+                   newdeps = []
+                   for nextdep in nextdeps:
+                       for tdepend in taskData.tasks_tdepends[nextdep]:
+                           if tdepend not in rectdepends:
+                               rectdepends.append(tdepend)
+                               newdeps.append(tdepend)
+                   nextdeps = newdeps
+               recursive_tdepends[task] = rectdepends
+               return rectdepends
+
+           # Using the list of tdepends for this task create a list of
+           # the recursive idepends we have
+           def get_recursive_idepends(task):
+               if not task:
+                   return []
+               rectdepends = get_recursive_tdepends(task)
+
+               recidepends = []
+               for tdepend in rectdepends:
+                   for idepend in taskData.tasks_idepends[tdepend]:
+                       recidepends.append(idepend)
+               return recidepends
 
            def add_recursive_build(depid, depfnid):
                """
@@ -404,13 +436,11 @@ class RunQueue:
                    depdata = taskData.build_targets[depid][0]
                    if depdata is not None:
                        dep = taskData.fn_index[depdata]
-                       idepends = []
                        # Need to avoid creating new tasks here
                        taskid = taskData.gettask_id(dep, taskname, False)
                        if taskid is not None:
                            depends.append(taskid)
                            fnid = taskData.tasks_fnid[taskid]
-                           idepends = taskData.tasks_idepends[taskid]
                            #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
                        else:
                            fnid = taskData.getfn_id(dep)
@@ -420,10 +450,9 @@ class RunQueue:
                        for nextdepid in taskData.rdepids[fnid]:
                            if nextdepid not in rdep_seen:
                                add_recursive_run(nextdepid, fnid)
-                       for idepend in idepends:
-                           nextdepid = int(idepend.split(":")[0])
-                           if nextdepid not in dep_seen:
-                               add_recursive_build(nextdepid, fnid)
+                       for (idependid, idependtask) in get_recursive_idepends(taskid):
+                           if idependid not in dep_seen:
+                               add_recursive_build(idependid, fnid)
 
            def add_recursive_run(rdepid, depfnid):
                """
@@ -438,13 +467,11 @@ class RunQueue:
                    depdata = taskData.run_targets[rdepid][0]
                    if depdata is not None:
                        dep = taskData.fn_index[depdata]
-                       idepends = []
                        # Need to avoid creating new tasks here
                        taskid = taskData.gettask_id(dep, taskname, False)
                        if taskid is not None:
                            depends.append(taskid)
                            fnid = taskData.tasks_fnid[taskid]
-                           idepends = taskData.tasks_idepends[taskid]
                            #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
                        else:
                            fnid = taskData.getfn_id(dep)
@@ -454,10 +481,9 @@ class RunQueue:
                        for nextdepid in taskData.rdepids[fnid]:
                            if nextdepid not in rdep_seen:
                                add_recursive_run(nextdepid, fnid)
-                       for idepend in idepends:
-                           nextdepid = int(idepend.split(":")[0])
-                           if nextdepid not in dep_seen:
-                               add_recursive_build(nextdepid, fnid)
+                       for (idependid, idependtask) in get_recursive_idepends(taskid):
+                           if idependid not in dep_seen:
+                               add_recursive_build(idependid, fnid)
 
            # Resolve recursive 'recrdeptask' dependencies
            #
@@ -472,9 +498,9 @@ class RunQueue:
                        add_recursive_build(depid, fnid)
                    for rdepid in taskData.rdepids[fnid]:
                        add_recursive_run(rdepid, fnid)
-                   for idepend in idepends:
-                       depid = int(idepend.split(":")[0])
-                       add_recursive_build(depid, fnid)
+                   deptaskid = taskData.gettask_id(fn, taskname, False)
+                   for (idependid, idependtask) in get_recursive_idepends(deptaskid):
+                       add_recursive_build(idependid, fnid)
 
            # Rmove all self references
            if task in depends:
@@ -659,6 +685,16 @@ class RunQueue:
            if len(self.runq_depends[task]) == 0:
                buildable.append(task)
 
+       def check_buildable(self, task, buildable):
+           for revdep in self.runq_revdeps[task]:
+               alldeps = 1
+               for dep in self.runq_depends[revdep]:
+                   if dep in unchecked:
+                       alldeps = 0
+               if alldeps == 1:
+                   if revdep in unchecked:
+                       buildable.append(revdep)
+
        for task in range(len(self.runq_fnid)):
            if task not in unchecked:
                continue
@@ -669,12 +705,14 @@ class RunQueue:
            if not os.access(stampfile, os.F_OK):
                del unchecked[task]
                notcurrent.append(task)
+               check_buildable(self, task, buildable)
                continue
            # If its a 'nostamp' task, it's not current
            taskdep = self.dataCache.task_deps[fn]
            if 'nostamp' in taskdep and task in taskdep['nostamp']:
                del unchecked[task]
                notcurrent.append(task)
+               check_buildable(self, task, buildable)
                continue
 
        while (len(buildable) > 0):
@@ -705,14 +743,7 @@ class RunQueue:
                else:
                    notcurrent.append(task)
 
-               for revdep in self.runq_revdeps[task]:
-                   alldeps = 1
-                   for dep in self.runq_depends[revdep]:
-                       if dep in unchecked:
-                           alldeps = 0
-                   if alldeps == 1:
-                       if revdep in unchecked:
-                           nextbuildable.append(revdep)
+               check_buildable(self, task, nextbuildable)
 
            buildable = nextbuildable
 
@@ -729,6 +760,40 @@ class RunQueue:
            bb.fatal("check_stamps fatal internal error")
        return current
 
+   def check_stamp(self, task):
+
+       if self.stamppolicy == "perfile":
+           fulldeptree = False
+       else:
+           fulldeptree = True
+
+       fn = self.taskData.fn_index[self.runq_fnid[task]]
+       taskname = self.runq_task[task]
+       stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
+       # If the stamp is missing its not current
+       if not os.access(stampfile, os.F_OK):
+           return False
+       # If its a 'nostamp' task, it's not current
+       taskdep = self.dataCache.task_deps[fn]
+       if 'nostamp' in taskdep and task in taskdep['nostamp']:
+           return False
+
+       iscurrent = True
+       t1 = os.stat(stampfile)[stat.ST_MTIME]
+       for dep in self.runq_depends[task]:
+           if iscurrent:
+               fn2 = self.taskData.fn_index[self.runq_fnid[dep]]
+               taskname2 = self.runq_task[dep]
+               stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2)
+               if fulldeptree or fn == fn2:
+                   try:
+                       t2 = os.stat(stampfile2)[stat.ST_MTIME]
+                       if t1 < t2:
+                           iscurrent = False
+                   except:
+                       iscurrent = False
+
+       return iscurrent
 
    def execute_runqueue(self):
        """
@@ -817,25 +882,18 @@ class RunQueue:
 
        event.fire(bb.event.StampUpdate(self.target_pairs, self.dataCache.stamp, self.cfgdata))
 
-       # Find out which tasks have current stamps which we can skip when the
-       # time comes
-       currentstamps = self.check_stamps()
-       self.stats.taskSkipped(len(currentstamps))
-       self.stats.taskCompleted(len(currentstamps))
-
        while True:
            task = self.sched.next()
            if task is not None:
                fn = self.taskData.fn_index[self.runq_fnid[task]]
 
                taskname = self.runq_task[task]
-               if task in currentstamps:
-               #if bb.build.stamp_is_current(taskname, self.dataCache, fn):
+               if self.check_stamp(task):
                    bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
                    self.runq_running[task] = 1
                    self.task_complete(task)
-                   #self.stats.taskCompleted()
-                   #self.stats.taskSkipped()
+                   self.stats.taskCompleted()
+                   self.stats.taskSkipped()
                    continue
 
                bb.msg.note(1, bb.msg.domain.RunQueue, "Running task %d of %d (ID: %s, %s)" % (self.stats.completed + self.active_builds + 1, len(self.runq_fnid), task, self.get_user_idstring(task)))
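Two ideas carry most of the runqueue.py changes: recrdeptask handling now gathers inter-task [depends] entries by walking the task-dependency graph recursively (get_recursive_tdepends()/get_recursive_idepends(), memoised in the recursive_tdepends dict), and stamp checking moves from one up-front check_stamps() pass to a per-task check_stamp() that compares the task's stamp mtime with those of its dependencies, all of them under the "full" BB_STAMP_POLICY and only same-recipe ones under "perfile". The sketch below re-implements just the recursive walk over plain dictionaries; the names and data layout are illustrative, not RunQueue's internal arrays.

def recursive_tdepends(task, tdepends, cache):
    """Return task plus every task reachable through tdepends (breadth-first),
    memoising the result in cache so repeated lookups are cheap."""
    if task in cache:
        return cache[task]
    seen = [task]
    frontier = [task]
    while frontier:
        nextfrontier = []
        for current in frontier:
            for dep in tdepends.get(current, []):
                if dep not in seen:
                    seen.append(dep)
                    nextfrontier.append(dep)
        frontier = nextfrontier
    cache[task] = seen
    return seen

def recursive_idepends(task, tdepends, idepends, cache):
    """Union of the [depends] entries of every task reachable from task."""
    collected = []
    for reached in recursive_tdepends(task, tdepends, cache):
        collected.extend(idepends.get(reached, []))
    return collected

# Toy graph: compile -> patch -> unpack -> fetch; unpack carries an
# inter-task dependency on another recipe's do_populate_staging.
tdeps = {"compile": ["patch"], "patch": ["unpack"], "unpack": ["fetch"]}
ideps = {"unpack": [("quilt-native", "do_populate_staging")]}
print(recursive_idepends("compile", tdeps, ideps, {}))
# -> [('quilt-native', 'do_populate_staging')]

Memoising the walk matters because the same task is reached from many recrdeptask expansions; without the cache the breadth-first search would be repeated for each one, which is why the diff introduces the recursive_tdepends dictionary alongside the helper.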
diff --git a/bitbake/lib/bb/taskdata.py b/bitbake/lib/bb/taskdata.py
index 4a79e7a56d..0fb34ad748 100644
--- a/bitbake/lib/bb/taskdata.py
+++ b/bitbake/lib/bb/taskdata.py
@@ -150,7 +150,7 @@ class TaskData:
                ids = []
                for dep in task_deps['depends'][task].split():
                    if dep:
-                       ids.append(str(self.getbuild_id(dep.split(":")[0])) + ":" + dep.split(":")[1])
+                       ids.append(((self.getbuild_id(dep.split(":")[0])), dep.split(":")[1]))
                self.tasks_idepends[taskid].extend(ids)
 
        # Work out build dependencies
@@ -167,11 +167,11 @@ class TaskData:
        rdepends = dataCache.rundeps[fn]
        rrecs = dataCache.runrecs[fn]
        for package in rdepends:
-           for rdepend in rdepends[package]:
+           for rdepend in bb.utils.explode_deps(rdepends[package]):
                bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime dependency %s for %s" % (rdepend, fn))
                rdependids[self.getrun_id(rdepend)] = None
        for package in rrecs:
-           for rdepend in rrecs[package]:
+           for rdepend in bb.utils.explode_deps(rrecs[package]):
                bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime recommendation %s for %s" % (rdepend, fn))
                rdependids[self.getrun_id(rdepend)] = None
        self.rdepids[fnid] = rdependids.keys()
@@ -458,8 +458,6 @@ class TaskData:
        """
        if fnid in self.failed_fnids:
            return
-       if not missing_list:
-           missing_list = [fnid]
        bb.msg.debug(1, bb.msg.domain.Provider, "File '%s' is unbuildable, removing..." % self.fn_index[fnid])
        self.failed_fnids.append(fnid)
        for target in self.build_targets:
@@ -487,6 +485,12 @@ class TaskData:
        dependees = self.get_dependees(targetid)
        for fnid in dependees:
            self.fail_fnid(fnid, missing_list)
+       for taskid in range(len(self.tasks_idepends)):
+           idepends = self.tasks_idepends[taskid]
+           for (idependid, idependtask) in idepends:
+               if idependid == targetid:
+                   self.fail_fnid(self.tasks_fnid[taskid], missing_list)
+
        if self.abort and targetid in self.external_targets:
            bb.msg.error(bb.msg.domain.Provider, "Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s" % (self.build_names_index[targetid], missing_list))
            raise bb.providers.NoProvider
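The taskdata.py changes store inter-task dependencies as (build-target id, task name) tuples instead of "id:task" strings, and when a build target turns out to be unbuildable they now also fail any recipe whose [depends] flag points at that target, which is the fix behind the "--continue mode crashes" ChangeLog entry. A toy version of that propagation, using plain dicts and lists for this note rather than TaskData's index arrays:

def fail_target(targetid, tasks_idepends, tasks_fnid, failed_fnids):
    """Mark as failed every recipe whose [depends] points at targetid.

    tasks_idepends maps task index -> list of (build-target id, taskname)
    tuples, mirroring the tuple form introduced above; tasks_fnid maps task
    index -> owning recipe id."""
    for taskid, idepends in tasks_idepends.items():
        for depid, deptask in idepends:
            if depid == targetid:
                fnid = tasks_fnid[taskid]
                if fnid not in failed_fnids:
                    failed_fnids.append(fnid)

# Hypothetical example: task 7 of recipe 3 depends on build target 12's
# do_populate_staging, so failing target 12 must also fail recipe 3.
tasks_idepends = {7: [(12, "do_populate_staging")]}
tasks_fnid = {7: 3}
failed = []
fail_target(12, tasks_idepends, tasks_fnid, failed)
assert failed == [3]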