Diffstat (limited to 'bitbake/lib')
-rw-r--r--  bitbake/lib/bb/cache.py        |  53
-rw-r--r--  bitbake/lib/bb/cooker.py       | 294
-rw-r--r--  bitbake/lib/bb/cookerdata.py   |  59
-rw-r--r--  bitbake/lib/bb/runqueue.py     | 474
-rw-r--r--  bitbake/lib/bb/siggen.py       |  15
-rw-r--r--  bitbake/lib/bb/tinfoil.py      |   4
-rw-r--r--  bitbake/lib/bblayers/action.py |   2
-rw-r--r--  bitbake/lib/bblayers/query.py  |  12
8 files changed, 527 insertions, 386 deletions
diff --git a/bitbake/lib/bb/cache.py b/bitbake/lib/bb/cache.py
index 5f302d68b4..0d5a034b53 100644
--- a/bitbake/lib/bb/cache.py
+++ b/bitbake/lib/bb/cache.py
@@ -248,6 +248,11 @@ def virtualfn2realfn(virtualfn):
     """
     Convert a virtual file name to a real one + the associated subclass keyword
     """
+    mc = ""
+    if virtualfn.startswith('multiconfig:'):
+        elems = virtualfn.split(':')
+        mc = elems[1]
+        virtualfn = ":".join(elems[2:])
 
     fn = virtualfn
     cls = ""
@@ -255,15 +260,32 @@ def virtualfn2realfn(virtualfn):
         elems = virtualfn.split(':')
         cls = ":".join(elems[1:-1])
         fn = elems[-1]
-    return (fn, cls)
 
-def realfn2virtual(realfn, cls):
+    return (fn, cls, mc)
+
+def realfn2virtual(realfn, cls, mc):
+    """
+    Convert a real filename + the associated subclass keyword to a virtual filename
+    """
+    if cls:
+        realfn = "virtual:" + cls + ":" + realfn
+    if mc:
+        realfn = "multiconfig:" + mc + ":" + realfn
+    return realfn
+
+def variant2virtual(realfn, variant):
     """
     Convert a real filename + the associated subclass keyword to a virtual filename
     """
-    if cls == "":
+    if variant == "":
         return realfn
+    if variant.startswith("multiconfig:"):
+        elems = variant.split(":")
+        if elems[2]:
+            return "multiconfig:" + elems[1] + ":virtual:" + ":".join(elems[2:]) + ":" + realfn
+        return "multiconfig:" + elems[1] + ":" + realfn
-    return "virtual:" + cls + ":" + realfn
+    return "virtual:" + variant + ":" + realfn
 
 
 class NoCache(object):
 
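The two helpers above define the virtual filename convention this series builds on: an optional "virtual:<class>:" prefix for class extensions, wrapped in an optional "multiconfig:<mc>:" prefix for multiconfig builds. A minimal round-trip sketch (the configuration name "mymc" and the recipe path are invented for illustration, and importing bb.cache assumes a BitBake tree on sys.path):

    # Illustrative only: "mymc" and the .bb path are made-up names.
    from bb.cache import virtualfn2realfn, realfn2virtual

    virtfn = "multiconfig:mymc:virtual:native:/path/to/foo.bb"
    (fn, cls, mc) = virtualfn2realfn(virtfn)
    # fn == "/path/to/foo.bb", cls == "native", mc == "mymc"

    # realfn2virtual() re-applies the prefixes, innermost (class) first
    assert realfn2virtual(fn, cls, mc) == virtfn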
@@ -277,7 +299,7 @@ class NoCache(object):
         To do this, we need to parse the file.
         """
         logger.debug(1, "Parsing %s (full)" % virtualfn)
-        (fn, virtual) = virtualfn2realfn(virtualfn)
+        (fn, virtual, mc) = virtualfn2realfn(virtualfn)
         bb_data = self.load_bbfile(virtualfn, appends, virtonly=True)
         return bb_data[virtual]
 
@@ -288,8 +310,8 @@ class NoCache(object):
         """
 
         if virtonly:
-            (bbfile, virtual) = virtualfn2realfn(bbfile)
-            bb_data = self.data.createCopy()
+            (bbfile, virtual, mc) = virtualfn2realfn(bbfile)
+            bb_data = self.databuilder.mcdata[mc].createCopy()
             bb_data.setVar("__BBMULTICONFIG", mc)
             bb_data.setVar("__ONLYFINALISE", virtual or "default")
             datastores = self._load_bbfile(bb_data, bbfile, appends)
@@ -298,6 +320,15 @@ class NoCache(object):
         bb_data = self.data.createCopy()
         datastores = self._load_bbfile(bb_data, bbfile, appends)
 
+        for mc in self.databuilder.mcdata:
+            if not mc:
+                continue
+            bb_data = self.databuilder.mcdata[mc].createCopy()
+            bb_data.setVar("__BBMULTICONFIG", mc)
+            newstores = self._load_bbfile(bb_data, bbfile, appends)
+            for ns in newstores:
+                datastores["multiconfig:%s:%s" % (mc, ns)] = newstores[ns]
+
         return datastores
 
     def _load_bbfile(self, bb_data, bbfile, appends):
@@ -451,7 +482,7 @@ class Cache(NoCache):
         for variant, data in sorted(datastores.items(),
                                     key=lambda i: i[0],
                                     reverse=True):
-            virtualfn = realfn2virtual(filename, variant)
+            virtualfn = variant2virtual(filename, variant)
             variants.append(variant)
             depends = depends + (data.getVar("__depends", False) or [])
             if depends and not variant:
@@ -480,7 +511,7 @@ class Cache(NoCache):
             # info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo]
             info_array = self.depends_cache[filename]
             for variant in info_array[0].variants:
-                virtualfn = realfn2virtual(filename, variant)
+                virtualfn = variant2virtual(filename, variant)
                 infos.append((virtualfn, self.depends_cache[virtualfn]))
         else:
             return self.parse(filename, appends, configdata, self.caches_array)
@@ -601,7 +632,7 @@ class Cache(NoCache):
 
         invalid = False
         for cls in info_array[0].variants:
-            virtualfn = realfn2virtual(fn, cls)
+            virtualfn = variant2virtual(fn, cls)
             self.clean.add(virtualfn)
             if virtualfn not in self.depends_cache:
                 logger.debug(2, "Cache: %s is not cached", virtualfn)
@@ -613,7 +644,7 @@ class Cache(NoCache):
         # If any one of the variants is not present, mark as invalid for all
         if invalid:
             for cls in info_array[0].variants:
-                virtualfn = variant2virtual(fn, cls)
                 if virtualfn in self.clean:
                     logger.debug(2, "Cache: Removing %s from cache", virtualfn)
                     self.clean.remove(virtualfn)
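For the cache, multiconfig shows up as extra variant keys on each parsed recipe: load_bbfile() stores them as "multiconfig:<mc>:<class>" (with an empty class for the base recipe), and the new variant2virtual() maps those keys back onto virtual filenames. A sketch of the mapping, with invented names:

    # Illustrative only: "mymc" and the .bb path are made-up names.
    from bb.cache import variant2virtual

    fn = "/path/to/foo.bb"
    variant2virtual(fn, "native")                   # virtual:native:/path/to/foo.bb
    variant2virtual(fn, "multiconfig:mymc:native")  # multiconfig:mymc:virtual:native:/path/to/foo.bb
    variant2virtual(fn, "multiconfig:mymc:")        # multiconfig:mymc:/path/to/foo.bb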
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index fe95e73a12..d1ab4aa17b 100644
--- a/bitbake/lib/bb/cooker.py
+++ b/bitbake/lib/bb/cooker.py
@@ -166,7 +166,7 @@ class BBCooker:
     """
 
    def __init__(self, configuration, featureSet=None):
-        self.recipecache = None
+        self.recipecaches = None
         self.skiplist = {}
         self.featureset = CookerFeatures()
         if featureSet:
@@ -521,11 +521,14 @@ class BBCooker:
             nice = int(nice) - curnice
             buildlog.verbose("Renice to %s " % os.nice(nice))
 
-        if self.recipecache:
-            del self.recipecache
-        self.recipecache = bb.cache.CacheData(self.caches_array)
+        if self.recipecaches:
+            del self.recipecaches
+        self.multiconfigs = self.databuilder.mcdata.keys()
+        self.recipecaches = {}
+        for mc in self.multiconfigs:
+            self.recipecaches[mc] = bb.cache.CacheData(self.caches_array)
 
-        self.handleCollections( self.data.getVar("BBFILE_COLLECTIONS", True) )
+        self.handleCollections(self.data.getVar("BBFILE_COLLECTIONS", True))
 
     def updateConfigOpts(self, options, environment):
         clean = True
@@ -569,8 +572,8 @@ class BBCooker:
 
     def showVersions(self):
 
-        pkg_pn = self.recipecache.pkg_pn
-        (latest_versions, preferred_versions) = bb.providers.findProviders(self.data, self.recipecache, pkg_pn)
+        pkg_pn = self.recipecaches[''].pkg_pn
+        (latest_versions, preferred_versions) = bb.providers.findProviders(self.data, self.recipecaches[''], pkg_pn)
 
         logger.plain("%-35s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version")
         logger.plain("%-35s %25s %25s\n", "===========", "==============", "=================")
@@ -601,17 +604,18 @@ class BBCooker:
             # this showEnvironment() code path doesn't use the cache
             self.parseConfiguration()
 
-            fn, cls = bb.cache.virtualfn2realfn(buildfile)
+            fn, cls, mc = bb.cache.virtualfn2realfn(buildfile)
             fn = self.matchFile(fn)
-            fn = bb.cache.realfn2virtual(fn, cls)
+            fn = bb.cache.realfn2virtual(fn, cls, mc)
         elif len(pkgs_to_build) == 1:
             ignore = self.expanded_data.getVar("ASSUME_PROVIDED", True) or ""
             if pkgs_to_build[0] in set(ignore.split()):
                 bb.fatal("%s is in ASSUME_PROVIDED" % pkgs_to_build[0])
 
-            taskdata, runlist, pkgs_to_build = self.buildTaskData(pkgs_to_build, None, self.configuration.abort, allowincomplete=True)
+            taskdata, runlist = self.buildTaskData(pkgs_to_build, None, self.configuration.abort, allowincomplete=True)
 
-            fn = taskdata.build_targets[pkgs_to_build[0]][0]
+            mc = runlist[0][0]
+            fn = runlist[0][3]
         else:
             envdata = self.data
 
@@ -652,29 +656,43 @@ class BBCooker:
             task = self.configuration.cmd
 
         fulltargetlist = self.checkPackages(pkgs_to_build)
+        taskdata = {}
+        localdata = {}
 
-        localdata = data.createCopy(self.data)
-        bb.data.update_data(localdata)
-        bb.data.expandKeys(localdata)
-        taskdata = bb.taskdata.TaskData(abort, skiplist=self.skiplist, allowincomplete=allowincomplete)
+        for mc in self.multiconfigs:
+            taskdata[mc] = bb.taskdata.TaskData(abort, skiplist=self.skiplist, allowincomplete=allowincomplete)
+            localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
+            bb.data.update_data(localdata[mc])
+            bb.data.expandKeys(localdata[mc])
 
         current = 0
         runlist = []
         for k in fulltargetlist:
+            mc = ""
+            if k.startswith("multiconfig:"):
+                mc = k.split(":")[1]
+                k = ":".join(k.split(":")[2:])
             ktask = task
             if ":do_" in k:
                 k2 = k.split(":do_")
                 k = k2[0]
                 ktask = k2[1]
-            taskdata.add_provider(localdata, self.recipecache, k)
+            taskdata[mc].add_provider(localdata[mc], self.recipecaches[mc], k)
             current += 1
             if not ktask.startswith("do_"):
                 ktask = "do_%s" % ktask
-            runlist.append([k, ktask])
+            if k not in taskdata[mc].build_targets or not taskdata[mc].build_targets[k]:
+                # e.g. in ASSUME_PROVIDED
+                continue
+            fn = taskdata[mc].build_targets[k][0]
+            runlist.append([mc, k, ktask, fn])
             bb.event.fire(bb.event.TreeDataPreparationProgress(current, len(fulltargetlist)), self.data)
-        taskdata.add_unresolved(localdata, self.recipecache)
+
+        for mc in self.multiconfigs:
+            taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
+
         bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data)
-        return taskdata, runlist, fulltargetlist
+        return taskdata, runlist
 
     def prepareTreeData(self, pkgs_to_build, task):
         """
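buildTaskData() now keeps one TaskData per configuration and strips an optional "multiconfig:<name>:" prefix from each target before resolving it; runlist entries grow from [k, ktask] to [mc, k, ktask, fn]. A simplified, standalone sketch of the target-splitting step (target names, configuration name and the default task are invented for illustration):

    # Simplified sketch of the parsing in the loop above; names are invented.
    def parse_target(k, default_task="do_build"):
        mc = ""
        if k.startswith("multiconfig:"):
            mc = k.split(":")[1]
            k = ":".join(k.split(":")[2:])
        ktask = default_task
        if ":do_" in k:
            k2 = k.split(":do_")
            k = k2[0]
            ktask = "do_" + k2[1]
        return (mc, k, ktask)

    parse_target("multiconfig:mymc:core-image-minimal")  # ('mymc', 'core-image-minimal', 'do_build')
    parse_target("busybox:do_fetch")                     # ('', 'busybox', 'do_fetch')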
@@ -683,7 +701,7 @@ class BBCooker:
 
         # We set abort to False here to prevent unbuildable targets raising
         # an exception when we're just generating data
-        taskdata, runlist, pkgs_to_build = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)
+        taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)
 
         return runlist, taskdata
 
@@ -695,10 +713,15 @@ class BBCooker:
         information.
         """
         runlist, taskdata = self.prepareTreeData(pkgs_to_build, task)
-        rq = bb.runqueue.RunQueue(self, self.data, self.recipecache, taskdata, runlist)
+        rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
         rq.rqdata.prepare()
         return self.buildDependTree(rq, taskdata)
 
+    @staticmethod
+    def add_mc_prefix(mc, pn):
+        if mc:
+            return "multiconfig:%s.%s" % (mc, pn)
+        return pn
 
     def buildDependTree(self, rq, taskdata):
         seen_fns = []
@@ -711,24 +734,27 @@ class BBCooker:
         depend_tree["rdepends-pkg"] = {}
         depend_tree["rrecs-pkg"] = {}
         depend_tree['providermap'] = {}
-        depend_tree["layer-priorities"] = self.recipecache.bbfile_config_priorities
+        depend_tree["layer-priorities"] = self.bbfile_config_priorities
 
-        for name, fn in list(taskdata.get_providermap().items()):
-            pn = self.recipecache.pkg_fn[fn]
-            if name != pn:
-                version = "%s:%s-%s" % self.recipecache.pkg_pepvpr[fn]
-                depend_tree['providermap'][name] = (pn, version)
+        for mc in taskdata:
+            for name, fn in list(taskdata[mc].get_providermap().items()):
+                pn = self.recipecaches[mc].pkg_fn[fn]
+                pn = self.add_mc_prefix(mc, pn)
+                if name != pn:
+                    version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[fn]
+                    depend_tree['providermap'][name] = (pn, version)
 
         for tid in rq.rqdata.runtaskentries:
-            taskname = bb.runqueue.taskname_from_tid(tid)
-            fn = bb.runqueue.fn_from_tid(tid)
-            pn = self.recipecache.pkg_fn[fn]
-            version = "%s:%s-%s" % self.recipecache.pkg_pepvpr[fn]
+            (mc, fn, taskname) = bb.runqueue.split_tid(tid)
+            taskfn = bb.runqueue.taskfn_fromtid(tid)
+            pn = self.recipecaches[mc].pkg_fn[taskfn]
+            pn = self.add_mc_prefix(mc, pn)
+            version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[taskfn]
             if pn not in depend_tree["pn"]:
                 depend_tree["pn"][pn] = {}
-            depend_tree["pn"][pn]["filename"] = fn
+            depend_tree["pn"][pn]["filename"] = taskfn
             depend_tree["pn"][pn]["version"] = version
-            depend_tree["pn"][pn]["inherits"] = self.recipecache.inherits.get(fn, None)
+            depend_tree["pn"][pn]["inherits"] = self.recipecaches[mc].inherits.get(taskfn, None)
 
             # if we have extra caches, list all attributes they bring in
             extra_info = []
@@ -739,36 +765,37 @@ class BBCooker:
 
             # for all attributes stored, add them to the dependency tree
             for ei in extra_info:
-                depend_tree["pn"][pn][ei] = vars(self.recipecache)[ei][fn]
+                depend_tree["pn"][pn][ei] = vars(self.recipecaches[mc])[ei][taskfn]
 
 
             for dep in rq.rqdata.runtaskentries[tid].depends:
-                depfn = bb.runqueue.fn_from_tid(dep)
-                deppn = self.recipecache.pkg_fn[depfn]
+                (depmc, depfn, deptaskname) = bb.runqueue.split_tid(dep)
+                deptaskfn = bb.runqueue.taskfn_fromtid(dep)
+                deppn = self.recipecaches[mc].pkg_fn[deptaskfn]
                 dotname = "%s.%s" % (pn, bb.runqueue.taskname_from_tid(tid))
                 if not dotname in depend_tree["tdepends"]:
                     depend_tree["tdepends"][dotname] = []
                 depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, bb.runqueue.taskname_from_tid(dep)))
-            if fn not in seen_fns:
-                seen_fns.append(fn)
+            if taskfn not in seen_fns:
+                seen_fns.append(taskfn)
                 packages = []
 
                 depend_tree["depends"][pn] = []
-                for dep in taskdata.depids[fn]:
+                for dep in taskdata[mc].depids[taskfn]:
                     depend_tree["depends"][pn].append(dep)
 
                 depend_tree["rdepends-pn"][pn] = []
-                for rdep in taskdata.rdepids[fn]:
+                for rdep in taskdata[mc].rdepids[taskfn]:
                     depend_tree["rdepends-pn"][pn].append(rdep)
 
-                rdepends = self.recipecache.rundeps[fn]
+                rdepends = self.recipecaches[mc].rundeps[taskfn]
                 for package in rdepends:
                     depend_tree["rdepends-pkg"][package] = []
                     for rdepend in rdepends[package]:
                         depend_tree["rdepends-pkg"][package].append(rdepend)
                     packages.append(package)
 
-                rrecs = self.recipecache.runrecs[fn]
+                rrecs = self.recipecaches[mc].runrecs[taskfn]
                 for package in rrecs:
                     depend_tree["rrecs-pkg"][package] = []
                     for rdepend in rrecs[package]:
@@ -780,7 +807,7 @@ class BBCooker:
                 if package not in depend_tree["packages"]:
                     depend_tree["packages"][package] = {}
                     depend_tree["packages"][package]["pn"] = pn
-                    depend_tree["packages"][package]["filename"] = fn
+                    depend_tree["packages"][package]["filename"] = taskfn
                     depend_tree["packages"][package]["version"] = version
 
         return depend_tree
@@ -807,44 +834,54 @@ class BBCooker:
             cachefields = getattr(cache_class, 'cachefields', [])
             extra_info = extra_info + cachefields
 
-        for tid in taskdata.taskentries:
-            fn = bb.runqueue.fn_from_tid(tid)
-            pn = self.recipecache.pkg_fn[fn]
+        tids = []
+        for mc in taskdata:
+            for tid in taskdata[mc].taskentries:
+                tids.append(tid)
+
+        for tid in tids:
+            (mc, fn, taskname) = bb.runqueue.split_tid(tid)
+            taskfn = bb.runqueue.taskfn_fromtid(tid)
+
+            pn = self.recipecaches[mc].pkg_fn[taskfn]
+            pn = self.add_mc_prefix(mc, pn)
 
             if pn not in depend_tree["pn"]:
                 depend_tree["pn"][pn] = {}
-                depend_tree["pn"][pn]["filename"] = fn
-                version = "%s:%s-%s" % self.recipecache.pkg_pepvpr[fn]
+                depend_tree["pn"][pn]["filename"] = taskfn
+                version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[taskfn]
                 depend_tree["pn"][pn]["version"] = version
-            rdepends = self.recipecache.rundeps[fn]
-            rrecs = self.recipecache.runrecs[fn]
-            depend_tree["pn"][pn]["inherits"] = self.recipecache.inherits.get(fn, None)
+            rdepends = self.recipecaches[mc].rundeps[taskfn]
+            rrecs = self.recipecaches[mc].runrecs[taskfn]
+            depend_tree["pn"][pn]["inherits"] = self.recipecaches[mc].inherits.get(taskfn, None)
 
             # for all extra attributes stored, add them to the dependency tree
             for ei in extra_info:
-                depend_tree["pn"][pn][ei] = vars(self.recipecache)[ei][fn]
+                depend_tree["pn"][pn][ei] = vars(self.recipecaches[mc])[ei][taskfn]
 
-            if fn not in seen_fns:
-                seen_fns.append(fn)
+            if taskfn not in seen_fns:
+                seen_fns.append(taskfn)
 
                 depend_tree["depends"][pn] = []
-                for item in taskdata.depids[fn]:
+                for item in taskdata[mc].depids[taskfn]:
                     pn_provider = ""
-                    if dep in taskdata.build_targets and taskdata.build_targets[dep]:
-                        fn_provider = taskdata.build_targets[dep][0]
-                        pn_provider = self.recipecache.pkg_fn[fn_provider]
+                    if dep in taskdata[mc].build_targets and taskdata[mc].build_targets[dep]:
+                        fn_provider = taskdata[mc].build_targets[dep][0]
+                        pn_provider = self.recipecaches[mc].pkg_fn[fn_provider]
                     else:
                         pn_provider = item
+                    pn_provider = self.add_mc_prefix(mc, pn_provider)
                     depend_tree["depends"][pn].append(pn_provider)
 
                 depend_tree["rdepends-pn"][pn] = []
-                for rdep in taskdata.rdepids[fn]:
+                for rdep in taskdata[mc].rdepids[taskfn]:
                     pn_rprovider = ""
-                    if rdep in taskdata.run_targets and taskdata.run_targets[rdep]:
-                        fn_rprovider = taskdata.run_targets[rdep][0]
-                        pn_rprovider = self.recipecache.pkg_fn[fn_rprovider]
+                    if rdep in taskdata[mc].run_targets and taskdata[mc].run_targets[rdep]:
+                        fn_rprovider = taskdata[mc].run_targets[rdep][0]
+                        pn_rprovider = self.recipecaches[mc].pkg_fn[fn_rprovider]
                     else:
                         pn_rprovider = rdep
+                    pn_rprovider = self.add_mc_prefix(mc, pn_rprovider)
                     depend_tree["rdepends-pn"][pn].append(pn_rprovider)
 
             depend_tree["rdepends-pkg"].update(rdepends)
@@ -928,7 +965,7 @@ class BBCooker:
         # Determine which bbappends haven't been applied
 
         # First get list of recipes, including skipped
-        recipefns = list(self.recipecache.pkg_fn.keys())
+        recipefns = list(self.recipecaches[''].pkg_fn.keys())
         recipefns.extend(self.skiplist.keys())
 
         # Work out list of bbappends that have been applied
@@ -952,20 +989,21 @@ class BBCooker:
 
     def handlePrefProviders(self):
 
-        localdata = data.createCopy(self.data)
-        bb.data.update_data(localdata)
-        bb.data.expandKeys(localdata)
+        for mc in self.multiconfigs:
+            localdata = data.createCopy(self.databuilder.mcdata[mc])
+            bb.data.update_data(localdata)
+            bb.data.expandKeys(localdata)
 
-        # Handle PREFERRED_PROVIDERS
-        for p in (localdata.getVar('PREFERRED_PROVIDERS', True) or "").split():
-            try:
-                (providee, provider) = p.split(':')
-            except:
-                providerlog.critical("Malformed option in PREFERRED_PROVIDERS variable: %s" % p)
-                continue
-            if providee in self.recipecache.preferred and self.recipecache.preferred[providee] != provider:
-                providerlog.error("conflicting preferences for %s: both %s and %s specified", providee, provider, self.recipecache.preferred[providee])
-            self.recipecache.preferred[providee] = provider
+            # Handle PREFERRED_PROVIDERS
+            for p in (localdata.getVar('PREFERRED_PROVIDERS', True) or "").split():
+                try:
+                    (providee, provider) = p.split(':')
+                except:
+                    providerlog.critical("Malformed option in PREFERRED_PROVIDERS variable: %s" % p)
+                    continue
+                if providee in self.recipecaches[mc].preferred and self.recipecaches[mc].preferred[providee] != provider:
+                    providerlog.error("conflicting preferences for %s: both %s and %s specified", providee, provider, self.recipecaches[mc].preferred[providee])
+                self.recipecaches[mc].preferred[providee] = provider
 
     def findCoreBaseFiles(self, subdir, configfile):
         corebase = self.data.getVar('COREBASE', True) or ""
@@ -1060,10 +1098,10 @@ class BBCooker:
         """
         pkg_list = []
 
-        for pfn in self.recipecache.pkg_fn:
-            inherits = self.recipecache.inherits.get(pfn, None)
+        for pfn in self.recipecaches[''].pkg_fn:
+            inherits = self.recipecaches[''].inherits.get(pfn, None)
             if inherits and klass in inherits:
-                pkg_list.append(self.recipecache.pkg_fn[pfn])
+                pkg_list.append(self.recipecaches[''].pkg_fn[pfn])
 
         return pkg_list
 
@@ -1096,10 +1134,10 @@ class BBCooker:
         shell.start( self )
 
 
-    def handleCollections( self, collections ):
+    def handleCollections(self, collections):
         """Handle collections"""
         errors = False
-        self.recipecache.bbfile_config_priorities = []
+        self.bbfile_config_priorities = []
         if collections:
             collection_priorities = {}
             collection_depends = {}
@@ -1177,7 +1215,7 @@ class BBCooker:
                     parselog.error("BBFILE_PATTERN_%s \"%s\" is not a valid regular expression", c, regex)
                     errors = True
                     continue
-                self.recipecache.bbfile_config_priorities.append((c, regex, cre, collection_priorities[c]))
+                self.bbfile_config_priorities.append((c, regex, cre, collection_priorities[c]))
             if errors:
                 # We've already printed the actual error(s)
                 raise CollectionError("Errors during parsing layer configuration")
@@ -1200,7 +1238,7 @@ class BBCooker:
         if bf.startswith("/") or bf.startswith("../"):
             bf = os.path.abspath(bf)
 
-        self.collection = CookerCollectFiles(self.recipecache.bbfile_config_priorities)
+        self.collection = CookerCollectFiles(self.bbfile_config_priorities)
         filelist, masked = self.collection.collect_bbfiles(self.data, self.expanded_data)
         try:
             os.stat(bf)
@@ -1250,7 +1288,7 @@ class BBCooker:
         if (task == None):
            task = self.configuration.cmd
 
-        fn, cls = bb.cache.virtualfn2realfn(buildfile)
+        fn, cls, mc = bb.cache.virtualfn2realfn(buildfile)
         fn = self.matchFile(fn)
 
         self.buildSetVars()
@@ -1260,7 +1298,7 @@ class BBCooker:
         infos = bb_cache.parse(fn, self.collection.get_file_appends(fn))
         infos = dict(infos)
 
-        fn = bb.cache.realfn2virtual(fn, cls)
+        fn = bb.cache.realfn2virtual(fn, cls, mc)
         try:
             info_array = infos[fn]
         except KeyError:
@@ -1269,29 +1307,30 @@ class BBCooker:
         if info_array[0].skipped:
             bb.fatal("%s was skipped: %s" % (fn, info_array[0].skipreason))
 
-        self.recipecache.add_from_recipeinfo(fn, info_array)
+        self.recipecaches[mc].add_from_recipeinfo(fn, info_array)
 
         # Tweak some variables
         item = info_array[0].pn
-        self.recipecache.ignored_dependencies = set()
-        self.recipecache.bbfile_priority[fn] = 1
+        self.recipecaches[mc].ignored_dependencies = set()
+        self.recipecaches[mc].bbfile_priority[fn] = 1
 
         # Remove external dependencies
-        self.recipecache.task_deps[fn]['depends'] = {}
-        self.recipecache.deps[fn] = []
-        self.recipecache.rundeps[fn] = []
-        self.recipecache.runrecs[fn] = []
+        self.recipecaches[mc].task_deps[fn]['depends'] = {}
+        self.recipecaches[mc].deps[fn] = []
+        self.recipecaches[mc].rundeps[fn] = []
+        self.recipecaches[mc].runrecs[fn] = []
 
         # Invalidate task for target if force mode active
         if self.configuration.force:
             logger.verbose("Invalidate task %s, %s", task, fn)
             if not task.startswith("do_"):
                 task = "do_%s" % task
-            bb.parse.siggen.invalidate_task(task, self.recipecache, fn)
+            bb.parse.siggen.invalidate_task(task, self.recipecaches[mc], fn)
 
         # Setup taskdata structure
-        taskdata = bb.taskdata.TaskData(self.configuration.abort)
-        taskdata.add_provider(self.data, self.recipecache, item)
+        taskdata = {}
+        taskdata[mc] = bb.taskdata.TaskData(self.configuration.abort)
+        taskdata[mc].add_provider(self.data, self.recipecaches[mc], item)
 
         buildname = self.data.getVar("BUILDNAME", True)
         bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.expanded_data)
@@ -1299,9 +1338,9 @@ class BBCooker:
         # Execute the runqueue
         if not task.startswith("do_"):
             task = "do_%s" % task
-        runlist = [[item, task]]
+        runlist = [[mc, item, task, fn]]
 
-        rq = bb.runqueue.RunQueue(self, self.data, self.recipecache, taskdata, runlist)
+        rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
 
         def buildFileIdle(server, rq, abort):
 
@@ -1382,23 +1421,20 @@ class BBCooker:
         packages = ["%s:%s" % (target, task) for target in targets]
         bb.event.fire(bb.event.BuildInit(packages), self.expanded_data)
 
-        taskdata, runlist, fulltargetlist = self.buildTaskData(targets, task, self.configuration.abort)
+        taskdata, runlist = self.buildTaskData(targets, task, self.configuration.abort)
 
         buildname = self.data.getVar("BUILDNAME", False)
 
         # make targets to always look as <target>:do_<task>
         ntargets = []
-        for target in fulltargetlist:
-            if ":" in target:
-                if ":do_" not in target:
-                    target = "%s:do_%s" % tuple(target.split(":", 1))
-            else:
-                target = "%s:%s" % (target, task)
-            ntargets.append(target)
+        for target in runlist:
+            if target[0]:
+                ntargets.append("multiconfig:%s:%s:%s" % (target[0], target[1], target[2]))
+            ntargets.append("%s:%s" % (target[1], target[2]))
 
         bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.data)
 
-        rq = bb.runqueue.RunQueue(self, self.data, self.recipecache, taskdata, runlist)
+        rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
         if 'universe' in targets:
             rq.rqdata.warn_multi_bb = True
 
@@ -1513,13 +1549,14 @@ class BBCooker:
         if CookerFeatures.SEND_SANITYEVENTS in self.featureset:
             bb.event.fire(bb.event.SanityCheck(False), self.data)
 
-        ignore = self.expanded_data.getVar("ASSUME_PROVIDED", True) or ""
-        self.recipecache.ignored_dependencies = set(ignore.split())
+        for mc in self.multiconfigs:
+            ignore = self.databuilder.mcdata[mc].getVar("ASSUME_PROVIDED", True) or ""
+            self.recipecaches[mc].ignored_dependencies = set(ignore.split())
 
-        for dep in self.configuration.extra_assume_provided:
-            self.recipecache.ignored_dependencies.add(dep)
+            for dep in self.configuration.extra_assume_provided:
+                self.recipecaches[mc].ignored_dependencies.add(dep)
 
-        self.collection = CookerCollectFiles(self.recipecache.bbfile_config_priorities)
+        self.collection = CookerCollectFiles(self.bbfile_config_priorities)
         (filelist, masked) = self.collection.collect_bbfiles(self.data, self.expanded_data)
 
         self.parser = CookerParser(self, filelist, masked)
@@ -1533,13 +1570,15 @@ class BBCooker:
                 raise bb.BBHandledException()
             self.show_appends_with_no_recipes()
             self.handlePrefProviders()
-            self.recipecache.bbfile_priority = self.collection.collection_priorities(self.recipecache.pkg_fn, self.data)
+            for mc in self.multiconfigs:
+                self.recipecaches[mc].bbfile_priority = self.collection.collection_priorities(self.recipecaches[mc].pkg_fn, self.data)
             self.state = state.running
 
             # Send an event listing all stamps reachable after parsing
            # which the metadata may use to clean up stale data
-            event = bb.event.ReachableStamps(self.recipecache.stamp)
-            bb.event.fire(event, self.expanded_data)
+            for mc in self.multiconfigs:
+                event = bb.event.ReachableStamps(self.recipecaches[mc].stamp)
+                bb.event.fire(event, self.databuilder.mcdata[mc])
             return None
 
         return True
@@ -1558,23 +1597,26 @@ class BBCooker:
             parselog.warning("Explicit target \"%s\" is in ASSUME_PROVIDED, ignoring" % pkg)
 
         if 'world' in pkgs_to_build:
-            bb.providers.buildWorldTargetList(self.recipecache)
             pkgs_to_build.remove('world')
-            for t in self.recipecache.world_target:
-                pkgs_to_build.append(t)
+            for mc in self.multiconfigs:
+                bb.providers.buildWorldTargetList(self.recipecaches[mc])
+                for t in self.recipecaches[mc].world_target:
+                    if mc:
+                        t = "multiconfig:" + mc + ":" + t
+                    pkgs_to_build.append(t)
 
         if 'universe' in pkgs_to_build:
             parselog.warning("The \"universe\" target is only intended for testing and may produce errors.")
             parselog.debug(1, "collating packages for \"universe\"")
             pkgs_to_build.remove('universe')
-            for t in self.recipecache.universe_target:
-                pkgs_to_build.append(t)
+            for mc in self.multiconfigs:
+                for t in self.recipecaches[mc].universe_target:
+                    if mc:
+                        t = "multiconfig:" + mc + ":" + t
+                    pkgs_to_build.append(t)
 
         return pkgs_to_build
 
-
-
-
     def pre_serve(self):
         # Empty the environment. The environment will be populated as
         # necessary from the data store.
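With per-configuration recipe caches, 'world' (and 'universe') now expand across every configuration, prefixing targets that come from the non-default ones. A standalone sketch of the expansion (configuration and recipe names invented):

    # Sketch of the 'world' expansion above; all names are invented.
    multiconfigs = ["", "mymc"]            # "" is the default configuration
    world_target = {"": ["busybox"], "mymc": ["busybox"]}

    pkgs_to_build = []
    for mc in multiconfigs:
        for t in world_target[mc]:
            if mc:
                t = "multiconfig:" + mc + ":" + t
            pkgs_to_build.append(t)

    # pkgs_to_build == ['busybox', 'multiconfig:mymc:busybox']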
@@ -1823,7 +1865,7 @@ class CookerCollectFiles(object):
         # Calculate priorities for each file
         matched = set()
         for p in pkgfns:
-            realfn, cls = bb.cache.virtualfn2realfn(p)
+            realfn, cls, mc = bb.cache.virtualfn2realfn(p)
             priorities[p] = self.calc_bbfile_priority(realfn, matched)
 
         # Don't show the warning if the BBFILE_PATTERN did match .bbappend files
@@ -2164,11 +2206,13 @@ class CookerParser(object):
         if info_array[0].skipped:
             self.skipped += 1
             self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0])
-        self.bb_cache.add_info(virtualfn, info_array, self.cooker.recipecache,
+        (fn, cls, mc) = bb.cache.virtualfn2realfn(virtualfn)
+        self.bb_cache.add_info(virtualfn, info_array, self.cooker.recipecaches[mc],
                                 parsed=parsed, watcher = self.cooker.add_filewatch)
         return True
 
     def reparse(self, filename):
         infos = self.bb_cache.parse(filename, self.cooker.collection.get_file_appends(filename))
         for vfn, info_array in infos:
-            self.cooker.recipecache.add_from_recipeinfo(vfn, info_array)
+            (fn, cls, mc) = bb.cache.virtualfn2realfn(vfn)
+            self.cooker.recipecaches[mc].add_from_recipeinfo(vfn, info_array)
diff --git a/bitbake/lib/bb/cookerdata.py b/bitbake/lib/bb/cookerdata.py
index 71021a3510..fa1de7a22f 100644
--- a/bitbake/lib/bb/cookerdata.py
+++ b/bitbake/lib/bb/cookerdata.py
@@ -237,9 +237,9 @@ class CookerDataBuilder(object):
 
         bb.utils.set_context(bb.utils.clean_context())
         bb.event.set_class_handlers(bb.event.clean_class_handlers())
-        self.data = bb.data.init()
+        self.basedata = bb.data.init()
         if self.tracking:
-            self.data.enableTracking()
+            self.basedata.enableTracking()
 
         # Keep a datastore of the initial environment variables and their
         # values from when BitBake was launched to enable child processes
@@ -250,15 +250,40 @@ class CookerDataBuilder(object):
                 self.savedenv.setVar(k, cookercfg.env[k])
 
         filtered_keys = bb.utils.approved_variables()
-        bb.data.inheritFromOS(self.data, self.savedenv, filtered_keys)
-        self.data.setVar("BB_ORIGENV", self.savedenv)
+        bb.data.inheritFromOS(self.basedata, self.savedenv, filtered_keys)
+        self.basedata.setVar("BB_ORIGENV", self.savedenv)
 
         if worker:
-            self.data.setVar("BB_WORKERCONTEXT", "1")
+            self.basedata.setVar("BB_WORKERCONTEXT", "1")
+
+        self.data = self.basedata
+        self.mcdata = {}
 
     def parseBaseConfiguration(self):
         try:
-            self.parseConfigurationFiles()
+            bb.parse.init_parser(self.basedata)
+            self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
+
+            if self.data.getVar("BB_WORKERCONTEXT", False) is None:
+                bb.fetch.fetcher_init(self.data)
+                bb.codeparser.parser_cache_init(self.data)
+
+            bb.event.fire(bb.event.ConfigParsed(), self.data)
+
+            if self.data.getVar("BB_INVALIDCONF", False) is True:
+                self.data.setVar("BB_INVALIDCONF", False)
+                self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
+
+            bb.parse.init_parser(self.data)
+            self.data_hash = self.data.get_hash()
+            self.mcdata[''] = self.data
+
+            multiconfig = (self.data.getVar("BBMULTICONFIG", True) or "").split()
+            for config in multiconfig:
+                mcdata = self.parseConfigurationFiles(['conf/multiconfig/%s.conf' % config] + self.prefiles, self.postfiles)
+                bb.event.fire(bb.event.ConfigParsed(), mcdata)
+                self.mcdata[config] = mcdata
+
         except SyntaxError:
             raise bb.BBHandledException
         except bb.data_smart.ExpansionError as e:
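parseBaseConfiguration() now produces one datastore per configuration: the default one under the key '' plus one per name listed in BBMULTICONFIG, each parsed with conf/multiconfig/<name>.conf prepended to the usual prefiles. A minimal sketch of the resulting mapping; parse_config_files is a hypothetical stand-in for self.parseConfigurationFiles and the configuration name is invented:

    # Hypothetical sketch; parse_config_files stands in for
    # CookerDataBuilder.parseConfigurationFiles and "mymc" is invented.
    mcdata = {}
    mcdata[''] = parse_config_files(prefiles, postfiles)
    for config in (mcdata[''].getVar("BBMULTICONFIG", True) or "").split():
        mcdata[config] = parse_config_files(
            ['conf/multiconfig/%s.conf' % config] + prefiles, postfiles)
    # e.g. BBMULTICONFIG = "mymc" yields the keys '' and 'mymc'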
@@ -271,11 +296,8 @@ class CookerDataBuilder(object):
     def _findLayerConf(self, data):
         return findConfigFile("bblayers.conf", data)
 
-    def parseConfigurationFiles(self):
-        data = self.data
-        prefiles = self.prefiles
-        postfiles = self.postfiles
-        bb.parse.init_parser(data)
+    def parseConfigurationFiles(self, prefiles, postfiles):
+        data = bb.data.createCopy(self.basedata)
 
         # Parse files for loading *before* bitbake.conf and any includes
         for f in prefiles:
@@ -338,20 +360,7 @@ class CookerDataBuilder(object):
                 handlerln = int(data.getVarFlag(var, "lineno", False))
                 bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask", True) or "").split(), handlerfn, handlerln)
 
-        if data.getVar("BB_WORKERCONTEXT", False) is None:
-            bb.fetch.fetcher_init(data)
-            bb.codeparser.parser_cache_init(data)
-        bb.event.fire(bb.event.ConfigParsed(), data)
-
-        if data.getVar("BB_INVALIDCONF", False) is True:
-            data.setVar("BB_INVALIDCONF", False)
-            self.parseConfigurationFiles()
-            return
-
-        bb.parse.init_parser(data)
         data.setVar('BBINCLUDED',bb.parse.get_file_depends(data))
-        self.data = data
-        self.data_hash = data.get_hash()
-
 
+        return data
 
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 6a953b844a..ce30fccd43 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -48,6 +48,31 @@ def fn_from_tid(tid):
 def taskname_from_tid(tid):
     return tid.rsplit(":", 1)[1]
 
+def split_tid(tid):
+    if tid.startswith('multiconfig:'):
+        elems = tid.split(':')
+        mc = elems[1]
+        fn = ":".join(elems[2:-1])
+        taskname = elems[-1]
+    else:
+        tid = tid.rsplit(":", 1)
+        mc = ""
+        fn = tid[0]
+        taskname = tid[1]
+
+    return (mc, fn, taskname)
+
+def build_tid(mc, fn, taskname):
+    if mc:
+        return "multiconfig:" + mc + ":" + fn + ":" + taskname
+    return fn + ":" + taskname
+
+def taskfn_fromtid(tid):
+    (mc, fn, taskname) = split_tid(tid)
+    if mc:
+        return "multiconfig:" + mc + ":" + fn
+    return fn
+
 class RunQueueStats:
     """
     Holds statistics on the tasks handled by the associated runQueue
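These helpers define how task identifiers carry the configuration: a tid is "<fn>:<taskname>", optionally wrapped as "multiconfig:<mc>:<fn>:<taskname>". A round-trip sketch (the recipe path and configuration name are invented, and importing bb.runqueue assumes a BitBake tree on sys.path):

    # Illustrative only: "mymc" and the .bb path are made-up names.
    from bb.runqueue import split_tid, build_tid, taskfn_fromtid

    tid = "multiconfig:mymc:/path/to/foo.bb:do_compile"
    (mc, fn, taskname) = split_tid(tid)
    # mc == "mymc", fn == "/path/to/foo.bb", taskname == "do_compile"

    assert build_tid(mc, fn, taskname) == tid
    assert taskfn_fromtid(tid) == "multiconfig:mymc:/path/to/foo.bb"

    # Without the prefix, mc is empty:
    assert split_tid("/path/to/foo.bb:do_fetch") == ("", "/path/to/foo.bb", "do_fetch")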
@@ -110,9 +135,9 @@ class RunQueueScheduler(object):
         self.buildable = []
         self.stamps = {}
         for tid in self.rqdata.runtaskentries:
-            fn = fn_from_tid(tid)
-            taskname = taskname_from_tid(tid)
-            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
+            (mc, fn, taskname) = split_tid(tid)
+            taskfn = taskfn_fromtid(tid)
+            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
             if tid in self.rq.runq_buildable:
                 self.buildable.append(tid)
 
@@ -230,9 +255,9 @@ class RunQueueData:
     """
     BitBake Run Queue implementation
     """
-    def __init__(self, rq, cooker, cfgData, dataCache, taskData, targets):
+    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
         self.cooker = cooker
-        self.dataCache = dataCache
+        self.dataCaches = dataCaches
         self.taskData = taskData
         self.targets = targets
         self.rq = rq
@@ -264,8 +289,8 @@ class RunQueueData:
         return tid + task_name_suffix
 
     def get_short_user_idstring(self, task, task_name_suffix = ""):
-        fn = fn_from_tid(task)
-        pn = self.dataCache.pkg_fn[fn]
+        (mc, fn, taskname) = split_tid(task)
+        pn = self.dataCaches[mc].pkg_fn[fn]
         taskname = taskname_from_tid(task) + task_name_suffix
         return "%s:%s" % (pn, taskname)
 
@@ -429,7 +454,12 @@ class RunQueueData:
 
         taskData = self.taskData
 
-        if len(taskData.taskentries) == 0:
+        found = False
+        for mc in self.taskData:
+            if len(taskData[mc].taskentries) > 0:
+                found = True
+                break
+        if not found:
             # Nothing to do
             return 0
 
@@ -447,55 +477,60 @@ class RunQueueData: | |||
447 | # process is repeated for each type of dependency (tdepends, deptask, | 477 | # process is repeated for each type of dependency (tdepends, deptask, |
448 | # rdeptast, recrdeptask, idepends). | 478 | # rdeptast, recrdeptask, idepends). |
449 | 479 | ||
450 | def add_build_dependencies(depids, tasknames, depends): | 480 | def add_build_dependencies(depids, tasknames, depends, mc): |
451 | for depname in depids: | 481 | for depname in depids: |
452 | # Won't be in build_targets if ASSUME_PROVIDED | 482 | # Won't be in build_targets if ASSUME_PROVIDED |
453 | if depname not in taskData.build_targets or not taskData.build_targets[depname]: | 483 | if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]: |
454 | continue | 484 | continue |
455 | depdata = taskData.build_targets[depname][0] | 485 | depdata = taskData[mc].build_targets[depname][0] |
456 | if depdata is None: | 486 | if depdata is None: |
457 | continue | 487 | continue |
458 | for taskname in tasknames: | 488 | for taskname in tasknames: |
459 | t = depdata + ":" + taskname | 489 | t = depdata + ":" + taskname |
460 | if t in taskData.taskentries: | 490 | if t in taskData[mc].taskentries: |
461 | depends.add(t) | 491 | depends.add(t) |
462 | 492 | ||
463 | def add_runtime_dependencies(depids, tasknames, depends): | 493 | def add_runtime_dependencies(depids, tasknames, depends, mc): |
464 | for depname in depids: | 494 | for depname in depids: |
465 | if depname not in taskData.run_targets or not taskData.run_targets[depname]: | 495 | if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]: |
466 | continue | 496 | continue |
467 | depdata = taskData.run_targets[depname][0] | 497 | depdata = taskData[mc].run_targets[depname][0] |
468 | if depdata is None: | 498 | if depdata is None: |
469 | continue | 499 | continue |
470 | for taskname in tasknames: | 500 | for taskname in tasknames: |
471 | t = depdata + ":" + taskname | 501 | t = depdata + ":" + taskname |
472 | if t in taskData.taskentries: | 502 | if t in taskData[mc].taskentries: |
473 | depends.add(t) | 503 | depends.add(t) |
474 | 504 | ||
475 | def add_resolved_dependencies(fn, tasknames, depends): | 505 | def add_resolved_dependencies(mc, fn, tasknames, depends): |
476 | for taskname in tasknames: | 506 | for taskname in tasknames: |
477 | tid = fn + ":" + taskname | 507 | tid = build_tid(mc, fn, taskname) |
478 | if tid in self.runtaskentries: | 508 | if tid in self.runtaskentries: |
479 | depends.add(tid) | 509 | depends.add(tid) |
480 | 510 | ||
481 | for tid in taskData.taskentries: | 511 | for mc in taskData: |
512 | for tid in taskData[mc].taskentries: | ||
482 | 513 | ||
483 | fn = fn_from_tid(tid) | 514 | (mc, fn, taskname) = split_tid(tid) |
484 | taskname = taskname_from_tid(tid) | 515 | #runtid = build_tid(mc, fn, taskname) |
516 | taskfn = taskfn_fromtid(tid) | ||
485 | 517 | ||
486 | depends = set() | 518 | #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname) |
487 | task_deps = self.dataCache.task_deps[fn] | ||
488 | 519 | ||
489 | self.runtaskentries[tid] = RunTaskEntry() | 520 | depends = set() |
521 | task_deps = self.dataCaches[mc].task_deps[taskfn] | ||
490 | 522 | ||
491 | #logger.debug(2, "Processing %s:%s", fn, taskname) | 523 | self.runtaskentries[tid] = RunTaskEntry() |
492 | 524 | ||
493 | if fn not in taskData.failed_fns: | 525 | if fn in taskData[mc].failed_fns: |
526 | continue | ||
494 | 527 | ||
495 | # Resolve task internal dependencies | 528 | # Resolve task internal dependencies |
496 | # | 529 | # |
497 | # e.g. addtask before X after Y | 530 | # e.g. addtask before X after Y |
498 | depends.update(taskData.taskentries[tid].tdepends) | 531 | for t in taskData[mc].taskentries[tid].tdepends: |
532 | (_, depfn, deptaskname) = split_tid(t) | ||
533 | depends.add(build_tid(mc, depfn, deptaskname)) | ||
499 | 534 | ||
500 | # Resolve 'deptask' dependencies | 535 | # Resolve 'deptask' dependencies |
501 | # | 536 | # |
@@ -503,7 +538,7 @@ class RunQueueData: | |||
503 | # (makes sure sometask runs after someothertask of all DEPENDS) | 538 | # (makes sure sometask runs after someothertask of all DEPENDS) |
504 | if 'deptask' in task_deps and taskname in task_deps['deptask']: | 539 | if 'deptask' in task_deps and taskname in task_deps['deptask']: |
505 | tasknames = task_deps['deptask'][taskname].split() | 540 | tasknames = task_deps['deptask'][taskname].split() |
506 | add_build_dependencies(taskData.depids[fn], tasknames, depends) | 541 | add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc) |
507 | 542 | ||
508 | # Resolve 'rdeptask' dependencies | 543 | # Resolve 'rdeptask' dependencies |
509 | # | 544 | # |
@@ -511,31 +546,31 @@ class RunQueueData: | |||
511 | # (makes sure sometask runs after someothertask of all RDEPENDS) | 546 | # (makes sure sometask runs after someothertask of all RDEPENDS) |
512 | if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']: | 547 | if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']: |
513 | tasknames = task_deps['rdeptask'][taskname].split() | 548 | tasknames = task_deps['rdeptask'][taskname].split() |
514 | add_runtime_dependencies(taskData.rdepids[fn], tasknames, depends) | 549 | add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc) |
515 | 550 | ||
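For context, the 'deptask' and 'rdeptask' flags resolved in the two blocks above come from recipe or class metadata, not from this file. An illustrative example (the deptask line matches common OpenEmbedded usage; the rdeptask line is generic):

    # Illustrative BitBake metadata; not part of this commit:
    do_configure[deptask] = "do_populate_sysroot"   # after do_populate_sysroot of every DEPENDS entry
    do_sometask[rdeptask] = "do_someothertask"      # after do_someothertask of every RDEPENDS entry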
516 | # Resolve inter-task dependencies | 551 | # Resolve inter-task dependencies |
517 | # | 552 | # |
518 | # e.g. do_sometask[depends] = "targetname:do_someothertask" | 553 | # e.g. do_sometask[depends] = "targetname:do_someothertask" |
519 | # (makes sure sometask runs after targetname's someothertask) | 554 | # (makes sure sometask runs after targetname's someothertask) |
520 | idepends = taskData.taskentries[tid].idepends | 555 | idepends = taskData[mc].taskentries[tid].idepends |
521 | for (depname, idependtask) in idepends: | 556 | for (depname, idependtask) in idepends: |
522 | if depname in taskData.build_targets and taskData.build_targets[depname] and not depname in taskData.failed_deps: | 557 | if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps: |
523 | # Won't be in build_targets if ASSUME_PROVIDED | 558 | # Won't be in build_targets if ASSUME_PROVIDED |
524 | depdata = taskData.build_targets[depname][0] | 559 | depdata = taskData[mc].build_targets[depname][0] |
525 | if depdata is not None: | 560 | if depdata is not None: |
526 | t = depdata + ":" + idependtask | 561 | t = depdata + ":" + idependtask |
527 | depends.add(t) | 562 | depends.add(t) |
528 | if t not in taskData.taskentries: | 563 | if t not in taskData[mc].taskentries: |
529 | bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata)) | 564 | bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata)) |
530 | irdepends = taskData.taskentries[tid].irdepends | 565 | irdepends = taskData[mc].taskentries[tid].irdepends |
531 | for (depname, idependtask) in irdepends: | 566 | for (depname, idependtask) in irdepends: |
532 | if depname in taskData.run_targets: | 567 | if depname in taskData[mc].run_targets: |
533 | # Won't be in run_targets if ASSUME_PROVIDED | 568 | # Won't be in run_targets if ASSUME_PROVIDED |
534 | depdata = taskData.run_targets[depname][0] | 569 | depdata = taskData[mc].run_targets[depname][0] |
535 | if depdata is not None: | 570 | if depdata is not None: |
536 | t = depdata + ":" + idependtask | 571 | t = depdata + ":" + idependtask |
537 | depends.add(t) | 572 | depends.add(t) |
538 | if t not in taskData.taskentries: | 573 | if t not in taskData[mc].taskentries: |
539 | bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata)) | 574 | bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata)) |
540 | 575 | ||
541 | # Resolve recursive 'recrdeptask' dependencies (Part A) | 576 | # Resolve recursive 'recrdeptask' dependencies (Part A) |
@@ -546,18 +581,20 @@ class RunQueueData: | |||
546 | if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']: | 581 | if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']: |
547 | tasknames = task_deps['recrdeptask'][taskname].split() | 582 | tasknames = task_deps['recrdeptask'][taskname].split() |
548 | recursivetasks[tid] = tasknames | 583 | recursivetasks[tid] = tasknames |
549 | add_build_dependencies(taskData.depids[fn], tasknames, depends) | 584 | add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc) |
550 | add_runtime_dependencies(taskData.rdepids[fn], tasknames, depends) | 585 | add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc) |
551 | if taskname in tasknames: | 586 | if taskname in tasknames: |
552 | recursivetasksselfref.add(tid) | 587 | recursivetasksselfref.add(tid) |
553 | 588 | ||
554 | if 'recideptask' in task_deps and taskname in task_deps['recideptask']: | 589 | if 'recideptask' in task_deps and taskname in task_deps['recideptask']: |
555 | recursiveitasks[tid] = [] | 590 | recursiveitasks[tid] = [] |
556 | for t in task_deps['recideptask'][taskname].split(): | 591 | for t in task_deps['recideptask'][taskname].split(): |
557 | newdep = fn + ":" + t | 592 | newdep = build_tid(mc, fn, t) |
558 | recursiveitasks[tid].append(newdep) | 593 | recursiveitasks[tid].append(newdep) |
559 | 594 | ||
560 | self.runtaskentries[tid].depends = depends | 595 | self.runtaskentries[tid].depends = depends |
596 | |||
597 | #self.dump_data() | ||
561 | 598 | ||
562 | # Resolve recursive 'recrdeptask' dependencies (Part B) | 599 | # Resolve recursive 'recrdeptask' dependencies (Part B) |
563 | # | 600 | # |
@@ -574,7 +611,8 @@ class RunQueueData: | |||
574 | 611 | ||
575 | def generate_recdeps(t): | 612 | def generate_recdeps(t): |
576 | newdeps = set() | 613 | newdeps = set() |
577 | add_resolved_dependencies(fn_from_tid(t), tasknames, newdeps) | 614 | (mc, fn, taskname) = split_tid(t) |
615 | add_resolved_dependencies(mc, fn, tasknames, newdeps) | ||
578 | extradeps[tid].update(newdeps) | 616 | extradeps[tid].update(newdeps) |
579 | seendeps.add(t) | 617 | seendeps.add(t) |
580 | newdeps.add(t) | 618 | newdeps.add(t) |
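The generate_recdeps walk, only partially visible in this hunk, is the usual worklist pattern for computing a transitive dependency closure. A standalone sketch of that pattern, with invented names:

    def transitive_closure(start, deps):
        # deps maps a task id to the set of ids it directly depends on
        seen = set()
        work = [start]
        while work:
            tid = work.pop()
            if tid in seen:
                continue
            seen.add(tid)
            work.extend(deps.get(tid, ()))
        return seen

    deps = {"a": {"b"}, "b": {"c"}, "c": set()}
    assert transitive_closure("a", deps) == {"a", "b", "c"}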
@@ -606,6 +644,8 @@ class RunQueueData: | |||
606 | 644 | ||
607 | self.init_progress_reporter.next_stage() | 645 | self.init_progress_reporter.next_stage() |
608 | 646 | ||
647 | #self.dump_data() | ||
648 | |||
609 | # Step B - Mark all active tasks | 649 | # Step B - Mark all active tasks |
610 | # | 650 | # |
611 | # Start with the tasks we were asked to run and mark all dependencies | 651 | # Start with the tasks we were asked to run and mark all dependencies |
@@ -629,31 +669,30 @@ class RunQueueData: | |||
629 | for depend in depends: | 669 | for depend in depends: |
630 | mark_active(depend, depth+1) | 670 | mark_active(depend, depth+1) |
631 | 671 | ||
632 | self.target_pairs = [] | 672 | self.target_tids = [] |
633 | for target in self.targets: | 673 | for (mc, target, task, fn) in self.targets: |
634 | if target[0] not in taskData.build_targets or not taskData.build_targets[target[0]]: | 674 | |
675 | if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]: | ||
635 | continue | 676 | continue |
636 | 677 | ||
637 | if target[0] in taskData.failed_deps: | 678 | if target in taskData[mc].failed_deps: |
638 | continue | 679 | continue |
639 | 680 | ||
640 | fn = taskData.build_targets[target[0]][0] | ||
641 | task = target[1] | ||
642 | parents = False | 681 | parents = False |
643 | if task.endswith('-'): | 682 | if task.endswith('-'): |
644 | parents = True | 683 | parents = True |
645 | task = task[:-1] | 684 | task = task[:-1] |
646 | 685 | ||
647 | self.target_pairs.append((fn, task)) | 686 | if fn in taskData[mc].failed_fns: |
648 | |||
649 | if fn in taskData.failed_fns: | ||
650 | continue | 687 | continue |
651 | 688 | ||
689 | # fn already has mc prefix | ||
652 | tid = fn + ":" + task | 690 | tid = fn + ":" + task |
653 | if tid not in taskData.taskentries: | 691 | self.target_tids.append(tid) |
692 | if tid not in taskData[mc].taskentries: | ||
654 | import difflib | 693 | import difflib |
655 | tasks = [] | 694 | tasks = [] |
656 | for x in taskData.taskentries: | 695 | for x in taskData[mc].taskentries: |
657 | if x.startswith(fn + ":"): | 696 | if x.startswith(fn + ":"): |
658 | tasks.append(taskname_from_tid(x)) | 697 | tasks.append(taskname_from_tid(x)) |
659 | close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7) | 698 | close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7) |
@@ -661,7 +700,7 @@ class RunQueueData: | |||
661 | extra = ". Close matches:\n %s" % "\n ".join(close_matches) | 700 | extra = ". Close matches:\n %s" % "\n ".join(close_matches) |
662 | else: | 701 | else: |
663 | extra = "" | 702 | extra = "" |
664 | bb.msg.fatal("RunQueue", "Task %s does not exist for target %s%s" % (task, target[0], extra)) | 703 | bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra)) |
665 | 704 | ||
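The suggestion text above uses difflib.get_close_matches from the standard library, which fuzzy-matches the requested task name against the recipe's known tasks. For example:

    import difflib
    tasks = ["do_build", "do_compile", "do_configure"]
    print(difflib.get_close_matches("do_comple", tasks, cutoff=0.7))
    # -> ['do_compile']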
666 | # For tasks called "XXXX-", only run their dependencies | 705 | # For tasks called "XXXX-", only run their dependencies |
667 | if parents: | 706 | if parents: |
@@ -690,7 +729,7 @@ class RunQueueData: | |||
690 | 729 | ||
691 | # Check to make sure we still have tasks to run | 730 | # Check to make sure we still have tasks to run |
692 | if len(self.runtaskentries) == 0: | 731 | if len(self.runtaskentries) == 0: |
693 | if not taskData.abort: | 732 | if not taskData[''].abort: |
694 | bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") | 733 | bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") |
695 | else: | 734 | else: |
696 | bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.") | 735 | bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.") |
@@ -717,7 +756,6 @@ class RunQueueData: | |||
717 | endpoints.append(tid) | 756 | endpoints.append(tid) |
718 | for dep in revdeps: | 757 | for dep in revdeps: |
719 | if dep in self.runtaskentries[tid].depends: | 758 | if dep in self.runtaskentries[tid].depends: |
720 | #self.dump_data(taskData) | ||
721 | bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep)) | 759 | bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep)) |
722 | 760 | ||
723 | 761 | ||
@@ -732,24 +770,31 @@ class RunQueueData: | |||
732 | self.init_progress_reporter.next_stage() | 770 | self.init_progress_reporter.next_stage() |
733 | 771 | ||
734 | # Sanity Check - Check for multiple tasks building the same provider | 772 | # Sanity Check - Check for multiple tasks building the same provider |
735 | prov_list = {} | 773 | for mc in self.dataCaches: |
736 | seen_fn = [] | 774 | prov_list = {} |
737 | for tid in self.runtaskentries: | 775 | seen_fn = [] |
738 | fn = fn_from_tid(tid) | 776 | for tid in self.runtaskentries: |
739 | if fn in seen_fn: | 777 | (tidmc, fn, taskname) = split_tid(tid) |
740 | continue | 778 | taskfn = taskfn_fromtid(tid) |
741 | seen_fn.append(fn) | 779 | if taskfn in seen_fn: |
742 | for prov in self.dataCache.fn_provides[fn]: | 780 | continue |
743 | if prov not in prov_list: | 781 | if mc != tidmc: |
744 | prov_list[prov] = [fn] | 782 | continue |
745 | elif fn not in prov_list[prov]: | 783 | seen_fn.append(taskfn) |
746 | prov_list[prov].append(fn) | 784 | for prov in self.dataCaches[mc].fn_provides[taskfn]: |
747 | for prov in prov_list: | 785 | if prov not in prov_list: |
748 | if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist: | 786 | prov_list[prov] = [taskfn] |
787 | elif taskfn not in prov_list[prov]: | ||
788 | prov_list[prov].append(taskfn) | ||
789 | for prov in prov_list: | ||
790 | if len(prov_list[prov]) < 2: | ||
791 | continue | ||
792 | if prov in self.multi_provider_whitelist: | ||
793 | continue | ||
749 | seen_pn = [] | 794 | seen_pn = [] |
750 | # If two versions of the same PN are being built it's fatal, we don't support it. | 795 | # If two versions of the same PN are being built it's fatal, we don't support it. |
751 | for fn in prov_list[prov]: | 796 | for fn in prov_list[prov]: |
752 | pn = self.dataCache.pkg_fn[fn] | 797 | pn = self.dataCaches[mc].pkg_fn[fn] |
753 | if pn not in seen_pn: | 798 | if pn not in seen_pn: |
754 | seen_pn.append(pn) | 799 | seen_pn.append(pn) |
755 | else: | 800 | else: |
@@ -790,16 +835,16 @@ class RunQueueData: | |||
790 | commonprovs = None | 835 | commonprovs = None |
791 | commonrprovs = None | 836 | commonrprovs = None |
792 | for provfn in prov_list[prov]: | 837 | for provfn in prov_list[prov]: |
793 | provides = set(self.dataCache.fn_provides[provfn]) | 838 | provides = set(self.dataCaches[mc].fn_provides[provfn]) |
794 | rprovides = set() | 839 | rprovides = set() |
795 | for rprovide in self.dataCache.rproviders: | 840 | for rprovide in self.dataCaches[mc].rproviders: |
796 | if provfn in self.dataCache.rproviders[rprovide]: | 841 | if provfn in self.dataCaches[mc].rproviders[rprovide]: |
797 | rprovides.add(rprovide) | 842 | rprovides.add(rprovide) |
798 | for package in self.dataCache.packages: | 843 | for package in self.dataCaches[mc].packages: |
799 | if provfn in self.dataCache.packages[package]: | 844 | if provfn in self.dataCaches[mc].packages[package]: |
800 | rprovides.add(package) | 845 | rprovides.add(package) |
801 | for package in self.dataCache.packages_dynamic: | 846 | for package in self.dataCaches[mc].packages_dynamic: |
802 | if provfn in self.dataCache.packages_dynamic[package]: | 847 | if provfn in self.dataCaches[mc].packages_dynamic[package]: |
803 | rprovides.add(package) | 848 | rprovides.add(package) |
804 | if not commonprovs: | 849 | if not commonprovs: |
805 | commonprovs = set(provides) | 850 | commonprovs = set(provides) |
@@ -825,13 +870,14 @@ class RunQueueData: | |||
825 | self.init_progress_reporter.next_stage() | 870 | self.init_progress_reporter.next_stage() |
826 | 871 | ||
827 | # Create a whitelist usable by the stamp checks | 872 | # Create a whitelist usable by the stamp checks |
828 | stampfnwhitelist = [] | 873 | self.stampfnwhitelist = {} |
829 | for entry in self.stampwhitelist.split(): | 874 | for mc in self.taskData: |
830 | if entry not in self.taskData.build_targets: | 875 | self.stampfnwhitelist[mc] = [] |
831 | continue | 876 | for entry in self.stampwhitelist.split(): |
832 | fn = self.taskData.build_targets[entry][0] | 877 | if entry not in self.taskData[mc].build_targets: |
833 | stampfnwhitelist.append(fn) | 878 | continue |
834 | self.stampfnwhitelist = stampfnwhitelist | 879 | fn = self.taskData[mc].build_targets[entry][0] |
880 | self.stampfnwhitelist[mc].append(fn) | ||
835 | 881 | ||
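With this change the stamp whitelist becomes one list of recipe filenames per multiconfig, with '' naming the default configuration. A hypothetical result (paths invented):

    # Hypothetical per-multiconfig stamp whitelist:
    stampfnwhitelist = {
        "":     ["/meta/recipes-core/busybox/busybox_1.24.1.bb"],
        "mymc": [],
    }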
836 | self.init_progress_reporter.next_stage() | 882 | self.init_progress_reporter.next_stage() |
837 | 883 | ||
@@ -839,16 +885,16 @@ class RunQueueData: | |||
839 | self.runq_setscene_tids = [] | 885 | self.runq_setscene_tids = [] |
840 | if not self.cooker.configuration.nosetscene: | 886 | if not self.cooker.configuration.nosetscene: |
841 | for tid in self.runtaskentries: | 887 | for tid in self.runtaskentries: |
842 | setscenetid = tid + "_setscene" | 888 | (mc, fn, taskname) = split_tid(tid) |
843 | if setscenetid not in taskData.taskentries: | 889 | setscenetid = fn + ":" + taskname + "_setscene" |
890 | if setscenetid not in taskData[mc].taskentries: | ||
844 | continue | 891 | continue |
845 | task = self.runtaskentries[tid].task | ||
846 | self.runq_setscene_tids.append(tid) | 892 | self.runq_setscene_tids.append(tid) |
847 | 893 | ||
848 | def invalidate_task(fn, taskname, error_nostamp): | 894 | def invalidate_task(tid, error_nostamp): |
849 | taskdep = self.dataCache.task_deps[fn] | 895 | (mc, fn, taskname) = split_tid(tid) |
850 | tid = fn + ":" + taskname | 896 | taskdep = self.dataCaches[mc].task_deps[fn] |
851 | if tid not in taskData.taskentries: | 897 | if fn + ":" + taskname not in taskData[mc].taskentries: |
852 | logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname) | 898 | logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname) |
853 | if 'nostamp' in taskdep and taskname in taskdep['nostamp']: | 899 | if 'nostamp' in taskdep and taskname in taskdep['nostamp']: |
854 | if error_nostamp: | 900 | if error_nostamp: |
@@ -857,33 +903,35 @@ class RunQueueData: | |||
857 | bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname) | 903 | bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname) |
858 | else: | 904 | else: |
859 | logger.verbose("Invalidate task %s, %s", taskname, fn) | 905 | logger.verbose("Invalidate task %s, %s", taskname, fn) |
860 | bb.parse.siggen.invalidate_task(taskname, self.dataCache, fn) | 906 | bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], fn) |
861 | 907 | ||
862 | self.init_progress_reporter.next_stage() | 908 | self.init_progress_reporter.next_stage() |
863 | 909 | ||
864 | # Invalidate task if force mode active | 910 | # Invalidate task if force mode active |
865 | if self.cooker.configuration.force: | 911 | if self.cooker.configuration.force: |
866 | for (fn, target) in self.target_pairs: | 912 | for tid in self.target_tids: |
867 | invalidate_task(fn, target, False) | 913 | invalidate_task(tid, False) |
868 | 914 | ||
869 | # Invalidate task if invalidate mode active | 915 | # Invalidate task if invalidate mode active |
870 | if self.cooker.configuration.invalidate_stamp: | 916 | if self.cooker.configuration.invalidate_stamp: |
871 | for (fn, target) in self.target_pairs: | 917 | for tid in self.target_tids: |
918 | fn = fn_from_tid(tid) | ||
872 | for st in self.cooker.configuration.invalidate_stamp.split(','): | 919 | for st in self.cooker.configuration.invalidate_stamp.split(','): |
873 | if not st.startswith("do_"): | 920 | if not st.startswith("do_"): |
874 | st = "do_%s" % st | 921 | st = "do_%s" % st |
875 | invalidate_task(fn, st, True) | 922 | invalidate_task(fn + ":" + st, True) |
876 | 923 | ||
877 | self.init_progress_reporter.next_stage() | 924 | self.init_progress_reporter.next_stage() |
878 | 925 | ||
879 | # Create and print to the logs a virtual/xxxx -> PN (fn) table | 926 | # Create and print to the logs a virtual/xxxx -> PN (fn) table |
880 | virtmap = taskData.get_providermap(prefix="virtual/") | 927 | for mc in taskData: |
881 | virtpnmap = {} | 928 | virtmap = taskData[mc].get_providermap(prefix="virtual/") |
882 | for v in virtmap: | 929 | virtpnmap = {} |
883 | virtpnmap[v] = self.dataCache.pkg_fn[virtmap[v]] | 930 | for v in virtmap: |
884 | bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v])) | 931 | virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]] |
885 | if hasattr(bb.parse.siggen, "tasks_resolved"): | 932 | bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v])) |
886 | bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCache) | 933 | if hasattr(bb.parse.siggen, "tasks_resolved"): |
934 | bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc]) | ||
887 | 935 | ||
888 | self.init_progress_reporter.next_stage() | 936 | self.init_progress_reporter.next_stage() |
889 | 937 | ||
@@ -898,13 +946,17 @@ class RunQueueData: | |||
898 | procdep = [] | 946 | procdep = [] |
899 | for dep in self.runtaskentries[tid].depends: | 947 | for dep in self.runtaskentries[tid].depends: |
900 | procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep)) | 948 | procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep)) |
901 | self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(fn_from_tid(tid), taskname_from_tid(tid), procdep, self.dataCache) | 949 | (mc, fn, taskname) = split_tid(tid) |
950 | taskfn = taskfn_fromtid(tid) | ||
951 | self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc]) | ||
902 | task = self.runtaskentries[tid].task | 952 | task = self.runtaskentries[tid].task |
903 | 953 | ||
904 | bb.parse.siggen.writeout_file_checksum_cache() | 954 | bb.parse.siggen.writeout_file_checksum_cache() |
955 | |||
956 | #self.dump_data() | ||
905 | return len(self.runtaskentries) | 957 | return len(self.runtaskentries) |
906 | 958 | ||
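The per-task hash computed above folds a task's own signature together with the hashes of everything it depends on, so a change anywhere below a task changes that task's hash. A toy sketch of the idea, not the real siggen implementation:

    import hashlib

    def toy_taskhash(basehash, dep_hashes):
        # Mix the task's own base hash with its dependencies' hashes;
        # sorting makes the result independent of dependency order.
        h = hashlib.sha256(basehash.encode())
        for dep in sorted(dep_hashes):
            h.update(dep.encode())
        return h.hexdigest()

    print(toy_taskhash("abc123", ["h1", "h2"]))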
907 | def dump_data(self, taskQueue): | 959 | def dump_data(self): |
908 | """ | 960 | """ |
909 | Dump some debug information on the internal data structures | 961 | Dump some debug information on the internal data structures |
910 | """ | 962 | """ |
@@ -915,24 +967,17 @@ class RunQueueData: | |||
915 | self.runtaskentries[tid].depends, | 967 | self.runtaskentries[tid].depends, |
916 | self.runtaskentries[tid].revdeps) | 968 | self.runtaskentries[tid].revdeps) |
917 | 969 | ||
918 | logger.debug(3, "sorted_tasks:") | ||
919 | for tid in self.prio_map: | ||
920 | logger.debug(3, " %s: %s Deps %s RevDeps %s", tid, | ||
921 | self.runtaskentries[tid].weight, | ||
922 | self.runtaskentries[tid].depends, | ||
923 | self.runtaskentries[tid].revdeps) | ||
924 | |||
925 | class RunQueueWorker(): | 970 | class RunQueueWorker(): |
926 | def __init__(self, process, pipe): | 971 | def __init__(self, process, pipe): |
927 | self.process = process | 972 | self.process = process |
928 | self.pipe = pipe | 973 | self.pipe = pipe |
929 | 974 | ||
930 | class RunQueue: | 975 | class RunQueue: |
931 | def __init__(self, cooker, cfgData, dataCache, taskData, targets): | 976 | def __init__(self, cooker, cfgData, dataCaches, taskData, targets): |
932 | 977 | ||
933 | self.cooker = cooker | 978 | self.cooker = cooker |
934 | self.cfgData = cfgData | 979 | self.cfgData = cfgData |
935 | self.rqdata = RunQueueData(self, cooker, cfgData, dataCache, taskData, targets) | 980 | self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets) |
936 | 981 | ||
937 | self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile" | 982 | self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile" |
938 | self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None | 983 | self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None |
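Both variables read here come from the configuration metadata. Illustrative settings, assuming typical usage (the hash-check function name is the one OpenEmbedded's sstate.bbclass is believed to set, shown purely as an example):

    # Illustrative bitbake.conf / local.conf settings:
    # BB_STAMP_POLICY = "perfile"            # or "full" / "whitelist"
    # BB_HASHCHECK_FUNCTION = "sstate_checkhashes"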
@@ -948,7 +993,7 @@ class RunQueue: | |||
948 | self.worker = {} | 993 | self.worker = {} |
949 | self.fakeworker = {} | 994 | self.fakeworker = {} |
950 | 995 | ||
951 | def _start_worker(self, fakeroot = False, rqexec = None): | 996 | def _start_worker(self, mc, fakeroot = False, rqexec = None): |
952 | logger.debug(1, "Starting bitbake-worker") | 997 | logger.debug(1, "Starting bitbake-worker") |
953 | magic = "decafbad" | 998 | magic = "decafbad" |
954 | if self.cooker.configuration.profile: | 999 | if self.cooker.configuration.profile: |
@@ -971,10 +1016,10 @@ class RunQueue: | |||
971 | runqhash[tid] = self.rqdata.runtaskentries[tid].hash | 1016 | runqhash[tid] = self.rqdata.runtaskentries[tid].hash |
972 | 1017 | ||
973 | workerdata = { | 1018 | workerdata = { |
974 | "taskdeps" : self.rqdata.dataCache.task_deps, | 1019 | "taskdeps" : self.rqdata.dataCaches[mc].task_deps, |
975 | "fakerootenv" : self.rqdata.dataCache.fakerootenv, | 1020 | "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv, |
976 | "fakerootdirs" : self.rqdata.dataCache.fakerootdirs, | 1021 | "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs, |
977 | "fakerootnoenv" : self.rqdata.dataCache.fakerootnoenv, | 1022 | "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv, |
978 | "sigdata" : bb.parse.siggen.get_taskdata(), | 1023 | "sigdata" : bb.parse.siggen.get_taskdata(), |
979 | "runq_hash" : runqhash, | 1024 | "runq_hash" : runqhash, |
980 | "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel, | 1025 | "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel, |
@@ -1014,11 +1059,13 @@ class RunQueue: | |||
1014 | if self.worker: | 1059 | if self.worker: |
1015 | self.teardown_workers() | 1060 | self.teardown_workers() |
1016 | self.teardown = False | 1061 | self.teardown = False |
1017 | self.worker[''] = self._start_worker() | 1062 | for mc in self.rqdata.dataCaches: |
1063 | self.worker[mc] = self._start_worker(mc) | ||
1018 | 1064 | ||
1019 | def start_fakeworker(self, rqexec): | 1065 | def start_fakeworker(self, rqexec): |
1020 | if not self.fakeworker: | 1066 | if not self.fakeworker: |
1021 | self.fakeworker[''] = self._start_worker(True, rqexec) | 1067 | for mc in self.rqdata.dataCaches: |
1068 | self.fakeworker[mc] = self._start_worker(mc, True, rqexec) | ||
1022 | 1069 | ||
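Each multiconfig now gets its own bitbake-worker (and, on demand, its own fakeroot worker), keyed by the mc name instead of the former single '' entry. A hypothetical post-setup shape:

    # Hypothetical state after setup with one extra multiconfig "mymc":
    #   self.worker     == {"": <RunQueueWorker>, "mymc": <RunQueueWorker>}
    #   self.fakeworker == {}   # filled lazily by start_fakeworker()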
1023 | def teardown_workers(self): | 1070 | def teardown_workers(self): |
1024 | self.teardown = True | 1071 | self.teardown = True |
@@ -1052,26 +1099,27 @@ class RunQueue: | |||
1052 | except: | 1099 | except: |
1053 | return None | 1100 | return None |
1054 | 1101 | ||
1102 | (mc, fn, tn) = split_tid(tid) | ||
1103 | taskfn = taskfn_fromtid(tid) | ||
1104 | if taskname is None: | ||
1105 | taskname = tn | ||
1106 | |||
1055 | if self.stamppolicy == "perfile": | 1107 | if self.stamppolicy == "perfile": |
1056 | fulldeptree = False | 1108 | fulldeptree = False |
1057 | else: | 1109 | else: |
1058 | fulldeptree = True | 1110 | fulldeptree = True |
1059 | stampwhitelist = [] | 1111 | stampwhitelist = [] |
1060 | if self.stamppolicy == "whitelist": | 1112 | if self.stamppolicy == "whitelist": |
1061 | stampwhitelist = self.rqdata.stampfnwhitelist | 1113 | stampwhitelist = self.rqdata.stampfnwhitelist[mc] |
1062 | 1114 | ||
1063 | fn = fn_from_tid(tid) | 1115 | stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn) |
1064 | if taskname is None: | ||
1065 | taskname = taskname_from_tid(tid) | ||
1066 | |||
1067 | stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn) | ||
1068 | 1116 | ||
1069 | # If the stamp is missing, it's not current | 1117 | # If the stamp is missing, it's not current |
1070 | if not os.access(stampfile, os.F_OK): | 1118 | if not os.access(stampfile, os.F_OK): |
1071 | logger.debug(2, "Stampfile %s not available", stampfile) | 1119 | logger.debug(2, "Stampfile %s not available", stampfile) |
1072 | return False | 1120 | return False |
1073 | # If it's a 'nostamp' task, it's not current | 1121 | # If it's a 'nostamp' task, it's not current |
1074 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1122 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
1075 | if 'nostamp' in taskdep and taskname in taskdep['nostamp']: | 1123 | if 'nostamp' in taskdep and taskname in taskdep['nostamp']: |
1076 | logger.debug(2, "%s.%s is nostamp\n", fn, taskname) | 1124 | logger.debug(2, "%s.%s is nostamp\n", fn, taskname) |
1077 | return False | 1125 | return False |
@@ -1086,10 +1134,10 @@ class RunQueue: | |||
1086 | t1 = get_timestamp(stampfile) | 1134 | t1 = get_timestamp(stampfile) |
1087 | for dep in self.rqdata.runtaskentries[tid].depends: | 1135 | for dep in self.rqdata.runtaskentries[tid].depends: |
1088 | if iscurrent: | 1136 | if iscurrent: |
1089 | fn2 = fn_from_tid(dep) | 1137 | (mc2, fn2, taskname2) = split_tid(dep) |
1090 | taskname2 = taskname_from_tid(dep) | 1138 | taskfn2 = taskfn_fromtid(dep) |
1091 | stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCache, fn2) | 1139 | stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2) |
1092 | stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCache, fn2) | 1140 | stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2) |
1093 | t2 = get_timestamp(stampfile2) | 1141 | t2 = get_timestamp(stampfile2) |
1094 | t3 = get_timestamp(stampfile3) | 1142 | t3 = get_timestamp(stampfile3) |
1095 | if t3 and not t2: | 1143 | if t3 and not t2: |
@@ -1196,10 +1244,11 @@ class RunQueue: | |||
1196 | logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped) | 1244 | logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped) |
1197 | 1245 | ||
1198 | if self.state is runQueueFailed: | 1246 | if self.state is runQueueFailed: |
1199 | if not self.rqdata.taskData.tryaltconfigs: | 1247 | if not self.rqdata.taskData[''].tryaltconfigs: |
1200 | raise bb.runqueue.TaskFailure(self.rqexe.failed_fns) | 1248 | raise bb.runqueue.TaskFailure(self.rqexe.failed_tids) |
1201 | for fn in self.rqexe.failed_fns: | 1249 | for tid in self.rqexe.failed_tids: |
1202 | self.rqdata.taskData.fail_fn(fn) | 1250 | (mc, fn, tn) = split_tid(tid) |
1251 | self.rqdata.taskData[mc].fail_fn(fn) | ||
1203 | self.rqdata.reset() | 1252 | self.rqdata.reset() |
1204 | 1253 | ||
1205 | if self.state is runQueueComplete: | 1254 | if self.state is runQueueComplete: |
@@ -1246,13 +1295,14 @@ class RunQueue: | |||
1246 | def dump_signatures(self, options): | 1295 | def dump_signatures(self, options): |
1247 | done = set() | 1296 | done = set() |
1248 | bb.note("Reparsing files to collect dependency data") | 1297 | bb.note("Reparsing files to collect dependency data") |
1298 | bb_cache = bb.cache.NoCache(self.cooker.databuilder) | ||
1249 | for tid in self.rqdata.runtaskentries: | 1299 | for tid in self.rqdata.runtaskentries: |
1250 | fn = fn_from_tid(tid) | 1300 | fn = taskfn_fromtid(tid) |
1251 | if fn not in done: | 1301 | if fn not in done: |
1252 | the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn), self.cooker.data) | 1302 | the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn)) |
1253 | done.add(fn) | 1303 | done.add(fn) |
1254 | 1304 | ||
1255 | bb.parse.siggen.dump_sigs(self.rqdata.dataCache, options) | 1305 | bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options) |
1256 | 1306 | ||
1257 | return | 1307 | return |
1258 | 1308 | ||
@@ -1269,16 +1319,16 @@ class RunQueue: | |||
1269 | valid_new = set() | 1319 | valid_new = set() |
1270 | 1320 | ||
1271 | for tid in self.rqdata.runtaskentries: | 1321 | for tid in self.rqdata.runtaskentries: |
1272 | fn = fn_from_tid(tid) | 1322 | (mc, fn, taskname) = split_tid(tid) |
1273 | taskname = taskname_from_tid(tid) | 1323 | taskfn = taskfn_fromtid(tid) |
1274 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1324 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
1275 | 1325 | ||
1276 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 1326 | if 'noexec' in taskdep and taskname in taskdep['noexec']: |
1277 | noexec.append(tid) | 1327 | noexec.append(tid) |
1278 | continue | 1328 | continue |
1279 | 1329 | ||
1280 | sq_fn.append(fn) | 1330 | sq_fn.append(fn) |
1281 | sq_hashfn.append(self.rqdata.dataCache.hashfn[fn]) | 1331 | sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn]) |
1282 | sq_hash.append(self.rqdata.runtaskentries[tid].hash) | 1332 | sq_hash.append(self.rqdata.runtaskentries[tid].hash) |
1283 | sq_taskname.append(taskname) | 1333 | sq_taskname.append(taskname) |
1284 | sq_task.append(tid) | 1334 | sq_task.append(tid) |
@@ -1358,9 +1408,8 @@ class RunQueue: | |||
1358 | 1408 | ||
1359 | 1409 | ||
1360 | for tid in invalidtasks: | 1410 | for tid in invalidtasks: |
1361 | fn = fn_from_tid(tid) | 1411 | (mc, fn, taskname) = split_tid(tid) |
1362 | pn = self.rqdata.dataCache.pkg_fn[fn] | 1412 | pn = self.rqdata.dataCaches[mc].pkg_fn[fn] |
1363 | taskname = taskname_from_tid(tid) | ||
1364 | h = self.rqdata.runtaskentries[tid].hash | 1413 | h = self.rqdata.runtaskentries[tid].hash |
1365 | matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData) | 1414 | matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData) |
1366 | match = None | 1415 | match = None |
@@ -1393,7 +1442,7 @@ class RunQueueExecute: | |||
1393 | 1442 | ||
1394 | self.build_stamps = {} | 1443 | self.build_stamps = {} |
1395 | self.build_stamps2 = [] | 1444 | self.build_stamps2 = [] |
1396 | self.failed_fns = [] | 1445 | self.failed_tids = [] |
1397 | 1446 | ||
1398 | self.stampcache = {} | 1447 | self.stampcache = {} |
1399 | 1448 | ||
@@ -1434,7 +1483,7 @@ class RunQueueExecute: | |||
1434 | # worker must have died? | 1483 | # worker must have died? |
1435 | pass | 1484 | pass |
1436 | 1485 | ||
1437 | if len(self.failed_fns) != 0: | 1486 | if len(self.failed_tids) != 0: |
1438 | self.rq.state = runQueueFailed | 1487 | self.rq.state = runQueueFailed |
1439 | return | 1488 | return |
1440 | 1489 | ||
@@ -1449,7 +1498,7 @@ class RunQueueExecute: | |||
1449 | self.rq.read_workers() | 1498 | self.rq.read_workers() |
1450 | return self.rq.active_fds() | 1499 | return self.rq.active_fds() |
1451 | 1500 | ||
1452 | if len(self.failed_fns) != 0: | 1501 | if len(self.failed_tids) != 0: |
1453 | self.rq.state = runQueueFailed | 1502 | self.rq.state = runQueueFailed |
1454 | return True | 1503 | return True |
1455 | 1504 | ||
@@ -1463,9 +1512,8 @@ class RunQueueExecute: | |||
1463 | taskdata = {} | 1512 | taskdata = {} |
1464 | taskdeps.add(task) | 1513 | taskdeps.add(task) |
1465 | for dep in taskdeps: | 1514 | for dep in taskdeps: |
1466 | fn = fn_from_tid(dep) | 1515 | (mc, fn, taskname) = split_tid(dep) |
1467 | pn = self.rqdata.dataCache.pkg_fn[fn] | 1516 | pn = self.rqdata.dataCaches[mc].pkg_fn[fn] |
1468 | taskname = taskname_from_tid(dep) | ||
1469 | taskdata[dep] = [pn, taskname, fn] | 1517 | taskdata[dep] = [pn, taskname, fn] |
1470 | call = self.rq.depvalidate + "(task, taskdata, notneeded, d)" | 1518 | call = self.rq.depvalidate + "(task, taskdata, notneeded, d)" |
1471 | locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data } | 1519 | locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data } |
@@ -1519,10 +1567,10 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1519 | tasknames = {} | 1567 | tasknames = {} |
1520 | fns = {} | 1568 | fns = {} |
1521 | for tid in self.rqdata.runtaskentries: | 1569 | for tid in self.rqdata.runtaskentries: |
1522 | fn = fn_from_tid(tid) | 1570 | (mc, fn, taskname) = split_tid(tid) |
1523 | taskname = taskname_from_tid(tid) | 1571 | taskfn = taskfn_fromtid(tid) |
1524 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1572 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
1525 | fns[tid] = fn | 1573 | fns[tid] = taskfn |
1526 | tasknames[tid] = taskname | 1574 | tasknames[tid] = taskname |
1527 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 1575 | if 'noexec' in taskdep and taskname in taskdep['noexec']: |
1528 | continue | 1576 | continue |
@@ -1539,9 +1587,10 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1539 | covered_remove = bb.utils.better_eval(call, locs) | 1587 | covered_remove = bb.utils.better_eval(call, locs) |
1540 | 1588 | ||
1541 | def removecoveredtask(tid): | 1589 | def removecoveredtask(tid): |
1542 | fn = fn_from_tid(tid) | 1590 | (mc, fn, taskname) = split_tid(tid) |
1543 | taskname = taskname_from_tid(tid) + '_setscene' | 1591 | taskname = taskname + '_setscene' |
1544 | bb.build.del_stamp(taskname, self.rqdata.dataCache, fn) | 1592 | taskfn = taskfn_fromtid(tid) |
1593 | bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn) | ||
1545 | self.rq.scenequeue_covered.remove(tid) | 1594 | self.rq.scenequeue_covered.remove(tid) |
1546 | 1595 | ||
1547 | toremove = covered_remove | 1596 | toremove = covered_remove |
@@ -1562,7 +1611,15 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1562 | 1611 | ||
1563 | logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered) | 1612 | logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered) |
1564 | 1613 | ||
1565 | event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData) | 1614 | |
1615 | for mc in self.rqdata.dataCaches: | ||
1616 | target_pairs = [] | ||
1617 | for tid in self.rqdata.target_tids: | ||
1618 | (tidmc, fn, taskname) = split_tid(tid) | ||
1619 | if tidmc == mc: | ||
1620 | target_pairs.append((fn, taskname)) | ||
1621 | |||
1622 | event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData) | ||
1566 | 1623 | ||
1567 | schedulers = self.get_schedulers() | 1624 | schedulers = self.get_schedulers() |
1568 | for scheduler in schedulers: | 1625 | for scheduler in schedulers: |
@@ -1633,10 +1690,9 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1633 | Updates the state engine with the failure | 1690 | Updates the state engine with the failure |
1634 | """ | 1691 | """ |
1635 | self.stats.taskFailed() | 1692 | self.stats.taskFailed() |
1636 | fn = fn_from_tid(task) | 1693 | self.failed_tids.append(task) |
1637 | self.failed_fns.append(fn) | ||
1638 | bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData) | 1694 | bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData) |
1639 | if self.rqdata.taskData.abort: | 1695 | if self.rqdata.taskData[''].abort: |
1640 | self.rq.state = runQueueCleanUp | 1696 | self.rq.state = runQueueCleanUp |
1641 | 1697 | ||
1642 | def task_skip(self, task, reason): | 1698 | def task_skip(self, task, reason): |
@@ -1655,8 +1711,7 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1655 | if self.rqdata.setscenewhitelist: | 1711 | if self.rqdata.setscenewhitelist: |
1656 | # Check tasks that are going to run against the whitelist | 1712 | # Check tasks that are going to run against the whitelist |
1657 | def check_norun_task(tid, showerror=False): | 1713 | def check_norun_task(tid, showerror=False): |
1658 | fn = fn_from_tid(tid) | 1714 | (mc, fn, taskname) = split_tid(tid) |
1659 | taskname = taskname_from_tid(tid) | ||
1660 | # Ignore covered tasks | 1715 | # Ignore covered tasks |
1661 | if tid in self.rq.scenequeue_covered: | 1716 | if tid in self.rq.scenequeue_covered: |
1662 | return False | 1717 | return False |
@@ -1664,11 +1719,11 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1664 | if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache): | 1719 | if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache): |
1665 | return False | 1720 | return False |
1666 | # Ignore noexec tasks | 1721 | # Ignore noexec tasks |
1667 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1722 | taskdep = self.rqdata.dataCaches[mc].task_deps[fn] |
1668 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 1723 | if 'noexec' in taskdep and taskname in taskdep['noexec']: |
1669 | return False | 1724 | return False |
1670 | 1725 | ||
1671 | pn = self.rqdata.dataCache.pkg_fn[fn] | 1726 | pn = self.rqdata.dataCaches[mc].pkg_fn[fn] |
1672 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): | 1727 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): |
1673 | if showerror: | 1728 | if showerror: |
1674 | if tid in self.rqdata.runq_setscene_tids: | 1729 | if tid in self.rqdata.runq_setscene_tids: |
@@ -1704,8 +1759,8 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1704 | 1759 | ||
1705 | task = self.sched.next() | 1760 | task = self.sched.next() |
1706 | if task is not None: | 1761 | if task is not None: |
1707 | fn = fn_from_tid(task) | 1762 | (mc, fn, taskname) = split_tid(task) |
1708 | taskname = taskname_from_tid(task) | 1763 | taskfn = taskfn_fromtid(task) |
1709 | 1764 | ||
1710 | if task in self.rq.scenequeue_covered: | 1765 | if task in self.rq.scenequeue_covered: |
1711 | logger.debug(2, "Setscene covered task %s", task) | 1766 | logger.debug(2, "Setscene covered task %s", task) |
@@ -1718,7 +1773,7 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1718 | self.task_skip(task, "existing") | 1773 | self.task_skip(task, "existing") |
1719 | return True | 1774 | return True |
1720 | 1775 | ||
1721 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1776 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
1722 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 1777 | if 'noexec' in taskdep and taskname in taskdep['noexec']: |
1723 | startevent = runQueueTaskStarted(task, self.stats, self.rq, | 1778 | startevent = runQueueTaskStarted(task, self.stats, self.rq, |
1724 | noexec=True) | 1779 | noexec=True) |
@@ -1726,7 +1781,7 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1726 | self.runq_running.add(task) | 1781 | self.runq_running.add(task) |
1727 | self.stats.taskActive() | 1782 | self.stats.taskActive() |
1728 | if not self.cooker.configuration.dry_run: | 1783 | if not self.cooker.configuration.dry_run: |
1729 | bb.build.make_stamp(taskname, self.rqdata.dataCache, fn) | 1784 | bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn) |
1730 | self.task_complete(task) | 1785 | self.task_complete(task) |
1731 | return True | 1786 | return True |
1732 | else: | 1787 | else: |
@@ -1735,7 +1790,7 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1735 | 1790 | ||
1736 | taskdepdata = self.build_taskdepdata(task) | 1791 | taskdepdata = self.build_taskdepdata(task) |
1737 | 1792 | ||
1738 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1793 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
1739 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: | 1794 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: |
1740 | if not self.rq.fakeworker: | 1795 | if not self.rq.fakeworker: |
1741 | try: | 1796 | try: |
@@ -1744,13 +1799,13 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1744 | logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc))) | 1799 | logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc))) |
1745 | self.rq.state = runQueueFailed | 1800 | self.rq.state = runQueueFailed |
1746 | return True | 1801 | return True |
1747 | self.rq.fakeworker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>") | 1802 | self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>") |
1748 | self.rq.fakeworker[''].process.stdin.flush() | 1803 | self.rq.fakeworker[mc].process.stdin.flush() |
1749 | else: | 1804 | else: |
1750 | self.rq.worker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>") | 1805 | self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata)) + b"</runtask>") |
1751 | self.rq.worker[''].process.stdin.flush() | 1806 | self.rq.worker[mc].process.stdin.flush() |
1752 | 1807 | ||
1753 | self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn) | 1808 | self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn) |
1754 | self.build_stamps2.append(self.build_stamps[task]) | 1809 | self.build_stamps2.append(self.build_stamps[task]) |
1755 | self.runq_running.add(task) | 1810 | self.runq_running.add(task) |
1756 | self.stats.taskActive() | 1811 | self.stats.taskActive() |
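As the hunk above shows, the worker protocol frames a pickled parameter tuple in &lt;runtask&gt; markers written to the chosen per-mc worker's stdin. A self-contained sketch of building one such message, with hypothetical values:

    import pickle

    # Hypothetical task in multiconfig "mymc":
    taskfn = "multiconfig:mymc:/meta/recipes-core/foo/foo_1.0.bb"
    tid = taskfn + ":do_compile"
    appends = []          # bbappend files for the recipe
    taskdepdata = None    # per-task dependency data, elided here

    msg = (b"<runtask>"
           + pickle.dumps((taskfn, tid, "do_compile", False, appends, taskdepdata))
           + b"</runtask>")
    print(len(msg), "bytes")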
@@ -1761,7 +1816,7 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1761 | self.rq.read_workers() | 1816 | self.rq.read_workers() |
1762 | return self.rq.active_fds() | 1817 | return self.rq.active_fds() |
1763 | 1818 | ||
1764 | if len(self.failed_fns) != 0: | 1819 | if len(self.failed_tids) != 0: |
1765 | self.rq.state = runQueueFailed | 1820 | self.rq.state = runQueueFailed |
1766 | return True | 1821 | return True |
1767 | 1822 | ||
@@ -1784,11 +1839,11 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1784 | while next: | 1839 | while next: |
1785 | additional = [] | 1840 | additional = [] |
1786 | for revdep in next: | 1841 | for revdep in next: |
1787 | fn = fn_from_tid(revdep) | 1842 | (mc, fn, taskname) = split_tid(revdep) |
1788 | pn = self.rqdata.dataCache.pkg_fn[fn] | 1843 | taskfn = taskfn_fromtid(revdep) |
1789 | taskname = taskname_from_tid(revdep) | 1844 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] |
1790 | deps = self.rqdata.runtaskentries[revdep].depends | 1845 | deps = self.rqdata.runtaskentries[revdep].depends |
1791 | provides = self.rqdata.dataCache.fn_provides[fn] | 1846 | provides = self.rqdata.dataCaches[mc].fn_provides[taskfn] |
1792 | taskdepdata[revdep] = [pn, taskname, fn, deps, provides] | 1847 | taskdepdata[revdep] = [pn, taskname, fn, deps, provides] |
1793 | for revdep2 in deps: | 1848 | for revdep2 in deps: |
1794 | if revdep2 not in taskdepdata: | 1849 | if revdep2 not in taskdepdata: |
@@ -1928,14 +1983,15 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
1928 | # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene" | 1983 | # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene" |
1929 | # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies | 1984 | # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies |
1930 | for tid in self.rqdata.runq_setscene_tids: | 1985 | for tid in self.rqdata.runq_setscene_tids: |
1931 | realtid = tid + "_setscene" | 1986 | (mc, fn, taskname) = split_tid(tid) |
1932 | idepends = self.rqdata.taskData.taskentries[realtid].idepends | 1987 | realtid = fn + ":" + taskname + "_setscene" |
1988 | idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends | ||
1933 | for (depname, idependtask) in idepends: | 1989 | for (depname, idependtask) in idepends: |
1934 | 1990 | ||
1935 | if depname not in self.rqdata.taskData.build_targets: | 1991 | if depname not in self.rqdata.taskData[mc].build_targets: |
1936 | continue | 1992 | continue |
1937 | 1993 | ||
1938 | depfn = self.rqdata.taskData.build_targets[depname][0] | 1994 | depfn = self.rqdata.taskData[mc].build_targets[depname][0] |
1939 | if depfn is None: | 1995 | if depfn is None: |
1940 | continue | 1996 | continue |
1941 | deptid = depfn + ":" + idependtask.replace("_setscene", "") | 1997 | deptid = depfn + ":" + idependtask.replace("_setscene", "") |
@@ -1991,15 +2047,15 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
1991 | noexec = [] | 2047 | noexec = [] |
1992 | stamppresent = [] | 2048 | stamppresent = [] |
1993 | for tid in self.sq_revdeps: | 2049 | for tid in self.sq_revdeps: |
1994 | fn = fn_from_tid(tid) | 2050 | (mc, fn, taskname) = split_tid(tid) |
1995 | taskname = taskname_from_tid(tid) | 2051 | taskfn = taskfn_fromtid(tid) |
1996 | 2052 | ||
1997 | taskdep = self.rqdata.dataCache.task_deps[fn] | 2053 | taskdep = self.rqdata.dataCaches[mc].task_deps[fn] |
1998 | 2054 | ||
1999 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 2055 | if 'noexec' in taskdep and taskname in taskdep['noexec']: |
2000 | noexec.append(tid) | 2056 | noexec.append(tid) |
2001 | self.task_skip(tid) | 2057 | self.task_skip(tid) |
2002 | bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn) | 2058 | bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn) |
2003 | continue | 2059 | continue |
2004 | 2060 | ||
2005 | if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache): | 2061 | if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache): |
@@ -2015,7 +2071,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
2015 | continue | 2071 | continue |
2016 | 2072 | ||
2017 | sq_fn.append(fn) | 2073 | sq_fn.append(fn) |
2018 | sq_hashfn.append(self.rqdata.dataCache.hashfn[fn]) | 2074 | sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn]) |
2019 | sq_hash.append(self.rqdata.runtaskentries[tid].hash) | 2075 | sq_hash.append(self.rqdata.runtaskentries[tid].hash) |
2020 | sq_taskname.append(taskname) | 2076 | sq_taskname.append(taskname) |
2021 | sq_task.append(tid) | 2077 | sq_task.append(tid) |
@@ -2063,9 +2119,8 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
2063 | def check_taskfail(self, task): | 2119 | def check_taskfail(self, task): |
2064 | if self.rqdata.setscenewhitelist: | 2120 | if self.rqdata.setscenewhitelist: |
2065 | realtask = task.split('_setscene')[0] | 2121 | realtask = task.split('_setscene')[0] |
2066 | fn = fn_from_tid(realtask) | 2122 | (mc, fn, taskname) = split_tid(realtask) |
2067 | taskname = taskname_from_tid(realtask) | 2123 | pn = self.rqdata.dataCaches[mc].pkg_fn[fn] |
2068 | pn = self.rqdata.dataCache.pkg_fn[fn] | ||
2069 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): | 2124 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): |
2070 | logger.error('Task %s.%s failed' % (pn, taskname + "_setscene")) | 2125 | logger.error('Task %s.%s failed' % (pn, taskname + "_setscene")) |
2071 | self.rq.state = runQueueCleanUp | 2126 | self.rq.state = runQueueCleanUp |
@@ -2114,10 +2169,9 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
2114 | if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True): | 2169 | if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True): |
2115 | fn = fn_from_tid(nexttask) | 2170 | fn = fn_from_tid(nexttask) |
2116 | foundtarget = False | 2171 | foundtarget = False |
2117 | for target in self.rqdata.target_pairs: | 2172 | |
2118 | if target[0] == fn and target[1] == taskname_from_tid(nexttask): | 2173 | if nexttask in self.rqdata.target_tids: |
2119 | foundtarget = True | 2174 | foundtarget = True |
2120 | break | ||
2121 | if not foundtarget: | 2175 | if not foundtarget: |
2122 | logger.debug(2, "Skipping setscene for task %s" % nexttask) | 2176 | logger.debug(2, "Skipping setscene for task %s" % nexttask) |
2123 | self.task_skip(nexttask) | 2177 | self.task_skip(nexttask) |
@@ -2129,18 +2183,18 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
2129 | task = nexttask | 2183 | task = nexttask |
2130 | break | 2184 | break |
2131 | if task is not None: | 2185 | if task is not None: |
2132 | fn = fn_from_tid(task) | 2186 | (mc, fn, taskname) = split_tid(task) |
2133 | taskname = taskname_from_tid(task) + "_setscene" | 2187 | taskfn = taskfn_fromtid(task) |
2188 | taskname = taskname + "_setscene" | ||
2134 | if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache): | 2189 | if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache): |
2135 | logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task) | 2190 | logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task) |
2136 | self.task_failoutright(task) | 2191 | self.task_failoutright(task) |
2137 | return True | 2192 | return True |
2138 | 2193 | ||
2139 | if self.cooker.configuration.force: | 2194 | if self.cooker.configuration.force: |
2140 | for target in self.rqdata.target_pairs: | 2195 | if task in self.rqdata.target_tids: |
2141 | if target[0] == fn and target[1] == taskname_from_tid(task): | 2196 | self.task_failoutright(task) |
2142 | self.task_failoutright(task) | 2197 | return True |
2143 | return True | ||
2144 | 2198 | ||
2145 | if self.rq.check_stamp_task(task, taskname, cache=self.stampcache): | 2199 | if self.rq.check_stamp_task(task, taskname, cache=self.stampcache): |
2146 | logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task) | 2200 | logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task) |
@@ -2150,15 +2204,15 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
2150 | startevent = sceneQueueTaskStarted(task, self.stats, self.rq) | 2204 | startevent = sceneQueueTaskStarted(task, self.stats, self.rq) |
2151 | bb.event.fire(startevent, self.cfgData) | 2205 | bb.event.fire(startevent, self.cfgData) |
2152 | 2206 | ||
2153 | taskdep = self.rqdata.dataCache.task_deps[fn] | 2207 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
2154 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: | 2208 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: |
2155 | if not self.rq.fakeworker: | 2209 | if not self.rq.fakeworker: |
2156 | self.rq.start_fakeworker(self) | 2210 | self.rq.start_fakeworker(self) |
2157 | self.rq.fakeworker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + b"</runtask>") | 2211 | self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>") |
2158 | self.rq.fakeworker[''].process.stdin.flush() | 2212 | self.rq.fakeworker[mc].process.stdin.flush() |
2159 | else: | 2213 | else: |
2160 | self.rq.worker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + b"</runtask>") | 2214 | self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>") |
2161 | self.rq.worker[''].process.stdin.flush() | 2215 | self.rq.worker[mc].process.stdin.flush() |
2162 | 2216 | ||
2163 | self.runq_running.add(task) | 2217 | self.runq_running.add(task) |
2164 | self.stats.taskActive() | 2218 | self.stats.taskActive() |
diff --git a/bitbake/lib/bb/siggen.py b/bitbake/lib/bb/siggen.py index 9b2f658a46..0862cff78a 100644 --- a/bitbake/lib/bb/siggen.py +++ b/bitbake/lib/bb/siggen.py | |||
@@ -144,8 +144,9 @@ class SignatureGeneratorBasic(SignatureGenerator): | |||
144 | 144 | ||
145 | def finalise(self, fn, d, variant): | 145 | def finalise(self, fn, d, variant): |
146 | 146 | ||
147 | if variant: | 147 | mc = d.getVar("__BBMULTICONFIG", False) or "" |
148 | fn = "virtual:" + variant + ":" + fn | 148 | if variant or mc: |
149 | fn = bb.cache.realfn2virtual(fn, variant, mc) | ||
149 | 150 | ||
150 | try: | 151 | try: |
151 | taskdeps = self._build_data(fn, d) | 152 | taskdeps = self._build_data(fn, d) |
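Given the "multiconfig:&lt;mc&gt;:" prefix convention used throughout this commit, the bb.cache.realfn2virtual call above presumably produces names like the following (an assumed mapping, shown for illustration rather than executed against bb.cache):

    # Assumed behaviour of the call in finalise:
    #   realfn2virtual("/r/foo.bb", "native", "mymc")
    #     -> "multiconfig:mymc:virtual:native:/r/foo.bb"
    #   realfn2virtual("/r/foo.bb", "", "")
    #     -> "/r/foo.bb"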
@@ -300,16 +301,18 @@ class SignatureGeneratorBasic(SignatureGenerator): | |||
300 | bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k)) | 301 | bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k)) |
301 | 302 | ||
302 | 303 | ||
303 | def dump_sigs(self, dataCache, options): | 304 | def dump_sigs(self, dataCaches, options): |
304 | for fn in self.taskdeps: | 305 | for fn in self.taskdeps: |
305 | for task in self.taskdeps[fn]: | 306 | for task in self.taskdeps[fn]: |
307 | tid = fn + ":" + task | ||
308 | (mc, _, _) = bb.runqueue.split_tid(tid) | ||
306 | k = fn + "." + task | 309 | k = fn + "." + task |
307 | if k not in self.taskhash: | 310 | if k not in self.taskhash: |
308 | continue | 311 | continue |
309 | if dataCache.basetaskhash[k] != self.basehash[k]: | 312 | if dataCaches[mc].basetaskhash[k] != self.basehash[k]: |
310 | bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % k) | 313 | bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % k) |
311 | bb.error("The mismatched hashes were %s and %s" % (dataCache.basetaskhash[k], self.basehash[k])) | 314 | bb.error("The mismatched hashes were %s and %s" % (dataCaches[mc].basetaskhash[k], self.basehash[k])) |
312 | self.dump_sigtask(fn, task, dataCache.stamp[fn], True) | 315 | self.dump_sigtask(fn, task, dataCaches[mc].stamp[fn], True) |
313 | 316 | ||
314 | class SignatureGeneratorBasicHash(SignatureGeneratorBasic): | 317 | class SignatureGeneratorBasicHash(SignatureGeneratorBasic): |
315 | name = "basichash" | 318 | name = "basichash" |
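
dump_sigs() now receives a dict of data caches keyed by multiconfig name and picks the right cache via bb.runqueue.split_tid(). The standalone function below mirrors the splitting convention the hunk relies on; it is a sketch written from the prefix rules visible in this patch, not the runqueue implementation itself.

def split_tid(tid):
    # assumed layout: "multiconfig:<mc>:<fn>:<task>", or "<fn>:<task>"
    # when the task belongs to the default (mc == "") configuration
    mc = ""
    if tid.startswith("multiconfig:"):
        elems = tid.split(":")
        mc = elems[1]
        tid = ":".join(elems[2:])
    (fn, taskname) = tid.rsplit(":", 1)
    return (mc, fn, taskname)

assert split_tid("/meta/foo.bb:do_fetch") == ("", "/meta/foo.bb", "do_fetch")
assert split_tid("multiconfig:musl:/meta/foo.bb:do_fetch") == \
        ("musl", "/meta/foo.bb", "do_fetch")
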
diff --git a/bitbake/lib/bb/tinfoil.py b/bitbake/lib/bb/tinfoil.py index 7aa653f1aa..95608ae3bd 100644 --- a/bitbake/lib/bb/tinfoil.py +++ b/bitbake/lib/bb/tinfoil.py | |||
@@ -74,13 +74,13 @@ class Tinfoil: | |||
74 | self.logger.setLevel(logging.INFO) | 74 | self.logger.setLevel(logging.INFO) |
75 | sys.stderr.write("done.\n") | 75 | sys.stderr.write("done.\n") |
76 | 76 | ||
77 | self.cooker_data = self.cooker.recipecache | 77 | self.cooker_data = self.cooker.recipecaches[''] |
78 | 78 | ||
79 | def prepare(self, config_only = False): | 79 | def prepare(self, config_only = False): |
80 | if not self.cooker_data: | 80 | if not self.cooker_data: |
81 | if config_only: | 81 | if config_only: |
82 | self.cooker.parseConfiguration() | 82 | self.cooker.parseConfiguration() |
83 | self.cooker_data = self.cooker.recipecache | 83 | self.cooker_data = self.cooker.recipecaches[''] |
84 | else: | 84 | else: |
85 | self.parseRecipes() | 85 | self.parseRecipes() |
86 | 86 | ||
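
Tinfoil keeps exposing a single cooker_data object by indexing the new recipecaches dict at '', the key reserved for the default configuration. A toy sketch of that shape, with CacheData standing in for the real bb.cache.CacheData:

class CacheData:
    # stand-in for bb.cache.CacheData
    def __init__(self):
        self.pkg_pn = {}
        self.pkg_fn = {}

# '' is always present and holds the default configuration; extra keys
# (here 'musl', purely illustrative) would come from BBMULTICONFIG
multiconfigs = ["", "musl"]
recipecaches = {mc: CacheData() for mc in multiconfigs}

cooker_data = recipecaches[""]   # what tinfoil now hands to callers
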
diff --git a/bitbake/lib/bblayers/action.py b/bitbake/lib/bblayers/action.py index d4c1792f60..739ae27b97 100644 --- a/bitbake/lib/bblayers/action.py +++ b/bitbake/lib/bblayers/action.py | |||
@@ -173,7 +173,7 @@ build results (as the layer priority order has effectively changed). | |||
173 | # have come from) | 173 | # have come from) |
174 | first_regex = None | 174 | first_regex = None |
175 | layerdir = layers[0] | 175 | layerdir = layers[0] |
176 | for layername, pattern, regex, _ in self.tinfoil.cooker.recipecache.bbfile_config_priorities: | 176 | for layername, pattern, regex, _ in self.tinfoil.cooker.bbfile_config_priorities: |
177 | if regex.match(os.path.join(layerdir, 'test')): | 177 | if regex.match(os.path.join(layerdir, 'test')): |
178 | first_regex = regex | 178 | first_regex = regex |
179 | break | 179 | break |
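
bbfile_config_priorities is per-build rather than per-multiconfig, so it moves from the recipe cache onto the cooker itself. A runnable sketch of the lookup the hunk performs, with a namedtuple standing in for the real cooker object:

import os
import re
from collections import namedtuple

Cooker = namedtuple("Cooker", "bbfile_config_priorities")  # stand-in

def find_layer_regex(cooker, layerdir):
    # entries are (layername, pattern, compiled_regex, priority)
    for layername, pattern, regex, _ in cooker.bbfile_config_priorities:
        if regex.match(os.path.join(layerdir, "test")):
            return regex
    return None

cooker = Cooker([("meta", "^/srv/meta/", re.compile(r"^/srv/meta/"), 5)])
print(find_layer_regex(cooker, "/srv/meta"))   # -> the compiled regex
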
diff --git a/bitbake/lib/bblayers/query.py b/bitbake/lib/bblayers/query.py index 6e62082a2e..ee1e7c8a1c 100644 --- a/bitbake/lib/bblayers/query.py +++ b/bitbake/lib/bblayers/query.py | |||
@@ -23,7 +23,7 @@ class QueryPlugin(LayerPlugin): | |||
23 | """show current configured layers.""" | 23 | """show current configured layers.""" |
24 | logger.plain("%s %s %s" % ("layer".ljust(20), "path".ljust(40), "priority")) | 24 | logger.plain("%s %s %s" % ("layer".ljust(20), "path".ljust(40), "priority")) |
25 | logger.plain('=' * 74) | 25 | logger.plain('=' * 74) |
26 | for layer, _, regex, pri in self.tinfoil.cooker.recipecache.bbfile_config_priorities: | 26 | for layer, _, regex, pri in self.tinfoil.cooker.bbfile_config_priorities: |
27 | layerdir = self.bbfile_collections.get(layer, None) | 27 | layerdir = self.bbfile_collections.get(layer, None) |
28 | layername = self.get_layer_name(layerdir) | 28 | layername = self.get_layer_name(layerdir) |
29 | logger.plain("%s %s %d" % (layername.ljust(20), layerdir.ljust(40), pri)) | 29 | logger.plain("%s %s %d" % (layername.ljust(20), layerdir.ljust(40), pri)) |
@@ -121,9 +121,9 @@ skipped recipes will also be listed, with a " (skipped)" suffix. | |||
121 | logger.error('No class named %s found in BBPATH', classfile) | 121 | logger.error('No class named %s found in BBPATH', classfile) |
122 | sys.exit(1) | 122 | sys.exit(1) |
123 | 123 | ||
124 | pkg_pn = self.tinfoil.cooker.recipecache.pkg_pn | 124 | pkg_pn = self.tinfoil.cooker.recipecaches[''].pkg_pn |
125 | (latest_versions, preferred_versions) = bb.providers.findProviders(self.tinfoil.config_data, self.tinfoil.cooker.recipecache, pkg_pn) | 125 | (latest_versions, preferred_versions) = bb.providers.findProviders(self.tinfoil.config_data, self.tinfoil.cooker.recipecaches[''], pkg_pn) |
126 | allproviders = bb.providers.allProviders(self.tinfoil.cooker.recipecache) | 126 | allproviders = bb.providers.allProviders(self.tinfoil.cooker.recipecaches['']) |
127 | 127 | ||
128 | # Ensure we list skipped recipes | 128 | # Ensure we list skipped recipes |
129 | # We are largely guessing about PN, PV and the preferred version here, | 129 | # We are largely guessing about PN, PV and the preferred version here, |
@@ -176,7 +176,7 @@ skipped recipes will also be listed, with a " (skipped)" suffix. | |||
176 | # We only display once per recipe, we should prefer non extended versions of the | 176 | # We only display once per recipe, we should prefer non extended versions of the |
177 | # recipe if present (so e.g. in OpenEmbedded, openssl rather than nativesdk-openssl | 177 | # recipe if present (so e.g. in OpenEmbedded, openssl rather than nativesdk-openssl |
178 | # which would otherwise sort first). | 178 | # which would otherwise sort first). |
179 | if realfn[1] and realfn[0] in self.tinfoil.cooker.recipecache.pkg_fn: | 179 | if realfn[1] and realfn[0] in self.tinfoil.cooker.recipecaches[''].pkg_fn: |
180 | continue | 180 | continue |
181 | 181 | ||
182 | if inherits: | 182 | if inherits: |
@@ -297,7 +297,7 @@ Lists recipes with the bbappends that apply to them as subitems. | |||
297 | def get_appends_for_files(self, filenames): | 297 | def get_appends_for_files(self, filenames): |
298 | appended, notappended = [], [] | 298 | appended, notappended = [], [] |
299 | for filename in filenames: | 299 | for filename in filenames: |
300 | _, cls = bb.cache.virtualfn2realfn(filename) | 300 | _, cls, _ = bb.cache.virtualfn2realfn(filename) |
301 | if cls: | 301 | if cls: |
302 | continue | 302 | continue |
303 | 303 | ||
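
Finally, every caller of bb.cache.virtualfn2realfn() now has to unpack a third, multiconfig element; get_appends_for_files() above discards it with '_'. A hypothetical caller in the same style, assuming a bitbake checkout on sys.path:

import bb.cache  # assumption: bitbake's lib directory is importable

def real_recipe_files(filenames):
    # keep only plain recipes: class-extended variants ("virtual:<cls>:...")
    # are skipped, and the multiconfig element is deliberately ignored
    for filename in filenames:
        fn, cls, _ = bb.cache.virtualfn2realfn(filename)
        if cls:
            continue
        yield fn
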