diff options
-rw-r--r-- | bitbake/ChangeLog | 3 | ||||
-rw-r--r-- | bitbake/lib/bb/providers.py | 8 | ||||
-rw-r--r-- | bitbake/lib/bb/runqueue.py | 23 |
3 files changed, 31 insertions, 3 deletions
diff --git a/bitbake/ChangeLog b/bitbake/ChangeLog index 871f260c48..d00a52d2b6 100644 --- a/bitbake/ChangeLog +++ b/bitbake/ChangeLog | |||
@@ -30,6 +30,9 @@ Changes in BitBake 1.8.x: | |||
30 | - Add proxy support to the CVS fetcher (from Cyril Chemparathy) | 30 | - Add proxy support to the CVS fetcher (from Cyril Chemparathy) |
31 | - Improve runfetchcmd so errors are seen and various GIT variables are exported | 31 | - Improve runfetchcmd so errors are seen and various GIT variables are exported |
32 | - Add ability to fetchers to check URL validity without downloading | 32 | - Add ability to fetchers to check URL validity without downloading |
33 | - Improve runtime PREFERRED_PROVIDERS warning message | ||
34 | - Add BB_STAMP_WHITELIST option which contains a list of stamps to ignore when | ||
35 | checking stamp dependencies and using a BB_STAMP_POLICY of "whitelist" | ||
33 | 36 | ||
34 | Changes in BitBake 1.8.10: | 37 | Changes in BitBake 1.8.10: |
35 | - Psyco is available only for x86 - do not use it on other architectures. | 38 | - Psyco is available only for x86 - do not use it on other architectures. |
diff --git a/bitbake/lib/bb/providers.py b/bitbake/lib/bb/providers.py index 2f6b620b3d..cb0ca3ff24 100644 --- a/bitbake/lib/bb/providers.py +++ b/bitbake/lib/bb/providers.py | |||
@@ -283,13 +283,16 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache): | |||
283 | 283 | ||
284 | # Should use dataCache.preferred here? | 284 | # Should use dataCache.preferred here? |
285 | preferred = [] | 285 | preferred = [] |
286 | preferred_vars = [] | ||
286 | for p in eligible: | 287 | for p in eligible: |
287 | pn = dataCache.pkg_fn[p] | 288 | pn = dataCache.pkg_fn[p] |
288 | provides = dataCache.pn_provides[pn] | 289 | provides = dataCache.pn_provides[pn] |
289 | for provide in provides: | 290 | for provide in provides: |
290 | prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, cfgData, 1) | 291 | prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, cfgData, 1) |
291 | if prefervar == pn: | 292 | if prefervar == pn: |
292 | bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to PREFERRED_PROVIDERS" % (pn, item)) | 293 | var = "PREFERRED_PROVIDERS_%s = %s" % (provide, prefervar) |
294 | bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to %s" % (pn, item, var)) | ||
295 | preferred_vars.append(var) | ||
293 | eligible.remove(p) | 296 | eligible.remove(p) |
294 | eligible = [p] + eligible | 297 | eligible = [p] + eligible |
295 | preferred.append(p) | 298 | preferred.append(p) |
@@ -297,6 +300,9 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache): | |||
297 | 300 | ||
298 | numberPreferred = len(preferred) | 301 | numberPreferred = len(preferred) |
299 | 302 | ||
303 | if numberPreferred > 1: | ||
304 | bb.msg.error(bb.msg.domain.Provider, "Conflicting PREFERRED_PROVIDERS entries were found which resulted in an attempt to select multiple providers (%s) for runtime dependency %s\nThe entries resulting in this conflict were: %s" % (preferred, item, preferred_vars)) | ||
305 | |||
300 | bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible)) | 306 | bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible)) |
301 | 307 | ||
302 | return eligible, numberPreferred | 308 | return eligible, numberPreferred |
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py index 7b3defd343..3560996b9d 100644 --- a/bitbake/lib/bb/runqueue.py +++ b/bitbake/lib/bb/runqueue.py | |||
@@ -150,6 +150,7 @@ class RunQueue: | |||
150 | self.multi_provider_whitelist = (bb.data.getVar("MULTI_PROVIDER_WHITELIST", cfgData, 1) or "").split() | 150 | self.multi_provider_whitelist = (bb.data.getVar("MULTI_PROVIDER_WHITELIST", cfgData, 1) or "").split() |
151 | self.scheduler = bb.data.getVar("BB_SCHEDULER", cfgData, 1) or "speed" | 151 | self.scheduler = bb.data.getVar("BB_SCHEDULER", cfgData, 1) or "speed" |
152 | self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, 1) or "perfile" | 152 | self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, 1) or "perfile" |
153 | self.stampwhitelist = bb.data.getVar("BB_STAMP_WHITELIST", cfgData, 1) or "" | ||
153 | 154 | ||
154 | def reset_runqueue(self): | 155 | def reset_runqueue(self): |
155 | 156 | ||
@@ -667,6 +668,18 @@ class RunQueue: | |||
667 | #if error: | 668 | #if error: |
668 | # bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...") | 669 | # bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...") |
669 | 670 | ||
671 | |||
672 | # Create a whitelist usable by the stamp checks | ||
673 | stampfnwhitelist = [] | ||
674 | for entry in self.stampwhitelist.split(): | ||
675 | entryid = self.taskData.getbuild_id(entry) | ||
676 | if entryid not in self.taskData.build_targets: | ||
677 | continue | ||
678 | fnid = self.taskData.build_targets[entryid][0] | ||
679 | fn = self.taskData.fn_index[fnid] | ||
680 | stampfnwhitelist.append(fn) | ||
681 | self.stampfnwhitelist = stampfnwhitelist | ||
682 | |||
670 | #self.dump_data(taskData) | 683 | #self.dump_data(taskData) |
671 | 684 | ||
672 | def check_stamps(self): | 685 | def check_stamps(self): |
@@ -679,6 +692,9 @@ class RunQueue: | |||
679 | fulldeptree = False | 692 | fulldeptree = False |
680 | else: | 693 | else: |
681 | fulldeptree = True | 694 | fulldeptree = True |
695 | stampwhitelist = [] | ||
696 | if self.stamppolicy == "whitelist": | ||
697 | stampwhitelist = self.stampfnwhitelist | ||
682 | 698 | ||
683 | for task in range(len(self.runq_fnid)): | 699 | for task in range(len(self.runq_fnid)): |
684 | unchecked[task] = "" | 700 | unchecked[task] = "" |
@@ -730,7 +746,7 @@ class RunQueue: | |||
730 | fn2 = self.taskData.fn_index[self.runq_fnid[dep]] | 746 | fn2 = self.taskData.fn_index[self.runq_fnid[dep]] |
731 | taskname2 = self.runq_task[dep] | 747 | taskname2 = self.runq_task[dep] |
732 | stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2) | 748 | stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2) |
733 | if fulldeptree or fn == fn2: | 749 | if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist): |
734 | if dep in notcurrent: | 750 | if dep in notcurrent: |
735 | iscurrent = False | 751 | iscurrent = False |
736 | else: | 752 | else: |
@@ -766,6 +782,9 @@ class RunQueue: | |||
766 | fulldeptree = False | 782 | fulldeptree = False |
767 | else: | 783 | else: |
768 | fulldeptree = True | 784 | fulldeptree = True |
785 | stampwhitelist = [] | ||
786 | if self.stamppolicy == "whitelist": | ||
787 | stampwhitelist = self.stampfnwhitelist | ||
769 | 788 | ||
770 | fn = self.taskData.fn_index[self.runq_fnid[task]] | 789 | fn = self.taskData.fn_index[self.runq_fnid[task]] |
771 | taskname = self.runq_task[task] | 790 | taskname = self.runq_task[task] |
@@ -785,7 +804,7 @@ class RunQueue: | |||
785 | fn2 = self.taskData.fn_index[self.runq_fnid[dep]] | 804 | fn2 = self.taskData.fn_index[self.runq_fnid[dep]] |
786 | taskname2 = self.runq_task[dep] | 805 | taskname2 = self.runq_task[dep] |
787 | stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2) | 806 | stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2) |
788 | if fulldeptree or fn == fn2: | 807 | if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist): |
789 | try: | 808 | try: |
790 | t2 = os.stat(stampfile2)[stat.ST_MTIME] | 809 | t2 = os.stat(stampfile2)[stat.ST_MTIME] |
791 | if t1 < t2: | 810 | if t1 < t2: |