diff options
Diffstat (limited to 'bitbake/lib/bb/runqueue.py')
-rw-r--r-- | bitbake/lib/bb/runqueue.py | 999 |
1 files changed, 654 insertions, 345 deletions
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py index 54ef245a63..bc7e18175d 100644 --- a/bitbake/lib/bb/runqueue.py +++ b/bitbake/lib/bb/runqueue.py | |||
@@ -24,6 +24,7 @@ import pickle | |||
24 | from multiprocessing import Process | 24 | from multiprocessing import Process |
25 | import shlex | 25 | import shlex |
26 | import pprint | 26 | import pprint |
27 | import time | ||
27 | 28 | ||
28 | bblogger = logging.getLogger("BitBake") | 29 | bblogger = logging.getLogger("BitBake") |
29 | logger = logging.getLogger("BitBake.RunQueue") | 30 | logger = logging.getLogger("BitBake.RunQueue") |
@@ -85,15 +86,19 @@ class RunQueueStats: | |||
85 | """ | 86 | """ |
86 | Holds statistics on the tasks handled by the associated runQueue | 87 | Holds statistics on the tasks handled by the associated runQueue |
87 | """ | 88 | """ |
88 | def __init__(self, total): | 89 | def __init__(self, total, setscene_total): |
89 | self.completed = 0 | 90 | self.completed = 0 |
90 | self.skipped = 0 | 91 | self.skipped = 0 |
91 | self.failed = 0 | 92 | self.failed = 0 |
92 | self.active = 0 | 93 | self.active = 0 |
94 | self.setscene_active = 0 | ||
95 | self.setscene_covered = 0 | ||
96 | self.setscene_notcovered = 0 | ||
97 | self.setscene_total = setscene_total | ||
93 | self.total = total | 98 | self.total = total |
94 | 99 | ||
95 | def copy(self): | 100 | def copy(self): |
96 | obj = self.__class__(self.total) | 101 | obj = self.__class__(self.total, self.setscene_total) |
97 | obj.__dict__.update(self.__dict__) | 102 | obj.__dict__.update(self.__dict__) |
98 | return obj | 103 | return obj |
99 | 104 | ||
@@ -112,6 +117,13 @@ class RunQueueStats: | |||
112 | def taskActive(self): | 117 | def taskActive(self): |
113 | self.active = self.active + 1 | 118 | self.active = self.active + 1 |
114 | 119 | ||
120 | def updateCovered(self, covered, notcovered): | ||
121 | self.setscene_covered = covered | ||
122 | self.setscene_notcovered = notcovered | ||
123 | |||
124 | def updateActiveSetscene(self, active): | ||
125 | self.setscene_active = active | ||
126 | |||
115 | # These values indicate the next step due to be run in the | 127 | # These values indicate the next step due to be run in the |
116 | # runQueue state machine | 128 | # runQueue state machine |
117 | runQueuePrepare = 2 | 129 | runQueuePrepare = 2 |
@@ -143,11 +155,82 @@ class RunQueueScheduler(object): | |||
143 | self.stamps = {} | 155 | self.stamps = {} |
144 | for tid in self.rqdata.runtaskentries: | 156 | for tid in self.rqdata.runtaskentries: |
145 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 157 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
146 | self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True) | 158 | self.stamps[tid] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) |
147 | if tid in self.rq.runq_buildable: | 159 | if tid in self.rq.runq_buildable: |
148 | self.buildable.append(tid) | 160 | self.buildable.add(tid) |
149 | 161 | ||
150 | self.rev_prio_map = None | 162 | self.rev_prio_map = None |
163 | self.is_pressure_usable() | ||
164 | |||
165 | def is_pressure_usable(self): | ||
166 | """ | ||
167 | If monitoring pressure, return True if pressure files can be open and read. For example | ||
168 | openSUSE /proc/pressure/* files have readable file permissions but when read the error EOPNOTSUPP (Operation not supported) | ||
169 | is returned. | ||
170 | """ | ||
171 | if self.rq.max_cpu_pressure or self.rq.max_io_pressure or self.rq.max_memory_pressure: | ||
172 | try: | ||
173 | with open("/proc/pressure/cpu") as cpu_pressure_fds, \ | ||
174 | open("/proc/pressure/io") as io_pressure_fds, \ | ||
175 | open("/proc/pressure/memory") as memory_pressure_fds: | ||
176 | |||
177 | self.prev_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1] | ||
178 | self.prev_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1] | ||
179 | self.prev_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1] | ||
180 | self.prev_pressure_time = time.time() | ||
181 | self.check_pressure = True | ||
182 | except: | ||
183 | bb.note("The /proc/pressure files can't be read. Continuing build without monitoring pressure") | ||
184 | self.check_pressure = False | ||
185 | else: | ||
186 | self.check_pressure = False | ||
187 | |||
188 | def exceeds_max_pressure(self): | ||
189 | """ | ||
190 | Monitor the difference in total pressure at least once per second, if | ||
191 | BB_PRESSURE_MAX_{CPU|IO|MEMORY} are set, return True if above threshold. | ||
192 | """ | ||
193 | if self.check_pressure: | ||
194 | with open("/proc/pressure/cpu") as cpu_pressure_fds, \ | ||
195 | open("/proc/pressure/io") as io_pressure_fds, \ | ||
196 | open("/proc/pressure/memory") as memory_pressure_fds: | ||
197 | # extract "total" from /proc/pressure/{cpu|io} | ||
198 | curr_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1] | ||
199 | curr_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1] | ||
200 | curr_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1] | ||
201 | now = time.time() | ||
202 | tdiff = now - self.prev_pressure_time | ||
203 | psi_accumulation_interval = 1.0 | ||
204 | cpu_pressure = (float(curr_cpu_pressure) - float(self.prev_cpu_pressure)) / tdiff | ||
205 | io_pressure = (float(curr_io_pressure) - float(self.prev_io_pressure)) / tdiff | ||
206 | memory_pressure = (float(curr_memory_pressure) - float(self.prev_memory_pressure)) / tdiff | ||
207 | exceeds_cpu_pressure = self.rq.max_cpu_pressure and cpu_pressure > self.rq.max_cpu_pressure | ||
208 | exceeds_io_pressure = self.rq.max_io_pressure and io_pressure > self.rq.max_io_pressure | ||
209 | exceeds_memory_pressure = self.rq.max_memory_pressure and memory_pressure > self.rq.max_memory_pressure | ||
210 | |||
211 | if tdiff > psi_accumulation_interval: | ||
212 | self.prev_cpu_pressure = curr_cpu_pressure | ||
213 | self.prev_io_pressure = curr_io_pressure | ||
214 | self.prev_memory_pressure = curr_memory_pressure | ||
215 | self.prev_pressure_time = now | ||
216 | |||
217 | pressure_state = (exceeds_cpu_pressure, exceeds_io_pressure, exceeds_memory_pressure) | ||
218 | pressure_values = (round(cpu_pressure,1), self.rq.max_cpu_pressure, round(io_pressure,1), self.rq.max_io_pressure, round(memory_pressure,1), self.rq.max_memory_pressure) | ||
219 | if hasattr(self, "pressure_state") and pressure_state != self.pressure_state: | ||
220 | bb.note("Pressure status changed to CPU: %s, IO: %s, Mem: %s (CPU: %s/%s, IO: %s/%s, Mem: %s/%s) - using %s/%s bitbake threads" % (pressure_state + pressure_values + (len(self.rq.runq_running.difference(self.rq.runq_complete)), self.rq.number_tasks))) | ||
221 | self.pressure_state = pressure_state | ||
222 | return (exceeds_cpu_pressure or exceeds_io_pressure or exceeds_memory_pressure) | ||
223 | elif self.rq.max_loadfactor: | ||
224 | limit = False | ||
225 | loadfactor = float(os.getloadavg()[0]) / os.cpu_count() | ||
226 | # bb.warn("Comparing %s to %s" % (loadfactor, self.rq.max_loadfactor)) | ||
227 | if loadfactor > self.rq.max_loadfactor: | ||
228 | limit = True | ||
229 | if hasattr(self, "loadfactor_limit") and limit != self.loadfactor_limit: | ||
230 | bb.note("Load average limiting set to %s as load average: %s - using %s/%s bitbake threads" % (limit, loadfactor, len(self.rq.runq_running.difference(self.rq.runq_complete)), self.rq.number_tasks)) | ||
231 | self.loadfactor_limit = limit | ||
232 | return limit | ||
233 | return False | ||
151 | 234 | ||
152 | def next_buildable_task(self): | 235 | def next_buildable_task(self): |
153 | """ | 236 | """ |
@@ -161,6 +244,12 @@ class RunQueueScheduler(object): | |||
161 | if not buildable: | 244 | if not buildable: |
162 | return None | 245 | return None |
163 | 246 | ||
247 | # Bitbake requires that at least one task be active. Only check for pressure if | ||
248 | # this is the case, otherwise the pressure limitation could result in no tasks | ||
249 | # being active and no new tasks started thereby, at times, breaking the scheduler. | ||
250 | if self.rq.stats.active and self.exceeds_max_pressure(): | ||
251 | return None | ||
252 | |||
164 | # Filter out tasks that have a max number of threads that have been exceeded | 253 | # Filter out tasks that have a max number of threads that have been exceeded |
165 | skip_buildable = {} | 254 | skip_buildable = {} |
166 | for running in self.rq.runq_running.difference(self.rq.runq_complete): | 255 | for running in self.rq.runq_running.difference(self.rq.runq_complete): |
@@ -191,11 +280,11 @@ class RunQueueScheduler(object): | |||
191 | best = None | 280 | best = None |
192 | bestprio = None | 281 | bestprio = None |
193 | for tid in buildable: | 282 | for tid in buildable: |
194 | taskname = taskname_from_tid(tid) | ||
195 | if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]): | ||
196 | continue | ||
197 | prio = self.rev_prio_map[tid] | 283 | prio = self.rev_prio_map[tid] |
198 | if bestprio is None or bestprio > prio: | 284 | if bestprio is None or bestprio > prio: |
285 | taskname = taskname_from_tid(tid) | ||
286 | if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]): | ||
287 | continue | ||
199 | stamp = self.stamps[tid] | 288 | stamp = self.stamps[tid] |
200 | if stamp in self.rq.build_stamps.values(): | 289 | if stamp in self.rq.build_stamps.values(): |
201 | continue | 290 | continue |
@@ -374,10 +463,9 @@ class RunQueueData: | |||
374 | self.rq = rq | 463 | self.rq = rq |
375 | self.warn_multi_bb = False | 464 | self.warn_multi_bb = False |
376 | 465 | ||
377 | self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or "" | 466 | self.multi_provider_allowed = (cfgData.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split() |
378 | self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split() | 467 | self.setscene_ignore_tasks = get_setscene_enforce_ignore_tasks(cfgData, targets) |
379 | self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData, targets) | 468 | self.setscene_ignore_tasks_checked = False |
380 | self.setscenewhitelist_checked = False | ||
381 | self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1") | 469 | self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1") |
382 | self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter() | 470 | self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter() |
383 | 471 | ||
@@ -475,7 +563,7 @@ class RunQueueData: | |||
475 | msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends))) | 563 | msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends))) |
476 | msgs.append("\n") | 564 | msgs.append("\n") |
477 | if len(valid_chains) > 10: | 565 | if len(valid_chains) > 10: |
478 | msgs.append("Aborted dependency loops search after 10 matches.\n") | 566 | msgs.append("Halted dependency loops search after 10 matches.\n") |
479 | raise TooManyLoops | 567 | raise TooManyLoops |
480 | continue | 568 | continue |
481 | scan = False | 569 | scan = False |
@@ -536,7 +624,7 @@ class RunQueueData: | |||
536 | next_points.append(revdep) | 624 | next_points.append(revdep) |
537 | task_done[revdep] = True | 625 | task_done[revdep] = True |
538 | endpoints = next_points | 626 | endpoints = next_points |
539 | if len(next_points) == 0: | 627 | if not next_points: |
540 | break | 628 | break |
541 | 629 | ||
542 | # Circular dependency sanity check | 630 | # Circular dependency sanity check |
@@ -578,15 +666,18 @@ class RunQueueData: | |||
578 | 666 | ||
579 | found = False | 667 | found = False |
580 | for mc in self.taskData: | 668 | for mc in self.taskData: |
581 | if len(taskData[mc].taskentries) > 0: | 669 | if taskData[mc].taskentries: |
582 | found = True | 670 | found = True |
583 | break | 671 | break |
584 | if not found: | 672 | if not found: |
585 | # Nothing to do | 673 | # Nothing to do |
586 | return 0 | 674 | return 0 |
587 | 675 | ||
676 | bb.parse.siggen.setup_datacache(self.dataCaches) | ||
677 | |||
588 | self.init_progress_reporter.start() | 678 | self.init_progress_reporter.start() |
589 | self.init_progress_reporter.next_stage() | 679 | self.init_progress_reporter.next_stage() |
680 | bb.event.check_for_interrupts(self.cooker.data) | ||
590 | 681 | ||
591 | # Step A - Work out a list of tasks to run | 682 | # Step A - Work out a list of tasks to run |
592 | # | 683 | # |
@@ -632,6 +723,8 @@ class RunQueueData: | |||
632 | frommc = mcdependency[1] | 723 | frommc = mcdependency[1] |
633 | mcdep = mcdependency[2] | 724 | mcdep = mcdependency[2] |
634 | deptask = mcdependency[4] | 725 | deptask = mcdependency[4] |
726 | if mcdep not in taskData: | ||
727 | bb.fatal("Multiconfig '%s' is referenced in multiconfig dependency '%s' but not enabled in BBMULTICONFIG?" % (mcdep, dep)) | ||
635 | if mc == frommc: | 728 | if mc == frommc: |
636 | fn = taskData[mcdep].build_targets[pn][0] | 729 | fn = taskData[mcdep].build_targets[pn][0] |
637 | newdep = '%s:%s' % (fn,deptask) | 730 | newdep = '%s:%s' % (fn,deptask) |
@@ -733,6 +826,7 @@ class RunQueueData: | |||
733 | #self.dump_data() | 826 | #self.dump_data() |
734 | 827 | ||
735 | self.init_progress_reporter.next_stage() | 828 | self.init_progress_reporter.next_stage() |
829 | bb.event.check_for_interrupts(self.cooker.data) | ||
736 | 830 | ||
737 | # Resolve recursive 'recrdeptask' dependencies (Part B) | 831 | # Resolve recursive 'recrdeptask' dependencies (Part B) |
738 | # | 832 | # |
@@ -762,7 +856,7 @@ class RunQueueData: | |||
762 | # Find the dependency chain endpoints | 856 | # Find the dependency chain endpoints |
763 | endpoints = set() | 857 | endpoints = set() |
764 | for tid in self.runtaskentries: | 858 | for tid in self.runtaskentries: |
765 | if len(deps[tid]) == 0: | 859 | if not deps[tid]: |
766 | endpoints.add(tid) | 860 | endpoints.add(tid) |
767 | # Iterate the chains collating dependencies | 861 | # Iterate the chains collating dependencies |
768 | while endpoints: | 862 | while endpoints: |
@@ -773,11 +867,11 @@ class RunQueueData: | |||
773 | cumulativedeps[dep].update(cumulativedeps[tid]) | 867 | cumulativedeps[dep].update(cumulativedeps[tid]) |
774 | if tid in deps[dep]: | 868 | if tid in deps[dep]: |
775 | deps[dep].remove(tid) | 869 | deps[dep].remove(tid) |
776 | if len(deps[dep]) == 0: | 870 | if not deps[dep]: |
777 | next.add(dep) | 871 | next.add(dep) |
778 | endpoints = next | 872 | endpoints = next |
779 | #for tid in deps: | 873 | #for tid in deps: |
780 | # if len(deps[tid]) != 0: | 874 | # if deps[tid]: |
781 | # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid])) | 875 | # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid])) |
782 | 876 | ||
783 | # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to | 877 | # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to |
@@ -829,6 +923,7 @@ class RunQueueData: | |||
829 | self.runtaskentries[tid].depends.difference_update(recursivetasksselfref) | 923 | self.runtaskentries[tid].depends.difference_update(recursivetasksselfref) |
830 | 924 | ||
831 | self.init_progress_reporter.next_stage() | 925 | self.init_progress_reporter.next_stage() |
926 | bb.event.check_for_interrupts(self.cooker.data) | ||
832 | 927 | ||
833 | #self.dump_data() | 928 | #self.dump_data() |
834 | 929 | ||
@@ -867,7 +962,7 @@ class RunQueueData: | |||
867 | bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname) | 962 | bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname) |
868 | else: | 963 | else: |
869 | logger.verbose("Invalidate task %s, %s", taskname, fn) | 964 | logger.verbose("Invalidate task %s, %s", taskname, fn) |
870 | bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn) | 965 | bb.parse.siggen.invalidate_task(taskname, taskfn) |
871 | 966 | ||
872 | self.target_tids = [] | 967 | self.target_tids = [] |
873 | for (mc, target, task, fn) in self.targets: | 968 | for (mc, target, task, fn) in self.targets: |
@@ -910,47 +1005,54 @@ class RunQueueData: | |||
910 | mark_active(tid, 1) | 1005 | mark_active(tid, 1) |
911 | 1006 | ||
912 | self.init_progress_reporter.next_stage() | 1007 | self.init_progress_reporter.next_stage() |
1008 | bb.event.check_for_interrupts(self.cooker.data) | ||
913 | 1009 | ||
914 | # Step C - Prune all inactive tasks | 1010 | # Step C - Prune all inactive tasks |
915 | # | 1011 | # |
916 | # Once all active tasks are marked, prune the ones we don't need. | 1012 | # Once all active tasks are marked, prune the ones we don't need. |
917 | 1013 | ||
918 | delcount = {} | ||
919 | for tid in list(self.runtaskentries.keys()): | ||
920 | if tid not in runq_build: | ||
921 | delcount[tid] = self.runtaskentries[tid] | ||
922 | del self.runtaskentries[tid] | ||
923 | |||
924 | # Handle --runall | 1014 | # Handle --runall |
925 | if self.cooker.configuration.runall: | 1015 | if self.cooker.configuration.runall: |
926 | # re-run the mark_active and then drop unused tasks from new list | 1016 | # re-run the mark_active and then drop unused tasks from new list |
927 | runq_build = {} | ||
928 | 1017 | ||
929 | for task in self.cooker.configuration.runall: | 1018 | runall_tids = set() |
930 | if not task.startswith("do_"): | 1019 | added = True |
931 | task = "do_{0}".format(task) | 1020 | while added: |
932 | runall_tids = set() | 1021 | reduced_tasklist = set(self.runtaskentries.keys()) |
933 | for tid in list(self.runtaskentries): | 1022 | for tid in list(self.runtaskentries.keys()): |
934 | wanttid = "{0}:{1}".format(fn_from_tid(tid), task) | 1023 | if tid not in runq_build: |
935 | if wanttid in delcount: | 1024 | reduced_tasklist.remove(tid) |
936 | self.runtaskentries[wanttid] = delcount[wanttid] | 1025 | runq_build = {} |
937 | if wanttid in self.runtaskentries: | ||
938 | runall_tids.add(wanttid) | ||
939 | |||
940 | for tid in list(runall_tids): | ||
941 | mark_active(tid,1) | ||
942 | if self.cooker.configuration.force: | ||
943 | invalidate_task(tid, False) | ||
944 | 1026 | ||
945 | for tid in list(self.runtaskentries.keys()): | 1027 | orig = runall_tids |
946 | if tid not in runq_build: | 1028 | runall_tids = set() |
947 | delcount[tid] = self.runtaskentries[tid] | 1029 | for task in self.cooker.configuration.runall: |
948 | del self.runtaskentries[tid] | 1030 | if not task.startswith("do_"): |
1031 | task = "do_{0}".format(task) | ||
1032 | for tid in reduced_tasklist: | ||
1033 | wanttid = "{0}:{1}".format(fn_from_tid(tid), task) | ||
1034 | if wanttid in self.runtaskentries: | ||
1035 | runall_tids.add(wanttid) | ||
1036 | |||
1037 | for tid in list(runall_tids): | ||
1038 | mark_active(tid, 1) | ||
1039 | self.target_tids.append(tid) | ||
1040 | if self.cooker.configuration.force: | ||
1041 | invalidate_task(tid, False) | ||
1042 | added = runall_tids - orig | ||
1043 | |||
1044 | delcount = set() | ||
1045 | for tid in list(self.runtaskentries.keys()): | ||
1046 | if tid not in runq_build: | ||
1047 | delcount.add(tid) | ||
1048 | del self.runtaskentries[tid] | ||
949 | 1049 | ||
950 | if len(self.runtaskentries) == 0: | 1050 | if self.cooker.configuration.runall: |
1051 | if not self.runtaskentries: | ||
951 | bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets))) | 1052 | bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets))) |
952 | 1053 | ||
953 | self.init_progress_reporter.next_stage() | 1054 | self.init_progress_reporter.next_stage() |
1055 | bb.event.check_for_interrupts(self.cooker.data) | ||
954 | 1056 | ||
955 | # Handle runonly | 1057 | # Handle runonly |
956 | if self.cooker.configuration.runonly: | 1058 | if self.cooker.configuration.runonly: |
@@ -960,19 +1062,19 @@ class RunQueueData: | |||
960 | for task in self.cooker.configuration.runonly: | 1062 | for task in self.cooker.configuration.runonly: |
961 | if not task.startswith("do_"): | 1063 | if not task.startswith("do_"): |
962 | task = "do_{0}".format(task) | 1064 | task = "do_{0}".format(task) |
963 | runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == task } | 1065 | runonly_tids = [k for k in self.runtaskentries.keys() if taskname_from_tid(k) == task] |
964 | 1066 | ||
965 | for tid in list(runonly_tids): | 1067 | for tid in runonly_tids: |
966 | mark_active(tid,1) | 1068 | mark_active(tid, 1) |
967 | if self.cooker.configuration.force: | 1069 | if self.cooker.configuration.force: |
968 | invalidate_task(tid, False) | 1070 | invalidate_task(tid, False) |
969 | 1071 | ||
970 | for tid in list(self.runtaskentries.keys()): | 1072 | for tid in list(self.runtaskentries.keys()): |
971 | if tid not in runq_build: | 1073 | if tid not in runq_build: |
972 | delcount[tid] = self.runtaskentries[tid] | 1074 | delcount.add(tid) |
973 | del self.runtaskentries[tid] | 1075 | del self.runtaskentries[tid] |
974 | 1076 | ||
975 | if len(self.runtaskentries) == 0: | 1077 | if not self.runtaskentries: |
976 | bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets))) | 1078 | bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets))) |
977 | 1079 | ||
978 | # | 1080 | # |
@@ -980,8 +1082,8 @@ class RunQueueData: | |||
980 | # | 1082 | # |
981 | 1083 | ||
982 | # Check to make sure we still have tasks to run | 1084 | # Check to make sure we still have tasks to run |
983 | if len(self.runtaskentries) == 0: | 1085 | if not self.runtaskentries: |
984 | if not taskData[''].abort: | 1086 | if not taskData[''].halt: |
985 | bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") | 1087 | bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") |
986 | else: | 1088 | else: |
987 | bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.") | 1089 | bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.") |
@@ -991,6 +1093,7 @@ class RunQueueData: | |||
991 | logger.verbose("Assign Weightings") | 1093 | logger.verbose("Assign Weightings") |
992 | 1094 | ||
993 | self.init_progress_reporter.next_stage() | 1095 | self.init_progress_reporter.next_stage() |
1096 | bb.event.check_for_interrupts(self.cooker.data) | ||
994 | 1097 | ||
995 | # Generate a list of reverse dependencies to ease future calculations | 1098 | # Generate a list of reverse dependencies to ease future calculations |
996 | for tid in self.runtaskentries: | 1099 | for tid in self.runtaskentries: |
@@ -998,13 +1101,14 @@ class RunQueueData: | |||
998 | self.runtaskentries[dep].revdeps.add(tid) | 1101 | self.runtaskentries[dep].revdeps.add(tid) |
999 | 1102 | ||
1000 | self.init_progress_reporter.next_stage() | 1103 | self.init_progress_reporter.next_stage() |
1104 | bb.event.check_for_interrupts(self.cooker.data) | ||
1001 | 1105 | ||
1002 | # Identify tasks at the end of dependency chains | 1106 | # Identify tasks at the end of dependency chains |
1003 | # Error on circular dependency loops (length two) | 1107 | # Error on circular dependency loops (length two) |
1004 | endpoints = [] | 1108 | endpoints = [] |
1005 | for tid in self.runtaskentries: | 1109 | for tid in self.runtaskentries: |
1006 | revdeps = self.runtaskentries[tid].revdeps | 1110 | revdeps = self.runtaskentries[tid].revdeps |
1007 | if len(revdeps) == 0: | 1111 | if not revdeps: |
1008 | endpoints.append(tid) | 1112 | endpoints.append(tid) |
1009 | for dep in revdeps: | 1113 | for dep in revdeps: |
1010 | if dep in self.runtaskentries[tid].depends: | 1114 | if dep in self.runtaskentries[tid].depends: |
@@ -1014,12 +1118,14 @@ class RunQueueData: | |||
1014 | logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints)) | 1118 | logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints)) |
1015 | 1119 | ||
1016 | self.init_progress_reporter.next_stage() | 1120 | self.init_progress_reporter.next_stage() |
1121 | bb.event.check_for_interrupts(self.cooker.data) | ||
1017 | 1122 | ||
1018 | # Calculate task weights | 1123 | # Calculate task weights |
1019 | # Check of higher length circular dependencies | 1124 | # Check of higher length circular dependencies |
1020 | self.runq_weight = self.calculate_task_weights(endpoints) | 1125 | self.runq_weight = self.calculate_task_weights(endpoints) |
1021 | 1126 | ||
1022 | self.init_progress_reporter.next_stage() | 1127 | self.init_progress_reporter.next_stage() |
1128 | bb.event.check_for_interrupts(self.cooker.data) | ||
1023 | 1129 | ||
1024 | # Sanity Check - Check for multiple tasks building the same provider | 1130 | # Sanity Check - Check for multiple tasks building the same provider |
1025 | for mc in self.dataCaches: | 1131 | for mc in self.dataCaches: |
@@ -1040,7 +1146,7 @@ class RunQueueData: | |||
1040 | for prov in prov_list: | 1146 | for prov in prov_list: |
1041 | if len(prov_list[prov]) < 2: | 1147 | if len(prov_list[prov]) < 2: |
1042 | continue | 1148 | continue |
1043 | if prov in self.multi_provider_whitelist: | 1149 | if prov in self.multi_provider_allowed: |
1044 | continue | 1150 | continue |
1045 | seen_pn = [] | 1151 | seen_pn = [] |
1046 | # If two versions of the same PN are being built its fatal, we don't support it. | 1152 | # If two versions of the same PN are being built its fatal, we don't support it. |
@@ -1050,12 +1156,12 @@ class RunQueueData: | |||
1050 | seen_pn.append(pn) | 1156 | seen_pn.append(pn) |
1051 | else: | 1157 | else: |
1052 | bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn)) | 1158 | bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn)) |
1053 | msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov])) | 1159 | msgs = ["Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))] |
1054 | # | 1160 | # |
1055 | # Construct a list of things which uniquely depend on each provider | 1161 | # Construct a list of things which uniquely depend on each provider |
1056 | # since this may help the user figure out which dependency is triggering this warning | 1162 | # since this may help the user figure out which dependency is triggering this warning |
1057 | # | 1163 | # |
1058 | msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from." | 1164 | msgs.append("\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from.") |
1059 | deplist = {} | 1165 | deplist = {} |
1060 | commondeps = None | 1166 | commondeps = None |
1061 | for provfn in prov_list[prov]: | 1167 | for provfn in prov_list[prov]: |
@@ -1075,12 +1181,12 @@ class RunQueueData: | |||
1075 | commondeps &= deps | 1181 | commondeps &= deps |
1076 | deplist[provfn] = deps | 1182 | deplist[provfn] = deps |
1077 | for provfn in deplist: | 1183 | for provfn in deplist: |
1078 | msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps)) | 1184 | msgs.append("\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))) |
1079 | # | 1185 | # |
1080 | # Construct a list of provides and runtime providers for each recipe | 1186 | # Construct a list of provides and runtime providers for each recipe |
1081 | # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC) | 1187 | # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC) |
1082 | # | 1188 | # |
1083 | msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful." | 1189 | msgs.append("\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful.") |
1084 | provide_results = {} | 1190 | provide_results = {} |
1085 | rprovide_results = {} | 1191 | rprovide_results = {} |
1086 | commonprovs = None | 1192 | commonprovs = None |
@@ -1107,30 +1213,20 @@ class RunQueueData: | |||
1107 | else: | 1213 | else: |
1108 | commonrprovs &= rprovides | 1214 | commonrprovs &= rprovides |
1109 | rprovide_results[provfn] = rprovides | 1215 | rprovide_results[provfn] = rprovides |
1110 | #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs)) | 1216 | #msgs.append("\nCommon provides:\n %s" % ("\n ".join(commonprovs))) |
1111 | #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs)) | 1217 | #msgs.append("\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))) |
1112 | for provfn in prov_list[prov]: | 1218 | for provfn in prov_list[prov]: |
1113 | msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs)) | 1219 | msgs.append("\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))) |
1114 | msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs)) | 1220 | msgs.append("\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))) |
1115 | 1221 | ||
1116 | if self.warn_multi_bb: | 1222 | if self.warn_multi_bb: |
1117 | logger.verbnote(msg) | 1223 | logger.verbnote("".join(msgs)) |
1118 | else: | 1224 | else: |
1119 | logger.error(msg) | 1225 | logger.error("".join(msgs)) |
1120 | 1226 | ||
1121 | self.init_progress_reporter.next_stage() | 1227 | self.init_progress_reporter.next_stage() |
1122 | |||
1123 | # Create a whitelist usable by the stamp checks | ||
1124 | self.stampfnwhitelist = {} | ||
1125 | for mc in self.taskData: | ||
1126 | self.stampfnwhitelist[mc] = [] | ||
1127 | for entry in self.stampwhitelist.split(): | ||
1128 | if entry not in self.taskData[mc].build_targets: | ||
1129 | continue | ||
1130 | fn = self.taskData.build_targets[entry][0] | ||
1131 | self.stampfnwhitelist[mc].append(fn) | ||
1132 | |||
1133 | self.init_progress_reporter.next_stage() | 1228 | self.init_progress_reporter.next_stage() |
1229 | bb.event.check_for_interrupts(self.cooker.data) | ||
1134 | 1230 | ||
1135 | # Iterate over the task list looking for tasks with a 'setscene' function | 1231 | # Iterate over the task list looking for tasks with a 'setscene' function |
1136 | self.runq_setscene_tids = set() | 1232 | self.runq_setscene_tids = set() |
@@ -1143,6 +1239,7 @@ class RunQueueData: | |||
1143 | self.runq_setscene_tids.add(tid) | 1239 | self.runq_setscene_tids.add(tid) |
1144 | 1240 | ||
1145 | self.init_progress_reporter.next_stage() | 1241 | self.init_progress_reporter.next_stage() |
1242 | bb.event.check_for_interrupts(self.cooker.data) | ||
1146 | 1243 | ||
1147 | # Invalidate task if force mode active | 1244 | # Invalidate task if force mode active |
1148 | if self.cooker.configuration.force: | 1245 | if self.cooker.configuration.force: |
@@ -1159,6 +1256,7 @@ class RunQueueData: | |||
1159 | invalidate_task(fn + ":" + st, True) | 1256 | invalidate_task(fn + ":" + st, True) |
1160 | 1257 | ||
1161 | self.init_progress_reporter.next_stage() | 1258 | self.init_progress_reporter.next_stage() |
1259 | bb.event.check_for_interrupts(self.cooker.data) | ||
1162 | 1260 | ||
1163 | # Create and print to the logs a virtual/xxxx -> PN (fn) table | 1261 | # Create and print to the logs a virtual/xxxx -> PN (fn) table |
1164 | for mc in taskData: | 1262 | for mc in taskData: |
@@ -1171,18 +1269,20 @@ class RunQueueData: | |||
1171 | bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc]) | 1269 | bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc]) |
1172 | 1270 | ||
1173 | self.init_progress_reporter.next_stage() | 1271 | self.init_progress_reporter.next_stage() |
1272 | bb.event.check_for_interrupts(self.cooker.data) | ||
1174 | 1273 | ||
1175 | bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids) | 1274 | bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids) |
1176 | 1275 | ||
1177 | # Iterate over the task list and call into the siggen code | 1276 | # Iterate over the task list and call into the siggen code |
1178 | dealtwith = set() | 1277 | dealtwith = set() |
1179 | todeal = set(self.runtaskentries) | 1278 | todeal = set(self.runtaskentries) |
1180 | while len(todeal) > 0: | 1279 | while todeal: |
1181 | for tid in todeal.copy(): | 1280 | for tid in todeal.copy(): |
1182 | if len(self.runtaskentries[tid].depends - dealtwith) == 0: | 1281 | if not (self.runtaskentries[tid].depends - dealtwith): |
1183 | dealtwith.add(tid) | 1282 | dealtwith.add(tid) |
1184 | todeal.remove(tid) | 1283 | todeal.remove(tid) |
1185 | self.prepare_task_hash(tid) | 1284 | self.prepare_task_hash(tid) |
1285 | bb.event.check_for_interrupts(self.cooker.data) | ||
1186 | 1286 | ||
1187 | bb.parse.siggen.writeout_file_checksum_cache() | 1287 | bb.parse.siggen.writeout_file_checksum_cache() |
1188 | 1288 | ||
@@ -1190,9 +1290,8 @@ class RunQueueData: | |||
1190 | return len(self.runtaskentries) | 1290 | return len(self.runtaskentries) |
1191 | 1291 | ||
1192 | def prepare_task_hash(self, tid): | 1292 | def prepare_task_hash(self, tid): |
1193 | dc = bb.parse.siggen.get_data_caches(self.dataCaches, mc_from_tid(tid)) | 1293 | bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches) |
1194 | bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, dc) | 1294 | self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches) |
1195 | self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, dc) | ||
1196 | self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid) | 1295 | self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid) |
1197 | 1296 | ||
1198 | def dump_data(self): | 1297 | def dump_data(self): |
@@ -1218,7 +1317,6 @@ class RunQueue: | |||
1218 | self.cfgData = cfgData | 1317 | self.cfgData = cfgData |
1219 | self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets) | 1318 | self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets) |
1220 | 1319 | ||
1221 | self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile" | ||
1222 | self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None | 1320 | self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None |
1223 | self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None | 1321 | self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None |
1224 | 1322 | ||
@@ -1237,30 +1335,40 @@ class RunQueue: | |||
1237 | self.worker = {} | 1335 | self.worker = {} |
1238 | self.fakeworker = {} | 1336 | self.fakeworker = {} |
1239 | 1337 | ||
1338 | @staticmethod | ||
1339 | def send_pickled_data(worker, data, name): | ||
1340 | msg = bytearray() | ||
1341 | msg.extend(b"<" + name.encode() + b">") | ||
1342 | pickled_data = pickle.dumps(data) | ||
1343 | msg.extend(len(pickled_data).to_bytes(4, 'big')) | ||
1344 | msg.extend(pickled_data) | ||
1345 | msg.extend(b"</" + name.encode() + b">") | ||
1346 | worker.stdin.write(msg) | ||
1347 | |||
1240 | def _start_worker(self, mc, fakeroot = False, rqexec = None): | 1348 | def _start_worker(self, mc, fakeroot = False, rqexec = None): |
1241 | logger.debug("Starting bitbake-worker") | 1349 | logger.debug("Starting bitbake-worker") |
1242 | magic = "decafbad" | 1350 | magic = "decafbad" |
1243 | if self.cooker.configuration.profile: | 1351 | if self.cooker.configuration.profile: |
1244 | magic = "decafbadbad" | 1352 | magic = "decafbadbad" |
1353 | fakerootlogs = None | ||
1354 | |||
1355 | workerscript = os.path.realpath(os.path.dirname(__file__) + "/../../bin/bitbake-worker") | ||
1245 | if fakeroot: | 1356 | if fakeroot: |
1246 | magic = magic + "beef" | 1357 | magic = magic + "beef" |
1247 | mcdata = self.cooker.databuilder.mcdata[mc] | 1358 | mcdata = self.cooker.databuilder.mcdata[mc] |
1248 | fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD")) | 1359 | fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD")) |
1249 | fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split() | 1360 | fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split() |
1250 | env = os.environ.copy() | 1361 | env = os.environ.copy() |
1251 | for key, value in (var.split('=') for var in fakerootenv): | 1362 | for key, value in (var.split('=',1) for var in fakerootenv): |
1252 | env[key] = value | 1363 | env[key] = value |
1253 | worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env) | 1364 | worker = subprocess.Popen(fakerootcmd + [sys.executable, workerscript, magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env) |
1365 | fakerootlogs = self.rqdata.dataCaches[mc].fakerootlogs | ||
1254 | else: | 1366 | else: |
1255 | worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE) | 1367 | worker = subprocess.Popen([sys.executable, workerscript, magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE) |
1256 | bb.utils.nonblockingfd(worker.stdout) | 1368 | bb.utils.nonblockingfd(worker.stdout) |
1257 | workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec) | 1369 | workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec, fakerootlogs=fakerootlogs) |
1258 | 1370 | ||
1259 | workerdata = { | 1371 | workerdata = { |
1260 | "taskdeps" : self.rqdata.dataCaches[mc].task_deps, | ||
1261 | "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv, | ||
1262 | "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs, | ||
1263 | "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv, | ||
1264 | "sigdata" : bb.parse.siggen.get_taskdata(), | 1372 | "sigdata" : bb.parse.siggen.get_taskdata(), |
1265 | "logdefaultlevel" : bb.msg.loggerDefaultLogLevel, | 1373 | "logdefaultlevel" : bb.msg.loggerDefaultLogLevel, |
1266 | "build_verbose_shell" : self.cooker.configuration.build_verbose_shell, | 1374 | "build_verbose_shell" : self.cooker.configuration.build_verbose_shell, |
@@ -1274,9 +1382,9 @@ class RunQueue: | |||
1274 | "umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"), | 1382 | "umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"), |
1275 | } | 1383 | } |
1276 | 1384 | ||
1277 | worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>") | 1385 | RunQueue.send_pickled_data(worker, self.cooker.configuration, "cookerconfig") |
1278 | worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>") | 1386 | RunQueue.send_pickled_data(worker, self.cooker.extraconfigdata, "extraconfigdata") |
1279 | worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>") | 1387 | RunQueue.send_pickled_data(worker, workerdata, "workerdata") |
1280 | worker.stdin.flush() | 1388 | worker.stdin.flush() |
1281 | 1389 | ||
1282 | return RunQueueWorker(worker, workerpipe) | 1390 | return RunQueueWorker(worker, workerpipe) |
@@ -1286,7 +1394,7 @@ class RunQueue: | |||
1286 | return | 1394 | return |
1287 | logger.debug("Teardown for bitbake-worker") | 1395 | logger.debug("Teardown for bitbake-worker") |
1288 | try: | 1396 | try: |
1289 | worker.process.stdin.write(b"<quit></quit>") | 1397 | RunQueue.send_pickled_data(worker.process, b"", "quit") |
1290 | worker.process.stdin.flush() | 1398 | worker.process.stdin.flush() |
1291 | worker.process.stdin.close() | 1399 | worker.process.stdin.close() |
1292 | except IOError: | 1400 | except IOError: |
@@ -1298,12 +1406,12 @@ class RunQueue: | |||
1298 | continue | 1406 | continue |
1299 | worker.pipe.close() | 1407 | worker.pipe.close() |
1300 | 1408 | ||
1301 | def start_worker(self): | 1409 | def start_worker(self, rqexec): |
1302 | if self.worker: | 1410 | if self.worker: |
1303 | self.teardown_workers() | 1411 | self.teardown_workers() |
1304 | self.teardown = False | 1412 | self.teardown = False |
1305 | for mc in self.rqdata.dataCaches: | 1413 | for mc in self.rqdata.dataCaches: |
1306 | self.worker[mc] = self._start_worker(mc) | 1414 | self.worker[mc] = self._start_worker(mc, False, rqexec) |
1307 | 1415 | ||
1308 | def start_fakeworker(self, rqexec, mc): | 1416 | def start_fakeworker(self, rqexec, mc): |
1309 | if not mc in self.fakeworker: | 1417 | if not mc in self.fakeworker: |
@@ -1345,15 +1453,7 @@ class RunQueue: | |||
1345 | if taskname is None: | 1453 | if taskname is None: |
1346 | taskname = tn | 1454 | taskname = tn |
1347 | 1455 | ||
1348 | if self.stamppolicy == "perfile": | 1456 | stampfile = bb.parse.siggen.stampfile_mcfn(taskname, taskfn) |
1349 | fulldeptree = False | ||
1350 | else: | ||
1351 | fulldeptree = True | ||
1352 | stampwhitelist = [] | ||
1353 | if self.stamppolicy == "whitelist": | ||
1354 | stampwhitelist = self.rqdata.stampfnwhitelist[mc] | ||
1355 | |||
1356 | stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn) | ||
1357 | 1457 | ||
1358 | # If the stamp is missing, it's not current | 1458 | # If the stamp is missing, it's not current |
1359 | if not os.access(stampfile, os.F_OK): | 1459 | if not os.access(stampfile, os.F_OK): |
@@ -1365,7 +1465,7 @@ class RunQueue: | |||
1365 | logger.debug2("%s.%s is nostamp\n", fn, taskname) | 1465 | logger.debug2("%s.%s is nostamp\n", fn, taskname) |
1366 | return False | 1466 | return False |
1367 | 1467 | ||
1368 | if taskname != "do_setscene" and taskname.endswith("_setscene"): | 1468 | if taskname.endswith("_setscene"): |
1369 | return True | 1469 | return True |
1370 | 1470 | ||
1371 | if cache is None: | 1471 | if cache is None: |
@@ -1376,15 +1476,15 @@ class RunQueue: | |||
1376 | for dep in self.rqdata.runtaskentries[tid].depends: | 1476 | for dep in self.rqdata.runtaskentries[tid].depends: |
1377 | if iscurrent: | 1477 | if iscurrent: |
1378 | (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep) | 1478 | (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep) |
1379 | stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2) | 1479 | stampfile2 = bb.parse.siggen.stampfile_mcfn(taskname2, taskfn2) |
1380 | stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2) | 1480 | stampfile3 = bb.parse.siggen.stampfile_mcfn(taskname2 + "_setscene", taskfn2) |
1381 | t2 = get_timestamp(stampfile2) | 1481 | t2 = get_timestamp(stampfile2) |
1382 | t3 = get_timestamp(stampfile3) | 1482 | t3 = get_timestamp(stampfile3) |
1383 | if t3 and not t2: | 1483 | if t3 and not t2: |
1384 | continue | 1484 | continue |
1385 | if t3 and t3 > t2: | 1485 | if t3 and t3 > t2: |
1386 | continue | 1486 | continue |
1387 | if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist): | 1487 | if fn == fn2: |
1388 | if not t2: | 1488 | if not t2: |
1389 | logger.debug2('Stampfile %s does not exist', stampfile2) | 1489 | logger.debug2('Stampfile %s does not exist', stampfile2) |
1390 | iscurrent = False | 1490 | iscurrent = False |
@@ -1434,10 +1534,11 @@ class RunQueue: | |||
1434 | """ | 1534 | """ |
1435 | Run the tasks in a queue prepared by rqdata.prepare() | 1535 | Run the tasks in a queue prepared by rqdata.prepare() |
1436 | Upon failure, optionally try to recover the build using any alternate providers | 1536 | Upon failure, optionally try to recover the build using any alternate providers |
1437 | (if the abort on failure configuration option isn't set) | 1537 | (if the halt on failure configuration option isn't set) |
1438 | """ | 1538 | """ |
1439 | 1539 | ||
1440 | retval = True | 1540 | retval = True |
1541 | bb.event.check_for_interrupts(self.cooker.data) | ||
1441 | 1542 | ||
1442 | if self.state is runQueuePrepare: | 1543 | if self.state is runQueuePrepare: |
1443 | # NOTE: if you add, remove or significantly refactor the stages of this | 1544 | # NOTE: if you add, remove or significantly refactor the stages of this |
@@ -1466,10 +1567,13 @@ class RunQueue: | |||
1466 | 1567 | ||
1467 | if not self.dm_event_handler_registered: | 1568 | if not self.dm_event_handler_registered: |
1468 | res = bb.event.register(self.dm_event_handler_name, | 1569 | res = bb.event.register(self.dm_event_handler_name, |
1469 | lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False, | 1570 | lambda x, y: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False, |
1470 | ('bb.event.HeartbeatEvent',), data=self.cfgData) | 1571 | ('bb.event.HeartbeatEvent',), data=self.cfgData) |
1471 | self.dm_event_handler_registered = True | 1572 | self.dm_event_handler_registered = True |
1472 | 1573 | ||
1574 | self.rqdata.init_progress_reporter.next_stage() | ||
1575 | self.rqexe = RunQueueExecute(self) | ||
1576 | |||
1473 | dump = self.cooker.configuration.dump_signatures | 1577 | dump = self.cooker.configuration.dump_signatures |
1474 | if dump: | 1578 | if dump: |
1475 | self.rqdata.init_progress_reporter.finish() | 1579 | self.rqdata.init_progress_reporter.finish() |
@@ -1481,16 +1585,14 @@ class RunQueue: | |||
1481 | self.state = runQueueComplete | 1585 | self.state = runQueueComplete |
1482 | 1586 | ||
1483 | if self.state is runQueueSceneInit: | 1587 | if self.state is runQueueSceneInit: |
1484 | self.rqdata.init_progress_reporter.next_stage() | 1588 | self.start_worker(self.rqexe) |
1485 | self.start_worker() | 1589 | self.rqdata.init_progress_reporter.finish() |
1486 | self.rqdata.init_progress_reporter.next_stage() | ||
1487 | self.rqexe = RunQueueExecute(self) | ||
1488 | 1590 | ||
1489 | # If we don't have any setscene functions, skip execution | 1591 | # If we don't have any setscene functions, skip execution |
1490 | if len(self.rqdata.runq_setscene_tids) == 0: | 1592 | if not self.rqdata.runq_setscene_tids: |
1491 | logger.info('No setscene tasks') | 1593 | logger.info('No setscene tasks') |
1492 | for tid in self.rqdata.runtaskentries: | 1594 | for tid in self.rqdata.runtaskentries: |
1493 | if len(self.rqdata.runtaskentries[tid].depends) == 0: | 1595 | if not self.rqdata.runtaskentries[tid].depends: |
1494 | self.rqexe.setbuildable(tid) | 1596 | self.rqexe.setbuildable(tid) |
1495 | self.rqexe.tasks_notcovered.add(tid) | 1597 | self.rqexe.tasks_notcovered.add(tid) |
1496 | self.rqexe.sqdone = True | 1598 | self.rqexe.sqdone = True |
@@ -1563,29 +1665,28 @@ class RunQueue: | |||
1563 | else: | 1665 | else: |
1564 | self.rqexe.finish() | 1666 | self.rqexe.finish() |
1565 | 1667 | ||
1566 | def rq_dump_sigfn(self, fn, options): | 1668 | def _rq_dump_sigtid(self, tids): |
1567 | bb_cache = bb.cache.NoCache(self.cooker.databuilder) | 1669 | for tid in tids: |
1568 | mc = bb.runqueue.mc_from_tid(fn) | 1670 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
1569 | the_data = bb_cache.loadDataFull(fn, self.cooker.collections[mc].get_file_appends(fn)) | 1671 | dataCaches = self.rqdata.dataCaches |
1570 | siggen = bb.parse.siggen | 1672 | bb.parse.siggen.dump_sigtask(taskfn, taskname, dataCaches[mc].stamp[taskfn], True) |
1571 | dataCaches = self.rqdata.dataCaches | ||
1572 | siggen.dump_sigfn(fn, dataCaches, options) | ||
1573 | 1673 | ||
1574 | def dump_signatures(self, options): | 1674 | def dump_signatures(self, options): |
1575 | fns = set() | 1675 | if bb.cooker.CookerFeatures.RECIPE_SIGGEN_INFO not in self.cooker.featureset: |
1576 | bb.note("Reparsing files to collect dependency data") | 1676 | bb.fatal("The dump signatures functionality needs the RECIPE_SIGGEN_INFO feature enabled") |
1577 | 1677 | ||
1578 | for tid in self.rqdata.runtaskentries: | 1678 | bb.note("Writing task signature files") |
1579 | fn = fn_from_tid(tid) | ||
1580 | fns.add(fn) | ||
1581 | 1679 | ||
1582 | max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1) | 1680 | max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1) |
1681 | def chunkify(l, n): | ||
1682 | return [l[i::n] for i in range(n)] | ||
1683 | tids = chunkify(list(self.rqdata.runtaskentries), max_process) | ||
1583 | # We cannot use the real multiprocessing.Pool easily due to some local data | 1684 | # We cannot use the real multiprocessing.Pool easily due to some local data |
1584 | # that can't be pickled. This is a cheap multi-process solution. | 1685 | # that can't be pickled. This is a cheap multi-process solution. |
1585 | launched = [] | 1686 | launched = [] |
1586 | while fns: | 1687 | while tids: |
1587 | if len(launched) < max_process: | 1688 | if len(launched) < max_process: |
1588 | p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options)) | 1689 | p = Process(target=self._rq_dump_sigtid, args=(tids.pop(), )) |
1589 | p.start() | 1690 | p.start() |
1590 | launched.append(p) | 1691 | launched.append(p) |
1591 | for q in launched: | 1692 | for q in launched: |
@@ -1600,6 +1701,17 @@ class RunQueue: | |||
1600 | return | 1701 | return |
1601 | 1702 | ||
1602 | def print_diffscenetasks(self): | 1703 | def print_diffscenetasks(self): |
1704 | def get_root_invalid_tasks(task, taskdepends, valid, noexec, visited_invalid): | ||
1705 | invalidtasks = [] | ||
1706 | for t in taskdepends[task].depends: | ||
1707 | if t not in valid and t not in visited_invalid: | ||
1708 | invalidtasks.extend(get_root_invalid_tasks(t, taskdepends, valid, noexec, visited_invalid)) | ||
1709 | visited_invalid.add(t) | ||
1710 | |||
1711 | direct_invalid = [t for t in taskdepends[task].depends if t not in valid] | ||
1712 | if not direct_invalid and task not in noexec: | ||
1713 | invalidtasks = [task] | ||
1714 | return invalidtasks | ||
1603 | 1715 | ||
1604 | noexec = [] | 1716 | noexec = [] |
1605 | tocheck = set() | 1717 | tocheck = set() |
@@ -1633,46 +1745,49 @@ class RunQueue: | |||
1633 | valid_new.add(dep) | 1745 | valid_new.add(dep) |
1634 | 1746 | ||
1635 | invalidtasks = set() | 1747 | invalidtasks = set() |
1636 | for tid in self.rqdata.runtaskentries: | ||
1637 | if tid not in valid_new and tid not in noexec: | ||
1638 | invalidtasks.add(tid) | ||
1639 | 1748 | ||
1640 | found = set() | 1749 | toptasks = set(["{}:{}".format(t[3], t[2]) for t in self.rqdata.targets]) |
1641 | processed = set() | 1750 | for tid in toptasks: |
1642 | for tid in invalidtasks: | ||
1643 | toprocess = set([tid]) | 1751 | toprocess = set([tid]) |
1644 | while toprocess: | 1752 | while toprocess: |
1645 | next = set() | 1753 | next = set() |
1754 | visited_invalid = set() | ||
1646 | for t in toprocess: | 1755 | for t in toprocess: |
1647 | for dep in self.rqdata.runtaskentries[t].depends: | 1756 | if t not in valid_new and t not in noexec: |
1648 | if dep in invalidtasks: | 1757 | invalidtasks.update(get_root_invalid_tasks(t, self.rqdata.runtaskentries, valid_new, noexec, visited_invalid)) |
1649 | found.add(tid) | 1758 | continue |
1650 | if dep not in processed: | 1759 | if t in self.rqdata.runq_setscene_tids: |
1651 | processed.add(dep) | 1760 | for dep in self.rqexe.sqdata.sq_deps[t]: |
1652 | next.add(dep) | 1761 | next.add(dep) |
1762 | continue | ||
1763 | |||
1764 | for dep in self.rqdata.runtaskentries[t].depends: | ||
1765 | next.add(dep) | ||
1766 | |||
1653 | toprocess = next | 1767 | toprocess = next |
1654 | if tid in found: | ||
1655 | toprocess = set() | ||
1656 | 1768 | ||
1657 | tasklist = [] | 1769 | tasklist = [] |
1658 | for tid in invalidtasks.difference(found): | 1770 | for tid in invalidtasks: |
1659 | tasklist.append(tid) | 1771 | tasklist.append(tid) |
1660 | 1772 | ||
1661 | if tasklist: | 1773 | if tasklist: |
1662 | bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist)) | 1774 | bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist)) |
1663 | 1775 | ||
1664 | return invalidtasks.difference(found) | 1776 | return invalidtasks |
1665 | 1777 | ||
1666 | def write_diffscenetasks(self, invalidtasks): | 1778 | def write_diffscenetasks(self, invalidtasks): |
1779 | bb.siggen.check_siggen_version(bb.siggen) | ||
1667 | 1780 | ||
1668 | # Define recursion callback | 1781 | # Define recursion callback |
1669 | def recursecb(key, hash1, hash2): | 1782 | def recursecb(key, hash1, hash2): |
1670 | hashes = [hash1, hash2] | 1783 | hashes = [hash1, hash2] |
1784 | bb.debug(1, "Recursively looking for recipe {} hashes {}".format(key, hashes)) | ||
1671 | hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData) | 1785 | hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData) |
1786 | bb.debug(1, "Found hashfiles:\n{}".format(hashfiles)) | ||
1672 | 1787 | ||
1673 | recout = [] | 1788 | recout = [] |
1674 | if len(hashfiles) == 2: | 1789 | if len(hashfiles) == 2: |
1675 | out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb) | 1790 | out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb) |
1676 | recout.extend(list(' ' + l for l in out2)) | 1791 | recout.extend(list(' ' + l for l in out2)) |
1677 | else: | 1792 | else: |
1678 | recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2)) | 1793 | recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2)) |
@@ -1683,20 +1798,25 @@ class RunQueue: | |||
1683 | for tid in invalidtasks: | 1798 | for tid in invalidtasks: |
1684 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 1799 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
1685 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] | 1800 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] |
1686 | h = self.rqdata.runtaskentries[tid].hash | 1801 | h = self.rqdata.runtaskentries[tid].unihash |
1687 | matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData) | 1802 | bb.debug(1, "Looking for recipe {} task {}".format(pn, taskname)) |
1803 | matches = bb.siggen.find_siginfo(pn, taskname, [], self.cooker.databuilder.mcdata[mc]) | ||
1804 | bb.debug(1, "Found hashfiles:\n{}".format(matches)) | ||
1688 | match = None | 1805 | match = None |
1689 | for m in matches: | 1806 | for m in matches.values(): |
1690 | if h in m: | 1807 | if h in m['path']: |
1691 | match = m | 1808 | match = m['path'] |
1692 | if match is None: | 1809 | if match is None: |
1693 | bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h) | 1810 | bb.fatal("Can't find a task we're supposed to have written out? (hash: %s tid: %s)?" % (h, tid)) |
1694 | matches = {k : v for k, v in iter(matches.items()) if h not in k} | 1811 | matches = {k : v for k, v in iter(matches.items()) if h not in k} |
1812 | matches_local = {k : v for k, v in iter(matches.items()) if h not in k and not v['sstate']} | ||
1813 | if matches_local: | ||
1814 | matches = matches_local | ||
1695 | if matches: | 1815 | if matches: |
1696 | latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1] | 1816 | latestmatch = matches[sorted(matches.keys(), key=lambda h: matches[h]['time'])[-1]]['path'] |
1697 | prevh = __find_sha256__.search(latestmatch).group(0) | 1817 | prevh = __find_sha256__.search(latestmatch).group(0) |
1698 | output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb) | 1818 | output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb) |
1699 | bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output)) | 1819 | bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, most recent matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output)) |
1700 | 1820 | ||
1701 | 1821 | ||
1702 | class RunQueueExecute: | 1822 | class RunQueueExecute: |
@@ -1709,6 +1829,10 @@ class RunQueueExecute: | |||
1709 | 1829 | ||
1710 | self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1) | 1830 | self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1) |
1711 | self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed" | 1831 | self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed" |
1832 | self.max_cpu_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_CPU") | ||
1833 | self.max_io_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_IO") | ||
1834 | self.max_memory_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_MEMORY") | ||
1835 | self.max_loadfactor = self.cfgData.getVar("BB_LOADFACTOR_MAX") | ||
1712 | 1836 | ||
1713 | self.sq_buildable = set() | 1837 | self.sq_buildable = set() |
1714 | self.sq_running = set() | 1838 | self.sq_running = set() |
@@ -1726,6 +1850,8 @@ class RunQueueExecute: | |||
1726 | self.build_stamps2 = [] | 1850 | self.build_stamps2 = [] |
1727 | self.failed_tids = [] | 1851 | self.failed_tids = [] |
1728 | self.sq_deferred = {} | 1852 | self.sq_deferred = {} |
1853 | self.sq_needed_harddeps = set() | ||
1854 | self.sq_harddep_deferred = set() | ||
1729 | 1855 | ||
1730 | self.stampcache = {} | 1856 | self.stampcache = {} |
1731 | 1857 | ||
@@ -1733,17 +1859,39 @@ class RunQueueExecute: | |||
1733 | self.holdoff_need_update = True | 1859 | self.holdoff_need_update = True |
1734 | self.sqdone = False | 1860 | self.sqdone = False |
1735 | 1861 | ||
1736 | self.stats = RunQueueStats(len(self.rqdata.runtaskentries)) | 1862 | self.stats = RunQueueStats(len(self.rqdata.runtaskentries), len(self.rqdata.runq_setscene_tids)) |
1737 | self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids)) | ||
1738 | |||
1739 | for mc in rq.worker: | ||
1740 | rq.worker[mc].pipe.setrunqueueexec(self) | ||
1741 | for mc in rq.fakeworker: | ||
1742 | rq.fakeworker[mc].pipe.setrunqueueexec(self) | ||
1743 | 1863 | ||
1744 | if self.number_tasks <= 0: | 1864 | if self.number_tasks <= 0: |
1745 | bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks) | 1865 | bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks) |
1746 | 1866 | ||
1867 | lower_limit = 1.0 | ||
1868 | upper_limit = 1000000.0 | ||
1869 | if self.max_cpu_pressure: | ||
1870 | self.max_cpu_pressure = float(self.max_cpu_pressure) | ||
1871 | if self.max_cpu_pressure < lower_limit: | ||
1872 | bb.fatal("Invalid BB_PRESSURE_MAX_CPU %s, minimum value is %s." % (self.max_cpu_pressure, lower_limit)) | ||
1873 | if self.max_cpu_pressure > upper_limit: | ||
1874 | bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_CPU is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_cpu_pressure)) | ||
1875 | |||
1876 | if self.max_io_pressure: | ||
1877 | self.max_io_pressure = float(self.max_io_pressure) | ||
1878 | if self.max_io_pressure < lower_limit: | ||
1879 | bb.fatal("Invalid BB_PRESSURE_MAX_IO %s, minimum value is %s." % (self.max_io_pressure, lower_limit)) | ||
1880 | if self.max_io_pressure > upper_limit: | ||
1881 | bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_IO is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure)) | ||
1882 | |||
1883 | if self.max_memory_pressure: | ||
1884 | self.max_memory_pressure = float(self.max_memory_pressure) | ||
1885 | if self.max_memory_pressure < lower_limit: | ||
1886 | bb.fatal("Invalid BB_PRESSURE_MAX_MEMORY %s, minimum value is %s." % (self.max_memory_pressure, lower_limit)) | ||
1887 | if self.max_memory_pressure > upper_limit: | ||
1888 | bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_MEMORY is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure)) | ||
1889 | |||
1890 | if self.max_loadfactor: | ||
1891 | self.max_loadfactor = float(self.max_loadfactor) | ||
1892 | if self.max_loadfactor <= 0: | ||
1893 | bb.fatal("Invalid BB_LOADFACTOR_MAX %s, needs to be greater than zero." % (self.max_loadfactor)) | ||
1894 | |||
1747 | # List of setscene tasks which we've covered | 1895 | # List of setscene tasks which we've covered |
1748 | self.scenequeue_covered = set() | 1896 | self.scenequeue_covered = set() |
1749 | # List of tasks which are covered (including setscene ones) | 1897 | # List of tasks which are covered (including setscene ones) |
@@ -1753,11 +1901,6 @@ class RunQueueExecute: | |||
1753 | self.tasks_notcovered = set() | 1901 | self.tasks_notcovered = set() |
1754 | self.scenequeue_notneeded = set() | 1902 | self.scenequeue_notneeded = set() |
1755 | 1903 | ||
1756 | # We can't skip specified target tasks which aren't setscene tasks | ||
1757 | self.cantskip = set(self.rqdata.target_tids) | ||
1758 | self.cantskip.difference_update(self.rqdata.runq_setscene_tids) | ||
1759 | self.cantskip.intersection_update(self.rqdata.runtaskentries) | ||
1760 | |||
1761 | schedulers = self.get_schedulers() | 1904 | schedulers = self.get_schedulers() |
1762 | for scheduler in schedulers: | 1905 | for scheduler in schedulers: |
1763 | if self.scheduler == scheduler.name: | 1906 | if self.scheduler == scheduler.name: |
@@ -1768,11 +1911,29 @@ class RunQueueExecute: | |||
1768 | bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" % | 1911 | bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" % |
1769 | (self.scheduler, ", ".join(obj.name for obj in schedulers))) | 1912 | (self.scheduler, ", ".join(obj.name for obj in schedulers))) |
1770 | 1913 | ||
1771 | #if len(self.rqdata.runq_setscene_tids) > 0: | 1914 | #if self.rqdata.runq_setscene_tids: |
1772 | self.sqdata = SQData() | 1915 | self.sqdata = SQData() |
1773 | build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self) | 1916 | build_scenequeue_data(self.sqdata, self.rqdata, self) |
1917 | |||
1918 | update_scenequeue_data(self.sqdata.sq_revdeps, self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=True) | ||
1919 | |||
1920 | # Compute a list of 'stale' sstate tasks where the current hash does not match the one | ||
1921 | # in any stamp files. Pass the list out to metadata as an event. | ||
1922 | found = {} | ||
1923 | for tid in self.rqdata.runq_setscene_tids: | ||
1924 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | ||
1925 | stamps = bb.build.find_stale_stamps(taskname, taskfn) | ||
1926 | if stamps: | ||
1927 | if mc not in found: | ||
1928 | found[mc] = {} | ||
1929 | found[mc][tid] = stamps | ||
1930 | for mc in found: | ||
1931 | event = bb.event.StaleSetSceneTasks(found[mc]) | ||
1932 | bb.event.fire(event, self.cooker.databuilder.mcdata[mc]) | ||
1933 | |||
1934 | self.build_taskdepdata_cache() | ||
1774 | 1935 | ||
1775 | def runqueue_process_waitpid(self, task, status): | 1936 | def runqueue_process_waitpid(self, task, status, fakerootlog=None): |
1776 | 1937 | ||
1777 | # self.build_stamps[pid] may not exist when use shared work directory. | 1938 | # self.build_stamps[pid] may not exist when use shared work directory. |
1778 | if task in self.build_stamps: | 1939 | if task in self.build_stamps: |
@@ -1785,9 +1946,10 @@ class RunQueueExecute: | |||
1785 | else: | 1946 | else: |
1786 | self.sq_task_complete(task) | 1947 | self.sq_task_complete(task) |
1787 | self.sq_live.remove(task) | 1948 | self.sq_live.remove(task) |
1949 | self.stats.updateActiveSetscene(len(self.sq_live)) | ||
1788 | else: | 1950 | else: |
1789 | if status != 0: | 1951 | if status != 0: |
1790 | self.task_fail(task, status) | 1952 | self.task_fail(task, status, fakerootlog=fakerootlog) |
1791 | else: | 1953 | else: |
1792 | self.task_complete(task) | 1954 | self.task_complete(task) |
1793 | return True | 1955 | return True |
@@ -1795,20 +1957,20 @@ class RunQueueExecute: | |||
1795 | def finish_now(self): | 1957 | def finish_now(self): |
1796 | for mc in self.rq.worker: | 1958 | for mc in self.rq.worker: |
1797 | try: | 1959 | try: |
1798 | self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>") | 1960 | RunQueue.send_pickled_data(self.rq.worker[mc].process, b"", "finishnow") |
1799 | self.rq.worker[mc].process.stdin.flush() | 1961 | self.rq.worker[mc].process.stdin.flush() |
1800 | except IOError: | 1962 | except IOError: |
1801 | # worker must have died? | 1963 | # worker must have died? |
1802 | pass | 1964 | pass |
1803 | for mc in self.rq.fakeworker: | 1965 | for mc in self.rq.fakeworker: |
1804 | try: | 1966 | try: |
1805 | self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>") | 1967 | RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, b"", "finishnow") |
1806 | self.rq.fakeworker[mc].process.stdin.flush() | 1968 | self.rq.fakeworker[mc].process.stdin.flush() |
1807 | except IOError: | 1969 | except IOError: |
1808 | # worker must have died? | 1970 | # worker must have died? |
1809 | pass | 1971 | pass |
1810 | 1972 | ||
1811 | if len(self.failed_tids) != 0: | 1973 | if self.failed_tids: |
1812 | self.rq.state = runQueueFailed | 1974 | self.rq.state = runQueueFailed |
1813 | return | 1975 | return |
1814 | 1976 | ||
@@ -1818,13 +1980,13 @@ class RunQueueExecute: | |||
1818 | def finish(self): | 1980 | def finish(self): |
1819 | self.rq.state = runQueueCleanUp | 1981 | self.rq.state = runQueueCleanUp |
1820 | 1982 | ||
1821 | active = self.stats.active + self.sq_stats.active | 1983 | active = self.stats.active + len(self.sq_live) |
1822 | if active > 0: | 1984 | if active > 0: |
1823 | bb.event.fire(runQueueExitWait(active), self.cfgData) | 1985 | bb.event.fire(runQueueExitWait(active), self.cfgData) |
1824 | self.rq.read_workers() | 1986 | self.rq.read_workers() |
1825 | return self.rq.active_fds() | 1987 | return self.rq.active_fds() |
1826 | 1988 | ||
1827 | if len(self.failed_tids) != 0: | 1989 | if self.failed_tids: |
1828 | self.rq.state = runQueueFailed | 1990 | self.rq.state = runQueueFailed |
1829 | return True | 1991 | return True |
1830 | 1992 | ||
@@ -1851,7 +2013,7 @@ class RunQueueExecute: | |||
1851 | return valid | 2013 | return valid |
1852 | 2014 | ||
1853 | def can_start_task(self): | 2015 | def can_start_task(self): |
1854 | active = self.stats.active + self.sq_stats.active | 2016 | active = self.stats.active + len(self.sq_live) |
1855 | can_start = active < self.number_tasks | 2017 | can_start = active < self.number_tasks |
1856 | return can_start | 2018 | return can_start |
1857 | 2019 | ||
@@ -1871,8 +2033,7 @@ class RunQueueExecute: | |||
1871 | try: | 2033 | try: |
1872 | module = __import__(modname, fromlist=(name,)) | 2034 | module = __import__(modname, fromlist=(name,)) |
1873 | except ImportError as exc: | 2035 | except ImportError as exc: |
1874 | logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc)) | 2036 | bb.fatal("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc)) |
1875 | raise SystemExit(1) | ||
1876 | else: | 2037 | else: |
1877 | schedulers.add(getattr(module, name)) | 2038 | schedulers.add(getattr(module, name)) |
1878 | return schedulers | 2039 | return schedulers |
@@ -1902,21 +2063,52 @@ class RunQueueExecute: | |||
1902 | self.setbuildable(revdep) | 2063 | self.setbuildable(revdep) |
1903 | logger.debug("Marking task %s as buildable", revdep) | 2064 | logger.debug("Marking task %s as buildable", revdep) |
1904 | 2065 | ||
2066 | found = None | ||
2067 | for t in sorted(self.sq_deferred.copy()): | ||
2068 | if self.sq_deferred[t] == task: | ||
2069 | # Allow the next deferred task to run. Any other deferred tasks should be deferred after that task. | ||
2070 | # We shouldn't allow all to run at once as it is prone to races. | ||
2071 | if not found: | ||
2072 | bb.debug(1, "Deferred task %s now buildable" % t) | ||
2073 | del self.sq_deferred[t] | ||
2074 | update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False) | ||
2075 | found = t | ||
2076 | else: | ||
2077 | bb.debug(1, "Deferring %s after %s" % (t, found)) | ||
2078 | self.sq_deferred[t] = found | ||
2079 | |||
1905 | def task_complete(self, task): | 2080 | def task_complete(self, task): |
1906 | self.stats.taskCompleted() | 2081 | self.stats.taskCompleted() |
1907 | bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData) | 2082 | bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData) |
1908 | self.task_completeoutright(task) | 2083 | self.task_completeoutright(task) |
1909 | self.runq_tasksrun.add(task) | 2084 | self.runq_tasksrun.add(task) |
1910 | 2085 | ||
1911 | def task_fail(self, task, exitcode): | 2086 | def task_fail(self, task, exitcode, fakerootlog=None): |
1912 | """ | 2087 | """ |
1913 | Called when a task has failed | 2088 | Called when a task has failed |
1914 | Updates the state engine with the failure | 2089 | Updates the state engine with the failure |
1915 | """ | 2090 | """ |
1916 | self.stats.taskFailed() | 2091 | self.stats.taskFailed() |
1917 | self.failed_tids.append(task) | 2092 | self.failed_tids.append(task) |
1918 | bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData) | 2093 | |
1919 | if self.rqdata.taskData[''].abort: | 2094 | fakeroot_log = [] |
2095 | if fakerootlog and os.path.exists(fakerootlog): | ||
2096 | with open(fakerootlog) as fakeroot_log_file: | ||
2097 | fakeroot_failed = False | ||
2098 | for line in reversed(fakeroot_log_file.readlines()): | ||
2099 | for fakeroot_error in ['mismatch', 'error', 'fatal']: | ||
2100 | if fakeroot_error in line.lower(): | ||
2101 | fakeroot_failed = True | ||
2102 | if 'doing new pid setup and server start' in line: | ||
2103 | break | ||
2104 | fakeroot_log.append(line) | ||
2105 | |||
2106 | if not fakeroot_failed: | ||
2107 | fakeroot_log = [] | ||
2108 | |||
2109 | bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=("".join(fakeroot_log) or None)), self.cfgData) | ||
2110 | |||
2111 | if self.rqdata.taskData[''].halt: | ||
1920 | self.rq.state = runQueueCleanUp | 2112 | self.rq.state = runQueueCleanUp |
1921 | 2113 | ||
1922 | def task_skip(self, task, reason): | 2114 | def task_skip(self, task, reason): |
@@ -1931,7 +2123,7 @@ class RunQueueExecute: | |||
1931 | err = False | 2123 | err = False |
1932 | if not self.sqdone: | 2124 | if not self.sqdone: |
1933 | logger.debug('We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered))) | 2125 | logger.debug('We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered))) |
1934 | completeevent = sceneQueueComplete(self.sq_stats, self.rq) | 2126 | completeevent = sceneQueueComplete(self.stats, self.rq) |
1935 | bb.event.fire(completeevent, self.cfgData) | 2127 | bb.event.fire(completeevent, self.cfgData) |
1936 | if self.sq_deferred: | 2128 | if self.sq_deferred: |
1937 | logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred)) | 2129 | logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred)) |
@@ -1943,6 +2135,10 @@ class RunQueueExecute: | |||
1943 | logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks)) | 2135 | logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks)) |
1944 | err = True | 2136 | err = True |
1945 | 2137 | ||
2138 | for tid in self.scenequeue_covered.intersection(self.scenequeue_notcovered): | ||
2139 | # No task should end up in both covered and uncovered, that is a bug. | ||
2140 | logger.error("Setscene task %s in both covered and notcovered." % tid) | ||
2141 | |||
1946 | for tid in self.rqdata.runq_setscene_tids: | 2142 | for tid in self.rqdata.runq_setscene_tids: |
1947 | if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered: | 2143 | if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered: |
1948 | err = True | 2144 | err = True |
@@ -1961,7 +2157,7 @@ class RunQueueExecute: | |||
1961 | if x not in self.tasks_scenequeue_done: | 2157 | if x not in self.tasks_scenequeue_done: |
1962 | logger.error("Task %s was never processed by the setscene code" % x) | 2158 | logger.error("Task %s was never processed by the setscene code" % x) |
1963 | err = True | 2159 | err = True |
1964 | if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable: | 2160 | if not self.rqdata.runtaskentries[x].depends and x not in self.runq_buildable: |
1965 | logger.error("Task %s was never marked as buildable by the setscene code" % x) | 2161 | logger.error("Task %s was never marked as buildable by the setscene code" % x) |
1966 | err = True | 2162 | err = True |
1967 | return err | 2163 | return err |
@@ -1984,8 +2180,11 @@ class RunQueueExecute: | |||
1984 | if not self.sqdone and self.can_start_task(): | 2180 | if not self.sqdone and self.can_start_task(): |
1985 | # Find the next setscene to run | 2181 | # Find the next setscene to run |
1986 | for nexttask in self.sorted_setscene_tids: | 2182 | for nexttask in self.sorted_setscene_tids: |
1987 | if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values(): | 2183 | if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values() and nexttask not in self.sq_harddep_deferred: |
1988 | if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]): | 2184 | if nexttask not in self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and \ |
2185 | nexttask not in self.sq_needed_harddeps and \ | ||
2186 | self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and \ | ||
2187 | self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]): | ||
1989 | if nexttask not in self.rqdata.target_tids: | 2188 | if nexttask not in self.rqdata.target_tids: |
1990 | logger.debug2("Skipping setscene for task %s" % nexttask) | 2189 | logger.debug2("Skipping setscene for task %s" % nexttask) |
1991 | self.sq_task_skip(nexttask) | 2190 | self.sq_task_skip(nexttask) |
@@ -1993,6 +2192,19 @@ class RunQueueExecute: | |||
1993 | if nexttask in self.sq_deferred: | 2192 | if nexttask in self.sq_deferred: |
1994 | del self.sq_deferred[nexttask] | 2193 | del self.sq_deferred[nexttask] |
1995 | return True | 2194 | return True |
2195 | if nexttask in self.sqdata.sq_harddeps_rev and not self.sqdata.sq_harddeps_rev[nexttask].issubset(self.scenequeue_covered | self.scenequeue_notcovered): | ||
2196 | logger.debug2("Deferring %s due to hard dependencies" % nexttask) | ||
2197 | updated = False | ||
2198 | for dep in self.sqdata.sq_harddeps_rev[nexttask]: | ||
2199 | if dep not in self.sq_needed_harddeps: | ||
2200 | logger.debug2("Enabling task %s as it is a hard dependency" % dep) | ||
2201 | self.sq_buildable.add(dep) | ||
2202 | self.sq_needed_harddeps.add(dep) | ||
2203 | updated = True | ||
2204 | self.sq_harddep_deferred.add(nexttask) | ||
2205 | if updated: | ||
2206 | return True | ||
2207 | continue | ||
1996 | # If covered tasks are running, need to wait for them to complete | 2208 | # If covered tasks are running, need to wait for them to complete |
1997 | for t in self.sqdata.sq_covered_tasks[nexttask]: | 2209 | for t in self.sqdata.sq_covered_tasks[nexttask]: |
1998 | if t in self.runq_running and t not in self.runq_complete: | 2210 | if t in self.runq_running and t not in self.runq_complete: |
@@ -2007,8 +2219,6 @@ class RunQueueExecute: | |||
2007 | logger.debug("%s didn't become valid, skipping setscene" % nexttask) | 2219 | logger.debug("%s didn't become valid, skipping setscene" % nexttask) |
2008 | self.sq_task_failoutright(nexttask) | 2220 | self.sq_task_failoutright(nexttask) |
2009 | return True | 2221 | return True |
2010 | else: | ||
2011 | self.sqdata.outrightfail.remove(nexttask) | ||
2012 | if nexttask in self.sqdata.outrightfail: | 2222 | if nexttask in self.sqdata.outrightfail: |
2013 | logger.debug2('No package found, so skipping setscene task %s', nexttask) | 2223 | logger.debug2('No package found, so skipping setscene task %s', nexttask) |
2014 | self.sq_task_failoutright(nexttask) | 2224 | self.sq_task_failoutright(nexttask) |
@@ -2040,28 +2250,42 @@ class RunQueueExecute: | |||
2040 | self.sq_task_failoutright(task) | 2250 | self.sq_task_failoutright(task) |
2041 | return True | 2251 | return True |
2042 | 2252 | ||
2043 | startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq) | 2253 | startevent = sceneQueueTaskStarted(task, self.stats, self.rq) |
2044 | bb.event.fire(startevent, self.cfgData) | 2254 | bb.event.fire(startevent, self.cfgData) |
2045 | 2255 | ||
2046 | taskdepdata = self.sq_build_taskdepdata(task) | ||
2047 | |||
2048 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] | 2256 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
2049 | taskhash = self.rqdata.get_task_hash(task) | 2257 | realfn = bb.cache.virtualfn2realfn(taskfn)[0] |
2050 | unihash = self.rqdata.get_task_unihash(task) | 2258 | runtask = { |
2259 | 'fn' : taskfn, | ||
2260 | 'task' : task, | ||
2261 | 'taskname' : taskname, | ||
2262 | 'taskhash' : self.rqdata.get_task_hash(task), | ||
2263 | 'unihash' : self.rqdata.get_task_unihash(task), | ||
2264 | 'quieterrors' : True, | ||
2265 | 'appends' : self.cooker.collections[mc].get_file_appends(taskfn), | ||
2266 | 'layername' : self.cooker.collections[mc].calc_bbfile_priority(realfn)[2], | ||
2267 | 'taskdepdata' : self.sq_build_taskdepdata(task), | ||
2268 | 'dry_run' : False, | ||
2269 | 'taskdep': taskdep, | ||
2270 | 'fakerootenv' : self.rqdata.dataCaches[mc].fakerootenv[taskfn], | ||
2271 | 'fakerootdirs' : self.rqdata.dataCaches[mc].fakerootdirs[taskfn], | ||
2272 | 'fakerootnoenv' : self.rqdata.dataCaches[mc].fakerootnoenv[taskfn] | ||
2273 | } | ||
2274 | |||
2051 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: | 2275 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: |
2052 | if not mc in self.rq.fakeworker: | 2276 | if not mc in self.rq.fakeworker: |
2053 | self.rq.start_fakeworker(self, mc) | 2277 | self.rq.start_fakeworker(self, mc) |
2054 | self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>") | 2278 | RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, runtask, "runtask") |
2055 | self.rq.fakeworker[mc].process.stdin.flush() | 2279 | self.rq.fakeworker[mc].process.stdin.flush() |
2056 | else: | 2280 | else: |
2057 | self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>") | 2281 | RunQueue.send_pickled_data(self.rq.worker[mc].process, runtask, "runtask") |
2058 | self.rq.worker[mc].process.stdin.flush() | 2282 | self.rq.worker[mc].process.stdin.flush() |
2059 | 2283 | ||
2060 | self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True) | 2284 | self.build_stamps[task] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) |
2061 | self.build_stamps2.append(self.build_stamps[task]) | 2285 | self.build_stamps2.append(self.build_stamps[task]) |
2062 | self.sq_running.add(task) | 2286 | self.sq_running.add(task) |
2063 | self.sq_live.add(task) | 2287 | self.sq_live.add(task) |
2064 | self.sq_stats.taskActive() | 2288 | self.stats.updateActiveSetscene(len(self.sq_live)) |
2065 | if self.can_start_task(): | 2289 | if self.can_start_task(): |
2066 | return True | 2290 | return True |
2067 | 2291 | ||
@@ -2092,9 +2316,9 @@ class RunQueueExecute: | |||
2092 | if task is not None: | 2316 | if task is not None: |
2093 | (mc, fn, taskname, taskfn) = split_tid_mcfn(task) | 2317 | (mc, fn, taskname, taskfn) = split_tid_mcfn(task) |
2094 | 2318 | ||
2095 | if self.rqdata.setscenewhitelist is not None: | 2319 | if self.rqdata.setscene_ignore_tasks is not None: |
2096 | if self.check_setscenewhitelist(task): | 2320 | if self.check_setscene_ignore_tasks(task): |
2097 | self.task_fail(task, "setscene whitelist") | 2321 | self.task_fail(task, "setscene ignore_tasks") |
2098 | return True | 2322 | return True |
2099 | 2323 | ||
2100 | if task in self.tasks_covered: | 2324 | if task in self.tasks_covered: |
@@ -2117,18 +2341,32 @@ class RunQueueExecute: | |||
2117 | self.runq_running.add(task) | 2341 | self.runq_running.add(task) |
2118 | self.stats.taskActive() | 2342 | self.stats.taskActive() |
2119 | if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): | 2343 | if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): |
2120 | bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn) | 2344 | bb.build.make_stamp_mcfn(taskname, taskfn) |
2121 | self.task_complete(task) | 2345 | self.task_complete(task) |
2122 | return True | 2346 | return True |
2123 | else: | 2347 | else: |
2124 | startevent = runQueueTaskStarted(task, self.stats, self.rq) | 2348 | startevent = runQueueTaskStarted(task, self.stats, self.rq) |
2125 | bb.event.fire(startevent, self.cfgData) | 2349 | bb.event.fire(startevent, self.cfgData) |
2126 | 2350 | ||
2127 | taskdepdata = self.build_taskdepdata(task) | ||
2128 | |||
2129 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] | 2351 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
2130 | taskhash = self.rqdata.get_task_hash(task) | 2352 | realfn = bb.cache.virtualfn2realfn(taskfn)[0] |
2131 | unihash = self.rqdata.get_task_unihash(task) | 2353 | runtask = { |
2354 | 'fn' : taskfn, | ||
2355 | 'task' : task, | ||
2356 | 'taskname' : taskname, | ||
2357 | 'taskhash' : self.rqdata.get_task_hash(task), | ||
2358 | 'unihash' : self.rqdata.get_task_unihash(task), | ||
2359 | 'quieterrors' : False, | ||
2360 | 'appends' : self.cooker.collections[mc].get_file_appends(taskfn), | ||
2361 | 'layername' : self.cooker.collections[mc].calc_bbfile_priority(realfn)[2], | ||
2362 | 'taskdepdata' : self.build_taskdepdata(task), | ||
2363 | 'dry_run' : self.rqdata.setscene_enforce, | ||
2364 | 'taskdep': taskdep, | ||
2365 | 'fakerootenv' : self.rqdata.dataCaches[mc].fakerootenv[taskfn], | ||
2366 | 'fakerootdirs' : self.rqdata.dataCaches[mc].fakerootdirs[taskfn], | ||
2367 | 'fakerootnoenv' : self.rqdata.dataCaches[mc].fakerootnoenv[taskfn] | ||
2368 | } | ||
2369 | |||
2132 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): | 2370 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): |
2133 | if not mc in self.rq.fakeworker: | 2371 | if not mc in self.rq.fakeworker: |
2134 | try: | 2372 | try: |
@@ -2138,31 +2376,31 @@ class RunQueueExecute: | |||
2138 | self.rq.state = runQueueFailed | 2376 | self.rq.state = runQueueFailed |
2139 | self.stats.taskFailed() | 2377 | self.stats.taskFailed() |
2140 | return True | 2378 | return True |
2141 | self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>") | 2379 | RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, runtask, "runtask") |
2142 | self.rq.fakeworker[mc].process.stdin.flush() | 2380 | self.rq.fakeworker[mc].process.stdin.flush() |
2143 | else: | 2381 | else: |
2144 | self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>") | 2382 | RunQueue.send_pickled_data(self.rq.worker[mc].process, runtask, "runtask") |
2145 | self.rq.worker[mc].process.stdin.flush() | 2383 | self.rq.worker[mc].process.stdin.flush() |
2146 | 2384 | ||
2147 | self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True) | 2385 | self.build_stamps[task] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) |
2148 | self.build_stamps2.append(self.build_stamps[task]) | 2386 | self.build_stamps2.append(self.build_stamps[task]) |
2149 | self.runq_running.add(task) | 2387 | self.runq_running.add(task) |
2150 | self.stats.taskActive() | 2388 | self.stats.taskActive() |
2151 | if self.can_start_task(): | 2389 | if self.can_start_task(): |
2152 | return True | 2390 | return True |
2153 | 2391 | ||
2154 | if self.stats.active > 0 or self.sq_stats.active > 0: | 2392 | if self.stats.active > 0 or self.sq_live: |
2155 | self.rq.read_workers() | 2393 | self.rq.read_workers() |
2156 | return self.rq.active_fds() | 2394 | return self.rq.active_fds() |
2157 | 2395 | ||
2158 | # No more tasks can be run. If we have deferred setscene tasks we should run them. | 2396 | # No more tasks can be run. If we have deferred setscene tasks we should run them. |
2159 | if self.sq_deferred: | 2397 | if self.sq_deferred: |
2160 | tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0]) | 2398 | deferred_tid = list(self.sq_deferred.keys())[0] |
2161 | logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid) | 2399 | blocking_tid = self.sq_deferred.pop(deferred_tid) |
2162 | self.sq_task_failoutright(tid) | 2400 | logger.warning("Runqueue deadlocked on deferred tasks, forcing task %s blocked by %s" % (deferred_tid, blocking_tid)) |
2163 | return True | 2401 | return True |
2164 | 2402 | ||
2165 | if len(self.failed_tids) != 0: | 2403 | if self.failed_tids: |
2166 | self.rq.state = runQueueFailed | 2404 | self.rq.state = runQueueFailed |
2167 | return True | 2405 | return True |
2168 | 2406 | ||
@@ -2195,6 +2433,22 @@ class RunQueueExecute: | |||
2195 | ret.add(dep) | 2433 | ret.add(dep) |
2196 | return ret | 2434 | return ret |
2197 | 2435 | ||
2436 | # Build the individual cache entries in advance once to save time | ||
2437 | def build_taskdepdata_cache(self): | ||
2438 | taskdepdata_cache = {} | ||
2439 | for task in self.rqdata.runtaskentries: | ||
2440 | (mc, fn, taskname, taskfn) = split_tid_mcfn(task) | ||
2441 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] | ||
2442 | deps = self.rqdata.runtaskentries[task].depends | ||
2443 | provides = self.rqdata.dataCaches[mc].fn_provides[taskfn] | ||
2444 | taskhash = self.rqdata.runtaskentries[task].hash | ||
2445 | unihash = self.rqdata.runtaskentries[task].unihash | ||
2446 | deps = self.filtermcdeps(task, mc, deps) | ||
2447 | hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn] | ||
2448 | taskdepdata_cache[task] = [pn, taskname, fn, deps, provides, taskhash, unihash, hashfn] | ||
2449 | |||
2450 | self.taskdepdata_cache = taskdepdata_cache | ||
2451 | |||
2198 | # We filter out multiconfig dependencies from taskdepdata we pass to the tasks | 2452 | # We filter out multiconfig dependencies from taskdepdata we pass to the tasks |
2199 | # as most code can't handle them | 2453 | # as most code can't handle them |
2200 | def build_taskdepdata(self, task): | 2454 | def build_taskdepdata(self, task): |
@@ -2206,15 +2460,9 @@ class RunQueueExecute: | |||
2206 | while next: | 2460 | while next: |
2207 | additional = [] | 2461 | additional = [] |
2208 | for revdep in next: | 2462 | for revdep in next: |
2209 | (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep) | 2463 | self.taskdepdata_cache[revdep][6] = self.rqdata.runtaskentries[revdep].unihash |
2210 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] | 2464 | taskdepdata[revdep] = self.taskdepdata_cache[revdep] |
2211 | deps = self.rqdata.runtaskentries[revdep].depends | 2465 | for revdep2 in self.taskdepdata_cache[revdep][3]: |
2212 | provides = self.rqdata.dataCaches[mc].fn_provides[taskfn] | ||
2213 | taskhash = self.rqdata.runtaskentries[revdep].hash | ||
2214 | unihash = self.rqdata.runtaskentries[revdep].unihash | ||
2215 | deps = self.filtermcdeps(task, mc, deps) | ||
2216 | taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash] | ||
2217 | for revdep2 in deps: | ||
2218 | if revdep2 not in taskdepdata: | 2466 | if revdep2 not in taskdepdata: |
2219 | additional.append(revdep2) | 2467 | additional.append(revdep2) |
2220 | next = additional | 2468 | next = additional |
@@ -2228,7 +2476,7 @@ class RunQueueExecute: | |||
2228 | return | 2476 | return |
2229 | 2477 | ||
2230 | notcovered = set(self.scenequeue_notcovered) | 2478 | notcovered = set(self.scenequeue_notcovered) |
2231 | notcovered |= self.cantskip | 2479 | notcovered |= self.sqdata.cantskip |
2232 | for tid in self.scenequeue_notcovered: | 2480 | for tid in self.scenequeue_notcovered: |
2233 | notcovered |= self.sqdata.sq_covered_tasks[tid] | 2481 | notcovered |= self.sqdata.sq_covered_tasks[tid] |
2234 | notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids) | 2482 | notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids) |
@@ -2241,7 +2489,7 @@ class RunQueueExecute: | |||
2241 | covered.intersection_update(self.tasks_scenequeue_done) | 2489 | covered.intersection_update(self.tasks_scenequeue_done) |
2242 | 2490 | ||
2243 | for tid in notcovered | covered: | 2491 | for tid in notcovered | covered: |
2244 | if len(self.rqdata.runtaskentries[tid].depends) == 0: | 2492 | if not self.rqdata.runtaskentries[tid].depends: |
2245 | self.setbuildable(tid) | 2493 | self.setbuildable(tid) |
2246 | elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete): | 2494 | elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete): |
2247 | self.setbuildable(tid) | 2495 | self.setbuildable(tid) |
@@ -2273,10 +2521,19 @@ class RunQueueExecute: | |||
2273 | self.updated_taskhash_queue.remove((tid, unihash)) | 2521 | self.updated_taskhash_queue.remove((tid, unihash)) |
2274 | 2522 | ||
2275 | if unihash != self.rqdata.runtaskentries[tid].unihash: | 2523 | if unihash != self.rqdata.runtaskentries[tid].unihash: |
2276 | hashequiv_logger.verbose("Task %s unihash changed to %s" % (tid, unihash)) | 2524 | # Make sure we rehash any other tasks with the same task hash that we're deferred against. |
2277 | self.rqdata.runtaskentries[tid].unihash = unihash | 2525 | torehash = [tid] |
2278 | bb.parse.siggen.set_unihash(tid, unihash) | 2526 | for deftid in self.sq_deferred: |
2279 | toprocess.add(tid) | 2527 | if self.sq_deferred[deftid] == tid: |
2528 | torehash.append(deftid) | ||
2529 | for hashtid in torehash: | ||
2530 | hashequiv_logger.verbose("Task %s unihash changed to %s" % (hashtid, unihash)) | ||
2531 | self.rqdata.runtaskentries[hashtid].unihash = unihash | ||
2532 | bb.parse.siggen.set_unihash(hashtid, unihash) | ||
2533 | toprocess.add(hashtid) | ||
2534 | if torehash: | ||
2535 | # Need to save after set_unihash above | ||
2536 | bb.parse.siggen.save_unitaskhashes() | ||
2280 | 2537 | ||
2281 | # Work out all tasks which depend upon these | 2538 | # Work out all tasks which depend upon these |
2282 | total = set() | 2539 | total = set() |
@@ -2294,7 +2551,7 @@ class RunQueueExecute: | |||
2294 | # Now iterate those tasks in dependency order to regenerate their taskhash/unihash | 2551 | # Now iterate those tasks in dependency order to regenerate their taskhash/unihash |
2295 | next = set() | 2552 | next = set() |
2296 | for p in total: | 2553 | for p in total: |
2297 | if len(self.rqdata.runtaskentries[p].depends) == 0: | 2554 | if not self.rqdata.runtaskentries[p].depends: |
2298 | next.add(p) | 2555 | next.add(p) |
2299 | elif self.rqdata.runtaskentries[p].depends.isdisjoint(total): | 2556 | elif self.rqdata.runtaskentries[p].depends.isdisjoint(total): |
2300 | next.add(p) | 2557 | next.add(p) |
@@ -2304,11 +2561,10 @@ class RunQueueExecute: | |||
2304 | current = next.copy() | 2561 | current = next.copy() |
2305 | next = set() | 2562 | next = set() |
2306 | for tid in current: | 2563 | for tid in current: |
2307 | if len(self.rqdata.runtaskentries[p].depends) and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total): | 2564 | if self.rqdata.runtaskentries[p].depends and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total): |
2308 | continue | 2565 | continue |
2309 | orighash = self.rqdata.runtaskentries[tid].hash | 2566 | orighash = self.rqdata.runtaskentries[tid].hash |
2310 | dc = bb.parse.siggen.get_data_caches(self.rqdata.dataCaches, mc_from_tid(tid)) | 2567 | newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, self.rqdata.dataCaches) |
2311 | newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, dc) | ||
2312 | origuni = self.rqdata.runtaskentries[tid].unihash | 2568 | origuni = self.rqdata.runtaskentries[tid].unihash |
2313 | newuni = bb.parse.siggen.get_unihash(tid) | 2569 | newuni = bb.parse.siggen.get_unihash(tid) |
2314 | # FIXME, need to check it can come from sstate at all for determinism? | 2570 | # FIXME, need to check it can come from sstate at all for determinism? |
@@ -2334,9 +2590,9 @@ class RunQueueExecute: | |||
2334 | 2590 | ||
2335 | if changed: | 2591 | if changed: |
2336 | for mc in self.rq.worker: | 2592 | for mc in self.rq.worker: |
2337 | self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>") | 2593 | RunQueue.send_pickled_data(self.rq.worker[mc].process, bb.parse.siggen.get_taskhashes(), "newtaskhashes") |
2338 | for mc in self.rq.fakeworker: | 2594 | for mc in self.rq.fakeworker: |
2339 | self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>") | 2595 | RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, bb.parse.siggen.get_taskhashes(), "newtaskhashes") |
2340 | 2596 | ||
2341 | hashequiv_logger.debug(pprint.pformat("Tasks changed:\n%s" % (changed))) | 2597 | hashequiv_logger.debug(pprint.pformat("Tasks changed:\n%s" % (changed))) |
2342 | 2598 | ||
@@ -2370,7 +2626,7 @@ class RunQueueExecute: | |||
2370 | self.tasks_scenequeue_done.remove(tid) | 2626 | self.tasks_scenequeue_done.remove(tid) |
2371 | for dep in self.sqdata.sq_covered_tasks[tid]: | 2627 | for dep in self.sqdata.sq_covered_tasks[tid]: |
2372 | if dep in self.runq_complete and dep not in self.runq_tasksrun: | 2628 | if dep in self.runq_complete and dep not in self.runq_tasksrun: |
2373 | bb.error("Task %s marked as completed but now needing to rerun? Aborting build." % dep) | 2629 | bb.error("Task %s marked as completed but now needing to rerun? Halting build." % dep) |
2374 | self.failed_tids.append(tid) | 2630 | self.failed_tids.append(tid) |
2375 | self.rq.state = runQueueCleanUp | 2631 | self.rq.state = runQueueCleanUp |
2376 | return | 2632 | return |
@@ -2383,17 +2639,6 @@ class RunQueueExecute: | |||
2383 | self.sq_buildable.remove(tid) | 2639 | self.sq_buildable.remove(tid) |
2384 | if tid in self.sq_running: | 2640 | if tid in self.sq_running: |
2385 | self.sq_running.remove(tid) | 2641 | self.sq_running.remove(tid) |
2386 | harddepfail = False | ||
2387 | for t in self.sqdata.sq_harddeps: | ||
2388 | if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered: | ||
2389 | harddepfail = True | ||
2390 | break | ||
2391 | if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered): | ||
2392 | if tid not in self.sq_buildable: | ||
2393 | self.sq_buildable.add(tid) | ||
2394 | if len(self.sqdata.sq_revdeps[tid]) == 0: | ||
2395 | self.sq_buildable.add(tid) | ||
2396 | |||
2397 | if tid in self.sqdata.outrightfail: | 2642 | if tid in self.sqdata.outrightfail: |
2398 | self.sqdata.outrightfail.remove(tid) | 2643 | self.sqdata.outrightfail.remove(tid) |
2399 | if tid in self.scenequeue_notcovered: | 2644 | if tid in self.scenequeue_notcovered: |
@@ -2404,7 +2649,7 @@ class RunQueueExecute: | |||
2404 | self.scenequeue_notneeded.remove(tid) | 2649 | self.scenequeue_notneeded.remove(tid) |
2405 | 2650 | ||
2406 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 2651 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
2407 | self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True) | 2652 | self.sqdata.stamps[tid] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) |
2408 | 2653 | ||
2409 | if tid in self.stampcache: | 2654 | if tid in self.stampcache: |
2410 | del self.stampcache[tid] | 2655 | del self.stampcache[tid] |
@@ -2412,28 +2657,62 @@ class RunQueueExecute: | |||
2412 | if tid in self.build_stamps: | 2657 | if tid in self.build_stamps: |
2413 | del self.build_stamps[tid] | 2658 | del self.build_stamps[tid] |
2414 | 2659 | ||
2415 | update_tasks.append((tid, harddepfail, tid in self.sqdata.valid)) | 2660 | update_tasks.append(tid) |
2661 | |||
2662 | update_tasks2 = [] | ||
2663 | for tid in update_tasks: | ||
2664 | harddepfail = False | ||
2665 | for t in self.sqdata.sq_harddeps_rev[tid]: | ||
2666 | if t in self.scenequeue_notcovered: | ||
2667 | harddepfail = True | ||
2668 | break | ||
2669 | if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered): | ||
2670 | if tid not in self.sq_buildable: | ||
2671 | self.sq_buildable.add(tid) | ||
2672 | if not self.sqdata.sq_revdeps[tid]: | ||
2673 | self.sq_buildable.add(tid) | ||
2416 | 2674 | ||
2417 | if update_tasks: | 2675 | update_tasks2.append((tid, harddepfail, tid in self.sqdata.valid)) |
2676 | |||
2677 | if update_tasks2: | ||
2418 | self.sqdone = False | 2678 | self.sqdone = False |
2419 | update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False) | 2679 | for mc in sorted(self.sqdata.multiconfigs): |
2680 | for tid in sorted([t[0] for t in update_tasks2]): | ||
2681 | if mc_from_tid(tid) != mc: | ||
2682 | continue | ||
2683 | h = pending_hash_index(tid, self.rqdata) | ||
2684 | if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]: | ||
2685 | self.sq_deferred[tid] = self.sqdata.hashes[h] | ||
2686 | bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h])) | ||
2687 | update_scenequeue_data([t[0] for t in update_tasks2], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False) | ||
2420 | 2688 | ||
2421 | for (tid, harddepfail, origvalid) in update_tasks: | 2689 | for (tid, harddepfail, origvalid) in update_tasks2: |
2422 | if tid in self.sqdata.valid and not origvalid: | 2690 | if tid in self.sqdata.valid and not origvalid: |
2423 | hashequiv_logger.verbose("Setscene task %s became valid" % tid) | 2691 | hashequiv_logger.verbose("Setscene task %s became valid" % tid) |
2424 | if harddepfail: | 2692 | if harddepfail: |
2693 | logger.debug2("%s has an unavailable hard dependency so skipping" % (tid)) | ||
2425 | self.sq_task_failoutright(tid) | 2694 | self.sq_task_failoutright(tid) |
2426 | 2695 | ||
2427 | if changed: | 2696 | if changed: |
2697 | self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered)) | ||
2698 | self.sq_needed_harddeps = set() | ||
2699 | self.sq_harddep_deferred = set() | ||
2428 | self.holdoff_need_update = True | 2700 | self.holdoff_need_update = True |
2429 | 2701 | ||
2430 | def scenequeue_updatecounters(self, task, fail=False): | 2702 | def scenequeue_updatecounters(self, task, fail=False): |
2431 | 2703 | ||
2432 | for dep in sorted(self.sqdata.sq_deps[task]): | 2704 | if fail and task in self.sqdata.sq_harddeps: |
2433 | if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]: | 2705 | for dep in sorted(self.sqdata.sq_harddeps[task]): |
2706 | if dep in self.scenequeue_covered or dep in self.scenequeue_notcovered: | ||
2707 | # dependency could be already processed, e.g. noexec setscene task | ||
2708 | continue | ||
2709 | noexec, stamppresent = check_setscene_stamps(dep, self.rqdata, self.rq, self.stampcache) | ||
2710 | if noexec or stamppresent: | ||
2711 | continue | ||
2434 | logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep)) | 2712 | logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep)) |
2435 | self.sq_task_failoutright(dep) | 2713 | self.sq_task_failoutright(dep) |
2436 | continue | 2714 | continue |
2715 | for dep in sorted(self.sqdata.sq_deps[task]): | ||
2437 | if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered): | 2716 | if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered): |
2438 | if dep not in self.sq_buildable: | 2717 | if dep not in self.sq_buildable: |
2439 | self.sq_buildable.add(dep) | 2718 | self.sq_buildable.add(dep) |
@@ -2452,6 +2731,14 @@ class RunQueueExecute: | |||
2452 | new.add(dep) | 2731 | new.add(dep) |
2453 | next = new | 2732 | next = new |
2454 | 2733 | ||
2734 | # If this task was one which other setscene tasks have a hard dependency upon, we need | ||
2735 | # to walk through the hard dependencies and allow execution of those which have completed dependencies. | ||
2736 | if task in self.sqdata.sq_harddeps: | ||
2737 | for dep in self.sq_harddep_deferred.copy(): | ||
2738 | if self.sqdata.sq_harddeps_rev[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered): | ||
2739 | self.sq_harddep_deferred.remove(dep) | ||
2740 | |||
2741 | self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered)) | ||
2455 | self.holdoff_need_update = True | 2742 | self.holdoff_need_update = True |
2456 | 2743 | ||
2457 | def sq_task_completeoutright(self, task): | 2744 | def sq_task_completeoutright(self, task): |
@@ -2466,22 +2753,20 @@ class RunQueueExecute: | |||
2466 | self.scenequeue_updatecounters(task) | 2753 | self.scenequeue_updatecounters(task) |
2467 | 2754 | ||
2468 | def sq_check_taskfail(self, task): | 2755 | def sq_check_taskfail(self, task): |
2469 | if self.rqdata.setscenewhitelist is not None: | 2756 | if self.rqdata.setscene_ignore_tasks is not None: |
2470 | realtask = task.split('_setscene')[0] | 2757 | realtask = task.split('_setscene')[0] |
2471 | (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask) | 2758 | (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask) |
2472 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] | 2759 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] |
2473 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): | 2760 | if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks): |
2474 | logger.error('Task %s.%s failed' % (pn, taskname + "_setscene")) | 2761 | logger.error('Task %s.%s failed' % (pn, taskname + "_setscene")) |
2475 | self.rq.state = runQueueCleanUp | 2762 | self.rq.state = runQueueCleanUp |
2476 | 2763 | ||
2477 | def sq_task_complete(self, task): | 2764 | def sq_task_complete(self, task): |
2478 | self.sq_stats.taskCompleted() | 2765 | bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData) |
2479 | bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData) | ||
2480 | self.sq_task_completeoutright(task) | 2766 | self.sq_task_completeoutright(task) |
2481 | 2767 | ||
2482 | def sq_task_fail(self, task, result): | 2768 | def sq_task_fail(self, task, result): |
2483 | self.sq_stats.taskFailed() | 2769 | bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData) |
2484 | bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData) | ||
2485 | self.scenequeue_notcovered.add(task) | 2770 | self.scenequeue_notcovered.add(task) |
2486 | self.scenequeue_updatecounters(task, True) | 2771 | self.scenequeue_updatecounters(task, True) |
2487 | self.sq_check_taskfail(task) | 2772 | self.sq_check_taskfail(task) |
@@ -2489,8 +2774,6 @@ class RunQueueExecute: | |||
2489 | def sq_task_failoutright(self, task): | 2774 | def sq_task_failoutright(self, task): |
2490 | self.sq_running.add(task) | 2775 | self.sq_running.add(task) |
2491 | self.sq_buildable.add(task) | 2776 | self.sq_buildable.add(task) |
2492 | self.sq_stats.taskSkipped() | ||
2493 | self.sq_stats.taskCompleted() | ||
2494 | self.scenequeue_notcovered.add(task) | 2777 | self.scenequeue_notcovered.add(task) |
2495 | self.scenequeue_updatecounters(task, True) | 2778 | self.scenequeue_updatecounters(task, True) |
2496 | 2779 | ||
@@ -2498,8 +2781,6 @@ class RunQueueExecute: | |||
2498 | self.sq_running.add(task) | 2781 | self.sq_running.add(task) |
2499 | self.sq_buildable.add(task) | 2782 | self.sq_buildable.add(task) |
2500 | self.sq_task_completeoutright(task) | 2783 | self.sq_task_completeoutright(task) |
2501 | self.sq_stats.taskSkipped() | ||
2502 | self.sq_stats.taskCompleted() | ||
2503 | 2784 | ||
2504 | def sq_build_taskdepdata(self, task): | 2785 | def sq_build_taskdepdata(self, task): |
2505 | def getsetscenedeps(tid): | 2786 | def getsetscenedeps(tid): |
@@ -2530,7 +2811,8 @@ class RunQueueExecute: | |||
2530 | provides = self.rqdata.dataCaches[mc].fn_provides[taskfn] | 2811 | provides = self.rqdata.dataCaches[mc].fn_provides[taskfn] |
2531 | taskhash = self.rqdata.runtaskentries[revdep].hash | 2812 | taskhash = self.rqdata.runtaskentries[revdep].hash |
2532 | unihash = self.rqdata.runtaskentries[revdep].unihash | 2813 | unihash = self.rqdata.runtaskentries[revdep].unihash |
2533 | taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash] | 2814 | hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn] |
2815 | taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash, hashfn] | ||
2534 | for revdep2 in deps: | 2816 | for revdep2 in deps: |
2535 | if revdep2 not in taskdepdata: | 2817 | if revdep2 not in taskdepdata: |
2536 | additional.append(revdep2) | 2818 | additional.append(revdep2) |
@@ -2539,8 +2821,8 @@ class RunQueueExecute: | |||
2539 | #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n")) | 2821 | #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n")) |
2540 | return taskdepdata | 2822 | return taskdepdata |
2541 | 2823 | ||
2542 | def check_setscenewhitelist(self, tid): | 2824 | def check_setscene_ignore_tasks(self, tid): |
2543 | # Check task that is going to run against the whitelist | 2825 | # Check task that is going to run against the ignore tasks list |
2544 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 2826 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
2545 | # Ignore covered tasks | 2827 | # Ignore covered tasks |
2546 | if tid in self.tasks_covered: | 2828 | if tid in self.tasks_covered: |
@@ -2554,14 +2836,15 @@ class RunQueueExecute: | |||
2554 | return False | 2836 | return False |
2555 | 2837 | ||
2556 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] | 2838 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] |
2557 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): | 2839 | if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks): |
2558 | if tid in self.rqdata.runq_setscene_tids: | 2840 | if tid in self.rqdata.runq_setscene_tids: |
2559 | msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname) | 2841 | msg = ['Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)] |
2560 | else: | 2842 | else: |
2561 | msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname) | 2843 | msg = ['Task %s.%s attempted to execute unexpectedly' % (pn, taskname)] |
2562 | for t in self.scenequeue_notcovered: | 2844 | for t in self.scenequeue_notcovered: |
2563 | msg = msg + "\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash) | 2845 | msg.append("\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash)) |
2564 | logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered)) | 2846 | msg.append('\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered)) |
2847 | logger.error("".join(msg)) | ||
2565 | return True | 2848 | return True |
2566 | return False | 2849 | return False |
2567 | 2850 | ||
@@ -2573,6 +2856,7 @@ class SQData(object): | |||
2573 | self.sq_revdeps = {} | 2856 | self.sq_revdeps = {} |
2574 | # Injected inter-setscene task dependencies | 2857 | # Injected inter-setscene task dependencies |
2575 | self.sq_harddeps = {} | 2858 | self.sq_harddeps = {} |
2859 | self.sq_harddeps_rev = {} | ||
2576 | # Cache of stamp files so duplicates can't run in parallel | 2860 | # Cache of stamp files so duplicates can't run in parallel |
2577 | self.stamps = {} | 2861 | self.stamps = {} |
2578 | # Setscene tasks directly depended upon by the build | 2862 | # Setscene tasks directly depended upon by the build |
@@ -2582,12 +2866,17 @@ class SQData(object): | |||
2582 | # A list of normal tasks a setscene task covers | 2866 | # A list of normal tasks a setscene task covers |
2583 | self.sq_covered_tasks = {} | 2867 | self.sq_covered_tasks = {} |
2584 | 2868 | ||
2585 | def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | 2869 | def build_scenequeue_data(sqdata, rqdata, sqrq): |
2586 | 2870 | ||
2587 | sq_revdeps = {} | 2871 | sq_revdeps = {} |
2588 | sq_revdeps_squash = {} | 2872 | sq_revdeps_squash = {} |
2589 | sq_collated_deps = {} | 2873 | sq_collated_deps = {} |
2590 | 2874 | ||
2875 | # We can't skip specified target tasks which aren't setscene tasks | ||
2876 | sqdata.cantskip = set(rqdata.target_tids) | ||
2877 | sqdata.cantskip.difference_update(rqdata.runq_setscene_tids) | ||
2878 | sqdata.cantskip.intersection_update(rqdata.runtaskentries) | ||
2879 | |||
2591 | # We need to construct a dependency graph for the setscene functions. Intermediate | 2880 | # We need to construct a dependency graph for the setscene functions. Intermediate |
2592 | # dependencies between the setscene tasks only complicate the code. This code | 2881 | # dependencies between the setscene tasks only complicate the code. This code |
2593 | # therefore aims to collapse the huge runqueue dependency tree into a smaller one | 2882 | # therefore aims to collapse the huge runqueue dependency tree into a smaller one |
@@ -2600,7 +2889,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2600 | for tid in rqdata.runtaskentries: | 2889 | for tid in rqdata.runtaskentries: |
2601 | sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps) | 2890 | sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps) |
2602 | sq_revdeps_squash[tid] = set() | 2891 | sq_revdeps_squash[tid] = set() |
2603 | if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids: | 2892 | if not sq_revdeps[tid] and tid not in rqdata.runq_setscene_tids: |
2604 | #bb.warn("Added endpoint %s" % (tid)) | 2893 | #bb.warn("Added endpoint %s" % (tid)) |
2605 | endpoints[tid] = set() | 2894 | endpoints[tid] = set() |
2606 | 2895 | ||
@@ -2634,16 +2923,15 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2634 | sq_revdeps_squash[point] = set() | 2923 | sq_revdeps_squash[point] = set() |
2635 | if point in rqdata.runq_setscene_tids: | 2924 | if point in rqdata.runq_setscene_tids: |
2636 | sq_revdeps_squash[point] = tasks | 2925 | sq_revdeps_squash[point] = tasks |
2637 | tasks = set() | ||
2638 | continue | 2926 | continue |
2639 | for dep in rqdata.runtaskentries[point].depends: | 2927 | for dep in rqdata.runtaskentries[point].depends: |
2640 | if point in sq_revdeps[dep]: | 2928 | if point in sq_revdeps[dep]: |
2641 | sq_revdeps[dep].remove(point) | 2929 | sq_revdeps[dep].remove(point) |
2642 | if tasks: | 2930 | if tasks: |
2643 | sq_revdeps_squash[dep] |= tasks | 2931 | sq_revdeps_squash[dep] |= tasks |
2644 | if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids: | 2932 | if not sq_revdeps[dep] and dep not in rqdata.runq_setscene_tids: |
2645 | newendpoints[dep] = task | 2933 | newendpoints[dep] = task |
2646 | if len(newendpoints) != 0: | 2934 | if newendpoints: |
2647 | process_endpoints(newendpoints) | 2935 | process_endpoints(newendpoints) |
2648 | 2936 | ||
2649 | process_endpoints(endpoints) | 2937 | process_endpoints(endpoints) |
@@ -2655,16 +2943,16 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2655 | # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon | 2943 | # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon |
2656 | new = True | 2944 | new = True |
2657 | for tid in rqdata.runtaskentries: | 2945 | for tid in rqdata.runtaskentries: |
2658 | if len(rqdata.runtaskentries[tid].revdeps) == 0: | 2946 | if not rqdata.runtaskentries[tid].revdeps: |
2659 | sqdata.unskippable.add(tid) | 2947 | sqdata.unskippable.add(tid) |
2660 | sqdata.unskippable |= sqrq.cantskip | 2948 | sqdata.unskippable |= sqdata.cantskip |
2661 | while new: | 2949 | while new: |
2662 | new = False | 2950 | new = False |
2663 | orig = sqdata.unskippable.copy() | 2951 | orig = sqdata.unskippable.copy() |
2664 | for tid in sorted(orig, reverse=True): | 2952 | for tid in sorted(orig, reverse=True): |
2665 | if tid in rqdata.runq_setscene_tids: | 2953 | if tid in rqdata.runq_setscene_tids: |
2666 | continue | 2954 | continue |
2667 | if len(rqdata.runtaskentries[tid].depends) == 0: | 2955 | if not rqdata.runtaskentries[tid].depends: |
2668 | # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable | 2956 | # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable |
2669 | sqrq.setbuildable(tid) | 2957 | sqrq.setbuildable(tid) |
2670 | sqdata.unskippable |= rqdata.runtaskentries[tid].depends | 2958 | sqdata.unskippable |= rqdata.runtaskentries[tid].depends |
@@ -2679,8 +2967,8 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2679 | for taskcounter, tid in enumerate(rqdata.runtaskentries): | 2967 | for taskcounter, tid in enumerate(rqdata.runtaskentries): |
2680 | if tid in rqdata.runq_setscene_tids: | 2968 | if tid in rqdata.runq_setscene_tids: |
2681 | pass | 2969 | pass |
2682 | elif len(sq_revdeps_squash[tid]) != 0: | 2970 | elif sq_revdeps_squash[tid]: |
2683 | bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.") | 2971 | bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, halting. Please report this problem.") |
2684 | else: | 2972 | else: |
2685 | del sq_revdeps_squash[tid] | 2973 | del sq_revdeps_squash[tid] |
2686 | rqdata.init_progress_reporter.update(taskcounter) | 2974 | rqdata.init_progress_reporter.update(taskcounter) |
@@ -2694,7 +2982,9 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2694 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 2982 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
2695 | realtid = tid + "_setscene" | 2983 | realtid = tid + "_setscene" |
2696 | idepends = rqdata.taskData[mc].taskentries[realtid].idepends | 2984 | idepends = rqdata.taskData[mc].taskentries[realtid].idepends |
2697 | sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True) | 2985 | sqdata.stamps[tid] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) |
2986 | |||
2987 | sqdata.sq_harddeps_rev[tid] = set() | ||
2698 | for (depname, idependtask) in idepends: | 2988 | for (depname, idependtask) in idepends: |
2699 | 2989 | ||
2700 | if depname not in rqdata.taskData[mc].build_targets: | 2990 | if depname not in rqdata.taskData[mc].build_targets: |
@@ -2707,20 +2997,15 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2707 | if deptid not in rqdata.runtaskentries: | 2997 | if deptid not in rqdata.runtaskentries: |
2708 | bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask)) | 2998 | bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask)) |
2709 | 2999 | ||
3000 | logger.debug2("Adding hard setscene dependency %s for %s" % (deptid, tid)) | ||
3001 | |||
2710 | if not deptid in sqdata.sq_harddeps: | 3002 | if not deptid in sqdata.sq_harddeps: |
2711 | sqdata.sq_harddeps[deptid] = set() | 3003 | sqdata.sq_harddeps[deptid] = set() |
2712 | sqdata.sq_harddeps[deptid].add(tid) | 3004 | sqdata.sq_harddeps[deptid].add(tid) |
2713 | 3005 | sqdata.sq_harddeps_rev[tid].add(deptid) | |
2714 | sq_revdeps_squash[tid].add(deptid) | ||
2715 | # Have to zero this to avoid circular dependencies | ||
2716 | sq_revdeps_squash[deptid] = set() | ||
2717 | 3006 | ||
2718 | rqdata.init_progress_reporter.next_stage() | 3007 | rqdata.init_progress_reporter.next_stage() |
2719 | 3008 | ||
2720 | for task in sqdata.sq_harddeps: | ||
2721 | for dep in sqdata.sq_harddeps[task]: | ||
2722 | sq_revdeps_squash[dep].add(task) | ||
2723 | |||
2724 | rqdata.init_progress_reporter.next_stage() | 3009 | rqdata.init_progress_reporter.next_stage() |
2725 | 3010 | ||
2726 | #for tid in sq_revdeps_squash: | 3011 | #for tid in sq_revdeps_squash: |
@@ -2744,16 +3029,47 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2744 | sqdata.multiconfigs = set() | 3029 | sqdata.multiconfigs = set() |
2745 | for tid in sqdata.sq_revdeps: | 3030 | for tid in sqdata.sq_revdeps: |
2746 | sqdata.multiconfigs.add(mc_from_tid(tid)) | 3031 | sqdata.multiconfigs.add(mc_from_tid(tid)) |
2747 | if len(sqdata.sq_revdeps[tid]) == 0: | 3032 | if not sqdata.sq_revdeps[tid]: |
2748 | sqrq.sq_buildable.add(tid) | 3033 | sqrq.sq_buildable.add(tid) |
2749 | 3034 | ||
2750 | rqdata.init_progress_reporter.finish() | 3035 | rqdata.init_progress_reporter.next_stage() |
2751 | 3036 | ||
2752 | sqdata.noexec = set() | 3037 | sqdata.noexec = set() |
2753 | sqdata.stamppresent = set() | 3038 | sqdata.stamppresent = set() |
2754 | sqdata.valid = set() | 3039 | sqdata.valid = set() |
2755 | 3040 | ||
2756 | update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True) | 3041 | sqdata.hashes = {} |
3042 | sqrq.sq_deferred = {} | ||
3043 | for mc in sorted(sqdata.multiconfigs): | ||
3044 | for tid in sorted(sqdata.sq_revdeps): | ||
3045 | if mc_from_tid(tid) != mc: | ||
3046 | continue | ||
3047 | h = pending_hash_index(tid, rqdata) | ||
3048 | if h not in sqdata.hashes: | ||
3049 | sqdata.hashes[h] = tid | ||
3050 | else: | ||
3051 | sqrq.sq_deferred[tid] = sqdata.hashes[h] | ||
3052 | bb.debug(1, "Deferring %s after %s" % (tid, sqdata.hashes[h])) | ||
3053 | |||
3054 | def check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=False): | ||
3055 | |||
3056 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | ||
3057 | |||
3058 | taskdep = rqdata.dataCaches[mc].task_deps[taskfn] | ||
3059 | |||
3060 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | ||
3061 | bb.build.make_stamp_mcfn(taskname + "_setscene", taskfn) | ||
3062 | return True, False | ||
3063 | |||
3064 | if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache): | ||
3065 | logger.debug2('Setscene stamp current for task %s', tid) | ||
3066 | return False, True | ||
3067 | |||
3068 | if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache): | ||
3069 | logger.debug2('Normal stamp current for task %s', tid) | ||
3070 | return False, True | ||
3071 | |||
3072 | return False, False | ||
2757 | 3073 | ||
2758 | def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True): | 3074 | def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True): |
2759 | 3075 | ||
@@ -2764,55 +3080,42 @@ def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, s | |||
2764 | sqdata.stamppresent.remove(tid) | 3080 | sqdata.stamppresent.remove(tid) |
2765 | if tid in sqdata.valid: | 3081 | if tid in sqdata.valid: |
2766 | sqdata.valid.remove(tid) | 3082 | sqdata.valid.remove(tid) |
3083 | if tid in sqdata.outrightfail: | ||
3084 | sqdata.outrightfail.remove(tid) | ||
2767 | 3085 | ||
2768 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 3086 | noexec, stamppresent = check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=True) |
2769 | |||
2770 | taskdep = rqdata.dataCaches[mc].task_deps[taskfn] | ||
2771 | 3087 | ||
2772 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 3088 | if noexec: |
2773 | sqdata.noexec.add(tid) | 3089 | sqdata.noexec.add(tid) |
2774 | sqrq.sq_task_skip(tid) | 3090 | sqrq.sq_task_skip(tid) |
2775 | bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn) | 3091 | logger.debug2("%s is noexec so skipping setscene" % (tid)) |
2776 | continue | ||
2777 | |||
2778 | if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache): | ||
2779 | logger.debug2('Setscene stamp current for task %s', tid) | ||
2780 | sqdata.stamppresent.add(tid) | ||
2781 | sqrq.sq_task_skip(tid) | ||
2782 | continue | 3092 | continue |
2783 | 3093 | ||
2784 | if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache): | 3094 | if stamppresent: |
2785 | logger.debug2('Normal stamp current for task %s', tid) | ||
2786 | sqdata.stamppresent.add(tid) | 3095 | sqdata.stamppresent.add(tid) |
2787 | sqrq.sq_task_skip(tid) | 3096 | sqrq.sq_task_skip(tid) |
3097 | logger.debug2("%s has a valid stamp, skipping" % (tid)) | ||
2788 | continue | 3098 | continue |
2789 | 3099 | ||
2790 | tocheck.add(tid) | 3100 | tocheck.add(tid) |
2791 | 3101 | ||
2792 | sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary) | 3102 | sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary) |
2793 | 3103 | ||
2794 | sqdata.hashes = {} | 3104 | for tid in tids: |
2795 | for mc in sorted(sqdata.multiconfigs): | 3105 | if tid in sqdata.stamppresent: |
2796 | for tid in sorted(sqdata.sq_revdeps): | 3106 | continue |
2797 | if mc_from_tid(tid) != mc: | 3107 | if tid in sqdata.valid: |
2798 | continue | 3108 | continue |
2799 | if tid in sqdata.stamppresent: | 3109 | if tid in sqdata.noexec: |
2800 | continue | 3110 | continue |
2801 | if tid in sqdata.valid: | 3111 | if tid in sqrq.scenequeue_covered: |
2802 | continue | 3112 | continue |
2803 | if tid in sqdata.noexec: | 3113 | if tid in sqrq.scenequeue_notcovered: |
2804 | continue | 3114 | continue |
2805 | if tid in sqrq.scenequeue_notcovered: | 3115 | if tid in sqrq.sq_deferred: |
2806 | continue | 3116 | continue |
2807 | sqdata.outrightfail.add(tid) | 3117 | sqdata.outrightfail.add(tid) |
2808 | 3118 | logger.debug2("%s already handled (fallthrough), skipping" % (tid)) | |
2809 | h = pending_hash_index(tid, rqdata) | ||
2810 | if h not in sqdata.hashes: | ||
2811 | sqdata.hashes[h] = tid | ||
2812 | else: | ||
2813 | sqrq.sq_deferred[tid] = sqdata.hashes[h] | ||
2814 | bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h])) | ||
2815 | |||
2816 | 3119 | ||
2817 | class TaskFailure(Exception): | 3120 | class TaskFailure(Exception): |
2818 | """ | 3121 | """ |
@@ -2876,12 +3179,16 @@ class runQueueTaskFailed(runQueueEvent): | |||
2876 | """ | 3179 | """ |
2877 | Event notifying a task failed | 3180 | Event notifying a task failed |
2878 | """ | 3181 | """ |
2879 | def __init__(self, task, stats, exitcode, rq): | 3182 | def __init__(self, task, stats, exitcode, rq, fakeroot_log=None): |
2880 | runQueueEvent.__init__(self, task, stats, rq) | 3183 | runQueueEvent.__init__(self, task, stats, rq) |
2881 | self.exitcode = exitcode | 3184 | self.exitcode = exitcode |
3185 | self.fakeroot_log = fakeroot_log | ||
2882 | 3186 | ||
2883 | def __str__(self): | 3187 | def __str__(self): |
2884 | return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode) | 3188 | if self.fakeroot_log: |
3189 | return "Task (%s) failed with exit code '%s' \nPseudo log:\n%s" % (self.taskstring, self.exitcode, self.fakeroot_log) | ||
3190 | else: | ||
3191 | return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode) | ||
2885 | 3192 | ||
2886 | class sceneQueueTaskFailed(sceneQueueEvent): | 3193 | class sceneQueueTaskFailed(sceneQueueEvent): |
2887 | """ | 3194 | """ |
@@ -2933,18 +3240,16 @@ class runQueuePipe(): | |||
2933 | """ | 3240 | """ |
2934 | Abstraction for a pipe between a worker thread and the server | 3241 | Abstraction for a pipe between a worker thread and the server |
2935 | """ | 3242 | """ |
2936 | def __init__(self, pipein, pipeout, d, rq, rqexec): | 3243 | def __init__(self, pipein, pipeout, d, rq, rqexec, fakerootlogs=None): |
2937 | self.input = pipein | 3244 | self.input = pipein |
2938 | if pipeout: | 3245 | if pipeout: |
2939 | pipeout.close() | 3246 | pipeout.close() |
2940 | bb.utils.nonblockingfd(self.input) | 3247 | bb.utils.nonblockingfd(self.input) |
2941 | self.queue = b"" | 3248 | self.queue = bytearray() |
2942 | self.d = d | 3249 | self.d = d |
2943 | self.rq = rq | 3250 | self.rq = rq |
2944 | self.rqexec = rqexec | 3251 | self.rqexec = rqexec |
2945 | 3252 | self.fakerootlogs = fakerootlogs | |
2946 | def setrunqueueexec(self, rqexec): | ||
2947 | self.rqexec = rqexec | ||
2948 | 3253 | ||
2949 | def read(self): | 3254 | def read(self): |
2950 | for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]: | 3255 | for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]: |
@@ -2956,13 +3261,13 @@ class runQueuePipe(): | |||
2956 | 3261 | ||
2957 | start = len(self.queue) | 3262 | start = len(self.queue) |
2958 | try: | 3263 | try: |
2959 | self.queue = self.queue + (self.input.read(102400) or b"") | 3264 | self.queue.extend(self.input.read(102400) or b"") |
2960 | except (OSError, IOError) as e: | 3265 | except (OSError, IOError) as e: |
2961 | if e.errno != errno.EAGAIN: | 3266 | if e.errno != errno.EAGAIN: |
2962 | raise | 3267 | raise |
2963 | end = len(self.queue) | 3268 | end = len(self.queue) |
2964 | found = True | 3269 | found = True |
2965 | while found and len(self.queue): | 3270 | while found and self.queue: |
2966 | found = False | 3271 | found = False |
2967 | index = self.queue.find(b"</event>") | 3272 | index = self.queue.find(b"</event>") |
2968 | while index != -1 and self.queue.startswith(b"<event>"): | 3273 | while index != -1 and self.queue.startswith(b"<event>"): |
@@ -2987,7 +3292,11 @@ class runQueuePipe(): | |||
2987 | task, status = pickle.loads(self.queue[10:index]) | 3292 | task, status = pickle.loads(self.queue[10:index]) |
2988 | except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e: | 3293 | except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e: |
2989 | bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index])) | 3294 | bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index])) |
2990 | self.rqexec.runqueue_process_waitpid(task, status) | 3295 | (_, _, _, taskfn) = split_tid_mcfn(task) |
3296 | fakerootlog = None | ||
3297 | if self.fakerootlogs and taskfn and taskfn in self.fakerootlogs: | ||
3298 | fakerootlog = self.fakerootlogs[taskfn] | ||
3299 | self.rqexec.runqueue_process_waitpid(task, status, fakerootlog=fakerootlog) | ||
2991 | found = True | 3300 | found = True |
2992 | self.queue = self.queue[index+11:] | 3301 | self.queue = self.queue[index+11:] |
2993 | index = self.queue.find(b"</exitcode>") | 3302 | index = self.queue.find(b"</exitcode>") |
@@ -2996,16 +3305,16 @@ class runQueuePipe(): | |||
2996 | def close(self): | 3305 | def close(self): |
2997 | while self.read(): | 3306 | while self.read(): |
2998 | continue | 3307 | continue |
2999 | if len(self.queue) > 0: | 3308 | if self.queue: |
3000 | print("Warning, worker left partial message: %s" % self.queue) | 3309 | print("Warning, worker left partial message: %s" % self.queue) |
3001 | self.input.close() | 3310 | self.input.close() |
3002 | 3311 | ||
def get_setscene_enforce_ignore_tasks(d, targets):
    """
    Build the list of task patterns exempt from setscene enforcement.

    Returns None when BB_SETSCENE_ENFORCE is not "1". Otherwise each entry
    of BB_SETSCENE_ENFORCE_IGNORE_TASKS is returned as-is, except entries
    of the form "%:<task>" which are expanded to "<target>:<task>" for
    every build target.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    outlist = []
    for entry in (d.getVar("BB_SETSCENE_ENFORCE_IGNORE_TASKS") or "").split():
        if not entry.startswith('%:'):
            outlist.append(entry)
            continue
        # Wildcard recipe: apply this task exemption to every target.
        taskpart = entry.split(':')[1]
        for (mc, target, task, fn) in targets:
            outlist.append(target + ':' + taskpart)
    return outlist
3015 | 3324 | ||
def check_setscene_enforce_ignore_tasks(pn, taskname, ignore_tasks):
    """
    Return True if pn:taskname is exempt from setscene enforcement.

    ignore_tasks is a list of fnmatch-style "pn:task" patterns (as built by
    get_setscene_enforce_ignore_tasks), or None when enforcement is disabled
    entirely -- in which case everything is treated as exempt.
    """
    import fnmatch
    # No ignore list means enforcement is off: everything passes.
    if ignore_tasks is None:
        return True
    item = '%s:%s' % (pn, taskname)
    # BUGFIX: the loop variable used to be named "ignore_tasks", shadowing
    # the parameter it iterates over; renamed to "pattern" for clarity.
    for pattern in ignore_tasks:
        if fnmatch.fnmatch(item, pattern):
            return True
    return False