Diffstat (limited to 'bitbake/lib/bb/runqueue.py')
-rw-r--r-- | bitbake/lib/bb/runqueue.py | 1146 |
1 files changed, 762 insertions, 384 deletions
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 54ef245a63..80f3d3282f 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -14,6 +14,7 @@ import os | |||
14 | import sys | 14 | import sys |
15 | import stat | 15 | import stat |
16 | import errno | 16 | import errno |
17 | import itertools | ||
17 | import logging | 18 | import logging |
18 | import re | 19 | import re |
19 | import bb | 20 | import bb |
@@ -24,6 +25,7 @@ import pickle | |||
24 | from multiprocessing import Process | 25 | from multiprocessing import Process |
25 | import shlex | 26 | import shlex |
26 | import pprint | 27 | import pprint |
28 | import time | ||
27 | 29 | ||
28 | bblogger = logging.getLogger("BitBake") | 30 | bblogger = logging.getLogger("BitBake") |
29 | logger = logging.getLogger("BitBake.RunQueue") | 31 | logger = logging.getLogger("BitBake.RunQueue") |
@@ -85,15 +87,19 @@ class RunQueueStats: | |||
85 | """ | 87 | """ |
86 | Holds statistics on the tasks handled by the associated runQueue | 88 | Holds statistics on the tasks handled by the associated runQueue |
87 | """ | 89 | """ |
88 | def __init__(self, total): | 90 | def __init__(self, total, setscene_total): |
89 | self.completed = 0 | 91 | self.completed = 0 |
90 | self.skipped = 0 | 92 | self.skipped = 0 |
91 | self.failed = 0 | 93 | self.failed = 0 |
92 | self.active = 0 | 94 | self.active = 0 |
95 | self.setscene_active = 0 | ||
96 | self.setscene_covered = 0 | ||
97 | self.setscene_notcovered = 0 | ||
98 | self.setscene_total = setscene_total | ||
93 | self.total = total | 99 | self.total = total |
94 | 100 | ||
95 | def copy(self): | 101 | def copy(self): |
96 | obj = self.__class__(self.total) | 102 | obj = self.__class__(self.total, self.setscene_total) |
97 | obj.__dict__.update(self.__dict__) | 103 | obj.__dict__.update(self.__dict__) |
98 | return obj | 104 | return obj |
99 | 105 | ||
@@ -112,10 +118,18 @@ class RunQueueStats: | |||
112 | def taskActive(self): | 118 | def taskActive(self): |
113 | self.active = self.active + 1 | 119 | self.active = self.active + 1 |
114 | 120 | ||
121 | def updateCovered(self, covered, notcovered): | ||
122 | self.setscene_covered = covered | ||
123 | self.setscene_notcovered = notcovered | ||
124 | |||
125 | def updateActiveSetscene(self, active): | ||
126 | self.setscene_active = active | ||
127 | |||
115 | # These values indicate the next step due to be run in the | 128 | # These values indicate the next step due to be run in the |
116 | # runQueue state machine | 129 | # runQueue state machine |
117 | runQueuePrepare = 2 | 130 | runQueuePrepare = 2 |
118 | runQueueSceneInit = 3 | 131 | runQueueSceneInit = 3 |
132 | runQueueDumpSigs = 4 | ||
119 | runQueueRunning = 6 | 133 | runQueueRunning = 6 |
120 | runQueueFailed = 7 | 134 | runQueueFailed = 7 |
121 | runQueueCleanUp = 8 | 135 | runQueueCleanUp = 8 |
@@ -143,11 +157,82 @@ class RunQueueScheduler(object): | |||
143 | self.stamps = {} | 157 | self.stamps = {} |
144 | for tid in self.rqdata.runtaskentries: | 158 | for tid in self.rqdata.runtaskentries: |
145 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 159 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
146 | self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True) | 160 | self.stamps[tid] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) |
147 | if tid in self.rq.runq_buildable: | 161 | if tid in self.rq.runq_buildable: |
148 | self.buildable.append(tid) | 162 | self.buildable.add(tid) |
149 | 163 | ||
150 | self.rev_prio_map = None | 164 | self.rev_prio_map = None |
165 | self.is_pressure_usable() | ||
166 | |||
167 | def is_pressure_usable(self): | ||
168 | """ | ||
169 | If monitoring pressure, return True if the pressure files can be opened and read. For example, | ||
170 | openSUSE's /proc/pressure/* files have readable file permissions, but reading them returns the error EOPNOTSUPP | ||
171 | (Operation not supported). | ||
172 | """ | ||
173 | if self.rq.max_cpu_pressure or self.rq.max_io_pressure or self.rq.max_memory_pressure: | ||
174 | try: | ||
175 | with open("/proc/pressure/cpu") as cpu_pressure_fds, \ | ||
176 | open("/proc/pressure/io") as io_pressure_fds, \ | ||
177 | open("/proc/pressure/memory") as memory_pressure_fds: | ||
178 | |||
179 | self.prev_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1] | ||
180 | self.prev_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1] | ||
181 | self.prev_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1] | ||
182 | self.prev_pressure_time = time.time() | ||
183 | self.check_pressure = True | ||
184 | except: | ||
185 | bb.note("The /proc/pressure files can't be read. Continuing build without monitoring pressure") | ||
186 | self.check_pressure = False | ||
187 | else: | ||
188 | self.check_pressure = False | ||
189 | |||
190 | def exceeds_max_pressure(self): | ||
191 | """ | ||
192 | Monitor the difference in total pressure at least once per second; if | ||
193 | BB_PRESSURE_MAX_{CPU|IO|MEMORY} is set, return True when above the threshold. | ||
194 | """ | ||
195 | if self.check_pressure: | ||
196 | with open("/proc/pressure/cpu") as cpu_pressure_fds, \ | ||
197 | open("/proc/pressure/io") as io_pressure_fds, \ | ||
198 | open("/proc/pressure/memory") as memory_pressure_fds: | ||
199 | # extract "total" from /proc/pressure/{cpu|io|memory} | ||
200 | curr_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1] | ||
201 | curr_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1] | ||
202 | curr_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1] | ||
203 | now = time.time() | ||
204 | tdiff = now - self.prev_pressure_time | ||
205 | psi_accumulation_interval = 1.0 | ||
206 | cpu_pressure = (float(curr_cpu_pressure) - float(self.prev_cpu_pressure)) / tdiff | ||
207 | io_pressure = (float(curr_io_pressure) - float(self.prev_io_pressure)) / tdiff | ||
208 | memory_pressure = (float(curr_memory_pressure) - float(self.prev_memory_pressure)) / tdiff | ||
209 | exceeds_cpu_pressure = self.rq.max_cpu_pressure and cpu_pressure > self.rq.max_cpu_pressure | ||
210 | exceeds_io_pressure = self.rq.max_io_pressure and io_pressure > self.rq.max_io_pressure | ||
211 | exceeds_memory_pressure = self.rq.max_memory_pressure and memory_pressure > self.rq.max_memory_pressure | ||
212 | |||
213 | if tdiff > psi_accumulation_interval: | ||
214 | self.prev_cpu_pressure = curr_cpu_pressure | ||
215 | self.prev_io_pressure = curr_io_pressure | ||
216 | self.prev_memory_pressure = curr_memory_pressure | ||
217 | self.prev_pressure_time = now | ||
218 | |||
219 | pressure_state = (exceeds_cpu_pressure, exceeds_io_pressure, exceeds_memory_pressure) | ||
220 | pressure_values = (round(cpu_pressure,1), self.rq.max_cpu_pressure, round(io_pressure,1), self.rq.max_io_pressure, round(memory_pressure,1), self.rq.max_memory_pressure) | ||
221 | if hasattr(self, "pressure_state") and pressure_state != self.pressure_state: | ||
222 | bb.note("Pressure status changed to CPU: %s, IO: %s, Mem: %s (CPU: %s/%s, IO: %s/%s, Mem: %s/%s) - using %s/%s bitbake threads" % (pressure_state + pressure_values + (len(self.rq.runq_running.difference(self.rq.runq_complete)), self.rq.number_tasks))) | ||
223 | self.pressure_state = pressure_state | ||
224 | return (exceeds_cpu_pressure or exceeds_io_pressure or exceeds_memory_pressure) | ||
225 | elif self.rq.max_loadfactor: | ||
226 | limit = False | ||
227 | loadfactor = float(os.getloadavg()[0]) / os.cpu_count() | ||
228 | # bb.warn("Comparing %s to %s" % (loadfactor, self.rq.max_loadfactor)) | ||
229 | if loadfactor > self.rq.max_loadfactor: | ||
230 | limit = True | ||
231 | if hasattr(self, "loadfactor_limit") and limit != self.loadfactor_limit: | ||
232 | bb.note("Load average limiting set to %s as load average: %s - using %s/%s bitbake threads" % (limit, loadfactor, len(self.rq.runq_running.difference(self.rq.runq_complete)), self.rq.number_tasks)) | ||
233 | self.loadfactor_limit = limit | ||
234 | return limit | ||
235 | return False | ||
151 | 236 | ||
152 | def next_buildable_task(self): | 237 | def next_buildable_task(self): |
153 | """ | 238 | """ |
@@ -161,6 +246,12 @@ class RunQueueScheduler(object): | |||
161 | if not buildable: | 246 | if not buildable: |
162 | return None | 247 | return None |
163 | 248 | ||
249 | # Bitbake requires that at least one task be active. Only check for pressure if | ||
250 | # this is the case; otherwise the pressure limitation could result in no tasks | ||
251 | # being active and no new tasks being started, at times breaking the scheduler. | ||
252 | if self.rq.stats.active and self.exceeds_max_pressure(): | ||
253 | return None | ||
254 | |||
164 | # Filter out tasks that have a max number of threads that have been exceeded | 255 | # Filter out tasks that have a max number of threads that have been exceeded |
165 | skip_buildable = {} | 256 | skip_buildable = {} |
166 | for running in self.rq.runq_running.difference(self.rq.runq_complete): | 257 | for running in self.rq.runq_running.difference(self.rq.runq_complete): |
@@ -191,11 +282,11 @@ class RunQueueScheduler(object): | |||
191 | best = None | 282 | best = None |
192 | bestprio = None | 283 | bestprio = None |
193 | for tid in buildable: | 284 | for tid in buildable: |
194 | taskname = taskname_from_tid(tid) | ||
195 | if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]): | ||
196 | continue | ||
197 | prio = self.rev_prio_map[tid] | 285 | prio = self.rev_prio_map[tid] |
198 | if bestprio is None or bestprio > prio: | 286 | if bestprio is None or bestprio > prio: |
287 | taskname = taskname_from_tid(tid) | ||
288 | if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]): | ||
289 | continue | ||
199 | stamp = self.stamps[tid] | 290 | stamp = self.stamps[tid] |
200 | if stamp in self.rq.build_stamps.values(): | 291 | if stamp in self.rq.build_stamps.values(): |
201 | continue | 292 | continue |
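
A self-contained sketch (with illustrative data) of the reordered selection loop above: the per-taskname thread-cap check now runs only for candidates that would improve on the current best priority, avoiding a taskname_from_tid() call per tid. The rsplit() here is a simplified stand-in for that helper:

    rev_prio_map = {"a.bb:do_compile": 3, "b.bb:do_fetch": 1, "c.bb:do_fetch": 2}
    skip_buildable = {"do_fetch": 2}      # currently running count per taskname
    skip_maxthread = {"do_fetch": 2}      # configured cap per taskname

    best = bestprio = None
    for tid in rev_prio_map:
        prio = rev_prio_map[tid]
        if bestprio is None or bestprio > prio:
            taskname = tid.rsplit(":", 1)[1]
            if taskname in skip_buildable and skip_buildable[taskname] >= skip_maxthread[taskname]:
                continue
            best, bestprio = tid, prio
    print(best)   # a.bb:do_compile -- both do_fetch candidates are at their cap
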
@@ -374,10 +465,9 @@ class RunQueueData: | |||
374 | self.rq = rq | 465 | self.rq = rq |
375 | self.warn_multi_bb = False | 466 | self.warn_multi_bb = False |
376 | 467 | ||
377 | self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or "" | 468 | self.multi_provider_allowed = (cfgData.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split() |
378 | self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split() | 469 | self.setscene_ignore_tasks = get_setscene_enforce_ignore_tasks(cfgData, targets) |
379 | self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData, targets) | 470 | self.setscene_ignore_tasks_checked = False |
380 | self.setscenewhitelist_checked = False | ||
381 | self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1") | 471 | self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1") |
382 | self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter() | 472 | self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter() |
383 | 473 | ||
@@ -387,7 +477,6 @@ class RunQueueData: | |||
387 | self.runtaskentries = {} | 477 | self.runtaskentries = {} |
388 | 478 | ||
389 | def runq_depends_names(self, ids): | 479 | def runq_depends_names(self, ids): |
390 | import re | ||
391 | ret = [] | 480 | ret = [] |
392 | for id in ids: | 481 | for id in ids: |
393 | nam = os.path.basename(id) | 482 | nam = os.path.basename(id) |
@@ -475,7 +564,7 @@ class RunQueueData: | |||
475 | msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends))) | 564 | msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends))) |
476 | msgs.append("\n") | 565 | msgs.append("\n") |
477 | if len(valid_chains) > 10: | 566 | if len(valid_chains) > 10: |
478 | msgs.append("Aborted dependency loops search after 10 matches.\n") | 567 | msgs.append("Halted dependency loops search after 10 matches.\n") |
479 | raise TooManyLoops | 568 | raise TooManyLoops |
480 | continue | 569 | continue |
481 | scan = False | 570 | scan = False |
@@ -536,7 +625,7 @@ class RunQueueData: | |||
536 | next_points.append(revdep) | 625 | next_points.append(revdep) |
537 | task_done[revdep] = True | 626 | task_done[revdep] = True |
538 | endpoints = next_points | 627 | endpoints = next_points |
539 | if len(next_points) == 0: | 628 | if not next_points: |
540 | break | 629 | break |
541 | 630 | ||
542 | # Circular dependency sanity check | 631 | # Circular dependency sanity check |
@@ -578,15 +667,18 @@ class RunQueueData: | |||
578 | 667 | ||
579 | found = False | 668 | found = False |
580 | for mc in self.taskData: | 669 | for mc in self.taskData: |
581 | if len(taskData[mc].taskentries) > 0: | 670 | if taskData[mc].taskentries: |
582 | found = True | 671 | found = True |
583 | break | 672 | break |
584 | if not found: | 673 | if not found: |
585 | # Nothing to do | 674 | # Nothing to do |
586 | return 0 | 675 | return 0 |
587 | 676 | ||
677 | bb.parse.siggen.setup_datacache(self.dataCaches) | ||
678 | |||
588 | self.init_progress_reporter.start() | 679 | self.init_progress_reporter.start() |
589 | self.init_progress_reporter.next_stage() | 680 | self.init_progress_reporter.next_stage() |
681 | bb.event.check_for_interrupts(self.cooker.data) | ||
590 | 682 | ||
591 | # Step A - Work out a list of tasks to run | 683 | # Step A - Work out a list of tasks to run |
592 | # | 684 | # |
@@ -632,9 +724,13 @@ class RunQueueData: | |||
632 | frommc = mcdependency[1] | 724 | frommc = mcdependency[1] |
633 | mcdep = mcdependency[2] | 725 | mcdep = mcdependency[2] |
634 | deptask = mcdependency[4] | 726 | deptask = mcdependency[4] |
727 | if mcdep not in taskData: | ||
728 | bb.fatal("Multiconfig '%s' is referenced in multiconfig dependency '%s' but not enabled in BBMULTICONFIG?" % (mcdep, dep)) | ||
635 | if mc == frommc: | 729 | if mc == frommc: |
636 | fn = taskData[mcdep].build_targets[pn][0] | 730 | fn = taskData[mcdep].build_targets[pn][0] |
637 | newdep = '%s:%s' % (fn,deptask) | 731 | newdep = '%s:%s' % (fn,deptask) |
732 | if newdep not in taskData[mcdep].taskentries: | ||
733 | bb.fatal("Task mcdepends on non-existent task %s" % (newdep)) | ||
638 | taskData[mc].taskentries[tid].tdepends.append(newdep) | 734 | taskData[mc].taskentries[tid].tdepends.append(newdep) |
639 | 735 | ||
640 | for mc in taskData: | 736 | for mc in taskData: |
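
A sketch of how one mcdepends entry is decomposed, assuming the "mc:<from-mc>:<dep-mc>:<pn>:<taskname>" layout implied by the mcdependency[1], [2] and [4] indexing above; the filename is a hypothetical build_targets lookup result:

    dep = "mc:target:host:gcc-cross:do_populate_sysroot"   # illustrative entry
    mcdependency = dep.split(':')
    frommc, mcdep, deptask = mcdependency[1], mcdependency[2], mcdependency[4]
    fn = "/path/to/gcc-cross.bb"      # hypothetical: taskData[mcdep].build_targets[pn][0]
    newdep = '%s:%s' % (fn, deptask)  # the tid appended to tdepends
    print(frommc, mcdep, newdep)
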
@@ -733,6 +829,7 @@ class RunQueueData: | |||
733 | #self.dump_data() | 829 | #self.dump_data() |
734 | 830 | ||
735 | self.init_progress_reporter.next_stage() | 831 | self.init_progress_reporter.next_stage() |
832 | bb.event.check_for_interrupts(self.cooker.data) | ||
736 | 833 | ||
737 | # Resolve recursive 'recrdeptask' dependencies (Part B) | 834 | # Resolve recursive 'recrdeptask' dependencies (Part B) |
738 | # | 835 | # |
@@ -762,7 +859,7 @@ class RunQueueData: | |||
762 | # Find the dependency chain endpoints | 859 | # Find the dependency chain endpoints |
763 | endpoints = set() | 860 | endpoints = set() |
764 | for tid in self.runtaskentries: | 861 | for tid in self.runtaskentries: |
765 | if len(deps[tid]) == 0: | 862 | if not deps[tid]: |
766 | endpoints.add(tid) | 863 | endpoints.add(tid) |
767 | # Iterate the chains collating dependencies | 864 | # Iterate the chains collating dependencies |
768 | while endpoints: | 865 | while endpoints: |
@@ -773,11 +870,11 @@ class RunQueueData: | |||
773 | cumulativedeps[dep].update(cumulativedeps[tid]) | 870 | cumulativedeps[dep].update(cumulativedeps[tid]) |
774 | if tid in deps[dep]: | 871 | if tid in deps[dep]: |
775 | deps[dep].remove(tid) | 872 | deps[dep].remove(tid) |
776 | if len(deps[dep]) == 0: | 873 | if not deps[dep]: |
777 | next.add(dep) | 874 | next.add(dep) |
778 | endpoints = next | 875 | endpoints = next |
779 | #for tid in deps: | 876 | #for tid in deps: |
780 | # if len(deps[tid]) != 0: | 877 | # if deps[tid]: |
781 | # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid])) | 878 | # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid])) |
782 | 879 | ||
783 | # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to | 880 | # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to |
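
The cumulative-dependency pass above is a reverse-topological worklist: start at tasks with no dependencies and push each task's accumulated set up to its dependents, releasing a dependent once all of its dependencies have been processed. A toy version on a three-task graph:

    deps = {"a": set(), "b": {"a"}, "c": {"a", "b"}}        # task -> its deps
    revdeps = {"a": {"b", "c"}, "b": {"c"}, "c": set()}     # task -> dependents
    remaining = {t: set(d) for t, d in deps.items()}
    cumulative = {t: set() for t in deps}

    endpoints = {t for t in deps if not deps[t]}
    while endpoints:
        nxt = set()
        for tid in endpoints:
            for dep in revdeps[tid]:
                cumulative[dep].add(tid)
                cumulative[dep].update(cumulative[tid])
                remaining[dep].discard(tid)
                if not remaining[dep]:    # all deps seen: safe to process
                    nxt.add(dep)
        endpoints = nxt
    print(cumulative["c"])                # {'a', 'b'}
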
@@ -829,6 +926,7 @@ class RunQueueData: | |||
829 | self.runtaskentries[tid].depends.difference_update(recursivetasksselfref) | 926 | self.runtaskentries[tid].depends.difference_update(recursivetasksselfref) |
830 | 927 | ||
831 | self.init_progress_reporter.next_stage() | 928 | self.init_progress_reporter.next_stage() |
929 | bb.event.check_for_interrupts(self.cooker.data) | ||
832 | 930 | ||
833 | #self.dump_data() | 931 | #self.dump_data() |
834 | 932 | ||
@@ -867,7 +965,7 @@ class RunQueueData: | |||
867 | bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname) | 965 | bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname) |
868 | else: | 966 | else: |
869 | logger.verbose("Invalidate task %s, %s", taskname, fn) | 967 | logger.verbose("Invalidate task %s, %s", taskname, fn) |
870 | bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn) | 968 | bb.parse.siggen.invalidate_task(taskname, taskfn) |
871 | 969 | ||
872 | self.target_tids = [] | 970 | self.target_tids = [] |
873 | for (mc, target, task, fn) in self.targets: | 971 | for (mc, target, task, fn) in self.targets: |
@@ -910,47 +1008,54 @@ class RunQueueData: | |||
910 | mark_active(tid, 1) | 1008 | mark_active(tid, 1) |
911 | 1009 | ||
912 | self.init_progress_reporter.next_stage() | 1010 | self.init_progress_reporter.next_stage() |
1011 | bb.event.check_for_interrupts(self.cooker.data) | ||
913 | 1012 | ||
914 | # Step C - Prune all inactive tasks | 1013 | # Step C - Prune all inactive tasks |
915 | # | 1014 | # |
916 | # Once all active tasks are marked, prune the ones we don't need. | 1015 | # Once all active tasks are marked, prune the ones we don't need. |
917 | 1016 | ||
918 | delcount = {} | ||
919 | for tid in list(self.runtaskentries.keys()): | ||
920 | if tid not in runq_build: | ||
921 | delcount[tid] = self.runtaskentries[tid] | ||
922 | del self.runtaskentries[tid] | ||
923 | |||
924 | # Handle --runall | 1017 | # Handle --runall |
925 | if self.cooker.configuration.runall: | 1018 | if self.cooker.configuration.runall: |
926 | # re-run the mark_active and then drop unused tasks from new list | 1019 | # re-run the mark_active and then drop unused tasks from new list |
927 | runq_build = {} | ||
928 | 1020 | ||
929 | for task in self.cooker.configuration.runall: | 1021 | runall_tids = set() |
930 | if not task.startswith("do_"): | 1022 | added = True |
931 | task = "do_{0}".format(task) | 1023 | while added: |
932 | runall_tids = set() | 1024 | reduced_tasklist = set(self.runtaskentries.keys()) |
933 | for tid in list(self.runtaskentries): | 1025 | for tid in list(self.runtaskentries.keys()): |
934 | wanttid = "{0}:{1}".format(fn_from_tid(tid), task) | 1026 | if tid not in runq_build: |
935 | if wanttid in delcount: | 1027 | reduced_tasklist.remove(tid) |
936 | self.runtaskentries[wanttid] = delcount[wanttid] | 1028 | runq_build = {} |
937 | if wanttid in self.runtaskentries: | ||
938 | runall_tids.add(wanttid) | ||
939 | |||
940 | for tid in list(runall_tids): | ||
941 | mark_active(tid,1) | ||
942 | if self.cooker.configuration.force: | ||
943 | invalidate_task(tid, False) | ||
944 | 1029 | ||
945 | for tid in list(self.runtaskentries.keys()): | 1030 | orig = runall_tids |
946 | if tid not in runq_build: | 1031 | runall_tids = set() |
947 | delcount[tid] = self.runtaskentries[tid] | 1032 | for task in self.cooker.configuration.runall: |
948 | del self.runtaskentries[tid] | 1033 | if not task.startswith("do_"): |
1034 | task = "do_{0}".format(task) | ||
1035 | for tid in reduced_tasklist: | ||
1036 | wanttid = "{0}:{1}".format(fn_from_tid(tid), task) | ||
1037 | if wanttid in self.runtaskentries: | ||
1038 | runall_tids.add(wanttid) | ||
1039 | |||
1040 | for tid in list(runall_tids): | ||
1041 | mark_active(tid, 1) | ||
1042 | self.target_tids.append(tid) | ||
1043 | if self.cooker.configuration.force: | ||
1044 | invalidate_task(tid, False) | ||
1045 | added = runall_tids - orig | ||
1046 | |||
1047 | delcount = set() | ||
1048 | for tid in list(self.runtaskentries.keys()): | ||
1049 | if tid not in runq_build: | ||
1050 | delcount.add(tid) | ||
1051 | del self.runtaskentries[tid] | ||
949 | 1052 | ||
950 | if len(self.runtaskentries) == 0: | 1053 | if self.cooker.configuration.runall: |
1054 | if not self.runtaskentries: | ||
951 | bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets))) | 1055 | bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets))) |
952 | 1056 | ||
953 | self.init_progress_reporter.next_stage() | 1057 | self.init_progress_reporter.next_stage() |
1058 | bb.event.check_for_interrupts(self.cooker.data) | ||
954 | 1059 | ||
955 | # Handle runonly | 1060 | # Handle runonly |
956 | if self.cooker.configuration.runonly: | 1061 | if self.cooker.configuration.runonly: |
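
The --runall handling above now loops until a pass of mark_active() adds no new tids, since newly activated tasks can pull in recipes that themselves contain matching tasks. A minimal sketch of that fixed-point pattern, with a hypothetical discover() standing in for the mark_active() expansion:

    def discover(wanted):
        # hypothetical one-step expansion, e.g. mark_active() pulling in deps
        return wanted | {w + "-dep" for w in wanted if not w.endswith("-dep")}

    wanted = {"do_image"}
    added = True
    while added:
        orig = set(wanted)
        wanted = discover(wanted)
        added = wanted - orig    # non-empty set is truthy: loop again
    print(sorted(wanted))        # ['do_image', 'do_image-dep']
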
@@ -960,19 +1065,19 @@ class RunQueueData: | |||
960 | for task in self.cooker.configuration.runonly: | 1065 | for task in self.cooker.configuration.runonly: |
961 | if not task.startswith("do_"): | 1066 | if not task.startswith("do_"): |
962 | task = "do_{0}".format(task) | 1067 | task = "do_{0}".format(task) |
963 | runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == task } | 1068 | runonly_tids = [k for k in self.runtaskentries.keys() if taskname_from_tid(k) == task] |
964 | 1069 | ||
965 | for tid in list(runonly_tids): | 1070 | for tid in runonly_tids: |
966 | mark_active(tid,1) | 1071 | mark_active(tid, 1) |
967 | if self.cooker.configuration.force: | 1072 | if self.cooker.configuration.force: |
968 | invalidate_task(tid, False) | 1073 | invalidate_task(tid, False) |
969 | 1074 | ||
970 | for tid in list(self.runtaskentries.keys()): | 1075 | for tid in list(self.runtaskentries.keys()): |
971 | if tid not in runq_build: | 1076 | if tid not in runq_build: |
972 | delcount[tid] = self.runtaskentries[tid] | 1077 | delcount.add(tid) |
973 | del self.runtaskentries[tid] | 1078 | del self.runtaskentries[tid] |
974 | 1079 | ||
975 | if len(self.runtaskentries) == 0: | 1080 | if not self.runtaskentries: |
976 | bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets))) | 1081 | bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets))) |
977 | 1082 | ||
978 | # | 1083 | # |
@@ -980,8 +1085,8 @@ class RunQueueData: | |||
980 | # | 1085 | # |
981 | 1086 | ||
982 | # Check to make sure we still have tasks to run | 1087 | # Check to make sure we still have tasks to run |
983 | if len(self.runtaskentries) == 0: | 1088 | if not self.runtaskentries: |
984 | if not taskData[''].abort: | 1089 | if not taskData[''].halt: |
985 | bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") | 1090 | bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") |
986 | else: | 1091 | else: |
987 | bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.") | 1092 | bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.") |
@@ -991,6 +1096,7 @@ class RunQueueData: | |||
991 | logger.verbose("Assign Weightings") | 1096 | logger.verbose("Assign Weightings") |
992 | 1097 | ||
993 | self.init_progress_reporter.next_stage() | 1098 | self.init_progress_reporter.next_stage() |
1099 | bb.event.check_for_interrupts(self.cooker.data) | ||
994 | 1100 | ||
995 | # Generate a list of reverse dependencies to ease future calculations | 1101 | # Generate a list of reverse dependencies to ease future calculations |
996 | for tid in self.runtaskentries: | 1102 | for tid in self.runtaskentries: |
@@ -998,13 +1104,14 @@ class RunQueueData: | |||
998 | self.runtaskentries[dep].revdeps.add(tid) | 1104 | self.runtaskentries[dep].revdeps.add(tid) |
999 | 1105 | ||
1000 | self.init_progress_reporter.next_stage() | 1106 | self.init_progress_reporter.next_stage() |
1107 | bb.event.check_for_interrupts(self.cooker.data) | ||
1001 | 1108 | ||
1002 | # Identify tasks at the end of dependency chains | 1109 | # Identify tasks at the end of dependency chains |
1003 | # Error on circular dependency loops (length two) | 1110 | # Error on circular dependency loops (length two) |
1004 | endpoints = [] | 1111 | endpoints = [] |
1005 | for tid in self.runtaskentries: | 1112 | for tid in self.runtaskentries: |
1006 | revdeps = self.runtaskentries[tid].revdeps | 1113 | revdeps = self.runtaskentries[tid].revdeps |
1007 | if len(revdeps) == 0: | 1114 | if not revdeps: |
1008 | endpoints.append(tid) | 1115 | endpoints.append(tid) |
1009 | for dep in revdeps: | 1116 | for dep in revdeps: |
1010 | if dep in self.runtaskentries[tid].depends: | 1117 | if dep in self.runtaskentries[tid].depends: |
@@ -1014,12 +1121,14 @@ class RunQueueData: | |||
1014 | logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints)) | 1121 | logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints)) |
1015 | 1122 | ||
1016 | self.init_progress_reporter.next_stage() | 1123 | self.init_progress_reporter.next_stage() |
1124 | bb.event.check_for_interrupts(self.cooker.data) | ||
1017 | 1125 | ||
1018 | # Calculate task weights | 1126 | # Calculate task weights |
1019 | # Check for higher-length circular dependencies | 1127 | # Check for higher-length circular dependencies |
1020 | self.runq_weight = self.calculate_task_weights(endpoints) | 1128 | self.runq_weight = self.calculate_task_weights(endpoints) |
1021 | 1129 | ||
1022 | self.init_progress_reporter.next_stage() | 1130 | self.init_progress_reporter.next_stage() |
1131 | bb.event.check_for_interrupts(self.cooker.data) | ||
1023 | 1132 | ||
1024 | # Sanity Check - Check for multiple tasks building the same provider | 1133 | # Sanity Check - Check for multiple tasks building the same provider |
1025 | for mc in self.dataCaches: | 1134 | for mc in self.dataCaches: |
@@ -1040,7 +1149,7 @@ class RunQueueData: | |||
1040 | for prov in prov_list: | 1149 | for prov in prov_list: |
1041 | if len(prov_list[prov]) < 2: | 1150 | if len(prov_list[prov]) < 2: |
1042 | continue | 1151 | continue |
1043 | if prov in self.multi_provider_whitelist: | 1152 | if prov in self.multi_provider_allowed: |
1044 | continue | 1153 | continue |
1045 | seen_pn = [] | 1154 | seen_pn = [] |
1046 | # If two versions of the same PN are being built, it's fatal; we don't support it. | 1155 | # If two versions of the same PN are being built, it's fatal; we don't support it. |
@@ -1050,12 +1159,12 @@ class RunQueueData: | |||
1050 | seen_pn.append(pn) | 1159 | seen_pn.append(pn) |
1051 | else: | 1160 | else: |
1052 | bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn)) | 1161 | bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn)) |
1053 | msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov])) | 1162 | msgs = ["Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))] |
1054 | # | 1163 | # |
1055 | # Construct a list of things which uniquely depend on each provider | 1164 | # Construct a list of things which uniquely depend on each provider |
1056 | # since this may help the user figure out which dependency is triggering this warning | 1165 | # since this may help the user figure out which dependency is triggering this warning |
1057 | # | 1166 | # |
1058 | msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from." | 1167 | msgs.append("\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from.") |
1059 | deplist = {} | 1168 | deplist = {} |
1060 | commondeps = None | 1169 | commondeps = None |
1061 | for provfn in prov_list[prov]: | 1170 | for provfn in prov_list[prov]: |
@@ -1075,12 +1184,12 @@ class RunQueueData: | |||
1075 | commondeps &= deps | 1184 | commondeps &= deps |
1076 | deplist[provfn] = deps | 1185 | deplist[provfn] = deps |
1077 | for provfn in deplist: | 1186 | for provfn in deplist: |
1078 | msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps)) | 1187 | msgs.append("\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))) |
1079 | # | 1188 | # |
1080 | # Construct a list of provides and runtime providers for each recipe | 1189 | # Construct a list of provides and runtime providers for each recipe |
1081 | # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC) | 1190 | # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC) |
1082 | # | 1191 | # |
1083 | msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful." | 1192 | msgs.append("\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful.") |
1084 | provide_results = {} | 1193 | provide_results = {} |
1085 | rprovide_results = {} | 1194 | rprovide_results = {} |
1086 | commonprovs = None | 1195 | commonprovs = None |
@@ -1107,30 +1216,20 @@ class RunQueueData: | |||
1107 | else: | 1216 | else: |
1108 | commonrprovs &= rprovides | 1217 | commonrprovs &= rprovides |
1109 | rprovide_results[provfn] = rprovides | 1218 | rprovide_results[provfn] = rprovides |
1110 | #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs)) | 1219 | #msgs.append("\nCommon provides:\n %s" % ("\n ".join(commonprovs))) |
1111 | #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs)) | 1220 | #msgs.append("\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))) |
1112 | for provfn in prov_list[prov]: | 1221 | for provfn in prov_list[prov]: |
1113 | msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs)) | 1222 | msgs.append("\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))) |
1114 | msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs)) | 1223 | msgs.append("\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))) |
1115 | 1224 | ||
1116 | if self.warn_multi_bb: | 1225 | if self.warn_multi_bb: |
1117 | logger.verbnote(msg) | 1226 | logger.verbnote("".join(msgs)) |
1118 | else: | 1227 | else: |
1119 | logger.error(msg) | 1228 | logger.error("".join(msgs)) |
1120 | 1229 | ||
1121 | self.init_progress_reporter.next_stage() | 1230 | self.init_progress_reporter.next_stage() |
1122 | |||
1123 | # Create a whitelist usable by the stamp checks | ||
1124 | self.stampfnwhitelist = {} | ||
1125 | for mc in self.taskData: | ||
1126 | self.stampfnwhitelist[mc] = [] | ||
1127 | for entry in self.stampwhitelist.split(): | ||
1128 | if entry not in self.taskData[mc].build_targets: | ||
1129 | continue | ||
1130 | fn = self.taskData.build_targets[entry][0] | ||
1131 | self.stampfnwhitelist[mc].append(fn) | ||
1132 | |||
1133 | self.init_progress_reporter.next_stage() | 1231 | self.init_progress_reporter.next_stage() |
1232 | bb.event.check_for_interrupts(self.cooker.data) | ||
1134 | 1233 | ||
1135 | # Iterate over the task list looking for tasks with a 'setscene' function | 1234 | # Iterate over the task list looking for tasks with a 'setscene' function |
1136 | self.runq_setscene_tids = set() | 1235 | self.runq_setscene_tids = set() |
@@ -1143,6 +1242,7 @@ class RunQueueData: | |||
1143 | self.runq_setscene_tids.add(tid) | 1242 | self.runq_setscene_tids.add(tid) |
1144 | 1243 | ||
1145 | self.init_progress_reporter.next_stage() | 1244 | self.init_progress_reporter.next_stage() |
1245 | bb.event.check_for_interrupts(self.cooker.data) | ||
1146 | 1246 | ||
1147 | # Invalidate task if force mode active | 1247 | # Invalidate task if force mode active |
1148 | if self.cooker.configuration.force: | 1248 | if self.cooker.configuration.force: |
@@ -1159,6 +1259,7 @@ class RunQueueData: | |||
1159 | invalidate_task(fn + ":" + st, True) | 1259 | invalidate_task(fn + ":" + st, True) |
1160 | 1260 | ||
1161 | self.init_progress_reporter.next_stage() | 1261 | self.init_progress_reporter.next_stage() |
1262 | bb.event.check_for_interrupts(self.cooker.data) | ||
1162 | 1263 | ||
1163 | # Create and print to the logs a virtual/xxxx -> PN (fn) table | 1264 | # Create and print to the logs a virtual/xxxx -> PN (fn) table |
1164 | for mc in taskData: | 1265 | for mc in taskData: |
@@ -1171,30 +1272,45 @@ class RunQueueData: | |||
1171 | bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc]) | 1272 | bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc]) |
1172 | 1273 | ||
1173 | self.init_progress_reporter.next_stage() | 1274 | self.init_progress_reporter.next_stage() |
1275 | bb.event.check_for_interrupts(self.cooker.data) | ||
1174 | 1276 | ||
1175 | bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids) | 1277 | bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids) |
1176 | 1278 | ||
1279 | starttime = time.time() | ||
1280 | lasttime = starttime | ||
1281 | |||
1177 | # Iterate over the task list and call into the siggen code | 1282 | # Iterate over the task list and call into the siggen code |
1178 | dealtwith = set() | 1283 | dealtwith = set() |
1179 | todeal = set(self.runtaskentries) | 1284 | todeal = set(self.runtaskentries) |
1180 | while len(todeal) > 0: | 1285 | while todeal: |
1286 | ready = set() | ||
1181 | for tid in todeal.copy(): | 1287 | for tid in todeal.copy(): |
1182 | if len(self.runtaskentries[tid].depends - dealtwith) == 0: | 1288 | if not (self.runtaskentries[tid].depends - dealtwith): |
1183 | dealtwith.add(tid) | 1289 | self.runtaskentries[tid].taskhash_deps = bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches) |
1184 | todeal.remove(tid) | 1290 | # get_taskhash for a given tid *must* be called before get_unihash* below |
1185 | self.prepare_task_hash(tid) | 1291 | self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches) |
1292 | ready.add(tid) | ||
1293 | unihashes = bb.parse.siggen.get_unihashes(ready) | ||
1294 | for tid in ready: | ||
1295 | dealtwith.add(tid) | ||
1296 | todeal.remove(tid) | ||
1297 | self.runtaskentries[tid].unihash = unihashes[tid] | ||
1298 | |||
1299 | bb.event.check_for_interrupts(self.cooker.data) | ||
1300 | |||
1301 | if time.time() > (lasttime + 30): | ||
1302 | lasttime = time.time() | ||
1303 | hashequiv_logger.verbose("Initial setup loop progress: %s of %s in %s" % (len(todeal), len(self.runtaskentries), lasttime - starttime)) | ||
1304 | |||
1305 | endtime = time.time() | ||
1306 | if (endtime-starttime > 60): | ||
1307 | hashequiv_logger.verbose("Initial setup loop took: %s" % (endtime-starttime)) | ||
1186 | 1308 | ||
1187 | bb.parse.siggen.writeout_file_checksum_cache() | 1309 | bb.parse.siggen.writeout_file_checksum_cache() |
1188 | 1310 | ||
1189 | #self.dump_data() | 1311 | #self.dump_data() |
1190 | return len(self.runtaskentries) | 1312 | return len(self.runtaskentries) |
1191 | 1313 | ||
1192 | def prepare_task_hash(self, tid): | ||
1193 | dc = bb.parse.siggen.get_data_caches(self.dataCaches, mc_from_tid(tid)) | ||
1194 | bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, dc) | ||
1195 | self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, dc) | ||
1196 | self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid) | ||
1197 | |||
1198 | def dump_data(self): | 1314 | def dump_data(self): |
1199 | """ | 1315 | """ |
1200 | Dump some debug information on the internal data structures | 1316 | Dump some debug information on the internal data structures |
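
The rewritten hashing loop above processes tasks in dependency-order waves and resolves each wave's unihashes in a single batched get_unihashes() call, rather than one get_unihash() per task. A sketch of the wave pattern with a stand-in for the batched server query:

    depends = {"a": set(), "b": {"a"}, "c": {"a"}, "d": {"b", "c"}}

    def get_unihashes_batched(tids):
        # stand-in for one batched hash-equivalence server round trip
        return {tid: "unihash-" + tid for tid in tids}

    dealtwith, todeal = set(), set(depends)
    while todeal:
        ready = {tid for tid in todeal if not (depends[tid] - dealtwith)}
        unihashes = get_unihashes_batched(ready)   # one query per wave
        dealtwith |= ready
        todeal -= ready
        print(sorted(ready), sorted(unihashes.values()))

This prints three waves, ['a'], ['b', 'c'] and ['d'], so the number of lookups scales with the depth of the dependency graph rather than the number of tasks.
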
@@ -1218,7 +1334,6 @@ class RunQueue: | |||
1218 | self.cfgData = cfgData | 1334 | self.cfgData = cfgData |
1219 | self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets) | 1335 | self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets) |
1220 | 1336 | ||
1221 | self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile" | ||
1222 | self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None | 1337 | self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None |
1223 | self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None | 1338 | self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None |
1224 | 1339 | ||
@@ -1237,30 +1352,40 @@ class RunQueue: | |||
1237 | self.worker = {} | 1352 | self.worker = {} |
1238 | self.fakeworker = {} | 1353 | self.fakeworker = {} |
1239 | 1354 | ||
1355 | @staticmethod | ||
1356 | def send_pickled_data(worker, data, name): | ||
1357 | msg = bytearray() | ||
1358 | msg.extend(b"<" + name.encode() + b">") | ||
1359 | pickled_data = pickle.dumps(data) | ||
1360 | msg.extend(len(pickled_data).to_bytes(4, 'big')) | ||
1361 | msg.extend(pickled_data) | ||
1362 | msg.extend(b"</" + name.encode() + b">") | ||
1363 | worker.stdin.write(msg) | ||
1364 | |||
1240 | def _start_worker(self, mc, fakeroot = False, rqexec = None): | 1365 | def _start_worker(self, mc, fakeroot = False, rqexec = None): |
1241 | logger.debug("Starting bitbake-worker") | 1366 | logger.debug("Starting bitbake-worker") |
1242 | magic = "decafbad" | 1367 | magic = "decafbad" |
1243 | if self.cooker.configuration.profile: | 1368 | if self.cooker.configuration.profile: |
1244 | magic = "decafbadbad" | 1369 | magic = "decafbadbad" |
1370 | fakerootlogs = None | ||
1371 | |||
1372 | workerscript = os.path.realpath(os.path.dirname(__file__) + "/../../bin/bitbake-worker") | ||
1245 | if fakeroot: | 1373 | if fakeroot: |
1246 | magic = magic + "beef" | 1374 | magic = magic + "beef" |
1247 | mcdata = self.cooker.databuilder.mcdata[mc] | 1375 | mcdata = self.cooker.databuilder.mcdata[mc] |
1248 | fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD")) | 1376 | fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD")) |
1249 | fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split() | 1377 | fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split() |
1250 | env = os.environ.copy() | 1378 | env = os.environ.copy() |
1251 | for key, value in (var.split('=') for var in fakerootenv): | 1379 | for key, value in (var.split('=',1) for var in fakerootenv): |
1252 | env[key] = value | 1380 | env[key] = value |
1253 | worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env) | 1381 | worker = subprocess.Popen(fakerootcmd + [sys.executable, workerscript, magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env) |
1382 | fakerootlogs = self.rqdata.dataCaches[mc].fakerootlogs | ||
1254 | else: | 1383 | else: |
1255 | worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE) | 1384 | worker = subprocess.Popen([sys.executable, workerscript, magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE) |
1256 | bb.utils.nonblockingfd(worker.stdout) | 1385 | bb.utils.nonblockingfd(worker.stdout) |
1257 | workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec) | 1386 | workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec, fakerootlogs=fakerootlogs) |
1258 | 1387 | ||
1259 | workerdata = { | 1388 | workerdata = { |
1260 | "taskdeps" : self.rqdata.dataCaches[mc].task_deps, | ||
1261 | "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv, | ||
1262 | "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs, | ||
1263 | "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv, | ||
1264 | "sigdata" : bb.parse.siggen.get_taskdata(), | 1389 | "sigdata" : bb.parse.siggen.get_taskdata(), |
1265 | "logdefaultlevel" : bb.msg.loggerDefaultLogLevel, | 1390 | "logdefaultlevel" : bb.msg.loggerDefaultLogLevel, |
1266 | "build_verbose_shell" : self.cooker.configuration.build_verbose_shell, | 1391 | "build_verbose_shell" : self.cooker.configuration.build_verbose_shell, |
@@ -1274,9 +1399,9 @@ class RunQueue: | |||
1274 | "umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"), | 1399 | "umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"), |
1275 | } | 1400 | } |
1276 | 1401 | ||
1277 | worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>") | 1402 | RunQueue.send_pickled_data(worker, self.cooker.configuration, "cookerconfig") |
1278 | worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>") | 1403 | RunQueue.send_pickled_data(worker, self.cooker.extraconfigdata, "extraconfigdata") |
1279 | worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>") | 1404 | RunQueue.send_pickled_data(worker, workerdata, "workerdata") |
1280 | worker.stdin.flush() | 1405 | worker.stdin.flush() |
1281 | 1406 | ||
1282 | return RunQueueWorker(worker, workerpipe) | 1407 | return RunQueueWorker(worker, workerpipe) |
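
send_pickled_data() frames each message as <name>, a 4-byte big-endian payload length, the pickled payload, then </name>. The length prefix lets the receiver take the payload verbatim even if the pickle bytes happen to contain something that looks like a closing tag. A sketch of both ends of the framing; the reader side is an assumption here (the real parsing lives in bitbake-worker):

    import io
    import pickle

    def send_pickled_data(stream, data, name):
        msg = bytearray(b"<" + name.encode() + b">")
        payload = pickle.dumps(data)
        msg.extend(len(payload).to_bytes(4, 'big'))
        msg.extend(payload)
        msg.extend(b"</" + name.encode() + b">")
        stream.write(msg)

    def read_pickled_data(stream, name):
        # hypothetical reader: consume the opening tag, exactly the advertised
        # payload length, then the closing tag
        tag = b"<" + name.encode() + b">"
        assert stream.read(len(tag)) == tag
        size = int.from_bytes(stream.read(4), 'big')
        payload = stream.read(size)
        assert stream.read(len(tag) + 1) == b"</" + name.encode() + b">"
        return pickle.loads(payload)

    buf = io.BytesIO()
    send_pickled_data(buf, {"umask": "022"}, "workerdata")
    buf.seek(0)
    print(read_pickled_data(buf, "workerdata"))   # {'umask': '022'}
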
@@ -1286,7 +1411,7 @@ class RunQueue: | |||
1286 | return | 1411 | return |
1287 | logger.debug("Teardown for bitbake-worker") | 1412 | logger.debug("Teardown for bitbake-worker") |
1288 | try: | 1413 | try: |
1289 | worker.process.stdin.write(b"<quit></quit>") | 1414 | RunQueue.send_pickled_data(worker.process, b"", "quit") |
1290 | worker.process.stdin.flush() | 1415 | worker.process.stdin.flush() |
1291 | worker.process.stdin.close() | 1416 | worker.process.stdin.close() |
1292 | except IOError: | 1417 | except IOError: |
@@ -1298,12 +1423,12 @@ class RunQueue: | |||
1298 | continue | 1423 | continue |
1299 | worker.pipe.close() | 1424 | worker.pipe.close() |
1300 | 1425 | ||
1301 | def start_worker(self): | 1426 | def start_worker(self, rqexec): |
1302 | if self.worker: | 1427 | if self.worker: |
1303 | self.teardown_workers() | 1428 | self.teardown_workers() |
1304 | self.teardown = False | 1429 | self.teardown = False |
1305 | for mc in self.rqdata.dataCaches: | 1430 | for mc in self.rqdata.dataCaches: |
1306 | self.worker[mc] = self._start_worker(mc) | 1431 | self.worker[mc] = self._start_worker(mc, False, rqexec) |
1307 | 1432 | ||
1308 | def start_fakeworker(self, rqexec, mc): | 1433 | def start_fakeworker(self, rqexec, mc): |
1309 | if not mc in self.fakeworker: | 1434 | if not mc in self.fakeworker: |
@@ -1345,15 +1470,7 @@ class RunQueue: | |||
1345 | if taskname is None: | 1470 | if taskname is None: |
1346 | taskname = tn | 1471 | taskname = tn |
1347 | 1472 | ||
1348 | if self.stamppolicy == "perfile": | 1473 | stampfile = bb.parse.siggen.stampfile_mcfn(taskname, taskfn) |
1349 | fulldeptree = False | ||
1350 | else: | ||
1351 | fulldeptree = True | ||
1352 | stampwhitelist = [] | ||
1353 | if self.stamppolicy == "whitelist": | ||
1354 | stampwhitelist = self.rqdata.stampfnwhitelist[mc] | ||
1355 | |||
1356 | stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn) | ||
1357 | 1474 | ||
1358 | # If the stamp is missing, it's not current | 1475 | # If the stamp is missing, it's not current |
1359 | if not os.access(stampfile, os.F_OK): | 1476 | if not os.access(stampfile, os.F_OK): |
@@ -1365,7 +1482,7 @@ class RunQueue: | |||
1365 | logger.debug2("%s.%s is nostamp\n", fn, taskname) | 1482 | logger.debug2("%s.%s is nostamp\n", fn, taskname) |
1366 | return False | 1483 | return False |
1367 | 1484 | ||
1368 | if taskname != "do_setscene" and taskname.endswith("_setscene"): | 1485 | if taskname.endswith("_setscene"): |
1369 | return True | 1486 | return True |
1370 | 1487 | ||
1371 | if cache is None: | 1488 | if cache is None: |
@@ -1376,15 +1493,15 @@ class RunQueue: | |||
1376 | for dep in self.rqdata.runtaskentries[tid].depends: | 1493 | for dep in self.rqdata.runtaskentries[tid].depends: |
1377 | if iscurrent: | 1494 | if iscurrent: |
1378 | (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep) | 1495 | (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep) |
1379 | stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2) | 1496 | stampfile2 = bb.parse.siggen.stampfile_mcfn(taskname2, taskfn2) |
1380 | stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2) | 1497 | stampfile3 = bb.parse.siggen.stampfile_mcfn(taskname2 + "_setscene", taskfn2) |
1381 | t2 = get_timestamp(stampfile2) | 1498 | t2 = get_timestamp(stampfile2) |
1382 | t3 = get_timestamp(stampfile3) | 1499 | t3 = get_timestamp(stampfile3) |
1383 | if t3 and not t2: | 1500 | if t3 and not t2: |
1384 | continue | 1501 | continue |
1385 | if t3 and t3 > t2: | 1502 | if t3 and t3 > t2: |
1386 | continue | 1503 | continue |
1387 | if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist): | 1504 | if fn == fn2: |
1388 | if not t2: | 1505 | if not t2: |
1389 | logger.debug2('Stampfile %s does not exist', stampfile2) | 1506 | logger.debug2('Stampfile %s does not exist', stampfile2) |
1390 | iscurrent = False | 1507 | iscurrent = False |
@@ -1434,10 +1551,11 @@ class RunQueue: | |||
1434 | """ | 1551 | """ |
1435 | Run the tasks in a queue prepared by rqdata.prepare() | 1552 | Run the tasks in a queue prepared by rqdata.prepare() |
1436 | Upon failure, optionally try to recover the build using any alternate providers | 1553 | Upon failure, optionally try to recover the build using any alternate providers |
1437 | (if the abort on failure configuration option isn't set) | 1554 | (if the halt on failure configuration option isn't set) |
1438 | """ | 1555 | """ |
1439 | 1556 | ||
1440 | retval = True | 1557 | retval = True |
1558 | bb.event.check_for_interrupts(self.cooker.data) | ||
1441 | 1559 | ||
1442 | if self.state is runQueuePrepare: | 1560 | if self.state is runQueuePrepare: |
1443 | # NOTE: if you add, remove or significantly refactor the stages of this | 1561 | # NOTE: if you add, remove or significantly refactor the stages of this |
@@ -1466,31 +1584,37 @@ class RunQueue: | |||
1466 | 1584 | ||
1467 | if not self.dm_event_handler_registered: | 1585 | if not self.dm_event_handler_registered: |
1468 | res = bb.event.register(self.dm_event_handler_name, | 1586 | res = bb.event.register(self.dm_event_handler_name, |
1469 | lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False, | 1587 | lambda x, y: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False, |
1470 | ('bb.event.HeartbeatEvent',), data=self.cfgData) | 1588 | ('bb.event.HeartbeatEvent',), data=self.cfgData) |
1471 | self.dm_event_handler_registered = True | 1589 | self.dm_event_handler_registered = True |
1472 | 1590 | ||
1473 | dump = self.cooker.configuration.dump_signatures | 1591 | self.rqdata.init_progress_reporter.next_stage() |
1474 | if dump: | 1592 | self.rqexe = RunQueueExecute(self) |
1593 | |||
1594 | dumpsigs = self.cooker.configuration.dump_signatures | ||
1595 | if dumpsigs: | ||
1475 | self.rqdata.init_progress_reporter.finish() | 1596 | self.rqdata.init_progress_reporter.finish() |
1476 | if 'printdiff' in dump: | 1597 | if 'printdiff' in dumpsigs: |
1477 | invalidtasks = self.print_diffscenetasks() | 1598 | self.invalidtasks_dump = self.print_diffscenetasks() |
1478 | self.dump_signatures(dump) | 1599 | self.state = runQueueDumpSigs |
1479 | if 'printdiff' in dump: | 1600 | |
1480 | self.write_diffscenetasks(invalidtasks) | 1601 | if self.state is runQueueDumpSigs: |
1602 | dumpsigs = self.cooker.configuration.dump_signatures | ||
1603 | retval = self.dump_signatures(dumpsigs) | ||
1604 | if retval is False: | ||
1605 | if 'printdiff' in dumpsigs: | ||
1606 | self.write_diffscenetasks(self.invalidtasks_dump) | ||
1481 | self.state = runQueueComplete | 1607 | self.state = runQueueComplete |
1482 | 1608 | ||
1483 | if self.state is runQueueSceneInit: | 1609 | if self.state is runQueueSceneInit: |
1484 | self.rqdata.init_progress_reporter.next_stage() | 1610 | self.start_worker(self.rqexe) |
1485 | self.start_worker() | 1611 | self.rqdata.init_progress_reporter.finish() |
1486 | self.rqdata.init_progress_reporter.next_stage() | ||
1487 | self.rqexe = RunQueueExecute(self) | ||
1488 | 1612 | ||
1489 | # If we don't have any setscene functions, skip execution | 1613 | # If we don't have any setscene functions, skip execution |
1490 | if len(self.rqdata.runq_setscene_tids) == 0: | 1614 | if not self.rqdata.runq_setscene_tids: |
1491 | logger.info('No setscene tasks') | 1615 | logger.info('No setscene tasks') |
1492 | for tid in self.rqdata.runtaskentries: | 1616 | for tid in self.rqdata.runtaskentries: |
1493 | if len(self.rqdata.runtaskentries[tid].depends) == 0: | 1617 | if not self.rqdata.runtaskentries[tid].depends: |
1494 | self.rqexe.setbuildable(tid) | 1618 | self.rqexe.setbuildable(tid) |
1495 | self.rqexe.tasks_notcovered.add(tid) | 1619 | self.rqexe.tasks_notcovered.add(tid) |
1496 | self.rqexe.sqdone = True | 1620 | self.rqexe.sqdone = True |
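
The new runQueueDumpSigs state relies on the _execute_runqueue convention that a handler returning something other than True/False (here 1.0) means "still busy, enter this state again", which keeps the UI event loop responsive while the signature-dump processes run. A toy sketch of that convention; names are illustrative:

    def dump_sigs_step(pending):
        if pending:               # pretend worker processes are still alive
            pending.pop()
            return 1.0            # busy: caller should re-enter this state
        return False              # finished: move to the next state

    pending = ["p1", "p2", "p3"]
    while dump_sigs_step(pending) is not False:
        pass                      # each iteration models one scheduler poll
    print("signature dump complete")
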
@@ -1563,43 +1687,62 @@ class RunQueue: | |||
1563 | else: | 1687 | else: |
1564 | self.rqexe.finish() | 1688 | self.rqexe.finish() |
1565 | 1689 | ||
1566 | def rq_dump_sigfn(self, fn, options): | 1690 | def _rq_dump_sigtid(self, tids): |
1567 | bb_cache = bb.cache.NoCache(self.cooker.databuilder) | 1691 | for tid in tids: |
1568 | mc = bb.runqueue.mc_from_tid(fn) | 1692 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
1569 | the_data = bb_cache.loadDataFull(fn, self.cooker.collections[mc].get_file_appends(fn)) | 1693 | dataCaches = self.rqdata.dataCaches |
1570 | siggen = bb.parse.siggen | 1694 | bb.parse.siggen.dump_sigtask(taskfn, taskname, dataCaches[mc].stamp[taskfn], True) |
1571 | dataCaches = self.rqdata.dataCaches | ||
1572 | siggen.dump_sigfn(fn, dataCaches, options) | ||
1573 | 1695 | ||
1574 | def dump_signatures(self, options): | 1696 | def dump_signatures(self, options): |
1575 | fns = set() | 1697 | if not hasattr(self, "dumpsigs_launched"): |
1576 | bb.note("Reparsing files to collect dependency data") | 1698 | if bb.cooker.CookerFeatures.RECIPE_SIGGEN_INFO not in self.cooker.featureset: |
1699 | bb.fatal("The dump signatures functionality needs the RECIPE_SIGGEN_INFO feature enabled") | ||
1577 | 1700 | ||
1578 | for tid in self.rqdata.runtaskentries: | 1701 | bb.note("Writing task signature files") |
1579 | fn = fn_from_tid(tid) | 1702 | |
1580 | fns.add(fn) | 1703 | max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1) |
1581 | 1704 | def chunkify(l, n): | |
1582 | max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1) | 1705 | return [l[i::n] for i in range(n)] |
1583 | # We cannot use the real multiprocessing.Pool easily due to some local data | 1706 | dumpsigs_tids = chunkify(list(self.rqdata.runtaskentries), max_process) |
1584 | # that can't be pickled. This is a cheap multi-process solution. | 1707 | |
1585 | launched = [] | 1708 | # We cannot use the real multiprocessing.Pool easily due to some local data |
1586 | while fns: | 1709 | # that can't be pickled. This is a cheap multi-process solution. |
1587 | if len(launched) < max_process: | 1710 | self.dumpsigs_launched = [] |
1588 | p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options)) | 1711 | |
1712 | for tids in dumpsigs_tids: | ||
1713 | p = Process(target=self._rq_dump_sigtid, args=(tids, )) | ||
1589 | p.start() | 1714 | p.start() |
1590 | launched.append(p) | 1715 | self.dumpsigs_launched.append(p) |
1591 | for q in launched: | 1716 | |
1592 | # The finished processes are joined when calling is_alive() | 1717 | return 1.0 |
1593 | if not q.is_alive(): | 1718 | |
1594 | launched.remove(q) | 1719 | for q in self.dumpsigs_launched: |
1595 | for p in launched: | 1720 | # The finished processes are joined when calling is_alive() |
1721 | if not q.is_alive(): | ||
1722 | self.dumpsigs_launched.remove(q) | ||
1723 | |||
1724 | if self.dumpsigs_launched: | ||
1725 | return 1.0 | ||
1726 | |||
1727 | for p in self.dumpsigs_launched: | ||
1596 | p.join() | 1728 | p.join() |
1597 | 1729 | ||
1598 | bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options) | 1730 | bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options) |
1599 | 1731 | ||
1600 | return | 1732 | return False |
1601 | 1733 | ||
1602 | def print_diffscenetasks(self): | 1734 | def print_diffscenetasks(self): |
1735 | def get_root_invalid_tasks(task, taskdepends, valid, noexec, visited_invalid): | ||
1736 | invalidtasks = [] | ||
1737 | for t in taskdepends[task].depends: | ||
1738 | if t not in valid and t not in visited_invalid: | ||
1739 | invalidtasks.extend(get_root_invalid_tasks(t, taskdepends, valid, noexec, visited_invalid)) | ||
1740 | visited_invalid.add(t) | ||
1741 | |||
1742 | direct_invalid = [t for t in taskdepends[task].depends if t not in valid] | ||
1743 | if not direct_invalid and task not in noexec: | ||
1744 | invalidtasks = [task] | ||
1745 | return invalidtasks | ||
1603 | 1746 | ||
1604 | noexec = [] | 1747 | noexec = [] |
1605 | tocheck = set() | 1748 | tocheck = set() |
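
dump_signatures() now splits the tid list round-robin across worker processes and polls them with is_alive() instead of blocking, returning 1.0 until every process has exited. A runnable sketch of both pieces, with a sleep standing in for the per-tid dump_sigtask() work:

    import time
    from multiprocessing import Process

    def chunkify(l, n):
        # round-robin split: chunkify([1, 2, 3, 4, 5], 2) -> [[1, 3, 5], [2, 4]]
        return [l[i::n] for i in range(n)]

    def dump_chunk(tids):
        time.sleep(0.1)           # stand-in for dumping each tid's signature

    if __name__ == '__main__':
        launched = [Process(target=dump_chunk, args=(chunk,))
                    for chunk in chunkify(list(range(10)), 4)]
        for p in launched:
            p.start()
        while launched:           # poll, as repeated dump_signatures() calls do
            launched = [p for p in launched if p.is_alive()]
            time.sleep(0.05)
        print("all signature dumps finished")
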
@@ -1633,46 +1776,49 @@ class RunQueue: | |||
1633 | valid_new.add(dep) | 1776 | valid_new.add(dep) |
1634 | 1777 | ||
1635 | invalidtasks = set() | 1778 | invalidtasks = set() |
1636 | for tid in self.rqdata.runtaskentries: | ||
1637 | if tid not in valid_new and tid not in noexec: | ||
1638 | invalidtasks.add(tid) | ||
1639 | 1779 | ||
1640 | found = set() | 1780 | toptasks = set(["{}:{}".format(t[3], t[2]) for t in self.rqdata.targets]) |
1641 | processed = set() | 1781 | for tid in toptasks: |
1642 | for tid in invalidtasks: | ||
1643 | toprocess = set([tid]) | 1782 | toprocess = set([tid]) |
1644 | while toprocess: | 1783 | while toprocess: |
1645 | next = set() | 1784 | next = set() |
1785 | visited_invalid = set() | ||
1646 | for t in toprocess: | 1786 | for t in toprocess: |
1647 | for dep in self.rqdata.runtaskentries[t].depends: | 1787 | if t not in valid_new and t not in noexec: |
1648 | if dep in invalidtasks: | 1788 | invalidtasks.update(get_root_invalid_tasks(t, self.rqdata.runtaskentries, valid_new, noexec, visited_invalid)) |
1649 | found.add(tid) | 1789 | continue |
1650 | if dep not in processed: | 1790 | if t in self.rqdata.runq_setscene_tids: |
1651 | processed.add(dep) | 1791 | for dep in self.rqexe.sqdata.sq_deps[t]: |
1652 | next.add(dep) | 1792 | next.add(dep) |
1793 | continue | ||
1794 | |||
1795 | for dep in self.rqdata.runtaskentries[t].depends: | ||
1796 | next.add(dep) | ||
1797 | |||
1653 | toprocess = next | 1798 | toprocess = next |
1654 | if tid in found: | ||
1655 | toprocess = set() | ||
1656 | 1799 | ||
1657 | tasklist = [] | 1800 | tasklist = [] |
1658 | for tid in invalidtasks.difference(found): | 1801 | for tid in invalidtasks: |
1659 | tasklist.append(tid) | 1802 | tasklist.append(tid) |
1660 | 1803 | ||
1661 | if tasklist: | 1804 | if tasklist: |
1662 | bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist)) | 1805 | bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist)) |
1663 | 1806 | ||
1664 | return invalidtasks.difference(found) | 1807 | return invalidtasks |
1665 | 1808 | ||
1666 | def write_diffscenetasks(self, invalidtasks): | 1809 | def write_diffscenetasks(self, invalidtasks): |
1810 | bb.siggen.check_siggen_version(bb.siggen) | ||
1667 | 1811 | ||
1668 | # Define recursion callback | 1812 | # Define recursion callback |
1669 | def recursecb(key, hash1, hash2): | 1813 | def recursecb(key, hash1, hash2): |
1670 | hashes = [hash1, hash2] | 1814 | hashes = [hash1, hash2] |
1815 | bb.debug(1, "Recursively looking for recipe {} hashes {}".format(key, hashes)) | ||
1671 | hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData) | 1816 | hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData) |
1817 | bb.debug(1, "Found hashfiles:\n{}".format(hashfiles)) | ||
1672 | 1818 | ||
1673 | recout = [] | 1819 | recout = [] |
1674 | if len(hashfiles) == 2: | 1820 | if len(hashfiles) == 2: |
1675 | out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb) | 1821 | out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb) |
1676 | recout.extend(list(' ' + l for l in out2)) | 1822 | recout.extend(list(' ' + l for l in out2)) |
1677 | else: | 1823 | else: |
1678 | recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2)) | 1824 | recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2)) |
@@ -1683,20 +1829,25 @@ class RunQueue: | |||
1683 | for tid in invalidtasks: | 1829 | for tid in invalidtasks: |
1684 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 1830 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
1685 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] | 1831 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] |
1686 | h = self.rqdata.runtaskentries[tid].hash | 1832 | h = self.rqdata.runtaskentries[tid].unihash |
1687 | matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData) | 1833 | bb.debug(1, "Looking for recipe {} task {}".format(pn, taskname)) |
1834 | matches = bb.siggen.find_siginfo(pn, taskname, [], self.cooker.databuilder.mcdata[mc]) | ||
1835 | bb.debug(1, "Found hashfiles:\n{}".format(matches)) | ||
1688 | match = None | 1836 | match = None |
1689 | for m in matches: | 1837 | for m in matches.values(): |
1690 | if h in m: | 1838 | if h in m['path']: |
1691 | match = m | 1839 | match = m['path'] |
1692 | if match is None: | 1840 | if match is None: |
1693 | bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h) | 1841 | bb.fatal("Can't find a task we're supposed to have written out? (hash: %s tid: %s)?" % (h, tid)) |
1694 | matches = {k : v for k, v in iter(matches.items()) if h not in k} | 1842 | matches = {k : v for k, v in iter(matches.items()) if h not in k} |
1843 | matches_local = {k : v for k, v in iter(matches.items()) if h not in k and not v['sstate']} | ||
1844 | if matches_local: | ||
1845 | matches = matches_local | ||
1695 | if matches: | 1846 | if matches: |
1696 | latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1] | 1847 | latestmatch = matches[sorted(matches.keys(), key=lambda h: matches[h]['time'])[-1]]['path'] |
1697 | prevh = __find_sha256__.search(latestmatch).group(0) | 1848 | prevh = __find_sha256__.search(latestmatch).group(0) |
1698 | output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb) | 1849 | output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb) |
1699 | bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output)) | 1850 | bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, most recent matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output)) |
1700 | 1851 | ||
1701 | 1852 | ||
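In the hunk above, find_siginfo() now returns a dict keyed by hash whose values carry 'path', 'time' and 'sstate' fields; the code prefers local (non-sstate) matches and then picks the newest by timestamp. The selection logic, restated on assumed sample data:

    # Hypothetical find_siginfo()-style result: hash -> metadata
    matches = {
        "aaa": {"path": "stamps/x.do_compile.sigdata.aaa", "time": 100, "sstate": False},
        "bbb": {"path": "sstate/x.do_compile.siginfo.bbb", "time": 200, "sstate": True},
    }
    local = {k: v for k, v in matches.items() if not v["sstate"]}
    pool = local or matches
    latest = pool[sorted(pool, key=lambda h: pool[h]["time"])[-1]]["path"]
    # -> "stamps/x.do_compile.sigdata.aaa": the newest local match wins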
1702 | class RunQueueExecute: | 1853 | class RunQueueExecute: |
@@ -1709,6 +1860,10 @@ class RunQueueExecute: | |||
1709 | 1860 | ||
1710 | self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1) | 1861 | self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1) |
1711 | self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed" | 1862 | self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed" |
1863 | self.max_cpu_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_CPU") | ||
1864 | self.max_io_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_IO") | ||
1865 | self.max_memory_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_MEMORY") | ||
1866 | self.max_loadfactor = self.cfgData.getVar("BB_LOADFACTOR_MAX") | ||
1712 | 1867 | ||
1713 | self.sq_buildable = set() | 1868 | self.sq_buildable = set() |
1714 | self.sq_running = set() | 1869 | self.sq_running = set() |
@@ -1726,6 +1881,8 @@ class RunQueueExecute: | |||
1726 | self.build_stamps2 = [] | 1881 | self.build_stamps2 = [] |
1727 | self.failed_tids = [] | 1882 | self.failed_tids = [] |
1728 | self.sq_deferred = {} | 1883 | self.sq_deferred = {} |
1884 | self.sq_needed_harddeps = set() | ||
1885 | self.sq_harddep_deferred = set() | ||
1729 | 1886 | ||
1730 | self.stampcache = {} | 1887 | self.stampcache = {} |
1731 | 1888 | ||
@@ -1733,17 +1890,39 @@ class RunQueueExecute: | |||
1733 | self.holdoff_need_update = True | 1890 | self.holdoff_need_update = True |
1734 | self.sqdone = False | 1891 | self.sqdone = False |
1735 | 1892 | ||
1736 | self.stats = RunQueueStats(len(self.rqdata.runtaskentries)) | 1893 | self.stats = RunQueueStats(len(self.rqdata.runtaskentries), len(self.rqdata.runq_setscene_tids)) |
1737 | self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids)) | ||
1738 | |||
1739 | for mc in rq.worker: | ||
1740 | rq.worker[mc].pipe.setrunqueueexec(self) | ||
1741 | for mc in rq.fakeworker: | ||
1742 | rq.fakeworker[mc].pipe.setrunqueueexec(self) | ||
1743 | 1894 | ||
1744 | if self.number_tasks <= 0: | 1895 | if self.number_tasks <= 0: |
1745 | bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks) | 1896 | bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks) |
1746 | 1897 | ||
1898 | lower_limit = 1.0 | ||
1899 | upper_limit = 1000000.0 | ||
1900 | if self.max_cpu_pressure: | ||
1901 | self.max_cpu_pressure = float(self.max_cpu_pressure) | ||
1902 | if self.max_cpu_pressure < lower_limit: | ||
1903 | bb.fatal("Invalid BB_PRESSURE_MAX_CPU %s, minimum value is %s." % (self.max_cpu_pressure, lower_limit)) | ||
1904 | if self.max_cpu_pressure > upper_limit: | ||
1905 | bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_CPU is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_cpu_pressure)) | ||
1906 | |||
1907 | if self.max_io_pressure: | ||
1908 | self.max_io_pressure = float(self.max_io_pressure) | ||
1909 | if self.max_io_pressure < lower_limit: | ||
1910 | bb.fatal("Invalid BB_PRESSURE_MAX_IO %s, minimum value is %s." % (self.max_io_pressure, lower_limit)) | ||
1911 | if self.max_io_pressure > upper_limit: | ||
1912 | bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_IO is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure)) | ||
1913 | |||
1914 | if self.max_memory_pressure: | ||
1915 | self.max_memory_pressure = float(self.max_memory_pressure) | ||
1916 | if self.max_memory_pressure < lower_limit: | ||
1917 | bb.fatal("Invalid BB_PRESSURE_MAX_MEMORY %s, minimum value is %s." % (self.max_memory_pressure, lower_limit)) | ||
1918 | if self.max_memory_pressure > upper_limit: | ||
1919 | bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_MEMORY is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure)) | ||
1920 | |||
1921 | if self.max_loadfactor: | ||
1922 | self.max_loadfactor = float(self.max_loadfactor) | ||
1923 | if self.max_loadfactor <= 0: | ||
1924 | bb.fatal("Invalid BB_LOADFACTOR_MAX %s, needs to be greater than zero." % (self.max_loadfactor)) | ||
1925 | |||
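The four new knobs above are validated up front: pressure limits below 1.0 are fatal, implausibly high ones only warn (they effectively disable regulation), and the load factor must be positive. The same shape as a standalone helper (a sketch, not BitBake API; check_pressure_limit is a hypothetical name):

    def check_pressure_limit(name, value, lower=1.0, upper=1000000.0):
        # Validation pattern used for the BB_PRESSURE_MAX_* settings.
        if value is None:
            return None       # unset: regulation disabled
        value = float(value)
        if value < lower:
            raise ValueError("Invalid %s %s, minimum value is %s." % (name, value, lower))
        if value > upper:
            print("Warning: %s=%s leaves the build largely unregulated" % (name, value))
        return value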
1747 | # List of setscene tasks which we've covered | 1926 | # List of setscene tasks which we've covered |
1748 | self.scenequeue_covered = set() | 1927 | self.scenequeue_covered = set() |
1749 | # List of tasks which are covered (including setscene ones) | 1928 | # List of tasks which are covered (including setscene ones) |
@@ -1753,11 +1932,6 @@ class RunQueueExecute: | |||
1753 | self.tasks_notcovered = set() | 1932 | self.tasks_notcovered = set() |
1754 | self.scenequeue_notneeded = set() | 1933 | self.scenequeue_notneeded = set() |
1755 | 1934 | ||
1756 | # We can't skip specified target tasks which aren't setscene tasks | ||
1757 | self.cantskip = set(self.rqdata.target_tids) | ||
1758 | self.cantskip.difference_update(self.rqdata.runq_setscene_tids) | ||
1759 | self.cantskip.intersection_update(self.rqdata.runtaskentries) | ||
1760 | |||
1761 | schedulers = self.get_schedulers() | 1935 | schedulers = self.get_schedulers() |
1762 | for scheduler in schedulers: | 1936 | for scheduler in schedulers: |
1763 | if self.scheduler == scheduler.name: | 1937 | if self.scheduler == scheduler.name: |
@@ -1768,11 +1942,29 @@ class RunQueueExecute: | |||
1768 | bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" % | 1942 | bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" % |
1769 | (self.scheduler, ", ".join(obj.name for obj in schedulers))) | 1943 | (self.scheduler, ", ".join(obj.name for obj in schedulers))) |
1770 | 1944 | ||
1771 | #if len(self.rqdata.runq_setscene_tids) > 0: | 1945 | #if self.rqdata.runq_setscene_tids: |
1772 | self.sqdata = SQData() | 1946 | self.sqdata = SQData() |
1773 | build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self) | 1947 | build_scenequeue_data(self.sqdata, self.rqdata, self) |
1948 | |||
1949 | update_scenequeue_data(self.sqdata.sq_revdeps, self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=True) | ||
1950 | |||
1951 | # Compute a list of 'stale' sstate tasks where the current hash does not match the one | ||
1952 | # in any stamp files. Pass the list out to metadata as an event. | ||
1953 | found = {} | ||
1954 | for tid in self.rqdata.runq_setscene_tids: | ||
1955 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | ||
1956 | stamps = bb.build.find_stale_stamps(taskname, taskfn) | ||
1957 | if stamps: | ||
1958 | if mc not in found: | ||
1959 | found[mc] = {} | ||
1960 | found[mc][tid] = stamps | ||
1961 | for mc in found: | ||
1962 | event = bb.event.StaleSetSceneTasks(found[mc]) | ||
1963 | bb.event.fire(event, self.cooker.databuilder.mcdata[mc]) | ||
1964 | |||
1965 | self.build_taskdepdata_cache() | ||
1774 | 1966 | ||
1775 | def runqueue_process_waitpid(self, task, status): | 1967 | def runqueue_process_waitpid(self, task, status, fakerootlog=None): |
1776 | 1968 | ||
1777 | # self.build_stamps[pid] may not exist when use shared work directory. | 1969 | # self.build_stamps[pid] may not exist when use shared work directory. |
1778 | if task in self.build_stamps: | 1970 | if task in self.build_stamps: |
@@ -1785,9 +1977,10 @@ class RunQueueExecute: | |||
1785 | else: | 1977 | else: |
1786 | self.sq_task_complete(task) | 1978 | self.sq_task_complete(task) |
1787 | self.sq_live.remove(task) | 1979 | self.sq_live.remove(task) |
1980 | self.stats.updateActiveSetscene(len(self.sq_live)) | ||
1788 | else: | 1981 | else: |
1789 | if status != 0: | 1982 | if status != 0: |
1790 | self.task_fail(task, status) | 1983 | self.task_fail(task, status, fakerootlog=fakerootlog) |
1791 | else: | 1984 | else: |
1792 | self.task_complete(task) | 1985 | self.task_complete(task) |
1793 | return True | 1986 | return True |
@@ -1795,20 +1988,20 @@ class RunQueueExecute: | |||
1795 | def finish_now(self): | 1988 | def finish_now(self): |
1796 | for mc in self.rq.worker: | 1989 | for mc in self.rq.worker: |
1797 | try: | 1990 | try: |
1798 | self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>") | 1991 | RunQueue.send_pickled_data(self.rq.worker[mc].process, b"", "finishnow") |
1799 | self.rq.worker[mc].process.stdin.flush() | 1992 | self.rq.worker[mc].process.stdin.flush() |
1800 | except IOError: | 1993 | except IOError: |
1801 | # worker must have died? | 1994 | # worker must have died? |
1802 | pass | 1995 | pass |
1803 | for mc in self.rq.fakeworker: | 1996 | for mc in self.rq.fakeworker: |
1804 | try: | 1997 | try: |
1805 | self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>") | 1998 | RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, b"", "finishnow") |
1806 | self.rq.fakeworker[mc].process.stdin.flush() | 1999 | self.rq.fakeworker[mc].process.stdin.flush() |
1807 | except IOError: | 2000 | except IOError: |
1808 | # worker must have died? | 2001 | # worker must have died? |
1809 | pass | 2002 | pass |
1810 | 2003 | ||
1811 | if len(self.failed_tids) != 0: | 2004 | if self.failed_tids: |
1812 | self.rq.state = runQueueFailed | 2005 | self.rq.state = runQueueFailed |
1813 | return | 2006 | return |
1814 | 2007 | ||
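The inline stdin writes (b"<finishnow></finishnow>", and the b"<runtask>" + pickle + b"</runtask>" seen later) are replaced throughout by a single RunQueue.send_pickled_data() helper. Its body is outside this diff's hunks; judging only from the call sites it presumably serialises the payload and frames it in <name>...</name> tags on the worker's stdin. A sketch under that assumption (the real helper may also length-prefix the payload):

    import pickle

    def send_pickled_data(worker_process, data, name):
        # Assumed framing: <name> + pickled payload + </name>.
        tag = name.encode("utf-8")
        payload = pickle.dumps(data)
        worker_process.stdin.write(b"<" + tag + b">" + payload + b"</" + tag + b">")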
@@ -1818,13 +2011,13 @@ class RunQueueExecute: | |||
1818 | def finish(self): | 2011 | def finish(self): |
1819 | self.rq.state = runQueueCleanUp | 2012 | self.rq.state = runQueueCleanUp |
1820 | 2013 | ||
1821 | active = self.stats.active + self.sq_stats.active | 2014 | active = self.stats.active + len(self.sq_live) |
1822 | if active > 0: | 2015 | if active > 0: |
1823 | bb.event.fire(runQueueExitWait(active), self.cfgData) | 2016 | bb.event.fire(runQueueExitWait(active), self.cfgData) |
1824 | self.rq.read_workers() | 2017 | self.rq.read_workers() |
1825 | return self.rq.active_fds() | 2018 | return self.rq.active_fds() |
1826 | 2019 | ||
1827 | if len(self.failed_tids) != 0: | 2020 | if self.failed_tids: |
1828 | self.rq.state = runQueueFailed | 2021 | self.rq.state = runQueueFailed |
1829 | return True | 2022 | return True |
1830 | 2023 | ||
@@ -1851,7 +2044,7 @@ class RunQueueExecute: | |||
1851 | return valid | 2044 | return valid |
1852 | 2045 | ||
1853 | def can_start_task(self): | 2046 | def can_start_task(self): |
1854 | active = self.stats.active + self.sq_stats.active | 2047 | active = self.stats.active + len(self.sq_live) |
1855 | can_start = active < self.number_tasks | 2048 | can_start = active < self.number_tasks |
1856 | return can_start | 2049 | return can_start |
1857 | 2050 | ||
@@ -1871,8 +2064,7 @@ class RunQueueExecute: | |||
1871 | try: | 2064 | try: |
1872 | module = __import__(modname, fromlist=(name,)) | 2065 | module = __import__(modname, fromlist=(name,)) |
1873 | except ImportError as exc: | 2066 | except ImportError as exc: |
1874 | logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc)) | 2067 | bb.fatal("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc)) |
1875 | raise SystemExit(1) | ||
1876 | else: | 2068 | else: |
1877 | schedulers.add(getattr(module, name)) | 2069 | schedulers.add(getattr(module, name)) |
1878 | return schedulers | 2070 | return schedulers |
@@ -1902,21 +2094,52 @@ class RunQueueExecute: | |||
1902 | self.setbuildable(revdep) | 2094 | self.setbuildable(revdep) |
1903 | logger.debug("Marking task %s as buildable", revdep) | 2095 | logger.debug("Marking task %s as buildable", revdep) |
1904 | 2096 | ||
2097 | found = None | ||
2098 | for t in sorted(self.sq_deferred.copy()): | ||
2099 | if self.sq_deferred[t] == task: | ||
2100 | # Allow the next deferred task to run. Any other deferred tasks should be deferred after that task. | ||
2101 | # We shouldn't allow all to run at once as it is prone to races. | ||
2102 | if not found: | ||
2103 | bb.debug(1, "Deferred task %s now buildable" % t) | ||
2104 | del self.sq_deferred[t] | ||
2105 | update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False) | ||
2106 | found = t | ||
2107 | else: | ||
2108 | bb.debug(1, "Deferring %s after %s" % (t, found)) | ||
2109 | self.sq_deferred[t] = found | ||
2110 | |||
1905 | def task_complete(self, task): | 2111 | def task_complete(self, task): |
1906 | self.stats.taskCompleted() | 2112 | self.stats.taskCompleted() |
1907 | bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData) | 2113 | bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData) |
1908 | self.task_completeoutright(task) | 2114 | self.task_completeoutright(task) |
1909 | self.runq_tasksrun.add(task) | 2115 | self.runq_tasksrun.add(task) |
1910 | 2116 | ||
1911 | def task_fail(self, task, exitcode): | 2117 | def task_fail(self, task, exitcode, fakerootlog=None): |
1912 | """ | 2118 | """ |
1913 | Called when a task has failed | 2119 | Called when a task has failed |
1914 | Updates the state engine with the failure | 2120 | Updates the state engine with the failure |
1915 | """ | 2121 | """ |
1916 | self.stats.taskFailed() | 2122 | self.stats.taskFailed() |
1917 | self.failed_tids.append(task) | 2123 | self.failed_tids.append(task) |
1918 | bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData) | 2124 | |
1919 | if self.rqdata.taskData[''].abort: | 2125 | fakeroot_log = [] |
2126 | if fakerootlog and os.path.exists(fakerootlog): | ||
2127 | with open(fakerootlog) as fakeroot_log_file: | ||
2128 | fakeroot_failed = False | ||
2129 | for line in reversed(fakeroot_log_file.readlines()): | ||
2130 | for fakeroot_error in ['mismatch', 'error', 'fatal']: | ||
2131 | if fakeroot_error in line.lower(): | ||
2132 | fakeroot_failed = True | ||
2133 | if 'doing new pid setup and server start' in line: | ||
2134 | break | ||
2135 | fakeroot_log.append(line) | ||
2136 | |||
2137 | if not fakeroot_failed: | ||
2138 | fakeroot_log = [] | ||
2139 | |||
2140 | bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=("".join(fakeroot_log) or None)), self.cfgData) | ||
2141 | |||
2142 | if self.rqdata.taskData[''].halt: | ||
1920 | self.rq.state = runQueueCleanUp | 2143 | self.rq.state = runQueueCleanUp |
1921 | 2144 | ||
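task_fail() now mines the pseudo (fakeroot) log on failure: it scans the log backwards, collecting lines until the most recent 'doing new pid setup and server start' marker, and attaches the excerpt to runQueueTaskFailed only if a failure keyword ('mismatch', 'error', 'fatal') appeared in it. Note the excerpt is accumulated newest-first, mirroring the reversed scan. The scan as a standalone sketch (hypothetical function name):

    def extract_fakeroot_failure(lines):
        # Walk backwards to the last server start; keep the tail only if
        # it contains a failure keyword.
        excerpt, failed = [], False
        for line in reversed(lines):
            if any(err in line.lower() for err in ("mismatch", "error", "fatal")):
                failed = True
            if "doing new pid setup and server start" in line:
                break
            excerpt.append(line)
        return excerpt if failed else []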
1922 | def task_skip(self, task, reason): | 2145 | def task_skip(self, task, reason): |
@@ -1931,7 +2154,7 @@ class RunQueueExecute: | |||
1931 | err = False | 2154 | err = False |
1932 | if not self.sqdone: | 2155 | if not self.sqdone: |
1933 | logger.debug('We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered))) | 2156 | logger.debug('We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered))) |
1934 | completeevent = sceneQueueComplete(self.sq_stats, self.rq) | 2157 | completeevent = sceneQueueComplete(self.stats, self.rq) |
1935 | bb.event.fire(completeevent, self.cfgData) | 2158 | bb.event.fire(completeevent, self.cfgData) |
1936 | if self.sq_deferred: | 2159 | if self.sq_deferred: |
1937 | logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred)) | 2160 | logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred)) |
@@ -1943,6 +2166,10 @@ class RunQueueExecute: | |||
1943 | logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks)) | 2166 | logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks)) |
1944 | err = True | 2167 | err = True |
1945 | 2168 | ||
2169 | for tid in self.scenequeue_covered.intersection(self.scenequeue_notcovered): | ||
2170 | # No task should end up in both covered and uncovered, that is a bug. | ||
2171 | logger.error("Setscene task %s in both covered and notcovered." % tid) | ||
2172 | |||
1946 | for tid in self.rqdata.runq_setscene_tids: | 2173 | for tid in self.rqdata.runq_setscene_tids: |
1947 | if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered: | 2174 | if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered: |
1948 | err = True | 2175 | err = True |
@@ -1961,7 +2188,7 @@ class RunQueueExecute: | |||
1961 | if x not in self.tasks_scenequeue_done: | 2188 | if x not in self.tasks_scenequeue_done: |
1962 | logger.error("Task %s was never processed by the setscene code" % x) | 2189 | logger.error("Task %s was never processed by the setscene code" % x) |
1963 | err = True | 2190 | err = True |
1964 | if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable: | 2191 | if not self.rqdata.runtaskentries[x].depends and x not in self.runq_buildable: |
1965 | logger.error("Task %s was never marked as buildable by the setscene code" % x) | 2192 | logger.error("Task %s was never marked as buildable by the setscene code" % x) |
1966 | err = True | 2193 | err = True |
1967 | return err | 2194 | return err |
@@ -1979,13 +2206,24 @@ class RunQueueExecute: | |||
1979 | if not hasattr(self, "sorted_setscene_tids"): | 2206 | if not hasattr(self, "sorted_setscene_tids"): |
1980 | # Don't want to sort this set every execution | 2207 | # Don't want to sort this set every execution |
1981 | self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids) | 2208 | self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids) |
2209 | # Resume looping where we left off when we returned to feed the mainloop | ||
2210 | self.setscene_tids_generator = itertools.cycle(self.rqdata.runq_setscene_tids) | ||
1982 | 2211 | ||
1983 | task = None | 2212 | task = None |
1984 | if not self.sqdone and self.can_start_task(): | 2213 | if not self.sqdone and self.can_start_task(): |
1985 | # Find the next setscene to run | 2214 | loopcount = 0 |
1986 | for nexttask in self.sorted_setscene_tids: | 2215 | # Find the next setscene to run, exit the loop when we've processed all tids or found something to execute |
1987 | if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values(): | 2216 | while loopcount < len(self.rqdata.runq_setscene_tids): |
1988 | if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]): | 2217 | loopcount += 1 |
2218 | nexttask = next(self.setscene_tids_generator) | ||
2219 | if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values() and nexttask not in self.sq_harddep_deferred: | ||
2220 | if nexttask in self.sq_deferred and self.sq_deferred[nexttask] not in self.runq_complete: | ||
2221 | # Skip deferred tasks quickly before the 'expensive' tests below - this is key to performant multiconfig builds | ||
2222 | continue | ||
2223 | if nexttask not in self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and \ | ||
2224 | nexttask not in self.sq_needed_harddeps and \ | ||
2225 | self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and \ | ||
2226 | self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]): | ||
1989 | if nexttask not in self.rqdata.target_tids: | 2227 | if nexttask not in self.rqdata.target_tids: |
1990 | logger.debug2("Skipping setscene for task %s" % nexttask) | 2228 | logger.debug2("Skipping setscene for task %s" % nexttask) |
1991 | self.sq_task_skip(nexttask) | 2229 | self.sq_task_skip(nexttask) |
@@ -1993,13 +2231,25 @@ class RunQueueExecute: | |||
1993 | if nexttask in self.sq_deferred: | 2231 | if nexttask in self.sq_deferred: |
1994 | del self.sq_deferred[nexttask] | 2232 | del self.sq_deferred[nexttask] |
1995 | return True | 2233 | return True |
2234 | if nexttask in self.sqdata.sq_harddeps_rev and not self.sqdata.sq_harddeps_rev[nexttask].issubset(self.scenequeue_covered | self.scenequeue_notcovered): | ||
2235 | logger.debug2("Deferring %s due to hard dependencies" % nexttask) | ||
2236 | updated = False | ||
2237 | for dep in self.sqdata.sq_harddeps_rev[nexttask]: | ||
2238 | if dep not in self.sq_needed_harddeps: | ||
2239 | logger.debug2("Enabling task %s as it is a hard dependency" % dep) | ||
2240 | self.sq_buildable.add(dep) | ||
2241 | self.sq_needed_harddeps.add(dep) | ||
2242 | updated = True | ||
2243 | self.sq_harddep_deferred.add(nexttask) | ||
2244 | if updated: | ||
2245 | return True | ||
2246 | continue | ||
1996 | # If covered tasks are running, need to wait for them to complete | 2247 | # If covered tasks are running, need to wait for them to complete |
1997 | for t in self.sqdata.sq_covered_tasks[nexttask]: | 2248 | for t in self.sqdata.sq_covered_tasks[nexttask]: |
1998 | if t in self.runq_running and t not in self.runq_complete: | 2249 | if t in self.runq_running and t not in self.runq_complete: |
1999 | continue | 2250 | continue |
2000 | if nexttask in self.sq_deferred: | 2251 | if nexttask in self.sq_deferred: |
2001 | if self.sq_deferred[nexttask] not in self.runq_complete: | 2252 | # Deferred tasks that were still deferred were skipped above so we now need to process |
2002 | continue | ||
2003 | logger.debug("Task %s no longer deferred" % nexttask) | 2253 | logger.debug("Task %s no longer deferred" % nexttask) |
2004 | del self.sq_deferred[nexttask] | 2254 | del self.sq_deferred[nexttask] |
2005 | valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False) | 2255 | valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False) |
@@ -2007,8 +2257,6 @@ class RunQueueExecute: | |||
2007 | logger.debug("%s didn't become valid, skipping setscene" % nexttask) | 2257 | logger.debug("%s didn't become valid, skipping setscene" % nexttask) |
2008 | self.sq_task_failoutright(nexttask) | 2258 | self.sq_task_failoutright(nexttask) |
2009 | return True | 2259 | return True |
2010 | else: | ||
2011 | self.sqdata.outrightfail.remove(nexttask) | ||
2012 | if nexttask in self.sqdata.outrightfail: | 2260 | if nexttask in self.sqdata.outrightfail: |
2013 | logger.debug2('No package found, so skipping setscene task %s', nexttask) | 2261 | logger.debug2('No package found, so skipping setscene task %s', nexttask) |
2014 | self.sq_task_failoutright(nexttask) | 2262 | self.sq_task_failoutright(nexttask) |
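Task selection above keeps a persistent itertools.cycle over the setscene tids, so each scheduler pass resumes scanning where the previous one stopped instead of restarting from the top of the sorted list, and loopcount caps a pass at one full revolution. The resumable-scan pattern in isolation (hypothetical task ids):

    import itertools

    tids = ["a", "b", "c"]           # hypothetical setscene task ids
    scan = itertools.cycle(tids)     # survives between calls

    def find_next(ready):
        # Examine each tid at most once per call, starting just after
        # the point where the previous call left off.
        for _ in range(len(tids)):
            tid = next(scan)
            if tid in ready:
                return tid
        return None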
@@ -2040,28 +2288,42 @@ class RunQueueExecute: | |||
2040 | self.sq_task_failoutright(task) | 2288 | self.sq_task_failoutright(task) |
2041 | return True | 2289 | return True |
2042 | 2290 | ||
2043 | startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq) | 2291 | startevent = sceneQueueTaskStarted(task, self.stats, self.rq) |
2044 | bb.event.fire(startevent, self.cfgData) | 2292 | bb.event.fire(startevent, self.cfgData) |
2045 | 2293 | ||
2046 | taskdepdata = self.sq_build_taskdepdata(task) | ||
2047 | |||
2048 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] | 2294 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
2049 | taskhash = self.rqdata.get_task_hash(task) | 2295 | realfn = bb.cache.virtualfn2realfn(taskfn)[0] |
2050 | unihash = self.rqdata.get_task_unihash(task) | 2296 | runtask = { |
2297 | 'fn' : taskfn, | ||
2298 | 'task' : task, | ||
2299 | 'taskname' : taskname, | ||
2300 | 'taskhash' : self.rqdata.get_task_hash(task), | ||
2301 | 'unihash' : self.rqdata.get_task_unihash(task), | ||
2302 | 'quieterrors' : True, | ||
2303 | 'appends' : self.cooker.collections[mc].get_file_appends(taskfn), | ||
2304 | 'layername' : self.cooker.collections[mc].calc_bbfile_priority(realfn)[2], | ||
2305 | 'taskdepdata' : self.sq_build_taskdepdata(task), | ||
2306 | 'dry_run' : False, | ||
2307 | 'taskdep': taskdep, | ||
2308 | 'fakerootenv' : self.rqdata.dataCaches[mc].fakerootenv[taskfn], | ||
2309 | 'fakerootdirs' : self.rqdata.dataCaches[mc].fakerootdirs[taskfn], | ||
2310 | 'fakerootnoenv' : self.rqdata.dataCaches[mc].fakerootnoenv[taskfn] | ||
2311 | } | ||
2312 | |||
2051 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: | 2313 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: |
2052 | if not mc in self.rq.fakeworker: | 2314 | if not mc in self.rq.fakeworker: |
2053 | self.rq.start_fakeworker(self, mc) | 2315 | self.rq.start_fakeworker(self, mc) |
2054 | self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>") | 2316 | RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, runtask, "runtask") |
2055 | self.rq.fakeworker[mc].process.stdin.flush() | 2317 | self.rq.fakeworker[mc].process.stdin.flush() |
2056 | else: | 2318 | else: |
2057 | self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>") | 2319 | RunQueue.send_pickled_data(self.rq.worker[mc].process, runtask, "runtask") |
2058 | self.rq.worker[mc].process.stdin.flush() | 2320 | self.rq.worker[mc].process.stdin.flush() |
2059 | 2321 | ||
2060 | self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True) | 2322 | self.build_stamps[task] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) |
2061 | self.build_stamps2.append(self.build_stamps[task]) | 2323 | self.build_stamps2.append(self.build_stamps[task]) |
2062 | self.sq_running.add(task) | 2324 | self.sq_running.add(task) |
2063 | self.sq_live.add(task) | 2325 | self.sq_live.add(task) |
2064 | self.sq_stats.taskActive() | 2326 | self.stats.updateActiveSetscene(len(self.sq_live)) |
2065 | if self.can_start_task(): | 2327 | if self.can_start_task(): |
2066 | return True | 2328 | return True |
2067 | 2329 | ||
@@ -2092,9 +2354,9 @@ class RunQueueExecute: | |||
2092 | if task is not None: | 2354 | if task is not None: |
2093 | (mc, fn, taskname, taskfn) = split_tid_mcfn(task) | 2355 | (mc, fn, taskname, taskfn) = split_tid_mcfn(task) |
2094 | 2356 | ||
2095 | if self.rqdata.setscenewhitelist is not None: | 2357 | if self.rqdata.setscene_ignore_tasks is not None: |
2096 | if self.check_setscenewhitelist(task): | 2358 | if self.check_setscene_ignore_tasks(task): |
2097 | self.task_fail(task, "setscene whitelist") | 2359 | self.task_fail(task, "setscene ignore_tasks") |
2098 | return True | 2360 | return True |
2099 | 2361 | ||
2100 | if task in self.tasks_covered: | 2362 | if task in self.tasks_covered: |
@@ -2117,18 +2379,32 @@ class RunQueueExecute: | |||
2117 | self.runq_running.add(task) | 2379 | self.runq_running.add(task) |
2118 | self.stats.taskActive() | 2380 | self.stats.taskActive() |
2119 | if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): | 2381 | if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): |
2120 | bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn) | 2382 | bb.build.make_stamp_mcfn(taskname, taskfn) |
2121 | self.task_complete(task) | 2383 | self.task_complete(task) |
2122 | return True | 2384 | return True |
2123 | else: | 2385 | else: |
2124 | startevent = runQueueTaskStarted(task, self.stats, self.rq) | 2386 | startevent = runQueueTaskStarted(task, self.stats, self.rq) |
2125 | bb.event.fire(startevent, self.cfgData) | 2387 | bb.event.fire(startevent, self.cfgData) |
2126 | 2388 | ||
2127 | taskdepdata = self.build_taskdepdata(task) | ||
2128 | |||
2129 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] | 2389 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
2130 | taskhash = self.rqdata.get_task_hash(task) | 2390 | realfn = bb.cache.virtualfn2realfn(taskfn)[0] |
2131 | unihash = self.rqdata.get_task_unihash(task) | 2391 | runtask = { |
2392 | 'fn' : taskfn, | ||
2393 | 'task' : task, | ||
2394 | 'taskname' : taskname, | ||
2395 | 'taskhash' : self.rqdata.get_task_hash(task), | ||
2396 | 'unihash' : self.rqdata.get_task_unihash(task), | ||
2397 | 'quieterrors' : False, | ||
2398 | 'appends' : self.cooker.collections[mc].get_file_appends(taskfn), | ||
2399 | 'layername' : self.cooker.collections[mc].calc_bbfile_priority(realfn)[2], | ||
2400 | 'taskdepdata' : self.build_taskdepdata(task), | ||
2401 | 'dry_run' : self.rqdata.setscene_enforce, | ||
2402 | 'taskdep': taskdep, | ||
2403 | 'fakerootenv' : self.rqdata.dataCaches[mc].fakerootenv[taskfn], | ||
2404 | 'fakerootdirs' : self.rqdata.dataCaches[mc].fakerootdirs[taskfn], | ||
2405 | 'fakerootnoenv' : self.rqdata.dataCaches[mc].fakerootnoenv[taskfn] | ||
2406 | } | ||
2407 | |||
2132 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): | 2408 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): |
2133 | if not mc in self.rq.fakeworker: | 2409 | if not mc in self.rq.fakeworker: |
2134 | try: | 2410 | try: |
@@ -2138,31 +2414,31 @@ class RunQueueExecute: | |||
2138 | self.rq.state = runQueueFailed | 2414 | self.rq.state = runQueueFailed |
2139 | self.stats.taskFailed() | 2415 | self.stats.taskFailed() |
2140 | return True | 2416 | return True |
2141 | self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>") | 2417 | RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, runtask, "runtask") |
2142 | self.rq.fakeworker[mc].process.stdin.flush() | 2418 | self.rq.fakeworker[mc].process.stdin.flush() |
2143 | else: | 2419 | else: |
2144 | self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>") | 2420 | RunQueue.send_pickled_data(self.rq.worker[mc].process, runtask, "runtask") |
2145 | self.rq.worker[mc].process.stdin.flush() | 2421 | self.rq.worker[mc].process.stdin.flush() |
2146 | 2422 | ||
2147 | self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True) | 2423 | self.build_stamps[task] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) |
2148 | self.build_stamps2.append(self.build_stamps[task]) | 2424 | self.build_stamps2.append(self.build_stamps[task]) |
2149 | self.runq_running.add(task) | 2425 | self.runq_running.add(task) |
2150 | self.stats.taskActive() | 2426 | self.stats.taskActive() |
2151 | if self.can_start_task(): | 2427 | if self.can_start_task(): |
2152 | return True | 2428 | return True |
2153 | 2429 | ||
2154 | if self.stats.active > 0 or self.sq_stats.active > 0: | 2430 | if self.stats.active > 0 or self.sq_live: |
2155 | self.rq.read_workers() | 2431 | self.rq.read_workers() |
2156 | return self.rq.active_fds() | 2432 | return self.rq.active_fds() |
2157 | 2433 | ||
2158 | # No more tasks can be run. If we have deferred setscene tasks we should run them. | 2434 | # No more tasks can be run. If we have deferred setscene tasks we should run them. |
2159 | if self.sq_deferred: | 2435 | if self.sq_deferred: |
2160 | tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0]) | 2436 | deferred_tid = list(self.sq_deferred.keys())[0] |
2161 | logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid) | 2437 | blocking_tid = self.sq_deferred.pop(deferred_tid) |
2162 | self.sq_task_failoutright(tid) | 2438 | logger.warning("Runqueue deadlocked on deferred tasks, forcing task %s blocked by %s" % (deferred_tid, blocking_tid)) |
2163 | return True | 2439 | return True |
2164 | 2440 | ||
2165 | if len(self.failed_tids) != 0: | 2441 | if self.failed_tids: |
2166 | self.rq.state = runQueueFailed | 2442 | self.rq.state = runQueueFailed |
2167 | return True | 2443 | return True |
2168 | 2444 | ||
@@ -2195,6 +2471,25 @@ class RunQueueExecute: | |||
2195 | ret.add(dep) | 2471 | ret.add(dep) |
2196 | return ret | 2472 | return ret |
2197 | 2473 | ||
2474 | # Build the individual cache entries in advance once to save time | ||
2475 | def build_taskdepdata_cache(self): | ||
2476 | taskdepdata_cache = {} | ||
2477 | for task in self.rqdata.runtaskentries: | ||
2478 | (mc, fn, taskname, taskfn) = split_tid_mcfn(task) | ||
2479 | taskdepdata_cache[task] = bb.TaskData( | ||
2480 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn], | ||
2481 | taskname = taskname, | ||
2482 | fn = fn, | ||
2483 | deps = self.filtermcdeps(task, mc, self.rqdata.runtaskentries[task].depends), | ||
2484 | provides = self.rqdata.dataCaches[mc].fn_provides[taskfn], | ||
2485 | taskhash = self.rqdata.runtaskentries[task].hash, | ||
2486 | unihash = self.rqdata.runtaskentries[task].unihash, | ||
2487 | hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn], | ||
2488 | taskhash_deps = self.rqdata.runtaskentries[task].taskhash_deps, | ||
2489 | ) | ||
2490 | |||
2491 | self.taskdepdata_cache = taskdepdata_cache | ||
2492 | |||
2198 | # We filter out multiconfig dependencies from taskdepdata we pass to the tasks | 2493 | # We filter out multiconfig dependencies from taskdepdata we pass to the tasks |
2199 | # as most code can't handle them | 2494 | # as most code can't handle them |
2200 | def build_taskdepdata(self, task): | 2495 | def build_taskdepdata(self, task): |
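build_taskdepdata_cache() precomputes one bb.TaskData record per task up front; the per-task taskdepdata construction below then only patches in the current unihash via _replace() while walking dependencies, rather than re-deriving every field on each call. The precompute-then-patch pattern with a reduced stand-in namedtuple (not the full bb.TaskData):

    from collections import namedtuple

    TaskData = namedtuple("TaskData", ["pn", "taskname", "unihash", "deps"])

    cache = {"x:do_a": TaskData("x", "do_a", "old-uni", ())}

    # Only the field that can change is refreshed; the rest was built once.
    cache["x:do_a"] = cache["x:do_a"]._replace(unihash="new-uni")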
@@ -2206,15 +2501,11 @@ class RunQueueExecute: | |||
2206 | while next: | 2501 | while next: |
2207 | additional = [] | 2502 | additional = [] |
2208 | for revdep in next: | 2503 | for revdep in next: |
2209 | (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep) | 2504 | self.taskdepdata_cache[revdep] = self.taskdepdata_cache[revdep]._replace( |
2210 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] | 2505 | unihash=self.rqdata.runtaskentries[revdep].unihash |
2211 | deps = self.rqdata.runtaskentries[revdep].depends | 2506 | ) |
2212 | provides = self.rqdata.dataCaches[mc].fn_provides[taskfn] | 2507 | taskdepdata[revdep] = self.taskdepdata_cache[revdep] |
2213 | taskhash = self.rqdata.runtaskentries[revdep].hash | 2508 | for revdep2 in self.taskdepdata_cache[revdep].deps: |
2214 | unihash = self.rqdata.runtaskentries[revdep].unihash | ||
2215 | deps = self.filtermcdeps(task, mc, deps) | ||
2216 | taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash] | ||
2217 | for revdep2 in deps: | ||
2218 | if revdep2 not in taskdepdata: | 2509 | if revdep2 not in taskdepdata: |
2219 | additional.append(revdep2) | 2510 | additional.append(revdep2) |
2220 | next = additional | 2511 | next = additional |
@@ -2228,7 +2519,7 @@ class RunQueueExecute: | |||
2228 | return | 2519 | return |
2229 | 2520 | ||
2230 | notcovered = set(self.scenequeue_notcovered) | 2521 | notcovered = set(self.scenequeue_notcovered) |
2231 | notcovered |= self.cantskip | 2522 | notcovered |= self.sqdata.cantskip |
2232 | for tid in self.scenequeue_notcovered: | 2523 | for tid in self.scenequeue_notcovered: |
2233 | notcovered |= self.sqdata.sq_covered_tasks[tid] | 2524 | notcovered |= self.sqdata.sq_covered_tasks[tid] |
2234 | notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids) | 2525 | notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids) |
@@ -2241,7 +2532,7 @@ class RunQueueExecute: | |||
2241 | covered.intersection_update(self.tasks_scenequeue_done) | 2532 | covered.intersection_update(self.tasks_scenequeue_done) |
2242 | 2533 | ||
2243 | for tid in notcovered | covered: | 2534 | for tid in notcovered | covered: |
2244 | if len(self.rqdata.runtaskentries[tid].depends) == 0: | 2535 | if not self.rqdata.runtaskentries[tid].depends: |
2245 | self.setbuildable(tid) | 2536 | self.setbuildable(tid) |
2246 | elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete): | 2537 | elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete): |
2247 | self.setbuildable(tid) | 2538 | self.setbuildable(tid) |
@@ -2273,10 +2564,16 @@ class RunQueueExecute: | |||
2273 | self.updated_taskhash_queue.remove((tid, unihash)) | 2564 | self.updated_taskhash_queue.remove((tid, unihash)) |
2274 | 2565 | ||
2275 | if unihash != self.rqdata.runtaskentries[tid].unihash: | 2566 | if unihash != self.rqdata.runtaskentries[tid].unihash: |
2276 | hashequiv_logger.verbose("Task %s unihash changed to %s" % (tid, unihash)) | 2567 | # Make sure we rehash any other tasks with the same task hash that we're deferred against. |
2277 | self.rqdata.runtaskentries[tid].unihash = unihash | 2568 | torehash = [tid] |
2278 | bb.parse.siggen.set_unihash(tid, unihash) | 2569 | for deftid in self.sq_deferred: |
2279 | toprocess.add(tid) | 2570 | if self.sq_deferred[deftid] == tid: |
2571 | torehash.append(deftid) | ||
2572 | for hashtid in torehash: | ||
2573 | hashequiv_logger.verbose("Task %s unihash changed to %s" % (hashtid, unihash)) | ||
2574 | self.rqdata.runtaskentries[hashtid].unihash = unihash | ||
2575 | bb.parse.siggen.set_unihash(hashtid, unihash) | ||
2576 | toprocess.add(hashtid) | ||
2280 | 2577 | ||
2281 | # Work out all tasks which depend upon these | 2578 | # Work out all tasks which depend upon these |
2282 | total = set() | 2579 | total = set() |
@@ -2294,23 +2591,33 @@ class RunQueueExecute: | |||
2294 | # Now iterate those tasks in dependency order to regenerate their taskhash/unihash | 2591 | # Now iterate those tasks in dependency order to regenerate their taskhash/unihash |
2295 | next = set() | 2592 | next = set() |
2296 | for p in total: | 2593 | for p in total: |
2297 | if len(self.rqdata.runtaskentries[p].depends) == 0: | 2594 | if not self.rqdata.runtaskentries[p].depends: |
2298 | next.add(p) | 2595 | next.add(p) |
2299 | elif self.rqdata.runtaskentries[p].depends.isdisjoint(total): | 2596 | elif self.rqdata.runtaskentries[p].depends.isdisjoint(total): |
2300 | next.add(p) | 2597 | next.add(p) |
2301 | 2598 | ||
2599 | starttime = time.time() | ||
2600 | lasttime = starttime | ||
2601 | |||
2302 | # When an item doesn't have dependencies in total, we can process it. Drop items from total when handled | 2602 | # When an item doesn't have dependencies in total, we can process it. Drop items from total when handled |
2303 | while next: | 2603 | while next: |
2304 | current = next.copy() | 2604 | current = next.copy() |
2305 | next = set() | 2605 | next = set() |
2606 | ready = {} | ||
2306 | for tid in current: | 2607 | for tid in current: |
2307 | if len(self.rqdata.runtaskentries[p].depends) and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total): | 2608 | if self.rqdata.runtaskentries[tid].depends and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total): |
2308 | continue | 2609 | continue |
2610 | # get_taskhash for a given tid *must* be called before get_unihash* below | ||
2611 | ready[tid] = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, self.rqdata.dataCaches) | ||
2612 | |||
2613 | unihashes = bb.parse.siggen.get_unihashes(ready.keys()) | ||
2614 | |||
2615 | for tid in ready: | ||
2309 | orighash = self.rqdata.runtaskentries[tid].hash | 2616 | orighash = self.rqdata.runtaskentries[tid].hash |
2310 | dc = bb.parse.siggen.get_data_caches(self.rqdata.dataCaches, mc_from_tid(tid)) | 2617 | newhash = ready[tid] |
2311 | newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, dc) | ||
2312 | origuni = self.rqdata.runtaskentries[tid].unihash | 2618 | origuni = self.rqdata.runtaskentries[tid].unihash |
2313 | newuni = bb.parse.siggen.get_unihash(tid) | 2619 | newuni = unihashes[tid] |
2620 | |||
2314 | # FIXME, need to check it can come from sstate at all for determinism? | 2621 | # FIXME, need to check it can come from sstate at all for determinism? |
2315 | remapped = False | 2622 | remapped = False |
2316 | if newuni == origuni: | 2623 | if newuni == origuni: |
@@ -2331,12 +2638,21 @@ class RunQueueExecute: | |||
2331 | next |= self.rqdata.runtaskentries[tid].revdeps | 2638 | next |= self.rqdata.runtaskentries[tid].revdeps |
2332 | total.remove(tid) | 2639 | total.remove(tid) |
2333 | next.intersection_update(total) | 2640 | next.intersection_update(total) |
2641 | bb.event.check_for_interrupts(self.cooker.data) | ||
2642 | |||
2643 | if time.time() > (lasttime + 30): | ||
2644 | lasttime = time.time() | ||
2645 | hashequiv_logger.verbose("Rehash loop slow progress: %s in %s" % (len(total), lasttime - starttime)) | ||
2646 | |||
2647 | endtime = time.time() | ||
2648 | if (endtime-starttime > 60): | ||
2649 | hashequiv_logger.verbose("Rehash loop took more than 60s: %s" % (endtime-starttime)) | ||
2334 | 2650 | ||
2335 | if changed: | 2651 | if changed: |
2336 | for mc in self.rq.worker: | 2652 | for mc in self.rq.worker: |
2337 | self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>") | 2653 | RunQueue.send_pickled_data(self.rq.worker[mc].process, bb.parse.siggen.get_taskhashes(), "newtaskhashes") |
2338 | for mc in self.rq.fakeworker: | 2654 | for mc in self.rq.fakeworker: |
2339 | self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>") | 2655 | RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, bb.parse.siggen.get_taskhashes(), "newtaskhashes") |
2340 | 2656 | ||
2341 | hashequiv_logger.debug(pprint.pformat("Tasks changed:\n%s" % (changed))) | 2657 | hashequiv_logger.debug(pprint.pformat("Tasks changed:\n%s" % (changed))) |
2342 | 2658 | ||
@@ -2370,7 +2686,7 @@ class RunQueueExecute: | |||
2370 | self.tasks_scenequeue_done.remove(tid) | 2686 | self.tasks_scenequeue_done.remove(tid) |
2371 | for dep in self.sqdata.sq_covered_tasks[tid]: | 2687 | for dep in self.sqdata.sq_covered_tasks[tid]: |
2372 | if dep in self.runq_complete and dep not in self.runq_tasksrun: | 2688 | if dep in self.runq_complete and dep not in self.runq_tasksrun: |
2373 | bb.error("Task %s marked as completed but now needing to rerun? Aborting build." % dep) | 2689 | bb.error("Task %s marked as completed but now needing to rerun? Halting build." % dep) |
2374 | self.failed_tids.append(tid) | 2690 | self.failed_tids.append(tid) |
2375 | self.rq.state = runQueueCleanUp | 2691 | self.rq.state = runQueueCleanUp |
2376 | return | 2692 | return |
@@ -2383,17 +2699,6 @@ class RunQueueExecute: | |||
2383 | self.sq_buildable.remove(tid) | 2699 | self.sq_buildable.remove(tid) |
2384 | if tid in self.sq_running: | 2700 | if tid in self.sq_running: |
2385 | self.sq_running.remove(tid) | 2701 | self.sq_running.remove(tid) |
2386 | harddepfail = False | ||
2387 | for t in self.sqdata.sq_harddeps: | ||
2388 | if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered: | ||
2389 | harddepfail = True | ||
2390 | break | ||
2391 | if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered): | ||
2392 | if tid not in self.sq_buildable: | ||
2393 | self.sq_buildable.add(tid) | ||
2394 | if len(self.sqdata.sq_revdeps[tid]) == 0: | ||
2395 | self.sq_buildable.add(tid) | ||
2396 | |||
2397 | if tid in self.sqdata.outrightfail: | 2702 | if tid in self.sqdata.outrightfail: |
2398 | self.sqdata.outrightfail.remove(tid) | 2703 | self.sqdata.outrightfail.remove(tid) |
2399 | if tid in self.scenequeue_notcovered: | 2704 | if tid in self.scenequeue_notcovered: |
@@ -2404,7 +2709,7 @@ class RunQueueExecute: | |||
2404 | self.scenequeue_notneeded.remove(tid) | 2709 | self.scenequeue_notneeded.remove(tid) |
2405 | 2710 | ||
2406 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 2711 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
2407 | self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True) | 2712 | self.sqdata.stamps[tid] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) |
2408 | 2713 | ||
2409 | if tid in self.stampcache: | 2714 | if tid in self.stampcache: |
2410 | del self.stampcache[tid] | 2715 | del self.stampcache[tid] |
@@ -2412,29 +2717,67 @@ class RunQueueExecute: | |||
2412 | if tid in self.build_stamps: | 2717 | if tid in self.build_stamps: |
2413 | del self.build_stamps[tid] | 2718 | del self.build_stamps[tid] |
2414 | 2719 | ||
2415 | update_tasks.append((tid, harddepfail, tid in self.sqdata.valid)) | 2720 | update_tasks.append(tid) |
2416 | 2721 | ||
2417 | if update_tasks: | 2722 | update_tasks2 = [] |
2723 | for tid in update_tasks: | ||
2724 | harddepfail = False | ||
2725 | for t in self.sqdata.sq_harddeps_rev[tid]: | ||
2726 | if t in self.scenequeue_notcovered: | ||
2727 | harddepfail = True | ||
2728 | break | ||
2729 | if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered): | ||
2730 | if tid not in self.sq_buildable: | ||
2731 | self.sq_buildable.add(tid) | ||
2732 | if not self.sqdata.sq_revdeps[tid]: | ||
2733 | self.sq_buildable.add(tid) | ||
2734 | |||
2735 | update_tasks2.append((tid, harddepfail, tid in self.sqdata.valid)) | ||
2736 | |||
2737 | if update_tasks2: | ||
2418 | self.sqdone = False | 2738 | self.sqdone = False |
2419 | update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False) | 2739 | for mc in sorted(self.sqdata.multiconfigs): |
2740 | for tid in sorted([t[0] for t in update_tasks2]): | ||
2741 | if mc_from_tid(tid) != mc: | ||
2742 | continue | ||
2743 | h = pending_hash_index(tid, self.rqdata) | ||
2744 | if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]: | ||
2745 | self.sq_deferred[tid] = self.sqdata.hashes[h] | ||
2746 | bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h])) | ||
2747 | update_scenequeue_data([t[0] for t in update_tasks2], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False) | ||
2420 | 2748 | ||
2421 | for (tid, harddepfail, origvalid) in update_tasks: | 2749 | for (tid, harddepfail, origvalid) in update_tasks2: |
2422 | if tid in self.sqdata.valid and not origvalid: | 2750 | if tid in self.sqdata.valid and not origvalid: |
2423 | hashequiv_logger.verbose("Setscene task %s became valid" % tid) | 2751 | hashequiv_logger.verbose("Setscene task %s became valid" % tid) |
2424 | if harddepfail: | 2752 | if harddepfail: |
2753 | logger.debug2("%s has an unavailable hard dependency so skipping" % (tid)) | ||
2425 | self.sq_task_failoutright(tid) | 2754 | self.sq_task_failoutright(tid) |
2426 | 2755 | ||
2427 | if changed: | 2756 | if changed: |
2757 | self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered)) | ||
2758 | self.sq_needed_harddeps = set() | ||
2759 | self.sq_harddep_deferred = set() | ||
2428 | self.holdoff_need_update = True | 2760 | self.holdoff_need_update = True |
2429 | 2761 | ||
2430 | def scenequeue_updatecounters(self, task, fail=False): | 2762 | def scenequeue_updatecounters(self, task, fail=False): |
2431 | 2763 | ||
2432 | for dep in sorted(self.sqdata.sq_deps[task]): | 2764 | if fail and task in self.sqdata.sq_harddeps: |
2433 | if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]: | 2765 | for dep in sorted(self.sqdata.sq_harddeps[task]): |
2766 | if dep in self.scenequeue_covered or dep in self.scenequeue_notcovered: | ||
2767 | # dependency could be already processed, e.g. noexec setscene task | ||
2768 | continue | ||
2769 | noexec, stamppresent = check_setscene_stamps(dep, self.rqdata, self.rq, self.stampcache) | ||
2770 | if noexec or stamppresent: | ||
2771 | continue | ||
2434 | logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep)) | 2772 | logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep)) |
2435 | self.sq_task_failoutright(dep) | 2773 | self.sq_task_failoutright(dep) |
2436 | continue | 2774 | continue |
2437 | if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered): | 2775 | |
2776 | # For performance, only compute allcovered once if needed | ||
2777 | if self.sqdata.sq_deps[task]: | ||
2778 | allcovered = self.scenequeue_covered | self.scenequeue_notcovered | ||
2779 | for dep in sorted(self.sqdata.sq_deps[task]): | ||
2780 | if self.sqdata.sq_revdeps[dep].issubset(allcovered): | ||
2438 | if dep not in self.sq_buildable: | 2781 | if dep not in self.sq_buildable: |
2439 | self.sq_buildable.add(dep) | 2782 | self.sq_buildable.add(dep) |
2440 | 2783 | ||
@@ -2452,6 +2795,14 @@ class RunQueueExecute: | |||
2452 | new.add(dep) | 2795 | new.add(dep) |
2453 | next = new | 2796 | next = new |
2454 | 2797 | ||
2798 | # If this task was one which other setscene tasks have a hard dependency upon, we need | ||
2799 | # to walk through the hard dependencies and allow execution of those which have completed dependencies. | ||
2800 | if task in self.sqdata.sq_harddeps: | ||
2801 | for dep in self.sq_harddep_deferred.copy(): | ||
2802 | if self.sqdata.sq_harddeps_rev[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered): | ||
2803 | self.sq_harddep_deferred.remove(dep) | ||
2804 | |||
2805 | self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered)) | ||
2455 | self.holdoff_need_update = True | 2806 | self.holdoff_need_update = True |
2456 | 2807 | ||
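scenequeue_updatecounters() now builds the covered|notcovered union once per call and reuses it for every dependency's subset test, rather than recomputing it inside the loop; hard-dependency failures likewise skip dependents that already completed or have a stamp. The hoisted-union micro-optimisation, restated on hypothetical sets:

    # Stand-ins for the scenequeue state.
    covered, notcovered = {"a"}, {"b"}
    revdeps = {"c": {"a"}, "d": {"a", "e"}}

    allcovered = covered | notcovered          # computed once
    buildable = {dep for dep in revdeps if revdeps[dep].issubset(allcovered)}
    # -> {"c"}: all of c's setscene revdeps have been processed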
2457 | def sq_task_completeoutright(self, task): | 2808 | def sq_task_completeoutright(self, task): |
@@ -2466,22 +2817,20 @@ class RunQueueExecute: | |||
2466 | self.scenequeue_updatecounters(task) | 2817 | self.scenequeue_updatecounters(task) |
2467 | 2818 | ||
2468 | def sq_check_taskfail(self, task): | 2819 | def sq_check_taskfail(self, task): |
2469 | if self.rqdata.setscenewhitelist is not None: | 2820 | if self.rqdata.setscene_ignore_tasks is not None: |
2470 | realtask = task.split('_setscene')[0] | 2821 | realtask = task.split('_setscene')[0] |
2471 | (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask) | 2822 | (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask) |
2472 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] | 2823 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] |
2473 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): | 2824 | if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks): |
2474 | logger.error('Task %s.%s failed' % (pn, taskname + "_setscene")) | 2825 | logger.error('Task %s.%s failed' % (pn, taskname + "_setscene")) |
2475 | self.rq.state = runQueueCleanUp | 2826 | self.rq.state = runQueueCleanUp |
2476 | 2827 | ||
2477 | def sq_task_complete(self, task): | 2828 | def sq_task_complete(self, task): |
2478 | self.sq_stats.taskCompleted() | 2829 | bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData) |
2479 | bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData) | ||
2480 | self.sq_task_completeoutright(task) | 2830 | self.sq_task_completeoutright(task) |
2481 | 2831 | ||
2482 | def sq_task_fail(self, task, result): | 2832 | def sq_task_fail(self, task, result): |
2483 | self.sq_stats.taskFailed() | 2833 | bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData) |
2484 | bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData) | ||
2485 | self.scenequeue_notcovered.add(task) | 2834 | self.scenequeue_notcovered.add(task) |
2486 | self.scenequeue_updatecounters(task, True) | 2835 | self.scenequeue_updatecounters(task, True) |
2487 | self.sq_check_taskfail(task) | 2836 | self.sq_check_taskfail(task) |
@@ -2489,8 +2838,6 @@ class RunQueueExecute: | |||
2489 | def sq_task_failoutright(self, task): | 2838 | def sq_task_failoutright(self, task): |
2490 | self.sq_running.add(task) | 2839 | self.sq_running.add(task) |
2491 | self.sq_buildable.add(task) | 2840 | self.sq_buildable.add(task) |
2492 | self.sq_stats.taskSkipped() | ||
2493 | self.sq_stats.taskCompleted() | ||
2494 | self.scenequeue_notcovered.add(task) | 2841 | self.scenequeue_notcovered.add(task) |
2495 | self.scenequeue_updatecounters(task, True) | 2842 | self.scenequeue_updatecounters(task, True) |
2496 | 2843 | ||
@@ -2498,8 +2845,6 @@ class RunQueueExecute: | |||
2498 | self.sq_running.add(task) | 2845 | self.sq_running.add(task) |
2499 | self.sq_buildable.add(task) | 2846 | self.sq_buildable.add(task) |
2500 | self.sq_task_completeoutright(task) | 2847 | self.sq_task_completeoutright(task) |
2501 | self.sq_stats.taskSkipped() | ||
2502 | self.sq_stats.taskCompleted() | ||
2503 | 2848 | ||
2504 | def sq_build_taskdepdata(self, task): | 2849 | def sq_build_taskdepdata(self, task): |
2505 | def getsetscenedeps(tid): | 2850 | def getsetscenedeps(tid): |
@@ -2525,12 +2870,19 @@ class RunQueueExecute: | |||
2525 | additional = [] | 2870 | additional = [] |
2526 | for revdep in next: | 2871 | for revdep in next: |
2527 | (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep) | 2872 | (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep) |
2528 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] | ||
2529 | deps = getsetscenedeps(revdep) | 2873 | deps = getsetscenedeps(revdep) |
2530 | provides = self.rqdata.dataCaches[mc].fn_provides[taskfn] | 2874 | |
2531 | taskhash = self.rqdata.runtaskentries[revdep].hash | 2875 | taskdepdata[revdep] = bb.TaskData( |
2532 | unihash = self.rqdata.runtaskentries[revdep].unihash | 2876 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn], |
2533 | taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash] | 2877 | taskname = taskname, |
2878 | fn = fn, | ||
2879 | deps = deps, | ||
2880 | provides = self.rqdata.dataCaches[mc].fn_provides[taskfn], | ||
2881 | taskhash = self.rqdata.runtaskentries[revdep].hash, | ||
2882 | unihash = self.rqdata.runtaskentries[revdep].unihash, | ||
2883 | hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn], | ||
2884 | taskhash_deps = self.rqdata.runtaskentries[revdep].taskhash_deps, | ||
2885 | ) | ||
2534 | for revdep2 in deps: | 2886 | for revdep2 in deps: |
2535 | if revdep2 not in taskdepdata: | 2887 | if revdep2 not in taskdepdata: |
2536 | additional.append(revdep2) | 2888 | additional.append(revdep2) |
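The hunk above replaces the old positional seven-element list with a named bb.TaskData record, so consumers of taskdepdata can address fields by name instead of magic indexes. A minimal sketch of that pattern, assuming bb.TaskData behaves like a namedtuple (field names are taken from the call site above; all values below are invented):

    from collections import namedtuple

    # Assumed shape, mirroring the keyword arguments in the hunk above.
    TaskData = namedtuple("TaskData", ("pn", "taskname", "fn", "deps", "provides",
                                       "taskhash", "unihash", "hashfn",
                                       "taskhash_deps"))

    entry = TaskData(pn="busybox", taskname="do_compile", fn="/r/busybox.bb",
                     deps=set(), provides=["busybox"], taskhash="deadbeef",
                     unihash="deadbeef", hashfn="baseconfig", taskhash_deps=[])

    print(entry.pn, entry.unihash)   # self-describing named access
    print(entry[0])                  # still indexable for positional callers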
@@ -2539,8 +2891,8 @@ class RunQueueExecute: | |||
2539 | #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n")) | 2891 | #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n")) |
2540 | return taskdepdata | 2892 | return taskdepdata |
2541 | 2893 | ||
2542 | def check_setscenewhitelist(self, tid): | 2894 | def check_setscene_ignore_tasks(self, tid): |
2543 | # Check task that is going to run against the whitelist | 2895 | # Check task that is going to run against the ignore tasks list |
2544 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 2896 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
2545 | # Ignore covered tasks | 2897 | # Ignore covered tasks |
2546 | if tid in self.tasks_covered: | 2898 | if tid in self.tasks_covered: |
@@ -2554,14 +2906,15 @@ class RunQueueExecute: | |||
2554 | return False | 2906 | return False |
2555 | 2907 | ||
2556 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] | 2908 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] |
2557 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): | 2909 | if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks): |
2558 | if tid in self.rqdata.runq_setscene_tids: | 2910 | if tid in self.rqdata.runq_setscene_tids: |
2559 | msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname) | 2911 | msg = ['Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)] |
2560 | else: | 2912 | else: |
2561 | msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname) | 2913 | msg = ['Task %s.%s attempted to execute unexpectedly' % (pn, taskname)] |
2562 | for t in self.scenequeue_notcovered: | 2914 | for t in self.scenequeue_notcovered: |
2563 | msg = msg + "\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash) | 2915 | msg.append("\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash)) |
2564 | logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered)) | 2916 | msg.append('\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered)) |
2917 | logger.error("".join(msg)) | ||
2565 | return True | 2918 | return True |
2566 | return False | 2919 | return False |
2567 | 2920 | ||
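The error message is now accumulated in a list and joined once, instead of being rebuilt by repeated string concatenation, which is quadratic when scenequeue_notcovered is large. The pattern in isolation (task names and hashes invented):

    msg = ['Task %s.%s attempted to execute unexpectedly' % ("glibc", "do_fetch")]
    for t, unihash, taskhash in [("t1", "u1", "h1"), ("t2", "u2", "h2")]:
        msg.append("\nTask %s, unihash %s, taskhash %s" % (t, unihash, taskhash))
    print("".join(msg))   # one allocation for the final string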
@@ -2573,6 +2926,7 @@ class SQData(object): | |||
2573 | self.sq_revdeps = {} | 2926 | self.sq_revdeps = {} |
2574 | # Injected inter-setscene task dependencies | 2927 | # Injected inter-setscene task dependencies |
2575 | self.sq_harddeps = {} | 2928 | self.sq_harddeps = {} |
2929 | self.sq_harddeps_rev = {} | ||
2576 | # Cache of stamp files so duplicates can't run in parallel | 2930 | # Cache of stamp files so duplicates can't run in parallel |
2577 | self.stamps = {} | 2931 | self.stamps = {} |
2578 | # Setscene tasks directly depended upon by the build | 2932 | # Setscene tasks directly depended upon by the build |
@@ -2582,12 +2936,17 @@ class SQData(object): | |||
2582 | # A list of normal tasks a setscene task covers | 2936 | # A list of normal tasks a setscene task covers |
2583 | self.sq_covered_tasks = {} | 2937 | self.sq_covered_tasks = {} |
2584 | 2938 | ||
2585 | def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | 2939 | def build_scenequeue_data(sqdata, rqdata, sqrq): |
2586 | 2940 | ||
2587 | sq_revdeps = {} | 2941 | sq_revdeps = {} |
2588 | sq_revdeps_squash = {} | 2942 | sq_revdeps_squash = {} |
2589 | sq_collated_deps = {} | 2943 | sq_collated_deps = {} |
2590 | 2944 | ||
2945 | # We can't skip specified target tasks which aren't setscene tasks | ||
2946 | sqdata.cantskip = set(rqdata.target_tids) | ||
2947 | sqdata.cantskip.difference_update(rqdata.runq_setscene_tids) | ||
2948 | sqdata.cantskip.intersection_update(rqdata.runtaskentries) | ||
2949 | |||
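The new cantskip block is plain set algebra: start from the requested target tids, drop any that have a setscene variant, and keep only those actually scheduled to run. A toy reduction with invented tids:

    target_tids = {"/r/foo.bb:do_build", "/r/foo.bb:do_fetch"}
    runq_setscene_tids = {"/r/foo.bb:do_fetch"}        # has a _setscene form
    runtaskentries = {"/r/foo.bb:do_build", "/r/foo.bb:do_fetch"}

    cantskip = set(target_tids)
    cantskip.difference_update(runq_setscene_tids)     # setscene can cover these
    cantskip.intersection_update(runtaskentries)       # keep only scheduled tasks
    print(cantskip)                                    # {'/r/foo.bb:do_build'}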
2591 | # We need to construct a dependency graph for the setscene functions. Intermediate | 2950 | # We need to construct a dependency graph for the setscene functions. Intermediate |
2592 | # dependencies between the setscene tasks only complicate the code. This code | 2951 | # dependencies between the setscene tasks only complicate the code. This code |
2593 | # therefore aims to collapse the huge runqueue dependency tree into a smaller one | 2952 | # therefore aims to collapse the huge runqueue dependency tree into a smaller one |
@@ -2600,7 +2959,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2600 | for tid in rqdata.runtaskentries: | 2959 | for tid in rqdata.runtaskentries: |
2601 | sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps) | 2960 | sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps) |
2602 | sq_revdeps_squash[tid] = set() | 2961 | sq_revdeps_squash[tid] = set() |
2603 | if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids: | 2962 | if not sq_revdeps[tid] and tid not in rqdata.runq_setscene_tids: |
2604 | #bb.warn("Added endpoint %s" % (tid)) | 2963 | #bb.warn("Added endpoint %s" % (tid)) |
2605 | endpoints[tid] = set() | 2964 | endpoints[tid] = set() |
2606 | 2965 | ||
@@ -2634,16 +2993,15 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2634 | sq_revdeps_squash[point] = set() | 2993 | sq_revdeps_squash[point] = set() |
2635 | if point in rqdata.runq_setscene_tids: | 2994 | if point in rqdata.runq_setscene_tids: |
2636 | sq_revdeps_squash[point] = tasks | 2995 | sq_revdeps_squash[point] = tasks |
2637 | tasks = set() | ||
2638 | continue | 2996 | continue |
2639 | for dep in rqdata.runtaskentries[point].depends: | 2997 | for dep in rqdata.runtaskentries[point].depends: |
2640 | if point in sq_revdeps[dep]: | 2998 | if point in sq_revdeps[dep]: |
2641 | sq_revdeps[dep].remove(point) | 2999 | sq_revdeps[dep].remove(point) |
2642 | if tasks: | 3000 | if tasks: |
2643 | sq_revdeps_squash[dep] |= tasks | 3001 | sq_revdeps_squash[dep] |= tasks |
2644 | if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids: | 3002 | if not sq_revdeps[dep] and dep not in rqdata.runq_setscene_tids: |
2645 | newendpoints[dep] = task | 3003 | newendpoints[dep] = task |
2646 | if len(newendpoints) != 0: | 3004 | if newendpoints: |
2647 | process_endpoints(newendpoints) | 3005 | process_endpoints(newendpoints) |
2648 | 3006 | ||
2649 | process_endpoints(endpoints) | 3007 | process_endpoints(endpoints) |
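The collapse walks from endpoints (tasks with no reverse dependencies) back through depends, accumulating the set of normal tasks until a setscene task absorbs them. A toy reduction of the idea (graph and names invented; the real code above also strips processed revdeps as it goes and iterates on newly created endpoints):

    depends = {"a": set(), "b": {"a"}, "c": {"b"}}   # task -> its dependencies
    revdeps = {"a": {"b"}, "b": {"c"}, "c": set()}   # task -> reverse dependencies
    setscene = {"a"}                                 # tasks with a _setscene variant
    covered = {}                                     # setscene task -> tasks it covers

    def process(points, tasks):
        for point in points:
            newtasks = tasks | {point}
            if point in setscene:
                covered.setdefault(point, set()).update(newtasks - {point})
                continue
            process(depends[point], newtasks)

    process([t for t, r in revdeps.items() if not r], set())
    print(covered)   # {'a': {'b', 'c'}}: setscene 'a' covers normal tasks b and c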
@@ -2655,16 +3013,16 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2655 | # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon | 3013 | # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon |
2656 | new = True | 3014 | new = True |
2657 | for tid in rqdata.runtaskentries: | 3015 | for tid in rqdata.runtaskentries: |
2658 | if len(rqdata.runtaskentries[tid].revdeps) == 0: | 3016 | if not rqdata.runtaskentries[tid].revdeps: |
2659 | sqdata.unskippable.add(tid) | 3017 | sqdata.unskippable.add(tid) |
2660 | sqdata.unskippable |= sqrq.cantskip | 3018 | sqdata.unskippable |= sqdata.cantskip |
2661 | while new: | 3019 | while new: |
2662 | new = False | 3020 | new = False |
2663 | orig = sqdata.unskippable.copy() | 3021 | orig = sqdata.unskippable.copy() |
2664 | for tid in sorted(orig, reverse=True): | 3022 | for tid in sorted(orig, reverse=True): |
2665 | if tid in rqdata.runq_setscene_tids: | 3023 | if tid in rqdata.runq_setscene_tids: |
2666 | continue | 3024 | continue |
2667 | if len(rqdata.runtaskentries[tid].depends) == 0: | 3025 | if not rqdata.runtaskentries[tid].depends: |
2668 | # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable | 3026 | # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable |
2669 | sqrq.setbuildable(tid) | 3027 | sqrq.setbuildable(tid) |
2670 | sqdata.unskippable |= rqdata.runtaskentries[tid].depends | 3028 | sqdata.unskippable |= rqdata.runtaskentries[tid].depends |
@@ -2676,14 +3034,13 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2676 | rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries)) | 3034 | rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries)) |
2677 | 3035 | ||
2678 | # Sanity check all dependencies could be changed to setscene task references | 3036 | # Sanity check all dependencies could be changed to setscene task references |
2679 | for taskcounter, tid in enumerate(rqdata.runtaskentries): | 3037 | for tid in rqdata.runtaskentries: |
2680 | if tid in rqdata.runq_setscene_tids: | 3038 | if tid in rqdata.runq_setscene_tids: |
2681 | pass | 3039 | pass |
2682 | elif len(sq_revdeps_squash[tid]) != 0: | 3040 | elif sq_revdeps_squash[tid]: |
2683 | bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.") | 3041 | bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, halting. Please report this problem.") |
2684 | else: | 3042 | else: |
2685 | del sq_revdeps_squash[tid] | 3043 | del sq_revdeps_squash[tid] |
2686 | rqdata.init_progress_reporter.update(taskcounter) | ||
2687 | 3044 | ||
2688 | rqdata.init_progress_reporter.next_stage() | 3045 | rqdata.init_progress_reporter.next_stage() |
2689 | 3046 | ||
@@ -2694,7 +3051,9 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2694 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 3051 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) |
2695 | realtid = tid + "_setscene" | 3052 | realtid = tid + "_setscene" |
2696 | idepends = rqdata.taskData[mc].taskentries[realtid].idepends | 3053 | idepends = rqdata.taskData[mc].taskentries[realtid].idepends |
2697 | sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True) | 3054 | sqdata.stamps[tid] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) |
3055 | |||
3056 | sqdata.sq_harddeps_rev[tid] = set() | ||
2698 | for (depname, idependtask) in idepends: | 3057 | for (depname, idependtask) in idepends: |
2699 | 3058 | ||
2700 | if depname not in rqdata.taskData[mc].build_targets: | 3059 | if depname not in rqdata.taskData[mc].build_targets: |
@@ -2707,20 +3066,15 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2707 | if deptid not in rqdata.runtaskentries: | 3066 | if deptid not in rqdata.runtaskentries: |
2708 | bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask)) | 3067 | bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask)) |
2709 | 3068 | ||
3069 | logger.debug2("Adding hard setscene dependency %s for %s" % (deptid, tid)) | ||
3070 | |||
2710 | if not deptid in sqdata.sq_harddeps: | 3071 | if not deptid in sqdata.sq_harddeps: |
2711 | sqdata.sq_harddeps[deptid] = set() | 3072 | sqdata.sq_harddeps[deptid] = set() |
2712 | sqdata.sq_harddeps[deptid].add(tid) | 3073 | sqdata.sq_harddeps[deptid].add(tid) |
2713 | 3074 | sqdata.sq_harddeps_rev[tid].add(deptid) | |
2714 | sq_revdeps_squash[tid].add(deptid) | ||
2715 | # Have to zero this to avoid circular dependencies | ||
2716 | sq_revdeps_squash[deptid] = set() | ||
2717 | 3075 | ||
2718 | rqdata.init_progress_reporter.next_stage() | 3076 | rqdata.init_progress_reporter.next_stage() |
2719 | 3077 | ||
2720 | for task in sqdata.sq_harddeps: | ||
2721 | for dep in sqdata.sq_harddeps[task]: | ||
2722 | sq_revdeps_squash[dep].add(task) | ||
2723 | |||
2724 | rqdata.init_progress_reporter.next_stage() | 3078 | rqdata.init_progress_reporter.next_stage() |
2725 | 3079 | ||
2726 | #for tid in sq_revdeps_squash: | 3080 | #for tid in sq_revdeps_squash: |
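Hard setscene dependencies are no longer injected into the squashed revdep graph (the deleted zeroing and rebuild loop above); instead they are tracked in a pair of forward/reverse maps. Maintaining both at insertion time makes either direction a direct lookup. The pattern, with invented tids:

    from collections import defaultdict

    sq_harddeps = defaultdict(set)       # deptid -> setscene tasks gated on it
    sq_harddeps_rev = defaultdict(set)   # tid -> the hard deps it waits for

    def add_harddep(tid, deptid):
        sq_harddeps[deptid].add(tid)
        sq_harddeps_rev[tid].add(deptid)

    add_harddep("/r/pkg.bb:do_package", "/r/pseudo.bb:do_populate_sysroot")
    print(sq_harddeps_rev["/r/pkg.bb:do_package"])   # direct reverse lookup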
@@ -2744,16 +3098,47 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): | |||
2744 | sqdata.multiconfigs = set() | 3098 | sqdata.multiconfigs = set() |
2745 | for tid in sqdata.sq_revdeps: | 3099 | for tid in sqdata.sq_revdeps: |
2746 | sqdata.multiconfigs.add(mc_from_tid(tid)) | 3100 | sqdata.multiconfigs.add(mc_from_tid(tid)) |
2747 | if len(sqdata.sq_revdeps[tid]) == 0: | 3101 | if not sqdata.sq_revdeps[tid]: |
2748 | sqrq.sq_buildable.add(tid) | 3102 | sqrq.sq_buildable.add(tid) |
2749 | 3103 | ||
2750 | rqdata.init_progress_reporter.finish() | 3104 | rqdata.init_progress_reporter.next_stage() |
2751 | 3105 | ||
2752 | sqdata.noexec = set() | 3106 | sqdata.noexec = set() |
2753 | sqdata.stamppresent = set() | 3107 | sqdata.stamppresent = set() |
2754 | sqdata.valid = set() | 3108 | sqdata.valid = set() |
2755 | 3109 | ||
2756 | update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True) | 3110 | sqdata.hashes = {} |
3111 | sqrq.sq_deferred = {} | ||
3112 | for mc in sorted(sqdata.multiconfigs): | ||
3113 | for tid in sorted(sqdata.sq_revdeps): | ||
3114 | if mc_from_tid(tid) != mc: | ||
3115 | continue | ||
3116 | h = pending_hash_index(tid, rqdata) | ||
3117 | if h not in sqdata.hashes: | ||
3118 | sqdata.hashes[h] = tid | ||
3119 | else: | ||
3120 | sqrq.sq_deferred[tid] = sqdata.hashes[h] | ||
3121 | bb.debug(1, "Deferring %s after %s" % (tid, sqdata.hashes[h])) | ||
3122 | |||
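The deferral loop that moved here keys tasks by their pending hash: the first task seen with a given hash runs, and later ones are parked behind it in sq_deferred. A toy reduction (tids and hashes invented; pending_hash_index() in the real code derives the key from the task's hash):

    hashes = {}      # pending hash -> first tid seen with it
    deferred = {}    # tid -> tid it is deferred behind
    pending = {"x:do_a": "h1", "y:do_a": "h1", "z:do_b": "h2"}  # tid -> hash

    for tid in sorted(pending):
        h = pending[tid]
        if h not in hashes:
            hashes[h] = tid
        else:
            deferred[tid] = hashes[h]

    print(deferred)   # {'y:do_a': 'x:do_a'}: y waits for x's outcome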
3123 | def check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=False): | ||
3124 | |||
3125 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | ||
3126 | |||
3127 | taskdep = rqdata.dataCaches[mc].task_deps[taskfn] | ||
3128 | |||
3129 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | ||
3130 | bb.build.make_stamp_mcfn(taskname + "_setscene", taskfn) | ||
3131 | return True, False | ||
3132 | |||
3133 | if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache): | ||
3134 | logger.debug2('Setscene stamp current for task %s', tid) | ||
3135 | return False, True | ||
3136 | |||
3137 | if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache): | ||
3138 | logger.debug2('Normal stamp current for task %s', tid) | ||
3139 | return False, True | ||
3140 | |||
3141 | return False, False | ||
2757 | 3142 | ||
2758 | def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True): | 3143 | def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True): |
2759 | 3144 | ||
@@ -2764,55 +3149,42 @@ def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, s | |||
2764 | sqdata.stamppresent.remove(tid) | 3149 | sqdata.stamppresent.remove(tid) |
2765 | if tid in sqdata.valid: | 3150 | if tid in sqdata.valid: |
2766 | sqdata.valid.remove(tid) | 3151 | sqdata.valid.remove(tid) |
3152 | if tid in sqdata.outrightfail: | ||
3153 | sqdata.outrightfail.remove(tid) | ||
2767 | 3154 | ||
2768 | (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) | 3155 | noexec, stamppresent = check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=True) |
2769 | |||
2770 | taskdep = rqdata.dataCaches[mc].task_deps[taskfn] | ||
2771 | 3156 | ||
2772 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 3157 | if noexec: |
2773 | sqdata.noexec.add(tid) | 3158 | sqdata.noexec.add(tid) |
2774 | sqrq.sq_task_skip(tid) | 3159 | sqrq.sq_task_skip(tid) |
2775 | bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn) | 3160 | logger.debug2("%s is noexec so skipping setscene" % (tid)) |
2776 | continue | ||
2777 | |||
2778 | if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache): | ||
2779 | logger.debug2('Setscene stamp current for task %s', tid) | ||
2780 | sqdata.stamppresent.add(tid) | ||
2781 | sqrq.sq_task_skip(tid) | ||
2782 | continue | 3161 | continue |
2783 | 3162 | ||
2784 | if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache): | 3163 | if stamppresent: |
2785 | logger.debug2('Normal stamp current for task %s', tid) | ||
2786 | sqdata.stamppresent.add(tid) | 3164 | sqdata.stamppresent.add(tid) |
2787 | sqrq.sq_task_skip(tid) | 3165 | sqrq.sq_task_skip(tid) |
3166 | logger.debug2("%s has a valid stamp, skipping" % (tid)) | ||
2788 | continue | 3167 | continue |
2789 | 3168 | ||
2790 | tocheck.add(tid) | 3169 | tocheck.add(tid) |
2791 | 3170 | ||
2792 | sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary) | 3171 | sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary) |
2793 | 3172 | ||
2794 | sqdata.hashes = {} | 3173 | for tid in tids: |
2795 | for mc in sorted(sqdata.multiconfigs): | 3174 | if tid in sqdata.stamppresent: |
2796 | for tid in sorted(sqdata.sq_revdeps): | 3175 | continue |
2797 | if mc_from_tid(tid) != mc: | 3176 | if tid in sqdata.valid: |
2798 | continue | 3177 | continue |
2799 | if tid in sqdata.stamppresent: | 3178 | if tid in sqdata.noexec: |
2800 | continue | 3179 | continue |
2801 | if tid in sqdata.valid: | 3180 | if tid in sqrq.scenequeue_covered: |
2802 | continue | 3181 | continue |
2803 | if tid in sqdata.noexec: | 3182 | if tid in sqrq.scenequeue_notcovered: |
2804 | continue | 3183 | continue |
2805 | if tid in sqrq.scenequeue_notcovered: | 3184 | if tid in sqrq.sq_deferred: |
2806 | continue | 3185 | continue |
2807 | sqdata.outrightfail.add(tid) | 3186 | sqdata.outrightfail.add(tid) |
2808 | 3187 | logger.debug2("%s not handled by setscene, marking as outright failure" % (tid)) |
2809 | h = pending_hash_index(tid, rqdata) | ||
2810 | if h not in sqdata.hashes: | ||
2811 | sqdata.hashes[h] = tid | ||
2812 | else: | ||
2813 | sqrq.sq_deferred[tid] = sqdata.hashes[h] | ||
2814 | bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h])) | ||
2815 | |||
2816 | 3188 | ||
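After hash validation, every tid must land in exactly one bucket; anything that is not stamped, valid, noexec, covered, notcovered or deferred falls through to outrightfail. The guard chain in isolation (invented tids):

    stamppresent, valid, noexec = {"t1"}, {"t2"}, set()
    covered, notcovered, deferred = set(), set(), {}
    outrightfail = set()

    for tid in ["t1", "t2", "t3"]:
        if tid in stamppresent or tid in valid or tid in noexec:
            continue
        if tid in covered or tid in notcovered or tid in deferred:
            continue
        outrightfail.add(tid)

    print(outrightfail)   # {'t3'}: nothing can satisfy it, so it fails outright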
2817 | class TaskFailure(Exception): | 3189 | class TaskFailure(Exception): |
2818 | """ | 3190 | """ |
@@ -2876,12 +3248,16 @@ class runQueueTaskFailed(runQueueEvent): | |||
2876 | """ | 3248 | """ |
2877 | Event notifying a task failed | 3249 | Event notifying a task failed |
2878 | """ | 3250 | """ |
2879 | def __init__(self, task, stats, exitcode, rq): | 3251 | def __init__(self, task, stats, exitcode, rq, fakeroot_log=None): |
2880 | runQueueEvent.__init__(self, task, stats, rq) | 3252 | runQueueEvent.__init__(self, task, stats, rq) |
2881 | self.exitcode = exitcode | 3253 | self.exitcode = exitcode |
3254 | self.fakeroot_log = fakeroot_log | ||
2882 | 3255 | ||
2883 | def __str__(self): | 3256 | def __str__(self): |
2884 | return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode) | 3257 | if self.fakeroot_log: |
3258 | return "Task (%s) failed with exit code '%s' \nPseudo log:\n%s" % (self.taskstring, self.exitcode, self.fakeroot_log) | ||
3259 | else: | ||
3260 | return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode) | ||
2885 | 3261 | ||
2886 | class sceneQueueTaskFailed(sceneQueueEvent): | 3262 | class sceneQueueTaskFailed(sceneQueueEvent): |
2887 | """ | 3263 | """ |
@@ -2933,18 +3309,16 @@ class runQueuePipe(): | |||
2933 | """ | 3309 | """ |
2934 | Abstraction for a pipe between a worker thread and the server | 3310 | Abstraction for a pipe between a worker thread and the server |
2935 | """ | 3311 | """ |
2936 | def __init__(self, pipein, pipeout, d, rq, rqexec): | 3312 | def __init__(self, pipein, pipeout, d, rq, rqexec, fakerootlogs=None): |
2937 | self.input = pipein | 3313 | self.input = pipein |
2938 | if pipeout: | 3314 | if pipeout: |
2939 | pipeout.close() | 3315 | pipeout.close() |
2940 | bb.utils.nonblockingfd(self.input) | 3316 | bb.utils.nonblockingfd(self.input) |
2941 | self.queue = b"" | 3317 | self.queue = bytearray() |
2942 | self.d = d | 3318 | self.d = d |
2943 | self.rq = rq | 3319 | self.rq = rq |
2944 | self.rqexec = rqexec | 3320 | self.rqexec = rqexec |
2945 | 3321 | self.fakerootlogs = fakerootlogs | |
2946 | def setrunqueueexec(self, rqexec): | ||
2947 | self.rqexec = rqexec | ||
2948 | 3322 | ||
2949 | def read(self): | 3323 | def read(self): |
2950 | for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]: | 3324 | for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]: |
@@ -2956,13 +3330,13 @@ class runQueuePipe(): | |||
2956 | 3330 | ||
2957 | start = len(self.queue) | 3331 | start = len(self.queue) |
2958 | try: | 3332 | try: |
2959 | self.queue = self.queue + (self.input.read(102400) or b"") | 3333 | self.queue.extend(self.input.read(512 * 1024) or b"") |
2960 | except (OSError, IOError) as e: | 3334 | except (OSError, IOError) as e: |
2961 | if e.errno != errno.EAGAIN: | 3335 | if e.errno != errno.EAGAIN: |
2962 | raise | 3336 | raise |
2963 | end = len(self.queue) | 3337 | end = len(self.queue) |
2964 | found = True | 3338 | found = True |
2965 | while found and len(self.queue): | 3339 | while found and self.queue: |
2966 | found = False | 3340 | found = False |
2967 | index = self.queue.find(b"</event>") | 3341 | index = self.queue.find(b"</event>") |
2968 | while index != -1 and self.queue.startswith(b"<event>"): | 3342 | while index != -1 and self.queue.startswith(b"<event>"): |
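Two buffering changes land in read() above: larger 512 KiB reads, and a bytearray in place of an immutable bytes object. Concatenating bytes copies the whole accumulated buffer on every read, while bytearray.extend grows in place. A quick self-contained comparison (chunk sizes invented):

    import time

    chunks = [b"x" * 4096] * 1000

    buf = bytearray()
    t0 = time.monotonic()
    for c in chunks:
        buf.extend(c)            # amortised O(1) append
    t1 = time.monotonic()

    buf2 = b""
    for c in chunks:
        buf2 = buf2 + c          # copies everything accumulated so far
    t2 = time.monotonic()

    print("bytearray: %.4fs, bytes: %.4fs" % (t1 - t0, t2 - t1))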
@@ -2987,7 +3361,11 @@ class runQueuePipe(): | |||
2987 | task, status = pickle.loads(self.queue[10:index]) | 3361 | task, status = pickle.loads(self.queue[10:index]) |
2988 | except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e: | 3362 | except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e: |
2989 | bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index])) | 3363 | bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index])) |
2990 | self.rqexec.runqueue_process_waitpid(task, status) | 3364 | (_, _, _, taskfn) = split_tid_mcfn(task) |
3365 | fakerootlog = None | ||
3366 | if self.fakerootlogs and taskfn and taskfn in self.fakerootlogs: | ||
3367 | fakerootlog = self.fakerootlogs[taskfn] | ||
3368 | self.rqexec.runqueue_process_waitpid(task, status, fakerootlog=fakerootlog) | ||
2991 | found = True | 3369 | found = True |
2992 | self.queue = self.queue[index+11:] | 3370 | self.queue = self.queue[index+11:] |
2993 | index = self.queue.find(b"</exitcode>") | 3371 | index = self.queue.find(b"</exitcode>") |
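The framing protocol itself is unchanged: the buffer is scanned for complete <event>...</event> records, each payload is unpickled, and any trailing partial record is kept for the next read. A standalone toy of that loop:

    import pickle

    buf = bytearray()
    buf.extend(b"<event>" + pickle.dumps({"msg": "hello"}) + b"</event>")
    buf.extend(b"<event>")               # partial record, must be kept

    while True:
        index = buf.find(b"</event>")
        if index == -1 or not buf.startswith(b"<event>"):
            break
        print(pickle.loads(bytes(buf[len(b"<event>"):index])))  # {'msg': 'hello'}
        buf = buf[index + len(b"</event>"):]

    print(bytes(buf))                    # b'<event>' carried over to next read()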
@@ -2996,16 +3374,16 @@ class runQueuePipe(): | |||
2996 | def close(self): | 3374 | def close(self): |
2997 | while self.read(): | 3375 | while self.read(): |
2998 | continue | 3376 | continue |
2999 | if len(self.queue) > 0: | 3377 | if self.queue: |
3000 | print("Warning, worker left partial message: %s" % self.queue) | 3378 | print("Warning, worker left partial message: %s" % self.queue) |
3001 | self.input.close() | 3379 | self.input.close() |
3002 | 3380 | ||
3003 | def get_setscene_enforce_whitelist(d, targets): | 3381 | def get_setscene_enforce_ignore_tasks(d, targets): |
3004 | if d.getVar('BB_SETSCENE_ENFORCE') != '1': | 3382 | if d.getVar('BB_SETSCENE_ENFORCE') != '1': |
3005 | return None | 3383 | return None |
3006 | whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split() | 3384 | ignore_tasks = (d.getVar("BB_SETSCENE_ENFORCE_IGNORE_TASKS") or "").split() |
3007 | outlist = [] | 3385 | outlist = [] |
3008 | for item in whitelist[:]: | 3386 | for item in ignore_tasks[:]: |
3009 | if item.startswith('%:'): | 3387 | if item.startswith('%:'): |
3010 | for (mc, target, task, fn) in targets: | 3388 | for (mc, target, task, fn) in targets: |
3011 | outlist.append(target + ':' + item.split(':')[1]) | 3389 | outlist.append(target + ':' + item.split(':')[1]) |
@@ -3013,12 +3391,12 @@ def get_setscene_enforce_whitelist(d, targets): | |||
3013 | outlist.append(item) | 3391 | outlist.append(item) |
3014 | return outlist | 3392 | return outlist |
3015 | 3393 | ||
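The '%:' prefix in the renamed BB_SETSCENE_ENFORCE_IGNORE_TASKS variable expands to one entry per requested build target. The expansion in isolation (target tuple invented):

    targets = [("", "core-image-minimal", "do_build", "/r/img.bb")]
    ignore_tasks = ["%:do_patch", "glibc:do_configure"]

    outlist = []
    for item in ignore_tasks:
        if item.startswith('%:'):
            for (mc, target, task, fn) in targets:
                outlist.append(target + ':' + item.split(':')[1])
        else:
            outlist.append(item)

    print(outlist)   # ['core-image-minimal:do_patch', 'glibc:do_configure']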
3016 | def check_setscene_enforce_whitelist(pn, taskname, whitelist): | 3394 | def check_setscene_enforce_ignore_tasks(pn, taskname, ignore_tasks): |
3017 | import fnmatch | 3395 | import fnmatch |
3018 | if whitelist is not None: | 3396 | if ignore_tasks is not None: |
3019 | item = '%s:%s' % (pn, taskname) | 3397 | item = '%s:%s' % (pn, taskname) |
3020 | for whitelist_item in whitelist: | 3398 | for ignore_task in ignore_tasks: |
3021 | if fnmatch.fnmatch(item, whitelist_item): | 3399 | if fnmatch.fnmatch(item, ignore_task): |
3022 | return True | 3400 | return True |
3023 | return False | 3401 | return False |
3024 | return True | 3402 | return True |
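The enforcement check returns True when a task is allowed to execute: either enforcement is off (no list) or 'pn:taskname' matches an fnmatch pattern in the list. An equivalent standalone form of the same logic:

    import fnmatch

    def is_allowed(pn, taskname, ignore_tasks):
        if ignore_tasks is None:
            return True                      # enforcement disabled
        item = '%s:%s' % (pn, taskname)
        return any(fnmatch.fnmatch(item, pat) for pat in ignore_tasks)

    print(is_allowed("glibc", "do_configure", ["glibc:do_*"]))   # True
    print(is_allowed("glibc", "do_compile", ["busybox:*"]))      # False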