Diffstat (limited to 'bitbake/lib/bb/cooker.py')
 -rw-r--r--  bitbake/lib/bb/cooker.py  904
 1 file changed, 505 insertions(+), 399 deletions(-)
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index f4ab797edf..1810bcc604 100644
--- a/bitbake/lib/bb/cooker.py
+++ b/bitbake/lib/bb/cooker.py
@@ -8,22 +8,20 @@
 #
 # SPDX-License-Identifier: GPL-2.0-only
 #
-
+import enum
 import sys, os, glob, os.path, re, time
 import itertools
 import logging
 import multiprocessing
-import sre_constants
 import threading
 from io import StringIO, UnsupportedOperation
 from contextlib import closing
 from collections import defaultdict, namedtuple
-import bb, bb.exceptions, bb.command
+import bb, bb.command
 from bb import utils, data, parse, event, cache, providers, taskdata, runqueue, build
 import queue
 import signal
 import prserv.serv
-import pyinotify
 import json
 import pickle
 import codecs
@@ -50,16 +48,15 @@ class CollectionError(bb.BBHandledException):
     Exception raised when layer configuration is incorrect
     """
 
-class state:
-    initial, parsing, running, shutdown, forceshutdown, stopped, error = list(range(7))
 
-    @classmethod
-    def get_name(cls, code):
-        for name in dir(cls):
-            value = getattr(cls, name)
-            if type(value) == type(cls.initial) and value == code:
-                return name
-        raise ValueError("Invalid status code: %s" % code)
+class State(enum.Enum):
+    INITIAL = 0,
+    PARSING = 1,
+    RUNNING = 2,
+    SHUTDOWN = 3,
+    FORCE_SHUTDOWN = 4,
+    STOPPED = 5,
+    ERROR = 6
 
 
 class SkippedPackage:
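The State enum above replaces the ad-hoc integer class: members now compare by identity, and the reflective get_name() helper disappears because enum members carry a .name attribute. Note that the trailing commas give each member a one-element tuple as its value, which is harmless here since the values are only ever compared against each other. A minimal standalone sketch of the idiom (not part of the patch):

    import enum

    class State(enum.Enum):
        INITIAL = 0,          # trailing comma: the value is the tuple (0,)
        RUNNING = 2,

    s = State.INITIAL
    assert s is State.INITIAL                # members compare by identity
    assert s != State.RUNNING
    assert State.INITIAL.name == "INITIAL"   # .name replaces get_name()
    assert State.INITIAL.value == (0,)       # the tuple-valued quirk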
@@ -81,7 +78,7 @@ class SkippedPackage:
 
 
 class CookerFeatures(object):
-    _feature_list = [HOB_EXTRA_CACHES, BASEDATASTORE_TRACKING, SEND_SANITYEVENTS] = list(range(3))
+    _feature_list = [HOB_EXTRA_CACHES, BASEDATASTORE_TRACKING, SEND_SANITYEVENTS, RECIPE_SIGGEN_INFO] = list(range(4))
 
     def __init__(self):
         self._features=set()
@@ -104,12 +101,15 @@ class CookerFeatures(object):
 
 class EventWriter:
     def __init__(self, cooker, eventfile):
-        self.file_inited = None
         self.cooker = cooker
         self.eventfile = eventfile
         self.event_queue = []
 
-    def write_event(self, event):
+    def write_variables(self):
+        with open(self.eventfile, "a") as f:
+            f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])}))
+
+    def send(self, event):
         with open(self.eventfile, "a") as f:
             try:
                 str_event = codecs.encode(pickle.dumps(event), 'base64').decode('utf-8')
@@ -119,28 +119,6 @@ class EventWriter:
                 import traceback
                 print(err, traceback.format_exc())
 
-    def send(self, event):
-        if self.file_inited:
-            # we have the file, just write the event
-            self.write_event(event)
-        else:
-            # init on bb.event.BuildStarted
-            name = "%s.%s" % (event.__module__, event.__class__.__name__)
-            if name in ("bb.event.BuildStarted", "bb.cooker.CookerExit"):
-                with open(self.eventfile, "w") as f:
-                    f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])}))
-
-                self.file_inited = True
-
-                # write pending events
-                for evt in self.event_queue:
-                    self.write_event(evt)
-
-                # also write the current event
-                self.write_event(event)
-            else:
-                # queue all events until the file is inited
-                self.event_queue.append(event)
 
 #============================================================================#
 # BBCooker
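For reference, EventWriter.send() stores each event as a base64-encoded pickle appended to the event file, and write_variables() (split out above) dumps the variable metadata that the old send() used to write lazily on the first BuildStarted event. A round-trip sketch of the encoding, using a stand-in event class rather than a real bb.event:

    import codecs
    import pickle

    class DummyEvent:                  # stand-in; real events are bb.event classes
        msg = "example"

    # encode the way EventWriter.send() does
    line = codecs.encode(pickle.dumps(DummyEvent()), 'base64').decode('utf-8')

    # a reader (e.g. a UI replaying the log) reverses the two steps
    event = pickle.loads(codecs.decode(line.encode('utf-8'), 'base64'))
    assert event.msg == "example"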
@@ -150,43 +128,34 @@ class BBCooker:
     Manages one bitbake build run
     """
 
-    def __init__(self, featureSet=None, idleCallBackRegister=None):
+    def __init__(self, featureSet=None, server=None):
         self.recipecaches = None
+        self.baseconfig_valid = False
+        self.parsecache_valid = False
         self.eventlog = None
-        self.skiplist = {}
+        # The skiplists, one per multiconfig
+        self.skiplist_by_mc = defaultdict(dict)
         self.featureset = CookerFeatures()
         if featureSet:
             for f in featureSet:
                 self.featureset.setFeature(f)
 
+        self.orig_syspath = sys.path.copy()
+        self.orig_sysmodules = [*sys.modules]
+
         self.configuration = bb.cookerdata.CookerConfiguration()
 
-        self.idleCallBackRegister = idleCallBackRegister
+        self.process_server = server
+        self.idleCallBackRegister = None
+        self.waitIdle = None
+        if server:
+            self.idleCallBackRegister = server.register_idle_function
+            self.waitIdle = server.wait_for_idle
 
         bb.debug(1, "BBCooker starting %s" % time.time())
-        sys.stdout.flush()
-
-        self.configwatcher = pyinotify.WatchManager()
-        bb.debug(1, "BBCooker pyinotify1 %s" % time.time())
-        sys.stdout.flush()
-
-        self.configwatcher.bbseen = set()
-        self.configwatcher.bbwatchedfiles = set()
-        self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications)
-        bb.debug(1, "BBCooker pyinotify2 %s" % time.time())
-        sys.stdout.flush()
-        self.watchmask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE | pyinotify.IN_DELETE | \
-                         pyinotify.IN_DELETE_SELF | pyinotify.IN_MODIFY | pyinotify.IN_MOVE_SELF | \
-                         pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO
-        self.watcher = pyinotify.WatchManager()
-        bb.debug(1, "BBCooker pyinotify3 %s" % time.time())
-        sys.stdout.flush()
-        self.watcher.bbseen = set()
-        self.watcher.bbwatchedfiles = set()
-        self.notifier = pyinotify.Notifier(self.watcher, self.notifications)
-
-        bb.debug(1, "BBCooker pyinotify complete %s" % time.time())
-        sys.stdout.flush()
+
+        self.configwatched = {}
+        self.parsewatched = {}
 
         # If being called by something like tinfoil, we need to clean cached data
         # which may now be invalid
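This hunk is the core of the change: the two pyinotify WatchManager/Notifier pairs and their watch bookkeeping are replaced by two plain dictionaries, configwatched and parsewatched, mapping each watched file to the mtime it had when it was parsed. Staleness is then detected by polling those mtimes on demand (see revalidateCaches() later in this diff) rather than by reacting to kernel inotify events, which drops the pyinotify dependency along with the watch-limit (ENOSPC) failure mode. A minimal sketch of the scheme, with a hypothetical check_mtime standing in for bb.parse.check_mtime:

    import os

    configwatched = {}      # file -> mtime recorded at parse time

    def add_filewatch(deps, watcher):
        for f, mtime in deps:       # deps are (file, mtime) pairs
            watcher[f] = mtime

    def check_mtime(f, mtime):
        # hypothetical stand-in for bb.parse.check_mtime()
        try:
            return os.stat(f).st_mtime == mtime
        except OSError:
            return mtime is None    # file vanished or never existed

    def cache_is_valid(watcher):
        return all(check_mtime(f, m) for f, m in watcher.items())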
@@ -197,14 +166,6 @@
         self.hashserv = None
         self.hashservaddr = None
 
-        self.inotify_modified_files = []
-
-        def _process_inotify_updates(server, cooker, abort):
-            cooker.process_inotify_updates()
-            return 1.0
-
-        self.idleCallBackRegister(_process_inotify_updates, self)
-
         # TOSTOP must not be set or our children will hang when they output
         try:
             fd = sys.stdout.fileno()
@@ -218,8 +179,8 @@
         except UnsupportedOperation:
             pass
 
-        self.command = bb.command.Command(self)
-        self.state = state.initial
+        self.command = bb.command.Command(self, self.process_server)
+        self.state = State.INITIAL
 
         self.parser = None
 
@@ -228,108 +189,68 @@
         signal.signal(signal.SIGHUP, self.sigterm_exception)
 
         bb.debug(1, "BBCooker startup complete %s" % time.time())
-        sys.stdout.flush()
 
     def init_configdata(self):
         if not hasattr(self, "data"):
             self.initConfigurationData()
             bb.debug(1, "BBCooker parsed base configuration %s" % time.time())
-            sys.stdout.flush()
             self.handlePRServ()
 
-    def process_inotify_updates(self):
-        for n in [self.confignotifier, self.notifier]:
-            if n.check_events(timeout=0):
-                # read notified events and enqeue them
-                n.read_events()
-                n.process_events()
+    def _baseconfig_set(self, value):
+        if value and not self.baseconfig_valid:
+            bb.server.process.serverlog("Base config valid")
+        elif not value and self.baseconfig_valid:
+            bb.server.process.serverlog("Base config invalidated")
+        self.baseconfig_valid = value
 
-    def config_notifications(self, event):
-        if event.maskname == "IN_Q_OVERFLOW":
-            bb.warn("inotify event queue overflowed, invalidating caches.")
-            self.parsecache_valid = False
-            self.baseconfig_valid = False
-            bb.parse.clear_cache()
-            return
-        if not event.pathname in self.configwatcher.bbwatchedfiles:
-            return
-        if not event.pathname in self.inotify_modified_files:
-            self.inotify_modified_files.append(event.pathname)
-        self.baseconfig_valid = False
-
-    def notifications(self, event):
-        if event.maskname == "IN_Q_OVERFLOW":
-            bb.warn("inotify event queue overflowed, invalidating caches.")
-            self.parsecache_valid = False
-            bb.parse.clear_cache()
-            return
-        if event.pathname.endswith("bitbake-cookerdaemon.log") \
-                or event.pathname.endswith("bitbake.lock"):
-            return
-        if not event.pathname in self.inotify_modified_files:
-            self.inotify_modified_files.append(event.pathname)
-        self.parsecache_valid = False
+    def _parsecache_set(self, value):
+        if value and not self.parsecache_valid:
+            bb.server.process.serverlog("Parse cache valid")
+        elif not value and self.parsecache_valid:
+            bb.server.process.serverlog("Parse cache invalidated")
+        self.parsecache_valid = value
 
-    def add_filewatch(self, deps, watcher=None, dirs=False):
-        if not watcher:
-            watcher = self.watcher
+    def add_filewatch(self, deps, configwatcher=False):
+        if configwatcher:
+            watcher = self.configwatched
+        else:
+            watcher = self.parsewatched
         for i in deps:
-            watcher.bbwatchedfiles.add(i[0])
-            if dirs:
-                f = i[0]
-            else:
-                f = os.path.dirname(i[0])
-            if f in watcher.bbseen:
-                continue
-            watcher.bbseen.add(f)
-            watchtarget = None
-            while True:
-                # We try and add watches for files that don't exist but if they did, would influence
-                # the parser. The parent directory of these files may not exist, in which case we need
-                # to watch any parent that does exist for changes.
-                try:
-                    watcher.add_watch(f, self.watchmask, quiet=False)
-                    if watchtarget:
-                        watcher.bbwatchedfiles.add(watchtarget)
-                    break
-                except pyinotify.WatchManagerError as e:
-                    if 'ENOENT' in str(e):
-                        watchtarget = f
-                        f = os.path.dirname(f)
-                        if f in watcher.bbseen:
-                            break
-                        watcher.bbseen.add(f)
-                        continue
-                    if 'ENOSPC' in str(e):
-                        providerlog.error("No space left on device or exceeds fs.inotify.max_user_watches?")
-                        providerlog.error("To check max_user_watches: sysctl -n fs.inotify.max_user_watches.")
-                        providerlog.error("To modify max_user_watches: sysctl -n -w fs.inotify.max_user_watches=<value>.")
-                        providerlog.error("Root privilege is required to modify max_user_watches.")
-                    raise
+            f = i[0]
+            mtime = i[1]
+            watcher[f] = mtime
 
     def sigterm_exception(self, signum, stackframe):
         if signum == signal.SIGTERM:
             bb.warn("Cooker received SIGTERM, shutting down...")
         elif signum == signal.SIGHUP:
             bb.warn("Cooker received SIGHUP, shutting down...")
-        self.state = state.forceshutdown
+        self.state = State.FORCE_SHUTDOWN
+        bb.event._should_exit.set()
 
     def setFeatures(self, features):
         # we only accept a new feature set if we're in state initial, so we can reset without problems
-        if not self.state in [state.initial, state.shutdown, state.forceshutdown, state.stopped, state.error]:
+        if not self.state in [State.INITIAL, State.SHUTDOWN, State.FORCE_SHUTDOWN, State.STOPPED, State.ERROR]:
            raise Exception("Illegal state for feature set change")
         original_featureset = list(self.featureset)
         for feature in features:
             self.featureset.setFeature(feature)
         bb.debug(1, "Features set %s (was %s)" % (original_featureset, list(self.featureset)))
-        if (original_featureset != list(self.featureset)) and self.state != state.error and hasattr(self, "data"):
+        if (original_featureset != list(self.featureset)) and self.state != State.ERROR and hasattr(self, "data"):
             self.reset()
 
     def initConfigurationData(self):
-
-        self.state = state.initial
+        self.state = State.INITIAL
         self.caches_array = []
 
+        sys.path = self.orig_syspath.copy()
+        for mod in [*sys.modules]:
+            if mod not in self.orig_sysmodules:
+                del sys.modules[mod]
+
+        self.configwatched = {}
+
         # Need to preserve BB_CONSOLELOG over resets
         consolelog = None
         if hasattr(self, "data"):
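sigterm_exception() now also sets bb.event._should_exit, which (assuming it is a module-level threading.Event, as its use here suggests) gives long-running loops a single object to wait on instead of every loop having to poll cooker.state. A standalone sketch of that signal-to-event pattern:

    import signal
    import threading

    _should_exit = threading.Event()     # analogous to bb.event._should_exit

    def sigterm_exception(signum, stackframe):
        print("received SIGTERM, shutting down...")
        _should_exit.set()               # wakes anything waiting on the event

    signal.signal(signal.SIGTERM, sigterm_exception)

    # a worker loop can now exit promptly between units of work:
    while not _should_exit.wait(timeout=1.0):
        pass  # do one unit of work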
@@ -338,12 +259,12 @@
         if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset:
             self.enableDataTracking()
 
-        all_extra_cache_names = []
+        caches_name_array = ['bb.cache:CoreRecipeInfo']
         # We hardcode all known cache types in a single place, here.
         if CookerFeatures.HOB_EXTRA_CACHES in self.featureset:
-            all_extra_cache_names.append("bb.cache_extra:HobRecipeInfo")
-
-        caches_name_array = ['bb.cache:CoreRecipeInfo'] + all_extra_cache_names
+            caches_name_array.append("bb.cache_extra:HobRecipeInfo")
+        if CookerFeatures.RECIPE_SIGGEN_INFO in self.featureset:
+            caches_name_array.append("bb.cache:SiggenRecipeInfo")
 
         # At least CoreRecipeInfo will be loaded, so caches_array will never be empty!
         # This is the entry point, no further check needed!
@@ -359,9 +280,12 @@
         self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False)
         self.databuilder.parseBaseConfiguration()
         self.data = self.databuilder.data
-        self.data_hash = self.databuilder.data_hash
         self.extraconfigdata = {}
 
+        eventlog = self.data.getVar("BB_DEFAULT_EVENTLOG")
+        if not self.configuration.writeeventlog and eventlog:
+            self.setupEventLog(eventlog)
+
         if consolelog:
             self.data.setVar("BB_CONSOLELOG", consolelog)
 
@@ -371,31 +295,48 @@
             self.disableDataTracking()
 
         for mc in self.databuilder.mcdata.values():
-            mc.renameVar("__depends", "__base_depends")
-            self.add_filewatch(mc.getVar("__base_depends", False), self.configwatcher)
+            self.add_filewatch(mc.getVar("__base_depends", False), configwatcher=True)
 
-        self.baseconfig_valid = True
-        self.parsecache_valid = False
+        self._baseconfig_set(True)
+        self._parsecache_set(False)
 
     def handlePRServ(self):
         # Setup a PR Server based on the new configuration
         try:
             self.prhost = prserv.serv.auto_start(self.data)
         except prserv.serv.PRServiceConfigError as e:
-            bb.fatal("Unable to start PR Server, exitting")
+            bb.fatal("Unable to start PR Server, exiting, check the bitbake-cookerdaemon.log")
 
         if self.data.getVar("BB_HASHSERVE") == "auto":
             # Create a new hash server bound to a unix domain socket
             if not self.hashserv:
                 dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db"
+                upstream = self.data.getVar("BB_HASHSERVE_UPSTREAM") or None
+                if upstream:
+                    try:
+                        with hashserv.create_client(upstream) as client:
+                            client.ping()
+                    except ImportError as e:
+                        bb.fatal("""Unable to use hash equivalence server at '%s' due to missing or incorrect python module:
+%s
+Please install the needed module on the build host, or use an environment containing it (e.g a pip venv or OpenEmbedded's buildtools tarball).
+You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in significantly longer build times as bitbake will be unable to reuse prebuilt sstate artefacts."""
+                                 % (upstream, repr(e)))
+                    except ConnectionError as e:
+                        bb.warn("Unable to connect to hash equivalence server at '%s', please correct or remove BB_HASHSERVE_UPSTREAM:\n%s"
+                                % (upstream, repr(e)))
+                        upstream = None
+
                 self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
-                self.hashserv = hashserv.create_server(self.hashservaddr, dbfile, sync=False)
-                self.hashserv.process = multiprocessing.Process(target=self.hashserv.serve_forever)
-                self.hashserv.process.start()
-                self.data.setVar("BB_HASHSERVE", self.hashservaddr)
-                self.databuilder.origdata.setVar("BB_HASHSERVE", self.hashservaddr)
-                self.databuilder.data.setVar("BB_HASHSERVE", self.hashservaddr)
+                self.hashserv = hashserv.create_server(
+                    self.hashservaddr,
+                    dbfile,
+                    sync=False,
+                    upstream=upstream,
+                )
+                self.hashserv.serve_as_process(log_level=logging.WARNING)
             for mc in self.databuilder.mcdata:
+                self.databuilder.mcorigdata[mc].setVar("BB_HASHSERVE", self.hashservaddr)
                 self.databuilder.mcdata[mc].setVar("BB_HASHSERVE", self.hashservaddr)
 
         bb.parse.init_parser(self.data)
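Before handing BB_HASHSERVE_UPSTREAM to the local hash server, the new code probes the upstream once, so a missing client module is fatal with a clear message while a connection failure merely degrades to local-only operation. The probe in isolation, reusing the calls shown above (the address is illustrative):

    import hashserv

    upstream = "hashserv.example.com:8686"   # stand-in for BB_HASHSERVE_UPSTREAM
    try:
        with hashserv.create_client(upstream) as client:
            client.ping()
    except ConnectionError as e:
        print("upstream unusable, continuing without it: %r" % e)
        upstream = None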
@@ -410,6 +351,34 @@
         if hasattr(self, "data"):
             self.data.disableTracking()
 
+    def revalidateCaches(self):
+        bb.parse.clear_cache()
+
+        clean = True
+        for f in self.configwatched:
+            if not bb.parse.check_mtime(f, self.configwatched[f]):
+                bb.server.process.serverlog("Found %s changed, invalid cache" % f)
+                self._baseconfig_set(False)
+                self._parsecache_set(False)
+                clean = False
+                break
+
+        if clean:
+            for f in self.parsewatched:
+                if not bb.parse.check_mtime(f, self.parsewatched[f]):
+                    bb.server.process.serverlog("Found %s changed, invalid cache" % f)
+                    self._parsecache_set(False)
+                    clean = False
+                    break
+
+        if not clean:
+            bb.parse.BBHandler.cached_statements = {}
+
+        # If writes were made to any of the data stores, we need to recalculate the data
+        # store cache
+        if hasattr(self, "databuilder"):
+            self.databuilder.calc_datastore_hashes()
+
     def parseConfiguration(self):
         self.updateCacheSync()
 
@@ -428,8 +397,24 @@
             self.recipecaches[mc] = bb.cache.CacheData(self.caches_array)
 
         self.handleCollections(self.data.getVar("BBFILE_COLLECTIONS"))
-
-        self.parsecache_valid = False
+        self.collections = {}
+        for mc in self.multiconfigs:
+            self.collections[mc] = CookerCollectFiles(self.bbfile_config_priorities, mc)
+
+        self._parsecache_set(False)
+
+    def setupEventLog(self, eventlog):
+        if self.eventlog and self.eventlog[0] != eventlog:
+            bb.event.unregister_UIHhandler(self.eventlog[1])
+            self.eventlog = None
+        if not self.eventlog or self.eventlog[0] != eventlog:
+            # we log all events to a file if so directed
+            # register the log file writer as UI Handler
+            if not os.path.exists(os.path.dirname(eventlog)):
+                bb.utils.mkdirhier(os.path.dirname(eventlog))
+            writer = EventWriter(self, eventlog)
+            EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
+            self.eventlog = (eventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer)), writer)
 
     def updateConfigOpts(self, options, environment, cmdline):
         self.ui_cmdline = cmdline
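setupEventLog() is factored out so that both the BB_DEFAULT_EVENTLOG path in initConfigurationData() and the --write-log path in updateConfigOpts() share one implementation, and the eventlog tuple now carries the writer itself as a third element so build-start code can call write_variables(). The namedtuple wrapper works because a registered UI handler is, in effect, anything with an .event attribute whose send() gets called per event; a sketch of that shape:

    from collections import namedtuple

    class Writer:                       # stand-in for EventWriter
        def send(self, event):
            print("logged:", event)

    EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
    handler = EventLogWriteHandler(Writer())
    handler.event.send("BuildStarted")  # what event delivery does with a handler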
@@ -450,14 +435,7 @@
                 setattr(self.configuration, o, options[o])
 
         if self.configuration.writeeventlog:
-            if self.eventlog and self.eventlog[0] != self.configuration.writeeventlog:
-                bb.event.unregister_UIHhandler(self.eventlog[1])
-            if not self.eventlog or self.eventlog[0] != self.configuration.writeeventlog:
-                # we log all events to a file if so directed
-                # register the log file writer as UI Handler
-                writer = EventWriter(self, self.configuration.writeeventlog)
-                EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
-                self.eventlog = (self.configuration.writeeventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer)))
+            self.setupEventLog(self.configuration.writeeventlog)
 
         bb.msg.loggerDefaultLogLevel = self.configuration.default_loglevel
         bb.msg.loggerDefaultDomains = self.configuration.debug_domains
@@ -487,37 +465,37 @@
         # Now update all the variables not in the datastore to match
         self.configuration.env = environment
 
+        self.revalidateCaches()
         if not clean:
             logger.debug("Base environment change, triggering reparse")
             self.reset()
 
-    def runCommands(self, server, data, abort):
-        """
-        Run any queued asynchronous command
-        This is done by the idle handler so it runs in true context rather than
-        tied to any UI.
-        """
-
-        return self.command.runAsyncCommand()
-
     def showVersions(self):
 
-        (latest_versions, preferred_versions) = self.findProviders()
+        (latest_versions, preferred_versions, required) = self.findProviders()
 
-        logger.plain("%-35s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version")
-        logger.plain("%-35s %25s %25s\n", "===========", "==============", "=================")
+        logger.plain("%-35s %25s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version", "Required Version")
+        logger.plain("%-35s %25s %25s %25s\n", "===========", "==============", "=================", "================")
 
         for p in sorted(self.recipecaches[''].pkg_pn):
-            pref = preferred_versions[p]
+            preferred = preferred_versions[p]
             latest = latest_versions[p]
+            requiredstr = ""
+            preferredstr = ""
+            if required[p]:
+                if preferred[0] is not None:
+                    requiredstr = preferred[0][0] + ":" + preferred[0][1] + '-' + preferred[0][2]
+                else:
+                    bb.fatal("REQUIRED_VERSION of package %s not available" % p)
+            else:
+                preferredstr = preferred[0][0] + ":" + preferred[0][1] + '-' + preferred[0][2]
 
-            prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2]
             lateststr = latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2]
 
-            if pref == latest:
-                prefstr = ""
+            if preferred == latest:
+                preferredstr = ""
 
-            logger.plain("%-35s %25s %25s", p, lateststr, prefstr)
+            logger.plain("%-35s %25s %25s %25s", p, lateststr, preferredstr, requiredstr)
 
     def showEnvironment(self, buildfile=None, pkgs_to_build=None):
         """
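showVersions() gains a fourth column: recipes pinned with REQUIRED_VERSION report the resolved version under 'Required Version' (and it is fatal for such a version to be unavailable), while everything else keeps using 'Preferred Version'. Each version is an (epoch, version, revision) triple rendered as epoch:version-revision, e.g. (values illustrative):

    preferred = (("1", "2.38", "r0"), "/path/to/recipe.bb")
    verstr = preferred[0][0] + ":" + preferred[0][1] + "-" + preferred[0][2]
    assert verstr == "1:2.38-r0"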
@@ -533,6 +511,8 @@
         if not orig_tracking:
             self.enableDataTracking()
             self.reset()
+            # reset() resets to the UI requested value so we have to redo this
+            self.enableDataTracking()
 
         def mc_base(p):
             if p.startswith('mc:'):
@@ -556,21 +536,21 @@
                 if pkgs_to_build[0] in set(ignore.split()):
                     bb.fatal("%s is in ASSUME_PROVIDED" % pkgs_to_build[0])
 
-                taskdata, runlist = self.buildTaskData(pkgs_to_build, None, self.configuration.abort, allowincomplete=True)
+                taskdata, runlist = self.buildTaskData(pkgs_to_build, None, self.configuration.halt, allowincomplete=True)
 
                 mc = runlist[0][0]
                 fn = runlist[0][3]
 
         if fn:
             try:
-                bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)
-                envdata = bb_caches[mc].loadDataFull(fn, self.collections[mc].get_file_appends(fn))
+                layername = self.collections[mc].calc_bbfile_priority(fn)[2]
+                envdata = self.databuilder.parseRecipe(fn, self.collections[mc].get_file_appends(fn), layername)
             except Exception as e:
                 parselog.exception("Unable to read %s", fn)
                 raise
         else:
             if not mc in self.databuilder.mcdata:
-                bb.fatal('Not multiconfig named "%s" found' % mc)
+                bb.fatal('No multiconfig named "%s" found' % mc)
             envdata = self.databuilder.mcdata[mc]
             data.expandKeys(envdata)
             parse.ast.runAnonFuncs(envdata)
@@ -585,7 +565,7 @@
             data.emit_env(env, envdata, True)
             logger.plain(env.getvalue())
 
-        # emit the metadata which isnt valid shell
+        # emit the metadata which isn't valid shell
         for e in sorted(envdata.keys()):
             if envdata.getVarFlag(e, 'func', False) and envdata.getVarFlag(e, 'python', False):
                 logger.plain("\npython %s () {\n%s}\n", e, envdata.getVar(e, False))
@@ -594,7 +574,7 @@
             self.disableDataTracking()
             self.reset()
 
-    def buildTaskData(self, pkgs_to_build, task, abort, allowincomplete=False):
+    def buildTaskData(self, pkgs_to_build, task, halt, allowincomplete=False):
         """
         Prepare a runqueue and taskdata object for iteration over pkgs_to_build
         """
@@ -641,8 +621,8 @@
         localdata = {}
 
         for mc in self.multiconfigs:
-            taskdata[mc] = bb.taskdata.TaskData(abort, skiplist=self.skiplist, allowincomplete=allowincomplete)
-            localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
+            taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist_by_mc[mc], allowincomplete=allowincomplete)
+            localdata[mc] = bb.data.createCopy(self.databuilder.mcdata[mc])
             bb.data.expandKeys(localdata[mc])
 
         current = 0
@@ -690,19 +670,18 @@
             taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
             mcdeps |= set(taskdata[mc].get_mcdepends())
         new = False
-        for mc in self.multiconfigs:
-            for k in mcdeps:
-                if k in seen:
-                    continue
-                l = k.split(':')
-                depmc = l[2]
-                if depmc not in self.multiconfigs:
-                    bb.fatal("Multiconfig dependency %s depends on nonexistent multiconfig configuration named configuration %s" % (k,depmc))
-                else:
-                    logger.debug("Adding providers for multiconfig dependency %s" % l[3])
-                    taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3])
-                    seen.add(k)
-                    new = True
+        for k in mcdeps:
+            if k in seen:
+                continue
+            l = k.split(':')
+            depmc = l[2]
+            if depmc not in self.multiconfigs:
+                bb.fatal("Multiconfig dependency %s depends on nonexistent multiconfig configuration named configuration %s" % (k,depmc))
+            else:
+                logger.debug("Adding providers for multiconfig dependency %s" % l[3])
+                taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3])
+            seen.add(k)
+            new = True
 
         for mc in self.multiconfigs:
             taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
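The rewritten loop iterates the accumulated mcdepends once instead of once per multiconfig, avoiding redundant add_provider() calls. Judging by the split indices used above, each entry is a colon-separated string whose third field is the target multiconfig and whose fourth is the recipe, e.g. (hypothetical entry):

    k = "mc:mc1:mc2:core-image-minimal:do_build"
    l = k.split(':')
    depmc, dep = l[2], l[3]
    assert (depmc, dep) == ("mc2", "core-image-minimal")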
@@ -710,14 +689,14 @@
         bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data)
         return taskdata, runlist
 
-    def prepareTreeData(self, pkgs_to_build, task):
+    def prepareTreeData(self, pkgs_to_build, task, halt=False):
         """
         Prepare a runqueue and taskdata object for iteration over pkgs_to_build
         """
 
-        # We set abort to False here to prevent unbuildable targets raising
+        # We set halt to False here to prevent unbuildable targets raising
         # an exception when we're just generating data
-        taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)
+        taskdata, runlist = self.buildTaskData(pkgs_to_build, task, halt, allowincomplete=True)
 
         return runlist, taskdata
 
@@ -731,7 +710,7 @@
         if not task.startswith("do_"):
             task = "do_%s" % task
 
-        runlist, taskdata = self.prepareTreeData(pkgs_to_build, task)
+        runlist, taskdata = self.prepareTreeData(pkgs_to_build, task, halt=True)
         rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
         rq.rqdata.prepare()
         return self.buildDependTree(rq, taskdata)
@@ -792,7 +771,9 @@
                 for dep in rq.rqdata.runtaskentries[tid].depends:
                     (depmc, depfn, _, deptaskfn) = bb.runqueue.split_tid_mcfn(dep)
                     deppn = self.recipecaches[depmc].pkg_fn[deptaskfn]
-                    depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, bb.runqueue.taskname_from_tid(dep)))
+                    if depmc:
+                        depmc = "mc:" + depmc + ":"
+                    depend_tree["tdepends"][dotname].append("%s%s.%s" % (depmc, deppn, bb.runqueue.taskname_from_tid(dep)))
                 if taskfn not in seen_fns:
                     seen_fns.append(taskfn)
                     packages = []
@@ -924,10 +905,11 @@
 
         depgraph = self.generateTaskDepTreeData(pkgs_to_build, task)
 
-        with open('pn-buildlist', 'w') as f:
-            for pn in depgraph["pn"]:
-                f.write(pn + "\n")
-        logger.info("PN build list saved to 'pn-buildlist'")
+        pns = depgraph["pn"].keys()
+        if pns:
+            with open('pn-buildlist', 'w') as f:
+                f.write("%s\n" % "\n".join(sorted(pns)))
+            logger.info("PN build list saved to 'pn-buildlist'")
 
         # Remove old format output files to ensure no confusion with stale data
         try:
@@ -961,7 +943,7 @@
         for mc in self.multiconfigs:
             # First get list of recipes, including skipped
             recipefns = list(self.recipecaches[mc].pkg_fn.keys())
-            recipefns.extend(self.skiplist.keys())
+            recipefns.extend(self.skiplist_by_mc[mc].keys())
 
             # Work out list of bbappends that have been applied
             applied_appends = []
@@ -980,13 +962,7 @@
                                '\n  '.join(appends_without_recipes[mc])))
 
         if msgs:
-            msg = "\n".join(msgs)
-            warn_only = self.databuilder.mcdata[mc].getVar("BB_DANGLINGAPPENDS_WARNONLY", \
-                    False) or "no"
-            if warn_only.lower() in ("1", "yes", "true"):
-                bb.warn(msg)
-            else:
-                bb.fatal(msg)
+            bb.fatal("\n".join(msgs))
 
     def handlePrefProviders(self):
 
@@ -1056,6 +1032,11 @@
         if matches:
             bb.event.fire(bb.event.FilesMatchingFound(filepattern, matches), self.data)
 
+    def testCookerCommandEvent(self, filepattern):
+        # Dummy command used by OEQA selftest to test tinfoil without IO
+        matches = ["A", "B"]
+        bb.event.fire(bb.event.FilesMatchingFound(filepattern, matches), self.data)
+
     def findProviders(self, mc=''):
         return bb.providers.findProviders(self.databuilder.mcdata[mc], self.recipecaches[mc], self.recipecaches[mc].pkg_pn)
 
@@ -1063,10 +1044,16 @@
         if pn in self.recipecaches[mc].providers:
             filenames = self.recipecaches[mc].providers[pn]
             eligible, foundUnique = bb.providers.filterProviders(filenames, pn, self.databuilder.mcdata[mc], self.recipecaches[mc])
-            filename = eligible[0]
+            if eligible is not None:
+                filename = eligible[0]
+            else:
+                filename = None
             return None, None, None, filename
         elif pn in self.recipecaches[mc].pkg_pn:
-            return bb.providers.findBestProvider(pn, self.databuilder.mcdata[mc], self.recipecaches[mc], self.recipecaches[mc].pkg_pn)
+            (latest, latest_f, preferred_ver, preferred_file, required) = bb.providers.findBestProvider(pn, self.databuilder.mcdata[mc], self.recipecaches[mc], self.recipecaches[mc].pkg_pn)
+            if required and preferred_file is None:
+                return None, None, None, None
+            return (latest, latest_f, preferred_ver, preferred_file)
         else:
             return None, None, None, None
 
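findBestProvider() now returns a fifth element indicating whether a REQUIRED_VERSION constraint was in play; this caller drops the flag again but maps "required and not found" to an all-None result instead of returning a half-valid tuple. The shape of the handling (values illustrative):

    latest, latest_f, preferred_ver, preferred_file, required = \
        (("1", "2.0", "r0"), "new.bb", ("1", "1.9", "r0"), None, True)
    if required and preferred_file is None:
        result = (None, None, None, None)   # REQUIRED_VERSION cannot be satisfied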
@@ -1211,15 +1198,15 @@
                     except bb.utils.VersionStringException as vse:
                         bb.fatal('Error parsing LAYERRECOMMENDS_%s: %s' % (c, str(vse)))
                     if not res:
-                        parselog.debug(3,"Layer '%s' recommends version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec, layerver)
+                        parselog.debug3("Layer '%s' recommends version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec, layerver)
                         continue
                 else:
-                    parselog.debug(3,"Layer '%s' recommends version %s of layer '%s', which exists in your configuration but does not specify a version. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec)
+                    parselog.debug3("Layer '%s' recommends version %s of layer '%s', which exists in your configuration but does not specify a version. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec)
                     continue
-                parselog.debug(3,"Layer '%s' recommends layer '%s', so we are adding it", c, rec)
+                parselog.debug3("Layer '%s' recommends layer '%s', so we are adding it", c, rec)
                 collection_depends[c].append(rec)
             else:
-                parselog.debug(3,"Layer '%s' recommends layer '%s', but this layer is not enabled in your configuration", c, rec)
+                parselog.debug3("Layer '%s' recommends layer '%s', but this layer is not enabled in your configuration", c, rec)
 
         # Recursively work out collection priorities based on dependencies
         def calc_layer_priority(collection):
@@ -1231,7 +1218,7 @@
                     if depprio > max_depprio:
                         max_depprio = depprio
                 max_depprio += 1
-                parselog.debug(1, "Calculated priority of layer %s as %d", collection, max_depprio)
+                parselog.debug("Calculated priority of layer %s as %d", collection, max_depprio)
                 collection_priorities[collection] = max_depprio
 
         # Calculate all layer priorities using calc_layer_priority and store in bbfile_config_priorities
@@ -1243,7 +1230,7 @@
                 errors = True
                 continue
             elif regex == "":
-                parselog.debug(1, "BBFILE_PATTERN_%s is empty" % c)
+                parselog.debug("BBFILE_PATTERN_%s is empty" % c)
                 cre = re.compile('^NULL$')
                 errors = False
             else:
@@ -1290,8 +1277,8 @@
         if bf.startswith("/") or bf.startswith("../"):
             bf = os.path.abspath(bf)
 
-        self.collections = {mc: CookerCollectFiles(self.bbfile_config_priorities, mc)}
-        filelist, masked, searchdirs = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
+        collections = {mc: CookerCollectFiles(self.bbfile_config_priorities, mc)}
+        filelist, masked, searchdirs = collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc])
         try:
             os.stat(bf)
             bf = os.path.abspath(bf)
@@ -1355,9 +1342,10 @@
         self.buildSetVars()
         self.reset_mtime_caches()
 
-        bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)
+        bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.databuilder.data_hash, self.caches_array)
 
-        infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn))
+        layername = self.collections[mc].calc_bbfile_priority(fn)[2]
+        infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn), layername)
         infos = dict(infos)
 
         fn = bb.cache.realfn2virtual(fn, cls, mc)
@@ -1383,14 +1371,16 @@
         self.recipecaches[mc].rundeps[fn] = defaultdict(list)
         self.recipecaches[mc].runrecs[fn] = defaultdict(list)
 
+        bb.parse.siggen.setup_datacache(self.recipecaches)
+
         # Invalidate task for target if force mode active
         if self.configuration.force:
             logger.verbose("Invalidate task %s, %s", task, fn)
-            bb.parse.siggen.invalidate_task(task, self.recipecaches[mc], fn)
+            bb.parse.siggen.invalidate_task(task, fn)
 
         # Setup taskdata structure
         taskdata = {}
-        taskdata[mc] = bb.taskdata.TaskData(self.configuration.abort)
+        taskdata[mc] = bb.taskdata.TaskData(self.configuration.halt)
         taskdata[mc].add_provider(self.databuilder.mcdata[mc], self.recipecaches[mc], item)
 
         if quietlog:
@@ -1400,21 +1390,24 @@
         buildname = self.databuilder.mcdata[mc].getVar("BUILDNAME")
         if fireevents:
             bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.databuilder.mcdata[mc])
+            if self.eventlog:
+                self.eventlog[2].write_variables()
+            bb.event.enable_heartbeat()
 
         # Execute the runqueue
         runlist = [[mc, item, task, fn]]
 
         rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
 
-        def buildFileIdle(server, rq, abort):
+        def buildFileIdle(server, rq, halt):
 
             msg = None
             interrupted = 0
-            if abort or self.state == state.forceshutdown:
+            if halt or self.state == State.FORCE_SHUTDOWN:
                 rq.finish_runqueue(True)
                 msg = "Forced shutdown"
                 interrupted = 2
-            elif self.state == state.shutdown:
+            elif self.state == State.SHUTDOWN:
                 rq.finish_runqueue(False)
                 msg = "Stopped build"
                 interrupted = 1
@@ -1425,41 +1418,71 @@
                 failures += len(exc.args)
                 retval = False
             except SystemExit as exc:
-                self.command.finishAsyncCommand(str(exc))
                 if quietlog:
                     bb.runqueue.logger.setLevel(rqloglevel)
-                return False
+                return bb.server.process.idleFinish(str(exc))
 
             if not retval:
                 if fireevents:
                     bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, item, failures, interrupted), self.databuilder.mcdata[mc])
-                self.command.finishAsyncCommand(msg)
+                    bb.event.disable_heartbeat()
                 # We trashed self.recipecaches above
-                self.parsecache_valid = False
+                self._parsecache_set(False)
                 self.configuration.limited_deps = False
                 bb.parse.siggen.reset(self.data)
                 if quietlog:
                     bb.runqueue.logger.setLevel(rqloglevel)
-                return False
+                return bb.server.process.idleFinish(msg)
             if retval is True:
                 return True
             return retval
 
         self.idleCallBackRegister(buildFileIdle, rq)
 
+    def getTaskSignatures(self, target, tasks):
+        sig = []
+        getAllTaskSignatures = False
+
+        if not tasks:
+            tasks = ["do_build"]
+            getAllTaskSignatures = True
+
+        for task in tasks:
+            taskdata, runlist = self.buildTaskData(target, task, self.configuration.halt)
+            rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
+            rq.rqdata.prepare()
+
+            for l in runlist:
+                mc, pn, taskname, fn = l
+
+                taskdep = rq.rqdata.dataCaches[mc].task_deps[fn]
+                for t in taskdep['tasks']:
+                    if t in taskdep['nostamp'] or "setscene" in t:
+                        continue
+                    tid = bb.runqueue.build_tid(mc, fn, t)
+
+                    if t in task or getAllTaskSignatures:
+                        try:
+                            sig.append([pn, t, rq.rqdata.get_task_unihash(tid)])
+                        except KeyError:
+                            sig.append(self.getTaskSignatures(target, [t])[0])
+
+        return sig
+
     def buildTargets(self, targets, task):
         """
         Attempt to build the targets specified
         """
 
-        def buildTargetsIdle(server, rq, abort):
+        def buildTargetsIdle(server, rq, halt):
             msg = None
             interrupted = 0
-            if abort or self.state == state.forceshutdown:
+            if halt or self.state == State.FORCE_SHUTDOWN:
+                bb.event._should_exit.set()
                 rq.finish_runqueue(True)
                 msg = "Forced shutdown"
                 interrupted = 2
-            elif self.state == state.shutdown:
+            elif self.state == State.SHUTDOWN:
                 rq.finish_runqueue(False)
                 msg = "Stopped build"
                 interrupted = 1
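getTaskSignatures(), added above, is the cooker-side backend for signature queries: it prepares a runqueue for the target, then reports the unihash of every real task (skipping nostamp and setscene tasks), recursing for tasks that fall outside the initial runlist. From a tinfoil client this would be driven roughly as follows (hypothetical usage; the command registration lives in bb.command):

    import bb.tinfoil

    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare()
        sigs = tinfoil.run_command("getTaskSignatures", ["quilt-native"], ["do_fetch"])
        for pn, taskname, unihash in sigs:
            print(pn, taskname, unihash)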
@@ -1470,16 +1493,16 @@
                 failures += len(exc.args)
                 retval = False
             except SystemExit as exc:
-                self.command.finishAsyncCommand(str(exc))
-                return False
+                return bb.server.process.idleFinish(str(exc))
 
             if not retval:
                 try:
                     for mc in self.multiconfigs:
                         bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, targets, failures, interrupted), self.databuilder.mcdata[mc])
                 finally:
-                    self.command.finishAsyncCommand(msg)
-                return False
+                    bb.event.disable_heartbeat()
+                return bb.server.process.idleFinish(msg)
+
             if retval is True:
                 return True
             return retval
@@ -1498,7 +1521,7 @@
 
         bb.event.fire(bb.event.BuildInit(packages), self.data)
 
-        taskdata, runlist = self.buildTaskData(targets, task, self.configuration.abort)
+        taskdata, runlist = self.buildTaskData(targets, task, self.configuration.halt)
 
         buildname = self.data.getVar("BUILDNAME", False)
 
@@ -1511,6 +1534,9 @@
 
         for mc in self.multiconfigs:
             bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.databuilder.mcdata[mc])
+        if self.eventlog:
+            self.eventlog[2].write_variables()
+        bb.event.enable_heartbeat()
 
         rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
         if 'universe' in targets:
@@ -1520,7 +1546,13 @@ class BBCooker: | |||
1520 | 1546 | ||
1521 | 1547 | ||
1522 | def getAllKeysWithFlags(self, flaglist): | 1548 | def getAllKeysWithFlags(self, flaglist): |
1549 | def dummy_autorev(d): | ||
1550 | return | ||
1551 | |||
1523 | dump = {} | 1552 | dump = {} |
1553 | # Horrible, but for now we need to avoid any side effects of autorev being called | ||
1554 | saved = bb.fetch2.get_autorev | ||
1555 | bb.fetch2.get_autorev = dummy_autorev | ||
1524 | for k in self.data.keys(): | 1556 | for k in self.data.keys(): |
1525 | try: | 1557 | try: |
1526 | expand = True | 1558 | expand = True |
@@ -1540,20 +1572,14 @@ class BBCooker: | |||
1540 | dump[k][d] = None | 1572 | dump[k][d] = None |
1541 | except Exception as e: | 1573 | except Exception as e: |
1542 | print(e) | 1574 | print(e) |
1575 | bb.fetch2.get_autorev = saved | ||
1543 | return dump | 1576 | return dump |
1544 | 1577 | ||
1545 | 1578 | ||
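The getAllKeysWithFlags() hunk above saves bb.fetch2.get_autorev, swaps in a dummy, and restores it by hand after the loop. A small context manager makes the same temporary monkey-patch exception-safe; this is an illustrative sketch (only the bb.fetch2.get_autorev name comes from the hunk, the helper itself is not bitbake API):

    import contextlib

    @contextlib.contextmanager
    def patched(obj, name, replacement):
        # Swap an attribute for the duration of the with-block and put it
        # back even if an exception escapes; the hunk above restores
        # manually, so an error between the swap and the restore would
        # leave the dummy function in place.
        saved = getattr(obj, name)
        setattr(obj, name, replacement)
        try:
            yield
        finally:
            setattr(obj, name, saved)

    # Hypothetical usage mirroring getAllKeysWithFlags():
    # with patched(bb.fetch2, "get_autorev", lambda d: None):
    #     ... walk self.data.keys() and build the dump ...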
1546 | def updateCacheSync(self): | 1579 | def updateCacheSync(self): |
1547 | if self.state == state.running: | 1580 | if self.state == State.RUNNING: |
1548 | return | 1581 | return |
1549 | 1582 | ||
1550 | # reload files for which we got notifications | ||
1551 | for p in self.inotify_modified_files: | ||
1552 | bb.parse.update_cache(p) | ||
1553 | if p in bb.parse.BBHandler.cached_statements: | ||
1554 | del bb.parse.BBHandler.cached_statements[p] | ||
1555 | self.inotify_modified_files = [] | ||
1556 | |||
1557 | if not self.baseconfig_valid: | 1583 | if not self.baseconfig_valid: |
1558 | logger.debug("Reloading base configuration data") | 1584 | logger.debug("Reloading base configuration data") |
1559 | self.initConfigurationData() | 1585 | self.initConfigurationData() |
@@ -1561,19 +1587,22 @@ class BBCooker: | |||
1561 | 1587 | ||
1562 | # This is called for all async commands when self.state != running | 1588 | # This is called for all async commands when self.state != running |
1563 | def updateCache(self): | 1589 | def updateCache(self): |
1564 | if self.state == state.running: | 1590 | if self.state == State.RUNNING: |
1565 | return | 1591 | return |
1566 | 1592 | ||
1567 | if self.state in (state.shutdown, state.forceshutdown, state.error): | 1593 | if self.state in (State.SHUTDOWN, State.FORCE_SHUTDOWN, State.ERROR): |
1568 | if hasattr(self.parser, 'shutdown'): | 1594 | if hasattr(self.parser, 'shutdown'): |
1569 | self.parser.shutdown(clean=False, force = True) | 1595 | self.parser.shutdown(clean=False) |
1570 | self.parser.final_cleanup() | 1596 | self.parser.final_cleanup() |
1571 | raise bb.BBHandledException() | 1597 | raise bb.BBHandledException() |
1572 | 1598 | ||
1573 | if self.state != state.parsing: | 1599 | if self.state != State.PARSING: |
1574 | self.updateCacheSync() | 1600 | self.updateCacheSync() |
1575 | 1601 | ||
1576 | if self.state != state.parsing and not self.parsecache_valid: | 1602 | if self.state != State.PARSING and not self.parsecache_valid: |
1603 | bb.server.process.serverlog("Parsing started") | ||
1604 | self.parsewatched = {} | ||
1605 | |||
1577 | bb.parse.siggen.reset(self.data) | 1606 | bb.parse.siggen.reset(self.data) |
1578 | self.parseConfiguration () | 1607 | self.parseConfiguration () |
1579 | if CookerFeatures.SEND_SANITYEVENTS in self.featureset: | 1608 | if CookerFeatures.SEND_SANITYEVENTS in self.featureset: |
@@ -1587,37 +1616,35 @@ class BBCooker: | |||
1587 | for dep in self.configuration.extra_assume_provided: | 1616 | for dep in self.configuration.extra_assume_provided: |
1588 | self.recipecaches[mc].ignored_dependencies.add(dep) | 1617 | self.recipecaches[mc].ignored_dependencies.add(dep) |
1589 | 1618 | ||
1590 | self.collections = {} | ||
1591 | |||
1592 | mcfilelist = {} | 1619 | mcfilelist = {} |
1593 | total_masked = 0 | 1620 | total_masked = 0 |
1594 | searchdirs = set() | 1621 | searchdirs = set() |
1595 | for mc in self.multiconfigs: | 1622 | for mc in self.multiconfigs: |
1596 | self.collections[mc] = CookerCollectFiles(self.bbfile_config_priorities, mc) | ||
1597 | (filelist, masked, search) = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc]) | 1623 | (filelist, masked, search) = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc]) |
1598 | 1624 | ||
1599 | mcfilelist[mc] = filelist | 1625 | mcfilelist[mc] = filelist |
1600 | total_masked += masked | 1626 | total_masked += masked |
1601 | searchdirs |= set(search) | 1627 | searchdirs |= set(search) |
1602 | 1628 | ||
1603 | # Add inotify watches for directories searched for bb/bbappend files | 1629 | # Add mtimes for directories searched for bb/bbappend files |
1604 | for dirent in searchdirs: | 1630 | for dirent in searchdirs: |
1605 | self.add_filewatch([[dirent]], dirs=True) | 1631 | self.add_filewatch([(dirent, bb.parse.cached_mtime_noerror(dirent))]) |
1606 | 1632 | ||
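With pyinotify gone, the add_filewatch() call above records (path, mtime) pairs instead of registering watches, and cache validity is later judged by re-checking those mtimes. A minimal sketch of that idea; mtime_noerror() below imitates bb.parse.cached_mtime_noerror(), and watches_changed() is a hypothetical helper, not cooker code:

    import os

    def mtime_noerror(path):
        # Like bb.parse.cached_mtime_noerror(): return 0 for a missing
        # path instead of raising, so a deleted directory also counts
        # as a change.
        try:
            return os.stat(path).st_mtime
        except OSError:
            return 0

    def watches_changed(watched):
        # watched: iterable of (path, recorded_mtime) pairs, as built by
        # the add_filewatch() loop above. Any mismatch means the parse
        # cache can no longer be trusted.
        return any(mtime_noerror(p) != m for p, m in watched)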
1607 | self.parser = CookerParser(self, mcfilelist, total_masked) | 1633 | self.parser = CookerParser(self, mcfilelist, total_masked) |
1608 | self.parsecache_valid = True | 1634 | self._parsecache_set(True) |
1609 | 1635 | ||
1610 | self.state = state.parsing | 1636 | self.state = State.PARSING |
1611 | 1637 | ||
1612 | if not self.parser.parse_next(): | 1638 | if not self.parser.parse_next(): |
1613 | collectlog.debug(1, "parsing complete") | 1639 | bb.server.process.serverlog("Parsing completed") |
1640 | collectlog.debug("parsing complete") | ||
1614 | if self.parser.error: | 1641 | if self.parser.error: |
1615 | raise bb.BBHandledException() | 1642 | raise bb.BBHandledException() |
1616 | self.show_appends_with_no_recipes() | 1643 | self.show_appends_with_no_recipes() |
1617 | self.handlePrefProviders() | 1644 | self.handlePrefProviders() |
1618 | for mc in self.multiconfigs: | 1645 | for mc in self.multiconfigs: |
1619 | self.recipecaches[mc].bbfile_priority = self.collections[mc].collection_priorities(self.recipecaches[mc].pkg_fn, self.parser.mcfilelist[mc], self.data) | 1646 | self.recipecaches[mc].bbfile_priority = self.collections[mc].collection_priorities(self.recipecaches[mc].pkg_fn, self.parser.mcfilelist[mc], self.data) |
1620 | self.state = state.running | 1647 | self.state = State.RUNNING |
1621 | 1648 | ||
1622 | # Send an event listing all stamps reachable after parsing | 1649 | # Send an event listing all stamps reachable after parsing |
1623 | # which the metadata may use to clean up stale data | 1650 | # which the metadata may use to clean up stale data |
@@ -1633,7 +1660,7 @@ class BBCooker: | |||
1633 | # Return a copy, don't modify the original | 1660 | # Return a copy, don't modify the original |
1634 | pkgs_to_build = pkgs_to_build[:] | 1661 | pkgs_to_build = pkgs_to_build[:] |
1635 | 1662 | ||
1636 | if len(pkgs_to_build) == 0: | 1663 | if not pkgs_to_build: |
1637 | raise NothingToBuild | 1664 | raise NothingToBuild |
1638 | 1665 | ||
1639 | ignore = (self.data.getVar("ASSUME_PROVIDED") or "").split() | 1666 | ignore = (self.data.getVar("ASSUME_PROVIDED") or "").split() |
@@ -1655,7 +1682,7 @@ class BBCooker: | |||
1655 | 1682 | ||
1656 | if 'universe' in pkgs_to_build: | 1683 | if 'universe' in pkgs_to_build: |
1657 | parselog.verbnote("The \"universe\" target is only intended for testing and may produce errors.") | 1684 | parselog.verbnote("The \"universe\" target is only intended for testing and may produce errors.") |
1658 | parselog.debug(1, "collating packages for \"universe\"") | 1685 | parselog.debug("collating packages for \"universe\"") |
1659 | pkgs_to_build.remove('universe') | 1686 | pkgs_to_build.remove('universe') |
1660 | for mc in self.multiconfigs: | 1687 | for mc in self.multiconfigs: |
1661 | for t in self.recipecaches[mc].universe_target: | 1688 | for t in self.recipecaches[mc].universe_target: |
@@ -1680,26 +1707,36 @@ class BBCooker: | |||
1680 | def post_serve(self): | 1707 | def post_serve(self): |
1681 | self.shutdown(force=True) | 1708 | self.shutdown(force=True) |
1682 | prserv.serv.auto_shutdown() | 1709 | prserv.serv.auto_shutdown() |
1710 | if hasattr(bb.parse, "siggen"): | ||
1711 | bb.parse.siggen.exit() | ||
1683 | if self.hashserv: | 1712 | if self.hashserv: |
1684 | self.hashserv.process.terminate() | 1713 | self.hashserv.process.terminate() |
1685 | self.hashserv.process.join() | 1714 | self.hashserv.process.join() |
1686 | if hasattr(self, "data"): | 1715 | if hasattr(self, "data"): |
1687 | bb.event.fire(CookerExit(), self.data) | 1716 | bb.event.fire(CookerExit(), self.data) |
1688 | 1717 | ||
1689 | def shutdown(self, force = False): | 1718 | def shutdown(self, force=False): |
1690 | if force: | 1719 | if force: |
1691 | self.state = state.forceshutdown | 1720 | self.state = State.FORCE_SHUTDOWN |
1721 | bb.event._should_exit.set() | ||
1692 | else: | 1722 | else: |
1693 | self.state = state.shutdown | 1723 | self.state = State.SHUTDOWN |
1694 | 1724 | ||
1695 | if self.parser: | 1725 | if self.parser: |
1696 | self.parser.shutdown(clean=not force, force=force) | 1726 | self.parser.shutdown(clean=False) |
1697 | self.parser.final_cleanup() | 1727 | self.parser.final_cleanup() |
1698 | 1728 | ||
1699 | def finishcommand(self): | 1729 | def finishcommand(self): |
1700 | self.state = state.initial | 1730 | if hasattr(self.parser, 'shutdown'): |
1731 | self.parser.shutdown(clean=False) | ||
1732 | self.parser.final_cleanup() | ||
1733 | self.state = State.INITIAL | ||
1734 | bb.event._should_exit.clear() | ||
1701 | 1735 | ||
1702 | def reset(self): | 1736 | def reset(self): |
1737 | if hasattr(bb.parse, "siggen"): | ||
1738 | bb.parse.siggen.exit() | ||
1739 | self.finishcommand() | ||
1703 | self.initConfigurationData() | 1740 | self.initConfigurationData() |
1704 | self.handlePRServ() | 1741 | self.handlePRServ() |
1705 | 1742 | ||
@@ -1711,9 +1748,9 @@ class BBCooker: | |||
1711 | if hasattr(self, "data"): | 1748 | if hasattr(self, "data"): |
1712 | self.databuilder.reset() | 1749 | self.databuilder.reset() |
1713 | self.data = self.databuilder.data | 1750 | self.data = self.databuilder.data |
1714 | self.parsecache_valid = False | 1751 | # In theory tinfoil could have modified the base data before parsing, |
1715 | self.baseconfig_valid = False | 1752 | # ideally we would need to track whether anything modified the datastore |
1716 | 1753 | self._parsecache_set(False) | |
1717 | 1754 | ||
1718 | class CookerExit(bb.event.Event): | 1755 | class CookerExit(bb.event.Event): |
1719 | """ | 1756 | """ |
@@ -1728,16 +1765,16 @@ class CookerCollectFiles(object): | |||
1728 | def __init__(self, priorities, mc=''): | 1765 | def __init__(self, priorities, mc=''): |
1729 | self.mc = mc | 1766 | self.mc = mc |
1730 | self.bbappends = [] | 1767 | self.bbappends = [] |
1731 | # Priorities is a list of tupples, with the second element as the pattern. | 1768 | # Priorities is a list of tuples, with the second element as the pattern. |
1732 | # We need to sort the list with the longest pattern first, and so on to | 1769 | # We need to sort the list with the longest pattern first, and so on to |
1733 | # the shortest. This allows nested layers to be properly evaluated. | 1770 | # the shortest. This allows nested layers to be properly evaluated. |
1734 | self.bbfile_config_priorities = sorted(priorities, key=lambda tup: tup[1], reverse=True) | 1771 | self.bbfile_config_priorities = sorted(priorities, key=lambda tup: tup[1], reverse=True) |
1735 | 1772 | ||
1736 | def calc_bbfile_priority(self, filename): | 1773 | def calc_bbfile_priority(self, filename): |
1737 | for _, _, regex, pri in self.bbfile_config_priorities: | 1774 | for layername, _, regex, pri in self.bbfile_config_priorities: |
1738 | if regex.match(filename): | 1775 | if regex.match(filename): |
1739 | return pri, regex | 1776 | return pri, regex, layername |
1740 | return 0, None | 1777 | return 0, None, None |
1741 | 1778 | ||
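The comment above explains that the priority list is sorted with the longest pattern first so nested layers are evaluated before their parents. A self-contained illustration with invented layer paths; the tuple layout matches bbfile_config_priorities (layername, pattern, compiled regex, priority):

    import re

    prios = [
        ("base", "/layers/meta-base/",
         re.compile("/layers/meta-base/"), 5),
        ("nested", "/layers/meta-base/meta-nested/",
         re.compile("/layers/meta-base/meta-nested/"), 7),
    ]
    # Reverse-sort on the pattern string, as CookerCollectFiles does;
    # the longer, more specific pattern now comes first.
    prios.sort(key=lambda tup: tup[1], reverse=True)

    def calc_priority(filename):
        for layername, _, regex, pri in prios:
            if regex.match(filename):
                return pri, regex, layername
        return 0, None, None

    pri, _, layer = calc_priority("/layers/meta-base/meta-nested/foo.bb")
    # pri == 7, layer == "nested"; without the sort the parent layer's
    # shorter pattern would match first and yield 5, "base".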
1742 | def get_bbfiles(self): | 1779 | def get_bbfiles(self): |
1743 | """Get list of default .bb files by reading out the current directory""" | 1780 | """Get list of default .bb files by reading out the current directory""" |
@@ -1756,7 +1793,7 @@ class CookerCollectFiles(object): | |||
1756 | for ignored in ('SCCS', 'CVS', '.svn'): | 1793 | for ignored in ('SCCS', 'CVS', '.svn'): |
1757 | if ignored in dirs: | 1794 | if ignored in dirs: |
1758 | dirs.remove(ignored) | 1795 | dirs.remove(ignored) |
1759 | found += [os.path.join(dir, f) for f in files if (f.endswith(['.bb', '.bbappend']))] | 1796 | found += [os.path.join(dir, f) for f in files if (f.endswith(('.bb', '.bbappend')))] |
1760 | 1797 | ||
1761 | return found | 1798 | return found |
1762 | 1799 | ||
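The switch from f.endswith(['.bb', '.bbappend']) to a tuple in the hunk above is a genuine bug fix, not a style change: str.endswith() accepts a string or a tuple of strings and raises TypeError for a list. For example:

    suffixes = ('.bb', '.bbappend')        # must be a str or a tuple of str
    print("recipe.bb".endswith(suffixes))  # True
    # "recipe.bb".endswith(['.bb'])        # TypeError at runtime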
@@ -1764,7 +1801,7 @@ class CookerCollectFiles(object): | |||
1764 | """Collect all available .bb build files""" | 1801 | """Collect all available .bb build files""" |
1765 | masked = 0 | 1802 | masked = 0 |
1766 | 1803 | ||
1767 | collectlog.debug(1, "collecting .bb files") | 1804 | collectlog.debug("collecting .bb files") |
1768 | 1805 | ||
1769 | files = (config.getVar( "BBFILES") or "").split() | 1806 | files = (config.getVar( "BBFILES") or "").split() |
1770 | 1807 | ||
@@ -1772,16 +1809,16 @@ class CookerCollectFiles(object): | |||
1772 | files.sort( key=lambda fileitem: self.calc_bbfile_priority(fileitem)[0] ) | 1809 | files.sort( key=lambda fileitem: self.calc_bbfile_priority(fileitem)[0] ) |
1773 | config.setVar("BBFILES_PRIORITIZED", " ".join(files)) | 1810 | config.setVar("BBFILES_PRIORITIZED", " ".join(files)) |
1774 | 1811 | ||
1775 | if not len(files): | 1812 | if not files: |
1776 | files = self.get_bbfiles() | 1813 | files = self.get_bbfiles() |
1777 | 1814 | ||
1778 | if not len(files): | 1815 | if not files: |
1779 | collectlog.error("no recipe files to build, check your BBPATH and BBFILES?") | 1816 | collectlog.error("no recipe files to build, check your BBPATH and BBFILES?") |
1780 | bb.event.fire(CookerExit(), eventdata) | 1817 | bb.event.fire(CookerExit(), eventdata) |
1781 | 1818 | ||
1782 | # We need to track where we look so that we can add inotify watches. There | 1819 | # We need to track where we look so that we can know when the cache is invalid. There |
1783 | # is no nice way to do this, this is horrid. We intercept the os.listdir() | 1820 | # is no nice way to do this, this is horrid. We intercept the os.listdir() and os.scandir() |
1784 | # (or os.scandir() for python 3.6+) calls while we run glob(). | 1821 | # calls while we run glob(). |
1785 | origlistdir = os.listdir | 1822 | origlistdir = os.listdir |
1786 | if hasattr(os, 'scandir'): | 1823 | if hasattr(os, 'scandir'): |
1787 | origscandir = os.scandir | 1824 | origscandir = os.scandir |
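The "horrid" trick the comment above describes can be shown in miniature: wrap os.listdir() and os.scandir() while glob() runs, record every directory consulted, then restore the originals. A sketch assuming CPython's glob, which looks both functions up on the os module at call time; globbed_with_dirs() is a hypothetical helper, not cooker code:

    import glob
    import os

    def globbed_with_dirs(pattern):
        searched = set()
        origlistdir, origscandir = os.listdir, os.scandir

        def listdir(path='.'):
            searched.add(path)
            return origlistdir(path)

        def scandir(path='.'):
            searched.add(path)
            return origscandir(path)

        # Intercept, run the glob, and always restore the originals.
        os.listdir, os.scandir = listdir, scandir
        try:
            return glob.glob(pattern), searched
        finally:
            os.listdir, os.scandir = origlistdir, origscandir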
@@ -1835,7 +1872,7 @@ class CookerCollectFiles(object): | |||
1835 | try: | 1872 | try: |
1836 | re.compile(mask) | 1873 | re.compile(mask) |
1837 | bbmasks.append(mask) | 1874 | bbmasks.append(mask) |
1838 | except sre_constants.error: | 1875 | except re.error: |
1839 | collectlog.critical("BBMASK contains an invalid regular expression, ignoring: %s" % mask) | 1876 | collectlog.critical("BBMASK contains an invalid regular expression, ignoring: %s" % mask) |
1840 | 1877 | ||
1841 | # Then validate the combined regular expressions. This should never | 1878 | # Then validate the combined regular expressions. This should never |
@@ -1843,7 +1880,7 @@ class CookerCollectFiles(object): | |||
1843 | bbmask = "|".join(bbmasks) | 1880 | bbmask = "|".join(bbmasks) |
1844 | try: | 1881 | try: |
1845 | bbmask_compiled = re.compile(bbmask) | 1882 | bbmask_compiled = re.compile(bbmask) |
1846 | except sre_constants.error: | 1883 | except re.error: |
1847 | collectlog.critical("BBMASK is not a valid regular expression, ignoring: %s" % bbmask) | 1884 | collectlog.critical("BBMASK is not a valid regular expression, ignoring: %s" % bbmask) |
1848 | bbmask = None | 1885 | bbmask = None |
1849 | 1886 | ||
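The BBMASK handling above compiles each mask on its own before joining them with "|", so one malformed entry is dropped with a warning instead of poisoning the combined expression. The validate-then-combine pattern in isolation:

    import re

    masks = [r"meta-broken/.*(", r"meta-skip/", r".*-native\.bb$"]

    valid = []
    for mask in masks:
        try:
            re.compile(mask)          # reject bad entries one at a time...
            valid.append(mask)
        except re.error:
            print("ignoring invalid mask: %s" % mask)

    combined = re.compile("|".join(valid))   # ...so the join always compiles
    print(bool(combined.search("meta-skip/recipes-core/foo.bb")))  # True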
@@ -1851,7 +1888,7 @@ class CookerCollectFiles(object): | |||
1851 | bbappend = [] | 1888 | bbappend = [] |
1852 | for f in newfiles: | 1889 | for f in newfiles: |
1853 | if bbmask and bbmask_compiled.search(f): | 1890 | if bbmask and bbmask_compiled.search(f): |
1854 | collectlog.debug(1, "skipping masked file %s", f) | 1891 | collectlog.debug("skipping masked file %s", f) |
1855 | masked += 1 | 1892 | masked += 1 |
1856 | continue | 1893 | continue |
1857 | if f.endswith('.bb'): | 1894 | if f.endswith('.bb'): |
@@ -1859,7 +1896,7 @@ class CookerCollectFiles(object): | |||
1859 | elif f.endswith('.bbappend'): | 1896 | elif f.endswith('.bbappend'): |
1860 | bbappend.append(f) | 1897 | bbappend.append(f) |
1861 | else: | 1898 | else: |
1862 | collectlog.debug(1, "skipping %s: unknown file extension", f) | 1899 | collectlog.debug("skipping %s: unknown file extension", f) |
1863 | 1900 | ||
1864 | # Build a list of .bbappend files for each .bb file | 1901 | # Build a list of .bbappend files for each .bb file |
1865 | for f in bbappend: | 1902 | for f in bbappend: |
@@ -1910,7 +1947,7 @@ class CookerCollectFiles(object): | |||
1910 | # Calculate priorities for each file | 1947 | # Calculate priorities for each file |
1911 | for p in pkgfns: | 1948 | for p in pkgfns: |
1912 | realfn, cls, mc = bb.cache.virtualfn2realfn(p) | 1949 | realfn, cls, mc = bb.cache.virtualfn2realfn(p) |
1913 | priorities[p], regex = self.calc_bbfile_priority(realfn) | 1950 | priorities[p], regex, _ = self.calc_bbfile_priority(realfn) |
1914 | if regex in unmatched_regex: | 1951 | if regex in unmatched_regex: |
1915 | matched_regex.add(regex) | 1952 | matched_regex.add(regex) |
1916 | unmatched_regex.remove(regex) | 1953 | unmatched_regex.remove(regex) |
@@ -1961,15 +1998,30 @@ class ParsingFailure(Exception): | |||
1961 | Exception.__init__(self, realexception, recipe) | 1998 | Exception.__init__(self, realexception, recipe) |
1962 | 1999 | ||
1963 | class Parser(multiprocessing.Process): | 2000 | class Parser(multiprocessing.Process): |
1964 | def __init__(self, jobs, results, quit, init, profile): | 2001 | def __init__(self, jobs, results, quit, profile): |
1965 | self.jobs = jobs | 2002 | self.jobs = jobs |
1966 | self.results = results | 2003 | self.results = results |
1967 | self.quit = quit | 2004 | self.quit = quit |
1968 | self.init = init | ||
1969 | multiprocessing.Process.__init__(self) | 2005 | multiprocessing.Process.__init__(self) |
1970 | self.context = bb.utils.get_context().copy() | 2006 | self.context = bb.utils.get_context().copy() |
1971 | self.handlers = bb.event.get_class_handlers().copy() | 2007 | self.handlers = bb.event.get_class_handlers().copy() |
1972 | self.profile = profile | 2008 | self.profile = profile |
2009 | self.queue_signals = False | ||
2010 | self.signal_received = [] | ||
2011 | self.signal_threadlock = threading.Lock() | ||
2012 | |||
2013 | def catch_sig(self, signum, frame): | ||
2014 | if self.queue_signals: | ||
2015 | self.signal_received.append(signum) | ||
2016 | else: | ||
2017 | self.handle_sig(signum, frame) | ||
2018 | |||
2019 | def handle_sig(self, signum, frame): | ||
2020 | if signum == signal.SIGTERM: | ||
2021 | signal.signal(signal.SIGTERM, signal.SIG_DFL) | ||
2022 | os.kill(os.getpid(), signal.SIGTERM) | ||
2023 | elif signum == signal.SIGINT: | ||
2024 | signal.default_int_handler(signum, frame) | ||
1973 | 2025 | ||
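catch_sig()/handle_sig() above implement deferred signal delivery: while the event-stream lock is held, SIGTERM/SIGINT are merely recorded, and they are replayed once the critical section is left. A condensed sketch of the same pattern; the class below is illustrative, and its handle_sig() is simplified relative to the real dispositions (re-raising SIGTERM under SIG_DFL, calling signal.default_int_handler()):

    import signal

    class DeferredSignalHandler:
        def __init__(self):
            # Install from the process's main thread, as Parser.realrun()
            # does in the worker processes.
            self.queue_signals = False
            self.signal_received = []
            signal.signal(signal.SIGTERM, self.catch_sig)
            signal.signal(signal.SIGINT, self.catch_sig)

        def catch_sig(self, signum, frame):
            if self.queue_signals:
                self.signal_received.append(signum)   # defer
            else:
                self.handle_sig(signum, frame)        # act immediately

        def handle_sig(self, signum, frame):
            if signum == signal.SIGINT:
                raise KeyboardInterrupt
            raise SystemExit(128 + signum)

        def process_pending(self):
            # Call with no locks held: replay anything that arrived
            # inside the critical section.
            self.queue_signals = False
            while self.signal_received:
                self.handle_sig(self.signal_received.pop(0), None)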
1974 | def run(self): | 2026 | def run(self): |
1975 | 2027 | ||
@@ -1989,38 +2041,50 @@ class Parser(multiprocessing.Process): | |||
1989 | prof.dump_stats(logfile) | 2041 | prof.dump_stats(logfile) |
1990 | 2042 | ||
1991 | def realrun(self): | 2043 | def realrun(self): |
1992 | if self.init: | 2044 | # Signal handling here is hard. We must not terminate any process or thread holding the write |
1993 | self.init() | 2045 | # lock for the event stream as it will not be released, ever, and things will hang. |
2046 | # Python handles signals in the main thread/process but they can be raised from any thread and | ||
2047 | # we want to defer processing of any SIGTERM/SIGINT signal until we're outside the critical section | ||
2048 | # and don't hold the lock (see server/process.py). We therefore always catch the signals (so any | ||
2049 | # new thread should also do so) and we defer handling but we handle with the local thread lock | ||
2050 | # held (a threading lock, not a multiprocessing one) so that no other thread in the process | ||
2051 | # can be in the critical section. | ||
2052 | signal.signal(signal.SIGTERM, self.catch_sig) | ||
2053 | signal.signal(signal.SIGHUP, signal.SIG_DFL) | ||
2054 | signal.signal(signal.SIGINT, self.catch_sig) | ||
2055 | bb.utils.set_process_name(multiprocessing.current_process().name) | ||
2056 | multiprocessing.util.Finalize(None, bb.codeparser.parser_cache_save, exitpriority=1) | ||
2057 | multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, exitpriority=1) | ||
1994 | 2058 | ||
1995 | pending = [] | 2059 | pending = [] |
1996 | while True: | 2060 | havejobs = True |
1997 | try: | 2061 | try: |
1998 | self.quit.get_nowait() | 2062 | while havejobs or pending: |
1999 | except queue.Empty: | 2063 | if self.quit.is_set(): |
2000 | pass | 2064 | break |
2001 | else: | ||
2002 | self.results.close() | ||
2003 | self.results.join_thread() | ||
2004 | break | ||
2005 | 2065 | ||
2006 | if pending: | 2066 | job = None |
2007 | result = pending.pop() | ||
2008 | else: | ||
2009 | try: | 2067 | try: |
2010 | job = self.jobs.pop() | 2068 | job = self.jobs.pop() |
2011 | except IndexError: | 2069 | except IndexError: |
2012 | self.results.close() | 2070 | havejobs = False |
2013 | self.results.join_thread() | 2071 | if job: |
2014 | break | 2072 | result = self.parse(*job) |
2015 | result = self.parse(*job) | 2073 | # Clear the siggen cache after parsing to control memory usage, it's huge |
2016 | # Clear the siggen cache after parsing to control memory usage, it's huge | 2074 | bb.parse.siggen.postparsing_clean_cache() |
2017 | bb.parse.siggen.postparsing_clean_cache() | 2075 | pending.append(result) |
2018 | try: | 2076 | |
2019 | self.results.put(result, timeout=0.25) | 2077 | if pending: |
2020 | except queue.Full: | 2078 | try: |
2021 | pending.append(result) | 2079 | result = pending.pop() |
2080 | self.results.put(result, timeout=0.05) | ||
2081 | except queue.Full: | ||
2082 | pending.append(result) | ||
2083 | finally: | ||
2084 | self.results.close() | ||
2085 | self.results.join_thread() | ||
2022 | 2086 | ||
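The rewritten realrun() loop above interleaves three concerns: polling the quit event, draining the job list, and handing results over without ever blocking on a full queue (a blocking put() would otherwise deadlock against a stalled reader). A stripped-down skeleton of that loop shape; worker_loop() and its arguments are illustrative stand-ins:

    import queue

    def worker_loop(jobs, results, quit_event, parse):
        pending = []
        havejobs = True
        try:
            while havejobs or pending:
                if quit_event.is_set():
                    break
                if jobs:
                    pending.append(parse(*jobs.pop()))
                else:
                    havejobs = False
                if pending:
                    item = pending.pop()
                    try:
                        # Short timeout: if the queue is full, park the
                        # item and loop back to re-check quit_event.
                        results.put(item, timeout=0.05)
                    except queue.Full:
                        pending.append(item)
        finally:
            results.close()
            results.join_thread()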
2023 | def parse(self, mc, cache, filename, appends): | 2087 | def parse(self, mc, cache, filename, appends, layername): |
2024 | try: | 2088 | try: |
2025 | origfilter = bb.event.LogHandler.filter | 2089 | origfilter = bb.event.LogHandler.filter |
2026 | # Record the filename we're parsing into any events generated | 2090 | # Record the filename we're parsing into any events generated |
@@ -2034,17 +2098,16 @@ class Parser(multiprocessing.Process): | |||
2034 | bb.event.set_class_handlers(self.handlers.copy()) | 2098 | bb.event.set_class_handlers(self.handlers.copy()) |
2035 | bb.event.LogHandler.filter = parse_filter | 2099 | bb.event.LogHandler.filter = parse_filter |
2036 | 2100 | ||
2037 | return True, mc, cache.parse(filename, appends) | 2101 | return True, mc, cache.parse(filename, appends, layername) |
2038 | except Exception as exc: | 2102 | except Exception as exc: |
2039 | tb = sys.exc_info()[2] | 2103 | tb = sys.exc_info()[2] |
2040 | exc.recipe = filename | 2104 | exc.recipe = filename |
2041 | exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3)) | 2105 | return True, None, exc |
2042 | return True, exc | ||
2043 | # Need to turn BaseExceptions into Exceptions here so we gracefully shutdown | 2106 | # Need to turn BaseExceptions into Exceptions here so we gracefully shutdown |
2044 | # and for example a worker thread doesn't just exit on its own in response to | 2107 | # and for example a worker thread doesn't just exit on its own in response to |
2045 | # a SystemExit event for example. | 2108 | # a SystemExit event for example. |
2046 | except BaseException as exc: | 2109 | except BaseException as exc: |
2047 | return True, ParsingFailure(exc, filename) | 2110 | return True, None, ParsingFailure(exc, filename) |
2048 | finally: | 2111 | finally: |
2049 | bb.event.LogHandler.filter = origfilter | 2112 | bb.event.LogHandler.filter = origfilter |
2050 | 2113 | ||
@@ -2053,7 +2116,7 @@ class CookerParser(object): | |||
2053 | self.mcfilelist = mcfilelist | 2116 | self.mcfilelist = mcfilelist |
2054 | self.cooker = cooker | 2117 | self.cooker = cooker |
2055 | self.cfgdata = cooker.data | 2118 | self.cfgdata = cooker.data |
2056 | self.cfghash = cooker.data_hash | 2119 | self.cfghash = cooker.databuilder.data_hash |
2057 | self.cfgbuilder = cooker.databuilder | 2120 | self.cfgbuilder = cooker.databuilder |
2058 | 2121 | ||
2059 | # Accounting statistics | 2122 | # Accounting statistics |
@@ -2074,10 +2137,11 @@ class CookerParser(object): | |||
2074 | for mc in self.cooker.multiconfigs: | 2137 | for mc in self.cooker.multiconfigs: |
2075 | for filename in self.mcfilelist[mc]: | 2138 | for filename in self.mcfilelist[mc]: |
2076 | appends = self.cooker.collections[mc].get_file_appends(filename) | 2139 | appends = self.cooker.collections[mc].get_file_appends(filename) |
2140 | layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2] | ||
2077 | if not self.bb_caches[mc].cacheValid(filename, appends): | 2141 | if not self.bb_caches[mc].cacheValid(filename, appends): |
2078 | self.willparse.add((mc, self.bb_caches[mc], filename, appends)) | 2142 | self.willparse.add((mc, self.bb_caches[mc], filename, appends, layername)) |
2079 | else: | 2143 | else: |
2080 | self.fromcache.add((mc, self.bb_caches[mc], filename, appends)) | 2144 | self.fromcache.add((mc, self.bb_caches[mc], filename, appends, layername)) |
2081 | 2145 | ||
2082 | self.total = len(self.fromcache) + len(self.willparse) | 2146 | self.total = len(self.fromcache) + len(self.willparse) |
2083 | self.toparse = len(self.willparse) | 2147 | self.toparse = len(self.willparse) |
@@ -2086,6 +2150,7 @@ class CookerParser(object): | |||
2086 | self.num_processes = min(int(self.cfgdata.getVar("BB_NUMBER_PARSE_THREADS") or | 2150 | self.num_processes = min(int(self.cfgdata.getVar("BB_NUMBER_PARSE_THREADS") or |
2087 | multiprocessing.cpu_count()), self.toparse) | 2151 | multiprocessing.cpu_count()), self.toparse) |
2088 | 2152 | ||
2153 | bb.cache.SiggenRecipeInfo.reset() | ||
2089 | self.start() | 2154 | self.start() |
2090 | self.haveshutdown = False | 2155 | self.haveshutdown = False |
2091 | self.syncthread = None | 2156 | self.syncthread = None |
@@ -2095,15 +2160,8 @@ class CookerParser(object): | |||
2095 | self.processes = [] | 2160 | self.processes = [] |
2096 | if self.toparse: | 2161 | if self.toparse: |
2097 | bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata) | 2162 | bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata) |
2098 | def init(): | 2163 | |
2099 | signal.signal(signal.SIGTERM, signal.SIG_DFL) | 2164 | self.parser_quit = multiprocessing.Event() |
2100 | signal.signal(signal.SIGHUP, signal.SIG_DFL) | ||
2101 | signal.signal(signal.SIGINT, signal.SIG_IGN) | ||
2102 | bb.utils.set_process_name(multiprocessing.current_process().name) | ||
2103 | multiprocessing.util.Finalize(None, bb.codeparser.parser_cache_save, exitpriority=1) | ||
2104 | multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, exitpriority=1) | ||
2105 | |||
2106 | self.parser_quit = multiprocessing.Queue(maxsize=self.num_processes) | ||
2107 | self.result_queue = multiprocessing.Queue() | 2165 | self.result_queue = multiprocessing.Queue() |
2108 | 2166 | ||
2109 | def chunkify(lst,n): | 2167 | def chunkify(lst,n): |
@@ -2111,14 +2169,14 @@ class CookerParser(object): | |||
2111 | self.jobs = chunkify(list(self.willparse), self.num_processes) | 2169 | self.jobs = chunkify(list(self.willparse), self.num_processes) |
2112 | 2170 | ||
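chunkify()'s body is not part of this hunk; a stride-based split is one plausible shape for it (a sketch under that assumption, not necessarily the actual implementation):

    def chunkify(lst, n):
        # Deal the list out round-robin so each of the n parser
        # processes gets a similarly sized share.
        return [lst[i::n] for i in range(n)]

    print(chunkify(list(range(10)), 3))
    # [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]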
2113 | for i in range(0, self.num_processes): | 2171 | for i in range(0, self.num_processes): |
2114 | parser = Parser(self.jobs[i], self.result_queue, self.parser_quit, init, self.cooker.configuration.profile) | 2172 | parser = Parser(self.jobs[i], self.result_queue, self.parser_quit, self.cooker.configuration.profile) |
2115 | parser.start() | 2173 | parser.start() |
2116 | self.process_names.append(parser.name) | 2174 | self.process_names.append(parser.name) |
2117 | self.processes.append(parser) | 2175 | self.processes.append(parser) |
2118 | 2176 | ||
2119 | self.results = itertools.chain(self.results, self.parse_generator()) | 2177 | self.results = itertools.chain(self.results, self.parse_generator()) |
2120 | 2178 | ||
2121 | def shutdown(self, clean=True, force=False): | 2179 | def shutdown(self, clean=True, eventmsg="Parsing halted due to errors"): |
2122 | if not self.toparse: | 2180 | if not self.toparse: |
2123 | return | 2181 | return |
2124 | if self.haveshutdown: | 2182 | if self.haveshutdown: |
@@ -2132,9 +2190,9 @@ class CookerParser(object): | |||
2132 | self.total) | 2190 | self.total) |
2133 | 2191 | ||
2134 | bb.event.fire(event, self.cfgdata) | 2192 | bb.event.fire(event, self.cfgdata) |
2135 | 2193 | else: | |
2136 | for process in self.processes: | 2194 | bb.event.fire(bb.event.ParseError(eventmsg), self.cfgdata) |
2137 | self.parser_quit.put(None) | 2195 | bb.error("Parsing halted due to errors, see error messages above") |
2138 | 2196 | ||
2139 | # Cleanup the queue before calling process.join(), otherwise there might be | 2197 |
2140 | # deadlocks. | 2198 | # deadlocks. |
@@ -2144,106 +2202,152 @@ class CookerParser(object): | |||
2144 | except queue.Empty: | 2202 | except queue.Empty: |
2145 | break | 2203 | break |
2146 | 2204 | ||
2147 | for process in self.processes: | ||
2148 | if force: | ||
2149 | process.join(.1) | ||
2150 | process.terminate() | ||
2151 | else: | ||
2152 | process.join() | ||
2153 | |||
2154 | self.parser_quit.close() | ||
2155 | # Allow data left in the cancel queue to be discarded | ||
2156 | self.parser_quit.cancel_join_thread() | ||
2157 | |||
2158 | def sync_caches(): | 2205 | def sync_caches(): |
2159 | for c in self.bb_caches.values(): | 2206 | for c in self.bb_caches.values(): |
2207 | bb.cache.SiggenRecipeInfo.reset() | ||
2160 | c.sync() | 2208 | c.sync() |
2161 | 2209 | ||
2162 | sync = threading.Thread(target=sync_caches, name="SyncThread") | 2210 | self.syncthread = threading.Thread(target=sync_caches, name="SyncThread") |
2163 | self.syncthread = sync | 2211 | self.syncthread.start() |
2164 | sync.start() | 2212 | |
2213 | self.parser_quit.set() | ||
2214 | |||
2215 | for process in self.processes: | ||
2216 | process.join(0.5) | ||
2217 | |||
2218 | for process in self.processes: | ||
2219 | if process.exitcode is None: | ||
2220 | os.kill(process.pid, signal.SIGINT) | ||
2221 | |||
2222 | for process in self.processes: | ||
2223 | process.join(0.5) | ||
2224 | |||
2225 | for process in self.processes: | ||
2226 | if process.exitcode is None: | ||
2227 | process.terminate() | ||
2228 | |||
2229 | for process in self.processes: | ||
2230 | process.join() | ||
2231 | # clean up zombies | ||
2232 | process.close() | ||
2233 | |||
2234 | bb.codeparser.parser_cache_save() | ||
2165 | bb.codeparser.parser_cache_savemerge() | 2235 | bb.codeparser.parser_cache_savemerge() |
2236 | bb.cache.SiggenRecipeInfo.reset() | ||
2166 | bb.fetch.fetcher_parse_done() | 2237 | bb.fetch.fetcher_parse_done() |
2167 | if self.cooker.configuration.profile: | 2238 | if self.cooker.configuration.profile: |
2168 | profiles = [] | 2239 | profiles = [] |
2169 | for i in self.process_names: | 2240 | for i in self.process_names: |
2170 | logfile = "profile-parse-%s.log" % i | 2241 | logfile = "profile-parse-%s.log" % i |
2171 | if os.path.exists(logfile): | 2242 | if os.path.exists(logfile) and os.path.getsize(logfile): |
2172 | profiles.append(logfile) | 2243 | profiles.append(logfile) |
2173 | 2244 | ||
2174 | pout = "profile-parse.log.processed" | 2245 | if profiles: |
2175 | bb.utils.process_profilelog(profiles, pout = pout) | 2246 | pout = "profile-parse.log.processed" |
2176 | print("Processed parsing statistics saved to %s" % (pout)) | 2247 | bb.utils.process_profilelog(profiles, pout = pout) |
2248 | print("Processed parsing statistics saved to %s" % (pout)) | ||
2177 | 2249 | ||
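The shutdown sequence above escalates deliberately: a timed join(), then SIGINT so the workers' deferred signal handlers get a chance to run, another timed join(), terminate() as a last resort, and a final join() plus close() so no zombies are left behind. The same ladder as a standalone helper (stop_workers() is an illustrative name):

    import os
    import signal

    def stop_workers(processes, grace=0.5):
        for p in processes:           # polite: wait briefly
            p.join(grace)
        for p in processes:           # still running? interrupt it
            if p.exitcode is None:
                os.kill(p.pid, signal.SIGINT)
        for p in processes:
            p.join(grace)
        for p in processes:           # last resort: SIGTERM
            if p.exitcode is None:
                p.terminate()
        for p in processes:           # reap and release resources
            p.join()
            p.close()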
2178 | def final_cleanup(self): | 2250 | def final_cleanup(self): |
2179 | if self.syncthread: | 2251 | if self.syncthread: |
2180 | self.syncthread.join() | 2252 | self.syncthread.join() |
2181 | 2253 | ||
2182 | def load_cached(self): | 2254 | def load_cached(self): |
2183 | for mc, cache, filename, appends in self.fromcache: | 2255 | for mc, cache, filename, appends, layername in self.fromcache: |
2184 | cached, infos = cache.load(filename, appends) | 2256 | infos = cache.loadCached(filename, appends) |
2185 | yield not cached, mc, infos | 2257 | yield False, mc, infos |
2186 | 2258 | ||
2187 | def parse_generator(self): | 2259 | def parse_generator(self): |
2188 | while True: | 2260 | empty = False |
2261 | while self.processes or not empty: | ||
2262 | for process in self.processes.copy(): | ||
2263 | if not process.is_alive(): | ||
2264 | process.join() | ||
2265 | self.processes.remove(process) | ||
2266 | |||
2189 | if self.parsed >= self.toparse: | 2267 | if self.parsed >= self.toparse: |
2190 | break | 2268 | break |
2191 | 2269 | ||
2192 | try: | 2270 | try: |
2193 | result = self.result_queue.get(timeout=0.25) | 2271 | result = self.result_queue.get(timeout=0.25) |
2194 | except queue.Empty: | 2272 | except queue.Empty: |
2195 | pass | 2273 | empty = True |
2274 | yield None, None, None | ||
2196 | else: | 2275 | else: |
2197 | value = result[1] | 2276 | empty = False |
2198 | if isinstance(value, BaseException): | 2277 | yield result |
2199 | raise value | 2278 | |
2200 | else: | 2279 | if not (self.parsed >= self.toparse): |
2201 | yield result | 2280 | raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? Exiting.", None) |
2281 | |||
2202 | 2282 | ||
2203 | def parse_next(self): | 2283 | def parse_next(self): |
2204 | result = [] | 2284 | result = [] |
2205 | parsed = None | 2285 | parsed = None |
2206 | try: | 2286 | try: |
2207 | parsed, mc, result = next(self.results) | 2287 | parsed, mc, result = next(self.results) |
2288 | if isinstance(result, BaseException): | ||
2289 | # Turn exceptions back into exceptions | ||
2290 | raise result | ||
2291 | if parsed is None: | ||
2292 | # Timeout, loop back through the main loop | ||
2293 | return True | ||
2294 | |||
2208 | except StopIteration: | 2295 | except StopIteration: |
2209 | self.shutdown() | 2296 | self.shutdown() |
2210 | return False | 2297 | return False |
2211 | except bb.BBHandledException as exc: | 2298 | except bb.BBHandledException as exc: |
2212 | self.error += 1 | 2299 | self.error += 1 |
2213 | logger.error('Failed to parse recipe: %s' % exc.recipe) | 2300 | logger.debug('Failed to parse recipe: %s' % exc.recipe) |
2214 | self.shutdown(clean=False, force=True) | 2301 | self.shutdown(clean=False) |
2215 | return False | 2302 | return False |
2216 | except ParsingFailure as exc: | 2303 | except ParsingFailure as exc: |
2217 | self.error += 1 | 2304 | self.error += 1 |
2218 | logger.error('Unable to parse %s: %s' % | 2305 | |
2219 | (exc.recipe, bb.exceptions.to_string(exc.realexception))) | 2306 | exc_desc = str(exc) |
2220 | self.shutdown(clean=False, force=True) | 2307 | if isinstance(exc, SystemExit) and not isinstance(exc.code, str): |
2308 | exc_desc = 'Exited with "%d"' % exc.code | ||
2309 | |||
2310 | logger.error('Unable to parse %s: %s' % (exc.recipe, exc_desc)) | ||
2311 | self.shutdown(clean=False) | ||
2221 | return False | 2312 | return False |
2222 | except bb.parse.ParseError as exc: | 2313 | except bb.parse.ParseError as exc: |
2223 | self.error += 1 | 2314 | self.error += 1 |
2224 | logger.error(str(exc)) | 2315 | logger.error(str(exc)) |
2225 | self.shutdown(clean=False, force=True) | 2316 | self.shutdown(clean=False, eventmsg=str(exc)) |
2226 | return False | 2317 | return False |
2227 | except bb.data_smart.ExpansionError as exc: | 2318 | except bb.data_smart.ExpansionError as exc: |
2319 | def skip_frames(f, fn_prefix): | ||
2320 | while f and f.tb_frame.f_code.co_filename.startswith(fn_prefix): | ||
2321 | f = f.tb_next | ||
2322 | return f | ||
2323 | |||
2228 | self.error += 1 | 2324 | self.error += 1 |
2229 | bbdir = os.path.dirname(__file__) + os.sep | 2325 | bbdir = os.path.dirname(__file__) + os.sep |
2230 | etype, value, _ = sys.exc_info() | 2326 | etype, value, tb = sys.exc_info() |
2231 | tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback)) | 2327 | |
2328 | # Remove any frames where the code comes from bitbake. This | ||
2329 | # prevents deep (and pretty useless) backtraces for expansion error | ||
2330 | tb = skip_frames(tb, bbdir) | ||
2331 | cur = tb | ||
2332 | while cur: | ||
2333 | cur.tb_next = skip_frames(cur.tb_next, bbdir) | ||
2334 | cur = cur.tb_next | ||
2335 | |||
2232 | logger.error('ExpansionError during parsing %s', value.recipe, | 2336 | logger.error('ExpansionError during parsing %s', value.recipe, |
2233 | exc_info=(etype, value, tb)) | 2337 | exc_info=(etype, value, tb)) |
2234 | self.shutdown(clean=False, force=True) | 2338 | self.shutdown(clean=False) |
2235 | return False | 2339 | return False |
2236 | except Exception as exc: | 2340 | except Exception as exc: |
2237 | self.error += 1 | 2341 | self.error += 1 |
2238 | etype, value, tb = sys.exc_info() | 2342 | _, value, _ = sys.exc_info() |
2239 | if hasattr(value, "recipe"): | 2343 | if hasattr(value, "recipe"): |
2240 | logger.error('Unable to parse %s' % value.recipe, | 2344 | logger.error('Unable to parse %s' % value.recipe, |
2241 | exc_info=(etype, value, exc.traceback)) | 2345 | exc_info=sys.exc_info()) |
2242 | else: | 2346 | else: |
2243 | # Most likely, an exception occurred during raising an exception | 2347 | # Most likely, an exception occurred during raising an exception |
2244 | import traceback | 2348 | import traceback |
2245 | logger.error('Exception during parse: %s' % traceback.format_exc()) | 2349 | logger.error('Exception during parse: %s' % traceback.format_exc()) |
2246 | self.shutdown(clean=False, force=True) | 2350 | self.shutdown(clean=False) |
2247 | return False | 2351 | return False |
2248 | 2352 | ||
2249 | self.current += 1 | 2353 | self.current += 1 |
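skip_frames() above shortens expansion-error backtraces by walking the traceback and rewriting tb_next, which is writable from Python 3.7 onward, so frames from under the bitbake tree vanish from both the head and the interior of the chain. The same idea as a reusable helper (trim_tb() is an illustrative name):

    def trim_tb(tb, prefix):
        def skip(f):
            # Drop consecutive frames whose source file lives under
            # 'prefix'.
            while f and f.tb_frame.f_code.co_filename.startswith(prefix):
                f = f.tb_next
            return f

        tb = skip(tb)                  # trim the head of the chain
        cur = tb
        while cur:
            cur.tb_next = skip(cur.tb_next)   # splice out interior runs
            cur = cur.tb_next
        return tb

    # Usage sketch: etype, value, tb = sys.exc_info()
    #               logger.error(..., exc_info=(etype, value, trim_tb(tb, bbdir)))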
@@ -2259,17 +2363,19 @@ class CookerParser(object): | |||
2259 | for virtualfn, info_array in result: | 2363 | for virtualfn, info_array in result: |
2260 | if info_array[0].skipped: | 2364 | if info_array[0].skipped: |
2261 | self.skipped += 1 | 2365 | self.skipped += 1 |
2262 | self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0]) | 2366 | self.cooker.skiplist_by_mc[mc][virtualfn] = SkippedPackage(info_array[0]) |
2263 | self.bb_caches[mc].add_info(virtualfn, info_array, self.cooker.recipecaches[mc], | 2367 | self.bb_caches[mc].add_info(virtualfn, info_array, self.cooker.recipecaches[mc], |
2264 | parsed=parsed, watcher = self.cooker.add_filewatch) | 2368 | parsed=parsed, watcher = self.cooker.add_filewatch) |
2265 | return True | 2369 | return True |
2266 | 2370 | ||
2267 | def reparse(self, filename): | 2371 | def reparse(self, filename): |
2372 | bb.cache.SiggenRecipeInfo.reset() | ||
2268 | to_reparse = set() | 2373 | to_reparse = set() |
2269 | for mc in self.cooker.multiconfigs: | 2374 | for mc in self.cooker.multiconfigs: |
2270 | to_reparse.add((mc, filename, self.cooker.collections[mc].get_file_appends(filename))) | 2375 | layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2] |
2376 | to_reparse.add((mc, filename, self.cooker.collections[mc].get_file_appends(filename), layername)) | ||
2271 | 2377 | ||
2272 | for mc, filename, appends in to_reparse: | 2378 | for mc, filename, appends, layername in to_reparse: |
2273 | infos = self.bb_caches[mc].parse(filename, appends) | 2379 | infos = self.bb_caches[mc].parse(filename, appends, layername) |
2274 | for vfn, info_array in infos: | 2380 | for vfn, info_array in infos: |
2275 | self.cooker.recipecaches[mc].add_from_recipeinfo(vfn, info_array) | 2381 | self.cooker.recipecaches[mc].add_from_recipeinfo(vfn, info_array) |