Diffstat (limited to 'bitbake/lib/bb/cooker.py')
-rw-r--r--  bitbake/lib/bb/cooker.py  254
1 file changed, 137 insertions(+), 117 deletions(-)
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index c5bfef55d6..fb87368f17 100644
--- a/bitbake/lib/bb/cooker.py
+++ b/bitbake/lib/bb/cooker.py
@@ -8,16 +8,16 @@
 #
 # SPDX-License-Identifier: GPL-2.0-only
 #
-
+import enum
 import sys, os, glob, os.path, re, time
 import itertools
 import logging
-import multiprocessing
+from bb import multiprocessing
 import threading
 from io import StringIO, UnsupportedOperation
 from contextlib import closing
 from collections import defaultdict, namedtuple
-import bb, bb.exceptions, bb.command
+import bb, bb.command
 from bb import utils, data, parse, event, cache, providers, taskdata, runqueue, build
 import queue
 import signal
@@ -26,6 +26,7 @@ import json
 import pickle
 import codecs
 import hashserv
+import ctypes
 
 logger = logging.getLogger("BitBake")
 collectlog = logging.getLogger("BitBake.Collection")
@@ -48,16 +49,15 @@ class CollectionError(bb.BBHandledException):
     Exception raised when layer configuration is incorrect
     """
 
-class state:
-    initial, parsing, running, shutdown, forceshutdown, stopped, error = list(range(7))
 
-    @classmethod
-    def get_name(cls, code):
-        for name in dir(cls):
-            value = getattr(cls, name)
-            if type(value) == type(cls.initial) and value == code:
-                return name
-        raise ValueError("Invalid status code: %s" % code)
+class State(enum.Enum):
+    INITIAL = 0,
+    PARSING = 1,
+    RUNNING = 2,
+    SHUTDOWN = 3,
+    FORCE_SHUTDOWN = 4,
+    STOPPED = 5,
+    ERROR = 6
 
 
 class SkippedPackage:
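One quirk of the new State enum is worth noting: the trailing commas make most member values one-element tuples, e.g. (0,) rather than 0. Enum members compare by identity, so none of the state comparisons in cooker.py are affected. A minimal standalone sketch (not bitbake code) showing why the quirk is harmless:

    import enum

    class State(enum.Enum):
        INITIAL = 0,   # trailing comma: the value is the tuple (0,)
        RUNNING = 2,
        ERROR = 6      # no trailing comma: the value is the int 6

    state = State.INITIAL
    assert state == State.INITIAL       # members compare by identity
    assert state != State.RUNNING
    assert State.INITIAL.value == (0,)  # the quirk is only visible via .value
    assert State.ERROR.value == 6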
@@ -134,7 +134,8 @@ class BBCooker:
         self.baseconfig_valid = False
         self.parsecache_valid = False
         self.eventlog = None
-        self.skiplist = {}
+        # The skiplists, one per multiconfig
+        self.skiplist_by_mc = defaultdict(dict)
         self.featureset = CookerFeatures()
         if featureSet:
             for f in featureSet:
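For reference, defaultdict(dict) means each multiconfig gets its own skiplist dictionary materialised on first access, so call sites can index self.skiplist_by_mc[mc] without seeding keys first. A small illustration (the keys and values here are made up; the real values are SkippedPackage objects keyed by virtual filename):

    from collections import defaultdict

    skiplist_by_mc = defaultdict(dict)  # one skiplist per multiconfig
    skiplist_by_mc[""]["/path/to/recipe.bb"] = "incompatible with MACHINE"
    assert skiplist_by_mc["mc-foo"] == {}  # unknown mc appears empty on demand
    assert "mc-foo" in skiplist_by_mc      # ...and is now materialised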
@@ -180,7 +181,7 @@ class BBCooker:
             pass
 
         self.command = bb.command.Command(self, self.process_server)
-        self.state = state.initial
+        self.state = State.INITIAL
 
         self.parser = None
 
@@ -226,23 +227,22 @@ class BBCooker:
             bb.warn("Cooker received SIGTERM, shutting down...")
         elif signum == signal.SIGHUP:
             bb.warn("Cooker received SIGHUP, shutting down...")
-        self.state = state.forceshutdown
+        self.state = State.FORCE_SHUTDOWN
         bb.event._should_exit.set()
 
     def setFeatures(self, features):
         # we only accept a new feature set if we're in state initial, so we can reset without problems
-        if not self.state in [state.initial, state.shutdown, state.forceshutdown, state.stopped, state.error]:
+        if not self.state in [State.INITIAL, State.SHUTDOWN, State.FORCE_SHUTDOWN, State.STOPPED, State.ERROR]:
             raise Exception("Illegal state for feature set change")
         original_featureset = list(self.featureset)
         for feature in features:
             self.featureset.setFeature(feature)
         bb.debug(1, "Features set %s (was %s)" % (original_featureset, list(self.featureset)))
-        if (original_featureset != list(self.featureset)) and self.state != state.error and hasattr(self, "data"):
+        if (original_featureset != list(self.featureset)) and self.state != State.ERROR and hasattr(self, "data"):
             self.reset()
 
     def initConfigurationData(self):
-
-        self.state = state.initial
+        self.state = State.INITIAL
         self.caches_array = []
 
         sys.path = self.orig_syspath.copy()
@@ -281,7 +281,6 @@ class BBCooker:
         self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False)
         self.databuilder.parseBaseConfiguration()
         self.data = self.databuilder.data
-        self.data_hash = self.databuilder.data_hash
         self.extraconfigdata = {}
 
         eventlog = self.data.getVar("BB_DEFAULT_EVENTLOG")
@@ -315,13 +314,19 @@ class BBCooker:
             dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db"
             upstream = self.data.getVar("BB_HASHSERVE_UPSTREAM") or None
             if upstream:
-                import socket
                 try:
-                    sock = socket.create_connection(upstream.split(":"), 5)
-                    sock.close()
-                except socket.error as e:
-                    bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
+                    with hashserv.create_client(upstream) as client:
+                        client.ping()
+                except ImportError as e:
+                    bb.fatal(""""Unable to use hash equivalence server at '%s' due to missing or incorrect python module:
+%s
+Please install the needed module on the build host, or use an environment containing it (e.g a pip venv or OpenEmbedded's buildtools tarball).
+You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in significantly longer build times as bitbake will be unable to reuse prebuilt sstate artefacts."""
+% (upstream, repr(e)))
+                except ConnectionError as e:
+                    bb.warn("Unable to connect to hash equivalence server at '%s', please correct or remove BB_HASHSERVE_UPSTREAM:\n%s"
                             % (upstream, repr(e)))
+                    upstream = None
 
         self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
         self.hashserv = hashserv.create_server(
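The new probe distinguishes the two failure modes: a missing or incompatible client-side module is fatal, while a merely unreachable server warns and clears the upstream so the build continues without it. A sketch of that logic, assuming only the hashserv.create_client()/ping() calls visible in the hunk (the wrapper function itself is invented):

    import hashserv

    def check_upstream(upstream):
        # Fatal when the client stack is unusable, warn-and-disable when
        # the server simply cannot be reached (mirrors the hunk above).
        try:
            with hashserv.create_client(upstream) as client:
                client.ping()
        except ImportError as e:
            raise SystemExit("hash equivalence client unusable: %r" % e)
        except ConnectionError as e:
            print("warning: cannot reach %s, ignoring upstream: %r" % (upstream, e))
            return None
        return upstream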
@@ -370,6 +375,11 @@ class BBCooker:
         if not clean:
             bb.parse.BBHandler.cached_statements = {}
 
+        # If writes were made to any of the data stores, we need to recalculate the data
+        # store cache
+        if hasattr(self, "databuilder"):
+            self.databuilder.calc_datastore_hashes()
+
     def parseConfiguration(self):
         self.updateCacheSync()
 
@@ -612,8 +622,8 @@ class BBCooker:
         localdata = {}
 
         for mc in self.multiconfigs:
-            taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist, allowincomplete=allowincomplete)
-            localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
+            taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist_by_mc[mc], allowincomplete=allowincomplete)
+            localdata[mc] = bb.data.createCopy(self.databuilder.mcdata[mc])
             bb.data.expandKeys(localdata[mc])
 
         current = 0
@@ -680,14 +690,14 @@ class BBCooker:
         bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data)
         return taskdata, runlist
 
-    def prepareTreeData(self, pkgs_to_build, task):
+    def prepareTreeData(self, pkgs_to_build, task, halt=False):
         """
         Prepare a runqueue and taskdata object for iteration over pkgs_to_build
         """
 
         # We set halt to False here to prevent unbuildable targets raising
         # an exception when we're just generating data
-        taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)
+        taskdata, runlist = self.buildTaskData(pkgs_to_build, task, halt, allowincomplete=True)
 
         return runlist, taskdata
 
@@ -701,7 +711,7 @@ class BBCooker:
         if not task.startswith("do_"):
             task = "do_%s" % task
 
-        runlist, taskdata = self.prepareTreeData(pkgs_to_build, task)
+        runlist, taskdata = self.prepareTreeData(pkgs_to_build, task, halt=True)
         rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
         rq.rqdata.prepare()
         return self.buildDependTree(rq, taskdata)
@@ -896,10 +906,11 @@ class BBCooker:
 
         depgraph = self.generateTaskDepTreeData(pkgs_to_build, task)
 
-        with open('pn-buildlist', 'w') as f:
-            for pn in depgraph["pn"]:
-                f.write(pn + "\n")
-        logger.info("PN build list saved to 'pn-buildlist'")
+        pns = depgraph["pn"].keys()
+        if pns:
+            with open('pn-buildlist', 'w') as f:
+                f.write("%s\n" % "\n".join(sorted(pns)))
+            logger.info("PN build list saved to 'pn-buildlist'")
 
         # Remove old format output files to ensure no confusion with stale data
         try:
@@ -933,7 +944,7 @@ class BBCooker:
         for mc in self.multiconfigs:
             # First get list of recipes, including skipped
             recipefns = list(self.recipecaches[mc].pkg_fn.keys())
-            recipefns.extend(self.skiplist.keys())
+            recipefns.extend(self.skiplist_by_mc[mc].keys())
 
             # Work out list of bbappends that have been applied
             applied_appends = []
@@ -952,13 +963,7 @@ class BBCooker:
                                '\n '.join(appends_without_recipes[mc])))
 
         if msgs:
-            msg = "\n".join(msgs)
-            warn_only = self.databuilder.mcdata[mc].getVar("BB_DANGLINGAPPENDS_WARNONLY", \
-                 False) or "no"
-            if warn_only.lower() in ("1", "yes", "true"):
-                bb.warn(msg)
-            else:
-                bb.fatal(msg)
+            bb.fatal("\n".join(msgs))
 
     def handlePrefProviders(self):
 
@@ -1338,7 +1343,7 @@ class BBCooker:
         self.buildSetVars()
         self.reset_mtime_caches()
 
-        bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)
+        bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.databuilder.data_hash, self.caches_array)
 
         layername = self.collections[mc].calc_bbfile_priority(fn)[2]
         infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn), layername)
@@ -1399,11 +1404,11 @@ class BBCooker:
 
             msg = None
             interrupted = 0
-            if halt or self.state == state.forceshutdown:
+            if halt or self.state == State.FORCE_SHUTDOWN:
                 rq.finish_runqueue(True)
                 msg = "Forced shutdown"
                 interrupted = 2
-            elif self.state == state.shutdown:
+            elif self.state == State.SHUTDOWN:
                 rq.finish_runqueue(False)
                 msg = "Stopped build"
                 interrupted = 1
@@ -1429,8 +1434,7 @@ class BBCooker:
                 if quietlog:
                     bb.runqueue.logger.setLevel(rqloglevel)
                 return bb.server.process.idleFinish(msg)
-            if retval is True:
-                return True
+
             return retval
 
         self.idleCallBackRegister(buildFileIdle, rq)
@@ -1459,7 +1463,6 @@ class BBCooker:
 
                 if t in task or getAllTaskSignatures:
                     try:
-                        rq.rqdata.prepare_task_hash(tid)
                         sig.append([pn, t, rq.rqdata.get_task_unihash(tid)])
                     except KeyError:
                         sig.append(self.getTaskSignatures(target, [t])[0])
@@ -1474,12 +1477,12 @@ class BBCooker:
         def buildTargetsIdle(server, rq, halt):
             msg = None
             interrupted = 0
-            if halt or self.state == state.forceshutdown:
+            if halt or self.state == State.FORCE_SHUTDOWN:
                 bb.event._should_exit.set()
                 rq.finish_runqueue(True)
                 msg = "Forced shutdown"
                 interrupted = 2
-            elif self.state == state.shutdown:
+            elif self.state == State.SHUTDOWN:
                 rq.finish_runqueue(False)
                 msg = "Stopped build"
                 interrupted = 1
@@ -1500,8 +1503,6 @@ class BBCooker:
                 bb.event.disable_heartbeat()
                 return bb.server.process.idleFinish(msg)
 
-            if retval is True:
-                return True
             return retval
 
         self.reset_mtime_caches()
@@ -1574,7 +1575,7 @@ class BBCooker:
 
 
     def updateCacheSync(self):
-        if self.state == state.running:
+        if self.state == State.RUNNING:
             return
 
         if not self.baseconfig_valid:
@@ -1584,19 +1585,19 @@ class BBCooker:
 
     # This is called for all async commands when self.state != running
     def updateCache(self):
-        if self.state == state.running:
+        if self.state == State.RUNNING:
             return
 
-        if self.state in (state.shutdown, state.forceshutdown, state.error):
+        if self.state in (State.SHUTDOWN, State.FORCE_SHUTDOWN, State.ERROR):
             if hasattr(self.parser, 'shutdown'):
                 self.parser.shutdown(clean=False)
                 self.parser.final_cleanup()
             raise bb.BBHandledException()
 
-        if self.state != state.parsing:
+        if self.state != State.PARSING:
             self.updateCacheSync()
 
-        if self.state != state.parsing and not self.parsecache_valid:
+        if self.state != State.PARSING and not self.parsecache_valid:
             bb.server.process.serverlog("Parsing started")
             self.parsewatched = {}
 
@@ -1630,9 +1631,10 @@ class BBCooker:
             self.parser = CookerParser(self, mcfilelist, total_masked)
             self._parsecache_set(True)
 
-        self.state = state.parsing
+        self.state = State.PARSING
 
         if not self.parser.parse_next():
+            bb.server.process.serverlog("Parsing completed")
             collectlog.debug("parsing complete")
             if self.parser.error:
                 raise bb.BBHandledException()
@@ -1640,7 +1642,7 @@ class BBCooker:
             self.handlePrefProviders()
             for mc in self.multiconfigs:
                 self.recipecaches[mc].bbfile_priority = self.collections[mc].collection_priorities(self.recipecaches[mc].pkg_fn, self.parser.mcfilelist[mc], self.data)
-            self.state = state.running
+            self.state = State.RUNNING
 
             # Send an event listing all stamps reachable after parsing
             # which the metadata may use to clean up stale data
@@ -1713,10 +1715,10 @@ class BBCooker:
 
     def shutdown(self, force=False):
         if force:
-            self.state = state.forceshutdown
+            self.state = State.FORCE_SHUTDOWN
             bb.event._should_exit.set()
         else:
-            self.state = state.shutdown
+            self.state = State.SHUTDOWN
 
         if self.parser:
             self.parser.shutdown(clean=False)
@@ -1726,7 +1728,7 @@ class BBCooker:
         if hasattr(self.parser, 'shutdown'):
             self.parser.shutdown(clean=False)
             self.parser.final_cleanup()
-        self.state = state.initial
+        self.state = State.INITIAL
         bb.event._should_exit.clear()
 
     def reset(self):
@@ -1813,8 +1815,8 @@ class CookerCollectFiles(object):
         bb.event.fire(CookerExit(), eventdata)
 
         # We need to track where we look so that we can know when the cache is invalid. There
-        # is no nice way to do this, this is horrid. We intercept the os.listdir()
-        # (or os.scandir() for python 3.6+) calls while we run glob().
+        # is no nice way to do this, this is horrid. We intercept the os.listdir() and os.scandir()
+        # calls while we run glob().
         origlistdir = os.listdir
         if hasattr(os, 'scandir'):
             origscandir = os.scandir
@@ -1994,8 +1996,9 @@ class ParsingFailure(Exception):
         Exception.__init__(self, realexception, recipe)
 
 class Parser(multiprocessing.Process):
-    def __init__(self, jobs, results, quit, profile):
+    def __init__(self, jobs, next_job_id, results, quit, profile):
         self.jobs = jobs
+        self.next_job_id = next_job_id
         self.results = results
         self.quit = quit
         multiprocessing.Process.__init__(self)
@@ -2005,6 +2008,7 @@ class Parser(multiprocessing.Process):
         self.queue_signals = False
         self.signal_received = []
         self.signal_threadlock = threading.Lock()
+        self.exit = False
 
     def catch_sig(self, signum, frame):
         if self.queue_signals:
@@ -2017,24 +2021,10 @@ class Parser(multiprocessing.Process):
             signal.signal(signal.SIGTERM, signal.SIG_DFL)
             os.kill(os.getpid(), signal.SIGTERM)
         elif signum == signal.SIGINT:
-            signal.default_int_handler(signum, frame)
+            self.exit = True
 
     def run(self):
-
-        if not self.profile:
-            self.realrun()
-            return
-
-        try:
-            import cProfile as profile
-        except:
-            import profile
-        prof = profile.Profile()
-        try:
-            profile.Profile.runcall(prof, self.realrun)
-        finally:
-            logfile = "profile-parse-%s.log" % multiprocessing.current_process().name
-            prof.dump_stats(logfile)
+        bb.utils.profile_function("parsing" in self.profile, self.realrun, "profile-parse-%s.log" % multiprocessing.current_process().name, process=False)
 
     def realrun(self):
         # Signal handling here is hard. We must not terminate any process or thread holding the write
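bb.utils.profile_function() replaces the inline cProfile boilerplate that each caller used to carry. Judging from the call site alone, the helper behaves roughly like the sketch below; the real implementation lives in bb/utils.py and can also post-process the dump (the process flag), which is omitted here:

    import cProfile

    def profile_function(profile, function, logfile, process=True):
        # Run function, optionally under cProfile, dumping raw stats to logfile.
        if not profile:
            return function()
        prof = cProfile.Profile()
        try:
            return prof.runcall(function)
        finally:
            prof.dump_stats(logfile)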
@@ -2055,15 +2045,19 @@ class Parser(multiprocessing.Process):
         pending = []
         havejobs = True
         try:
-            while havejobs or pending:
+            while (havejobs or pending) and not self.exit:
                 if self.quit.is_set():
                     break
 
                 job = None
-                try:
-                    job = self.jobs.pop()
-                except IndexError:
-                    havejobs = False
+                if havejobs:
+                    with self.next_job_id.get_lock():
+                        if self.next_job_id.value < len(self.jobs):
+                            job = self.jobs[self.next_job_id.value]
+                            self.next_job_id.value += 1
+                        else:
+                            havejobs = False
+
                 if job:
                     result = self.parse(*job)
                     # Clear the siggen cache after parsing to control memory usage, its huge
@@ -2098,7 +2092,6 @@ class Parser(multiprocessing.Process):
             except Exception as exc:
                 tb = sys.exc_info()[2]
                 exc.recipe = filename
-                exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3))
                 return True, None, exc
             # Need to turn BaseExceptions into Exceptions here so we gracefully shutdown
             # and for example a worker thread doesn't just exit on its own in response to
@@ -2113,7 +2106,7 @@ class CookerParser(object):
         self.mcfilelist = mcfilelist
         self.cooker = cooker
         self.cfgdata = cooker.data
-        self.cfghash = cooker.data_hash
+        self.cfghash = cooker.databuilder.data_hash
         self.cfgbuilder = cooker.databuilder
 
         # Accounting statistics
@@ -2130,13 +2123,13 @@ class CookerParser(object):
 
         self.bb_caches = bb.cache.MulticonfigCache(self.cfgbuilder, self.cfghash, cooker.caches_array)
         self.fromcache = set()
-        self.willparse = set()
+        self.willparse = []
         for mc in self.cooker.multiconfigs:
             for filename in self.mcfilelist[mc]:
                 appends = self.cooker.collections[mc].get_file_appends(filename)
                 layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2]
                 if not self.bb_caches[mc].cacheValid(filename, appends):
-                    self.willparse.add((mc, self.bb_caches[mc], filename, appends, layername))
+                    self.willparse.append((mc, self.bb_caches[mc], filename, appends, layername))
                 else:
                     self.fromcache.add((mc, self.bb_caches[mc], filename, appends, layername))
 
@@ -2155,18 +2148,18 @@ class CookerParser(object):
     def start(self):
         self.results = self.load_cached()
         self.processes = []
+
         if self.toparse:
             bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata)
 
+            next_job_id = multiprocessing.Value(ctypes.c_int, 0)
             self.parser_quit = multiprocessing.Event()
             self.result_queue = multiprocessing.Queue()
 
-            def chunkify(lst,n):
-                return [lst[i::n] for i in range(n)]
-            self.jobs = chunkify(list(self.willparse), self.num_processes)
-
+            # Have to pass in willparse at fork time so all parsing processes have the unpickleable data
+            # then access it by index from the parse queue.
             for i in range(0, self.num_processes):
-                parser = Parser(self.jobs[i], self.result_queue, self.parser_quit, self.cooker.configuration.profile)
+                parser = Parser(self.willparse, next_job_id, self.result_queue, self.parser_quit, self.cooker.configuration.profile)
                 parser.start()
                 self.process_names.append(parser.name)
                 self.processes.append(parser)
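Instead of chunkifying the job list up front, every Parser now receives the full willparse list at fork time (so the unpickleable entries are inherited rather than sent over a pipe) and claims the next index from a shared multiprocessing.Value under its lock, which hands each job out exactly once and load-balances naturally. A self-contained sketch of the pattern, with squaring integers standing in for recipe parsing:

    import ctypes
    import multiprocessing

    def worker(jobs, next_job_id, results):
        # Claim the next index under the Value's lock so no job is
        # handed out twice and no job is skipped.
        while True:
            with next_job_id.get_lock():
                if next_job_id.value >= len(jobs):
                    return
                job = jobs[next_job_id.value]
                next_job_id.value += 1
            results.put(job * job)

    if __name__ == "__main__":
        jobs = list(range(10))  # stand-in for self.willparse
        next_job_id = multiprocessing.Value(ctypes.c_int, 0)
        results = multiprocessing.Queue()
        procs = [multiprocessing.Process(target=worker, args=(jobs, next_job_id, results))
                 for _ in range(4)]
        for p in procs:
            p.start()
        out = sorted(results.get() for _ in jobs)  # drain before joining
        for p in procs:
            p.join()
        assert out == [n * n for n in jobs]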
@@ -2193,11 +2186,17 @@ class CookerParser(object):
 
         # Cleanup the queue before call process.join(), otherwise there might be
         # deadlocks.
-        while True:
-            try:
-                self.result_queue.get(timeout=0.25)
-            except queue.Empty:
-                break
+        def read_results():
+            while True:
+                try:
+                    self.result_queue.get(timeout=0.25)
+                except queue.Empty:
+                    break
+                except KeyError:
+                    # The restore state from SiggenRecipeInfo in cache.py can
+                    # fail here if this is an unclean shutdown since the state may have been
+                    # reset. Ignore key errors for that reason, we don't care.
+                    pass
 
         def sync_caches():
             for c in self.bb_caches.values():
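Draining before join matters because a multiprocessing.Queue feeds data to the pipe from a background thread in the child; joining a child whose queue still holds unconsumed data can deadlock. A minimal reproduction of the safe ordering:

    import multiprocessing
    import queue

    def drain(q):
        # Empty the queue so the child's feeder thread can flush and exit.
        while True:
            try:
                q.get(timeout=0.25)
            except queue.Empty:
                break

    def child(q):
        for i in range(10000):
            q.put(i)

    if __name__ == "__main__":
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=child, args=(q,))
        p.start()
        drain(q)  # drain first...
        p.join()  # ...then join, as shutdown() does above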
@@ -2209,15 +2208,19 @@ class CookerParser(object):
 
         self.parser_quit.set()
 
+        read_results()
+
         for process in self.processes:
-            process.join(0.5)
+            process.join(2)
 
         for process in self.processes:
             if process.exitcode is None:
                 os.kill(process.pid, signal.SIGINT)
 
+        read_results()
+
         for process in self.processes:
-            process.join(0.5)
+            process.join(2)
 
         for process in self.processes:
             if process.exitcode is None:
@@ -2225,9 +2228,8 @@ class CookerParser(object):
 
         for process in self.processes:
             process.join()
-            # Added in 3.7, cleans up zombies
-            if hasattr(process, "close"):
-                process.close()
+            # clean up zombies
+            process.close()
 
         bb.codeparser.parser_cache_save()
         bb.codeparser.parser_cache_savemerge()
@@ -2237,12 +2239,13 @@ class CookerParser(object):
             profiles = []
             for i in self.process_names:
                 logfile = "profile-parse-%s.log" % i
-                if os.path.exists(logfile):
+                if os.path.exists(logfile) and os.path.getsize(logfile):
                     profiles.append(logfile)
 
-            pout = "profile-parse.log.processed"
-            bb.utils.process_profilelog(profiles, pout = pout)
-            print("Processed parsing statistics saved to %s" % (pout))
+            if profiles:
+                fn_out = "profile-parse.log.report"
+                bb.utils.process_profilelog(profiles, fn_out=fn_out)
+                print("Processed parsing statistics saved to %s" % (fn_out))
 
     def final_cleanup(self):
         if self.syncthread:
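bb.utils.process_profilelog() merges the per-process dumps into a single report. Roughly what such a step does, approximated with only the standard library (the real helper may format its output differently):

    import pstats

    def merge_profiles(logfiles, fn_out):
        # Merge per-process cProfile dumps and write one sorted text report.
        with open(fn_out, "w") as out:
            stats = pstats.Stats(logfiles[0], stream=out)
            for logfile in logfiles[1:]:
                stats.add(logfile)
            stats.sort_stats("cumulative").print_stats()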
@@ -2274,7 +2277,7 @@ class CookerParser(object):
                 yield result
 
         if not (self.parsed >= self.toparse):
-            raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? Exiting.", None)
+            raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? (%s %s of %s) Exiting." % (len(self.processes), self.parsed, self.toparse), None)
 
 
     def parse_next(self):
@@ -2299,8 +2302,12 @@ class CookerParser(object):
             return False
         except ParsingFailure as exc:
             self.error += 1
-            logger.error('Unable to parse %s: %s' %
-                         (exc.recipe, bb.exceptions.to_string(exc.realexception)))
+
+            exc_desc = str(exc)
+            if isinstance(exc, SystemExit) and not isinstance(exc.code, str):
+                exc_desc = 'Exited with "%d"' % exc.code
+
+            logger.error('Unable to parse %s: %s' % (exc.recipe, exc_desc))
             self.shutdown(clean=False)
             return False
         except bb.parse.ParseError as exc:
@@ -2309,20 +2316,33 @@ class CookerParser(object):
             self.shutdown(clean=False, eventmsg=str(exc))
             return False
         except bb.data_smart.ExpansionError as exc:
+            def skip_frames(f, fn_prefix):
+                while f and f.tb_frame.f_code.co_filename.startswith(fn_prefix):
+                    f = f.tb_next
+                return f
+
             self.error += 1
             bbdir = os.path.dirname(__file__) + os.sep
-            etype, value, _ = sys.exc_info()
-            tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback))
+            etype, value, tb = sys.exc_info()
+
+            # Remove any frames where the code comes from bitbake. This
+            # prevents deep (and pretty useless) backtraces for expansion error
+            tb = skip_frames(tb, bbdir)
+            cur = tb
+            while cur:
+                cur.tb_next = skip_frames(cur.tb_next, bbdir)
+                cur = cur.tb_next
+
             logger.error('ExpansionError during parsing %s', value.recipe,
                          exc_info=(etype, value, tb))
             self.shutdown(clean=False)
             return False
         except Exception as exc:
             self.error += 1
-            etype, value, tb = sys.exc_info()
+            _, value, _ = sys.exc_info()
             if hasattr(value, "recipe"):
                 logger.error('Unable to parse %s' % value.recipe,
-                             exc_info=(etype, value, exc.traceback))
+                             exc_info=sys.exc_info())
             else:
                 # Most likely, an exception occurred during raising an exception
                 import traceback
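skip_frames() relies on traceback objects having a writable tb_next (Python 3.7+), so the chain can be spliced in place instead of being reformatted through the removed bb.exceptions helpers. A standalone demonstration (the path prefix is hypothetical):

    import sys

    def skip_frames(f, fn_prefix):
        # Drop leading frames whose source file lives under fn_prefix.
        while f and f.tb_frame.f_code.co_filename.startswith(fn_prefix):
            f = f.tb_next
        return f

    def trim_traceback(tb, fn_prefix):
        # tb_next is writable since Python 3.7, so filter the chain in place,
        # as parse_next() now does for bitbake's own frames.
        tb = skip_frames(tb, fn_prefix)
        cur = tb
        while cur:
            cur.tb_next = skip_frames(cur.tb_next, fn_prefix)
            cur = cur.tb_next
        return tb

    try:
        1 / 0
    except ZeroDivisionError:
        _, _, tb = sys.exc_info()
        tb = trim_traceback(tb, "/hypothetical/library/dir/")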
@@ -2343,7 +2363,7 @@ class CookerParser(object):
         for virtualfn, info_array in result:
             if info_array[0].skipped:
                 self.skipped += 1
-                self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0])
+                self.cooker.skiplist_by_mc[mc][virtualfn] = SkippedPackage(info_array[0])
             self.bb_caches[mc].add_info(virtualfn, info_array, self.cooker.recipecaches[mc],
                                         parsed=parsed, watcher = self.cooker.add_filewatch)
         return True