Diffstat (limited to 'bitbake/lib/bb/cooker.py')
 -rw-r--r--  bitbake/lib/bb/cooker.py | 257
 1 file changed, 140 insertions(+), 117 deletions(-)
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index c5bfef55d6..03f262ac16 100644
--- a/bitbake/lib/bb/cooker.py
+++ b/bitbake/lib/bb/cooker.py
@@ -8,16 +8,16 @@
 #
 # SPDX-License-Identifier: GPL-2.0-only
 #
-
+import enum
 import sys, os, glob, os.path, re, time
 import itertools
 import logging
-import multiprocessing
+from bb import multiprocessing
 import threading
 from io import StringIO, UnsupportedOperation
 from contextlib import closing
 from collections import defaultdict, namedtuple
-import bb, bb.exceptions, bb.command
+import bb, bb.command
 from bb import utils, data, parse, event, cache, providers, taskdata, runqueue, build
 import queue
 import signal
@@ -26,6 +26,7 @@ import json
 import pickle
 import codecs
 import hashserv
+import ctypes
 
 logger = logging.getLogger("BitBake")
 collectlog = logging.getLogger("BitBake.Collection")
@@ -48,16 +49,15 @@ class CollectionError(bb.BBHandledException):
     Exception raised when layer configuration is incorrect
     """
 
-class state:
-    initial, parsing, running, shutdown, forceshutdown, stopped, error = list(range(7))
 
-    @classmethod
-    def get_name(cls, code):
-        for name in dir(cls):
-            value = getattr(cls, name)
-            if type(value) == type(cls.initial) and value == code:
-                return name
-        raise ValueError("Invalid status code: %s" % code)
+class State(enum.Enum):
+    INITIAL = 0,
+    PARSING = 1,
+    RUNNING = 2,
+    SHUTDOWN = 3,
+    FORCE_SHUTDOWN = 4,
+    STOPPED = 5,
+    ERROR = 6
 
 
 class SkippedPackage:
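Note on the new State enum above: the trailing commas carried over from the old tuple-unpacking assignment make every member value except ERROR a one-element tuple (e.g. State.INITIAL.value == (0,)). This is harmless in practice, since Enum members compare by identity, and the member .name attribute replaces the old get_name() helper. A minimal sketch of the behaviour:

    import enum

    class State(enum.Enum):
        INITIAL = 0,   # trailing comma: value is the tuple (0,)
        ERROR = 6      # no trailing comma: value is the int 6

    assert State.INITIAL.value == (0,)
    assert State.ERROR.value == 6
    assert State.INITIAL is State.INITIAL   # members compare by identity
    print(State.INITIAL.name)               # "INITIAL" replaces state.get_name()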
@@ -134,7 +134,8 @@ class BBCooker:
         self.baseconfig_valid = False
         self.parsecache_valid = False
         self.eventlog = None
-        self.skiplist = {}
+        # The skiplists, one per multiconfig
+        self.skiplist_by_mc = defaultdict(dict)
         self.featureset = CookerFeatures()
         if featureSet:
             for f in featureSet:
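The defaultdict(dict) above means each multiconfig lazily gets its own skiplist on first access, with no explicit initialisation per multiconfig. An illustrative sketch (the "mc1"/"mc2" keys and the recipe path are hypothetical):

    from collections import defaultdict

    skiplist_by_mc = defaultdict(dict)
    # Writing through an unseen key creates that multiconfig's dict on demand
    skiplist_by_mc["mc1"]["/path/to/recipe.bb"] = "skipped"
    print(skiplist_by_mc["mc1"])   # {'/path/to/recipe.bb': 'skipped'}
    print(skiplist_by_mc["mc2"])   # {} - created empty on first access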
@@ -180,7 +181,7 @@
             pass
 
         self.command = bb.command.Command(self, self.process_server)
-        self.state = state.initial
+        self.state = State.INITIAL
 
         self.parser = None
 
@@ -226,23 +227,22 @@
             bb.warn("Cooker received SIGTERM, shutting down...")
         elif signum == signal.SIGHUP:
             bb.warn("Cooker received SIGHUP, shutting down...")
-        self.state = state.forceshutdown
+        self.state = State.FORCE_SHUTDOWN
         bb.event._should_exit.set()
 
     def setFeatures(self, features):
         # we only accept a new feature set if we're in state initial, so we can reset without problems
-        if not self.state in [state.initial, state.shutdown, state.forceshutdown, state.stopped, state.error]:
+        if not self.state in [State.INITIAL, State.SHUTDOWN, State.FORCE_SHUTDOWN, State.STOPPED, State.ERROR]:
             raise Exception("Illegal state for feature set change")
         original_featureset = list(self.featureset)
         for feature in features:
             self.featureset.setFeature(feature)
         bb.debug(1, "Features set %s (was %s)" % (original_featureset, list(self.featureset)))
-        if (original_featureset != list(self.featureset)) and self.state != state.error and hasattr(self, "data"):
+        if (original_featureset != list(self.featureset)) and self.state != State.ERROR and hasattr(self, "data"):
             self.reset()
 
     def initConfigurationData(self):
-
-        self.state = state.initial
+        self.state = State.INITIAL
         self.caches_array = []
 
         sys.path = self.orig_syspath.copy()
@@ -281,7 +281,6 @@
         self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False)
         self.databuilder.parseBaseConfiguration()
         self.data = self.databuilder.data
-        self.data_hash = self.databuilder.data_hash
         self.extraconfigdata = {}
 
         eventlog = self.data.getVar("BB_DEFAULT_EVENTLOG")
@@ -315,13 +314,22 @@
             dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db"
             upstream = self.data.getVar("BB_HASHSERVE_UPSTREAM") or None
             if upstream:
-                import socket
                 try:
-                    sock = socket.create_connection(upstream.split(":"), 5)
-                    sock.close()
-                except socket.error as e:
-                    bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
+                    with hashserv.create_client(upstream) as client:
+                        client.ping()
+                except ImportError as e:
+                    bb.fatal("""Unable to use hash equivalence server at '%s' due to missing or incorrect python module:
+%s
+Please install the needed module on the build host, or use an environment containing it:
+  - if you are using bitbake-setup, run 'bitbake-setup install-buildtools'
+  - openembedded-core layer contains 'scripts/install-buildtools' that can also be used
+  - or set up pip venv
+You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in significantly longer build times as bitbake will be unable to reuse prebuilt sstate artefacts."""
+                             % (upstream, repr(e)))
+                except ConnectionError as e:
+                    bb.warn("Unable to connect to hash equivalence server at '%s', please correct or remove BB_HASHSERVE_UPSTREAM:\n%s"
                             % (upstream, repr(e)))
+                    upstream = None
 
         self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
         self.hashserv = hashserv.create_server(
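The hunk above swaps a bare TCP probe for a protocol-level check: hashserv.create_client() plus ping() verifies that the upstream actually speaks the hash equivalence protocol, a missing client module is now fatal with remediation advice, and an unreachable server downgrades to a warning plus upstream = None. A condensed standalone sketch of the same flow (the upstream address is hypothetical):

    import hashserv

    upstream = "hashserv.example.com:8686"   # hypothetical BB_HASHSERVE_UPSTREAM
    try:
        with hashserv.create_client(upstream) as client:
            client.ping()                    # full round-trip, not just a TCP connect
    except ImportError as e:
        raise SystemExit("hash equivalence client module missing: %r" % e)
    except ConnectionError as e:
        print("cannot reach %s (%r), ignoring upstream" % (upstream, e))
        upstream = None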
@@ -370,6 +378,11 @@
         if not clean:
             bb.parse.BBHandler.cached_statements = {}
 
+        # If writes were made to any of the data stores, we need to recalculate the data
+        # store cache
+        if hasattr(self, "databuilder"):
+            self.databuilder.calc_datastore_hashes()
+
     def parseConfiguration(self):
         self.updateCacheSync()
 
@@ -612,8 +625,8 @@
         localdata = {}
 
         for mc in self.multiconfigs:
-            taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist, allowincomplete=allowincomplete)
-            localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
+            taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist_by_mc[mc], allowincomplete=allowincomplete)
+            localdata[mc] = bb.data.createCopy(self.databuilder.mcdata[mc])
             bb.data.expandKeys(localdata[mc])
 
         current = 0
@@ -680,14 +693,14 @@
         bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data)
         return taskdata, runlist
 
-    def prepareTreeData(self, pkgs_to_build, task):
+    def prepareTreeData(self, pkgs_to_build, task, halt=False):
         """
         Prepare a runqueue and taskdata object for iteration over pkgs_to_build
         """
 
         # We set halt to False here to prevent unbuildable targets raising
         # an exception when we're just generating data
-        taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)
+        taskdata, runlist = self.buildTaskData(pkgs_to_build, task, halt, allowincomplete=True)
 
         return runlist, taskdata
 
@@ -701,7 +714,7 @@
         if not task.startswith("do_"):
             task = "do_%s" % task
 
-        runlist, taskdata = self.prepareTreeData(pkgs_to_build, task)
+        runlist, taskdata = self.prepareTreeData(pkgs_to_build, task, halt=True)
         rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
         rq.rqdata.prepare()
         return self.buildDependTree(rq, taskdata)
@@ -896,10 +909,11 @@
 
         depgraph = self.generateTaskDepTreeData(pkgs_to_build, task)
 
-        with open('pn-buildlist', 'w') as f:
-            for pn in depgraph["pn"]:
-                f.write(pn + "\n")
-        logger.info("PN build list saved to 'pn-buildlist'")
+        pns = depgraph["pn"].keys()
+        if pns:
+            with open('pn-buildlist', 'w') as f:
+                f.write("%s\n" % "\n".join(sorted(pns)))
+            logger.info("PN build list saved to 'pn-buildlist'")
 
         # Remove old format output files to ensure no confusion with stale data
         try:
@@ -933,7 +947,7 @@
         for mc in self.multiconfigs:
             # First get list of recipes, including skipped
             recipefns = list(self.recipecaches[mc].pkg_fn.keys())
-            recipefns.extend(self.skiplist.keys())
+            recipefns.extend(self.skiplist_by_mc[mc].keys())
 
             # Work out list of bbappends that have been applied
             applied_appends = []
@@ -952,13 +966,7 @@
                                '\n '.join(appends_without_recipes[mc])))
 
         if msgs:
-            msg = "\n".join(msgs)
-            warn_only = self.databuilder.mcdata[mc].getVar("BB_DANGLINGAPPENDS_WARNONLY", \
-                False) or "no"
-            if warn_only.lower() in ("1", "yes", "true"):
-                bb.warn(msg)
-            else:
-                bb.fatal(msg)
+            bb.fatal("\n".join(msgs))
 
     def handlePrefProviders(self):
 
@@ -1338,7 +1346,7 @@
         self.buildSetVars()
         self.reset_mtime_caches()
 
-        bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)
+        bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.databuilder.data_hash, self.caches_array)
 
         layername = self.collections[mc].calc_bbfile_priority(fn)[2]
         infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn), layername)
@@ -1399,11 +1407,11 @@
 
             msg = None
             interrupted = 0
-            if halt or self.state == state.forceshutdown:
+            if halt or self.state == State.FORCE_SHUTDOWN:
                 rq.finish_runqueue(True)
                 msg = "Forced shutdown"
                 interrupted = 2
-            elif self.state == state.shutdown:
+            elif self.state == State.SHUTDOWN:
                 rq.finish_runqueue(False)
                 msg = "Stopped build"
                 interrupted = 1
@@ -1429,8 +1437,7 @@
                 if quietlog:
                     bb.runqueue.logger.setLevel(rqloglevel)
                 return bb.server.process.idleFinish(msg)
-            if retval is True:
-                return True
+
             return retval
 
         self.idleCallBackRegister(buildFileIdle, rq)
@@ -1459,7 +1466,6 @@
 
                 if t in task or getAllTaskSignatures:
                     try:
-                        rq.rqdata.prepare_task_hash(tid)
                         sig.append([pn, t, rq.rqdata.get_task_unihash(tid)])
                     except KeyError:
                         sig.append(self.getTaskSignatures(target, [t])[0])
@@ -1474,12 +1480,12 @@
         def buildTargetsIdle(server, rq, halt):
             msg = None
             interrupted = 0
-            if halt or self.state == state.forceshutdown:
+            if halt or self.state == State.FORCE_SHUTDOWN:
                 bb.event._should_exit.set()
                 rq.finish_runqueue(True)
                 msg = "Forced shutdown"
                 interrupted = 2
-            elif self.state == state.shutdown:
+            elif self.state == State.SHUTDOWN:
                 rq.finish_runqueue(False)
                 msg = "Stopped build"
                 interrupted = 1
@@ -1500,8 +1506,6 @@
                 bb.event.disable_heartbeat()
                 return bb.server.process.idleFinish(msg)
 
-            if retval is True:
-                return True
             return retval
 
         self.reset_mtime_caches()
@@ -1574,7 +1578,7 @@
 
 
     def updateCacheSync(self):
-        if self.state == state.running:
+        if self.state == State.RUNNING:
             return
 
         if not self.baseconfig_valid:
@@ -1584,19 +1588,19 @@
 
     # This is called for all async commands when self.state != running
     def updateCache(self):
-        if self.state == state.running:
+        if self.state == State.RUNNING:
             return
 
-        if self.state in (state.shutdown, state.forceshutdown, state.error):
+        if self.state in (State.SHUTDOWN, State.FORCE_SHUTDOWN, State.ERROR):
            if hasattr(self.parser, 'shutdown'):
                 self.parser.shutdown(clean=False)
                 self.parser.final_cleanup()
             raise bb.BBHandledException()
 
-        if self.state != state.parsing:
+        if self.state != State.PARSING:
             self.updateCacheSync()
 
-        if self.state != state.parsing and not self.parsecache_valid:
+        if self.state != State.PARSING and not self.parsecache_valid:
             bb.server.process.serverlog("Parsing started")
             self.parsewatched = {}
 
@@ -1630,9 +1634,10 @@
             self.parser = CookerParser(self, mcfilelist, total_masked)
             self._parsecache_set(True)
 
-        self.state = state.parsing
+        self.state = State.PARSING
 
         if not self.parser.parse_next():
+            bb.server.process.serverlog("Parsing completed")
             collectlog.debug("parsing complete")
             if self.parser.error:
                 raise bb.BBHandledException()
@@ -1640,7 +1645,7 @@
             self.handlePrefProviders()
             for mc in self.multiconfigs:
                 self.recipecaches[mc].bbfile_priority = self.collections[mc].collection_priorities(self.recipecaches[mc].pkg_fn, self.parser.mcfilelist[mc], self.data)
-            self.state = state.running
+            self.state = State.RUNNING
 
             # Send an event listing all stamps reachable after parsing
             # which the metadata may use to clean up stale data
@@ -1713,10 +1718,10 @@
 
     def shutdown(self, force=False):
         if force:
-            self.state = state.forceshutdown
+            self.state = State.FORCE_SHUTDOWN
             bb.event._should_exit.set()
         else:
-            self.state = state.shutdown
+            self.state = State.SHUTDOWN
 
         if self.parser:
             self.parser.shutdown(clean=False)
@@ -1726,7 +1731,7 @@
         if hasattr(self.parser, 'shutdown'):
             self.parser.shutdown(clean=False)
             self.parser.final_cleanup()
-        self.state = state.initial
+        self.state = State.INITIAL
         bb.event._should_exit.clear()
 
     def reset(self):
@@ -1813,8 +1818,8 @@ class CookerCollectFiles(object):
         bb.event.fire(CookerExit(), eventdata)
 
         # We need to track where we look so that we can know when the cache is invalid. There
-        # is no nice way to do this, this is horrid. We intercept the os.listdir()
-        # (or os.scandir() for python 3.6+) calls while we run glob().
+        # is no nice way to do this, this is horrid. We intercept the os.listdir() and os.scandir()
+        # calls while we run glob().
         origlistdir = os.listdir
         if hasattr(os, 'scandir'):
             origscandir = os.scandir
@@ -1994,8 +1999,9 @@ class ParsingFailure(Exception):
         Exception.__init__(self, realexception, recipe)
 
 class Parser(multiprocessing.Process):
-    def __init__(self, jobs, results, quit, profile):
+    def __init__(self, jobs, next_job_id, results, quit, profile):
         self.jobs = jobs
+        self.next_job_id = next_job_id
         self.results = results
         self.quit = quit
         multiprocessing.Process.__init__(self)
@@ -2005,6 +2011,7 @@ class Parser(multiprocessing.Process):
         self.queue_signals = False
         self.signal_received = []
         self.signal_threadlock = threading.Lock()
+        self.exit = False
 
     def catch_sig(self, signum, frame):
         if self.queue_signals:
@@ -2017,24 +2024,10 @@
             signal.signal(signal.SIGTERM, signal.SIG_DFL)
             os.kill(os.getpid(), signal.SIGTERM)
         elif signum == signal.SIGINT:
-            signal.default_int_handler(signum, frame)
+            self.exit = True
 
     def run(self):
-
-        if not self.profile:
-            self.realrun()
-            return
-
-        try:
-            import cProfile as profile
-        except:
-            import profile
-        prof = profile.Profile()
-        try:
-            profile.Profile.runcall(prof, self.realrun)
-        finally:
-            logfile = "profile-parse-%s.log" % multiprocessing.current_process().name
-            prof.dump_stats(logfile)
+        bb.utils.profile_function("parsing" in self.profile, self.realrun, "profile-parse-%s.log" % multiprocessing.current_process().name, process=False)
 
     def realrun(self):
         # Signal handling here is hard. We must not terminate any process or thread holding the write
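The run() rewrite above folds the hand-rolled cProfile boilerplate into a single bb.utils.profile_function() call; note that self.profile is now tested as a collection of profile targets ("parsing" in self.profile) rather than a boolean. The helper itself is not part of this diff; a rough sketch of the pattern it replaces, assuming a plain cProfile wrapper (not the actual bb.utils implementation), might be:

    import cProfile

    def profile_function(enabled, function, output_fn, process=True):
        # Assumed shape only - the real helper lives in bb/utils.py
        if not enabled:
            return function()
        prof = cProfile.Profile()
        try:
            return prof.runcall(function)
        finally:
            prof.dump_stats(output_fn)   # one stats file per parser process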
@@ -2055,15 +2048,19 @@
         pending = []
         havejobs = True
         try:
-            while havejobs or pending:
+            while (havejobs or pending) and not self.exit:
                 if self.quit.is_set():
                     break
 
                 job = None
-                try:
-                    job = self.jobs.pop()
-                except IndexError:
-                    havejobs = False
+                if havejobs:
+                    with self.next_job_id.get_lock():
+                        if self.next_job_id.value < len(self.jobs):
+                            job = self.jobs[self.next_job_id.value]
+                            self.next_job_id.value += 1
+                        else:
+                            havejobs = False
+
                 if job:
                     result = self.parse(*job)
                     # Clear the siggen cache after parsing to control memory usage, its huge
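This hunk (together with the start() change further down) replaces static chunkify() splitting with dynamic job claiming: every worker holds the full willparse list and takes the next index from a shared counter under its lock, so a fast worker is never left idle while another drains a slow chunk. A reduced, runnable illustration (worker() and the job names are hypothetical stand-ins):

    import ctypes
    import multiprocessing

    def worker(jobs, next_job_id):
        while True:
            with next_job_id.get_lock():        # serialise index claims
                if next_job_id.value >= len(jobs):
                    return                      # no jobs left
                job = jobs[next_job_id.value]
                next_job_id.value += 1
            print("processing", job)            # stand-in for self.parse(*job)

    if __name__ == "__main__":
        jobs = ["a.bb", "b.bb", "c.bb", "d.bb"] # hypothetical recipe list
        next_job_id = multiprocessing.Value(ctypes.c_int, 0)
        procs = [multiprocessing.Process(target=worker, args=(jobs, next_job_id))
                 for _ in range(2)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()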
@@ -2098,7 +2095,6 @@
         except Exception as exc:
             tb = sys.exc_info()[2]
             exc.recipe = filename
-            exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3))
             return True, None, exc
         # Need to turn BaseExceptions into Exceptions here so we gracefully shutdown
         # and for example a worker thread doesn't just exit on its own in response to
@@ -2113,7 +2109,7 @@ class CookerParser(object):
         self.mcfilelist = mcfilelist
         self.cooker = cooker
         self.cfgdata = cooker.data
-        self.cfghash = cooker.data_hash
+        self.cfghash = cooker.databuilder.data_hash
         self.cfgbuilder = cooker.databuilder
 
         # Accounting statistics
@@ -2130,13 +2126,13 @@
 
         self.bb_caches = bb.cache.MulticonfigCache(self.cfgbuilder, self.cfghash, cooker.caches_array)
         self.fromcache = set()
-        self.willparse = set()
+        self.willparse = []
         for mc in self.cooker.multiconfigs:
             for filename in self.mcfilelist[mc]:
                 appends = self.cooker.collections[mc].get_file_appends(filename)
                 layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2]
                 if not self.bb_caches[mc].cacheValid(filename, appends):
-                    self.willparse.add((mc, self.bb_caches[mc], filename, appends, layername))
+                    self.willparse.append((mc, self.bb_caches[mc], filename, appends, layername))
                 else:
                     self.fromcache.add((mc, self.bb_caches[mc], filename, appends, layername))
 
@@ -2155,18 +2151,18 @@
     def start(self):
         self.results = self.load_cached()
         self.processes = []
+
         if self.toparse:
             bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata)
 
+            next_job_id = multiprocessing.Value(ctypes.c_int, 0)
             self.parser_quit = multiprocessing.Event()
             self.result_queue = multiprocessing.Queue()
 
-            def chunkify(lst,n):
-                return [lst[i::n] for i in range(n)]
-            self.jobs = chunkify(list(self.willparse), self.num_processes)
-
+            # Have to pass in willparse at fork time so all parsing processes have the unpickleable data
+            # then access it by index from the parse queue.
             for i in range(0, self.num_processes):
-                parser = Parser(self.jobs[i], self.result_queue, self.parser_quit, self.cooker.configuration.profile)
+                parser = Parser(self.willparse, next_job_id, self.result_queue, self.parser_quit, self.cooker.configuration.profile)
                 parser.start()
                 self.process_names.append(parser.name)
                 self.processes.append(parser)
@@ -2193,11 +2189,17 @@
 
         # Cleanup the queue before call process.join(), otherwise there might be
         # deadlocks.
-        while True:
-            try:
-                self.result_queue.get(timeout=0.25)
-            except queue.Empty:
-                break
+        def read_results():
+            while True:
+                try:
+                    self.result_queue.get(timeout=0.25)
+                except queue.Empty:
+                    break
+                except KeyError:
+                    # The restore state from SiggenRecipeInfo in cache.py can
+                    # fail here if this is an unclean shutdown since the state may have been
+                    # reset. Ignore key errors for that reason, we don't care.
+                    pass
 
         def sync_caches():
             for c in self.bb_caches.values():
@@ -2209,15 +2211,19 @@
 
         self.parser_quit.set()
 
+        read_results()
+
         for process in self.processes:
-            process.join(0.5)
+            process.join(2)
 
         for process in self.processes:
             if process.exitcode is None:
                 os.kill(process.pid, signal.SIGINT)
 
+        read_results()
+
         for process in self.processes:
-            process.join(0.5)
+            process.join(2)
 
         for process in self.processes:
             if process.exitcode is None:
@@ -2225,9 +2231,8 @@
 
         for process in self.processes:
             process.join()
-            # Added in 3.7, cleans up zombies
-            if hasattr(process, "close"):
-                process.close()
+            # clean up zombies
+            process.close()
 
         bb.codeparser.parser_cache_save()
         bb.codeparser.parser_cache_savemerge()
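The shutdown changes above hinge on draining result_queue before joining: a child process cannot exit until everything it has put on a multiprocessing.Queue has been flushed through the feeder pipe, so joining without reading can deadlock. Hence read_results() now runs before each join pass, and the join timeout grows from 0.5s to 2s. A minimal reproduction of the hazard:

    import multiprocessing
    import queue

    def child(q):
        q.put("x" * 1_000_000)   # larger than the OS pipe buffer

    if __name__ == "__main__":
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=child, args=(q,))
        p.start()
        # p.join() here could hang: the child blocks on exit until its
        # queued data is consumed. Drain first, as shutdown() now does.
        while p.is_alive() or not q.empty():
            try:
                q.get(timeout=0.25)
            except queue.Empty:
                pass
        p.join()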
@@ -2237,12 +2242,13 @@
             profiles = []
             for i in self.process_names:
                 logfile = "profile-parse-%s.log" % i
-                if os.path.exists(logfile):
+                if os.path.exists(logfile) and os.path.getsize(logfile):
                     profiles.append(logfile)
 
-            pout = "profile-parse.log.processed"
-            bb.utils.process_profilelog(profiles, pout = pout)
-            print("Processed parsing statistics saved to %s" % (pout))
+            if profiles:
+                fn_out = "profile-parse.log.report"
+                bb.utils.process_profilelog(profiles, fn_out=fn_out)
+                print("Processed parsing statistics saved to %s" % (fn_out))
 
     def final_cleanup(self):
         if self.syncthread:
@@ -2274,7 +2280,7 @@
                 yield result
 
         if not (self.parsed >= self.toparse):
-            raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? Exiting.", None)
+            raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? (%s %s of %s) Exiting." % (len(self.processes), self.parsed, self.toparse), None)
 
 
     def parse_next(self):
@@ -2299,8 +2305,12 @@
             return False
         except ParsingFailure as exc:
             self.error += 1
-            logger.error('Unable to parse %s: %s' %
-                    (exc.recipe, bb.exceptions.to_string(exc.realexception)))
+
+            exc_desc = str(exc)
+            if isinstance(exc, SystemExit) and not isinstance(exc.code, str):
+                exc_desc = 'Exited with "%d"' % exc.code
+
+            logger.error('Unable to parse %s: %s' % (exc.recipe, exc_desc))
             self.shutdown(clean=False)
             return False
         except bb.parse.ParseError as exc:
@@ -2309,20 +2319,33 @@
             self.shutdown(clean=False, eventmsg=str(exc))
             return False
         except bb.data_smart.ExpansionError as exc:
+            def skip_frames(f, fn_prefix):
+                while f and f.tb_frame.f_code.co_filename.startswith(fn_prefix):
+                    f = f.tb_next
+                return f
+
             self.error += 1
             bbdir = os.path.dirname(__file__) + os.sep
-            etype, value, _ = sys.exc_info()
-            tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback))
+            etype, value, tb = sys.exc_info()
+
+            # Remove any frames where the code comes from bitbake. This
+            # prevents deep (and pretty useless) backtraces for expansion error
+            tb = skip_frames(tb, bbdir)
+            cur = tb
+            while cur:
+                cur.tb_next = skip_frames(cur.tb_next, bbdir)
+                cur = cur.tb_next
+
             logger.error('ExpansionError during parsing %s', value.recipe,
                          exc_info=(etype, value, tb))
             self.shutdown(clean=False)
             return False
         except Exception as exc:
             self.error += 1
-            etype, value, tb = sys.exc_info()
+            _, value, _ = sys.exc_info()
             if hasattr(value, "recipe"):
                 logger.error('Unable to parse %s' % value.recipe,
-                        exc_info=(etype, value, exc.traceback))
+                        exc_info=sys.exc_info())
             else:
                 # Most likely, an exception occurred during raising an exception
                 import traceback
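The ExpansionError handler now works on the live traceback instead of the removed exc.traceback attribute: skip_frames() drops leading frames whose source file lives under bitbake's own directory, and the while loop splices out interior runs of such frames (tb_next is writable on traceback objects since Python 3.7). The same logic restated on its own:

    def skip_frames(f, fn_prefix):
        # Advance past consecutive frames whose source file starts with fn_prefix
        while f and f.tb_frame.f_code.co_filename.startswith(fn_prefix):
            f = f.tb_next
        return f

    def trim_traceback(tb, fn_prefix):
        # Drop leading frames from fn_prefix, then splice out interior runs,
        # mirroring the loop added in parse_next() above
        tb = skip_frames(tb, fn_prefix)
        cur = tb
        while cur:
            cur.tb_next = skip_frames(cur.tb_next, fn_prefix)
            cur = cur.tb_next
        return tb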
@@ -2343,7 +2366,7 @@
         for virtualfn, info_array in result:
             if info_array[0].skipped:
                 self.skipped += 1
-                self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0])
+                self.cooker.skiplist_by_mc[mc][virtualfn] = SkippedPackage(info_array[0])
             self.bb_caches[mc].add_info(virtualfn, info_array, self.cooker.recipecaches[mc],
                                         parsed=parsed, watcher = self.cooker.add_filewatch)
         return True