Diffstat (limited to 'bitbake')

-rwxr-xr-x  bitbake/bin/bitbake-worker  | 12
-rw-r--r--  bitbake/lib/bb/runqueue.py  | 13

2 files changed, 14 insertions, 11 deletions
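This commit threads a new dry_run_exec flag from the runqueue into bitbake-worker. When BB_SETSCENE_ENFORCE is set to "1", RunQueueData records that in setscene_enforce, real tasks are dispatched with dry_run_exec=True (the worker skips their execution and their stamps are not written), while setscene tasks always pass False and run normally. As an aside, not part of this diff, the mode the new code checks for is enabled by setting the variable to the literal string "1", for example:

    # Assumption for illustration only: conf/local.conf is one common place to
    # set this; RunQueueData merely checks getVar('BB_SETSCENE_ENFORCE') == "1".
    BB_SETSCENE_ENFORCE = "1"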
diff --git a/bitbake/bin/bitbake-worker b/bitbake/bin/bitbake-worker
index 4dbd681172..5010bada79 100755
--- a/bitbake/bin/bitbake-worker
+++ b/bitbake/bin/bitbake-worker
@@ -136,7 +136,7 @@ def sigterm_handler(signum, frame):
     os.killpg(0, signal.SIGTERM)
     sys.exit()
 
-def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, appends, taskdepdata, extraconfigdata, quieterrors=False):
+def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, appends, taskdepdata, extraconfigdata, quieterrors=False, dry_run_exec=False):
     # We need to setup the environment BEFORE the fork, since
     # a fork() or exec*() activates PSEUDO...
 
@@ -152,8 +152,10 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
         except TypeError:
             umask = taskdep['umask'][taskname]
 
+    dry_run = cfg.dry_run or dry_run_exec
+
     # We can't use the fakeroot environment in a dry run as it possibly hasn't been built
-    if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not cfg.dry_run:
+    if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run:
         envvars = (workerdata["fakerootenv"][fn] or "").split()
         for key, value in (var.split('=') for var in envvars):
             envbackup[key] = os.environ.get(key)
@@ -263,7 +265,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
                     logger.critical(traceback.format_exc())
                 os._exit(1)
             try:
-                if cfg.dry_run:
+                if dry_run:
                     return 0
                 return bb.build.exec_task(fn, taskname, the_data, cfg.profile)
             except:
@@ -421,10 +423,10 @@ class BitbakeWorker(object):
         sys.exit(0)
 
     def handle_runtask(self, data):
-        fn, task, taskname, quieterrors, appends, taskdepdata = pickle.loads(data)
+        fn, task, taskname, quieterrors, appends, taskdepdata, dry_run_exec = pickle.loads(data)
         workerlog_write("Handling runtask %s %s %s\n" % (task, fn, taskname))
 
-        pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, fn, task, taskname, appends, taskdepdata, self.extraconfigdata, quieterrors)
+        pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, fn, task, taskname, appends, taskdepdata, self.extraconfigdata, quieterrors, dry_run_exec)
 
         self.build_pids[pid] = task
         self.build_pipes[pid] = runQueueWorkerPipe(pipein, pipeout)
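The <runtask> message the worker parses is a fixed-width pickled tuple, so every writer in runqueue.py has to grow the same seventh field that handle_runtask() now unpacks. A minimal round-trip sketch of the framing, using made-up stand-in values rather than anything from a real build:

    import pickle

    # Stand-in values; in bitbake these come from the runqueue (taskfn, task id,
    # task name, quieterrors, bbappend list, task dependency data, dry_run_exec).
    payload = ("/path/to/recipe.bb", 42, "do_compile", False, [], {}, True)

    # Runqueue side: frame the pickled tuple between <runtask> markers on the
    # worker's stdin.
    msg = b"<runtask>" + pickle.dumps(payload) + b"</runtask>"

    # Worker side: strip the framing and unpack. The unpack width must match the
    # writer exactly, which is why the RunQueueExecuteTasks and scenequeue call
    # sites in this patch all gained a seventh element.
    data = msg[len(b"<runtask>"):-len(b"</runtask>")]
    fn, task, taskname, quieterrors, appends, taskdepdata, dry_run_exec = pickle.loads(data)
    assert dry_run_exec is True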
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index da7502118a..1e1bdf4b6e 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -266,6 +266,7 @@ class RunQueueData:
         self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
         self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
         self.setscenewhitelist_checked = False
+        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
         self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()
 
         self.reset()
@@ -1790,7 +1791,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
             bb.event.fire(startevent, self.cfgData)
             self.runq_running.add(task)
             self.stats.taskActive()
-            if not self.cooker.configuration.dry_run:
+            if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
             self.task_complete(task)
             return True
@@ -1801,7 +1802,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
         taskdepdata = self.build_taskdepdata(task)
 
         taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
-        if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
+        if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
             if not self.rq.fakeworker:
                 try:
                     self.rq.start_fakeworker(self)
@@ -1810,10 +1811,10 @@ class RunQueueExecuteTasks(RunQueueExecute):
                     self.rq.state = runQueueFailed
                     self.stats.taskFailed()
                     return True
-            self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>")
+            self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
             self.rq.fakeworker[mc].process.stdin.flush()
         else:
-            self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata)) + b"</runtask>")
+            self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
             self.rq.worker[mc].process.stdin.flush()
 
         self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
@@ -2219,10 +2220,10 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
         if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
             if not self.rq.fakeworker:
                 self.rq.start_fakeworker(self)
-            self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata)) + b"</runtask>")
+            self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
             self.rq.fakeworker[mc].process.stdin.flush()
         else:
-            self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata)) + b"</runtask>")
+            self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
             self.rq.worker[mc].process.stdin.flush()
 
         self.runq_running.add(task)
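Inside the worker the two flags collapse into a single decision, dry_run = cfg.dry_run or dry_run_exec: a task body is skipped (and reported as successful) if either the global dry-run option is set or the runqueue requested a dry execution because setscene enforcement is active. A condensed paraphrase of that logic, not code from the tree:

    def should_skip_execution(cfg_dry_run, dry_run_exec):
        # Hypothetical helper for illustration; mirrors the dry_run check added
        # to fork_off_task() above. Skipping also disables the fakeroot
        # environment, since it may not have been built in a dry run.
        return cfg_dry_run or dry_run_exec

    # Real tasks dispatched by RunQueueExecuteTasks pass dry_run_exec=setscene_enforce;
    # setscene tasks (RunQueueExecuteScenequeue) always pass False, so they still execute.
    assert should_skip_execution(False, True) is True    # enforce mode: skip the real task
    assert should_skip_execution(False, False) is False  # setscene task: runs normally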