 bitbake/lib/bb/runqueue.py | 143
 bitbake/lib/bb/siggen.py   |   9
 2 files changed, 92 insertions(+), 60 deletions(-)
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 09f9ac11f8..b4134f8266 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -723,10 +723,7 @@ class RunQueueData:
         hashdata["msg-debug-domains"] = self.cooker.configuration.debug_domains
         hashdata["verbose"] = self.cooker.configuration.verbose
 
-        # Write out the hashes into a file for use by the individual tasks
-        self.hashfile = bb.data.expand("${TMPDIR}/cache/hashdata.dat", self.cooker.configuration.data)
-        p = pickle.Pickler(file(self.hashfile, "wb"), -1)
-        p.dump(hashdata)
+        self.hashdata = hashdata
 
         return len(self.runq_fnid)
 
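The lines removed here pickled the hash data to ${TMPDIR}/cache/hashdata.dat so the external bitbake-runtask helper could load it back; with tasks now forked from the same process, the dictionary is simply kept on the RunQueueData object as self.hashdata. For illustration only, a minimal standalone sketch of the file-based handoff this replaces versus keeping the data in memory (save_hashdata/load_hashdata and the temporary path are hypothetical, not BitBake API):

import os
import pickle
import tempfile

def save_hashdata(hashdata, path):
    # File-based handoff: an external helper process re-reads the data.
    with open(path, "wb") as f:
        pickle.dump(hashdata, f, pickle.HIGHEST_PROTOCOL)

def load_hashdata(path):
    with open(path, "rb") as f:
        return pickle.load(f)

if __name__ == "__main__":
    hashdata = {"hashes": {"foo.bb.do_compile": "abc123"}, "deps": {}}

    # Old style: serialise to disk for a separate executable to pick up.
    path = os.path.join(tempfile.mkdtemp(), "hashdata.dat")
    save_hashdata(hashdata, path)
    assert load_hashdata(path) == hashdata

    # New style: keep the dict in memory; a fork()ed child inherits it
    # directly, so no serialisation step is needed.
    inherited = hashdata
    assert inherited["hashes"]["foo.bb.do_compile"] == "abc123"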
@@ -1010,7 +1007,6 @@ class RunQueueExecute:
         self.runq_complete = []
         self.build_pids = {}
         self.build_pipes = {}
-        self.build_procs = {}
         self.failed_fnids = []
 
     def runqueue_process_waitpid(self):
@@ -1018,19 +1014,17 @@ class RunQueueExecute:
         Return none is there are no processes awaiting result collection, otherwise
         collect the process exit codes and close the information pipe.
         """
-        for pid in self.build_procs.keys():
-            proc = self.build_procs[pid]
-            proc.poll()
-            if proc.returncode is not None:
-                task = self.build_pids[pid]
-                del self.build_pids[pid]
-                self.build_pipes[pid].close()
-                del self.build_pipes[pid]
-                del self.build_procs[pid]
-                if proc.returncode != 0:
-                    self.task_fail(task, proc.returncode)
-                else:
-                    self.task_complete(task)
+        result = os.waitpid(-1, os.WNOHANG)
+        if result[0] is 0 and result[1] is 0:
+            return None
+        task = self.build_pids[result[0]]
+        del self.build_pids[result[0]]
+        self.build_pipes[result[0]].close()
+        del self.build_pipes[result[0]]
+        if result[1] != 0:
+            self.task_fail(task, result[1])
+        else:
+            self.task_complete(task)
 
     def finish_now(self):
         if self.stats.active:
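The rewritten runqueue_process_waitpid() reaps workers with a non-blocking os.waitpid(-1, os.WNOHANG) instead of polling subprocess objects: a (0, 0) result means no child has exited yet; otherwise the returned pid indexes the bookkeeping dictionaries and the wait status decides between task_fail() and task_complete(). A minimal standalone sketch of that reaping pattern (reap_one and the task names are illustrative, not the BitBake code itself):

import os
import time

def reap_one(pid_to_task):
    """Reap at most one exited child; return (task, exitcode) or None."""
    pid, status = os.waitpid(-1, os.WNOHANG)
    if pid == 0:
        return None          # children exist but none has exited yet
    task = pid_to_task.pop(pid)
    exitcode = os.WEXITSTATUS(status) if os.WIFEXITED(status) else -1
    return task, exitcode

if __name__ == "__main__":
    pid_to_task = {}
    for task, rc in (("do_fetch", 0), ("do_compile", 1)):
        pid = os.fork()
        if pid == 0:
            os._exit(rc)     # child: pretend to run the task
        pid_to_task[pid] = task

    while pid_to_task:
        done = reap_one(pid_to_task)
        if done is None:
            time.sleep(0.01) # nothing finished; poll again later
            continue
        task, exitcode = done
        print(task, "failed" if exitcode else "completed")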
@@ -1062,38 +1056,81 @@ class RunQueueExecute:
             return
 
     def fork_off_task(self, fn, task, taskname):
-        try:
-            the_data = self.cooker.bb_cache.loadDataFull(fn, self.cooker.get_file_appends(fn), self.cooker.configuration.data)
+        the_data = self.cooker.bb_cache.loadDataFull(fn, self.cooker.get_file_appends(fn), self.cooker.configuration.data)
 
-            env = bb.data.export_vars(the_data)
-            env = bb.data.export_envvars(env, the_data)
+        env = bb.data.export_vars(the_data)
+        env = bb.data.export_envvars(env, the_data)
 
-            taskdep = self.rqdata.dataCache.task_deps[fn]
-            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot']:
-                envvars = the_data.getVar("FAKEROOTENV", True).split()
-                for var in envvars:
-                    comps = var.split("=")
-                    env[comps[0]] = comps[1]
-                fakedirs = (the_data.getVar("FAKEROOTDIRS", True) or "").split()
-                for p in fakedirs:
-                    bb.mkdirhier(p)
-                bb.msg.debug(2, bb.msg.domain.RunQueue, "Running %s:%s under fakeroot, state dir is %s" % (fn, taskname, fakedirs))
+        taskdep = self.rqdata.dataCache.task_deps[fn]
+        if 'fakeroot' in taskdep and taskname in taskdep['fakeroot']:
+            envvars = the_data.getVar("FAKEROOTENV", True).split()
+            for var in envvars:
+                comps = var.split("=")
+                env[comps[0]] = comps[1]
+            fakedirs = (the_data.getVar("FAKEROOTDIRS", True) or "").split()
+            for p in fakedirs:
+                bb.mkdirhier(p)
+            bb.msg.debug(2, bb.msg.domain.RunQueue, "Running %s:%s under fakeroot, state dir is %s" % (fn, taskname, fakedirs))
 
-            env['BB_TASKHASH'] = self.rqdata.runq_hash[task]
-            env['PATH'] = self.cooker.configuration.initial_path
+        env['BB_TASKHASH'] = self.rqdata.runq_hash[task]
+        env['PATH'] = self.cooker.configuration.initial_path
 
-            sys.stdout.flush()
-            sys.stderr.flush()
+        envbackup = os.environ.copy()
+        os.environ = env
 
-            runtask = the_data.getVar("BB_RUNTASK", True) or "bitbake-runtask"
-            proc = subprocess.Popen([runtask, self.rqdata.hashfile, fn, taskname, str(self.cooker.configuration.dry_run)], env=env, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
-            pipein = proc.stdout
-            pipeout = proc.stdin
-            pid = proc.pid
-        except OSError as e:
+        sys.stdout.flush()
+        sys.stderr.flush()
+
+        try:
+            pipeinfd, pipeoutfd = os.pipe()
+            pipein = os.fdopen(pipeinfd, 'rb', 4096)
+            pipeout = os.fdopen(pipeoutfd, 'wb', 4096)
+
+            pid = os.fork()
+        except OSError as e:
             bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
+        if pid == 0:
+            pipein.close()
+            # Save out the PID so that the event can include it the
+            # events
+            bb.event.worker_pid = os.getpid()
+            bb.event.worker_pipe = pipeout
+            bb.event.useStdout = False
+
+            self.rq.state = runQueueChildProcess
+            # Make the child the process group leader
+            os.setpgid(0, 0)
+            # No stdin
+            newsi = os.open('/dev/null', os.O_RDWR)
+            os.dup2(newsi, sys.stdin.fileno())
+            # Stdout to a logfile
+            #logout = data.expand("${TMPDIR}/log/stdout.%s" % os.getpid(), self.cfgData, True)
+            #mkdirhier(os.path.dirname(logout))
+            #newso = open(logout, 'w')
+            #os.dup2(newso.fileno(), sys.stdout.fileno())
+            #os.dup2(newso.fileno(), sys.stderr.fileno())
+            if taskname.endswith("_setscene"):
+                the_data.setVarFlag(taskname, "quieterrors", "1")
+
+            bb.data.setVar("BB_WORKERCONTEXT", "1", the_data)
+            bb.parse.siggen.set_taskdata(self.rqdata.hashdata["hashes"], self.rqdata.hashdata["deps"])
+
+            for h in self.rqdata.hashdata["hashes"]:
+                bb.data.setVar("BBHASH_%s" % h, self.rqdata.hashdata["hashes"][h], the_data)
+            for h in self.rqdata.hashdata["deps"]:
+                bb.data.setVar("BBHASHDEPS_%s" % h, self.rqdata.hashdata["deps"][h], the_data)
+
+            ret = 0
+            try:
+                if not self.cooker.configuration.dry_run:
+                    ret = bb.build.exec_task(fn, taskname, the_data)
+                os._exit(ret)
+            except:
+                os._exit(1)
+
+        os.environ = envbackup
 
-        return proc
+        return pid, pipein, pipeout
 
 class RunQueueExecuteDummy(RunQueueExecute):
     def __init__(self, rq):
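fork_off_task() now builds the worker in-process: it opens a pipe, calls os.fork(), and the child closes the read end, becomes a process group leader, detaches stdin, runs the task and leaves via os._exit(), while the parent gets back the pid and the pipe ends. A compact standalone sketch of that fork/pipe/_exit pattern (fork_off and run_task are stand-ins for illustration, not the real bb.build.exec_task API):

import os
import sys

def fork_off(run_task):
    """Fork a worker; return (pid, read_end) to the parent."""
    pipein_fd, pipeout_fd = os.pipe()
    pid = os.fork()
    if pid == 0:
        # Child: close the read end, lead its own group, detach stdin.
        os.close(pipein_fd)
        os.setpgid(0, 0)
        devnull = os.open(os.devnull, os.O_RDWR)
        os.dup2(devnull, sys.stdin.fileno())
        try:
            ret = run_task(os.fdopen(pipeout_fd, "wb", 4096))
            os._exit(ret)        # never run parent cleanup in the child
        except Exception:
            os._exit(1)
    # Parent: close the write end, keep the read end for worker output.
    os.close(pipeout_fd)
    return pid, os.fdopen(pipein_fd, "rb", 4096)

if __name__ == "__main__":
    def run_task(pipe):
        pipe.write(b"task ran in pid %d\n" % os.getpid())
        pipe.close()
        return 0

    pid, pipein = fork_off(run_task)
    print(pipein.read().decode().strip())
    _, status = os.waitpid(pid, 0)
    print("exit code:", os.WEXITSTATUS(status))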
@@ -1238,11 +1275,10 @@ class RunQueueExecuteTasks(RunQueueExecute):
                                                                     task,
                                                                     self.rqdata.get_user_idstring(task)))
 
-            proc = self.fork_off_task(fn, task, taskname)
+            pid, pipein, pipeout = self.fork_off_task(fn, task, taskname)
 
-            self.build_pids[proc.pid] = task
-            self.build_procs[proc.pid] = proc
-            self.build_pipes[proc.pid] = runQueuePipe(proc.stdout, proc.stdin, self.cfgData)
+            self.build_pids[pid] = task
+            self.build_pipes[pid] = runQueuePipe(pipein, pipeout, self.cfgData)
             self.runq_running[task] = 1
             self.stats.taskActive()
             if self.stats.active < self.number_tasks:
@@ -1487,11 +1523,10 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
                         "Running setscene task %d of %d (%s:%s)" % (self.stats.completed + self.stats.active + self.stats.failed + 1,
                                                                      self.stats.total, fn, taskname))
 
-            proc = self.fork_off_task(fn, realtask, taskname)
+            pid, pipein, pipeout = self.fork_off_task(fn, realtask, taskname)
 
-            self.build_pids[proc.pid] = task
-            self.build_procs[proc.pid] = proc
-            self.build_pipes[proc.pid] = runQueuePipe(proc.stdout, proc.stdin, self.cfgData)
+            self.build_pids[pid] = task
+            self.build_pipes[pid] = runQueuePipe(pipein, pipeout, self.cfgData)
             self.runq_running[task] = 1
             self.stats.taskActive()
             if self.stats.active < self.number_tasks:
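Both execute() paths record each forked worker in two maps keyed by pid: build_pids[pid] holds the task and build_pipes[pid] wraps the pipe ends so the parent can collect worker output as it arrives. A minimal sketch of one way such a non-blocking pipe wrapper can look (NonBlockingPipe is an illustration of the idea, not the real runQueuePipe class):

import fcntl
import os

class NonBlockingPipe:
    """Accumulate whatever the worker has written so far, without blocking."""

    def __init__(self, readfd):
        self.readfd = readfd
        self.buffer = b""
        # Switch the read end to non-blocking mode.
        flags = fcntl.fcntl(readfd, fcntl.F_GETFL)
        fcntl.fcntl(readfd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

    def read(self):
        try:
            data = os.read(self.readfd, 4096)
            if data:
                self.buffer += data
        except BlockingIOError:
            pass                 # nothing available right now
        return self.buffer

    def close(self):
        os.close(self.readfd)

if __name__ == "__main__":
    rfd, wfd = os.pipe()
    pipe = NonBlockingPipe(rfd)
    print(pipe.read())           # b'' - nothing written yet, no blocking
    os.write(wfd, b"event: task started\n")
    print(pipe.read())           # the buffered worker output
    pipe.close()
    os.close(wfd)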
diff --git a/bitbake/lib/bb/siggen.py b/bitbake/lib/bb/siggen.py
index 3eecd751b4..51983ca19a 100644
--- a/bitbake/lib/bb/siggen.py
+++ b/bitbake/lib/bb/siggen.py
@@ -55,8 +55,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
         else:
             self.twl = None
 
-        self.dumpsigs = dumpsigs
-
     def _build_data(self, fn, d):
 
         taskdeps, gendeps = bb.data.generate_dependencies(d)
@@ -82,10 +80,9 @@ class SignatureGeneratorBasic(SignatureGenerator):
             self.basehash[fn + "." + task] = hashlib.md5(data).hexdigest()
             #bb.note("Hash for %s is %s" % (task, tashhash[task]))
 
-        if self.dumpsigs:
-            self.taskdeps[fn] = taskdeps
-            self.gendeps[fn] = gendeps
-            self.lookupcache[fn] = lookupcache
+        self.taskdeps[fn] = taskdeps
+        self.gendeps[fn] = gendeps
+        self.lookupcache[fn] = lookupcache
 
         return taskdeps
 
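On the siggen side, taskdeps, gendeps and lookupcache are now cached for every recipe rather than only when signature dumping was enabled. For context, a minimal standalone sketch of the kind of base-hash computation _build_data() performs, hashing a task's dependency values with hashlib.md5 (the variable names and data below are made up, not BitBake metadata):

import hashlib

def base_hash(task, taskdeps, lookupcache):
    """Hash the task's dependency values in a stable order."""
    data = ""
    for dep in sorted(taskdeps[task]):
        data += str(lookupcache.get(dep))
    return hashlib.md5(data.encode("utf-8")).hexdigest()

if __name__ == "__main__":
    taskdeps = {"do_compile": ["CC", "CFLAGS", "do_configure"]}
    lookupcache = {"CC": "gcc", "CFLAGS": "-O2", "do_configure": "..."}
    print("do_compile", base_hash("do_compile", taskdeps, lookupcache))
    # Changing any dependency value changes the resulting hash.
    lookupcache["CFLAGS"] = "-O3"
    print("do_compile", base_hash("do_compile", taskdeps, lookupcache))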