diff options
Diffstat (limited to 'bitbake/lib')
-rw-r--r-- | bitbake/lib/bb/cooker.py | 59 |
1 file changed, 37 insertions(+), 22 deletions(-)
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py index 1810bcc604..938b999b4f 100644 --- a/bitbake/lib/bb/cooker.py +++ b/bitbake/lib/bb/cooker.py | |||
@@ -1998,8 +1998,9 @@ class ParsingFailure(Exception): | |||
1998 | Exception.__init__(self, realexception, recipe) | 1998 | Exception.__init__(self, realexception, recipe) |
1999 | 1999 | ||
2000 | class Parser(multiprocessing.Process): | 2000 | class Parser(multiprocessing.Process): |
2001 | def __init__(self, jobs, results, quit, profile): | 2001 | def __init__(self, jobs, jobid_queue, results, quit, profile): |
2002 | self.jobs = jobs | 2002 | self.jobs = jobs |
2003 | self.jobid_queue = jobid_queue | ||
2003 | self.results = results | 2004 | self.results = results |
2004 | self.quit = quit | 2005 | self.quit = quit |
2005 | multiprocessing.Process.__init__(self) | 2006 | multiprocessing.Process.__init__(self) |
@@ -2009,6 +2010,7 @@ class Parser(multiprocessing.Process): | |||
2009 | self.queue_signals = False | 2010 | self.queue_signals = False |
2010 | self.signal_received = [] | 2011 | self.signal_received = [] |
2011 | self.signal_threadlock = threading.Lock() | 2012 | self.signal_threadlock = threading.Lock() |
2013 | self.exit = False | ||
2012 | 2014 | ||
2013 | def catch_sig(self, signum, frame): | 2015 | def catch_sig(self, signum, frame): |
2014 | if self.queue_signals: | 2016 | if self.queue_signals: |
@@ -2021,7 +2023,7 @@ class Parser(multiprocessing.Process): | |||
2021 | signal.signal(signal.SIGTERM, signal.SIG_DFL) | 2023 | signal.signal(signal.SIGTERM, signal.SIG_DFL) |
2022 | os.kill(os.getpid(), signal.SIGTERM) | 2024 | os.kill(os.getpid(), signal.SIGTERM) |
2023 | elif signum == signal.SIGINT: | 2025 | elif signum == signal.SIGINT: |
2024 | signal.default_int_handler(signum, frame) | 2026 | self.exit = True |
2025 | 2027 | ||
2026 | def run(self): | 2028 | def run(self): |
2027 | 2029 | ||
@@ -2059,16 +2061,19 @@ class Parser(multiprocessing.Process): | |||
2059 | pending = [] | 2061 | pending = [] |
2060 | havejobs = True | 2062 | havejobs = True |
2061 | try: | 2063 | try: |
2062 | while havejobs or pending: | 2064 | while (havejobs or pending) and not self.exit: |
2063 | if self.quit.is_set(): | 2065 | if self.quit.is_set(): |
2064 | break | 2066 | break |
2065 | 2067 | ||
2066 | job = None | 2068 | jobid = None |
2067 | try: | 2069 | try: |
2068 | job = self.jobs.pop() | 2070 | # Have to wait for all parsers to have forked |
2069 | except IndexError: | 2071 | jobid = self.jobid_queue.get(True, 5) |
2072 | except (ValueError, OSError, queue.Empty): | ||
2070 | havejobs = False | 2073 | havejobs = False |
2071 | if job: | 2074 | |
2075 | if jobid is not None: | ||
2076 | job = self.jobs[jobid] | ||
2072 | result = self.parse(*job) | 2077 | result = self.parse(*job) |
2073 | # Clear the siggen cache after parsing to control memory usage, its huge | 2078 | # Clear the siggen cache after parsing to control memory usage, its huge |
2074 | bb.parse.siggen.postparsing_clean_cache() | 2079 | bb.parse.siggen.postparsing_clean_cache() |
@@ -2081,6 +2086,7 @@ class Parser(multiprocessing.Process): | |||
2081 | except queue.Full: | 2086 | except queue.Full: |
2082 | pending.append(result) | 2087 | pending.append(result) |
2083 | finally: | 2088 | finally: |
2089 | self.jobs.close() | ||
2084 | self.results.close() | 2090 | self.results.close() |
2085 | self.results.join_thread() | 2091 | self.results.join_thread() |
2086 | 2092 | ||
@@ -2133,13 +2139,13 @@ class CookerParser(object): | |||
2133 | 2139 | ||
2134 | self.bb_caches = bb.cache.MulticonfigCache(self.cfgbuilder, self.cfghash, cooker.caches_array) | 2140 | self.bb_caches = bb.cache.MulticonfigCache(self.cfgbuilder, self.cfghash, cooker.caches_array) |
2135 | self.fromcache = set() | 2141 | self.fromcache = set() |
2136 | self.willparse = set() | 2142 | self.willparse = [] |
2137 | for mc in self.cooker.multiconfigs: | 2143 | for mc in self.cooker.multiconfigs: |
2138 | for filename in self.mcfilelist[mc]: | 2144 | for filename in self.mcfilelist[mc]: |
2139 | appends = self.cooker.collections[mc].get_file_appends(filename) | 2145 | appends = self.cooker.collections[mc].get_file_appends(filename) |
2140 | layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2] | 2146 | layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2] |
2141 | if not self.bb_caches[mc].cacheValid(filename, appends): | 2147 | if not self.bb_caches[mc].cacheValid(filename, appends): |
2142 | self.willparse.add((mc, self.bb_caches[mc], filename, appends, layername)) | 2148 | self.willparse.append((mc, self.bb_caches[mc], filename, appends, layername)) |
2143 | else: | 2149 | else: |
2144 | self.fromcache.add((mc, self.bb_caches[mc], filename, appends, layername)) | 2150 | self.fromcache.add((mc, self.bb_caches[mc], filename, appends, layername)) |
2145 | 2151 | ||
@@ -2158,22 +2164,26 @@ class CookerParser(object): | |||
2158 | def start(self): | 2164 | def start(self): |
2159 | self.results = self.load_cached() | 2165 | self.results = self.load_cached() |
2160 | self.processes = [] | 2166 | self.processes = [] |
2167 | |||
2161 | if self.toparse: | 2168 | if self.toparse: |
2162 | bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata) | 2169 | bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata) |
2163 | 2170 | ||
2171 | self.toparse_queue = multiprocessing.Queue(len(self.willparse)) | ||
2164 | self.parser_quit = multiprocessing.Event() | 2172 | self.parser_quit = multiprocessing.Event() |
2165 | self.result_queue = multiprocessing.Queue() | 2173 | self.result_queue = multiprocessing.Queue() |
2166 | 2174 | ||
2167 | def chunkify(lst,n): | 2175 | # Have to pass in willparse at fork time so all parsing processes have the unpickleable data |
2168 | return [lst[i::n] for i in range(n)] | 2176 | # then access it by index from the parse queue. |
2169 | self.jobs = chunkify(list(self.willparse), self.num_processes) | ||
2170 | |||
2171 | for i in range(0, self.num_processes): | 2177 | for i in range(0, self.num_processes): |
2172 | parser = Parser(self.jobs[i], self.result_queue, self.parser_quit, self.cooker.configuration.profile) | 2178 | parser = Parser(self.willparse, self.toparse_queue, self.result_queue, self.parser_quit, self.cooker.configuration.profile) |
2173 | parser.start() | 2179 | parser.start() |
2174 | self.process_names.append(parser.name) | 2180 | self.process_names.append(parser.name) |
2175 | self.processes.append(parser) | 2181 | self.processes.append(parser) |
2176 | 2182 | ||
2183 | for jobid in range(len(self.willparse)): | ||
2184 | self.toparse_queue.put(jobid) | ||
2185 | self.toparse_queue.close() | ||
2186 | |||
2177 | self.results = itertools.chain(self.results, self.parse_generator()) | 2187 | self.results = itertools.chain(self.results, self.parse_generator()) |
2178 | 2188 | ||
2179 | def shutdown(self, clean=True, eventmsg="Parsing halted due to errors"): | 2189 | def shutdown(self, clean=True, eventmsg="Parsing halted due to errors"): |
@@ -2196,11 +2206,12 @@ class CookerParser(object): | |||
2196 | 2206 | ||
2197 | # Cleanup the queue before call process.join(), otherwise there might be | 2207 | # Cleanup the queue before call process.join(), otherwise there might be |
2198 | # deadlocks. | 2208 | # deadlocks. |
2199 | while True: | 2209 | def read_results(): |
2200 | try: | 2210 | while True: |
2201 | self.result_queue.get(timeout=0.25) | 2211 | try: |
2202 | except queue.Empty: | 2212 | self.result_queue.get(timeout=0.25) |
2203 | break | 2213 | except queue.Empty: |
2214 | break | ||
2204 | 2215 | ||
2205 | def sync_caches(): | 2216 | def sync_caches(): |
2206 | for c in self.bb_caches.values(): | 2217 | for c in self.bb_caches.values(): |
@@ -2212,15 +2223,19 @@ class CookerParser(object): | |||
2212 | 2223 | ||
2213 | self.parser_quit.set() | 2224 | self.parser_quit.set() |
2214 | 2225 | ||
2226 | read_results() | ||
2227 | |||
2215 | for process in self.processes: | 2228 | for process in self.processes: |
2216 | process.join(0.5) | 2229 | process.join(2) |
2217 | 2230 | ||
2218 | for process in self.processes: | 2231 | for process in self.processes: |
2219 | if process.exitcode is None: | 2232 | if process.exitcode is None: |
2220 | os.kill(process.pid, signal.SIGINT) | 2233 | os.kill(process.pid, signal.SIGINT) |
2221 | 2234 | ||
2235 | read_results() | ||
2236 | |||
2222 | for process in self.processes: | 2237 | for process in self.processes: |
2223 | process.join(0.5) | 2238 | process.join(2) |
2224 | 2239 | ||
2225 | for process in self.processes: | 2240 | for process in self.processes: |
2226 | if process.exitcode is None: | 2241 | if process.exitcode is None: |
@@ -2277,7 +2292,7 @@ class CookerParser(object): | |||
2277 | yield result | 2292 | yield result |
2278 | 2293 | ||
2279 | if not (self.parsed >= self.toparse): | 2294 | if not (self.parsed >= self.toparse): |
2280 | raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? Exiting.", None) | 2295 | raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? (%s %s of %s) Exiting." % (len(self.processes), self.parsed, self.toparse), None) |
2281 | 2296 | ||
2282 | 2297 | ||
2283 | def parse_next(self): | 2298 | def parse_next(self): |