Diffstat (limited to 'bitbake/lib/bb/cooker.py')
-rw-r--r--   bitbake/lib/bb/cooker.py   | 55
1 file changed, 35 insertions(+), 20 deletions(-)
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index 1810bcc604..93707a250e 100644
--- a/bitbake/lib/bb/cooker.py
+++ b/bitbake/lib/bb/cooker.py
@@ -1998,8 +1998,9 @@ class ParsingFailure(Exception):
         Exception.__init__(self, realexception, recipe)
 
 class Parser(multiprocessing.Process):
-    def __init__(self, jobs, results, quit, profile):
+    def __init__(self, jobs, jobid_queue, results, quit, profile):
         self.jobs = jobs
+        self.jobid_queue = jobid_queue
         self.results = results
         self.quit = quit
         multiprocessing.Process.__init__(self)
@@ -2009,6 +2010,7 @@ class Parser(multiprocessing.Process):
         self.queue_signals = False
         self.signal_received = []
         self.signal_threadlock = threading.Lock()
+        self.exit = False
 
     def catch_sig(self, signum, frame):
         if self.queue_signals:
@@ -2021,7 +2023,7 @@ class Parser(multiprocessing.Process):
             signal.signal(signal.SIGTERM, signal.SIG_DFL)
             os.kill(os.getpid(), signal.SIGTERM)
         elif signum == signal.SIGINT:
-            signal.default_int_handler(signum, frame)
+            self.exit = True
 
     def run(self):
 
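With this change, SIGINT in a parser process no longer raises KeyboardInterrupt from the handler; catch_sig() just records the request in self.exit, and the run loop (next hunk) checks that flag at a safe point between jobs. A minimal standalone illustration of the same flag-based handler pattern (the Worker class below is illustrative, not bitbake's):

import signal
import time

class Worker:
    def __init__(self):
        self.exit = False
        signal.signal(signal.SIGINT, self.catch_sig)

    def catch_sig(self, signum, frame):
        # Defer shutdown instead of raising KeyboardInterrupt mid-job.
        self.exit = True

    def run(self):
        while not self.exit:
            time.sleep(0.1)  # stand-in for one unit of parsing work
        print("exiting cleanly")

if __name__ == "__main__":
    Worker().run()

Deferring the exit this way avoids tearing a worker down halfway through a job and leaving its queues in an inconsistent state.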
@@ -2059,16 +2061,18 @@ class Parser(multiprocessing.Process):
         pending = []
         havejobs = True
         try:
-            while havejobs or pending:
+            while (havejobs or pending) and not self.exit:
                 if self.quit.is_set():
                     break
 
-                job = None
+                jobid = None
                 try:
-                    job = self.jobs.pop()
-                except IndexError:
+                    jobid = self.jobid_queue.get(True, 0.5)
+                except (ValueError, OSError):
                     havejobs = False
-                if job:
+
+                if jobid is not None:
+                    job = self.jobs[jobid]
                     result = self.parse(*job)
                     # Clear the siggen cache after parsing to control memory usage, its huge
                     bb.parse.siggen.postparsing_clean_cache()
@@ -2081,6 +2085,7 @@ class Parser(multiprocessing.Process):
                 except queue.Full:
                     pending.append(result)
         finally:
+            self.jobs.close()
             self.results.close()
             self.results.join_thread()
 
@@ -2133,13 +2138,13 @@ class CookerParser(object):
 
         self.bb_caches = bb.cache.MulticonfigCache(self.cfgbuilder, self.cfghash, cooker.caches_array)
         self.fromcache = set()
-        self.willparse = set()
+        self.willparse = []
         for mc in self.cooker.multiconfigs:
             for filename in self.mcfilelist[mc]:
                 appends = self.cooker.collections[mc].get_file_appends(filename)
                 layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2]
                 if not self.bb_caches[mc].cacheValid(filename, appends):
-                    self.willparse.add((mc, self.bb_caches[mc], filename, appends, layername))
+                    self.willparse.append((mc, self.bb_caches[mc], filename, appends, layername))
                 else:
                     self.fromcache.add((mc, self.bb_caches[mc], filename, appends, layername))
 
@@ -2158,22 +2163,27 @@ class CookerParser(object):
     def start(self):
         self.results = self.load_cached()
         self.processes = []
+
         if self.toparse:
             bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata)
 
+            self.toparse_queue = multiprocessing.Queue(len(self.willparse))
             self.parser_quit = multiprocessing.Event()
             self.result_queue = multiprocessing.Queue()
 
-            def chunkify(lst,n):
-                return [lst[i::n] for i in range(n)]
-            self.jobs = chunkify(list(self.willparse), self.num_processes)
+            for jobid in range(len(self.willparse)):
+                self.toparse_queue.put(jobid)
 
+            # Have to pass in willparse at fork time so all parsing processes have the unpickleable data
+            # then access it by index from the parse queue.
             for i in range(0, self.num_processes):
-                parser = Parser(self.jobs[i], self.result_queue, self.parser_quit, self.cooker.configuration.profile)
+                parser = Parser(self.willparse, self.toparse_queue, self.result_queue, self.parser_quit, self.cooker.configuration.profile)
                 parser.start()
                 self.process_names.append(parser.name)
                 self.processes.append(parser)
 
+            self.toparse_queue.close()
+
             self.results = itertools.chain(self.results, self.parse_generator())
 
     def shutdown(self, clean=True, eventmsg="Parsing halted due to errors"):
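The comment added in the hunk above sums up the core of the change: the full willparse list (which holds unpicklable cache objects) is handed to every Parser at fork time, and only small integer job ids travel through the multiprocessing.Queue, so recipes are pulled dynamically rather than pre-chunked per process. A minimal standalone sketch of that pattern, assuming the fork start method on Linux (the names and parse_one() below are illustrative, not bitbake's):

import multiprocessing
import queue

def parse_one(job):
    # Stand-in for the real per-recipe parse; just echoes the job tuple.
    return "parsed %s" % (job,)

def worker(jobs, jobid_queue, results):
    # 'jobs' is inherited at fork time; only integer indices cross the queue.
    while True:
        try:
            jobid = jobid_queue.get(True, 0.5)
        except queue.Empty:
            break
        results.put(parse_one(jobs[jobid]))

if __name__ == "__main__":
    jobs = [("mc", "recipe-%d.bb" % i) for i in range(10)]  # fork-time data
    jobid_queue = multiprocessing.Queue(len(jobs))
    result_queue = multiprocessing.Queue()
    for jobid in range(len(jobs)):
        jobid_queue.put(jobid)

    workers = [multiprocessing.Process(target=worker,
                                       args=(jobs, jobid_queue, result_queue))
               for _ in range(4)]
    for p in workers:
        p.start()
    for _ in jobs:
        print(result_queue.get())
    for p in workers:
        p.join()

Compared with the removed chunkify() split, a shared index queue keeps all workers busy until the queue is empty, so one process stuck on a slow recipe no longer leaves the rest idle with an empty chunk.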
@@ -2196,11 +2206,12 @@ class CookerParser(object):
 
         # Cleanup the queue before call process.join(), otherwise there might be
        # deadlocks.
-        while True:
-            try:
-                self.result_queue.get(timeout=0.25)
-            except queue.Empty:
-                break
+        def read_results():
+            while True:
+                try:
+                    self.result_queue.get(timeout=0.25)
+                except queue.Empty:
+                    break
 
         def sync_caches():
             for c in self.bb_caches.values():
@@ -2212,15 +2223,19 @@ class CookerParser(object):
 
         self.parser_quit.set()
 
+        read_results()
+
         for process in self.processes:
-            process.join(0.5)
+            process.join(2)
 
         for process in self.processes:
             if process.exitcode is None:
                 os.kill(process.pid, signal.SIGINT)
 
+        read_results()
+
         for process in self.processes:
-            process.join(0.5)
+            process.join(2)
 
         for process in self.processes:
             if process.exitcode is None:
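The shutdown hunk wraps the existing queue draining in read_results() and calls it both before and after escalating to SIGINT: a child process cannot exit while its Queue feeder thread still has buffered results, so the parent must keep draining result_queue or join() can hang. A small sketch of that drain-then-join pattern, assuming the same timeout/queue.Empty idiom (the helper names here are illustrative):

import multiprocessing
import os
import queue
import signal

def drain(result_queue, handle=lambda r: None):
    # Keep pulling results until the queue stays empty for 0.25s.
    while True:
        try:
            handle(result_queue.get(timeout=0.25))
        except queue.Empty:
            break

def stop_workers(processes, quit_event, result_queue):
    quit_event.set()            # ask workers to stop
    drain(result_queue)         # unblock their feeder threads
    for p in processes:
        p.join(2)
    for p in processes:
        if p.exitcode is None:  # still alive: escalate to SIGINT
            os.kill(p.pid, signal.SIGINT)
    drain(result_queue)         # drain anything produced while dying
    for p in processes:
        p.join(2)

The longer join(2) timeouts give workers a realistic chance to flush and exit on their own before the parent resorts to signalling them.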