author | Richard Purdie <richard@openedhand.com> | 2008-01-06 16:51:51 +0000
---|---|---
committer | Richard Purdie <richard@openedhand.com> | 2008-01-06 16:51:51 +0000
commit | 7821f2250d16e24097d973c34197db7fd60bc51a (patch) |
tree | a65ca61381af497265f02b5363196b36fe1b0150 |
parent | c7fca99aab060efe14332ce6d913bc7d346a4478 (diff) |
download | poky-7821f2250d16e24097d973c34197db7fd60bc51a.tar.gz |
bitbake: Sync with bitbake upstream for various fixes
git-svn-id: https://svn.o-hand.com/repos/poky/trunk@3411 311d38ba-8fff-0310-9ca6-ca027cbcb966
-rw-r--r-- | bitbake/ChangeLog | 10
-rw-r--r-- | bitbake/lib/bb/cooker.py | 77
-rw-r--r-- | bitbake/lib/bb/runqueue.py | 399
-rw-r--r-- | bitbake/lib/bb/shell.py | 36
4 files changed, 391 insertions, 131 deletions
diff --git a/bitbake/ChangeLog b/bitbake/ChangeLog
index 4fddeca86c..4f3eafe2ba 100644
--- a/bitbake/ChangeLog
+++ b/bitbake/ChangeLog
@@ -25,6 +25,16 @@ Changes in BitBake 1.8.x: | |||
25 | faster at the expense of not creating mirror tarballs. | 25 | faster at the expense of not creating mirror tarballs. |
26 | - SRCREV handling updates, improvements and fixes from Poky | 26 | - SRCREV handling updates, improvements and fixes from Poky |
27 | - Add bb.utils.lockfile() and bb.utils.unlockfile() from Poky | 27 | - Add bb.utils.lockfile() and bb.utils.unlockfile() from Poky |
28 | - Add support for task selfstamp and lockfiles flags | ||
29 | - Disable task number acceleration since it can allow the tasks to run | ||
30 | out of sequence | ||
31 | - Improve runqueue code comments | ||
32 | - Add task scheduler abstraction and some example schedulers | ||
33 | - Improve circular dependency chain debugging code and user feedback | ||
34 | - Don't give a stacktrace for invalid tasks, have a user friendly message (#3431) | ||
35 | - Add support for "-e target" (#3432) | ||
36 | - Fix shell showdata command (#3259) | ||
37 | - Fix shell data updating problems (#1880) | ||
28 | 38 | ||
29 | Changes in Bitbake 1.8.8: | 39 | Changes in Bitbake 1.8.8: |
30 | - Rewrite svn fetcher to make adding extra operations easier | 40 | - Rewrite svn fetcher to make adding extra operations easier |
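The ChangeLog above mentions bb.utils.lockfile() and bb.utils.unlockfile(). As a hedged illustration of how these helpers are typically used (assuming lockfile() blocks until the lock can be taken and returns a handle that unlockfile() releases, as in later BitBake releases; the lock path is made up):

```python
import bb.utils

# Serialise access to a shared resource (e.g. a download directory)
# across concurrent BitBake processes.
lf = bb.utils.lockfile("/tmp/example.lock")   # blocks until the lock is free
try:
    pass  # critical section: only one process runs this at a time
finally:
    bb.utils.unlockfile(lf)                   # always release, even on error
```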
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index c16709e552..c6597c52f9 100644
--- a/bitbake/lib/bb/cooker.py
+++ b/bitbake/lib/bb/cooker.py
@@ -67,6 +67,10 @@ class BBCooker: | |||
67 | if not self.configuration.cmd: | 67 | if not self.configuration.cmd: |
68 | self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data) or "build" | 68 | self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data) or "build" |
69 | 69 | ||
70 | bbpkgs = bb.data.getVar('BBPKGS', self.configuration.data, True) | ||
71 | if bbpkgs: | ||
72 | self.configuration.pkgs_to_build.extend(bbpkgs.split()) | ||
73 | |||
70 | # | 74 | # |
71 | # Special updated configuration we use for firing events | 75 | # Special updated configuration we use for firing events |
72 | # | 76 | # |
@@ -158,29 +162,68 @@ class BBCooker: | |||
158 | prefstr) | 162 | prefstr) |
159 | 163 | ||
160 | 164 | ||
161 | def showEnvironment( self ): | 165 | def showEnvironment(self , buildfile = None, pkgs_to_build = []): |
162 | """Show the outer or per-package environment""" | 166 | """ |
163 | if self.configuration.buildfile: | 167 | Show the outer or per-package environment |
168 | """ | ||
169 | fn = None | ||
170 | envdata = None | ||
171 | |||
172 | if 'world' in pkgs_to_build: | ||
173 | print "'world' is not a valid target for --environment." | ||
174 | sys.exit(1) | ||
175 | |||
176 | if len(pkgs_to_build) > 1: | ||
177 | print "Only one target can be used with the --environment option." | ||
178 | sys.exit(1) | ||
179 | |||
180 | if buildfile: | ||
181 | if len(pkgs_to_build) > 0: | ||
182 | print "No target should be used with the --environment and --buildfile options." | ||
183 | sys.exit(1) | ||
164 | self.cb = None | 184 | self.cb = None |
165 | self.bb_cache = bb.cache.init(self) | 185 | self.bb_cache = bb.cache.init(self) |
166 | bf = self.matchFile(self.configuration.buildfile) | 186 | fn = self.matchFile(buildfile) |
187 | elif len(pkgs_to_build) == 1: | ||
188 | self.updateCache() | ||
189 | |||
190 | localdata = data.createCopy(self.configuration.data) | ||
191 | bb.data.update_data(localdata) | ||
192 | bb.data.expandKeys(localdata) | ||
193 | |||
194 | taskdata = bb.taskdata.TaskData(self.configuration.abort) | ||
195 | |||
196 | try: | ||
197 | taskdata.add_provider(localdata, self.status, pkgs_to_build[0]) | ||
198 | taskdata.add_unresolved(localdata, self.status) | ||
199 | except bb.providers.NoProvider: | ||
200 | sys.exit(1) | ||
201 | |||
202 | targetid = taskdata.getbuild_id(pkgs_to_build[0]) | ||
203 | fnid = taskdata.build_targets[targetid][0] | ||
204 | fn = taskdata.fn_index[fnid] | ||
205 | else: | ||
206 | envdata = self.configuration.data | ||
207 | |||
208 | if fn: | ||
167 | try: | 209 | try: |
168 | self.configuration.data = self.bb_cache.loadDataFull(bf, self.configuration.data) | 210 | envdata = self.bb_cache.loadDataFull(fn, self.configuration.data) |
169 | except IOError, e: | 211 | except IOError, e: |
170 | bb.msg.fatal(bb.msg.domain.Parsing, "Unable to read %s: %s" % (bf, e)) | 212 | bb.msg.fatal(bb.msg.domain.Parsing, "Unable to read %s: %s" % (fn, e)) |
171 | except Exception, e: | 213 | except Exception, e: |
172 | bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e) | 214 | bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e) |
215 | |||
173 | # emit variables and shell functions | 216 | # emit variables and shell functions |
174 | try: | 217 | try: |
175 | data.update_data( self.configuration.data ) | 218 | data.update_data( envdata ) |
176 | data.emit_env(sys.__stdout__, self.configuration.data, True) | 219 | data.emit_env(sys.__stdout__, envdata, True) |
177 | except Exception, e: | 220 | except Exception, e: |
178 | bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e) | 221 | bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e) |
179 | # emit the metadata which isnt valid shell | 222 | # emit the metadata which isnt valid shell |
180 | data.expandKeys( self.configuration.data ) | 223 | data.expandKeys( envdata ) |
181 | for e in self.configuration.data.keys(): | 224 | for e in envdata.keys(): |
182 | if data.getVarFlag( e, 'python', self.configuration.data ): | 225 | if data.getVarFlag( e, 'python', envdata ): |
183 | sys.__stdout__.write("\npython %s () {\n%s}\n" % (e, data.getVar(e, self.configuration.data, 1))) | 226 | sys.__stdout__.write("\npython %s () {\n%s}\n" % (e, data.getVar(e, envdata, 1))) |
184 | 227 | ||
185 | def generateDotGraph( self, pkgs_to_build, ignore_deps ): | 228 | def generateDotGraph( self, pkgs_to_build, ignore_deps ): |
186 | """ | 229 | """ |
@@ -338,8 +381,6 @@ class BBCooker: | |||
338 | except ImportError, details: | 381 | except ImportError, details: |
339 | bb.msg.fatal(bb.msg.domain.Parsing, "Sorry, shell not available (%s)" % details ) | 382 | bb.msg.fatal(bb.msg.domain.Parsing, "Sorry, shell not available (%s)" % details ) |
340 | else: | 383 | else: |
341 | bb.data.update_data( self.configuration.data ) | ||
342 | bb.data.expandKeys( self.configuration.data ) | ||
343 | shell.start( self ) | 384 | shell.start( self ) |
344 | sys.exit( 0 ) | 385 | sys.exit( 0 ) |
345 | 386 | ||
@@ -519,7 +560,7 @@ class BBCooker: | |||
519 | """ | 560 | """ |
520 | 561 | ||
521 | if self.configuration.show_environment: | 562 | if self.configuration.show_environment: |
522 | self.showEnvironment() | 563 | self.showEnvironment(self.configuration.buildfile, self.configuration.pkgs_to_build) |
523 | sys.exit( 0 ) | 564 | sys.exit( 0 ) |
524 | 565 | ||
525 | self.buildSetVars() | 566 | self.buildSetVars() |
@@ -539,11 +580,7 @@ class BBCooker: | |||
539 | 580 | ||
540 | pkgs_to_build = self.configuration.pkgs_to_build | 581 | pkgs_to_build = self.configuration.pkgs_to_build |
541 | 582 | ||
542 | bbpkgs = bb.data.getVar('BBPKGS', self.configuration.data, 1) | 583 | if len(pkgs_to_build) == 0 and not self.configuration.show_versions: |
543 | if bbpkgs: | ||
544 | pkgs_to_build.extend(bbpkgs.split()) | ||
545 | if len(pkgs_to_build) == 0 and not self.configuration.show_versions \ | ||
546 | and not self.configuration.show_environment: | ||
547 | print "Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help'" | 584 | print "Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help'" |
548 | print "for usage information." | 585 | print "for usage information." |
549 | sys.exit(0) | 586 | sys.exit(0) |
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 2f80dd4c88..68ef3a722f 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -51,6 +51,88 @@ class RunQueueStats: | |||
51 | def taskSkipped(self): | 51 | def taskSkipped(self): |
52 | self.skipped = self.skipped + 1 | 52 | self.skipped = self.skipped + 1 |
53 | 53 | ||
54 | class RunQueueScheduler: | ||
55 | """ | ||
56 | Control the order tasks are scheduled in. | ||
57 | """ | ||
58 | def __init__(self, runqueue): | ||
59 | """ | ||
60 | The default scheduler just returns the first buildable task (the | ||
61 | priority map is sorted by task number) | ||
62 | """ | ||
63 | self.rq = runqueue | ||
64 | numTasks = len(self.rq.runq_fnid) | ||
65 | |||
66 | self.prio_map = [] | ||
67 | self.prio_map.extend(range(numTasks)) | ||
68 | |||
69 | def next(self): | ||
70 | """ | ||
71 | Return the id of the first task we find that is buildable | ||
72 | """ | ||
73 | for task1 in range(len(self.rq.runq_fnid)): | ||
74 | task = self.prio_map[task1] | ||
75 | if self.rq.runq_running[task] == 1: | ||
76 | continue | ||
77 | if self.rq.runq_buildable[task] == 1: | ||
78 | return task | ||
79 | |||
80 | class RunQueueSchedulerSpeed(RunQueueScheduler): | ||
81 | """ | ||
82 | A scheduler optimised for speed. The priority map is sorted by task weight, | ||
83 | heavier weighted tasks (tasks needed by the most other tasks) are run first. | ||
84 | """ | ||
85 | def __init__(self, runqueue): | ||
86 | """ | ||
87 | The priority map is sorted by task weight. | ||
88 | """ | ||
89 | from copy import deepcopy | ||
90 | |||
91 | self.rq = runqueue | ||
92 | |||
93 | sortweight = deepcopy(self.rq.runq_weight) | ||
94 | sortweight.sort() | ||
95 | copyweight = deepcopy(self.rq.runq_weight) | ||
96 | self.prio_map = [] | ||
97 | |||
98 | for weight in sortweight: | ||
99 | idx = copyweight.index(weight) | ||
100 | self.prio_map.append(idx) | ||
101 | copyweight[idx] = -1 | ||
102 | |||
103 | self.prio_map.reverse() | ||
104 | |||
105 | class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed): | ||
106 | """ | ||
107 | A scheduler optimised to complete .bb files as quickly as possible. The | ||
108 | priority map is sorted by task weight, but then reordered so once a given | ||
109 | .bb file starts to build, it's completed as quickly as possible. This works | ||
110 | well where disk space is at a premium and classes like OE's rm_work are in | ||
111 | force. | ||
112 | """ | ||
113 | def __init__(self, runqueue): | ||
114 | RunQueueSchedulerSpeed.__init__(self, runqueue) | ||
115 | from copy import deepcopy | ||
116 | |||
117 | #FIXME - whilst this groups all fnids together it does not reorder the | ||
118 | #fnid groups optimally. | ||
119 | |||
120 | basemap = deepcopy(self.prio_map) | ||
121 | self.prio_map = [] | ||
122 | while (len(basemap) > 0): | ||
123 | entry = basemap.pop(0) | ||
124 | self.prio_map.append(entry) | ||
125 | fnid = self.rq.runq_fnid[entry] | ||
126 | todel = [] | ||
127 | for entry in basemap: | ||
128 | entry_fnid = self.rq.runq_fnid[entry] | ||
129 | if entry_fnid == fnid: | ||
130 | todel.append(basemap.index(entry)) | ||
131 | self.prio_map.append(entry) | ||
132 | todel.reverse() | ||
133 | for idx in todel: | ||
134 | del basemap[idx] | ||
135 | |||
54 | class RunQueue: | 136 | class RunQueue: |
55 | """ | 137 | """ |
56 | BitBake Run Queue implementation | 138 | BitBake Run Queue implementation |
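The scheduler abstraction added above keeps all policy in prio_map: next() simply walks that list and returns the first buildable, not-yet-running task, so a custom scheduler only needs to build its own priority map. The following sketch is illustrative only (it is not part of the commit) and follows the same pattern as RunQueueSchedulerSpeed:

```python
class RunQueueSchedulerReverse(RunQueueScheduler):
    """
    Illustrative scheduler: consider tasks in reverse task-ID order.
    next() is inherited unchanged from RunQueueScheduler.
    """
    def __init__(self, runqueue):
        self.rq = runqueue
        # prio_map lists task IDs in the order next() should try them.
        self.prio_map = list(range(len(self.rq.runq_fnid)))
        self.prio_map.reverse()
```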
@@ -71,14 +153,158 @@ class RunQueue: | |||
71 | self.runq_task = [] | 153 | self.runq_task = [] |
72 | self.runq_depends = [] | 154 | self.runq_depends = [] |
73 | self.runq_revdeps = [] | 155 | self.runq_revdeps = [] |
74 | self.runq_weight = [] | ||
75 | self.prio_map = [] | ||
76 | 156 | ||
77 | def get_user_idstring(self, task): | 157 | def get_user_idstring(self, task): |
78 | fn = self.taskData.fn_index[self.runq_fnid[task]] | 158 | fn = self.taskData.fn_index[self.runq_fnid[task]] |
79 | taskname = self.runq_task[task] | 159 | taskname = self.runq_task[task] |
80 | return "%s, %s" % (fn, taskname) | 160 | return "%s, %s" % (fn, taskname) |
81 | 161 | ||
162 | def circular_depchains_handler(self, tasks): | ||
163 | """ | ||
164 | Some tasks aren't buildable, likely due to circular dependency issues. | ||
165 | Identify the circular dependencies and print them in a user readable format. | ||
166 | """ | ||
167 | from copy import deepcopy | ||
168 | |||
169 | valid_chains = [] | ||
170 | explored_deps = {} | ||
171 | msgs = [] | ||
172 | |||
173 | def chain_reorder(chain): | ||
174 | """ | ||
175 | Reorder a dependency chain so the lowest task id is first | ||
176 | """ | ||
177 | lowest = 0 | ||
178 | new_chain = [] | ||
179 | for entry in range(len(chain)): | ||
180 | if chain[entry] < chain[lowest]: | ||
181 | lowest = entry | ||
182 | new_chain.extend(chain[lowest:]) | ||
183 | new_chain.extend(chain[:lowest]) | ||
184 | return new_chain | ||
185 | |||
186 | def chain_compare_equal(chain1, chain2): | ||
187 | """ | ||
188 | Compare two dependency chains and see if they're the same | ||
189 | """ | ||
190 | if len(chain1) != len(chain2): | ||
191 | return False | ||
192 | for index in range(len(chain1)): | ||
193 | if chain1[index] != chain2[index]: | ||
194 | return False | ||
195 | return True | ||
196 | |||
197 | def chain_array_contains(chain, chain_array): | ||
198 | """ | ||
199 | Return True if chain_array contains chain | ||
200 | """ | ||
201 | for ch in chain_array: | ||
202 | if chain_compare_equal(ch, chain): | ||
203 | return True | ||
204 | return False | ||
205 | |||
206 | def find_chains(taskid, prev_chain): | ||
207 | prev_chain.append(taskid) | ||
208 | total_deps = [] | ||
209 | total_deps.extend(self.runq_revdeps[taskid]) | ||
210 | for revdep in self.runq_revdeps[taskid]: | ||
211 | if revdep in prev_chain: | ||
212 | idx = prev_chain.index(revdep) | ||
213 | # To prevent duplicates, reorder the chain to start with the lowest taskid | ||
214 | # and search through an array of those we've already printed | ||
215 | chain = prev_chain[idx:] | ||
216 | new_chain = chain_reorder(chain) | ||
217 | if not chain_array_contains(new_chain, valid_chains): | ||
218 | valid_chains.append(new_chain) | ||
219 | msgs.append("Dependency loop #%d found:\n" % len(valid_chains)) | ||
220 | for dep in new_chain: | ||
221 | msgs.append(" Task %s (%s) (depends: %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends[dep])) | ||
222 | msgs.append("\n") | ||
223 | if len(valid_chains) > 10: | ||
224 | msgs.append("Aborted dependency loops search after 10 matches.\n") | ||
225 | return msgs | ||
226 | continue | ||
227 | scan = False | ||
228 | if revdep not in explored_deps: | ||
229 | scan = True | ||
230 | elif revdep in explored_deps[revdep]: | ||
231 | scan = True | ||
232 | else: | ||
233 | for dep in prev_chain: | ||
234 | if dep in explored_deps[revdep]: | ||
235 | scan = True | ||
236 | if scan: | ||
237 | find_chains(revdep, deepcopy(prev_chain)) | ||
238 | for dep in explored_deps[revdep]: | ||
239 | if dep not in total_deps: | ||
240 | total_deps.append(dep) | ||
241 | |||
242 | explored_deps[taskid] = total_deps | ||
243 | |||
244 | for task in tasks: | ||
245 | find_chains(task, []) | ||
246 | |||
247 | return msgs | ||
248 | |||
249 | def calculate_task_weights(self, endpoints): | ||
250 | """ | ||
251 | Calculate a number representing the "weight" of each task. Heavier weighted tasks | ||
252 | have more dependencies and hence should be executed sooner for maximum speed. | ||
253 | |||
254 | This function also sanity checks the task list, finding tasks that it is not | ||
255 | possible to execute due to circular dependencies. | ||
256 | """ | ||
257 | |||
258 | numTasks = len(self.runq_fnid) | ||
259 | weight = [] | ||
260 | deps_left = [] | ||
261 | task_done = [] | ||
262 | |||
263 | for listid in range(numTasks): | ||
264 | task_done.append(False) | ||
265 | weight.append(0) | ||
266 | deps_left.append(len(self.runq_revdeps[listid])) | ||
267 | |||
268 | for listid in endpoints: | ||
269 | weight[listid] = 1 | ||
270 | task_done[listid] = True | ||
271 | |||
272 | while 1: | ||
273 | next_points = [] | ||
274 | for listid in endpoints: | ||
275 | for revdep in self.runq_depends[listid]: | ||
276 | weight[revdep] = weight[revdep] + weight[listid] | ||
277 | deps_left[revdep] = deps_left[revdep] - 1 | ||
278 | if deps_left[revdep] == 0: | ||
279 | next_points.append(revdep) | ||
280 | task_done[revdep] = True | ||
281 | endpoints = next_points | ||
282 | if len(next_points) == 0: | ||
283 | break | ||
284 | |||
285 | # Circular dependency sanity check | ||
286 | problem_tasks = [] | ||
287 | for task in range(numTasks): | ||
288 | if task_done[task] is False or deps_left[task] != 0: | ||
289 | problem_tasks.append(task) | ||
290 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s) is not buildable\n" % (task, self.get_user_idstring(task))) | ||
291 | bb.msg.debug(2, bb.msg.domain.RunQueue, "(Complete marker was %s and the remaining dependency count was %s)\n\n" % (task_done[task], deps_left[task])) | ||
292 | |||
293 | if problem_tasks: | ||
294 | message = "Unbuildable tasks were found.\n" | ||
295 | message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n" | ||
296 | message = message + "Identifying dependency loops (this may take a short while)...\n" | ||
297 | bb.msg.error(bb.msg.domain.RunQueue, message) | ||
298 | |||
299 | msgs = self.circular_depchains_handler(problem_tasks) | ||
300 | |||
301 | message = "\n" | ||
302 | for msg in msgs: | ||
303 | message = message + msg | ||
304 | bb.msg.fatal(bb.msg.domain.RunQueue, message) | ||
305 | |||
306 | return weight | ||
307 | |||
82 | def prepare_runqueue(self): | 308 | def prepare_runqueue(self): |
83 | """ | 309 | """ |
84 | Turn a set of taskData into a RunQueue and compute data needed | 310 | Turn a set of taskData into a RunQueue and compute data needed |
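To make the weighting pass in calculate_task_weights() above concrete, here is a small standalone rework of the same propagation on a toy diamond-shaped graph (task 0 is needed by tasks 1 and 2, which are both needed by task 3). This is an illustration of the algorithm, not code from the commit:

```python
# depends[i]: tasks that task i depends on; revdeps[i]: tasks depending on i
depends = {0: set(), 1: {0}, 2: {0}, 3: {1, 2}}
revdeps = {0: {1, 2}, 1: {3}, 2: {3}, 3: set()}

weight = {t: 0 for t in depends}
deps_left = {t: len(revdeps[t]) for t in depends}

# Endpoints are tasks nothing depends on; they seed the weights with 1.
endpoints = [t for t in depends if not revdeps[t]]
for t in endpoints:
    weight[t] = 1

# Push weight from endpoints back towards their dependencies; a task is
# queued once every task that depends on it has been accounted for.
while endpoints:
    next_points = []
    for t in endpoints:
        for dep in depends[t]:
            weight[dep] += weight[t]
            deps_left[dep] -= 1
            if deps_left[dep] == 0:
                next_points.append(dep)
    endpoints = next_points

print(weight)  # {0: 2, 1: 1, 2: 1, 3: 1} -> task 0 is heaviest, so it runs first
```

Any task left with a non-zero deps_left after this loop is exactly the "unbuildable" case the commit reports through circular_depchains_handler().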
@@ -86,9 +312,7 @@ class RunQueue: | |||
86 | """ | 312 | """ |
87 | 313 | ||
88 | depends = [] | 314 | depends = [] |
89 | runq_weight1 = [] | ||
90 | runq_build = [] | 315 | runq_build = [] |
91 | runq_done = [] | ||
92 | 316 | ||
93 | taskData = self.taskData | 317 | taskData = self.taskData |
94 | 318 | ||
@@ -98,6 +322,17 @@ class RunQueue: | |||
98 | 322 | ||
99 | bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue") | 323 | bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue") |
100 | 324 | ||
325 | # Step A - Work out a list of tasks to run | ||
326 | # | ||
327 | # Taskdata gives us a list of possible providers for every target | ||
328 | # ordered by priority (build_targets, run_targets). It also gives | ||
329 | # information on each of those providers. | ||
330 | # | ||
331 | # To create the actual list of tasks to execute we fix the list of | ||
332 | # providers and then resolve the dependencies into task IDs. This | ||
333 | # process is repeated for each type of dependency (tdepends, deptask, | ||
334 | # rdeptask, recrdeptask, idepends). | ||
335 | |||
101 | for task in range(len(taskData.tasks_name)): | 336 | for task in range(len(taskData.tasks_name)): |
102 | fnid = taskData.tasks_fnid[task] | 337 | fnid = taskData.tasks_fnid[task] |
103 | fn = taskData.fn_index[fnid] | 338 | fn = taskData.fn_index[fnid] |
@@ -105,9 +340,15 @@ class RunQueue: | |||
105 | 340 | ||
106 | if fnid not in taskData.failed_fnids: | 341 | if fnid not in taskData.failed_fnids: |
107 | 342 | ||
343 | # Resolve task internal dependencies | ||
344 | # | ||
345 | # e.g. addtask before X after Y | ||
108 | depends = taskData.tasks_tdepends[task] | 346 | depends = taskData.tasks_tdepends[task] |
109 | 347 | ||
110 | # Resolve Depends | 348 | # Resolve 'deptask' dependencies |
349 | # | ||
350 | # e.g. do_sometask[deptask] = "do_someothertask" | ||
351 | # (makes sure sometask runs after someothertask of all DEPENDS) | ||
111 | if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']: | 352 | if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']: |
112 | tasknames = task_deps['deptask'][taskData.tasks_name[task]].split() | 353 | tasknames = task_deps['deptask'][taskData.tasks_name[task]].split() |
113 | for depid in taskData.depids[fnid]: | 354 | for depid in taskData.depids[fnid]: |
@@ -119,7 +360,10 @@ class RunQueue: | |||
119 | for taskname in tasknames: | 360 | for taskname in tasknames: |
120 | depends.append(taskData.gettask_id(dep, taskname)) | 361 | depends.append(taskData.gettask_id(dep, taskname)) |
121 | 362 | ||
122 | # Resolve Runtime Depends | 363 | # Resolve 'rdeptask' dependencies |
364 | # | ||
365 | # e.g. do_sometask[rdeptask] = "do_someothertask" | ||
366 | # (makes sure sometask runs after someothertask of all RDEPENDS) | ||
123 | if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']: | 367 | if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']: |
124 | taskname = task_deps['rdeptask'][taskData.tasks_name[task]] | 368 | taskname = task_deps['rdeptask'][taskData.tasks_name[task]] |
125 | for depid in taskData.rdepids[fnid]: | 369 | for depid in taskData.rdepids[fnid]: |
@@ -129,6 +373,10 @@ class RunQueue: | |||
129 | dep = taskData.fn_index[depdata] | 373 | dep = taskData.fn_index[depdata] |
130 | depends.append(taskData.gettask_id(dep, taskname)) | 374 | depends.append(taskData.gettask_id(dep, taskname)) |
131 | 375 | ||
376 | # Resolve inter-task dependencies | ||
377 | # | ||
378 | # e.g. do_sometask[depends] = "targetname:do_someothertask" | ||
379 | # (makes sure sometask runs after targetname's someothertask) | ||
132 | idepends = taskData.tasks_idepends[task] | 380 | idepends = taskData.tasks_idepends[task] |
133 | for idepend in idepends: | 381 | for idepend in idepends: |
134 | depid = int(idepend.split(":")[0]) | 382 | depid = int(idepend.split(":")[0]) |
@@ -207,9 +455,10 @@ class RunQueue: | |||
207 | if nextdepid not in dep_seen: | 455 | if nextdepid not in dep_seen: |
208 | add_recursive_build(nextdepid, fnid) | 456 | add_recursive_build(nextdepid, fnid) |
209 | 457 | ||
210 | 458 | # Resolve recursive 'recrdeptask' dependencies | |
211 | # Resolve Recursive Runtime Depends | 459 | # |
212 | # Also includes all thier build depends, intertask depends and runtime depends | 460 | # e.g. do_sometask[recrdeptask] = "do_someothertask" |
461 | # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively) | ||
213 | if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']: | 462 | if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']: |
214 | for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split(): | 463 | for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split(): |
215 | dep_seen = [] | 464 | dep_seen = [] |
@@ -223,7 +472,7 @@ class RunQueue: | |||
223 | depid = int(idepend.split(":")[0]) | 472 | depid = int(idepend.split(":")[0]) |
224 | add_recursive_build(depid, fnid) | 473 | add_recursive_build(depid, fnid) |
225 | 474 | ||
226 | #Prune self references | 475 | # Remove all self references
227 | if task in depends: | 476 | if task in depends: |
228 | newdep = [] | 477 | newdep = [] |
229 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends)) | 478 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends)) |
@@ -237,11 +486,14 @@ class RunQueue: | |||
237 | self.runq_task.append(taskData.tasks_name[task]) | 486 | self.runq_task.append(taskData.tasks_name[task]) |
238 | self.runq_depends.append(Set(depends)) | 487 | self.runq_depends.append(Set(depends)) |
239 | self.runq_revdeps.append(Set()) | 488 | self.runq_revdeps.append(Set()) |
240 | self.runq_weight.append(0) | ||
241 | 489 | ||
242 | runq_weight1.append(0) | ||
243 | runq_build.append(0) | 490 | runq_build.append(0) |
244 | runq_done.append(0) | 491 | |
492 | # Step B - Mark all active tasks | ||
493 | # | ||
494 | # Start with the tasks we were asked to run and mark all dependencies | ||
495 | # as active too. If the task is to be 'forced', clear its stamp. Once | ||
496 | # all active tasks are marked, prune the ones we don't need. | ||
245 | 497 | ||
246 | bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks") | 498 | bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks") |
247 | 499 | ||
@@ -280,11 +532,17 @@ class RunQueue: | |||
280 | if fnid in taskData.failed_fnids: | 532 | if fnid in taskData.failed_fnids: |
281 | continue | 533 | continue |
282 | 534 | ||
535 | if target[1] not in taskData.tasks_lookup[fnid]: | ||
536 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s does not exist for target %s" % (target[1], target[0])) | ||
537 | |||
283 | listid = taskData.tasks_lookup[fnid][target[1]] | 538 | listid = taskData.tasks_lookup[fnid][target[1]] |
284 | 539 | ||
285 | mark_active(listid, 1) | 540 | mark_active(listid, 1) |
286 | 541 | ||
287 | # Prune inactive tasks | 542 | # Step C - Prune all inactive tasks |
543 | # | ||
544 | # Once all active tasks are marked, prune the ones we don't need. | ||
545 | |||
288 | maps = [] | 546 | maps = [] |
289 | delcount = 0 | 547 | delcount = 0 |
290 | for listid in range(len(self.runq_fnid)): | 548 | for listid in range(len(self.runq_fnid)): |
@@ -294,14 +552,16 @@ class RunQueue: | |||
294 | del self.runq_fnid[listid-delcount] | 552 | del self.runq_fnid[listid-delcount] |
295 | del self.runq_task[listid-delcount] | 553 | del self.runq_task[listid-delcount] |
296 | del self.runq_depends[listid-delcount] | 554 | del self.runq_depends[listid-delcount] |
297 | del self.runq_weight[listid-delcount] | ||
298 | del runq_weight1[listid-delcount] | ||
299 | del runq_build[listid-delcount] | 555 | del runq_build[listid-delcount] |
300 | del runq_done[listid-delcount] | ||
301 | del self.runq_revdeps[listid-delcount] | 556 | del self.runq_revdeps[listid-delcount] |
302 | delcount = delcount + 1 | 557 | delcount = delcount + 1 |
303 | maps.append(-1) | 558 | maps.append(-1) |
304 | 559 | ||
560 | # | ||
561 | # Step D - Sanity checks and computation | ||
562 | # | ||
563 | |||
564 | # Check to make sure we still have tasks to run | ||
305 | if len(self.runq_fnid) == 0: | 565 | if len(self.runq_fnid) == 0: |
306 | if not taskData.abort: | 566 | if not taskData.abort: |
307 | bb.msg.note(1, bb.msg.domain.RunQueue, "All possible tasks have been run but build incomplete (--continue mode). See errors above for incomplete tasks.") | 567 | bb.msg.note(1, bb.msg.domain.RunQueue, "All possible tasks have been run but build incomplete (--continue mode). See errors above for incomplete tasks.") |
@@ -310,6 +570,8 @@ class RunQueue: | |||
310 | 570 | ||
311 | bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid))) | 571 | bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid))) |
312 | 572 | ||
573 | # Remap the dependencies to account for the deleted tasks | ||
574 | # Check we didn't delete a task we depend on | ||
313 | for listid in range(len(self.runq_fnid)): | 575 | for listid in range(len(self.runq_fnid)): |
314 | newdeps = [] | 576 | newdeps = [] |
315 | origdeps = self.runq_depends[listid] | 577 | origdeps = self.runq_depends[listid] |
@@ -321,62 +583,37 @@ class RunQueue: | |||
321 | 583 | ||
322 | bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings") | 584 | bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings") |
323 | 585 | ||
586 | # Generate a list of reverse dependencies to ease future calculations | ||
324 | for listid in range(len(self.runq_fnid)): | 587 | for listid in range(len(self.runq_fnid)): |
325 | for dep in self.runq_depends[listid]: | 588 | for dep in self.runq_depends[listid]: |
326 | self.runq_revdeps[dep].add(listid) | 589 | self.runq_revdeps[dep].add(listid) |
327 | 590 | ||
591 | # Identify tasks at the end of dependency chains | ||
592 | # Error on circular dependency loops (length two) | ||
328 | endpoints = [] | 593 | endpoints = [] |
329 | for listid in range(len(self.runq_fnid)): | 594 | for listid in range(len(self.runq_fnid)): |
330 | revdeps = self.runq_revdeps[listid] | 595 | revdeps = self.runq_revdeps[listid] |
331 | if len(revdeps) == 0: | 596 | if len(revdeps) == 0: |
332 | runq_done[listid] = 1 | ||
333 | self.runq_weight[listid] = 1 | ||
334 | endpoints.append(listid) | 597 | endpoints.append(listid) |
335 | for dep in revdeps: | 598 | for dep in revdeps: |
336 | if dep in self.runq_depends[listid]: | 599 | if dep in self.runq_depends[listid]: |
337 | #self.dump_data(taskData) | 600 | #self.dump_data(taskData) |
338 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) | 601 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) |
339 | runq_weight1[listid] = len(revdeps) | ||
340 | 602 | ||
341 | bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) | 603 | bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) |
342 | 604 | ||
343 | while 1: | ||
344 | next_points = [] | ||
345 | for listid in endpoints: | ||
346 | for revdep in self.runq_depends[listid]: | ||
347 | self.runq_weight[revdep] = self.runq_weight[revdep] + self.runq_weight[listid] | ||
348 | runq_weight1[revdep] = runq_weight1[revdep] - 1 | ||
349 | if runq_weight1[revdep] == 0: | ||
350 | next_points.append(revdep) | ||
351 | runq_done[revdep] = 1 | ||
352 | endpoints = next_points | ||
353 | if len(next_points) == 0: | ||
354 | break | ||
355 | 605 | ||
356 | # Sanity Checks | 606 | # Calculate task weights |
357 | for task in range(len(self.runq_fnid)): | 607 | # Check for higher length circular dependencies
358 | if runq_done[task] == 0: | 608 | self.runq_weight = self.calculate_task_weights(endpoints) |
359 | seen = [] | 609 | |
360 | deps_seen = [] | 610 | # Decide what order to execute the tasks in, pick a scheduler |
361 | def print_chain(taskid, finish): | 611 | # FIXME - Allow user selection |
362 | seen.append(taskid) | 612 | #self.sched = RunQueueScheduler(self) |
363 | for revdep in self.runq_revdeps[taskid]: | 613 | self.sched = RunQueueSchedulerSpeed(self) |
364 | if runq_done[revdep] == 0 and revdep not in seen and not finish: | 614 | #self.sched = RunQueueSchedulerCompletion(self) |
365 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) (depends: %s)" % (revdep, self.get_user_idstring(revdep), self.runq_depends[revdep])) | 615 | |
366 | if revdep in deps_seen: | 616 | # Sanity Check - Check for multiple tasks building the same provider |
367 | bb.msg.error(bb.msg.domain.RunQueue, "Chain ends at Task %s (%s)" % (revdep, self.get_user_idstring(revdep))) | ||
368 | finish = True | ||
369 | return | ||
370 | for dep in self.runq_depends[revdep]: | ||
371 | deps_seen.append(dep) | ||
372 | print_chain(revdep, finish) | ||
373 | print_chain(task, False) | ||
374 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) not processed!\nThis is probably a circular dependency (the chain might be printed above)." % (task, self.get_user_idstring(task))) | ||
375 | if runq_weight1[task] != 0: | ||
376 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) count not zero!" % (task, self.get_user_idstring(task))) | ||
377 | |||
378 | |||
379 | # Check for multiple tasks building the same provider | ||
380 | prov_list = {} | 617 | prov_list = {} |
381 | seen_fn = [] | 618 | seen_fn = [] |
382 | for task in range(len(self.runq_fnid)): | 619 | for task in range(len(self.runq_fnid)): |
@@ -397,21 +634,6 @@ class RunQueue: | |||
397 | #if error: | 634 | #if error: |
398 | # bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...") | 635 | # bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...") |
399 | 636 | ||
400 | |||
401 | # Make a weight sorted map | ||
402 | from copy import deepcopy | ||
403 | |||
404 | sortweight = deepcopy(self.runq_weight) | ||
405 | sortweight.sort() | ||
406 | copyweight = deepcopy(self.runq_weight) | ||
407 | self.prio_map = [] | ||
408 | |||
409 | for weight in sortweight: | ||
410 | idx = copyweight.index(weight) | ||
411 | self.prio_map.append(idx) | ||
412 | copyweight[idx] = -1 | ||
413 | self.prio_map.reverse() | ||
414 | |||
415 | #self.dump_data(taskData) | 637 | #self.dump_data(taskData) |
416 | 638 | ||
417 | def execute_runqueue(self): | 639 | def execute_runqueue(self): |
@@ -483,18 +705,6 @@ class RunQueue: | |||
483 | taskname = self.runq_task[revdep] | 705 | taskname = self.runq_task[revdep] |
484 | bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname)) | 706 | bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname)) |
485 | 707 | ||
486 | def get_next_task(self): | ||
487 | """ | ||
488 | Return the id of the highest priority task that is buildable | ||
489 | """ | ||
490 | for task1 in range(len(self.runq_fnid)): | ||
491 | task = self.prio_map[task1] | ||
492 | if self.runq_running[task] == 1: | ||
493 | continue | ||
494 | if self.runq_buildable[task] == 1: | ||
495 | return task | ||
496 | return None | ||
497 | |||
498 | def execute_runqueue_internal(self): | 708 | def execute_runqueue_internal(self): |
499 | """ | 709 | """ |
500 | Run the tasks in a queue prepared by prepare_runqueue | 710 | Run the tasks in a queue prepared by prepare_runqueue |
@@ -511,20 +721,21 @@ class RunQueue: | |||
511 | def sigint_handler(signum, frame): | 721 | def sigint_handler(signum, frame): |
512 | raise KeyboardInterrupt | 722 | raise KeyboardInterrupt |
513 | 723 | ||
724 | # RP - this code allows tasks to run out of the correct order - disabled, FIXME | ||
514 | # Find any tasks with current stamps and remove them from the queue | 725 | # Find any tasks with current stamps and remove them from the queue |
515 | for task1 in range(len(self.runq_fnid)): | 726 | #for task1 in range(len(self.runq_fnid)): |
516 | task = self.prio_map[task1] | 727 | # task = self.prio_map[task1] |
517 | fn = self.taskData.fn_index[self.runq_fnid[task]] | 728 | # fn = self.taskData.fn_index[self.runq_fnid[task]] |
518 | taskname = self.runq_task[task] | 729 | # taskname = self.runq_task[task] |
519 | if bb.build.stamp_is_current(taskname, self.dataCache, fn): | 730 | # if bb.build.stamp_is_current(taskname, self.dataCache, fn): |
520 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task))) | 731 | # bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task))) |
521 | self.runq_running[task] = 1 | 732 | # self.runq_running[task] = 1 |
522 | self.task_complete(task) | 733 | # self.task_complete(task) |
523 | self.stats.taskCompleted() | 734 | # self.stats.taskCompleted() |
524 | self.stats.taskSkipped() | 735 | # self.stats.taskSkipped() |
525 | 736 | ||
526 | while True: | 737 | while True: |
527 | task = self.get_next_task() | 738 | task = self.sched.next() |
528 | if task is not None: | 739 | if task is not None: |
529 | fn = self.taskData.fn_index[self.runq_fnid[task]] | 740 | fn = self.taskData.fn_index[self.runq_fnid[task]] |
530 | 741 | ||
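With get_next_task() removed, execute_runqueue_internal() now asks the scheduler chosen in prepare_runqueue() for work. A heavily simplified, illustrative outline of that interaction (the real loop also forks off tasks and waits for running ones to finish):

```python
# In prepare_runqueue():
self.sched = RunQueueSchedulerSpeed(self)   # or RunQueueScheduler / ...Completion

# In execute_runqueue_internal():
while True:
    task = self.sched.next()    # first buildable, not-yet-running task, or None
    if task is None:
        break                   # simplified: nothing left that can start now
    self.runq_running[task] = 1
    # ... execute the task here ...
```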
diff --git a/bitbake/lib/bb/shell.py b/bitbake/lib/bb/shell.py
index fc213c3f4a..745091fb7d 100644
--- a/bitbake/lib/bb/shell.py
+++ b/bitbake/lib/bb/shell.py
@@ -68,7 +68,6 @@ leave_mainloop = False | |||
68 | last_exception = None | 68 | last_exception = None |
69 | cooker = None | 69 | cooker = None |
70 | parsed = False | 70 | parsed = False |
71 | initdata = None | ||
72 | debug = os.environ.get( "BBSHELL_DEBUG", "" ) | 71 | debug = os.environ.get( "BBSHELL_DEBUG", "" ) |
73 | 72 | ||
74 | ########################################################################## | 73 | ########################################################################## |
@@ -156,11 +155,14 @@ class BitBakeShellCommands: | |||
156 | cooker.configuration.cmd = cmd | 155 | cooker.configuration.cmd = cmd |
157 | 156 | ||
158 | td = taskdata.TaskData(cooker.configuration.abort) | 157 | td = taskdata.TaskData(cooker.configuration.abort) |
158 | localdata = data.createCopy(cooker.configuration.data) | ||
159 | data.update_data(localdata) | ||
160 | data.expandKeys(localdata) | ||
159 | 161 | ||
160 | try: | 162 | try: |
161 | tasks = [] | 163 | tasks = [] |
162 | for name in names: | 164 | for name in names: |
163 | td.add_provider(cooker.configuration.data, cooker.status, name) | 165 | td.add_provider(localdata, cooker.status, name) |
164 | providers = td.get_provider(name) | 166 | providers = td.get_provider(name) |
165 | 167 | ||
166 | if len(providers) == 0: | 168 | if len(providers) == 0: |
@@ -168,9 +170,9 @@ class BitBakeShellCommands: | |||
168 | 170 | ||
169 | tasks.append([name, "do_%s" % cooker.configuration.cmd]) | 171 | tasks.append([name, "do_%s" % cooker.configuration.cmd]) |
170 | 172 | ||
171 | td.add_unresolved(cooker.configuration.data, cooker.status) | 173 | td.add_unresolved(localdata, cooker.status) |
172 | 174 | ||
173 | rq = runqueue.RunQueue(cooker, cooker.configuration.data, cooker.status, td, tasks) | 175 | rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks) |
174 | rq.prepare_runqueue() | 176 | rq.prepare_runqueue() |
175 | rq.execute_runqueue() | 177 | rq.execute_runqueue() |
176 | 178 | ||
@@ -217,8 +219,8 @@ class BitBakeShellCommands: | |||
217 | edit.usage = "<providee>" | 219 | edit.usage = "<providee>" |
218 | 220 | ||
219 | def environment( self, params ): | 221 | def environment( self, params ): |
220 | """Dump out the outer BitBake environment (see bbread)""" | 222 | """Dump out the outer BitBake environment""" |
221 | data.emit_env(sys.__stdout__, cooker.configuration.data, True) | 223 | cooker.showEnvironment() |
222 | 224 | ||
223 | def exit_( self, params ): | 225 | def exit_( self, params ): |
224 | """Leave the BitBake Shell""" | 226 | """Leave the BitBake Shell""" |
@@ -241,11 +243,10 @@ class BitBakeShellCommands: | |||
241 | oldcmd = cooker.configuration.cmd | 243 | oldcmd = cooker.configuration.cmd |
242 | cooker.configuration.cmd = cmd | 244 | cooker.configuration.cmd = cmd |
243 | 245 | ||
244 | thisdata = copy.deepcopy( initdata ) | 246 | thisdata = data.createCopy(cooker.configuration.data) |
245 | # Caution: parse.handle modifies thisdata, hence it would | 247 | data.update_data(thisdata) |
246 | # lead to pollution cooker.configuration.data, which is | 248 | data.expandKeys(thisdata) |
247 | # why we use it on a safe copy we obtained from cooker right after | 249 | |
248 | # parsing the initial *.conf files | ||
249 | try: | 250 | try: |
250 | bbfile_data = parse.handle( bf, thisdata ) | 251 | bbfile_data = parse.handle( bf, thisdata ) |
251 | except parse.ParseError: | 252 | except parse.ParseError: |
@@ -393,6 +394,11 @@ SRC_URI = "" | |||
393 | os.system( "%s %s/%s" % ( os.environ.get( "EDITOR" ), fulldirname, filename ) ) | 394 | os.system( "%s %s/%s" % ( os.environ.get( "EDITOR" ), fulldirname, filename ) ) |
394 | new.usage = "<directory> <filename>" | 395 | new.usage = "<directory> <filename>" |
395 | 396 | ||
397 | def package( self, params ): | ||
398 | """Execute 'package' on a providee""" | ||
399 | self.build( params, "package" ) | ||
400 | package.usage = "<providee>" | ||
401 | |||
396 | def pasteBin( self, params ): | 402 | def pasteBin( self, params ): |
397 | """Send a command + output buffer to the pastebin at http://rafb.net/paste""" | 403 | """Send a command + output buffer to the pastebin at http://rafb.net/paste""" |
398 | index = params[0] | 404 | index = params[0] |
@@ -501,8 +507,8 @@ SRC_URI = "" | |||
501 | interpreter.interact( "SHELL: Expert Mode - BitBake Python %s\nType 'help' for more information, press CTRL-D to switch back to BBSHELL." % sys.version ) | 507 | interpreter.interact( "SHELL: Expert Mode - BitBake Python %s\nType 'help' for more information, press CTRL-D to switch back to BBSHELL." % sys.version ) |
502 | 508 | ||
503 | def showdata( self, params ): | 509 | def showdata( self, params ): |
504 | """Execute 'showdata' on a providee""" | 510 | """Show the parsed metadata for a given providee""" |
505 | self.build( params, "showdata" ) | 511 | cooker.showEnvironment(None, params) |
506 | showdata.usage = "<providee>" | 512 | showdata.usage = "<providee>" |
507 | 513 | ||
508 | def setVar( self, params ): | 514 | def setVar( self, params ): |
@@ -732,10 +738,6 @@ class BitBakeShell: | |||
732 | 738 | ||
733 | print __credits__ | 739 | print __credits__ |
734 | 740 | ||
735 | # save initial cooker configuration (will be reused in file*** commands) | ||
736 | global initdata | ||
737 | initdata = copy.deepcopy( cooker.configuration.data ) | ||
738 | |||
739 | def cleanup( self ): | 741 | def cleanup( self ): |
740 | """Write readline history and clean up resources""" | 742 | """Write readline history and clean up resources""" |
741 | debugOut( "writing command history" ) | 743 | debugOut( "writing command history" ) |
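Both the shell.py changes above and the cooker.py changes earlier in this commit drop the cached initdata copy in favour of the same three-step pattern for obtaining a safe, fully expanded copy of the configuration. As a standalone sketch, where d stands for whichever configuration datastore is in scope:

```python
from bb import data

# Work on a private copy so later parsing cannot pollute the original,
# then apply OVERRIDES and expand variable references in key names.
localdata = data.createCopy(d)
data.update_data(localdata)
data.expandKeys(localdata)
```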