diff options
author | Adrian Dudau <adrian.dudau@enea.com> | 2014-06-26 14:36:22 +0200 |
---|---|---|
committer | Adrian Dudau <adrian.dudau@enea.com> | 2014-06-26 15:32:53 +0200 |
commit | f4cf9fe05bb3f32fabea4e54dd92d368967a80da (patch) | |
tree | 487180fa9866985ea7b28e625651765d86f515c3 /scripts/lib | |
download | poky-f4cf9fe05bb3f32fabea4e54dd92d368967a80da.tar.gz |
initial commit for Enea Linux 4.0
Migrated from the internal git server on the daisy-enea branch
Signed-off-by: Adrian Dudau <adrian.dudau@enea.com>
Diffstat (limited to 'scripts/lib')
291 files changed, 39563 insertions, 0 deletions
diff --git a/scripts/lib/bsp/__init__.py b/scripts/lib/bsp/__init__.py new file mode 100644 index 0000000000..8bbb6e1530 --- /dev/null +++ b/scripts/lib/bsp/__init__.py | |||
@@ -0,0 +1,22 @@ | |||
1 | # | ||
2 | # Yocto BSP tools library | ||
3 | # | ||
4 | # Copyright (c) 2012, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # AUTHORS | ||
21 | # Tom Zanussi <tom.zanussi (at] intel.com> | ||
22 | # | ||
diff --git a/scripts/lib/bsp/engine.py b/scripts/lib/bsp/engine.py new file mode 100644 index 0000000000..681720d20a --- /dev/null +++ b/scripts/lib/bsp/engine.py | |||
@@ -0,0 +1,1780 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2012, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This module implements the templating engine used by 'yocto-bsp' to | ||
22 | # create BSPs. The BSP templates are simply the set of files expected | ||
23 | # to appear in a generated BSP, marked up with a small set of tags | ||
24 | # used to customize the output. The engine parses through the | ||
25 | # templates and generates a Python program containing all the logic | ||
26 | # and input elements needed to display and retrieve BSP-specific | ||
27 | # information from the user. The resulting program uses those results | ||
28 | # to generate the final BSP files. | ||
29 | # | ||
30 | # AUTHORS | ||
31 | # Tom Zanussi <tom.zanussi (at] intel.com> | ||
32 | # | ||
33 | |||
34 | import os | ||
35 | import sys | ||
36 | from abc import ABCMeta, abstractmethod | ||
37 | from tags import * | ||
38 | import shlex | ||
39 | import json | ||
40 | import subprocess | ||
41 | import shutil | ||
42 | |||
class Line():
    """
    Abstract container for one line that will be written into the
    generated BSP-creation program.
    """
    __metaclass__ = ABCMeta

    def __init__(self, line):
        self.line = line            # raw template text for this line
        self.generated_line = ""    # filled in when gen() output is assembled
        self.prio = sys.maxint      # prompt ordering; default sorts last
        self.discard = False        # set when the line is suppressed

    @abstractmethod
    def gen(self, context = None):
        """
        Return the executable line that will appear in the
        BSP-generation program.
        """
        pass

    def escape(self, line):
        """
        Backslash-escape backslashes and both quote characters so the
        text survives embedding in a generated string literal
        (re.escape() escapes way too much).
        """
        escaped = line.replace("\\", "\\\\")
        escaped = escaped.replace("\"", "\\\"")
        return escaped.replace("'", "\\'")

    def parse_error(self, msg, lineno, line):
        # NOTE(review): lineno is accepted but not included in the
        # message here; SubstrateBase.parse_error does include it.
        raise SyntaxError("%s: %s" % (msg, line))
73 | |||
74 | |||
class NormalLine(Line):
    """
    Container for plain (non-tag) template lines.
    """
    def __init__(self, line):
        Line.__init__(self, line)
        # Set by the parser when this 'line' is actually a file or
        # directory name rather than file content.
        self.is_filename = False
        self.is_dirname = False
        self.out_filebase = None

    def gen(self, context = None):
        """Emit the code line that recreates this line in the output BSP."""
        if self.is_filename:
            path = os.path.join(self.out_filebase, self.escape(self.line))
            return "current_file = \"" + path + "\"; of = open(current_file, \"w\")"
        if self.is_dirname:
            path = os.path.join(self.out_filebase, self.escape(self.line))
            return "if not os.path.exists(\"" + path + "\"): os.mkdir(\"" + path + "\")"
        return "of.write(\"" + self.escape(self.line) + "\\n\")"
94 | |||
95 | |||
class CodeLine(Line):
    """
    Container for lines carrying embedded Python code from a tag.
    """
    def __init__(self, line):
        Line.__init__(self, line)

    def gen(self, context = None):
        """Code lines pass through into the generated program verbatim."""
        return self.line
105 | |||
106 | |||
class Assignment:
    """
    Representation of everything we know about {{=name }} tags.
    Instances of these are used by Assignment lines.
    """
    def __init__(self, start, end, name):
        self.start = start  # offset of the tag opening within its line
        self.end = end      # offset just past the tag close
        self.name = name    # variable whose value replaces the tag
116 | |||
117 | |||
class AssignmentLine(NormalLine):
    """
    Container for normal lines containing assignment tags.  Assignment
    tags must be in ascending order of 'start' value.
    """
    def __init__(self, line):
        NormalLine.__init__(self, line)
        self.assignments = []  # Assignment objects, left-to-right order

    def add_assignment(self, start, end, name):
        # start/end are offsets within the unescaped original line.
        self.assignments.append(Assignment(start, end, name))

    def gen(self, context = None):
        """
        Replace each {{= }} tag with a quoted concatenation of its
        variable, then emit the write/open/mkdir line as appropriate.
        """
        line = self.escape(self.line)

        for assignment in self.assignments:
            replacement = "\" + " + assignment.name + " + \""
            # Tags are processed left to right, so the first remaining
            # ASSIGN_TAG in the (escaped) line is this assignment's.
            idx = line.find(ASSIGN_TAG)
            line = line[:idx] + replacement + line[idx + assignment.end - assignment.start:]
        if self.is_filename:
            return "current_file = \"" + os.path.join(self.out_filebase, line) + "\"; of = open(current_file, \"w\")"
        elif self.is_dirname:
            dirname = os.path.join(self.out_filebase, line)
            return "if not os.path.exists(\"" + dirname + "\"): os.mkdir(\"" + dirname + "\")"
        else:
            return "of.write(\"" + line + "\\n\")"
144 | |||
145 | |||
class InputLine(Line):
    """
    Base class for Input lines (tags that prompt the user).
    """
    def __init__(self, props, tag, lineno):
        Line.__init__(self, tag)
        self.props = props
        self.lineno = lineno

        # An explicit "prio" property orders the prompts; without one
        # the line sorts last.
        if "prio" in props:
            self.prio = int(props["prio"])
        else:
            self.prio = sys.maxint

    def gen(self, context = None):
        """
        Validate dependency properties shared by all input lines: a
        'depends-on' property must be paired with 'depends-on-val'.
        """
        if "depends-on" in self.props:
            if "depends-on-val" not in self.props:
                self.parse_error("No 'depends-on-val' for 'depends-on' property",
                                 self.lineno, self.line)
170 | |||
171 | |||
class EditBoxInputLine(InputLine):
    """
    Base class for 'editbox' Input lines (free-form text entry).

    props:
        name: example - "Load address"
        msg: example - "Please enter the load address"
    result:
        Sets the value of the variable specified by 'name' to
        whatever the user typed.
    """
    def __init__(self, props, tag, lineno):
        InputLine.__init__(self, props, tag, lineno)

    def gen(self, context = None):
        """Emit the raw_input()-based prompt line for this editbox."""
        InputLine.gen(self, context)
        name = self.props["name"]
        if not name:
            self.parse_error("No input 'name' property found",
                             self.lineno, self.line)
        msg = self.props["msg"]
        if not msg:
            self.parse_error("No input 'msg' property found",
                             self.lineno, self.line)

        default_choice = self.props.get("default", "")
        msg += " [default: " + default_choice + "]"

        return name + " = default(raw_input(\"" + msg + " \"), " + name + ")"
207 | |||
208 | |||
class GitRepoEditBoxInputLine(EditBoxInputLine):
    """
    'editbox' Input line for user input of remote git repos.  The
    generated code verifies the existence and connectivity of the
    specified git repo.

    props:
        name: example - "Load address"
        msg: example - "Please enter the load address"
    result:
        Sets the value of the variable specified by 'name' to
        whatever the user typed.
    """
    def __init__(self, props, tag, lineno):
        EditBoxInputLine.__init__(self, props, tag, lineno)

    def gen(self, context = None):
        """Emit the get_verified_git_repo()-based prompt line."""
        EditBoxInputLine.gen(self, context)
        name = self.props["name"]
        if not name:
            self.parse_error("No input 'name' property found",
                             self.lineno, self.line)
        msg = self.props["msg"]
        if not msg:
            self.parse_error("No input 'msg' property found",
                             self.lineno, self.line)

        default_choice = self.props.get("default", "")
        msg += " [default: " + default_choice + "]"

        return name + " = get_verified_git_repo(\"" + msg + "\"," + name + ")"
246 | |||
247 | |||
class FileEditBoxInputLine(EditBoxInputLine):
    """
    'editbox' Input line for user input of existing files.  The
    generated code verifies the existence of the specified file.

    props:
        name: example - "Load address"
        msg: example - "Please enter the load address"
    result:
        Sets the value of the variable specified by 'name' to
        whatever the user typed.
    """
    def __init__(self, props, tag, lineno):
        EditBoxInputLine.__init__(self, props, tag, lineno)

    def gen(self, context = None):
        """Emit the get_verified_file()-based prompt line."""
        EditBoxInputLine.gen(self, context)
        name = self.props["name"]
        if not name:
            self.parse_error("No input 'name' property found",
                             self.lineno, self.line)
        msg = self.props["msg"]
        if not msg:
            self.parse_error("No input 'msg' property found",
                             self.lineno, self.line)

        default_choice = self.props.get("default", "")
        msg += " [default: " + default_choice + "]"

        return name + " = get_verified_file(\"" + msg + "\"," + name + ", True)"
284 | |||
285 | |||
class BooleanInputLine(InputLine):
    """
    Base class for boolean Input lines.
    props:
        name: example - "keyboard"
        msg: example - "Got keyboard?"
    result:
        Sets the value of the variable specified by 'name' to "yes" or "no"
        example - keyboard = "yes"
    """
    def __init__(self, props, tag, lineno):
        InputLine.__init__(self, props, tag, lineno)

    def gen(self, context = None):
        """Emit the boolean(raw_input())-based prompt line."""
        InputLine.gen(self, context)
        name = self.props["name"]
        if not name:
            self.parse_error("No input 'name' property found",
                             self.lineno, self.line)
        msg = self.props["msg"]
        if not msg:
            self.parse_error("No input 'msg' property found",
                             self.lineno, self.line)

        default_choice = self.props.get("default", "")
        msg += " [default: " + default_choice + "]"

        return name + " = boolean(raw_input(\"" + msg + " \"), " + name + ")"
320 | |||
321 | |||
class ListInputLine(InputLine):
    """
    Base class for List-based Input lines. e.g. Choicelist, Checklist.
    """
    __metaclass__ = ABCMeta

    def __init__(self, props, tag, lineno):
        InputLine.__init__(self, props, tag, lineno)
        self.choices = []  # item input lines attached by the parser

    def gen_choicepair_list(self):
        """Generate a list of 2-item val:desc lists from self.choices."""
        if not self.choices:
            return None

        choicepair_list = list()

        for choice in self.choices:
            choicepair = []
            choicepair.append(choice.val)
            choicepair.append(choice.desc)
            choicepair_list.append(choicepair)

        return choicepair_list

    def gen_degenerate_choicepair_list(self, choices):
        """Generate a list of 2-item val:desc with val=desc from passed-in choices."""
        choicepair_list = list()

        for choice in choices:
            choicepair = []
            choicepair.append(choice)
            choicepair.append(choice)
            choicepair_list.append(choicepair)

        return choicepair_list

    def exec_listgen_fn(self, context = None):
        """
        Execute the list-generating function contained as a string in
        the "gen" property.  Returns None if there is no "gen"
        property; raises a parse error if the named function cannot be
        loaded or returns nothing.
        """
        retval = None
        try:
            # "gen" is a dotted path: everything up to the last dot is
            # the module, the last component is the function name.
            fname = self.props["gen"]
            modsplit = fname.split('.')
            mod_fn = modsplit.pop()
            mod = '.'.join(modsplit)

            __import__(mod)
            # python 2.7 has a better way to do this using importlib.import_module
            m = sys.modules[mod]

            fn = getattr(m, mod_fn)
            if not fn:
                self.parse_error("couldn't load function specified for 'gen' property ",
                                 self.lineno, self.line)
            retval = fn(context)
            if not retval:
                self.parse_error("function specified for 'gen' property returned nothing ",
                                 self.lineno, self.line)
        except KeyError:
            # No "gen" property at all - the caller falls back to the
            # statically parsed choices.
            pass

        return retval

    def gen_choices_str(self, choicepairs):
        """
        Generate a numbered list of choices from a list of choicepairs
        for display to the user.
        """
        choices_str = ""

        for i, choicepair in enumerate(choicepairs):
            choices_str += "\t" + str(i + 1) + ") " + choicepair[1] + "\n"

        return choices_str

    def gen_choices_val_str(self, choicepairs):
        """
        Generate a string literal representing the array of choice
        values corresponding to the numbered list generated by
        gen_choices_str().
        """
        choices_val_list = "["

        for i, choicepair in enumerate(choicepairs):
            choices_val_list += "\"" + choicepair[0] + "\","
        choices_val_list += "]"

        return choices_val_list

    def gen_choices_val_list(self, choicepairs):
        """
        Generate an array of choice values corresponding to the
        numbered list generated by gen_choices_str().
        """
        choices_val_list = []

        for i, choicepair in enumerate(choicepairs):
            choices_val_list.append(choicepair[0])

        return choices_val_list

    def gen_choices_list(self, context = None, checklist = False):
        """
        Build the choicepair list, either degenerately from the dynamic
        "gen" function or from the statically parsed self.choices.
        """
        choices = self.exec_listgen_fn(context)
        if choices:
            # NOTE(review): unreachable - a truthy 'choices' can never
            # have len() == 0; kept as-is.
            if len(choices) == 0:
                self.parse_error("No entries available for input list",
                                 self.lineno, self.line)
            choicepairs = self.gen_degenerate_choicepair_list(choices)
        else:
            if len(self.choices) == 0:
                self.parse_error("No entries available for input list",
                                 self.lineno, self.line)
            choicepairs = self.gen_choicepair_list()

        return choicepairs

    def gen_choices(self, context = None, checklist = False):
        """
        Generate an array of choice values corresponding to the
        numbered list generated by gen_choices_str(), display it to
        the user, and process the result.
        """
        msg = self.props["msg"]
        if not msg:
            self.parse_error("No input 'msg' property found",
                             self.lineno, self.line)

        try:
            default_choice = self.props["default"]
        except KeyError:
            default_choice = ""

        msg += " [default: " + default_choice + "]"

        choicepairs = self.gen_choices_list(context, checklist)

        choices_str = self.gen_choices_str(choicepairs)
        choices_val_list = self.gen_choices_val_list(choicepairs)
        if checklist:
            choiceval = default(find_choicevals(raw_input(msg + "\n" + choices_str), choices_val_list), default_choice)
        else:
            choiceval = default(find_choiceval(raw_input(msg + "\n" + choices_str), choices_val_list), default_choice)

        return choiceval
472 | |||
473 | |||
def find_choiceval(choice_str, choice_list):
    """
    Take a number as a string and return the corresponding val string
    from choice_list; return the empty string if the number is not a
    valid 1-based index into the list (or not a number at all).
    choice_list is a simple python list.
    """
    choice_val = ""

    try:
        choice_idx = int(choice_str)
        # The user enters a 1-based index.  Reject 0 and negatives
        # explicitly: the original 'choice_idx <= len' check let "0"
        # index element -1 (the LAST entry) via negative indexing.
        if 1 <= choice_idx <= len(choice_list):
            choice_val = choice_list[choice_idx - 1]
    except ValueError:
        pass

    return choice_val
490 | |||
491 | |||
def find_choicevals(choice_str, choice_list):
    """
    Map each whitespace-separated number in choice_str to its val in
    choice_list; out-of-bounds entries map to the empty string.
    choice_list is a simple python list.
    """
    return [find_choiceval(token, choice_list) for token in choice_str.split()]
505 | |||
506 | |||
def default(input_str, name):
    """
    Return name when input_str is empty/None, otherwise the stripped
    input_str.
    """
    return input_str.strip() if input_str else name
515 | |||
516 | |||
def verify_git_repo(giturl):
    """
    Verify that the giturl passed in can be connected to.  This can be
    used as a check for the existence of the given repo and/or basic
    git remote connectivity.

    Returns True if the connection was successful, False otherwise.
    """
    if not giturl:
        return False

    # Pass an argument list instead of an interpolated shell string:
    # giturl is user-supplied, and shell=True would allow shell
    # metacharacters in it to be executed.
    try:
        with open(os.devnull, "w") as devnull:
            rc = subprocess.call(["git", "ls-remote", giturl],
                                 stdout=devnull, stderr=devnull)
    except OSError:
        # git itself is missing/unrunnable; treat as unreachable, the
        # same result the shell form produced via exit code 127.
        return False

    return rc == 0
534 | |||
535 | |||
def get_verified_git_repo(input_str, name):
    """
    Prompt (forever, if need be) until the user supplies a git repo
    URL that verify_git_repo() accepts, then return it.
    """
    msg = input_str.strip() + " "

    while True:
        giturl = default(raw_input(msg), name)
        if verify_git_repo(giturl):
            return giturl
549 | |||
550 | |||
def get_verified_file(input_str, name, filename_can_be_null):
    """
    Prompt (forever, if need be) until the user supplies the name of
    an existing file - or an empty name, when filename_can_be_null -
    then return it.
    """
    msg = input_str.strip() + " "

    filename = default(raw_input(msg), name)

    # Same acceptance test as before, evaluated in the same order:
    # empty-and-allowed first, then an on-disk existence check.
    while not ((not filename and filename_can_be_null)
               or os.path.isfile(filename)):
        filename = default(raw_input(msg), name)

    return filename
566 | |||
567 | |||
def replace_file(replace_this, with_this):
    """
    Replace the given open file object (replace_this) with the
    contents of the file named by with_this, retaining the original
    filename.  Best-effort: I/O failures are silently ignored.
    """
    try:
        replace_this.close()
        shutil.copy(with_this, replace_this.name)
    except IOError:
        # Deliberately swallowed - a failed replacement leaves the
        # original file in place.
        pass
578 | |||
579 | |||
def boolean(input_str, name):
    """
    Return "y" or "n" based on the first character of input_str
    (case-insensitive); return name when input_str is empty,
    whitespace-only, or doesn't start with y/n.
    """
    if not input_str:
        return name

    # Local renamed from 'str' (which shadowed the builtin).  The
    # explicit 'in ("y", "n")' test also fixes the original operator-
    # precedence bug: 'a and b or c' evaluated c unguarded, so
    # whitespace-only input stripped to "" and raised IndexError.
    val = input_str.lower().strip()
    if val and val[0] in ("y", "n"):
        return val[0]
    return name
592 | |||
593 | |||
def strip_base(input_str):
    """
    Remove a trailing '/base' from input_str, so we can use 'base' in
    the branch names we present to the user; anything else is returned
    stripped of surrounding whitespace.
    """
    suffix = "/base"
    if input_str and input_str.endswith(suffix):
        # Deliberately NOT stripped in this branch, matching the
        # original behavior.
        return input_str[:-len(suffix)]
    return input_str.strip()
602 | |||
603 | |||
deferred_choices = {}  # key -> (input_line, captured_context, checklist)

def gen_choices_defer(input_line, context, checklist = False):
    """
    Save the context, keyed by the name of the input item; it will be
    passed to the gen function later via invoke_deferred_choices().
    """
    name = input_line.props["name"]

    try:
        nameappend = input_line.props["nameappend"]
    except KeyError:
        nameappend = ""

    try:
        branches_base = input_line.props["branches_base"]
    except KeyError:
        branches_base = ""

    filename = input_line.props["filename"]

    # Strip any leading tag text off the filename, then drop the
    # extension, so the key is stable regardless of markup.
    closetag_start = filename.find(CLOSE_TAG)

    if closetag_start != -1:
        filename = filename[closetag_start + len(CLOSE_TAG):]

    filename = filename.strip()
    filename = os.path.splitext(filename)[0]

    # Record the values in both the live and the captured context so
    # the deferred gen sees the same data either way.  capture_context()
    # is defined elsewhere in this file.
    captured_context = capture_context(context)
    context["filename"] = filename
    captured_context["filename"] = filename
    context["nameappend"] = nameappend
    captured_context["nameappend"] = nameappend
    context["branches_base"] = branches_base
    captured_context["branches_base"] = branches_base

    deferred_choice = (input_line, captured_context, checklist)
    key = name + "_" + filename + "_" + nameappend
    deferred_choices[key] = deferred_choice
644 | |||
645 | |||
def invoke_deferred_choices(name):
    """
    Invoke the choice-generation function using the context that was
    captured under 'name' by gen_choices_defer().
    """
    input_line, context, checklist = deferred_choices[name]

    context["name"] = name

    return input_line.gen_choices(context, checklist)
661 | |||
662 | |||
class ChoicelistInputLine(ListInputLine):
    """
    Base class for choicelist Input lines (pick one item).
    props:
        name: example - "xserver_choice"
        msg: example - "Please select an xserver for this machine"
    result:
        Sets the value of the variable specified by 'name' to whichever Choice was chosen
        example - xserver_choice = "xserver_vesa"
    """
    def __init__(self, props, tag, lineno):
        ListInputLine.__init__(self, props, tag, lineno)

    def gen(self, context = None):
        """Defer choice generation and emit the deferred-invocation line."""
        InputLine.gen(self, context)

        gen_choices_defer(self, context)
        name = self.props["name"]
        nameappend = context["nameappend"]
        filename = context["filename"]

        default_choice = self.props.get("default", "")

        key = name + "_" + filename + "_" + nameappend
        return name + " = default(invoke_deferred_choices(\"" + key + "\"), \"" + default_choice + "\")"
692 | |||
693 | |||
class ListValInputLine(InputLine):
    """
    Abstract base class for choice and checkbox Input lines.
    Requires 'val' and 'msg' properties.
    """
    def __init__(self, props, tag, lineno):
        InputLine.__init__(self, props, tag, lineno)

        if "val" not in self.props:
            self.parse_error("No input 'val' property found", self.lineno, self.line)
        self.val = self.props["val"]

        if "msg" not in self.props:
            self.parse_error("No input 'msg' property found", self.lineno, self.line)
        self.desc = self.props["msg"]
710 | |||
711 | |||
class ChoiceInputLine(ListValInputLine):
    """
    A single item belonging to a choicelist.
    """
    def __init__(self, props, tag, lineno):
        ListValInputLine.__init__(self, props, tag, lineno)

    def gen(self, context = None):
        """Choice items are consumed by their choicelist; emit nothing."""
        return None
721 | |||
722 | |||
class ChecklistInputLine(ListInputLine):
    """
    Base class for checklist Input lines (pick any number of items).
    """
    def __init__(self, props, tag, lineno):
        ListInputLine.__init__(self, props, tag, lineno)

    def gen(self, context = None):
        """Defer checklist generation and emit the deferred-invocation line."""
        InputLine.gen(self, context)

        gen_choices_defer(self, context, True)
        name = self.props["name"]
        nameappend = context["nameappend"]
        filename = context["filename"]

        default_choice = self.props.get("default", "")

        key = name + "_" + filename + "_" + nameappend
        return name + " = default(invoke_deferred_choices(\"" + key + "\"), \"" + default_choice + "\")"
746 | |||
747 | |||
class CheckInputLine(ListValInputLine):
    """
    A single item belonging to a checklist.
    """
    def __init__(self, props, tag, lineno):
        ListValInputLine.__init__(self, props, tag, lineno)

    def gen(self, context = None):
        """Check items are consumed by their checklist; emit nothing."""
        return None
757 | |||
758 | |||
759 | class SubstrateBase(object): | ||
760 | """ | ||
761 | Base class for both expanded and unexpanded file and dir container | ||
762 | objects. | ||
763 | """ | ||
    def __init__(self, filename, filebase, out_filebase):
        """
        filename: full path of this template file
        filebase: root of the template tree this file came from
        out_filebase: root of the output tree to generate into
        """
        self.filename = filename
        self.filebase = filebase
        self.out_filebase = out_filebase
        self.raw_lines = []          # unprocessed template lines
        self.expanded_lines = []     # Line objects after tag expansion
        self.prev_choicelist = None  # last choicelist seen, for 'choice' items
771 | |||
    def parse_error(self, msg, lineno, line):
        # Report a template syntax error including file name and line number.
        raise SyntaxError("%s: [%s: %d]: %s" % (msg, self.filename, lineno, line))
774 | |||
775 | def expand_input_tag(self, tag, lineno): | ||
776 | """ | ||
777 | Input tags consist of the word 'input' at the beginning, | ||
778 | followed by name:value property pairs which are converted into | ||
779 | a dictionary. | ||
780 | """ | ||
781 | propstr = tag[len(INPUT_TAG):] | ||
782 | |||
783 | props = dict(prop.split(":", 1) for prop in shlex.split(propstr)) | ||
784 | props["filename"] = self.filename | ||
785 | |||
786 | input_type = props[INPUT_TYPE_PROPERTY] | ||
787 | if not props[INPUT_TYPE_PROPERTY]: | ||
788 | self.parse_error("No input 'type' property found", lineno, tag) | ||
789 | |||
790 | if input_type == "boolean": | ||
791 | return BooleanInputLine(props, tag, lineno) | ||
792 | if input_type == "edit": | ||
793 | return EditBoxInputLine(props, tag, lineno) | ||
794 | if input_type == "edit-git-repo": | ||
795 | return GitRepoEditBoxInputLine(props, tag, lineno) | ||
796 | if input_type == "edit-file": | ||
797 | return FileEditBoxInputLine(props, tag, lineno) | ||
798 | elif input_type == "choicelist": | ||
799 | self.prev_choicelist = ChoicelistInputLine(props, tag, lineno) | ||
800 | return self.prev_choicelist | ||
801 | elif input_type == "choice": | ||
802 | if not self.prev_choicelist: | ||
803 | self.parse_error("Found 'choice' input tag but no previous choicelist", | ||
804 | lineno, tag) | ||
805 | choice = ChoiceInputLine(props, tag, lineno) | ||
806 | self.prev_choicelist.choices.append(choice) | ||
807 | return choice | ||
808 | elif input_type == "checklist": | ||
809 | return ChecklistInputLine(props, tag, lineno) | ||
810 | elif input_type == "check": | ||
811 | return CheckInputLine(props, tag, lineno) | ||
812 | |||
813 | def expand_assignment_tag(self, start, line, lineno): | ||
814 | """ | ||
815 | Expand all tags in a line. | ||
816 | """ | ||
817 | expanded_line = AssignmentLine(line.rstrip()) | ||
818 | |||
819 | while start != -1: | ||
820 | end = line.find(CLOSE_TAG, start) | ||
821 | if end == -1: | ||
822 | self.parse_error("No close tag found for assignment tag", lineno, line) | ||
823 | else: | ||
824 | name = line[start + len(ASSIGN_TAG):end].strip() | ||
825 | expanded_line.add_assignment(start, end + len(CLOSE_TAG), name) | ||
826 | start = line.find(ASSIGN_TAG, end) | ||
827 | |||
828 | return expanded_line | ||
829 | |||
    def expand_tag(self, line, lineno):
        """
        Returns a processed tag line, or None if there was no tag

        The rules for tags are very simple:
            - No nested tags
            - Tags start with {{ and end with }}
            - An assign tag, {{=, can appear anywhere and will
              be replaced with what the assignment evaluates to
            - Any other tag occupies the whole line it is on
                - if there's anything else on the tag line, it's an error
                - if it starts with 'input', it's an input tag and
                  will only be used for prompting and setting variables
                - anything else is straight Python
            - tags are in effect only until the next blank line or tag or 'pass' tag
            - we don't have indentation in tags, but we need some way to end a block
              forcefully without blank lines or other tags - that's the 'pass' tag
                - todo: implement pass tag
            - directories and filenames can have tags as well, but only assignment
              and 'if' code lines
                - directories and filenames are the only case where normal tags can
                  coexist with normal text on the same 'line'
        """
        start = line.find(ASSIGN_TAG)
        if start != -1:
            return self.expand_assignment_tag(start, line, lineno)

        start = line.find(OPEN_TAG)
        if start == -1:
            return None

        # NOTE(review): the close tag is searched from position 0, not
        # from 'start', so a stray }} before the {{ would be matched.
        end = line.find(CLOSE_TAG, 0)
        if end == -1:
            self.parse_error("No close tag found for open tag", lineno, line)

        tag = line[start + len(OPEN_TAG):end].strip()

        # Non-input tags are embedded Python code.
        if not tag.lstrip().startswith(INPUT_TAG):
            return CodeLine(tag)

        return self.expand_input_tag(tag, lineno)
871 | |||
    def expand_file_or_dir_name(self):
        """
        Expand file or dir names into codeline.  Dirnames and
        filenames can only have assignments or if statements.  First
        translate if statements into CodeLine + (dirname or filename
        creation).

        Appends the resulting CodeLine/AssignmentLine/NormalLine
        objects to self.expanded_lines as a side effect.
        """
        # file/dir names have no meaningful source line number
        lineno = 0

        # strip the substrate base path (and any leading '/') so we
        # work with the path relative to the template root
        line = self.filename[len(self.filebase):]
        if line.startswith("/"):
            line = line[1:]
        opentag_start = -1

        # find the first non-assignment '{{' tag, skipping '{{=' tags
        start = line.find(OPEN_TAG)
        while start != -1:
            if not line[start:].startswith(ASSIGN_TAG):
                opentag_start = start
                break
            start += len(ASSIGN_TAG)
            start = line.find(OPEN_TAG, start)

        if opentag_start != -1:
            end = line.find(CLOSE_TAG, opentag_start)
            if end == -1:
                self.parse_error("No close tag found for open tag", lineno, line)
            # we have a {{ tag i.e. code
            tag = line[opentag_start + len(OPEN_TAG):end].strip()

            # only conditional creation is allowed in names
            if not tag.lstrip().startswith(IF_TAG):
                self.parse_error("Only 'if' tags are allowed in file or directory names",
                                 lineno, line)
            self.expanded_lines.append(CodeLine(tag))

            # everything after }} is the actual filename (possibly with assignments)
            # everything before is the pathname
            line = line[:opentag_start] + line[end + len(CLOSE_TAG):].strip()

        # if the remaining name contains '{{=' assignments, record it
        # as an assignment line that will be evaluated at gen time
        assign_start = line.find(ASSIGN_TAG)
        if assign_start != -1:
            assignment_tag = self.expand_assignment_tag(assign_start, line, lineno)
            # mark whether this names a file or a directory so gen()
            # knows what to create under out_filebase
            if isinstance(self, SubstrateFile):
                assignment_tag.is_filename = True
                assignment_tag.out_filebase = self.out_filebase
            elif isinstance(self, SubstrateDir):
                assignment_tag.is_dirname = True
                assignment_tag.out_filebase = self.out_filebase
            self.expanded_lines.append(assignment_tag)
            return

        # plain name with no tags - emit it as a normal line, again
        # tagged as file or dir creation
        normal_line = NormalLine(line)
        if isinstance(self, SubstrateFile):
            normal_line.is_filename = True
            normal_line.out_filebase = self.out_filebase
        elif isinstance(self, SubstrateDir):
            normal_line.is_dirname = True
            normal_line.out_filebase = self.out_filebase
        self.expanded_lines.append(normal_line)
930 | |||
    def expand(self):
        """
        Expand the file or dir name first, eventually this ends up
        creating the file or dir.  Subclasses extend this to also
        expand their contents (see SubstrateFile.expand).
        """
        self.expand_file_or_dir_name()
937 | |||
938 | |||
class SubstrateFile(SubstrateBase):
    """
    Container for both expanded and unexpanded substrate files.
    """
    def __init__(self, filename, filebase, out_filebase):
        SubstrateBase.__init__(self, filename, filebase, out_filebase)

    def read(self):
        """Read the file's raw lines once; subsequent calls are no-ops."""
        if self.raw_lines:
            return
        # FIX: close the file handle - the original opened the file
        # and never closed it
        f = open(self.filename)
        try:
            self.raw_lines = f.readlines()
        finally:
            f.close()

    def expand(self):
        """Expand the contents of all template tags in the file."""
        SubstrateBase.expand(self)
        self.read()

        for lineno, line in enumerate(self.raw_lines):
            expanded_line = self.expand_tag(line, lineno + 1) # humans not 0-based
            if not expanded_line:
                # no tag on this line - pass it through verbatim
                expanded_line = NormalLine(line.rstrip())
            self.expanded_lines.append(expanded_line)

    def gen(self, context = None):
        """
        Generate the code that generates the BSP.

        Walks the expanded lines tracking an indent level so that
        lines following an 'if'-style code line (one ending in ':')
        are indented underneath it in the generated program.
        """
        base_indent = 0

        indent = new_indent = base_indent

        for line in self.expanded_lines:
            genline = line.gen(context)
            if not genline:
                continue
            if isinstance(line, InputLine):
                # input lines are emitted separately (prompt phase),
                # never indented into the generated program body
                line.generated_line = genline
                continue
            if genline.startswith(OPEN_START):
                # a file-open statement while indented pins the base
                # indent for subsequent lines
                if indent == 1:
                    base_indent = 1
            if indent:
                # a blank line or a non-write statement ends the
                # current indented block
                if genline == BLANKLINE_STR or (not genline.startswith(NORMAL_START)
                                                and not genline.startswith(OPEN_START)):
                    indent = new_indent = base_indent
            if genline.endswith(":"):
                # code line opening a block - indent what follows
                new_indent = base_indent + 1
            line.generated_line = (indent * INDENT_STR) + genline
            indent = new_indent
987 | |||
988 | |||
class SubstrateDir(SubstrateBase):
    """
    Container for both expanded and unexpanded substrate dirs.
    """
    def __init__(self, filename, filebase, out_filebase):
        SubstrateBase.__init__(self, filename, filebase, out_filebase)

    def expand(self):
        # a dir has no contents to expand - only its name
        SubstrateBase.expand(self)

    def gen(self, context = None):
        """Generate the code that generates the BSP."""
        indent = new_indent = 0
        for line in self.expanded_lines:
            genline = line.gen(context)
            if not genline:
                continue
            # a code line ending in ':' opens a block; indent the
            # following line underneath it, otherwise reset
            new_indent = 1 if genline.endswith(":") else 0
            line.generated_line = (indent * INDENT_STR) + genline
            indent = new_indent
1012 | |||
1013 | |||
def expand_target(target, all_files, out_filebase):
    """
    Expand the contents of all template tags in the target.  This
    means removing tags and categorizing or creating lines so that
    future passes can process and present input lines and generate the
    corresponding lines of the Python program that will be exec'ed to
    actually produce the final BSP.  'all_files' includes directories.
    """
    for root, dirs, files in os.walk(target):
        # expand every file, skipping editor backup/autosave droppings
        for fname in files:
            if fname.endswith(("~", "#")):
                continue
            sfile = SubstrateFile(os.path.join(root, fname), target, out_filebase)
            sfile.expand()
            all_files.append(sfile)

        # directories are substrates too - their names may carry tags
        for dname in dirs:
            sdir = SubstrateDir(os.path.join(root, dname), target, out_filebase)
            sdir.expand()
            all_files.append(sdir)
1036 | |||
1037 | |||
def gen_program_machine_lines(machine, program_lines):
    """
    Use the input values we got from the command line.

    Appends assignments for the 'machine' and 'layer_name' variables
    (both set to the user-supplied machine name) to program_lines.
    """
    for var in ("machine", "layer_name"):
        program_lines.append('%s = "%s"' % (var, machine))
1047 | |||
1048 | |||
def sort_inputlines(input_lines):
    """Sort input lines in place by ascending priority (position)."""
    input_lines.sort(key=lambda input_line: input_line.prio)
1052 | |||
1053 | |||
def find_parent_dependency(lines, depends_on):
    """
    Return the index of the input line whose 'name' property matches
    depends_on, or -1 if there is none.
    """
    # FIX: the original iterated 'for i, line in lines:', which tries
    # to unpack each element of 'lines' instead of producing indices;
    # enumerate() is what the 'return i' below requires.
    # NOTE(review): no caller is visible in this file - confirm none
    # passes pre-enumerated pairs.
    for i, line in enumerate(lines):
        if isinstance(line, CodeLine):
            continue
        if line.props["name"] == depends_on:
            return i

    return -1
1062 | |||
1063 | |||
def process_inputline_dependencies(input_lines, all_inputlines):
    """If any input lines depend on others, put the others first."""
    for line in input_lines:
        if isinstance(line, InputLineGroup):
            # recurse into the group, rebuilding its member list
            nested = []
            process_inputline_dependencies(line.group, nested)
            line.group = nested
            all_inputlines.append(line)
        elif isinstance(line, (CodeLine, NormalLine)):
            # code and normal lines have no dependency properties
            all_inputlines.append(line)
        else:
            try:
                # guard the dependent line with a conditional on the
                # value of the line it depends on
                guard = "if " + line.props["depends-on"] + " == \"" + line.props["depends-on-val"] + "\":"
            except KeyError:
                # no dependency declared - emit the line as-is
                all_inputlines.append(line)
            else:
                all_inputlines.append(CodeLine(guard))
                all_inputlines.append(line)
1085 | |||
1086 | |||
def conditional_filename(filename):
    """
    Check if the filename itself contains a conditional statement.  If
    so, return a codeline for it; otherwise return None.
    """
    opentag_start = filename.find(OPEN_TAG)

    if opentag_start != -1:
        # assignment ({{=) tags in names are expanded elsewhere - they
        # are not conditionals
        if filename[opentag_start:].startswith(ASSIGN_TAG):
            return None
        end = filename.find(CLOSE_TAG, opentag_start)
        if end == -1:
            print "No close tag found for open tag in filename %s" % filename
            sys.exit(1)

        # we have a {{ tag i.e. code
        tag = filename[opentag_start + len(OPEN_TAG):end].strip()
        # names may only be conditionally created, never contain
        # arbitrary code
        if not tag.lstrip().startswith(IF_TAG):
            print "Only 'if' tags are allowed in file or directory names, filename: %s" % filename
            sys.exit(1)

        return CodeLine(tag)

    return None
1111 | |||
1112 | |||
class InputLineGroup(InputLine):
    """
    InputLine that does nothing but group other input lines
    corresponding to all the input lines in a SubstrateFile so they
    can be generated as a group.  prio is the only property used.
    """
    def __init__(self, codeline):
        InputLine.__init__(self, {}, "", 0)
        # start at the highest possible priority; append() lowers it
        # to the minimum priority of the group's members
        self.prio = sys.maxint
        self.group = [codeline]

    def append(self, line):
        """Add a line to the group, tracking the minimum priority."""
        self.group.append(line)
        self.prio = min(self.prio, line.prio)

    def len(self):
        """Return the number of lines in the group."""
        return len(self.group)
1132 | |||
1133 | |||
def gather_inputlines(files):
    """
    Gather all the InputLines - we want to generate them first.

    Input lines from a file whose name is conditional are collected
    into an InputLineGroup; CodeLines immediately preceding an input
    line are kept with it (as its condition) but marked 'discard' so
    they aren't emitted again in the main program body.
    """
    all_inputlines = []
    input_lines = []

    for file in files:
        if isinstance(file, SubstrateFile):
            group = None
            basename = os.path.basename(file.filename)

            # a conditional filename groups all of the file's input
            # lines under that condition
            codeline = conditional_filename(basename)
            if codeline:
                group = InputLineGroup(codeline)

            have_condition = False
            condition_to_write = None
            for line in file.expanded_lines:
                if isinstance(line, CodeLine):
                    # remember the most recent code line - it may be
                    # the condition guarding the next input line
                    have_condition = True
                    condition_to_write = line
                    continue
                if isinstance(line, InputLine):
                    if group:
                        if condition_to_write:
                            # condition inherits the input line's prio
                            # and is discarded from the program body
                            condition_to_write.prio = line.prio
                            condition_to_write.discard = True
                            group.append(condition_to_write)
                            condition_to_write = None
                        group.append(line)
                    else:
                        if condition_to_write:
                            condition_to_write.prio = line.prio
                            condition_to_write.discard = True
                            input_lines.append(condition_to_write)
                            condition_to_write = None
                        input_lines.append(line)
                else:
                    # a normal line ends any pending condition
                    if condition_to_write:
                        condition_to_write = None
                    if have_condition:
                        # blank line terminating a condition block is
                        # discarded from the body but kept for prompts
                        if not line.line.strip():
                            line.discard = True
                            input_lines.append(line)
                        have_condition = False

            # only keep groups that gained members beyond the condition
            if group and group.len() > 1:
                input_lines.append(group)

    sort_inputlines(input_lines)
    process_inputline_dependencies(input_lines, all_inputlines)

    return all_inputlines
1188 | |||
1189 | |||
1190 | def run_program_lines(linelist, codedump): | ||
1191 | """ | ||
1192 | For a single file, print all the python code into a buf and execute it. | ||
1193 | """ | ||
1194 | buf = "\n".join(linelist) | ||
1195 | |||
1196 | if codedump: | ||
1197 | of = open("bspgen.out", "w") | ||
1198 | of.write(buf) | ||
1199 | of.close() | ||
1200 | exec buf | ||
1201 | |||
1202 | |||
def gen_target(files, context = None):
    """
    Generate the python code for each file.

    Delegates to each substrate object's gen() method in order.
    """
    for substrate_file in files:
        substrate_file.gen(context)
1209 | |||
1210 | |||
def gen_program_header_lines(program_lines):
    """
    Generate any imports we need.

    Currently this only initializes the 'current_file' variable the
    generated program uses to track which file it is writing.
    """
    program_lines.append('current_file = ""')
1216 | |||
1217 | |||
def gen_supplied_property_vals(properties, program_lines):
    """
    Generate user-specified entries for input values instead of
    generating input prompts.

    properties    - dict mapping property names to their string values
    program_lines - output list the generated assignments are appended to
    """
    # items() rather than the Python-2-only iteritems(): equivalent
    # behavior here and forward-compatible
    for name, val in properties.items():
        program_lines.append(name + " = \"" + val + "\"")
1226 | |||
1227 | |||
def gen_initial_property_vals(input_lines, program_lines):
    """
    Generate null or default entries for input values, so we don't
    have undefined variables.

    Recurses into InputLineGroups; every named input line yields
    'name = "<default>"' (or 'name = ""' with no default).
    """
    for line in input_lines:
        if isinstance(line, InputLineGroup):
            gen_initial_property_vals(line.group, program_lines)
            continue

        if isinstance(line, InputLine):
            try:
                name = line.props["name"]
                try:
                    default_val = "\"" + line.props["default"] + "\""
                except KeyError:
                    # FIX: narrowed from a bare 'except:' - only a
                    # missing 'default' property should fall back here
                    default_val = "\"\""
                program_line = name + " = " + default_val
                program_lines.append(program_line)
            except KeyError:
                # anonymous input line - nothing to initialize
                pass
1249 | |||
1250 | |||
def gen_program_input_lines(input_lines, program_lines, context, in_group = False):
    """
    Generate only the input lines used for prompting the user.  For
    that, we only have input lines and CodeLines that affect the next
    input line.

    in_group - True when recursing into an InputLineGroup, where the
               group's condition keeps one extra level of indentation
    """
    indent = new_indent = 0

    for line in input_lines:
        if isinstance(line, InputLineGroup):
            # emit the whole group (condition + members) together
            gen_program_input_lines(line.group, program_lines, context, True)
            continue
        if not line.line.strip():
            continue

        genline = line.gen(context)
        if not genline:
            continue
        if genline.endswith(":"):
            # a condition line - indent what follows it
            new_indent += 1
        else:
            # dedent after the conditional body, but inside a group
            # level 1 is the group condition's body and is kept
            if indent > 1 or (not in_group and indent):
                new_indent -= 1

        line.generated_line = (indent * INDENT_STR) + genline
        program_lines.append(line.generated_line)

        indent = new_indent
1279 | |||
1280 | |||
def gen_program_lines(target_files, program_lines):
    """
    Generate the program lines that make up the BSP generation
    program.  This appends the generated lines of all target_files to
    program_lines, and skips input lines, which are dealt with
    separately, or omitted.
    """
    for target_file in target_files:
        # files named 'noinstall' contribute nothing to the output
        if target_file.filename.endswith("noinstall"):
            continue

        for line in target_file.expanded_lines:
            # input lines are handled by the prompting pass; discarded
            # lines (e.g. conditions hoisted into prompts) are dropped
            if isinstance(line, InputLine) or line.discard:
                continue
            program_lines.append(line.generated_line)
1299 | |||
1300 | |||
def create_context(machine, arch, scripts_path):
    """
    Create a context object for use in deferred function invocation.
    """
    return {
        "machine": machine,
        "arch": arch,
        "scripts_path": scripts_path,
    }
1312 | |||
1313 | |||
def capture_context(context):
    """
    Create a context object for use in deferred function invocation.

    Copies only the keys used by deferred functions, so later mutation
    of the original context doesn't affect the capture.
    """
    return dict((key, context[key])
                for key in ("machine", "arch", "scripts_path"))
1325 | |||
1326 | |||
def expand_targets(context, bsp_output_dir, expand_common=True):
    """
    Expand all the tags in both the common and machine-specific
    'targets'.

    If expand_common is False, don't expand the common target (this
    option is used to create special-purpose layers).

    Returns the list of expanded SubstrateFile/SubstrateDir objects,
    already gen()'ed with the given context.
    """
    target_files = []

    machine = context["machine"]
    arch = context["arch"]
    scripts_path = context["scripts_path"]

    # substrate templates live under scripts/lib/bsp/substrate
    lib_path = scripts_path + '/lib'
    bsp_path = lib_path + '/bsp'
    arch_path = bsp_path + '/substrate/target/arch'

    if expand_common:
        common = os.path.join(arch_path, "common")
        expand_target(common, target_files, bsp_output_dir)

    # the requested arch must be one of the substrate subdirs (and
    # 'common' itself is not a valid karch)
    arches = os.listdir(arch_path)
    if arch not in arches or arch == "common":
        print "Invalid karch, exiting\n"
        sys.exit(1)

    target = os.path.join(arch_path, arch)
    expand_target(target, target_files, bsp_output_dir)

    gen_target(target_files, context)

    return target_files
1360 | |||
1361 | |||
1362 | def yocto_common_create(machine, target, scripts_path, layer_output_dir, codedump, properties_file, properties_str="", expand_common=True): | ||
1363 | """ | ||
1364 | Common layer-creation code | ||
1365 | |||
1366 | machine - user-defined machine name (if needed, will generate 'machine' var) | ||
1367 | target - the 'target' the layer will be based on, must be one in | ||
1368 | scripts/lib/bsp/substrate/target/arch | ||
1369 | scripts_path - absolute path to yocto /scripts dir | ||
1370 | layer_output_dir - dirname to create for layer | ||
1371 | codedump - dump generated code to bspgen.out | ||
1372 | properties_file - use values from this file if nonempty i.e no prompting | ||
1373 | properties_str - use values from this string if nonempty i.e no prompting | ||
1374 | expand_common - boolean, use the contents of (for bsp layers) arch/common | ||
1375 | """ | ||
1376 | if os.path.exists(layer_output_dir): | ||
1377 | print "\nlayer output dir already exists, exiting. (%s)" % layer_output_dir | ||
1378 | sys.exit(1) | ||
1379 | |||
1380 | properties = None | ||
1381 | |||
1382 | if properties_file: | ||
1383 | try: | ||
1384 | infile = open(properties_file, "r") | ||
1385 | except IOError: | ||
1386 | print "Couldn't open properties file %s for reading, exiting" % properties_file | ||
1387 | sys.exit(1) | ||
1388 | |||
1389 | properties = json.load(infile) | ||
1390 | |||
1391 | if properties_str and not properties: | ||
1392 | properties = json.loads(properties_str) | ||
1393 | |||
1394 | os.mkdir(layer_output_dir) | ||
1395 | |||
1396 | context = create_context(machine, target, scripts_path) | ||
1397 | target_files = expand_targets(context, layer_output_dir, expand_common) | ||
1398 | |||
1399 | input_lines = gather_inputlines(target_files) | ||
1400 | |||
1401 | program_lines = [] | ||
1402 | |||
1403 | gen_program_header_lines(program_lines) | ||
1404 | |||
1405 | gen_initial_property_vals(input_lines, program_lines) | ||
1406 | |||
1407 | if properties: | ||
1408 | gen_supplied_property_vals(properties, program_lines) | ||
1409 | |||
1410 | gen_program_machine_lines(machine, program_lines) | ||
1411 | |||
1412 | if not properties: | ||
1413 | gen_program_input_lines(input_lines, program_lines, context) | ||
1414 | |||
1415 | gen_program_lines(target_files, program_lines) | ||
1416 | |||
1417 | run_program_lines(program_lines, codedump) | ||
1418 | |||
1419 | |||
1420 | def yocto_layer_create(layer_name, scripts_path, layer_output_dir, codedump, properties_file, properties=""): | ||
1421 | """ | ||
1422 | Create yocto layer | ||
1423 | |||
1424 | layer_name - user-defined layer name | ||
1425 | scripts_path - absolute path to yocto /scripts dir | ||
1426 | layer_output_dir - dirname to create for layer | ||
1427 | codedump - dump generated code to bspgen.out | ||
1428 | properties_file - use values from this file if nonempty i.e no prompting | ||
1429 | properties - use values from this string if nonempty i.e no prompting | ||
1430 | """ | ||
1431 | yocto_common_create(layer_name, "layer", scripts_path, layer_output_dir, codedump, properties_file, properties, False) | ||
1432 | |||
1433 | print "\nNew layer created in %s.\n" % (layer_output_dir) | ||
1434 | print "Don't forget to add it to your BBLAYERS (for details see %s\README)." % (layer_output_dir) | ||
1435 | |||
1436 | |||
def yocto_bsp_create(machine, arch, scripts_path, bsp_output_dir, codedump, properties_file, properties=None):
    """
    Create bsp

    machine - user-defined machine name
    arch - the arch the bsp will be based on, must be one in
           scripts/lib/bsp/substrate/target/arch
    scripts_path - absolute path to yocto /scripts dir
    bsp_output_dir - dirname to create for BSP
    codedump - dump generated code to bspgen.out
    properties_file - use values from this file if nonempty i.e no prompting
    properties - use values from this string if nonempty i.e no prompting
    """
    # a BSP is just a layer created from an arch-specific substrate,
    # with the common substrate included (expand_common defaults True)
    yocto_common_create(machine, arch, scripts_path, bsp_output_dir, codedump, properties_file, properties)

    print "\nNew %s BSP created in %s" % (arch, bsp_output_dir)
1453 | |||
1454 | |||
def print_dict(items, indent = 0):
    """
    Print the values in a possibly nested dictionary.

    items  - dict of property name -> value (value may itself be a dict)
    indent - current nesting depth, one space of indent per level
    """
    for key, val in items.iteritems():
        # trailing comma keeps the cursor on this line (Python 2 print
        # semantics), so the value or '{' follows the key
        print " "*indent + "\"%s\" :" % key,
        if type(val) == dict:
            print "{"
            print_dict(val, indent + 1)
            print " "*indent + "}"
        else:
            print "%s" % val
1467 | |||
1468 | |||
def get_properties(input_lines):
    """
    Get the complete set of properties for all the input items in the
    BSP, as a possibly nested dictionary.
    """
    properties = {}

    for line in input_lines:
        if isinstance(line, InputLineGroup):
            # nest the group's properties under its leading statement
            properties[line.group[0].line] = get_properties(line.group)
            continue

        # only named input lines carry properties; individual choices
        # are reported via their parent choicelist
        if not isinstance(line, InputLine) or isinstance(line, ChoiceInputLine):
            continue

        props = line.props
        name = props["name"]
        properties[name] = dict((key, val) for key, val in props.items()
                                if key != "name")

    return properties
1498 | |||
1499 | |||
1500 | def yocto_layer_list_properties(arch, scripts_path, properties_file, expand_common=True): | ||
1501 | """ | ||
1502 | List the complete set of properties for all the input items in the | ||
1503 | layer. If properties_file is non-null, write the complete set of | ||
1504 | properties as a nested JSON object corresponding to a possibly | ||
1505 | nested dictionary. | ||
1506 | """ | ||
1507 | context = create_context("unused", arch, scripts_path) | ||
1508 | target_files = expand_targets(context, "unused", expand_common) | ||
1509 | |||
1510 | input_lines = gather_inputlines(target_files) | ||
1511 | |||
1512 | properties = get_properties(input_lines) | ||
1513 | if properties_file: | ||
1514 | try: | ||
1515 | of = open(properties_file, "w") | ||
1516 | except IOError: | ||
1517 | print "Couldn't open properties file %s for writing, exiting" % properties_file | ||
1518 | sys.exit(1) | ||
1519 | |||
1520 | json.dump(properties, of) | ||
1521 | |||
1522 | print_dict(properties) | ||
1523 | |||
1524 | |||
def split_nested_property(property):
    """
    A property name of the form x.y describes a nested property
    i.e. the property y is contained within x and can be addressed
    using standard JSON syntax for nested properties.  Note that if a
    property name itself contains '.', it should be contained in
    double quotes.

    Returns the list of name components if the name is nested,
    otherwise None.
    """
    parts = []
    current = ""
    in_quotes = False

    for ch in property:
        if ch == '"':
            in_quotes = not in_quotes
        if ch == '.' and not in_quotes:
            # unquoted '.' separates components and is dropped
            parts.append(current)
            current = ""
        else:
            current += ch
    parts.append(current)

    if len(parts) > 1:
        return parts

    return None
1549 | |||
1550 | |||
def find_input_line_group(substring, input_lines):
    """
    Find and return the InputLineGroup containing the specified substring.

    Matches against the group's first line (its condition statement);
    returns None if no group matches.
    """
    groups = (line for line in input_lines
              if isinstance(line, InputLineGroup))
    for group in groups:
        if substring in group.group[0].line:
            return group

    return None
1561 | |||
1562 | |||
def find_input_line(name, input_lines):
    """
    Find the input line with the specified name.

    Matches either the line's 'name' property or 'name' + '_' +
    'nameappend'.  Recurses into InputLineGroups; returns None if no
    line matches.
    """
    for line in input_lines:
        if isinstance(line, InputLineGroup):
            l = find_input_line(name, line.group)
            if l:
                return l

        # NOTE: InputLineGroup is itself an InputLine subclass, so a
        # group also reaches this check; its props dict is empty, so
        # the KeyError below makes it fall through harmlessly
        if isinstance(line, InputLine):
            try:
                if line.props["name"] == name:
                    return line
                if line.props["name"] + "_" + line.props["nameappend"] == name:
                    return line
            except KeyError:
                pass

    return None
1583 | |||
1584 | |||
1585 | def print_values(type, values_list): | ||
1586 | """ | ||
1587 | Print the values in the given list of values. | ||
1588 | """ | ||
1589 | if type == "choicelist": | ||
1590 | for value in values_list: | ||
1591 | print "[\"%s\", \"%s\"]" % (value[0], value[1]) | ||
1592 | elif type == "boolean": | ||
1593 | for value in values_list: | ||
1594 | print "[\"%s\", \"%s\"]" % (value[0], value[1]) | ||
1595 | |||
1596 | |||
def yocto_layer_list_property_values(arch, property, scripts_path, properties_file, expand_common=True):
    """
    List the possible values for a given input property.  If
    properties_file is non-null, write the complete set of properties
    as a JSON object corresponding to an array of possible values.
    """
    context = create_context("unused", arch, scripts_path)
    # some gen functions consult the property name via the context
    context["name"] = property

    target_files = expand_targets(context, "unused", expand_common)

    input_lines = gather_inputlines(target_files)

    properties = get_properties(input_lines)

    nested_properties = split_nested_property(property)
    if nested_properties:
        # currently the outer property of a nested property always
        # corresponds to an input line group
        input_line_group = find_input_line_group(nested_properties[0], input_lines)
        if input_line_group:
            # search only within the group's members (skip its condition)
            input_lines[:] = input_line_group.group[1:]
            # The inner property of a nested property name is the
            # actual property name we want, so reset to that
            property = nested_properties[1]

    input_line = find_input_line(property, input_lines)
    if not input_line:
        print "Couldn't find values for property %s" % property
        return

    values_list = []

    type = input_line.props["type"]
    if type == "boolean":
        values_list.append(["y", "n"])
    elif type == "choicelist" or type == "checklist":
        try:
            # a 'gen' property means the choices are computed by a
            # generator function rather than listed statically
            gen_fn = input_line.props["gen"]
            if nested_properties:
                context["filename"] = nested_properties[0]
                try:
                    context["branches_base"] = input_line.props["branches_base"]
                except KeyError:
                    context["branches_base"] = None
            values_list = input_line.gen_choices_list(context, False)
        except KeyError:
            # no generator - use the statically declared choices
            for choice in input_line.choices:
                choicepair = []
                choicepair.append(choice.val)
                choicepair.append(choice.desc)
                values_list.append(choicepair)

    if properties_file:
        try:
            of = open(properties_file, "w")
        except IOError:
            print "Couldn't open properties file %s for writing, exiting" % properties_file
            sys.exit(1)

        json.dump(values_list, of)

    print_values(type, values_list)
1660 | |||
1661 | |||
def yocto_bsp_list(args, scripts_path, properties_file):
    """
    Print available architectures, or the complete list of properties
    defined by the BSP, or the possible values for a particular BSP
    property.

    Returns False on a usage error, True otherwise.
    """
    if len(args) < 1:
        return False

    if args[0] == "karch":
        # 'yocto-bsp list karch': enumerate the substrate arch dirs
        lib_path = scripts_path + '/lib'
        bsp_path = lib_path + '/bsp'
        arch_path = bsp_path + '/substrate/target/arch'
        print "Architectures available:"
        for arch in os.listdir(arch_path):
            # 'common' and 'layer' are substrates, not karchs
            if arch == "common" or arch == "layer":
                continue
            print "    %s" % arch
        return True
    else:
        arch = args[0]

    if len(args) < 2 or len(args) > 3:
        return False

    # 'yocto-bsp list <karch> properties'
    if len(args) == 2:
        if args[1] == "properties":
            yocto_layer_list_properties(arch, scripts_path, properties_file)
        else:
            return False

    # 'yocto-bsp list <karch> property <name>'
    if len(args) == 3:
        if args[1] == "property":
            yocto_layer_list_property_values(arch, args[2], scripts_path, properties_file)
        else:
            return False

    return True
1700 | |||
1701 | |||
def yocto_layer_list(args, scripts_path, properties_file):
    """
    Print the complete list of input properties defined by the layer,
    or the possible values for a particular layer property.

    Returns False on a usage error, True otherwise.
    """
    # FIX: the original had a redundant 'if len(args) < 1' immediately
    # before this check; the range check below covers it
    if len(args) < 1 or len(args) > 2:
        return False

    # 'yocto-layer list properties'
    if len(args) == 1:
        if args[0] == "properties":
            yocto_layer_list_properties("layer", scripts_path, properties_file, False)
        else:
            return False

    # 'yocto-layer list property <name>'
    if len(args) == 2:
        if args[0] == "property":
            yocto_layer_list_property_values("layer", args[1], scripts_path, properties_file, False)
        else:
            return False

    return True
1726 | |||
1727 | |||
def map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch):
    """
    Return the linux-yocto bsp branch to use with the specified
    kbranch.  This handles the -standard variants for 3.4 and 3.8; the
    other variants don't need mappings.
    """
    kbranch = new_kbranch if need_new_kbranch == "y" else existing_kbranch

    # NOTE: common-pc-64 must be tested before common-pc, since the
    # latter is a prefix of the former
    if kbranch.startswith("standard/common-pc-64"):
        return "bsp/common-pc-64/common-pc-64-standard.scc"
    if kbranch.startswith("standard/common-pc"):
        return "bsp/common-pc/common-pc-standard.scc"
    return "ktypes/standard/standard.scc"
1745 | |||
1746 | |||
def map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch):
    """
    Return the linux-yocto bsp branch to use with the specified
    kbranch.  This handles the -preempt-rt variants for 3.4 and 3.8;
    the other variants don't need mappings.
    """
    kbranch = new_kbranch if need_new_kbranch == "y" else existing_kbranch

    # NOTE: common-pc-64 must be tested before common-pc, since the
    # latter is a prefix of the former
    if kbranch.startswith("standard/preempt-rt/common-pc-64"):
        return "bsp/common-pc-64/common-pc-64-preempt-rt.scc"
    if kbranch.startswith("standard/preempt-rt/common-pc"):
        return "bsp/common-pc/common-pc-preempt-rt.scc"
    return "ktypes/preempt-rt/preempt-rt.scc"
1764 | |||
1765 | |||
def map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch):
    """
    Return the linux-yocto bsp branch to use with the specified
    kbranch.  This handles the -tiny variants for 3.4 and 3.8; the
    other variants don't need mappings.
    """
    kbranch = new_kbranch if need_new_kbranch == "y" else existing_kbranch

    if kbranch.startswith("standard/tiny/common-pc"):
        return "bsp/common-pc/common-pc-tiny.scc"
    return "ktypes/tiny/tiny.scc"
diff --git a/scripts/lib/bsp/help.py b/scripts/lib/bsp/help.py new file mode 100644 index 0000000000..7c436d6be0 --- /dev/null +++ b/scripts/lib/bsp/help.py | |||
@@ -0,0 +1,1043 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2012, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This module implements some basic help invocation functions along | ||
22 | # with the bulk of the help topic text for the Yocto BSP Tools. | ||
23 | # | ||
24 | # AUTHORS | ||
25 | # Tom Zanussi <tom.zanussi (at] intel.com> | ||
26 | # | ||
27 | |||
28 | import subprocess | ||
29 | import logging | ||
30 | |||
31 | |||
def subcommand_error(args):
    """
    Fallback handler invoked when an unrecognized subcommand is
    dispatched.

    args: argument list whose first element is the unrecognized
        subcommand name.
    """
    # Lazy %-style arguments let logging skip the string formatting
    # entirely when the INFO level is disabled.
    logging.info("invalid subcommand %s", args[0])
34 | |||
35 | |||
def display_help(subcommand, subcommands):
    """
    Display help for subcommand.

    Pages the subcommand's help text (third element of its entry in
    the subcommands table) through 'less'.  Returns False if the
    subcommand is unknown, True otherwise.
    """
    if subcommand not in subcommands:
        return False

    # Renamed from 'help' to avoid shadowing the builtin of that name.
    help_text = subcommands.get(subcommand, subcommand_error)[2]
    pager = subprocess.Popen('less', stdin=subprocess.PIPE)
    pager.communicate(help_text)

    return True
48 | |||
49 | |||
def yocto_help(args, usage_str, subcommands):
    """
    Subcommand help dispatcher.

    Shows detailed help for args[1] when it names a known subcommand;
    otherwise prints the generic usage string.
    """
    handled = False
    if len(args) != 1:
        handled = display_help(args[1], subcommands)
    if not handled:
        print(usage_str)
56 | |||
57 | |||
def invoke_subcommand(args, parser, main_command_usage, subcommands):
    """
    Dispatch to subcommand handler borrowed from combo-layer.
    Should use argparse, but has to work in 2.6.
    """
    # Guard clauses replace the original if/elif chain; each branch
    # still ends the function the same way (implicit None return).
    if not args:
        logging.error("No subcommand specified, exiting")
        parser.print_help()
        return
    if args[0] == "help":
        yocto_help(args, main_command_usage, subcommands)
        return
    if args[0] not in subcommands:
        logging.error("Unsupported subcommand %s, exiting\n" % (args[0]))
        parser.print_help()
        return
    # Membership was verified above, so direct indexing cannot fail.
    cmd_entry = subcommands[args[0]]
    cmd_entry[0](args[1:], cmd_entry[1])
74 | |||
75 | |||
76 | ## | ||
77 | # yocto-bsp help and usage strings | ||
78 | ## | ||
79 | |||
80 | yocto_bsp_usage = """ | ||
81 | |||
82 | Create a customized Yocto BSP layer. | ||
83 | |||
84 | usage: yocto-bsp [--version] [--help] COMMAND [ARGS] | ||
85 | |||
86 | Current 'yocto-bsp' commands are: | ||
87 | create Create a new Yocto BSP | ||
88 | list List available values for options and BSP properties | ||
89 | |||
90 | See 'yocto-bsp help COMMAND' for more information on a specific command. | ||
91 | """ | ||
92 | |||
93 | yocto_bsp_help_usage = """ | ||
94 | |||
95 | usage: yocto-bsp help <subcommand> | ||
96 | |||
97 | This command displays detailed help for the specified subcommand. | ||
98 | """ | ||
99 | |||
100 | yocto_bsp_create_usage = """ | ||
101 | |||
102 | Create a new Yocto BSP | ||
103 | |||
104 | usage: yocto-bsp create <bsp-name> <karch> [-o <DIRNAME> | --outdir <DIRNAME>] | ||
105 | [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>] | ||
106 | |||
107 | This command creates a Yocto BSP based on the specified parameters. | ||
108 | The new BSP will be a new Yocto BSP layer contained by default within | ||
109 | the top-level directory specified as 'meta-bsp-name'. The -o option | ||
110 | can be used to place the BSP layer in a directory with a different | ||
111 | name and location. | ||
112 | |||
113 | The value of the 'karch' parameter determines the set of files that | ||
114 | will be generated for the BSP, along with the specific set of | ||
115 | 'properties' that will be used to fill out the BSP-specific portions | ||
116 | of the BSP. The possible values for the 'karch' parameter can be | ||
117 | listed via 'yocto-bsp list karch'. | ||
118 | |||
119 | NOTE: Once created, you should add your new layer to your | ||
120 | bblayers.conf file in order for it to be subsequently seen and | ||
121 | modified by the yocto-kernel tool. | ||
122 | |||
123 | See 'yocto bsp help create' for more detailed instructions. | ||
124 | """ | ||
125 | |||
126 | yocto_bsp_create_help = """ | ||
127 | |||
128 | NAME | ||
129 | yocto-bsp create - Create a new Yocto BSP | ||
130 | |||
131 | SYNOPSIS | ||
132 | yocto-bsp create <bsp-name> <karch> [-o <DIRNAME> | --outdir <DIRNAME>] | ||
133 | [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>] | ||
134 | |||
135 | DESCRIPTION | ||
136 | This command creates a Yocto BSP based on the specified | ||
137 | parameters. The new BSP will be a new Yocto BSP layer contained | ||
138 | by default within the top-level directory specified as | ||
139 | 'meta-bsp-name'. The -o option can be used to place the BSP layer | ||
140 | in a directory with a different name and location. | ||
141 | |||
142 | The value of the 'karch' parameter determines the set of files | ||
143 | that will be generated for the BSP, along with the specific set of | ||
144 | 'properties' that will be used to fill out the BSP-specific | ||
145 | portions of the BSP. The possible values for the 'karch' parameter | ||
146 | can be listed via 'yocto-bsp list karch'. | ||
147 | |||
148 | The BSP-specific properties that define the values that will be | ||
149 | used to generate a particular BSP can be specified on the | ||
150 | command-line using the -i option and supplying a JSON object | ||
151 | consisting of the set of name:value pairs needed by the BSP. | ||
152 | |||
153 | If the -i option is not used, the user will be interactively | ||
154 | prompted for each of the required property values, which will then | ||
155 | be used as values for BSP generation. | ||
156 | |||
157 | The set of properties available for a given architecture can be | ||
158 | listed using the 'yocto-bsp list' command. | ||
159 | |||
160 | Specifying -c causes the Python code generated and executed to | ||
161 | create the BSP to be dumped to the 'bspgen.out' file in the | ||
162 | current directory, and is useful for debugging. | ||
163 | |||
164 | NOTE: Once created, you should add your new layer to your | ||
165 | bblayers.conf file in order for it to be subsequently seen and | ||
166 | modified by the yocto-kernel tool. | ||
167 | |||
168 | For example, assuming your poky repo is at /path/to/poky, your new | ||
169 | BSP layer is at /path/to/poky/meta-mybsp, and your build directory | ||
170 | is /path/to/build: | ||
171 | |||
172 | $ gedit /path/to/build/conf/bblayers.conf | ||
173 | |||
174 | BBLAYERS ?= " \\ | ||
175 | /path/to/poky/meta \\ | ||
176 | /path/to/poky/meta-yocto \\ | ||
177 | /path/to/poky/meta-mybsp \\ | ||
178 | " | ||
179 | """ | ||
180 | |||
181 | yocto_bsp_list_usage = """ | ||
182 | |||
183 | usage: yocto-bsp list karch | ||
184 | yocto-bsp list <karch> properties | ||
185 | [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>] | ||
186 | yocto-bsp list <karch> property <xxx> | ||
187 | [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>] | ||
188 | |||
189 | This command enumerates the complete set of possible values for a | ||
190 | specified option or property needed by the BSP creation process. | ||
191 | |||
192 | The first form enumerates all the possible values that exist and can | ||
193 | be specified for the 'karch' parameter to the 'yocto bsp create' | ||
194 | command. | ||
195 | |||
196 | The second form enumerates all the possible properties that exist and | ||
197 | must have values specified for them in the 'yocto bsp create' command | ||
198 | for the given 'karch'. | ||
199 | |||
200 | The third form enumerates all the possible values that exist and can | ||
201 | be specified for any of the enumerable properties of the given | ||
202 | 'karch' in the 'yocto bsp create' command. | ||
203 | |||
204 | See 'yocto-bsp help list' for more details. | ||
205 | """ | ||
206 | |||
207 | yocto_bsp_list_help = """ | ||
208 | |||
209 | NAME | ||
210 | yocto-bsp list - List available values for options and BSP properties | ||
211 | |||
212 | SYNOPSIS | ||
213 | yocto-bsp list karch | ||
214 | yocto-bsp list <karch> properties | ||
215 | [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>] | ||
216 | yocto-bsp list <karch> property <xxx> | ||
217 | [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>] | ||
218 | |||
219 | DESCRIPTION | ||
220 | This command enumerates the complete set of possible values for a | ||
221 | specified option or property needed by the BSP creation process. | ||
222 | |||
223 | The first form enumerates all the possible values that exist and | ||
224 | can be specified for the 'karch' parameter to the 'yocto bsp | ||
225 | create' command. Example output for the 'list karch' command: | ||
226 | |||
227 | $ yocto-bsp list karch | ||
228 | Architectures available: | ||
229 | arm | ||
230 | powerpc | ||
231 | i386 | ||
232 | mips | ||
233 | x86_64 | ||
234 | qemu | ||
235 | |||
236 | The second form enumerates all the possible properties that exist | ||
237 | and must have values specified for them in the 'yocto bsp create' | ||
238 | command for the given 'karch'. This command is mainly meant to | ||
239 | allow the development user interface alternatives to the default | ||
240 | text-based prompting interface. If the -o option is specified, | ||
241 | the list of properties, in addition to being displayed, will be | ||
242 | written to the specified file as a JSON object. In this case, the | ||
243 | object will consist of the set of name:value pairs corresponding | ||
244 | to the (possibly nested) dictionary of properties defined by the | ||
245 | input statements used by the BSP. Some example output for the | ||
246 | 'list properties' command: | ||
247 | |||
248 | $ yocto-bsp list arm properties | ||
249 | "touchscreen" : { | ||
250 | "msg" : Does your BSP have a touchscreen? (y/N) | ||
251 | "default" : n | ||
252 | "type" : boolean | ||
253 | } | ||
254 | "uboot_loadaddress" : { | ||
255 | "msg" : Please specify a value for UBOOT_LOADADDRESS. | ||
256 | "default" : 0x80008000 | ||
257 | "type" : edit | ||
258 | "prio" : 40 | ||
259 | } | ||
260 | "kernel_choice" : { | ||
261 | "prio" : 10 | ||
262 | "default" : linux-yocto_3.2 | ||
263 | "depends-on" : use_default_kernel | ||
264 | "depends-on-val" : n | ||
265 | "msg" : Please choose the kernel to use in this BSP => | ||
266 | "type" : choicelist | ||
267 | "gen" : bsp.kernel.kernels | ||
268 | } | ||
269 | "if kernel_choice == "linux-yocto_3.0":" : { | ||
270 | "base_kbranch_linux_yocto_3_0" : { | ||
271 | "prio" : 20 | ||
272 | "default" : yocto/standard | ||
273 | "depends-on" : new_kbranch_linux_yocto_3_0 | ||
274 | "depends-on-val" : y | ||
275 | "msg" : Please choose a machine branch to base this BSP on => | ||
276 | "type" : choicelist | ||
277 | "gen" : bsp.kernel.all_branches | ||
278 | } | ||
279 | . | ||
280 | . | ||
281 | . | ||
282 | |||
283 | Each entry in the output consists of the name of the input element | ||
284 | e.g. "touchscreen", followed by the properties defined for that | ||
285 | element enclosed in braces. This information should provide | ||
286 | sufficient information to create a complete user interface with. | ||
287 | Two features of the scheme provide for conditional input. First, | ||
288 | if a Python "if" statement appears in place of an input element | ||
289 | name, the set of enclosed input elements apply and should be | ||
290 | presented to the user only if the 'if' statement evaluates to | ||
291 | true. The test in the if statement will always reference another | ||
292 | input element in the list, which means that the element being | ||
293 | tested should be presented to the user before the elements | ||
294 | enclosed by the if block. Secondly, in a similar way, some | ||
295 | elements contain "depends-on" and "depends-on-val" tags, which mean | ||
296 | that the affected input element should only be presented to the | ||
297 | user if the element it depends on has already been presented to | ||
298 | the user and the user has selected the specified value for that | ||
299 | element. | ||
300 | |||
301 | The third form enumerates all the possible values that exist and | ||
302 | can be specified for any of the enumerable properties of the given | ||
303 | 'karch' in the 'yocto bsp create' command. If the -o option is | ||
304 | specified, the list of values for the given property, in addition | ||
305 | to being displayed, will be written to the specified file as a | ||
306 | JSON object. In this case, the object will consist of the set of | ||
307 | name:value pairs corresponding to the array of property values | ||
308 | associated with the property. | ||
309 | |||
310 | $ yocto-bsp list i386 property xserver_choice | ||
311 | ["xserver_vesa", "VESA xserver support"] | ||
312 | ["xserver_i915", "i915 xserver support"] | ||
313 | |||
314 | $ yocto-bsp list arm property base_kbranch_linux_yocto_3_0 | ||
315 | Getting branches from remote repo git://git.yoctoproject.org/linux-yocto-3.0... | ||
316 | ["yocto/base", "yocto/base"] | ||
317 | ["yocto/eg20t", "yocto/eg20t"] | ||
318 | ["yocto/gma500", "yocto/gma500"] | ||
319 | ["yocto/pvr", "yocto/pvr"] | ||
320 | ["yocto/standard/arm-versatile-926ejs", "yocto/standard/arm-versatile-926ejs"] | ||
321 | ["yocto/standard/base", "yocto/standard/base"] | ||
322 | ["yocto/standard/cedartrail", "yocto/standard/cedartrail"] | ||
323 | . | ||
324 | . | ||
325 | . | ||
326 | ["yocto/standard/qemu-ppc32", "yocto/standard/qemu-ppc32"] | ||
327 | ["yocto/standard/routerstationpro", "yocto/standard/routerstationpro"] | ||
328 | |||
329 | The third form as well is meant mainly for developers of | ||
330 | alternative interfaces - it allows the developer to fetch the | ||
331 | possible values for a given input element on-demand. This | ||
332 | on-demand capability is especially valuable for elements that | ||
333 | require relatively expensive remote operations to fulfill, such as | ||
334 | the example that returns the set of branches available in a remote | ||
335 | git tree above. | ||
336 | |||
337 | """ | ||
338 | |||
339 | ## | ||
340 | # yocto-kernel help and usage strings | ||
341 | ## | ||
342 | |||
343 | yocto_kernel_usage = """ | ||
344 | |||
345 | Modify and list Yocto BSP kernel config items and patches. | ||
346 | |||
347 | usage: yocto-kernel [--version] [--help] COMMAND [ARGS] | ||
348 | |||
349 | Current 'yocto-kernel' commands are: | ||
350 | config list List the modifiable set of bare kernel config options for a BSP | ||
351 | config add Add or modify bare kernel config options for a BSP | ||
352 | config rm Remove bare kernel config options from a BSP | ||
353 | patch list List the patches associated with a BSP | ||
354 | patch add Patch the Yocto kernel for a BSP | ||
355 | patch rm Remove patches from a BSP | ||
356 | feature list List the features used by a BSP | ||
357 | feature add Have a BSP use a feature | ||
358 | feature rm Have a BSP stop using a feature | ||
359 | features list List the features available to BSPs | ||
360 | feature describe Describe a particular feature | ||
361 | feature create Create a new BSP-local feature | ||
362 | feature destroy Remove a BSP-local feature | ||
363 | |||
364 | See 'yocto-kernel help COMMAND' for more information on a specific command. | ||
365 | |||
366 | """ | ||
367 | |||
368 | |||
369 | yocto_kernel_help_usage = """ | ||
370 | |||
371 | usage: yocto-kernel help <subcommand> | ||
372 | |||
373 | This command displays detailed help for the specified subcommand. | ||
374 | """ | ||
375 | |||
376 | yocto_kernel_config_list_usage = """ | ||
377 | |||
378 | List the modifiable set of bare kernel config options for a BSP | ||
379 | |||
380 | usage: yocto-kernel config list <bsp-name> | ||
381 | |||
382 | This command lists the 'modifiable' config items for a BSP i.e. the | ||
383 | items which are eligible for modification or removal by other | ||
384 | yocto-kernel commands. | ||
385 | |||
386 | 'modifiable' config items are the config items contained in a BSP's | ||
387 | user-config.cfg base config. | ||
388 | """ | ||
389 | |||
390 | |||
391 | yocto_kernel_config_list_help = """ | ||
392 | |||
393 | NAME | ||
394 | yocto-kernel config list - List the modifiable set of bare kernel | ||
395 | config options for a BSP | ||
396 | |||
397 | SYNOPSIS | ||
398 | yocto-kernel config list <bsp-name> | ||
399 | |||
400 | DESCRIPTION | ||
401 | This command lists the 'modifiable' config items for a BSP | ||
402 | i.e. the items which are eligible for modification or removal by | ||
403 | other yocto-kernel commands. | ||
404 | """ | ||
405 | |||
406 | |||
407 | yocto_kernel_config_add_usage = """ | ||
408 | |||
409 | Add or modify bare kernel config options for a BSP | ||
410 | |||
411 | usage: yocto-kernel config add <bsp-name> [<CONFIG_XXX=x> ...] | ||
412 | |||
413 | This command adds one or more CONFIG_XXX=x items to a BSP's user-config.cfg | ||
414 | base config. | ||
415 | """ | ||
416 | |||
417 | |||
418 | yocto_kernel_config_add_help = """ | ||
419 | |||
420 | NAME | ||
421 | yocto-kernel config add - Add or modify bare kernel config options | ||
422 | for a BSP | ||
423 | |||
424 | SYNOPSIS | ||
425 | yocto-kernel config add <bsp-name> [<CONFIG_XXX=x> ...] | ||
426 | |||
427 | DESCRIPTION | ||
428 | This command adds one or more CONFIG_XXX=x items to a BSP's | ||
429 | foo.cfg base config. | ||
430 | |||
431 | NOTE: It's up to the user to determine whether or not the config | ||
432 | options being added make sense or not - this command does no | ||
433 | sanity checking or verification of any kind to ensure that a | ||
434 | config option really makes sense and will actually be set in | ||
435 | the final config. For example, if a config option depends on | ||
436 | other config options, it will be turned off by kconfig if the | ||
437 | other options aren't set correctly. | ||
438 | """ | ||
439 | |||
440 | |||
441 | yocto_kernel_config_rm_usage = """ | ||
442 | |||
443 | Remove bare kernel config options from a BSP | ||
444 | |||
445 | usage: yocto-kernel config rm <bsp-name> | ||
446 | |||
447 | This command removes (turns off) one or more CONFIG_XXX items from a | ||
448 | BSP's user-config.cfg base config. | ||
449 | |||
450 | The set of config items available to be removed by this command for a | ||
451 | BSP is listed and the user prompted for the specific items to remove. | ||
452 | """ | ||
453 | |||
454 | |||
455 | yocto_kernel_config_rm_help = """ | ||
456 | |||
457 | NAME | ||
458 | yocto-kernel config rm - Remove bare kernel config options from a | ||
459 | BSP | ||
460 | |||
461 | SYNOPSIS | ||
462 | yocto-kernel config rm <bsp-name> | ||
463 | |||
464 | DESCRIPTION | ||
465 | This command removes (turns off) one or more CONFIG_XXX items from a | ||
466 | BSP's user-config.cfg base config. | ||
467 | |||
468 | The set of config items available to be removed by this command | ||
469 | for a BSP is listed and the user prompted for the specific items | ||
470 | to remove. | ||
471 | """ | ||
472 | |||
473 | |||
474 | yocto_kernel_patch_list_usage = """ | ||
475 | |||
476 | List the patches associated with the kernel for a BSP | ||
477 | |||
478 | usage: yocto-kernel patch list <bsp-name> | ||
479 | |||
480 | This command lists the patches associated with a BSP. | ||
481 | |||
482 | NOTE: this only applies to patches listed in the kernel recipe's | ||
483 | user-patches.scc file (and currently repeated in its SRC_URI). | ||
484 | """ | ||
485 | |||
486 | |||
487 | yocto_kernel_patch_list_help = """ | ||
488 | |||
489 | NAME | ||
490 | yocto-kernel patch list - List the patches associated with the kernel | ||
491 | for a BSP | ||
492 | |||
493 | SYNOPSIS | ||
494 | yocto-kernel patch list <bsp-name> | ||
495 | |||
496 | DESCRIPTION | ||
497 | This command lists the patches associated with a BSP. | ||
498 | |||
499 | NOTE: this only applies to patches listed in the kernel recipe's | ||
500 | user-patches.scc file (and currently repeated in its SRC_URI). | ||
501 | """ | ||
502 | |||
503 | |||
504 | yocto_kernel_patch_add_usage = """ | ||
505 | |||
506 | Patch the Yocto kernel for a specific BSP | ||
507 | |||
508 | usage: yocto-kernel patch add <bsp-name> [<PATCH> ...] | ||
509 | |||
510 | This command adds one or more patches to a BSP's machine branch. The | ||
511 | patch will be added to the BSP's linux-yocto kernel user-patches.scc | ||
512 | file (and currently repeated in its SRC_URI) and will be guaranteed | ||
513 | to be applied in the order specified. | ||
514 | """ | ||
515 | |||
516 | |||
517 | yocto_kernel_patch_add_help = """ | ||
518 | |||
519 | NAME | ||
520 | yocto-kernel patch add - Patch the Yocto kernel for a specific BSP | ||
521 | |||
522 | SYNOPSIS | ||
523 | yocto-kernel patch add <bsp-name> [<PATCH> ...] | ||
524 | |||
525 | DESCRIPTION | ||
526 | This command adds one or more patches to a BSP's machine branch. | ||
527 | The patch will be added to the BSP's linux-yocto kernel | ||
528 | user-patches.scc file (and currently repeated in its SRC_URI) and | ||
529 | will be guaranteed to be applied in the order specified. | ||
530 | |||
531 | NOTE: It's up to the user to determine whether or not the patches | ||
532 | being added makes sense or not - this command does no sanity | ||
533 | checking or verification of any kind to ensure that a patch can | ||
534 | actually be applied to the BSP's kernel branch; it's assumed that | ||
535 | the user has already done that. | ||
536 | """ | ||
537 | |||
538 | |||
539 | yocto_kernel_patch_rm_usage = """ | ||
540 | |||
541 | Remove a patch from the Yocto kernel for a specific BSP | ||
542 | |||
543 | usage: yocto-kernel patch rm <bsp-name> | ||
544 | |||
545 | This command removes one or more patches from a BSP's machine branch. | ||
546 | The patch will be removed from the BSP's linux-yocto kernel | ||
547 | user-patches.scc file (and currently repeated in its SRC_URI) and | ||
548 | kernel SRC_URI dir. | ||
549 | |||
550 | The set of patches available to be removed by this command for a BSP | ||
551 | is listed and the user prompted for the specific patches to remove. | ||
552 | """ | ||
553 | |||
554 | |||
555 | yocto_kernel_patch_rm_help = """ | ||
556 | |||
557 | NAME | ||
558 | yocto-kernel patch rm - Remove a patch from the Yocto kernel for a specific BSP | ||
559 | |||
560 | SYNOPSIS | ||
561 | yocto-kernel patch rm <bsp-name> | ||
562 | |||
563 | DESCRIPTION | ||
564 | This command removes one or more patches from a BSP's machine | ||
565 | branch. The patch will be removed from the BSP's linux-yocto | ||
566 | kernel user-patches.scc file (and currently repeated in its | ||
567 | SRC_URI). | ||
568 | |||
569 | The set of patches available to be removed by this command for a | ||
570 | BSP is listed and the user prompted for the specific patches to | ||
571 | remove. | ||
572 | """ | ||
573 | |||
574 | yocto_kernel_feature_list_usage = """ | ||
575 | |||
576 | List the BSP features that are being used by a BSP | ||
577 | |||
578 | usage: yocto-kernel feature list <bsp-name> | ||
579 | |||
580 | This command lists the features being used by a BSP i.e. the features | ||
581 | which are eligible for modification or removal by other yocto-kernel | ||
582 | commands. | ||
583 | |||
584 | 'modifiable' features are the features listed in a BSP's | ||
585 | user-features.scc file. | ||
586 | """ | ||
587 | |||
588 | |||
589 | yocto_kernel_feature_list_help = """ | ||
590 | |||
591 | NAME | ||
592 | yocto-kernel feature list - List the modifiable set of features | ||
593 | being used by a BSP | ||
594 | |||
595 | SYNOPSIS | ||
596 | yocto-kernel feature list <bsp-name> | ||
597 | |||
598 | DESCRIPTION | ||
599 | This command lists the 'modifiable' features being used by a BSP | ||
600 | i.e. the features which are eligible for modification or removal | ||
601 | by other yocto-kernel commands. | ||
602 | """ | ||
603 | |||
604 | |||
605 | yocto_kernel_feature_add_usage = """ | ||
606 | |||
607 | Add to or modify the list of features being used for a BSP | ||
608 | |||
609 | usage: yocto-kernel feature add <bsp-name> [/xxxx/yyyy/feature.scc ...] | ||
610 | |||
611 | This command adds one or more feature items to a BSP's kernel | ||
612 | user-features.scc file, which is the file used to manage features in | ||
613 | a yocto-bsp-generated BSP. Features to be added must be specified as | ||
614 | fully-qualified feature names. | ||
615 | """ | ||
616 | |||
617 | |||
618 | yocto_kernel_feature_add_help = """ | ||
619 | |||
620 | NAME | ||
621 | yocto-kernel feature add - Add to or modify the list of features | ||
622 | being used for a BSP | ||
623 | |||
624 | SYNOPSIS | ||
625 | yocto-kernel feature add <bsp-name> [/xxxx/yyyy/feature.scc ...] | ||
626 | |||
627 | DESCRIPTION | ||
628 | This command adds one or more feature items to a BSP's | ||
629 | user-features.scc file, which is the file used to manage features | ||
630 | in a yocto-bsp-generated BSP. Features to be added must be | ||
631 | specified as fully-qualified feature names. | ||
632 | """ | ||
633 | |||
634 | |||
635 | yocto_kernel_feature_rm_usage = """ | ||
636 | |||
637 | Remove a feature from the list of features being used for a BSP | ||
638 | |||
639 | usage: yocto-kernel feature rm <bsp-name> | ||
640 | |||
641 | This command removes (turns off) one or more features from a BSP's | ||
642 | user-features.scc file, which is the file used to manage features in | ||
643 | a yocto-bsp-generated BSP. | ||
644 | |||
645 | The set of features available to be removed by this command for a BSP | ||
646 | is listed and the user prompted for the specific items to remove. | ||
647 | """ | ||
648 | |||
649 | |||
650 | yocto_kernel_feature_rm_help = """ | ||
651 | |||
652 | NAME | ||
653 | yocto-kernel feature rm - Remove a feature from the list of | ||
654 | features being used for a BSP | ||
655 | |||
656 | SYNOPSIS | ||
657 | yocto-kernel feature rm <bsp-name> | ||
658 | |||
659 | DESCRIPTION | ||
660 | This command removes (turns off) one or more features from a BSP's | ||
661 | user-features.scc file, which is the file used to manage features | ||
662 | in a yocto-bsp-generated BSP. | ||
663 | |||
664 | The set of features available to be removed by this command for a | ||
665 | BSP is listed and the user prompted for the specific items to | ||
666 | remove. | ||
667 | """ | ||
668 | |||
669 | |||
670 | yocto_kernel_available_features_list_usage = """ | ||
671 | |||
672 | List the set of kernel features available to a BSP | ||
673 | |||
674 | usage: yocto-kernel features list <bsp-name> | ||
675 | |||
676 | This command lists the complete set of kernel features available to a | ||
677 | BSP. This includes the features contained in linux-yocto meta | ||
678 | branches as well as recipe-space features defined locally to the BSP. | ||
679 | """ | ||
680 | |||
681 | |||
682 | yocto_kernel_available_features_list_help = """ | ||
683 | |||
684 | NAME | ||
685 | yocto-kernel features list - List the set of kernel features | ||
686 | available to a BSP | ||
687 | |||
688 | SYNOPSIS | ||
689 | yocto-kernel features list <bsp-name> | ||
690 | |||
691 | DESCRIPTION | ||
692 | This command lists the complete set of kernel features available | ||
693 | to a BSP. This includes the features contained in linux-yocto | ||
694 | meta branches as well as recipe-space features defined locally to | ||
695 | the BSP. | ||
696 | """ | ||
697 | |||
698 | |||
699 | yocto_kernel_feature_describe_usage = """ | ||
700 | |||
701 | Print the description and compatibility information for a given kernel feature | ||
702 | |||
703 | usage: yocto-kernel feature describe <bsp-name> [/xxxx/yyyy/feature.scc ...] | ||
704 | |||
705 | This command prints the description and compatibility of a specific | ||
706 | feature in the format 'description [compatibility]'. | ||
707 | """ | ||
708 | |||
709 | |||
710 | yocto_kernel_feature_describe_help = """ | ||
711 | |||
712 | NAME | ||
713 | yocto-kernel feature describe - print the description and | ||
714 | compatibility information for a given kernel feature | ||
715 | |||
716 | SYNOPSIS | ||
717 | yocto-kernel feature describe <bsp-name> [/xxxx/yyyy/feature.scc ...] | ||
718 | |||
719 | DESCRIPTION | ||
720 | This command prints the description and compatibility of a | ||
721 | specific feature in the format 'description [compatibility]'. If | ||
722 | the feature doesn't define a description or compatibility, a | ||
723 | string with generic unknown values will be printed. | ||
724 | """ | ||
725 | |||
726 | |||
727 | yocto_kernel_feature_create_usage = """ | ||
728 | |||
729 | Create a recipe-space kernel feature in a BSP | ||
730 | |||
731 | usage: yocto-kernel feature create <bsp-name> newfeature.scc \ | ||
732 | "Feature Description" capabilities [<CONFIG_XXX=x> ...] [<PATCH> ...] | ||
733 | |||
734 | This command creates a new kernel feature from the bare config | ||
735 | options and patches specified on the command-line. | ||
736 | """ | ||
737 | |||
738 | |||
739 | yocto_kernel_feature_create_help = """ | ||
740 | |||
741 | NAME | ||
742 | yocto-kernel feature create - create a recipe-space kernel feature | ||
743 | in a BSP | ||
744 | |||
745 | SYNOPSIS | ||
746 | yocto-kernel feature create <bsp-name> newfeature.scc \ | ||
747 | "Feature Description" capabilities [<CONFIG_XXX=x> ...] [<PATCH> ...] | ||
748 | |||
749 | DESCRIPTION | ||
750 | This command creates a new kernel feature from the bare config | ||
751 | options and patches specified on the command-line. The new | ||
752 | feature will be created in recipe-space, specifically in either | ||
753 | the kernel .bbappend's /files/cfg or /files/features subdirectory, | ||
754 | depending on whether or not the feature contains config items only | ||
755 | or config items along with patches. The named feature must end | ||
756 | with .scc and must not contain a feature directory to contain the | ||
757 | feature (this will be determined automatically), and a feature | ||
758 | description in double-quotes along with a capabilities string | ||
759 | (which for the time being can be one of: 'all' or 'board'). | ||
760 | """ | ||
761 | |||
762 | |||
763 | yocto_kernel_feature_destroy_usage = """ | ||
764 | |||
765 | Destroy a recipe-space kernel feature in a BSP | ||
766 | |||
767 | usage: yocto-kernel feature destroy <bsp-name> feature.scc | ||
768 | |||
769 | This command destroys a kernel feature defined in the specified BSP's | ||
770 | recipe-space kernel definition. | ||
771 | """ | ||
772 | |||
773 | |||
774 | yocto_kernel_feature_destroy_help = """ | ||
775 | |||
776 | NAME | ||
777 | yocto-kernel feature destroy <bsp-name> feature.scc - destroy a | ||
778 | recipe-space kernel feature in a BSP | ||
779 | |||
780 | SYNOPSIS | ||
781 | yocto-kernel feature destroy <bsp-name> feature.scc | ||
782 | |||
783 | DESCRIPTION | ||
784 | This command destroys a kernel feature defined in the specified | ||
785 | BSP's recipe-space kernel definition. The named feature must end | ||
786 | with .scc and must not contain a feature directory to contain the | ||
787 | feature (this will be determined automatically). If the kernel | ||
788 | feature is in use by a BSP, it can't be removed until the BSP | ||
789 | stops using it (see yocto-kernel feature rm to stop using it). | ||
790 | """ | ||
791 | |||
792 | ## | ||
793 | # yocto-layer help and usage strings | ||
794 | ## | ||
795 | |||
796 | yocto_layer_usage = """ | ||
797 | |||
798 | Create a generic Yocto layer. | ||
799 | |||
800 | usage: yocto-layer [--version] [--help] COMMAND [ARGS] | ||
801 | |||
802 | Current 'yocto-layer' commands are: | ||
803 | create Create a new generic Yocto layer | ||
804 | list List available values for input options and properties | ||
805 | |||
806 | See 'yocto-layer help COMMAND' for more information on a specific command. | ||
807 | """ | ||
808 | |||
809 | yocto_layer_help_usage = """ | ||
810 | |||
811 | usage: yocto-layer help <subcommand> | ||
812 | |||
813 | This command displays detailed help for the specified subcommand. | ||
814 | """ | ||
815 | |||
816 | yocto_layer_create_usage = """ | ||
817 | |||
818 | Create a new generic Yocto layer | ||
819 | |||
820 | usage: yocto-layer create <layer-name> [layer_priority] | ||
821 | [-o <DIRNAME> | --outdir <DIRNAME>] | ||
822 | [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>] | ||
823 | |||
824 | This command creates a generic Yocto layer based on the specified | ||
825 | parameters. The new layer will be a new Yocto layer contained by | ||
826 | default within the top-level directory specified as | ||
827 | 'meta-layer-name'. The -o option can be used to place the layer in a | ||
828 | directory with a different name and location. | ||
829 | |||
830 | If layer_priority is specified, a simple layer will be created using | ||
831 | the given layer priority, and the user will not be prompted for | ||
832 | further input. | ||
833 | |||
834 | NOTE: Once created, you should add your new layer to your | ||
835 | bblayers.conf file in order for it to be subsequently seen and | ||
836 | modified by the yocto-kernel tool. Instructions for doing this can | ||
837 | be found in the README file generated in the layer's top-level | ||
838 | directory. | ||
839 | |||
840 | See 'yocto layer help create' for more detailed instructions. | ||
841 | """ | ||
842 | |||
843 | yocto_layer_create_help = """ | ||
844 | |||
845 | NAME | ||
846 | yocto-layer create - Create a new generic Yocto layer | ||
847 | |||
848 | SYNOPSIS | ||
849 | yocto-layer create <layer-name> [layer_priority] | ||
850 | [-o <DIRNAME> | --outdir <DIRNAME>] | ||
851 | [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>] | ||
852 | |||
853 | DESCRIPTION | ||
854 | This command creates a generic Yocto layer based on the specified | ||
855 | parameters. The new layer will be a new Yocto layer contained by | ||
856 | default within the top-level directory specified as | ||
857 | 'meta-layer-name'. The -o option can be used to place the layer | ||
858 | in a directory with a different name and location. | ||
859 | |||
860 | If layer_priority is specified, a simple layer will be created | ||
861 | using the given layer priority, and the user will not be prompted | ||
862 | for further input. | ||
863 | |||
864 | The layer-specific properties that define the values that will be | ||
865 | used to generate the layer can be specified on the command-line | ||
866 | using the -i option and supplying a JSON object consisting of the | ||
867 | set of name:value pairs needed by the layer. | ||
868 | |||
869 | If the -i option is not used, the user will be interactively | ||
870 | prompted for each of the required property values, which will then | ||
871 | be used as values for layer generation. | ||
872 | |||
873 | The set of properties available can be listed using the | ||
874 | 'yocto-layer list' command. | ||
875 | |||
876 | Specifying -c causes the Python code generated and executed to | ||
877 | create the layer to be dumped to the 'bspgen.out' file in the | ||
878 | current directory, and is useful for debugging. | ||
879 | |||
880 | NOTE: Once created, you should add your new layer to your | ||
881 | bblayers.conf file in order for it to be subsequently seen and | ||
882 | modified by the yocto-kernel tool. Instructions for doing this | ||
883 | can be found in the README file generated in the layer's top-level | ||
884 | directory. | ||
885 | |||
886 | For example, assuming your poky repo is at /path/to/poky, your new | ||
887 | layer is at /path/to/poky/meta-mylayer, and your build directory | ||
888 | is /path/to/build: | ||
889 | |||
890 | $ gedit /path/to/build/conf/bblayers.conf | ||
891 | |||
892 | BBLAYERS ?= " \\ | ||
893 | /path/to/poky/meta \\ | ||
894 | /path/to/poky/meta-yocto \\ | ||
895 | /path/to/poky/meta-mylayer \\ | ||
896 | " | ||
897 | """ | ||
898 | |||
899 | yocto_layer_list_usage = """ | ||
900 | |||
901 | usage: yocto-layer list properties | ||
902 | [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>] | ||
903 | yocto-layer list property <xxx> | ||
904 | [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>] | ||
905 | |||
906 | This command enumerates the complete set of possible values for a | ||
907 | specified option or property needed by the layer creation process. | ||
908 | |||
909 | The first form enumerates all the possible properties that exist and | ||
910 | must have values specified for them in the 'yocto-layer create' | ||
911 | command. | ||
912 | |||
913 | The second form enumerates all the possible values that exist and can | ||
914 | be specified for any of the enumerable properties in the 'yocto-layer | ||
915 | create' command. | ||
916 | |||
917 | See 'yocto-layer help list' for more details. | ||
918 | """ | ||
919 | |||
920 | yocto_layer_list_help = """ | ||
921 | |||
922 | NAME | ||
923 | yocto-layer list - List available values for layer input options and properties | ||
924 | |||
925 | SYNOPSIS | ||
926 | yocto-layer list properties | ||
            [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
928 | yocto-layer list property <xxx> | ||
            [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
930 | |||
931 | DESCRIPTION | ||
932 | This command enumerates the complete set of possible values for a | ||
933 | specified option or property needed by the layer creation process. | ||
934 | |||
935 | The first form enumerates all the possible properties that exist | ||
936 | and must have values specified for them in the 'yocto-layer | ||
937 | create' command. This command is mainly meant to aid the | ||
938 | development of user interface alternatives to the default | ||
939 | text-based prompting interface. If the -o option is specified, | ||
940 | the list of properties, in addition to being displayed, will be | ||
941 | written to the specified file as a JSON object. In this case, the | ||
942 | object will consist of the set of name:value pairs corresponding | ||
943 | to the (possibly nested) dictionary of properties defined by the | ||
944 | input statements used by the BSP. Some example output for the | ||
945 | 'list properties' command: | ||
946 | |||
947 | $ yocto-layer list properties | ||
948 | "example_bbappend_name" : { | ||
949 | "default" : example | ||
950 | "msg" : Please enter the name you'd like to use for your bbappend file: | ||
951 | "type" : edit | ||
952 | "prio" : 20 | ||
953 | "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall | ||
954 | } | ||
955 | "create_example_recipe" : { | ||
956 | "default" : n | ||
957 | "msg" : Would you like to have an example recipe created? (y/n) | ||
958 | "type" : boolean | ||
959 | "prio" : 20 | ||
960 | "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall | ||
961 | } | ||
962 | "example_recipe_name" : { | ||
963 | "default" : example | ||
964 | "msg" : Please enter the name you'd like to use for your example recipe: | ||
965 | "type" : edit | ||
966 | "prio" : 20 | ||
967 | "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall | ||
968 | } | ||
969 | "layer_priority" : { | ||
970 | "default" : 6 | ||
971 | "msg" : Please enter the layer priority you'd like to use for the layer: | ||
972 | "type" : edit | ||
973 | "prio" : 20 | ||
974 | "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall | ||
975 | } | ||
976 | "create_example_bbappend" : { | ||
977 | "default" : n | ||
978 | "msg" : Would you like to have an example bbappend file created? (y/n) | ||
979 | "type" : boolean | ||
980 | "prio" : 20 | ||
981 | "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall | ||
982 | } | ||
983 | "example_bbappend_version" : { | ||
984 | "default" : 0.1 | ||
985 | "msg" : Please enter the version number you'd like to use for your bbappend file (this should match the recipe you're appending to): | ||
986 | "type" : edit | ||
987 | "prio" : 20 | ||
988 | "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall | ||
989 | } | ||
990 | |||
991 | Each entry in the output consists of the name of the input element | ||
992 | e.g. "layer_priority", followed by the properties defined for that | ||
993 | element enclosed in braces. This information should provide | ||
994 | sufficient information to create a complete user interface. Two | ||
995 | features of the scheme provide for conditional input. First, if a | ||
996 | Python "if" statement appears in place of an input element name, | ||
997 | the set of enclosed input elements apply and should be presented | ||
998 | to the user only if the 'if' statement evaluates to true. The | ||
999 | test in the if statement will always reference another input | ||
1000 | element in the list, which means that the element being tested | ||
1001 | should be presented to the user before the elements enclosed by | ||
1002 | the if block. Secondly, in a similar way, some elements contain | ||
    "depends-on" and "depends-on-val" tags, which mean that the
1004 | affected input element should only be presented to the user if the | ||
1005 | element it depends on has already been presented to the user and | ||
1006 | the user has selected the specified value for that element. | ||
1007 | |||
1008 | The second form enumerates all the possible values that exist and | ||
1009 | can be specified for any of the enumerable properties in the | ||
1010 | 'yocto-layer create' command. If the -o option is specified, the | ||
1011 | list of values for the given property, in addition to being | ||
1012 | displayed, will be written to the specified file as a JSON object. | ||
1013 | In this case, the object will consist of the set of name:value | ||
1014 | pairs corresponding to the array of property values associated | ||
1015 | with the property. | ||
1016 | |||
1017 | $ yocto-layer list property layer_priority | ||
1018 | [no output - layer_priority is a text field that has no enumerable values] | ||
1019 | |||
1020 | The second form as well is meant mainly for developers of | ||
1021 | alternative interfaces - it allows the developer to fetch the | ||
1022 | possible values for a given input element on-demand. This | ||
1023 | on-demand capability is especially valuable for elements that | ||
1024 | require relatively expensive remote operations to fulfill, such as | ||
1025 | the example that returns the set of branches available in a remote | ||
1026 | git tree above. | ||
1027 | |||
1028 | """ | ||
1029 | |||
1030 | ## | ||
1031 | # test code | ||
1032 | ## | ||
1033 | |||
# Sample BSP property set exercised by the module's test code (see the
# '# test code' section heading above).  Mirrors the JSON name:value
# input that would normally be supplied via 'yocto-bsp create -i'.
test_bsp_properties = {
    'smp': 'yes',
    'touchscreen': 'yes',
    'keyboard': 'no',
    'xserver': 'yes',
    'xserver_choice': 'xserver-i915',
    'features': ['goodfeature', 'greatfeature'],
    'tunefile': 'tune-quark',
}
1043 | |||
diff --git a/scripts/lib/bsp/kernel.py b/scripts/lib/bsp/kernel.py new file mode 100644 index 0000000000..ba68b60fcb --- /dev/null +++ b/scripts/lib/bsp/kernel.py | |||
@@ -0,0 +1,1071 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2012, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This module implements the kernel-related functions used by | ||
22 | # 'yocto-kernel' to manage kernel config items and patches for Yocto | ||
23 | # BSPs. | ||
24 | # | ||
25 | # AUTHORS | ||
26 | # Tom Zanussi <tom.zanussi (at] intel.com> | ||
27 | # | ||
28 | |||
29 | import sys | ||
30 | import os | ||
31 | import shutil | ||
32 | from tags import * | ||
33 | import glob | ||
34 | import subprocess | ||
35 | from engine import create_context | ||
36 | |||
37 | |||
38 | def find_bblayers(): | ||
39 | """ | ||
40 | Find and return a sanitized list of the layers found in BBLAYERS. | ||
41 | """ | ||
42 | try: | ||
43 | builddir = os.environ["BUILDDIR"] | ||
44 | except KeyError: | ||
45 | print "BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)" | ||
46 | sys.exit(1) | ||
47 | bblayers_conf = os.path.join(builddir, "conf/bblayers.conf") | ||
48 | |||
49 | layers = [] | ||
50 | |||
51 | bitbake_env_cmd = "bitbake -e" | ||
52 | bitbake_env_lines = subprocess.Popen(bitbake_env_cmd, shell=True, | ||
53 | stdout=subprocess.PIPE).stdout.read() | ||
54 | |||
55 | if not bitbake_env_lines: | ||
56 | print "Couldn't get '%s' output, exiting." % bitbake_env_cmd | ||
57 | sys.exit(1) | ||
58 | |||
59 | for line in bitbake_env_lines.split('\n'): | ||
60 | bblayers = get_line_val(line, "BBLAYERS") | ||
61 | if (bblayers): | ||
62 | break | ||
63 | |||
64 | if not bblayers: | ||
65 | print "Couldn't find BBLAYERS in %s output, exiting." % \ | ||
66 | bitbake_env_cmd | ||
67 | sys.exit(1) | ||
68 | |||
69 | raw_layers = bblayers.split() | ||
70 | |||
71 | for layer in raw_layers: | ||
72 | if layer == 'BBLAYERS' or '=' in layer: | ||
73 | continue | ||
74 | layers.append(layer) | ||
75 | |||
76 | return layers | ||
77 | |||
78 | |||
def get_line_val(line, key):
    """
    Extract the value from a KEY="value" line.

    Returns the unquoted value if 'line' assigns 'key', otherwise
    None.  Splits on the first '=' only, so values that themselves
    contain '=' (e.g. paths or nested assignments) are returned intact
    rather than truncated.
    """
    if line.startswith(key + "="):
        # split on the first '=' only - the value may contain '=' itself
        stripped_line = line.split('=', 1)[1]
        stripped_line = stripped_line.replace('\"', '')
        return stripped_line
    return None
88 | |||
89 | |||
def find_meta_layer():
    """
    Find and return the meta layer in BBLAYERS.

    Returns the first layer path ending in "meta", or None if no such
    layer is configured.
    """
    for candidate in find_bblayers():
        if candidate.endswith("meta"):
            return candidate

    return None
101 | |||
102 | |||
103 | def find_bsp_layer(machine): | ||
104 | """ | ||
105 | Find and return a machine's BSP layer in BBLAYERS. | ||
106 | """ | ||
107 | layers = find_bblayers() | ||
108 | |||
109 | for layer in layers: | ||
110 | if layer.endswith(machine): | ||
111 | return layer | ||
112 | |||
113 | print "Unable to find the BSP layer for machine %s." % machine | ||
114 | print "Please make sure it is listed in bblayers.conf" | ||
115 | sys.exit(1) | ||
116 | |||
117 | |||
def gen_choices_str(choices):
    """
    Generate a numbered list of choices from a list of choices for
    display to the user.

    Each choice is rendered as "\t<n>) <choice>\n", numbered from 1.
    """
    numbered = ["\t%d) %s\n" % (n + 1, choice)
                for n, choice in enumerate(choices)]
    return "".join(numbered)
129 | |||
130 | |||
def open_user_file(scripts_path, machine, userfile, mode):
    """
    Find one of the user files (user-config.cfg, user-patches.scc)
    associated with the machine (could be in files/,
    linux-yocto-custom/, etc).  Returns the open file if found, None
    otherwise.

    The caller is responsible for closing the file returned.
    """
    linuxdir = os.path.join(find_bsp_layer(machine), "recipes-kernel/linux")
    for entry in os.listdir(linuxdir):
        subdir = os.path.join(linuxdir, entry)
        if not os.path.isdir(subdir):
            continue
        # try each subdirectory in turn until the file opens
        try:
            return open(os.path.join(subdir, userfile), mode)
        except IOError:
            pass
    return None
153 | |||
154 | |||
def read_config_items(scripts_path, machine):
    """
    Find and return a list of config items (CONFIG_XXX) in a machine's
    user-defined config fragment [${machine}-user-config.cfg].
    """
    f = open_user_file(scripts_path, machine, machine+"-user-config.cfg", "r")
    lines = f.readlines()
    f.close()

    stripped = [line.strip() for line in lines]
    # keep only non-empty, non-comment lines
    return [s for s in stripped if s and not s.startswith("#")]
171 | |||
172 | |||
def write_config_items(scripts_path, machine, config_items):
    """
    Write (replace) the list of config items (CONFIG_XXX) in a
    machine's user-defined config fragment [${machine}-user-config.cfg].
    """
    f = open_user_file(scripts_path, machine, machine+"-user-config.cfg", "w")
    f.writelines([item + "\n" for item in config_items])
    f.close()

    # bump the kernel recipe PR so the rewritten fragment is picked up
    kernel_contents_changed(scripts_path, machine)
184 | |||
185 | |||
186 | def yocto_kernel_config_list(scripts_path, machine): | ||
187 | """ | ||
188 | Display the list of config items (CONFIG_XXX) in a machine's | ||
189 | user-defined config fragment [${machine}-user-config.cfg]. | ||
190 | """ | ||
191 | config_items = read_config_items(scripts_path, machine) | ||
192 | |||
193 | print "The current set of machine-specific kernel config items for %s is:" % machine | ||
194 | print gen_choices_str(config_items) | ||
195 | |||
196 | |||
197 | def yocto_kernel_config_rm(scripts_path, machine): | ||
198 | """ | ||
199 | Display the list of config items (CONFIG_XXX) in a machine's | ||
200 | user-defined config fragment [${machine}-user-config.cfg], prompt the user | ||
201 | for one or more to remove, and remove them. | ||
202 | """ | ||
203 | config_items = read_config_items(scripts_path, machine) | ||
204 | |||
205 | print "Specify the kernel config items to remove:" | ||
206 | input = raw_input(gen_choices_str(config_items)) | ||
207 | rm_choices = input.split() | ||
208 | rm_choices.sort() | ||
209 | |||
210 | removed = [] | ||
211 | |||
212 | for choice in reversed(rm_choices): | ||
213 | try: | ||
214 | idx = int(choice) - 1 | ||
215 | except ValueError: | ||
216 | print "Invalid choice (%s), exiting" % choice | ||
217 | sys.exit(1) | ||
218 | if idx < 0 or idx >= len(config_items): | ||
219 | print "Invalid choice (%d), exiting" % (idx + 1) | ||
220 | sys.exit(1) | ||
221 | removed.append(config_items.pop(idx)) | ||
222 | |||
223 | write_config_items(scripts_path, machine, config_items) | ||
224 | |||
225 | print "Removed items:" | ||
226 | for r in removed: | ||
227 | print "\t%s" % r | ||
228 | |||
229 | |||
230 | def yocto_kernel_config_add(scripts_path, machine, config_items): | ||
231 | """ | ||
232 | Add one or more config items (CONFIG_XXX) to a machine's | ||
233 | user-defined config fragment [${machine}-user-config.cfg]. | ||
234 | """ | ||
235 | new_items = [] | ||
236 | dup_items = [] | ||
237 | |||
238 | cur_items = read_config_items(scripts_path, machine) | ||
239 | |||
240 | for item in config_items: | ||
241 | if not item.startswith("CONFIG") or (not "=y" in item and not "=m" in item): | ||
242 | print "Invalid config item (%s), exiting" % item | ||
243 | sys.exit(1) | ||
244 | if item not in cur_items and item not in new_items: | ||
245 | new_items.append(item) | ||
246 | else: | ||
247 | dup_items.append(item) | ||
248 | |||
249 | if len(new_items) > 0: | ||
250 | cur_items.extend(new_items) | ||
251 | write_config_items(scripts_path, machine, cur_items) | ||
252 | print "Added item%s:" % ("" if len(new_items)==1 else "s") | ||
253 | for n in new_items: | ||
254 | print "\t%s" % n | ||
255 | |||
256 | if len(dup_items) > 0: | ||
257 | output="The following item%s already exist%s in the current configuration, ignoring %s:" % \ | ||
258 | (("","s", "it") if len(dup_items)==1 else ("s", "", "them" )) | ||
259 | print output | ||
260 | for n in dup_items: | ||
261 | print "\t%s" % n | ||
262 | |||
def find_current_kernel(bsp_layer, machine):
    """
    Determine the kernel and version currently being used in the BSP.

    Parses conf/machine/<machine>.conf in 'bsp_layer' for
    PREFERRED_PROVIDER_virtual/kernel and the matching
    PREFERRED_VERSION_* setting.  Returns "<kernel>_<version>" if both
    are found, "<kernel>" if only the provider is set, or None
    otherwise.
    """
    machine_conf = os.path.join(bsp_layer, "conf/machine/" + machine + ".conf")

    preferred_kernel = preferred_kernel_version = preferred_version_varname = None

    # read the whole file then close immediately - the original leaked
    # the file handle
    f = open(machine_conf, "r")
    lines = f.readlines()
    f.close()

    for line in lines:
        if line.strip().startswith("PREFERRED_PROVIDER_virtual/kernel"):
            preferred_kernel = line.split()[-1].replace('\"','')
            preferred_version_varname = "PREFERRED_VERSION_" + preferred_kernel
        # the version line can only be recognized once the provider
        # (and thus the variable name) is known
        if preferred_version_varname and line.strip().startswith(preferred_version_varname):
            preferred_kernel_version = line.split()[-1].replace('\"','').replace('%','')

    if preferred_kernel and preferred_kernel_version:
        return preferred_kernel + "_" + preferred_kernel_version
    elif preferred_kernel:
        return preferred_kernel
    return None
287 | |||
288 | |||
def find_filesdir(scripts_path, machine):
    """
    Find the name of the 'files' dir associated with the machine
    (could be in files/, linux-yocto-custom/, etc).  Returns the name
    of the files dir if found, None otherwise.
    """
    linuxdir = os.path.join(find_bsp_layer(machine), "recipes-kernel/linux")

    # This could be files/ or linux-yocto-custom/; there's no way to
    # tell them apart, so whichever subdirectory is seen last wins
    # (normally there is only one).
    filesdir = None
    for entry in os.listdir(linuxdir):
        candidate = os.path.join(linuxdir, entry)
        if os.path.isdir(candidate):
            filesdir = candidate

    return filesdir
307 | |||
308 | |||
def read_patch_items(scripts_path, machine):
    """
    Find and return a list of patch items in a machine's user-defined
    patch list [${machine}-user-patches.scc].
    """
    f = open_user_file(scripts_path, machine, machine+"-user-patches.scc", "r")
    lines = f.readlines()
    f.close()

    patch_items = []
    for line in lines:
        fields = line.strip().split()
        # only non-comment lines of the form "patch <name>" count
        if not fields or fields[0].startswith("#") or fields[0] != "patch":
            continue
        patch_items.append(fields[1])

    return patch_items
328 | |||
329 | |||
def write_patch_items(scripts_path, machine, patch_items):
    """
    Write (replace) the list of patches in a machine's user-defined
    patch list [${machine}-user-patches.scc].
    """
    f = open_user_file(scripts_path, machine, machine+"-user-patches.scc", "w")
    f.writelines(["patch " + item + "\n" for item in patch_items])
    f.close()

    # bump the kernel recipe PR so the new patch list takes effect
    kernel_contents_changed(scripts_path, machine)
341 | |||
342 | |||
343 | def yocto_kernel_patch_list(scripts_path, machine): | ||
344 | """ | ||
345 | Display the list of patches in a machine's user-defined patch list | ||
346 | [${machine}-user-patches.scc]. | ||
347 | """ | ||
348 | patches = read_patch_items(scripts_path, machine) | ||
349 | |||
350 | print "The current set of machine-specific patches for %s is:" % machine | ||
351 | print gen_choices_str(patches) | ||
352 | |||
353 | |||
354 | def yocto_kernel_patch_rm(scripts_path, machine): | ||
355 | """ | ||
356 | Remove one or more patches from a machine's user-defined patch | ||
357 | list [${machine}-user-patches.scc]. | ||
358 | """ | ||
359 | patches = read_patch_items(scripts_path, machine) | ||
360 | |||
361 | print "Specify the patches to remove:" | ||
362 | input = raw_input(gen_choices_str(patches)) | ||
363 | rm_choices = input.split() | ||
364 | rm_choices.sort() | ||
365 | |||
366 | removed = [] | ||
367 | |||
368 | filesdir = find_filesdir(scripts_path, machine) | ||
369 | if not filesdir: | ||
370 | print "Couldn't rm patch(es) since we couldn't find a 'files' dir" | ||
371 | sys.exit(1) | ||
372 | |||
373 | for choice in reversed(rm_choices): | ||
374 | try: | ||
375 | idx = int(choice) - 1 | ||
376 | except ValueError: | ||
377 | print "Invalid choice (%s), exiting" % choice | ||
378 | sys.exit(1) | ||
379 | if idx < 0 or idx >= len(patches): | ||
380 | print "Invalid choice (%d), exiting" % (idx + 1) | ||
381 | sys.exit(1) | ||
382 | filesdir_patch = os.path.join(filesdir, patches[idx]) | ||
383 | if os.path.isfile(filesdir_patch): | ||
384 | os.remove(filesdir_patch) | ||
385 | removed.append(patches[idx]) | ||
386 | patches.pop(idx) | ||
387 | |||
388 | write_patch_items(scripts_path, machine, patches) | ||
389 | |||
390 | print "Removed patches:" | ||
391 | for r in removed: | ||
392 | print "\t%s" % r | ||
393 | |||
394 | |||
def yocto_kernel_patch_add(scripts_path, machine, patches):
    """
    Add one or more patches to a machine's user-defined patch list
    [${machine}-user-patches.scc].

    Each patch is copied into the BSP's 'files' directory and its
    basename appended to the user patch list; exits with an error if
    any patch is already listed, can't be found, or no 'files' dir
    exists.
    """
    existing_patches = read_patch_items(scripts_path, machine)

    # refuse duplicates before copying anything
    for patch in patches:
        if os.path.basename(patch) in existing_patches:
            print "Couldn't add patch (%s) since it's already been added" % os.path.basename(patch)
            sys.exit(1)

    filesdir = find_filesdir(scripts_path, machine)
    if not filesdir:
        # NOTE(review): 'patch' here is the loop variable leaked from
        # the loop above - if 'patches' is empty this raises NameError,
        # and otherwise the message names an arbitrary (last) patch.
        # Worth confirming the intended message.
        print "Couldn't add patch (%s) since we couldn't find a 'files' dir to add it to" % os.path.basename(patch)
        sys.exit(1)

    new_patches = []

    for patch in patches:
        if not os.path.isfile(patch):
            print "Couldn't find patch (%s), exiting" % patch
            sys.exit(1)
        basename = os.path.basename(patch)
        filesdir_patch = os.path.join(filesdir, basename)
        # copy the patch into the BSP's files dir; the patch list only
        # records the basename
        shutil.copyfile(patch, filesdir_patch)
        new_patches.append(basename)

    # append to the current list and write it back
    # (write_patch_items also bumps the kernel recipe PR)
    cur_items = read_patch_items(scripts_path, machine)
    cur_items.extend(new_patches)
    write_patch_items(scripts_path, machine, cur_items)

    print "Added patches:"
    for n in new_patches:
        print "\t%s" % n
430 | |||
431 | |||
def inc_pr(line):
    """
    Add 1 to the PR value in the given bbappend PR line.  For the PR
    lines in kernel .bbappends after modifications.  Handles PRs of
    the form PR := "${PR}.1" as well as PR = "r0".

    Returns the line with the quoted PR value incremented, terminated
    by a newline.
    """
    # everything from the opening quote onward is the quoted PR value
    idx = line.find("\"")

    pr_str = line[idx:].replace('\"','')
    fields = pr_str.split('.')
    if len(fields) > 1:
        # PR := "${PR}.n" form: bump the numeric suffix after the dot
        fields[1] = str(int(fields[1]) + 1)
        pr_str = "\"" + '.'.join(fields) + "\"\n"
    else:
        # PR = "rn" form: bump the number after the leading 'r'
        # (the original also computed an unused second quote index here)
        pr_val = pr_str[1:]
        pr_str = "\"" + "r" + str(int(pr_val) + 1) + "\"\n"

    return line[:idx] + pr_str
453 | |||
454 | |||
455 | def kernel_contents_changed(scripts_path, machine): | ||
456 | """ | ||
457 | Do what we need to do to notify the system that the kernel | ||
458 | recipe's contents have changed. | ||
459 | """ | ||
460 | layer = find_bsp_layer(machine) | ||
461 | |||
462 | kernel = find_current_kernel(layer, machine) | ||
463 | if not kernel: | ||
464 | print "Couldn't determine the kernel for this BSP, exiting." | ||
465 | sys.exit(1) | ||
466 | |||
467 | kernel_bbfile = os.path.join(layer, "recipes-kernel/linux/" + kernel + ".bbappend") | ||
468 | if not os.path.isfile(kernel_bbfile): | ||
469 | kernel_bbfile = os.path.join(layer, "recipes-kernel/linux/" + kernel + ".bb") | ||
470 | if not os.path.isfile(kernel_bbfile): | ||
471 | return | ||
472 | kernel_bbfile_prev = kernel_bbfile + ".prev" | ||
473 | shutil.copyfile(kernel_bbfile, kernel_bbfile_prev) | ||
474 | |||
475 | ifile = open(kernel_bbfile_prev, "r") | ||
476 | ofile = open(kernel_bbfile, "w") | ||
477 | ifile_lines = ifile.readlines() | ||
478 | for ifile_line in ifile_lines: | ||
479 | if ifile_line.strip().startswith("PR"): | ||
480 | ifile_line = inc_pr(ifile_line) | ||
481 | ofile.write(ifile_line) | ||
482 | ofile.close() | ||
483 | ifile.close() | ||
484 | |||
485 | |||
def kernels(context):
    """
    Return the list of available kernels in the BSP i.e. corresponding
    to the kernel .bbappends found in the layer.
    """
    bbglob = os.path.join(context["scripts_path"],
                          "lib/bsp/substrate/target/arch/" + context["arch"],
                          "recipes-kernel/linux/*.bbappend")

    kernel_names = []
    for bbappend in glob.glob(bbglob):
        name = os.path.splitext(os.path.basename(bbappend))[0]
        # strip any templating prefix up to and including CLOSE_TAG
        tag_end = name.find(CLOSE_TAG)
        if tag_end != -1:
            name = name[tag_end + len(CLOSE_TAG):].strip()
        kernel_names.append(name)

    # a user-supplied 'custom' kernel is always an option
    kernel_names.append("custom")

    return kernel_names
509 | |||
510 | |||
def extract_giturl(file):
    """
    Extract the git url of the kernel repo from the kernel recipe's
    SRC_URI.

    Returns the first git:// url found, with its 'git' scheme replaced
    by the value of the 'protocol=' parameter when present, or None if
    no git url is found.
    """
    url = None
    # read the whole file then close immediately - the original leaked
    # the file handle
    f = open(file, "r")
    lines = f.readlines()
    f.close()

    for line in lines:
        line = line.strip()
        if line.startswith("SRC_URI"):
            line = line[len("SRC_URI"):].strip()
            if line.startswith("="):
                line = line[1:].strip()
                if line.startswith("\""):
                    line = line[1:].strip()
                    prot = "git"
                    for s in line.split(";"):
                        if s.startswith("git://"):
                            url = s
                        if s.startswith("protocol="):
                            prot = s.split("=")[1]
                    if url:
                        # swap the 'git' scheme for the fetch protocol
                        url = prot + url[3:]
    return url
536 | |||
537 | |||
def find_giturl(context):
    """
    Find the git url of the kernel repo from the kernel recipe's
    SRC_URI.

    Matches the context's 'filename' against the kernel recipes in the
    meta layer and returns the git url from the first matching recipe,
    or None if none match.
    """
    # the original also read context["name"] and context["scripts_path"]
    # into unused locals; dropped as dead code (and to avoid a spurious
    # KeyError on contexts that lack those keys)
    filebase = context["filename"]

    meta_layer = find_meta_layer()

    kerndir = os.path.join(meta_layer, "recipes-kernel/linux")
    for kernel in glob.glob(os.path.join(kerndir, "*.bb")):
        filename = os.path.splitext(os.path.basename(kernel))[0]
        if filename in filebase:
            return extract_giturl(kernel)

    return None
559 | |||
560 | |||
def read_features(scripts_path, machine):
    """
    Find and return a list of features in a machine's user-defined
    features fragment [${machine}-user-features.scc].
    """
    f = open_user_file(scripts_path, machine, machine + "-user-features.scc", "r")
    feature_list = []
    for line in f.readlines():
        stripped = line.strip()
        # Each non-blank, non-comment line looks like "include <feature>";
        # the feature path is the second token.
        if stripped and not stripped.startswith("#"):
            feature_list.append(stripped.split()[1].strip())
    f.close()

    return feature_list
578 | |||
579 | |||
def write_features(scripts_path, machine, features):
    """
    Write (replace) the list of feature items in a machine's
    user-defined features fragment [${machine}-user-features.scc].
    """
    f = open_user_file(scripts_path, machine, machine + "-user-features.scc", "w")
    f.writelines(["include " + feature + "\n" for feature in features])
    f.close()

    # Regenerate anything that depends on the kernel fragments.
    kernel_contents_changed(scripts_path, machine)
591 | |||
592 | |||
def yocto_kernel_feature_list(scripts_path, machine):
    """
    Display the list of features used in a machine's user-defined
    features fragment [${machine}-user-features.scc].
    """
    current = read_features(scripts_path, machine)

    print("The current set of machine-specific features for %s is:" % machine)
    print(gen_choices_str(current))
602 | |||
603 | |||
def yocto_kernel_feature_rm(scripts_path, machine):
    """
    Display the list of features used in a machine's user-defined
    features fragment [${machine}-user-features.scc], prompt the user
    for one or more to remove, and remove them.
    """
    features = read_features(scripts_path, machine)

    print("Specify the features to remove:")
    choices = raw_input(gen_choices_str(features)).split()
    choices.sort()

    removed = []

    # Pop from the highest index down so earlier removals don't shift
    # the indices of later ones.
    for choice in reversed(choices):
        try:
            idx = int(choice) - 1
        except ValueError:
            print("Invalid choice (%s), exiting" % choice)
            sys.exit(1)
        if idx < 0 or idx >= len(features):
            print("Invalid choice (%d), exiting" % (idx + 1))
            sys.exit(1)
        removed.append(features.pop(idx))

    write_features(scripts_path, machine, features)

    print("Removed features:")
    for feature in removed:
        print("\t%s" % feature)
635 | |||
636 | |||
def yocto_kernel_feature_add(scripts_path, machine, features):
    """
    Add one or more features to a machine's user-defined features
    fragment [${machine}-user-features.scc].
    """
    # Validate before touching the fragment: every item must be a
    # feature description (.scc) file.
    for feature in features:
        if not feature.endswith(".scc"):
            print("Invalid feature (%s), exiting" % feature)
            sys.exit(1)

    new_items = list(features)

    cur_items = read_features(scripts_path, machine)
    cur_items.extend(new_items)

    write_features(scripts_path, machine, cur_items)

    print("Added features:")
    for feature in new_items:
        print("\t%s" % feature)
658 | |||
659 | |||
def find_feature_url(git_url):
    """
    Find the url of the kern-features.rc file for the kernel repo
    specified by the BSP's kernel recipe SRC_URI.  Returns "" when
    git_url isn't a git:// url.
    """
    if not git_url.startswith("git://"):
        return ""

    parts = git_url[len("git://"):].strip().split("/")
    repo = parts[1]
    if repo.endswith(".git"):
        repo = repo[:-len(".git")]

    return ("http://" + parts[0] + "/cgit/cgit.cgi/" + repo +
            "/plain/meta/cfg/kern-features.rc?h=meta")
675 | |||
676 | |||
def find_feature_desc(lines):
    """
    Find the feature description and compatibility in the passed-in
    set of lines.  Returns a string of the form 'desc [compat]';
    either part falls back to a placeholder when not found.
    """
    desc = "no description available"
    compat = "unknown"

    for line in lines:
        where = line.find("KFEATURE_DESCRIPTION")
        if where != -1:
            desc = line[where + len("KFEATURE_DESCRIPTION"):].strip()
            # Drop surrounding double quotes, if present.
            if desc.startswith("\""):
                desc = desc[1:]
                if desc.endswith("\""):
                    desc = desc[:-1]
            continue
        where = line.find("KFEATURE_COMPATIBILITY")
        if where != -1:
            compat = line[where + len("KFEATURE_COMPATIBILITY"):].strip()

    return "%s [%s]" % (desc, compat)
700 | |||
701 | |||
def print_feature_descs(layer, feature_dir):
    """
    Print the feature descriptions for the features in feature_dir.

    Editor backup files (ending in ~ or #) are skipped; only .scc
    files are described.
    """
    kernel_files_features = os.path.join(layer, "recipes-kernel/linux/files/" +
                                         feature_dir)
    for root, dirs, files in os.walk(kernel_files_features):
        for fname in files:
            # Skip editor backup/autosave files.
            if fname.endswith("~") or fname.endswith("#"):
                continue
            if fname.endswith(".scc"):
                fullpath = os.path.join(layer, "recipes-kernel/linux/files/" +
                                        feature_dir + "/" + fname)
                # Use a context manager so the file is closed (the
                # original leaked the handle) and avoid shadowing the
                # 'file' builtin.
                with open(fullpath) as f:
                    feature_desc = find_feature_desc(f.readlines())
                print(feature_dir + "/" + fname + ": " + feature_desc)
718 | |||
719 | |||
def yocto_kernel_available_features_list(scripts_path, machine):
    """
    Display the list of all the kernel features available for use in
    BSPs, as gathered from the set of feature sources.
    """
    layer = find_bsp_layer(machine)
    kernel = find_current_kernel(layer, machine)
    if not kernel:
        print("Couldn't determine the kernel for this BSP, exiting.")
        sys.exit(1)

    context = create_context(machine, "arch", scripts_path)
    context["name"] = "name"
    context["filename"] = kernel
    giturl = find_giturl(context)
    feature_url = find_feature_url(giturl)

    # Fetch the remote kern-features.rc file.
    output = subprocess.Popen("wget -q -O - " + feature_url,
                              shell=True, stdout=subprocess.PIPE).stdout.read()

    print("The current set of kernel features available to %s is:\n" % machine)

    if output:
        in_kernel_options = False
        for line in output.split("\n"):
            if "=" not in line:
                # A non-assignment line ends the kernel-options
                # section once we've entered it.
                if in_kernel_options:
                    break
                if "kernel-options" in line:
                    in_kernel_options = True
                continue
            if in_kernel_options:
                # Lines look like "<type> = <feature>".
                feature = line.split("=")[1].strip()
                print("%s: %s" % (feature, get_feature_desc(giturl, feature)))

    print("[local]")

    print_feature_descs(layer, "cfg")
    print_feature_descs(layer, "features")
763 | |||
764 | |||
def find_feature_desc_url(git_url, feature):
    """
    Find the url of the kernel feature in the kernel repo specified
    by the BSP's kernel recipe SRC_URI.  Returns "" when git_url
    isn't a git:// url.
    """
    if not git_url.startswith("git://"):
        return ""

    parts = git_url[len("git://"):].strip().split("/")
    repo = parts[1]
    if repo.endswith(".git"):
        repo = repo[:-len(".git")]

    return ("http://" + parts[0] + "/cgit/cgit.cgi/" + repo +
            "/plain/meta/cfg/kernel-cache/" + feature + "?h=meta")
780 | |||
781 | |||
def get_feature_desc(git_url, feature):
    """
    Return a feature description of the form 'description [compatibility]'
    for the given feature, mined from the feature's file fetched from
    the kernel repo.
    """
    desc_url = find_feature_desc_url(git_url, feature)
    # Fetch the feature file and scan it for description/compat tags.
    output = subprocess.Popen("wget -q -O - " + desc_url,
                              shell=True, stdout=subprocess.PIPE).stdout.read()

    return find_feature_desc(output.split("\n"))
792 | |||
793 | |||
def yocto_kernel_feature_describe(scripts_path, machine, feature):
    """
    Display the description of a specific kernel feature available
    for use in a BSP.
    """
    layer = find_bsp_layer(machine)

    kernel = find_current_kernel(layer, machine)
    if not kernel:
        print("Couldn't determine the kernel for this BSP, exiting.")
        sys.exit(1)

    context = create_context(machine, "arch", scripts_path)
    context["name"] = "name"
    context["filename"] = kernel

    print(get_feature_desc(find_giturl(context), feature))
814 | |||
815 | |||
def check_feature_name(feature_name):
    """
    Sanity-check the feature name for create/destroy. Return False if not OK.
    """
    # Must be a bare .scc filename with no directory component.
    if not feature_name.endswith(".scc"):
        print("Invalid feature name (must end with .scc) [%s], exiting" % feature_name)
        return False

    if "/" in feature_name:
        print("Invalid feature name (don't specify directory) [%s], exiting" % feature_name)
        return False

    return True
829 | |||
830 | |||
def check_create_input(feature_items):
    """
    Sanity-check the create input. Return False if not OK.

    feature_items is expected to be [feature_name, description,
    compatibility, patch/CONFIG_ items...].
    """
    if not check_feature_name(feature_items[0]):
        return False

    # The second and third items must be the description and
    # compatibility strings, not patches or config options.
    if feature_items[1].endswith(".patch") or feature_items[1].startswith("CONFIG_"):
        # Fixed typo: "compatibilty" -> "compatibility".
        print("Missing description and/or compatibility [%s], exiting" % feature_items[1])
        return False

    if feature_items[2].endswith(".patch") or feature_items[2].startswith("CONFIG_"):
        # Fixed copy-paste bug: the original printed feature_items[1]
        # here even though the check is on feature_items[2].
        print("Missing description and/or compatibility [%s], exiting" % feature_items[2])
        return False

    return True
847 | |||
848 | |||
def yocto_kernel_feature_create(scripts_path, machine, feature_items):
    """
    Create a recipe-space kernel feature in a BSP.

    feature_items is [feature_name, description, compatibility,
    patch/CONFIG_ items...]; the feature's .cfg and .scc files are
    generated under the BSP's 'files' dir and any patches are copied
    alongside them.
    """
    if not check_create_input(feature_items):
        sys.exit(1)

    feature = feature_items[0]
    feature_basename = feature.split(".")[0]
    feature_description = feature_items[1]
    feature_compat = feature_items[2]

    patches = []
    cfg_items = []

    # Partition the remaining items into patches and config settings.
    for item in feature_items[3:]:
        if item.endswith(".patch"):
            patches.append(item)
        elif item.startswith("CONFIG"):
            # Only =y/=m settings are collected into the fragment.
            if "=y" in item or "=m" in item:
                cfg_items.append(item)
        else:
            print("Invalid feature item (must be .patch or CONFIG_*) [%s], exiting" % item)
            sys.exit(1)

    # Features with patches live under 'features/', config-only ones
    # under 'cfg/'.
    feature_dirname = "features" if patches else "cfg"

    filesdir = find_filesdir(scripts_path, machine)
    if not filesdir:
        print("Couldn't add feature (%s), no 'files' dir found" % feature)
        sys.exit(1)

    featdir = os.path.join(filesdir, feature_dirname)
    if not os.path.exists(featdir):
        os.mkdir(featdir)

    # Copy the patches into the feature directory.
    for patch in patches:
        if not os.path.isfile(patch):
            print("Couldn't find patch (%s), exiting" % patch)
            sys.exit(1)
        shutil.copyfile(patch, os.path.join(featdir, os.path.basename(patch)))

    # Write the config fragment.
    cfg_file = open(os.path.join(featdir, feature_basename + ".cfg"), "w")
    for cfg_item in cfg_items:
        cfg_file.write(cfg_item + "\n")
    cfg_file.close()

    # Write the feature's .scc description file.
    scc_file = open(os.path.join(featdir, feature_basename + ".scc"), "w")
    scc_file.write("define KFEATURE_DESCRIPTION \"" + feature_description + "\"\n")
    scc_file.write("define KFEATURE_COMPATIBILITY " + feature_compat + "\n\n")
    for patch in patches:
        scc_file.write("patch " + os.path.split(patch)[1] + "\n")
    scc_file.write("kconf non-hardware " + feature_basename + ".cfg\n")
    scc_file.close()

    print("Added feature:")
    print("\t%s" % feature_dirname + "/" + feature)
915 | |||
916 | |||
def feature_in_use(scripts_path, machine, feature):
    """
    Determine whether the specified feature is in use by the BSP.
    Return True if so, False otherwise.
    """
    return feature in read_features(scripts_path, machine)
927 | |||
928 | |||
def feature_remove(scripts_path, machine, feature):
    """
    Remove the specified feature from the available recipe-space
    features defined for the BSP.
    """
    remaining = [f for f in read_features(scripts_path, machine) if f != feature]
    write_features(scripts_path, machine, remaining)
941 | |||
942 | |||
def yocto_kernel_feature_destroy(scripts_path, machine, feature):
    """
    Remove a recipe-space kernel feature from a BSP.

    The feature must not currently be in use; its .scc file and the
    files it references (patches, config fragments) are deleted from
    the BSP's 'features' or 'cfg' dir, and it is dropped from the
    available features list.
    """
    if not check_feature_name(feature):
        sys.exit(1)

    if feature_in_use(scripts_path, machine, "features/" + feature) or \
        feature_in_use(scripts_path, machine, "cfg/" + feature):
        print("Feature %s is in use (use 'feature rm' to un-use it first), exiting" % feature)
        sys.exit(1)

    filesdir = find_filesdir(scripts_path, machine)
    if not filesdir:
        print("Couldn't destroy feature (%s), no 'files' dir found" % feature)
        sys.exit(1)

    # Look for the feature first under 'features/', then under 'cfg/'.
    feature_dirname = "features"
    featdir = os.path.join(filesdir, feature_dirname)
    if not os.path.exists(featdir):
        print("Couldn't find feature directory (%s)" % feature_dirname)
        sys.exit(1)

    feature_fqn = os.path.join(featdir, feature)
    if not os.path.exists(feature_fqn):
        feature_dirname = "cfg"
        featdir = os.path.join(filesdir, feature_dirname)
        if not os.path.exists(featdir):
            print("Couldn't find feature directory (%s)" % feature_dirname)
            sys.exit(1)
        # Fixed: the original referenced an undefined name
        # 'feature_filename' here, raising NameError whenever the
        # feature wasn't found under 'features/'.
        feature_fqn = os.path.join(featdir, feature)
        if not os.path.exists(feature_fqn):
            print("Couldn't find feature (%s)" % feature)
            sys.exit(1)

    # Remove the files referenced by the feature's .scc, then the
    # .scc file itself.
    f = open(feature_fqn, "r")
    lines = f.readlines()
    for line in lines:
        s = line.strip()
        if s.startswith("patch ") or s.startswith("kconf "):
            filename = os.path.join(featdir, s.split()[-1])
            if os.path.exists(filename):
                os.remove(filename)
    f.close()
    os.remove(feature_fqn)

    feature_remove(scripts_path, machine, feature)

    print("Removed feature:")
    print("\t%s" % feature_dirname + "/" + feature)
994 | |||
995 | |||
def base_branches(context):
    """
    Return a list of the base branches found in the kernel git repo.
    """
    giturl = find_giturl(context)

    print("Getting branches from remote repo %s..." % giturl)

    gitcmd = "git ls-remote %s *heads* 2>&1" % giturl
    output = subprocess.Popen(gitcmd, shell=True, stdout=subprocess.PIPE).stdout.read()

    branches = []

    if output:
        for line in output.split("\n"):
            # ls-remote output ends with an empty line.
            if not line:
                break
            # Only refs whose last component is 'base' are of interest.
            if not line.endswith("base"):
                continue
            kbranch = line[line.find("refs/heads/") + len("refs/heads/"):]
            if kbranch.find("/") == -1 and kbranch.find("base") == -1:
                continue
            # Strip the trailing '/base' to get the base branch name.
            branches.append(kbranch[:kbranch.find("base") - 1])

    return branches
1024 | |||
1025 | |||
def all_branches(context):
    """
    Return a list of all the branches found in the kernel git repo.

    A branch is included when its prefix matches one of the
    colon-separated prefixes in context["branches_base"], or when it
    looks like a standard/base branch ('standard' or 'base' in a
    multi-component name, or the literal 'base').
    """
    giturl = find_giturl(context)

    print("Getting branches from remote repo %s..." % giturl)

    gitcmd = "git ls-remote %s *heads* 2>&1" % giturl
    output = subprocess.Popen(gitcmd, shell=True, stdout=subprocess.PIPE).stdout.read()

    branches = []

    base_prefixes = None

    try:
        branches_base = context["branches_base"]
        if branches_base:
            base_prefixes = branches_base.split(":")
    except KeyError:
        pass

    # Kept for parity with the original: raises KeyError on a
    # malformed context even though the value is unused here.
    arch = context["arch"]

    if output:
        for line in output.split("\n"):
            # ls-remote output ends with an empty line.
            if len(line) == 0:
                break
            idx = line.find("refs/heads/")
            kbranch = line[idx + len("refs/heads/"):]
            kbranch_prefix = kbranch.rsplit("/", 1)[0]

            if base_prefixes:
                # Fixed: the original's 'continue' only continued the
                # inner prefix loop, so a prefix-matched branch fell
                # through to the standard/base check below and could
                # be appended twice.
                if kbranch_prefix in base_prefixes:
                    branches.append(kbranch)
                    continue

            if (kbranch.find("/") != -1 and
                (kbranch.find("standard") != -1 or kbranch.find("base") != -1) or
                kbranch == "base"):
                branches.append(kbranch)
                continue

    return branches
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/.gitignore b/scripts/lib/bsp/substrate/target/arch/arm/.gitignore new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/.gitignore | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/conf/machine/{{=machine}}.conf b/scripts/lib/bsp/substrate/target/arch/arm/conf/machine/{{=machine}}.conf new file mode 100644 index 0000000000..44a80d226c --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/conf/machine/{{=machine}}.conf | |||
@@ -0,0 +1,105 @@ | |||
1 | #@TYPE: Machine | ||
2 | #@NAME: {{=machine}} | ||
3 | |||
4 | #@DESCRIPTION: Machine configuration for {{=machine}} systems | ||
5 | |||
6 | {{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }} | ||
7 | {{ if xserver == "y": }} | ||
8 | PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg" | ||
9 | XSERVER ?= "xserver-xorg \ | ||
10 | xf86-input-evdev \ | ||
11 | xf86-input-mouse \ | ||
12 | xf86-video-omapfb \ | ||
13 | xf86-input-keyboard" | ||
14 | |||
15 | # Ship all kernel modules by default | ||
16 | MACHINE_EXTRA_RRECOMMENDS = " kernel-modules" | ||
17 | |||
18 | # Allow for MMC booting (required by the NAND-less Beagleboard XM) | ||
19 | EXTRA_IMAGEDEPENDS += "u-boot" | ||
20 | |||
21 | # Uncomment the following line to enable the hard floating point abi. Note that | ||
22 | # this breaks some binary libraries and 3D (neither of which ship with | ||
23 | # meta-yocto). For maximum compatibility, leave this disabled. | ||
24 | #DEFAULTTUNE ?= "cortexa8hf-neon" | ||
25 | {{ input type:"choicelist" name:"tunefile" prio:"40" msg:"Which machine tuning would you like to use?" default:"tune_cortexa8" }} | ||
26 | {{ input type:"choice" val:"tune_arm1136jf_s" msg:"arm1136jf-s tuning optimizations" }} | ||
27 | {{ input type:"choice" val:"tune_arm920t" msg:"arm920t tuning optimizations" }} | ||
28 | {{ input type:"choice" val:"tune_arm926ejs" msg:"arm926ejs tuning optimizations" }} | ||
29 | {{ input type:"choice" val:"tune_arm9tdmi" msg:"arm9tdmi tuning optimizations" }} | ||
30 | {{ input type:"choice" val:"tune_cortexa5" msg:"cortexa5 tuning optimizations" }} | ||
31 | {{ input type:"choice" val:"tune_cortexa7" msg:"cortexa7 tuning optimizations" }} | ||
32 | {{ input type:"choice" val:"tune_cortexa8" msg:"cortexa8 tuning optimizations" }} | ||
33 | {{ input type:"choice" val:"tune_cortexa9" msg:"cortexa9 tuning optimizations" }} | ||
34 | {{ input type:"choice" val:"tune_cortexa15" msg:"cortexa15 tuning optimizations" }} | ||
35 | {{ input type:"choice" val:"tune_cortexm1" msg:"cortexm1 tuning optimizations" }} | ||
36 | {{ input type:"choice" val:"tune_cortexm3" msg:"cortexm3 tuning optimizations" }} | ||
37 | {{ input type:"choice" val:"tune_cortexr4" msg:"cortexr4 tuning optimizations" }} | ||
38 | {{ input type:"choice" val:"tune_ep9312" msg:"ep9312 tuning optimizations" }} | ||
39 | {{ input type:"choice" val:"tune_iwmmxt" msg:"iwmmxt tuning optimizations" }} | ||
40 | {{ input type:"choice" val:"tune_strongarm1100" msg:"strongarm1100 tuning optimizations" }} | ||
41 | {{ input type:"choice" val:"tune_xscale" msg:"xscale tuning optimizations" }} | ||
42 | {{ if tunefile == "tune_arm1136jf_s": }} | ||
43 | include conf/machine/include/tune-arm1136jf-s.inc | ||
44 | {{ if tunefile == "tune_arm920t": }} | ||
45 | include conf/machine/include/tune-arm920t.inc | ||
46 | {{ if tunefile == "tune_arm926ejs": }} | ||
47 | include conf/machine/include/tune-arm926ejs.inc | ||
48 | {{ if tunefile == "tune_arm9tdmi": }} | ||
49 | include conf/machine/include/tune-arm9tdmi.inc | ||
50 | {{ if tunefile == "tune_cortexa5": }} | ||
51 | include conf/machine/include/tune-cortexa5.inc | ||
52 | {{ if tunefile == "tune_cortexa7": }} | ||
53 | include conf/machine/include/tune-cortexa7.inc | ||
54 | {{ if tunefile == "tune_cortexa8": }} | ||
55 | include conf/machine/include/tune-cortexa8.inc | ||
56 | {{ if tunefile == "tune_cortexa9": }} | ||
57 | include conf/machine/include/tune-cortexa9.inc | ||
58 | {{ if tunefile == "tune_cortexa15": }} | ||
59 | include conf/machine/include/tune-cortexa15.inc | ||
60 | {{ if tunefile == "tune_cortexm1": }} | ||
61 | include conf/machine/include/tune-cortexm1.inc | ||
62 | {{ if tunefile == "tune_cortexm3": }} | ||
63 | include conf/machine/include/tune-cortexm3.inc | ||
64 | {{ if tunefile == "tune_cortexr4": }} | ||
65 | include conf/machine/include/tune-cortexr4.inc | ||
66 | {{ if tunefile == "tune_ep9312": }} | ||
67 | include conf/machine/include/tune-ep9312.inc | ||
68 | {{ if tunefile == "tune_iwmmxt": }} | ||
69 | include conf/machine/include/tune-iwmmxt.inc | ||
70 | {{ if tunefile == "tune_strongarm1100": }} | ||
71 | include conf/machine/include/tune-strongarm1100.inc | ||
72 | {{ if tunefile == "tune_xscale": }} | ||
73 | include conf/machine/include/tune-xscale.inc | ||
74 | |||
75 | IMAGE_FSTYPES += "tar.bz2 jffs2" | ||
76 | EXTRA_IMAGECMD_jffs2 = "-lnp " | ||
77 | |||
78 | # 2.6.37 and later kernels use OMAP_SERIAL, ttyO2 | ||
79 | # earlier kernels use ttyS2 | ||
80 | SERIAL_CONSOLE = "115200 ttyO2" | ||
81 | |||
82 | {{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }} | ||
83 | {{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }} | ||
84 | {{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }} | ||
85 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
86 | |||
87 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }} | ||
88 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }} | ||
89 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }} | ||
90 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
91 | PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%" | ||
92 | |||
93 | KERNEL_IMAGETYPE = "zImage" | ||
94 | KERNEL_DEVICETREE = "${S}/arch/arm/boot/dts/omap3-beagle.dts ${S}/arch/arm/boot/dts/omap3-beagle-xm.dts" | ||
95 | |||
96 | SPL_BINARY = "MLO" | ||
97 | UBOOT_SUFFIX = "img" | ||
98 | {{ input type:"edit" name:"uboot_machine" prio:"40" msg:"Please specify a value for UBOOT_MACHINE:" default:"omap3_beagle_config" }} | ||
99 | UBOOT_MACHINE = "{{=uboot_machine}}" | ||
100 | {{ input type:"edit" name:"uboot_entrypoint" prio:"40" msg:"Please specify a value for UBOOT_ENTRYPOINT:" default:"0x80008000" }} | ||
101 | UBOOT_ENTRYPOINT = "{{=uboot_entrypoint}}" | ||
102 | {{ input type:"edit" name:"uboot_loadaddress" prio:"40" msg:"Please specify a value for UBOOT_LOADADDRESS:" default:"0x80008000" }} | ||
103 | UBOOT_LOADADDRESS = "{{=uboot_loadaddress}}" | ||
104 | |||
105 | MACHINE_FEATURES = "usbgadget usbhost vfat alsa" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/{{ if xserver == "y": }} xorg.conf b/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/{{ if xserver == "y": }} xorg.conf new file mode 100644 index 0000000000..264f3c91ad --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/{{ if xserver == "y": }} xorg.conf | |||
@@ -0,0 +1,33 @@ | |||
1 | Section "Module" | ||
2 | Load "extmod" | ||
3 | Load "dbe" | ||
4 | Load "glx" | ||
5 | Load "freetype" | ||
6 | Load "type1" | ||
7 | Load "record" | ||
8 | Load "dri" | ||
9 | EndSection | ||
10 | |||
11 | Section "Monitor" | ||
12 | Identifier "Builtin Default Monitor" | ||
13 | EndSection | ||
14 | |||
15 | Section "Device" | ||
16 | Identifier "Builtin Default fbdev Device 0" | ||
17 | Driver "omapfb" | ||
18 | EndSection | ||
19 | |||
20 | Section "Screen" | ||
21 | Identifier "Builtin Default fbdev Screen 0" | ||
22 | Device "Builtin Default fbdev Device 0" | ||
23 | Monitor "Builtin Default Monitor" | ||
24 | EndSection | ||
25 | |||
26 | Section "ServerLayout" | ||
27 | Identifier "Builtin Default Layout" | ||
28 | Screen "Builtin Default fbdev Screen 0" | ||
29 | EndSection | ||
30 | |||
31 | Section "ServerFlags" | ||
32 | Option "DontZap" "0" | ||
33 | EndSection | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/{{ if xserver == "y": }} xserver-xf86-config_0.1.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/{{ if xserver == "y": }} xserver-xf86-config_0.1.bbappend new file mode 100644 index 0000000000..72d991c7e5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/{{ if xserver == "y": }} xserver-xf86-config_0.1.bbappend | |||
@@ -0,0 +1 @@ | |||
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall new file mode 100644 index 0000000000..a04e6c7852 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall | |||
@@ -0,0 +1,5 @@ | |||
1 | {{ if kernel_choice != "custom": }} | ||
2 | {{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (3.14) kernel? (y/n)" default:"y"}} | ||
3 | |||
4 | {{ if kernel_choice != "custom" and use_default_kernel == "n": }} | ||
5 | {{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_3.14"}} | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-non_hardware.cfg b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-non_hardware.cfg new file mode 100644 index 0000000000..361343bb58 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-non_hardware.cfg | |||
@@ -0,0 +1,30 @@ | |||
1 | # | ||
2 | # Miscellaneous filesystems | ||
3 | # | ||
4 | CONFIG_NFS_DEF_FILE_IO_SIZE=1024 | ||
5 | |||
6 | # | ||
7 | # Multiple Device Support | ||
8 | # | ||
9 | # CONFIG_MD is not set | ||
10 | |||
11 | # Kernel Features | ||
12 | # | ||
13 | CONFIG_NO_HZ=y | ||
14 | |||
15 | # | ||
16 | # CPUIdle | ||
17 | # | ||
18 | CONFIG_CPU_IDLE=y | ||
19 | CONFIG_CPU_IDLE_GOV_LADDER=y | ||
20 | CONFIG_CPU_IDLE_GOV_MENU=y | ||
21 | |||
22 | # | ||
23 | # Kernel hacking | ||
24 | # | ||
25 | CONFIG_DEBUG_FS=y | ||
26 | |||
27 | # | ||
28 | # Power management options | ||
29 | # | ||
30 | CONFIG_PM_DEBUG=y | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc new file mode 100644 index 0000000000..56f7f0f1e3 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc | |||
@@ -0,0 +1,13 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE preempt-rt | ||
3 | define KARCH arm | ||
4 | |||
5 | include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
10 | |||
11 | # default policy for preempt-rt kernels | ||
12 | include features/latencytop/latencytop.scc | ||
13 | include features/profiling/profiling.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc new file mode 100644 index 0000000000..80640db4a2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc | |||
@@ -0,0 +1,13 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE standard | ||
3 | define KARCH arm | ||
4 | |||
5 | include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
10 | |||
11 | # default policy for standard kernels | ||
12 | include features/latencytop/latencytop.scc | ||
13 | include features/profiling/profiling.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc new file mode 100644 index 0000000000..51eaf2d32c --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE tiny | ||
3 | define KARCH arm | ||
4 | |||
5 | include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg new file mode 100644 index 0000000000..10134c81f5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg | |||
@@ -0,0 +1,320 @@ | |||
1 | # | ||
2 | # System Type | ||
3 | # | ||
4 | CONFIG_ARCH_OMAP=y | ||
5 | |||
6 | # | ||
7 | # TI OMAP Implementations | ||
8 | # | ||
9 | # CONFIG_ARCH_OMAP2 is not set | ||
10 | CONFIG_ARCH_OMAP3=y | ||
11 | |||
12 | # | ||
13 | # TI OMAP Common Features | ||
14 | # | ||
15 | CONFIG_ARCH_OMAP2PLUS=y | ||
16 | |||
17 | # | ||
18 | # OMAP Feature Selections | ||
19 | # | ||
20 | CONFIG_OMAP_32K_TIMER=y | ||
21 | CONFIG_OMAP_32K_TIMER_HZ=128 | ||
22 | CONFIG_OMAP_DM_TIMER=y | ||
23 | CONFIG_OMAP_RESET_CLOCKS=y | ||
24 | CONFIG_OMAP_SMARTREFLEX=y | ||
25 | CONFIG_OMAP_SMARTREFLEX_CLASS3=y | ||
26 | CONFIG_OMAP_MBOX_FWK=m | ||
27 | CONFIG_OMAP_MBOX_KFIFO_SIZE=256 | ||
28 | |||
29 | # | ||
30 | # OMAP Board Type | ||
31 | # | ||
32 | CONFIG_MACH_OMAP3_BEAGLE=y | ||
33 | |||
34 | # | ||
35 | # Processor Features | ||
36 | # | ||
37 | CONFIG_ARM_THUMBEE=y | ||
38 | CONFIG_ARM_ERRATA_430973=y | ||
39 | |||
40 | # | ||
41 | # Kernel Features | ||
42 | # | ||
43 | CONFIG_LEDS=y | ||
44 | |||
45 | |||
46 | # | ||
47 | # Serial drivers | ||
48 | # | ||
49 | CONFIG_SERIAL_OMAP=y | ||
50 | CONFIG_SERIAL_OMAP_CONSOLE=y | ||
51 | |||
52 | # | ||
53 | # At least one emulation must be selected | ||
54 | # | ||
55 | CONFIG_VFP=y | ||
56 | CONFIG_NEON=y | ||
57 | |||
58 | # | ||
59 | # Power management options | ||
60 | # | ||
61 | CONFIG_PM=y | ||
62 | CONFIG_PM_RUNTIME=y | ||
63 | |||
64 | # | ||
65 | # Generic Driver Options | ||
66 | # | ||
67 | CONFIG_MTD=y | ||
68 | CONFIG_MTD_CMDLINE_PARTS=y | ||
69 | # | ||
70 | # User Modules And Translation Layers | ||
71 | # | ||
72 | CONFIG_MTD_BLKDEVS=y | ||
73 | CONFIG_MTD_BLOCK=y | ||
74 | |||
75 | # | ||
76 | # RAM/ROM/Flash chip drivers | ||
77 | # | ||
78 | CONFIG_MTD_CFI=y | ||
79 | CONFIG_MTD_CFI_INTELEXT=y | ||
80 | |||
81 | # | ||
82 | # Disk-On-Chip Device Drivers | ||
83 | # | ||
84 | CONFIG_MTD_NAND=y | ||
85 | |||
86 | CONFIG_MTD_NAND_OMAP2=y | ||
87 | |||
88 | CONFIG_MTD_UBI=y | ||
89 | |||
90 | # | ||
91 | # SCSI device support | ||
92 | # | ||
93 | CONFIG_SCSI=y | ||
94 | |||
95 | # | ||
96 | # SCSI support type (disk, tape, CD-ROM) | ||
97 | # | ||
98 | CONFIG_BLK_DEV_SD=y | ||
99 | |||
100 | # | ||
101 | # Ethernet (10 or 100Mbit) | ||
102 | # | ||
103 | CONFIG_SMSC911X=y | ||
104 | CONFIG_USB_NET_SMSC95XX=y | ||
105 | |||
106 | # | ||
107 | # Userland interfaces | ||
108 | # | ||
109 | CONFIG_INPUT_EVDEV=y | ||
110 | |||
111 | # | ||
112 | # Input Device Drivers | ||
113 | # | ||
114 | CONFIG_KEYBOARD_TWL4030=y | ||
115 | CONFIG_INPUT_TOUCHSCREEN=y | ||
116 | CONFIG_TOUCHSCREEN_ADS7846=y | ||
117 | |||
118 | # | ||
119 | # Miscellaneous I2C Chip support | ||
120 | # | ||
121 | CONFIG_I2C=y | ||
122 | CONFIG_I2C_OMAP=y | ||
123 | CONFIG_SPI=y | ||
124 | CONFIG_SPI_MASTER=y | ||
125 | CONFIG_SPI_OMAP24XX=y | ||
126 | |||
127 | # | ||
128 | # I2C GPIO expanders: | ||
129 | # | ||
130 | CONFIG_GPIO_TWL4030=y | ||
131 | |||
132 | # | ||
133 | # SPI GPIO expanders: | ||
134 | # | ||
135 | CONFIG_OMAP_WATCHDOG=y | ||
136 | CONFIG_WATCHDOG_NOWAYOUT=y | ||
137 | |||
138 | # | ||
139 | # Multifunction device drivers | ||
140 | # | ||
141 | CONFIG_TWL4030_CORE=y | ||
142 | CONFIG_REGULATOR=y | ||
143 | CONFIG_REGULATOR_DUMMY=y | ||
144 | CONFIG_REGULATOR_TWL4030=y | ||
145 | |||
146 | # | ||
147 | # Graphics support | ||
148 | # | ||
149 | CONFIG_FB=y | ||
150 | CONFIG_DRM=m | ||
151 | # CONFIG_VGASTATE is not set | ||
152 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | ||
153 | # CONFIG_FIRMWARE_EDID is not set | ||
154 | # CONFIG_FB_DDC is not set | ||
155 | # CONFIG_FB_BOOT_VESA_SUPPORT is not set | ||
156 | CONFIG_FB_CFB_FILLRECT=y | ||
157 | CONFIG_FB_CFB_COPYAREA=y | ||
158 | CONFIG_FB_CFB_IMAGEBLIT=y | ||
159 | # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set | ||
160 | # CONFIG_FB_SYS_FILLRECT is not set | ||
161 | # CONFIG_FB_SYS_COPYAREA is not set | ||
162 | # CONFIG_FB_SYS_IMAGEBLIT is not set | ||
163 | # CONFIG_FB_FOREIGN_ENDIAN is not set | ||
164 | # CONFIG_FB_SYS_FOPS is not set | ||
165 | # CONFIG_FB_SVGALIB is not set | ||
166 | # CONFIG_FB_MACMODES is not set | ||
167 | # CONFIG_FB_BACKLIGHT is not set | ||
168 | CONFIG_FB_MODE_HELPERS=y | ||
169 | # CONFIG_FB_TILEBLITTING is not set | ||
170 | |||
171 | # | ||
172 | # Frame buffer hardware drivers | ||
173 | # | ||
174 | # CONFIG_FB_S1D13XXX is not set | ||
175 | # CONFIG_FB_TMIO is not set | ||
176 | # CONFIG_FB_VIRTUAL is not set | ||
177 | # CONFIG_FB_METRONOME is not set | ||
178 | # CONFIG_FB_MB862XX is not set | ||
179 | # CONFIG_FB_BROADSHEET is not set | ||
180 | # CONFIG_FB_OMAP_BOOTLOADER_INIT is not set | ||
181 | CONFIG_OMAP2_VRAM=y | ||
182 | CONFIG_OMAP2_VRFB=y | ||
183 | CONFIG_OMAP2_DSS=y | ||
184 | CONFIG_OMAP2_VRAM_SIZE=14 | ||
185 | CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y | ||
186 | # CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS is not set | ||
187 | CONFIG_OMAP2_DSS_DPI=y | ||
188 | # CONFIG_OMAP2_DSS_RFBI is not set | ||
189 | CONFIG_OMAP2_DSS_VENC=y | ||
190 | # CONFIG_OMAP2_DSS_SDI is not set | ||
191 | CONFIG_OMAP2_DSS_DSI=y | ||
192 | # CONFIG_OMAP2_DSS_FAKE_VSYNC is not set | ||
193 | CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0 | ||
194 | CONFIG_FB_OMAP2=y | ||
195 | CONFIG_FB_OMAP2_DEBUG_SUPPORT=y | ||
196 | CONFIG_FB_OMAP2_NUM_FBS=2 | ||
197 | |||
198 | # | ||
199 | # OMAP2/3 Display Device Drivers | ||
200 | # | ||
201 | CONFIG_PANEL_GENERIC_DPI=y | ||
202 | CONFIG_PANEL_DVI=y | ||
203 | CONFIG_PANEL_SHARP_LS037V7DW01=y | ||
204 | # CONFIG_PANEL_LGPHILIPS_LB035Q02 is not set | ||
205 | # CONFIG_PANEL_TAAL is not set | ||
206 | CONFIG_PANEL_TPO_TD043MTEA1=m | ||
207 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
208 | CONFIG_BACKLIGHT_CLASS_DEVICE=y | ||
209 | |||
210 | # | ||
211 | # Display device support | ||
212 | # | ||
213 | CONFIG_DISPLAY_SUPPORT=y | ||
214 | CONFIG_DUMMY_CONSOLE=y | ||
215 | # CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set | ||
216 | CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y | ||
217 | # CONFIG_FONTS is not set | ||
218 | CONFIG_FONT_8x8=y | ||
219 | CONFIG_FONT_8x16=y | ||
220 | # CONFIG_LOGO_LINUX_MONO is not set | ||
221 | # CONFIG_LOGO_LINUX_VGA16 is not set | ||
222 | |||
223 | # | ||
224 | # Console display driver support | ||
225 | # | ||
226 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
227 | CONFIG_LOGO=y | ||
228 | # CONFIG_VGA_CONSOLE is not set | ||
229 | |||
230 | # DMA Devices | ||
231 | CONFIG_DMADEVICES=y | ||
232 | CONFIG_DMA_OMAP=y | ||
233 | CONFIG_DMA_OF=y | ||
234 | |||
235 | CONFIG_SOUND=y | ||
236 | CONFIG_SND=y | ||
237 | CONFIG_SND_SOC=y | ||
238 | CONFIG_SND_OMAP_SOC=y | ||
239 | CONFIG_SND_OMAP_SOC_OMAP_TWL4030=y | ||
240 | |||
241 | # | ||
242 | # USB Input Devices | ||
243 | # | ||
244 | CONFIG_USB=y | ||
245 | CONFIG_USB_SUPPORT=y | ||
246 | |||
247 | # | ||
248 | # Miscellaneous USB options | ||
249 | # | ||
250 | CONFIG_USB_OTG=y | ||
251 | # CONFIG_USB_OTG_WHITELIST is not set | ||
252 | |||
253 | # | ||
254 | # USB Host Controller Drivers | ||
255 | # | ||
256 | CONFIG_USB_EHCI_HCD=y | ||
257 | CONFIG_USB_EHCI_TT_NEWSCHED=y | ||
258 | CONFIG_USB_EHCI_ROOT_HUB_TT=y | ||
259 | CONFIG_USB_MUSB_HDRC=y | ||
260 | CONFIG_USB_MUSB_OMAP2PLUS=y | ||
261 | CONFIG_USB_OMAP=y | ||
262 | |||
263 | # | ||
264 | # OMAP 343x high speed USB support | ||
265 | # | ||
266 | CONFIG_USB_MUSB_OTG=y | ||
267 | CONFIG_USB_GADGET_MUSB_HDRC=y | ||
268 | CONFIG_USB_MUSB_HDRC_HCD=y | ||
269 | CONFIG_USB_INVENTRA_DMA=y | ||
270 | |||
271 | # | ||
272 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' | ||
273 | # | ||
274 | |||
275 | # | ||
276 | # may also be needed; see USB_STORAGE Help for more information | ||
277 | # | ||
278 | CONFIG_USB_STORAGE=y | ||
279 | |||
280 | # | ||
281 | # USB Miscellaneous drivers | ||
282 | # | ||
283 | CONFIG_USB_GADGET=y | ||
284 | CONFIG_USB_GADGET_DUALSPEED=y | ||
285 | CONFIG_USB_OTG_UTILS=y | ||
286 | CONFIG_TWL4030_USB=y | ||
287 | |||
288 | # USB gadget modules | ||
289 | CONFIG_USB_G_NCM=y | ||
290 | CONFIG_USB_MASS_STORAGE=y | ||
291 | |||
292 | CONFIG_MMC=y | ||
293 | |||
294 | # | ||
295 | # MMC/SD Host Controller Drivers | ||
296 | # | ||
297 | CONFIG_MMC_OMAP_HS=y | ||
298 | |||
299 | # | ||
300 | # Real Time Clock | ||
301 | # | ||
302 | CONFIG_RTC_LIB=y | ||
303 | CONFIG_RTC_CLASS=y | ||
304 | CONFIG_RTC_DRV_TWL4030=y | ||
305 | |||
306 | # | ||
307 | # DOS/FAT/NT Filesystems | ||
308 | # | ||
309 | CONFIG_VFAT_FS=y | ||
310 | |||
311 | # | ||
312 | # Multimedia core support | ||
313 | # | ||
314 | |||
315 | # CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set | ||
316 | |||
317 | # | ||
318 | # Advanced Power Management Emulation support | ||
319 | # | ||
320 | CONFIG_APM_EMULATION=y | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc new file mode 100644 index 0000000000..24196e6f67 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc | |||
@@ -0,0 +1,7 @@ | |||
1 | kconf hardware {{=machine}}.cfg | ||
2 | kconf non-hardware {{=machine}}-non_hardware.cfg | ||
3 | |||
4 | include features/usb-net/usb-net.scc | ||
5 | |||
6 | kconf hardware {{=machine}}-user-config.cfg | ||
7 | include {{=machine}}-user-patches.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend new file mode 100644 index 0000000000..25c87a85ac --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend | |||
@@ -0,0 +1,25 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
7 | |||
8 | {{ if need_new_kbranch == "y": }} | ||
9 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
10 | |||
11 | {{ if need_new_kbranch == "n": }} | ||
12 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
13 | |||
14 | {{ if need_new_kbranch == "n": }} | ||
15 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
16 | |||
17 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}} | ||
18 | {{ if smp == "y": }} | ||
19 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
20 | |||
21 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
22 | file://{{=machine}}-user-config.cfg \ | ||
23 | file://{{=machine}}-user-patches.scc \ | ||
24 | file://{{=machine}}-user-features.scc \ | ||
25 | " | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend new file mode 100644 index 0000000000..08b1f88d1b --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-preempt-rt.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-rt_{{=machine}} ?= "f35992f80c81dc5fa1a97165dfd5cbb84661f7cb" | ||
31 | #SRCREV_meta_pn-linux-yocto-rt_{{=machine}} ?= "1b534b2f8bbe9b8a773268cfa30a4850346f6f5f" | ||
32 | #LINUX_VERSION = "3.10.9" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend new file mode 100644 index 0000000000..bc6968d832 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.10.9" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend new file mode 100644 index 0000000000..d221d5f2a4 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.14" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend new file mode 100644 index 0000000000..1e814c54d7 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "19f7e43b54aef08d58135ed2a897d77b624b320a" | ||
31 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "459165c1dd61c4e843c36e6a1abeb30949a20ba7" | ||
32 | #LINUX_VERSION = "3.10.9" \ No newline at end of file | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend new file mode 100644 index 0000000000..ca7f8c5978 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.14" \ No newline at end of file | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/COPYING.MIT b/scripts/lib/bsp/substrate/target/arch/common/COPYING.MIT new file mode 100644 index 0000000000..fb950dc69f --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/COPYING.MIT | |||
@@ -0,0 +1,17 @@ | |||
1 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
2 | of this software and associated documentation files (the "Software"), to deal | ||
3 | in the Software without restriction, including without limitation the rights | ||
4 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
5 | copies of the Software, and to permit persons to whom the Software is | ||
6 | furnished to do so, subject to the following conditions: | ||
7 | |||
8 | The above copyright notice and this permission notice shall be included in | ||
9 | all copies or substantial portions of the Software. | ||
10 | |||
11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
13 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
14 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
15 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
16 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
17 | THE SOFTWARE. | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/README b/scripts/lib/bsp/substrate/target/arch/common/README new file mode 100644 index 0000000000..928659f302 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/README | |||
@@ -0,0 +1,118 @@ | |||
1 | This README file contains information on building the meta-{{=machine}} | ||
2 | BSP layer, and booting the images contained in the /binary directory. | ||
3 | Please see the corresponding sections below for details. | ||
4 | |||
5 | |||
6 | Dependencies | ||
7 | ============ | ||
8 | |||
9 | This layer depends on: | ||
10 | |||
11 | URI: git://git.openembedded.org/bitbake | ||
12 | branch: master | ||
13 | |||
14 | URI: git://git.openembedded.org/openembedded-core | ||
15 | layers: meta | ||
16 | branch: master | ||
17 | |||
18 | URI: git://git.yoctoproject.org/xxxx | ||
19 | layers: xxxx | ||
20 | branch: master | ||
21 | |||
22 | |||
23 | Patches | ||
24 | ======= | ||
25 | |||
26 | Please submit any patches against this BSP to the Yocto mailing list | ||
27 | (yocto@yoctoproject.org) and cc: the maintainer: | ||
28 | |||
29 | Maintainer: XXX YYYYYY <xxx.yyyyyy@zzzzz.com> | ||
30 | |||
31 | Please see the meta-xxxx/MAINTAINERS file for more details. | ||
32 | |||
33 | |||
34 | Table of Contents | ||
35 | ================= | ||
36 | |||
37 | I. Building the meta-{{=machine}} BSP layer | ||
38 | II. Booting the images in /binary | ||
39 | |||
40 | |||
41 | I. Building the meta-{{=machine}} BSP layer | ||
42 | ======================================== | ||
43 | |||
44 | --- replace with specific instructions for your layer --- | ||
45 | |||
46 | In order to build an image with BSP support for a given release, you | ||
47 | need to download the corresponding BSP tarball from the 'Board Support | ||
48 | Package (BSP) Downloads' page of the Yocto Project website. | ||
49 | |||
50 | Having done that, and assuming you extracted the BSP tarball contents | ||
51 | at the top-level of your yocto build tree, you can build a | ||
52 | {{=machine}} image by adding the location of the meta-{{=machine}} | ||
53 | layer to bblayers.conf, along with any other layers needed (to access | ||
54 | common metadata shared between BSPs) e.g.: | ||
55 | |||
56 | yocto/meta-xxxx \ | ||
57 | yocto/meta-xxxx/meta-{{=machine}} \ | ||
58 | |||
59 | To enable the {{=machine}} layer, add the {{=machine}} MACHINE to local.conf: | ||
60 | |||
61 | MACHINE ?= "{{=machine}}" | ||
62 | |||
63 | You should then be able to build a {{=machine}} image as such: | ||
64 | |||
65 | $ source oe-init-build-env | ||
66 | $ bitbake core-image-sato | ||
67 | |||
68 | At the end of a successful build, you should have a live image that | ||
69 | you can boot from a USB flash drive (see instructions on how to do | ||
70 | that below, in the section 'Booting the images in /binary'). | ||
71 | |||
72 | As an alternative to downloading the BSP tarball, you can also work | ||
73 | directly from the meta-xxxx git repository. For each BSP in the | ||
74 | 'meta-xxxx' repository, there are multiple branches, one corresponding | ||
75 | to each major release starting with 'laverne' (0.90), in addition to | ||
76 | the latest code which tracks the current master (note that not all | ||
77 | BSPs are present in every release). Instead of extracting a BSP | ||
78 | tarball at the top level of your yocto build tree, you can | ||
79 | equivalently check out the appropriate branch from the meta-xxxx | ||
80 | repository at the same location. | ||
81 | |||
82 | |||
83 | II. Booting the images in /binary | ||
84 | ================================= | ||
85 | |||
86 | --- replace with specific instructions for your platform --- | ||
87 | |||
88 | This BSP contains bootable live images, which can be used to directly | ||
89 | boot Yocto off of a USB flash drive. | ||
90 | |||
91 | Under Linux, insert a USB flash drive. Assuming the USB flash drive | ||
92 | takes device /dev/sdf, use dd to copy the live image to it. For | ||
93 | example: | ||
94 | |||
95 | # dd if=core-image-sato-{{=machine}}-20101207053738.hddimg of=/dev/sdf | ||
96 | # sync | ||
97 | # eject /dev/sdf | ||
98 | |||
99 | This should give you a bootable USB flash device. Insert the device | ||
100 | into a bootable USB socket on the target, and power on. This should | ||
101 | result in a system booted to the Sato graphical desktop. | ||
102 | |||
103 | If you want a terminal, use the arrows at the top of the UI to move to | ||
104 | different pages of available applications, one of which is named | ||
105 | 'Terminal'. Clicking that should give you a root terminal. | ||
106 | |||
107 | If you want to ssh into the system, you can use the root terminal to | ||
108 | ifconfig the IP address and use that to ssh in. The root password is | ||
109 | empty, so to log in type 'root' for the user name and hit 'Enter' at | ||
110 | the Password prompt: and you should be in. | ||
111 | |||
112 | ---- | ||
113 | |||
114 | If you find you're getting corrupt images on the USB (it doesn't show | ||
115 | the syslinux boot: prompt, or the boot: prompt contains strange | ||
116 | characters), try doing this first: | ||
117 | |||
118 | # dd if=/dev/zero of=/dev/sdf bs=1M count=512 | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/README.sources b/scripts/lib/bsp/substrate/target/arch/common/README.sources new file mode 100644 index 0000000000..3c4cb7b435 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/README.sources | |||
@@ -0,0 +1,17 @@ | |||
1 | The sources for the packages comprising the images shipped with this | ||
2 | BSP can be found at the following location: | ||
3 | |||
4 | http://downloads.yoctoproject.org/mirror/sources/ | ||
5 | |||
6 | The metadata used to generate the images shipped with this BSP, in | ||
7 | addition to the code contained in this BSP, can be found at the | ||
8 | following location: | ||
9 | |||
10 | http://www.yoctoproject.org/downloads/yocto-1.1/poky-edison-6.0.tar.bz2 | ||
11 | |||
12 | The metadata used to generate the images shipped with this BSP, in | ||
13 | addition to the code contained in this BSP, can also be found at the | ||
14 | following locations: | ||
15 | |||
16 | git://git.yoctoproject.org/poky.git | ||
17 | git://git.yoctoproject.org/meta-xxxx | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/binary/.gitignore b/scripts/lib/bsp/substrate/target/arch/common/binary/.gitignore new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/binary/.gitignore | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/conf/layer.conf b/scripts/lib/bsp/substrate/target/arch/common/conf/layer.conf new file mode 100644 index 0000000000..5529f45954 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/conf/layer.conf | |||
@@ -0,0 +1,10 @@ | |||
1 | # We have a conf and classes directory, add to BBPATH | ||
2 | BBPATH .= ":${LAYERDIR}" | ||
3 | |||
4 | # We have recipes-* directories, add to BBFILES | ||
5 | BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \ | ||
6 | ${LAYERDIR}/recipes-*/*/*.bbappend" | ||
7 | |||
8 | BBFILE_COLLECTIONS += "{{=machine}}" | ||
9 | BBFILE_PATTERN_{{=machine}} = "^${LAYERDIR}/" | ||
10 | BBFILE_PRIORITY_{{=machine}} = "6" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor/{{=machine}}/machconfig b/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor/{{=machine}}/machconfig new file mode 100644 index 0000000000..3b85d3821f --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor/{{=machine}}/machconfig | |||
@@ -0,0 +1,5 @@ | |||
1 | # Assume a USB mouse and keyboard are connected | ||
2 | {{ input type:"boolean" name:"touchscreen" msg:"Does your BSP have a touchscreen? (y/n)" default:"n" }} | ||
3 | HAVE_TOUCHSCREEN={{=touchscreen}} | ||
4 | {{ input type:"boolean" name:"keyboard" msg:"Does your BSP have a keyboard? (y/n)" default:"y" }} | ||
5 | HAVE_KEYBOARD={{=keyboard}} | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor_0.0.bbappend b/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor_0.0.bbappend new file mode 100644 index 0000000000..6d4804d127 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor_0.0.bbappend | |||
@@ -0,0 +1,2 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" | ||
2 | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/kernel-list.noinstall new file mode 100644 index 0000000000..03b7d84ec2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/kernel-list.noinstall | |||
@@ -0,0 +1,26 @@ | |||
1 | {{ if kernel_choice == "custom": }} | ||
2 | {{ input type:"boolean" name:"custom_kernel_remote" prio:"20" msg:"Is the custom kernel you'd like to use in a remote git repo? (y/n)" default:"y"}} | ||
3 | |||
4 | {{ if kernel_choice == "custom" and custom_kernel_remote == "y": }} | ||
5 | {{ input type:"edit-git-repo" name:"custom_kernel_remote_path" prio:"20" msg:"Please enter the full URI to the remote git repo (the default corresponds to linux-stable v3.13.9)" default:"git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git"}} | ||
6 | |||
7 | {{ if kernel_choice == "custom" and custom_kernel_remote == "n": }} | ||
8 | {{ input type:"edit-git-repo" name:"custom_kernel_local_path" prio:"20" msg:"You've indicated that you're not using a remote git repo. Please enter the full path to the local git repo you want to use (the default assumes a local linux-stable v3.13.9)" default:"/home/trz/yocto/kernels/linux-stable.git"}} | ||
9 | |||
10 | {{ if kernel_choice == "custom": }} | ||
11 | {{ input type:"boolean" name:"custom_kernel_need_kbranch" prio:"20" msg:"Do you need to use a specific (non-master) branch? (y/n)" default:"n"}} | ||
12 | |||
13 | {{ if kernel_choice == "custom" and custom_kernel_need_kbranch == "y": }} | ||
14 | {{ input type:"edit" name:"custom_kernel_kbranch" prio:"20" msg:"Please enter the branch you want to use (the default branch corresponds to the linux-stable 'linux-3.13.y' branch):" default:"linux-3.13.y"}} | ||
15 | |||
16 | {{ if kernel_choice == "custom": }} | ||
17 | {{ input type:"edit" name:"custom_kernel_srcrev" prio:"20" msg:"Please enter the SRCREV (commit id) you'd like to use (use '${AUTOREV}' to track the current HEAD):" default:"${AUTOREV}"}} | ||
18 | |||
19 | {{ if kernel_choice == "custom": }} | ||
20 | {{ input type:"edit" name:"custom_kernel_linux_version" prio:"20" msg:"Please enter the Linux version of the kernel you've specified:" default:"3.13.9"}} | ||
21 | |||
22 | {{ if kernel_choice == "custom": }} | ||
23 | {{ input type:"edit" name:"custom_kernel_linux_version_extension" prio:"20" msg:"Please enter a Linux version extension if you want (it will show up at the end of the kernel name shown by uname):" default:"-custom"}} | ||
24 | |||
25 | {{ if kernel_choice == "custom": }} | ||
26 | {{ input type:"edit-file" name:"custom_kernel_defconfig" prio:"20" msg:"It's recommended (but not required) that custom kernels be built using a defconfig. Please enter the full path to the defconfig for your kernel (NOTE: if you don't specify a defconfig the kernel probably won't build or boot):" default:""}} | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom.bb b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom.bb new file mode 100644 index 0000000000..6d3cc6f743 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom.bb | |||
@@ -0,0 +1,57 @@ | |||
1 | # This file was derived from the linux-yocto-custom.bb recipe in | ||
2 | # oe-core. | ||
3 | # | ||
4 | # linux-yocto-custom.bb: | ||
5 | # | ||
6 | # A yocto-bsp-generated kernel recipe that uses the linux-yocto and | ||
7 | # oe-core kernel classes to apply a subset of yocto kernel | ||
8 | # management to git managed kernel repositories. | ||
9 | # | ||
10 | # Warning: | ||
11 | # | ||
12 | # Building this kernel without providing a defconfig or BSP | ||
13 | # configuration will result in build or boot errors. This is not a | ||
14 | # bug. | ||
15 | # | ||
16 | # Notes: | ||
17 | # | ||
18 | # patches: patches can be merged into the source git tree itself, | ||
19 | # added via the SRC_URI, or controlled via a BSP | ||
20 | # configuration. | ||
21 | # | ||
22 | # example configuration addition: | ||
23 | # SRC_URI += "file://smp.cfg" | ||
24 | # example patch addition: | ||
25 | # SRC_URI += "file://0001-linux-version-tweak.patch" | ||
26 | # example feature addition: | ||
27 | # SRC_URI += "file://feature.scc" | ||
28 | # | ||
29 | |||
30 | inherit kernel | ||
31 | require recipes-kernel/linux/linux-yocto.inc | ||
32 | |||
33 | {{ if kernel_choice == "custom" and custom_kernel_remote == "y": }} | ||
34 | SRC_URI = "{{=custom_kernel_remote_path}};protocol=git;bareclone=1" | ||
35 | {{ if kernel_choice == "custom" and custom_kernel_remote == "n": }} | ||
36 | SRC_URI = "git://{{=custom_kernel_local_path}};protocol=file;bareclone=1" | ||
37 | |||
38 | SRC_URI += "file://defconfig" | ||
39 | |||
40 | SRC_URI += "file://{{=machine}}.scc \ | ||
41 | file://{{=machine}}.cfg \ | ||
42 | file://{{=machine}}-user-config.cfg \ | ||
43 | file://{{=machine}}-user-patches.scc \ | ||
44 | " | ||
45 | |||
46 | {{ if kernel_choice == "custom" and custom_kernel_need_kbranch == "y" and custom_kernel_kbranch and custom_kernel_kbranch != "master": }} | ||
47 | KBRANCH = "{{=custom_kernel_kbranch}}" | ||
48 | |||
49 | LINUX_VERSION ?= "{{=custom_kernel_linux_version}}" | ||
50 | LINUX_VERSION_EXTENSION ?= "{{=custom_kernel_linux_version_extension}}" | ||
51 | |||
52 | SRCREV="{{=custom_kernel_srcrev}}" | ||
53 | |||
54 | PR = "r0" | ||
55 | PV = "${LINUX_VERSION}+git${SRCPV}" | ||
56 | |||
57 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/defconfig b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/defconfig new file mode 100644 index 0000000000..ceb0ffa30c --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/defconfig | |||
@@ -0,0 +1,5 @@ | |||
1 | # | ||
2 | # Placeholder for custom default kernel configuration. yocto-bsp will | ||
3 | # replace this file with a user-specified defconfig. | ||
4 | # | ||
5 | {{ if custom_kernel_defconfig: replace_file(of, custom_kernel_defconfig) }} | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}-user-config.cfg new file mode 100644 index 0000000000..17c8b503da --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}-user-config.cfg | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Used by yocto-kernel to manage config options. | ||
3 | # | ||
4 | # yocto-kernel may change the contents of this file in any | ||
5 | # way it sees fit, including removing comments like this, | ||
6 | # so don't manually make any modifications you don't want | ||
7 | # to lose. | ||
8 | # | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}-user-patches.scc new file mode 100644 index 0000000000..7a598d9118 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}-user-patches.scc | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Used by yocto-kernel to manage patches. | ||
3 | # | ||
4 | # yocto-kernel may change the contents of this file in any | ||
5 | # way it sees fit, including removing comments like this, | ||
6 | # so don't manually make any modifications you don't want | ||
7 | # to lose. | ||
8 | # | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}.cfg b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}.cfg new file mode 100644 index 0000000000..95170b12eb --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}.cfg | |||
@@ -0,0 +1,3 @@ | |||
1 | # | ||
2 | # A convenient place to add config options, nothing more. | ||
3 | # | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}.scc b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}.scc new file mode 100644 index 0000000000..2e3ca90793 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/{{ if kernel_choice == "custom": }} linux-yocto-custom/{{=machine}}.scc | |||
@@ -0,0 +1,17 @@ | |||
1 | # | ||
2 | # The top-level 'feature' for the {{=machine}} custom kernel. | ||
3 | # | ||
4 | # Essentially this is a convenient top-level container or starting | ||
5 | # point for adding lower-level config fragments and features. | ||
6 | # | ||
7 | |||
8 | # {{=machine}}.cfg in the linux-yocto-custom subdir is just a | ||
9 | # convenient place for adding random config fragments. | ||
10 | |||
11 | kconf hardware {{=machine}}.cfg | ||
12 | |||
13 | # These are used by yocto-kernel to add config fragments and features. | ||
14 | # Don't remove if you plan on using yocto-kernel with this BSP. | ||
15 | |||
16 | kconf hardware {{=machine}}-user-config.cfg | ||
17 | include {{=machine}}-user-patches.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/conf/machine/{{=machine}}.conf b/scripts/lib/bsp/substrate/target/arch/i386/conf/machine/{{=machine}}.conf new file mode 100644 index 0000000000..932fd79bb9 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/conf/machine/{{=machine}}.conf | |||
@@ -0,0 +1,68 @@ | |||
1 | #@TYPE: Machine | ||
2 | #@NAME: {{=machine}} | ||
3 | |||
4 | #@DESCRIPTION: Machine configuration for {{=machine}} systems | ||
5 | |||
6 | {{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }} | ||
7 | {{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }} | ||
8 | {{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }} | ||
9 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
10 | |||
11 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }} | ||
12 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }} | ||
13 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }} | ||
14 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
15 | PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%" | ||
16 | |||
17 | {{ input type:"choicelist" name:"tunefile" prio:"40" msg:"Which machine tuning would you like to use?" default:"tune_core2" }} | ||
18 | {{ input type:"choice" val:"tune_i586" msg:"i586 tuning optimizations" }} | ||
19 | {{ input type:"choice" val:"tune_atom" msg:"Atom tuning optimizations" }} | ||
20 | {{ input type:"choice" val:"tune_core2" msg:"Core2 tuning optimizations" }} | ||
21 | {{ if tunefile == "tune_i586": }} | ||
22 | require conf/machine/include/tune-i586.inc | ||
23 | {{ if tunefile == "tune_atom": }} | ||
24 | require conf/machine/include/tune-atom.inc | ||
25 | {{ if tunefile == "tune_core2": }} | ||
26 | DEFAULTTUNE="core2-32" | ||
27 | require conf/machine/include/tune-core2.inc | ||
28 | |||
29 | require conf/machine/include/x86-base.inc | ||
30 | |||
31 | MACHINE_FEATURES += "wifi efi pcbios" | ||
32 | |||
33 | {{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }} | ||
34 | |||
35 | {{ if xserver == "y" and (kernel_choice == "linux-yocto_3.14" or kernel_choice == "linux-yocto_3.10"): }} | ||
36 | {{ input type:"choicelist" name:"xserver_choice" prio:"50" msg:"Please select an xserver for this machine:" default:"xserver_i915" }} | ||
37 | {{ input type:"choice" val:"xserver_vesa" msg:"VESA xserver support" }} | ||
38 | {{ input type:"choice" val:"xserver_i915" msg:"i915 xserver support" }} | ||
39 | {{ input type:"choice" val:"xserver_i965" msg:"i965 xserver support" }} | ||
40 | |||
41 | {{ if xserver == "y" and kernel_choice == "custom": }} | ||
42 | {{ input type:"choicelist" name:"xserver_choice" prio:"50" msg:"Please select an xserver for this machine:" default:"xserver_i915" }} | ||
43 | {{ input type:"choice" val:"xserver_vesa" msg:"VESA xserver support" }} | ||
44 | {{ input type:"choice" val:"xserver_i915" msg:"i915 xserver support" }} | ||
45 | {{ input type:"choice" val:"xserver_i965" msg:"i965 xserver support" }} | ||
46 | |||
47 | {{ if xserver == "y" and kernel_choice != "linux-yocto_3.14" and kernel_choice != "linux-yocto_3.10" and kernel_choice != "custom": xserver_choice = "xserver_i915" }} | ||
48 | |||
49 | {{ if xserver == "y": }} | ||
50 | XSERVER ?= "${XSERVER_X86_BASE} \ | ||
51 | ${XSERVER_X86_EXT} \ | ||
52 | {{ if xserver == "y" and xserver_choice == "xserver_vesa": }} | ||
53 | ${XSERVER_X86_VESA} \ | ||
54 | {{ if xserver == "y" and xserver_choice == "xserver_i915": }} | ||
55 | ${XSERVER_X86_I915} \ | ||
56 | {{ if xserver == "y" and xserver_choice == "xserver_i965": }} | ||
57 | ${XSERVER_X86_I965} \ | ||
58 | {{ if xserver == "y": }} | ||
59 | " | ||
60 | |||
61 | MACHINE_EXTRA_RRECOMMENDS += "linux-firmware v86d" | ||
62 | |||
63 | GLIBC_ADDONS = "nptl" | ||
64 | |||
65 | EXTRA_OECONF_append_pn-matchbox-panel-2 = " --with-battery=acpi" | ||
66 | |||
67 | {{ if xserver == "y" and xserver_choice == "xserver_vesa": }} | ||
68 | APPEND += "video=vesafb vga=0x318" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/{{ if xserver == "y": }} xorg.conf b/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/{{ if xserver == "y": }} xorg.conf new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/{{ if xserver == "y": }} xorg.conf | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/{{ if xserver == "y": }} xserver-xf86-config_0.1.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/{{ if xserver == "y": }} xserver-xf86-config_0.1.bbappend new file mode 100644 index 0000000000..72d991c7e5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/{{ if xserver == "y": }} xserver-xf86-config_0.1.bbappend | |||
@@ -0,0 +1 @@ | |||
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall new file mode 100644 index 0000000000..a04e6c7852 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall | |||
@@ -0,0 +1,5 @@ | |||
1 | {{ if kernel_choice != "custom": }} | ||
2 | {{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (3.14) kernel? (y/n)" default:"y"}} | ||
3 | |||
4 | {{ if kernel_choice != "custom" and use_default_kernel == "n": }} | ||
5 | {{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_3.14"}} | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc new file mode 100644 index 0000000000..bfefb0d0a0 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc | |||
@@ -0,0 +1,15 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE preempt-rt | ||
3 | define KARCH i386 | ||
4 | |||
5 | include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
10 | |||
11 | # default policy for preempt-rt kernels | ||
12 | include cfg/usb-mass-storage.scc | ||
13 | include cfg/boot-live.scc | ||
14 | include features/latencytop/latencytop.scc | ||
15 | include features/profiling/profiling.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc new file mode 100644 index 0000000000..60b670dffc --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc | |||
@@ -0,0 +1,15 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE standard | ||
3 | define KARCH i386 | ||
4 | |||
5 | include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
10 | |||
11 | # default policy for standard kernels | ||
12 | include cfg/usb-mass-storage.scc | ||
13 | include cfg/boot-live.scc | ||
14 | include features/latencytop/latencytop.scc | ||
15 | include features/profiling/profiling.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc new file mode 100644 index 0000000000..ec44ef9485 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE tiny | ||
3 | define KARCH i386 | ||
4 | |||
5 | include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg new file mode 100644 index 0000000000..e93c0b8a08 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg | |||
@@ -0,0 +1,54 @@ | |||
1 | CONFIG_X86_32=y | ||
2 | CONFIG_MATOM=y | ||
3 | CONFIG_PRINTK=y | ||
4 | |||
5 | # Basic hardware support for the box - network, USB, PCI, sound | ||
6 | CONFIG_NETDEVICES=y | ||
7 | CONFIG_ATA=y | ||
8 | CONFIG_ATA_GENERIC=y | ||
9 | CONFIG_ATA_SFF=y | ||
10 | CONFIG_PCI=y | ||
11 | CONFIG_MMC=y | ||
12 | CONFIG_MMC_SDHCI=y | ||
13 | CONFIG_USB_SUPPORT=y | ||
14 | CONFIG_USB=y | ||
15 | CONFIG_USB_ARCH_HAS_EHCI=y | ||
16 | CONFIG_R8169=y | ||
17 | CONFIG_PATA_SCH=y | ||
18 | CONFIG_MMC_SDHCI_PCI=y | ||
19 | CONFIG_USB_EHCI_HCD=y | ||
20 | CONFIG_PCIEPORTBUS=y | ||
21 | CONFIG_NET=y | ||
22 | CONFIG_USB_UHCI_HCD=y | ||
23 | CONFIG_USB_OHCI_HCD=y | ||
24 | CONFIG_BLK_DEV_SD=y | ||
25 | CONFIG_CHR_DEV_SG=y | ||
26 | CONFIG_SOUND=y | ||
27 | CONFIG_SND=y | ||
28 | CONFIG_SND_HDA_INTEL=y | ||
29 | CONFIG_SATA_AHCI=y | ||
30 | CONFIG_AGP=y | ||
31 | CONFIG_PM=y | ||
32 | CONFIG_ACPI=y | ||
33 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | ||
34 | CONFIG_BACKLIGHT_CLASS_DEVICE=y | ||
35 | CONFIG_INPUT=y | ||
36 | |||
37 | # Make sure these are on, otherwise the bootup won't be fun | ||
38 | CONFIG_EXT3_FS=y | ||
39 | CONFIG_UNIX=y | ||
40 | CONFIG_INET=y | ||
41 | CONFIG_MODULES=y | ||
42 | CONFIG_SHMEM=y | ||
43 | CONFIG_TMPFS=y | ||
44 | CONFIG_PACKET=y | ||
45 | |||
46 | # Needed for booting (and using) USB memory sticks | ||
47 | CONFIG_BLK_DEV_LOOP=y | ||
48 | CONFIG_NLS_CODEPAGE_437=y | ||
49 | CONFIG_NLS_ISO8859_1=y | ||
50 | |||
51 | CONFIG_RD_GZIP=y | ||
52 | |||
53 | # Needed for booting (and using) CD images | ||
54 | CONFIG_BLK_DEV_SR=y | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc new file mode 100644 index 0000000000..eda1d62f11 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc | |||
@@ -0,0 +1,20 @@ | |||
1 | kconf hardware {{=machine}}.cfg | ||
2 | |||
3 | include features/intel-e1xxxx/intel-e100.scc | ||
4 | include features/intel-e1xxxx/intel-e1xxxx.scc | ||
5 | |||
6 | {{ if xserver == "y" and xserver_choice == "xserver_i915" or xserver_choice == "xserver_i965": }} | ||
7 | include features/i915/i915.scc | ||
8 | |||
9 | include features/serial/8250.scc | ||
10 | include features/ericsson-3g/f5521gw.scc | ||
11 | |||
12 | {{ if xserver == "y" and xserver_choice == "xserver_vesa": }} | ||
13 | include cfg/vesafb.scc | ||
14 | |||
15 | include cfg/usb-mass-storage.scc | ||
16 | include cfg/boot-live.scc | ||
17 | include features/power/intel.scc | ||
18 | |||
19 | kconf hardware {{=machine}}-user-config.cfg | ||
20 | include {{=machine}}-user-patches.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend new file mode 100644 index 0000000000..25c87a85ac --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend | |||
@@ -0,0 +1,25 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
7 | |||
8 | {{ if need_new_kbranch == "y": }} | ||
9 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
10 | |||
11 | {{ if need_new_kbranch == "n": }} | ||
12 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
13 | |||
14 | {{ if need_new_kbranch == "n": }} | ||
15 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
16 | |||
17 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}} | ||
18 | {{ if smp == "y": }} | ||
19 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
20 | |||
21 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
22 | file://{{=machine}}-user-config.cfg \ | ||
23 | file://{{=machine}}-user-patches.scc \ | ||
24 | file://{{=machine}}-user-features.scc \ | ||
25 | " | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend new file mode 100644 index 0000000000..08b1f88d1b --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-preempt-rt.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-rt_{{=machine}} ?= "f35992f80c81dc5fa1a97165dfd5cbb84661f7cb" | ||
31 | #SRCREV_meta_pn-linux-yocto-rt_{{=machine}} ?= "1b534b2f8bbe9b8a773268cfa30a4850346f6f5f" | ||
32 | #LINUX_VERSION = "3.10.9" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend new file mode 100644 index 0000000000..bc6968d832 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.10.9" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend new file mode 100644 index 0000000000..d221d5f2a4 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.14" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend new file mode 100644 index 0000000000..c1f26540a7 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "19f7e43b54aef08d58135ed2a897d77b624b320a" | ||
31 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "459165c1dd61c4e843c36e6a1abeb30949a20ba7" | ||
32 | #LINUX_VERSION = "3.10.9" \ No newline at end of file | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend new file mode 100644 index 0000000000..948d568cd1 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.14" \ No newline at end of file | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/COPYING.MIT b/scripts/lib/bsp/substrate/target/arch/layer/COPYING.MIT new file mode 100644 index 0000000000..89de354795 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/layer/COPYING.MIT | |||
@@ -0,0 +1,17 @@ | |||
1 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
2 | of this software and associated documentation files (the "Software"), to deal | ||
3 | in the Software without restriction, including without limitation the rights | ||
4 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
5 | copies of the Software, and to permit persons to whom the Software is | ||
6 | furnished to do so, subject to the following conditions: | ||
7 | |||
8 | The above copyright notice and this permission notice shall be included in | ||
9 | all copies or substantial portions of the Software. | ||
10 | |||
11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
13 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
14 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
15 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
16 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
17 | THE SOFTWARE. | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/README b/scripts/lib/bsp/substrate/target/arch/layer/README new file mode 100644 index 0000000000..943dfc4412 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/layer/README | |||
@@ -0,0 +1,64 @@ | |||
1 | This README file contains information on the contents of the | ||
2 | {{=layer_name}} layer. | ||
3 | |||
4 | Please see the corresponding sections below for details. | ||
5 | |||
6 | |||
7 | Dependencies | ||
8 | ============ | ||
9 | |||
10 | This layer depends on: | ||
11 | |||
12 | URI: git://git.openembedded.org/bitbake | ||
13 | branch: master | ||
14 | |||
15 | URI: git://git.openembedded.org/openembedded-core | ||
16 | layers: meta | ||
17 | branch: master | ||
18 | |||
19 | URI: git://git.yoctoproject.org/xxxx | ||
20 | layers: xxxx | ||
21 | branch: master | ||
22 | |||
23 | |||
24 | Patches | ||
25 | ======= | ||
26 | |||
27 | Please submit any patches against the {{=layer_name}} layer to the | ||
28 | xxxx mailing list (xxxx@zzzz.org) and cc: the maintainer: | ||
29 | |||
30 | Maintainer: XXX YYYYYY <xxx.yyyyyy@zzzzz.com> | ||
31 | |||
32 | |||
33 | Table of Contents | ||
34 | ================= | ||
35 | |||
36 | I. Adding the {{=layer_name}} layer to your build | ||
37 | II. Misc | ||
38 | |||
39 | |||
40 | I. Adding the {{=layer_name}} layer to your build | ||
41 | ================================================= | ||
42 | |||
43 | --- replace with specific instructions for the {{=layer_name}} layer --- | ||
44 | |||
45 | In order to use this layer, you need to make the build system aware of | ||
46 | it. | ||
47 | |||
48 | Assuming the {{=layer_name}} layer exists at the top-level of your | ||
49 | yocto build tree, you can add it to the build system by adding the | ||
50 | location of the {{=layer_name}} layer to bblayers.conf, along with any | ||
51 | other layers needed. e.g.: | ||
52 | |||
53 | BBLAYERS ?= " \ | ||
54 | /path/to/yocto/meta \ | ||
55 | /path/to/yocto/meta-yocto \ | ||
56 | /path/to/yocto/meta-yocto-bsp \ | ||
57 | /path/to/yocto/meta-{{=layer_name}} \ | ||
58 | " | ||
59 | |||
60 | |||
61 | II. Misc | ||
62 | ======== | ||
63 | |||
64 | --- replace with specific information about the {{=layer_name}} layer --- | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/conf/layer.conf b/scripts/lib/bsp/substrate/target/arch/layer/conf/layer.conf new file mode 100644 index 0000000000..bdffe17195 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/layer/conf/layer.conf | |||
@@ -0,0 +1,10 @@ | |||
1 | # We have a conf and classes directory, add to BBPATH | ||
2 | BBPATH .= ":${LAYERDIR}" | ||
3 | |||
4 | # We have recipes-* directories, add to BBFILES | ||
5 | BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \ | ||
6 | ${LAYERDIR}/recipes-*/*/*.bbappend" | ||
7 | |||
8 | BBFILE_COLLECTIONS += "{{=layer_name}}" | ||
9 | BBFILE_PATTERN_{{=layer_name}} = "^${LAYERDIR}/" | ||
10 | BBFILE_PRIORITY_{{=layer_name}} = "{{=layer_priority}}" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall b/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall new file mode 100644 index 0000000000..e2a89c3b5d --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall | |||
@@ -0,0 +1,14 @@ | |||
1 | {{ input type:"edit" name:"layer_priority" prio:"20" msg:"Please enter the layer priority you'd like to use for the layer:" default:"6"}} | ||
2 | |||
3 | {{ input type:"boolean" name:"create_example_recipe" prio:"20" msg:"Would you like to have an example recipe created? (y/n)" default:"n"}} | ||
4 | |||
5 | {{ if create_example_recipe == "y": }} | ||
6 | {{ input type:"edit" name:"example_recipe_name" prio:"20" msg:"Please enter the name you'd like to use for your example recipe:" default:"example"}} | ||
7 | |||
8 | {{ input type:"boolean" name:"create_example_bbappend" prio:"20" msg:"Would you like to have an example bbappend file created? (y/n)" default:"n"}} | ||
9 | |||
10 | {{ if create_example_bbappend == "y": }} | ||
11 | {{ input type:"edit" name:"example_bbappend_name" prio:"20" msg:"Please enter the name you'd like to use for your bbappend file:" default:"example"}} | ||
12 | |||
13 | {{ if create_example_bbappend == "y": }} | ||
14 | {{ input type:"edit" name:"example_bbappend_version" prio:"20" msg:"Please enter the version number you'd like to use for your bbappend file (this should match the recipe you're appending to):" default:"0.1"}} | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_bbappend == "y": }} recipes-example-bbappend/example-bbappend/{{=example_bbappend_name}}-{{=example_bbappend_version}}/example.patch b/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_bbappend == "y": }} recipes-example-bbappend/example-bbappend/{{=example_bbappend_name}}-{{=example_bbappend_version}}/example.patch new file mode 100644 index 0000000000..2000a34da5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_bbappend == "y": }} recipes-example-bbappend/example-bbappend/{{=example_bbappend_name}}-{{=example_bbappend_version}}/example.patch | |||
@@ -0,0 +1,12 @@ | |||
1 | # | ||
2 | # This is a non-functional placeholder file, here for example purposes | ||
3 | # only. | ||
4 | # | ||
5 | # If you had a patch for your recipe, you'd put it in this directory | ||
6 | # and reference it from your recipe's SRC_URI: | ||
7 | # | ||
8 | # SRC_URI += "file://example.patch" | ||
9 | # | ||
10 | # Note that you could also rename the directory containing this patch | ||
11 | # to remove the version number or simply rename it 'files'. Doing so | ||
12 | # allows you to use the same directory for multiple recipes. | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_bbappend == "y": }} recipes-example-bbappend/example-bbappend/{{=example_bbappend_name}}_{{=example_bbappend_version}}.bbappend b/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_bbappend == "y": }} recipes-example-bbappend/example-bbappend/{{=example_bbappend_name}}_{{=example_bbappend_version}}.bbappend new file mode 100644 index 0000000000..2e50ff668d --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_bbappend == "y": }} recipes-example-bbappend/example-bbappend/{{=example_bbappend_name}}_{{=example_bbappend_version}}.bbappend | |||
@@ -0,0 +1,8 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}-${PV}:" | ||
2 | |||
3 | # | ||
4 | # This .bbappend doesn't yet do anything - replace this text with | ||
5 | # modifications to the example_0.1.bb recipe, or whatever recipe it is | ||
6 | # that you want to modify with this .bbappend (make sure you change | ||
7 | # the recipe name (PN) and version (PV) to match). | ||
8 | # | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_recipe == "y": }} recipes-example/example/{{=example_recipe_name}}-0.1/example.patch b/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_recipe == "y": }} recipes-example/example/{{=example_recipe_name}}-0.1/example.patch new file mode 100644 index 0000000000..2000a34da5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_recipe == "y": }} recipes-example/example/{{=example_recipe_name}}-0.1/example.patch | |||
@@ -0,0 +1,12 @@ | |||
1 | # | ||
2 | # This is a non-functional placeholder file, here for example purposes | ||
3 | # only. | ||
4 | # | ||
5 | # If you had a patch for your recipe, you'd put it in this directory | ||
6 | # and reference it from your recipe's SRC_URI: | ||
7 | # | ||
8 | # SRC_URI += "file://example.patch" | ||
9 | # | ||
10 | # Note that you could also rename the directory containing this patch | ||
11 | # to remove the version number or simply rename it 'files'. Doing so | ||
12 | # allows you to use the same directory for multiple recipes. | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_recipe == "y": }} recipes-example/example/{{=example_recipe_name}}-0.1/helloworld.c b/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_recipe == "y": }} recipes-example/example/{{=example_recipe_name}}-0.1/helloworld.c new file mode 100644 index 0000000000..71f2e46b4e --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_recipe == "y": }} recipes-example/example/{{=example_recipe_name}}-0.1/helloworld.c | |||
@@ -0,0 +1,8 @@ | |||
1 | #include <stdio.h> | ||
2 | |||
3 | int main(int argc, char **argv) | ||
4 | { | ||
5 | printf("Hello World!\n"); | ||
6 | |||
7 | return 0; | ||
8 | } | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_recipe == "y": }} recipes-example/example/{{=example_recipe_name}}_0.1.bb b/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_recipe == "y": }} recipes-example/example/{{=example_recipe_name}}_0.1.bb new file mode 100644 index 0000000000..14bf344da5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/layer/{{ if create_example_recipe == "y": }} recipes-example/example/{{=example_recipe_name}}_0.1.bb | |||
@@ -0,0 +1,23 @@ | |||
1 | # | ||
2 | # This file was derived from the 'Hello World!' example recipe in the | ||
3 | # Yocto Project Development Manual. | ||
4 | # | ||
5 | |||
6 | DESCRIPTION = "Simple helloworld application" | ||
7 | SECTION = "examples" | ||
8 | LICENSE = "MIT" | ||
9 | LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302" | ||
10 | PR = "r0" | ||
11 | |||
12 | SRC_URI = "file://helloworld.c" | ||
13 | |||
14 | S = "${WORKDIR}" | ||
15 | |||
16 | do_compile() { | ||
17 | ${CC} helloworld.c -o helloworld | ||
18 | } | ||
19 | |||
20 | do_install() { | ||
21 | install -d ${D}${bindir} | ||
22 | install -m 0755 helloworld ${D}${bindir} | ||
23 | } | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/.gitignore b/scripts/lib/bsp/substrate/target/arch/mips/.gitignore new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/.gitignore | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/conf/machine/{{=machine}}.conf b/scripts/lib/bsp/substrate/target/arch/mips/conf/machine/{{=machine}}.conf new file mode 100644 index 0000000000..2e704263e1 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/conf/machine/{{=machine}}.conf | |||
@@ -0,0 +1,38 @@ | |||
1 | #@TYPE: Machine | ||
2 | #@NAME: {{=machine}} | ||
3 | |||
4 | #@DESCRIPTION: Machine configuration for {{=machine}} systems | ||
5 | |||
6 | require conf/machine/include/tune-mips32.inc | ||
7 | |||
8 | MACHINE_FEATURES = "screen keyboard pci usbhost ext2 ext3 serial" | ||
9 | |||
10 | KERNEL_IMAGETYPE = "vmlinux" | ||
11 | KERNEL_ALT_IMAGETYPE = "vmlinux.bin" | ||
12 | KERNEL_IMAGE_STRIP_EXTRA_SECTIONS = ".comment" | ||
13 | |||
14 | {{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }} | ||
15 | {{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }} | ||
16 | {{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }} | ||
17 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
18 | |||
19 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }} | ||
20 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }} | ||
21 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }} | ||
22 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
23 | PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%" | ||
24 | |||
25 | {{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }} | ||
26 | {{ if xserver == "y": }} | ||
27 | PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg" | ||
28 | XSERVER ?= "xserver-xorg \ | ||
29 | xf86-input-evdev \ | ||
30 | xf86-video-fbdev" | ||
31 | |||
32 | SERIAL_CONSOLE = "115200 ttyS0" | ||
33 | USE_VT ?= "0" | ||
34 | |||
35 | MACHINE_EXTRA_RRECOMMENDS = " kernel-modules" | ||
36 | |||
37 | IMAGE_FSTYPES ?= "jffs2 tar.bz2" | ||
38 | JFFS2_ERASEBLOCK = "0x10000" \ No newline at end of file | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall new file mode 100644 index 0000000000..a04e6c7852 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall | |||
@@ -0,0 +1,5 @@ | |||
1 | {{ if kernel_choice != "custom": }} | ||
2 | {{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (3.14) kernel? (y/n)" default:"y"}} | ||
3 | |||
4 | {{ if kernel_choice != "custom" and use_default_kernel == "n": }} | ||
5 | {{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_3.14"}} | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc new file mode 100644 index 0000000000..b0fb63ac6a --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE preempt-rt | ||
3 | define KARCH mips | ||
4 | |||
5 | include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc new file mode 100644 index 0000000000..326663a509 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE standard | ||
3 | define KARCH mips | ||
4 | |||
5 | include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc new file mode 100644 index 0000000000..4514765eb3 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE tiny | ||
3 | define KARCH mips | ||
4 | |||
5 | include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg new file mode 100644 index 0000000000..a1b333ca56 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg | |||
@@ -0,0 +1 @@ | |||
CONFIG_MIPS=y | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc new file mode 100644 index 0000000000..1ef01b6e3c --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc | |||
@@ -0,0 +1,7 @@ | |||
1 | kconf hardware {{=machine}}.cfg | ||
2 | |||
3 | include cfg/usb-mass-storage.scc | ||
4 | include cfg/fs/vfat.scc | ||
5 | |||
6 | kconf hardware {{=machine}}-user-config.cfg | ||
7 | include {{=machine}}-user-patches.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend new file mode 100644 index 0000000000..25c87a85ac --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend | |||
@@ -0,0 +1,25 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
7 | |||
8 | {{ if need_new_kbranch == "y": }} | ||
9 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
10 | |||
11 | {{ if need_new_kbranch == "n": }} | ||
12 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
13 | |||
14 | {{ if need_new_kbranch == "n": }} | ||
15 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
16 | |||
17 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}} | ||
18 | {{ if smp == "y": }} | ||
19 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
20 | |||
21 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
22 | file://{{=machine}}-user-config.cfg \ | ||
23 | file://{{=machine}}-user-patches.scc \ | ||
24 | file://{{=machine}}-user-features.scc \ | ||
25 | " | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend new file mode 100644 index 0000000000..08b1f88d1b --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-preempt-rt.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-rt_{{=machine}} ?= "f35992f80c81dc5fa1a97165dfd5cbb84661f7cb" | ||
31 | #SRCREV_meta_pn-linux-yocto-rt_{{=machine}} ?= "1b534b2f8bbe9b8a773268cfa30a4850346f6f5f" | ||
32 | #LINUX_VERSION = "3.10.9" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend new file mode 100644 index 0000000000..bc6968d832 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.10.9" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend new file mode 100644 index 0000000000..85544e812c --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.14" \ No newline at end of file | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend new file mode 100644 index 0000000000..1e814c54d7 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "19f7e43b54aef08d58135ed2a897d77b624b320a" | ||
31 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "459165c1dd61c4e843c36e6a1abeb30949a20ba7" | ||
32 | #LINUX_VERSION = "3.10.9" \ No newline at end of file | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend new file mode 100644 index 0000000000..ca7f8c5978 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.14" \ No newline at end of file | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/.gitignore b/scripts/lib/bsp/substrate/target/arch/powerpc/.gitignore new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/.gitignore | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/conf/machine/{{=machine}}.conf b/scripts/lib/bsp/substrate/target/arch/powerpc/conf/machine/{{=machine}}.conf new file mode 100644 index 0000000000..78fb5db22b --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/conf/machine/{{=machine}}.conf | |||
@@ -0,0 +1,74 @@ | |||
1 | #@TYPE: Machine | ||
2 | #@NAME: {{=machine}} | ||
3 | |||
4 | #@DESCRIPTION: Machine configuration for {{=machine}} systems | ||
5 | |||
6 | TARGET_FPU = "" | ||
7 | |||
8 | {{ input type:"choicelist" name:"tunefile" prio:"40" msg:"Which machine tuning would you like to use?" default:"tune_ppce300c3" }} | ||
9 | {{ input type:"choice" val:"tune_ppc476" msg:"ppc476 tuning optimizations" }} | ||
10 | {{ input type:"choice" val:"tune_ppc603e" msg:"ppc603e tuning optimizations" }} | ||
11 | {{ input type:"choice" val:"tune_ppc7400" msg:"ppc7400 tuning optimizations" }} | ||
12 | {{ input type:"choice" val:"tune_ppce300c2" msg:"ppce300c2 tuning optimizations" }} | ||
13 | {{ input type:"choice" val:"tune_ppce300c3" msg:"ppce300c3 tuning optimizations" }} | ||
14 | {{ input type:"choice" val:"tune_ppce500" msg:"ppce500 tuning optimizations" }} | ||
15 | {{ input type:"choice" val:"tune_ppce500mc" msg:"ppce500mc tuning optimizations" }} | ||
16 | {{ input type:"choice" val:"tune_ppce500v2" msg:"ppce500v2 tuning optimizations" }} | ||
17 | {{ input type:"choice" val:"tune_ppce5500" msg:"ppce5500 tuning optimizations" }} | ||
18 | {{ input type:"choice" val:"tune_ppce6500" msg:"ppce6500 tuning optimizations" }} | ||
19 | {{ if tunefile == "tune_ppc476": }} | ||
20 | include conf/machine/include/tune-ppc476.inc | ||
21 | {{ if tunefile == "tune_ppc603e": }} | ||
22 | include conf/machine/include/tune-ppc603e.inc | ||
23 | {{ if tunefile == "tune_ppc7400": }} | ||
24 | include conf/machine/include/tune-ppc7400.inc | ||
25 | {{ if tunefile == "tune_ppce300c2": }} | ||
26 | include conf/machine/include/tune-ppce300c2.inc | ||
27 | {{ if tunefile == "tune_ppce300c3": }} | ||
28 | include conf/machine/include/tune-ppce300c3.inc | ||
29 | {{ if tunefile == "tune_ppce500": }} | ||
30 | include conf/machine/include/tune-ppce500.inc | ||
31 | {{ if tunefile == "tune_ppce500mc": }} | ||
32 | include conf/machine/include/tune-ppce500mc.inc | ||
33 | {{ if tunefile == "tune_ppce500v2": }} | ||
34 | include conf/machine/include/tune-ppce500v2.inc | ||
35 | {{ if tunefile == "tune_ppce5500": }} | ||
36 | include conf/machine/include/tune-ppce5500.inc | ||
37 | {{ if tunefile == "tune_ppce6500": }} | ||
38 | include conf/machine/include/tune-ppce6500.inc | ||
39 | |||
40 | KERNEL_IMAGETYPE = "uImage" | ||
41 | |||
42 | EXTRA_IMAGEDEPENDS += "u-boot" | ||
43 | UBOOT_MACHINE_{{=machine}} = "MPC8315ERDB_config" | ||
44 | |||
45 | SERIAL_CONSOLE = "115200 ttyS0" | ||
46 | |||
47 | MACHINE_FEATURES = "keyboard pci ext2 ext3 serial" | ||
48 | |||
49 | {{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }} | ||
50 | {{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }} | ||
51 | {{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }} | ||
52 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
53 | |||
54 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }} | ||
55 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }} | ||
56 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }} | ||
57 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
58 | PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%" | ||
59 | |||
60 | {{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }} | ||
61 | {{ if xserver == "y": }} | ||
62 | PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg" | ||
63 | XSERVER ?= "xserver-xorg \ | ||
64 | xf86-input-evdev \ | ||
65 | xf86-video-fbdev" | ||
66 | |||
67 | PREFERRED_VERSION_u-boot ?= "v2013.07%" | ||
68 | {{ input type:"edit" name:"uboot_entrypoint" prio:"40" msg:"Please specify a value for UBOOT_ENTRYPOINT:" default:"0x00000000" }} | ||
69 | UBOOT_ENTRYPOINT = "{{=uboot_entrypoint}}" | ||
70 | |||
71 | {{ input type:"edit" name:"kernel_devicetree" prio:"40" msg:"Please specify a [arch/powerpc/boot/dts/xxx] value for KERNEL_DEVICETREE:" default:"mpc8315erdb.dts" }} | ||
72 | KERNEL_DEVICETREE = "${S}/arch/powerpc/boot/dts/{{=kernel_devicetree}}" | ||
73 | |||
74 | MACHINE_EXTRA_RRECOMMENDS = " kernel-modules" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall new file mode 100644 index 0000000000..a04e6c7852 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall | |||
@@ -0,0 +1,5 @@ | |||
1 | {{ if kernel_choice != "custom": }} | ||
2 | {{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (3.14) kernel? (y/n)" default:"y"}} | ||
3 | |||
4 | {{ if kernel_choice != "custom" and use_default_kernel == "n": }} | ||
5 | {{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_3.14"}} | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc new file mode 100644 index 0000000000..1da7b0c892 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE preempt-rt | ||
3 | define KARCH powerpc | ||
4 | |||
5 | include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc new file mode 100644 index 0000000000..53a74a6ca2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE standard | ||
3 | define KARCH powerpc | ||
4 | |||
5 | include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc new file mode 100644 index 0000000000..4ca6224774 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE tiny | ||
3 | define KARCH powerpc | ||
4 | |||
5 | include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg new file mode 100644 index 0000000000..9f37d07553 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg | |||
@@ -0,0 +1,163 @@ | |||
1 | .......................................................................... | ||
2 | . WARNING | ||
3 | . | ||
4 | . This file is a kernel configuration fragment, and not a full kernel | ||
5 | . configuration file. The final kernel configuration is made up of | ||
6 | . an assembly of processed fragments, each of which is designed to | ||
7 | . capture a specific part of the final configuration (e.g. platform | ||
8 | . configuration, feature configuration, and board specific hardware | ||
9 | . configuration). For more information on kernel configuration, please | ||
10 | . consult the product documentation. | ||
11 | . | ||
12 | .......................................................................... | ||
13 | CONFIG_PPC32=y | ||
14 | CONFIG_PPC_OF=y | ||
15 | CONFIG_PPC_UDBG_16550=y | ||
16 | |||
17 | # | ||
18 | # Processor support | ||
19 | # | ||
20 | CONFIG_PPC_83xx=y | ||
21 | |||
22 | # | ||
23 | # Platform support | ||
24 | # | ||
25 | CONFIG_MPC831x_RDB=y | ||
26 | # CONFIG_PPC_CHRP is not set | ||
27 | # CONFIG_PPC_PMAC is not set | ||
28 | |||
29 | # | ||
30 | # Bus options | ||
31 | # | ||
32 | CONFIG_PCI=y | ||
33 | |||
34 | # | ||
35 | # Memory Technology Devices (MTD) | ||
36 | # | ||
37 | CONFIG_MTD=y | ||
38 | CONFIG_MTD_PARTITIONS=y | ||
39 | CONFIG_MTD_CMDLINE_PARTS=y | ||
40 | CONFIG_MTD_OF_PARTS=y | ||
41 | |||
42 | # | ||
43 | # User Modules And Translation Layers | ||
44 | # | ||
45 | CONFIG_MTD_CHAR=y | ||
46 | CONFIG_MTD_BLOCK=y | ||
47 | |||
48 | # | ||
49 | # RAM/ROM/Flash chip drivers | ||
50 | # | ||
51 | CONFIG_MTD_CFI=y | ||
52 | CONFIG_MTD_CFI_AMDSTD=y | ||
53 | |||
54 | # | ||
55 | # Mapping drivers for chip access | ||
56 | # | ||
57 | CONFIG_MTD_PHYSMAP_OF=y | ||
58 | |||
59 | # | ||
60 | # NAND Flash Device Drivers | ||
61 | # | ||
62 | CONFIG_MTD_NAND=y | ||
63 | |||
64 | # | ||
65 | # Ethernet (1000 Mbit) | ||
66 | # | ||
67 | CONFIG_GIANFAR=y | ||
68 | |||
69 | # | ||
70 | # Serial drivers | ||
71 | # | ||
72 | CONFIG_SERIAL_8250=y | ||
73 | CONFIG_SERIAL_8250_CONSOLE=y | ||
74 | CONFIG_SERIAL_8250_NR_UARTS=2 | ||
75 | |||
76 | # | ||
77 | # Watchdog Device Drivers | ||
78 | # | ||
79 | CONFIG_8xxx_WDT=y | ||
80 | |||
81 | # | ||
82 | # I2C support | ||
83 | # | ||
84 | CONFIG_I2C=y | ||
85 | CONFIG_I2C_CHARDEV=y | ||
86 | |||
87 | # | ||
88 | # I2C Hardware Bus support | ||
89 | # | ||
90 | CONFIG_I2C_MPC=y | ||
91 | |||
92 | CONFIG_SENSORS_LM75=y | ||
93 | |||
94 | CONFIG_MISC_DEVICES=y | ||
95 | |||
96 | # | ||
97 | # Miscellaneous I2C Chip support | ||
98 | # | ||
99 | CONFIG_EEPROM_AT24=y | ||
100 | |||
101 | # | ||
102 | # SPI support | ||
103 | # | ||
104 | CONFIG_SPI=y | ||
105 | # CONFIG_SPI_DEBUG is not set | ||
106 | CONFIG_SPI_MASTER=y | ||
107 | |||
108 | # | ||
109 | # SPI Master Controller Drivers | ||
110 | # | ||
111 | CONFIG_SPI_MPC8xxx=y | ||
112 | |||
113 | # | ||
114 | # SPI Protocol Masters | ||
115 | # | ||
116 | CONFIG_HWMON=y | ||
117 | |||
118 | # | ||
119 | # SCSI device support | ||
120 | # | ||
121 | CONFIG_SCSI=y | ||
122 | CONFIG_BLK_DEV_SD=y | ||
123 | CONFIG_CHR_DEV_SG=y | ||
124 | CONFIG_SCSI_LOGGING=y | ||
125 | |||
126 | CONFIG_ATA=y | ||
127 | CONFIG_ATA_VERBOSE_ERROR=y | ||
128 | CONFIG_SATA_FSL=y | ||
129 | CONFIG_ATA_SFF=y | ||
130 | |||
131 | # | ||
132 | # USB support | ||
133 | # | ||
134 | CONFIG_USB=m | ||
135 | CONFIG_USB_DEVICEFS=y | ||
136 | |||
137 | # | ||
138 | # USB Host Controller Drivers | ||
139 | # | ||
140 | CONFIG_USB_EHCI_HCD=m | ||
141 | CONFIG_USB_EHCI_FSL=y | ||
142 | CONFIG_USB_STORAGE=m | ||
143 | |||
144 | # | ||
145 | # Real Time Clock | ||
146 | # | ||
147 | CONFIG_RTC_CLASS=y | ||
148 | |||
149 | # | ||
150 | # I2C RTC drivers | ||
151 | # | ||
152 | CONFIG_RTC_DRV_DS1307=y | ||
153 | |||
154 | CONFIG_KGDB_8250=m | ||
155 | |||
156 | CONFIG_CRYPTO_DEV_TALITOS=m | ||
157 | |||
158 | CONFIG_FSL_DMA=y | ||
159 | |||
160 | CONFIG_MMC=y | ||
161 | CONFIG_MMC_SPI=m | ||
162 | |||
163 | CONFIG_USB_FSL_MPH_DR_OF=y | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc new file mode 100644 index 0000000000..c9fd468180 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | kconf hardware {{=machine}}.cfg | ||
2 | |||
3 | include cfg/usb-mass-storage.scc | ||
4 | include cfg/fs/vfat.scc | ||
5 | |||
6 | include cfg/dmaengine.scc | ||
7 | |||
8 | kconf hardware {{=machine}}-user-config.cfg | ||
9 | include {{=machine}}-user-patches.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend new file mode 100644 index 0000000000..25c87a85ac --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend | |||
@@ -0,0 +1,25 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
7 | |||
8 | {{ if need_new_kbranch == "y": }} | ||
9 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
10 | |||
11 | {{ if need_new_kbranch == "n": }} | ||
12 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
13 | |||
14 | {{ if need_new_kbranch == "n": }} | ||
15 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
16 | |||
17 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}} | ||
18 | {{ if smp == "y": }} | ||
19 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
20 | |||
21 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
22 | file://{{=machine}}-user-config.cfg \ | ||
23 | file://{{=machine}}-user-patches.scc \ | ||
24 | file://{{=machine}}-user-features.scc \ | ||
25 | " | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend new file mode 100644 index 0000000000..00c8c68933 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-preempt-rt.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-rt_{{=machine}} ?= "f35992f80c81dc5fa1a97165dfd5cbb84661f7cb" | ||
31 | #SRCREV_meta_pn-linux-yocto-rt_{{=machine}} ?= "1b534b2f8bbe9b8a773268cfa30a4850346f6f5f" | ||
32 | #LINUX_VERSION = "3.10.9" \ No newline at end of file | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend new file mode 100644 index 0000000000..bc6968d832 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.10.9" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend new file mode 100644 index 0000000000..d221d5f2a4 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.14" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend new file mode 100644 index 0000000000..a61f5ccb80 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "19f7e43b54aef08d58135ed2a897d77b624b320a" | ||
31 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "459165c1dd61c4e843c36e6a1abeb30949a20ba7" | ||
32 | #LINUX_VERSION = "3.10.9" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend new file mode 100644 index 0000000000..aebda9b3a5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.14" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/conf/machine/{{=machine}}.conf b/scripts/lib/bsp/substrate/target/arch/qemu/conf/machine/{{=machine}}.conf new file mode 100644 index 0000000000..782ac217d9 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/conf/machine/{{=machine}}.conf | |||
@@ -0,0 +1,68 @@ | |||
1 | #@TYPE: Machine | ||
2 | #@NAME: {{=machine}} | ||
3 | |||
4 | #@DESCRIPTION: Machine configuration for {{=machine}} systems | ||
5 | |||
6 | {{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }} | ||
7 | {{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }} | ||
8 | {{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }} | ||
9 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
10 | |||
11 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }} | ||
12 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }} | ||
13 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }} | ||
14 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
15 | PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%" | ||
16 | |||
17 | {{ if qemuarch == "i386" or qemuarch == "x86_64": }} | ||
18 | PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg" | ||
19 | PREFERRED_PROVIDER_virtual/libgl ?= "mesa" | ||
20 | PREFERRED_PROVIDER_virtual/libgles1 ?= "mesa" | ||
21 | PREFERRED_PROVIDER_virtual/libgles2 ?= "mesa" | ||
22 | |||
23 | {{ input type:"choicelist" name:"qemuarch" prio:"5" msg:"Which qemu architecture would you like to use?" default:"i386" }} | ||
24 | {{ input type:"choice" val:"i386" msg:"i386 (32-bit)" }} | ||
25 | {{ input type:"choice" val:"x86_64" msg:"x86_64 (64-bit)" }} | ||
26 | {{ input type:"choice" val:"arm" msg:"ARM (32-bit)" }} | ||
27 | {{ input type:"choice" val:"powerpc" msg:"PowerPC (32-bit)" }} | ||
28 | {{ input type:"choice" val:"mips" msg:"MIPS (32-bit)" }} | ||
29 | {{ if qemuarch == "i386": }} | ||
30 | require conf/machine/include/qemu.inc | ||
31 | require conf/machine/include/tune-i586.inc | ||
32 | {{ if qemuarch == "x86_64": }} | ||
33 | require conf/machine/include/qemu.inc | ||
34 | require conf/machine/include/tune-x86_64.inc | ||
35 | {{ if qemuarch == "arm": }} | ||
36 | require conf/machine/include/qemu.inc | ||
37 | require conf/machine/include/tune-arm926ejs.inc | ||
38 | {{ if qemuarch == "powerpc": }} | ||
39 | require conf/machine/include/qemu.inc | ||
40 | require conf/machine/include/tune-ppc603e.inc | ||
41 | {{ if qemuarch == "mips": }} | ||
42 | require conf/machine/include/qemu.inc | ||
43 | require conf/machine/include/tune-mips32.inc | ||
44 | |||
45 | {{ if qemuarch == "i386" or qemuarch == "x86_64": }} | ||
46 | MACHINE_FEATURES += "x86" | ||
47 | KERNEL_IMAGETYPE = "bzImage" | ||
48 | SERIAL_CONSOLE = "115200 ttyS0" | ||
49 | XSERVER = "xserver-xorg \ | ||
50 | ${@base_contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-swrast', '', d)} \ | ||
51 | xf86-input-vmmouse \ | ||
52 | xf86-input-keyboard \ | ||
53 | xf86-input-evdev \ | ||
54 | xf86-video-vmware" | ||
55 | |||
56 | {{ if qemuarch == "arm": }} | ||
57 | KERNEL_IMAGETYPE = "zImage" | ||
58 | SERIAL_CONSOLE = "115200 ttyAMA0" | ||
59 | |||
60 | {{ if qemuarch == "powerpc": }} | ||
61 | KERNEL_IMAGETYPE = "vmlinux" | ||
62 | SERIAL_CONSOLE = "115200 ttyS0" | ||
63 | |||
64 | {{ if qemuarch == "mips": }} | ||
65 | KERNEL_IMAGETYPE = "vmlinux" | ||
66 | KERNEL_ALT_IMAGETYPE = "vmlinux.bin" | ||
67 | SERIAL_CONSOLE = "115200 ttyS0" | ||
68 | MACHINE_EXTRA_RRECOMMENDS = " kernel-modules" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown/{{=machine}}/interfaces b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown/{{=machine}}/interfaces new file mode 100644 index 0000000000..16967763e5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown/{{=machine}}/interfaces | |||
@@ -0,0 +1,5 @@ | |||
1 | # /etc/network/interfaces -- configuration file for ifup(8), ifdown(8) | ||
2 | |||
3 | # The loopback interface | ||
4 | auto lo | ||
5 | iface lo inet loopback | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown_1.0.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown_1.0.bbappend new file mode 100644 index 0000000000..72d991c7e5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown_1.0.bbappend | |||
@@ -0,0 +1 @@ | |||
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/xorg.conf b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/xorg.conf new file mode 100644 index 0000000000..13519804bc --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/xorg.conf | |||
@@ -0,0 +1,77 @@ | |||
1 | |||
2 | Section "Files" | ||
3 | EndSection | ||
4 | |||
5 | Section "InputDevice" | ||
6 | Identifier "Generic Keyboard" | ||
7 | Driver "evdev" | ||
8 | Option "CoreKeyboard" | ||
9 | Option "Device" "/dev/input/by-path/platform-i8042-serio-0-event-kbd" | ||
10 | Option "XkbRules" "xorg" | ||
11 | Option "XkbModel" "evdev" | ||
12 | Option "XkbLayout" "us" | ||
13 | EndSection | ||
14 | |||
15 | Section "InputDevice" | ||
16 | Identifier "Configured Mouse" | ||
17 | {{ if qemuarch == "arm" or qemuarch == "powerpc" or qemuarch == "mips": }} | ||
18 | Driver "mouse" | ||
19 | {{ if qemuarch == "i386" or qemuarch == "x86_64": }} | ||
20 | Driver "vmmouse" | ||
21 | |||
22 | Option "CorePointer" | ||
23 | Option "Device" "/dev/input/mice" | ||
24 | Option "Protocol" "ImPS/2" | ||
25 | Option "ZAxisMapping" "4 5" | ||
26 | Option "Emulate3Buttons" "true" | ||
27 | EndSection | ||
28 | |||
29 | Section "InputDevice" | ||
30 | Identifier "Qemu Tablet" | ||
31 | Driver "evdev" | ||
32 | Option "CorePointer" | ||
33 | Option "Device" "/dev/input/touchscreen0" | ||
34 | Option "USB" "on" | ||
35 | EndSection | ||
36 | |||
37 | Section "Device" | ||
38 | Identifier "Graphics Controller" | ||
39 | {{ if qemuarch == "arm" or qemuarch == "powerpc" or qemuarch == "mips": }} | ||
40 | Driver "fbdev" | ||
41 | {{ if qemuarch == "i386" or qemuarch == "x86_64": }} | ||
42 | Driver "vmware" | ||
43 | |||
44 | EndSection | ||
45 | |||
46 | Section "Monitor" | ||
47 | Identifier "Generic Monitor" | ||
48 | Option "DPMS" | ||
49 | # 1024x600 59.85 Hz (CVT) hsync: 37.35 kHz; pclk: 49.00 MHz | ||
50 | Modeline "1024x600_60.00" 49.00 1024 1072 1168 1312 600 603 613 624 -hsync +vsync | ||
51 | # 640x480 @ 60Hz (Industry standard) hsync: 31.5kHz | ||
52 | ModeLine "640x480" 25.2 640 656 752 800 480 490 492 525 -hsync -vsync | ||
53 | # 640x480 @ 72Hz (VESA) hsync: 37.9kHz | ||
54 | ModeLine "640x480" 31.5 640 664 704 832 480 489 491 520 -hsync -vsync | ||
55 | # 640x480 @ 75Hz (VESA) hsync: 37.5kHz | ||
56 | ModeLine "640x480" 31.5 640 656 720 840 480 481 484 500 -hsync -vsync | ||
57 | # 640x480 @ 85Hz (VESA) hsync: 43.3kHz | ||
58 | ModeLine "640x480" 36.0 640 696 752 832 480 481 484 509 -hsync -vsync | ||
59 | EndSection | ||
60 | |||
61 | Section "Screen" | ||
62 | Identifier "Default Screen" | ||
63 | Device "Graphics Controller" | ||
64 | Monitor "Generic Monitor" | ||
65 | SubSection "Display" | ||
66 | Modes "640x480" | ||
67 | EndSubSection | ||
68 | EndSection | ||
69 | |||
70 | Section "ServerLayout" | ||
71 | Identifier "Default Layout" | ||
72 | Screen "Default Screen" | ||
73 | InputDevice "Generic Keyboard" | ||
74 | # InputDevice "Configured Mouse" | ||
75 | InputDevice "QEMU Tablet" | ||
76 | Option "AllowEmptyInput" "no" | ||
77 | EndSection | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend new file mode 100644 index 0000000000..72d991c7e5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend | |||
@@ -0,0 +1 @@ | |||
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall new file mode 100644 index 0000000000..a04e6c7852 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall | |||
@@ -0,0 +1,5 @@ | |||
1 | {{ if kernel_choice != "custom": }} | ||
2 | {{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (3.14) kernel? (y/n)" default:"y"}} | ||
3 | |||
4 | {{ if kernel_choice != "custom" and use_default_kernel == "n": }} | ||
5 | {{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_3.14"}} | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc new file mode 100644 index 0000000000..af34437d0a --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE preempt-rt | ||
3 | define KARCH {{=qemuarch}} | ||
4 | |||
5 | include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc new file mode 100644 index 0000000000..0e20023764 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc | |||
@@ -0,0 +1,16 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE standard | ||
3 | define KARCH {{=qemuarch}} | ||
4 | |||
5 | {{ if qemuarch == "i386" or qemuarch == "x86_64": }} | ||
6 | include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
7 | {{ if qemuarch == "arm": }} | ||
8 | include bsp/arm-versatile-926ejs/arm-versatile-926ejs-standard | ||
9 | {{ if qemuarch == "powerpc": }} | ||
10 | include bsp/qemu-ppc32/qemu-ppc32-standard | ||
11 | {{ if qemuarch == "mips": }} | ||
12 | include bsp/mti-malta32/mti-malta32-be-standard | ||
13 | {{ if need_new_kbranch == "y": }} | ||
14 | branch {{=machine}} | ||
15 | |||
16 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc new file mode 100644 index 0000000000..10c4dac44d --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE tiny | ||
3 | define KARCH {{=qemuarch}} | ||
4 | |||
5 | include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc new file mode 100644 index 0000000000..f3739be1e6 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc | |||
@@ -0,0 +1,4 @@ | |||
1 | kconf hardware {{=machine}}.cfg | ||
2 | |||
3 | kconf hardware {{=machine}}-user-config.cfg | ||
4 | include {{=machine}}-user-patches.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend new file mode 100644 index 0000000000..7599ecb0a5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend | |||
@@ -0,0 +1,49 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
7 | |||
8 | {{ if need_new_kbranch == "y" and qemuarch == "arm": }} | ||
9 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base your new BSP branch on:" default:"standard/base" }} | ||
10 | |||
11 | {{ if need_new_kbranch == "n" and qemuarch == "arm": }} | ||
12 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose an existing machine branch to use for this BSP:" default:"standard/arm-versatile-926ejs" }} | ||
13 | |||
14 | {{ if need_new_kbranch == "y" and qemuarch == "powerpc": }} | ||
15 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
16 | |||
17 | {{ if need_new_kbranch == "n" and qemuarch == "powerpc": }} | ||
18 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/qemuppc" }} | ||
19 | |||
20 | {{ if need_new_kbranch == "y" and qemuarch == "i386": }} | ||
21 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc/base" }} | ||
22 | |||
23 | {{ if need_new_kbranch == "n" and qemuarch == "i386": }} | ||
24 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc/base" }} | ||
25 | |||
26 | {{ if need_new_kbranch == "y" and qemuarch == "x86_64": }} | ||
27 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc-64" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc-64/base" }} | ||
28 | |||
29 | {{ if need_new_kbranch == "n" and qemuarch == "x86_64": }} | ||
30 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc-64" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc-64/base" }} | ||
31 | |||
32 | {{ if need_new_kbranch == "y" and qemuarch == "mips": }} | ||
33 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
34 | |||
35 | {{ if need_new_kbranch == "n" and qemuarch == "mips": }} | ||
36 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta32" }} | ||
37 | |||
38 | {{ if need_new_kbranch == "n": }} | ||
39 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
40 | |||
41 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}} | ||
42 | {{ if smp == "y": }} | ||
43 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
44 | |||
45 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
46 | file://{{=machine}}-user-config.cfg \ | ||
47 | file://{{=machine}}-user-patches.scc \ | ||
48 | file://{{=machine}}-user-features.scc \ | ||
49 | " | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend new file mode 100644 index 0000000000..73b6e34839 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend | |||
@@ -0,0 +1,55 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
7 | |||
8 | {{ if need_new_kbranch == "y" and qemuarch == "arm": }} | ||
9 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
10 | |||
11 | {{ if need_new_kbranch == "n" and qemuarch == "arm": }} | ||
12 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
13 | |||
14 | {{ if need_new_kbranch == "y" and qemuarch == "powerpc": }} | ||
15 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
16 | |||
17 | {{ if need_new_kbranch == "n" and qemuarch == "powerpc": }} | ||
18 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/qemuppc" }} | ||
19 | |||
20 | {{ if need_new_kbranch == "y" and qemuarch == "i386": }} | ||
21 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
22 | |||
23 | {{ if need_new_kbranch == "n" and qemuarch == "i386": }} | ||
24 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
25 | |||
26 | {{ if need_new_kbranch == "y" and qemuarch == "x86_64": }} | ||
27 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
28 | |||
29 | {{ if need_new_kbranch == "n" and qemuarch == "x86_64": }} | ||
30 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
31 | |||
32 | {{ if need_new_kbranch == "y" and qemuarch == "mips": }} | ||
33 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
34 | |||
35 | {{ if need_new_kbranch == "n" and qemuarch == "mips": }} | ||
36 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
37 | |||
38 | {{ if need_new_kbranch == "n": }} | ||
39 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
40 | |||
41 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
42 | {{ if smp == "y": }} | ||
43 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
44 | |||
45 | SRC_URI += "file://{{=machine}}-preempt-rt.scc \ | ||
46 | file://{{=machine}}-user-config.cfg \ | ||
47 | file://{{=machine}}-user-patches.scc \ | ||
48 | file://{{=machine}}-user-features.scc \ | ||
49 | " | ||
50 | |||
51 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
52 | # the appropriate changes committed to the upstream linux-yocto repo | ||
53 | #SRCREV_machine_pn-linux-yocto-rt_{{=machine}} ?= "f35992f80c81dc5fa1a97165dfd5cbb84661f7cb" | ||
54 | #SRCREV_meta_pn-linux-yocto-rt_{{=machine}} ?= "1b534b2f8bbe9b8a773268cfa30a4850346f6f5f" | ||
55 | #LINUX_VERSION = "3.10.35" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend new file mode 100644 index 0000000000..da4e61ef83 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend | |||
@@ -0,0 +1,55 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
7 | |||
8 | {{ if need_new_kbranch == "y" and qemuarch == "arm": }} | ||
9 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
10 | |||
11 | {{ if need_new_kbranch == "n" and qemuarch == "arm": }} | ||
12 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
13 | |||
14 | {{ if need_new_kbranch == "y" and qemuarch == "powerpc": }} | ||
15 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
16 | |||
17 | {{ if need_new_kbranch == "n" and qemuarch == "powerpc": }} | ||
18 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
19 | |||
20 | {{ if need_new_kbranch == "y" and qemuarch == "i386": }} | ||
21 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
22 | |||
23 | {{ if need_new_kbranch == "n" and qemuarch == "i386": }} | ||
24 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/common-pc" }} | ||
25 | |||
26 | {{ if need_new_kbranch == "y" and qemuarch == "x86_64": }} | ||
27 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
28 | |||
29 | {{ if need_new_kbranch == "n" and qemuarch == "x86_64": }} | ||
30 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
31 | |||
32 | {{ if need_new_kbranch == "y" and qemuarch == "mips": }} | ||
33 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
34 | |||
35 | {{ if need_new_kbranch == "n" and qemuarch == "mips": }} | ||
36 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
37 | |||
38 | {{ if need_new_kbranch == "n": }} | ||
39 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
40 | |||
41 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
42 | {{ if smp == "y": }} | ||
43 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
44 | |||
45 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
46 | file://{{=machine}}-user-config.cfg \ | ||
47 | file://{{=machine}}-user-patches.scc \ | ||
48 | file://{{=machine}}-user-features.scc \ | ||
49 | " | ||
50 | |||
51 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
52 | # the appropriate changes committed to the upstream linux-yocto repo | ||
53 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
54 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
55 | #LINUX_VERSION = "3.10.35" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.4.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.4.bbappend new file mode 100644 index 0000000000..013883ffeb --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.4.bbappend | |||
@@ -0,0 +1,55 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
7 | |||
8 | {{ if need_new_kbranch == "y" and qemuarch == "arm": }} | ||
9 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
10 | |||
11 | {{ if need_new_kbranch == "n" and qemuarch == "arm": }} | ||
12 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
13 | |||
14 | {{ if need_new_kbranch == "y" and qemuarch == "powerpc": }} | ||
15 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
16 | |||
17 | {{ if need_new_kbranch == "n" and qemuarch == "powerpc": }} | ||
18 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
19 | |||
20 | {{ if need_new_kbranch == "y" and qemuarch == "i386": }} | ||
21 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
22 | |||
23 | {{ if need_new_kbranch == "n" and qemuarch == "i386": }} | ||
24 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/common-pc" }} | ||
25 | |||
26 | {{ if need_new_kbranch == "y" and qemuarch == "x86_64": }} | ||
27 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
28 | |||
29 | {{ if need_new_kbranch == "n" and qemuarch == "x86_64": }} | ||
30 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
31 | |||
32 | {{ if need_new_kbranch == "y" and qemuarch == "mips": }} | ||
33 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
34 | |||
35 | {{ if need_new_kbranch == "n" and qemuarch == "mips": }} | ||
36 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
37 | |||
38 | {{ if need_new_kbranch == "n": }} | ||
39 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
40 | |||
41 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
42 | {{ if smp == "y": }} | ||
43 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
44 | |||
45 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
46 | file://{{=machine}}-user-config.cfg \ | ||
47 | file://{{=machine}}-user-patches.scc \ | ||
48 | file://{{=machine}}-user-features.scc \ | ||
49 | " | ||
50 | |||
51 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
52 | # the appropriate changes committed to the upstream linux-yocto repo | ||
53 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "0143c6ebb4a2d63b241df5f608b19f483f7eb9e0" | ||
54 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "8f55bee2403176a50cc0dd41811aa60fcf07243c" | ||
55 | #LINUX_VERSION = "3.14" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend new file mode 100644 index 0000000000..392ace6694 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend | |||
@@ -0,0 +1,55 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
7 | |||
8 | {{ if need_new_kbranch == "y" and qemuarch == "arm": }} | ||
9 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base your new BSP branch on:" default:"standard/base" }} | ||
10 | |||
11 | {{ if need_new_kbranch == "n" and qemuarch == "arm": }} | ||
12 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose an existing machine branch to use for this BSP:" default:"standard/arm-versatile-926ejs" }} | ||
13 | |||
14 | {{ if need_new_kbranch == "y" and qemuarch == "powerpc": }} | ||
15 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
16 | |||
17 | {{ if need_new_kbranch == "n" and qemuarch == "powerpc": }} | ||
18 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/qemuppc" }} | ||
19 | |||
20 | {{ if need_new_kbranch == "y" and qemuarch == "i386": }} | ||
21 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc/base" }} | ||
22 | |||
23 | {{ if need_new_kbranch == "n" and qemuarch == "i386": }} | ||
24 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc/base" }} | ||
25 | |||
26 | {{ if need_new_kbranch == "y" and qemuarch == "x86_64": }} | ||
27 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc-64" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc-64/base" }} | ||
28 | |||
29 | {{ if need_new_kbranch == "n" and qemuarch == "x86_64": }} | ||
30 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc-64" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc-64/base" }} | ||
31 | |||
32 | {{ if need_new_kbranch == "y" and qemuarch == "mips": }} | ||
33 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
34 | |||
35 | {{ if need_new_kbranch == "n" and qemuarch == "mips": }} | ||
36 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta32" }} | ||
37 | |||
38 | {{ if need_new_kbranch == "n": }} | ||
39 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
40 | |||
41 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}} | ||
42 | {{ if smp == "y": }} | ||
43 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
44 | |||
45 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
46 | file://{{=machine}}-user-config.cfg \ | ||
47 | file://{{=machine}}-user-patches.scc \ | ||
48 | file://{{=machine}}-user-features.scc \ | ||
49 | " | ||
50 | |||
51 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
52 | # the appropriate changes committed to the upstream linux-yocto repo | ||
53 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "b170394a475b96ecc92cbc9e4b002bed0a9f69c5" | ||
54 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "c2ed0f16fdec628242a682897d5d86df4547cf24" | ||
55 | #LINUX_VERSION = "3.10.35" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend new file mode 100644 index 0000000000..2cc9b87cf2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend | |||
@@ -0,0 +1,55 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
7 | |||
8 | {{ if need_new_kbranch == "y" and qemuarch == "arm": }} | ||
9 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base your new BSP branch on:" default:"standard/base" }} | ||
10 | |||
11 | {{ if need_new_kbranch == "n" and qemuarch == "arm": }} | ||
12 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose an existing machine branch to use for this BSP:" default:"standard/arm-versatile-926ejs" }} | ||
13 | |||
14 | {{ if need_new_kbranch == "y" and qemuarch == "powerpc": }} | ||
15 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
16 | |||
17 | {{ if need_new_kbranch == "n" and qemuarch == "powerpc": }} | ||
18 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/qemuppc" }} | ||
19 | |||
20 | {{ if need_new_kbranch == "y" and qemuarch == "i386": }} | ||
21 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc/base" }} | ||
22 | |||
23 | {{ if need_new_kbranch == "n" and qemuarch == "i386": }} | ||
24 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc/base" }} | ||
25 | |||
26 | {{ if need_new_kbranch == "y" and qemuarch == "x86_64": }} | ||
27 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc-64" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc-64/base" }} | ||
28 | |||
29 | {{ if need_new_kbranch == "n" and qemuarch == "x86_64": }} | ||
30 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc-64" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc-64/base" }} | ||
31 | |||
32 | {{ if need_new_kbranch == "y" and qemuarch == "mips": }} | ||
33 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
34 | |||
35 | {{ if need_new_kbranch == "n" and qemuarch == "mips": }} | ||
36 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta32" }} | ||
37 | |||
38 | {{ if need_new_kbranch == "n": }} | ||
39 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
40 | |||
41 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}} | ||
42 | {{ if smp == "y": }} | ||
43 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
44 | |||
45 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
46 | file://{{=machine}}-user-config.cfg \ | ||
47 | file://{{=machine}}-user-patches.scc \ | ||
48 | file://{{=machine}}-user-features.scc \ | ||
49 | " | ||
50 | |||
51 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
52 | # the appropriate changes committed to the upstream linux-yocto repo | ||
53 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "0143c6ebb4a2d63b241df5f608b19f483f7eb9e0" | ||
54 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "8f55bee2403176a50cc0dd41811aa60fcf07243c" | ||
55 | #LINUX_VERSION = "3.14" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/.gitignore b/scripts/lib/bsp/substrate/target/arch/x86_64/.gitignore new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/.gitignore | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/conf/machine/{{=machine}}.conf b/scripts/lib/bsp/substrate/target/arch/x86_64/conf/machine/{{=machine}}.conf new file mode 100644 index 0000000000..53e8e92e6d --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/conf/machine/{{=machine}}.conf | |||
@@ -0,0 +1,58 @@ | |||
1 | #@TYPE: Machine | ||
2 | #@NAME: {{=machine}} | ||
3 | |||
4 | #@DESCRIPTION: Machine configuration for {{=machine}} systems | ||
5 | |||
6 | {{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }} | ||
7 | {{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }} | ||
8 | {{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }} | ||
9 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
10 | |||
11 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }} | ||
12 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }} | ||
13 | {{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }} | ||
14 | PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}" | ||
15 | PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%" | ||
16 | |||
17 | {{ input type:"choicelist" name:"tunefile" prio:"40" msg:"Which machine tuning would you like to use?" default:"tune_core2" }} | ||
18 | {{ input type:"choice" val:"tune_core2" msg:"Core2 tuning optimizations" }} | ||
19 | {{ input type:"choice" val:"tune_corei7" msg:"Corei7 tuning optimizations" }} | ||
20 | {{ if tunefile == "tune_core2": }} | ||
21 | DEFAULTTUNE ?= "core2-64" | ||
22 | require conf/machine/include/tune-core2.inc | ||
23 | {{ if tunefile == "tune_corei7": }} | ||
24 | DEFAULTTUNE ?= "corei7-64" | ||
25 | require conf/machine/include/tune-corei7.inc | ||
26 | |||
27 | require conf/machine/include/x86-base.inc | ||
28 | |||
29 | MACHINE_FEATURES += "wifi efi pcbios" | ||
30 | |||
31 | {{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }} | ||
32 | |||
33 | {{ if xserver == "y": }} | ||
34 | {{ input type:"choicelist" name:"xserver_choice" prio:"50" msg:"Please select an xserver for this machine:" default:"xserver_i915" }} | ||
35 | |||
36 | {{ input type:"choice" val:"xserver_vesa" msg:"VESA xserver support" }} | ||
37 | {{ input type:"choice" val:"xserver_i915" msg:"i915 xserver support" }} | ||
38 | {{ input type:"choice" val:"xserver_i965" msg:"i965 xserver support" }} | ||
39 | {{ if xserver == "y": }} | ||
40 | XSERVER ?= "${XSERVER_X86_BASE} \ | ||
41 | ${XSERVER_X86_EXT} \ | ||
42 | {{ if xserver == "y" and xserver_choice == "xserver_vesa": }} | ||
43 | ${XSERVER_X86_VESA} \ | ||
44 | {{ if xserver == "y" and xserver_choice == "xserver_i915": }} | ||
45 | ${XSERVER_X86_I915} \ | ||
46 | {{ if xserver == "y" and xserver_choice == "xserver_i965": }} | ||
47 | ${XSERVER_X86_I965} \ | ||
48 | {{ if xserver == "y": }} | ||
49 | " | ||
50 | |||
51 | MACHINE_EXTRA_RRECOMMENDS += "linux-firmware v86d" | ||
52 | |||
53 | GLIBC_ADDONS = "nptl" | ||
54 | |||
55 | EXTRA_OECONF_append_pn-matchbox-panel-2 = " --with-battery=acpi" | ||
56 | |||
57 | {{ if xserver == "y" and xserver_choice == "xserver_vesa": }} | ||
58 | APPEND += "video=vesafb vga=0x318" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/{{ if xserver == "y": }} xorg.conf b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/{{ if xserver == "y": }} xorg.conf new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config/{{=machine}}/{{ if xserver == "y": }} xorg.conf | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/{{ if xserver == "y": }} xserver-xf86-config_0.1.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/{{ if xserver == "y": }} xserver-xf86-config_0.1.bbappend new file mode 100644 index 0000000000..72d991c7e5 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/{{ if xserver == "y": }} xserver-xf86-config_0.1.bbappend | |||
@@ -0,0 +1 @@ | |||
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall new file mode 100644 index 0000000000..a04e6c7852 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall | |||
@@ -0,0 +1,5 @@ | |||
1 | {{ if kernel_choice != "custom": }} | ||
2 | {{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (3.14) kernel? (y/n)" default:"y"}} | ||
3 | |||
4 | {{ if kernel_choice != "custom" and use_default_kernel == "n": }} | ||
5 | {{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_3.14"}} | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc new file mode 100644 index 0000000000..c9882590a8 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-preempt-rt.scc | |||
@@ -0,0 +1,15 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE preempt-rt | ||
3 | define KARCH x86_64 | ||
4 | |||
5 | include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
10 | |||
11 | # default policy for preempt-rt kernels | ||
12 | include cfg/usb-mass-storage.scc | ||
13 | include cfg/boot-live.scc | ||
14 | include features/latencytop/latencytop.scc | ||
15 | include features/profiling/profiling.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc new file mode 100644 index 0000000000..e500bad4b2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-standard.scc | |||
@@ -0,0 +1,15 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE standard | ||
3 | define KARCH x86_64 | ||
4 | |||
5 | include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
10 | |||
11 | # default policy for standard kernels | ||
12 | include cfg/usb-mass-storage.scc | ||
13 | include cfg/boot-live.scc | ||
14 | include features/latencytop/latencytop.scc | ||
15 | include features/profiling/profiling.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc new file mode 100644 index 0000000000..e8e3c1c04d --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-tiny.scc | |||
@@ -0,0 +1,9 @@ | |||
1 | define KMACHINE {{=machine}} | ||
2 | define KTYPE tiny | ||
3 | define KARCH x86_64 | ||
4 | |||
5 | include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} | ||
6 | {{ if need_new_kbranch == "y": }} | ||
7 | branch {{=machine}} | ||
8 | |||
9 | include {{=machine}}.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-config.cfg | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-features.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}-user-patches.scc | |||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg new file mode 100644 index 0000000000..b4b82d7ca0 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.cfg | |||
@@ -0,0 +1,47 @@ | |||
1 | CONFIG_PRINTK=y | ||
2 | |||
3 | # Basic hardware support for the box - network, USB, PCI, sound | ||
4 | CONFIG_NETDEVICES=y | ||
5 | CONFIG_ATA=y | ||
6 | CONFIG_ATA_GENERIC=y | ||
7 | CONFIG_ATA_SFF=y | ||
8 | CONFIG_PCI=y | ||
9 | CONFIG_MMC=y | ||
10 | CONFIG_MMC_SDHCI=y | ||
11 | CONFIG_USB_SUPPORT=y | ||
12 | CONFIG_USB=y | ||
13 | CONFIG_USB_ARCH_HAS_EHCI=y | ||
14 | CONFIG_R8169=y | ||
15 | CONFIG_PATA_SCH=y | ||
16 | CONFIG_MMC_SDHCI_PCI=y | ||
17 | CONFIG_USB_EHCI_HCD=y | ||
18 | CONFIG_PCIEPORTBUS=y | ||
19 | CONFIG_NET=y | ||
20 | CONFIG_USB_UHCI_HCD=y | ||
21 | CONFIG_BLK_DEV_SD=y | ||
22 | CONFIG_CHR_DEV_SG=y | ||
23 | CONFIG_SOUND=y | ||
24 | CONFIG_SND=y | ||
25 | CONFIG_SND_HDA_INTEL=y | ||
26 | |||
27 | # Make sure these are on, otherwise the bootup won't be fun | ||
28 | CONFIG_EXT3_FS=y | ||
29 | CONFIG_UNIX=y | ||
30 | CONFIG_INET=y | ||
31 | CONFIG_MODULES=y | ||
32 | CONFIG_SHMEM=y | ||
33 | CONFIG_TMPFS=y | ||
34 | CONFIG_PACKET=y | ||
35 | |||
36 | CONFIG_I2C=y | ||
37 | CONFIG_AGP=y | ||
38 | CONFIG_PM=y | ||
39 | CONFIG_ACPI=y | ||
40 | CONFIG_INPUT=y | ||
41 | |||
42 | # Needed for booting (and using) USB memory sticks | ||
43 | CONFIG_BLK_DEV_LOOP=y | ||
44 | CONFIG_NLS_CODEPAGE_437=y | ||
45 | CONFIG_NLS_ISO8859_1=y | ||
46 | |||
47 | CONFIG_RD_GZIP=y | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc new file mode 100644 index 0000000000..db45140381 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice != "custom": }} files/{{=machine}}.scc | |||
@@ -0,0 +1,13 @@ | |||
1 | kconf hardware {{=machine}}.cfg | ||
2 | |||
3 | include features/serial/8250.scc | ||
4 | {{ if xserver == "y" and xserver_choice == "xserver_vesa": }} | ||
5 | include cfg/vesafb.scc | ||
6 | {{ if xserver == "y" and xserver_choice == "xserver_i915" or xserver_choice == "xserver_i965": }} | ||
7 | include features/i915/i915.scc | ||
8 | |||
9 | include cfg/usb-mass-storage.scc | ||
10 | include features/power/intel.scc | ||
11 | |||
12 | kconf hardware {{=machine}}-user-config.cfg | ||
13 | include {{=machine}}-user-patches.scc | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend new file mode 100644 index 0000000000..25c87a85ac --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-dev": }} linux-yocto-dev.bbappend | |||
@@ -0,0 +1,25 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
7 | |||
8 | {{ if need_new_kbranch == "y": }} | ||
9 | {{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
10 | |||
11 | {{ if need_new_kbranch == "n": }} | ||
12 | {{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }} | ||
13 | |||
14 | {{ if need_new_kbranch == "n": }} | ||
15 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
16 | |||
17 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}} | ||
18 | {{ if smp == "y": }} | ||
19 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
20 | |||
21 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
22 | file://{{=machine}}-user-config.cfg \ | ||
23 | file://{{=machine}}-user-patches.scc \ | ||
24 | file://{{=machine}}-user-features.scc \ | ||
25 | " | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend new file mode 100644 index 0000000000..00c8c68933 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-rt_3.10": }} linux-yocto-rt_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/preempt-rt" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/preempt-rt/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-preempt-rt.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-rt_{{=machine}} ?= "f35992f80c81dc5fa1a97165dfd5cbb84661f7cb" | ||
31 | #SRCREV_meta_pn-linux-yocto-rt_{{=machine}} ?= "1b534b2f8bbe9b8a773268cfa30a4850346f6f5f" | ||
32 | #LINUX_VERSION = "3.10.9" \ No newline at end of file | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend new file mode 100644 index 0000000000..bc6968d832 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.10": }} linux-yocto-tiny_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.10.9" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend new file mode 100644 index 0000000000..d221d5f2a4 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto-tiny_3.14": }} linux-yocto-tiny_3.14.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-tiny.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.14" | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend new file mode 100644 index 0000000000..162348114f --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.10": }} linux-yocto_3.10.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc-64" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc-64/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc-64" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc-64/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "b170394a475b96ecc92cbc9e4b002bed0a9f69c5" | ||
31 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "c2ed0f16fdec628242a682897d5d86df4547cf24" | ||
32 | #LINUX_VERSION = "3.10.9" \ No newline at end of file | ||
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend new file mode 100644 index 0000000000..81e528bc33 --- /dev/null +++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/{{ if kernel_choice == "linux-yocto_3.14": }} linux-yocto_3.14.bbappend | |||
@@ -0,0 +1,32 @@ | |||
1 | FILESEXTRAPATHS_prepend := "${THISDIR}/files:" | ||
2 | |||
3 | PR := "${PR}.1" | ||
4 | |||
5 | COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}" | ||
6 | |||
7 | {{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }} | ||
8 | |||
9 | {{ if need_new_kbranch == "y": }} | ||
10 | {{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc-64" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc-64/base" }} | ||
11 | |||
12 | {{ if need_new_kbranch == "n": }} | ||
13 | {{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard/common-pc-64" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/common-pc-64/base" }} | ||
14 | |||
15 | {{ if need_new_kbranch == "n": }} | ||
16 | KBRANCH_{{=machine}} = "{{=existing_kbranch}}" | ||
17 | |||
18 | {{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}} | ||
19 | {{ if smp == "y": }} | ||
20 | KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc" | ||
21 | |||
22 | SRC_URI += "file://{{=machine}}-standard.scc \ | ||
23 | file://{{=machine}}-user-config.cfg \ | ||
24 | file://{{=machine}}-user-patches.scc \ | ||
25 | file://{{=machine}}-user-features.scc \ | ||
26 | " | ||
27 | |||
28 | # uncomment and replace these SRCREVs with the real commit ids once you've had | ||
29 | # the appropriate changes committed to the upstream linux-yocto repo | ||
30 | #SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "840bb8c059418c4753415df56c9aff1c0d5354c8" | ||
31 | #SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "4fd76cc4f33e0afd8f906b1e8f231b6d13b6c993" | ||
32 | #LINUX_VERSION = "3.14" | ||
diff --git a/scripts/lib/bsp/tags.py b/scripts/lib/bsp/tags.py new file mode 100644 index 0000000000..6d5feb0a59 --- /dev/null +++ b/scripts/lib/bsp/tags.py | |||
@@ -0,0 +1,47 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2012, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This module provides a place to define common constants for the | ||
22 | # Yocto BSP Tools. | ||
23 | # | ||
24 | # AUTHORS | ||
25 | # Tom Zanussi <tom.zanussi (at] intel.com> | ||
26 | # | ||
27 | |||
28 | OPEN_TAG = "{{" | ||
29 | CLOSE_TAG = "}}" | ||
30 | ASSIGN_TAG = "{{=" | ||
31 | INPUT_TAG = "input" | ||
32 | IF_TAG = "if" | ||
33 | |||
34 | INDENT_STR = " " | ||
35 | |||
36 | BLANKLINE_STR = "of.write(\"\\n\")" | ||
37 | NORMAL_START = "of.write" | ||
38 | OPEN_START = "current_file =" | ||
39 | |||
40 | INPUT_TYPE_PROPERTY = "type" | ||
41 | |||
42 | SRC_URI_FILE = "file://" | ||
43 | |||
44 | GIT_CHECK_URI = "git://git.yoctoproject.org/linux-yocto-dev.git" | ||
45 | |||
46 | |||
47 | |||
diff --git a/scripts/lib/image/__init__.py b/scripts/lib/image/__init__.py new file mode 100644 index 0000000000..1ff814e761 --- /dev/null +++ b/scripts/lib/image/__init__.py | |||
@@ -0,0 +1,22 @@ | |||
1 | # | ||
2 | # OpenEmbedded Image tools library | ||
3 | # | ||
4 | # Copyright (c) 2013, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # AUTHORS | ||
21 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
22 | # | ||
diff --git a/scripts/lib/image/canned-wks/directdisk.wks b/scripts/lib/image/canned-wks/directdisk.wks new file mode 100644 index 0000000000..397a929c74 --- /dev/null +++ b/scripts/lib/image/canned-wks/directdisk.wks | |||
@@ -0,0 +1,10 @@ | |||
1 | # short-description: Create a 'pcbios' direct disk image | ||
2 | # long-description: Creates a partitioned legacy BIOS disk image that the user | ||
3 | # can directly dd to boot media. | ||
4 | |||
5 | |||
6 | part /boot --source bootimg-pcbios --ondisk sda --fstype=msdos --label boot --active --align 1024 | ||
7 | part / --source rootfs --ondisk sda --fstype=ext3 --label platform --align 1024 | ||
8 | |||
9 | bootloader --timeout=0 --append="rootwait rootfstype=ext3 video=vesafb vga=0x318 console=tty0" | ||
10 | |||
diff --git a/scripts/lib/image/canned-wks/mkefidisk.wks b/scripts/lib/image/canned-wks/mkefidisk.wks new file mode 100644 index 0000000000..e976bc80dd --- /dev/null +++ b/scripts/lib/image/canned-wks/mkefidisk.wks | |||
@@ -0,0 +1,11 @@ | |||
1 | # short-description: Create an EFI disk image | ||
2 | # long-description: Creates a partitioned EFI disk image that the user | ||
3 | # can directly dd to boot media. | ||
4 | |||
5 | part /boot --source bootimg-efi --ondisk sda --fstype=msdos --label msdos --active --align 1024 | ||
6 | |||
7 | part / --source rootfs --ondisk sda --fstype=ext3 --label platform --align 1024 | ||
8 | |||
9 | part swap --ondisk sda --size 44 --label swap1 --fstype=swap | ||
10 | |||
11 | bootloader --timeout=10 --append="rootwait rootfstype=ext3 console=ttyPCH0,115200 console=tty0 vmalloc=256MB snd-hda-intel.enable_msi=0" | ||
diff --git a/scripts/lib/image/canned-wks/uboot.wks b/scripts/lib/image/canned-wks/uboot.wks new file mode 100644 index 0000000000..7de0572d0f --- /dev/null +++ b/scripts/lib/image/canned-wks/uboot.wks | |||
@@ -0,0 +1,17 @@ | |||
1 | # short-description: . Create a ramdisk image for U-Boot | ||
2 | # long-description: Creates a ramdisk image for U-Boot that user | ||
3 | # can directly load it into ram through tftp | ||
4 | # | ||
5 | # part - is a wic command that drive the process of generating a valid file system | ||
6 | # - --source=uboot : wic plugin that generates a ramdisk image for U-Boot | ||
7 | # - --fstype=ext2 : file system type( ext2 / ext3 / ext 4) | ||
8 | # | ||
9 | # %packages %end - option to provide a list of packages that will be installed | ||
10 | # into rootfs. All packages dependencies will be installed by | ||
11 | # package manager(default opkg). | ||
12 | |||
13 | |||
14 | part / --source=uboot --fstype=ext2 --label imageName --align 1024 | ||
15 | |||
16 | %packages | ||
17 | %end | ||
diff --git a/scripts/lib/image/config/wic.conf b/scripts/lib/image/config/wic.conf new file mode 100644 index 0000000000..2a2750b4ee --- /dev/null +++ b/scripts/lib/image/config/wic.conf | |||
@@ -0,0 +1,11 @@ | |||
1 | [common] | ||
2 | ; general settings | ||
3 | distro_name = OpenEmbedded | ||
4 | |||
5 | [create] | ||
6 | ; settings for create subcommand | ||
7 | ; repourl=http://linux.com/ipk/all http://linux.com/ipk/target http://linux.com/ipk/arch | ||
8 | arch=powerpc | ||
9 | pkgmgr=opkg | ||
10 | runtime=native | ||
11 | install_pkgs=source | ||
diff --git a/scripts/lib/image/engine.py b/scripts/lib/image/engine.py new file mode 100644 index 0000000000..0643780f1a --- /dev/null +++ b/scripts/lib/image/engine.py | |||
@@ -0,0 +1,287 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2013, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | |||
22 | # This module implements the image creation engine used by 'wic' to | ||
23 | # create images. The engine parses through the OpenEmbedded kickstart | ||
24 | # (wks) file specified and generates images that can then be directly | ||
25 | # written onto media. | ||
26 | # | ||
27 | # AUTHORS | ||
28 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
29 | # | ||
30 | |||
31 | import os | ||
32 | import sys | ||
33 | from abc import ABCMeta, abstractmethod | ||
34 | import shlex | ||
35 | import json | ||
36 | import subprocess | ||
37 | import shutil | ||
38 | |||
39 | import os, sys, errno | ||
40 | from mic import msger, creator | ||
41 | from mic.utils import cmdln, misc, errors | ||
42 | from mic.conf import configmgr | ||
43 | from mic.plugin import pluginmgr | ||
44 | from mic.__version__ import VERSION | ||
45 | from mic.utils.oe.misc import * | ||
46 | |||
47 | |||
48 | def verify_build_env(): | ||
49 | """ | ||
50 | Verify that the build environment is sane. | ||
51 | |||
52 | Returns True if it is, false otherwise | ||
53 | """ | ||
54 | try: | ||
55 | builddir = os.environ["BUILDDIR"] | ||
56 | except KeyError: | ||
57 | print "BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)" | ||
58 | sys.exit(1) | ||
59 | |||
60 | return True | ||
61 | |||
62 | |||
63 | def find_bitbake_env_lines(image_name): | ||
64 | """ | ||
65 | If image_name is empty, plugins might still be able to use the | ||
66 | environment, so set it regardless. | ||
67 | """ | ||
68 | if image_name: | ||
69 | bitbake_env_cmd = "bitbake -e %s" % image_name | ||
70 | else: | ||
71 | bitbake_env_cmd = "bitbake -e" | ||
72 | rc, bitbake_env_lines = exec_cmd(bitbake_env_cmd) | ||
73 | if rc != 0: | ||
74 | print "Couldn't get '%s' output." % bitbake_env_cmd | ||
75 | return None | ||
76 | |||
77 | return bitbake_env_lines | ||
78 | |||
79 | |||
80 | def find_artifacts(image_name): | ||
81 | """ | ||
82 | Gather the build artifacts for the current image (the image_name | ||
83 | e.g. core-image-minimal) for the current MACHINE set in local.conf | ||
84 | """ | ||
85 | bitbake_env_lines = get_bitbake_env_lines() | ||
86 | |||
87 | rootfs_dir = kernel_dir = hdddir = staging_data_dir = native_sysroot = "" | ||
88 | |||
89 | for line in bitbake_env_lines.split('\n'): | ||
90 | if (get_line_val(line, "IMAGE_ROOTFS")): | ||
91 | rootfs_dir = get_line_val(line, "IMAGE_ROOTFS") | ||
92 | continue | ||
93 | if (get_line_val(line, "STAGING_KERNEL_DIR")): | ||
94 | kernel_dir = get_line_val(line, "STAGING_KERNEL_DIR") | ||
95 | continue | ||
96 | if (get_line_val(line, "HDDDIR")): | ||
97 | hdddir = get_line_val(line, "HDDDIR") | ||
98 | continue | ||
99 | if (get_line_val(line, "STAGING_DATADIR")): | ||
100 | staging_data_dir = get_line_val(line, "STAGING_DATADIR") | ||
101 | continue | ||
102 | if (get_line_val(line, "STAGING_DIR_NATIVE")): | ||
103 | native_sysroot = get_line_val(line, "STAGING_DIR_NATIVE") | ||
104 | continue | ||
105 | |||
106 | return (rootfs_dir, kernel_dir, hdddir, staging_data_dir, native_sysroot) | ||
107 | |||
108 | |||
109 | CANNED_IMAGE_DIR = "lib/image/canned-wks" # relative to scripts | ||
110 | |||
111 | def find_canned_image(scripts_path, wks_file): | ||
112 | """ | ||
113 | Find a .wks file with the given name in the canned files dir. | ||
114 | |||
115 | Return False if not found | ||
116 | """ | ||
117 | canned_wks_dir = os.path.join(scripts_path, CANNED_IMAGE_DIR) | ||
118 | |||
119 | for root, dirs, files in os.walk(canned_wks_dir): | ||
120 | for file in files: | ||
121 | if file.endswith("~") or file.endswith("#"): | ||
122 | continue | ||
123 | if file.endswith(".wks") and wks_file + ".wks" == file: | ||
124 | fullpath = os.path.join(canned_wks_dir, file) | ||
125 | return fullpath | ||
126 | return None | ||
127 | |||
128 | |||
129 | def list_canned_images(scripts_path): | ||
130 | """ | ||
131 | List the .wks files in the canned image dir, minus the extension. | ||
132 | """ | ||
133 | canned_wks_dir = os.path.join(scripts_path, CANNED_IMAGE_DIR) | ||
134 | |||
135 | for root, dirs, files in os.walk(canned_wks_dir): | ||
136 | for file in files: | ||
137 | if file.endswith("~") or file.endswith("#"): | ||
138 | continue | ||
139 | if file.endswith(".wks"): | ||
140 | fullpath = os.path.join(canned_wks_dir, file) | ||
141 | f = open(fullpath, "r") | ||
142 | lines = f.readlines() | ||
143 | for line in lines: | ||
144 | desc = "" | ||
145 | idx = line.find("short-description:") | ||
146 | if idx != -1: | ||
147 | desc = line[idx + len("short-description:"):].strip() | ||
148 | break | ||
149 | basename = os.path.splitext(file)[0] | ||
150 | print " %s\t\t%s" % (basename, desc) | ||
151 | |||
152 | |||
153 | def list_canned_image_help(scripts_path, fullpath): | ||
154 | """ | ||
155 | List the help and params in the specified canned image. | ||
156 | """ | ||
157 | canned_wks_dir = os.path.join(scripts_path, CANNED_IMAGE_DIR) | ||
158 | |||
159 | f = open(fullpath, "r") | ||
160 | lines = f.readlines() | ||
161 | found = False | ||
162 | for line in lines: | ||
163 | if not found: | ||
164 | idx = line.find("long-description:") | ||
165 | if idx != -1: | ||
166 | |||
167 | print line[idx + len("long-description:"):].strip() | ||
168 | found = True | ||
169 | continue | ||
170 | if not line.strip(): | ||
171 | break | ||
172 | idx = line.find("#") | ||
173 | if idx != -1: | ||
174 | print line[idx + len("#:"):].rstrip() | ||
175 | else: | ||
176 | break | ||
177 | |||
178 | |||
179 | def wic_create(args, wks_file, rootfs_dir, bootimg_dir, kernel_dir, | ||
180 | native_sysroot, hdddir, staging_data_dir, scripts_path, | ||
181 | image_output_dir, debug, properties_file, properties=None): | ||
182 | """ | ||
183 | Create image | ||
184 | |||
185 | wks_file - user-defined OE kickstart file | ||
186 | rootfs_dir - absolute path to the build's /rootfs dir | ||
187 | bootimg_dir - absolute path to the build's boot artifacts directory | ||
188 | kernel_dir - absolute path to the build's kernel directory | ||
189 | native_sysroot - absolute path to the build's native sysroots dir | ||
190 | hdddir - absolute path to the build's HDDDIR dir | ||
191 | staging_data_dir - absolute path to the build's STAGING_DATA_DIR dir | ||
192 | scripts_path - absolute path to /scripts dir | ||
193 | image_output_dir - dirname to create for image | ||
194 | properties_file - use values from this file if nonempty i.e no prompting | ||
195 | properties - use values from this string if nonempty i.e no prompting | ||
196 | |||
197 | Normally, the values for the build artifacts values are determined | ||
198 | by 'wic -e' from the output of the 'bitbake -e' command given an | ||
199 | image name e.g. 'core-image-minimal' and a given machine set in | ||
200 | local.conf. If that's the case, the variables get the following | ||
201 | values from the output of 'bitbake -e': | ||
202 | |||
203 | rootfs_dir: IMAGE_ROOTFS | ||
204 | kernel_dir: STAGING_KERNEL_DIR | ||
205 | native_sysroot: STAGING_DIR_NATIVE | ||
206 | hdddir: HDDDIR | ||
207 | staging_data_dir: STAGING_DATA_DIR | ||
208 | |||
209 | In the above case, bootimg_dir remains unset and the image | ||
210 | creation code determines which of the passed-in directories to | ||
211 | use. | ||
212 | |||
213 | In the case where the values are passed in explicitly i.e 'wic -e' | ||
214 | is not used but rather the individual 'wic' options are used to | ||
215 | explicitly specify these values, hdddir and staging_data_dir will | ||
216 | be unset, but bootimg_dir must be explicit i.e. explicitly set to | ||
217 | either hdddir or staging_data_dir, depending on the image being | ||
218 | generated. The other values (rootfs_dir, kernel_dir, and | ||
219 | native_sysroot) correspond to the same values found above via | ||
220 | 'bitbake -e'). | ||
221 | |||
222 | """ | ||
223 | try: | ||
224 | oe_builddir = os.environ["BUILDDIR"] | ||
225 | except KeyError: | ||
226 | print "BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)" | ||
227 | sys.exit(1) | ||
228 | |||
229 | direct_args = list() | ||
230 | direct_args.insert(0, oe_builddir) | ||
231 | direct_args.insert(0, image_output_dir) | ||
232 | direct_args.insert(0, wks_file) | ||
233 | direct_args.insert(0, rootfs_dir) | ||
234 | direct_args.insert(0, bootimg_dir) | ||
235 | direct_args.insert(0, kernel_dir) | ||
236 | direct_args.insert(0, native_sysroot) | ||
237 | direct_args.insert(0, hdddir) | ||
238 | direct_args.insert(0, staging_data_dir) | ||
239 | direct_args.insert(0, "direct") | ||
240 | |||
241 | if debug: | ||
242 | msger.set_loglevel('debug') | ||
243 | |||
244 | cr = creator.Creator() | ||
245 | |||
246 | cr.main(direct_args) | ||
247 | |||
248 | print "\nThe image(s) were created using OE kickstart file:\n %s" % wks_file | ||
249 | |||
250 | |||
251 | def wic_list(args, scripts_path, properties_file): | ||
252 | """ | ||
253 | Print the complete list of properties defined by the image, or the | ||
254 | possible values for a particular image property. | ||
255 | """ | ||
256 | if len(args) < 1: | ||
257 | return False | ||
258 | |||
259 | if len(args) == 1: | ||
260 | if args[0] == "images": | ||
261 | list_canned_images(scripts_path) | ||
262 | return True | ||
263 | elif args[0] == "properties": | ||
264 | return True | ||
265 | else: | ||
266 | return False | ||
267 | |||
268 | if len(args) == 2: | ||
269 | if args[0] == "properties": | ||
270 | wks_file = args[1] | ||
271 | print "print properties contained in wks file: %s" % wks_file | ||
272 | return True | ||
273 | elif args[0] == "property": | ||
274 | print "print property values for property: %s" % args[1] | ||
275 | return True | ||
276 | elif args[1] == "help": | ||
277 | wks_file = args[0] | ||
278 | fullpath = find_canned_image(scripts_path, wks_file) | ||
279 | if not fullpath: | ||
280 | print "No image named %s found, exiting. (Use 'wic list images' to list available images, or specify a fully-qualified OE kickstart (.wks) filename)\n" % wks_file | ||
281 | sys.exit(1) | ||
282 | list_canned_image_help(scripts_path, fullpath) | ||
283 | return True | ||
284 | else: | ||
285 | return False | ||
286 | |||
287 | return False | ||
diff --git a/scripts/lib/image/help.py b/scripts/lib/image/help.py new file mode 100644 index 0000000000..cb3112cf08 --- /dev/null +++ b/scripts/lib/image/help.py | |||
@@ -0,0 +1,311 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2013, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This module implements some basic help invocation functions along | ||
22 | # with the bulk of the help topic text for the OE Core Image Tools. | ||
23 | # | ||
24 | # AUTHORS | ||
25 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
26 | # | ||
27 | |||
28 | import subprocess | ||
29 | import logging | ||
30 | |||
31 | |||
def subcommand_error(args):
    """Fallback handler invoked for an unrecognized subcommand.

    args -- remaining command-line arguments; args[0] is the
            unrecognized subcommand name.
    """
    # Pass the argument lazily instead of pre-formatting with '%' so the
    # message is only built if INFO-level logging is actually enabled.
    logging.info("invalid subcommand %s", args[0])
35 | |||
def display_help(subcommand, subcommands):
    """
    Display help for subcommand through a 'less' pager.

    Returns False if the subcommand is unknown, True after the help
    text has been displayed.
    """
    if subcommand not in subcommands:
        return False

    # The help text is the third element of the subcommand tuple.  Use a
    # local name that doesn't shadow the 'help' builtin.
    hlp = subcommands.get(subcommand, subcommand_error)[2]
    pager = subprocess.Popen('less', stdin=subprocess.PIPE)
    # NOTE(review): under Python 3 communicate() would require bytes here;
    # this script targets Python 2, where passing a str is fine.
    pager.communicate(hlp)

    return True
48 | |||
49 | |||
def wic_help(args, usage_str, subcommands):
    """Dispatch a 'help' request to the matching subcommand help text.

    Falls back to printing the top-level usage string when no
    subcommand was named or the named one is unknown.
    """
    if len(args) != 1:
        if display_help(args[1], subcommands):
            return
    print(usage_str)
56 | |||
57 | |||
def invoke_subcommand(args, parser, main_command_usage, subcommands):
    """
    Dispatch to subcommand handler borrowed from combo-layer.
    Should use argparse, but has to work in 2.6.

    args               -- command line split after the program name;
                          args[0] is the subcommand, the rest its arguments
    parser             -- the top-level option parser, used for usage output
    main_command_usage -- usage string shown for bare 'help'
    subcommands        -- maps a subcommand name to a
                          (handler, usage, help) tuple
    """
    if not args:
        logging.error("No subcommand specified, exiting")
        parser.print_help()
    elif args[0] == "help":
        wic_help(args, main_command_usage, subcommands)
    elif args[0] not in subcommands:
        # Lazy %-args: only formatted when the record is actually emitted.
        logging.error("Unsupported subcommand %s, exiting\n", args[0])
        parser.print_help()
    else:
        # args[0] is known to be a key here, so the subcommand_error
        # default can never actually be returned by these lookups.
        usage = subcommands.get(args[0], subcommand_error)[1]
        subcommands.get(args[0], subcommand_error)[0](args[1:], usage)
74 | |||
75 | |||
76 | ## | ||
77 | # wic help and usage strings | ||
78 | ## | ||
79 | |||
# Top-level usage summary, printed for a bare 'wic', 'wic help' with no
# subcommand, or an unknown command.
wic_usage = """

 Create a customized OpenEmbedded image

 usage: wic [--version] [--help] COMMAND [ARGS]

 Current 'wic' commands are:
    create            Create a new OpenEmbedded image
    list              List available values for options and image properties

 See 'wic help COMMAND' for more information on a specific command.
"""
92 | |||
# Usage summary for the 'wic help' meta-command itself.
wic_help_usage = """

 usage: wic help <subcommand>

 This command displays detailed help for the specified subcommand.
"""
99 | |||
# Short usage text for 'wic create'; the long-form help is wic_create_help.
wic_create_usage = """

 Create a new OpenEmbedded image

 usage: wic create <wks file or image name> [-o <DIRNAME> | --outdir <DIRNAME>]
            [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
            [-e | --image-name] [-r, --rootfs-dir] [-b, --bootimg-dir]
            [-k, --kernel-dir] [-n, --native-sysroot] [-s, --skip-build-check]

 This command creates an OpenEmbedded image based on the 'OE kickstart
 commands' found in the <wks file>.

 The -o option can be used to place the image in a directory with a
 different name and location.

 See 'wic help create' for more detailed instructions.
"""
117 | |||
# Long-form help for 'wic create' (shown by 'wic help create').
# Fix: the text referred to 'mic' (the tool wic was forked from) where it
# means 'wic' itself.
wic_create_help = """

NAME
    wic create - Create a new OpenEmbedded image

SYNOPSIS
    wic create <wks file or image name> [-o <DIRNAME> | --outdir <DIRNAME>]
        [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
        [-e | --image-name] [-r, --rootfs-dir] [-b, --bootimg-dir]
        [-k, --kernel-dir] [-n, --native-sysroot] [-s, --skip-build-check]

DESCRIPTION
    This command creates an OpenEmbedded image based on the 'OE
    kickstart commands' found in the <wks file>.

    In order to do this, wic needs to know the locations of the
    various build artifacts required to build the image.

    Users can explicitly specify the build artifact locations using
    the -r, -b, -k, and -n options.  See below for details on where
    the corresponding artifacts are typically found in a normal
    OpenEmbedded build.

    Alternatively, users can use the -e option to have 'wic' determine
    those locations for a given image.  If the -e option is used, the
    user needs to have set the appropriate MACHINE variable in
    local.conf, and have sourced the build environment.

    The -e option is used to specify the name of the image to use the
    artifacts from e.g. core-image-sato.

    The -r option is used to specify the path to the /rootfs dir to
    use as the .wks rootfs source.

    The -b option is used to specify the path to the dir containing
    the boot artifacts (e.g. /EFI or /syslinux dirs) to use as the
    .wks bootimg source.

    The -k option is used to specify the path to the dir containing
    the kernel to use in the .wks bootimg.

    The -n option is used to specify the path to the native sysroot
    containing the tools to use to build the image.

    The -s option is used to skip the build check.  The build check is
    a simple sanity check used to determine whether the user has
    sourced the build environment so that the -e option can operate
    correctly.  If the user has specified the build artifact locations
    explicitly, 'wic' assumes the user knows what he or she is doing
    and skips the build check.

    When 'wic -e' is used, the locations for the build artifacts
    values are determined by 'wic -e' from the output of the 'bitbake
    -e' command given an image name e.g. 'core-image-minimal' and a
    given machine set in local.conf.  In that case, the image is
    created as if the following 'bitbake -e' variables were used:

    -r:  IMAGE_ROOTFS
    -k:  STAGING_KERNEL_DIR
    -n:  STAGING_DIR_NATIVE
    -b:  HDDDIR and STAGING_DATA_DIR (handlers decide which to use)

    If 'wic -e' is not used, the user needs to select the appropriate
    value for -b (as well as -r, -k, and -n).

    The -o option can be used to place the image in a directory with a
    different name and location.

    As an alternative to the wks file, the image-specific properties
    that define the values that will be used to generate a particular
    image can be specified on the command-line using the -i option and
    supplying a JSON object consisting of the set of name:value pairs
    needed by image creation.

    The set of properties available for a given image type can be
    listed using the 'wic list' command.
"""
195 | |||
# Short usage text for 'wic list'; the long-form help is wic_list_help.
# Fix: "(ks)" corrected to "(wks)" to match every other mention of the
# OE kickstart file extension in this help text.
wic_list_usage = """

 List available OpenEmbedded image properties and values

 usage: wic list images
        wic list <image> help
        wic list properties
        wic list properties <wks file>
        wic list property <property>
                  [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]

 This command enumerates the set of available canned images as well as
 help for those images.  It also can be used to enumerate the complete
 set of possible values for a specified option or property needed by
 the image creation process.

 The first form enumerates all the available 'canned' images.

 The second form lists the detailed help information for a specific
 'canned' image.

 The third form enumerates all the possible values that exist and can
 be specified in an OE kickstart (wks) file.

 The fourth form enumerates all the possible options that exist for
 the set of properties specified in a given OE kickstart (wks) file.

 The final form enumerates all the possible values that exist and can
 be specified for any given OE kickstart (wks) property.

 See 'wic help list' for more details.
"""
228 | |||
# Long-form help for 'wic list' (shown by 'wic help list').
# Fix: typo "filesytem" corrected to "filesystem" in the example output.
wic_list_help = """

NAME
    wic list - List available OpenEmbedded image properties and values

SYNOPSIS
    wic list images
    wic list <image> help
    wic list properties
    wic list properties <wks file>
    wic list property <property>
              [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]

DESCRIPTION
    This command enumerates the complete set of possible values for a
    specified option or property needed by the image creation process.

    This command enumerates the set of available canned images as well
    as help for those images.  It also can be used to enumerate the
    complete set of possible values for a specified option or property
    needed by the image creation process.

    The first form enumerates all the available 'canned' images.
    These are actually just the set of .wks files that have been moved
    into the /scripts/lib/image/canned-wks directory).

    The second form lists the detailed help information for a specific
    'canned' image.

    The third form enumerates all the possible values that exist and
    can be specified in a OE kickstart (wks) file.  The output of this
    can be used by the third form to print the description and
    possible values of a specific property.

    The fourth form enumerates all the possible options that exist for
    the set of properties specified in a given OE kickstart (wks)
    file.  If the -o option is specified, the list of properties, in
    addition to being displayed, will be written to the specified file
    as a JSON object.  In this case, the object will consist of the
    set of name:value pairs corresponding to the (possibly nested)
    dictionary of properties defined by the input statements used by
    the image.  Some example output for the 'list <wks file>' command:

    $ wic list test.ks
    "part" : {
        "mountpoint" : "/"
        "fstype" : "ext3"
    }
    "part" : {
        "mountpoint" : "/home"
        "fstype" : "ext3"
        "offset" : "10000"
    }
    "bootloader" : {
        "type" : "efi"
    }
    .
    .
    .

    Each entry in the output consists of the name of the input element
    e.g. "part", followed by the properties defined for that
    element enclosed in braces.  This information should provide
    sufficient information to create a complete user interface with.

    The final form enumerates all the possible values that exist and
    can be specified for any given OE kickstart (wks) property.  If
    the -o option is specified, the list of values for the given
    property, in addition to being displayed, will be written to the
    specified file as a JSON object.  In this case, the object will
    consist of the set of name:value pairs corresponding to the array
    of property values associated with the property.

    $ wic list property part
    ["mountpoint", "where the partition should be mounted"]
    ["fstype", "filesystem type of the partition"]
        ["ext3"]
        ["ext4"]
        ["btrfs"]
        ["swap"]
    ["offset", "offset of the partition within the image"]

"""
diff --git a/scripts/lib/mic/3rdparty/pykickstart/__init__.py b/scripts/lib/mic/3rdparty/pykickstart/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/__init__.py | |||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/base.py b/scripts/lib/mic/3rdparty/pykickstart/base.py new file mode 100644 index 0000000000..e6c8f56f9d --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/base.py | |||
@@ -0,0 +1,466 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2006, 2007, 2008 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | """ | ||
21 | Base classes for creating commands and syntax version object. | ||
22 | |||
23 | This module exports several important base classes: | ||
24 | |||
25 | BaseData - The base abstract class for all data objects. Data objects | ||
26 | are contained within a BaseHandler object. | ||
27 | |||
28 | BaseHandler - The base abstract class from which versioned kickstart | ||
29 | handler are derived. Subclasses of BaseHandler hold | ||
30 | BaseData and KickstartCommand objects. | ||
31 | |||
32 | DeprecatedCommand - An abstract subclass of KickstartCommand that should | ||
33 | be further subclassed by users of this module. When | ||
34 | a subclass is used, a warning message will be | ||
35 | printed. | ||
36 | |||
37 | KickstartCommand - The base abstract class for all kickstart commands. | ||
38 | Command objects are contained within a BaseHandler | ||
39 | object. | ||
40 | """ | ||
41 | import gettext | ||
42 | gettext.textdomain("pykickstart") | ||
43 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
44 | |||
45 | import types | ||
46 | import warnings | ||
47 | from pykickstart.errors import * | ||
48 | from pykickstart.ko import * | ||
49 | from pykickstart.parser import Packages | ||
50 | from pykickstart.version import versionToString | ||
51 | |||
52 | ### | ||
53 | ### COMMANDS | ||
54 | ### | ||
class KickstartCommand(KickstartObject):
    """The base class for all kickstart commands.  This is an abstract class.

       Concrete commands subclass this, declare removedKeywords/removedAttrs
       as needed, and implement parse() (and optionally apply()/dataList()).
    """
    # Names that subclasses declare as no longer supported; consumed by
    # __init__ (removedKeywords) and deleteRemovedAttrs (removedAttrs).
    removedKeywords = []
    removedAttrs = []

    def __init__(self, writePriority=0, *args, **kwargs):
        """Create a new KickstartCommand instance.  This method must be
           provided by all subclasses, but subclasses must call
           KickstartCommand.__init__ first.  Instance attributes:

           currentCmd    -- The name of the command in the input file that
                            caused this handler to be run.
           currentLine   -- The current unprocessed line from the input file
                            that caused this handler to be run.
           handler       -- A reference to the BaseHandler subclass this
                            command is contained within.  This is needed to
                            allow referencing of Data objects.
           lineno        -- The current line number in the input file.
           writePriority -- An integer specifying when this command should be
                            printed when iterating over all commands' __str__
                            methods.  The higher the number, the later this
                            command will be written.  All commands with the
                            same priority will be written alphabetically.
        """

        # We don't want people using this class by itself.
        if self.__class__ is KickstartCommand:
            raise TypeError, "KickstartCommand is an abstract class."

        KickstartObject.__init__(self, *args, **kwargs)

        self.writePriority = writePriority

        # These will be set by the dispatcher.
        self.currentCmd = ""
        self.currentLine = ""
        self.handler = None
        self.lineno = 0

        # If a subclass provides a removedKeywords list, remove all the
        # members from the kwargs list before we start processing it.  This
        # ensures that subclasses don't continue to recognize arguments that
        # were removed.
        for arg in filter(kwargs.has_key, self.removedKeywords):
            kwargs.pop(arg)

    def __call__(self, *args, **kwargs):
        """Set multiple attributes on a subclass of KickstartCommand at once
           via keyword arguments.  Valid attributes are anything specified in
           a subclass, but unknown attributes will be ignored.
        """
        for (key, val) in kwargs.items():
            # Ignore setting attributes that were removed in a subclass, as
            # if they were unknown attributes.
            if key in self.removedAttrs:
                continue

            if hasattr(self, key):
                setattr(self, key, val)

    def __str__(self):
        """Return a string formatted for output to a kickstart file.  This
           method must be provided by all subclasses.
        """
        return KickstartObject.__str__(self)

    def parse(self, args):
        """Parse the list of args and set data on the KickstartCommand object.
           This method must be provided by all subclasses.
        """
        raise TypeError, "parse() not implemented for KickstartCommand"

    def apply(self, instroot="/"):
        """Write out the configuration related to the KickstartCommand object.
           Subclasses which do not provide this method will not have their
           configuration written out.
        """
        return

    def dataList(self):
        """For commands that can occur multiple times in a single kickstart
           file (like network, part, etc.), return the list that we should
           append more data objects to.

           Returning None (the default) means the command is single-valued.
        """
        return None

    def deleteRemovedAttrs(self):
        """Remove all attributes from self that are given in the removedAttrs
           list.  This method should be called from __init__ in a subclass,
           but only after the superclass's __init__ method has been called.
        """
        for attr in filter(lambda k: hasattr(self, k), self.removedAttrs):
            delattr(self, attr)

    # Set the contents of the opts object (an instance of optparse.Values
    # returned by parse_args) as attributes on the KickstartCommand object.
    # It's useful to call this from KickstartCommand subclasses after parsing
    # the arguments.
    def _setToSelf(self, optParser, opts):
        self._setToObj(optParser, opts, self)

    # Sets the contents of the opts object (an instance of optparse.Values
    # returned by parse_args) as attributes on the provided object obj.  It's
    # useful to call this from KickstartCommand subclasses that handle lists
    # of objects (like partitions, network devices, etc.) and need to populate
    # a Data object.  Only options that were actually given (i.e. are not
    # None) are copied over.
    def _setToObj(self, optParser, opts, obj):
        for key in filter (lambda k: getattr(opts, k) != None, optParser.keys()):
            setattr(obj, key, getattr(opts, key))
164 | |||
165 | class DeprecatedCommand(KickstartCommand): | ||
166 | """Specify that a command is deprecated and no longer has any function. | ||
167 | Any command that is deprecated should be subclassed from this class, | ||
168 | only specifying an __init__ method that calls the superclass's __init__. | ||
169 | This is an abstract class. | ||
170 | """ | ||
171 | def __init__(self, writePriority=None, *args, **kwargs): | ||
172 | # We don't want people using this class by itself. | ||
173 | if self.__class__ is KickstartCommand: | ||
174 | raise TypeError, "DeprecatedCommand is an abstract class." | ||
175 | |||
176 | # Create a new DeprecatedCommand instance. | ||
177 | KickstartCommand.__init__(self, writePriority, *args, **kwargs) | ||
178 | |||
179 | def __str__(self): | ||
180 | """Placeholder since DeprecatedCommands don't work anymore.""" | ||
181 | return "" | ||
182 | |||
183 | def parse(self, args): | ||
184 | """Print a warning message if the command is seen in the input file.""" | ||
185 | mapping = {"lineno": self.lineno, "cmd": self.currentCmd} | ||
186 | warnings.warn(_("Ignoring deprecated command on line %(lineno)s: The %(cmd)s command has been deprecated and no longer has any effect. It may be removed from future releases, which will result in a fatal error from kickstart. Please modify your kickstart file to remove this command.") % mapping, DeprecationWarning) | ||
187 | |||
188 | |||
189 | ### | ||
190 | ### HANDLERS | ||
191 | ### | ||
class BaseHandler(KickstartObject):
    """Each version of kickstart syntax is provided by a subclass of this
       class.  These subclasses are what users will interact with for parsing,
       extracting data, and writing out kickstart files.  This is an abstract
       class.

       version -- The version this syntax handler supports.  This is set by
                  a class attribute of a BaseHandler subclass and is used to
                  set up the command dict.  It is for read-only use.
    """
    version = None

    def __init__(self, mapping=None, dataMapping=None, commandUpdates=None,
                 dataUpdates=None, *args, **kwargs):
        """Create a new BaseHandler instance.  This method must be provided by
           all subclasses, but subclasses must call BaseHandler.__init__ first.

           mapping          -- A custom map from command strings to classes,
                               useful when creating your own handler with
                               special command objects.  It is otherwise unused
                               and rarely needed.  If you give this argument,
                               the mapping takes the place of the default one
                               and so must include all commands you want
                               recognized.
           dataMapping      -- This is the same as mapping, but for data
                               objects.  All the same comments apply.
           commandUpdates   -- This is similar to mapping, but does not take
                               the place of the defaults entirely.  Instead,
                               this mapping is applied after the defaults and
                               updates it with just the commands you want to
                               modify.
           dataUpdates      -- This is the same as commandUpdates, but for
                               data objects.


           Instance attributes:

           commands -- A mapping from a string command to a KickstartCommand
                       subclass object that handles it.  Multiple strings can
                       map to the same object, but only one instance of the
                       command object should ever exist.  Most users should
                       never have to deal with this directly, as it is
                       manipulated internally and called through dispatcher.
           currentLine -- The current unprocessed line from the input file
                          that caused this handler to be run.
           packages -- An instance of pykickstart.parser.Packages which
                       describes the packages section of the input file.
           platform -- A string describing the hardware platform, which is
                       needed only by system-config-kickstart.
           scripts  -- A list of pykickstart.parser.Script instances, which is
                       populated by KickstartParser.addScript and describes the
                       %pre/%post/%traceback script section of the input file.
        """

        # We don't want people using this class by itself.
        if self.__class__ is BaseHandler:
            raise TypeError, "BaseHandler is an abstract class."

        KickstartObject.__init__(self, *args, **kwargs)

        # This isn't really a good place for these, but it's better than
        # everything else I can think of.
        self.scripts = []
        self.packages = Packages()
        self.platform = ""

        # These will be set by the dispatcher.
        self.commands = {}
        self.currentLine = 0

        # A dict keyed by an integer priority number, with each value being a
        # list of KickstartCommand subclasses.  This dict is maintained by
        # registerCommand and used in __str__.  No one else should be touching
        # it.
        self._writeOrder = {}

        self._registerCommands(mapping, dataMapping, commandUpdates, dataUpdates)

    def __str__(self):
        """Return a string formatted for output to a kickstart file."""
        retval = ""

        if self.platform != "":
            retval += "#platform=%s\n" % self.platform

        retval += "#version=%s\n" % versionToString(self.version)

        # Emit commands in ascending writePriority order; within a priority,
        # _insertSorted has already ordered them alphabetically by class name.
        lst = self._writeOrder.keys()
        lst.sort()

        for prio in lst:
            for obj in self._writeOrder[prio]:
                retval += obj.__str__()

        for script in self.scripts:
            retval += script.__str__()

        retval += self.packages.__str__()

        return retval

    def _insertSorted(self, lst, obj):
        """Insert obj into lst keeping it sorted by class name; an entry
           whose class name matches an existing one replaces it in place.
        """
        length = len(lst)
        i = 0

        while i < length:
            # If the two classes have the same name, it's because we are
            # overriding an existing class with one from a later kickstart
            # version, so remove the old one in favor of the new one.
            if obj.__class__.__name__ > lst[i].__class__.__name__:
                i += 1
            elif obj.__class__.__name__ == lst[i].__class__.__name__:
                lst[i] = obj
                return
            elif obj.__class__.__name__ < lst[i].__class__.__name__:
                break

        if i >= length:
            lst.append(obj)
        else:
            lst.insert(i, obj)

    def _setCommand(self, cmdObj):
        """Expose cmdObj as an attribute on this handler and record it in
           the _writeOrder dict used by __str__.
        """
        # Add an attribute on this version object.  We need this to provide a
        # way for clients to access the command objects.  We also need to strip
        # off the version part from the front of the name.
        if cmdObj.__class__.__name__.find("_") != -1:
            name = unicode(cmdObj.__class__.__name__.split("_", 1)[1])
        else:
            name = unicode(cmdObj.__class__.__name__).lower()

        setattr(self, name.lower(), cmdObj)

        # Also, add the object into the _writeOrder dict in the right place.
        if cmdObj.writePriority is not None:
            if self._writeOrder.has_key(cmdObj.writePriority):
                self._insertSorted(self._writeOrder[cmdObj.writePriority], cmdObj)
            else:
                self._writeOrder[cmdObj.writePriority] = [cmdObj]

    def _registerCommands(self, mapping=None, dataMapping=None, commandUpdates=None,
                          dataUpdates=None):
        """Populate self.commands and the data-class attributes from the
           given maps, falling back to this version's default maps.
        """
        # An explicit non-empty mapping replaces the default map entirely;
        # the *Updates dicts are merged on top of whichever map was chosen.
        if mapping == {} or mapping == None:
            from pykickstart.handlers.control import commandMap
            cMap = commandMap[self.version]
        else:
            cMap = mapping

        if dataMapping == {} or dataMapping == None:
            from pykickstart.handlers.control import dataMap
            dMap = dataMap[self.version]
        else:
            dMap = dataMapping

        if type(commandUpdates) == types.DictType:
            cMap.update(commandUpdates)

        if type(dataUpdates) == types.DictType:
            dMap.update(dataUpdates)

        for (cmdName, cmdClass) in cMap.iteritems():
            # First make sure we haven't instantiated this command handler
            # already.  If we have, we just need to make another mapping to
            # it in self.commands.
            cmdObj = None

            for (key, val) in self.commands.iteritems():
                if val.__class__.__name__ == cmdClass.__name__:
                    cmdObj = val
                    break

            # If we didn't find an instance in self.commands, create one now.
            if cmdObj == None:
                cmdObj = cmdClass()
                self._setCommand(cmdObj)

            # Finally, add the mapping to the commands dict.
            self.commands[cmdName] = cmdObj
            self.commands[cmdName].handler = self

        # We also need to create attributes for the various data objects.
        # No checks here because dMap is a bijection.  At least, that's what
        # the comment says.  Hope no one screws that up.
        for (dataName, dataClass) in dMap.iteritems():
            setattr(self, dataName, dataClass)

    def dispatcher(self, args, lineno):
        """Call the appropriate KickstartCommand handler for the current line
           in the kickstart file.  A handler for the current command should
           be registered, though a handler of None is not an error.  Returns
           the data object returned by KickstartCommand.parse.

           args -- A list of arguments to the current command
           lineno -- The line number in the file, for error reporting
        """
        cmd = args[0]

        if not self.commands.has_key(cmd):
            raise KickstartParseError, formatErrorMsg(lineno, msg=_("Unknown command: %s" % cmd))
        elif self.commands[cmd] != None:
            self.commands[cmd].currentCmd = cmd
            self.commands[cmd].currentLine = self.currentLine
            self.commands[cmd].lineno = lineno

            # The parser returns the data object that was modified.  This could
            # be a BaseData subclass that should be put into a list, or it
            # could be the command handler object itself.
            obj = self.commands[cmd].parse(args[1:])
            lst = self.commands[cmd].dataList()
            if lst is not None:
                lst.append(obj)

            return obj

    def maskAllExcept(self, lst):
        """Set all entries in the commands dict to None, except the ones in
           the lst.  All other commands will not be processed.
        """
        self._writeOrder = {}

        for (key, val) in self.commands.iteritems():
            if not key in lst:
                self.commands[key] = None

    def hasCommand(self, cmd):
        """Return true if there is a handler for the string cmd."""
        return hasattr(self, cmd)
419 | |||
420 | |||
421 | ### | ||
422 | ### DATA | ||
423 | ### | ||
class BaseData(KickstartObject):
    """The base class for all data objects.  This is an abstract class.

       Data objects hold the parsed values of repeatable commands (part,
       network, etc.) and live inside a BaseHandler.
    """
    # Names that subclasses declare as no longer supported; consumed by
    # __call__ (removedAttrs) and deleteRemovedAttrs.
    removedKeywords = []
    removedAttrs = []

    def __init__(self, *args, **kwargs):
        """Create a new BaseData instance.

           lineno -- Line number in the ks-file where this object was defined
        """

        # We don't want people using this class by itself.
        if self.__class__ is BaseData:
            raise TypeError, "BaseData is an abstract class."

        KickstartObject.__init__(self, *args, **kwargs)
        self.lineno = 0

    def __str__(self):
        """Return a string formatted for output to a kickstart file."""
        return ""

    def __call__(self, *args, **kwargs):
        """Set multiple attributes on a subclass of BaseData at once via
           keyword arguments.  Valid attributes are anything specified in a
           subclass, but unknown attributes will be ignored.
        """
        for (key, val) in kwargs.items():
            # Ignore setting attributes that were removed in a subclass, as
            # if they were unknown attributes.
            if key in self.removedAttrs:
                continue

            if hasattr(self, key):
                setattr(self, key, val)

    def deleteRemovedAttrs(self):
        """Remove all attributes from self that are given in the removedAttrs
           list.  This method should be called from __init__ in a subclass,
           but only after the superclass's __init__ method has been called.
        """
        for attr in filter(lambda k: hasattr(self, k), self.removedAttrs):
            delattr(self, attr)
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/__init__.py b/scripts/lib/mic/3rdparty/pykickstart/commands/__init__.py new file mode 100644 index 0000000000..da48ff50d5 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/__init__.py | |||
@@ -0,0 +1,26 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2009 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | import authconfig, autopart, autostep, bootloader, clearpart, device | ||
21 | import deviceprobe, displaymode, dmraid, driverdisk, fcoe, firewall, firstboot | ||
22 | import group, ignoredisk, interactive, iscsi, iscsiname, key, keyboard, lang | ||
23 | import langsupport, lilocheck, logging, logvol, mediacheck, method, monitor | ||
24 | import mouse, multipath, network, partition, raid, reboot, repo, rescue, rootpw | ||
25 | import selinux, services, skipx, sshpw, timezone, updates, upgrade, user, vnc | ||
26 | import volgroup, xconfig, zerombr, zfcp | ||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/authconfig.py b/scripts/lib/mic/3rdparty/pykickstart/commands/authconfig.py new file mode 100644 index 0000000000..9af9c0ff14 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/authconfig.py | |||
@@ -0,0 +1,40 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | |||
class FC3_Authconfig(KickstartCommand):
    """Handler for the FC3 "auth"/"authconfig" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        """authconfig -- the raw option string passed to the command, stored
                         and written back out verbatim.
        """
        # Fix: forward writePriority to the parent, as every other command
        # class does; the original accepted it and then dropped it, so this
        # command could not be ordered in the output file.
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.authconfig = kwargs.get("authconfig", "")

    def __str__(self):
        """Return the kickstart representation, empty-suffixed unless an
           auth string was given.
        """
        retval = KickstartCommand.__str__(self)

        if self.authconfig:
            retval += "# System authorization information\nauth %s\n" % self.authconfig

        return retval

    def parse(self, args):
        # Everything after the command name is kept as a single opaque
        # string -- authconfig options are not interpreted here.
        self.authconfig = self.currentLine[len(self.currentCmd):].strip()
        return self
40 | return self | ||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/autopart.py b/scripts/lib/mic/3rdparty/pykickstart/commands/autopart.py new file mode 100644 index 0000000000..cf28b5c7f7 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/autopart.py | |||
@@ -0,0 +1,119 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007, 2008 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
import gettext
# Translation helper for user-visible error messages.
# NOTE(review): gettext.ldgettext is Python 2 only (removed in Python 3.8);
# this file targets the bundled python2 interpreter.
_ = lambda x: gettext.ldgettext("pykickstart", x)
26 | |||
27 | class FC3_AutoPart(KickstartCommand): | ||
28 | removedKeywords = KickstartCommand.removedKeywords | ||
29 | removedAttrs = KickstartCommand.removedAttrs | ||
30 | |||
31 | def __init__(self, writePriority=100, *args, **kwargs): | ||
32 | KickstartCommand.__init__(self, writePriority, *args, **kwargs) | ||
33 | self.autopart = kwargs.get("autopart", False) | ||
34 | |||
35 | def __str__(self): | ||
36 | retval = KickstartCommand.__str__(self) | ||
37 | |||
38 | if self.autopart: | ||
39 | retval += "autopart\n" | ||
40 | |||
41 | return retval | ||
42 | |||
43 | def parse(self, args): | ||
44 | if len(args) > 0: | ||
45 | raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "autopart") | ||
46 | |||
47 | self.autopart = True | ||
48 | return self | ||
49 | |||
class F9_AutoPart(FC3_AutoPart):
    """F9 autopart: adds --encrypted and --passphrase support."""
    removedKeywords = FC3_AutoPart.removedKeywords
    removedAttrs = FC3_AutoPart.removedAttrs

    def __init__(self, writePriority=100, *args, **kwargs):
        """encrypted  -- encrypt the automatically created partitions
           passphrase -- passphrase used for the encrypted partitions
        """
        FC3_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)
        self.encrypted = kwargs.get("encrypted", False)
        self.passphrase = kwargs.get("passphrase", "")

        self.op = self._getParser()

    def __str__(self):
        """Rebuild the autopart line with the F9 encryption options.

           Deliberately starts from KickstartCommand.__str__, not the FC3
           version, so the line can be assembled piecewise.
        """
        retval = KickstartCommand.__str__(self)

        if self.autopart:
            retval += "autopart"

        if self.encrypted:
            retval += " --encrypted"

        if self.passphrase != "":
            retval += " --passphrase=\"%s\"" % self.passphrase

        if retval != "":
            retval += "\n"

        return retval

    def _getParser(self):
        parser = KSOptionParser()
        parser.add_option("--encrypted", action="store_true", default=False)
        parser.add_option("--passphrase")
        return parser

    def parse(self, args):
        (opts, leftover) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)
        self.autopart = True
        return self
89 | |||
class F12_AutoPart(F9_AutoPart):
    """F12 autopart: adds key-escrow options on top of encryption."""
    removedKeywords = F9_AutoPart.removedKeywords
    removedAttrs = F9_AutoPart.removedAttrs

    def __init__(self, writePriority=100, *args, **kwargs):
        """escrowcert       -- URL of the X.509 escrow certificate
           backuppassphrase -- also create a backup passphrase
        """
        F9_AutoPart.__init__(self, writePriority=writePriority, *args, **kwargs)

        self.escrowcert = kwargs.get("escrowcert", "")
        self.backuppassphrase = kwargs.get("backuppassphrase", False)

    def __str__(self):
        """Append the escrow options to the F9-formatted line.

           Escrow only makes sense together with --encrypted, so the extra
           options are emitted only in that case; the trailing newline from
           the parent is stripped and re-added around them.
        """
        retval = F9_AutoPart.__str__(self)

        if self.encrypted and self.escrowcert != "":
            retval = retval.strip()

            retval += " --escrowcert=\"%s\"" % self.escrowcert

            if self.backuppassphrase:
                retval += " --backuppassphrase"

            retval += "\n"

        return retval

    def _getParser(self):
        parser = F9_AutoPart._getParser(self)
        parser.add_option("--escrowcert")
        parser.add_option("--backuppassphrase", action="store_true", default=False)
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/autostep.py b/scripts/lib/mic/3rdparty/pykickstart/commands/autostep.py new file mode 100644 index 0000000000..e6ae71cefc --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/autostep.py | |||
@@ -0,0 +1,55 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.options import * | ||
22 | |||
class FC3_AutoStep(KickstartCommand):
    """Handler for the FC3 "autostep" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        """autostep       -- True once the command has been seen
           autoscreenshot -- whether --autoscreenshot was given
        """
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        self.autostep = kwargs.get("autostep", False)
        self.autoscreenshot = kwargs.get("autoscreenshot", False)

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if not self.autostep:
            return retval

        retval += "autostep --autoscreenshot\n" if self.autoscreenshot else "autostep\n"
        return retval

    def _getParser(self):
        parser = KSOptionParser()
        parser.add_option("--autoscreenshot", dest="autoscreenshot",
                          action="store_true", default=False)
        return parser

    def parse(self, args):
        (opts, leftover) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)
        self.autostep = True
        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/bootloader.py b/scripts/lib/mic/3rdparty/pykickstart/commands/bootloader.py new file mode 100644 index 0000000000..b227fac3be --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/bootloader.py | |||
@@ -0,0 +1,265 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.options import * | ||
22 | |||
class FC3_Bootloader(KickstartCommand):
    """Handler for the FC3 "bootloader" kickstart command (also reached via
       the deprecated "lilo" alias -- see parse()).
    """
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=10, *args, **kwargs):
        # writePriority=10 makes bootloader appear early in the output file.
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # Attribute defaults; kwargs allow programmatic construction without
        # going through parse().
        self.driveorder = kwargs.get("driveorder", [])
        self.appendLine = kwargs.get("appendLine", "")
        self.forceLBA = kwargs.get("forceLBA", False)
        self.linear = kwargs.get("linear", True)
        self.location = kwargs.get("location", "")
        self.md5pass = kwargs.get("md5pass", "")
        self.password = kwargs.get("password", "")
        self.upgrade = kwargs.get("upgrade", False)
        self.useLilo = kwargs.get("useLilo", False)

        self.deleteRemovedAttrs()

    def _getArgsAsStr(self):
        """Serialize the current attribute values back into option syntax.

           Each option is prefixed with a space, so the result can be
           appended directly after the "bootloader" keyword.
        """
        retval = ""

        if self.appendLine != "":
            retval += " --append=\"%s\"" % self.appendLine
        if self.linear:
            retval += " --linear"
        if self.location:
            retval += " --location=%s" % self.location
        # hasattr guard: subclasses remove forceLBA via removedAttrs.
        if hasattr(self, "forceLBA") and self.forceLBA:
            retval += " --lba32"
        if self.password != "":
            retval += " --password=\"%s\"" % self.password
        if self.md5pass != "":
            retval += " --md5pass=\"%s\"" % self.md5pass
        if self.upgrade:
            retval += " --upgrade"
        if self.useLilo:
            retval += " --useLilo"
        if len(self.driveorder) > 0:
            retval += " --driveorder=\"%s\"" % ",".join(self.driveorder)

        return retval

    def __str__(self):
        """Emit the bootloader line only when a location is set."""
        retval = KickstartCommand.__str__(self)

        if self.location != "":
            retval += "# System bootloader configuration\nbootloader"
            retval += self._getArgsAsStr() + "\n"

        return retval

    def _getParser(self):
        def driveorder_cb (option, opt_str, value, parser):
            # Split the comma-separated --driveorder value and accumulate
            # the drives into a list on the parser's values.
            for d in value.split(','):
                parser.values.ensure_value(option.dest, []).append(d)

        op = KSOptionParser()
        op.add_option("--append", dest="appendLine")
        op.add_option("--linear", dest="linear", action="store_true",
                      default=True)
        op.add_option("--nolinear", dest="linear", action="store_false")
        op.add_option("--location", dest="location", type="choice",
                      default="mbr",
                      choices=["mbr", "partition", "none", "boot"])
        op.add_option("--lba32", dest="forceLBA", action="store_true",
                      default=False)
        op.add_option("--password", dest="password", default="")
        op.add_option("--md5pass", dest="md5pass", default="")
        op.add_option("--upgrade", dest="upgrade", action="store_true",
                      default=False)
        op.add_option("--useLilo", dest="useLilo", action="store_true",
                      default=False)
        op.add_option("--driveorder", dest="driveorder", action="callback",
                      callback=driveorder_cb, nargs=1, type="string")
        return op

    def parse(self, args):
        """Parse a bootloader (or lilo) command line."""
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)

        # The deprecated "lilo" command is an alias that implies --useLilo.
        if self.currentCmd == "lilo":
            self.useLilo = True

        return self
109 | |||
class FC4_Bootloader(FC3_Bootloader):
    """FC4 bootloader: drops --linear/--nolinear and --useLilo."""
    removedKeywords = FC3_Bootloader.removedKeywords + ["linear", "useLilo"]
    removedAttrs = FC3_Bootloader.removedAttrs + ["linear", "useLilo"]

    def __init__(self, writePriority=10, *args, **kwargs):
        FC3_Bootloader.__init__(self, writePriority, *args, **kwargs)

    def _getArgsAsStr(self):
        """Serialize options; the FC3-only linear/useLilo flags are gone."""
        chunks = []

        if self.appendLine != "":
            chunks.append(" --append=\"%s\"" % self.appendLine)
        if self.location:
            chunks.append(" --location=%s" % self.location)
        # forceLBA may have been removed by a later subclass.
        if hasattr(self, "forceLBA") and self.forceLBA:
            chunks.append(" --lba32")
        if self.password != "":
            chunks.append(" --password=\"%s\"" % self.password)
        if self.md5pass != "":
            chunks.append(" --md5pass=\"%s\"" % self.md5pass)
        if self.upgrade:
            chunks.append(" --upgrade")
        if len(self.driveorder) > 0:
            chunks.append(" --driveorder=\"%s\"" % ",".join(self.driveorder))

        return "".join(chunks)

    def _getParser(self):
        op = FC3_Bootloader._getParser(self)
        for gone in ("--linear", "--nolinear", "--useLilo"):
            op.remove_option(gone)
        return op

    def parse(self, args):
        (opts, leftover) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)
        return self
146 | |||
class F8_Bootloader(FC4_Bootloader):
    """F8 bootloader: adds --timeout and --default."""
    removedKeywords = FC4_Bootloader.removedKeywords
    removedAttrs = FC4_Bootloader.removedAttrs

    def __init__(self, writePriority=10, *args, **kwargs):
        """timeout -- seconds to wait before booting the default entry
           default -- the default boot entry
        """
        FC4_Bootloader.__init__(self, writePriority, *args, **kwargs)

        self.timeout = kwargs.get("timeout", None)
        self.default = kwargs.get("default", "")

    def _getArgsAsStr(self):
        args = FC4_Bootloader._getArgsAsStr(self)

        # timeout may legitimately be 0, so compare against None.
        if self.timeout is not None:
            args += " --timeout=%d" % (self.timeout,)
        if self.default:
            args += " --default=%s" % (self.default,)

        return args

    def _getParser(self):
        op = FC4_Bootloader._getParser(self)
        op.add_option("--timeout", dest="timeout", type="int")
        op.add_option("--default", dest="default")
        return op
172 | |||
class F12_Bootloader(F8_Bootloader):
    """F12 bootloader: --lba32 still parses but is marked deprecated."""
    removedKeywords = F8_Bootloader.removedKeywords
    removedAttrs = F8_Bootloader.removedAttrs

    def _getParser(self):
        op = F8_Bootloader._getParser(self)
        # Re-register --lba32 with the deprecated flag so pykickstart warns
        # users instead of failing.
        op.add_option("--lba32", action="store_true", dest="forceLBA", deprecated=1)
        return op
181 | |||
class F14_Bootloader(F12_Bootloader):
    """F14 bootloader: --lba32 is removed entirely."""
    removedKeywords = F12_Bootloader.removedKeywords + ["forceLBA"]
    # Fix: the original read "F12_Bootloader.removedKeywords" here, deriving
    # the removed-attribute list from the keyword list by copy/paste mistake.
    removedAttrs = F12_Bootloader.removedAttrs + ["forceLBA"]

    def _getParser(self):
        op = F12_Bootloader._getParser(self)
        op.remove_option("--lba32")
        return op
190 | |||
class F15_Bootloader(F14_Bootloader):
    """F15 bootloader: adds --iscrypted; --md5pass implies it."""
    removedKeywords = F14_Bootloader.removedKeywords
    removedAttrs = F14_Bootloader.removedAttrs

    def __init__(self, writePriority=10, *args, **kwargs):
        """isCrypted -- whether the stored password is already encrypted."""
        F14_Bootloader.__init__(self, writePriority, *args, **kwargs)

        self.isCrypted = kwargs.get("isCrypted", False)

    def _getArgsAsStr(self):
        args = F14_Bootloader._getArgsAsStr(self)
        if self.isCrypted:
            args = args + " --iscrypted"
        return args

    def _getParser(self):
        def password_cb(option, opt_str, value, parser):
            # An --md5pass password is by definition already encrypted.
            parser.values.isCrypted = True
            parser.values.password = value

        op = F14_Bootloader._getParser(self)
        op.add_option("--iscrypted", dest="isCrypted", action="store_true", default=False)
        op.add_option("--md5pass", action="callback", callback=password_cb, nargs=1, type="string")
        return op
217 | |||
class RHEL5_Bootloader(FC4_Bootloader):
    """RHEL5 bootloader: adds --hvargs for the Xen hypervisor."""
    removedKeywords = FC4_Bootloader.removedKeywords
    removedAttrs = FC4_Bootloader.removedAttrs

    def __init__(self, writePriority=10, *args, **kwargs):
        """hvArgs -- argument string passed to the hypervisor."""
        FC4_Bootloader.__init__(self, writePriority, *args, **kwargs)

        self.hvArgs = kwargs.get("hvArgs", "")

    def _getArgsAsStr(self):
        args = FC4_Bootloader._getArgsAsStr(self)
        if self.hvArgs:
            args = args + " --hvargs=\"%s\"" % (self.hvArgs,)
        return args

    def _getParser(self):
        op = FC4_Bootloader._getParser(self)
        op.add_option("--hvargs", dest="hvArgs", type="string")
        return op
239 | |||
class RHEL6_Bootloader(F12_Bootloader):
    """RHEL6 bootloader: adds --iscrypted; --md5pass implies it."""
    removedKeywords = F12_Bootloader.removedKeywords
    removedAttrs = F12_Bootloader.removedAttrs

    def __init__(self, writePriority=10, *args, **kwargs):
        """isCrypted -- whether the stored password is already encrypted."""
        F12_Bootloader.__init__(self, writePriority, *args, **kwargs)

        self.isCrypted = kwargs.get("isCrypted", False)

    def _getArgsAsStr(self):
        args = F12_Bootloader._getArgsAsStr(self)
        if self.isCrypted:
            args = args + " --iscrypted"
        return args

    def _getParser(self):
        def password_cb(option, opt_str, value, parser):
            # An --md5pass password is by definition already encrypted.
            parser.values.isCrypted = True
            parser.values.password = value

        op = F12_Bootloader._getParser(self)
        op.add_option("--iscrypted", dest="isCrypted", action="store_true", default=False)
        op.add_option("--md5pass", action="callback", callback=password_cb, nargs=1, type="string")
        return op
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/clearpart.py b/scripts/lib/mic/3rdparty/pykickstart/commands/clearpart.py new file mode 100644 index 0000000000..a8089fcb99 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/clearpart.py | |||
@@ -0,0 +1,86 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.constants import * | ||
22 | from pykickstart.errors import * | ||
23 | from pykickstart.options import * | ||
24 | |||
class FC3_ClearPart(KickstartCommand):
    """Handler for the FC3 "clearpart" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=120, *args, **kwargs):
        """drives  -- restrict clearing to these drives
           initAll -- whether --initlabel was given
           type    -- one of the CLEARPART_TYPE_* constants, or None
        """
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        self.drives = kwargs.get("drives", [])
        self.initAll = kwargs.get("initAll", False)
        self.type = kwargs.get("type", None)

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        # No type at all means clearpart was never seen; emit nothing.
        if self.type is None:
            return retval

        # Map the type constant back to its command-line spelling; an
        # unknown constant serializes as an empty string.
        typemap = {CLEARPART_TYPE_NONE: "--none",
                   CLEARPART_TYPE_LINUX: "--linux",
                   CLEARPART_TYPE_ALL: "--all"}
        clearstr = typemap.get(self.type, "")

        initstr = "--initlabel" if self.initAll else ""

        drivestr = "--drives=" + ",".join(self.drives) if len(self.drives) > 0 else ""

        retval += "# Partition clearing information\nclearpart %s %s %s\n" % (clearstr, initstr, drivestr)
        return retval

    def _getParser(self):
        def drive_cb (option, opt_str, value, parser):
            # Accumulate the comma-separated --drives value into a list.
            for d in value.split(','):
                parser.values.ensure_value(option.dest, []).append(d)

        op = KSOptionParser()
        op.add_option("--all", dest="type", action="store_const",
                      const=CLEARPART_TYPE_ALL)
        op.add_option("--drives", dest="drives", action="callback",
                      callback=drive_cb, nargs=1, type="string")
        op.add_option("--initlabel", dest="initAll", action="store_true",
                      default=False)
        op.add_option("--linux", dest="type", action="store_const",
                      const=CLEARPART_TYPE_LINUX)
        op.add_option("--none", dest="type", action="store_const",
                      const=CLEARPART_TYPE_NONE)
        return op

    def parse(self, args):
        (opts, leftover) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)
        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/device.py b/scripts/lib/mic/3rdparty/pykickstart/commands/device.py new file mode 100644 index 0000000000..321410e2e2 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/device.py | |||
@@ -0,0 +1,125 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.options import * | ||
22 | |||
import gettext
import warnings
# Translation helper for user-visible warning/error messages.
# NOTE(review): gettext.ldgettext is Python 2 only (removed in Python 3.8);
# this file targets the bundled python2 interpreter.
_ = lambda x: gettext.ldgettext("pykickstart", x)
26 | |||
class F8_DeviceData(BaseData):
    """Data object representing one "device" line in an F8+ kickstart."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        """moduleName -- the kernel module to load
           moduleOpts -- option string passed to the module
        """
        BaseData.__init__(self, *args, **kwargs)
        self.moduleName = kwargs.get("moduleName", "")
        self.moduleOpts = kwargs.get("moduleOpts", "")

    def __eq__(self, y):
        # Two device entries are equal when they name the same module;
        # options are deliberately ignored for duplicate detection.
        return self.moduleName == y.moduleName

    def __str__(self):
        retval = BaseData.__str__(self)

        if self.moduleName != "":
            retval += "device %s" % self.moduleName

        if self.moduleOpts != "":
            retval += " --opts=\"%s\"" % self.moduleOpts

        return retval + "\n"
49 | |||
50 | class FC3_Device(KickstartCommand): | ||
51 | removedKeywords = KickstartCommand.removedKeywords | ||
52 | removedAttrs = KickstartCommand.removedAttrs | ||
53 | |||
54 | def __init__(self, writePriority=0, *args, **kwargs): | ||
55 | KickstartCommand.__init__(self, writePriority, *args, **kwargs) | ||
56 | self.op = self._getParser() | ||
57 | |||
58 | self.type = kwargs.get("type", "") | ||
59 | self.moduleName = kwargs.get("moduleName", "") | ||
60 | self.moduleOpts = kwargs.get("moduleOpts", "") | ||
61 | |||
62 | def __eq__(self, y): | ||
63 | return self.moduleName == y.moduleName | ||
64 | |||
65 | def __str__(self): | ||
66 | retval = KickstartCommand.__str__(self) | ||
67 | |||
68 | if self.moduleName != "": | ||
69 | retval += "device %s %s" % (self.type, self.moduleName) | ||
70 | |||
71 | if self.moduleOpts != "": | ||
72 | retval += " --opts=\"%s\"" % self.moduleOpts | ||
73 | |||
74 | return retval + "\n" | ||
75 | |||
76 | def _getParser(self): | ||
77 | op = KSOptionParser() | ||
78 | op.add_option("--opts", dest="moduleOpts", default="") | ||
79 | return op | ||
80 | |||
81 | def parse(self, args): | ||
82 | (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) | ||
83 | |||
84 | if len(extra) != 2: | ||
85 | raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("device command requires two arguments: module type and name")) | ||
86 | |||
87 | self.moduleOpts = opts.moduleOpts | ||
88 | self.type = extra[0] | ||
89 | self.moduleName = extra[1] | ||
90 | return self | ||
91 | |||
92 | class F8_Device(FC3_Device): | ||
93 | removedKeywords = FC3_Device.removedKeywords | ||
94 | removedAttrs = FC3_Device.removedAttrs | ||
95 | |||
96 | def __init__(self, writePriority=0, *args, **kwargs): | ||
97 | FC3_Device.__init__(self, writePriority, *args, **kwargs) | ||
98 | self.deviceList = kwargs.get("deviceList", []) | ||
99 | |||
100 | def __str__(self): | ||
101 | retval = "" | ||
102 | for device in self.deviceList: | ||
103 | retval += device.__str__() | ||
104 | |||
105 | return retval | ||
106 | |||
107 | def parse(self, args): | ||
108 | (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) | ||
109 | |||
110 | if len(extra) != 1: | ||
111 | raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("%s command requires a single argument: %s") % ("device", "module name")) | ||
112 | |||
113 | dd = F8_DeviceData() | ||
114 | self._setToObj(self.op, opts, dd) | ||
115 | dd.lineno = self.lineno | ||
116 | dd.moduleName = extra[0] | ||
117 | |||
118 | # Check for duplicates in the data list. | ||
119 | if dd in self.dataList(): | ||
120 | warnings.warn(_("A module with the name %s has already been defined.") % dd.moduleName) | ||
121 | |||
122 | return dd | ||
123 | |||
124 | def dataList(self): | ||
125 | return self.deviceList | ||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/deviceprobe.py b/scripts/lib/mic/3rdparty/pykickstart/commands/deviceprobe.py new file mode 100644 index 0000000000..9f462fdff7 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/deviceprobe.py | |||
@@ -0,0 +1,40 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | |||
class FC3_DeviceProbe(KickstartCommand):
    """Handler for the deviceprobe command.

    No option parsing is performed: everything after the command word is
    kept verbatim as a single string.
    """
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.deviceprobe = kwargs.get("deviceprobe", "")

    def __str__(self):
        """Emit 'deviceprobe <args>' only when an argument string is set."""
        output = KickstartCommand.__str__(self)
        if self.deviceprobe != "":
            output += "deviceprobe %s\n" % self.deviceprobe
        return output

    def parse(self, args):
        # Join all tokens back into the original argument string.
        self.deviceprobe = " ".join(args)
        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/displaymode.py b/scripts/lib/mic/3rdparty/pykickstart/commands/displaymode.py new file mode 100644 index 0000000000..6a12d58ec2 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/displaymode.py | |||
@@ -0,0 +1,68 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.constants import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_DisplayMode(KickstartCommand):
    """Handler for the cmdline/graphical/text display-mode commands."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    # Kickstart text emitted by __str__ for each recognized mode constant.
    _modeOutput = {DISPLAY_MODE_CMDLINE: "cmdline\n",
                   DISPLAY_MODE_GRAPHICAL: "# Use graphical install\ngraphical\n",
                   DISPLAY_MODE_TEXT: "# Use text mode install\ntext\n"}

    # Maps the literal command word back to its mode constant.
    _cmdToMode = {"cmdline": DISPLAY_MODE_CMDLINE,
                  "graphical": DISPLAY_MODE_GRAPHICAL,
                  "text": DISPLAY_MODE_TEXT}

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        self.displayMode = kwargs.get("displayMode", None)

    def __str__(self):
        prefix = KickstartCommand.__str__(self)
        if self.displayMode is None:
            return prefix
        # Unknown mode values produce no extra output, as before.
        return prefix + self._modeOutput.get(self.displayMode, "")

    def _getParser(self):
        # The display-mode commands accept no options at all.
        return KSOptionParser()

    def parse(self, args):
        """Record the display mode implied by the command word itself."""
        (opts, leftover) = self.op.parse_args(args=args, lineno=self.lineno)

        if leftover:
            raise KickstartParseError(formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % self.currentCmd))

        # self.currentCmd is set by the dispatcher to the command word that
        # triggered this handler; unrecognized words leave the mode alone.
        self.displayMode = self._cmdToMode.get(self.currentCmd, self.displayMode)
        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/dmraid.py b/scripts/lib/mic/3rdparty/pykickstart/commands/dmraid.py new file mode 100644 index 0000000000..993575a041 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/dmraid.py | |||
@@ -0,0 +1,91 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # Peter Jones <pjones@redhat.com> | ||
4 | # | ||
5 | # Copyright 2006, 2007 Red Hat, Inc. | ||
6 | # | ||
7 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
8 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
9 | # General Public License v.2. This program is distributed in the hope that it | ||
10 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
11 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | # See the GNU General Public License for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along with | ||
15 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
16 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
17 | # trademarks that are incorporated in the source code or documentation are not | ||
18 | # subject to the GNU General Public License and may only be used or replicated | ||
19 | # with the express permission of Red Hat, Inc. | ||
20 | # | ||
21 | from pykickstart.base import * | ||
22 | from pykickstart.errors import * | ||
23 | from pykickstart.options import * | ||
24 | |||
25 | import gettext | ||
26 | import warnings | ||
27 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
28 | |||
class FC6_DmRaidData(BaseData):
    """Data object describing one 'dmraid' kickstart line."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)

        # RAID set name and member devices come from the command line;
        # dmset is filled in later by the installer, not by parsing.
        self.name = kwargs.get("name", "")
        self.devices = kwargs.get("devices", [])
        self.dmset = kwargs.get("dmset", None)

    def __eq__(self, other):
        # Two dmraid lines are duplicates when name and devices both match.
        return self.name == other.name and self.devices == other.devices

    def __str__(self):
        devs = "".join([" --dev=\"%s\"" % dev for dev in self.devices])
        return BaseData.__str__(self) + "dmraid --name=%s%s\n" % (self.name, devs)
51 | |||
class FC6_DmRaid(KickstartCommand):
    """Handler for the dmraid command (one FC6_DmRaidData per line)."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=60, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # One FC6_DmRaidData object per parsed dmraid line.
        self.dmraids = kwargs.get("dmraids", [])

    def __str__(self):
        """Concatenate the kickstart text of every stored dmraid line."""
        retval = ""
        for dm in self.dmraids:
            retval += dm.__str__()

        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--name", dest="name", action="store", type="string",
                      required=1)
        op.add_option("--dev", dest="devices", action="append", type="string",
                      required=1)
        return op

    def parse(self, args):
        """Parse one dmraid line into a new FC6_DmRaidData object."""
        dm = FC6_DmRaidData()
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToObj(self.op, opts, dm)
        dm.lineno = self.lineno

        # BUGFIX: strip any leading path from the set name *after* the
        # option values have been copied onto dm.  The original did the
        # split before _setToObj, while dm.name was still "", so the
        # path-stripping silently never happened.
        dm.name = dm.name.split('/')[-1]

        # Check for duplicates in the data list.
        if dm in self.dataList():
            warnings.warn(_("A DM RAID device with the name %s and devices %s has already been defined.") % (dm.name, dm.devices))

        return dm

    def dataList(self):
        """Return the list parse() results should be appended to."""
        return self.dmraids
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/driverdisk.py b/scripts/lib/mic/3rdparty/pykickstart/commands/driverdisk.py new file mode 100644 index 0000000000..82a58c0e28 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/driverdisk.py | |||
@@ -0,0 +1,184 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007, 2008 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.options import * | ||
22 | |||
23 | import gettext | ||
24 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
25 | |||
class FC3_DriverDiskData(BaseData):
    """Data object describing one 'driverdisk' kickstart line."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)

        # A driver disk comes either from a partition or from --source;
        # --type names the filesystem on that partition.
        self.partition = kwargs.get("partition", "")
        self.source = kwargs.get("source", "")
        self.type = kwargs.get("type", "")

    def _getArgsAsStr(self):
        pieces = []

        if self.partition:
            pieces.append("%s" % self.partition)

        # Later versions delete the "type" attribute entirely, hence the
        # existence check (getattr mirrors the original hasattr guard).
        if getattr(self, "type", None):
            pieces.append(" --type=%s" % self.type)
        elif self.source:
            pieces.append("--source=%s" % self.source)
        return "".join(pieces)

    def __str__(self):
        return BaseData.__str__(self) + "driverdisk %s\n" % self._getArgsAsStr()
53 | |||
class FC4_DriverDiskData(FC3_DriverDiskData):
    """FC4 driverdisk data: adds the --biospart source alternative."""
    removedKeywords = FC3_DriverDiskData.removedKeywords
    removedAttrs = FC3_DriverDiskData.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        FC3_DriverDiskData.__init__(self, *args, **kwargs)
        self.deleteRemovedAttrs()

        self.biospart = kwargs.get("biospart", "")

    def _getArgsAsStr(self):
        pieces = []

        if self.partition:
            pieces.append("%s" % self.partition)

        # --type only survives in versions that still carry the attribute;
        # otherwise fall through the mutually exclusive source options.
        if getattr(self, "type", None):
            pieces.append(" --type=%s" % self.type)
        elif self.source:
            pieces.append("--source=%s" % self.source)
        elif self.biospart:
            pieces.append("--biospart=%s" % self.biospart)

        return "".join(pieces)
78 | |||
class F12_DriverDiskData(FC4_DriverDiskData):
    # F12 deprecated --type, so the "type" attribute is stripped from the
    # data object by deleteRemovedAttrs() below.
    removedKeywords = FC4_DriverDiskData.removedKeywords + ["type"]
    removedAttrs = FC4_DriverDiskData.removedAttrs + ["type"]

    def __init__(self, *args, **kwargs):
        """Create driverdisk data without the removed "type" attribute."""
        FC4_DriverDiskData.__init__(self, *args, **kwargs)
        self.deleteRemovedAttrs()

# The driverdisk data syntax is unchanged between F12 and F14.
F14_DriverDiskData = F12_DriverDiskData
88 | |||
class FC3_DriverDisk(KickstartCommand):
    """Handler for the driverdisk command (one data object per line)."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # One DriverDiskData object per parsed driverdisk line.
        self.driverdiskList = kwargs.get("driverdiskList", [])

    def __str__(self):
        """Concatenate the kickstart text of every stored driverdisk line."""
        retval = ""
        for dd in self.driverdiskList:
            retval += dd.__str__()

        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--source")
        op.add_option("--type")
        return op

    def parse(self, args):
        """Parse one driverdisk line.

        Exactly one of a positional partition argument or --source must be
        given; anything else raises KickstartValueError.
        """
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        if len(extra) > 1:
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Only one partition may be specified for driverdisk command."))

        if len(extra) == 1 and opts.source:
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Only one of --source and partition may be specified for driverdisk command."))

        if not extra and not opts.source:
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("One of --source or partition must be specified for driverdisk command."))

        # The data object comes from the version-specific handler so
        # subclasses automatically get the right data type.
        ddd = self.handler.DriverDiskData()
        self._setToObj(self.op, opts, ddd)
        ddd.lineno = self.lineno
        if len(extra) == 1:
            ddd.partition = extra[0]

        return ddd

    def dataList(self):
        """Return the list parse() results should be appended to."""
        return self.driverdiskList
134 | |||
class FC4_DriverDisk(FC3_DriverDisk):
    """FC4 driverdisk command: adds the --biospart source option."""
    removedKeywords = FC3_DriverDisk.removedKeywords
    # BUGFIX: was "FC3_DriverDisk.removedKeywords" -- a copy/paste slip.
    # Both lists are empty here, so behavior is unchanged, but the typo
    # would silently break attribute removal if the parent ever diverged.
    removedAttrs = FC3_DriverDisk.removedAttrs

    def _getParser(self):
        op = FC3_DriverDisk._getParser(self)
        op.add_option("--biospart")
        return op

    def parse(self, args):
        """Parse one driverdisk line.

        Exactly one of a positional partition argument, --source, or
        --biospart must be given; any other combination raises
        KickstartValueError.  (Raises use call syntax, valid in both
        Python 2 and 3.)
        """
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        if len(extra) > 1:
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Only one partition may be specified for driverdisk command.")))

        # The three source specifications are mutually exclusive.
        if len(extra) == 1 and opts.source:
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Only one of --source and partition may be specified for driverdisk command.")))
        elif len(extra) == 1 and opts.biospart:
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Only one of --biospart and partition may be specified for driverdisk command.")))
        elif opts.source and opts.biospart:
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Only one of --biospart and --source may be specified for driverdisk command.")))

        if not extra and not opts.source and not opts.biospart:
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("One of --source, --biospart, or partition must be specified for driverdisk command.")))

        # The data object comes from the version-specific handler so
        # subclasses automatically get the right data type.
        ddd = self.handler.DriverDiskData()
        self._setToObj(self.op, opts, ddd)
        ddd.lineno = self.lineno
        if len(extra) == 1:
            ddd.partition = extra[0]

        return ddd
167 | |||
class F12_DriverDisk(FC4_DriverDisk):
    """F12 driverdisk command: --type is deprecated (warned, still parsed)."""
    removedKeywords = FC4_DriverDisk.removedKeywords
    # BUGFIX: was "FC4_DriverDisk.removedKeywords" -- copy/paste typo;
    # harmless today (both lists are empty) but wrong by construction.
    removedAttrs = FC4_DriverDisk.removedAttrs

    def _getParser(self):
        op = FC4_DriverDisk._getParser(self)
        op.add_option("--type", deprecated=1)
        return op
176 | |||
class F14_DriverDisk(F12_DriverDisk):
    """F14 driverdisk command: the deprecated --type option is removed."""
    removedKeywords = F12_DriverDisk.removedKeywords
    # BUGFIX: was "F12_DriverDisk.removedKeywords" -- copy/paste typo;
    # harmless today (both lists are empty) but wrong by construction.
    removedAttrs = F12_DriverDisk.removedAttrs

    def _getParser(self):
        op = F12_DriverDisk._getParser(self)
        op.remove_option("--type")
        return op
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/fcoe.py b/scripts/lib/mic/3rdparty/pykickstart/commands/fcoe.py new file mode 100644 index 0000000000..33208499b3 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/fcoe.py | |||
@@ -0,0 +1,114 @@ | |||
1 | # | ||
2 | # Hans de Goede <hdegoede@redhat.com> | ||
3 | # | ||
4 | # Copyright 2009 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.options import * | ||
22 | |||
23 | import gettext | ||
24 | import warnings | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class F12_FcoeData(BaseData):
    """Data object describing one 'fcoe' kickstart line."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        # The NIC the FCoE device is attached to.
        self.nic = kwargs.get("nic", None)

    def __eq__(self, other):
        # Two fcoe lines are duplicates when they name the same NIC.
        return self.nic == other.nic

    def _getArgsAsStr(self):
        if self.nic:
            return " --nic=%s" % self.nic
        return ""

    def __str__(self):
        return BaseData.__str__(self) + "fcoe%s\n" % self._getArgsAsStr()
50 | |||
class F13_FcoeData(F12_FcoeData):
    """F13 fcoe data: adds the --dcb (data center bridging) flag."""
    removedKeywords = F12_FcoeData.removedKeywords
    removedAttrs = F12_FcoeData.removedAttrs

    def __init__(self, *args, **kwargs):
        F12_FcoeData.__init__(self, *args, **kwargs)
        self.dcb = kwargs.get("dcb", False)

    def _getArgsAsStr(self):
        argstr = F12_FcoeData._getArgsAsStr(self)
        return (argstr + " --dcb") if self.dcb else argstr
66 | |||
class F12_Fcoe(KickstartCommand):
    """Handler for the fcoe command (one FcoeData object per line)."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=71, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        # One FcoeData object per parsed fcoe line.
        self.fcoe = kwargs.get("fcoe", [])

    def __str__(self):
        """Concatenate the kickstart text of every stored fcoe line."""
        retval = ""
        for fcoe in self.fcoe:
            retval += fcoe.__str__()

        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--nic", dest="nic", required=1)
        return op

    def parse(self, args):
        """Parse one fcoe line; --nic is required and no positional
           arguments are accepted."""
        # The data object comes from the version-specific handler so
        # subclasses automatically get the right data type.
        zd = self.handler.FcoeData()
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        if len(extra) > 0:
            mapping = {"command": "fcoe", "options": extra}
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping)

        self._setToObj(self.op, opts, zd)
        zd.lineno = self.lineno

        # Check for duplicates in the data list.
        if zd in self.dataList():
            warnings.warn(_("A FCOE device with the name %s has already been defined.") % zd.nic)

        return zd

    def dataList(self):
        """Return the list parse() results should be appended to."""
        return self.fcoe
106 | |||
class F13_Fcoe(F12_Fcoe):
    """F13 fcoe command: adds the --dcb flag to the parser."""
    removedKeywords = F12_Fcoe.removedKeywords
    removedAttrs = F12_Fcoe.removedAttrs

    def _getParser(self):
        parser = F12_Fcoe._getParser(self)
        parser.add_option("--dcb", dest="dcb", action="store_true", default=False)
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/firewall.py b/scripts/lib/mic/3rdparty/pykickstart/commands/firewall.py new file mode 100644 index 0000000000..24a01bd610 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/firewall.py | |||
@@ -0,0 +1,193 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_Firewall(KickstartCommand):
    """Handler for the firewall command (enable/disable, ports, trusts)."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # enabled is tri-state: None means "not seen in the input file".
        self.enabled = kwargs.get("enabled", None)
        self.ports = kwargs.get("ports", [])
        self.trusts = kwargs.get("trusts", [])

    def __str__(self):
        """Regenerate the firewall line, turning well-known service ports
           back into their convenience flags (--ssh, --http, ...)."""
        extra = []
        filteredPorts = []

        retval = KickstartCommand.__str__(self)

        if self.enabled is None:
            return retval

        if self.enabled:
            # It's possible we have words in the ports list instead of
            # port:proto (s-c-kickstart may do this). So, filter those
            # out into their own list leaving what we expect.
            for port in self.ports:
                if port == "ssh":
                    extra.append(" --ssh")
                elif port == "telnet":
                    extra.append(" --telnet")
                elif port == "smtp":
                    extra.append(" --smtp")
                elif port == "http":
                    extra.append(" --http")
                elif port == "ftp":
                    extra.append(" --ftp")
                else:
                    filteredPorts.append(port)

            # All the port:proto strings go into a comma-separated list.
            portstr = ",".join(filteredPorts)
            if len(portstr) > 0:
                portstr = " --port=" + portstr
            else:
                portstr = ""

            extrastr = "".join(extra)
            truststr = ",".join(self.trusts)

            if len(truststr) > 0:
                truststr = " --trust=" + truststr

            # The output port list consists only of port:proto for
            # everything that we don't recognize, and special options for
            # those that we do.
            retval += "# Firewall configuration\nfirewall --enabled%s%s%s\n" % (extrastr, portstr, truststr)
        else:
            retval += "# Firewall configuration\nfirewall --disabled\n"

        return retval

    def _getParser(self):
        # Normalize each --port value to port:proto, defaulting to tcp.
        def firewall_port_cb (option, opt_str, value, parser):
            for p in value.split(","):
                p = p.strip()
                if p.find(":") == -1:
                    p = "%s:tcp" % p
                parser.values.ensure_value(option.dest, []).append(p)

        # The mapping expands each convenience flag into its port list
        # via the custom "map_extend" action below.
        op = KSOptionParser(mapping={"ssh":["22:tcp"], "telnet":["23:tcp"],
                                     "smtp":["25:tcp"], "http":["80:tcp", "443:tcp"],
                                     "ftp":["21:tcp"]})

        op.add_option("--disable", "--disabled", dest="enabled",
                      action="store_false")
        op.add_option("--enable", "--enabled", dest="enabled",
                      action="store_true", default=True)
        op.add_option("--ftp", "--http", "--smtp", "--ssh", "--telnet",
                      dest="ports", action="map_extend")
        op.add_option("--high", deprecated=1)
        op.add_option("--medium", deprecated=1)
        op.add_option("--port", dest="ports", action="callback",
                      callback=firewall_port_cb, nargs=1, type="string")
        op.add_option("--trust", dest="trusts", action="append")
        return op

    def parse(self, args):
        """Parse one firewall line; positional arguments are rejected."""
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        if len(extra) != 0:
            mapping = {"command": "firewall", "options": extra}
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping)

        self._setToSelf(self.op, opts)
        return self
122 | |||
class F9_Firewall(FC3_Firewall):
    """F9 firewall command: the --high/--medium levels are gone."""
    removedKeywords = FC3_Firewall.removedKeywords
    removedAttrs = FC3_Firewall.removedAttrs

    def _getParser(self):
        parser = FC3_Firewall._getParser(self)
        for obsolete in ("--high", "--medium"):
            parser.remove_option(obsolete)
        return parser
132 | |||
class F10_Firewall(F9_Firewall):
    """F10 firewall command: adds --service and service-style flags."""
    removedKeywords = F9_Firewall.removedKeywords
    removedAttrs = F9_Firewall.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        F9_Firewall.__init__(self, writePriority, *args, **kwargs)
        self.services = kwargs.get("services", [])

    def __str__(self):
        """Append the --service list to the parent's firewall line."""
        if self.enabled is None:
            return ""

        retval = F9_Firewall.__str__(self)
        if self.enabled:
            # Strip the parent's trailing newline so the services can be
            # appended to the same line before re-adding it.
            retval = retval.strip()

            svcstr = ",".join(self.services)
            if len(svcstr) > 0:
                svcstr = " --service=" + svcstr
            else:
                svcstr = ""

            return retval + "%s\n" % svcstr
        else:
            return retval

    def _getParser(self):
        def service_cb (option, opt_str, value, parser):
            # python2.4 does not support action="append_const" that we were
            # using for these options.  Instead, we have to fake it by
            # appending whatever the option string is to the service list.
            if not value:
                parser.values.ensure_value(option.dest, []).append(opt_str[2:])
                return

            for p in value.split(","):
                p = p.strip()
                parser.values.ensure_value(option.dest, []).append(p)

        op = F9_Firewall._getParser(self)
        op.add_option("--service", dest="services", action="callback",
                      callback=service_cb, nargs=1, type="string")
        op.add_option("--ftp", dest="services", action="callback",
                      callback=service_cb)
        op.add_option("--http", dest="services", action="callback",
                      callback=service_cb)
        op.add_option("--smtp", dest="services", action="callback",
                      callback=service_cb)
        op.add_option("--ssh", dest="services", action="callback",
                      callback=service_cb)
        op.add_option("--telnet", deprecated=1)
        return op
185 | |||
class F14_Firewall(F10_Firewall):
    """F14 firewall command: the deprecated --telnet option is removed."""
    removedKeywords = F10_Firewall.removedKeywords + ["telnet"]
    removedAttrs = F10_Firewall.removedAttrs + ["telnet"]

    def _getParser(self):
        parser = F10_Firewall._getParser(self)
        parser.remove_option("--telnet")
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/firstboot.py b/scripts/lib/mic/3rdparty/pykickstart/commands/firstboot.py new file mode 100644 index 0000000000..05c0ac11c6 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/firstboot.py | |||
@@ -0,0 +1,62 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.constants import * | ||
22 | from pykickstart.options import * | ||
23 | |||
class FC3_Firstboot(KickstartCommand):
    """Handler for the firstboot command (Setup Agent behavior)."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    # Kickstart text emitted by __str__ for each firstboot constant.
    _output = {FIRSTBOOT_SKIP: "firstboot --disable\n",
               FIRSTBOOT_DEFAULT: "# Run the Setup Agent on first boot\nfirstboot --enable\n",
               FIRSTBOOT_RECONFIG: "# Run the Setup Agent on first boot\nfirstboot --reconfig\n"}

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # Tri-state: None means the command never appeared in the input.
        self.firstboot = kwargs.get("firstboot", None)

    def __str__(self):
        prefix = KickstartCommand.__str__(self)
        if self.firstboot is None:
            return prefix
        # Unrecognized values produce no extra output, as before.
        return prefix + self._output.get(self.firstboot, "")

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--disable", "--disabled", dest="firstboot",
                      action="store_const", const=FIRSTBOOT_SKIP)
        op.add_option("--enable", "--enabled", dest="firstboot",
                      action="store_const", const=FIRSTBOOT_DEFAULT)
        op.add_option("--reconfig", dest="firstboot", action="store_const",
                      const=FIRSTBOOT_RECONFIG)
        return op

    def parse(self, args):
        (opts, _extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self.firstboot = opts.firstboot
        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/group.py b/scripts/lib/mic/3rdparty/pykickstart/commands/group.py new file mode 100644 index 0000000000..80ba5bdca6 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/group.py | |||
@@ -0,0 +1,88 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2009 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.constants import * | ||
22 | from pykickstart.errors import * | ||
23 | from pykickstart.options import * | ||
24 | |||
25 | import gettext | ||
26 | import warnings | ||
27 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
28 | |||
class F12_GroupData(BaseData):
    """Data object for one "group" command in a kickstart file."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        self.name = kwargs.get("name", "")
        self.gid = kwargs.get("gid", None)

    def __eq__(self, y):
        # Groups are identified by name only; the gid is ignored.
        return self.name == y.name

    def __ne__(self, y):
        # Python 2 does not derive __ne__ from __eq__, so without this
        # "!=" would fall back to identity comparison.
        return not self.__eq__(y)

    def __str__(self):
        retval = BaseData.__str__(self)
        retval += "group"

        if self.name:
            retval += " --name=%s" % self.name
        # Compare against None rather than truthiness so a legitimate
        # gid of 0 is still emitted.
        if self.gid is not None:
            retval += " --gid=%s" % self.gid

        return retval + "\n"
51 | |||
class F12_Group(KickstartCommand):
    """Handler for the F12 "group" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        # One GroupData object per group command seen.
        self.groupList = kwargs.get("groupList", [])

    def __str__(self):
        # Concatenate the formatted representation of every group.
        return "".join(str(group) for group in self.groupList)

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--name", required=1)
        op.add_option("--gid", type="int")
        return op

    def parse(self, args):
        gd = self.handler.GroupData()
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToObj(self.op, opts, gd)
        gd.lineno = self.lineno

        # Warn (but do not fail) when the same group name appears twice.
        if gd in self.dataList():
            warnings.warn(_("A group with the name %s has already been defined.") % gd.name)

        return gd

    def dataList(self):
        return self.groupList
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/ignoredisk.py b/scripts/lib/mic/3rdparty/pykickstart/commands/ignoredisk.py new file mode 100644 index 0000000000..676d080836 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/ignoredisk.py | |||
@@ -0,0 +1,139 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007, 2009 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.options import * | ||
22 | |||
23 | import gettext | ||
24 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
25 | |||
class FC3_IgnoreDisk(KickstartCommand):
    """Handler for the FC3 "ignoredisk" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        # Names of drives the installer should leave untouched.
        self.ignoredisk = kwargs.get("ignoredisk", [])

    def __str__(self):
        retval = KickstartCommand.__str__(self)
        if self.ignoredisk:
            retval += "ignoredisk --drives=%s\n" % ",".join(self.ignoredisk)
        return retval

    def _getParser(self):
        def drive_cb(option, opt_str, value, parser):
            # Split the comma-separated argument and accumulate each
            # drive name onto the destination list.
            drives = parser.values.ensure_value(option.dest, [])
            for name in value.split(','):
                drives.append(name)

        op = KSOptionParser()
        op.add_option("--drives", dest="ignoredisk", action="callback",
                      callback=drive_cb, nargs=1, type="string", required=1)
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)
        return self
58 | |||
class F8_IgnoreDisk(FC3_IgnoreDisk):
    """Handler for the F8 "ignoredisk" command, which adds --only-use."""
    removedKeywords = FC3_IgnoreDisk.removedKeywords
    removedAttrs = FC3_IgnoreDisk.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        FC3_IgnoreDisk.__init__(self, writePriority, *args, **kwargs)

        # Inverse of ignoredisk: the only drives the installer may use.
        self.onlyuse = kwargs.get("onlyuse", [])

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        # The two options are mutually exclusive, so emit at most one.
        if len(self.ignoredisk) > 0:
            retval += "ignoredisk --drives=%s\n" % ",".join(self.ignoredisk)
        elif len(self.onlyuse) > 0:
            retval += "ignoredisk --only-use=%s\n" % ",".join(self.onlyuse)

        return retval

    def parse(self, args, errorCheck=True):
        retval = FC3_IgnoreDisk.parse(self, args)

        if errorCheck:
            # Exactly one of --drives / --only-use must have been given.
            if (len(self.ignoredisk) == 0 and len(self.onlyuse) == 0) or (len(self.ignoredisk) > 0 and (len(self.onlyuse) > 0)):
                # Call form of raise (valid in Python 2 and 3) instead of
                # the Python 2-only "raise E, arg" statement.
                raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("One of --drives or --only-use must be specified for ignoredisk command.")))

        return retval

    def _getParser(self):
        def drive_cb (option, opt_str, value, parser):
            # Accumulate each comma-separated drive name.
            for d in value.split(','):
                parser.values.ensure_value(option.dest, []).append(d)

        op = FC3_IgnoreDisk._getParser(self)
        # Re-register --drives without required=1, since --only-use is an
        # acceptable alternative from F8 on.
        op.add_option("--drives", dest="ignoredisk", action="callback",
                      callback=drive_cb, nargs=1, type="string")
        op.add_option("--only-use", dest="onlyuse", action="callback",
                      callback=drive_cb, nargs=1, type="string")
        return op
98 | |||
class RHEL6_IgnoreDisk(F8_IgnoreDisk):
    """Handler for the RHEL6 "ignoredisk" command, adding --interactive."""
    removedKeywords = F8_IgnoreDisk.removedKeywords
    removedAttrs = F8_IgnoreDisk.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        F8_IgnoreDisk.__init__(self, writePriority, *args, **kwargs)

        self.interactive = kwargs.get("interactive", False)
        if self.interactive:
            # --interactive overrides any drive list.
            self.ignoredisk = []

    def __str__(self):
        retval = F8_IgnoreDisk.__str__(self)

        if self.interactive:
            retval = "ignoredisk --interactive\n"

        return retval

    def parse(self, args):
        # Skip the parent's two-way check; we validate three-way below.
        retval = F8_IgnoreDisk.parse(self, args, errorCheck=False)

        # Exactly one of the three mutually exclusive options is required.
        howmany = 0
        if len(self.ignoredisk) > 0:
            howmany += 1
        if len(self.onlyuse) > 0:
            howmany += 1
        if self.interactive:
            howmany += 1
        if howmany != 1:
            # Call form of raise works in both Python 2 and 3, unlike the
            # Python 2-only "raise E, arg" statement.
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("One of --drives , --only-use , or --interactive must be specified for ignoredisk command.")))

        return retval

    def _getParser(self):
        op = F8_IgnoreDisk._getParser(self)
        op.add_option("--interactive", dest="interactive", action="store_true",
                      default=False)
        return op
138 | |||
# F14 behaves identically to RHEL6 here, so reuse the same handler class.
F14_IgnoreDisk = RHEL6_IgnoreDisk
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/interactive.py b/scripts/lib/mic/3rdparty/pykickstart/commands/interactive.py new file mode 100644 index 0000000000..fa3dc025b1 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/interactive.py | |||
@@ -0,0 +1,58 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_Interactive(KickstartCommand):
    """Handler for the FC3 "interactive" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        self.interactive = kwargs.get("interactive", False)

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if self.interactive:
            retval += "# Use interactive kickstart installation method\ninteractive\n"

        return retval

    def _getParser(self):
        # The command takes no options; the empty parser exists so any
        # arguments end up in "extra" and can be rejected.
        op = KSOptionParser()
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        if len(extra) > 0:
            # Call form of raise works in both Python 2 and 3, unlike the
            # Python 2-only "raise E, arg" statement.
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "interactive"))

        self.interactive = True
        return self
55 | |||
class F14_Interactive(DeprecatedCommand):
    """Marks the "interactive" command as deprecated as of Fedora 14."""
    def __init__(self):
        DeprecatedCommand.__init__(self)
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/iscsi.py b/scripts/lib/mic/3rdparty/pykickstart/commands/iscsi.py new file mode 100644 index 0000000000..da5a544e86 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/iscsi.py | |||
@@ -0,0 +1,133 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # Peter Jones <pjones@redhat.com> | ||
4 | # | ||
5 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
6 | # | ||
7 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
8 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
9 | # General Public License v.2. This program is distributed in the hope that it | ||
10 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
11 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | # See the GNU General Public License for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along with | ||
15 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
16 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
17 | # trademarks that are incorporated in the source code or documentation are not | ||
18 | # subject to the GNU General Public License and may only be used or replicated | ||
19 | # with the express permission of Red Hat, Inc. | ||
20 | # | ||
21 | from pykickstart.base import * | ||
22 | from pykickstart.errors import * | ||
23 | from pykickstart.options import * | ||
24 | |||
25 | import gettext | ||
26 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
27 | |||
class FC6_IscsiData(BaseData):
    """Data object describing a single iscsi target specification."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        self.ipaddr = kwargs.get("ipaddr", "")
        self.port = kwargs.get("port", "3260")   # standard iscsi port
        self.target = kwargs.get("target", "")
        self.user = kwargs.get("user", None)
        self.password = kwargs.get("password", None)

    def _getArgsAsStr(self):
        # Emit each option only when it differs from its default value.
        parts = []
        if self.target != "":
            parts.append(" --target=%s" % self.target)
        if self.ipaddr != "":
            parts.append(" --ipaddr=%s" % self.ipaddr)
        if self.port != "3260":
            parts.append(" --port=%s" % self.port)
        if self.user is not None:
            parts.append(" --user=%s" % self.user)
        if self.password is not None:
            parts.append(" --password=%s" % self.password)
        return "".join(parts)

    def __str__(self):
        return BaseData.__str__(self) + "iscsi%s\n" % self._getArgsAsStr()
60 | |||
class F10_IscsiData(FC6_IscsiData):
    """FC6 iscsi data extended with reverse CHAP credentials."""
    removedKeywords = FC6_IscsiData.removedKeywords
    removedAttrs = FC6_IscsiData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC6_IscsiData.__init__(self, *args, **kwargs)
        self.user_in = kwargs.get("user_in", None)
        self.password_in = kwargs.get("password_in", None)

    def _getArgsAsStr(self):
        # Start from the parent's options, then append the reverse pair.
        pieces = [FC6_IscsiData._getArgsAsStr(self)]
        if self.user_in is not None:
            pieces.append(" --reverse-user=%s" % self.user_in)
        if self.password_in is not None:
            pieces.append(" --reverse-password=%s" % self.password_in)
        return "".join(pieces)
79 | |||
class FC6_Iscsi(KickstartCommand):
    """Handler for the FC6 "iscsi" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=71, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # List of IscsiData objects, one per iscsi command seen.
        self.iscsi = kwargs.get("iscsi", [])

    def __str__(self):
        retval = ""
        for iscsi in self.iscsi:
            retval += iscsi.__str__()

        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--target", dest="target", action="store", type="string")
        op.add_option("--ipaddr", dest="ipaddr", action="store", type="string",
                      required=1)
        op.add_option("--port", dest="port", action="store", type="string")
        op.add_option("--user", dest="user", action="store", type="string")
        op.add_option("--password", dest="password", action="store",
                      type="string")
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        if len(extra) != 0:
            mapping = {"command": "iscsi", "options": extra}
            # Call form of raise works in both Python 2 and 3, unlike the
            # Python 2-only "raise E, arg" statement.
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping))

        dd = self.handler.IscsiData()
        self._setToObj(self.op, opts, dd)
        dd.lineno = self.lineno
        return dd

    def dataList(self):
        return self.iscsi
122 | |||
class F10_Iscsi(FC6_Iscsi):
    """FC6 iscsi command extended with reverse CHAP options."""
    removedKeywords = FC6_Iscsi.removedKeywords
    removedAttrs = FC6_Iscsi.removedAttrs

    def _getParser(self):
        op = FC6_Iscsi._getParser(self)
        # Both new options are plain string stores.
        for optname, dest in (("--reverse-user", "user_in"),
                              ("--reverse-password", "password_in")):
            op.add_option(optname, dest=dest, action="store", type="string")
        return op
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/iscsiname.py b/scripts/lib/mic/3rdparty/pykickstart/commands/iscsiname.py new file mode 100644 index 0000000000..a87d0637d6 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/iscsiname.py | |||
@@ -0,0 +1,54 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # Peter Jones <pjones@redhat.com> | ||
4 | # | ||
5 | # Copyright 2006, 2007 Red Hat, Inc. | ||
6 | # | ||
7 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
8 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
9 | # General Public License v.2. This program is distributed in the hope that it | ||
10 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
11 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | # See the GNU General Public License for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along with | ||
15 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
16 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
17 | # trademarks that are incorporated in the source code or documentation are not | ||
18 | # subject to the GNU General Public License and may only be used or replicated | ||
19 | # with the express permission of Red Hat, Inc. | ||
20 | # | ||
21 | from pykickstart.base import * | ||
22 | from pykickstart.errors import * | ||
23 | from pykickstart.options import * | ||
24 | |||
25 | import gettext | ||
26 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
27 | |||
class FC6_IscsiName(KickstartCommand):
    """Handler for the FC6 "iscsiname" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=70, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        self.iscsiname = kwargs.get("iscsiname", "")

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if self.iscsiname != "":
            retval += "iscsiname %s\n" % self.iscsiname

        return retval

    def _getParser(self):
        # No options; the single positional argument is the initiator name.
        op = KSOptionParser()
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        if len(extra) != 1:
            # Call form of raise works in both Python 2 and 3, unlike the
            # Python 2-only "raise E, arg" statement.
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "iscsiname"))
        self.iscsiname = extra[0]
        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/key.py b/scripts/lib/mic/3rdparty/pykickstart/commands/key.py new file mode 100644 index 0000000000..c20c4231f6 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/key.py | |||
@@ -0,0 +1,64 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.constants import * | ||
22 | from pykickstart.errors import * | ||
23 | from pykickstart.options import * | ||
24 | |||
25 | import gettext | ||
26 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
27 | |||
class RHEL5_Key(KickstartCommand):
    """Handler for the RHEL5 "key" (installation key) command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        self.key = kwargs.get("key", "")
        self.skip = kwargs.get("skip", False)

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if self.key == KS_INSTKEY_SKIP:
            retval += "key --skip\n"
        elif self.key != "":
            retval += "key %s\n" % self.key

        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--skip", action="store_true", default=False)
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)

        if self.skip:
            # --skip wins over any positional key argument.
            self.key = KS_INSTKEY_SKIP
        elif len(extra) != 1:
            # Call form of raise works in both Python 2 and 3, unlike the
            # Python 2-only "raise E, arg" statement.
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "key"))
        else:
            self.key = extra[0]

        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/keyboard.py b/scripts/lib/mic/3rdparty/pykickstart/commands/keyboard.py new file mode 100644 index 0000000000..babc2acd4c --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/keyboard.py | |||
@@ -0,0 +1,55 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_Keyboard(KickstartCommand):
    """Handler for the FC3 "keyboard" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        self.keyboard = kwargs.get("keyboard", "")

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if self.keyboard != "":
            retval += "# System keyboard\nkeyboard %s\n" % self.keyboard

        return retval

    def _getParser(self):
        # No options; the single positional argument is the keyboard name.
        op = KSOptionParser()
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        if len(extra) != 1:
            # Call form of raise works in both Python 2 and 3, unlike the
            # Python 2-only "raise E, arg" statement.
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "keyboard"))

        self.keyboard = extra[0]
        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/lang.py b/scripts/lib/mic/3rdparty/pykickstart/commands/lang.py new file mode 100644 index 0000000000..cf5e46cda7 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/lang.py | |||
@@ -0,0 +1,60 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_Lang(KickstartCommand):
    """Handler for the FC3 "lang" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        self.lang = kwargs.get("lang", "")

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if self.lang != "":
            retval += "# System language\nlang %s\n" % self.lang

        return retval

    def _getParser(self):
        # No options; the single positional argument is the language code.
        op = KSOptionParser()
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        if len(extra) != 1:
            # Call form of raise works in both Python 2 and 3, unlike the
            # Python 2-only "raise E, arg" statement.
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "lang"))

        self.lang = extra[0]
        return self

    def apply(self, instroot="/"):
        """Write the chosen language to /etc/sysconfig/i18n under instroot."""
        if self.lang == "":
            return
        f = open(instroot + "/etc/sysconfig/i18n", "w+")
        try:
            f.write("LANG=\"%s\"\n" %(self.lang,))
        finally:
            # Close the handle even if the write fails (original leaked it).
            f.close()
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/langsupport.py b/scripts/lib/mic/3rdparty/pykickstart/commands/langsupport.py new file mode 100644 index 0000000000..73a9e537a9 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/langsupport.py | |||
@@ -0,0 +1,58 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.options import * | ||
22 | |||
class FC3_LangSupport(KickstartCommand):
    """Handler for the FC3 "langsupport" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        self.deflang = kwargs.get("deflang", "")
        self.supported = kwargs.get("supported", [])

    def __str__(self):
        # Assemble the output from optional pieces, then join once.
        pieces = [KickstartCommand.__str__(self)]

        if self.deflang:
            pieces.append("langsupport --default=%s" % self.deflang)

        if self.supported:
            pieces.append(" %s" % " ".join(self.supported))

        pieces.append("\n")
        return "".join(pieces)

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--default", dest="deflang", default="en_US.UTF-8")
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)
        # Everything that is not an option is a supported language.
        self.supported = extra
        return self
55 | |||
class FC5_LangSupport(DeprecatedCommand):
    """Marks the "langsupport" command as deprecated as of FC5."""
    def __init__(self):
        DeprecatedCommand.__init__(self)
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/lilocheck.py b/scripts/lib/mic/3rdparty/pykickstart/commands/lilocheck.py new file mode 100644 index 0000000000..92b3f930b6 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/lilocheck.py | |||
@@ -0,0 +1,54 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
27 | class FC3_LiloCheck(KickstartCommand): | ||
28 | removedKeywords = KickstartCommand.removedKeywords | ||
29 | removedAttrs = KickstartCommand.removedAttrs | ||
30 | |||
31 | def __init__(self, writePriority=0, *args, **kwargs): | ||
32 | KickstartCommand.__init__(self, writePriority, *args, **kwargs) | ||
33 | self.op = self._getParser() | ||
34 | self.check = kwargs.get("check", False) | ||
35 | |||
36 | def __str__(self): | ||
37 | retval = KickstartCommand.__str__(self) | ||
38 | |||
39 | if self.check: | ||
40 | retval += "lilocheck\n" | ||
41 | |||
42 | return retval | ||
43 | |||
44 | def _getParser(self): | ||
45 | op = KSOptionParser() | ||
46 | return op | ||
47 | |||
48 | def parse(self, args): | ||
49 | (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) | ||
50 | if len(extra) > 0: | ||
51 | raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "lilocheck") | ||
52 | |||
53 | self.check = True | ||
54 | return self | ||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/logging.py b/scripts/lib/mic/3rdparty/pykickstart/commands/logging.py new file mode 100644 index 0000000000..698561994d --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/logging.py | |||
@@ -0,0 +1,66 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007, 2009 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
27 | class FC6_Logging(KickstartCommand): | ||
28 | removedKeywords = KickstartCommand.removedKeywords | ||
29 | removedAttrs = KickstartCommand.removedAttrs | ||
30 | |||
31 | def __init__(self, writePriority=0, *args, **kwargs): | ||
32 | KickstartCommand.__init__(self, writePriority, *args, **kwargs) | ||
33 | self.op = self._getParser() | ||
34 | |||
35 | self.host = kwargs.get("host", "") | ||
36 | self.level = kwargs.get("level", "info") | ||
37 | self.port = kwargs.get("port", "") | ||
38 | |||
39 | def __str__(self): | ||
40 | retval = KickstartCommand.__str__(self) | ||
41 | retval += "# Installation logging level\nlogging --level=%s" % self.level | ||
42 | |||
43 | if self.host != "": | ||
44 | retval += " --host=%s" % self.host | ||
45 | |||
46 | if self.port != "": | ||
47 | retval += " --port=%s" % self.port | ||
48 | |||
49 | return retval + "\n" | ||
50 | |||
51 | def _getParser(self): | ||
52 | op = KSOptionParser() | ||
53 | op.add_option("--host") | ||
54 | op.add_option("--level", type="choice", default="info", | ||
55 | choices=["debug", "info", "warning", "error", "critical"]) | ||
56 | op.add_option("--port") | ||
57 | return op | ||
58 | |||
59 | def parse(self, args): | ||
60 | (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) | ||
61 | |||
62 | if opts.port and not opts.host: | ||
63 | raise KickstartParseError, formatErrorMsg(self.lineno, msg=_("Can't specify --port without --host.")) | ||
64 | |||
65 | self._setToSelf(self.op, opts) | ||
66 | return self | ||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/logvol.py b/scripts/lib/mic/3rdparty/pykickstart/commands/logvol.py new file mode 100644 index 0000000000..c1b9cc3a61 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/logvol.py | |||
@@ -0,0 +1,304 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007, 2008 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | import warnings | ||
26 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
27 | |||
class FC3_LogVolData(BaseData):
    """Data object describing a single 'logvol' line (FC3 feature set)."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        self.fstype = kwargs.get("fstype", "")
        self.grow = kwargs.get("grow", False)
        self.maxSizeMB = kwargs.get("maxSizeMB", 0)
        self.name = kwargs.get("name", "")
        self.format = kwargs.get("format", True)
        self.percent = kwargs.get("percent", 0)
        self.recommended = kwargs.get("recommended", False)
        self.size = kwargs.get("size", None)
        self.preexist = kwargs.get("preexist", False)
        self.vgname = kwargs.get("vgname", "")
        self.mountpoint = kwargs.get("mountpoint", "")

    def __eq__(self, y):
        # Two logical volumes collide when both VG and LV names match.
        return self.vgname == y.vgname and self.name == y.name

    def _getArgsAsStr(self):
        # Each emitted flag carries its own leading space.
        flags = []

        if self.fstype != "":
            flags.append(" --fstype=\"%s\"" % self.fstype)
        if self.grow:
            flags.append(" --grow")
        if self.maxSizeMB > 0:
            flags.append(" --maxsize=%d" % self.maxSizeMB)
        if not self.format:
            flags.append(" --noformat")
        if self.percent > 0:
            flags.append(" --percent=%d" % self.percent)
        if self.recommended:
            flags.append(" --recommended")
        # NOTE: size defaults to None; under Python 2 "None > 0" is False,
        # so an unset size is simply skipped.
        if self.size > 0:
            flags.append(" --size=%d" % self.size)
        if self.preexist:
            flags.append(" --useexisting")

        return "".join(flags)

    def __str__(self):
        retval = BaseData.__str__(self)
        retval += "logvol %s %s --name=%s --vgname=%s\n" % (self.mountpoint, self._getArgsAsStr(), self.name, self.vgname)
        return retval
75 | |||
class FC4_LogVolData(FC3_LogVolData):
    """FC4 logvol data: adds --bytes-per-inode and --fsoptions."""
    removedKeywords = FC3_LogVolData.removedKeywords
    removedAttrs = FC3_LogVolData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC3_LogVolData.__init__(self, *args, **kwargs)
        self.bytesPerInode = kwargs.get("bytesPerInode", 4096)
        self.fsopts = kwargs.get("fsopts", "")

    def _getArgsAsStr(self):
        args_str = FC3_LogVolData._getArgsAsStr(self)

        # bytesPerInode may have been deleted by a later subclass
        # (F9_LogVolData removes it), hence the hasattr guard.
        if hasattr(self, "bytesPerInode") and self.bytesPerInode != 0:
            args_str += " --bytes-per-inode=%d" % self.bytesPerInode

        if self.fsopts != "":
            args_str += " --fsoptions=\"%s\"" % self.fsopts

        return args_str
94 | |||
class RHEL5_LogVolData(FC4_LogVolData):
    """RHEL5 logvol data: adds LUKS encryption options."""
    removedKeywords = FC4_LogVolData.removedKeywords
    removedAttrs = FC4_LogVolData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC4_LogVolData.__init__(self, *args, **kwargs)
        self.encrypted = kwargs.get("encrypted", False)
        self.passphrase = kwargs.get("passphrase", "")

    def _getArgsAsStr(self):
        args_str = FC4_LogVolData._getArgsAsStr(self)

        if self.encrypted:
            args_str += " --encrypted"

        # Note: emitted independently of --encrypted, matching parse().
        if self.passphrase != "":
            args_str += " --passphrase=\"%s\"" % self.passphrase

        return args_str
114 | |||
class F9_LogVolData(FC4_LogVolData):
    """F9 logvol data: drops bytesPerInode, adds fsprofile and encryption."""
    removedKeywords = FC4_LogVolData.removedKeywords + ["bytesPerInode"]
    removedAttrs = FC4_LogVolData.removedAttrs + ["bytesPerInode"]

    def __init__(self, *args, **kwargs):
        FC4_LogVolData.__init__(self, *args, **kwargs)
        # Strip attributes listed in removedAttrs (bytesPerInode).
        self.deleteRemovedAttrs()

        self.fsopts = kwargs.get("fsopts", "")
        self.fsprofile = kwargs.get("fsprofile", "")
        self.encrypted = kwargs.get("encrypted", False)
        self.passphrase = kwargs.get("passphrase", "")

    def _getArgsAsStr(self):
        args_str = FC4_LogVolData._getArgsAsStr(self)

        if self.fsprofile != "":
            args_str += " --fsprofile=\"%s\"" % self.fsprofile

        if self.encrypted:
            args_str += " --encrypted"

        # Emitted independently of --encrypted, matching parse().
        if self.passphrase != "":
            args_str += " --passphrase=\"%s\"" % self.passphrase

        return args_str
140 | |||
class F12_LogVolData(F9_LogVolData):
    """F12 logvol data: adds escrow certificate support."""
    removedKeywords = F9_LogVolData.removedKeywords
    removedAttrs = F9_LogVolData.removedAttrs

    def __init__(self, *args, **kwargs):
        F9_LogVolData.__init__(self, *args, **kwargs)
        self.deleteRemovedAttrs()

        self.escrowcert = kwargs.get("escrowcert", "")
        self.backuppassphrase = kwargs.get("backuppassphrase", False)

    def _getArgsAsStr(self):
        args_str = F9_LogVolData._getArgsAsStr(self)

        # The escrow certificate only applies to encrypted volumes.
        if self.encrypted and self.escrowcert != "":
            args_str += " --escrowcert=\"%s\"" % self.escrowcert

        if self.backuppassphrase:
            args_str += " --backuppassphrase"

        return args_str
162 | |||
163 | F14_LogVolData = F12_LogVolData | ||
164 | |||
class F15_LogVolData(F14_LogVolData):
    """F15 logvol data: adds filesystem label support (--label)."""
    removedKeywords = F14_LogVolData.removedKeywords
    removedAttrs = F14_LogVolData.removedAttrs

    def __init__(self, *args, **kwargs):
        F14_LogVolData.__init__(self, *args, **kwargs)
        self.label = kwargs.get("label", "")

    def _getArgsAsStr(self):
        args_str = F14_LogVolData._getArgsAsStr(self)

        if self.label != "":
            args_str += " --label=\"%s\"" % self.label

        return args_str
180 | |||
181 | class FC3_LogVol(KickstartCommand): | ||
182 | removedKeywords = KickstartCommand.removedKeywords | ||
183 | removedAttrs = KickstartCommand.removedAttrs | ||
184 | |||
185 | def __init__(self, writePriority=133, *args, **kwargs): | ||
186 | KickstartCommand.__init__(self, writePriority, *args, **kwargs) | ||
187 | self.op = self._getParser() | ||
188 | |||
189 | self.lvList = kwargs.get("lvList", []) | ||
190 | |||
191 | def __str__(self): | ||
192 | retval = "" | ||
193 | |||
194 | for part in self.lvList: | ||
195 | retval += part.__str__() | ||
196 | |||
197 | return retval | ||
198 | |||
199 | def _getParser(self): | ||
200 | def lv_cb (option, opt_str, value, parser): | ||
201 | parser.values.format = False | ||
202 | parser.values.preexist = True | ||
203 | |||
204 | op = KSOptionParser() | ||
205 | op.add_option("--fstype", dest="fstype") | ||
206 | op.add_option("--grow", dest="grow", action="store_true", | ||
207 | default=False) | ||
208 | op.add_option("--maxsize", dest="maxSizeMB", action="store", type="int", | ||
209 | nargs=1) | ||
210 | op.add_option("--name", dest="name", required=1) | ||
211 | op.add_option("--noformat", action="callback", callback=lv_cb, | ||
212 | dest="format", default=True, nargs=0) | ||
213 | op.add_option("--percent", dest="percent", action="store", type="int", | ||
214 | nargs=1) | ||
215 | op.add_option("--recommended", dest="recommended", action="store_true", | ||
216 | default=False) | ||
217 | op.add_option("--size", dest="size", action="store", type="int", | ||
218 | nargs=1) | ||
219 | op.add_option("--useexisting", dest="preexist", action="store_true", | ||
220 | default=False) | ||
221 | op.add_option("--vgname", dest="vgname", required=1) | ||
222 | return op | ||
223 | |||
224 | def parse(self, args): | ||
225 | (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) | ||
226 | |||
227 | if len(extra) == 0: | ||
228 | raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Mount point required for %s") % "logvol") | ||
229 | |||
230 | lvd = self.handler.LogVolData() | ||
231 | self._setToObj(self.op, opts, lvd) | ||
232 | lvd.lineno = self.lineno | ||
233 | lvd.mountpoint=extra[0] | ||
234 | |||
235 | # Check for duplicates in the data list. | ||
236 | if lvd in self.dataList(): | ||
237 | warnings.warn(_("A logical volume with the name %s has already been defined in volume group %s.") % (lvd.device, lvd.vgname)) | ||
238 | |||
239 | return lvd | ||
240 | |||
241 | def dataList(self): | ||
242 | return self.lvList | ||
243 | |||
class FC4_LogVol(FC3_LogVol):
    """FC4 'logvol': adds --bytes-per-inode and --fsoptions."""
    removedKeywords = FC3_LogVol.removedKeywords
    removedAttrs = FC3_LogVol.removedAttrs

    def _getParser(self):
        parser = FC3_LogVol._getParser(self)
        parser.add_option("--bytes-per-inode", dest="bytesPerInode",
                          action="store", type="int", nargs=1)
        parser.add_option("--fsoptions", dest="fsopts")
        return parser
254 | |||
class RHEL5_LogVol(FC4_LogVol):
    """RHEL5 'logvol': adds LUKS encryption options."""
    removedKeywords = FC4_LogVol.removedKeywords
    removedAttrs = FC4_LogVol.removedAttrs

    def _getParser(self):
        parser = FC4_LogVol._getParser(self)
        parser.add_option("--encrypted", action="store_true", default=False)
        parser.add_option("--passphrase")
        return parser
264 | |||
class F9_LogVol(FC4_LogVol):
    """F9 'logvol': deprecates --bytes-per-inode, adds --fsprofile and
    encryption options."""
    removedKeywords = FC4_LogVol.removedKeywords
    removedAttrs = FC4_LogVol.removedAttrs

    def _getParser(self):
        parser = FC4_LogVol._getParser(self)
        # Still accepted, but warns; removed entirely in F14.
        parser.add_option("--bytes-per-inode", deprecated=1)
        parser.add_option("--fsprofile", dest="fsprofile", action="store",
                          type="string", nargs=1)
        parser.add_option("--encrypted", action="store_true", default=False)
        parser.add_option("--passphrase")
        return parser
277 | |||
class F12_LogVol(F9_LogVol):
    """F12 'logvol': adds escrow certificate options."""
    removedKeywords = F9_LogVol.removedKeywords
    removedAttrs = F9_LogVol.removedAttrs

    def _getParser(self):
        parser = F9_LogVol._getParser(self)
        parser.add_option("--escrowcert")
        parser.add_option("--backuppassphrase", action="store_true", default=False)
        return parser
287 | |||
class F14_LogVol(F12_LogVol):
    """F14 'logvol': removes the long-deprecated --bytes-per-inode flag."""
    removedKeywords = F12_LogVol.removedKeywords
    removedAttrs = F12_LogVol.removedAttrs

    def _getParser(self):
        parser = F12_LogVol._getParser(self)
        parser.remove_option("--bytes-per-inode")
        return parser
296 | |||
class F15_LogVol(F14_LogVol):
    """F15 'logvol': adds filesystem label support (--label)."""
    removedKeywords = F14_LogVol.removedKeywords
    removedAttrs = F14_LogVol.removedAttrs

    def _getParser(self):
        parser = F14_LogVol._getParser(self)
        parser.add_option("--label")
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/mediacheck.py b/scripts/lib/mic/3rdparty/pykickstart/commands/mediacheck.py new file mode 100644 index 0000000000..388823a839 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/mediacheck.py | |||
@@ -0,0 +1,53 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
27 | class FC4_MediaCheck(KickstartCommand): | ||
28 | removedKeywords = KickstartCommand.removedKeywords | ||
29 | removedAttrs = KickstartCommand.removedAttrs | ||
30 | |||
31 | def __init__(self, writePriority=0, *args, **kwargs): | ||
32 | KickstartCommand.__init__(self, writePriority, *args, **kwargs) | ||
33 | self.op = self._getParser() | ||
34 | self.mediacheck = kwargs.get("mediacheck", False) | ||
35 | |||
36 | def __str__(self): | ||
37 | retval = KickstartCommand.__str__(self) | ||
38 | if self.mediacheck: | ||
39 | retval += "mediacheck\n" | ||
40 | |||
41 | return retval | ||
42 | |||
43 | def _getParser(self): | ||
44 | op = KSOptionParser() | ||
45 | return op | ||
46 | |||
47 | def parse(self, args): | ||
48 | (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) | ||
49 | if len(extra) > 0: | ||
50 | raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "mediacheck") | ||
51 | |||
52 | self.mediacheck = True | ||
53 | return self | ||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/method.py b/scripts/lib/mic/3rdparty/pykickstart/commands/method.py new file mode 100644 index 0000000000..e21064acda --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/method.py | |||
@@ -0,0 +1,186 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007, 2009 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_Method(KickstartCommand):
    """Handler shared by the FC3 install-method commands.

    One class serves the cdrom, harddrive, nfs and url commands: the
    parser dispatches on self.currentCmd (set by the framework before
    parse() is called), and self.method records which command was seen.
    """
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        # Which method command ("cdrom"/"harddrive"/"nfs"/"url") was parsed.
        self.method = kwargs.get("method", "")

        # Set all these attributes so calls to this command's __call__
        # method can set them.  However we don't want to provide them as
        # arguments to __init__ because method is special.
        self.biospart = None
        self.partition = None
        self.server = None
        self.dir = None
        self.url = None

    def __str__(self):
        # Emit only the section for the method that was actually parsed;
        # an unset method produces just the inherited header.
        retval = KickstartCommand.__str__(self)

        if self.method == "cdrom":
            retval += "# Use CDROM installation media\ncdrom\n"
        elif self.method == "harddrive":
            msg = "# Use hard drive installation media\nharddrive --dir=%s" % self.dir

            if self.biospart is not None:
                retval += msg + " --biospart=%s\n" % self.biospart
            else:
                retval += msg + " --partition=%s\n" % self.partition
        elif self.method == "nfs":
            retval += "# Use NFS installation media\nnfs --server=%s --dir=%s\n" % (self.server, self.dir)
        elif self.method == "url":
            retval += "# Use network installation\nurl --url=\"%s\"\n" % self.url

        return retval

    def _getParser(self):
        # A fresh parser is built per parse() call because the accepted
        # options depend on which command name triggered us.
        op = KSOptionParser()

        # method = "cdrom" falls through to the return
        if self.currentCmd == "harddrive":
            op.add_option("--biospart", dest="biospart")
            op.add_option("--partition", dest="partition")
            op.add_option("--dir", dest="dir", required=1)
        elif self.currentCmd == "nfs":
            op.add_option("--server", dest="server", required=1)
            op.add_option("--dir", dest="dir", required=1)
        elif self.currentCmd == "url":
            op.add_option("--url", dest="url", required=1)

        return op

    def parse(self, args):
        self.method = self.currentCmd

        op = self._getParser()
        (opts, extra) = op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(op, opts)

        if self.currentCmd == "harddrive":
            # Error when neither or both of --biospart/--partition were
            # given; relies on 'and' binding tighter than 'or'.
            if self.biospart is None and self.partition is None or \
               self.biospart is not None and self.partition is not None:
                raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("One of biospart or partition options must be specified."))

        return self
92 | |||
class FC6_Method(FC3_Method):
    """FC6 install-method handler: adds --opts (mount options) for nfs."""
    removedKeywords = FC3_Method.removedKeywords
    removedAttrs = FC3_Method.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        FC3_Method.__init__(self, writePriority, *args, **kwargs)

        # Same reason for this attribute as the comment in FC3_Method.
        self.opts = None

    def __str__(self):
        header = KickstartCommand.__str__(self)

        if self.method == "cdrom":
            return header + "# Use CDROM installation media\ncdrom\n"

        if self.method == "harddrive":
            line = "# Use hard drive installation media\nharddrive --dir=%s" % self.dir
            if self.biospart is not None:
                line += " --biospart=%s\n" % self.biospart
            else:
                line += " --partition=%s\n" % self.partition
            return header + line

        if self.method == "nfs":
            line = "# Use NFS installation media\nnfs --server=%s --dir=%s" % (self.server, self.dir)
            if self.opts is not None:
                line += " --opts=\"%s\"" % self.opts
            return header + line + "\n"

        if self.method == "url":
            return header + "# Use network installation\nurl --url=\"%s\"\n" % self.url

        # Unknown/unset method: just the inherited header.
        return header

    def _getParser(self):
        parser = FC3_Method._getParser(self)

        if self.currentCmd == "nfs":
            parser.add_option("--opts", dest="opts")

        return parser
132 | |||
class F13_Method(FC6_Method):
    """F13 install-method handler: adds --proxy for url installs."""
    removedKeywords = FC6_Method.removedKeywords
    removedAttrs = FC6_Method.removedAttrs

    def __init__(self, *args, **kwargs):
        FC6_Method.__init__(self, *args, **kwargs)

        # Not an __init__ kwarg, for the same reason as in FC3_Method.
        self.proxy = ""

    def __str__(self):
        text = FC6_Method.__str__(self)

        if self.method == "url" and self.proxy:
            # Splice the proxy flag onto the url line before its newline.
            text = text.strip()
            text += " --proxy=\"%s\"\n" % self.proxy

        return text

    def _getParser(self):
        parser = FC6_Method._getParser(self)

        if self.currentCmd == "url":
            parser.add_option("--proxy")

        return parser
159 | |||
class F14_Method(F13_Method):
    """F14 install-method handler: adds --noverifyssl for url installs."""
    removedKeywords = F13_Method.removedKeywords
    removedAttrs = F13_Method.removedAttrs

    def __init__(self, *args, **kwargs):
        F13_Method.__init__(self, *args, **kwargs)

        # Not an __init__ kwarg, for the same reason as in FC3_Method.
        self.noverifyssl = False

    def __str__(self):
        text = F13_Method.__str__(self)

        if self.method == "url" and self.noverifyssl:
            # Splice the flag onto the url line before its newline.
            text = text.strip()
            text += " --noverifyssl\n"

        return text

    def _getParser(self):
        parser = F13_Method._getParser(self)

        if self.currentCmd == "url":
            parser.add_option("--noverifyssl", action="store_true", default=False)

        return parser
185 | |||
186 | RHEL6_Method = F14_Method | ||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/monitor.py b/scripts/lib/mic/3rdparty/pykickstart/commands/monitor.py new file mode 100644 index 0000000000..8c8c2c4fc9 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/monitor.py | |||
@@ -0,0 +1,106 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007, 2008 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
27 | class FC3_Monitor(KickstartCommand): | ||
28 | removedKeywords = KickstartCommand.removedKeywords | ||
29 | removedAttrs = KickstartCommand.removedAttrs | ||
30 | |||
31 | def __init__(self, writePriority=0, *args, **kwargs): | ||
32 | KickstartCommand.__init__(self, writePriority, *args, **kwargs) | ||
33 | self.op = self._getParser() | ||
34 | |||
35 | self.hsync = kwargs.get("hsync", "") | ||
36 | self.monitor = kwargs.get("monitor", "") | ||
37 | self.vsync = kwargs.get("vsync", "") | ||
38 | |||
39 | def __str__(self): | ||
40 | retval = KickstartCommand.__str__(self) | ||
41 | retval += "monitor" | ||
42 | |||
43 | if self.hsync != "": | ||
44 | retval += " --hsync=%s" % self.hsync | ||
45 | if self.monitor != "": | ||
46 | retval += " --monitor=\"%s\"" % self.monitor | ||
47 | if self.vsync != "": | ||
48 | retval += " --vsync=%s" % self.vsync | ||
49 | |||
50 | if retval != "monitor": | ||
51 | return retval + "\n" | ||
52 | else: | ||
53 | return "" | ||
54 | |||
55 | def _getParser(self): | ||
56 | op = KSOptionParser() | ||
57 | op.add_option("--hsync") | ||
58 | op.add_option("--monitor") | ||
59 | op.add_option("--vsync") | ||
60 | return op | ||
61 | |||
62 | def parse(self, args): | ||
63 | (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) | ||
64 | |||
65 | if extra: | ||
66 | mapping = {"cmd": "monitor", "options": extra} | ||
67 | raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(cmd)s command: %(options)s") % mapping) | ||
68 | |||
69 | self._setToSelf(self.op, opts) | ||
70 | return self | ||
71 | |||
class FC6_Monitor(FC3_Monitor):
    """FC6 'monitor': adds --noprobe to skip monitor autodetection."""
    removedKeywords = FC3_Monitor.removedKeywords
    removedAttrs = FC3_Monitor.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        FC3_Monitor.__init__(self, writePriority, *args, **kwargs)
        # True unless --noprobe was given.
        self.probe = kwargs.get("probe", True)

    def __str__(self):
        text = KickstartCommand.__str__(self)
        text += "monitor"

        if self.hsync != "":
            text += " --hsync=%s" % self.hsync
        if self.monitor != "":
            text += " --monitor=\"%s\"" % self.monitor
        if not self.probe:
            text += " --noprobe"
        if self.vsync != "":
            text += " --vsync=%s" % self.vsync

        # Emit nothing at all when no option was set (and the inherited
        # header was empty).
        if text == "monitor":
            return ""
        return text + "\n"

    def _getParser(self):
        parser = FC3_Monitor._getParser(self)
        parser.add_option("--noprobe", dest="probe", action="store_false",
                          default=True)
        return parser
103 | |||
class F10_Monitor(DeprecatedCommand):
    # The monitor command was deprecated in Fedora 10: parsing it only
    # emits a deprecation warning (handled by DeprecatedCommand).
    def __init__(self):
        DeprecatedCommand.__init__(self)
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/mouse.py b/scripts/lib/mic/3rdparty/pykickstart/commands/mouse.py new file mode 100644 index 0000000000..c643bcedc3 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/mouse.py | |||
@@ -0,0 +1,70 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class RHEL3_Mouse(KickstartCommand):
    """Handler for the RHEL3 "mouse" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # Device node for the mouse, three-button emulation flag, and the
        # mouse type name given as the single positional argument.
        self.device = kwargs.get("device", "")
        self.emulthree = kwargs.get("emulthree", False)
        self.mouse = kwargs.get("mouse", "")

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        opts = ""
        if self.device:
            opts += "--device=%s " % self.device
        if self.emulthree:
            opts += "--emulthree "

        # Only emit the command line if a mouse type was actually set.
        if self.mouse:
            retval += "# System mouse\nmouse %s%s\n" % (opts, self.mouse)
        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--device", dest="device", default="")
        op.add_option("--emulthree", dest="emulthree", default=False, action="store_true")
        return op

    def parse(self, args):
        """Parse the mouse command; exactly one positional argument
        (the mouse type) is required."""
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)

        if len(extra) != 1:
            # Call-style raise is valid on both Python 2 and 3; the old
            # "raise Exc, arg" form is a Python 3 syntax error.
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "mouse"))

        self.mouse = extra[0]
        return self
67 | |||
class FC3_Mouse(DeprecatedCommand):
    # The mouse command was deprecated in FC3; parsing it only emits a
    # deprecation warning (behavior supplied by DeprecatedCommand).
    def __init__(self):
        DeprecatedCommand.__init__(self)
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/multipath.py b/scripts/lib/mic/3rdparty/pykickstart/commands/multipath.py new file mode 100644 index 0000000000..84ba755e68 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/multipath.py | |||
@@ -0,0 +1,111 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # Peter Jones <pjones@redhat.com> | ||
4 | # | ||
5 | # Copyright 2006, 2007 Red Hat, Inc. | ||
6 | # | ||
7 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
8 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
9 | # General Public License v.2. This program is distributed in the hope that it | ||
10 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
11 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | # See the GNU General Public License for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along with | ||
15 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
16 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
17 | # trademarks that are incorporated in the source code or documentation are not | ||
18 | # subject to the GNU General Public License and may only be used or replicated | ||
19 | # with the express permission of Red Hat, Inc. | ||
20 | # | ||
21 | from pykickstart.base import * | ||
22 | from pykickstart.errors import * | ||
23 | from pykickstart.options import * | ||
24 | |||
25 | import gettext | ||
26 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
27 | |||
class FC6_MpPathData(BaseData):
    """One path (device + udev rule) belonging to a multipath device."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        self.mpdev = kwargs.get("mpdev", "")
        self.device = kwargs.get("device", "")
        self.rule = kwargs.get("rule", "")

    def __str__(self):
        # Rendered as the tail of a "multipath" line; the owning
        # FC6_MultiPathData supplies the leading "--mpdev=" part.
        return " --device=%(device)s --rule=\"%(rule)s\"" % \
               {"device": self.device, "rule": self.rule}
40 | |||
class FC6_MultiPathData(BaseData):
    """A named multipath device and the list of paths that back it."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        self.name = kwargs.get("name", "")
        self.paths = kwargs.get("paths", [])

    def __str__(self):
        # One "multipath" line per member path, all sharing this name.
        lines = ["multipath --mpdev=%s %s\n" % (self.name, str(p))
                 for p in self.paths]
        return BaseData.__str__(self) + "".join(lines)
57 | |||
class FC6_MultiPath(KickstartCommand):
    """Handler for the FC6 "multipath" kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=50, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # List of FC6_MultiPathData objects, one per multipath device.
        self.mpaths = kwargs.get("mpaths", [])

    def __str__(self):
        retval = ""
        for mpath in self.mpaths:
            retval += mpath.__str__()

        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--name", dest="name", action="store", type="string",
                      required=1)
        op.add_option("--device", dest="device", action="store", type="string",
                      required=1)
        op.add_option("--rule", dest="rule", action="store", type="string",
                      required=1)
        return op

    def parse(self, args):
        """Parse one multipath line, rejecting a device that already
        belongs to another multipath."""
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        dd = FC6_MpPathData()
        self._setToObj(self.op, opts, dd)
        dd.lineno = self.lineno
        # --name may be given as a full /dev path; keep only the basename.
        dd.mpdev = dd.mpdev.split('/')[-1]

        parent = None
        for x in range(0, len(self.mpaths)):
            mpath = self.mpaths[x]
            for path in mpath.paths:
                if path.device == dd.device:
                    mapping = {"device": path.device, "multipathdev": path.mpdev}
                    # Call-style raise: valid on both Python 2 and 3 (the
                    # old "raise Exc, arg" form is a Py3 syntax error).
                    raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Device '%(device)s' is already used in multipath '%(multipathdev)s'") % mapping))
            if mpath.name == dd.mpdev:
                parent = x

        if parent is None:
            # NOTE(review): the freshly created FC6_MultiPathData is
            # returned without a name and without this path appended, and
            # dd is discarded -- upstream pykickstart sets mpath.name and
            # appends dd here.  Preserved as-is; confirm against upstream.
            mpath = FC6_MultiPathData()
            return mpath
        else:
            mpath = self.mpaths[parent]

        return dd

    def dataList(self):
        return self.mpaths
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/network.py b/scripts/lib/mic/3rdparty/pykickstart/commands/network.py new file mode 100644 index 0000000000..9b67f92831 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/network.py | |||
@@ -0,0 +1,363 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007, 2008 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.constants import * | ||
22 | from pykickstart.errors import * | ||
23 | from pykickstart.options import * | ||
24 | |||
25 | import gettext | ||
26 | import warnings | ||
27 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
28 | |||
class FC3_NetworkData(BaseData):
    """Data for one "network" line (FC3 syntax)."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        # All attributes default to empty/False except bootProto (dhcp)
        # and onboot (True).
        for attr, default in (("bootProto", BOOTPROTO_DHCP),
                              ("dhcpclass", ""), ("device", ""),
                              ("essid", ""), ("ethtool", ""),
                              ("gateway", ""), ("hostname", ""),
                              ("ip", ""), ("mtu", ""),
                              ("nameserver", ""), ("netmask", ""),
                              ("nodns", False), ("onboot", True),
                              ("wepkey", "")):
            setattr(self, attr, kwargs.get(attr, default))

    def __eq__(self, y):
        # Two entries match when both name the same (non-empty) device.
        return self.device and self.device == y.device

    def _getArgsAsStr(self):
        """Render all set options, space-separated, in canonical order."""
        pieces = []
        add = pieces.append

        if self.bootProto != "":
            add("--bootproto=%s" % self.bootProto)
        if self.dhcpclass != "":
            add("--dhcpclass=%s" % self.dhcpclass)
        if self.device != "":
            add("--device=%s" % self.device)
        if self.essid != "":
            add("--essid=\"%s\"" % self.essid)
        if self.ethtool != "":
            add("--ethtool=\"%s\"" % self.ethtool)
        if self.gateway != "":
            add("--gateway=%s" % self.gateway)
        if self.hostname != "":
            add("--hostname=%s" % self.hostname)
        if self.ip != "":
            add("--ip=%s" % self.ip)
        if self.mtu != "":
            add("--mtu=%s" % self.mtu)
        if self.nameserver != "":
            add("--nameserver=%s" % self.nameserver)
        if self.netmask != "":
            add("--netmask=%s" % self.netmask)
        if self.nodns:
            add("--nodns")
        if not self.onboot:
            add("--onboot=off")
        if self.wepkey != "":
            add("--wepkey=%s" % self.wepkey)

        return "".join(" " + p for p in pieces)

    def __str__(self):
        return BaseData.__str__(self) + "network %s\n" % self._getArgsAsStr()
91 | |||
class FC4_NetworkData(FC3_NetworkData):
    """FC4 network data; adds the --notksdevice flag."""
    removedKeywords = FC3_NetworkData.removedKeywords
    removedAttrs = FC3_NetworkData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC3_NetworkData.__init__(self, *args, **kwargs)
        self.notksdevice = kwargs.get("notksdevice", False)

    def _getArgsAsStr(self):
        suffix = " --notksdevice" if self.notksdevice else ""
        return FC3_NetworkData._getArgsAsStr(self) + suffix
107 | |||
class FC6_NetworkData(FC4_NetworkData):
    """FC6 network data; adds flags to disable either IP family."""
    removedKeywords = FC4_NetworkData.removedKeywords
    removedAttrs = FC4_NetworkData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC4_NetworkData.__init__(self, *args, **kwargs)
        self.noipv4 = kwargs.get("noipv4", False)
        self.noipv6 = kwargs.get("noipv6", False)

    def _getArgsAsStr(self):
        retval = FC4_NetworkData._getArgsAsStr(self)
        for flag, enabled in (("--noipv4", self.noipv4),
                              ("--noipv6", self.noipv6)):
            if enabled:
                retval += " " + flag
        return retval
126 | |||
class F8_NetworkData(FC6_NetworkData):
    """F8 network data; adds the --ipv6 option."""
    removedKeywords = FC6_NetworkData.removedKeywords
    removedAttrs = FC6_NetworkData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC6_NetworkData.__init__(self, *args, **kwargs)
        # Value of --ipv6 as given on the kickstart line.
        self.ipv6 = kwargs.get("ipv6", "")

    def _getArgsAsStr(self):
        retval = FC6_NetworkData._getArgsAsStr(self)

        if self.ipv6 != "":
            # Fixed: the original emitted " --ipv6" % self.ipv6 -- a format
            # string with no conversion specifier, which raises TypeError
            # at run time and also drops the value from the output.
            retval += " --ipv6=%s" % self.ipv6

        return retval
142 | |||
class F16_NetworkData(F8_NetworkData):
    """F16 network data; adds --activate, --nodefroute and --wpakey."""
    removedKeywords = F8_NetworkData.removedKeywords
    removedAttrs = F8_NetworkData.removedAttrs

    def __init__(self, *args, **kwargs):
        F8_NetworkData.__init__(self, *args, **kwargs)
        self.activate = kwargs.get("activate", False)
        self.nodefroute = kwargs.get("nodefroute", False)
        self.wpakey = kwargs.get("wpakey", "")

    def _getArgsAsStr(self):
        retval = F8_NetworkData._getArgsAsStr(self)

        if self.activate:
            retval += " --activate"
        if self.nodefroute:
            retval += " --nodefroute"
        if self.wpakey != "":
            # Fixed: the leading space was missing, which glued --wpakey
            # onto the previous option in the emitted "network" line.
            retval += " --wpakey=%s" % self.wpakey

        return retval
164 | |||
class RHEL4_NetworkData(FC3_NetworkData):
    """RHEL4 network data; backports the --notksdevice flag."""
    removedKeywords = FC3_NetworkData.removedKeywords
    removedAttrs = FC3_NetworkData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC3_NetworkData.__init__(self, *args, **kwargs)
        self.notksdevice = kwargs.get("notksdevice", False)

    def _getArgsAsStr(self):
        suffix = " --notksdevice" if self.notksdevice else ""
        return FC3_NetworkData._getArgsAsStr(self) + suffix
180 | |||
class RHEL6_NetworkData(F8_NetworkData):
    """RHEL6 network data; backports --activate and --nodefroute."""
    removedKeywords = F8_NetworkData.removedKeywords
    removedAttrs = F8_NetworkData.removedAttrs

    def __init__(self, *args, **kwargs):
        F8_NetworkData.__init__(self, *args, **kwargs)
        self.activate = kwargs.get("activate", False)
        self.nodefroute = kwargs.get("nodefroute", False)

    def _getArgsAsStr(self):
        retval = F8_NetworkData._getArgsAsStr(self)
        for flag, enabled in (("--activate", self.activate),
                              ("--nodefroute", self.nodefroute)):
            if enabled:
                retval += " " + flag
        return retval
199 | |||
class FC3_Network(KickstartCommand):
    """Handler for the "network" kickstart command (FC3 syntax)."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        # Boot protocols accepted by --bootproto for this syntax version;
        # subclasses append to this list before building their parser.
        self.bootprotoList = [BOOTPROTO_DHCP, BOOTPROTO_BOOTP,
                              BOOTPROTO_STATIC]

        self.op = self._getParser()

        self.network = kwargs.get("network", [])

    def __str__(self):
        body = "".join(str(nic) for nic in self.network)
        if not body:
            return ""
        return "# Network information\n" + body

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--bootproto", dest="bootProto",
                      default=BOOTPROTO_DHCP,
                      choices=self.bootprotoList)
        # Plain string-valued options share a single registration loop;
        # each dest is the flag name with the leading dashes stripped.
        for flag in ("--dhcpclass", "--device", "--essid", "--ethtool",
                     "--gateway", "--hostname", "--ip", "--mtu",
                     "--nameserver", "--netmask"):
            op.add_option(flag, dest=flag[2:])
        op.add_option("--nodns", dest="nodns", action="store_true",
                      default=False)
        op.add_option("--onboot", dest="onboot", action="store",
                      type="ksboolean")
        op.add_option("--wepkey", dest="wepkey")
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        nd = self.handler.NetworkData()
        self._setToObj(self.op, opts, nd)
        nd.lineno = self.lineno

        # Duplicate devices only warn; the line is still recorded.
        if nd in self.dataList():
            warnings.warn(_("A network device with the name %s has already been defined.") % nd.device)

        return nd

    def dataList(self):
        return self.network
260 | |||
class FC4_Network(FC3_Network):
    """FC4 network command; adds the --notksdevice flag."""
    removedKeywords = FC3_Network.removedKeywords
    removedAttrs = FC3_Network.removedAttrs

    def _getParser(self):
        op = FC3_Network._getParser(self)
        op.add_option("--notksdevice", default=False, dest="notksdevice",
                      action="store_true")
        return op
270 | |||
class FC6_Network(FC4_Network):
    """FC6 network command; adds flags to disable either IP family."""
    removedKeywords = FC4_Network.removedKeywords
    removedAttrs = FC4_Network.removedAttrs

    def _getParser(self):
        op = FC4_Network._getParser(self)
        for flag in ("--noipv4", "--noipv6"):
            op.add_option(flag, dest=flag[2:], action="store_true",
                          default=False)
        return op
282 | |||
class F8_Network(FC6_Network):
    """F8 network command; adds the --ipv6 option."""
    removedKeywords = FC6_Network.removedKeywords
    removedAttrs = FC6_Network.removedAttrs

    def _getParser(self):
        op = FC6_Network._getParser(self)
        op.add_option("--ipv6", dest="ipv6")
        return op
291 | |||
class F9_Network(F8_Network):
    """F9 network command; additionally accepts bootproto=query."""
    removedKeywords = F8_Network.removedKeywords
    removedAttrs = F8_Network.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        F8_Network.__init__(self, writePriority, *args, **kwargs)
        self.bootprotoList.append(BOOTPROTO_QUERY)

    def _getParser(self):
        # Re-register --bootproto so its choices pick up the extended list.
        op = F8_Network._getParser(self)
        op.add_option("--bootproto", choices=self.bootprotoList,
                      dest="bootProto", default=BOOTPROTO_DHCP)
        return op
306 | |||
class F16_Network(F9_Network):
    """F16 network command; adds --activate, --nodefroute, --wpakey and
    bootproto=ibft."""
    removedKeywords = F9_Network.removedKeywords
    removedAttrs = F9_Network.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        F9_Network.__init__(self, writePriority, *args, **kwargs)
        self.bootprotoList.append(BOOTPROTO_IBFT)

    def _getParser(self):
        op = F9_Network._getParser(self)
        for flag in ("--activate", "--nodefroute"):
            op.add_option(flag, dest=flag[2:], action="store_true",
                          default=False)
        op.add_option("--wpakey", dest="wpakey", action="store", default="")
        return op
323 | |||
class RHEL4_Network(FC3_Network):
    """RHEL4 network command; backports the --notksdevice flag."""
    removedKeywords = FC3_Network.removedKeywords
    removedAttrs = FC3_Network.removedAttrs

    def _getParser(self):
        op = FC3_Network._getParser(self)
        op.add_option("--notksdevice", default=False, dest="notksdevice",
                      action="store_true")
        return op
333 | |||
class RHEL5_Network(FC6_Network):
    """RHEL5 network command; backports bootproto=query."""
    removedKeywords = FC6_Network.removedKeywords
    removedAttrs = FC6_Network.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        FC6_Network.__init__(self, writePriority, *args, **kwargs)
        self.bootprotoList.append(BOOTPROTO_QUERY)

    def _getParser(self):
        # Re-register --bootproto so its choices pick up "query".
        op = FC6_Network._getParser(self)
        op.add_option("--bootproto", choices=self.bootprotoList,
                      dest="bootProto", default=BOOTPROTO_DHCP)
        return op
348 | |||
class RHEL6_Network(F9_Network):
    """RHEL6 network command; backports --activate, --nodefroute and
    bootproto=ibft."""
    removedKeywords = F9_Network.removedKeywords
    removedAttrs = F9_Network.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        F9_Network.__init__(self, writePriority, *args, **kwargs)
        self.bootprotoList.append(BOOTPROTO_IBFT)

    def _getParser(self):
        op = F9_Network._getParser(self)
        for flag in ("--activate", "--nodefroute"):
            op.add_option(flag, dest=flag[2:], action="store_true",
                          default=False)
        return op
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/partition.py b/scripts/lib/mic/3rdparty/pykickstart/commands/partition.py new file mode 100644 index 0000000000..e65e012d02 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/partition.py | |||
@@ -0,0 +1,353 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007, 2008 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | import warnings | ||
26 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
27 | |||
class FC3_PartData(BaseData):
    """Data for one "part"/"partition" line (FC3 syntax)."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        # All attributes default to empty/zero/False except format (True)
        # and size (None).
        for attr, default in (("active", False), ("primOnly", False),
                              ("end", 0), ("fstype", ""), ("grow", False),
                              ("maxSizeMB", 0), ("format", True),
                              ("onbiosdisk", ""), ("disk", ""),
                              ("onPart", ""), ("recommended", False),
                              ("size", None), ("start", 0),
                              ("mountpoint", "")):
            setattr(self, attr, kwargs.get(attr, default))

    def __eq__(self, y):
        # Entries without a mountpoint never compare equal.
        if not self.mountpoint:
            return False
        return self.mountpoint == y.mountpoint

    def _getArgsAsStr(self):
        """Render all set options, space-separated, in canonical order."""
        pieces = []
        add = pieces.append

        if self.active:
            add("--active")
        if self.primOnly:
            add("--asprimary")
        # "start"/"end" are removed by later syntax versions, hence the
        # hasattr() guards.
        if hasattr(self, "end") and self.end != 0:
            add("--end=%s" % self.end)
        if self.fstype != "":
            add("--fstype=\"%s\"" % self.fstype)
        if self.grow:
            add("--grow")
        if self.maxSizeMB > 0:
            add("--maxsize=%d" % self.maxSizeMB)
        if not self.format:
            add("--noformat")
        if self.onbiosdisk != "":
            add("--onbiosdisk=%s" % self.onbiosdisk)
        if self.disk != "":
            add("--ondisk=%s" % self.disk)
        if self.onPart != "":
            add("--onpart=%s" % self.onPart)
        if self.recommended:
            add("--recommended")
        if self.size and self.size != 0:
            add("--size=%s" % self.size)
        if hasattr(self, "start") and self.start != 0:
            add("--start=%s" % self.start)

        return "".join(" " + p for p in pieces)

    def __str__(self):
        where = self.mountpoint if self.mountpoint else "(No mount point)"
        return BaseData.__str__(self) + \
               "part %s%s\n" % (where, self._getArgsAsStr())
95 | |||
class FC4_PartData(FC3_PartData):
    """FC4 part data; adds --bytes-per-inode, --fsoptions and --label."""
    removedKeywords = FC3_PartData.removedKeywords
    removedAttrs = FC3_PartData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC3_PartData.__init__(self, *args, **kwargs)
        self.bytesPerInode = kwargs.get("bytesPerInode", 4096)
        self.fsopts = kwargs.get("fsopts", "")
        self.label = kwargs.get("label", "")

    def _getArgsAsStr(self):
        extras = []

        # bytesPerInode is removed again in F9, hence the hasattr() guard.
        if hasattr(self, "bytesPerInode") and self.bytesPerInode != 0:
            extras.append(" --bytes-per-inode=%d" % self.bytesPerInode)
        if self.fsopts != "":
            extras.append(" --fsoptions=\"%s\"" % self.fsopts)
        if self.label != "":
            extras.append(" --label=%s" % self.label)

        return FC3_PartData._getArgsAsStr(self) + "".join(extras)
117 | |||
class RHEL5_PartData(FC4_PartData):
    """RHEL5 part data; backports the encryption options."""
    removedKeywords = FC4_PartData.removedKeywords
    removedAttrs = FC4_PartData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC4_PartData.__init__(self, *args, **kwargs)
        self.encrypted = kwargs.get("encrypted", False)
        self.passphrase = kwargs.get("passphrase", "")

    def _getArgsAsStr(self):
        extras = []

        if self.encrypted:
            extras.append(" --encrypted")
        if self.passphrase != "":
            extras.append(" --passphrase=\"%s\"" % self.passphrase)

        return FC4_PartData._getArgsAsStr(self) + "".join(extras)
137 | |||
class F9_PartData(FC4_PartData):
    """F9 part data; drops bytesPerInode, adds --fsprofile and the
    encryption options."""
    removedKeywords = FC4_PartData.removedKeywords + ["bytesPerInode"]
    removedAttrs = FC4_PartData.removedAttrs + ["bytesPerInode"]

    def __init__(self, *args, **kwargs):
        FC4_PartData.__init__(self, *args, **kwargs)
        # Strip the attributes named in removedAttrs (bytesPerInode).
        self.deleteRemovedAttrs()

        self.fsopts = kwargs.get("fsopts", "")
        self.label = kwargs.get("label", "")
        self.fsprofile = kwargs.get("fsprofile", "")
        self.encrypted = kwargs.get("encrypted", False)
        self.passphrase = kwargs.get("passphrase", "")

    def _getArgsAsStr(self):
        extras = []

        if self.fsprofile != "":
            extras.append(" --fsprofile=\"%s\"" % self.fsprofile)
        if self.encrypted:
            extras.append(" --encrypted")
        if self.passphrase != "":
            extras.append(" --passphrase=\"%s\"" % self.passphrase)

        return FC4_PartData._getArgsAsStr(self) + "".join(extras)
164 | |||
class F11_PartData(F9_PartData):
    # F11 drops the --start/--end cylinder options; the inherited
    # deleteRemovedAttrs() call in F9's __init__ removes the attributes
    # named in removedAttrs.
    removedKeywords = F9_PartData.removedKeywords + ["start", "end"]
    removedAttrs = F9_PartData.removedAttrs + ["start", "end"]
168 | |||
class F12_PartData(F11_PartData):
    """F12 part data; adds key-escrow options for encrypted partitions."""
    removedKeywords = F11_PartData.removedKeywords
    removedAttrs = F11_PartData.removedAttrs

    def __init__(self, *args, **kwargs):
        F11_PartData.__init__(self, *args, **kwargs)

        self.escrowcert = kwargs.get("escrowcert", "")
        self.backuppassphrase = kwargs.get("backuppassphrase", False)

    def _getArgsAsStr(self):
        extras = []

        # --escrowcert is only emitted together with --encrypted.
        if self.encrypted and self.escrowcert != "":
            extras.append(" --escrowcert=\"%s\"" % self.escrowcert)
        if self.backuppassphrase:
            extras.append(" --backuppassphrase")

        return F11_PartData._getArgsAsStr(self) + "".join(extras)
189 | |||
# F14 introduced no new partition options, so it reuses the F12 data class.
F14_PartData = F12_PartData
191 | |||
class FC3_Partition(KickstartCommand):
    """Handler for the "part"/"partition" kickstart command (FC3 syntax)."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=130, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # List of PartData objects, one per parsed "part" line.
        self.partitions = kwargs.get("partitions", [])

    def __str__(self):
        retval = ""

        for part in self.partitions:
            retval += part.__str__()

        # Omit the section header entirely when no partitions were defined.
        if retval != "":
            return "# Disk partitioning information\n" + retval
        else:
            return ""

    def _getParser(self):
        # Strip a leading "/dev/" so --onpart=/dev/sda1 and --onpart=sda1
        # are equivalent.
        def part_cb (option, opt_str, value, parser):
            if value.startswith("/dev/"):
                parser.values.ensure_value(option.dest, value[5:])
            else:
                parser.values.ensure_value(option.dest, value)

        op = KSOptionParser()
        op.add_option("--active", dest="active", action="store_true",
                      default=False)
        op.add_option("--asprimary", dest="primOnly", action="store_true",
                      default=False)
        op.add_option("--end", dest="end", action="store", type="int",
                      nargs=1)
        op.add_option("--fstype", "--type", dest="fstype")
        op.add_option("--grow", dest="grow", action="store_true", default=False)
        op.add_option("--maxsize", dest="maxSizeMB", action="store", type="int",
                      nargs=1)
        op.add_option("--noformat", dest="format", action="store_false",
                      default=True)
        op.add_option("--onbiosdisk", dest="onbiosdisk")
        op.add_option("--ondisk", "--ondrive", dest="disk")
        op.add_option("--onpart", "--usepart", dest="onPart", action="callback",
                      callback=part_cb, nargs=1, type="string")
        op.add_option("--recommended", dest="recommended", action="store_true",
                      default=False)
        op.add_option("--size", dest="size", action="store", type="int",
                      nargs=1)
        op.add_option("--start", dest="start", action="store", type="int",
                      nargs=1)
        return op

    def parse(self, args):
        """Parse one "part" line; the optional positional argument is the
        mount point. Duplicate mount points only warn."""
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        # The data class comes from the handler so each syntax version
        # gets its matching PartData subclass.
        pd = self.handler.PartData()
        self._setToObj(self.op, opts, pd)
        pd.lineno = self.lineno
        if extra:
            pd.mountpoint = extra[0]
            if pd in self.dataList():
                warnings.warn(_("A partition with the mountpoint %s has already been defined.") % pd.mountpoint)
        else:
            pd.mountpoint = None

        return pd

    def dataList(self):
        return self.partitions
262 | |||
class FC4_Partition(FC3_Partition):
    """FC4 ``part`` command: adds --bytes-per-inode, --fsoptions, --label."""
    removedKeywords = FC3_Partition.removedKeywords
    removedAttrs = FC3_Partition.removedAttrs

    def __init__(self, writePriority=130, *args, **kwargs):
        FC3_Partition.__init__(self, writePriority, *args, **kwargs)
        # NOTE: a nested part_cb callback used to be defined here but was
        # never referenced; the callback registered in
        # FC3_Partition._getParser is the one actually used, so the dead
        # definition has been removed.

    def _getParser(self):
        op = FC3_Partition._getParser(self)
        op.add_option("--bytes-per-inode", dest="bytesPerInode", action="store",
                      type="int", nargs=1)
        op.add_option("--fsoptions", dest="fsopts")
        op.add_option("--label", dest="label")
        return op
283 | |||
class RHEL5_Partition(FC4_Partition):
    """RHEL5 ``part`` command: adds block-device encryption options."""
    removedKeywords = FC4_Partition.removedKeywords
    removedAttrs = FC4_Partition.removedAttrs

    def __init__(self, writePriority=130, *args, **kwargs):
        FC4_Partition.__init__(self, writePriority, *args, **kwargs)
        # NOTE: a nested, never-used part_cb callback was removed here; the
        # callback from FC3_Partition._getParser is the one in effect.

    def _getParser(self):
        op = FC4_Partition._getParser(self)
        op.add_option("--encrypted", action="store_true", default=False)
        op.add_option("--passphrase")
        return op
302 | |||
class F9_Partition(FC4_Partition):
    """F9 ``part``: deprecates --bytes-per-inode, adds fsprofile/encryption."""
    removedKeywords = FC4_Partition.removedKeywords
    removedAttrs = FC4_Partition.removedAttrs

    def __init__(self, writePriority=130, *args, **kwargs):
        FC4_Partition.__init__(self, writePriority, *args, **kwargs)
        # NOTE: a nested, never-used part_cb callback was removed here; the
        # callback from FC3_Partition._getParser is the one in effect.

    def _getParser(self):
        op = FC4_Partition._getParser(self)
        op.add_option("--bytes-per-inode", deprecated=1)
        op.add_option("--fsprofile")
        op.add_option("--encrypted", action="store_true", default=False)
        op.add_option("--passphrase")
        return op
323 | |||
class F11_Partition(F9_Partition):
    """F11 ``part`` command: deprecates the --start and --end options."""
    removedKeywords = F9_Partition.removedKeywords
    removedAttrs = F9_Partition.removedAttrs

    def _getParser(self):
        parser = F9_Partition._getParser(self)
        for obsolete in ("--start", "--end"):
            parser.add_option(obsolete, deprecated=1)
        return parser
333 | |||
class F12_Partition(F11_Partition):
    """F12 ``part`` command: adds passphrase escrow options."""
    removedKeywords = F11_Partition.removedKeywords
    removedAttrs = F11_Partition.removedAttrs

    def _getParser(self):
        parser = F11_Partition._getParser(self)
        parser.add_option("--escrowcert")
        parser.add_option("--backuppassphrase", action="store_true",
                          default=False)
        return parser
343 | |||
class F14_Partition(F12_Partition):
    """F14 ``part`` command: removes long-deprecated options entirely."""
    removedKeywords = F12_Partition.removedKeywords
    removedAttrs = F12_Partition.removedAttrs

    def _getParser(self):
        parser = F12_Partition._getParser(self)
        for obsolete in ("--bytes-per-inode", "--start", "--end"):
            parser.remove_option(obsolete)
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/raid.py b/scripts/lib/mic/3rdparty/pykickstart/commands/raid.py new file mode 100644 index 0000000000..0f4c92a107 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/raid.py | |||
@@ -0,0 +1,365 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007, 2008, 2011 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | import warnings | ||
26 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
27 | |||
class FC3_RaidData(BaseData):
    """Data object representing a single FC3 ``raid`` kickstart line."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        self.device = kwargs.get("device", None)        # raid device number
        self.fstype = kwargs.get("fstype", "")
        self.level = kwargs.get("level", "")            # e.g. "RAID1"
        self.format = kwargs.get("format", True)
        self.spares = kwargs.get("spares", 0)
        self.preexist = kwargs.get("preexist", False)
        self.mountpoint = kwargs.get("mountpoint", "")
        self.members = kwargs.get("members", [])        # member partitions

    def __eq__(self, y):
        # Two raid entries refer to the same array iff their devices match.
        return self.device == y.device

    def __ne__(self, y):
        # Python 2 does not derive != from ==; define it explicitly so that
        # inequality stays consistent with __eq__.
        return not self == y

    def _getArgsAsStr(self):
        """Return the option portion of the serialized raid line."""
        retval = ""

        if self.device != "":
            retval += " --device=%s" % self.device
        if self.fstype != "":
            retval += " --fstype=\"%s\"" % self.fstype
        if self.level != "":
            retval += " --level=%s" % self.level
        if not self.format:
            retval += " --noformat"
        if self.spares != 0:
            retval += " --spares=%d" % self.spares
        if self.preexist:
            retval += " --useexisting"

        return retval

    def __str__(self):
        retval = BaseData.__str__(self)
        retval += "raid %s%s %s\n" % (self.mountpoint, self._getArgsAsStr(),
                                      " ".join(self.members))
        return retval
69 | |||
class FC4_RaidData(FC3_RaidData):
    """FC4 raid data: adds filesystem mount options (--fsoptions)."""
    removedKeywords = FC3_RaidData.removedKeywords
    removedAttrs = FC3_RaidData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC3_RaidData.__init__(self, *args, **kwargs)
        self.fsopts = kwargs.get("fsopts", "")

    def _getArgsAsStr(self):
        args = FC3_RaidData._getArgsAsStr(self)
        if self.fsopts != "":
            args += " --fsoptions=\"%s\"" % self.fsopts
        return args
85 | |||
class FC5_RaidData(FC4_RaidData):
    """FC5 raid data: adds the --bytes-per-inode tuning knob."""
    removedKeywords = FC4_RaidData.removedKeywords
    removedAttrs = FC4_RaidData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC4_RaidData.__init__(self, *args, **kwargs)
        self.bytesPerInode = kwargs.get("bytesPerInode", 4096)

    def _getArgsAsStr(self):
        args = FC4_RaidData._getArgsAsStr(self)
        # Later subclasses (F9+) delete this attribute, hence the guarded
        # lookup instead of direct access.
        if getattr(self, "bytesPerInode", 0) != 0:
            args += " --bytes-per-inode=%d" % self.bytesPerInode
        return args
101 | |||
class RHEL5_RaidData(FC5_RaidData):
    """RHEL5 raid data: adds block-device encryption options."""
    removedKeywords = FC5_RaidData.removedKeywords
    removedAttrs = FC5_RaidData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC5_RaidData.__init__(self, *args, **kwargs)
        self.encrypted = kwargs.get("encrypted", False)
        self.passphrase = kwargs.get("passphrase", "")

    def _getArgsAsStr(self):
        args = FC5_RaidData._getArgsAsStr(self)
        if self.encrypted:
            args += " --encrypted"
        if self.passphrase != "":
            args += " --passphrase=\"%s\"" % self.passphrase
        return args
121 | |||
# F7 raid data is unchanged from FC5; reuse the class directly.
F7_RaidData = FC5_RaidData
123 | |||
class F9_RaidData(FC5_RaidData):
    """F9 raid data: drops bytesPerInode, adds fsprofile and encryption."""
    removedKeywords = FC5_RaidData.removedKeywords + ["bytesPerInode"]
    removedAttrs = FC5_RaidData.removedAttrs + ["bytesPerInode"]

    def __init__(self, *args, **kwargs):
        FC5_RaidData.__init__(self, *args, **kwargs)
        self.deleteRemovedAttrs()
        self.fsprofile = kwargs.get("fsprofile", "")
        self.encrypted = kwargs.get("encrypted", False)
        self.passphrase = kwargs.get("passphrase", "")

    def _getArgsAsStr(self):
        args = FC5_RaidData._getArgsAsStr(self)
        if self.fsprofile != "":
            args += " --fsprofile=\"%s\"" % self.fsprofile
        if self.encrypted:
            args += " --encrypted"
        if self.passphrase != "":
            args += " --passphrase=\"%s\"" % self.passphrase
        return args
148 | |||
class F12_RaidData(F9_RaidData):
    """F12 raid data: adds passphrase escrow options."""
    removedKeywords = F9_RaidData.removedKeywords
    removedAttrs = F9_RaidData.removedAttrs

    def __init__(self, *args, **kwargs):
        F9_RaidData.__init__(self, *args, **kwargs)
        self.deleteRemovedAttrs()
        self.escrowcert = kwargs.get("escrowcert", "")
        self.backuppassphrase = kwargs.get("backuppassphrase", False)

    def _getArgsAsStr(self):
        args = F9_RaidData._getArgsAsStr(self)
        # --escrowcert is only emitted for encrypted arrays.
        if self.encrypted and self.escrowcert != "":
            args += " --escrowcert=\"%s\"" % self.escrowcert
        if self.backuppassphrase:
            args += " --backuppassphrase"
        return args
169 | |||
# Raid data was unchanged in F13 and F14; reuse the F12 class.
F13_RaidData = F12_RaidData

F14_RaidData = F13_RaidData
173 | |||
class F15_RaidData(F14_RaidData):
    """F15 raid data: adds a filesystem label."""
    removedKeywords = F14_RaidData.removedKeywords
    removedAttrs = F14_RaidData.removedAttrs

    def __init__(self, *args, **kwargs):
        F14_RaidData.__init__(self, *args, **kwargs)
        self.deleteRemovedAttrs()
        self.label = kwargs.get("label", "")

    def _getArgsAsStr(self):
        args = F14_RaidData._getArgsAsStr(self)
        if self.label != "":
            args += " --label=%s" % self.label
        return args
191 | |||
class FC3_Raid(KickstartCommand):
    """Handler for the FC3 ``raid`` kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=131, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # A dict of all the RAID levels we support.  This means that if we
        # support more levels in the future, subclasses don't have to
        # duplicate too much.
        self.levelMap = { "RAID0": "RAID0", "0": "RAID0",
                          "RAID1": "RAID1", "1": "RAID1",
                          "RAID5": "RAID5", "5": "RAID5",
                          "RAID6": "RAID6", "6": "RAID6" }

        self.raidList = kwargs.get("raidList", [])

    def __str__(self):
        retval = ""

        for raid in self.raidList:
            retval += raid.__str__()

        return retval

    def _getParser(self):
        def raid_cb (option, opt_str, value, parser):
            # --noformat implies the array already exists.
            parser.values.format = False
            parser.values.preexist = True

        def device_cb (option, opt_str, value, parser):
            # Accept both "mdX" and a bare number; store the number part.
            if value[0:2] == "md":
                parser.values.ensure_value(option.dest, value[2:])
            else:
                parser.values.ensure_value(option.dest, value)

        def level_cb (option, opt_str, value, parser):
            # Normalize e.g. "1" or "RAID1" to the canonical "RAID1" form.
            # (dict.has_key() is deprecated; use the "in" operator.)
            if value in self.levelMap:
                parser.values.ensure_value(option.dest, self.levelMap[value])

        op = KSOptionParser()
        op.add_option("--device", action="callback", callback=device_cb,
                      dest="device", type="string", nargs=1, required=1)
        op.add_option("--fstype", dest="fstype")
        op.add_option("--level", dest="level", action="callback",
                      callback=level_cb, type="string", nargs=1)
        op.add_option("--noformat", action="callback", callback=raid_cb,
                      dest="format", default=True, nargs=0)
        op.add_option("--spares", dest="spares", action="store", type="int",
                      nargs=1, default=0)
        op.add_option("--useexisting", dest="preexist", action="store_true",
                      default=False)
        return op

    def parse(self, args):
        """Parse one raid line and return a populated RaidData.

        Raises KickstartValueError when the mountpoint or the member
        partition list is missing.
        """
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        # Old-style "raise Exc, arg" replaced with the call form, which is
        # valid in both python 2 and python 3.
        if len(extra) == 0:
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Mount point required for %s") % "raid"))
        if len(extra) == 1:
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Partitions required for %s") % "raid"))

        rd = self.handler.RaidData()
        self._setToObj(self.op, opts, rd)
        rd.lineno = self.lineno

        # --device can't just take an int in the callback above, because it
        # could be specified as "mdX", which causes optparse to error when
        # it runs int().
        rd.device = int(rd.device)
        rd.mountpoint = extra[0]
        rd.members = extra[1:]

        # Check for duplicates in the data list.
        if rd in self.dataList():
            warnings.warn(_("A RAID device with the name %s has already been defined.") % rd.device)

        return rd

    def dataList(self):
        return self.raidList
274 | |||
class FC4_Raid(FC3_Raid):
    """FC4 ``raid`` command: adds the --fsoptions option."""
    removedKeywords = FC3_Raid.removedKeywords
    removedAttrs = FC3_Raid.removedAttrs

    def _getParser(self):
        parser = FC3_Raid._getParser(self)
        parser.add_option("--fsoptions", dest="fsopts")
        return parser
283 | |||
class FC5_Raid(FC4_Raid):
    """FC5 ``raid`` command: adds the --bytes-per-inode option."""
    removedKeywords = FC4_Raid.removedKeywords
    removedAttrs = FC4_Raid.removedAttrs

    def _getParser(self):
        parser = FC4_Raid._getParser(self)
        parser.add_option("--bytes-per-inode", dest="bytesPerInode",
                          action="store", type="int", nargs=1)
        return parser
293 | |||
class RHEL5_Raid(FC5_Raid):
    """RHEL5 ``raid`` command: adds RAID10 support and encryption options."""
    removedKeywords = FC5_Raid.removedKeywords
    removedAttrs = FC5_Raid.removedAttrs

    def __init__(self, writePriority=131, *args, **kwargs):
        FC5_Raid.__init__(self, writePriority, *args, **kwargs)
        self.levelMap.update({"RAID10": "RAID10", "10": "RAID10"})

    def _getParser(self):
        parser = FC5_Raid._getParser(self)
        parser.add_option("--encrypted", action="store_true", default=False)
        parser.add_option("--passphrase")
        return parser
308 | |||
class F7_Raid(FC5_Raid):
    """F7 ``raid`` command: adds RAID10 to the supported levels."""
    removedKeywords = FC5_Raid.removedKeywords
    removedAttrs = FC5_Raid.removedAttrs

    def __init__(self, writePriority=131, *args, **kwargs):
        FC5_Raid.__init__(self, writePriority, *args, **kwargs)
        self.levelMap.update({"RAID10": "RAID10", "10": "RAID10"})
317 | |||
class F9_Raid(F7_Raid):
    """F9 ``raid``: deprecates --bytes-per-inode, adds fsprofile/crypto."""
    removedKeywords = F7_Raid.removedKeywords
    removedAttrs = F7_Raid.removedAttrs

    def _getParser(self):
        parser = F7_Raid._getParser(self)
        parser.add_option("--bytes-per-inode", deprecated=1)
        parser.add_option("--fsprofile")
        parser.add_option("--encrypted", action="store_true", default=False)
        parser.add_option("--passphrase")
        return parser
329 | |||
class F12_Raid(F9_Raid):
    """F12 ``raid`` command: adds passphrase escrow options."""
    removedKeywords = F9_Raid.removedKeywords
    removedAttrs = F9_Raid.removedAttrs

    def _getParser(self):
        parser = F9_Raid._getParser(self)
        parser.add_option("--escrowcert")
        parser.add_option("--backuppassphrase", action="store_true",
                          default=False)
        return parser
339 | |||
class F13_Raid(F12_Raid):
    """F13 ``raid`` command: adds RAID4 to the supported levels."""
    removedKeywords = F12_Raid.removedKeywords
    removedAttrs = F12_Raid.removedAttrs

    def __init__(self, writePriority=131, *args, **kwargs):
        F12_Raid.__init__(self, writePriority, *args, **kwargs)
        self.levelMap.update({"RAID4": "RAID4", "4": "RAID4"})
348 | |||
class F14_Raid(F13_Raid):
    """F14 ``raid`` command: removes the obsolete --bytes-per-inode option."""
    removedKeywords = F13_Raid.removedKeywords
    removedAttrs = F13_Raid.removedAttrs

    def _getParser(self):
        parser = F13_Raid._getParser(self)
        parser.remove_option("--bytes-per-inode")
        return parser
357 | |||
class F15_Raid(F14_Raid):
    """F15 ``raid`` command: adds the --label option."""
    removedKeywords = F14_Raid.removedKeywords
    removedAttrs = F14_Raid.removedAttrs

    def _getParser(self):
        parser = F14_Raid._getParser(self)
        parser.add_option("--label")
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/reboot.py b/scripts/lib/mic/3rdparty/pykickstart/commands/reboot.py new file mode 100644 index 0000000000..391af14c22 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/reboot.py | |||
@@ -0,0 +1,79 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.constants import * | ||
22 | from pykickstart.errors import * | ||
23 | from pykickstart.options import * | ||
24 | |||
class FC3_Reboot(KickstartCommand):
    """Handler for the ``reboot``/``shutdown`` kickstart commands."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        # One of KS_REBOOT / KS_SHUTDOWN, or None if never parsed.
        self.action = kwargs.get("action", None)

    def __str__(self):
        retval = KickstartCommand.__str__(self)
        if self.action == KS_REBOOT:
            retval += "# Reboot after installation\nreboot\n"
        elif self.action == KS_SHUTDOWN:
            retval += "# Shutdown after installation\nshutdown\n"
        return retval

    def parse(self, args):
        # The same handler serves both commands; the command word that was
        # actually used selects the action.
        self.action = KS_REBOOT if self.currentCmd == "reboot" else KS_SHUTDOWN
        return self
50 | |||
class FC6_Reboot(FC3_Reboot):
    """FC6 reboot/shutdown: adds the --eject option."""
    removedKeywords = FC3_Reboot.removedKeywords
    removedAttrs = FC3_Reboot.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        FC3_Reboot.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        self.eject = kwargs.get("eject", False)

    def __str__(self):
        # Strip the trailing newline so --eject lands on the command line,
        # then restore it.
        line = FC3_Reboot.__str__(self).rstrip()
        if self.eject:
            line += " --eject"
        return line + "\n"

    def _getParser(self):
        parser = KSOptionParser()
        parser.add_option("--eject", dest="eject", action="store_true",
                          default=False)
        return parser

    def parse(self, args):
        FC3_Reboot.parse(self, args)
        (opts, leftover) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)
        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/repo.py b/scripts/lib/mic/3rdparty/pykickstart/commands/repo.py new file mode 100644 index 0000000000..543ef947c1 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/repo.py | |||
@@ -0,0 +1,249 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007, 2008, 2009 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.constants import * | ||
22 | from pykickstart.errors import * | ||
23 | from pykickstart.options import * | ||
24 | |||
25 | import gettext | ||
26 | import warnings | ||
27 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
28 | |||
class FC6_RepoData(BaseData):
    """Data object representing a single FC6 ``repo`` kickstart line."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        self.baseurl = kwargs.get("baseurl", "")
        self.mirrorlist = kwargs.get("mirrorlist", None)
        self.name = kwargs.get("name", "")

    def __eq__(self, y):
        # Repos are identified by name alone.
        return self.name == y.name

    def __ne__(self, y):
        # Python 2 does not derive != from ==; define it explicitly so that
        # inequality stays consistent with __eq__.
        return not self == y

    def _getArgsAsStr(self):
        """Return the option portion of the serialized repo line."""
        retval = ""

        # parse() enforces that only one of these two is set.
        if self.baseurl:
            retval += "--baseurl=%s" % self.baseurl
        elif self.mirrorlist:
            retval += "--mirrorlist=%s" % self.mirrorlist

        return retval

    def __str__(self):
        retval = BaseData.__str__(self)
        retval += "repo --name=\"%s\" %s\n" % (self.name, self._getArgsAsStr())
        return retval
56 | |||
class F8_RepoData(FC6_RepoData):
    """F8 repo data: adds cost and include/exclude package lists."""
    removedKeywords = FC6_RepoData.removedKeywords
    removedAttrs = FC6_RepoData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC6_RepoData.__init__(self, *args, **kwargs)
        self.cost = kwargs.get("cost", None)
        self.includepkgs = kwargs.get("includepkgs", [])
        self.excludepkgs = kwargs.get("excludepkgs", [])

    def _getArgsAsStr(self):
        args = FC6_RepoData._getArgsAsStr(self)
        if self.cost:
            args += " --cost=%s" % self.cost
        if self.includepkgs:
            args += " --includepkgs=\"%s\"" % ",".join(self.includepkgs)
        if self.excludepkgs:
            args += " --excludepkgs=\"%s\"" % ",".join(self.excludepkgs)
        return args
78 | |||
class F11_RepoData(F8_RepoData):
    """F11 repo data: adds the --ignoregroups flag."""
    removedKeywords = F8_RepoData.removedKeywords
    removedAttrs = F8_RepoData.removedAttrs

    def __init__(self, *args, **kwargs):
        F8_RepoData.__init__(self, *args, **kwargs)
        self.ignoregroups = kwargs.get("ignoregroups", None)

    def _getArgsAsStr(self):
        args = F8_RepoData._getArgsAsStr(self)
        # Only the "true" form is ever written back out.
        if self.ignoregroups:
            args += " --ignoregroups=true"
        return args
93 | |||
class F13_RepoData(F11_RepoData):
    """F13 repo data: adds a proxy URL."""
    removedKeywords = F11_RepoData.removedKeywords
    removedAttrs = F11_RepoData.removedAttrs

    def __init__(self, *args, **kwargs):
        F11_RepoData.__init__(self, *args, **kwargs)
        self.proxy = kwargs.get("proxy", "")

    def _getArgsAsStr(self):
        args = F11_RepoData._getArgsAsStr(self)
        if self.proxy:
            args += " --proxy=\"%s\"" % self.proxy
        return args
109 | |||
class F14_RepoData(F13_RepoData):
    """F14 repo data: adds the --noverifyssl flag."""
    removedKeywords = F13_RepoData.removedKeywords
    removedAttrs = F13_RepoData.removedAttrs

    def __init__(self, *args, **kwargs):
        F13_RepoData.__init__(self, *args, **kwargs)
        self.noverifyssl = kwargs.get("noverifyssl", False)

    def _getArgsAsStr(self):
        args = F13_RepoData._getArgsAsStr(self)
        if self.noverifyssl:
            args += " --noverifyssl"
        return args
125 | |||
# RHEL6 and F15 repo data are unchanged from F14; reuse the class directly.
RHEL6_RepoData = F14_RepoData

F15_RepoData = F14_RepoData
129 | |||
class FC6_Repo(KickstartCommand):
    """Handler for the FC6 ``repo`` kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    # Subclasses (F15+) may relax the requirement that a source be given.
    urlRequired = True

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        self.repoList = kwargs.get("repoList", [])

    def __str__(self):
        retval = ""
        for repo in self.repoList:
            retval += repo.__str__()

        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--name", dest="name", required=1)
        op.add_option("--baseurl")
        op.add_option("--mirrorlist")
        return op

    def parse(self, args):
        """Parse one repo line and return a populated RepoData.

        Raises KickstartValueError on stray arguments or when the
        --baseurl/--mirrorlist constraints are violated.
        """
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        # Old-style "raise Exc, arg" replaced with the call form, which is
        # valid in both python 2 and python 3.
        if len(extra) != 0:
            mapping = {"command": "repo", "options": extra}
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping))

        # This is lame, but I can't think of a better way to make sure only
        # one of these two is specified.
        if opts.baseurl and opts.mirrorlist:
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Only one of --baseurl and --mirrorlist may be specified for repo command.")))

        if self.urlRequired and not opts.baseurl and not opts.mirrorlist:
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("One of --baseurl or --mirrorlist must be specified for repo command.")))

        rd = self.handler.RepoData()
        self._setToObj(self.op, opts, rd)
        rd.lineno = self.lineno

        # Check for duplicates in the data list.
        if rd in self.dataList():
            warnings.warn(_("A repo with the name %s has already been defined.") % rd.name)

        return rd

    def dataList(self):
        return self.repoList
183 | |||
class F8_Repo(FC6_Repo):
    """F8 ``repo`` command: adds --cost, --includepkgs and --excludepkgs."""
    removedKeywords = FC6_Repo.removedKeywords
    removedAttrs = FC6_Repo.removedAttrs

    def __str__(self):
        # NOTE(review): identical to FC6_Repo.__str__; kept so behavior is
        # byte-for-byte unchanged for anything introspecting this class.
        retval = ""
        for repo in self.repoList:
            retval += repo.__str__()

        return retval

    def _getParser(self):
        def list_cb (option, opt_str, value, parser):
            # Split comma-separated package lists into real python lists.
            for d in value.split(','):
                parser.values.ensure_value(option.dest, []).append(d)

        op = FC6_Repo._getParser(self)
        op.add_option("--cost", action="store", type="int")
        op.add_option("--excludepkgs", action="callback", callback=list_cb,
                      nargs=1, type="string")
        op.add_option("--includepkgs", action="callback", callback=list_cb,
                      nargs=1, type="string")
        return op

    def methodToRepo(self):
        """Build a RepoData entry from the handler's url install method.

        Raises KickstartError when the install method has no url.
        """
        if not self.handler.method.url:
            # Old-style "raise Exc, arg" replaced with the call form, which
            # is valid in both python 2 and python 3.
            raise KickstartError(formatErrorMsg(self.handler.method.lineno, msg=_("Method must be a url to be added to the repo list.")))
        reponame = "ks-method-url"
        repourl = self.handler.method.url
        rd = self.handler.RepoData(name=reponame, baseurl=repourl)
        return rd
215 | |||
class F11_Repo(F8_Repo):
    """F11 ``repo`` command: adds the --ignoregroups option."""
    removedKeywords = F8_Repo.removedKeywords
    removedAttrs = F8_Repo.removedAttrs

    def _getParser(self):
        parser = F8_Repo._getParser(self)
        parser.add_option("--ignoregroups", action="store", type="ksboolean")
        return parser
224 | |||
class F13_Repo(F11_Repo):
    """F13 ``repo`` command: adds the --proxy option."""
    removedKeywords = F11_Repo.removedKeywords
    removedAttrs = F11_Repo.removedAttrs

    def _getParser(self):
        parser = F11_Repo._getParser(self)
        parser.add_option("--proxy")
        return parser
233 | |||
class F14_Repo(F13_Repo):
    """F14 ``repo`` command: adds the --noverifyssl option."""
    removedKeywords = F13_Repo.removedKeywords
    removedAttrs = F13_Repo.removedAttrs

    def _getParser(self):
        parser = F13_Repo._getParser(self)
        parser.add_option("--noverifyssl", action="store_true", default=False)
        return parser
242 | |||
# RHEL6 supports the same repo options as F14; reuse the class directly.
RHEL6_Repo = F14_Repo
244 | |||
class F15_Repo(F14_Repo):
    """F15 ``repo`` command: a --baseurl/--mirrorlist source is optional."""
    removedKeywords = F14_Repo.removedKeywords
    removedAttrs = F14_Repo.removedAttrs

    # Checked in FC6_Repo.parse(); False skips the source-required check.
    urlRequired = False
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/rescue.py b/scripts/lib/mic/3rdparty/pykickstart/commands/rescue.py new file mode 100644 index 0000000000..1893d4ea49 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/rescue.py | |||
@@ -0,0 +1,68 @@ | |||
1 | # | ||
2 | # Alexander Todorov <atodorov@redhat.com> | ||
3 | # | ||
4 | # Copyright 2008 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class F10_Rescue(KickstartCommand):
    """Kickstart 'rescue' command (Fedora 10): boot into rescue mode.

    Attributes:
        rescue  -- True once a 'rescue' line has been parsed.
        nomount -- do not mount the installed system (--nomount).
        romount -- mount the installed system read-only (--romount).
    """
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # rescue is only flipped to True by parse(); it is not a kwarg.
        self.rescue = False
        self.nomount = kwargs.get("nomount", False)
        self.romount = kwargs.get("romount", False)

    def __str__(self):
        """Write the command back out in kickstart syntax ('' if unset)."""
        retval = KickstartCommand.__str__(self)

        if self.rescue:
            retval += "rescue"

            if self.nomount:
                retval += " --nomount"
            if self.romount:
                retval += " --romount"

            retval = "# Start rescue mode\n%s\n" % retval

        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--nomount", dest="nomount", action="store_true", default=False)
        op.add_option("--romount", dest="romount", action="store_true", default=False)
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        # The two mount options are mutually exclusive.
        if opts.nomount and opts.romount:
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Only one of --nomount and --romount may be specified for rescue command."))

        self._setToSelf(self.op, opts)
        self.rescue = True
        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/rootpw.py b/scripts/lib/mic/3rdparty/pykickstart/commands/rootpw.py new file mode 100644 index 0000000000..e038b4525d --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/rootpw.py | |||
@@ -0,0 +1,93 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_RootPw(KickstartCommand):
    """Kickstart 'rootpw' command (FC3): set the root password.

    Attributes:
        isCrypted -- the password is already hashed (--iscrypted).
        password  -- the (possibly hashed) root password string.
    """
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        self.isCrypted = kwargs.get("isCrypted", False)
        self.password = kwargs.get("password", "")

    def _getArgsAsStr(self):
        # Render only the option flags; the password itself is appended
        # by __str__.
        retval = ""

        if self.isCrypted:
            retval += " --iscrypted"

        return retval

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if self.password != "":
            retval += "# Root password\nrootpw%s %s\n" % (self._getArgsAsStr(), self.password)

        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--iscrypted", dest="isCrypted", action="store_true",
                      default=False)
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)

        # Exactly one positional argument (the password) is required.
        if len(extra) != 1:
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("A single argument is expected for the %s command") % "rootpw")

        self.password = extra[0]
        return self
69 | |||
class F8_RootPw(FC3_RootPw):
    """Fedora 8 'rootpw': adds --lock and an explicit --plaintext flag."""
    removedKeywords = FC3_RootPw.removedKeywords
    removedAttrs = FC3_RootPw.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        FC3_RootPw.__init__(self, writePriority, *args, **kwargs)
        # --lock disables the account's password login.
        self.lock = kwargs.get("lock", False)

    def _getArgsAsStr(self):
        # Start from the FC3 flags, then append the F8-only ones.
        flags = FC3_RootPw._getArgsAsStr(self)

        if self.lock:
            flags += " --lock"
        if not self.isCrypted:
            # F8 writes the plaintext state out explicitly.
            flags += " --plaintext"

        return flags

    def _getParser(self):
        parser = FC3_RootPw._getParser(self)
        parser.add_option("--lock", dest="lock", action="store_true",
                          default=False)
        # --plaintext is the inverse of --iscrypted; both share one dest.
        parser.add_option("--plaintext", dest="isCrypted",
                          action="store_false")
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/selinux.py b/scripts/lib/mic/3rdparty/pykickstart/commands/selinux.py new file mode 100644 index 0000000000..9f8059c76b --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/selinux.py | |||
@@ -0,0 +1,64 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.constants import * | ||
22 | from pykickstart.options import * | ||
23 | |||
class FC3_SELinux(KickstartCommand):
    """Kickstart 'selinux' command (FC3): choose the SELinux mode."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    # (flag, constant) pairs shared by the parser and the serializer.
    _MODES = [("--disabled", SELINUX_DISABLED),
              ("--enforcing", SELINUX_ENFORCING),
              ("--permissive", SELINUX_PERMISSIVE)]

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # One of the SELINUX_* constants, or None if never set.
        self.selinux = kwargs.get("selinux", None)

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if not retval and self.selinux is None:
            return ""

        retval += "# SELinux configuration\n"

        # Emit the command line for whichever mode matches; an
        # unrecognized value produces only the header, as before.
        for (flag, mode) in self._MODES:
            if self.selinux == mode:
                retval += "selinux %s\n" % flag
                break

        return retval

    def _getParser(self):
        parser = KSOptionParser()
        for (flag, mode) in self._MODES:
            parser.add_option(flag, dest="selinux", action="store_const",
                              const=mode)
        return parser

    def parse(self, args):
        (opts, leftover) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)
        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/services.py b/scripts/lib/mic/3rdparty/pykickstart/commands/services.py new file mode 100644 index 0000000000..2e0eab8007 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/services.py | |||
@@ -0,0 +1,71 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC6_Services(KickstartCommand):
    """Kickstart 'services' command (FC6): enable/disable system services.

    Attributes:
        disabled -- service names collected from --disabled.
        enabled  -- service names collected from --enabled.
    """
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        self.disabled = kwargs.get("disabled", [])
        self.enabled = kwargs.get("enabled", [])

    def __str__(self):
        retval = KickstartCommand.__str__(self)
        args = ""

        if len(self.disabled) > 0:
            args += " --disabled=\"%s\"" % ",".join(self.disabled)
        if len(self.enabled) > 0:
            args += " --enabled=\"%s\"" % ",".join(self.enabled)

        if args != "":
            retval += "# System services\nservices%s\n" % args

        return retval

    def _getParser(self):
        # Split comma-separated values and strip whitespace so both
        # "--enabled=a,b" and "--enabled=a, b" accumulate into the list.
        def services_cb (option, opt_str, value, parser):
            for d in value.split(','):
                parser.values.ensure_value(option.dest, []).append(d.strip())

        op = KSOptionParser()
        op.add_option("--disabled", dest="disabled", action="callback",
                      callback=services_cb, nargs=1, type="string")
        op.add_option("--enabled", dest="enabled", action="callback",
                      callback=services_cb, nargs=1, type="string")
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)

        # At least one of the two lists must end up non-empty.
        if len(self.disabled) == 0 and len(self.enabled) == 0:
            raise KickstartParseError, formatErrorMsg(self.lineno, msg=_("One of --disabled or --enabled must be provided."))

        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/skipx.py b/scripts/lib/mic/3rdparty/pykickstart/commands/skipx.py new file mode 100644 index 0000000000..36d1a8d5ba --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/skipx.py | |||
@@ -0,0 +1,54 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_SkipX(KickstartCommand):
    """Kickstart 'skipx' command (FC3): skip X Window System configuration."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        # True once a 'skipx' line has been parsed.
        self.skipx = kwargs.get("skipx", False)

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if self.skipx:
            retval += "# Do not configure the X Window System\nskipx\n"

        return retval

    def _getParser(self):
        # skipx takes no options; an empty parser rejects unknown ones.
        op = KSOptionParser()
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        # skipx takes no positional arguments either.
        if len(extra) > 0:
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "skipx")

        self.skipx = True
        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/sshpw.py b/scripts/lib/mic/3rdparty/pykickstart/commands/sshpw.py new file mode 100644 index 0000000000..e7867ebfb2 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/sshpw.py | |||
@@ -0,0 +1,105 @@ | |||
1 | # | ||
2 | # Peter Jones <pjones@redhat.com> | ||
3 | # | ||
4 | # Copyright 2009 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class F13_SshPwData(BaseData):
    """One 'sshpw' entry: a user/password pair for the installer's sshd."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        self.username = kwargs.get("username", None)
        self.isCrypted = kwargs.get("isCrypted", False)
        self.password = kwargs.get("password", "")
        self.lock = kwargs.get("lock", False)

    def __eq__(self, y):
        # Entries are identified by username alone.
        return self.username == y.username

    def __str__(self):
        return BaseData.__str__(self) + "sshpw" + self._getArgsAsStr() + "\n"

    def _getArgsAsStr(self):
        # Assemble the option string piecewise, then join once.
        pieces = [" --username=%s" % self.username]
        if self.lock:
            pieces.append(" --lock")
        if self.isCrypted:
            pieces.append(" --iscrypted")
        else:
            pieces.append(" --plaintext")
        pieces.append(" %s" % self.password)
        return "".join(pieces)
62 | |||
63 | class F13_SshPw(KickstartCommand): | ||
64 | removedKeywords = KickstartCommand.removedKeywords | ||
65 | removedAttrs = KickstartCommand.removedAttrs | ||
66 | |||
67 | def __init__(self, writePriority=0, *args, **kwargs): | ||
68 | KickstartCommand.__init__(self, writePriority, *args, **kwargs) | ||
69 | self.op = self._getParser() | ||
70 | |||
71 | self.sshUserList = kwargs.get("sshUserList", []) | ||
72 | |||
73 | def __str__(self): | ||
74 | retval = "" | ||
75 | for user in self.sshUserList: | ||
76 | retval += user.__str__() | ||
77 | |||
78 | return retval | ||
79 | |||
80 | def _getParser(self): | ||
81 | op = KSOptionParser() | ||
82 | op.add_option("--username", dest="username", required=True) | ||
83 | op.add_option("--iscrypted", dest="isCrypted", action="store_true", | ||
84 | default=False) | ||
85 | op.add_option("--plaintext", dest="isCrypted", action="store_false") | ||
86 | op.add_option("--lock", dest="lock", action="store_true", default=False) | ||
87 | return op | ||
88 | |||
89 | def parse(self, args): | ||
90 | ud = self.handler.SshPwData() | ||
91 | (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) | ||
92 | self._setToObj(self.op, opts, ud) | ||
93 | ud.lineno = self.lineno | ||
94 | |||
95 | if len(extra) != 1: | ||
96 | raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("A single argument is expected for the %s command") % "sshpw") | ||
97 | ud.password = extra[0] | ||
98 | |||
99 | if ud in self.dataList(): | ||
100 | warnings.warn(_("An ssh user with the name %s has already been defined.") % ud.name) | ||
101 | |||
102 | return ud | ||
103 | |||
104 | def dataList(self): | ||
105 | return self.sshUserList | ||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/timezone.py b/scripts/lib/mic/3rdparty/pykickstart/commands/timezone.py new file mode 100644 index 0000000000..f5441de593 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/timezone.py | |||
@@ -0,0 +1,86 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_Timezone(KickstartCommand):
    """Kickstart 'timezone' command (FC3): set the system time zone.

    Attributes:
        isUtc    -- hardware clock is set to UTC (--utc).
        timezone -- the time zone name given as the positional argument.
    """
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        self.isUtc = kwargs.get("isUtc", False)
        self.timezone = kwargs.get("timezone", "")

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if self.timezone != "":
            if self.isUtc:
                utc = "--utc"
            else:
                utc = ""

            retval += "# System timezone\ntimezone %s %s\n" %(utc, self.timezone)

        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--utc", dest="isUtc", action="store_true", default=False)
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)

        # Exactly one positional argument (the zone name) is required.
        if len(extra) != 1:
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("A single argument is expected for the %s command") % "timezone")

        self.timezone = extra[0]
        return self
65 | |||
class FC6_Timezone(FC3_Timezone):
    """FC6 'timezone': accepts --isUtc as an alias for --utc and writes
    the flag back out as --isUtc instead of --utc."""
    removedKeywords = FC3_Timezone.removedKeywords
    removedAttrs = FC3_Timezone.removedAttrs

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if self.timezone != "":
            flag = "--isUtc" if self.isUtc else ""
            retval += "# System timezone\ntimezone %s %s\n" % (flag, self.timezone)

        return retval

    def _getParser(self):
        # Re-register the option so --isUtc is recognized alongside --utc.
        parser = FC3_Timezone._getParser(self)
        parser.add_option("--utc", "--isUtc", dest="isUtc",
                          action="store_true", default=False)
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/updates.py b/scripts/lib/mic/3rdparty/pykickstart/commands/updates.py new file mode 100644 index 0000000000..53ec49f7b8 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/updates.py | |||
@@ -0,0 +1,60 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class F7_Updates(KickstartCommand):
    """Kickstart 'updates' command (F7): location of an updates image.

    url is "floppy" when the command was given with no argument,
    otherwise the URL passed as the positional argument.
    """
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        self.url = kwargs.get("url", "")

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        # A bare 'updates' line stands for the floppy source.
        if self.url == "floppy":
            retval += "updates\n"
        elif self.url != "":
            retval += "updates %s\n" % self.url

        return retval

    def _getParser(self):
        # updates takes no options, only an optional positional URL.
        op = KSOptionParser()
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        if len(extra) > 1:
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s only takes one argument") % "updates")
        elif len(extra) == 0:
            self.url = "floppy"
        else:
            self.url = extra[0]

        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/upgrade.py b/scripts/lib/mic/3rdparty/pykickstart/commands/upgrade.py new file mode 100644 index 0000000000..a68a82d378 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/upgrade.py | |||
@@ -0,0 +1,106 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_Upgrade(KickstartCommand):
    """Kickstart 'upgrade'/'install' command (FC3).

    upgrade is a tri-state: None (neither command seen), True
    ('upgrade' parsed), or False ('install' parsed).
    """
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.upgrade = kwargs.get("upgrade", None)
        self.op = self._getParser()

    def __str__(self):
        retval = KickstartCommand.__str__(self)

        if self.upgrade is None:
            return retval

        if self.upgrade:
            retval += "# Upgrade existing installation\nupgrade\n"
        else:
            retval += "# Install OS instead of upgrade\ninstall\n"

        return retval

    def _getParser(self):
        # This version takes no options and no arguments.
        op = KSOptionParser()
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        if len(extra) > 0:
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "upgrade")

        # The same handler serves both 'upgrade' and 'install'; the
        # command name actually used decides the flag.
        if self.currentCmd == "upgrade":
            self.upgrade = True
        else:
            self.upgrade = False

        return self
65 | |||
class F11_Upgrade(FC3_Upgrade):
    """Fedora 11 'upgrade': adds the --root-device option."""
    removedKeywords = FC3_Upgrade.removedKeywords
    removedAttrs = FC3_Upgrade.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        FC3_Upgrade.__init__(self, writePriority, *args, **kwargs)

        self.op = self._getParser()
        # Device holding the installation to upgrade, or None.
        self.root_device = kwargs.get("root_device", None)

    def __str__(self):
        # Only the upgrade-with-root-device form differs from FC3.
        if self.upgrade and (self.root_device is not None):
            retval = KickstartCommand.__str__(self)
            retval += "# Upgrade existing installation\nupgrade --root-device=%s\n" % self.root_device
        else:
            retval = FC3_Upgrade.__str__(self)

        return retval

    def _getParser(self):
        op = KSOptionParser()
        op.add_option("--root-device", dest="root_device")
        return op

    def parse(self, args):
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)

        if len(extra) > 0:
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "upgrade")

        # Reject an explicitly empty --root-device=""; absence is fine.
        if (opts.root_device is not None) and (opts.root_device == ""):
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not accept empty parameter %s") % ("upgrade", "--root-device"))
        else:
            self.root_device = opts.root_device

        # Same tri-state logic as FC3: the command name sets the flag.
        if self.currentCmd == "upgrade":
            self.upgrade = True
        else:
            self.upgrade = False

        return self
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/user.py b/scripts/lib/mic/3rdparty/pykickstart/commands/user.py new file mode 100644 index 0000000000..189dc7585f --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/user.py | |||
@@ -0,0 +1,173 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.constants import * | ||
22 | from pykickstart.errors import * | ||
23 | from pykickstart.options import * | ||
24 | |||
25 | import gettext | ||
26 | import warnings | ||
27 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
28 | |||
class FC6_UserData(BaseData):
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        """Data object describing one kickstart 'user' command."""
        BaseData.__init__(self, *args, **kwargs)
        self.groups = kwargs.get("groups", [])           # supplementary group names
        self.homedir = kwargs.get("homedir", "")
        self.isCrypted = kwargs.get("isCrypted", False)  # password already encrypted?
        self.name = kwargs.get("name", "")
        self.password = kwargs.get("password", "")
        self.shell = kwargs.get("shell", "")
        self.uid = kwargs.get("uid", None)

    def __eq__(self, y):
        # Two user entries count as duplicates when the login names match.
        return self.name == y.name

    def __ne__(self, y):
        # Python 2 does not derive != from __eq__, so define it explicitly
        # to keep the two operators consistent.
        return not self == y

    def __str__(self):
        retval = BaseData.__str__(self)

        # NOTE(review): uid defaults to None and the parser produces an int,
        # so this comparison against "" is always true in practice -- kept
        # as-is to preserve behavior; confirm the intended guard.
        if self.uid != "":
            retval += "user"
            retval += self._getArgsAsStr() + "\n"

        return retval

    def _getArgsAsStr(self):
        """Serialize the stored attributes back into kickstart option syntax."""
        retval = ""

        if len(self.groups) > 0:
            retval += " --groups=%s" % ",".join(self.groups)
        if self.homedir:
            retval += " --homedir=%s" % self.homedir
        if self.name:
            retval += " --name=%s" % self.name
        if self.password:
            retval += " --password=%s" % self.password
        if self.isCrypted:
            retval += " --iscrypted"
        if self.shell:
            retval += " --shell=%s" % self.shell
        if self.uid:
            retval += " --uid=%s" % self.uid

        return retval
74 | |||
class F8_UserData(FC6_UserData):
    """F8 user data: adds the --lock flag on top of the FC6 fields."""
    removedKeywords = FC6_UserData.removedKeywords
    removedAttrs = FC6_UserData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC6_UserData.__init__(self, *args, **kwargs)
        # Whether the account is created in a locked state.
        self.lock = kwargs.get("lock", False)

    def _getArgsAsStr(self):
        serialized = FC6_UserData._getArgsAsStr(self)
        return serialized + " --lock" if self.lock else serialized
90 | |||
class F12_UserData(F8_UserData):
    """F12 user data: adds the --gecos (comment/full name) field."""
    removedKeywords = F8_UserData.removedKeywords
    removedAttrs = F8_UserData.removedAttrs

    def __init__(self, *args, **kwargs):
        F8_UserData.__init__(self, *args, **kwargs)
        self.gecos = kwargs.get("gecos", "")

    def _getArgsAsStr(self):
        serialized = F8_UserData._getArgsAsStr(self)
        # The GECOS field may contain spaces, hence the quoting.
        return serialized + ' --gecos="%s"' % self.gecos if self.gecos else serialized
106 | |||
class FC6_User(KickstartCommand):
    """The kickstart 'user' command (FC6): collects UserData objects."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        self.userList = kwargs.get("userList", [])

    def __str__(self):
        # One serialized line per parsed user entry.
        return "".join(str(user) for user in self.userList)

    def _getParser(self):
        def _split_groups(option, opt_str, value, parser):
            # --groups takes a comma-separated list; accumulate the pieces
            # onto the (lazily created) destination list.
            for group in value.split(','):
                parser.values.ensure_value(option.dest, []).append(group)

        parser = KSOptionParser()
        parser.add_option("--groups", dest="groups", action="callback",
                          callback=_split_groups, nargs=1, type="string")
        parser.add_option("--homedir")
        parser.add_option("--iscrypted", dest="isCrypted", action="store_true",
                          default=False)
        parser.add_option("--name", required=1)
        parser.add_option("--password")
        parser.add_option("--shell")
        parser.add_option("--uid", type="int")
        return parser

    def parse(self, args):
        """Parse one 'user' line into a new UserData object and return it."""
        user = self.handler.UserData()
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToObj(self.op, opts, user)
        user.lineno = self.lineno

        # Duplicate login names are a warning, not an error.
        if user in self.dataList():
            warnings.warn(_("A user with the name %s has already been defined.") % user.name)

        return user

    def dataList(self):
        return self.userList
155 | |||
class F8_User(FC6_User):
    """F8 user command: adds --lock plus --plaintext as the explicit
    opposite of --iscrypted."""
    removedKeywords = FC6_User.removedKeywords
    removedAttrs = FC6_User.removedAttrs

    def _getParser(self):
        parser = FC6_User._getParser(self)
        parser.add_option("--lock", action="store_true", default=False)
        # --plaintext clears the very flag that --iscrypted sets.
        parser.add_option("--plaintext", dest="isCrypted", action="store_false")
        return parser
165 | |||
class F12_User(F8_User):
    """F12 user command: adds the --gecos option."""
    removedKeywords = F8_User.removedKeywords
    removedAttrs = F8_User.removedAttrs

    def _getParser(self):
        parser = F8_User._getParser(self)
        parser.add_option("--gecos", type="string")
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/vnc.py b/scripts/lib/mic/3rdparty/pykickstart/commands/vnc.py new file mode 100644 index 0000000000..200ccfba2e --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/vnc.py | |||
@@ -0,0 +1,114 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
class FC3_Vnc(KickstartCommand):
    """The kickstart 'vnc' command (FC3): run the graphical install over
    a VNC connection."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        self.enabled = kwargs.get("enabled", False)
        self.password = kwargs.get("password", "")
        self.connect = kwargs.get("connect", "")

    def __str__(self):
        header = KickstartCommand.__str__(self)
        if not self.enabled:
            return header

        fragments = [header, "vnc"]
        if self.connect != "":
            fragments.append(" --connect=%s" % self.connect)
        if self.password != "":
            fragments.append(" --password=%s" % self.password)
        fragments.append("\n")
        return "".join(fragments)

    def _getParser(self):
        parser = KSOptionParser()
        parser.add_option("--connect")
        parser.add_option("--password", dest="password")
        return parser

    def parse(self, args):
        # The mere presence of the command enables VNC.
        self.enabled = True
        (opts, _extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToSelf(self.op, opts)
        return self
62 | |||
class FC6_Vnc(FC3_Vnc):
    # FC6 replaced the combined --connect=host[:port] option with separate
    # --host/--port options, so "connect" becomes a removed attribute.
    removedKeywords = FC3_Vnc.removedKeywords + ["connect"]
    removedAttrs = FC3_Vnc.removedAttrs + ["connect"]

    def __init__(self, writePriority=0, host="", port="", *args, **kwargs):
        # NOTE(review): the host/port positional parameters are never read;
        # the attributes below come from kwargs only.
        FC3_Vnc.__init__(self, writePriority, *args, **kwargs)
        self.deleteRemovedAttrs()

        self.host = kwargs.get("host", "")
        self.port = kwargs.get("port", "")

    def __str__(self):
        """Write the command back out in FC6 syntax (--host/--port)."""
        retval = KickstartCommand.__str__(self)

        if not self.enabled:
            return retval

        retval += "vnc"

        if self.host != "":
            retval += " --host=%s" % self.host

        if self.port != "":
            retval += " --port=%s" % self.port
        if self.password != "":
            retval += " --password=%s" % self.password

        return retval + "\n"

    def _getParser(self):
        def connect_cb (option, opt_str, value, parser):
            # Legacy --connect=host[:port] is split into host/port values.
            # NOTE(review): ensure_value only assigns when the attribute is
            # missing or None, so a --host/--port parsed earlier on the line
            # takes precedence -- confirm this is the intended behavior.
            cargs = value.split(":")
            parser.values.ensure_value("host", cargs[0])

            if len(cargs) > 1:
                parser.values.ensure_value("port", cargs[1])

        op = FC3_Vnc._getParser(self)
        op.add_option("--connect", action="callback", callback=connect_cb,
                      nargs=1, type="string")
        op.add_option("--host", dest="host")
        op.add_option("--port", dest="port")
        return op
106 | |||
class F9_Vnc(FC6_Vnc):
    """F9 vnc command: the legacy --connect option is removed entirely."""
    removedKeywords = FC6_Vnc.removedKeywords
    removedAttrs = FC6_Vnc.removedAttrs

    def _getParser(self):
        parser = FC6_Vnc._getParser(self)
        parser.remove_option("--connect")
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/volgroup.py b/scripts/lib/mic/3rdparty/pykickstart/commands/volgroup.py new file mode 100644 index 0000000000..255c47f0ae --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/volgroup.py | |||
@@ -0,0 +1,102 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.options import * | ||
22 | |||
23 | import gettext | ||
24 | import warnings | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_VolGroupData(BaseData):
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        """Data object describing one LVM volume group."""
        BaseData.__init__(self, *args, **kwargs)
        self.format = kwargs.get("format", True)       # format unless --noformat
        self.pesize = kwargs.get("pesize", 32768)      # physical extent size
        self.preexist = kwargs.get("preexist", False)  # --useexisting
        self.vgname = kwargs.get("vgname", "")
        self.physvols = kwargs.get("physvols", [])     # member physical volumes

    def __eq__(self, y):
        # Volume groups are identified by name alone.
        return self.vgname == y.vgname

    def __ne__(self, y):
        # Python 2 does not derive != from __eq__, so define it explicitly
        # to keep the two operators consistent.
        return not self == y

    def __str__(self):
        retval = BaseData.__str__(self)
        retval += "volgroup %s" % self.vgname

        if not self.format:
            retval += " --noformat"
        if self.pesize != 0:
            retval += " --pesize=%d" % self.pesize
        if self.preexist:
            retval += " --useexisting"

        return retval + " " + " ".join(self.physvols) + "\n"
54 | |||
class FC3_VolGroup(KickstartCommand):
    """The kickstart 'volgroup' command (FC3): collects VolGroupData
    objects."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=132, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        self.vgList = kwargs.get("vgList", [])

    def __str__(self):
        return "".join(str(volgroup) for volgroup in self.vgList)

    def _getParser(self):
        def noformat_cb (option, opt_str, value, parser):
            # --noformat also implies the volume group already exists,
            # so one callback sets both values.
            parser.values.format = False
            parser.values.preexist = True

        parser = KSOptionParser()
        parser.add_option("--noformat", action="callback", callback=noformat_cb,
                          dest="format", default=True, nargs=0)
        parser.add_option("--pesize", dest="pesize", type="int", nargs=1,
                          default=32768)
        parser.add_option("--useexisting", dest="preexist", action="store_true",
                          default=False)
        return parser

    def parse(self, args):
        """Parse one 'volgroup' line into a new VolGroupData and return it."""
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        volgroup = self.handler.VolGroupData()
        self._setToObj(self.op, opts, volgroup)
        volgroup.lineno = self.lineno
        # First bare argument is the VG name, the rest are its physical
        # volumes.
        volgroup.vgname = extra[0]
        volgroup.physvols = extra[1:]

        # A duplicate volume group name is a warning, not an error.
        if volgroup in self.dataList():
            warnings.warn(_("A volgroup with the name %s has already been defined.") % volgroup.vgname)

        return volgroup

    def dataList(self):
        return self.vgList
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/xconfig.py b/scripts/lib/mic/3rdparty/pykickstart/commands/xconfig.py new file mode 100644 index 0000000000..644ee86743 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/xconfig.py | |||
@@ -0,0 +1,184 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007, 2008 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
24 | import gettext | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
27 | class FC3_XConfig(KickstartCommand): | ||
28 | removedKeywords = KickstartCommand.removedKeywords | ||
29 | removedAttrs = KickstartCommand.removedAttrs | ||
30 | |||
31 | def __init__(self, writePriority=0, *args, **kwargs): | ||
32 | KickstartCommand.__init__(self, writePriority, *args, **kwargs) | ||
33 | self.op = self._getParser() | ||
34 | |||
35 | self.card = kwargs.get("card", "") | ||
36 | self.defaultdesktop = kwargs.get("defaultdesktop", "") | ||
37 | self.depth = kwargs.get("depth", 0) | ||
38 | self.hsync = kwargs.get("hsync", "") | ||
39 | self.monitor = kwargs.get("monitor", "") | ||
40 | self.noProbe = kwargs.get("noProbe", False) | ||
41 | self.resolution = kwargs.get("resolution", "") | ||
42 | self.server = kwargs.get("server", "") | ||
43 | self.startX = kwargs.get("startX", False) | ||
44 | self.videoRam = kwargs.get("videoRam", "") | ||
45 | self.vsync = kwargs.get("vsync", "") | ||
46 | |||
47 | def __str__(self): | ||
48 | retval = KickstartCommand.__str__(self) | ||
49 | |||
50 | if self.card != "": | ||
51 | retval += " --card=%s" % self.card | ||
52 | if self.defaultdesktop != "": | ||
53 | retval += " --defaultdesktop=%s" % self.defaultdesktop | ||
54 | if self.depth != 0: | ||
55 | retval += " --depth=%d" % self.depth | ||
56 | if self.hsync != "": | ||
57 | retval += " --hsync=%s" % self.hsync | ||
58 | if self.monitor != "": | ||
59 | retval += " --monitor=%s" % self.monitor | ||
60 | if self.noProbe: | ||
61 | retval += " --noprobe" | ||
62 | if self.resolution != "": | ||
63 | retval += " --resolution=%s" % self.resolution | ||
64 | if self.server != "": | ||
65 | retval += " --server=%s" % self.server | ||
66 | if self.startX: | ||
67 | retval += " --startxonboot" | ||
68 | if self.videoRam != "": | ||
69 | retval += " --videoram=%s" % self.videoRam | ||
70 | if self.vsync != "": | ||
71 | retval += " --vsync=%s" % self.vsync | ||
72 | |||
73 | if retval != "": | ||
74 | retval = "# X Window System configuration information\nxconfig %s\n" % retval | ||
75 | |||
76 | return retval | ||
77 | |||
78 | def _getParser(self): | ||
79 | op = KSOptionParser() | ||
80 | op.add_option("--card") | ||
81 | op.add_option("--defaultdesktop") | ||
82 | op.add_option("--depth", action="store", type="int", nargs=1) | ||
83 | op.add_option("--hsync") | ||
84 | op.add_option("--monitor") | ||
85 | op.add_option("--noprobe", dest="noProbe", action="store_true", | ||
86 | default=False) | ||
87 | op.add_option("--resolution") | ||
88 | op.add_option("--server") | ||
89 | op.add_option("--startxonboot", dest="startX", action="store_true", | ||
90 | default=False) | ||
91 | op.add_option("--videoram", dest="videoRam") | ||
92 | op.add_option("--vsync") | ||
93 | return op | ||
94 | |||
95 | def parse(self, args): | ||
96 | (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) | ||
97 | if extra: | ||
98 | mapping = {"command": "xconfig", "options": extra} | ||
99 | raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping) | ||
100 | |||
101 | self._setToSelf(self.op, opts) | ||
102 | return self | ||
103 | |||
class FC6_XConfig(FC3_XConfig):
    """FC6 xconfig: hardware-probing options become deprecated and a
    --driver option is added."""
    removedKeywords = FC3_XConfig.removedKeywords + ["card", "hsync", "monitor", "noProbe", "vsync"]
    removedAttrs = FC3_XConfig.removedAttrs + ["card", "hsync", "monitor", "noProbe", "vsync"]

    def __init__(self, writePriority=0, *args, **kwargs):
        FC3_XConfig.__init__(self, writePriority, *args, **kwargs)
        self.deleteRemovedAttrs()

        self.driver = kwargs.get("driver", "")

    def __str__(self):
        # Attributes removed by later versions (F10 drops driver,
        # resolution and videoRam) are guarded with hasattr().
        fragments = [KickstartCommand.__str__(self)]

        if hasattr(self, "driver") and self.driver != "":
            fragments.append(" --driver=%s" % self.driver)
        if self.defaultdesktop != "":
            fragments.append(" --defaultdesktop=%s" % self.defaultdesktop)
        if self.depth != 0:
            fragments.append(" --depth=%d" % self.depth)
        if hasattr(self, "resolution") and self.resolution != "":
            fragments.append(" --resolution=%s" % self.resolution)
        if self.startX:
            fragments.append(" --startxonboot")
        if hasattr(self, "videoRam") and self.videoRam != "":
            fragments.append(" --videoram=%s" % self.videoRam)

        retval = "".join(fragments)
        if retval != "":
            retval = "# X Window System configuration information\nxconfig %s\n" % retval

        return retval

    def _getParser(self):
        parser = FC3_XConfig._getParser(self)
        parser.add_option("--card", deprecated=1)
        parser.add_option("--driver", dest="driver")
        parser.add_option("--hsync", deprecated=1)
        parser.add_option("--monitor", deprecated=1)
        parser.add_option("--noprobe", deprecated=1)
        parser.add_option("--vsync", deprecated=1)
        return parser
144 | |||
class F9_XConfig(FC6_XConfig):
    """F9 xconfig: the options deprecated in FC6 are removed outright."""
    removedKeywords = FC6_XConfig.removedKeywords
    removedAttrs = FC6_XConfig.removedAttrs

    def _getParser(self):
        parser = FC6_XConfig._getParser(self)
        for opt in ("--card", "--hsync", "--monitor", "--noprobe", "--vsync"):
            parser.remove_option(opt)
        return parser
157 | |||
class F10_XConfig(F9_XConfig):
    """F10 xconfig: display configuration moved to auto-detection, so the
    driver/depth/resolution/videoram options are deprecated."""
    removedKeywords = F9_XConfig.removedKeywords + ["driver", "resolution", "videoRam"]
    removedAttrs = F9_XConfig.removedAttrs + ["driver", "resolution", "videoRam"]

    def __init__(self, writePriority=0, *args, **kwargs):
        F9_XConfig.__init__(self, writePriority, *args, **kwargs)
        self.deleteRemovedAttrs()

    def _getParser(self):
        parser = F9_XConfig._getParser(self)
        for opt in ("--driver", "--depth", "--resolution", "--videoram"):
            parser.add_option(opt, deprecated=1)
        return parser
173 | |||
class F14_XConfig(F10_XConfig):
    """F14 xconfig: the options deprecated in F10 are removed outright."""
    removedKeywords = F10_XConfig.removedKeywords
    removedAttrs = F10_XConfig.removedAttrs

    def _getParser(self):
        parser = F10_XConfig._getParser(self)
        for opt in ("--driver", "--depth", "--resolution", "--videoram"):
            parser.remove_option(opt)
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/zerombr.py b/scripts/lib/mic/3rdparty/pykickstart/commands/zerombr.py new file mode 100644 index 0000000000..79555a9b27 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/zerombr.py | |||
@@ -0,0 +1,69 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | import warnings | ||
21 | |||
22 | from pykickstart.base import * | ||
23 | from pykickstart.options import * | ||
24 | |||
25 | import gettext | ||
26 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
27 | |||
class FC3_ZeroMbr(KickstartCommand):
    """The kickstart 'zerombr' command (FC3): clear the Master Boot
    Record."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=110, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        self.zerombr = kwargs.get("zerombr", False)

    def __str__(self):
        retval = KickstartCommand.__str__(self)
        if not self.zerombr:
            return retval
        return retval + "# Clear the Master Boot Record\nzerombr\n"

    def _getParser(self):
        # zerombr takes no options; leftovers are handled in parse().
        return KSOptionParser()

    def parse(self, args):
        (opts, leftover) = self.op.parse_args(args=args, lineno=self.lineno)

        # FC3 only warns about stray arguments; F9 turns them into an error.
        if leftover:
            warnings.warn(_("Ignoring deprecated option on line %s: The zerombr command no longer takes any options. In future releases, this will result in a fatal error from kickstart. Please modify your kickstart file to remove any options.") % self.lineno, DeprecationWarning)

        self.zerombr = True
        return self
57 | |||
58 | class F9_ZeroMbr(FC3_ZeroMbr): | ||
59 | removedKeywords = FC3_ZeroMbr.removedKeywords | ||
60 | removedAttrs = FC3_ZeroMbr.removedAttrs | ||
61 | |||
62 | def parse(self, args): | ||
63 | (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno) | ||
64 | |||
65 | if len(extra) > 0: | ||
66 | raise KickstartParseError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "zerombr") | ||
67 | |||
68 | self.zerombr = True | ||
69 | return self | ||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/commands/zfcp.py b/scripts/lib/mic/3rdparty/pykickstart/commands/zfcp.py new file mode 100644 index 0000000000..1ed2694c89 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/commands/zfcp.py | |||
@@ -0,0 +1,134 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.options import * | ||
22 | |||
23 | import gettext | ||
24 | import warnings | ||
25 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
26 | |||
class FC3_ZFCPData(BaseData):
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        """Data object describing one zFCP SCSI device."""
        BaseData.__init__(self, *args, **kwargs)
        self.devnum = kwargs.get("devnum", "")
        self.wwpn = kwargs.get("wwpn", "")
        self.fcplun = kwargs.get("fcplun", "")
        self.scsiid = kwargs.get("scsiid", "")
        self.scsilun = kwargs.get("scsilun", "")

    def __eq__(self, y):
        # scsiid/scsilun are deleted from F12 and later data objects, so
        # guard them with getattr the same way __str__ guards with hasattr;
        # on such objects both sides compare as None.
        return self.devnum == y.devnum and self.wwpn == y.wwpn and \
               self.fcplun == y.fcplun and \
               getattr(self, "scsiid", None) == getattr(y, "scsiid", None) and \
               getattr(self, "scsilun", None) == getattr(y, "scsilun", None)

    def __ne__(self, y):
        # Python 2 does not derive != from __eq__; keep the operators in sync.
        return not self == y

    def __str__(self):
        retval = BaseData.__str__(self)
        retval += "zfcp"

        if self.devnum != "":
            retval += " --devnum=%s" % self.devnum
        if self.wwpn != "":
            retval += " --wwpn=%s" % self.wwpn
        if self.fcplun != "":
            retval += " --fcplun=%s" % self.fcplun
        # hasattr guards: these attributes do not exist on F12+ objects.
        if hasattr(self, "scsiid") and self.scsiid != "":
            retval += " --scsiid=%s" % self.scsiid
        if hasattr(self, "scsilun") and self.scsilun != "":
            retval += " --scsilun=%s" % self.scsilun

        return retval + "\n"
60 | |||
class F12_ZFCPData(FC3_ZFCPData):
    """F12 zfcp data: the SCSI id/LUN attributes are dropped entirely."""
    removedKeywords = FC3_ZFCPData.removedKeywords + ["scsiid", "scsilun"]
    removedAttrs = FC3_ZFCPData.removedAttrs + ["scsiid", "scsilun"]

    def __init__(self, *args, **kwargs):
        FC3_ZFCPData.__init__(self, *args, **kwargs)
        self.deleteRemovedAttrs()

# F14 made no changes to the data format.
F14_ZFCPData = F12_ZFCPData
70 | |||
class FC3_ZFCP(KickstartCommand):
    """The kickstart 'zfcp' command (FC3): collects ZFCPData objects."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=71, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        self.zfcp = kwargs.get("zfcp", [])

    def __str__(self):
        return "".join(str(device) for device in self.zfcp)

    def _getParser(self):
        # All five options are mandatory in FC3; the dest name is the
        # option name with the leading dashes stripped.
        parser = KSOptionParser()
        for opt in ("--devnum", "--fcplun", "--scsiid", "--scsilun", "--wwpn"):
            parser.add_option(opt, dest=opt[2:], required=1)
        return parser

    def parse(self, args):
        """Parse one 'zfcp' line into a new ZFCPData object and return it."""
        device = self.handler.ZFCPData()
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToObj(self.op, opts, device)
        device.lineno = self.lineno

        # A duplicate device definition is a warning, not an error.
        if device in self.dataList():
            warnings.warn(_("A zfcp with this information has already been defined."))

        return device

    def dataList(self):
        return self.zfcp
111 | |||
class F12_ZFCP(FC3_ZFCP):
    """F12 zfcp command: --scsiid/--scsilun become deprecated no-ops."""
    removedKeywords = FC3_ZFCP.removedKeywords
    removedAttrs = FC3_ZFCP.removedAttrs + ["scsiid", "scsilun"]

    def __init__(self, *args, **kwargs):
        FC3_ZFCP.__init__(self, *args, **kwargs)
        self.deleteRemovedAttrs()

    def _getParser(self):
        parser = FC3_ZFCP._getParser(self)
        for opt in ("--scsiid", "--scsilun"):
            parser.add_option(opt, deprecated=1)
        return parser
125 | |||
class F14_ZFCP(F12_ZFCP):
    """F14 zfcp command: the deprecated SCSI id/LUN options are removed."""
    removedKeywords = F12_ZFCP.removedKeywords
    removedAttrs = F12_ZFCP.removedAttrs

    def _getParser(self):
        parser = F12_ZFCP._getParser(self)
        for opt in ("--scsiid", "--scsilun"):
            parser.remove_option(opt)
        return parser
diff --git a/scripts/lib/mic/3rdparty/pykickstart/constants.py b/scripts/lib/mic/3rdparty/pykickstart/constants.py new file mode 100644 index 0000000000..5e12fc80ec --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/constants.py | |||
@@ -0,0 +1,57 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005-2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
# Values for the clearpart --type option.
CLEARPART_TYPE_LINUX = 0
CLEARPART_TYPE_ALL = 1
CLEARPART_TYPE_NONE = 2

# Installer display modes (cmdline / graphical / text commands).
DISPLAY_MODE_CMDLINE = 0
DISPLAY_MODE_GRAPHICAL = 1
DISPLAY_MODE_TEXT = 2

# States for the firstboot command.
FIRSTBOOT_DEFAULT = 0
FIRSTBOOT_SKIP = 1
FIRSTBOOT_RECONFIG = 2

# What to do when something referenced by the kickstart is missing.
KS_MISSING_PROMPT = 0
KS_MISSING_IGNORE = 1

# States for the selinux command.
SELINUX_DISABLED = 0
SELINUX_ENFORCING = 1
SELINUX_PERMISSIVE = 2

# Kickstart script section types (pre / post / traceback).
KS_SCRIPT_PRE = 0
KS_SCRIPT_POST = 1
KS_SCRIPT_TRACEBACK = 2

# End-of-installation actions.
KS_WAIT = 0
KS_REBOOT = 1
KS_SHUTDOWN = 2

# Sentinel value: skip entering an installation key.
KS_INSTKEY_SKIP = -99

# Values for the network --bootproto option.
BOOTPROTO_DHCP = "dhcp"
BOOTPROTO_BOOTP = "bootp"
BOOTPROTO_STATIC = "static"
BOOTPROTO_QUERY = "query"
BOOTPROTO_IBFT = "ibft"

# Package group selection levels.
GROUP_REQUIRED = 0
GROUP_DEFAULT = 1
GROUP_ALL = 2
diff --git a/scripts/lib/mic/3rdparty/pykickstart/errors.py b/scripts/lib/mic/3rdparty/pykickstart/errors.py new file mode 100644 index 0000000000..a234d99d43 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/errors.py | |||
@@ -0,0 +1,103 @@ | |||
1 | # | ||
2 | # errors.py: Kickstart error handling. | ||
3 | # | ||
4 | # Chris Lumens <clumens@redhat.com> | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | """ | ||
21 | Error handling classes and functions. | ||
22 | |||
23 | This module exports a single function: | ||
24 | |||
25 | formatErrorMsg - Properly formats an error message. | ||
26 | |||
27 | It also exports several exception classes: | ||
28 | |||
29 | KickstartError - A generic exception class. | ||
30 | |||
31 | KickstartParseError - An exception for errors relating to parsing. | ||
32 | |||
33 | KickstartValueError - An exception for errors relating to option | ||
34 | processing. | ||
35 | |||
36 | KickstartVersionError - An exception for errors relating to unsupported | ||
37 | syntax versions. | ||
38 | """ | ||
import gettext

def _(message):
    """Translate message in the 'pykickstart' gettext domain.

    A named def instead of a lambda assignment (PEP 8 E731); callers use it
    exactly as before: _("some text").

    NOTE(review): gettext.ldgettext is a Python-2-era API (removed in
    modern Python 3); this matches the rest of this Python 2 codebase.
    """
    return gettext.ldgettext("pykickstart", message)
41 | |||
def formatErrorMsg(lineno, msg=""):
    """Return a translated error string referencing line lineno.

    When msg is empty, a generic "problem reading" message is produced;
    otherwise msg is embedded in the formatted output.
    """
    if msg == "":
        return _("There was a problem reading from line %s of the kickstart file") % lineno
    return _("The following problem occurred on line %(lineno)s of the kickstart file:\n\n%(msg)s\n") % {"lineno": lineno, "msg": msg}
49 | |||
class KickstartError(Exception):
    """Generic kickstart error; self.value holds the descriptive message."""

    def __init__(self, val=""):
        """Create a new KickstartError carrying the descriptive message val.

        val should be the return value of formatErrorMsg.
        """
        super(KickstartError, self).__init__()
        self.value = val

    def __str__(self):
        return self.value
61 | |||
class KickstartParseError(KickstartError):
    """An exception class for errors when processing the input file, such as
    unknown options, commands, or sections.
    """

    def __init__(self, msg):
        """Create a new KickstartParseError with the descriptive message msg.

        msg should be the return value of formatErrorMsg.
        """
        # Fixed docstring (it referred to a nonexistent 'val' parameter) and
        # dropped the __str__ override, which duplicated the inherited
        # KickstartError.__str__ verbatim.
        KickstartError.__init__(self, msg)
75 | |||
class KickstartValueError(KickstartError):
    """An exception class for errors when processing arguments to commands,
    such as too many arguments, too few arguments, or missing required
    arguments.
    """

    def __init__(self, msg):
        """Create a new KickstartValueError with the descriptive message msg.

        msg should be the return value of formatErrorMsg.
        """
        # Fixed docstring (it referred to a nonexistent 'val' parameter) and
        # dropped the __str__ override, which duplicated the inherited
        # KickstartError.__str__ verbatim.
        KickstartError.__init__(self, msg)
90 | |||
class KickstartVersionError(KickstartError):
    """An exception class for errors related to using an incorrect version of
    kickstart syntax.
    """

    def __init__(self, msg):
        """Create a new KickstartVersionError with the descriptive message msg.

        msg should be the return value of formatErrorMsg.
        """
        # Fixed docstring (it referred to a nonexistent 'val' parameter) and
        # dropped the __str__ override, which duplicated the inherited
        # KickstartError.__str__ verbatim.
        KickstartError.__init__(self, msg)
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/__init__.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/__init__.py | |||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/control.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/control.py new file mode 100644 index 0000000000..d8c8f2b899 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/control.py | |||
@@ -0,0 +1,1307 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.version import * | ||
21 | from pykickstart.commands import * | ||
22 | |||
23 | # This map is keyed on kickstart syntax version as provided by | ||
24 | # pykickstart.version. Within each sub-dict is a mapping from command name | ||
25 | # to the class that handles it. This is an onto mapping - that is, multiple | ||
26 | # command names can map to the same class. However, the Handler will ensure | ||
27 | # that only one instance of each class ever exists. | ||
28 | commandMap = { | ||
29 | FC3: { | ||
30 | "auth": authconfig.FC3_Authconfig, | ||
31 | "authconfig": authconfig.FC3_Authconfig, | ||
32 | "autopart": autopart.FC3_AutoPart, | ||
33 | "autostep": autostep.FC3_AutoStep, | ||
34 | "bootloader": bootloader.FC3_Bootloader, | ||
35 | "cdrom": method.FC3_Method, | ||
36 | "clearpart": clearpart.FC3_ClearPart, | ||
37 | "cmdline": displaymode.FC3_DisplayMode, | ||
38 | "device": device.FC3_Device, | ||
39 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
40 | "driverdisk": driverdisk.FC3_DriverDisk, | ||
41 | "firewall": firewall.FC3_Firewall, | ||
42 | "firstboot": firstboot.FC3_Firstboot, | ||
43 | "graphical": displaymode.FC3_DisplayMode, | ||
44 | "halt": reboot.FC3_Reboot, | ||
45 | "harddrive": method.FC3_Method, | ||
46 | "ignoredisk": ignoredisk.FC3_IgnoreDisk, | ||
47 | "install": upgrade.FC3_Upgrade, | ||
48 | "interactive": interactive.FC3_Interactive, | ||
49 | "keyboard": keyboard.FC3_Keyboard, | ||
50 | "lang": lang.FC3_Lang, | ||
51 | "langsupport": langsupport.FC3_LangSupport, | ||
52 | "lilo": bootloader.FC3_Bootloader, | ||
53 | "lilocheck": lilocheck.FC3_LiloCheck, | ||
54 | "logvol": logvol.FC3_LogVol, | ||
55 | "monitor": monitor.FC3_Monitor, | ||
56 | "mouse": mouse.FC3_Mouse, | ||
57 | "network": network.FC3_Network, | ||
58 | "nfs": method.FC3_Method, | ||
59 | "part": partition.FC3_Partition, | ||
60 | "partition": partition.FC3_Partition, | ||
61 | "poweroff": reboot.FC3_Reboot, | ||
62 | "raid": raid.FC3_Raid, | ||
63 | "reboot": reboot.FC3_Reboot, | ||
64 | "rootpw": rootpw.FC3_RootPw, | ||
65 | "selinux": selinux.FC3_SELinux, | ||
66 | "shutdown": reboot.FC3_Reboot, | ||
67 | "skipx": skipx.FC3_SkipX, | ||
68 | "text": displaymode.FC3_DisplayMode, | ||
69 | "timezone": timezone.FC3_Timezone, | ||
70 | "upgrade": upgrade.FC3_Upgrade, | ||
71 | "url": method.FC3_Method, | ||
72 | "vnc": vnc.FC3_Vnc, | ||
73 | "volgroup": volgroup.FC3_VolGroup, | ||
74 | "xconfig": xconfig.FC3_XConfig, | ||
75 | "zerombr": zerombr.FC3_ZeroMbr, | ||
76 | "zfcp": zfcp.FC3_ZFCP, | ||
77 | }, | ||
78 | |||
79 | # based on fc3 | ||
80 | FC4: { | ||
81 | "auth": authconfig.FC3_Authconfig, | ||
82 | "authconfig": authconfig.FC3_Authconfig, | ||
83 | "autopart": autopart.FC3_AutoPart, | ||
84 | "autostep": autostep.FC3_AutoStep, | ||
85 | "bootloader": bootloader.FC4_Bootloader, | ||
86 | "cdrom": method.FC3_Method, | ||
87 | "clearpart": clearpart.FC3_ClearPart, | ||
88 | "cmdline": displaymode.FC3_DisplayMode, | ||
89 | "device": device.FC3_Device, | ||
90 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
91 | "driverdisk": driverdisk.FC4_DriverDisk, | ||
92 | "firewall": firewall.FC3_Firewall, | ||
93 | "firstboot": firstboot.FC3_Firstboot, | ||
94 | "graphical": displaymode.FC3_DisplayMode, | ||
95 | "halt": reboot.FC3_Reboot, | ||
96 | "harddrive": method.FC3_Method, | ||
97 | "ignoredisk": ignoredisk.FC3_IgnoreDisk, | ||
98 | "install": upgrade.FC3_Upgrade, | ||
99 | "interactive": interactive.FC3_Interactive, | ||
100 | "keyboard": keyboard.FC3_Keyboard, | ||
101 | "lang": lang.FC3_Lang, | ||
102 | "langsupport": langsupport.FC3_LangSupport, | ||
103 | "logvol": logvol.FC4_LogVol, | ||
104 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
105 | "monitor": monitor.FC3_Monitor, | ||
106 | "mouse": mouse.FC3_Mouse, | ||
107 | "network": network.FC4_Network, | ||
108 | "nfs": method.FC3_Method, | ||
109 | "part": partition.FC4_Partition, | ||
110 | "partition": partition.FC4_Partition, | ||
111 | "poweroff": reboot.FC3_Reboot, | ||
112 | "raid": raid.FC4_Raid, | ||
113 | "reboot": reboot.FC3_Reboot, | ||
114 | "rootpw": rootpw.FC3_RootPw, | ||
115 | "selinux": selinux.FC3_SELinux, | ||
116 | "shutdown": reboot.FC3_Reboot, | ||
117 | "skipx": skipx.FC3_SkipX, | ||
118 | "text": displaymode.FC3_DisplayMode, | ||
119 | "timezone": timezone.FC3_Timezone, | ||
120 | "upgrade": upgrade.FC3_Upgrade, | ||
121 | "url": method.FC3_Method, | ||
122 | "vnc": vnc.FC3_Vnc, | ||
123 | "volgroup": volgroup.FC3_VolGroup, | ||
124 | "xconfig": xconfig.FC3_XConfig, | ||
125 | "zerombr": zerombr.FC3_ZeroMbr, | ||
126 | "zfcp": zfcp.FC3_ZFCP, | ||
127 | }, | ||
128 | |||
129 | # based on fc4 | ||
130 | FC5: { | ||
131 | "auth": authconfig.FC3_Authconfig, | ||
132 | "authconfig": authconfig.FC3_Authconfig, | ||
133 | "autopart": autopart.FC3_AutoPart, | ||
134 | "autostep": autostep.FC3_AutoStep, | ||
135 | "bootloader": bootloader.FC4_Bootloader, | ||
136 | "cdrom": method.FC3_Method, | ||
137 | "clearpart": clearpart.FC3_ClearPart, | ||
138 | "cmdline": displaymode.FC3_DisplayMode, | ||
139 | "device": device.FC3_Device, | ||
140 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
141 | "driverdisk": driverdisk.FC4_DriverDisk, | ||
142 | "firewall": firewall.FC3_Firewall, | ||
143 | "firstboot": firstboot.FC3_Firstboot, | ||
144 | "graphical": displaymode.FC3_DisplayMode, | ||
145 | "halt": reboot.FC3_Reboot, | ||
146 | "harddrive": method.FC3_Method, | ||
147 | "ignoredisk": ignoredisk.FC3_IgnoreDisk, | ||
148 | "install": upgrade.FC3_Upgrade, | ||
149 | "interactive": interactive.FC3_Interactive, | ||
150 | "keyboard": keyboard.FC3_Keyboard, | ||
151 | "lang": lang.FC3_Lang, | ||
152 | "langsupport": langsupport.FC5_LangSupport, | ||
153 | "logvol": logvol.FC4_LogVol, | ||
154 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
155 | "monitor": monitor.FC3_Monitor, | ||
156 | "mouse": mouse.FC3_Mouse, | ||
157 | "network": network.FC4_Network, | ||
158 | "nfs": method.FC3_Method, | ||
159 | "part": partition.FC4_Partition, | ||
160 | "partition": partition.FC4_Partition, | ||
161 | "poweroff": reboot.FC3_Reboot, | ||
162 | "raid": raid.FC5_Raid, | ||
163 | "reboot": reboot.FC3_Reboot, | ||
164 | "rootpw": rootpw.FC3_RootPw, | ||
165 | "selinux": selinux.FC3_SELinux, | ||
166 | "shutdown": reboot.FC3_Reboot, | ||
167 | "skipx": skipx.FC3_SkipX, | ||
168 | "text": displaymode.FC3_DisplayMode, | ||
169 | "timezone": timezone.FC3_Timezone, | ||
170 | "upgrade": upgrade.FC3_Upgrade, | ||
171 | "url": method.FC3_Method, | ||
172 | "vnc": vnc.FC3_Vnc, | ||
173 | "volgroup": volgroup.FC3_VolGroup, | ||
174 | "xconfig": xconfig.FC3_XConfig, | ||
175 | "zerombr": zerombr.FC3_ZeroMbr, | ||
176 | "zfcp": zfcp.FC3_ZFCP, | ||
177 | }, | ||
178 | |||
179 | # based on fc5 | ||
180 | FC6: { | ||
181 | "auth": authconfig.FC3_Authconfig, | ||
182 | "authconfig": authconfig.FC3_Authconfig, | ||
183 | "autopart": autopart.FC3_AutoPart, | ||
184 | "autostep": autostep.FC3_AutoStep, | ||
185 | "bootloader": bootloader.FC4_Bootloader, | ||
186 | "cdrom": method.FC6_Method, | ||
187 | "clearpart": clearpart.FC3_ClearPart, | ||
188 | "cmdline": displaymode.FC3_DisplayMode, | ||
189 | "device": device.FC3_Device, | ||
190 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
191 | "dmraid": dmraid.FC6_DmRaid, | ||
192 | "driverdisk": driverdisk.FC4_DriverDisk, | ||
193 | "firewall": firewall.FC3_Firewall, | ||
194 | "firstboot": firstboot.FC3_Firstboot, | ||
195 | "graphical": displaymode.FC3_DisplayMode, | ||
196 | "halt": reboot.FC6_Reboot, | ||
197 | "harddrive": method.FC6_Method, | ||
198 | "ignoredisk": ignoredisk.FC3_IgnoreDisk, | ||
199 | "install": upgrade.FC3_Upgrade, | ||
200 | "interactive": interactive.FC3_Interactive, | ||
201 | "iscsi": iscsi.FC6_Iscsi, | ||
202 | "iscsiname": iscsiname.FC6_IscsiName, | ||
203 | "keyboard": keyboard.FC3_Keyboard, | ||
204 | "lang": lang.FC3_Lang, | ||
205 | "langsupport": langsupport.FC5_LangSupport, | ||
206 | "logging": logging.FC6_Logging, | ||
207 | "logvol": logvol.FC4_LogVol, | ||
208 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
209 | "monitor": monitor.FC6_Monitor, | ||
210 | "mouse": mouse.FC3_Mouse, | ||
211 | "multipath": multipath.FC6_MultiPath, | ||
212 | "network": network.FC6_Network, | ||
213 | "nfs": method.FC6_Method, | ||
214 | "part": partition.FC4_Partition, | ||
215 | "partition": partition.FC4_Partition, | ||
216 | "poweroff": reboot.FC6_Reboot, | ||
217 | "raid": raid.FC5_Raid, | ||
218 | "reboot": reboot.FC6_Reboot, | ||
219 | "repo": repo.FC6_Repo, | ||
220 | "rootpw": rootpw.FC3_RootPw, | ||
221 | "selinux": selinux.FC3_SELinux, | ||
222 | "services": services.FC6_Services, | ||
223 | "shutdown": reboot.FC6_Reboot, | ||
224 | "skipx": skipx.FC3_SkipX, | ||
225 | "text": displaymode.FC3_DisplayMode, | ||
226 | "timezone": timezone.FC6_Timezone, | ||
227 | "upgrade": upgrade.FC3_Upgrade, | ||
228 | "user": user.FC6_User, | ||
229 | "url": method.FC6_Method, | ||
230 | "vnc": vnc.FC6_Vnc, | ||
231 | "volgroup": volgroup.FC3_VolGroup, | ||
232 | "xconfig": xconfig.FC6_XConfig, | ||
233 | "zerombr": zerombr.FC3_ZeroMbr, | ||
234 | "zfcp": zfcp.FC3_ZFCP, | ||
235 | }, | ||
236 | |||
237 | # based on fc6 | ||
238 | F7: { | ||
239 | "auth": authconfig.FC3_Authconfig, | ||
240 | "authconfig": authconfig.FC3_Authconfig, | ||
241 | "autopart": autopart.FC3_AutoPart, | ||
242 | "autostep": autostep.FC3_AutoStep, | ||
243 | "bootloader": bootloader.FC4_Bootloader, | ||
244 | "cdrom": method.FC6_Method, | ||
245 | "clearpart": clearpart.FC3_ClearPart, | ||
246 | "cmdline": displaymode.FC3_DisplayMode, | ||
247 | "device": device.FC3_Device, | ||
248 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
249 | "dmraid": dmraid.FC6_DmRaid, | ||
250 | "driverdisk": driverdisk.FC4_DriverDisk, | ||
251 | "firewall": firewall.FC3_Firewall, | ||
252 | "firstboot": firstboot.FC3_Firstboot, | ||
253 | "graphical": displaymode.FC3_DisplayMode, | ||
254 | "halt": reboot.FC6_Reboot, | ||
255 | "harddrive": method.FC6_Method, | ||
256 | "ignoredisk": ignoredisk.FC3_IgnoreDisk, | ||
257 | "install": upgrade.FC3_Upgrade, | ||
258 | "interactive": interactive.FC3_Interactive, | ||
259 | "iscsi": iscsi.FC6_Iscsi, | ||
260 | "iscsiname": iscsiname.FC6_IscsiName, | ||
261 | "keyboard": keyboard.FC3_Keyboard, | ||
262 | "lang": lang.FC3_Lang, | ||
263 | "logging": logging.FC6_Logging, | ||
264 | "logvol": logvol.FC4_LogVol, | ||
265 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
266 | "monitor": monitor.FC6_Monitor, | ||
267 | "multipath": multipath.FC6_MultiPath, | ||
268 | "network": network.FC6_Network, | ||
269 | "nfs": method.FC6_Method, | ||
270 | "part": partition.FC4_Partition, | ||
271 | "partition": partition.FC4_Partition, | ||
272 | "poweroff": reboot.FC6_Reboot, | ||
273 | "raid": raid.F7_Raid, | ||
274 | "reboot": reboot.FC6_Reboot, | ||
275 | "repo": repo.FC6_Repo, | ||
276 | "rootpw": rootpw.FC3_RootPw, | ||
277 | "selinux": selinux.FC3_SELinux, | ||
278 | "services": services.FC6_Services, | ||
279 | "shutdown": reboot.FC6_Reboot, | ||
280 | "skipx": skipx.FC3_SkipX, | ||
281 | "text": displaymode.FC3_DisplayMode, | ||
282 | "timezone": timezone.FC6_Timezone, | ||
283 | "updates": updates.F7_Updates, | ||
284 | "upgrade": upgrade.FC3_Upgrade, | ||
285 | "url": method.FC6_Method, | ||
286 | "user": user.FC6_User, | ||
287 | "vnc": vnc.FC6_Vnc, | ||
288 | "volgroup": volgroup.FC3_VolGroup, | ||
289 | "xconfig": xconfig.FC6_XConfig, | ||
290 | "zerombr": zerombr.FC3_ZeroMbr, | ||
291 | "zfcp": zfcp.FC3_ZFCP, | ||
292 | }, | ||
293 | |||
294 | # based on f7 | ||
295 | F8: { | ||
296 | "auth": authconfig.FC3_Authconfig, | ||
297 | "authconfig": authconfig.FC3_Authconfig, | ||
298 | "autopart": autopart.FC3_AutoPart, | ||
299 | "autostep": autostep.FC3_AutoStep, | ||
300 | "bootloader": bootloader.F8_Bootloader, | ||
301 | "cdrom": method.FC6_Method, | ||
302 | "clearpart": clearpart.FC3_ClearPart, | ||
303 | "cmdline": displaymode.FC3_DisplayMode, | ||
304 | "device": device.F8_Device, | ||
305 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
306 | "dmraid": dmraid.FC6_DmRaid, | ||
307 | "driverdisk": driverdisk.FC4_DriverDisk, | ||
308 | "firewall": firewall.FC3_Firewall, | ||
309 | "firstboot": firstboot.FC3_Firstboot, | ||
310 | "graphical": displaymode.FC3_DisplayMode, | ||
311 | "halt": reboot.FC6_Reboot, | ||
312 | "harddrive": method.FC6_Method, | ||
313 | "ignoredisk": ignoredisk.F8_IgnoreDisk, | ||
314 | "install": upgrade.FC3_Upgrade, | ||
315 | "interactive": interactive.FC3_Interactive, | ||
316 | "iscsi": iscsi.FC6_Iscsi, | ||
317 | "iscsiname": iscsiname.FC6_IscsiName, | ||
318 | "keyboard": keyboard.FC3_Keyboard, | ||
319 | "lang": lang.FC3_Lang, | ||
320 | "logging": logging.FC6_Logging, | ||
321 | "logvol": logvol.FC4_LogVol, | ||
322 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
323 | "monitor": monitor.FC6_Monitor, | ||
324 | "multipath": multipath.FC6_MultiPath, | ||
325 | "network": network.F8_Network, | ||
326 | "nfs": method.FC6_Method, | ||
327 | "part": partition.FC4_Partition, | ||
328 | "partition": partition.FC4_Partition, | ||
329 | "poweroff": reboot.FC6_Reboot, | ||
330 | "raid": raid.F7_Raid, | ||
331 | "reboot": reboot.FC6_Reboot, | ||
332 | "repo": repo.F8_Repo, | ||
333 | "rootpw": rootpw.F8_RootPw, | ||
334 | "selinux": selinux.FC3_SELinux, | ||
335 | "services": services.FC6_Services, | ||
336 | "shutdown": reboot.FC6_Reboot, | ||
337 | "skipx": skipx.FC3_SkipX, | ||
338 | "text": displaymode.FC3_DisplayMode, | ||
339 | "timezone": timezone.FC6_Timezone, | ||
340 | "updates": updates.F7_Updates, | ||
341 | "upgrade": upgrade.FC3_Upgrade, | ||
342 | "url": method.FC6_Method, | ||
343 | "user": user.F8_User, | ||
344 | "vnc": vnc.FC6_Vnc, | ||
345 | "volgroup": volgroup.FC3_VolGroup, | ||
346 | "xconfig": xconfig.FC6_XConfig, | ||
347 | "zerombr": zerombr.FC3_ZeroMbr, | ||
348 | "zfcp": zfcp.FC3_ZFCP, | ||
349 | }, | ||
350 | |||
351 | # based on f8 | ||
352 | F9: { | ||
353 | "auth": authconfig.FC3_Authconfig, | ||
354 | "authconfig": authconfig.FC3_Authconfig, | ||
355 | "autopart": autopart.F9_AutoPart, | ||
356 | "autostep": autostep.FC3_AutoStep, | ||
357 | "bootloader": bootloader.F8_Bootloader, | ||
358 | "cdrom": method.FC6_Method, | ||
359 | "clearpart": clearpart.FC3_ClearPart, | ||
360 | "cmdline": displaymode.FC3_DisplayMode, | ||
361 | "device": device.F8_Device, | ||
362 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
363 | "dmraid": dmraid.FC6_DmRaid, | ||
364 | "driverdisk": driverdisk.FC4_DriverDisk, | ||
365 | "firewall": firewall.F9_Firewall, | ||
366 | "firstboot": firstboot.FC3_Firstboot, | ||
367 | "graphical": displaymode.FC3_DisplayMode, | ||
368 | "halt": reboot.FC6_Reboot, | ||
369 | "harddrive": method.FC6_Method, | ||
370 | "ignoredisk": ignoredisk.F8_IgnoreDisk, | ||
371 | "install": upgrade.FC3_Upgrade, | ||
372 | "interactive": interactive.FC3_Interactive, | ||
373 | "iscsi": iscsi.FC6_Iscsi, | ||
374 | "iscsiname": iscsiname.FC6_IscsiName, | ||
375 | "keyboard": keyboard.FC3_Keyboard, | ||
376 | "lang": lang.FC3_Lang, | ||
377 | "logging": logging.FC6_Logging, | ||
378 | "logvol": logvol.F9_LogVol, | ||
379 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
380 | "monitor": monitor.FC6_Monitor, | ||
381 | "multipath": multipath.FC6_MultiPath, | ||
382 | "network": network.F9_Network, | ||
383 | "nfs": method.FC6_Method, | ||
384 | "part": partition.F9_Partition, | ||
385 | "partition": partition.F9_Partition, | ||
386 | "poweroff": reboot.FC6_Reboot, | ||
387 | "raid": raid.F9_Raid, | ||
388 | "reboot": reboot.FC6_Reboot, | ||
389 | "repo": repo.F8_Repo, | ||
390 | "rootpw": rootpw.F8_RootPw, | ||
391 | "selinux": selinux.FC3_SELinux, | ||
392 | "services": services.FC6_Services, | ||
393 | "shutdown": reboot.FC6_Reboot, | ||
394 | "skipx": skipx.FC3_SkipX, | ||
395 | "text": displaymode.FC3_DisplayMode, | ||
396 | "timezone": timezone.FC6_Timezone, | ||
397 | "updates": updates.F7_Updates, | ||
398 | "upgrade": upgrade.FC3_Upgrade, | ||
399 | "url": method.FC6_Method, | ||
400 | "user": user.F8_User, | ||
401 | "vnc": vnc.F9_Vnc, | ||
402 | "volgroup": volgroup.FC3_VolGroup, | ||
403 | "xconfig": xconfig.F9_XConfig, | ||
404 | "zerombr": zerombr.F9_ZeroMbr, | ||
405 | "zfcp": zfcp.FC3_ZFCP, | ||
406 | }, | ||
407 | |||
408 | # based on f9 | ||
409 | F10: { | ||
410 | "auth": authconfig.FC3_Authconfig, | ||
411 | "authconfig": authconfig.FC3_Authconfig, | ||
412 | "autopart": autopart.F9_AutoPart, | ||
413 | "autostep": autostep.FC3_AutoStep, | ||
414 | "bootloader": bootloader.F8_Bootloader, | ||
415 | "cdrom": method.FC6_Method, | ||
416 | "clearpart": clearpart.FC3_ClearPart, | ||
417 | "cmdline": displaymode.FC3_DisplayMode, | ||
418 | "device": device.F8_Device, | ||
419 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
420 | "dmraid": dmraid.FC6_DmRaid, | ||
421 | "driverdisk": driverdisk.FC4_DriverDisk, | ||
422 | "firewall": firewall.F10_Firewall, | ||
423 | "firstboot": firstboot.FC3_Firstboot, | ||
424 | "graphical": displaymode.FC3_DisplayMode, | ||
425 | "halt": reboot.FC6_Reboot, | ||
426 | "harddrive": method.FC6_Method, | ||
427 | "ignoredisk": ignoredisk.F8_IgnoreDisk, | ||
428 | "install": upgrade.FC3_Upgrade, | ||
429 | "interactive": interactive.FC3_Interactive, | ||
430 | "iscsi": iscsi.F10_Iscsi, | ||
431 | "iscsiname": iscsiname.FC6_IscsiName, | ||
432 | "keyboard": keyboard.FC3_Keyboard, | ||
433 | "lang": lang.FC3_Lang, | ||
434 | "logging": logging.FC6_Logging, | ||
435 | "logvol": logvol.F9_LogVol, | ||
436 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
437 | "monitor": monitor.F10_Monitor, | ||
438 | "multipath": multipath.FC6_MultiPath, | ||
439 | "network": network.F9_Network, | ||
440 | "nfs": method.FC6_Method, | ||
441 | "part": partition.F9_Partition, | ||
442 | "partition": partition.F9_Partition, | ||
443 | "poweroff": reboot.FC6_Reboot, | ||
444 | "raid": raid.F9_Raid, | ||
445 | "reboot": reboot.FC6_Reboot, | ||
446 | "repo": repo.F8_Repo, | ||
447 | "rescue": rescue.F10_Rescue, | ||
448 | "rootpw": rootpw.F8_RootPw, | ||
449 | "selinux": selinux.FC3_SELinux, | ||
450 | "services": services.FC6_Services, | ||
451 | "shutdown": reboot.FC6_Reboot, | ||
452 | "skipx": skipx.FC3_SkipX, | ||
453 | "text": displaymode.FC3_DisplayMode, | ||
454 | "timezone": timezone.FC6_Timezone, | ||
455 | "updates": updates.F7_Updates, | ||
456 | "upgrade": upgrade.FC3_Upgrade, | ||
457 | "url": method.FC6_Method, | ||
458 | "user": user.F8_User, | ||
459 | "vnc": vnc.F9_Vnc, | ||
460 | "volgroup": volgroup.FC3_VolGroup, | ||
461 | "xconfig": xconfig.F10_XConfig, | ||
462 | "zerombr": zerombr.F9_ZeroMbr, | ||
463 | "zfcp": zfcp.FC3_ZFCP, | ||
464 | }, | ||
465 | |||
466 | # based on f10 | ||
467 | F11: { | ||
468 | "auth": authconfig.FC3_Authconfig, | ||
469 | "authconfig": authconfig.FC3_Authconfig, | ||
470 | "autopart": autopart.F9_AutoPart, | ||
471 | "autostep": autostep.FC3_AutoStep, | ||
472 | "bootloader": bootloader.F8_Bootloader, | ||
473 | "cdrom": method.FC6_Method, | ||
474 | "clearpart": clearpart.FC3_ClearPart, | ||
475 | "cmdline": displaymode.FC3_DisplayMode, | ||
476 | "device": device.F8_Device, | ||
477 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
478 | "dmraid": dmraid.FC6_DmRaid, | ||
479 | "driverdisk": driverdisk.FC4_DriverDisk, | ||
480 | "firewall": firewall.F10_Firewall, | ||
481 | "firstboot": firstboot.FC3_Firstboot, | ||
482 | "graphical": displaymode.FC3_DisplayMode, | ||
483 | "halt": reboot.FC6_Reboot, | ||
484 | "harddrive": method.FC6_Method, | ||
485 | "ignoredisk": ignoredisk.F8_IgnoreDisk, | ||
486 | "install": upgrade.F11_Upgrade, | ||
487 | "interactive": interactive.FC3_Interactive, | ||
488 | "iscsi": iscsi.F10_Iscsi, | ||
489 | "iscsiname": iscsiname.FC6_IscsiName, | ||
490 | "keyboard": keyboard.FC3_Keyboard, | ||
491 | "lang": lang.FC3_Lang, | ||
492 | "logging": logging.FC6_Logging, | ||
493 | "logvol": logvol.F9_LogVol, | ||
494 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
495 | "monitor": monitor.F10_Monitor, | ||
496 | "multipath": multipath.FC6_MultiPath, | ||
497 | "network": network.F9_Network, | ||
498 | "nfs": method.FC6_Method, | ||
499 | "part": partition.F11_Partition, | ||
500 | "partition": partition.F11_Partition, | ||
501 | "poweroff": reboot.FC6_Reboot, | ||
502 | "raid": raid.F9_Raid, | ||
503 | "reboot": reboot.FC6_Reboot, | ||
504 | "repo": repo.F11_Repo, | ||
505 | "rescue": rescue.F10_Rescue, | ||
506 | "rootpw": rootpw.F8_RootPw, | ||
507 | "selinux": selinux.FC3_SELinux, | ||
508 | "services": services.FC6_Services, | ||
509 | "shutdown": reboot.FC6_Reboot, | ||
510 | "skipx": skipx.FC3_SkipX, | ||
511 | "text": displaymode.FC3_DisplayMode, | ||
512 | "timezone": timezone.FC6_Timezone, | ||
513 | "updates": updates.F7_Updates, | ||
514 | "upgrade": upgrade.F11_Upgrade, | ||
515 | "url": method.FC6_Method, | ||
516 | "user": user.F8_User, | ||
517 | "vnc": vnc.F9_Vnc, | ||
518 | "volgroup": volgroup.FC3_VolGroup, | ||
519 | "xconfig": xconfig.F10_XConfig, | ||
520 | "zerombr": zerombr.F9_ZeroMbr, | ||
521 | "zfcp": zfcp.FC3_ZFCP, | ||
522 | }, | ||
523 | |||
524 | # based on f11 | ||
525 | F12: { | ||
526 | "auth": authconfig.FC3_Authconfig, | ||
527 | "authconfig": authconfig.FC3_Authconfig, | ||
528 | "autopart": autopart.F12_AutoPart, | ||
529 | "autostep": autostep.FC3_AutoStep, | ||
530 | "bootloader": bootloader.F12_Bootloader, | ||
531 | "cdrom": method.FC6_Method, | ||
532 | "clearpart": clearpart.FC3_ClearPart, | ||
533 | "cmdline": displaymode.FC3_DisplayMode, | ||
534 | "device": device.F8_Device, | ||
535 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
536 | "dmraid": dmraid.FC6_DmRaid, | ||
537 | "driverdisk": driverdisk.F12_DriverDisk, | ||
538 | "fcoe": fcoe.F12_Fcoe, | ||
539 | "firewall": firewall.F10_Firewall, | ||
540 | "firstboot": firstboot.FC3_Firstboot, | ||
541 | "graphical": displaymode.FC3_DisplayMode, | ||
542 | "group": group.F12_Group, | ||
543 | "halt": reboot.FC6_Reboot, | ||
544 | "harddrive": method.FC6_Method, | ||
545 | "ignoredisk": ignoredisk.F8_IgnoreDisk, | ||
546 | "install": upgrade.F11_Upgrade, | ||
547 | "interactive": interactive.FC3_Interactive, | ||
548 | "iscsi": iscsi.F10_Iscsi, | ||
549 | "iscsiname": iscsiname.FC6_IscsiName, | ||
550 | "keyboard": keyboard.FC3_Keyboard, | ||
551 | "lang": lang.FC3_Lang, | ||
552 | "logging": logging.FC6_Logging, | ||
553 | "logvol": logvol.F12_LogVol, | ||
554 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
555 | "monitor": monitor.F10_Monitor, | ||
556 | "multipath": multipath.FC6_MultiPath, | ||
557 | "network": network.F9_Network, | ||
558 | "nfs": method.FC6_Method, | ||
559 | "part": partition.F12_Partition, | ||
560 | "partition": partition.F12_Partition, | ||
561 | "poweroff": reboot.FC6_Reboot, | ||
562 | "raid": raid.F12_Raid, | ||
563 | "reboot": reboot.FC6_Reboot, | ||
564 | "repo": repo.F11_Repo, | ||
565 | "rescue": rescue.F10_Rescue, | ||
566 | "rootpw": rootpw.F8_RootPw, | ||
567 | "selinux": selinux.FC3_SELinux, | ||
568 | "services": services.FC6_Services, | ||
569 | "shutdown": reboot.FC6_Reboot, | ||
570 | "skipx": skipx.FC3_SkipX, | ||
571 | "text": displaymode.FC3_DisplayMode, | ||
572 | "timezone": timezone.FC6_Timezone, | ||
573 | "updates": updates.F7_Updates, | ||
574 | "upgrade": upgrade.F11_Upgrade, | ||
575 | "url": method.FC6_Method, | ||
576 | "user": user.F12_User, | ||
577 | "vnc": vnc.F9_Vnc, | ||
578 | "volgroup": volgroup.FC3_VolGroup, | ||
579 | "xconfig": xconfig.F10_XConfig, | ||
580 | "zerombr": zerombr.F9_ZeroMbr, | ||
581 | "zfcp": zfcp.F12_ZFCP, | ||
582 | }, | ||
583 | |||
584 | # based on f12 | ||
585 | F13: { | ||
586 | "auth": authconfig.FC3_Authconfig, | ||
587 | "authconfig": authconfig.FC3_Authconfig, | ||
588 | "autopart": autopart.F12_AutoPart, | ||
589 | "autostep": autostep.FC3_AutoStep, | ||
590 | "bootloader": bootloader.F12_Bootloader, | ||
591 | "cdrom": method.F13_Method, | ||
592 | "clearpart": clearpart.FC3_ClearPart, | ||
593 | "cmdline": displaymode.FC3_DisplayMode, | ||
594 | "device": device.F8_Device, | ||
595 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
596 | "dmraid": dmraid.FC6_DmRaid, | ||
597 | "driverdisk": driverdisk.F12_DriverDisk, | ||
598 | "fcoe": fcoe.F13_Fcoe, | ||
599 | "firewall": firewall.F10_Firewall, | ||
600 | "firstboot": firstboot.FC3_Firstboot, | ||
601 | "graphical": displaymode.FC3_DisplayMode, | ||
602 | "group": group.F12_Group, | ||
603 | "halt": reboot.FC6_Reboot, | ||
604 | "harddrive": method.F13_Method, | ||
605 | "ignoredisk": ignoredisk.F8_IgnoreDisk, | ||
606 | "install": upgrade.F11_Upgrade, | ||
607 | "interactive": interactive.FC3_Interactive, | ||
608 | "iscsi": iscsi.F10_Iscsi, | ||
609 | "iscsiname": iscsiname.FC6_IscsiName, | ||
610 | "keyboard": keyboard.FC3_Keyboard, | ||
611 | "lang": lang.FC3_Lang, | ||
612 | "logging": logging.FC6_Logging, | ||
613 | "logvol": logvol.F12_LogVol, | ||
614 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
615 | "monitor": monitor.F10_Monitor, | ||
616 | "multipath": multipath.FC6_MultiPath, | ||
617 | "network": network.F9_Network, | ||
618 | "nfs": method.F13_Method, | ||
619 | "part": partition.F12_Partition, | ||
620 | "partition": partition.F12_Partition, | ||
621 | "poweroff": reboot.FC6_Reboot, | ||
622 | "raid": raid.F13_Raid, | ||
623 | "reboot": reboot.FC6_Reboot, | ||
624 | "repo": repo.F13_Repo, | ||
625 | "rescue": rescue.F10_Rescue, | ||
626 | "rootpw": rootpw.F8_RootPw, | ||
627 | "selinux": selinux.FC3_SELinux, | ||
628 | "services": services.FC6_Services, | ||
629 | "shutdown": reboot.FC6_Reboot, | ||
630 | "skipx": skipx.FC3_SkipX, | ||
631 | "sshpw": sshpw.F13_SshPw, | ||
632 | "text": displaymode.FC3_DisplayMode, | ||
633 | "timezone": timezone.FC6_Timezone, | ||
634 | "updates": updates.F7_Updates, | ||
635 | "upgrade": upgrade.F11_Upgrade, | ||
636 | "url": method.F13_Method, | ||
637 | "user": user.F12_User, | ||
638 | "vnc": vnc.F9_Vnc, | ||
639 | "volgroup": volgroup.FC3_VolGroup, | ||
640 | "xconfig": xconfig.F10_XConfig, | ||
641 | "zerombr": zerombr.F9_ZeroMbr, | ||
642 | "zfcp": zfcp.F12_ZFCP, | ||
643 | }, | ||
644 | |||
645 | # based on f13 | ||
646 | F14: { | ||
647 | "auth": authconfig.FC3_Authconfig, | ||
648 | "authconfig": authconfig.FC3_Authconfig, | ||
649 | "autopart": autopart.F12_AutoPart, | ||
650 | "autostep": autostep.FC3_AutoStep, | ||
651 | "bootloader": bootloader.F14_Bootloader, | ||
652 | "cdrom": method.F14_Method, | ||
653 | "clearpart": clearpart.FC3_ClearPart, | ||
654 | "cmdline": displaymode.FC3_DisplayMode, | ||
655 | "device": device.F8_Device, | ||
656 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
657 | "dmraid": dmraid.FC6_DmRaid, | ||
658 | "driverdisk": driverdisk.F14_DriverDisk, | ||
659 | "fcoe": fcoe.F13_Fcoe, | ||
660 | "firewall": firewall.F14_Firewall, | ||
661 | "firstboot": firstboot.FC3_Firstboot, | ||
662 | "graphical": displaymode.FC3_DisplayMode, | ||
663 | "group": group.F12_Group, | ||
664 | "halt": reboot.FC6_Reboot, | ||
665 | "harddrive": method.F14_Method, | ||
666 | "ignoredisk": ignoredisk.F14_IgnoreDisk, | ||
667 | "install": upgrade.F11_Upgrade, | ||
668 | "interactive": interactive.F14_Interactive, | ||
669 | "iscsi": iscsi.F10_Iscsi, | ||
670 | "iscsiname": iscsiname.FC6_IscsiName, | ||
671 | "keyboard": keyboard.FC3_Keyboard, | ||
672 | "lang": lang.FC3_Lang, | ||
673 | "logging": logging.FC6_Logging, | ||
674 | "logvol": logvol.F14_LogVol, | ||
675 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
676 | "monitor": monitor.F10_Monitor, | ||
677 | "multipath": multipath.FC6_MultiPath, | ||
678 | "network": network.F9_Network, | ||
679 | "nfs": method.F14_Method, | ||
680 | "part": partition.F14_Partition, | ||
681 | "partition": partition.F14_Partition, | ||
682 | "poweroff": reboot.FC6_Reboot, | ||
683 | "raid": raid.F14_Raid, | ||
684 | "reboot": reboot.FC6_Reboot, | ||
685 | "repo": repo.F14_Repo, | ||
686 | "rescue": rescue.F10_Rescue, | ||
687 | "rootpw": rootpw.F8_RootPw, | ||
688 | "selinux": selinux.FC3_SELinux, | ||
689 | "services": services.FC6_Services, | ||
690 | "shutdown": reboot.FC6_Reboot, | ||
691 | "skipx": skipx.FC3_SkipX, | ||
692 | "sshpw": sshpw.F13_SshPw, | ||
693 | "text": displaymode.FC3_DisplayMode, | ||
694 | "timezone": timezone.FC6_Timezone, | ||
695 | "updates": updates.F7_Updates, | ||
696 | "upgrade": upgrade.F11_Upgrade, | ||
697 | "url": method.F14_Method, | ||
698 | "user": user.F12_User, | ||
699 | "vnc": vnc.F9_Vnc, | ||
700 | "volgroup": volgroup.FC3_VolGroup, | ||
701 | "xconfig": xconfig.F14_XConfig, | ||
702 | "zerombr": zerombr.F9_ZeroMbr, | ||
703 | "zfcp": zfcp.F14_ZFCP, | ||
704 | }, | ||
705 | |||
706 | # based on f14 | ||
707 | F15: { | ||
708 | "auth": authconfig.FC3_Authconfig, | ||
709 | "authconfig": authconfig.FC3_Authconfig, | ||
710 | "autopart": autopart.F12_AutoPart, | ||
711 | "autostep": autostep.FC3_AutoStep, | ||
712 | "bootloader": bootloader.F15_Bootloader, | ||
713 | "cdrom": method.F14_Method, | ||
714 | "clearpart": clearpart.FC3_ClearPart, | ||
715 | "cmdline": displaymode.FC3_DisplayMode, | ||
716 | "device": device.F8_Device, | ||
717 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
718 | "dmraid": dmraid.FC6_DmRaid, | ||
719 | "driverdisk": driverdisk.F14_DriverDisk, | ||
720 | "fcoe": fcoe.F13_Fcoe, | ||
721 | "firewall": firewall.F14_Firewall, | ||
722 | "firstboot": firstboot.FC3_Firstboot, | ||
723 | "graphical": displaymode.FC3_DisplayMode, | ||
724 | "group": group.F12_Group, | ||
725 | "halt": reboot.FC6_Reboot, | ||
726 | "harddrive": method.F14_Method, | ||
727 | "ignoredisk": ignoredisk.F14_IgnoreDisk, | ||
728 | "install": upgrade.F11_Upgrade, | ||
729 | "iscsi": iscsi.F10_Iscsi, | ||
730 | "iscsiname": iscsiname.FC6_IscsiName, | ||
731 | "keyboard": keyboard.FC3_Keyboard, | ||
732 | "lang": lang.FC3_Lang, | ||
733 | "logging": logging.FC6_Logging, | ||
734 | "logvol": logvol.F15_LogVol, | ||
735 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
736 | "monitor": monitor.F10_Monitor, | ||
737 | "multipath": multipath.FC6_MultiPath, | ||
738 | "network": network.F9_Network, | ||
739 | "nfs": method.F14_Method, | ||
740 | "part": partition.F14_Partition, | ||
741 | "partition": partition.F14_Partition, | ||
742 | "poweroff": reboot.FC6_Reboot, | ||
743 | "raid": raid.F15_Raid, | ||
744 | "reboot": reboot.FC6_Reboot, | ||
745 | "repo": repo.F15_Repo, | ||
746 | "rescue": rescue.F10_Rescue, | ||
747 | "rootpw": rootpw.F8_RootPw, | ||
748 | "selinux": selinux.FC3_SELinux, | ||
749 | "services": services.FC6_Services, | ||
750 | "shutdown": reboot.FC6_Reboot, | ||
751 | "skipx": skipx.FC3_SkipX, | ||
752 | "sshpw": sshpw.F13_SshPw, | ||
753 | "text": displaymode.FC3_DisplayMode, | ||
754 | "timezone": timezone.FC6_Timezone, | ||
755 | "updates": updates.F7_Updates, | ||
756 | "upgrade": upgrade.F11_Upgrade, | ||
757 | "url": method.F14_Method, | ||
758 | "user": user.F12_User, | ||
759 | "vnc": vnc.F9_Vnc, | ||
760 | "volgroup": volgroup.FC3_VolGroup, | ||
761 | "xconfig": xconfig.F14_XConfig, | ||
762 | "zerombr": zerombr.F9_ZeroMbr, | ||
763 | "zfcp": zfcp.F14_ZFCP, | ||
764 | }, | ||
765 | |||
766 | # based on f15 | ||
767 | F16: { | ||
768 | "auth": authconfig.FC3_Authconfig, | ||
769 | "authconfig": authconfig.FC3_Authconfig, | ||
770 | "autopart": autopart.F12_AutoPart, | ||
771 | "autostep": autostep.FC3_AutoStep, | ||
772 | "bootloader": bootloader.F15_Bootloader, | ||
773 | "cdrom": method.F14_Method, | ||
774 | "clearpart": clearpart.FC3_ClearPart, | ||
775 | "cmdline": displaymode.FC3_DisplayMode, | ||
776 | "device": device.F8_Device, | ||
777 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
778 | "dmraid": dmraid.FC6_DmRaid, | ||
779 | "driverdisk": driverdisk.F14_DriverDisk, | ||
780 | "fcoe": fcoe.F13_Fcoe, | ||
781 | "firewall": firewall.F14_Firewall, | ||
782 | "firstboot": firstboot.FC3_Firstboot, | ||
783 | "graphical": displaymode.FC3_DisplayMode, | ||
784 | "group": group.F12_Group, | ||
785 | "halt": reboot.FC6_Reboot, | ||
786 | "harddrive": method.F14_Method, | ||
787 | "ignoredisk": ignoredisk.F14_IgnoreDisk, | ||
788 | "install": upgrade.F11_Upgrade, | ||
789 | "iscsi": iscsi.F10_Iscsi, | ||
790 | "iscsiname": iscsiname.FC6_IscsiName, | ||
791 | "keyboard": keyboard.FC3_Keyboard, | ||
792 | "lang": lang.FC3_Lang, | ||
793 | "logging": logging.FC6_Logging, | ||
794 | "logvol": logvol.F15_LogVol, | ||
795 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
796 | "monitor": monitor.F10_Monitor, | ||
797 | "multipath": multipath.FC6_MultiPath, | ||
798 | "network": network.F16_Network, | ||
799 | "nfs": method.F14_Method, | ||
800 | "part": partition.F14_Partition, | ||
801 | "partition": partition.F14_Partition, | ||
802 | "poweroff": reboot.FC6_Reboot, | ||
803 | "raid": raid.F15_Raid, | ||
804 | "reboot": reboot.FC6_Reboot, | ||
805 | "repo": repo.F15_Repo, | ||
806 | "rescue": rescue.F10_Rescue, | ||
807 | "rootpw": rootpw.F8_RootPw, | ||
808 | "selinux": selinux.FC3_SELinux, | ||
809 | "services": services.FC6_Services, | ||
810 | "shutdown": reboot.FC6_Reboot, | ||
811 | "skipx": skipx.FC3_SkipX, | ||
812 | "sshpw": sshpw.F13_SshPw, | ||
813 | "text": displaymode.FC3_DisplayMode, | ||
814 | "timezone": timezone.FC6_Timezone, | ||
815 | "updates": updates.F7_Updates, | ||
816 | "upgrade": upgrade.F11_Upgrade, | ||
817 | "url": method.F14_Method, | ||
818 | "user": user.F12_User, | ||
819 | "vnc": vnc.F9_Vnc, | ||
820 | "volgroup": volgroup.FC3_VolGroup, | ||
821 | "xconfig": xconfig.F14_XConfig, | ||
822 | "zerombr": zerombr.F9_ZeroMbr, | ||
823 | "zfcp": zfcp.F14_ZFCP, | ||
824 | }, | ||
825 | |||
826 | # based on fc1 | ||
827 | RHEL3: { | ||
828 | "auth": authconfig.FC3_Authconfig, | ||
829 | "authconfig": authconfig.FC3_Authconfig, | ||
830 | "autopart": autopart.FC3_AutoPart, | ||
831 | "autostep": autostep.FC3_AutoStep, | ||
832 | "bootloader": bootloader.FC3_Bootloader, | ||
833 | "cdrom": method.FC3_Method, | ||
834 | "clearpart": clearpart.FC3_ClearPart, | ||
835 | "cmdline": displaymode.FC3_DisplayMode, | ||
836 | "device": device.FC3_Device, | ||
837 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
838 | "driverdisk": driverdisk.FC3_DriverDisk, | ||
839 | "firewall": firewall.FC3_Firewall, | ||
840 | "firstboot": firstboot.FC3_Firstboot, | ||
841 | "graphical": displaymode.FC3_DisplayMode, | ||
842 | "halt": reboot.FC3_Reboot, | ||
843 | "harddrive": method.FC3_Method, | ||
844 | "ignoredisk": ignoredisk.FC3_IgnoreDisk, | ||
845 | "install": upgrade.FC3_Upgrade, | ||
846 | "interactive": interactive.FC3_Interactive, | ||
847 | "keyboard": keyboard.FC3_Keyboard, | ||
848 | "lang": lang.FC3_Lang, | ||
849 | "langsupport": langsupport.FC3_LangSupport, | ||
850 | "lilo": bootloader.FC3_Bootloader, | ||
851 | "lilocheck": lilocheck.FC3_LiloCheck, | ||
852 | "logvol": logvol.FC3_LogVol, | ||
853 | "monitor": monitor.FC3_Monitor, | ||
854 | "mouse": mouse.RHEL3_Mouse, | ||
855 | "network": network.FC3_Network, | ||
856 | "nfs": method.FC3_Method, | ||
857 | "part": partition.FC3_Partition, | ||
858 | "partition": partition.FC3_Partition, | ||
859 | "poweroff": reboot.FC3_Reboot, | ||
860 | "raid": raid.FC3_Raid, | ||
861 | "reboot": reboot.FC3_Reboot, | ||
862 | "rootpw": rootpw.FC3_RootPw, | ||
863 | "shutdown": reboot.FC3_Reboot, | ||
864 | "skipx": skipx.FC3_SkipX, | ||
865 | "text": displaymode.FC3_DisplayMode, | ||
866 | "timezone": timezone.FC3_Timezone, | ||
867 | "upgrade": upgrade.FC3_Upgrade, | ||
868 | "url": method.FC3_Method, | ||
869 | "vnc": vnc.FC3_Vnc, | ||
870 | "volgroup": volgroup.FC3_VolGroup, | ||
871 | "xconfig": xconfig.FC3_XConfig, | ||
872 | "zerombr": zerombr.FC3_ZeroMbr, | ||
873 | }, | ||
874 | |||
875 | # based on fc3 | ||
876 | RHEL4: { | ||
877 | "auth": authconfig.FC3_Authconfig, | ||
878 | "authconfig": authconfig.FC3_Authconfig, | ||
879 | "autopart": autopart.FC3_AutoPart, | ||
880 | "autostep": autostep.FC3_AutoStep, | ||
881 | "bootloader": bootloader.FC3_Bootloader, | ||
882 | "cdrom": method.FC3_Method, | ||
883 | "clearpart": clearpart.FC3_ClearPart, | ||
884 | "cmdline": displaymode.FC3_DisplayMode, | ||
885 | "device": device.FC3_Device, | ||
886 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
887 | "driverdisk": driverdisk.FC4_DriverDisk, | ||
888 | "firewall": firewall.FC3_Firewall, | ||
889 | "firstboot": firstboot.FC3_Firstboot, | ||
890 | "graphical": displaymode.FC3_DisplayMode, | ||
891 | "halt": reboot.FC3_Reboot, | ||
892 | "harddrive": method.FC3_Method, | ||
893 | "ignoredisk": ignoredisk.F8_IgnoreDisk, | ||
894 | "install": upgrade.FC3_Upgrade, | ||
895 | "interactive": interactive.FC3_Interactive, | ||
896 | "keyboard": keyboard.FC3_Keyboard, | ||
897 | "lang": lang.FC3_Lang, | ||
898 | "langsupport": langsupport.FC3_LangSupport, | ||
899 | "lilo": bootloader.FC3_Bootloader, | ||
900 | "lilocheck": lilocheck.FC3_LiloCheck, | ||
901 | "logvol": logvol.FC3_LogVol, | ||
902 | "monitor": monitor.FC3_Monitor, | ||
903 | "mouse": mouse.FC3_Mouse, | ||
904 | "network": network.RHEL4_Network, | ||
905 | "nfs": method.FC3_Method, | ||
906 | "part": partition.FC3_Partition, | ||
907 | "partition": partition.FC3_Partition, | ||
908 | "poweroff": reboot.FC3_Reboot, | ||
909 | "raid": raid.FC3_Raid, | ||
910 | "reboot": reboot.FC3_Reboot, | ||
911 | "rootpw": rootpw.FC3_RootPw, | ||
912 | "selinux": selinux.FC3_SELinux, | ||
913 | "shutdown": reboot.FC3_Reboot, | ||
914 | "skipx": skipx.FC3_SkipX, | ||
915 | "text": displaymode.FC3_DisplayMode, | ||
916 | "timezone": timezone.FC3_Timezone, | ||
917 | "upgrade": upgrade.FC3_Upgrade, | ||
918 | "url": method.FC3_Method, | ||
919 | "vnc": vnc.FC3_Vnc, | ||
920 | "volgroup": volgroup.FC3_VolGroup, | ||
921 | "xconfig": xconfig.FC3_XConfig, | ||
922 | "zerombr": zerombr.FC3_ZeroMbr, | ||
923 | "zfcp": zfcp.FC3_ZFCP, | ||
924 | }, | ||
925 | |||
926 | # based on fc6 | ||
927 | RHEL5: { | ||
928 | "auth": authconfig.FC3_Authconfig, | ||
929 | "authconfig": authconfig.FC3_Authconfig, | ||
930 | "autopart": autopart.F9_AutoPart, | ||
931 | "autostep": autostep.FC3_AutoStep, | ||
932 | "bootloader": bootloader.RHEL5_Bootloader, | ||
933 | "cdrom": method.FC6_Method, | ||
934 | "clearpart": clearpart.FC3_ClearPart, | ||
935 | "cmdline": displaymode.FC3_DisplayMode, | ||
936 | "device": device.FC3_Device, | ||
937 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
938 | "dmraid": dmraid.FC6_DmRaid, | ||
939 | "driverdisk": driverdisk.F12_DriverDisk, | ||
940 | "firewall": firewall.FC3_Firewall, | ||
941 | "firstboot": firstboot.FC3_Firstboot, | ||
942 | "graphical": displaymode.FC3_DisplayMode, | ||
943 | "halt": reboot.FC6_Reboot, | ||
944 | "harddrive": method.FC6_Method, | ||
945 | "ignoredisk": ignoredisk.F8_IgnoreDisk, | ||
946 | "install": upgrade.FC3_Upgrade, | ||
947 | "interactive": interactive.FC3_Interactive, | ||
948 | "iscsi": iscsi.FC6_Iscsi, | ||
949 | "iscsiname": iscsiname.FC6_IscsiName, | ||
950 | "key": key.RHEL5_Key, | ||
951 | "keyboard": keyboard.FC3_Keyboard, | ||
952 | "lang": lang.FC3_Lang, | ||
953 | "langsupport": langsupport.FC5_LangSupport, | ||
954 | "logging": logging.FC6_Logging, | ||
955 | "logvol": logvol.RHEL5_LogVol, | ||
956 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
957 | "monitor": monitor.FC6_Monitor, | ||
958 | "mouse": mouse.FC3_Mouse, | ||
959 | "multipath": multipath.FC6_MultiPath, | ||
960 | "network": network.RHEL5_Network, | ||
961 | "nfs": method.FC6_Method, | ||
962 | "part": partition.RHEL5_Partition, | ||
963 | "partition": partition.RHEL5_Partition, | ||
964 | "poweroff": reboot.FC6_Reboot, | ||
965 | "raid": raid.RHEL5_Raid, | ||
966 | "reboot": reboot.FC6_Reboot, | ||
967 | "repo": repo.FC6_Repo, | ||
968 | "rootpw": rootpw.FC3_RootPw, | ||
969 | "services": services.FC6_Services, | ||
970 | "selinux": selinux.FC3_SELinux, | ||
971 | "shutdown": reboot.FC6_Reboot, | ||
972 | "skipx": skipx.FC3_SkipX, | ||
973 | "text": displaymode.FC3_DisplayMode, | ||
974 | "timezone": timezone.FC6_Timezone, | ||
975 | "upgrade": upgrade.FC3_Upgrade, | ||
976 | "user": user.FC6_User, | ||
977 | "url": method.FC6_Method, | ||
978 | "vnc": vnc.FC6_Vnc, | ||
979 | "volgroup": volgroup.FC3_VolGroup, | ||
980 | "xconfig": xconfig.FC6_XConfig, | ||
981 | "zerombr": zerombr.FC3_ZeroMbr, | ||
982 | "zfcp": zfcp.FC3_ZFCP, | ||
983 | }, | ||
984 | |||
985 | # based on f13ish | ||
986 | RHEL6: { | ||
987 | "auth": authconfig.FC3_Authconfig, | ||
988 | "authconfig": authconfig.FC3_Authconfig, | ||
989 | "autopart": autopart.F12_AutoPart, | ||
990 | "autostep": autostep.FC3_AutoStep, | ||
991 | "bootloader": bootloader.RHEL6_Bootloader, | ||
992 | "cdrom": method.RHEL6_Method, | ||
993 | "clearpart": clearpart.FC3_ClearPart, | ||
994 | "cmdline": displaymode.FC3_DisplayMode, | ||
995 | "device": device.F8_Device, | ||
996 | "deviceprobe": deviceprobe.FC3_DeviceProbe, | ||
997 | "dmraid": dmraid.FC6_DmRaid, | ||
998 | "driverdisk": driverdisk.F12_DriverDisk, | ||
999 | "fcoe": fcoe.F13_Fcoe, | ||
1000 | "firewall": firewall.F10_Firewall, | ||
1001 | "firstboot": firstboot.FC3_Firstboot, | ||
1002 | "graphical": displaymode.FC3_DisplayMode, | ||
1003 | "group": group.F12_Group, | ||
1004 | "halt": reboot.FC6_Reboot, | ||
1005 | "harddrive": method.RHEL6_Method, | ||
1006 | "ignoredisk": ignoredisk.RHEL6_IgnoreDisk, | ||
1007 | "install": upgrade.F11_Upgrade, | ||
1008 | "interactive": interactive.FC3_Interactive, | ||
1009 | "iscsi": iscsi.F10_Iscsi, | ||
1010 | "iscsiname": iscsiname.FC6_IscsiName, | ||
1011 | "keyboard": keyboard.FC3_Keyboard, | ||
1012 | "lang": lang.FC3_Lang, | ||
1013 | "logging": logging.FC6_Logging, | ||
1014 | "logvol": logvol.F12_LogVol, | ||
1015 | "mediacheck": mediacheck.FC4_MediaCheck, | ||
1016 | "monitor": monitor.F10_Monitor, | ||
1017 | "multipath": multipath.FC6_MultiPath, | ||
1018 | "network": network.RHEL6_Network, | ||
1019 | "nfs": method.RHEL6_Method, | ||
1020 | "part": partition.F12_Partition, | ||
1021 | "partition": partition.F12_Partition, | ||
1022 | "poweroff": reboot.FC6_Reboot, | ||
1023 | "raid": raid.F13_Raid, | ||
1024 | "reboot": reboot.FC6_Reboot, | ||
1025 | "repo": repo.RHEL6_Repo, | ||
1026 | "rescue": rescue.F10_Rescue, | ||
1027 | "rootpw": rootpw.F8_RootPw, | ||
1028 | "selinux": selinux.FC3_SELinux, | ||
1029 | "services": services.FC6_Services, | ||
1030 | "shutdown": reboot.FC6_Reboot, | ||
1031 | "skipx": skipx.FC3_SkipX, | ||
1032 | "sshpw": sshpw.F13_SshPw, | ||
1033 | "text": displaymode.FC3_DisplayMode, | ||
1034 | "timezone": timezone.FC6_Timezone, | ||
1035 | "updates": updates.F7_Updates, | ||
1036 | "upgrade": upgrade.F11_Upgrade, | ||
1037 | "url": method.RHEL6_Method, | ||
1038 | "user": user.F12_User, | ||
1039 | "vnc": vnc.F9_Vnc, | ||
1040 | "volgroup": volgroup.FC3_VolGroup, | ||
1041 | "xconfig": xconfig.F10_XConfig, | ||
1042 | "zerombr": zerombr.F9_ZeroMbr, | ||
1043 | "zfcp": zfcp.F12_ZFCP, | ||
1044 | } | ||
1045 | } | ||
1046 | |||
# dataMap is keyed on kickstart syntax version, as provided by
# pykickstart.version.  Each sub-dict maps a data-object name to the class
# that provides it.  The mapping is bijective: every name maps to exactly
# one data class and every data class has a name — although more than one
# instance of each class may exist at runtime.
dataMap = {
    FC3: {
        "DriverDiskData": driverdisk.FC3_DriverDiskData,
        "LogVolData": logvol.FC3_LogVolData,
        "NetworkData": network.FC3_NetworkData,
        "PartData": partition.FC3_PartData,
        "RaidData": raid.FC3_RaidData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    FC4: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "LogVolData": logvol.FC4_LogVolData,
        "NetworkData": network.FC4_NetworkData,
        "PartData": partition.FC4_PartData,
        "RaidData": raid.FC4_RaidData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    FC5: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "LogVolData": logvol.FC4_LogVolData,
        "NetworkData": network.FC4_NetworkData,
        "PartData": partition.FC4_PartData,
        "RaidData": raid.FC5_RaidData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    FC6: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.FC6_IscsiData,
        "LogVolData": logvol.FC4_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.FC6_NetworkData,
        "PartData": partition.FC4_PartData,
        "RaidData": raid.FC5_RaidData,
        "RepoData": repo.FC6_RepoData,
        "UserData": user.FC6_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F7: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.FC6_IscsiData,
        "LogVolData": logvol.FC4_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.FC6_NetworkData,
        "PartData": partition.FC4_PartData,
        "RaidData": raid.F7_RaidData,
        "RepoData": repo.FC6_RepoData,
        "UserData": user.FC6_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F8: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.FC6_IscsiData,
        "LogVolData": logvol.FC4_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.FC4_PartData,
        "RaidData": raid.F7_RaidData,
        "RepoData": repo.F8_RepoData,
        "UserData": user.F8_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F9: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.FC6_IscsiData,
        "LogVolData": logvol.F9_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F9_PartData,
        "RaidData": raid.F9_RaidData,
        "RepoData": repo.F8_RepoData,
        "UserData": user.F8_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F10: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F9_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F9_PartData,
        "RaidData": raid.F9_RaidData,
        "RepoData": repo.F8_RepoData,
        "UserData": user.F8_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F11: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F9_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F11_PartData,
        "RaidData": raid.F9_RaidData,
        "RepoData": repo.F11_RepoData,
        "UserData": user.F8_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    F12: {
        "DriverDiskData": driverdisk.F12_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F12_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F12_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F12_PartData,
        "RaidData": raid.F12_RaidData,
        "RepoData": repo.F11_RepoData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F12_ZFCPData,
    },
    F13: {
        "DriverDiskData": driverdisk.F12_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F13_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F12_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F12_PartData,
        "RaidData": raid.F13_RaidData,
        "RepoData": repo.F13_RepoData,
        "SshPwData": sshpw.F13_SshPwData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F12_ZFCPData,
    },
    F14: {
        "DriverDiskData": driverdisk.F14_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F13_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F14_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F14_PartData,
        "RaidData": raid.F14_RaidData,
        "RepoData": repo.F14_RepoData,
        "SshPwData": sshpw.F13_SshPwData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F14_ZFCPData,
    },
    F15: {
        "DriverDiskData": driverdisk.F14_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F13_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F15_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F8_NetworkData,
        "PartData": partition.F14_PartData,
        "RaidData": raid.F15_RaidData,
        "RepoData": repo.F15_RepoData,
        "SshPwData": sshpw.F13_SshPwData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F14_ZFCPData,
    },
    # Identical to F15 except for the network data class.
    F16: {
        "DriverDiskData": driverdisk.F14_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F13_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F15_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.F16_NetworkData,
        "PartData": partition.F14_PartData,
        "RaidData": raid.F15_RaidData,
        "RepoData": repo.F15_RepoData,
        "SshPwData": sshpw.F13_SshPwData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F14_ZFCPData,
    },
    RHEL3: {
        "DriverDiskData": driverdisk.FC3_DriverDiskData,
        "LogVolData": logvol.FC3_LogVolData,
        "NetworkData": network.RHEL4_NetworkData,
        "PartData": partition.FC3_PartData,
        "RaidData": raid.FC3_RaidData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    RHEL4: {
        "DriverDiskData": driverdisk.FC4_DriverDiskData,
        "LogVolData": logvol.FC3_LogVolData,
        "NetworkData": network.RHEL4_NetworkData,
        "PartData": partition.FC3_PartData,
        "RaidData": raid.FC3_RaidData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    RHEL5: {
        "DriverDiskData": driverdisk.F12_DriverDiskData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "IscsiData": iscsi.FC6_IscsiData,
        "LogVolData": logvol.RHEL5_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.FC6_NetworkData,
        "PartData": partition.RHEL5_PartData,
        "RaidData": raid.RHEL5_RaidData,
        "RepoData": repo.FC6_RepoData,
        "UserData": user.FC6_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.FC3_ZFCPData,
    },
    RHEL6: {
        "DriverDiskData": driverdisk.F12_DriverDiskData,
        "DeviceData": device.F8_DeviceData,
        "DmRaidData": dmraid.FC6_DmRaidData,
        "FcoeData": fcoe.F13_FcoeData,
        "GroupData": group.F12_GroupData,
        "IscsiData": iscsi.F10_IscsiData,
        "LogVolData": logvol.F12_LogVolData,
        "MultiPathData": multipath.FC6_MultiPathData,
        "NetworkData": network.RHEL6_NetworkData,
        "PartData": partition.F12_PartData,
        "RaidData": raid.F13_RaidData,
        "RepoData": repo.RHEL6_RepoData,
        "SshPwData": sshpw.F13_SshPwData,
        "UserData": user.F12_UserData,
        "VolGroupData": volgroup.FC3_VolGroupData,
        "ZFCPData": zfcp.F12_ZFCPData,
    },
}
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/f10.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/f10.py new file mode 100644 index 0000000000..17c8211bbf --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/f10.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2008 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class F10Handler(BaseHandler):
    """Kickstart handler for the Fedora 10 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # F10 constant is provided by pykickstart.version.
    version = F10
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/f11.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/f11.py new file mode 100644 index 0000000000..d21aee3e8b --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/f11.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2008 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class F11Handler(BaseHandler):
    """Kickstart handler for the Fedora 11 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # F11 constant is provided by pykickstart.version.
    version = F11
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/f12.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/f12.py new file mode 100644 index 0000000000..cea3ecef6b --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/f12.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2009 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class F12Handler(BaseHandler):
    """Kickstart handler for the Fedora 12 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # F12 constant is provided by pykickstart.version.
    version = F12
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/f13.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/f13.py new file mode 100644 index 0000000000..b94c738f79 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/f13.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2009 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class F13Handler(BaseHandler):
    """Kickstart handler for the Fedora 13 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # F13 constant is provided by pykickstart.version.
    version = F13
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/f14.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/f14.py new file mode 100644 index 0000000000..478f75d15e --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/f14.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2010 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class F14Handler(BaseHandler):
    """Kickstart handler for the Fedora 14 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # F14 constant is provided by pykickstart.version.
    version = F14
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/f15.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/f15.py new file mode 100644 index 0000000000..12aecb4c1a --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/f15.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2010 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class F15Handler(BaseHandler):
    """Kickstart handler for the Fedora 15 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # F15 constant is provided by pykickstart.version.
    version = F15
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/f16.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/f16.py new file mode 100644 index 0000000000..3c52f8d754 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/f16.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2011 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class F16Handler(BaseHandler):
    """Kickstart handler for the Fedora 16 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # F16 constant is provided by pykickstart.version.
    version = F16
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/f7.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/f7.py new file mode 100644 index 0000000000..5e856ea983 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/f7.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class F7Handler(BaseHandler):
    """Kickstart handler for the Fedora 7 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # F7 constant is provided by pykickstart.version.
    version = F7
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/f8.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/f8.py new file mode 100644 index 0000000000..1a978810f4 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/f8.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class F8Handler(BaseHandler):
    """Kickstart handler for the Fedora 8 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # F8 constant is provided by pykickstart.version.
    version = F8
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/f9.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/f9.py new file mode 100644 index 0000000000..116f1b57c9 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/f9.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class F9Handler(BaseHandler):
    """Kickstart handler for the Fedora 9 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # F9 constant is provided by pykickstart.version.
    version = F9
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/fc3.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/fc3.py new file mode 100644 index 0000000000..a115dc2646 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/fc3.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class FC3Handler(BaseHandler):
    """Kickstart handler for the Fedora Core 3 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # FC3 constant is provided by pykickstart.version.
    version = FC3
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/fc4.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/fc4.py new file mode 100644 index 0000000000..fd47b732ef --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/fc4.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class FC4Handler(BaseHandler):
    """Kickstart handler for the Fedora Core 4 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # FC4 constant is provided by pykickstart.version.
    version = FC4
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/fc5.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/fc5.py new file mode 100644 index 0000000000..bcdc29d23a --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/fc5.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class FC5Handler(BaseHandler):
    """Kickstart handler for the Fedora Core 5 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # FC5 constant is provided by pykickstart.version.
    version = FC5
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/fc6.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/fc6.py new file mode 100644 index 0000000000..c83a929f84 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/fc6.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class FC6Handler(BaseHandler):
    """Kickstart handler for the Fedora Core 6 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # FC6 constant is provided by pykickstart.version.
    version = FC6
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel3.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel3.py new file mode 100644 index 0000000000..131763c2a8 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel3.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class RHEL3Handler(BaseHandler):
    """Kickstart handler for the RHEL 3 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # RHEL3 constant is provided by pykickstart.version.
    version = RHEL3
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel4.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel4.py new file mode 100644 index 0000000000..3496c43ea5 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel4.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class RHEL4Handler(BaseHandler):
    """Kickstart handler for the RHEL 4 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # RHEL4 constant is provided by pykickstart.version.
    version = RHEL4
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel5.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel5.py new file mode 100644 index 0000000000..abb7a8d36c --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel5.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class RHEL5Handler(BaseHandler):
    """Kickstart handler for the RHEL 5 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # RHEL5 constant is provided by pykickstart.version.
    version = RHEL5
diff --git a/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel6.py b/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel6.py new file mode 100644 index 0000000000..7202419780 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/handlers/rhel6.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2010 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | from pykickstart.base import * | ||
21 | from pykickstart.version import * | ||
22 | |||
class RHEL6Handler(BaseHandler):
    """Kickstart handler for the RHEL 6 syntax version.

    The only specialization is ``version``; BaseHandler presumably uses
    it to select the command set for this release -- confirm in
    pykickstart.base.
    """
    # RHEL6 constant is provided by pykickstart.version.
    version = RHEL6
diff --git a/scripts/lib/mic/3rdparty/pykickstart/ko.py b/scripts/lib/mic/3rdparty/pykickstart/ko.py new file mode 100644 index 0000000000..1350d19c70 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/ko.py | |||
@@ -0,0 +1,37 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2009 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | """ | ||
21 | Base classes for internal pykickstart use. | ||
22 | |||
23 | The module exports the following important classes: | ||
24 | |||
25 | KickstartObject - The base class for all classes in pykickstart | ||
26 | """ | ||
27 | |||
class KickstartObject(object):
    """Root of the pykickstart class hierarchy.

    Every other pykickstart class should derive from this one.  It
    defines no instance attributes of its own.
    """

    def __init__(self, *args, **kwargs):
        """Initialize a KickstartObject.

        All positional and keyword arguments are accepted and ignored,
        so cooperative ``__init__`` chains in subclasses do not break.
        """
        pass

    def __str__(self):
        """Stringify to the empty string; subclasses override this."""
        return ""
diff --git a/scripts/lib/mic/3rdparty/pykickstart/options.py b/scripts/lib/mic/3rdparty/pykickstart/options.py new file mode 100644 index 0000000000..341c5d7298 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/options.py | |||
@@ -0,0 +1,204 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2005, 2006, 2007 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | """ | ||
21 | Specialized option handling. | ||
22 | |||
23 | This module exports two classes: | ||
24 | |||
25 | KSOptionParser - A specialized subclass of OptionParser to be used | ||
26 | in BaseHandler subclasses. | ||
27 | |||
28 | KSOption - A specialized subclass of Option. | ||
29 | """ | ||
30 | import warnings | ||
31 | from copy import copy | ||
32 | from optparse import * | ||
33 | |||
34 | from constants import * | ||
35 | from errors import * | ||
36 | from version import * | ||
37 | |||
38 | import gettext | ||
39 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
40 | |||
41 | class KSOptionParser(OptionParser): | ||
42 | """A specialized subclass of optparse.OptionParser to handle extra option | ||
43 | attribute checking, work error reporting into the KickstartParseError | ||
44 | framework, and to turn off the default help. | ||
45 | """ | ||
46 | def exit(self, status=0, msg=None): | ||
47 | pass | ||
48 | |||
49 | def error(self, msg): | ||
50 | if self.lineno != None: | ||
51 | raise KickstartParseError, formatErrorMsg(self.lineno, msg=msg) | ||
52 | else: | ||
53 | raise KickstartParseError, msg | ||
54 | |||
55 | def keys(self): | ||
56 | retval = [] | ||
57 | |||
58 | for opt in self.option_list: | ||
59 | if opt not in retval: | ||
60 | retval.append(opt.dest) | ||
61 | |||
62 | return retval | ||
63 | |||
64 | def _init_parsing_state (self): | ||
65 | OptionParser._init_parsing_state(self) | ||
66 | self.option_seen = {} | ||
67 | |||
68 | def check_values (self, values, args): | ||
69 | def seen(self, option): | ||
70 | return self.option_seen.has_key(option) | ||
71 | |||
72 | def usedTooNew(self, option): | ||
73 | return option.introduced and option.introduced > self.version | ||
74 | |||
75 | def usedDeprecated(self, option): | ||
76 | return option.deprecated | ||
77 | |||
78 | def usedRemoved(self, option): | ||
79 | return option.removed and option.removed <= self.version | ||
80 | |||
81 | for option in filter(lambda o: isinstance(o, Option), self.option_list): | ||
82 | if option.required and not seen(self, option): | ||
83 | raise KickstartValueError, formatErrorMsg(self.lineno, _("Option %s is required") % option) | ||
84 | elif seen(self, option) and usedTooNew(self, option): | ||
85 | mapping = {"option": option, "intro": versionToString(option.introduced), | ||
86 | "version": versionToString(self.version)} | ||
87 | self.error(_("The %(option)s option was introduced in version %(intro)s, but you are using kickstart syntax version %(version)s.") % mapping) | ||
88 | elif seen(self, option) and usedRemoved(self, option): | ||
89 | mapping = {"option": option, "removed": versionToString(option.removed), | ||
90 | "version": versionToString(self.version)} | ||
91 | |||
92 | if option.removed == self.version: | ||
93 | self.error(_("The %(option)s option is no longer supported.") % mapping) | ||
94 | else: | ||
95 | self.error(_("The %(option)s option was removed in version %(removed)s, but you are using kickstart syntax version %(version)s.") % mapping) | ||
96 | elif seen(self, option) and usedDeprecated(self, option): | ||
97 | mapping = {"lineno": self.lineno, "option": option} | ||
98 | warnings.warn(_("Ignoring deprecated option on line %(lineno)s: The %(option)s option has been deprecated and no longer has any effect. It may be removed from future releases, which will result in a fatal error from kickstart. Please modify your kickstart file to remove this option.") % mapping, DeprecationWarning) | ||
99 | |||
100 | return (values, args) | ||
101 | |||
102 | def parse_args(self, *args, **kwargs): | ||
103 | if kwargs.has_key("lineno"): | ||
104 | self.lineno = kwargs.pop("lineno") | ||
105 | |||
106 | return OptionParser.parse_args(self, **kwargs) | ||
107 | |||
108 | def __init__(self, mapping=None, version=None): | ||
109 | """Create a new KSOptionParser instance. Each KickstartCommand | ||
110 | subclass should create one instance of KSOptionParser, providing | ||
111 | at least the lineno attribute. mapping and version are not required. | ||
112 | Instance attributes: | ||
113 | |||
114 | mapping -- A mapping from option strings to different values. | ||
115 | version -- The version of the kickstart syntax we are checking | ||
116 | against. | ||
117 | """ | ||
118 | OptionParser.__init__(self, option_class=KSOption, | ||
119 | add_help_option=False, | ||
120 | conflict_handler="resolve") | ||
121 | if mapping is None: | ||
122 | self.map = {} | ||
123 | else: | ||
124 | self.map = mapping | ||
125 | |||
126 | self.lineno = None | ||
127 | self.option_seen = {} | ||
128 | self.version = version | ||
129 | |||
130 | def _check_ksboolean(option, opt, value): | ||
131 | if value.lower() in ("on", "yes", "true", "1"): | ||
132 | return True | ||
133 | elif value.lower() in ("off", "no", "false", "0"): | ||
134 | return False | ||
135 | else: | ||
136 | mapping = {"opt": opt, "value": value} | ||
137 | raise OptionValueError(_("Option %(opt)s: invalid boolean value: %(value)r") % mapping) | ||
138 | |||
139 | def _check_string(option, opt, value): | ||
140 | if len(value) > 2 and value.startswith("--"): | ||
141 | mapping = {"opt": opt, "value": value} | ||
142 | raise OptionValueError(_("Option %(opt)s: invalid string value: %(value)r") % mapping) | ||
143 | else: | ||
144 | return value | ||
145 | |||
146 | # Creates a new Option class that supports several new attributes: | ||
147 | # - required: any option with this attribute must be supplied or an exception | ||
148 | # is thrown | ||
149 | # - introduced: the kickstart syntax version that this option first appeared | ||
150 | # in - an exception will be raised if the option is used and | ||
151 | # the specified syntax version is less than the value of this | ||
152 | # attribute | ||
153 | # - deprecated: the kickstart syntax version that this option was deprecated | ||
154 | # in - a DeprecationWarning will be thrown if the option is | ||
155 | # used and the specified syntax version is greater than the | ||
156 | # value of this attribute | ||
157 | # - removed: the kickstart syntax version that this option was removed in - an | ||
158 | # exception will be raised if the option is used and the specified | ||
159 | # syntax version is greated than the value of this attribute | ||
160 | # Also creates a new type: | ||
161 | # - ksboolean: support various kinds of boolean values on an option | ||
162 | # And two new actions: | ||
163 | # - map : allows you to define an opt -> val mapping such that dest gets val | ||
164 | # when opt is seen | ||
165 | # - map_extend: allows you to define an opt -> [val1, ... valn] mapping such | ||
166 | # that dest gets a list of vals built up when opt is seen | ||
class KSOption (Option):
    """A specialized optparse Option supporting the extra attributes
       (required, introduced, deprecated, removed), the ksboolean type,
       and the map/map_extend actions described in the comment above.
    """
    ATTRS = Option.ATTRS + ['introduced', 'deprecated', 'removed', 'required']
    ACTIONS = Option.ACTIONS + ("map", "map_extend",)
    STORE_ACTIONS = Option.STORE_ACTIONS + ("map", "map_extend",)

    # Replace the stock "string" checker and add "ksboolean"; TYPE_CHECKER
    # is copied so the base Option class is not mutated.
    TYPES = Option.TYPES + ("ksboolean", "string")
    TYPE_CHECKER = copy(Option.TYPE_CHECKER)
    TYPE_CHECKER["ksboolean"] = _check_ksboolean
    TYPE_CHECKER["string"] = _check_string

    def _check_required(self):
        # A "required" flag only makes sense on an option that consumes a
        # value; anything else is a programming error in the command class.
        if self.required and not self.takes_value():
            raise OptionError(_("Required flag set for option that doesn't take a value"), self)

    # Make sure _check_required() is called from the constructor!
    CHECK_METHODS = Option.CHECK_METHODS + [_check_required]

    def process (self, opt, value, values, parser):
        Option.process(self, opt, value, values, parser)
        # Record that this option appeared so KSOptionParser.check_values()
        # can enforce the required/introduced/removed/deprecated rules.
        parser.option_seen[self] = 1

    # Override default take_action method to handle our custom actions.
    def take_action(self, action, dest, opt, value, values, parser):
        if action == "map":
            # dest receives the value the parser's map associates with opt.
            values.ensure_value(dest, parser.map[opt.lstrip('-')])
        elif action == "map_extend":
            # dest accumulates a list built from the mapped values.
            values.ensure_value(dest, []).extend(parser.map[opt.lstrip('-')])
        else:
            Option.take_action(self, action, dest, opt, value, values, parser)

    def takes_value(self):
        # Deprecated options don't take a value.
        return Option.takes_value(self) and not self.deprecated

    def __init__(self, *args, **kwargs):
        # Defaults for the extended attributes; Option.__init__ overrides
        # them from keyword arguments listed in ATTRS when supplied.
        self.deprecated = False
        self.required = False
        Option.__init__(self, *args, **kwargs)
diff --git a/scripts/lib/mic/3rdparty/pykickstart/parser.py b/scripts/lib/mic/3rdparty/pykickstart/parser.py new file mode 100644 index 0000000000..840a448673 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/parser.py | |||
@@ -0,0 +1,702 @@ | |||
1 | # | ||
2 | # parser.py: Kickstart file parser. | ||
3 | # | ||
4 | # Chris Lumens <clumens@redhat.com> | ||
5 | # | ||
6 | # Copyright 2005, 2006, 2007, 2008, 2011 Red Hat, Inc. | ||
7 | # | ||
8 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
9 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
10 | # General Public License v.2. This program is distributed in the hope that it | ||
11 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
12 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
13 | # See the GNU General Public License for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along with | ||
16 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
17 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
18 | # trademarks that are incorporated in the source code or documentation are not | ||
19 | # subject to the GNU General Public License and may only be used or replicated | ||
20 | # with the express permission of Red Hat, Inc. | ||
21 | # | ||
22 | """ | ||
23 | Main kickstart file processing module. | ||
24 | |||
25 | This module exports several important classes: | ||
26 | |||
27 | Script - Representation of a single %pre, %post, or %traceback script. | ||
28 | |||
29 | Packages - Representation of the %packages section. | ||
30 | |||
31 | KickstartParser - The kickstart file parser state machine. | ||
32 | """ | ||
33 | |||
34 | from collections import Iterator | ||
35 | import os | ||
36 | import shlex | ||
37 | import sys | ||
38 | import tempfile | ||
39 | from copy import copy | ||
40 | from optparse import * | ||
41 | from urlgrabber import urlread | ||
42 | import urlgrabber.grabber as grabber | ||
43 | |||
44 | import constants | ||
45 | from errors import KickstartError, KickstartParseError, KickstartValueError, formatErrorMsg | ||
46 | from ko import KickstartObject | ||
47 | from sections import * | ||
48 | import version | ||
49 | |||
50 | import gettext | ||
51 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
52 | |||
# States for the KickstartParser state machine.
STATE_END = "end"
STATE_COMMANDS = "commands"

# Module-level kickstart syntax version; KickstartParser.__init__ updates
# this global from its handler so Script/Packages __str__ output matches.
ver = version.DEVEL
57 | |||
def _preprocessStateMachine (lineIter):
    """Copy the lines produced by lineIter into a temporary file, expanding
       any %ksappend line by fetching the referenced URL and splicing its
       contents in.  Returns the path of the temporary file written.
    """
    l = None
    lineno = 0

    # Now open an output kickstart file that we are going to write to one
    # line at a time.
    (outF, outName) = tempfile.mkstemp("-ks.cfg", "", "/tmp")

    while True:
        try:
            l = lineIter.next()
        except StopIteration:
            break

        # At the end of the file?
        if l == "":
            break

        lineno += 1
        url = None

        ll = l.strip()
        if not ll.startswith("%ksappend"):
            # Ordinary line: copy it through unchanged.
            os.write(outF, l)
            continue

        # Try to pull down the remote file.
        try:
            ksurl = ll.split(' ')[1]
        except:
            raise KickstartParseError, formatErrorMsg(lineno, msg=_("Illegal url for %%ksappend: %s") % ll)

        try:
            url = grabber.urlopen(ksurl)
        except grabber.URLGrabError, e:
            raise KickstartError, formatErrorMsg(lineno, msg=_("Unable to open %%ksappend file: %s") % e.strerror)
        else:
            # Sanity check result. Sometimes FTP doesn't catch a file
            # is missing.
            # (The bare except also re-catches the KickstartError raised
            # just above and raises the same message, so behavior is the
            # same either way.)
            try:
                if url.size < 1:
                    raise KickstartError, formatErrorMsg(lineno, msg=_("Unable to open %%ksappend file"))
            except:
                raise KickstartError, formatErrorMsg(lineno, msg=_("Unable to open %%ksappend file"))

        # If that worked, write the remote file to the output kickstart
        # file in one burst. Then close everything up to get ready to
        # read ahead in the input file. This allows multiple %ksappend
        # lines to exist.
        if url is not None:
            os.write(outF, url.read())
            url.close()

    # All done - close the temp file and return its location.
    os.close(outF)
    return outName
114 | |||
def preprocessFromString (s):
    """Preprocess the kickstart file, provided as the string str. This
       method is currently only useful for handling %ksappend lines,
       which need to be fetched before the real kickstart parser can be
       run. Returns the location of the complete kickstart file.
    """
    # _preprocessStateMachine calls lineIter.next(), so it must be handed
    # the iterator itself (as preprocessKickstart does) -- passing the
    # bound i.next method, as before, raised AttributeError the moment
    # the state machine tried to call .next() on it.
    i = iter(s.splitlines(True) + [""])
    rc = _preprocessStateMachine (i)
    return rc
124 | |||
125 | def preprocessKickstart (f): | ||
126 | """Preprocess the kickstart file, given by the filename file. This | ||
127 | method is currently only useful for handling %ksappend lines, | ||
128 | which need to be fetched before the real kickstart parser can be | ||
129 | run. Returns the location of the complete kickstart file. | ||
130 | """ | ||
131 | try: | ||
132 | fh = urlopen(f) | ||
133 | except grabber.URLGrabError, e: | ||
134 | raise KickstartError, formatErrorMsg(0, msg=_("Unable to open input kickstart file: %s") % e.strerror) | ||
135 | |||
136 | rc = _preprocessStateMachine (iter(fh.readlines())) | ||
137 | fh.close() | ||
138 | return rc | ||
139 | |||
class PutBackIterator(Iterator):
    """An iterator wrapper that lets one value be pushed back, to be
    handed out again by the following next() call.
    """
    def __init__(self, iterable):
        self._iterable = iter(iterable)
        self._buf = None

    def __iter__(self):
        return self

    def put(self, s):
        # Stash s so the next call to next() yields it before resuming
        # the underlying iterator.
        self._buf = s

    def next(self):
        if not self._buf:
            return self._iterable.next()
        pushed = self._buf
        self._buf = None
        return pushed
158 | |||
159 | ### | ||
160 | ### SCRIPT HANDLING | ||
161 | ### | ||
class Script(KickstartObject):
    """Data representation of one kickstart script.  Subclass this when
       behavior beyond plain storage is needed (anaconda, for instance,
       adds a run method); most attributes here describe how the script
       is to be run.  The Version object keeps Script instances in a list.
    """
    def __init__(self, script, *args, **kwargs):
        """Build a Script from the given lines.  Keyword arguments map to
           instance attributes:

           errorOnFail -- Stop, display an error and reboot without running
                          further scripts if this one fails?
           inChroot    -- Run inside anaconda's chroot environment or not?
           interp      -- The program used to interpret this script.
           lineno      -- The line number the script starts on.
           logfile     -- Where all messages from the script are logged.
           type        -- One of the KS_SCRIPT_* constants from
                          pykickstart.constants.

           The script text itself is the joined positional argument.
        """
        KickstartObject.__init__(self, *args, **kwargs)
        self.script = "".join(script)

        self.type = kwargs.get("type", constants.KS_SCRIPT_PRE)
        self.interp = kwargs.get("interp", "/bin/sh")
        self.inChroot = kwargs.get("inChroot", False)
        self.errorOnFail = kwargs.get("errorOnFail", False)
        self.lineno = kwargs.get("lineno", None)
        self.logfile = kwargs.get("logfile", None)

    def __str__(self):
        """Return a string formatted for output to a kickstart file."""
        if self.type == constants.KS_SCRIPT_PRE:
            header = '\n%pre'
        elif self.type == constants.KS_SCRIPT_POST:
            header = '\n%post'
        elif self.type == constants.KS_SCRIPT_TRACEBACK:
            header = '\n%traceback'
        else:
            header = ''

        if self.interp != "/bin/sh" and self.interp != "":
            header += " --interpreter=%s" % self.interp
        if self.type == constants.KS_SCRIPT_POST and not self.inChroot:
            header += " --nochroot"
        if self.logfile != None:
            header += " --logfile %s" % self.logfile
        if self.errorOnFail:
            header += " --erroronfail"

        # Pre-F8 syntax has no %end terminator; F8 and later require one.
        if ver < version.F8:
            return header + "\n%s\n" % self.script
        if self.script.endswith("\n"):
            return header + "\n%s%%end\n" % self.script
        return header + "\n%s\n%%end\n" % self.script
225 | |||
226 | |||
227 | ## | ||
228 | ## PACKAGE HANDLING | ||
229 | ## | ||
class Group:
    """One group entry from the %packages section."""
    def __init__(self, name="", include=constants.GROUP_DEFAULT):
        """Build a Group.  Attributes:

           name    -- The group's identifier.
           include -- How much of the group to pull in; one of the GROUP_*
                      constants from pykickstart.constants.
        """
        self.name = name
        self.include = include

    def __str__(self):
        """Return a string formatted for output to a kickstart file."""
        suffix = ""
        if self.include == constants.GROUP_REQUIRED:
            suffix = " --nodefaults"
        elif self.include == constants.GROUP_ALL:
            suffix = " --optional"
        return "@%s%s" % (self.name, suffix)

    def __cmp__(self, other):
        # Order groups alphabetically by name (Python 2 three-way
        # comparison protocol: -1 / 0 / 1).
        return (self.name > other.name) - (self.name < other.name)
257 | |||
class Packages(KickstartObject):
    """A class representing the %packages section of the kickstart file."""
    def __init__(self, *args, **kwargs):
        """Create a new Packages instance. Instance attributes:

           addBase -- Should the Base group be installed even if it is
                      not specified?
           default -- Should the default package set be selected?
           excludedList -- A list of all the packages marked for exclusion in
                           the %packages section, without the leading minus
                           symbol.
           excludeDocs -- Should documentation in each package be excluded?
           groupList -- A list of Group objects representing all the groups
                        specified in the %packages section. Names will be
                        stripped of the leading @ symbol.
           excludedGroupList -- A list of Group objects representing all the
                                groups specified for removal in the %packages
                                section. Names will be stripped of the leading
                                -@ symbols.
           handleMissing -- If unknown packages are specified in the %packages
                            section, should it be ignored or not? Values can
                            be KS_MISSING_* from pykickstart.constants.
           packageList -- A list of all the packages specified in the
                          %packages section.
           instLangs -- A list of languages to install.
        """
        KickstartObject.__init__(self, *args, **kwargs)

        self.addBase = True
        self.default = False
        self.excludedList = []
        self.excludedGroupList = []
        self.excludeDocs = False
        self.groupList = []
        self.handleMissing = constants.KS_MISSING_PROMPT
        self.packageList = []
        self.instLangs = None

    def __str__(self):
        """Return a string formatted for output to a kickstart file."""
        pkgs = ""

        if not self.default:
            # NOTE: grps and p below alias the instance lists, so these
            # sort() calls reorder the stored lists in place as a side
            # effect of formatting.
            grps = self.groupList
            grps.sort()
            for grp in grps:
                pkgs += "%s\n" % grp.__str__()

            p = self.packageList
            p.sort()
            for pkg in p:
                pkgs += "%s\n" % pkg

            grps = self.excludedGroupList
            grps.sort()
            for grp in grps:
                pkgs += "-%s\n" % grp.__str__()

            p = self.excludedList
            p.sort()
            for pkg in p:
                pkgs += "-%s\n" % pkg

            # Nothing selected and not --default: emit no section at all.
            if pkgs == "":
                return ""

        retval = "\n%packages"

        if self.default:
            retval += " --default"
        if self.excludeDocs:
            retval += " --excludedocs"
        if not self.addBase:
            retval += " --nobase"
        if self.handleMissing == constants.KS_MISSING_IGNORE:
            retval += " --ignoremissing"
        if self.instLangs:
            retval += " --instLangs=%s" % self.instLangs

        # F8+ syntax terminates the section with %end; older syntax does not.
        if ver >= version.F8:
            return retval + "\n" + pkgs + "\n%end\n"
        else:
            return retval + "\n" + pkgs + "\n"

    def _processGroup (self, line):
        """Parse one group line (leading "@" already stripped) and append a
           Group with the requested inclusion level to groupList.
        """
        op = OptionParser()
        op.add_option("--nodefaults", action="store_true", default=False)
        op.add_option("--optional", action="store_true", default=False)

        (opts, extra) = op.parse_args(args=line.split())

        if opts.nodefaults and opts.optional:
            raise KickstartValueError, _("Group cannot specify both --nodefaults and --optional")

        # If the group name has spaces in it, we have to put it back together
        # now.
        grp = " ".join(extra)

        if opts.nodefaults:
            self.groupList.append(Group(name=grp, include=constants.GROUP_REQUIRED))
        elif opts.optional:
            self.groupList.append(Group(name=grp, include=constants.GROUP_ALL))
        else:
            self.groupList.append(Group(name=grp, include=constants.GROUP_DEFAULT))

    def add (self, pkgList):
        """Given a list of lines from the input file, strip off any leading
           symbols and add the result to the appropriate list.
        """
        existingExcludedSet = set(self.excludedList)
        existingPackageSet = set(self.packageList)
        newExcludedSet = set()
        newPackageSet = set()

        excludedGroupList = []

        for pkg in pkgList:
            stripped = pkg.strip()

            # Leading "@" = group, "-" = exclusion, "-@" = group exclusion.
            if stripped[0] == "@":
                self._processGroup(stripped[1:])
            elif stripped[0] == "-":
                if stripped[1] == "@":
                    excludedGroupList.append(Group(name=stripped[2:]))
                else:
                    newExcludedSet.add(stripped[1:])
            else:
                newPackageSet.add(stripped)

        # Groups have to be excluded in two different ways (note: can't use
        # sets here because we have to store objects):
        excludedGroupNames = map(lambda g: g.name, excludedGroupList)

        # First, an excluded group may be cancelling out a previously given
        # one. This is often the case when using %include. So there we should
        # just remove the group from the list.
        # (Python 2: filter() returns a list, so groupList stays a list.)
        self.groupList = filter(lambda g: g.name not in excludedGroupNames, self.groupList)

        # Second, the package list could have included globs which are not
        # processed by pykickstart. In that case we need to preserve a list of
        # excluded groups so whatever tool doing package/group installation can
        # take appropriate action.
        self.excludedGroupList.extend(excludedGroupList)

        # New explicit includes override earlier exclusions, and new
        # exclusions remove earlier includes.
        existingPackageSet = (existingPackageSet - newExcludedSet) | newPackageSet
        existingExcludedSet = (existingExcludedSet - existingPackageSet) | newExcludedSet

        self.packageList = list(existingPackageSet)
        self.excludedList = list(existingExcludedSet)
407 | |||
408 | |||
409 | ### | ||
410 | ### PARSER | ||
411 | ### | ||
412 | class KickstartParser: | ||
413 | """The kickstart file parser class as represented by a basic state | ||
414 | machine. To create a specialized parser, make a subclass and override | ||
415 | any of the methods you care about. Methods that don't need to do | ||
416 | anything may just pass. However, _stateMachine should never be | ||
417 | overridden. | ||
418 | """ | ||
    def __init__ (self, handler, followIncludes=True, errorsAreFatal=True,
                  missingIncludeIsFatal=True):
        """Create a new KickstartParser instance. Instance attributes:

           errorsAreFatal        -- Should errors cause processing to halt, or
                                    just print a message to the screen? This
                                    is most useful for writing syntax checkers
                                    that may want to continue after an error is
                                    encountered.
           followIncludes        -- If %include is seen, should the included
                                    file be checked as well or skipped?
           handler               -- An instance of a BaseHandler subclass. If
                                    None, the input file will still be parsed
                                    but no data will be saved and no commands
                                    will be executed.
           missingIncludeIsFatal -- Should missing include files be fatal, even
                                    if errorsAreFatal is False?
        """
        self.errorsAreFatal = errorsAreFatal
        self.followIncludes = followIncludes
        self.handler = handler
        # Presumably keyed by include depth -- confirm against _stateMachine.
        self.currentdir = {}
        self.missingIncludeIsFatal = missingIncludeIsFatal

        # State machine bookkeeping: current section state, %include nesting
        # depth, and the raw text of the line being processed.
        self._state = STATE_COMMANDS
        self._includeDepth = 0
        self._line = ""

        # NOTE(review): this dereferences handler unconditionally, so a None
        # handler (documented as supported above) would raise here -- verify.
        self.version = self.handler.version

        # Publish the syntax version module-wide so Script/Packages __str__
        # can format their output for the right kickstart syntax.
        global ver
        ver = self.version

        self._sections = {}
        self.setupSections()
454 | |||
    def _reset(self):
        """Reset the internal variables of the state machine for a new kickstart file."""
        # Only the section state and include depth are reset here; registered
        # sections and any data already stored in the handler are kept.
        self._state = STATE_COMMANDS
        self._includeDepth = 0
459 | |||
    def getSection(self, s):
        """Return a reference to the requested section object (s must start
           with '%', e.g. "%packages"), or raise KeyError if no section with
           that tag has been registered.
        """
        return self._sections[s]
465 | |||
466 | def handleCommand (self, lineno, args): | ||
467 | """Given the list of command and arguments, call the Version's | ||
468 | dispatcher method to handle the command. Returns the command or | ||
469 | data object returned by the dispatcher. This method may be | ||
470 | overridden in a subclass if necessary. | ||
471 | """ | ||
472 | if self.handler: | ||
473 | self.handler.currentCmd = args[0] | ||
474 | self.handler.currentLine = self._line | ||
475 | retval = self.handler.dispatcher(args, lineno) | ||
476 | |||
477 | return retval | ||
478 | |||
    def registerSection(self, obj):
        """Given an instance of a Section subclass, register the new section
           with the parser. Calling this method means the parser will
           recognize your new section and dispatch into the given object to
           handle it.  Raises TypeError if the object's sectionOpen tag is
           missing or does not start with '%'.
        """
        if not obj.sectionOpen:
            raise TypeError, "no sectionOpen given for section %s" % obj

        if not obj.sectionOpen.startswith("%"):
            raise TypeError, "section %s tag does not start with a %%" % obj.sectionOpen

        # Keyed by the opening tag (e.g. "%packages") for state lookups.
        self._sections[obj.sectionOpen] = obj
492 | |||
    def _finalize(self, obj):
        """Called at the close of a kickstart section to take any required
           actions. Internally, this is used to add scripts once we have the
           whole body read.
        """
        obj.finalize()
        # Leaving a section always returns the state machine to command mode.
        self._state = STATE_COMMANDS
500 | |||
    def _handleSpecialComments(self, line):
        """Kickstart recognizes a couple special comments."""
        # Only honored while reading commands, not inside a section body.
        if self._state != STATE_COMMANDS:
            return

        # Save the platform for s-c-kickstart.
        if line[:10] == "#platform=":
            # NOTE(review): [11:] skips one character past the "=" (which is
            # index 9, so the value starts at index 10) -- presumably the
            # written format is "#platform= <name>" with a space; confirm
            # before changing.
            self.handler.platform = self._line[11:]
509 | |||
    def _readSection(self, lineIter, lineno):
        """Consume lines belonging to the current section until its %end (or
           an implicit terminator), dispatching each body line to the
           registered section object.  Returns the updated line number.
        """
        obj = self._sections[self._state]

        while True:
            try:
                line = lineIter.next()
                if line == "":
                    # This section ends at the end of the file.
                    if self.version >= version.F8:
                        raise KickstartParseError, formatErrorMsg(lineno, msg=_("Section does not end with %%end."))

                    self._finalize(obj)
                    # (Pre-F8: the loop then continues; presumably the next
                    # call raises StopIteration and exits -- confirm.)
            except StopIteration:
                break

            lineno += 1

            # Throw away blank lines and comments, unless the section wants all
            # lines.
            if self._isBlankOrComment(line) and not obj.allLines:
                continue

            if line.startswith("%"):
                args = shlex.split(line)

                if args and args[0] == "%end":
                    # This is a properly terminated section.
                    self._finalize(obj)
                    break
                elif args and args[0] == "%ksappend":
                    # %ksappend was already expanded by preprocessing; skip.
                    continue
                elif args and (self._validState(args[0]) or args[0] in ["%include", "%ksappend"]):
                    # This is an unterminated section.
                    if self.version >= version.F8:
                        raise KickstartParseError, formatErrorMsg(lineno, msg=_("Section does not end with %%end."))

                    # Finish up. We do not process the header here because
                    # kicking back out to STATE_COMMANDS will ensure that happens.
                    lineIter.put(line)
                    lineno -= 1
                    self._finalize(obj)
                    break
            else:
                # This is just a line within a section. Pass it off to whatever
                # section handles it.
                obj.handleLine(line)

        return lineno
558 | |||
559 | def _validState(self, st): | ||
560 | """Is the given section tag one that has been registered with the parser?""" | ||
561 | return st in self._sections.keys() | ||
562 | |||
    def _tryFunc(self, fn):
        """Call the provided function (which doesn't take any arguments) and
           do the appropriate error handling.  If errorsAreFatal is False,
           this function will just print the exception and keep going.
        """
        try:
            fn()
        except Exception, msg:
            if self.errorsAreFatal:
                raise
            else:
                # Non-fatal mode: report the problem and keep parsing.
                print msg
575 | |||
576 | def _isBlankOrComment(self, line): | ||
577 | return line.isspace() or line == "" or line.lstrip()[0] == '#' | ||
578 | |||
    def _stateMachine(self, lineIter):
        """Drive the parse: read one line at a time, dispatch commands,
           expand %include files, and hand section bodies to _readSection.
        """
        # For error reporting.
        lineno = 0

        while True:
            # Get the next line out of the file, quitting if this is the last line.
            try:
                self._line = lineIter.next()
                if self._line == "":
                    break
            except StopIteration:
                break

            lineno += 1

            # Eliminate blank lines, whitespace-only lines, and comments.
            if self._isBlankOrComment(self._line):
                self._handleSpecialComments(self._line)
                continue

            # Remove any end-of-line comments.
            # NOTE(review): a '#' inside a quoted argument is also stripped
            # here -- presumably acceptable for kickstart syntax; confirm.
            sanitized = self._line.split("#")[0]

            # Then split the line.  args is assumed non-empty: blank and
            # comment-only lines were filtered out above.
            args = shlex.split(sanitized.rstrip())

            if args[0] == "%include":
                # This case comes up primarily in ksvalidator.
                if not self.followIncludes:
                    continue

                if len(args) == 1 or not args[1]:
                    raise KickstartParseError, formatErrorMsg(lineno)

                self._includeDepth += 1

                try:
                    self.readKickstart(args[1], reset=False)
                except KickstartError:
                    # Handle the include file being provided over the
                    # network in a %pre script.  This case comes up in the
                    # early parsing in anaconda.
                    if self.missingIncludeIsFatal:
                        raise

                self._includeDepth -= 1
                continue

            # Now on to the main event.
            if self._state == STATE_COMMANDS:
                if args[0] == "%ksappend":
                    # This is handled by the preprocess* functions, so continue.
                    continue
                elif args[0][0] == '%':
                    # This is the beginning of a new section.  Handle its header
                    # here.
                    newSection = args[0]
                    if not self._validState(newSection):
                        raise KickstartParseError, formatErrorMsg(lineno, msg=_("Unknown kickstart section: %s" % newSection))

                    self._state = newSection
                    obj = self._sections[self._state]
                    self._tryFunc(lambda: obj.handleHeader(lineno, args))

                    # This will handle all section processing, kicking us back
                    # out to STATE_COMMANDS at the end with the current line
                    # being the next section header, etc.
                    lineno = self._readSection(lineIter, lineno)
                else:
                    # This is a command in the command section.  Dispatch to it.
                    self._tryFunc(lambda: self.handleCommand(lineno, args))
            elif self._state == STATE_END:
                break
652 | |||
653 | def readKickstartFromString (self, s, reset=True): | ||
654 | """Process a kickstart file, provided as the string str.""" | ||
655 | if reset: | ||
656 | self._reset() | ||
657 | |||
658 | # Add a "" to the end of the list so the string reader acts like the | ||
659 | # file reader and we only get StopIteration when we're after the final | ||
660 | # line of input. | ||
661 | i = PutBackIterator(s.splitlines(True) + [""]) | ||
662 | self._stateMachine (i) | ||
663 | |||
    def readKickstart(self, f, reset=True):
        """Process a kickstart file, given by the filename f.  The file may
           also be a URL, since its contents are fetched with urlread().
        """
        if reset:
            self._reset()

        # an %include might not specify a full path.  if we don't try to figure
        # out what the path should have been, then we're unable to find it
        # requiring full path specification, though, sucks.  so let's make
        # the reading "smart" by keeping track of what the path is at each
        # include depth.
        if not os.path.exists(f):
            if self.currentdir.has_key(self._includeDepth - 1):
                if os.path.exists(os.path.join(self.currentdir[self._includeDepth - 1], f)):
                    f = os.path.join(self.currentdir[self._includeDepth - 1], f)

        # Remember the directory at this include depth so nested %includes
        # can be resolved relative to it.
        cd = os.path.dirname(f)
        if not cd.startswith("/"):
            cd = os.path.abspath(cd)
        self.currentdir[self._includeDepth] = cd

        try:
            s = urlread(f)
        except grabber.URLGrabError, e:
            raise KickstartError, formatErrorMsg(0, msg=_("Unable to open input kickstart file: %s") % e.strerror)

        # Parse the whole contents without resetting any state accumulated
        # from outer files.
        self.readKickstartFromString(s, reset=False)
690 | |||
691 | def setupSections(self): | ||
692 | """Install the sections all kickstart files support. You may override | ||
693 | this method in a subclass, but should avoid doing so unless you know | ||
694 | what you're doing. | ||
695 | """ | ||
696 | self._sections = {} | ||
697 | |||
698 | # Install the sections all kickstart files support. | ||
699 | self.registerSection(PreScriptSection(self.handler, dataObj=Script)) | ||
700 | self.registerSection(PostScriptSection(self.handler, dataObj=Script)) | ||
701 | self.registerSection(TracebackScriptSection(self.handler, dataObj=Script)) | ||
702 | self.registerSection(PackageSection(self.handler)) | ||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/sections.py b/scripts/lib/mic/3rdparty/pykickstart/sections.py new file mode 100644 index 0000000000..44df856b8d --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/sections.py | |||
@@ -0,0 +1,244 @@ | |||
1 | # | ||
2 | # sections.py: Kickstart file sections. | ||
3 | # | ||
4 | # Chris Lumens <clumens@redhat.com> | ||
5 | # | ||
6 | # Copyright 2011 Red Hat, Inc. | ||
7 | # | ||
8 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
9 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
10 | # General Public License v.2. This program is distributed in the hope that it | ||
11 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
12 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
13 | # See the GNU General Public License for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along with | ||
16 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
17 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
18 | # trademarks that are incorporated in the source code or documentation are not | ||
19 | # subject to the GNU General Public License and may only be used or replicated | ||
20 | # with the express permission of Red Hat, Inc. | ||
21 | # | ||
22 | """ | ||
23 | This module exports the classes that define a section of a kickstart file. A | ||
24 | section is a chunk of the file starting with a %tag and ending with a %end. | ||
25 | Examples of sections include %packages, %pre, and %post. | ||
26 | |||
27 | You may use this module to define your own custom sections which will be | ||
28 | treated just the same as a predefined one by the kickstart parser. All that | ||
29 | is necessary is to create a new subclass of Section and call | ||
30 | parser.registerSection with an instance of your new class. | ||
31 | """ | ||
32 | from constants import * | ||
33 | from options import KSOptionParser | ||
34 | from version import * | ||
35 | |||
class Section(object):
    """The base class for defining kickstart sections.  You are free to
       subclass this as appropriate.

       Class attributes:

          allLines    -- Does this section require the parser to call
                         handleLine for every line in the section, even
                         blanks and comments?
          sectionOpen -- The string that denotes the start of this section.
                         You must start your tag with a percent sign.
          timesSeen   -- This attribute is for informational purposes only.
                         It is incremented every time handleHeader is called
                         to keep track of the number of times a section of
                         this type is seen.
    """
    allLines = False
    sectionOpen = ""
    timesSeen = 0

    def __init__(self, handler, **kwargs):
        """Create a new Section instance.  At the least, you must pass in an
           instance of a baseHandler subclass.

           Valid kwargs:

              dataObj -- an optional class used to hold this section's data
        """
        self.handler = handler
        self.version = handler.version
        self.dataObj = kwargs.get("dataObj", None)

    def finalize(self):
        """Called when the %end tag for a section is seen.  Subclasses are
           not required to provide this.
        """
        pass

    def handleLine(self, line):
        """Called once for every line of a section, with the complete line
           (including any trailing newline).  Subclasses should override
           this to do something useful, though they are not required to.
        """
        pass

    def handleHeader(self, lineno, args):
        """Called when the opening tag for a section is seen.  args holds
           every string passed as an argument to the opening tag.  Not all
           sections need this, though all provided with kickstart have one.
        """
        self.timesSeen += 1
97 | |||
class NullSection(Section):
    """A section that pykickstart recognizes but deliberately does nothing
       with.  The parser raises an error for a %section with no registered
       object; register a NullSection when you want it silently skipped.
    """
    def __init__(self, *args, **kwargs):
        """Create a new NullSection instance.  You must pass a sectionOpen
           parameter (including a leading '%') for the section you wish to
           ignore.
        """
        super(NullSection, self).__init__(*args, **kwargs)
        self.sectionOpen = kwargs.get("sectionOpen")
111 | |||
class ScriptSection(Section):
    """Common machinery for the script-holding sections: accumulate the
       header options and body lines, then build a dataObj instance when
       the section ends.
    """
    allLines = True

    def __init__(self, *args, **kwargs):
        Section.__init__(self, *args, **kwargs)
        self._script = {}
        self._resetScript()

    def _getParser(self):
        # Options accepted by every script section header; subclasses may
        # extend the returned parser.
        parser = KSOptionParser(self.version)
        parser.add_option("--erroronfail", dest="errorOnFail", action="store_true",
                          default=False)
        parser.add_option("--interpreter", dest="interpreter", default="/bin/sh")
        parser.add_option("--log", "--logfile", dest="log")
        return parser

    def _resetScript(self):
        # Start accumulating a fresh script.  Subclasses add a "type" key.
        self._script = {"interp": "/bin/sh",
                        "log": None,
                        "errorOnFail": False,
                        "lineno": None,
                        "chroot": False,
                        "body": []}

    def handleLine(self, line):
        self._script["body"].append(line)

    def finalize(self):
        # A script whose body is nothing but whitespace is dropped entirely.
        if not " ".join(self._script["body"]).strip():
            return

        script = self.dataObj(self._script["body"],
                              interp=self._script["interp"],
                              inChroot=self._script["chroot"],
                              lineno=self._script["lineno"],
                              logfile=self._script["log"],
                              errorOnFail=self._script["errorOnFail"],
                              type=self._script["type"])
        self._resetScript()

        if self.handler:
            self.handler.scripts.append(script)

    def handleHeader(self, lineno, args):
        """Process the arguments to a %pre/%post/%traceback header for later
           setting on a Script instance once the end of the script is found.
           This method may be overridden in a subclass if necessary.
        """
        Section.handleHeader(self, lineno, args)
        (opts, _extra) = self._getParser().parse_args(args=args[1:], lineno=lineno)

        self._script["interp"] = opts.interpreter
        self._script["lineno"] = lineno
        self._script["log"] = opts.log
        self._script["errorOnFail"] = opts.errorOnFail
        # Only %post's parser defines --nochroot; honor it when present.
        if hasattr(opts, "nochroot"):
            self._script["chroot"] = not opts.nochroot
168 | |||
class PreScriptSection(ScriptSection):
    # Script section introduced by the %pre tag.
    sectionOpen = "%pre"

    def _resetScript(self):
        super(PreScriptSection, self)._resetScript()
        self._script["type"] = KS_SCRIPT_PRE
175 | |||
class PostScriptSection(ScriptSection):
    # Script section introduced by the %post tag.
    sectionOpen = "%post"

    def _getParser(self):
        # %post additionally supports --nochroot.
        parser = ScriptSection._getParser(self)
        parser.add_option("--nochroot", dest="nochroot", action="store_true",
                          default=False)
        return parser

    def _resetScript(self):
        super(PostScriptSection, self)._resetScript()
        self._script["chroot"] = True
        self._script["type"] = KS_SCRIPT_POST
189 | |||
class TracebackScriptSection(ScriptSection):
    # Script section introduced by the %traceback tag.
    sectionOpen = "%traceback"

    def _resetScript(self):
        super(TracebackScriptSection, self)._resetScript()
        self._script["type"] = KS_SCRIPT_TRACEBACK
196 | |||
class PackageSection(Section):
    # Section holding the package selection list.
    sectionOpen = "%packages"

    def handleLine(self, line):
        if not self.handler:
            return

        # Drop any trailing comment, then record the remaining text.
        (text, _sep, _comment) = line.partition('#')
        self.handler.packages.add([text.rstrip()])

    def handleHeader(self, lineno, args):
        """Process the arguments to the %packages header and set attributes
           on the Version's Packages instance as appropriate.  This method
           may be overridden in a subclass if necessary.
        """
        Section.handleHeader(self, lineno, args)
        op = KSOptionParser(version=self.version)
        op.add_option("--excludedocs", dest="excludedocs", action="store_true",
                      default=False)
        op.add_option("--ignoremissing", dest="ignoremissing",
                      action="store_true", default=False)
        op.add_option("--nobase", dest="nobase", action="store_true",
                      default=False)
        op.add_option("--ignoredeps", dest="resolveDeps", action="store_false",
                      deprecated=FC4, removed=F9)
        op.add_option("--resolvedeps", dest="resolveDeps", action="store_true",
                      deprecated=FC4, removed=F9)
        op.add_option("--default", dest="defaultPackages", action="store_true",
                      default=False, introduced=F7)
        op.add_option("--instLangs", dest="instLangs", type="string",
                      default="", introduced=F9)

        (opts, _extra) = op.parse_args(args=args[1:], lineno=lineno)

        packages = self.handler.packages
        packages.excludeDocs = opts.excludedocs
        packages.addBase = not opts.nobase
        if opts.ignoremissing:
            packages.handleMissing = KS_MISSING_IGNORE
        else:
            packages.handleMissing = KS_MISSING_PROMPT

        if opts.defaultPackages:
            packages.default = True

        if opts.instLangs:
            packages.instLangs = opts.instLangs
diff --git a/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/__init__.py b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/__init__.py new file mode 100644 index 0000000000..7bcd9d5541 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/__init__.py | |||
@@ -0,0 +1,53 @@ | |||
1 | # This program is free software; you can redistribute it and/or modify | ||
2 | # it under the terms of the GNU General Public License as published by | ||
3 | # the Free Software Foundation; either version 2 of the License, or | ||
4 | # (at your option) any later version. | ||
5 | # | ||
6 | # This program is distributed in the hope that it will be useful, | ||
7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | # GNU Library General Public License for more details. | ||
10 | # | ||
11 | # You should have received a copy of the GNU General Public License | ||
12 | # along with this program; if not, write to the Free Software | ||
13 | # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
14 | |||
15 | # Copyright 2002-2006 Michael D. Stenner, Ryan Tomayko | ||
16 | |||
17 | # $Id: __init__.py,v 1.20 2006/09/22 00:58:55 mstenner Exp $ | ||
18 | |||
19 | """A high-level cross-protocol url-grabber. | ||
20 | |||
21 | Using urlgrabber, data can be fetched in three basic ways: | ||
22 | |||
23 | urlgrab(url) copy the file to the local filesystem | ||
24 | urlopen(url) open the remote file and return a file object | ||
25 | (like urllib2.urlopen) | ||
26 | urlread(url) return the contents of the file as a string | ||
27 | |||
28 | When using these functions (or methods), urlgrabber supports the | ||
29 | following features: | ||
30 | |||
31 | * identical behavior for http://, ftp://, and file:// urls | ||
32 | * http keepalive - faster downloads of many files by using | ||
33 | only a single connection | ||
34 | * byte ranges - fetch only a portion of the file | ||
35 | * reget - for a urlgrab, resume a partial download | ||
36 | * progress meters - the ability to report download progress | ||
37 | automatically, even when using urlopen! | ||
38 | * throttling - restrict bandwidth usage | ||
39 | * retries - automatically retry a download if it fails. The | ||
40 | number of retries and failure types are configurable. | ||
41 | * authenticated server access for http and ftp | ||
42 | * proxy support - support for authenticated http and ftp proxies | ||
43 | * mirror groups - treat a list of mirrors as a single source, | ||
44 | automatically switching mirrors if there is a failure. | ||
45 | """ | ||
46 | |||
47 | __version__ = '3.1.0' | ||
48 | __date__ = '2006/09/21' | ||
49 | __author__ = 'Michael D. Stenner <mstenner@linux.duke.edu>, ' \ | ||
50 | 'Ryan Tomayko <rtomayko@naeblis.cx>' | ||
51 | __url__ = 'http://linux.duke.edu/projects/urlgrabber/' | ||
52 | |||
53 | from grabber import urlgrab, urlopen, urlread | ||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/byterange.py b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/byterange.py new file mode 100644 index 0000000000..001b4e32d6 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/byterange.py | |||
@@ -0,0 +1,463 @@ | |||
1 | # This library is free software; you can redistribute it and/or | ||
2 | # modify it under the terms of the GNU Lesser General Public | ||
3 | # License as published by the Free Software Foundation; either | ||
4 | # version 2.1 of the License, or (at your option) any later version. | ||
5 | # | ||
6 | # This library is distributed in the hope that it will be useful, | ||
7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
9 | # Lesser General Public License for more details. | ||
10 | # | ||
11 | # You should have received a copy of the GNU Lesser General Public | ||
12 | # License along with this library; if not, write to the | ||
13 | # Free Software Foundation, Inc., | ||
14 | # 59 Temple Place, Suite 330, | ||
15 | # Boston, MA 02111-1307 USA | ||
16 | |||
17 | # This file is part of urlgrabber, a high-level cross-protocol url-grabber | ||
18 | # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko | ||
19 | |||
20 | # $Id: byterange.py,v 1.12 2006/07/20 20:15:58 mstenner Exp $ | ||
21 | |||
22 | import os | ||
23 | import stat | ||
24 | import urllib | ||
25 | import urllib2 | ||
26 | import rfc822 | ||
27 | |||
28 | DEBUG = None | ||
29 | |||
30 | try: | ||
31 | from cStringIO import StringIO | ||
32 | except ImportError, msg: | ||
33 | from StringIO import StringIO | ||
34 | |||
class RangeError(IOError):
    """Raised when a byte range that cannot be satisfied is requested."""
    pass
38 | |||
class HTTPRangeHandler(urllib2.BaseHandler):
    """Handler that enables HTTP Range headers.

    This was extremely simple. The Range header is a HTTP feature to
    begin with so all this class does is tell urllib2 that the
    "206 Partial Content" response from the HTTP server is what we
    expected.

    Example:
        import urllib2
        import byterange

        range_handler = byterange.HTTPRangeHandler()
        opener = urllib2.build_opener(range_handler)

        # install it
        urllib2.install_opener(opener)

        # create Request and set Range header
        req = urllib2.Request('http://www.python.org/')
        req.add_header('Range', 'bytes=30-50')
        f = urllib2.urlopen(req)
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content Response
        # Wrap the response so it looks like a normal successful open.
        r = urllib.addinfourl(fp, hdrs, req.get_full_url())
        r.code = code
        r.msg = msg
        return r

    def http_error_416(self, req, fp, code, msg, hdrs):
        # HTTP's Range Not Satisfiable error
        raise RangeError('Requested Range Not Satisfiable')
73 | |||
class HTTPSRangeHandler(HTTPRangeHandler):
    """Range header support for HTTPS: delegate the https_* error hooks to
       the HTTP implementations inherited from HTTPRangeHandler.
    """

    def https_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content is the expected reply to a Range request.
        return self.http_error_206(req, fp, code, msg, hdrs)

    def https_error_416(self, req, fp, code, msg, hdrs):
        # Bug fix: the original called self.https_error_416 -- i.e. itself --
        # causing infinite recursion.  Delegate to http_error_416, which
        # raises RangeError for an unsatisfiable range; also propagate its
        # result with an explicit return for symmetry with https_error_206.
        return self.http_error_416(req, fp, code, msg, hdrs)
82 | |||
class RangeableFileObject:
    """File object wrapper to enable raw range handling.
    This was implemented primarily for handling range
    specifications for file:// urls. This object effectively makes
    a file object look like it consists only of a range of bytes in
    the stream.

    Examples:
        # expose 10 bytes, starting at byte position 20, from
        # /etc/aliases.
        >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
        # seek seeks within the range (to position 23 in this case)
        >>> fo.seek(3)
        # tell tells where your at _within the range_ (position 3 in
        # this case)
        >>> fo.tell()
        # read EOFs if an attempt is made to read past the last
        # byte in the range. the following will return only 7 bytes.
        >>> fo.read(30)
    """

    def __init__(self, fo, rangetup):
        """Create a RangeableFileObject.
        fo       -- a file like object. only the read() method need be
                    supported but supporting an optimized seek() is
                    preferable.
        rangetup -- a (firstbyte,lastbyte) tuple specifying the range
                    to work over.
        The file object provided is assumed to be at byte offset 0.
        """
        self.fo = fo
        # range_tuple_normalize is defined later in this module; it turns the
        # user-supplied tuple into concrete (firstbyte, lastbyte) values.
        (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
        self.realpos = 0
        self._do_seek(self.firstbyte)

    def __getattr__(self, name):
        """This effectively allows us to wrap at the instance level.
        Any attribute not found in _this_ object will be searched for
        in self.fo.  This includes methods."""
        if hasattr(self.fo, name):
            return getattr(self.fo, name)
        raise AttributeError, name

    def tell(self):
        """Return the position within the range.
        This is different from fo.seek in that position 0 is the
        first byte position of the range tuple. For example, if
        this object was created with a range tuple of (500,899),
        tell() will return 0 when at byte position 500 of the file.
        """
        return (self.realpos - self.firstbyte)

    def seek(self,offset,whence=0):
        """Seek within the byte range.
        Positioning is identical to that described under tell().
        """
        assert whence in (0, 1, 2)
        if whence == 0:   # absolute seek
            realoffset = self.firstbyte + offset
        elif whence == 1: # relative seek
            realoffset = self.realpos + offset
        elif whence == 2: # absolute from end of file
            # XXX: are we raising the right Error here?
            raise IOError('seek from end of file not supported.')

        # do not allow seek past lastbyte in range
        # NOTE(review): lastbyte == 0 is falsy and would skip this clamp --
        # presumably ranges never end at byte 0; confirm against
        # range_tuple_normalize.
        if self.lastbyte and (realoffset >= self.lastbyte):
            realoffset = self.lastbyte

        self._do_seek(realoffset - self.realpos)

    def read(self, size=-1):
        """Read within the range.
        This method will limit the size read based on the range.
        """
        size = self._calc_read_size(size)
        rslt = self.fo.read(size)
        self.realpos += len(rslt)
        return rslt

    def readline(self, size=-1):
        """Read lines within the range.
        This method will limit the size read based on the range.
        """
        size = self._calc_read_size(size)
        rslt = self.fo.readline(size)
        self.realpos += len(rslt)
        return rslt

    def _calc_read_size(self, size):
        """Handles calculating the amount of data to read based on
        the range.  A size of -1 means "read to the end of the range".
        """
        if self.lastbyte:
            if size > -1:
                if ((self.realpos + size) >= self.lastbyte):
                    size = (self.lastbyte - self.realpos)
            else:
                size = (self.lastbyte - self.realpos)
        return size

    def _do_seek(self,offset):
        """Seek based on whether wrapped object supports seek().
        offset is relative to the current position (self.realpos).
        """
        assert offset >= 0
        if not hasattr(self.fo, 'seek'):
            self._poor_mans_seek(offset)
        else:
            self.fo.seek(self.realpos + offset)
        self.realpos+= offset

    def _poor_mans_seek(self,offset):
        """Seek by calling the wrapped file objects read() method.
        This is used for file like objects that do not have native
        seek support. The wrapped objects read() method is called
        to manually seek to the desired position.
        offset -- read this number of bytes from the wrapped
                  file object.
        raise RangeError if we encounter EOF before reaching the
        specified offset.
        """
        pos = 0
        bufsize = 1024
        while pos < offset:
            # Shrink the final read so we land exactly on the target offset.
            if (pos + bufsize) > offset:
                bufsize = offset - pos
            buf = self.fo.read(bufsize)
            if len(buf) != bufsize:
                raise RangeError('Requested Range Not Satisfiable')
            pos+= bufsize
214 | |||
class FileRangeHandler(urllib2.FileHandler):
    """FileHandler subclass that adds Range support.
    This class handles Range headers exactly like an HTTP
    server would.
    """
    def open_local_file(self, req):
        # Open a file:// URL, honoring any Range header on the request.
        # Returns an addinfourl whose file object is restricted to the
        # requested byte range.
        import mimetypes
        import mimetools
        host = req.get_host()
        file = req.get_selector()
        localfile = urllib.url2pathname(file)
        stats = os.stat(localfile)
        size = stats[stat.ST_SIZE]
        modified = rfc822.formatdate(stats[stat.ST_MTIME])
        mtype = mimetypes.guess_type(file)[0]
        if host:
            host, port = urllib.splitport(host)
            # NOTE(review): 'socket' is imported further down in this module,
            # so it is bound by the time this runs, even though it is not
            # yet defined at class-definition time.
            if port or socket.gethostbyname(host) not in self.get_names():
                raise urllib2.URLError('file not on local host')
        fo = open(localfile,'rb')
        brange = req.headers.get('Range',None)
        # range_header_to_tuple is defined later in this module; presumably
        # () signals a malformed Range header (hence the assert) and None
        # signals that no Range header was present.
        brange = range_header_to_tuple(brange)
        assert brange != ()
        if brange:
            (fb,lb) = brange
            if lb == '': lb = size
            if fb < 0 or fb > size or lb > size:
                raise RangeError('Requested Range Not Satisfiable')
            size = (lb - fb)
            fo = RangeableFileObject(fo, (fb,lb))
        headers = mimetools.Message(StringIO(
            'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
            (mtype or 'text/plain', size, modified)))
        return urllib.addinfourl(fo, headers, 'file:'+file)
249 | |||
250 | |||
251 | # FTP Range Support | ||
252 | # Unfortunately, a large amount of base FTP code had to be copied | ||
253 | # from urllib and urllib2 in order to insert the FTP REST command. | ||
254 | # Code modifications for range support have been commented as | ||
255 | # follows: | ||
256 | # -- range support modifications start/end here | ||
257 | |||
258 | from urllib import splitport, splituser, splitpasswd, splitattr, \ | ||
259 | unquote, addclosehook, addinfourl | ||
260 | import ftplib | ||
261 | import socket | ||
262 | import sys | ||
263 | import ftplib | ||
264 | import mimetypes | ||
265 | import mimetools | ||
266 | |||
class FTPRangeHandler(urllib2.FTPHandler):
    """FTPHandler subclass that adds Range support by issuing the FTP REST
       command (via the ftpwrapper below) and wrapping the data connection
       in a RangeableFileObject where needed.
    """
    def ftp_open(self, req):
        # Open an ftp:// URL, honoring any Range header on the request.
        host = req.get_host()
        if not host:
            raise IOError, ('ftp error', 'no host given')
        host, port = splitport(host)
        if port is None:
            port = ftplib.FTP_PORT

        # username/password handling
        user, host = splituser(host)
        if user:
            user, passwd = splitpasswd(user)
        else:
            passwd = None
        host = unquote(host)
        user = unquote(user or '')
        passwd = unquote(passwd or '')

        try:
            host = socket.gethostbyname(host)
        except socket.error, msg:
            raise urllib2.URLError(msg)
        path, attrs = splitattr(req.get_selector())
        dirs = path.split('/')
        dirs = map(unquote, dirs)
        dirs, file = dirs[:-1], dirs[-1]
        if dirs and not dirs[0]:
            dirs = dirs[1:]
        try:
            fw = self.connect_ftp(user, passwd, host, port, dirs)
            # 'I' (binary) when a filename is present, 'D' (directory) otherwise;
            # a type= URL attribute may override this below.
            type = file and 'I' or 'D'
            for attr in attrs:
                attr, value = splitattr(attr)
                if attr.lower() == 'type' and \
                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
                    type = value.upper()

            # -- range support modifications start here
            # Translate the Range header into an FTP REST offset (rest) and,
            # after the transfer starts, a client-side length cap.
            rest = None
            range_tup = range_header_to_tuple(req.headers.get('Range',None))
            assert range_tup != ()
            if range_tup:
                (fb,lb) = range_tup
                if fb > 0: rest = fb
            # -- range support modifications end here

            fp, retrlen = fw.retrfile(file, type, rest)

            # -- range support modifications start here
            if range_tup:
                (fb,lb) = range_tup
                if lb == '':
                    # Open-ended range: we need the server-reported length to
                    # know how much remains.
                    if retrlen is None or retrlen == 0:
                        raise RangeError('Requested Range Not Satisfiable due to unobtainable file length.')
                    lb = retrlen
                    retrlen = lb - fb
                    if retrlen < 0:
                        # beginning of range is larger than file
                        raise RangeError('Requested Range Not Satisfiable')
                else:
                    retrlen = lb - fb
                fp = RangeableFileObject(fp, (0,retrlen))
            # -- range support modifications end here

            headers = ""
            mtype = mimetypes.guess_type(req.get_full_url())[0]
            if mtype:
                headers += "Content-Type: %s\n" % mtype
            if retrlen is not None and retrlen >= 0:
                headers += "Content-Length: %d\n" % retrlen
            sf = StringIO(headers)
            headers = mimetools.Message(sf)
            return addinfourl(fp, headers, req.get_full_url())
        except ftplib.all_errors, msg:
            # Re-raise as IOError while preserving the original traceback.
            raise IOError, ('ftp error', msg), sys.exc_info()[2]

    def connect_ftp(self, user, passwd, host, port, dirs):
        # Build the REST-capable ftpwrapper defined below.
        fw = ftpwrapper(user, passwd, host, port, dirs)
        return fw
347 | |||
class ftpwrapper(urllib.ftpwrapper):
    # range support note:
    # this ftpwrapper code is copied directly from
    # urllib. The only enhancement is to add the rest
    # argument and pass it on to ftp.ntransfercmd
    def retrfile(self, file, type, rest=None):
        """Retrieve *file* (or a directory listing) over the FTP session.

        file -- remote file name; empty string requests a listing
        type -- transfer type: 'd'/'D' means a directory listing,
                anything else is sent as 'TYPE <type>' (e.g. 'I')
        rest -- optional byte offset passed to ftp.ntransfercmd (REST);
                this parameter is the only addition over urllib's version

        Returns (fileobj, length), where length may be None if unknown.
        """
        self.endtransfer()
        if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
        else: cmd = 'TYPE ' + type; isdir = 0
        try:
            self.ftp.voidcmd(cmd)
        except ftplib.all_errors:
            # Connection may have dropped; reconnect and retry once.
            self.init()
            self.ftp.voidcmd(cmd)
        conn = None
        if file and not isdir:
            # Use nlst to see if the file exists at all
            try:
                self.ftp.nlst(file)
            except ftplib.error_perm, reason:
                raise IOError, ('ftp error', reason), sys.exc_info()[2]
            # Restore the transfer mode!
            self.ftp.voidcmd(cmd)
            # Try to retrieve as a file
            try:
                cmd = 'RETR ' + file
                conn = self.ftp.ntransfercmd(cmd, rest)
            except ftplib.error_perm, reason:
                if str(reason)[:3] == '501':
                    # workaround for REST not supported error: fetch the
                    # whole file and emulate the seek client-side.
                    fp, retrlen = self.retrfile(file, type)
                    fp = RangeableFileObject(fp, (rest,''))
                    return (fp, retrlen)
                elif str(reason)[:3] != '550':
                    raise IOError, ('ftp error', reason), sys.exc_info()[2]
        if not conn:
            # Set transfer mode to ASCII!
            self.ftp.voidcmd('TYPE A')
            # Try a directory listing
            if file: cmd = 'LIST ' + file
            else: cmd = 'LIST'
            conn = self.ftp.ntransfercmd(cmd)
        self.busy = 1
        # Pass back both a suitably decorated object and a retrieval length
        return (addclosehook(conn[0].makefile('rb'),
                            self.endtransfer), conn[1])
394 | |||
395 | |||
396 | #################################################################### | ||
397 | # Range Tuple Functions | ||
398 | # XXX: These range tuple functions might go better in a class. | ||
399 | |||
_rangere = None
def range_header_to_tuple(range_header):
    """Parse an HTTP Range header value into a (firstbyte, lastbyte) tuple.

    The header has the form "bytes=<firstbyte>-<lastbyte>".  The last
    byte is returned as an empty string when it is absent from the
    header (an open-ended range).

    Returns None when range_header is None, and () when the value does
    not conform to the range-spec pattern.
    """
    global _rangere
    if range_header is None:
        return None
    if _rangere is None:
        # Compile lazily and cache the pattern at module level.
        import re
        _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
    m = _rangere.match(range_header)
    if not m:
        return ()
    tup = range_tuple_normalize(m.group(1, 2))
    if tup and tup[1]:
        # Normalized tuples use an inclusive last byte; convert the
        # upper bound to an exclusive, python-slice-style value.
        tup = (tup[0], tup[1] + 1)
    return tup
427 | |||
def range_tuple_to_header(range_tup):
    """Render a range tuple as a Range header value.

    Returns a string of the form "bytes=<firstbyte>-<lastbyte>", or
    None when no range is needed (range_tup is None or normalizes to
    the whole file).
    """
    if range_tup is None:
        return None
    range_tup = range_tuple_normalize(range_tup)
    if not range_tup:
        return None
    first, last = range_tup
    if last:
        # The tuple carries an exclusive upper bound; the header wants
        # an inclusive last byte.
        last = last - 1
    return 'bytes=%s-%s' % (first, last)
439 | |||
def range_tuple_normalize(range_tup):
    """Normalize a (first_byte, last_byte) range tuple.

    Returns a tuple whose first element is guaranteed to be an int and
    whose second element is '' (meaning: through the last byte) or an
    int.  Returns None if range_tup is None, or if the normalized tuple
    is (0, ''), as that is equivalent to retrieving the entire file.

    Raises RangeError if a last byte is given that is smaller than the
    first byte.
    """
    if range_tup is None:
        return None
    # handle first byte: missing/empty means "from the start of the file"
    fb = range_tup[0]
    if fb in (None, ''):
        fb = 0
    else:
        fb = int(fb)
    # handle last byte: a missing element or None means "to end of file"
    try:
        lb = range_tup[1]
    except IndexError:
        lb = ''
    else:
        if lb is None:
            lb = ''
        elif lb != '':
            lb = int(lb)
    # check if range is over the entire file
    if (fb, lb) == (0, ''):
        return None
    # check that the range is valid; the explicit '' guard skips the
    # comparison for open-ended ranges.  Without it, '' < fb relied on
    # Python 2's silent cross-type ordering (always False for str vs
    # int) and raises TypeError under Python 3.
    if lb != '' and lb < fb:
        raise RangeError('Invalid byte range: %s-%s' % (fb, lb))
    return (fb, lb)
463 | |||
diff --git a/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/grabber.py b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/grabber.py new file mode 100644 index 0000000000..fefdab36f6 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/grabber.py | |||
@@ -0,0 +1,1477 @@ | |||
1 | # This library is free software; you can redistribute it and/or | ||
2 | # modify it under the terms of the GNU Lesser General Public | ||
3 | # License as published by the Free Software Foundation; either | ||
4 | # version 2.1 of the License, or (at your option) any later version. | ||
5 | # | ||
6 | # This library is distributed in the hope that it will be useful, | ||
7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
9 | # Lesser General Public License for more details. | ||
10 | # | ||
11 | # You should have received a copy of the GNU Lesser General Public | ||
12 | # License along with this library; if not, write to the | ||
13 | # Free Software Foundation, Inc., | ||
14 | # 59 Temple Place, Suite 330, | ||
15 | # Boston, MA 02111-1307 USA | ||
16 | |||
17 | # This file is part of urlgrabber, a high-level cross-protocol url-grabber | ||
18 | # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko | ||
19 | |||
20 | """A high-level cross-protocol url-grabber. | ||
21 | |||
22 | GENERAL ARGUMENTS (kwargs) | ||
23 | |||
24 | Where possible, the module-level default is indicated, and legal | ||
25 | values are provided. | ||
26 | |||
27 | copy_local = 0 [0|1] | ||
28 | |||
29 | ignored except for file:// urls, in which case it specifies | ||
30 | whether urlgrab should still make a copy of the file, or simply | ||
31 | point to the existing copy. The module level default for this | ||
32 | option is 0. | ||
33 | |||
34 | close_connection = 0 [0|1] | ||
35 | |||
36 | tells URLGrabber to close the connection after a file has been | ||
37 | transferred. This is ignored unless the download happens with the | ||
38 | http keepalive handler (keepalive=1). Otherwise, the connection | ||
39 | is left open for further use. The module level default for this | ||
40 | option is 0 (keepalive connections will not be closed). | ||
41 | |||
42 | keepalive = 1 [0|1] | ||
43 | |||
44 | specifies whether keepalive should be used for HTTP/1.1 servers | ||
45 | that support it. The module level default for this option is 1 | ||
46 | (keepalive is enabled). | ||
47 | |||
48 | progress_obj = None | ||
49 | |||
50 | a class instance that supports the following methods: | ||
51 | po.start(filename, url, basename, length, text) | ||
52 | # length will be None if unknown | ||
53 | po.update(read) # read == bytes read so far | ||
54 | po.end() | ||
55 | |||
56 | text = None | ||
57 | |||
58 | specifies an alternative text item at the beginning of the progress | ||
59 | bar line. If not given, the basename of the file is used. | ||
60 | |||
61 | throttle = 1.0 | ||
62 | |||
63 | a number - if it's an int, it's the bytes/second throttle limit. | ||
64 | If it's a float, it is first multiplied by bandwidth. If throttle | ||
65 | == 0, throttling is disabled. If None, the module-level default | ||
66 | (which can be set on default_grabber.throttle) is used. See | ||
67 | BANDWIDTH THROTTLING for more information. | ||
68 | |||
69 | timeout = None | ||
70 | |||
71 | a positive float expressing the number of seconds to wait for socket | ||
72 | operations. If the value is None or 0.0, socket operations will block | ||
73 | forever. Setting this option causes urlgrabber to call the settimeout | ||
74 | method on the Socket object used for the request. See the Python | ||
75 | documentation on settimeout for more information. | ||
76 | http://www.python.org/doc/current/lib/socket-objects.html | ||
77 | |||
78 | bandwidth = 0 | ||
79 | |||
80 | the nominal max bandwidth in bytes/second. If throttle is a float | ||
81 | and bandwidth == 0, throttling is disabled. If None, the | ||
82 | module-level default (which can be set on | ||
83 | default_grabber.bandwidth) is used. See BANDWIDTH THROTTLING for | ||
84 | more information. | ||
85 | |||
86 | range = None | ||
87 | |||
88 | a tuple of the form (first_byte, last_byte) describing a byte | ||
89 | range to retrieve. Either or both of the values may set to | ||
90 | None. If first_byte is None, byte offset 0 is assumed. If | ||
91 | last_byte is None, the last byte available is assumed. Note that | ||
92 | the range specification is python-like in that (0,10) will yield | ||
93 | the first 10 bytes of the file. | ||
94 | |||
95 | If set to None, no range will be used. | ||
96 | |||
97 | reget = None [None|'simple'|'check_timestamp'] | ||
98 | |||
99 | whether to attempt to reget a partially-downloaded file. Reget | ||
100 | only applies to .urlgrab and (obviously) only if there is a | ||
101 | partially downloaded file. Reget has two modes: | ||
102 | |||
103 | 'simple' -- the local file will always be trusted. If there | ||
104 | are 100 bytes in the local file, then the download will always | ||
105 | begin 100 bytes into the requested file. | ||
106 | |||
107 | 'check_timestamp' -- the timestamp of the server file will be | ||
108 | compared to the timestamp of the local file. ONLY if the | ||
109 | local file is newer than or the same age as the server file | ||
110 | will reget be used. If the server file is newer, or the | ||
111 | timestamp is not returned, the entire file will be fetched. | ||
112 | |||
113 | NOTE: urlgrabber can do very little to verify that the partial | ||
114 | file on disk is identical to the beginning of the remote file. | ||
115 | You may want to either employ a custom "checkfunc" or simply avoid | ||
116 | using reget in situations where corruption is a concern. | ||
117 | |||
118 | user_agent = 'urlgrabber/VERSION' | ||
119 | |||
120 | a string, usually of the form 'AGENT/VERSION' that is provided to | ||
121 | HTTP servers in the User-agent header. The module level default | ||
122 | for this option is "urlgrabber/VERSION". | ||
123 | |||
124 | http_headers = None | ||
125 | |||
126 | a tuple of 2-tuples, each containing a header and value. These | ||
127 | will be used for http and https requests only. For example, you | ||
128 | can do | ||
129 | http_headers = (('Pragma', 'no-cache'),) | ||
130 | |||
131 | ftp_headers = None | ||
132 | |||
133 | this is just like http_headers, but will be used for ftp requests. | ||
134 | |||
135 | proxies = None | ||
136 | |||
137 | a dictionary that maps protocol schemes to proxy hosts. For | ||
138 | example, to use a proxy server on host "foo" port 3128 for http | ||
139 | and https URLs: | ||
140 | proxies={ 'http' : 'http://foo:3128', 'https' : 'http://foo:3128' } | ||
141 | note that proxy authentication information may be provided using | ||
142 | normal URL constructs: | ||
143 | proxies={ 'http' : 'http://user:host@foo:3128' } | ||
144 | Lastly, if proxies is None, the default environment settings will | ||
145 | be used. | ||
146 | |||
147 | prefix = None | ||
148 | |||
149 | a url prefix that will be prepended to all requested urls. For | ||
150 | example: | ||
151 | g = URLGrabber(prefix='http://foo.com/mirror/') | ||
152 | g.urlgrab('some/file.txt') | ||
153 | ## this will fetch 'http://foo.com/mirror/some/file.txt' | ||
154 | This option exists primarily to allow identical behavior to | ||
155 | MirrorGroup (and derived) instances. Note: a '/' will be inserted | ||
156 | if necessary, so you cannot specify a prefix that ends with a | ||
157 | partial file or directory name. | ||
158 | |||
159 | opener = None | ||
160 | |||
161 | Overrides the default urllib2.OpenerDirector provided to urllib2 | ||
162 | when making requests. This option exists so that the urllib2 | ||
163 | handler chain may be customized. Note that the range, reget, | ||
164 | proxy, and keepalive features require that custom handlers be | ||
165 | provided to urllib2 in order to function properly. If an opener | ||
166 | option is provided, no attempt is made by urlgrabber to ensure | ||
167 | chain integrity. You are responsible for ensuring that any | ||
168 | extension handlers are present if said features are required. | ||
169 | |||
170 | data = None | ||
171 | |||
172 | Only relevant for the HTTP family (and ignored for other | ||
173 | protocols), this allows HTTP POSTs. When the data kwarg is | ||
174 | present (and not None), an HTTP request will automatically become | ||
175 | a POST rather than GET. This is done by direct passthrough to | ||
176 | urllib2. If you use this, you may also want to set the | ||
177 | 'Content-length' and 'Content-type' headers with the http_headers | ||
178 | option. Note that python 2.2 handles the case of these | ||
179 | badly and if you do not use the proper case (shown here), your | ||
180 | values will be overridden with the defaults. | ||
181 | |||
182 | |||
183 | RETRY RELATED ARGUMENTS | ||
184 | |||
185 | retry = None | ||
186 | |||
187 | the number of times to retry the grab before bailing. If this is | ||
188 | zero, it will retry forever. This was intentional... really, it | ||
189 | was :). If this value is not supplied or is supplied but is None | ||
190 | retrying does not occur. | ||
191 | |||
192 | retrycodes = [-1,2,4,5,6,7] | ||
193 | |||
194 | a sequence of errorcodes (values of e.errno) for which it should | ||
195 | retry. See the doc on URLGrabError for more details on this. You | ||
196 | might consider modifying a copy of the default codes rather than | ||
197 | building yours from scratch so that if the list is extended in the | ||
198 | future (or one code is split into two) you can still enjoy the | ||
199 | benefits of the default list. You can do that with something like | ||
200 | this: | ||
201 | |||
202 | retrycodes = urlgrabber.grabber.URLGrabberOptions().retrycodes | ||
203 | if 12 not in retrycodes: | ||
204 | retrycodes.append(12) | ||
205 | |||
206 | checkfunc = None | ||
207 | |||
208 | a function to do additional checks. This defaults to None, which | ||
209 | means no additional checking. The function should simply return | ||
210 | on a successful check. It should raise URLGrabError on an | ||
211 | unsuccessful check. Raising of any other exception will be | ||
212 | considered immediate failure and no retries will occur. | ||
213 | |||
214 | If it raises URLGrabError, the error code will determine the retry | ||
215 | behavior. Negative error numbers are reserved for use by these | ||
216 | passed in functions, so you can use many negative numbers for | ||
217 | different types of failure. By default, -1 results in a retry, | ||
218 | but this can be customized with retrycodes. | ||
219 | |||
220 | If you simply pass in a function, it will be given exactly one | ||
221 | argument: a CallbackObject instance with the .url attribute | ||
222 | defined and either .filename (for urlgrab) or .data (for urlread). | ||
223 | For urlgrab, .filename is the name of the local file. For | ||
224 | urlread, .data is the actual string data. If you need other | ||
225 | arguments passed to the callback (program state of some sort), you | ||
226 | can do so like this: | ||
227 | |||
228 | checkfunc=(function, ('arg1', 2), {'kwarg': 3}) | ||
229 | |||
230 | if the downloaded file has filename /tmp/stuff, then this will | ||
231 | result in this call (for urlgrab): | ||
232 | |||
233 | function(obj, 'arg1', 2, kwarg=3) | ||
234 | # obj.filename = '/tmp/stuff' | ||
235 | # obj.url = 'http://foo.com/stuff' | ||
236 | |||
237 | NOTE: both the "args" tuple and "kwargs" dict must be present if | ||
238 | you use this syntax, but either (or both) can be empty. | ||
239 | |||
240 | failure_callback = None | ||
241 | |||
242 | The callback that gets called during retries when an attempt to | ||
243 | fetch a file fails. The syntax for specifying the callback is | ||
244 | identical to checkfunc, except for the attributes defined in the | ||
245 | CallbackObject instance. The attributes for failure_callback are: | ||
246 | |||
247 | exception = the raised exception | ||
248 | url = the url we're trying to fetch | ||
249 | tries = the number of tries so far (including this one) | ||
250 | retry = the value of the retry option | ||
251 | |||
252 | The callback is present primarily to inform the calling program of | ||
253 | the failure, but if it raises an exception (including the one it's | ||
254 | passed) that exception will NOT be caught and will therefore cause | ||
255 | future retries to be aborted. | ||
256 | |||
257 | The callback is called for EVERY failure, including the last one. | ||
258 | On the last try, the callback can raise an alternate exception, | ||
259 | but it cannot (without severe trickiness) prevent the exception | ||
260 | from being raised. | ||
261 | |||
262 | interrupt_callback = None | ||
263 | |||
264 | This callback is called if KeyboardInterrupt is received at any | ||
265 | point in the transfer. Basically, this callback can have three | ||
266 | impacts on the fetch process based on the way it exits: | ||
267 | |||
268 | 1) raise no exception: the current fetch will be aborted, but | ||
269 | any further retries will still take place | ||
270 | |||
271 | 2) raise a URLGrabError: if you're using a MirrorGroup, then | ||
272 | this will prompt a failover to the next mirror according to | ||
273 | the behavior of the MirrorGroup subclass. It is recommended | ||
274 | that you raise URLGrabError with code 15, 'user abort'. If | ||
275 | you are NOT using a MirrorGroup subclass, then this is the | ||
276 | same as (3). | ||
277 | |||
278 | 3) raise some other exception (such as KeyboardInterrupt), which | ||
279 | will not be caught at either the grabber or mirror levels. | ||
280 | That is, it will be raised up all the way to the caller. | ||
281 | |||
282 | This callback is very similar to failure_callback. They are | ||
283 | passed the same arguments, so you could use the same function for | ||
284 | both. | ||
285 | |||
286 | urlparser = URLParser() | ||
287 | |||
288 | The URLParser class handles pre-processing of URLs, including | ||
289 | auth-handling for user/pass encoded in http urls, file handing | ||
290 | (that is, filenames not sent as a URL), and URL quoting. If you | ||
291 | want to override any of this behavior, you can pass in a | ||
292 | replacement instance. See also the 'quote' option. | ||
293 | |||
294 | quote = None | ||
295 | |||
296 | Whether or not to quote the path portion of a url. | ||
297 | quote = 1 -> quote the URLs (they're not quoted yet) | ||
298 | quote = 0 -> do not quote them (they're already quoted) | ||
299 | quote = None -> guess what to do | ||
300 | |||
301 | This option only affects proper urls like 'file:///etc/passwd'; it | ||
302 | does not affect 'raw' filenames like '/etc/passwd'. The latter | ||
303 | will always be quoted as they are converted to URLs. Also, only | ||
304 | the path part of a url is quoted. If you need more fine-grained | ||
305 | control, you should probably subclass URLParser and pass it in via | ||
306 | the 'urlparser' option. | ||
307 | |||
308 | BANDWIDTH THROTTLING | ||
309 | |||
310 | urlgrabber supports throttling via two values: throttle and | ||
311 | bandwidth. Between the two, you can either specify an absolute | ||
312 | throttle threshold or specify a threshold as a fraction of maximum | ||
313 | available bandwidth. | ||
314 | |||
315 | throttle is a number - if it's an int, it's the bytes/second | ||
316 | throttle limit. If it's a float, it is first multiplied by | ||
317 | bandwidth. If throttle == 0, throttling is disabled. If None, the | ||
318 | module-level default (which can be set with set_throttle) is used. | ||
319 | |||
320 | bandwidth is the nominal max bandwidth in bytes/second. If throttle | ||
321 | is a float and bandwidth == 0, throttling is disabled. If None, the | ||
322 | module-level default (which can be set with set_bandwidth) is used. | ||
323 | |||
324 | THROTTLING EXAMPLES: | ||
325 | |||
326 | Lets say you have a 100 Mbps connection. This is (about) 10^8 bits | ||
327 | per second, or 12,500,000 Bytes per second. You have a number of | ||
328 | throttling options: | ||
329 | |||
330 | *) set_bandwidth(12500000); set_throttle(0.5) # throttle is a float | ||
331 | |||
332 | This will limit urlgrab to use half of your available bandwidth. | ||
333 | |||
334 | *) set_throttle(6250000) # throttle is an int | ||
335 | |||
336 | This will also limit urlgrab to use half of your available | ||
337 | bandwidth, regardless of what bandwidth is set to. | ||
338 | |||
339 | *) set_throttle(6250000); set_throttle(1.0) # float | ||
340 | |||
341 | Use half your bandwidth | ||
342 | |||
343 | *) set_throttle(6250000); set_throttle(2.0) # float | ||
344 | |||
345 | Use up to 12,500,000 Bytes per second (your nominal max bandwidth) | ||
346 | |||
347 | *) set_throttle(6250000); set_throttle(0) # throttle = 0 | ||
348 | |||
349 | Disable throttling - this is more efficient than a very large | ||
350 | throttle setting. | ||
351 | |||
352 | *) set_throttle(0); set_throttle(1.0) # throttle is float, bandwidth = 0 | ||
353 | |||
354 | Disable throttling - this is the default when the module is loaded. | ||
355 | |||
356 | SUGGESTED AUTHOR IMPLEMENTATION (THROTTLING) | ||
357 | |||
358 | While this is flexible, it's not extremely obvious to the user. I | ||
359 | suggest you implement a float throttle as a percent to make the | ||
360 | distinction between absolute and relative throttling very explicit. | ||
361 | |||
362 | Also, you may want to convert the units to something more convenient | ||
363 | than bytes/second, such as kbps or kB/s, etc. | ||
364 | |||
365 | """ | ||
366 | |||
367 | # $Id: grabber.py,v 1.48 2006/09/22 00:58:05 mstenner Exp $ | ||
368 | |||
369 | import os | ||
370 | import os.path | ||
371 | import sys | ||
372 | import urlparse | ||
373 | import rfc822 | ||
374 | import time | ||
375 | import string | ||
376 | import urllib | ||
377 | import urllib2 | ||
378 | from stat import * # S_* and ST_* | ||
379 | |||
380 | ######################################################################## | ||
381 | # MODULE INITIALIZATION | ||
382 | ######################################################################## | ||
# Pull the package-wide __version__ from the top-level urlgrabber
# package; fall back to an unknown marker if that fails (e.g. when this
# module is used standalone).
try:
    exec('from ' + (__name__.split('.'))[0] + ' import __version__')
except:
    __version__ = '???'

import sslfactory

# Shared basic-auth handler used for user:pass-style http(s) URLs.
auth_handler = urllib2.HTTPBasicAuthHandler( \
     urllib2.HTTPPasswordMgrWithDefaultRealm())

# i18n support is optional; fall back to an identity translation.
try:
    from i18n import _
except ImportError, msg:
    def _(st): return st

try:
    from httplib import HTTPException
except ImportError, msg:
    HTTPException = None

try:
    # This is a convenient way to make keepalive optional.
    # Just rename the module so it can't be imported.
    import keepalive
    from keepalive import HTTPHandler, HTTPSHandler
    have_keepalive = True
except ImportError, msg:
    have_keepalive = False

try:
    # add in range support conditionally too
    import byterange
    from byterange import HTTPRangeHandler, HTTPSRangeHandler, \
         FileRangeHandler, FTPRangeHandler, range_tuple_normalize, \
         range_tuple_to_header, RangeError
except ImportError, msg:
    range_handlers = ()
    RangeError = None
    have_range = 0
else:
    range_handlers = (HTTPRangeHandler(), HTTPSRangeHandler(),
                      FileRangeHandler(), FTPRangeHandler())
    have_range = 1


# check whether socket timeout support is available (Python >= 2.3)
import socket
try:
    TimeoutError = socket.timeout
    have_socket_timeout = True
except AttributeError:
    TimeoutError = None
    have_socket_timeout = False
436 | |||
437 | ######################################################################## | ||
438 | # functions for debugging output. These functions are here because they | ||
439 | # are also part of the module initialization. | ||
DEBUG = None
def set_logger(DBOBJ):
    """Install DBOBJ (a logging.Logger instance) as the debug logger.

    _init_default_logger calls this when the URLGRABBER_DEBUG
    environment variable is set, but a calling program may also use it
    to hook urlgrabber's (mostly debug-level) logging into its own
    logging setup.  The object is additionally pushed into the
    keepalive, byterange and sslfactory helper modules for any of them
    that are loaded and do not already have a logger.  The mirror
    module pulls this object in on import, so it must be pushed into
    that module manually.
    """
    global DEBUG
    DEBUG = DBOBJ
    # Propagate into the optional helper modules, but never clobber a
    # logger that was already installed there.
    if have_keepalive:
        if keepalive.DEBUG is None:
            keepalive.DEBUG = DBOBJ
    if have_range:
        if byterange.DEBUG is None:
            byterange.DEBUG = DBOBJ
    if sslfactory.DEBUG is None:
        sslfactory.DEBUG = DBOBJ
465 | |||
def _init_default_logger():
    '''Create the default debug logger from the URLGRABBER_DEBUG variable.

    The variable takes the form

      URLGRABBER_DEBUG=level,filename

    where "level" is either an integer or a log level name from the
    logging module (DEBUG, INFO, etc).  A level of zero or less
    disables logging.  "filename" names the log destination: "-" means
    stdout, and an empty or missing filename means stderr.  If the
    variable cannot be processed, or the logging module cannot be
    imported (python < 2.3), logging is disabled.  Examples:

      URLGRABBER_DEBUG=1,debug.txt  # log everything to debug.txt
      URLGRABBER_DEBUG=WARNING,-    # log warning and higher to stdout
      URLGRABBER_DEBUG=INFO         # log info and higher to stderr

    This function runs during module initialization and is not intended
    to be called from outside; it is a function only to keep the
    module-level namespace tidy and the code collected in one block.
    '''
    try:
        spec = os.environ['URLGRABBER_DEBUG'].split(',')
        import logging
        level = logging._levelNames.get(spec[0], int(spec[0]))
        if level < 1:
            raise ValueError()
        if len(spec) > 1:
            filename = spec[1]
        else:
            filename = ''
        if filename == '':
            handler = logging.StreamHandler(sys.stderr)
        elif filename == '-':
            handler = logging.StreamHandler(sys.stdout)
        else:
            handler = logging.FileHandler(filename)
        handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
        DBOBJ = logging.getLogger('urlgrabber')
        DBOBJ.addHandler(handler)
        DBOBJ.setLevel(level)
    except (KeyError, ImportError, ValueError):
        # Unset/ill-formed variable or no logging module: disable.
        DBOBJ = None
    set_logger(DBOBJ)

_init_default_logger()
512 | ######################################################################## | ||
513 | # END MODULE INITIALIZATION | ||
514 | ######################################################################## | ||
515 | |||
516 | |||
517 | |||
class URLGrabError(IOError):
    """
    URLGrabError error codes:

      URLGrabber error codes (0 -- 255)
        0    - everything looks good (you should never see this)
        1    - malformed url
        2    - local file doesn't exist
        3    - request for non-file local file (dir, etc)
        4    - IOError on fetch
        5    - OSError on fetch
        6    - no content length header when we expected one
        7    - HTTPException
        8    - Exceeded read limit (for urlread)
        9    - Requested byte range not satisfiable.
        10   - Byte range requested, but range support unavailable
        11   - Illegal reget mode
        12   - Socket timeout
        13   - malformed proxy url
        14   - HTTPError (includes .code and .exception attributes)
        15   - user abort

      MirrorGroup error codes (256 -- 511)
        256  - No more mirrors left to try

      Custom (non-builtin) classes derived from MirrorGroup (512 -- 767)
        [ this range reserved for application-specific error codes ]

      Retry codes (< 0)
        -1   - retry the download, unknown reason

    Note: to test which group a code is in, you can simply do integer
    division by 256: e.errno / 256

    Negative codes are reserved for use by functions passed in to
    retrygrab with checkfunc.  The value -1 is built in as a generic
    retry code and is already included in the retrycodes list.
    Therefore, you can create a custom check function that simply
    returns -1 and the fetch will be re-tried.  For more customized
    retries, you can use other negative numbers and include them in
    retrycodes.  This is nice for outputting useful messages about
    what failed.

    You can use these error codes like so:
      try: urlgrab(url)
      except URLGrabError, e:
         if e.errno == 3: ...
         # or
         print e.strerror
         # or simply
         print e  #### print '[Errno %i] %s' % (e.errno, e.strerror)
    """
    pass
571 | |||
class CallbackObject:
    """Generic attribute bag handed to user callbacks.

    urlgrabber stuffs whatever data a particular callback needs into an
    instance of this class, so every callback can share one prototype
    regardless of what is being passed back.  Any function accepting a
    callback SHOULD document which attributes it sets on this object.

    It is possible that this class will grow real functionality later.
    """
    def __init__(self, **kwargs):
        # copy every keyword argument onto the instance as an attribute
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
586 | |||
def urlgrab(url, filename=None, **kwargs):
    """grab the file at <url> and make a local copy at <filename>

    If filename is None, the basename of the url is used.  The return
    value is the name of the local file, which may differ from the
    passed-in filename when the copy_local kwarg == 0.

    See module documentation for a description of possible kwargs.
    """
    grabber = default_grabber
    return grabber.urlgrab(url, filename, **kwargs)
596 | |||
def urlopen(url, **kwargs):
    """open the url and return a file object

    When a progress object or throttle specification is in effect, a
    special wrapper object that honors them is returned; it can still
    be treated like any other file object.

    See module documentation for a description of possible kwargs.
    """
    grabber = default_grabber
    return grabber.urlopen(url, **kwargs)
606 | |||
def urlread(url, limit=None, **kwargs):
    """read the url into a string, up to 'limit' bytes

    An exception is raised when the limit is exceeded.  Note that this
    is NOT meant as "give me the first N bytes" -- it means "read the
    whole file into memory, but refuse to use more than N bytes".

    See module documentation for a description of possible kwargs.
    """
    grabber = default_grabber
    return grabber.urlread(url, limit, **kwargs)
616 | |||
617 | |||
618 | class URLParser: | ||
619 | """Process the URLs before passing them to urllib2. | ||
620 | |||
621 | This class does several things: | ||
622 | |||
623 | * add any prefix | ||
624 | * translate a "raw" file to a proper file: url | ||
625 | * handle any http or https auth that's encoded within the url | ||
626 | * quote the url | ||
627 | |||
628 | Only the "parse" method is called directly, and it calls sub-methods. | ||
629 | |||
630 | An instance of this class is held in the options object, which | ||
631 | means that it's easy to change the behavior by sub-classing and | ||
632 | passing the replacement in. It need only have a method like: | ||
633 | |||
634 | url, parts = urlparser.parse(url, opts) | ||
635 | """ | ||
636 | |||
637 | def parse(self, url, opts): | ||
638 | """parse the url and return the (modified) url and its parts | ||
639 | |||
640 | Note: a raw file WILL be quoted when it's converted to a URL. | ||
641 | However, other urls (ones which come with a proper scheme) may | ||
642 | or may not be quoted according to opts.quote | ||
643 | |||
644 | opts.quote = 1 --> quote it | ||
645 | opts.quote = 0 --> do not quote it | ||
646 | opts.quote = None --> guess | ||
647 | """ | ||
648 | quote = opts.quote | ||
649 | |||
650 | if opts.prefix: | ||
651 | url = self.add_prefix(url, opts.prefix) | ||
652 | |||
653 | parts = urlparse.urlparse(url) | ||
654 | (scheme, host, path, parm, query, frag) = parts | ||
655 | |||
656 | if not scheme or (len(scheme) == 1 and scheme in string.letters): | ||
657 | # if a scheme isn't specified, we guess that it's "file:" | ||
658 | if url[0] not in '/\\': url = os.path.abspath(url) | ||
659 | url = 'file:' + urllib.pathname2url(url) | ||
660 | parts = urlparse.urlparse(url) | ||
661 | quote = 0 # pathname2url quotes, so we won't do it again | ||
662 | |||
663 | if scheme in ['http', 'https']: | ||
664 | parts = self.process_http(parts) | ||
665 | |||
666 | if quote is None: | ||
667 | quote = self.guess_should_quote(parts) | ||
668 | if quote: | ||
669 | parts = self.quote(parts) | ||
670 | |||
671 | url = urlparse.urlunparse(parts) | ||
672 | return url, parts | ||
673 | |||
674 | def add_prefix(self, url, prefix): | ||
675 | if prefix[-1] == '/' or url[0] == '/': | ||
676 | url = prefix + url | ||
677 | else: | ||
678 | url = prefix + '/' + url | ||
679 | return url | ||
680 | |||
681 | def process_http(self, parts): | ||
682 | (scheme, host, path, parm, query, frag) = parts | ||
683 | |||
684 | if '@' in host and auth_handler: | ||
685 | try: | ||
686 | user_pass, host = host.split('@', 1) | ||
687 | if ':' in user_pass: | ||
688 | user, password = user_pass.split(':', 1) | ||
689 | except ValueError, e: | ||
690 | raise URLGrabError(1, _('Bad URL: %s') % url) | ||
691 | if DEBUG: DEBUG.info('adding HTTP auth: %s, XXXXXXXX', user) | ||
692 | auth_handler.add_password(None, host, user, password) | ||
693 | |||
694 | return (scheme, host, path, parm, query, frag) | ||
695 | |||
696 | def quote(self, parts): | ||
697 | """quote the URL | ||
698 | |||
699 | This method quotes ONLY the path part. If you need to quote | ||
700 | other parts, you should override this and pass in your derived | ||
701 | class. The other alternative is to quote other parts before | ||
702 | passing into urlgrabber. | ||
703 | """ | ||
704 | (scheme, host, path, parm, query, frag) = parts | ||
705 | path = urllib.quote(path) | ||
706 | return (scheme, host, path, parm, query, frag) | ||
707 | |||
708 | hexvals = '0123456789ABCDEF' | ||
709 | def guess_should_quote(self, parts): | ||
710 | """ | ||
711 | Guess whether we should quote a path. This amounts to | ||
712 | guessing whether it's already quoted. | ||
713 | |||
714 | find ' ' -> 1 | ||
715 | find '%' -> 1 | ||
716 | find '%XX' -> 0 | ||
717 | else -> 1 | ||
718 | """ | ||
719 | (scheme, host, path, parm, query, frag) = parts | ||
720 | if ' ' in path: | ||
721 | return 1 | ||
722 | ind = string.find(path, '%') | ||
723 | if ind > -1: | ||
724 | while ind > -1: | ||
725 | if len(path) < ind+3: | ||
726 | return 1 | ||
727 | code = path[ind+1:ind+3].upper() | ||
728 | if code[0] not in self.hexvals or \ | ||
729 | code[1] not in self.hexvals: | ||
730 | return 1 | ||
731 | ind = string.find(path, '%', ind+1) | ||
732 | return 0 | ||
733 | return 1 | ||
734 | |||
class URLGrabberOptions:
    """Class to ease kwargs handling.

    Options form a delegation chain: a derived instance stores only
    the overridden values and falls back to its delegate (via
    __getattr__) for everything else.
    """

    def __init__(self, delegate=None, **kwargs):
        """Initialize URLGrabberOptions object.
        Set default values for all options and then update options specified
        in kwargs.
        """
        self.delegate = delegate
        if delegate is None:
            # root of the delegation chain: establish every default
            self._set_defaults()
        self._set_attributes(**kwargs)

    def __getattr__(self, name):
        # only invoked for names missing on this instance; fall back to
        # the delegate chain so derived options inherit parent values
        if self.delegate and hasattr(self.delegate, name):
            return getattr(self.delegate, name)
        raise AttributeError, name

    def raw_throttle(self):
        """Calculate raw throttle value from throttle and bandwidth
        values.

        Returns 0 for "no throttling".  An integer throttle is used
        as-is; a float throttle is multiplied by self.bandwidth.
        """
        if self.throttle <= 0:
            return 0
        elif type(self.throttle) == type(0):
            return float(self.throttle)
        else: # throttle is a float
            return self.bandwidth * self.throttle

    def derive(self, **kwargs):
        """Create a derived URLGrabberOptions instance.
        This method creates a new instance and overrides the
        options specified in kwargs.
        """
        return URLGrabberOptions(delegate=self, **kwargs)

    def _set_attributes(self, **kwargs):
        """Update object attributes with those provided in kwargs."""
        self.__dict__.update(kwargs)
        if have_range and kwargs.has_key('range'):
            # normalize the supplied range value
            self.range = range_tuple_normalize(self.range)
        if not self.reget in [None, 'simple', 'check_timestamp']:
            raise URLGrabError(11, _('Illegal reget mode: %s') \
                               % (self.reget, ))

    def _set_defaults(self):
        """Set all options to their default values.
        When adding new options, make sure a default is
        provided here.
        """
        self.progress_obj = None
        self.throttle = 1.0
        self.bandwidth = 0
        self.retry = None
        self.retrycodes = [-1,2,4,5,6,7]
        self.checkfunc = None
        self.copy_local = 0
        self.close_connection = 0
        self.range = None
        self.user_agent = 'urlgrabber/%s' % __version__
        self.keepalive = 1
        self.proxies = None
        self.reget = None
        self.failure_callback = None
        self.interrupt_callback = None
        self.prefix = None
        self.opener = None
        self.cache_openers = True
        self.timeout = None
        self.text = None
        self.http_headers = None
        self.ftp_headers = None
        self.data = None
        self.urlparser = URLParser()
        self.quote = None
        self.ssl_ca_cert = None
        self.ssl_context = None
813 | |||
814 | class URLGrabber: | ||
815 | """Provides easy opening of URLs with a variety of options. | ||
816 | |||
817 | All options are specified as kwargs. Options may be specified when | ||
818 | the class is created and may be overridden on a per request basis. | ||
819 | |||
820 | New objects inherit default values from default_grabber. | ||
821 | """ | ||
822 | |||
823 | def __init__(self, **kwargs): | ||
824 | self.opts = URLGrabberOptions(**kwargs) | ||
825 | |||
826 | def _retry(self, opts, func, *args): | ||
827 | tries = 0 | ||
828 | while 1: | ||
829 | # there are only two ways out of this loop. The second has | ||
830 | # several "sub-ways" | ||
831 | # 1) via the return in the "try" block | ||
832 | # 2) by some exception being raised | ||
833 | # a) an excepton is raised that we don't "except" | ||
834 | # b) a callback raises ANY exception | ||
835 | # c) we're not retry-ing or have run out of retries | ||
836 | # d) the URLGrabError code is not in retrycodes | ||
837 | # beware of infinite loops :) | ||
838 | tries = tries + 1 | ||
839 | exception = None | ||
840 | retrycode = None | ||
841 | callback = None | ||
842 | if DEBUG: DEBUG.info('attempt %i/%s: %s', | ||
843 | tries, opts.retry, args[0]) | ||
844 | try: | ||
845 | r = apply(func, (opts,) + args, {}) | ||
846 | if DEBUG: DEBUG.info('success') | ||
847 | return r | ||
848 | except URLGrabError, e: | ||
849 | exception = e | ||
850 | callback = opts.failure_callback | ||
851 | retrycode = e.errno | ||
852 | except KeyboardInterrupt, e: | ||
853 | exception = e | ||
854 | callback = opts.interrupt_callback | ||
855 | |||
856 | if DEBUG: DEBUG.info('exception: %s', exception) | ||
857 | if callback: | ||
858 | if DEBUG: DEBUG.info('calling callback: %s', callback) | ||
859 | cb_func, cb_args, cb_kwargs = self._make_callback(callback) | ||
860 | obj = CallbackObject(exception=exception, url=args[0], | ||
861 | tries=tries, retry=opts.retry) | ||
862 | cb_func(obj, *cb_args, **cb_kwargs) | ||
863 | |||
864 | if (opts.retry is None) or (tries == opts.retry): | ||
865 | if DEBUG: DEBUG.info('retries exceeded, re-raising') | ||
866 | raise | ||
867 | |||
868 | if (retrycode is not None) and (retrycode not in opts.retrycodes): | ||
869 | if DEBUG: DEBUG.info('retrycode (%i) not in list %s, re-raising', | ||
870 | retrycode, opts.retrycodes) | ||
871 | raise | ||
872 | |||
873 | def urlopen(self, url, **kwargs): | ||
874 | """open the url and return a file object | ||
875 | If a progress object or throttle value specified when this | ||
876 | object was created, then a special file object will be | ||
877 | returned that supports them. The file object can be treated | ||
878 | like any other file object. | ||
879 | """ | ||
880 | opts = self.opts.derive(**kwargs) | ||
881 | (url,parts) = opts.urlparser.parse(url, opts) | ||
882 | def retryfunc(opts, url): | ||
883 | return URLGrabberFileObject(url, filename=None, opts=opts) | ||
884 | return self._retry(opts, retryfunc, url) | ||
885 | |||
886 | def urlgrab(self, url, filename=None, **kwargs): | ||
887 | """grab the file at <url> and make a local copy at <filename> | ||
888 | If filename is none, the basename of the url is used. | ||
889 | urlgrab returns the filename of the local file, which may be | ||
890 | different from the passed-in filename if copy_local == 0. | ||
891 | """ | ||
892 | opts = self.opts.derive(**kwargs) | ||
893 | (url,parts) = opts.urlparser.parse(url, opts) | ||
894 | (scheme, host, path, parm, query, frag) = parts | ||
895 | if filename is None: | ||
896 | filename = os.path.basename( urllib.unquote(path) ) | ||
897 | if scheme == 'file' and not opts.copy_local: | ||
898 | # just return the name of the local file - don't make a | ||
899 | # copy currently | ||
900 | path = urllib.url2pathname(path) | ||
901 | if host: | ||
902 | path = os.path.normpath('//' + host + path) | ||
903 | if not os.path.exists(path): | ||
904 | raise URLGrabError(2, | ||
905 | _('Local file does not exist: %s') % (path, )) | ||
906 | elif not os.path.isfile(path): | ||
907 | raise URLGrabError(3, | ||
908 | _('Not a normal file: %s') % (path, )) | ||
909 | elif not opts.range: | ||
910 | return path | ||
911 | |||
912 | def retryfunc(opts, url, filename): | ||
913 | fo = URLGrabberFileObject(url, filename, opts) | ||
914 | try: | ||
915 | fo._do_grab() | ||
916 | if not opts.checkfunc is None: | ||
917 | cb_func, cb_args, cb_kwargs = \ | ||
918 | self._make_callback(opts.checkfunc) | ||
919 | obj = CallbackObject() | ||
920 | obj.filename = filename | ||
921 | obj.url = url | ||
922 | apply(cb_func, (obj, )+cb_args, cb_kwargs) | ||
923 | finally: | ||
924 | fo.close() | ||
925 | return filename | ||
926 | |||
927 | return self._retry(opts, retryfunc, url, filename) | ||
928 | |||
929 | def urlread(self, url, limit=None, **kwargs): | ||
930 | """read the url into a string, up to 'limit' bytes | ||
931 | If the limit is exceeded, an exception will be thrown. Note | ||
932 | that urlread is NOT intended to be used as a way of saying | ||
933 | "I want the first N bytes" but rather 'read the whole file | ||
934 | into memory, but don't use too much' | ||
935 | """ | ||
936 | opts = self.opts.derive(**kwargs) | ||
937 | (url,parts) = opts.urlparser.parse(url, opts) | ||
938 | if limit is not None: | ||
939 | limit = limit + 1 | ||
940 | |||
941 | def retryfunc(opts, url, limit): | ||
942 | fo = URLGrabberFileObject(url, filename=None, opts=opts) | ||
943 | s = '' | ||
944 | try: | ||
945 | # this is an unfortunate thing. Some file-like objects | ||
946 | # have a default "limit" of None, while the built-in (real) | ||
947 | # file objects have -1. They each break the other, so for | ||
948 | # now, we just force the default if necessary. | ||
949 | if limit is None: s = fo.read() | ||
950 | else: s = fo.read(limit) | ||
951 | |||
952 | if not opts.checkfunc is None: | ||
953 | cb_func, cb_args, cb_kwargs = \ | ||
954 | self._make_callback(opts.checkfunc) | ||
955 | obj = CallbackObject() | ||
956 | obj.data = s | ||
957 | obj.url = url | ||
958 | apply(cb_func, (obj, )+cb_args, cb_kwargs) | ||
959 | finally: | ||
960 | fo.close() | ||
961 | return s | ||
962 | |||
963 | s = self._retry(opts, retryfunc, url, limit) | ||
964 | if limit and len(s) > limit: | ||
965 | raise URLGrabError(8, | ||
966 | _('Exceeded limit (%i): %s') % (limit, url)) | ||
967 | return s | ||
968 | |||
969 | def _make_callback(self, callback_obj): | ||
970 | if callable(callback_obj): | ||
971 | return callback_obj, (), {} | ||
972 | else: | ||
973 | return callback_obj | ||
974 | |||
# create the default URLGrabber used by urlXXX functions.
# NOTE: actual defaults are set in URLGrabberOptions
# This module-level singleton is what urlgrab/urlopen/urlread delegate
# to, and what the deprecated set_* functions mutate.
default_grabber = URLGrabber()
978 | |||
class URLGrabberFileObject:
    """This is a file-object wrapper that supports progress objects
    and throttling.

    This exists to solve the following problem: lets say you want to
    drop-in replace a normal open with urlopen.  You want to use a
    progress meter and/or throttling, but how do you do that without
    rewriting your code?  Answer: urlopen will return a wrapped file
    object that does the progress meter and-or throttling internally.
    """

    def __init__(self, url, filename, opts):
        """Open *url* immediately; *filename* may be None when only
        streaming (no local copy)."""
        self.url = url
        self.filename = filename
        self.opts = opts                  # a URLGrabberOptions instance
        self.fo = None                    # underlying file object, set by _do_open
        self._rbuf = ''                   # local read buffer
        self._rbufsize = 1024*8           # chunk size for buffered reads
        self._ttime = time.time()         # time of last throttle checkpoint
        self._tsize = 0                   # bytes counted since last checkpoint
        self._amount_read = 0             # total bytes read (includes reget offset)
        self._opener = None               # cached OpenerDirector
        self._do_open()

    def __getattr__(self, name):
        """This effectively allows us to wrap at the instance level.
        Any attribute not found in _this_ object will be searched for
        in self.fo.  This includes methods."""
        if hasattr(self.fo, name):
            return getattr(self.fo, name)
        raise AttributeError, name

    def _get_opener(self):
        """Build a urllib2 OpenerDirector based on request options.

        The opener is built once and cached (either on the instance or,
        when opts.cache_openers is set, in the module-level cache).
        """
        if self.opts.opener:
            return self.opts.opener
        elif self._opener is None:
            handlers = []
            need_keepalive_handler = (have_keepalive and self.opts.keepalive)
            need_range_handler = (range_handlers and \
                                  (self.opts.range or self.opts.reget))
            # if you specify a ProxyHandler when creating the opener
            # it _must_ come before all other handlers in the list or urllib2
            # chokes.
            if self.opts.proxies:
                handlers.append( CachedProxyHandler(self.opts.proxies) )

                # -------------------------------------------------------
                # OK, these next few lines are a serious kludge to get
                # around what I think is a bug in python 2.2's
                # urllib2.  The basic idea is that default handlers
                # get applied first.  If you override one (like a
                # proxy handler), then the default gets pulled, but
                # the replacement goes on the end.  In the case of
                # proxies, this means the normal handler picks it up
                # first and the proxy isn't used.  Now, this probably
                # only happened with ftp or non-keepalive http, so not
                # many folks saw it.  The simple approach to fixing it
                # is just to make sure you override the other
                # conflicting defaults as well.  I would LOVE to see
                # these go way or be dealt with more elegantly.  The
                # problem isn't there after 2.2.  -MDS 2005/02/24
                if not need_keepalive_handler:
                    handlers.append( urllib2.HTTPHandler() )
                if not need_range_handler:
                    handlers.append( urllib2.FTPHandler() )
                # -------------------------------------------------------

            ssl_factory = sslfactory.get_factory(self.opts.ssl_ca_cert,
                                                 self.opts.ssl_context)

            if need_keepalive_handler:
                handlers.append(HTTPHandler())
                handlers.append(HTTPSHandler(ssl_factory))
            if need_range_handler:
                handlers.extend( range_handlers )
            handlers.append( auth_handler )
            if self.opts.cache_openers:
                self._opener = CachedOpenerDirector(ssl_factory, *handlers)
            else:
                self._opener = ssl_factory.create_opener(*handlers)
            # OK, I don't like to do this, but otherwise, we end up with
            # TWO user-agent headers.
            self._opener.addheaders = []
        return self._opener

    def _do_open(self):
        """Issue the request and set (self.fo, self.hdr).

        Handles 'check_timestamp' reget mode (re-fetching from scratch
        when the server copy appears newer than the partial local
        file), and wires up progress/throttle support -- or bypasses
        this wrapper entirely when neither is in use.
        """
        opener = self._get_opener()

        req = urllib2.Request(self.url, self.opts.data) # build request object
        self._add_headers(req) # add misc headers that we need
        self._build_range(req) # take care of reget and byterange stuff

        fo, hdr = self._make_request(req, opener)
        if self.reget_time and self.opts.reget == 'check_timestamp':
            # do this if we have a local file with known timestamp AND
            # we're in check_timestamp reget mode.
            fetch_again = 0
            try:
                modified_tuple  = hdr.getdate_tz('last-modified')
                modified_stamp  = rfc822.mktime_tz(modified_tuple)
                if modified_stamp > self.reget_time: fetch_again = 1
            except (TypeError,):
                # no usable last-modified header: assume it changed
                fetch_again = 1

            if fetch_again:
                # the server version is newer than the (incomplete) local
                # version, so we should abandon the version we're getting
                # and fetch the whole thing again.
                fo.close()
                self.opts.reget = None
                del req.headers['Range']
                self._build_range(req)
                fo, hdr = self._make_request(req, opener)

        (scheme, host, path, parm, query, frag) = urlparse.urlparse(self.url)
        path = urllib.unquote(path)
        if not (self.opts.progress_obj or self.opts.raw_throttle() \
                or self.opts.timeout):
            # if we're not using the progress_obj, throttling, or timeout
            # we can get a performance boost by going directly to
            # the underlying fileobject for reads.
            self.read = fo.read
            if hasattr(fo, 'readline'):
                self.readline = fo.readline
        elif self.opts.progress_obj:
            try:
                length = int(hdr['Content-Length'])
                length = length + self._amount_read # Account for regets
            except (KeyError, ValueError, TypeError):
                length = None

            self.opts.progress_obj.start(str(self.filename),
                                         urllib.unquote(self.url),
                                         os.path.basename(path),
                                         length, text=self.opts.text)
            self.opts.progress_obj.update(0)
        (self.fo, self.hdr) = (fo, hdr)

    def _add_headers(self, req):
        """Add the user-agent header plus any configured per-scheme
        extra headers (http_headers / ftp_headers) to *req*."""
        if self.opts.user_agent:
            req.add_header('User-agent', self.opts.user_agent)
        try: req_type = req.get_type()
        except ValueError: req_type = None
        if self.opts.http_headers and req_type in ('http', 'https'):
            for h, v in self.opts.http_headers:
                req.add_header(h, v)
        if self.opts.ftp_headers and req_type == 'ftp':
            for h, v in self.opts.ftp_headers:
                req.add_header(h, v)

    def _build_range(self, req):
        """Work out reget/byte-range state and add a Range header when
        needed.  Sets self.reget_time, self.append and the initial
        self._amount_read for regets."""
        self.reget_time = None
        self.append = 0
        reget_length = 0
        rt = None
        if have_range and self.opts.reget and type(self.filename) == type(''):
            # we have reget turned on and we're dumping to a file
            try:
                s = os.stat(self.filename)
            except OSError:
                # no partial file yet: nothing to resume
                pass
            else:
                self.reget_time = s[ST_MTIME]
                reget_length = s[ST_SIZE]

                # Set initial length when regetting
                self._amount_read = reget_length

                rt = reget_length, ''
                self.append = 1

        if self.opts.range:
            if not have_range:
                raise URLGrabError(10, _('Byte range requested but range '\
                                         'support unavailable'))
            rt = self.opts.range
            # shift an explicit range by the number of bytes already on disk
            if rt[0]: rt = (rt[0] + reget_length, rt[1])

        if rt:
            header = range_tuple_to_header(rt)
            if header: req.add_header('Range', header)

    def _make_request(self, req, opener):
        """Open *req* through *opener*, mapping low-level failures to
        URLGrabError codes; returns (file_object, headers)."""
        try:
            if have_socket_timeout and self.opts.timeout:
                # temporarily install the per-request socket timeout
                old_to = socket.getdefaulttimeout()
                socket.setdefaulttimeout(self.opts.timeout)
                try:
                    fo = opener.open(req)
                finally:
                    socket.setdefaulttimeout(old_to)
            else:
                fo = opener.open(req)
            hdr = fo.info()
        except ValueError, e:
            raise URLGrabError(1, _('Bad URL: %s') % (e, ))
        except RangeError, e:
            raise URLGrabError(9, str(e))
        except urllib2.HTTPError, e:
            # keep the http status code and original exception available
            new_e = URLGrabError(14, str(e))
            new_e.code = e.code
            new_e.exception = e
            raise new_e
        except IOError, e:
            if hasattr(e, 'reason') and have_socket_timeout and \
                   isinstance(e.reason, TimeoutError):
                raise URLGrabError(12, _('Timeout: %s') % (e, ))
            else:
                raise URLGrabError(4, _('IOError: %s') % (e, ))
        except OSError, e:
            raise URLGrabError(5, _('OSError: %s') % (e, ))
        except HTTPException, e:
            raise URLGrabError(7, _('HTTP Exception (%s): %s') % \
                               (e.__class__.__name__, e))
        else:
            return (fo, hdr)

    def _do_grab(self):
        """dump the file to self.filename.

        Appends when self.append is set (reget), otherwise truncates.
        Also copies the server's last-modified time onto the local
        file when available.  Returns the number of bytes written.
        """
        if self.append: new_fo = open(self.filename, 'ab')
        else: new_fo = open(self.filename, 'wb')
        bs = 1024*8
        size = 0

        block = self.read(bs)
        size = size + len(block)
        while block:
            new_fo.write(block)
            block = self.read(bs)
            size = size + len(block)

        new_fo.close()
        try:
            modified_tuple = self.hdr.getdate_tz('last-modified')
            modified_stamp = rfc822.mktime_tz(modified_tuple)
            os.utime(self.filename, (modified_stamp, modified_stamp))
        except (TypeError,), e: pass # no parsable last-modified header

        return size

    def _fill_buffer(self, amt=None):
        """fill the buffer to contain at least 'amt' bytes by reading
        from the underlying file object.  If amt is None, then it will
        read until it gets nothing more.  It updates the progress meter
        and throttles after every self._rbufsize bytes."""
        # the _rbuf test is only in this first 'if' for speed.  It's not
        # logically necessary
        if self._rbuf and not amt is None:
            L = len(self._rbuf)
            if amt > L:
                amt = amt - L
            else:
                return

        # if we've made it here, then we don't have enough in the buffer
        # and we need to read more.

        buf = [self._rbuf]
        bufsize = len(self._rbuf)
        while amt is None or amt:
            # first, delay if necessary for throttling reasons
            if self.opts.raw_throttle():
                diff = self._tsize/self.opts.raw_throttle() - \
                       (time.time() - self._ttime)
                if diff > 0: time.sleep(diff)
                self._ttime = time.time()

            # now read some data, up to self._rbufsize
            if amt is None: readamount = self._rbufsize
            else:           readamount = min(amt, self._rbufsize)
            try:
                new = self.fo.read(readamount)
            except socket.error, e:
                raise URLGrabError(4, _('Socket Error: %s') % (e, ))
            except TimeoutError, e:
                raise URLGrabError(12, _('Timeout: %s') % (e, ))
            except IOError, e:
                raise URLGrabError(4, _('IOError: %s') %(e,))
            newsize = len(new)
            if not newsize: break # no more to read

            if amt: amt = amt - newsize
            buf.append(new)
            bufsize = bufsize + newsize
            self._tsize = newsize
            self._amount_read = self._amount_read + newsize
            if self.opts.progress_obj:
                self.opts.progress_obj.update(self._amount_read)

        self._rbuf = string.join(buf, '')
        return

    def read(self, amt=None):
        """Read and return up to 'amt' bytes (everything when amt is
        None), going through the throttled/metered buffer."""
        self._fill_buffer(amt)
        if amt is None:
            s, self._rbuf = self._rbuf, ''
        else:
            s, self._rbuf = self._rbuf[:amt], self._rbuf[amt:]
        return s

    def readline(self, limit=-1):
        """Return the next line, reading at most 'limit' bytes when
        limit > 0 (file-object readline semantics)."""
        i = string.find(self._rbuf, '\n')
        while i < 0 and not (0 < limit <= len(self._rbuf)):
            L = len(self._rbuf)
            self._fill_buffer(L + self._rbufsize)
            if not len(self._rbuf) > L: break
            i = string.find(self._rbuf, '\n', L)

        if i < 0: i = len(self._rbuf)
        else: i = i+1
        if 0 <= limit < len(self._rbuf): i = limit

        s, self._rbuf = self._rbuf[:i], self._rbuf[i:]
        return s

    def close(self):
        """Finish the progress meter and close the underlying file
        (and, when requested, the connection itself)."""
        if self.opts.progress_obj:
            self.opts.progress_obj.end(self._amount_read)
        self.fo.close()
        if self.opts.close_connection:
            # best-effort: not every file object has close_connection
            try: self.fo.close_connection()
            except: pass
1302 | |||
_handler_cache = []
def CachedOpenerDirector(ssl_factory = None, *handlers):
    """Return an opener for *handlers*, reusing a cached one when the
    exact same handler tuple has been seen before."""
    for cache_entry in _handler_cache:
        cached_handlers, cached_opener = cache_entry
        if cached_handlers != handlers:
            continue
        # re-attach each handler to the opener before handing it back
        for h in cached_opener.handlers:
            h.add_parent(cached_opener)
        return cached_opener
    if not ssl_factory:
        ssl_factory = sslfactory.get_factory()
    new_opener = ssl_factory.create_opener(*handlers)
    _handler_cache.append((handlers, new_opener))
    return new_opener
1315 | |||
_proxy_cache = []
def CachedProxyHandler(proxies):
    """Return a urllib2.ProxyHandler for *proxies*, reusing a cached
    handler when an equal proxy dict was seen before.

    Raises URLGrabError(13) for a proxy url with no scheme or host.
    """
    handler = None
    for cached_proxies, cached_handler in _proxy_cache:
        if cached_proxies == proxies:
            if DEBUG: DEBUG.debug('re-using proxy settings: %s', proxies)
            handler = cached_handler
            break

    if handler is None:
        # validate every proxy url before building the handler
        for k, v in proxies.items():
            utype, url = urllib.splittype(v)
            host, other = urllib.splithost(url)
            if (utype is None) or (host is None):
                raise URLGrabError(13, _('Bad proxy URL: %s') % v)

        if DEBUG: DEBUG.info('creating new proxy handler: %s', proxies)
        handler = urllib2.ProxyHandler(proxies)
        _proxy_cache.append((proxies, handler))
    return handler
1333 | |||
1334 | ##################################################################### | ||
1335 | # DEPRECATED FUNCTIONS | ||
def set_throttle(new_throttle):
    """Set the throttle value on the module-level default grabber.

    Deprecated.  Use: default_grabber.throttle = new_throttle
    """
    default_grabber.throttle = new_throttle
1339 | |||
def set_bandwidth(new_bandwidth):
    """Set the bandwidth value on the module-level default grabber.

    Deprecated.  Use: default_grabber.bandwidth = new_bandwidth
    """
    default_grabber.bandwidth = new_bandwidth
1343 | |||
def set_progress_obj(new_progress_obj):
    """Install a progress object on the module-level default grabber.

    Deprecated.  Use: default_grabber.progress_obj = new_progress_obj
    """
    default_grabber.progress_obj = new_progress_obj
1347 | |||
def set_user_agent(new_user_agent):
    """Deprecated module-level setter.
    Assign default_grabber.user_agent = new_user_agent directly instead."""
    setattr(default_grabber, 'user_agent', new_user_agent)
1351 | |||
def retrygrab(url, filename=None, copy_local=0, close_connection=0,
              progress_obj=None, throttle=None, bandwidth=None,
              numtries=3, retrycodes=None, checkfunc=None):
    """Deprecated. Use: urlgrab() with the retry arg instead.

    Grab url with up to numtries attempts, retrying on the URLGrabError
    error codes in retrycodes (default: [-1, 2, 4, 5, 6, 7]).  All the
    other arguments are forwarded to urlgrab() unchanged.
    """
    # BUGFIX: retrycodes previously defaulted to a shared mutable list
    # ([-1,2,4,5,6,7]); any mutation of it downstream would leak into
    # later calls.  Use a None sentinel and build a fresh list per call.
    if retrycodes is None:
        retrycodes = [-1, 2, 4, 5, 6, 7]
    kwargs = {'copy_local' : copy_local,
              'close_connection' : close_connection,
              'progress_obj' : progress_obj,
              'throttle' : throttle,
              'bandwidth' : bandwidth,
              'retry' : numtries,
              'retrycodes' : retrycodes,
              'checkfunc' : checkfunc
              }
    return urlgrab(url, filename, **kwargs)
1366 | |||
1367 | |||
1368 | ##################################################################### | ||
1369 | # TESTING | ||
def _main_test():
    """Command-line smoke test: grab one URL to a file with throttling
    and an optional text progress meter.

    Usage: script <url> <filename> [key=int ...]; extra key=val args
    become integer keyword arguments for urlgrab().
    """
    import sys
    try: url, filename = sys.argv[1:3]
    except ValueError:
        print 'usage:', sys.argv[0], \
              '<url> <filename> [copy_local=0|1] [close_connection=0|1]'
        sys.exit()

    # remaining argv entries are key=value pairs, coerced to ints
    kwargs = {}
    for a in sys.argv[3:]:
        k, v = string.split(a, '=', 1)
        kwargs[k] = int(v)

    set_throttle(1.0)
    set_bandwidth(32 * 1024)
    print "throttle: %s, throttle bandwidth: %s B/s" % (default_grabber.throttle,
                                                        default_grabber.bandwidth)

    # progress meter is optional -- only used if the module is present
    try: from progress import text_progress_meter
    except ImportError, e: pass
    else: kwargs['progress_obj'] = text_progress_meter()

    try: name = apply(urlgrab, (url, filename), kwargs)
    except URLGrabError, e: print e
    else: print 'LOCAL FILE:', name
1395 | |||
1396 | |||
def _retry_test():
    """Command-line test of retrygrab(): grabs a URL with a checkfunc
    that randomly forces a retryable or an immediate fatal failure."""
    import sys
    try: url, filename = sys.argv[1:3]
    except ValueError:
        print 'usage:', sys.argv[0], \
              '<url> <filename> [copy_local=0|1] [close_connection=0|1]'
        sys.exit()

    # remaining argv entries are key=value pairs, coerced to ints
    kwargs = {}
    for a in sys.argv[3:]:
        k, v = string.split(a, '=', 1)
        kwargs[k] = int(v)

    # progress meter is optional -- only used if the module is present
    try: from progress import text_progress_meter
    except ImportError, e: pass
    else: kwargs['progress_obj'] = text_progress_meter()

    def cfunc(filename, hello, there='foo'):
        # demo checkfunc: ~50% retryable failure (-1), ~25% fatal (-2)
        print hello, there
        import random
        rnum = random.random()
        if rnum < .5:
            print 'forcing retry'
            raise URLGrabError(-1, 'forcing retry')
        if rnum < .75:
            print 'forcing failure'
            raise URLGrabError(-2, 'forcing immediate failure')
        print 'success'
        return

    kwargs['checkfunc'] = (cfunc, ('hello',), {'there':'there'})
    try: name = apply(retrygrab, (url, filename), kwargs)
    except URLGrabError, e: print e
    else: print 'LOCAL FILE:', name
1431 | |||
def _file_object_test(filename=None):
    """Round-trip test of URLGrabberFileObject: read *filename* (this
    module by default) through the wrapper with each read style and
    check the output matches the raw file contents."""
    import random, cStringIO, sys
    if filename is None:
        filename = __file__
    print 'using file "%s" for comparisons' % filename
    fo = open(filename)
    s_input = fo.read()
    fo.close()

    # run every read-style test against a fresh wrapper + output buffer
    for testfunc in [_test_file_object_smallread,
                     _test_file_object_readall,
                     _test_file_object_readline,
                     _test_file_object_readlines]:
        fo_input = cStringIO.StringIO(s_input)
        fo_output = cStringIO.StringIO()
        wrapper = URLGrabberFileObject(fo_input, None, 0)
        print 'testing %-30s ' % testfunc.__name__,
        testfunc(wrapper, fo_output)
        s_output = fo_output.getvalue()
        if s_output == s_input: print 'passed'
        else: print 'FAILED'
1453 | |||
def _test_file_object_smallread(wrapper, fo_output):
    # Copy wrapper to fo_output in fixed 23-byte chunks until read()
    # returns an empty string (EOF).
    while True:
        chunk = wrapper.read(23)
        fo_output.write(chunk)
        if not chunk:
            return
1459 | |||
def _test_file_object_readall(wrapper, fo_output):
    # A single unbounded read() must return the whole remaining stream.
    fo_output.write(wrapper.read())
1463 | |||
def _test_file_object_readline(wrapper, fo_output):
    # Copy line by line; readline() returns '' only at EOF.
    while True:
        line = wrapper.readline()
        fo_output.write(line)
        if not line:
            return
1469 | |||
def _test_file_object_readlines(wrapper, fo_output):
    # readlines() yields the whole remaining content as a line list;
    # joining with '' reconstructs the original byte stream.
    lines = wrapper.readlines()
    fo_output.write(''.join(lines))
1473 | |||
if __name__ == '__main__':
    # Self-tests: basic grab, retry handling, then the file-object
    # wrapper (which compares against a local file literally named 'test').
    _main_test()
    _retry_test()
    _file_object_test('test')
diff --git a/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/keepalive.py b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/keepalive.py new file mode 100644 index 0000000000..71393e2b8d --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/keepalive.py | |||
@@ -0,0 +1,617 @@ | |||
1 | # This library is free software; you can redistribute it and/or | ||
2 | # modify it under the terms of the GNU Lesser General Public | ||
3 | # License as published by the Free Software Foundation; either | ||
4 | # version 2.1 of the License, or (at your option) any later version. | ||
5 | # | ||
6 | # This library is distributed in the hope that it will be useful, | ||
7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
9 | # Lesser General Public License for more details. | ||
10 | # | ||
11 | # You should have received a copy of the GNU Lesser General Public | ||
12 | # License along with this library; if not, write to the | ||
13 | # Free Software Foundation, Inc., | ||
14 | # 59 Temple Place, Suite 330, | ||
15 | # Boston, MA 02111-1307 USA | ||
16 | |||
17 | # This file is part of urlgrabber, a high-level cross-protocol url-grabber | ||
18 | # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko | ||
19 | |||
20 | """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive. | ||
21 | |||
22 | >>> import urllib2 | ||
23 | >>> from keepalive import HTTPHandler | ||
24 | >>> keepalive_handler = HTTPHandler() | ||
25 | >>> opener = urllib2.build_opener(keepalive_handler) | ||
26 | >>> urllib2.install_opener(opener) | ||
27 | >>> | ||
28 | >>> fo = urllib2.urlopen('http://www.python.org') | ||
29 | |||
30 | If a connection to a given host is requested, and all of the existing | ||
31 | connections are still in use, another connection will be opened. If | ||
32 | the handler tries to use an existing connection but it fails in some | ||
33 | way, it will be closed and removed from the pool. | ||
34 | |||
35 | To remove the handler, simply re-run build_opener with no arguments, and | ||
36 | install that opener. | ||
37 | |||
38 | You can explicitly close connections by using the close_connection() | ||
39 | method of the returned file-like object (described below) or you can | ||
40 | use the handler methods: | ||
41 | |||
42 | close_connection(host) | ||
43 | close_all() | ||
44 | open_connections() | ||
45 | |||
46 | NOTE: using the close_connection and close_all methods of the handler | ||
47 | should be done with care when using multiple threads. | ||
48 | * there is nothing that prevents another thread from creating new | ||
49 | connections immediately after connections are closed | ||
50 | * no checks are done to prevent in-use connections from being closed | ||
51 | |||
52 | >>> keepalive_handler.close_all() | ||
53 | |||
54 | EXTRA ATTRIBUTES AND METHODS | ||
55 | |||
56 | Upon a status of 200, the object returned has a few additional | ||
57 | attributes and methods, which should not be used if you want to | ||
58 | remain consistent with the normal urllib2-returned objects: | ||
59 | |||
60 | close_connection() - close the connection to the host | ||
61 | readlines() - you know, readlines() | ||
  status - the return status (i.e. 404)
  reason - English translation of status (i.e. 'File not found')
64 | |||
65 | If you want the best of both worlds, use this inside an | ||
66 | AttributeError-catching try: | ||
67 | |||
68 | >>> try: status = fo.status | ||
69 | >>> except AttributeError: status = None | ||
70 | |||
71 | Unfortunately, these are ONLY there if status == 200, so it's not | ||
72 | easy to distinguish between non-200 responses. The reason is that | ||
73 | urllib2 tries to do clever things with error codes 301, 302, 401, | ||
74 | and 407, and it wraps the object upon return. | ||
75 | |||
76 | For python versions earlier than 2.4, you can avoid this fancy error | ||
77 | handling by setting the module-level global HANDLE_ERRORS to zero. | ||
78 | You see, prior to 2.4, it's the HTTP Handler's job to determine what | ||
79 | to handle specially, and what to just pass up. HANDLE_ERRORS == 0 | ||
80 | means "pass everything up". In python 2.4, however, this job no | ||
81 | longer belongs to the HTTP Handler and is now done by a NEW handler, | ||
82 | HTTPErrorProcessor. Here's the bottom line: | ||
83 | |||
84 | python version < 2.4 | ||
85 | HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as | ||
86 | errors | ||
87 | HANDLE_ERRORS == 0 pass everything up, error processing is | ||
88 | left to the calling code | ||
89 | python version >= 2.4 | ||
90 | HANDLE_ERRORS == 1 pass up 200, treat the rest as errors | ||
91 | HANDLE_ERRORS == 0 (default) pass everything up, let the | ||
92 | other handlers (specifically, | ||
93 | HTTPErrorProcessor) decide what to do | ||
94 | |||
95 | In practice, setting the variable either way makes little difference | ||
96 | in python 2.4, so for the most consistent behavior across versions, | ||
97 | you probably just want to use the defaults, which will give you | ||
98 | exceptions on errors. | ||
99 | |||
100 | """ | ||
101 | |||
102 | # $Id: keepalive.py,v 1.16 2006/09/22 00:58:05 mstenner Exp $ | ||
103 | |||
104 | import urllib2 | ||
105 | import httplib | ||
106 | import socket | ||
107 | import thread | ||
108 | |||
109 | DEBUG = None | ||
110 | |||
111 | import sslfactory | ||
112 | |||
113 | import sys | ||
114 | if sys.version_info < (2, 4): HANDLE_ERRORS = 1 | ||
115 | else: HANDLE_ERRORS = 0 | ||
116 | |||
class ConnectionManager:
    """Thread-safe registry of keepalive connections.

    Tracks, under a single lock:
      * which connections belong to which host (_hostmap)
      * which host a given connection belongs to (_connmap)
      * whether each connection is idle/"ready" (_readymap)
    """
    def __init__(self):
        self._lock = thread.allocate_lock()  # guards the three maps below
        self._hostmap = {} # map hosts to a list of connections
        self._connmap = {} # map connections to host
        self._readymap = {} # map connection to ready state

    def add(self, host, connection, ready):
        # Register a new connection for host; ready marks it idle (1)
        # or in use (0).
        self._lock.acquire()
        try:
            if not self._hostmap.has_key(host): self._hostmap[host] = []
            self._hostmap[host].append(connection)
            self._connmap[connection] = host
            self._readymap[connection] = ready
        finally:
            self._lock.release()

    def remove(self, connection):
        # Forget a connection entirely; silently a no-op if unknown.
        self._lock.acquire()
        try:
            try:
                host = self._connmap[connection]
            except KeyError:
                pass
            else:
                del self._connmap[connection]
                del self._readymap[connection]
                self._hostmap[host].remove(connection)
                # drop the host entry once its last connection is gone
                if not self._hostmap[host]: del self._hostmap[host]
        finally:
            self._lock.release()

    def set_ready(self, connection, ready):
        # Flag a known connection idle (1) or busy (0).
        # NOTE(review): runs without self._lock -- a single dict store
        # is presumably safe under CPython, but verify before porting.
        try: self._readymap[connection] = ready
        except KeyError: pass

    def get_ready_conn(self, host):
        # Return an idle connection for host (atomically marking it
        # busy), or None when none is available.
        conn = None
        self._lock.acquire()
        try:
            if self._hostmap.has_key(host):
                for c in self._hostmap[host]:
                    if self._readymap[c]:
                        self._readymap[c] = 0
                        conn = c
                        break
        finally:
            self._lock.release()
        return conn

    def get_all(self, host=None):
        # With host: a copy of that host's connection list (may be
        # empty).  Without: a shallow copy of the host->connections map.
        if host:
            return list(self._hostmap.get(host, []))
        else:
            return dict(self._hostmap)
176 | |||
177 | class KeepAliveHandler: | ||
178 | def __init__(self): | ||
179 | self._cm = ConnectionManager() | ||
180 | |||
181 | #### Connection Management | ||
182 | def open_connections(self): | ||
183 | """return a list of connected hosts and the number of connections | ||
184 | to each. [('foo.com:80', 2), ('bar.org', 1)]""" | ||
185 | return [(host, len(li)) for (host, li) in self._cm.get_all().items()] | ||
186 | |||
187 | def close_connection(self, host): | ||
188 | """close connection(s) to <host> | ||
189 | host is the host:port spec, as in 'www.cnn.com:8080' as passed in. | ||
190 | no error occurs if there is no connection to that host.""" | ||
191 | for h in self._cm.get_all(host): | ||
192 | self._cm.remove(h) | ||
193 | h.close() | ||
194 | |||
195 | def close_all(self): | ||
196 | """close all open connections""" | ||
197 | for host, conns in self._cm.get_all().items(): | ||
198 | for h in conns: | ||
199 | self._cm.remove(h) | ||
200 | h.close() | ||
201 | |||
202 | def _request_closed(self, request, host, connection): | ||
203 | """tells us that this request is now closed and the the | ||
204 | connection is ready for another request""" | ||
205 | self._cm.set_ready(connection, 1) | ||
206 | |||
207 | def _remove_connection(self, host, connection, close=0): | ||
208 | if close: connection.close() | ||
209 | self._cm.remove(connection) | ||
210 | |||
211 | #### Transaction Execution | ||
212 | def do_open(self, req): | ||
213 | host = req.get_host() | ||
214 | if not host: | ||
215 | raise urllib2.URLError('no host given') | ||
216 | |||
217 | try: | ||
218 | h = self._cm.get_ready_conn(host) | ||
219 | while h: | ||
220 | r = self._reuse_connection(h, req, host) | ||
221 | |||
222 | # if this response is non-None, then it worked and we're | ||
223 | # done. Break out, skipping the else block. | ||
224 | if r: break | ||
225 | |||
226 | # connection is bad - possibly closed by server | ||
227 | # discard it and ask for the next free connection | ||
228 | h.close() | ||
229 | self._cm.remove(h) | ||
230 | h = self._cm.get_ready_conn(host) | ||
231 | else: | ||
232 | # no (working) free connections were found. Create a new one. | ||
233 | h = self._get_connection(host) | ||
234 | if DEBUG: DEBUG.info("creating new connection to %s (%d)", | ||
235 | host, id(h)) | ||
236 | self._cm.add(host, h, 0) | ||
237 | self._start_transaction(h, req) | ||
238 | r = h.getresponse() | ||
239 | except (socket.error, httplib.HTTPException), err: | ||
240 | raise urllib2.URLError(err) | ||
241 | |||
242 | # if not a persistent connection, don't try to reuse it | ||
243 | if r.will_close: self._cm.remove(h) | ||
244 | |||
245 | if DEBUG: DEBUG.info("STATUS: %s, %s", r.status, r.reason) | ||
246 | r._handler = self | ||
247 | r._host = host | ||
248 | r._url = req.get_full_url() | ||
249 | r._connection = h | ||
250 | r.code = r.status | ||
251 | r.headers = r.msg | ||
252 | r.msg = r.reason | ||
253 | |||
254 | if r.status == 200 or not HANDLE_ERRORS: | ||
255 | return r | ||
256 | else: | ||
257 | return self.parent.error('http', req, r, | ||
258 | r.status, r.msg, r.headers) | ||
259 | |||
260 | def _reuse_connection(self, h, req, host): | ||
261 | """start the transaction with a re-used connection | ||
262 | return a response object (r) upon success or None on failure. | ||
263 | This DOES not close or remove bad connections in cases where | ||
264 | it returns. However, if an unexpected exception occurs, it | ||
265 | will close and remove the connection before re-raising. | ||
266 | """ | ||
267 | try: | ||
268 | self._start_transaction(h, req) | ||
269 | r = h.getresponse() | ||
270 | # note: just because we got something back doesn't mean it | ||
271 | # worked. We'll check the version below, too. | ||
272 | except (socket.error, httplib.HTTPException): | ||
273 | r = None | ||
274 | except: | ||
275 | # adding this block just in case we've missed | ||
276 | # something we will still raise the exception, but | ||
277 | # lets try and close the connection and remove it | ||
278 | # first. We previously got into a nasty loop | ||
279 | # where an exception was uncaught, and so the | ||
280 | # connection stayed open. On the next try, the | ||
281 | # same exception was raised, etc. The tradeoff is | ||
282 | # that it's now possible this call will raise | ||
283 | # a DIFFERENT exception | ||
284 | if DEBUG: DEBUG.error("unexpected exception - closing " + \ | ||
285 | "connection to %s (%d)", host, id(h)) | ||
286 | self._cm.remove(h) | ||
287 | h.close() | ||
288 | raise | ||
289 | |||
290 | if r is None or r.version == 9: | ||
291 | # httplib falls back to assuming HTTP 0.9 if it gets a | ||
292 | # bad header back. This is most likely to happen if | ||
293 | # the socket has been closed by the server since we | ||
294 | # last used the connection. | ||
295 | if DEBUG: DEBUG.info("failed to re-use connection to %s (%d)", | ||
296 | host, id(h)) | ||
297 | r = None | ||
298 | else: | ||
299 | if DEBUG: DEBUG.info("re-using connection to %s (%d)", host, id(h)) | ||
300 | |||
301 | return r | ||
302 | |||
303 | def _start_transaction(self, h, req): | ||
304 | try: | ||
305 | if req.has_data(): | ||
306 | data = req.get_data() | ||
307 | h.putrequest('POST', req.get_selector()) | ||
308 | if not req.headers.has_key('Content-type'): | ||
309 | h.putheader('Content-type', | ||
310 | 'application/x-www-form-urlencoded') | ||
311 | if not req.headers.has_key('Content-length'): | ||
312 | h.putheader('Content-length', '%d' % len(data)) | ||
313 | else: | ||
314 | h.putrequest('GET', req.get_selector()) | ||
315 | except (socket.error, httplib.HTTPException), err: | ||
316 | raise urllib2.URLError(err) | ||
317 | |||
318 | for args in self.parent.addheaders: | ||
319 | h.putheader(*args) | ||
320 | for k, v in req.headers.items(): | ||
321 | h.putheader(k, v) | ||
322 | h.endheaders() | ||
323 | if req.has_data(): | ||
324 | h.send(data) | ||
325 | |||
326 | def _get_connection(self, host): | ||
327 | return NotImplementedError | ||
328 | |||
class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
    # Keepalive-aware replacement for urllib2's plain HTTP handler.
    def __init__(self):
        KeepAliveHandler.__init__(self)

    def http_open(self, req):
        # urllib2 entry point for http:// URLs
        return self.do_open(req)

    def _get_connection(self, host):
        # new plain-HTTP connection (uses the buffering HTTPResponse below)
        return HTTPConnection(host)
338 | |||
class HTTPSHandler(KeepAliveHandler, urllib2.HTTPSHandler):
    # Keepalive-aware replacement for urllib2's HTTPS handler.
    def __init__(self, ssl_factory=None):
        KeepAliveHandler.__init__(self)
        if not ssl_factory:
            ssl_factory = sslfactory.get_factory()
        self._ssl_factory = ssl_factory

    def https_open(self, req):
        # urllib2 entry point for https:// URLs
        return self.do_open(req)

    def _get_connection(self, host):
        # new SSL connection obtained from the configured factory
        return self._ssl_factory.get_https_connection(host)
351 | |||
class HTTPResponse(httplib.HTTPResponse):
    # we need to subclass HTTPResponse in order to
    # 1) add readline() and readlines() methods
    # 2) add close_connection() methods
    # 3) add info() and geturl() methods

    # in order to add readline(), read must be modified to deal with a
    # buffer.  example: readline must read a buffer and then spit back
    # one line at a time.  The only real alternative is to read one
    # BYTE at a time (ick).  Once something has been read, it can't be
    # put back (ok, maybe it can, but that's even uglier than this),
    # so if you THEN do a normal read, you must first take stuff from
    # the buffer.

    # the read method wraps the original to accommodate buffering,
    # although read() never adds to the buffer.
    # Both readline and readlines have been stolen with almost no
    # modification from socket.py


    def __init__(self, sock, debuglevel=0, strict=0, method=None):
        if method: # the httplib in python 2.3 uses the method arg
            httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
        else: # 2.2 doesn't
            httplib.HTTPResponse.__init__(self, sock, debuglevel)
        self.fileno = sock.fileno
        self.code = None
        self._rbuf = ''         # readline() look-ahead buffer
        self._rbufsize = 8096   # chunk size used to refill _rbuf
        self._handler = None # inserted by the handler later
        self._host = None    # (same)
        self._url = None     # (same)
        self._connection = None # (same)

    # keep a handle on the unbuffered base-class read
    _raw_read = httplib.HTTPResponse.read

    def close(self):
        # Close our file object and tell the owning handler this
        # connection is free for another request (socket stays open).
        if self.fp:
            self.fp.close()
            self.fp = None
        if self._handler:
            self._handler._request_closed(self, self._host,
                                          self._connection)

    def close_connection(self):
        # Really close: remove the connection from the pool AND close
        # the underlying socket.
        self._handler._remove_connection(self._host, self._connection, close=1)
        self.close()

    def info(self):
        # mimic the urllib2 file-like object API
        return self.headers

    def geturl(self):
        return self._url

    def read(self, amt=None):
        # the _rbuf test is only in this first if for speed.  It's not
        # logically necessary
        if self._rbuf and not amt is None:
            L = len(self._rbuf)
            if amt > L:
                # request exceeds the buffer: fall through and read the
                # remainder (amt reduced by what the buffer provides)
                amt -= L
            else:
                # request fully satisfied from the buffer
                s = self._rbuf[:amt]
                self._rbuf = self._rbuf[amt:]
                return s

        # drain the buffer, then read the rest from the socket
        s = self._rbuf + self._raw_read(amt)
        self._rbuf = ''
        return s

    def readline(self, limit=-1):
        # Adapted from socket.py: fill _rbuf until it contains a
        # newline, EOF is hit, or `limit` bytes are buffered; then
        # split one line (or `limit` bytes) off the front.
        data = ""
        i = self._rbuf.find('\n')
        while i < 0 and not (0 < limit <= len(self._rbuf)):
            new = self._raw_read(self._rbufsize)
            if not new: break
            i = new.find('\n')
            if i >= 0: i = i + len(self._rbuf)
            self._rbuf = self._rbuf + new
        if i < 0: i = len(self._rbuf)
        else: i = i+1
        if 0 <= limit < len(self._rbuf): i = limit
        data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
        return data

    def readlines(self, sizehint = 0):
        # Read lines until EOF, or until at least `sizehint` bytes have
        # been accumulated (when sizehint is nonzero).
        total = 0
        list = []
        while 1:
            line = self.readline()
            if not line: break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list
448 | |||
449 | |||
class HTTPConnection(httplib.HTTPConnection):
    # use the modified response class (adds buffering, readline/readlines,
    # close_connection, info and geturl)
    response_class = HTTPResponse
453 | |||
class HTTPSConnection(httplib.HTTPSConnection):
    # use the modified (buffering) response class here as well
    response_class = HTTPResponse
456 | |||
457 | ######################################################################### | ||
458 | ##### TEST FUNCTIONS | ||
459 | ######################################################################### | ||
460 | |||
def error_handler(url):
    """Fetch url through a keepalive opener twice -- once with fancy
    error handling on, once off -- printing status/reason each time,
    then report and close the open connections."""
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            foo = fo.read()
            fo.close()
            # status/reason only exist on keepalive 200 responses
            try: status, reason = fo.status, fo.reason
            except AttributeError: status, reason = None, None
        except IOError, e:
            print " EXCEPTION: %s" % e
            raise
        else:
            print " status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", hosts
    keepalive_handler.close_all()
486 | |||
def continuity(url):
    """Download url three ways (plain urllib2, keepalive read(), and
    keepalive readline()) and print an md5 digest of each payload so
    they can be compared for corruption."""
    import md5  # NOTE(review): md5 module is deprecated; hashlib replaces it
    format = '%25s: %s'

    # first fetch the file with the normal http handler
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    fo = urllib2.urlopen(url)
    foo = fo.read()
    fo.close()
    m = md5.new(foo)
    print format % ('normal urllib', m.hexdigest())

    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)

    fo = urllib2.urlopen(url)
    foo = fo.read()
    fo.close()
    m = md5.new(foo)
    print format % ('keepalive read', m.hexdigest())

    # once more, this time line by line via readline()
    fo = urllib2.urlopen(url)
    foo = ''
    while 1:
        f = fo.readline()
        if f: foo = foo + f
        else: break
    fo.close()
    m = md5.new(foo)
    print format % ('keepalive readline', m.hexdigest())
519 | |||
def comp(N, url):
    """Time N fetches of url with the standard handlers versus the
    keepalive handler and print the improvement factor."""
    print ' making %i connections to:\n %s' % (N, url)

    sys.stdout.write(' first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print ' TIME: %.3f s' % t1

    sys.stdout.write(' now using the keepalive handler ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print ' TIME: %.3f s' % t2
    print ' improvement factor: %.2f' % (t1/t2, )
537 | |||
def fetch(N, url, delay=0):
    """Fetch url N times (sleeping `delay` seconds between fetches),
    warn about any read whose length differs from the first read's,
    and return the total elapsed time in seconds."""
    import time
    lens = []
    starttime = time.time()
    for i in range(N):
        if delay and i > 0: time.sleep(delay)
        fo = urllib2.urlopen(url)
        foo = fo.read()
        fo.close()
        lens.append(len(foo))
    diff = time.time() - starttime

    # every read should match the first; report any that don't
    j = 0
    for i in lens[1:]:
        j = j + 1
        if not i == lens[0]:
            print "WARNING: inconsistent length on read %i: %i" % (j, i)

    return diff
557 | |||
558 | def test_timeout(url): | ||
559 | global DEBUG | ||
560 | dbbackup = DEBUG | ||
561 | class FakeLogger: | ||
562 | def debug(self, msg, *args): print msg % args | ||
563 | info = warning = error = debug | ||
564 | DEBUG = FakeLogger() | ||
565 | print " fetching the file to establish a connection" | ||
566 | fo = urllib2.urlopen(url) | ||
567 | data1 = fo.read() | ||
568 | fo.close() | ||
569 | |||
570 | i = 20 | ||
571 | print " waiting %i seconds for the server to close the connection" % i | ||
572 | while i > 0: | ||
573 | sys.stdout.write('\r %2i' % i) | ||
574 | sys.stdout.flush() | ||
575 | time.sleep(1) | ||
576 | i -= 1 | ||
577 | sys.stderr.write('\r') | ||
578 | |||
579 | print " fetching the file a second time" | ||
580 | fo = urllib2.urlopen(url) | ||
581 | data2 = fo.read() | ||
582 | fo.close() | ||
583 | |||
584 | if data1 == data2: | ||
585 | print ' data are identical' | ||
586 | else: | ||
587 | print ' ERROR: DATA DIFFER' | ||
588 | |||
589 | DEBUG = dbbackup | ||
590 | |||
591 | |||
592 | def test(url, N=10): | ||
593 | print "checking error hander (do this on a non-200)" | ||
594 | try: error_handler(url) | ||
595 | except IOError, e: | ||
596 | print "exiting - exception will prevent further tests" | ||
597 | sys.exit() | ||
598 | |||
599 | print "performing continuity test (making sure stuff isn't corrupted)" | ||
600 | continuity(url) | ||
601 | |||
602 | print "performing speed comparison" | ||
603 | comp(N, url) | ||
604 | |||
605 | print "performing dropped-connection check" | ||
606 | test_timeout(url) | ||
607 | |||
if __name__ == '__main__':
    # Usage: keepalive.py <N> <url> -- run the self-tests with N fetches.
    import time
    import sys
    try:
        N = int(sys.argv[1])
        url = sys.argv[2]
    except:
        # missing or non-integer args; NOTE(review): the bare except
        # also swallows SystemExit/KeyboardInterrupt here
        print "%s <integer> <url>" % sys.argv[0]
    else:
        test(url, N)
diff --git a/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/mirror.py b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/mirror.py new file mode 100644 index 0000000000..9664c6b5c5 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/mirror.py | |||
@@ -0,0 +1,458 @@ | |||
1 | # This library is free software; you can redistribute it and/or | ||
2 | # modify it under the terms of the GNU Lesser General Public | ||
3 | # License as published by the Free Software Foundation; either | ||
4 | # version 2.1 of the License, or (at your option) any later version. | ||
5 | # | ||
6 | # This library is distributed in the hope that it will be useful, | ||
7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
9 | # Lesser General Public License for more details. | ||
10 | # | ||
11 | # You should have received a copy of the GNU Lesser General Public | ||
12 | # License along with this library; if not, write to the | ||
13 | # Free Software Foundation, Inc., | ||
14 | # 59 Temple Place, Suite 330, | ||
15 | # Boston, MA 02111-1307 USA | ||
16 | |||
17 | # This file is part of urlgrabber, a high-level cross-protocol url-grabber | ||
18 | # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko | ||
19 | |||
20 | """Module for downloading files from a pool of mirrors | ||
21 | |||
22 | DESCRIPTION | ||
23 | |||
24 | This module provides support for downloading files from a pool of | ||
25 | mirrors with configurable failover policies. To a large extent, the | ||
26 | failover policy is chosen by using different classes derived from | ||
27 | the main class, MirrorGroup. | ||
28 | |||
29 | Instances of MirrorGroup (and cousins) act very much like URLGrabber | ||
30 | instances in that they have urlread, urlgrab, and urlopen methods. | ||
31 | They can therefore, be used in very similar ways. | ||
32 | |||
33 | from urlgrabber.grabber import URLGrabber | ||
34 | from urlgrabber.mirror import MirrorGroup | ||
35 | gr = URLGrabber() | ||
36 | mg = MirrorGroup(gr, ['http://foo.com/some/directory/', | ||
37 | 'http://bar.org/maybe/somewhere/else/', | ||
                            'ftp://baz.net/some/other/place/entirely/'])
39 | mg.urlgrab('relative/path.zip') | ||
40 | |||
41 | The assumption is that all mirrors are identical AFTER the base urls | ||
42 | specified, so that any mirror can be used to fetch any file. | ||
43 | |||
44 | FAILOVER | ||
45 | |||
46 | The failover mechanism is designed to be customized by subclassing | ||
47 | from MirrorGroup to change the details of the behavior. In general, | ||
48 | the classes maintain a master mirror list and a "current mirror" | ||
49 | index. When a download is initiated, a copy of this list and index | ||
50 | is created for that download only. The specific failover policy | ||
51 | depends on the class used, and so is documented in the class | ||
52 | documentation. Note that ANY behavior of the class can be | ||
53 | overridden, so any failover policy at all is possible (although | ||
54 | you may need to change the interface in extreme cases). | ||
55 | |||
56 | CUSTOMIZATION | ||
57 | |||
58 | Most customization of a MirrorGroup object is done at instantiation | ||
59 | time (or via subclassing). There are four major types of | ||
60 | customization: | ||
61 | |||
62 | 1) Pass in a custom urlgrabber - The passed in urlgrabber will be | ||
63 | used (by default... see #2) for the grabs, so options to it | ||
64 | apply for the url-fetching | ||
65 | |||
66 | 2) Custom mirror list - Mirror lists can simply be a list of | ||
       strings (as shown in the example above) but each can
68 | also be a dict, allowing for more options. For example, the | ||
69 | first mirror in the list above could also have been: | ||
70 | |||
71 | {'mirror': 'http://foo.com/some/directory/', | ||
72 | 'grabber': <a custom grabber to be used for this mirror>, | ||
73 | 'kwargs': { <a dict of arguments passed to the grabber> }} | ||
74 | |||
75 | All mirrors are converted to this format internally. If | ||
76 | 'grabber' is omitted, the default grabber will be used. If | ||
77 | kwargs are omitted, then (duh) they will not be used. | ||
78 | |||
79 | 3) Pass keyword arguments when instantiating the mirror group. | ||
80 | See, for example, the failure_callback argument. | ||
81 | |||
82 | 4) Finally, any kwargs passed in for the specific file (to the | ||
83 | urlgrab method, for example) will be folded in. The options | ||
84 | passed into the grabber's urlXXX methods will override any | ||
85 | options specified in a custom mirror dict. | ||
86 | |||
87 | """ | ||
88 | |||
89 | # $Id: mirror.py,v 1.14 2006/02/22 18:26:46 mstenner Exp $ | ||
90 | |||
91 | import random | ||
92 | import thread # needed for locking to make this threadsafe | ||
93 | |||
94 | from grabber import URLGrabError, CallbackObject, DEBUG | ||
95 | |||
96 | try: | ||
97 | from i18n import _ | ||
98 | except ImportError, msg: | ||
99 | def _(st): return st | ||
100 | |||
class GrabRequest:
    """Container for the state of one specific request (e.g. a single
    file).

    Keeping per-request state in its own throwaway object, instead of
    on the MirrorGroup, serves two purposes:

      1) it makes thread-safety a little easier to achieve
      2) it lets each request carry its own parameters
    """
110 | |||
111 | class MirrorGroup: | ||
112 | """Base Mirror class | ||
113 | |||
114 | Instances of this class are built with a grabber object and a list | ||
115 | of mirrors. Then all calls to urlXXX should be passed relative urls. | ||
116 | The requested file will be searched for on the first mirror. If the | ||
117 | grabber raises an exception (possibly after some retries) then that | ||
118 | mirror will be removed from the list, and the next will be attempted. | ||
119 | If all mirrors are exhausted, then an exception will be raised. | ||
120 | |||
121 | MirrorGroup has the following failover policy: | ||
122 | |||
123 | * downloads begin with the first mirror | ||
124 | |||
125 | * by default (see default_action below) a failure (after retries) | ||
126 | causes it to increment the local AND master indices. Also, | ||
127 | the current mirror is removed from the local list (but NOT the | ||
128 | master list - the mirror can potentially be used for other | ||
129 | files) | ||
130 | |||
131 | * if the local list is ever exhausted, a URLGrabError will be | ||
132 | raised (errno=256, no more mirrors) | ||
133 | |||
134 | OPTIONS | ||
135 | |||
136 | In addition to the required arguments "grabber" and "mirrors", | ||
137 | MirrorGroup also takes the following optional arguments: | ||
138 | |||
139 | default_action | ||
140 | |||
141 | A dict that describes the actions to be taken upon failure | ||
142 | (after retries). default_action can contain any of the | ||
143 | following keys (shown here with their default values): | ||
144 | |||
145 | default_action = {'increment': 1, | ||
146 | 'increment_master': 1, | ||
147 | 'remove': 1, | ||
148 | 'remove_master': 0, | ||
149 | 'fail': 0} | ||
150 | |||
151 | In this context, 'increment' means "use the next mirror" and | ||
152 | 'remove' means "never use this mirror again". The two | ||
153 | 'master' values refer to the instance-level mirror list (used | ||
154 | for all files), whereas the non-master values refer to the | ||
155 | current download only. | ||
156 | |||
157 | The 'fail' option will cause immediate failure by re-raising | ||
158 | the exception and no further attempts to get the current | ||
159 | download. | ||
160 | |||
161 | This dict can be set at instantiation time, | ||
162 | mg = MirrorGroup(grabber, mirrors, default_action={'fail':1}) | ||
163 | at method-execution time (only applies to current fetch), | ||
164 | filename = mg.urlgrab(url, default_action={'increment': 0}) | ||
165 | or by returning an action dict from the failure_callback | ||
166 | return {'fail':0} | ||
167 | in increasing precedence. | ||
168 | |||
169 | If all three of these were done, the net result would be: | ||
170 | {'increment': 0, # set in method | ||
171 | 'increment_master': 1, # class default | ||
172 | 'remove': 1, # class default | ||
173 | 'remove_master': 0, # class default | ||
174 | 'fail': 0} # set at instantiation, reset | ||
175 | # from callback | ||
176 | |||
177 | failure_callback | ||
178 | |||
179 | this is a callback that will be called when a mirror "fails", | ||
180 | meaning the grabber raises some URLGrabError. If this is a | ||
181 | tuple, it is interpreted to be of the form (cb, args, kwargs) | ||
182 | where cb is the actual callable object (function, method, | ||
183 | etc). Otherwise, it is assumed to be the callable object | ||
184 | itself. The callback will be passed a grabber.CallbackObject | ||
185 | instance along with args and kwargs (if present). The following | ||
186 | attributes are defined withing the instance: | ||
187 | |||
188 | obj.exception = < exception that was raised > | ||
189 | obj.mirror = < the mirror that was tried > | ||
190 | obj.relative_url = < url relative to the mirror > | ||
191 | obj.url = < full url that failed > | ||
192 | # .url is just the combination of .mirror | ||
193 | # and .relative_url | ||
194 | |||
195 | The failure callback can return an action dict, as described | ||
196 | above. | ||
197 | |||
198 | Like default_action, the failure_callback can be set at | ||
199 | instantiation time or when the urlXXX method is called. In | ||
200 | the latter case, it applies only for that fetch. | ||
201 | |||
202 | The callback can re-raise the exception quite easily. For | ||
203 | example, this is a perfectly adequate callback function: | ||
204 | |||
205 | def callback(obj): raise obj.exception | ||
206 | |||
207 | WARNING: do not save the exception object (or the | ||
208 | CallbackObject instance). As they contain stack frame | ||
209 | references, they can lead to circular references. | ||
210 | |||
211 | Notes: | ||
212 | * The behavior can be customized by deriving and overriding the | ||
213 | 'CONFIGURATION METHODS' | ||
214 | * The 'grabber' instance is kept as a reference, not copied. | ||
215 | Therefore, the grabber instance can be modified externally | ||
216 | and changes will take effect immediately. | ||
217 | """ | ||
218 | |||
219 | # notes on thread-safety: | ||
220 | |||
221 | # A GrabRequest should never be shared by multiple threads because | ||
222 | # it's never saved inside the MG object and never returned outside it. | ||
223 | # therefore, it should be safe to access/modify grabrequest data | ||
224 | # without a lock. However, accessing the mirrors and _next attributes | ||
225 | # of the MG itself must be done when locked to prevent (for example) | ||
226 | # removal of the wrong mirror. | ||
227 | |||
228 | ############################################################## | ||
229 | # CONFIGURATION METHODS - intended to be overridden to | ||
230 | # customize behavior | ||
231 | def __init__(self, grabber, mirrors, **kwargs): | ||
232 | """Initialize the MirrorGroup object. | ||
233 | |||
234 | REQUIRED ARGUMENTS | ||
235 | |||
236 | grabber - URLGrabber instance | ||
237 | mirrors - a list of mirrors | ||
238 | |||
239 | OPTIONAL ARGUMENTS | ||
240 | |||
241 | failure_callback - callback to be used when a mirror fails | ||
242 | default_action - dict of failure actions | ||
243 | |||
244 | See the module-level and class level documentation for more | ||
245 | details. | ||
246 | """ | ||
247 | |||
248 | # OVERRIDE IDEAS: | ||
249 | # shuffle the list to randomize order | ||
250 | self.grabber = grabber | ||
251 | self.mirrors = self._parse_mirrors(mirrors) | ||
252 | self._next = 0 | ||
253 | self._lock = thread.allocate_lock() | ||
254 | self.default_action = None | ||
255 | self._process_kwargs(kwargs) | ||
256 | |||
257 | # if these values are found in **kwargs passed to one of the urlXXX | ||
258 | # methods, they will be stripped before getting passed on to the | ||
259 | # grabber | ||
260 | options = ['default_action', 'failure_callback'] | ||
261 | |||
262 | def _process_kwargs(self, kwargs): | ||
263 | self.failure_callback = kwargs.get('failure_callback') | ||
264 | self.default_action = kwargs.get('default_action') | ||
265 | |||
266 | def _parse_mirrors(self, mirrors): | ||
267 | parsed_mirrors = [] | ||
268 | for m in mirrors: | ||
269 | if type(m) == type(''): m = {'mirror': m} | ||
270 | parsed_mirrors.append(m) | ||
271 | return parsed_mirrors | ||
272 | |||
273 | def _load_gr(self, gr): | ||
274 | # OVERRIDE IDEAS: | ||
275 | # shuffle gr list | ||
276 | self._lock.acquire() | ||
277 | gr.mirrors = list(self.mirrors) | ||
278 | gr._next = self._next | ||
279 | self._lock.release() | ||
280 | |||
281 | def _get_mirror(self, gr): | ||
282 | # OVERRIDE IDEAS: | ||
283 | # return a random mirror so that multiple mirrors get used | ||
284 | # even without failures. | ||
285 | if not gr.mirrors: | ||
286 | raise URLGrabError(256, _('No more mirrors to try.')) | ||
287 | return gr.mirrors[gr._next] | ||
288 | |||
289 | def _failure(self, gr, cb_obj): | ||
290 | # OVERRIDE IDEAS: | ||
291 | # inspect the error - remove=1 for 404, remove=2 for connection | ||
292 | # refused, etc. (this can also be done via | ||
293 | # the callback) | ||
294 | cb = gr.kw.get('failure_callback') or self.failure_callback | ||
295 | if cb: | ||
296 | if type(cb) == type( () ): | ||
297 | cb, args, kwargs = cb | ||
298 | else: | ||
299 | args, kwargs = (), {} | ||
300 | action = cb(cb_obj, *args, **kwargs) or {} | ||
301 | else: | ||
302 | action = {} | ||
303 | # XXXX - decide - there are two ways to do this | ||
304 | # the first is action-overriding as a whole - use the entire action | ||
305 | # or fall back on module level defaults | ||
306 | #action = action or gr.kw.get('default_action') or self.default_action | ||
307 | # the other is to fall through for each element in the action dict | ||
308 | a = dict(self.default_action or {}) | ||
309 | a.update(gr.kw.get('default_action', {})) | ||
310 | a.update(action) | ||
311 | action = a | ||
312 | self.increment_mirror(gr, action) | ||
313 | if action and action.get('fail', 0): raise | ||
314 | |||
315 | def increment_mirror(self, gr, action={}): | ||
316 | """Tell the mirror object increment the mirror index | ||
317 | |||
318 | This increments the mirror index, which amounts to telling the | ||
319 | mirror object to use a different mirror (for this and future | ||
320 | downloads). | ||
321 | |||
322 | This is a SEMI-public method. It will be called internally, | ||
323 | and you may never need to call it. However, it is provided | ||
324 | (and is made public) so that the calling program can increment | ||
325 | the mirror choice for methods like urlopen. For example, with | ||
326 | urlopen, there's no good way for the mirror group to know that | ||
327 | an error occurs mid-download (it's already returned and given | ||
328 | you the file object). | ||
329 | |||
330 | remove --- can have several values | ||
331 | 0 do not remove the mirror from the list | ||
332 | 1 remove the mirror for this download only | ||
333 | 2 remove the mirror permanently | ||
334 | |||
335 | beware of remove=0 as it can lead to infinite loops | ||
336 | """ | ||
337 | badmirror = gr.mirrors[gr._next] | ||
338 | |||
339 | self._lock.acquire() | ||
340 | try: | ||
341 | ind = self.mirrors.index(badmirror) | ||
342 | except ValueError: | ||
343 | pass | ||
344 | else: | ||
345 | if action.get('remove_master', 0): | ||
346 | del self.mirrors[ind] | ||
347 | elif self._next == ind and action.get('increment_master', 1): | ||
348 | self._next += 1 | ||
349 | if self._next >= len(self.mirrors): self._next = 0 | ||
350 | self._lock.release() | ||
351 | |||
352 | if action.get('remove', 1): | ||
353 | del gr.mirrors[gr._next] | ||
354 | elif action.get('increment', 1): | ||
355 | gr._next += 1 | ||
356 | if gr._next >= len(gr.mirrors): gr._next = 0 | ||
357 | |||
358 | if DEBUG: | ||
359 | grm = [m['mirror'] for m in gr.mirrors] | ||
360 | DEBUG.info('GR mirrors: [%s] %i', ' '.join(grm), gr._next) | ||
361 | selfm = [m['mirror'] for m in self.mirrors] | ||
362 | DEBUG.info('MAIN mirrors: [%s] %i', ' '.join(selfm), self._next) | ||
363 | |||
364 | ##################################################################### | ||
365 | # NON-CONFIGURATION METHODS | ||
366 | # these methods are designed to be largely workhorse methods that | ||
367 | # are not intended to be overridden. That doesn't mean you can't; | ||
368 | # if you want to, feel free, but most things can be done by | ||
369 | # by overriding the configuration methods :) | ||
370 | |||
371 | def _join_url(self, base_url, rel_url): | ||
372 | if base_url.endswith('/') or rel_url.startswith('/'): | ||
373 | return base_url + rel_url | ||
374 | else: | ||
375 | return base_url + '/' + rel_url | ||
376 | |||
377 | def _mirror_try(self, func, url, kw): | ||
378 | gr = GrabRequest() | ||
379 | gr.func = func | ||
380 | gr.url = url | ||
381 | gr.kw = dict(kw) | ||
382 | self._load_gr(gr) | ||
383 | |||
384 | for k in self.options: | ||
385 | try: del kw[k] | ||
386 | except KeyError: pass | ||
387 | |||
388 | while 1: | ||
389 | mirrorchoice = self._get_mirror(gr) | ||
390 | fullurl = self._join_url(mirrorchoice['mirror'], gr.url) | ||
391 | kwargs = dict(mirrorchoice.get('kwargs', {})) | ||
392 | kwargs.update(kw) | ||
393 | grabber = mirrorchoice.get('grabber') or self.grabber | ||
394 | func_ref = getattr(grabber, func) | ||
395 | if DEBUG: DEBUG.info('MIRROR: trying %s -> %s', url, fullurl) | ||
396 | try: | ||
397 | return func_ref( *(fullurl,), **kwargs ) | ||
398 | except URLGrabError, e: | ||
399 | if DEBUG: DEBUG.info('MIRROR: failed') | ||
400 | obj = CallbackObject() | ||
401 | obj.exception = e | ||
402 | obj.mirror = mirrorchoice['mirror'] | ||
403 | obj.relative_url = gr.url | ||
404 | obj.url = fullurl | ||
405 | self._failure(gr, obj) | ||
406 | |||
407 | def urlgrab(self, url, filename=None, **kwargs): | ||
408 | kw = dict(kwargs) | ||
409 | kw['filename'] = filename | ||
410 | func = 'urlgrab' | ||
411 | return self._mirror_try(func, url, kw) | ||
412 | |||
413 | def urlopen(self, url, **kwargs): | ||
414 | kw = dict(kwargs) | ||
415 | func = 'urlopen' | ||
416 | return self._mirror_try(func, url, kw) | ||
417 | |||
418 | def urlread(self, url, limit=None, **kwargs): | ||
419 | kw = dict(kwargs) | ||
420 | kw['limit'] = limit | ||
421 | func = 'urlread' | ||
422 | return self._mirror_try(func, url, kw) | ||
423 | |||
424 | |||
class MGRandomStart(MirrorGroup):
    """A mirror group that starts at a random mirror in the list.

    Behaves exactly like MirrorGroup except that the starting index
    into the mirror list is chosen at random instead of always being
    the first entry.
    """

    def __init__(self, grabber, mirrors, **kwargs):
        """Initialize the object.

        Takes the same arguments as MirrorGroup.__init__.
        """
        MirrorGroup.__init__(self, grabber, mirrors, **kwargs)
        # overwrite the default starting index (0) with a random one
        self._next = random.randrange(len(mirrors))
439 | |||
class MGRandomOrder(MirrorGroup):
    """A mirror group that walks its mirrors in a random order.

    Behaves exactly like MirrorGroup except that the mirror list is
    shuffled once at initialization time and fixed thereafter; it does
    NOT pick a fresh random mirror after each failure.
    """

    def __init__(self, grabber, mirrors, **kwargs):
        """Initialize the object.

        Takes the same arguments as MirrorGroup.__init__.
        """
        MirrorGroup.__init__(self, grabber, mirrors, **kwargs)
        # one-time, in-place shuffle of the parsed master mirror list
        random.shuffle(self.mirrors)
456 | |||
if __name__ == '__main__':
    # placeholder: this module provides no command-line self-test
    pass
diff --git a/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/progress.py b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/progress.py new file mode 100644 index 0000000000..02db524e76 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/progress.py | |||
@@ -0,0 +1,530 @@ | |||
1 | # This library is free software; you can redistribute it and/or | ||
2 | # modify it under the terms of the GNU Lesser General Public | ||
3 | # License as published by the Free Software Foundation; either | ||
4 | # version 2.1 of the License, or (at your option) any later version. | ||
5 | # | ||
6 | # This library is distributed in the hope that it will be useful, | ||
7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
9 | # Lesser General Public License for more details. | ||
10 | # | ||
11 | # You should have received a copy of the GNU Lesser General Public | ||
12 | # License along with this library; if not, write to the | ||
13 | # Free Software Foundation, Inc., | ||
14 | # 59 Temple Place, Suite 330, | ||
15 | # Boston, MA 02111-1307 USA | ||
16 | |||
17 | # This file is part of urlgrabber, a high-level cross-protocol url-grabber | ||
18 | # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko | ||
19 | |||
20 | # $Id: progress.py,v 1.7 2005/08/19 21:59:07 mstenner Exp $ | ||
21 | |||
22 | import sys | ||
23 | import time | ||
24 | import math | ||
25 | import thread | ||
26 | |||
class BaseMeter:
    """Abstract progress meter for a single download.

    Tracks timing and byte counts (via a RateEstimator) and throttles
    rendering to at most one update per update_period seconds.
    Subclasses implement the _do_start/_do_update/_do_end hooks to
    actually display progress.
    """
    def __init__(self):
        self.update_period = 0.3 # seconds

        self.filename = None
        self.url = None
        self.basename = None
        self.text = None
        self.size = None            # total bytes expected, or None if unknown
        self.start_time = None
        self.last_amount_read = 0
        self.last_update_time = None
        self.re = RateEstimator()

    def start(self, filename=None, url=None, basename=None,
              size=None, now=None, text=None):
        """Begin tracking a download.  size may be None if unknown."""
        self.filename = filename
        self.url = url
        self.basename = basename
        self.text = text

        #size = None #########  TESTING
        self.size = size
        # idiom fix: was "if not size is None"
        if size is not None: self.fsize = format_number(size) + 'B'

        if now is None: now = time.time()
        self.start_time = now
        self.re.start(size, now)
        self.last_amount_read = 0
        self.last_update_time = now
        self._do_start(now)

    def _do_start(self, now=None):
        pass

    def update(self, amount_read, now=None):
        """Record progress; calls _do_update at most once per period."""
        # for a real gui, you probably want to override and put a call
        # to your mainloop iteration function here
        if now is None: now = time.time()
        # BUG FIX: test the "never updated" case FIRST - the original
        # order compared against a possibly-None last_update_time,
        # which would raise a TypeError before the guard was reached
        if not self.last_update_time or \
           (now >= self.last_update_time + self.update_period):
            self.re.update(amount_read, now)
            self.last_amount_read = amount_read
            self.last_update_time = now
            self._do_update(amount_read, now)

    def _do_update(self, amount_read, now=None):
        pass

    def end(self, amount_read, now=None):
        """Finish tracking; always triggers a final _do_end render."""
        if now is None: now = time.time()
        self.re.update(amount_read, now)
        self.last_amount_read = amount_read
        self.last_update_time = now
        self._do_end(amount_read, now)

    def _do_end(self, amount_read, now=None):
        pass
85 | |||
class TextMeter(BaseMeter):
    """Single-file progress meter rendered as text on a file object.

    When the total size is unknown the output is "name  bytes  elapsed";
    when it is known, a 25-column bar with percentage and ETA is shown.
    A final summary line is written when the download ends.
    """
    def __init__(self, fo=sys.stderr):
        BaseMeter.__init__(self)
        self.fo = fo

    def _do_update(self, amount_read, now=None):
        label = self.basename if self.text is None else self.text
        fetime = format_time(self.re.elapsed_time())
        fread = format_number(amount_read)
        #self.size = None
        if self.size is None:
            out = '\r%-60.60s %5sB %s ' % (label, fread, fetime)
        else:
            frtime = format_time(self.re.remaining_time())
            frac = self.re.fraction_read()
            bar = '=' * int(25 * frac)
            out = '\r%-25.25s %3i%% |%-25.25s| %5sB %8s ETA ' % \
                  (label, frac*100, bar, fread, frtime)
        self.fo.write(out)
        self.fo.flush()

    def _do_end(self, amount_read, now=None):
        label = self.basename if self.text is None else self.text
        total_time = format_time(self.re.elapsed_time())
        total_size = format_number(amount_read)
        if self.size is None:
            out = '\r%-60.60s %5sB %s ' % (label, total_size, total_time)
        else:
            out = '\r%-25.25s %3i%% |%-25.25s| %5sB %8s ' % \
                  (label, 100, '='*25, total_size, total_time)
        self.fo.write(out + '\n')
        self.fo.flush()

# backwards-compatible alias for older callers
text_progress_meter = TextMeter
133 | |||
class MultiFileHelper(BaseMeter):
    """Per-file meter that forwards every event to a master meter.

    Instances are handed out by MultiFileMeter.newMeter(); each hook
    simply notifies the owning master so it can render the aggregate
    state.
    """
    def __init__(self, master):
        BaseMeter.__init__(self)
        self.master = master

    def _do_start(self, now):
        self.master.start_meter(self, now)

    def _do_update(self, amount_read, now):
        # elapsed time since last update
        self.master.update_meter(self, now)

    def _do_end(self, amount_read, now):
        # cache the formatted totals so the master can render them later
        elapsed = now - self.start_time
        self.ftotal_time = format_time(elapsed)
        self.ftotal_size = format_number(self.last_amount_read)
        self.master.end_meter(self, now)

    def failure(self, message, now=None):
        self.master.failure_meter(self, message, now)

    def message(self, message):
        self.master.message_meter(self, message)
156 | |||
class MultiFileMeter:
    """Aggregate progress meter for several concurrent downloads.

    Individual files are tracked by helper meters (helperclass)
    obtained from newMeter(); the helpers report back through
    start_meter/update_meter/end_meter/failure_meter, which serialize
    state changes with a lock so helpers may run in separate threads.
    Subclasses implement the _do_* hooks to render output.
    """
    helperclass = MultiFileHelper
    def __init__(self):
        self.meters = []
        self.in_progress_meters = []
        self._lock = thread.allocate_lock()
        self.update_period = 0.3 # seconds

        self.numfiles = None
        self.finished_files = 0
        self.failed_files = 0
        self.open_files = 0
        self.total_size = None       # total bytes expected, None if unknown
        self.failed_size = 0         # becomes None if any failed size unknown
        self.start_time = None
        self.finished_file_size = 0
        self.last_update_time = None
        self.re = RateEstimator()

    def start(self, numfiles=None, total_size=None, now=None):
        """Reset all counters and begin a new multi-file session."""
        if now is None: now = time.time()
        self.numfiles = numfiles
        self.finished_files = 0
        self.failed_files = 0
        self.open_files = 0
        self.total_size = total_size
        self.failed_size = 0
        self.start_time = now
        self.finished_file_size = 0
        self.last_update_time = now
        self.re.start(total_size, now)
        self._do_start(now)

    def _do_start(self, now):
        pass

    def end(self, now=None):
        if now is None: now = time.time()
        self._do_end(now)

    def _do_end(self, now):
        pass

    def lock(self): self._lock.acquire()
    def unlock(self): self._lock.release()

    ###########################################################
    # child meter creation and destruction
    def newMeter(self):
        """Create, register and return a new helper meter."""
        newmeter = self.helperclass(self)
        self.meters.append(newmeter)
        return newmeter

    def removeMeter(self, meter):
        self.meters.remove(meter)

    ###########################################################
    # child functions - these should only be called by helpers
    def start_meter(self, meter, now):
        # idiom fix throughout: "meter not in" instead of "not meter in"
        if meter not in self.meters:
            raise ValueError('attempt to use orphaned meter')
        self._lock.acquire()
        try:
            if meter not in self.in_progress_meters:
                self.in_progress_meters.append(meter)
            self.open_files += 1
        finally:
            self._lock.release()
        self._do_start_meter(meter, now)

    def _do_start_meter(self, meter, now):
        pass

    def update_meter(self, meter, now):
        if meter not in self.meters:
            raise ValueError('attempt to use orphaned meter')
        # BUG FIX: test the "never updated" case FIRST - the original
        # order compared against a possibly-None last_update_time,
        # which would raise a TypeError before the guard was reached
        if not self.last_update_time or \
           (now >= self.last_update_time + self.update_period):
            self.re.update(self._amount_read(), now)
            self.last_update_time = now
            self._do_update_meter(meter, now)

    def _do_update_meter(self, meter, now):
        pass

    def end_meter(self, meter, now):
        if meter not in self.meters:
            raise ValueError('attempt to use orphaned meter')
        self._lock.acquire()
        try:
            try: self.in_progress_meters.remove(meter)
            except ValueError: pass
            self.open_files -= 1
            self.finished_files += 1
            self.finished_file_size += meter.last_amount_read
        finally:
            self._lock.release()
        self._do_end_meter(meter, now)

    def _do_end_meter(self, meter, now):
        pass

    def failure_meter(self, meter, message, now):
        if meter not in self.meters:
            raise ValueError('attempt to use orphaned meter')
        self._lock.acquire()
        try:
            try: self.in_progress_meters.remove(meter)
            except ValueError: pass
            self.open_files -= 1
            self.failed_files += 1
            # once any failed file has unknown size, the failed total
            # becomes unknown (None) as well
            if meter.size and self.failed_size is not None:
                self.failed_size += meter.size
            else:
                self.failed_size = None
        finally:
            self._lock.release()
        self._do_failure_meter(meter, message, now)

    def _do_failure_meter(self, meter, message, now):
        pass

    def message_meter(self, meter, message):
        pass

    ########################################################
    # internal functions
    def _amount_read(self):
        # bytes from finished files plus the current position of each
        # in-progress helper
        tot = self.finished_file_size
        for m in self.in_progress_meters:
            tot += m.last_amount_read
        return tot
289 | |||
290 | |||
class TextMultiFileMeter(MultiFileMeter):
    """Multi-file meter that renders a one-line text summary to fo."""
    def __init__(self, fo=sys.stderr):
        self.fo = fo
        MultiFileMeter.__init__(self)

    # files: ###/### ###% data: ######/###### ###% time: ##:##:##/##:##:##
    def _do_update_meter(self, meter, now):
        self._lock.acquire()
        try:
            # renamed local 'format' -> 'fmt' (was shadowing the builtin)
            fmt = "files: %3i/%-3i %3i%% data: %6.6s/%-6.6s %3i%% " \
                  "time: %8.8s/%8.8s"
            df = self.finished_files
            tf = self.numfiles or 1
            # +0.49 rounds to nearest when the %3i conversion truncates
            pf = 100 * float(df)/tf + 0.49
            dd = self.re.last_amount_read
            td = self.total_size
            pd = 100 * (self.re.fraction_read() or 0) + 0.49
            dt = self.re.elapsed_time()
            rt = self.re.remaining_time()
            if rt is None: tt = None
            else: tt = dt + rt

            fdd = format_number(dd) + 'B'
            ftd = format_number(td) + 'B'
            fdt = format_time(dt, 1)
            ftt = format_time(tt, 1)

            out = '%-79.79s' % (fmt % (df, tf, pf, fdd, ftd, pd, fdt, ftt))
            self.fo.write('\r' + out)
            self.fo.flush()
        finally:
            self._lock.release()

    def _do_end_meter(self, meter, now):
        self._lock.acquire()
        try:
            fmt = "%-30.30s %6.6s %8.8s %9.9s"
            fn = meter.basename
            size = meter.last_amount_read
            fsize = format_number(size) + 'B'
            et = meter.re.elapsed_time()
            fet = format_time(et, 1)
            # BUG FIX: guard the rate computation - an instantaneous
            # download (et == 0) used to raise ZeroDivisionError
            if et == 0:
                frate = format_number(0) + 'B/s'
            else:
                frate = format_number(size / et) + 'B/s'

            out = '%-79.79s' % (fmt % (fn, fsize, fet, frate))
            self.fo.write('\r' + out + '\n')
        finally:
            self._lock.release()
        self._do_update_meter(meter, now)

    def _do_failure_meter(self, meter, message, now):
        self._lock.acquire()
        try:
            fmt = "%-30.30s %6.6s %s"
            fn = meter.basename
            if type(message) in (type(''), type(u'')):
                message = message.splitlines()
            if not message: message = ['']
            out = '%-79s' % (fmt % (fn, 'FAILED', message[0] or ''))
            self.fo.write('\r' + out + '\n')
            for m in message[1:]: self.fo.write(' ' + m + '\n')
        finally:
            # BUG FIX: the release used to sit inside the try body with
            # _do_update_meter in the finally clause - any I/O error
            # left the lock held, and the update call then deadlocked
            # trying to re-acquire it
            self._lock.release()
        self._do_update_meter(meter, now)

    def message_meter(self, meter, message):
        # intentionally a no-op; the lock acquire/release is kept so
        # overriding subclasses inherit the same serialization pattern
        self._lock.acquire()
        try:
            pass
        finally:
            self._lock.release()

    def _do_end(self, now):
        self._do_update_meter(None, now)
        self._lock.acquire()
        try:
            self.fo.write('\n')
            self.fo.flush()
        finally:
            self._lock.release()
371 | |||
372 | ###################################################################### | ||
373 | # support classes and functions | ||
374 | |||
class RateEstimator:
    """Estimate transfer rate and remaining time from periodic
    (time, cumulative-bytes-read) updates.

    The rate is smoothed with a temporal rolling average so irregular
    update intervals still produce a stable estimate.
    """
    def __init__(self, timescale=5.0):
        # timescale: seconds over which the rolling average "forgets"
        # old data (see _temporal_rolling_ave)
        self.timescale = timescale

    def start(self, total=None, now=None):
        """Begin a new transfer of (optionally) 'total' bytes at time
        'now' (defaults to the current time)."""
        if now is None: now = time.time()
        self.total = total
        self.start_time = now
        self.last_update_time = now
        self.last_amount_read = 0
        self.ave_rate = None

    def update(self, amount_read, now=None):
        """Record that 'amount_read' cumulative bytes were read by 'now'."""
        if now is None: now = time.time()
        if amount_read == 0:
            # if we just started this file, all bets are off
            self.last_update_time = now
            self.last_amount_read = 0
            self.ave_rate = None
            return

        time_diff = now - self.last_update_time
        read_diff = amount_read - self.last_amount_read
        self.last_update_time = now
        self.last_amount_read = amount_read
        self.ave_rate = self._temporal_rolling_ave(\
            time_diff, read_diff, self.ave_rate, self.timescale)

    #####################################################################
    # result methods
    def average_rate(self):
        "get the average transfer rate (in bytes/second)"
        return self.ave_rate

    def elapsed_time(self):
        "the time between the start of the transfer and the most recent update"
        return self.last_update_time - self.start_time

    def remaining_time(self):
        "estimated time remaining (None if rate or total size is unknown)"
        if not self.ave_rate or not self.total: return None
        return (self.total - self.last_amount_read) / self.ave_rate

    def fraction_read(self):
        """the fraction of the data that has been read
        (can be None for unknown transfer size)"""
        if self.total is None: return None
        elif self.total == 0: return 1.0
        else: return float(self.last_amount_read)/self.total

    #########################################################################
    # support methods
    def _temporal_rolling_ave(self, time_diff, read_diff, last_ave, timescale):
        """a temporal rolling average performs smooth averaging even when
        updates come at irregular intervals.  This is performed by scaling
        the "epsilon" according to the time since the last update.
        Specifically, epsilon = time_diff / timescale

        As a general rule, the average will take on a completely new value
        after 'timescale' seconds."""
        epsilon = time_diff / timescale
        if epsilon > 1: epsilon = 1.0
        return self._rolling_ave(time_diff, read_diff, last_ave, epsilon)

    def _rolling_ave(self, time_diff, read_diff, last_ave, epsilon):
        """perform a "rolling average" iteration
        a rolling average "folds" new data into an existing average with
        some weight, epsilon.  epsilon must be between 0.0 and 1.0 (inclusive)
        a value of 0.0 means only the old value (initial value) counts,
        and a value of 1.0 means only the newest value is considered."""

        try:
            recent_rate = read_diff / time_diff
        except ZeroDivisionError:
            recent_rate = None
        if last_ave is None: return recent_rate
        elif recent_rate is None: return last_ave

        # at this point, both last_ave and recent_rate are numbers
        return epsilon * recent_rate + (1 - epsilon) * last_ave

    def _round_remaining_time(self, rt, start_time=15.0):
        """round the remaining time, depending on its size
        If rt is between n*start_time and (n+1)*start_time round downward
        to the nearest multiple of n (for any counting number n).
        If rt < start_time, round down to the nearest 1.
        For example (for start_time = 15.0):
         2.7  -> 2.0
         25.2 -> 25.0
         26.4 -> 26.0
         35.3 -> 34.0
         63.6 -> 60.0
        """
        # BUGFIX: rt == 0 used to fall through to math.log(0) and raise
        # ValueError; treat it the same as a negative value.
        if rt <= 0: return 0.0
        shift = int(math.log(rt/start_time)/math.log(2))
        rt = int(rt)
        if shift <= 0: return rt
        return float(rt >> shift << shift)
476 | |||
477 | |||
def format_time(seconds, use_hours=0):
    """Format a duration in seconds as 'MM:SS' (or 'HH:MM:SS' when
    use_hours is true).  None or a negative value renders as dashes.

    BUGFIX/portability: minute and hour math now uses floor division
    (//) instead of '/', which under Python 3 produces floats.
    """
    if seconds is None or seconds < 0:
        if use_hours: return '--:--:--'
        else:         return '--:--'
    else:
        seconds = int(seconds)
        minutes = seconds // 60
        seconds = seconds % 60
        if use_hours:
            hours = minutes // 60
            minutes = minutes % 60
            return '%02i:%02i:%02i' % (hours, minutes, seconds)
        else:
            return '%02i:%02i' % (minutes, seconds)
492 | |||
def format_number(number, SI=0, space=' '):
    """Turn numbers into human-readable metric-like numbers

    number -- the value to format (typically a byte count)
    SI     -- if true, scale by powers of 1000; otherwise powers of 1024
    space  -- separator placed between the value and the unit symbol
    """
    symbols = ['',  # (none)
               'k', # kilo
               'M', # mega
               'G', # giga
               'T', # tera
               'P', # peta
               'E', # exa
               'Z', # zetta
               'Y'] # yotta

    if SI: step = 1000.0
    else: step = 1024.0

    thresh = 999
    depth = 0
    max_depth = len(symbols) - 1

    # we want numbers between 0 and thresh, but don't exceed the length
    # of our list.  In that event, the formatting will be screwed up,
    # but it'll still show the right number.
    while number > thresh and depth < max_depth:
        depth  = depth + 1
        number = number / step

    if isinstance(number, int):
        # it's still an integer, which means it never got divided above
        # (division by a float always yields a float), so it's already
        # short enough.  (Replaces the Python-2-only "type(1L)" check.)
        format = '%i%s%s'
    elif number < 9.95:
        # must use 9.95 for proper sizing.  For example, 9.99 will be
        # rounded to 10.0 with the .1f format string (which is too long)
        format = '%.1f%s%s'
    else:
        format = '%.0f%s%s'

    return(format % (float(number or 0), space, symbols[depth]))
diff --git a/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/sslfactory.py b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/sslfactory.py new file mode 100644 index 0000000000..07848dac7c --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/urlgrabber/sslfactory.py | |||
@@ -0,0 +1,90 @@ | |||
1 | # This library is free software; you can redistribute it and/or | ||
2 | # modify it under the terms of the GNU Lesser General Public | ||
3 | # License as published by the Free Software Foundation; either | ||
4 | # version 2.1 of the License, or (at your option) any later version. | ||
5 | # | ||
6 | # This library is distributed in the hope that it will be useful, | ||
7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
9 | # Lesser General Public License for more details. | ||
10 | # | ||
11 | # You should have received a copy of the GNU Lesser General Public | ||
12 | # License along with this library; if not, write to the | ||
13 | # Free Software Foundation, Inc., | ||
14 | # 59 Temple Place, Suite 330, | ||
15 | # Boston, MA 02111-1307 USA | ||
16 | |||
17 | # This file is part of urlgrabber, a high-level cross-protocol url-grabber | ||
18 | |||
19 | import httplib | ||
20 | import urllib2 | ||
21 | |||
22 | try: | ||
23 | from M2Crypto import SSL | ||
24 | from M2Crypto import httpslib | ||
25 | from M2Crypto import m2urllib2 | ||
26 | |||
27 | SSL.Connection.clientPostConnectionCheck = None | ||
28 | have_m2crypto = True | ||
29 | except ImportError: | ||
30 | have_m2crypto = False | ||
31 | |||
32 | DEBUG = None | ||
33 | |||
34 | if have_m2crypto: | ||
35 | |||
36 | class M2SSLFactory: | ||
37 | |||
38 | def __init__(self, ssl_ca_cert, ssl_context): | ||
39 | self.ssl_context = self._get_ssl_context(ssl_ca_cert, ssl_context) | ||
40 | |||
41 | def _get_ssl_context(self, ssl_ca_cert, ssl_context): | ||
42 | """ | ||
43 | Create an ssl context using the CA cert file or ssl context. | ||
44 | |||
45 | The CA cert is used first if it was passed as an option. If not, | ||
46 | then the supplied ssl context is used. If no ssl context was supplied, | ||
47 | None is returned. | ||
48 | """ | ||
49 | if ssl_ca_cert: | ||
50 | context = SSL.Context() | ||
51 | context.load_verify_locations(ssl_ca_cert) | ||
52 | context.set_verify(SSL.verify_none, -1) | ||
53 | return context | ||
54 | else: | ||
55 | return ssl_context | ||
56 | |||
57 | def create_https_connection(self, host, response_class = None): | ||
58 | connection = httplib.HTTPSConnection(host, self.ssl_context) | ||
59 | if response_class: | ||
60 | connection.response_class = response_class | ||
61 | return connection | ||
62 | |||
63 | def create_opener(self, *handlers): | ||
64 | return m2urllib2.build_opener(self.ssl_context, *handlers) | ||
65 | |||
66 | |||
class SSLFactory:
    """Fallback factory used when M2Crypto is unavailable; builds HTTPS
    connections and openers with the plain Python stack (any supplied
    SSL options are ignored -- see get_factory)."""

    def create_https_connection(self, host, response_class = None):
        """Return an httplib.HTTPSConnection to 'host', optionally with a
        custom response_class installed."""
        conn = httplib.HTTPSConnection(host)
        if response_class:
            conn.response_class = response_class
        return conn

    def create_opener(self, *handlers):
        """Return a urllib2 opener built from the given handlers."""
        return urllib2.build_opener(*handlers)
77 | |||
78 | |||
79 | |||
def get_factory(ssl_ca_cert = None, ssl_context = None):
    """ Return an SSLFactory, based on if M2Crypto is available. """
    if not have_m2crypto:
        # The caller supplied SSL options we cannot honor without
        # M2Crypto -- warn (if a debug logger is configured) and fall
        # back to the plain Python SSL stack.
        if (ssl_ca_cert or ssl_context) and DEBUG:
            DEBUG.warning("SSL arguments supplied, but M2Crypto is not available. "
                          "Using Python SSL.")
        return SSLFactory()
    return M2SSLFactory(ssl_ca_cert, ssl_context)
diff --git a/scripts/lib/mic/3rdparty/pykickstart/version.py b/scripts/lib/mic/3rdparty/pykickstart/version.py new file mode 100644 index 0000000000..102cc37d80 --- /dev/null +++ b/scripts/lib/mic/3rdparty/pykickstart/version.py | |||
@@ -0,0 +1,197 @@ | |||
1 | # | ||
2 | # Chris Lumens <clumens@redhat.com> | ||
3 | # | ||
4 | # Copyright 2006, 2007, 2008, 2009, 2010 Red Hat, Inc. | ||
5 | # | ||
6 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
7 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
8 | # General Public License v.2. This program is distributed in the hope that it | ||
9 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
10 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | # See the GNU General Public License for more details. | ||
12 | # | ||
13 | # You should have received a copy of the GNU General Public License along with | ||
14 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
15 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat | ||
16 | # trademarks that are incorporated in the source code or documentation are not | ||
17 | # subject to the GNU General Public License and may only be used or replicated | ||
18 | # with the express permission of Red Hat, Inc. | ||
19 | # | ||
20 | """ | ||
21 | Methods for working with kickstart versions. | ||
22 | |||
23 | This module defines several symbolic constants that specify kickstart syntax | ||
24 | versions. Each version corresponds roughly to one release of Red Hat Linux, | ||
25 | Red Hat Enterprise Linux, or Fedora Core as these are where most syntax | ||
26 | changes take place. | ||
27 | |||
28 | This module also exports several functions: | ||
29 | |||
30 | makeVersion - Given a version number, return an instance of the | ||
31 | matching handler class. | ||
32 | |||
33 | returnClassForVersion - Given a version number, return the matching | ||
34 | handler class. This does not return an | ||
35 | instance of that class, however. | ||
36 | |||
37 | stringToVersion - Convert a string representation of a version number | ||
38 | into the symbolic constant. | ||
39 | |||
40 | versionToString - Perform the reverse mapping. | ||
41 | |||
42 | versionFromFile - Read a kickstart file and determine the version of | ||
43 | syntax it uses. This requires the kickstart file to | ||
44 | have a version= comment in it. | ||
45 | """ | ||
46 | import imputil, re, sys | ||
47 | from urlgrabber import urlopen | ||
48 | |||
49 | import gettext | ||
50 | _ = lambda x: gettext.ldgettext("pykickstart", x) | ||
51 | |||
52 | from pykickstart.errors import KickstartVersionError | ||
53 | |||
# Symbolic names for internal version numbers.
# The values are widely spaced so that releases which branch between two
# existing ones (e.g. RHEL6 between F13 and F14) can be slotted in while
# preserving numeric ordering.
RHEL3 = 900
FC3 = 1000
RHEL4 = 1100
FC4 = 2000
FC5 = 3000
FC6 = 4000
RHEL5 = 4100
F7 = 5000
F8 = 6000
F9 = 7000
F10 = 8000
F11 = 9000
F12 = 10000
F13 = 11000
RHEL6 = 11100
F14 = 12000
F15 = 13000
F16 = 14000

# This always points at the latest version and is the default.
DEVEL = F16

# A one-to-one mapping from string representations to version numbers.
versionMap = {
        "DEVEL": DEVEL,
        "FC3": FC3, "FC4": FC4, "FC5": FC5, "FC6": FC6, "F7": F7, "F8": F8,
        "F9": F9, "F10": F10, "F11": F11, "F12": F12, "F13": F13,
        "F14": F14, "F15": F15, "F16": F16,
        "RHEL3": RHEL3, "RHEL4": RHEL4, "RHEL5": RHEL5, "RHEL6": RHEL6
}
85 | |||
def stringToVersion(s):
    """Convert string into one of the provided version constants.  Raises
       KickstartVersionError if string does not match anything.

       Accepts the short symbolic names ("F16", "RHEL6", "DEVEL", ...) as
       well as full release strings ("Fedora release 16", "Red Hat
       Enterprise Linux ... 6.1").
    """
    # First try these short forms.
    try:
        return versionMap[s.upper()]
    except KeyError:
        pass

    # Now try the Fedora versions.  ("in" replaces the Python-2-only
    # dict.has_key(); the patterns are raw strings so the \d escapes are
    # not interpreted by the string literal.)
    m = re.match(r"^fedora.* (\d+)$", s, re.I)

    if m and m.group(1):
        if "FC" + m.group(1) in versionMap:
            return versionMap["FC" + m.group(1)]
        elif "F" + m.group(1) in versionMap:
            return versionMap["F" + m.group(1)]
        else:
            raise KickstartVersionError(_("Unsupported version specified: %s") % s)

    # Now try the RHEL versions.
    m = re.match(r"^red hat enterprise linux.* (\d+)([\.\d]*)$", s, re.I)

    if m and m.group(1):
        if "RHEL" + m.group(1) in versionMap:
            return versionMap["RHEL" + m.group(1)]
        else:
            raise KickstartVersionError(_("Unsupported version specified: %s") % s)

    # If nothing else worked, we're out of options.
    raise KickstartVersionError(_("Unsupported version specified: %s") % s)
118 | |||
def versionToString(version, skipDevel=False):
    """Convert version into a string representation of the version number.
       This is the reverse operation of stringToVersion.  Raises
       KickstartVersionError if version does not match anything.

       If skipDevel is True, return the concrete release name (e.g.
       "F16") even when version equals the DEVEL alias.
    """
    if not skipDevel and version == versionMap["DEVEL"]:
        return "DEVEL"

    # .items() replaces the Python-2-only .iteritems().
    for (key, val) in versionMap.items():
        if key == "DEVEL":
            continue
        elif val == version:
            return key

    raise KickstartVersionError(_("Unsupported version specified: %s") % version)
134 | |||
def versionFromFile(f):
    """Given a file or URL, look for a line starting with #version= and
       return the version number.  If no version is found, return DEVEL.
    """
    version = DEVEL

    fh = urlopen(f)

    while True:
        # urlgrabber file objects may signal exhaustion either way:
        # StopIteration from readline() or an empty string.
        try:
            line = fh.readline()
        except StopIteration:
            break
        if line == "":
            break

        # Skip blank/whitespace-only lines.
        if line.strip() == "":
            continue

        if line[:9] == "#version=":
            version = stringToVersion(line[9:].rstrip())
            break

    fh.close()
    return version
162 | |||
def returnClassForVersion(version=DEVEL):
    """Return the class of the syntax handler for version.  version can be
       either a string or the matching constant.  Raises
       KickstartVersionError if version does not match anything.
    """
    # Accept both a numeric constant and a version string.
    try:
        version = int(version)
        module = "%s" % versionToString(version, skipDevel=True)
    except ValueError:
        module = "%s" % version
        version = stringToVersion(version)

    module = module.lower()

    try:
        import pykickstart.handlers
        sys.path.extend(pykickstart.handlers.__path__)
        found = imputil.imp.find_module(module)
        loaded = imputil.imp.load_module(module, found[0], found[1], found[2])

        # .items() replaces the Python-2-only .iteritems().
        for (k, v) in loaded.__dict__.items():
            if k.lower().endswith("%shandler" % module):
                return v
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.
        raise KickstartVersionError(_("Unsupported version specified: %s") % version)

    # BUGFIX: the handler module imported but contained no matching class;
    # the old code silently returned None here, which made makeVersion()
    # fail later with an obscure TypeError.
    raise KickstartVersionError(_("Unsupported version specified: %s") % version)
188 | |||
def makeVersion(version=DEVEL):
    """Return a new instance of the syntax handler for version.  version can be
       either a string or the matching constant.  This function is useful for
       standalone programs which just need to handle a specific version of
       kickstart syntax (as provided by a command line argument, for example)
       and need to instantiate the correct object.
    """
    handler_class = returnClassForVersion(version)
    return handler_class()
diff --git a/scripts/lib/mic/__init__.py b/scripts/lib/mic/__init__.py new file mode 100644 index 0000000000..63c1d9c846 --- /dev/null +++ b/scripts/lib/mic/__init__.py | |||
@@ -0,0 +1,4 @@ | |||
1 | import os, sys | ||
2 | |||
# Make the bundled third-party modules (under 3rdparty/) importable by
# prepending their directory to sys.path.  os.path.dirname(__file__) can
# be the empty string when the package is loaded from the current
# directory, hence the '.' fallback.
cur_path = os.path.dirname(__file__) or '.'
sys.path.insert(0, cur_path + '/3rdparty')
diff --git a/scripts/lib/mic/__version__.py b/scripts/lib/mic/__version__.py new file mode 100644 index 0000000000..60d7626cac --- /dev/null +++ b/scripts/lib/mic/__version__.py | |||
@@ -0,0 +1 @@ | |||
# Release version string of the mic tool.
VERSION = "0.14"
diff --git a/scripts/lib/mic/bootstrap.py b/scripts/lib/mic/bootstrap.py new file mode 100644 index 0000000000..66c291b0a8 --- /dev/null +++ b/scripts/lib/mic/bootstrap.py | |||
@@ -0,0 +1,279 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2009, 2010, 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | from __future__ import with_statement | ||
19 | import os | ||
20 | import sys | ||
21 | import tempfile | ||
22 | import shutil | ||
23 | import subprocess | ||
24 | import rpm | ||
25 | from mic import msger | ||
26 | from mic.utils import errors, proxy, misc | ||
27 | from mic.utils.rpmmisc import readRpmHeader, RPMInstallCallback | ||
28 | from mic.chroot import cleanup_mounts, setup_chrootenv, cleanup_chrootenv | ||
29 | |||
# Minimal PATH used for commands executed inside the bootstrap chroot.
PATH_BOOTSTRAP = "/usr/sbin:/usr/bin:/sbin:/bin"

# Transaction flags: unpack all files but skip scriptlets and triggers;
# the saved %pre/%post scriptlets are replayed manually afterwards
# (see MiniBackend.runInstall below).
RPMTRANS_FLAGS = [
        rpm.RPMTRANS_FLAG_ALLFILES,
        rpm.RPMTRANS_FLAG_NOSCRIPTS,
        rpm.RPMTRANS_FLAG_NOTRIGGERS,
]

# Verification flags: skip package signature and digest checks.
RPMVSF_FLAGS = [
        rpm._RPMVSF_NOSIGNATURES,
        rpm._RPMVSF_NODIGESTS
]

# Problem filters: tolerate downgrades, re-installs and foreign arches.
RPMPROB_FLAGS = [
        rpm.RPMPROB_FILTER_OLDPACKAGE,
        rpm.RPMPROB_FILTER_REPLACEPKG,
        rpm.RPMPROB_FILTER_IGNOREARCH
]
48 | |||
class MiniBackend(object):
    """Minimal RPM-only package backend used to populate a bootstrap
    rootfs.

    Selected packages are downloaded from the repo described by
    'repomd', installed into 'rootdir' through a bare rpm TransactionSet
    (scriptlets/triggers disabled -- see RPMTRANS_FLAGS), and their
    %pre/%post scriptlets optionally replayed via run_pkg_script().
    """
    def __init__(self, rootdir, arch=None, repomd=None):
        self._ts = None
        self.rootdir = os.path.abspath(rootdir)
        self.arch = arch
        self.repomd = repomd
        self.dlpkgs = []         # package names queued for install
        self.localpkgs = {}      # package name -> downloaded rpm path
        self.optionals = []      # names allowed to be missing from the repo
        self.preins = {}         # package name -> (interpreter, %pre script)
        self.postins = {}        # package name -> (interpreter, %post script)
        self.scriptlets = False  # replay saved scriptlets after install?

    def __del__(self):
        # Best-effort close of the rpm database via the ts property's fdel.
        try:
            del self.ts
        except:
            pass

    def get_ts(self):
        """Lazily create the rpm TransactionSet rooted at self.rootdir,
        with the module-level flag sets OR'ed together."""
        if not self._ts:
            self._ts = rpm.TransactionSet(self.rootdir)
            self._ts.setFlags(reduce(lambda x, y: x|y, RPMTRANS_FLAGS))
            self._ts.setVSFlags(reduce(lambda x, y: x|y, RPMVSF_FLAGS))
            self._ts.setProbFilter(reduce(lambda x, y: x|y, RPMPROB_FLAGS))

        return self._ts

    def del_ts(self):
        """Close the rpm database and drop the cached TransactionSet."""
        if self._ts:
            self._ts.closeDB()
            self._ts = None

    ts = property(fget = lambda self: self.get_ts(),
                  fdel = lambda self: self.del_ts(),
                  doc="TransactionSet object")

    def selectPackage(self, pkg):
        """Queue pkg (by name) for download/install; duplicates ignored."""
        if not pkg in self.dlpkgs:
            self.dlpkgs.append(pkg)

    def runInstall(self):
        """Download and install all selected packages, then (if
        self.scriptlets is set) replay the %pre/%post scriptlets that
        rpm itself skipped because of RPMTRANS_FLAG_NOSCRIPTS."""
        # FIXME: check space
        self.downloadPkgs()
        self.installPkgs()

        if not self.scriptlets:
            return

        # Scriptlet arg is the install count: '0' for %pre, '1' for %post.
        for pkg in self.preins.keys():
            prog, script = self.preins[pkg]
            self.run_pkg_script(pkg, prog, script, '0')
        for pkg in self.postins.keys():
            prog, script = self.postins[pkg]
            self.run_pkg_script(pkg, prog, script, '1')

    def downloadPkgs(self):
        """Fetch every queued package into self.localpkgs.

        Packages listed in self.optionals may be missing; any other
        missing package raises errors.BootstrapError.
        """
        nonexist = []
        for pkg in self.dlpkgs:
            localpth = misc.get_package(pkg, self.repomd, self.arch)
            if localpth:
                self.localpkgs[pkg] = localpth
            elif pkg in self.optionals:
                # skip optional rpm
                continue
            else:
                # mark nonexist rpm
                nonexist.append(pkg)

        if nonexist:
            raise errors.BootstrapError("Can't get rpm binary: %s" %
                                        ','.join(nonexist))

    def installPkgs(self):
        """Add all downloaded rpms to the transaction set, remember their
        scriptlets, and run the transaction."""
        for pkg in self.localpkgs.keys():
            rpmpath = self.localpkgs[pkg]

            hdr = readRpmHeader(self.ts, rpmpath)

            # save prein and postin scripts
            self.preins[pkg] = (hdr['PREINPROG'], hdr['PREIN'])
            self.postins[pkg] = (hdr['POSTINPROG'], hdr['POSTIN'])

            # mark pkg as install
            self.ts.addInstall(hdr, rpmpath, 'u')

        # run transaction
        self.ts.order()
        cb = RPMInstallCallback(self.ts)
        self.ts.run(cb.callback, '')

    def run_pkg_script(self, pkg, prog, script, arg):
        """Run one saved scriptlet for pkg inside the rootdir chroot.

        prog/script -- scriptlet interpreter and body (from the rpm header)
        arg         -- scriptlet argument string ('0' for %pre, '1' for
                       %post, as passed by runInstall)
        """
        mychroot = lambda: os.chroot(self.rootdir)

        if not script:
            return

        if prog == "<lua>":
            # rpm-internal lua scriptlets need a real lua interpreter
            # inside the chroot
            prog = "/usr/bin/lua"

        # Write the scriptlet to <rootdir>/tmp so it is reachable as
        # /tmp/<name> once we have chroot'ed.
        tmpdir = os.path.join(self.rootdir, "tmp")
        if not os.path.exists(tmpdir):
            os.makedirs(tmpdir)
        tmpfd, tmpfp = tempfile.mkstemp(dir=tmpdir, prefix="%s.pre-" % pkg)
        script = script.replace('\r', '')
        os.write(tmpfd, script)
        os.close(tmpfd)
        os.chmod(tmpfp, 0700)

        try:
            script_fp = os.path.join('/tmp', os.path.basename(tmpfp))
            subprocess.call([prog, script_fp, arg], preexec_fn=mychroot)
        except (OSError, IOError), err:
            msger.warning(str(err))
        finally:
            os.unlink(tmpfp)
165 | |||
class Bootstrap(object):
    """Manage a throw-away bootstrap root filesystem for a distro.

    The rootfs lives in a temporary directory, is populated via
    MiniBackend.create(), and commands can be executed inside it with
    run() through chroot.  cleanup() (also called from __del__) unmounts
    and removes everything.
    """
    def __init__(self, rootdir, distro, arch=None):
        self.rootdir = misc.mkdtemp(dir=rootdir, prefix=distro)
        self.distro = distro
        self.arch = arch
        self.logfile = None   # optional log file replayed through msger after run()
        self.pkgslist = []
        self.repomd = None

    def __del__(self):
        self.cleanup()

    def get_rootdir(self):
        """Return self.rootdir recreated empty (existing content removed)."""
        if os.path.exists(self.rootdir):
            shutil.rmtree(self.rootdir, ignore_errors=True)
        os.makedirs(self.rootdir)
        return self.rootdir

    def dirsetup(self, rootdir=None):
        """Best-effort creation of /tmp, /etc and an /etc/<distro>-release
        marker file under rootdir (defaults to self.rootdir).

        NOTE(review): _path closes over the 'rootdir' name, which is only
        defaulted after the lambda is defined; this works because the
        lambda reads rootdir at call time, not at definition time.
        """
        _path = lambda pth: os.path.join(rootdir, pth.lstrip('/'))

        if not rootdir:
            rootdir = self.rootdir

        try:
            # make /tmp and /etc path
            tmpdir = _path('/tmp')
            if not os.path.exists(tmpdir):
                os.makedirs(tmpdir)
            etcdir = _path('/etc')
            if not os.path.exists(etcdir):
                os.makedirs(etcdir)

            # touch distro file
            tzdist = _path('/etc/%s-release' % self.distro)
            if not os.path.exists(tzdist):
                with open(tzdist, 'w') as wf:
                    wf.write("bootstrap")
        except:
            # deliberately best-effort: setup failures are ignored
            pass

    def create(self, repomd, pkglist, optlist=()):
        """Install pkglist (plus the optional packages in optlist) into a
        freshly emptied rootdir; wraps failures in BootstrapError."""
        try:
            pkgmgr = MiniBackend(self.get_rootdir())
            pkgmgr.arch = self.arch
            pkgmgr.repomd = repomd
            pkgmgr.optionals = list(optlist)
            map(pkgmgr.selectPackage, pkglist + list(optlist))
            pkgmgr.runInstall()
        except (OSError, IOError, errors.CreatorError), err:
            raise errors.BootstrapError("%s" % err)

    def run(self, cmd, chdir, rootdir=None, bindmounts=None):
        """Run cmd inside the bootstrap chroot and return its exit code.

        cmd        -- list (exec'd directly) or string (run via a shell)
        chdir      -- working directory inside the chroot
        rootdir    -- chroot root; defaults to self.rootdir
        bindmounts -- extra bind mounts handed to setup_chrootenv
        """
        def mychroot():
            # executed in the child between fork and exec
            os.chroot(rootdir)
            os.chdir(chdir)

        def sync_timesetting(rootdir):
            try:
                # sync time and zone info to bootstrap
                if os.path.exists(rootdir + "/etc/localtime"):
                    os.unlink(rootdir + "/etc/localtime")
                shutil.copyfile("/etc/localtime", rootdir + "/etc/localtime")
            except:
                pass

        def sync_passwdfile(rootdir):
            try:
                # sync passwd file to bootstrap, saving the user info
                if os.path.exists(rootdir + "/etc/passwd"):
                    os.unlink(rootdir + "/etc/passwd")
                shutil.copyfile("/etc/passwd", rootdir + "/etc/passwd")
            except:
                pass

        if not rootdir:
            rootdir = self.rootdir

        if isinstance(cmd, list):
            shell = False
        else:
            shell = True

        env = os.environ
        env['PATH'] = "%s:%s" % (PATH_BOOTSTRAP, env['PATH'])

        retcode = 0
        gloablmounts = None  # sic: misspelled name kept consistently below
        try:
            proxy.set_proxy_environ()
            gloablmounts = setup_chrootenv(rootdir, bindmounts, False)
            sync_timesetting(rootdir)
            sync_passwdfile(rootdir)
            retcode = subprocess.call(cmd, preexec_fn=mychroot, env=env, shell=shell)
        except (OSError, IOError):
            # add additional information to original exception
            value, tb = sys.exc_info()[1:]
            value = '%s: %s' % (value, ' '.join(cmd))
            raise RuntimeError, value, tb
        finally:
            if self.logfile and os.path.isfile(self.logfile):
                msger.log(file(self.logfile).read())
            cleanup_chrootenv(rootdir, bindmounts, gloablmounts)
            proxy.unset_proxy_environ()
        return retcode

    def cleanup(self):
        """Unmount everything under rootdir and delete it (best effort)."""
        try:
            # clean mounts
            cleanup_mounts(self.rootdir)
            # remove rootdir
            shutil.rmtree(self.rootdir, ignore_errors=True)
        except:
            pass
diff --git a/scripts/lib/mic/chroot.py b/scripts/lib/mic/chroot.py new file mode 100644 index 0000000000..99fb9a2c17 --- /dev/null +++ b/scripts/lib/mic/chroot.py | |||
@@ -0,0 +1,343 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2009, 2010, 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | from __future__ import with_statement | ||
19 | import os | ||
20 | import shutil | ||
21 | import subprocess | ||
22 | |||
23 | from mic import msger | ||
24 | from mic.conf import configmgr | ||
25 | from mic.utils import misc, errors, runner, fs_related | ||
26 | |||
27 | chroot_lockfd = -1 | ||
28 | chroot_lock = "" | ||
29 | BIND_MOUNTS = ( | ||
30 | "/proc", | ||
31 | "/proc/sys/fs/binfmt_misc", | ||
32 | "/sys", | ||
33 | "/dev", | ||
34 | "/dev/pts", | ||
35 | "/dev/shm", | ||
36 | "/var/lib/dbus", | ||
37 | "/var/run/dbus", | ||
38 | "/var/lock", | ||
39 | ) | ||
40 | |||
def cleanup_after_chroot(targettype, imgmount, tmpdir, tmpmnt):
    """Release the scratch resources used for a chroot session.

    For "img" targets the loop/image mount object is cleaned up first;
    afterwards both temporary directories (if given) are removed,
    ignoring removal errors.
    """
    if targettype == "img" and imgmount:
        imgmount.cleanup()

    for scratch in (tmpdir, tmpmnt):
        if scratch:
            shutil.rmtree(scratch, ignore_errors=True)
50 | |||
def check_bind_mounts(chrootdir, bindmounts):
    """Sanity-check a ';'-separated list of "src[:dst]" bind mount specs.

    Returns False as soon as one source directory does not exist, True
    otherwise.  Specs whose source is '/' or one of the default
    BIND_MOUNTS are skipped (those are always mounted anyway).  When
    chrootdir is given, a warning is emitted for destinations that
    already exist inside it, since the bind mount would shadow them.
    """
    for mount in bindmounts.split(";"):
        if not mount:
            continue

        srcdst = mount.split(":")
        if len(srcdst) == 1:
            # no explicit destination given
            srcdst.append("none")

        if not os.path.isdir(srcdst[0]):
            return False

        if srcdst[1] == "" or srcdst[1] == "none":
            srcdst[1] = None

        if srcdst[0] in BIND_MOUNTS or srcdst[0] == '/':
            continue

        if chrootdir:
            # destination defaults to the (absolute) source path
            if not srcdst[1]:
                srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[0]))
            else:
                srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[1]))

            tmpdir = chrootdir + "/" + srcdst[1]
            if os.path.isdir(tmpdir):
                msger.warning("Warning: dir %s has existed." % tmpdir)

    return True
81 | |||
def cleanup_mounts(chrootdir):
    """Lazily unmount every mount point at or below 'chrootdir'.

    /proc/mounts is scanned in reverse so nested mounts are unmounted
    before their parents.  Failures are reported as warnings but do not
    stop the sweep.  Always returns 0.
    """
    umountcmd = misc.find_binary_path("umount")
    abs_chrootdir = os.path.abspath(chrootdir)
    # read the table once, with an explicit close (the original leaked
    # the file handle)
    with open('/proc/mounts') as mtab:
        mounts = mtab.readlines()
    for line in reversed(mounts):
        if abs_chrootdir not in line:
            continue

        point = line.split()[1]

        # append '/' to avoid matching a sibling dir sharing the
        # same name prefix (e.g. /foo vs /foobar)
        if abs_chrootdir == point or point.startswith(abs_chrootdir + '/'):
            args = [ umountcmd, "-l", point ]
            ret = runner.quiet(args)
            if ret != 0:
                msger.warning("failed to unmount %s" % point)

    return 0
100 | |||
def setup_chrootenv(chrootdir, bindmounts = None, mountparent = True):
    """Prepare 'chrootdir' so commands can be chroot'ed into it.

    Bind-mounts the default pseudo filesystems (BIND_MOUNTS), the
    user-supplied 'bindmounts' ("src1:dst1;src2:dst2;..." string),
    optionally the host root at /parentroot (read-only), and every
    /lib/modules/<kernel> directory; then copies in resolv.conf and
    mtab and opens a lock file inside the chroot (stored in the module
    globals chroot_lockfd/chroot_lock).

    Returns the list of mount objects, which the caller must later hand
    back to cleanup_chrootenv() for teardown.
    """
    global chroot_lockfd, chroot_lock

    def get_bind_mounts(chrootdir, bindmounts, mountparent = True):
        # Build the ordered list of BindChrootMount objects.  Order
        # matters: cleanup unmounts in reverse of this list.
        chrootmounts = []
        if bindmounts in ("", None):
            bindmounts = ""

        for mount in bindmounts.split(";"):
            if not mount:
                continue

            srcdst = mount.split(":")
            srcdst[0] = os.path.abspath(os.path.expanduser(srcdst[0]))
            if len(srcdst) == 1:
                # no explicit destination given
                srcdst.append("none")

            # if some bindmount is not existed, but it's created inside
            # chroot, this is not expected
            if not os.path.exists(srcdst[0]):
                os.makedirs(srcdst[0])

            if not os.path.isdir(srcdst[0]):
                continue

            # '/' and the default mounts are always handled below
            if srcdst[0] in BIND_MOUNTS or srcdst[0] == '/':
                msger.verbose("%s will be mounted by default." % srcdst[0])
                continue

            if srcdst[1] == "" or srcdst[1] == "none":
                # None destination: BindChrootMount mirrors the source path
                srcdst[1] = None
            else:
                srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[1]))
                if os.path.isdir(chrootdir + "/" + srcdst[1]):
                    # don't shadow a directory that already exists in the
                    # chroot
                    msger.warning("%s has existed in %s , skip it."\
                                  % (srcdst[1], chrootdir))
                    continue

            chrootmounts.append(fs_related.BindChrootMount(srcdst[0],
                                                           chrootdir,
                                                           srcdst[1]))

        """Default bind mounts"""
        for pt in BIND_MOUNTS:
            if not os.path.exists(pt):
                continue
            chrootmounts.append(fs_related.BindChrootMount(pt,
                                                           chrootdir,
                                                           None))

        if mountparent:
            # expose the host root read-only inside the chroot
            chrootmounts.append(fs_related.BindChrootMount("/",
                                                           chrootdir,
                                                           "/parentroot",
                                                           "ro"))

        # make host kernel modules visible (read-only) in the chroot
        for kernel in os.listdir("/lib/modules"):
            chrootmounts.append(fs_related.BindChrootMount(
                                                "/lib/modules/"+kernel,
                                                chrootdir,
                                                None,
                                                "ro"))

        return chrootmounts

    def bind_mount(chrootmounts):
        # Perform the mounts in list order.
        for b in chrootmounts:
            msger.verbose("bind_mount: %s -> %s" % (b.src, b.dest))
            b.mount()

    def setup_resolv(chrootdir):
        # Best effort: DNS inside the chroot is nice to have, not required.
        try:
            shutil.copyfile("/etc/resolv.conf", chrootdir + "/etc/resolv.conf")
        except:
            pass

    globalmounts = get_bind_mounts(chrootdir, bindmounts, mountparent)
    bind_mount(globalmounts)

    setup_resolv(chrootdir)

    # copy mtab unless the chroot already provides it as a symlink
    mtab = "/etc/mtab"
    dstmtab = chrootdir + mtab
    if not os.path.islink(dstmtab):
        shutil.copyfile(mtab, dstmtab)

    # NOTE(review): despite the name, chroot_lockfd holds a file object,
    # not a numeric fd (it is initialized to -1 at module level).  The
    # open handle marks the chroot as in use; cleanup_chrootenv() closes
    # it and uses fuser on the lock path to detect other users.
    chroot_lock = os.path.join(chrootdir, ".chroot.lock")
    chroot_lockfd = open(chroot_lock, "w")

    return globalmounts
191 | |||
def cleanup_chrootenv(chrootdir, bindmounts=None, globalmounts=()):
    """Undo setup_chrootenv(): release the lock, unmount everything,
    clear resolv.conf/mtab, kill leftover processes rooted in the
    chroot, and remove now-empty bind mount directories.

    'globalmounts' must be the list returned by setup_chrootenv();
    'bindmounts' must be the same spec string that was passed to it.
    """
    global chroot_lockfd, chroot_lock

    def bind_unmount(chrootmounts):
        # Unmount in reverse order so nested mounts go first.
        for b in reversed(chrootmounts):
            msger.verbose("bind_unmount: %s -> %s" % (b.src, b.dest))
            b.unmount()

    def cleanup_resolv(chrootdir):
        # Truncate rather than delete: keep the file, drop host DNS info.
        try:
            fd = open(chrootdir + "/etc/resolv.conf", "w")
            fd.truncate(0)
            fd.close()
        except:
            pass

    def kill_processes(chrootdir):
        # SIGKILL any process whose root is still this chroot, so the
        # directory can be removed afterwards.
        import glob
        for fp in glob.glob("/proc/*/root"):
            try:
                if os.readlink(fp) == chrootdir:
                    pid = int(fp.split("/")[2])
                    os.kill(pid, 9)
            except:
                pass

    def cleanup_mountdir(chrootdir, bindmounts):
        # Remove the (now empty) destination dirs that were created for
        # the user-supplied bind mounts; non-empty dirs are left alone.
        if bindmounts == "" or bindmounts == None:
            return
        chrootmounts = []
        for mount in bindmounts.split(";"):
            if not mount:
                continue

            srcdst = mount.split(":")

            if len(srcdst) == 1:
                srcdst.append("none")

            if srcdst[0] == "/":
                continue

            if srcdst[1] == "" or srcdst[1] == "none":
                # destination defaulted to the source path
                srcdst[1] = srcdst[0]

            srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[1]))
            tmpdir = chrootdir + "/" + srcdst[1]
            if os.path.isdir(tmpdir):
                if len(os.listdir(tmpdir)) == 0:
                    shutil.rmtree(tmpdir, ignore_errors = True)
                else:
                    msger.warning("Warning: dir %s isn't empty." % tmpdir)

    # release our lock handle before unmounting
    chroot_lockfd.close()
    bind_unmount(globalmounts)

    # only tear down shared state if no one else holds the lock file
    if not fs_related.my_fuser(chroot_lock):
        tmpdir = chrootdir + "/parentroot"
        if os.path.exists(tmpdir) and len(os.listdir(tmpdir)) == 0:
            shutil.rmtree(tmpdir, ignore_errors = True)

        cleanup_resolv(chrootdir)

        if os.path.exists(chrootdir + "/etc/mtab"):
            os.unlink(chrootdir + "/etc/mtab")

        kill_processes(chrootdir)

    cleanup_mountdir(chrootdir, bindmounts)
261 | |||
262 | def chroot(chrootdir, bindmounts = None, execute = "/bin/bash"): | ||
263 | def mychroot(): | ||
264 | os.chroot(chrootdir) | ||
265 | os.chdir("/") | ||
266 | |||
267 | if configmgr.chroot['saveto']: | ||
268 | savefs = True | ||
269 | saveto = configmgr.chroot['saveto'] | ||
270 | wrnmsg = "Can't save chroot fs for dir %s exists" % saveto | ||
271 | if saveto == chrootdir: | ||
272 | savefs = False | ||
273 | wrnmsg = "Dir %s is being used to chroot" % saveto | ||
274 | elif os.path.exists(saveto): | ||
275 | if msger.ask("Dir %s already exists, cleanup and continue?" % | ||
276 | saveto): | ||
277 | shutil.rmtree(saveto, ignore_errors = True) | ||
278 | savefs = True | ||
279 | else: | ||
280 | savefs = False | ||
281 | |||
282 | if savefs: | ||
283 | msger.info("Saving image to directory %s" % saveto) | ||
284 | fs_related.makedirs(os.path.dirname(os.path.abspath(saveto))) | ||
285 | runner.quiet("cp -af %s %s" % (chrootdir, saveto)) | ||
286 | devs = ['dev/fd', | ||
287 | 'dev/stdin', | ||
288 | 'dev/stdout', | ||
289 | 'dev/stderr', | ||
290 | 'etc/mtab'] | ||
291 | ignlst = [os.path.join(saveto, x) for x in devs] | ||
292 | map(os.unlink, filter(os.path.exists, ignlst)) | ||
293 | else: | ||
294 | msger.warning(wrnmsg) | ||
295 | |||
296 | dev_null = os.open("/dev/null", os.O_WRONLY) | ||
297 | files_to_check = ["/bin/bash", "/sbin/init"] | ||
298 | |||
299 | architecture_found = False | ||
300 | |||
301 | """ Register statically-linked qemu-arm if it is an ARM fs """ | ||
302 | qemu_emulator = None | ||
303 | |||
304 | for ftc in files_to_check: | ||
305 | ftc = "%s/%s" % (chrootdir,ftc) | ||
306 | |||
307 | # Return code of 'file' is "almost always" 0 based on some man pages | ||
308 | # so we need to check the file existance first. | ||
309 | if not os.path.exists(ftc): | ||
310 | continue | ||
311 | |||
312 | for line in runner.outs(['file', ftc]).splitlines(): | ||
313 | if 'ARM' in line: | ||
314 | qemu_emulator = misc.setup_qemu_emulator(chrootdir, "arm") | ||
315 | architecture_found = True | ||
316 | break | ||
317 | |||
318 | if 'Intel' in line: | ||
319 | architecture_found = True | ||
320 | break | ||
321 | |||
322 | if architecture_found: | ||
323 | break | ||
324 | |||
325 | os.close(dev_null) | ||
326 | if not architecture_found: | ||
327 | raise errors.CreatorError("Failed to get architecture from any of the " | ||
328 | "following files %s from chroot." \ | ||
329 | % files_to_check) | ||
330 | |||
331 | try: | ||
332 | msger.info("Launching shell. Exit to continue.\n" | ||
333 | "----------------------------------") | ||
334 | globalmounts = setup_chrootenv(chrootdir, bindmounts) | ||
335 | subprocess.call(execute, preexec_fn = mychroot, shell=True) | ||
336 | |||
337 | except OSError, err: | ||
338 | raise errors.CreatorError("chroot err: %s" % str(err)) | ||
339 | |||
340 | finally: | ||
341 | cleanup_chrootenv(chrootdir, bindmounts, globalmounts) | ||
342 | if qemu_emulator: | ||
343 | os.unlink(chrootdir + qemu_emulator) | ||
diff --git a/scripts/lib/mic/conf.py b/scripts/lib/mic/conf.py new file mode 100644 index 0000000000..b850d80520 --- /dev/null +++ b/scripts/lib/mic/conf.py | |||
@@ -0,0 +1,197 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os, sys, re | ||
19 | import ConfigParser | ||
20 | |||
21 | from mic import msger | ||
22 | from mic import kickstart | ||
23 | from mic.utils import misc, runner, proxy, errors | ||
24 | |||
25 | |||
26 | def get_siteconf(): | ||
27 | mic_path = os.path.dirname(__file__) | ||
28 | eos = mic_path.find('scripts') + len('scripts') | ||
29 | scripts_path = mic_path[:eos] | ||
30 | |||
31 | return scripts_path + "/lib/image/config/wic.conf" | ||
32 | |||
class ConfigMgr(object):
    """Singleton holding all wic configuration state.

    Options live in per-section dicts exposed as instance attributes
    (self.common, self.create, self.chroot, self.convert,
    self.bootstrap), seeded from DEFAULTS, then overlaid from the site
    config file and finally from the kickstart file.
    """
    prefer_backends = ["zypp", "yum"]

    # Section name -> default option dict; reset() installs a copy of
    # each as an instance attribute of the same name.
    DEFAULTS = {'common': {
                    "distro_name": "Default Distribution",
                    "plugin_dir": "/usr/lib/wic/plugins", # TODO use prefix also?
                },
                'create': {
                    "tmpdir": '/var/tmp/wic',
                    "cachedir": '/var/tmp/wic/cache',
                    "outdir": './wic-output',

                    "arch": None, # None means auto-detect
                    "pkgmgr": "auto",
                    "name": "output",
                    "ksfile": None,
                    "ks": None,
                    "repomd": None,
                    "local_pkgs_path": None,
                    "release": None,
                    "logfile": None,
                    "record_pkgs": [],
                    "pack_to": None,
                    "name_prefix": None,
                    "name_suffix": None,
                    "proxy": None,
                    "no_proxy": None,
                    "copy_kernel": False,
                    "install_pkgs": None,
                    "repourl": {},
                    "localrepos": [],  # save localrepos
                    "runtime": "bootstrap",
                },
                'chroot': {
                    "saveto": None,
                },
                'convert': {
                    "shell": False,
                },
                'bootstrap': {
                    "rootdir": '/var/tmp/wic-bootstrap',
                    "packages": [],
                },
               }

    # make the manager class as singleton
    _instance = None
    def __new__(cls, *args, **kwargs):
        # NOTE(review): forwarding *args/**kwargs to object.__new__ is
        # deprecated in later Python 2.x releases -- confirm against the
        # interpreter this runs on.
        if not cls._instance:
            cls._instance = super(ConfigMgr, cls).__new__(cls, *args, **kwargs)

        return cls._instance

    def __init__(self, ksconf=None, siteconf=None):
        """Load defaults, then the site config, then (optionally) the
        kickstart file.  Both arguments are file paths."""
        # reset config options
        self.reset()

        if not siteconf:
            siteconf = get_siteconf()

        # initial options from siteconf
        self._siteconf = siteconf

        if ksconf:
            self._ksconf = ksconf

    def reset(self):
        """Drop any parsed state and reinstall the DEFAULTS sections as
        instance attributes."""
        self.__ksconf = None
        self.__siteconf = None

        # initialize the values with defaults
        for sec, vals in self.DEFAULTS.iteritems():
            setattr(self, sec, vals)

    # Assigning to _siteconf parses the file as a side effect.
    def __set_siteconf(self, siteconf):
        try:
            self.__siteconf = siteconf
            self._parse_siteconf(siteconf)
        except ConfigParser.Error, error:
            raise errors.ConfigError("%s" % error)
    def __get_siteconf(self):
        return self.__siteconf
    _siteconf = property(__get_siteconf, __set_siteconf)

    # Assigning to _ksconf parses the kickstart as a side effect.
    def __set_ksconf(self, ksconf):
        if not os.path.isfile(ksconf):
            msger.error('Cannot find ks file: %s' % ksconf)

        self.__ksconf = ksconf
        self._parse_kickstart(ksconf)
    def __get_ksconf(self):
        return self.__ksconf
    _ksconf = property(__get_ksconf, __set_ksconf)

    def _parse_siteconf(self, siteconf):
        """Overlay option values from the site config file onto the
        section dicts, then post-process proxy and runtime settings."""
        if not siteconf:
            return

        if not os.path.exists(siteconf):
            msger.warning("cannot read config file: %s" % siteconf)
            return

        parser = ConfigParser.SafeConfigParser()
        parser.read(siteconf)

        # only sections we know about are honored
        for section in parser.sections():
            if section in self.DEFAULTS:
                getattr(self, section).update(dict(parser.items(section)))

        # append common section items to other sections
        for section in self.DEFAULTS.keys():
            if section != "common":
                getattr(self, section).update(self.common)

        # check and normalize the scheme of proxy url
        if self.create['proxy']:
            m = re.match('^(\w+)://.*', self.create['proxy'])
            if m:
                scheme = m.group(1)
                if scheme not in ('http', 'https', 'ftp', 'socks'):
                    msger.error("%s: proxy scheme is incorrect" % siteconf)
            else:
                msger.warning("%s: proxy url w/o scheme, use http as default"
                              % siteconf)
                self.create['proxy'] = "http://" + self.create['proxy']

        proxy.set_proxies(self.create['proxy'], self.create['no_proxy'])

        # bootstrap option handling
        self.set_runtime(self.create['runtime'])
        # normalize 'packages' to a list: the config file may give a
        # comma- or whitespace-separated string, possibly multi-line
        if isinstance(self.bootstrap['packages'], basestring):
            packages = self.bootstrap['packages'].replace('\n', ' ')
            if packages.find(',') != -1:
                packages = packages.split(',')
            else:
                packages = packages.split()
            self.bootstrap['packages'] = packages

    def _parse_kickstart(self, ksconf=None):
        """Parse the kickstart file and derive the image name from it
        (combined with release and name prefix/suffix settings)."""
        if not ksconf:
            return

        ksconf = misc.normalize_ksfile(ksconf,
                                       self.create['release'],
                                       self.create['arch'])

        ks = kickstart.read_kickstart(ksconf)

        self.create['ks'] = ks
        self.create['name'] = os.path.splitext(os.path.basename(ksconf))[0]

        self.create['name'] = misc.build_name(ksconf,
                                              self.create['release'],
                                              self.create['name_prefix'],
                                              self.create['name_suffix'])

    def set_runtime(self, runtime):
        """Validate and store the runtime mode ("bootstrap" or
        "native"); on tizen hosts the mode is forced to "native"."""
        if runtime not in ("bootstrap", "native"):
            msger.error("Invalid runtime mode: %s" % runtime)

        if misc.get_distro()[0] in ("tizen", "Tizen"):
            runtime = "native"
        self.create['runtime'] = runtime

# module-level singleton used throughout mic
configmgr = ConfigMgr()
diff --git a/scripts/lib/mic/creator.py b/scripts/lib/mic/creator.py new file mode 100644 index 0000000000..267928f877 --- /dev/null +++ b/scripts/lib/mic/creator.py | |||
@@ -0,0 +1,351 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os, sys, re | ||
19 | from optparse import SUPPRESS_HELP | ||
20 | |||
21 | from mic import msger | ||
22 | from mic.utils import cmdln, errors | ||
23 | from mic.conf import configmgr | ||
24 | from mic.plugin import pluginmgr | ||
25 | |||
26 | |||
class Creator(cmdln.Cmdln):
    """${name}: create an image

    Usage:
        ${name} SUBCOMMAND <ksfile> [OPTS]

    ${command_list}
    ${option_list}
    """

    name = 'mic create(cr)'

    def __init__(self, *args, **kwargs):
        """Collect every registered imager plugin and graft it onto this
        class as a do_<subcmd> method, so cmdln dispatches subcommands
        straight to the plugins."""
        cmdln.Cmdln.__init__(self, *args, **kwargs)
        self._subcmds = []

        # get cmds from pluginmgr
        # mix-in do_subcmd interface
        for subcmd, klass in pluginmgr.get_plugins('imager').iteritems():
            if not hasattr(klass, 'do_create'):
                msger.warning("Unsurpport subcmd: %s" % subcmd)
                continue

            func = getattr(klass, 'do_create')
            setattr(self.__class__, "do_"+subcmd, func)
            self._subcmds.append(subcmd)

    def get_optparser(self):
        """Build the option parser shared by all 'create' subcommands."""
        optparser = cmdln.CmdlnOptionParser(self)
        optparser.add_option('-d', '--debug', action='store_true',
                             dest='debug',
                             help=SUPPRESS_HELP)
        optparser.add_option('-v', '--verbose', action='store_true',
                             dest='verbose',
                             help=SUPPRESS_HELP)
        optparser.add_option('', '--logfile', type='string', dest='logfile',
                             default=None,
                             help='Path of logfile')
        optparser.add_option('-c', '--config', type='string', dest='config',
                             default=None,
                             help='Specify config file for mic')
        optparser.add_option('-k', '--cachedir', type='string', action='store',
                             dest='cachedir', default=None,
                             help='Cache directory to store the downloaded')
        optparser.add_option('-o', '--outdir', type='string', action='store',
                             dest='outdir', default=None,
                             help='Output directory')
        optparser.add_option('-A', '--arch', type='string', dest='arch',
                             default=None,
                             help='Specify repo architecture')
        optparser.add_option('', '--release', type='string', dest='release',
                             default=None, metavar='RID',
                             help='Generate a release of RID with all necessary'
                                  ' files, when @BUILD_ID@ is contained in '
                                  'kickstart file, it will be replaced by RID')
        optparser.add_option("", "--record-pkgs", type="string",
                             dest="record_pkgs", default=None,
                             help='Record the info of installed packages, '
                                  'multiple values can be specified which '
                                  'joined by ",", valid values: "name", '
                                  '"content", "license", "vcs"')
        optparser.add_option('', '--pkgmgr', type='string', dest='pkgmgr',
                             default=None,
                             help='Specify backend package manager')
        optparser.add_option('', '--local-pkgs-path', type='string',
                             dest='local_pkgs_path', default=None,
                             help='Path for local pkgs(rpms) to be installed')
        optparser.add_option('', '--runtime', type='string',
                             dest='runtime', default=None,
                             help='Specify runtime mode, avaiable: bootstrap, native')
        # --taring-to is alias to --pack-to
        optparser.add_option('', '--taring-to', type='string',
                             dest='pack_to', default=None,
                             help=SUPPRESS_HELP)
        optparser.add_option('', '--pack-to', type='string',
                             dest='pack_to', default=None,
                             help='Pack the images together into the specified'
                                  ' achive, extension supported: .zip, .tar, '
                                  '.tar.gz, .tar.bz2, etc. by default, .tar '
                                  'will be used')
        optparser.add_option('', '--copy-kernel', action='store_true',
                             dest='copy_kernel',
                             help='Copy kernel files from image /boot directory'
                                  ' to the image output directory.')
        optparser.add_option('', '--install-pkgs', type='string', action='store',
                             dest='install_pkgs', default=None,
                             help='Specify what type of packages to be installed,'
                                  ' valid: source, debuginfo, debugsource')
        optparser.add_option('', '--tmpfs', action='store_true', dest='enabletmpfs',
                             help='Setup tmpdir as tmpfs to accelerate, experimental'
                                  ' feature, use it if you have more than 4G memory')
        optparser.add_option('', '--repourl', action='append',
                             dest='repourl', default=[],
                             help=SUPPRESS_HELP)
        return optparser

    def preoptparse(self, argv):
        """Reorder argv so that all recognized global options ('largs')
        come before everything else ('rargs': the subcommand and its own
        arguments), since optparse stops at the first unknown token."""
        optparser = self.get_optparser()

        largs = []
        rargs = []
        while argv:
            arg = argv.pop(0)

            if arg in ('-h', '--help'):
                rargs.append(arg)

            elif optparser.has_option(arg):
                largs.append(arg)

                # keep an option's value next to it
                if optparser.get_option(arg).takes_value():
                    try:
                        largs.append(argv.pop(0))
                    except IndexError:
                        raise errors.Usage("option %s requires arguments" % arg)

            else:
                # handle '--opt=value' and clustered short forms like '-ovalue'
                if arg.startswith("--"):
                    if "=" in arg:
                        opt = arg.split("=")[0]
                    else:
                        opt = None
                elif arg.startswith("-") and len(arg) > 2:
                    opt = arg[0:2]
                else:
                    opt = None

                if opt and optparser.has_option(opt):
                    largs.append(arg)
                else:
                    rargs.append(arg)

        return largs + rargs

    def postoptparse(self):
        """Validate the parsed global options and push them into
        configmgr.create; called after optparse has run."""
        abspath = lambda pth: os.path.abspath(os.path.expanduser(pth))

        if self.options.verbose:
            msger.set_loglevel('verbose')
        if self.options.debug:
            msger.set_loglevel('debug')

        if self.options.logfile:
            logfile_abs_path = abspath(self.options.logfile)
            if os.path.isdir(logfile_abs_path):
                raise errors.Usage("logfile's path %s should be file"
                                   % self.options.logfile)
            if not os.path.exists(os.path.dirname(logfile_abs_path)):
                os.makedirs(os.path.dirname(logfile_abs_path))
            msger.set_interactive(False)
            msger.set_logfile(logfile_abs_path)
            configmgr.create['logfile'] = self.options.logfile

        if self.options.config:
            # a user-supplied config replaces everything parsed so far
            configmgr.reset()
            configmgr._siteconf = self.options.config

        if self.options.outdir is not None:
            configmgr.create['outdir'] = abspath(self.options.outdir)
        if self.options.cachedir is not None:
            configmgr.create['cachedir'] = abspath(self.options.cachedir)
        os.environ['ZYPP_LOCKFILE_ROOT'] = configmgr.create['cachedir']

        for cdir in ('outdir', 'cachedir'):
            if os.path.exists(configmgr.create[cdir]) \
              and not os.path.isdir(configmgr.create[cdir]):
                msger.error('Invalid directory specified: %s' \
                            % configmgr.create[cdir])

        if self.options.local_pkgs_path is not None:
            if not os.path.exists(self.options.local_pkgs_path):
                msger.error('Local pkgs directory: \'%s\' not exist' \
                            % self.options.local_pkgs_path)
            configmgr.create['local_pkgs_path'] = self.options.local_pkgs_path

        if self.options.release:
            configmgr.create['release'] = self.options.release.rstrip('/')

        if self.options.record_pkgs:
            configmgr.create['record_pkgs'] = []
            for infotype in self.options.record_pkgs.split(','):
                if infotype not in ('name', 'content', 'license', 'vcs'):
                    raise errors.Usage('Invalid pkg recording: %s, valid ones:'
                                       ' "name", "content", "license", "vcs"' \
                                       % infotype)

                configmgr.create['record_pkgs'].append(infotype)

        if self.options.arch is not None:
            # NOTE(review): 'rpmmisc' is not imported anywhere in this
            # module -- passing --arch will raise NameError here.
            # Confirm whether mic.utils.rpmmisc still exists in this
            # tree or whether --arch handling should be reworked.
            supported_arch = sorted(rpmmisc.archPolicies.keys(), reverse=True)
            if self.options.arch in supported_arch:
                configmgr.create['arch'] = self.options.arch
            else:
                raise errors.Usage('Invalid architecture: "%s".\n'
                                   '  Supported architectures are: \n'
                                   '  %s' % (self.options.arch,
                                               ', '.join(supported_arch)))

        if self.options.pkgmgr is not None:
            configmgr.create['pkgmgr'] = self.options.pkgmgr

        if self.options.runtime:
            configmgr.set_runtime(self.options.runtime)

        if self.options.pack_to is not None:
            configmgr.create['pack_to'] = self.options.pack_to

        if self.options.copy_kernel:
            configmgr.create['copy_kernel'] = self.options.copy_kernel

        if self.options.install_pkgs:
            configmgr.create['install_pkgs'] = []
            for pkgtype in self.options.install_pkgs.split(','):
                if pkgtype not in ('source', 'debuginfo', 'debugsource'):
                    raise errors.Usage('Invalid parameter specified: "%s", '
                                       'valid values: source, debuginfo, '
                                       'debusource' % pkgtype)

                configmgr.create['install_pkgs'].append(pkgtype)

        if self.options.enabletmpfs:
            configmgr.create['enabletmpfs'] = self.options.enabletmpfs

        if self.options.repourl:
            for item in self.options.repourl:
                try:
                    key, val = item.split('=')
                except:
                    # silently skip malformed 'key=value' entries
                    continue
                configmgr.create['repourl'][key] = val

    def main(self, argv=None):
        """Entry point: pre-sort argv, parse global options, apply them,
        then dispatch to the subcommand via cmdln."""
        if argv is None:
            argv = sys.argv
        else:
            argv = argv[:] # don't modify caller's list

        self.optparser = self.get_optparser()
        if self.optparser:
            try:
                argv = self.preoptparse(argv)
                self.options, args = self.optparser.parse_args(argv)

            except cmdln.CmdlnUserError, ex:
                msg = "%s: %s\nTry '%s help' for info.\n"\
                      % (self.name, ex, self.name)
                msger.error(msg)

            except cmdln.StopOptionProcessing, ex:
                return 0
        else:
            # optparser=None means no process for opts
            self.options, args = None, argv[1:]

        if not args:
            return self.emptyline()

        self.postoptparse()

        return self.cmd(args)

    def precmd(self, argv): # check help before cmd
        """Rewrite a bare subcommand invocation into a help request,
        unless a help flag is already present."""
        if '-h' in argv or '?' in argv or '--help' in argv or 'help' in argv:
            return argv

        if len(argv) == 1:
            return ['help', argv[0]]

        return argv

    def do_auto(self, subcmd, opts, *args):
        """${cmd_name}: auto detect image type from magic header

        Usage:
            ${name} ${cmd_name} <ksfile>

        ${cmd_option_list}
        """
        def parse_magic_line(re_str, pstr, ptype='mic'):
            # Extract (format, remaining_options) from a '-*-mic-options-*-'
            # (or mic2) magic comment; returns None if it doesn't match.
            ptn = re.compile(re_str)
            m = ptn.match(pstr)
            if not m or not m.groups():
                return None

            inline_argv = m.group(1).strip()
            if ptype == 'mic':
                m2 = re.search('(?P<format>\w+)', inline_argv)
            elif ptype == 'mic2':
                m2 = re.search('(-f|--format(=)?)\s*(?P<format>\w+)',
                               inline_argv)
            else:
                return None

            if m2:
                cmdname = m2.group('format')
                inline_argv = inline_argv.replace(m2.group(0), '')
                return (cmdname, inline_argv)

            return None

        if len(args) != 1:
            raise errors.Usage("Extra arguments given")

        if not os.path.exists(args[0]):
            raise errors.CreatorError("Can't find the file: %s" % args[0])

        # only the first line of the ks file may carry the magic comment
        with open(args[0], 'r') as rf:
            first_line = rf.readline()

        mic_re = '^#\s*-\*-mic-options-\*-\s+(.*)\s+-\*-mic-options-\*-'
        mic2_re = '^#\s*-\*-mic2-options-\*-\s+(.*)\s+-\*-mic2-options-\*-'

        result = parse_magic_line(mic_re, first_line, 'mic') \
                 or parse_magic_line(mic2_re, first_line, 'mic2')
        if not result:
            raise errors.KsError("Invalid magic line in file: %s" % args[0])

        if result[0] not in self._subcmds:
            raise errors.KsError("Unsupport format '%s' in %s"
                                 % (result[0], args[0]))

        # re-dispatch with the detected subcommand and its inline options
        argv = ' '.join(result + args).split()
        self.main(argv)
351 | |||
diff --git a/scripts/lib/mic/imager/__init__.py b/scripts/lib/mic/imager/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/mic/imager/__init__.py | |||
diff --git a/scripts/lib/mic/imager/baseimager.py b/scripts/lib/mic/imager/baseimager.py new file mode 100644 index 0000000000..b7212493b4 --- /dev/null +++ b/scripts/lib/mic/imager/baseimager.py | |||
@@ -0,0 +1,1263 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2007 Red Hat Inc. | ||
4 | # Copyright (c) 2009, 2010, 2011 Intel, Inc. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify it | ||
7 | # under the terms of the GNU General Public License as published by the Free | ||
8 | # Software Foundation; version 2 of the License | ||
9 | # | ||
10 | # This program is distributed in the hope that it will be useful, but | ||
11 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
12 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
13 | # for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along | ||
16 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
17 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | |||
19 | from __future__ import with_statement | ||
20 | import os, sys | ||
21 | import stat | ||
22 | import tempfile | ||
23 | import shutil | ||
24 | import subprocess | ||
25 | import re | ||
26 | import tarfile | ||
27 | import glob | ||
28 | |||
29 | from mic import kickstart | ||
30 | from mic import msger | ||
31 | from mic.utils.errors import CreatorError, Abort | ||
32 | from mic.utils import misc, runner, fs_related as fs | ||
33 | |||
34 | class BaseImageCreator(object): | ||
35 | """Installs a system to a chroot directory. | ||
36 | |||
37 | ImageCreator is the simplest creator class available; it will install and | ||
38 | configure a system image according to the supplied kickstart file. | ||
39 | |||
40 | e.g. | ||
41 | |||
42 | import mic.imgcreate as imgcreate | ||
43 | ks = imgcreate.read_kickstart("foo.ks") | ||
44 | imgcreate.ImageCreator(ks, "foo").create() | ||
45 | |||
46 | """ | ||
47 | |||
    def __del__(self):
        # Best-effort cleanup when the creator is garbage-collected.
        # cleanup() is a no-op if mount() was never called (no build dir),
        # so this is safe even for partially-initialized instances.
        self.cleanup()
50 | |||
    def __init__(self, createopts = None, pkgmgr = None):
        """Initialize an ImageCreator instance.

        createopts -- optional dict of creator options; each key is copied
                      onto `self` as an attribute (a few keys are renamed
                      via the optmap table below), so it can override any
                      of the defaults set here
        pkgmgr     -- optional package-manager backend used for the install

        Note: despite the historical docstring, the kickstart handle (ks)
        and image name are set through createopts, not direct parameters.
        """

        self.pkgmgr = pkgmgr

        # Private state: build dir is created lazily by __ensure_builddir().
        self.__builddir = None
        self.__bindmounts = []

        # Defaults; any of these may be overridden through createopts.
        self.ks = None
        self.name = "target"
        self.tmpdir = "/var/tmp/wic"
        self.cachedir = "/var/tmp/wic/cache"
        self.workdir = "/var/tmp/wic/build"

        self.destdir = "."
        self.installerfw_prefix = "INSTALLERFW_"
        self.target_arch = "noarch"
        self._local_pkgs_path = None
        self.pack_to = None
        self.repourl = {}

        # Whether the kernel should be copied to destdir (set by the
        # copy_kernel command).
        self._need_copy_kernel = False
        # Set up a tmpfs-backed tmpdir when enabletmpfs is True.
        self.enabletmpfs = False

        if createopts:
            # Mapping table for option keys whose attribute name differs.
            optmap = {"pkgmgr" : "pkgmgr_name",
                      "outdir" : "destdir",
                      "arch" : "target_arch",
                      "local_pkgs_path" : "_local_pkgs_path",
                      "copy_kernel" : "_need_copy_kernel",
                     }

            # Copy every createopts entry onto self, renaming via optmap.
            for key in createopts.keys():
                if key in optmap:
                    option = optmap[key]
                else:
                    option = key
                setattr(self, option, createopts[key])

            self.destdir = os.path.abspath(os.path.expanduser(self.destdir))

            # Prefix the image name with the release identifier, if any.
            if 'release' in createopts and createopts['release']:
                self.name = createopts['release'] + '_' + self.name

            if self.pack_to:
                if '@NAME@' in self.pack_to:
                    self.pack_to = self.pack_to.replace('@NAME@', self.name)
                (tar, ext) = os.path.splitext(self.pack_to)
                # Treat ".tar.gz"/".tar.bz2" as a single compound extension.
                if ext in (".gz", ".bz2") and tar.endswith(".tar"):
                    ext = ".tar" + ext
                # Fall back to a plain tarball for unrecognized extensions.
                if ext not in misc.pack_formats:
                    self.pack_to += ".tar"

        # Host tools that must be present before we start building.
        self._dep_checks = ["ls", "bash", "cp", "echo", "modprobe"]

        # Output image file names
        self.outimage = []

        # A flag to generate checksum
        self._genchecksum = False

        self._alt_initrd_name = None

        self._recording_pkgs = []

        # available size in root fs, init to 0
        self._root_fs_avail = 0

        # Name of the disk image file that is created.
        self._img_name = None

        self.image_format = None

        # Save qemu emulator file name in order to clean up it finally
        self.qemu_emulator = None

        # No ks provided when called by convertor, so skip the dependency check
        if self.ks:
            # If we have btrfs partition we need to check necessary tools
            for part in self.ks.handler.partition.partitions:
                if part.fstype and part.fstype == "btrfs":
                    self._dep_checks.append("mkfs.btrfs")
                    break

        if self.target_arch and self.target_arch.startswith("arm"):
            # extlinux is x86-only; drop it and require a static qemu-arm
            # for chrooted execution unless the host one is already static.
            for dep in self._dep_checks:
                if dep == "extlinux":
                    self._dep_checks.remove(dep)

            if not os.path.exists("/usr/bin/qemu-arm") or \
               not misc.is_statically_linked("/usr/bin/qemu-arm"):
                self._dep_checks.append("qemu-arm-static")

            # vdso on the host is known to break ARM user-mode emulation.
            if os.path.exists("/proc/sys/vm/vdso_enabled"):
                vdso_fh = open("/proc/sys/vm/vdso_enabled","r")
                vdso_value = vdso_fh.read().strip()
                vdso_fh.close()
                if (int)(vdso_value) == 1:
                    msger.warning("vdso is enabled on your host, which might "
                                  "cause problems with arm emulations.\n"
                                  "\tYou can disable vdso with following command before "
                                  "starting image build:\n"
                                  "\techo 0 | sudo tee /proc/sys/vm/vdso_enabled")

        # make sure the specified tmpdir and cachedir exist
        if not os.path.exists(self.tmpdir):
            os.makedirs(self.tmpdir)
        if not os.path.exists(self.cachedir):
            os.makedirs(self.cachedir)
172 | |||
173 | |||
174 | # | ||
175 | # Properties | ||
176 | # | ||
177 | def __get_instroot(self): | ||
178 | if self.__builddir is None: | ||
179 | raise CreatorError("_instroot is not valid before calling mount()") | ||
180 | return self.__builddir + "/install_root" | ||
181 | _instroot = property(__get_instroot) | ||
182 | """The location of the install root directory. | ||
183 | |||
184 | This is the directory into which the system is installed. Subclasses may | ||
185 | mount a filesystem image here or copy files to/from here. | ||
186 | |||
187 | Note, this directory does not exist before ImageCreator.mount() is called. | ||
188 | |||
189 | Note also, this is a read-only attribute. | ||
190 | |||
191 | """ | ||
192 | |||
193 | def __get_outdir(self): | ||
194 | if self.__builddir is None: | ||
195 | raise CreatorError("_outdir is not valid before calling mount()") | ||
196 | return self.__builddir + "/out" | ||
197 | _outdir = property(__get_outdir) | ||
198 | """The staging location for the final image. | ||
199 | |||
200 | This is where subclasses should stage any files that are part of the final | ||
201 | image. ImageCreator.package() will copy any files found here into the | ||
202 | requested destination directory. | ||
203 | |||
204 | Note, this directory does not exist before ImageCreator.mount() is called. | ||
205 | |||
206 | Note also, this is a read-only attribute. | ||
207 | |||
208 | """ | ||
209 | |||
210 | |||
211 | # | ||
212 | # Hooks for subclasses | ||
213 | # | ||
    def _mount_instroot(self, base_on = None):
        """Mount or prepare the install root directory.

        This is the hook where subclasses may prepare the install root by
        e.g. creating and loopback-mounting a filesystem image onto
        _instroot.

        There is no default implementation.

        base_on -- this is the value passed to mount() and can be interpreted
                   as the subclass wishes; it might e.g. be the location of
                   a previously created ISO containing a system image.

        """
        # Intentionally empty: base class performs no mounting.
        pass
229 | |||
    def _unmount_instroot(self):
        """Undo anything performed in _mount_instroot().

        This is the hook where subclasses must undo anything which was done
        in _mount_instroot(). For example, if a filesystem image was mounted
        onto _instroot, it should be unmounted here.

        There is no default implementation.

        """
        # Intentionally empty: nothing was mounted by the base class.
        pass
241 | |||
    def _create_bootconfig(self):
        """Configure the image so that it's bootable.

        This is the hook where subclasses may prepare the image for booting
        by e.g. creating an initramfs and bootloader configuration.

        This hook is called while the install root is still mounted, after
        the packages have been installed and the kickstart configuration has
        been applied, but before the %post scripts have been executed.

        There is no default implementation.

        """
        # Intentionally empty: boot configuration is subclass-specific.
        pass
256 | |||
257 | def _stage_final_image(self): | ||
258 | """Stage the final system image in _outdir. | ||
259 | |||
260 | This is the hook where subclasses should place the image in _outdir | ||
261 | so that package() can copy it to the requested destination directory. | ||
262 | |||
263 | By default, this moves the install root into _outdir. | ||
264 | |||
265 | """ | ||
266 | shutil.move(self._instroot, self._outdir + "/" + self.name) | ||
267 | |||
    def get_installed_packages(self):
        # Names of all packages recorded as installed.  _pkgs_content is
        # populated by the package-manager backend during install().
        # Python 2: dict.keys() returns a plain list.
        return self._pkgs_content.keys()
270 | |||
    def _save_recording_pkgs(self, destdir):
        """Save the list or content of installed packages to file.

        Depending on the flags in self._recording_pkgs ('vcs', 'name',
        'content', 'license'), writes up to three files into `destdir`
        (created if missing):

          <name>.packages -- "pkg vcs-info" pairs or plain package names
          <name>.files    -- per-package file listings
          <name>.license  -- license summary plus per-license package lists

        Every file written is appended to self.outimage.
        """
        # Python 2: dict.keys() returns a list which we sort in place.
        pkgs = self._pkgs_content.keys()
        pkgs.sort() # inplace op

        if not os.path.exists(destdir):
            os.makedirs(destdir)

        content = None
        # 'vcs' takes precedence over 'name'; both end up in <name>.packages.
        if 'vcs' in self._recording_pkgs:
            vcslst = ["%s %s" % (k, v) for (k, v) in self._pkgs_vcsinfo.items()]
            content = '\n'.join(sorted(vcslst))
        elif 'name' in self._recording_pkgs:
            content = '\n'.join(pkgs)
        if content:
            namefile = os.path.join(destdir, self.name + '.packages')
            f = open(namefile, "w")
            f.write(content)
            f.close()
            self.outimage.append(namefile);

        # if 'content', save more details
        if 'content' in self._recording_pkgs:
            contfile = os.path.join(destdir, self.name + '.files')
            f = open(contfile, "w")

            for pkg in pkgs:
                content = pkg + '\n'

                # Indent the file list under its package name.
                pkgcont = self._pkgs_content[pkg]
                content += ' '
                content += '\n '.join(pkgcont)
                content += '\n'

                content += '\n'
                f.write(content)
            f.close()
            self.outimage.append(contfile)

        if 'license' in self._recording_pkgs:
            licensefile = os.path.join(destdir, self.name + '.license')
            f = open(licensefile, "w")

            # Summary first: licenses ordered by descending package count.
            f.write('Summary:\n')
            for license in reversed(sorted(self._pkgs_license, key=\
                           lambda license: len(self._pkgs_license[license]))):
                f.write(" - %s: %s\n" \
                        % (license, len(self._pkgs_license[license])))

            # Then the detailed per-license package lists, same ordering.
            f.write('\nDetails:\n')
            for license in reversed(sorted(self._pkgs_license, key=\
                           lambda license: len(self._pkgs_license[license]))):
                f.write(" - %s:\n" % (license))
                for pkg in sorted(self._pkgs_license[license]):
                    f.write(" - %s\n" % (pkg))
                f.write('\n')

            f.close()
            self.outimage.append(licensefile)
331 | |||
332 | def _get_required_packages(self): | ||
333 | """Return a list of required packages. | ||
334 | |||
335 | This is the hook where subclasses may specify a set of packages which | ||
336 | it requires to be installed. | ||
337 | |||
338 | This returns an empty list by default. | ||
339 | |||
340 | Note, subclasses should usually chain up to the base class | ||
341 | implementation of this hook. | ||
342 | |||
343 | """ | ||
344 | return [] | ||
345 | |||
346 | def _get_excluded_packages(self): | ||
347 | """Return a list of excluded packages. | ||
348 | |||
349 | This is the hook where subclasses may specify a set of packages which | ||
350 | it requires _not_ to be installed. | ||
351 | |||
352 | This returns an empty list by default. | ||
353 | |||
354 | Note, subclasses should usually chain up to the base class | ||
355 | implementation of this hook. | ||
356 | |||
357 | """ | ||
358 | return [] | ||
359 | |||
360 | def _get_local_packages(self): | ||
361 | """Return a list of rpm path to be local installed. | ||
362 | |||
363 | This is the hook where subclasses may specify a set of rpms which | ||
364 | it requires to be installed locally. | ||
365 | |||
366 | This returns an empty list by default. | ||
367 | |||
368 | Note, subclasses should usually chain up to the base class | ||
369 | implementation of this hook. | ||
370 | |||
371 | """ | ||
372 | if self._local_pkgs_path: | ||
373 | if os.path.isdir(self._local_pkgs_path): | ||
374 | return glob.glob( | ||
375 | os.path.join(self._local_pkgs_path, '*.rpm')) | ||
376 | elif os.path.splitext(self._local_pkgs_path)[-1] == '.rpm': | ||
377 | return [self._local_pkgs_path] | ||
378 | |||
379 | return [] | ||
380 | |||
    def _get_fstab(self):
        """Return the desired contents of /etc/fstab.

        This is the hook where subclasses may specify the contents of
        /etc/fstab by returning a string containing the desired contents.

        The default is a root entry using self._fstype / self._fsopts
        (falling back to "defaults,noatime") plus the pseudo-filesystem
        entries from _get_fstab_special().
        """
        s = "/dev/root / %s %s 0 0\n" \
            % (self._fstype,
               "defaults,noatime" if not self._fsopts else self._fsopts)
        s += self._get_fstab_special()
        return s
395 | |||
    def _get_fstab_special(self):
        # Standard pseudo-filesystem entries appended to every generated
        # fstab: devpts, shared-memory tmpfs, proc and sysfs.
        s = "devpts /dev/pts devpts gid=5,mode=620 0 0\n"
        s += "tmpfs /dev/shm tmpfs defaults 0 0\n"
        s += "proc /proc proc defaults 0 0\n"
        s += "sysfs /sys sysfs defaults 0 0\n"
        return s
402 | |||
403 | def _set_part_env(self, pnum, prop, value): | ||
404 | """ This is a helper function which generates an environment variable | ||
405 | for a property "prop" with value "value" of a partition number "pnum". | ||
406 | |||
407 | The naming convention is: | ||
408 | * Variables start with INSTALLERFW_PART | ||
409 | * Then goes the partition number, the order is the same as | ||
410 | specified in the KS file | ||
411 | * Then goes the property name | ||
412 | """ | ||
413 | |||
414 | if value == None: | ||
415 | value = "" | ||
416 | else: | ||
417 | value = str(value) | ||
418 | |||
419 | name = self.installerfw_prefix + ("PART%d_" % pnum) + prop | ||
420 | return { name : value } | ||
421 | |||
    def _get_post_scripts_env(self, in_chroot):
        """Return an environment dict for %post scripts.

        This is the hook where subclasses may specify some environment
        variables for %post scripts by returning a dict containing the
        desired environment.

        in_chroot -- whether this %post script is to be executed chroot()ed
                     into _instroot.
        """

        env = {}
        pnum = 0

        # Export one INSTALLERFW_PART<n>_* variable per partition property,
        # in kickstart order.
        for p in kickstart.get_partitions(self.ks):
            env.update(self._set_part_env(pnum, "SIZE", p.size))
            env.update(self._set_part_env(pnum, "MOUNTPOINT", p.mountpoint))
            env.update(self._set_part_env(pnum, "FSTYPE", p.fstype))
            env.update(self._set_part_env(pnum, "LABEL", p.label))
            env.update(self._set_part_env(pnum, "FSOPTS", p.fsopts))
            env.update(self._set_part_env(pnum, "BOOTFLAG", p.active))
            env.update(self._set_part_env(pnum, "ALIGN", p.align))
            env.update(self._set_part_env(pnum, "TYPE_ID", p.part_type))
            env.update(self._set_part_env(pnum, "DEVNODE",
                                          "/dev/%s%d" % (p.disk, pnum + 1)))
            pnum += 1

        # Count of partitions
        env[self.installerfw_prefix + "PART_COUNT"] = str(pnum)

        # Partition table format
        ptable_format = self.ks.handler.bootloader.ptable
        env[self.installerfw_prefix + "PTABLE_FORMAT"] = ptable_format

        # The kernel boot parameters
        kernel_opts = self.ks.handler.bootloader.appendLine
        env[self.installerfw_prefix + "KERNEL_OPTS"] = kernel_opts

        # Name of the distribution
        env[self.installerfw_prefix + "DISTRO_NAME"] = self.distro_name

        # Name of the image creation tool
        env[self.installerfw_prefix + "INSTALLER_NAME"] = "wic"

        # The real current location of the mounted file-systems
        if in_chroot:
            mount_prefix = "/"
        else:
            mount_prefix = self._instroot
        env[self.installerfw_prefix + "MOUNT_PREFIX"] = mount_prefix

        # These are historical variables which lack the common name prefix
        if not in_chroot:
            env["INSTALL_ROOT"] = self._instroot
            env["IMG_NAME"] = self._name

        return env
479 | |||
    def __get_imgname(self):
        # Trivial accessor backing the read-only _name property.
        return self.name
    _name = property(__get_imgname)
    """The name of the image file.

    """
486 | |||
    def _get_kernel_versions(self):
        """Return a dict detailing the available kernel types/versions.

        This is the hook where subclasses may override what kernel types and
        versions should be available for e.g. creating the bootloader
        configuration.

        A dict should be returned mapping the available kernel types to a
        list of the available versions for those kernels.

        The default implementation uses rpm to iterate over everything
        providing 'kernel', finds /boot/vmlinuz-* and returns the version
        obtained from the vmlinuz filename. (This can differ from the kernel
        RPM's n-v-r in the case of e.g. xen)

        NOTE(review): the rpm-based branch references a global `rpm` module
        that is never imported in this file, so it would raise NameError if
        reached -- presumably only the ks-is-None path is exercised in
        practice; confirm before relying on the rpm path.
        """
        def get_kernel_versions(instroot):
            # Fallback: derive versions from the vmlinuz-* filenames
            # ("vmlinuz-" is 8 characters, hence the [8:] slice).
            ret = {}
            versions = set()
            files = glob.glob(instroot + "/boot/vmlinuz-*")
            for file in files:
                version = os.path.basename(file)[8:]
                if version is None:
                    continue
                versions.add(version)
            ret["kernel"] = list(versions)
            return ret

        def get_version(header):
            # "/boot/vmlinuz-" is 14 characters, hence the [14:] slice.
            version = None
            for f in header['filenames']:
                if f.startswith('/boot/vmlinuz-'):
                    version = f[14:]
            return version

        if self.ks is None:
            return get_kernel_versions(self._instroot)

        ts = rpm.TransactionSet(self._instroot)

        ret = {}
        for header in ts.dbMatch('provides', 'kernel'):
            version = get_version(header)
            if version is None:
                continue

            name = header['name']
            if not name in ret:
                ret[name] = [version]
            elif not version in ret[name]:
                ret[name].append(version)

        return ret
540 | |||
541 | |||
542 | # | ||
543 | # Helpers for subclasses | ||
544 | # | ||
545 | def _do_bindmounts(self): | ||
546 | """Mount various system directories onto _instroot. | ||
547 | |||
548 | This method is called by mount(), but may also be used by subclasses | ||
549 | in order to re-mount the bindmounts after modifying the underlying | ||
550 | filesystem. | ||
551 | |||
552 | """ | ||
553 | for b in self.__bindmounts: | ||
554 | b.mount() | ||
555 | |||
556 | def _undo_bindmounts(self): | ||
557 | """Unmount the bind-mounted system directories from _instroot. | ||
558 | |||
559 | This method is usually only called by unmount(), but may also be used | ||
560 | by subclasses in order to gain access to the filesystem obscured by | ||
561 | the bindmounts - e.g. in order to create device nodes on the image | ||
562 | filesystem. | ||
563 | |||
564 | """ | ||
565 | self.__bindmounts.reverse() | ||
566 | for b in self.__bindmounts: | ||
567 | b.unmount() | ||
568 | |||
    def _chroot(self):
        """Chroot into the install root.

        This method may be used by subclasses when executing programs inside
        the install root e.g.

          subprocess.call(["/bin/ls"], preexec_fn = self.chroot)

        Note: requires root privileges and permanently changes this
        process's root; intended for use as a preexec_fn in a child.
        """
        os.chroot(self._instroot)
        # Avoid keeping a cwd outside the new root.
        os.chdir("/")
580 | |||
581 | def _mkdtemp(self, prefix = "tmp-"): | ||
582 | """Create a temporary directory. | ||
583 | |||
584 | This method may be used by subclasses to create a temporary directory | ||
585 | for use in building the final image - e.g. a subclass might create | ||
586 | a temporary directory in order to bundle a set of files into a package. | ||
587 | |||
588 | The subclass may delete this directory if it wishes, but it will be | ||
589 | automatically deleted by cleanup(). | ||
590 | |||
591 | The absolute path to the temporary directory is returned. | ||
592 | |||
593 | Note, this method should only be called after mount() has been called. | ||
594 | |||
595 | prefix -- a prefix which should be used when creating the directory; | ||
596 | defaults to "tmp-". | ||
597 | |||
598 | """ | ||
599 | self.__ensure_builddir() | ||
600 | return tempfile.mkdtemp(dir = self.__builddir, prefix = prefix) | ||
601 | |||
602 | def _mkstemp(self, prefix = "tmp-"): | ||
603 | """Create a temporary file. | ||
604 | |||
605 | This method may be used by subclasses to create a temporary file | ||
606 | for use in building the final image - e.g. a subclass might need | ||
607 | a temporary location to unpack a compressed file. | ||
608 | |||
609 | The subclass may delete this file if it wishes, but it will be | ||
610 | automatically deleted by cleanup(). | ||
611 | |||
612 | A tuple containing a file descriptor (returned from os.open() and the | ||
613 | absolute path to the temporary directory is returned. | ||
614 | |||
615 | Note, this method should only be called after mount() has been called. | ||
616 | |||
617 | prefix -- a prefix which should be used when creating the file; | ||
618 | defaults to "tmp-". | ||
619 | |||
620 | """ | ||
621 | self.__ensure_builddir() | ||
622 | return tempfile.mkstemp(dir = self.__builddir, prefix = prefix) | ||
623 | |||
624 | def _mktemp(self, prefix = "tmp-"): | ||
625 | """Create a temporary file. | ||
626 | |||
627 | This method simply calls _mkstemp() and closes the returned file | ||
628 | descriptor. | ||
629 | |||
630 | The absolute path to the temporary file is returned. | ||
631 | |||
632 | Note, this method should only be called after mount() has been called. | ||
633 | |||
634 | prefix -- a prefix which should be used when creating the file; | ||
635 | defaults to "tmp-". | ||
636 | |||
637 | """ | ||
638 | |||
639 | (f, path) = self._mkstemp(prefix) | ||
640 | os.close(f) | ||
641 | return path | ||
642 | |||
643 | |||
644 | # | ||
645 | # Actual implementation | ||
646 | # | ||
647 | def __ensure_builddir(self): | ||
648 | if not self.__builddir is None: | ||
649 | return | ||
650 | |||
651 | try: | ||
652 | self.workdir = os.path.join(self.tmpdir, "build") | ||
653 | if not os.path.exists(self.workdir): | ||
654 | os.makedirs(self.workdir) | ||
655 | self.__builddir = tempfile.mkdtemp(dir = self.workdir, | ||
656 | prefix = "imgcreate-") | ||
657 | except OSError, (err, msg): | ||
658 | raise CreatorError("Failed create build directory in %s: %s" % | ||
659 | (self.tmpdir, msg)) | ||
660 | |||
    def get_cachedir(self, cachedir = None):
        # Return the package-cache directory, creating one under the build
        # dir when unset.
        #
        # NOTE(review): __init__ always assigns self.cachedir a non-empty
        # default, so in practice the first branch returns immediately and
        # the `cachedir` argument is dead -- confirm before relying on it.
        if self.cachedir:
            return self.cachedir

        self.__ensure_builddir()
        if cachedir:
            self.cachedir = cachedir
        else:
            self.cachedir = self.__builddir + "/wic-cache"
        # fs.makedirs is the project helper (no error if it already exists).
        fs.makedirs(self.cachedir)
        return self.cachedir
672 | |||
    def __sanity_check(self):
        """Ensure that the config we've been given is sane."""
        # An image with no packages and no groups cannot be built.
        if not (kickstart.get_packages(self.ks) or
                kickstart.get_groups(self.ks)):
            raise CreatorError("No packages or groups specified")

        # Normalize any "method" directive into an ordinary repo entry.
        kickstart.convert_method_to_repo(self.ks)

        if not kickstart.get_repos(self.ks):
            raise CreatorError("No repositories specified")
683 | |||
684 | def __write_fstab(self): | ||
685 | fstab_contents = self._get_fstab() | ||
686 | if fstab_contents: | ||
687 | fstab = open(self._instroot + "/etc/fstab", "w") | ||
688 | fstab.write(fstab_contents) | ||
689 | fstab.close() | ||
690 | |||
    def __create_minimal_dev(self):
        """Create a minimal /dev so that we don't corrupt the host /dev"""
        # Clear the umask so the device nodes get exactly the listed modes
        # (Python 2 octal literals); restored at the end.
        origumask = os.umask(0000)
        # (name, major, minor, mode) for the character devices every
        # reasonable chroot needs.
        devices = (('null', 1, 3, 0666),
                   ('urandom',1, 9, 0666),
                   ('random', 1, 8, 0666),
                   ('full', 1, 7, 0666),
                   ('ptmx', 5, 2, 0666),
                   ('tty', 5, 0, 0666),
                   ('zero', 1, 5, 0666))

        # Conventional /dev symlinks into /proc/self/fd.
        links = (("/proc/self/fd", "/dev/fd"),
                 ("/proc/self/fd/0", "/dev/stdin"),
                 ("/proc/self/fd/1", "/dev/stdout"),
                 ("/proc/self/fd/2", "/dev/stderr"))

        # Only create nodes/links that do not already exist, so re-runs
        # and pre-populated roots are safe.
        for (node, major, minor, perm) in devices:
            if not os.path.exists(self._instroot + "/dev/" + node):
                os.mknod(self._instroot + "/dev/" + node,
                         perm | stat.S_IFCHR,
                         os.makedev(major,minor))

        for (src, dest) in links:
            if not os.path.exists(self._instroot + dest):
                os.symlink(src, self._instroot + dest)

        os.umask(origumask)
718 | |||
719 | def __setup_tmpdir(self): | ||
720 | if not self.enabletmpfs: | ||
721 | return | ||
722 | |||
723 | runner.show('mount -t tmpfs -o size=4G tmpfs %s' % self.workdir) | ||
724 | |||
725 | def __clean_tmpdir(self): | ||
726 | if not self.enabletmpfs: | ||
727 | return | ||
728 | |||
729 | runner.show('umount -l %s' % self.workdir) | ||
730 | |||
    def mount(self, base_on = None, cachedir = None):
        """Setup the target filesystem in preparation for an install.

        This function sets up the filesystem which the ImageCreator will
        install into and configure. The ImageCreator class merely creates an
        install root directory, bind mounts some system directories (e.g.
        /dev) and writes out /etc/fstab. Other subclasses may also e.g.
        create a sparse file, format it and loopback mount it to the install
        root.

        base_on -- a previous install on which to base this install; defaults
                   to None, causing a new image to be created

        cachedir -- a directory in which to store the Yum cache; defaults to
                    None, causing a new cache to be created; by setting this
                    to another directory, the same cache can be reused across
                    multiple installs.

        NOTE(review): the cachedir argument is currently unused by this
        implementation -- confirm against callers before removing it.
        """
        # tmpfs (optional) must be mounted before the build dir is created
        # inside it.
        self.__setup_tmpdir()
        self.__ensure_builddir()

        # Delegate the actual filesystem preparation to the subclass hook.
        self._mount_instroot(base_on)
753 | |||
    def unmount(self):
        """Unmounts the target filesystem.

        The ImageCreator class detaches the system from the install root,
        but other subclasses may also detach the loopback mounted filesystem
        image from the install root.

        """
        # All real work happens in the subclass hook.
        self._unmount_instroot()
763 | |||
764 | |||
    def cleanup(self):
        """Unmounts the target filesystem and deletes temporary files.

        This method calls unmount() and then deletes any temporary files and
        directories that were created on the host system while building the
        image.

        Note, make sure to call this method once finished with the creator
        instance in order to ensure no stale files are left on the host
        e.g.:

          creator = ImageCreator(ks, name)
          try:
              creator.create()
          finally:
              creator.cleanup()

        """
        # Nothing to clean if mount() never created a build directory.
        if not self.__builddir:
            return

        self.unmount()

        # ignore_errors: best-effort removal; a busy mount inside must not
        # raise out of cleanup (which is also called from __del__).
        shutil.rmtree(self.__builddir, ignore_errors = True)
        self.__builddir = None

        self.__clean_tmpdir()
791 | |||
792 | def __is_excluded_pkg(self, pkg): | ||
793 | if pkg in self._excluded_pkgs: | ||
794 | self._excluded_pkgs.remove(pkg) | ||
795 | return True | ||
796 | |||
797 | for xpkg in self._excluded_pkgs: | ||
798 | if xpkg.endswith('*'): | ||
799 | if pkg.startswith(xpkg[:-1]): | ||
800 | return True | ||
801 | elif xpkg.startswith('*'): | ||
802 | if pkg.endswith(xpkg[1:]): | ||
803 | return True | ||
804 | |||
805 | return None | ||
806 | |||
    def __select_packages(self, pkg_manager):
        # Ask the backend to select every required package; a package that
        # cannot be found is skipped (with a warning) when the kickstart
        # allows missing packages or when it was explicitly excluded,
        # otherwise the failure is fatal.
        skipped_pkgs = []
        for pkg in self._required_pkgs:
            # selectPackage() returns an error string on failure, None on
            # success.
            e = pkg_manager.selectPackage(pkg)
            if e:
                if kickstart.ignore_missing(self.ks):
                    skipped_pkgs.append(pkg)
                elif self.__is_excluded_pkg(pkg):
                    skipped_pkgs.append(pkg)
                else:
                    raise CreatorError("Failed to find package '%s' : %s" %
                                       (pkg, e))

        for pkg in skipped_pkgs:
            msger.warning("Skipping missing package '%s'" % (pkg,))
822 | |||
    def __select_groups(self, pkg_manager):
        # Ask the backend to select every required package group; a missing
        # group is skipped (with a warning) only when the kickstart allows
        # missing packages, otherwise the failure is fatal.
        skipped_groups = []
        for group in self._required_groups:
            # selectGroup() returns an error string on failure, None on
            # success.
            e = pkg_manager.selectGroup(group.name, group.include)
            if e:
                if kickstart.ignore_missing(self.ks):
                    skipped_groups.append(group)
                else:
                    raise CreatorError("Failed to find group '%s' : %s" %
                                       (group.name, e))

        for group in skipped_groups:
            msger.warning("Skipping missing group '%s'" % (group.name,))
836 | |||
837 | def __deselect_packages(self, pkg_manager): | ||
838 | for pkg in self._excluded_pkgs: | ||
839 | pkg_manager.deselectPackage(pkg) | ||
840 | |||
    def __localinst_packages(self, pkg_manager):
        # Install every locally-available rpm reported by the
        # _get_local_packages() hook through the backend.
        for rpm_path in self._get_local_packages():
            pkg_manager.installLocal(rpm_path)
844 | |||
    def __preinstall_packages(self, pkg_manager):
        # Mark the kickstart's %pre packages for installation ahead of the
        # normal package set; no-op without a kickstart handle.
        if not self.ks:
            return

        self._preinstall_pkgs = kickstart.get_pre_packages(self.ks)
        for pkg in self._preinstall_pkgs:
            pkg_manager.preInstall(pkg)
852 | |||
    def __attachment_packages(self, pkg_manager):
        # Resolve the kickstart's %attachment entries into a list of file
        # paths (self._attachment).  Each entry is either an absolute glob
        # pattern inside the install root, an installed package name, or a
        # package to download from a repo and unpack.  No-op without a
        # kickstart handle.
        if not self.ks:
            return

        self._attachment = []
        for item in kickstart.get_attachment(self.ks):
            # Absolute path: expand the glob inside the install root.
            if item.startswith('/'):
                fpaths = os.path.join(self._instroot, item.lstrip('/'))
                for fpath in glob.glob(fpaths):
                    self._attachment.append(fpath)
                continue

            filelist = pkg_manager.getFilelist(item)
            if filelist:
                # found rpm in rootfs
                for pfile in pkg_manager.getFilelist(item):
                    fpath = os.path.join(self._instroot, pfile.lstrip('/'))
                    self._attachment.append(fpath)
                continue

            # try to retrieve rpm file
            (url, proxies) = pkg_manager.package_url(item)
            if not url:
                msger.warning("Can't get url from repo for %s" % item)
                continue
            fpath = os.path.join(self.cachedir, os.path.basename(url))
            if not os.path.exists(fpath):
                # download pkgs
                # NOTE(review): `grabber` is not imported in this module, so
                # this download path would raise NameError if reached --
                # confirm where grabber is expected to come from.
                try:
                    fpath = grabber.myurlgrab(url, fpath, proxies, None)
                except CreatorError:
                    raise

            # Unpack the downloaded rpm and attach everything it contains.
            tmpdir = self._mkdtemp()
            misc.extract_rpm(fpath, tmpdir)
            for (root, dirs, files) in os.walk(tmpdir):
                for fname in files:
                    fpath = os.path.join(root, fname)
                    self._attachment.append(fpath)
892 | |||
    def install(self, repo_urls=None):
        """Install packages into the install root.

        This function installs the packages listed in the supplied kickstart
        into the install root. By default, the packages are installed from the
        repository URLs specified in the kickstart.

        repo_urls -- a dict which maps a repository name to a repository URL;
                     if supplied, this causes any repository URLs specified in
                     the kickstart to be overridden.

        """

        # initialize pkg list to install
        if self.ks:
            self.__sanity_check()

            self._required_pkgs = \
                kickstart.get_packages(self.ks, self._get_required_packages())
            self._excluded_pkgs = \
                kickstart.get_excluded(self.ks, self._get_excluded_packages())
            self._required_groups = kickstart.get_groups(self.ks)
        else:
            self._required_pkgs = None
            self._excluded_pkgs = None
            self._required_groups = None

        pkg_manager = self.get_pkg_manager()
        pkg_manager.setup()

        # pull in debuginfo packages too when explicitly requested
        if hasattr(self, 'install_pkgs') and self.install_pkgs:
            if 'debuginfo' in self.install_pkgs:
                pkg_manager.install_debuginfo = True

        # register every repository described by the kickstart (URLs
        # possibly overridden via repo_urls)
        for repo in kickstart.get_repos(self.ks, repo_urls):
            (name, baseurl, mirrorlist, inc, exc,
             proxy, proxy_username, proxy_password, debuginfo,
             source, gpgkey, disable, ssl_verify, nocache,
             cost, priority) = repo

            yr = pkg_manager.addRepository(name, baseurl, mirrorlist, proxy,
                        proxy_username, proxy_password, inc, exc, ssl_verify,
                        nocache, cost, priority)

        # configure rpm macros that shape the install transaction
        if kickstart.exclude_docs(self.ks):
            rpm.addMacro("_excludedocs", "1")
        rpm.addMacro("_dbpath", "/var/lib/rpm")
        rpm.addMacro("__file_context_path", "%{nil}")
        if kickstart.inst_langs(self.ks) != None:
            rpm.addMacro("_install_langs", kickstart.inst_langs(self.ks))

        try:
            self.__preinstall_packages(pkg_manager)
            self.__select_packages(pkg_manager)
            self.__select_groups(pkg_manager)
            self.__deselect_packages(pkg_manager)
            self.__localinst_packages(pkg_manager)

            # leave headroom below the available rootfs space so the
            # resulting image stays bootable
            BOOT_SAFEGUARD = 256L * 1024 * 1024 # 256M
            checksize = self._root_fs_avail
            if checksize:
                checksize -= BOOT_SAFEGUARD
            if self.target_arch:
                # a forced target arch implies arch mismatches are expected
                pkg_manager._add_prob_flags(rpm.RPMPROB_FILTER_IGNOREARCH)
            pkg_manager.runInstall(checksize)
        except CreatorError, e:
            raise
        except KeyboardInterrupt:
            raise
        else:
            # on success record package contents/licenses/vcs info and
            # gather the kickstart attachments
            self._pkgs_content = pkg_manager.getAllContent()
            self._pkgs_license = pkg_manager.getPkgsLicense()
            self._pkgs_vcsinfo = pkg_manager.getVcsInfo()
            self.__attachment_packages(pkg_manager)
        finally:
            pkg_manager.close()

        # hook post install
        self.postinstall()

        # do some clean up to avoid lvm info leakage. this sucks.
        for subdir in ("cache", "backup", "archive"):
            lvmdir = self._instroot + "/etc/lvm/" + subdir
            try:
                for f in os.listdir(lvmdir):
                    os.unlink(lvmdir + "/" + f)
            except:
                pass
981 | |||
    def postinstall(self):
        """Post-install hook; by default just copies attachment files."""
        self.copy_attachment()
984 | |||
    def __run_post_scripts(self):
        """Execute the kickstart post-install scripts.

        Each script is written to a temp file under instroot's /tmp and run
        either on the host or chroot()ed into the install root, depending on
        the script's inChroot flag.
        """
        msger.info("Running scripts ...")
        # recreate a clean /tmp inside the install root for the scripts
        if os.path.exists(self._instroot + "/tmp"):
            shutil.rmtree(self._instroot + "/tmp")
        os.mkdir (self._instroot + "/tmp", 0755)
        for s in kickstart.get_post_scripts(self.ks):
            (fd, path) = tempfile.mkstemp(prefix = "ks-script-",
                                          dir = self._instroot + "/tmp")

            # normalize CRLF line endings before writing the script out
            s.script = s.script.replace("\r", "")
            os.write(fd, s.script)
            os.close(fd)
            os.chmod(path, 0700)

            env = self._get_post_scripts_env(s.inChroot)

            if not s.inChroot:
                # run on the host against the absolute temp path
                preexec = None
                script = path
            else:
                # chroot into instroot; the script path becomes /tmp-relative
                preexec = self._chroot
                script = "/tmp/" + os.path.basename(path)

            try:
                try:
                    subprocess.call([s.interp, script],
                                    preexec_fn = preexec,
                                    env = env,
                                    stdout = sys.stdout,
                                    stderr = sys.stderr)
                except OSError, (err, msg):
                    raise CreatorError("Failed to execute %%post script "
                                       "with '%s' : %s" % (s.interp, msg))
            finally:
                # always remove the temp script, even on failure
                os.unlink(path)
1020 | |||
1021 | def __save_repo_keys(self, repodata): | ||
1022 | if not repodata: | ||
1023 | return None | ||
1024 | |||
1025 | gpgkeydir = "/etc/pki/rpm-gpg" | ||
1026 | fs.makedirs(self._instroot + gpgkeydir) | ||
1027 | for repo in repodata: | ||
1028 | if repo["repokey"]: | ||
1029 | repokey = gpgkeydir + "/RPM-GPG-KEY-%s" % repo["name"] | ||
1030 | shutil.copy(repo["repokey"], self._instroot + repokey) | ||
1031 | |||
    def configure(self, repodata = None):
        """Configure the system image according to the kickstart.

        This method applies the (e.g. keyboard or network) configuration
        specified in the kickstart and executes the kickstart %post scripts.

        If necessary, it also prepares the image to be bootable by e.g.
        creating an initrd and bootloader configuration.

        """
        ksh = self.ks.handler

        msger.info('Applying configurations ...')
        try:
            # apply each kickstart directive to the install root in turn
            kickstart.LanguageConfig(self._instroot).apply(ksh.lang)
            kickstart.KeyboardConfig(self._instroot).apply(ksh.keyboard)
            kickstart.TimezoneConfig(self._instroot).apply(ksh.timezone)
            #kickstart.AuthConfig(self._instroot).apply(ksh.authconfig)
            kickstart.FirewallConfig(self._instroot).apply(ksh.firewall)
            kickstart.RootPasswordConfig(self._instroot).apply(ksh.rootpw)
            kickstart.UserConfig(self._instroot).apply(ksh.user)
            kickstart.ServicesConfig(self._instroot).apply(ksh.services)
            kickstart.XConfig(self._instroot).apply(ksh.xconfig)
            kickstart.NetworkConfig(self._instroot).apply(ksh.network)
            kickstart.RPMMacroConfig(self._instroot).apply(self.ks)
            kickstart.DesktopConfig(self._instroot).apply(ksh.desktop)
            self.__save_repo_keys(repodata)
            kickstart.MoblinRepoConfig(self._instroot).apply(ksh.repo, repodata, self.repourl)
        except:
            # warn for context, then re-raise so the failure is not hidden
            msger.warning("Failed to apply configuration to image")
            raise

        self._create_bootconfig()
        self.__run_post_scripts()
1066 | |||
1067 | def launch_shell(self, launch): | ||
1068 | """Launch a shell in the install root. | ||
1069 | |||
1070 | This method is launches a bash shell chroot()ed in the install root; | ||
1071 | this can be useful for debugging. | ||
1072 | |||
1073 | """ | ||
1074 | if launch: | ||
1075 | msger.info("Launching shell. Exit to continue.") | ||
1076 | subprocess.call(["/bin/bash"], preexec_fn = self._chroot) | ||
1077 | |||
1078 | def do_genchecksum(self, image_name): | ||
1079 | if not self._genchecksum: | ||
1080 | return | ||
1081 | |||
1082 | md5sum = misc.get_md5sum(image_name) | ||
1083 | with open(image_name + ".md5sum", "w") as f: | ||
1084 | f.write("%s %s" % (md5sum, os.path.basename(image_name))) | ||
1085 | self.outimage.append(image_name+".md5sum") | ||
1086 | |||
    def package(self, destdir = "."):
        """Prepares the created image for final delivery.

        In its simplest form, this method merely copies the install root to the
        supplied destination directory; other subclasses may choose to package
        the image by e.g. creating a bootable ISO containing the image and
        bootloader configuration.

        destdir -- the directory into which the final image should be moved;
                   this defaults to the current directory.

        """
        self._stage_final_image()

        if not os.path.exists(destdir):
            fs.makedirs(destdir)

        if self._recording_pkgs:
            self._save_recording_pkgs(destdir)

        # For image formats with two or multiple image files, it will be
        # better to put them under a directory
        if self.image_format in ("raw", "vmdk", "vdi", "nand", "mrstnand"):
            # note: destdir is rebound to the per-image subdirectory here
            destdir = os.path.join(destdir, "%s-%s" \
                                            % (self.name, self.image_format))
            msger.debug("creating destination dir: %s" % destdir)
            fs.makedirs(destdir)

        # Ensure all data is flushed to _outdir
        runner.quiet('sync')

        misc.check_space_pre_cp(self._outdir, destdir)
        # move every staged file into destdir, recording each one and
        # generating its checksum file when enabled
        for f in os.listdir(self._outdir):
            shutil.move(os.path.join(self._outdir, f),
                        os.path.join(destdir, f))
            self.outimage.append(os.path.join(destdir, f))
            self.do_genchecksum(os.path.join(destdir, f))
1124 | |||
1125 | def print_outimage_info(self): | ||
1126 | msg = "The new image can be found here:\n" | ||
1127 | self.outimage.sort() | ||
1128 | for file in self.outimage: | ||
1129 | msg += ' %s\n' % os.path.abspath(file) | ||
1130 | |||
1131 | msger.info(msg) | ||
1132 | |||
    def check_depend_tools(self):
        # Verify every external tool dependency can be located on the host.
        # NOTE(review): presumably find_binary_path raises when a tool is
        # missing — confirm against mic.utils.fs_related.
        for tool in self._dep_checks:
            fs.find_binary_path(tool)
1136 | |||
    def package_output(self, image_format, destdir = ".", package="none"):
        """Pack the produced image files into a single archive.

        package -- archive spec such as "tar" or "tar.bz2"; "none" (or an
                   empty value) disables packaging.  Only the "tar" family
                   is currently handled; the originals are deleted once
                   archived and self.outimage is replaced by the archive.
        """
        if not package or package == "none":
            return

        destdir = os.path.abspath(os.path.expanduser(destdir))
        (pkg, comp) = os.path.splitext(package)
        if comp:
            comp=comp.lstrip(".")

        if pkg == "tar":
            if comp:
                dst = "%s/%s-%s.tar.%s" %\
                      (destdir, self.name, image_format, comp)
            else:
                dst = "%s/%s-%s.tar" %\
                      (destdir, self.name, image_format)

            msger.info("creating %s" % dst)
            # "w:" plus the compression suffix selects the tarfile mode;
            # an empty comp yields plain uncompressed "w:"
            tar = tarfile.open(dst, "w:" + comp)

            for file in self.outimage:
                msger.info("adding %s to %s" % (file, dst))
                tar.add(file,
                        arcname=os.path.join("%s-%s" \
                                   % (self.name, image_format),
                                   os.path.basename(file)))
                # the original is removed once archived
                if os.path.isdir(file):
                    shutil.rmtree(file, ignore_errors = True)
                else:
                    os.remove(file)

            tar.close()

            '''All the file in outimage has been packaged into tar.* file'''
            self.outimage = [dst]
1172 | |||
    def release_output(self, config, destdir, release):
        """ Create release directory and files

        config  -- path to the kickstart used for the build
        destdir -- the release directory to populate
        release -- release id substituted for @BUILD_ID@ in the saved ks
        """

        def _rpath(fn):
            """ release path """
            return os.path.join(destdir, fn)

        outimages = self.outimage

        # new ks
        new_kspath = _rpath(self.name+'.ks')
        with open(config) as fr:
            with open(new_kspath, "w") as wf:
                # When building a release we want to make sure the .ks
                # file generates the same build even when --release not used.
                wf.write(fr.read().replace("@BUILD_ID@", release))
        outimages.append(new_kspath)

        # save log file, logfile is only available in creator attrs
        # NOTE(review): the 'not self.logfile' guard redirects logging here
        # only when no logfile was configured yet — confirm this is intended
        if hasattr(self, 'logfile') and not self.logfile:
            log_path = _rpath(self.name + ".log")
            # touch the log file, else outimages will filter it out
            with open(log_path, 'w') as wf:
                wf.write('')
            msger.set_logfile(log_path)
            outimages.append(_rpath(self.name + ".log"))

        # rename iso and usbimg
        for f in os.listdir(destdir):
            if f.endswith(".iso"):
                newf = f[:-4] + '.img'
            elif f.endswith(".usbimg"):
                newf = f[:-7] + '.img'
            else:
                continue
            os.rename(_rpath(f), _rpath(newf))
            outimages.append(_rpath(newf))

        # generate MD5SUMS
        with open(_rpath("MD5SUMS"), "w") as wf:
            for f in os.listdir(destdir):
                if f == "MD5SUMS":
                    continue

                if os.path.isdir(os.path.join(destdir, f)):
                    continue

                md5sum = misc.get_md5sum(_rpath(f))
                # There needs to be two spaces between the sum and
                # filepath to match the syntax with md5sum.
                # This way also md5sum -c MD5SUMS can be used by users
                wf.write("%s *%s\n" % (md5sum, f))

        outimages.append("%s/MD5SUMS" % destdir)

        # Filter out the nonexist file
        for fp in outimages[:]:
            if not os.path.exists("%s" % fp):
                outimages.remove(fp)
1233 | |||
1234 | def copy_kernel(self): | ||
1235 | """ Copy kernel files to the outimage directory. | ||
1236 | NOTE: This needs to be called before unmounting the instroot. | ||
1237 | """ | ||
1238 | |||
1239 | if not self._need_copy_kernel: | ||
1240 | return | ||
1241 | |||
1242 | if not os.path.exists(self.destdir): | ||
1243 | os.makedirs(self.destdir) | ||
1244 | |||
1245 | for kernel in glob.glob("%s/boot/vmlinuz-*" % self._instroot): | ||
1246 | kernelfilename = "%s/%s-%s" % (self.destdir, | ||
1247 | self.name, | ||
1248 | os.path.basename(kernel)) | ||
1249 | msger.info('copy kernel file %s as %s' % (os.path.basename(kernel), | ||
1250 | kernelfilename)) | ||
1251 | shutil.copy(kernel, kernelfilename) | ||
1252 | self.outimage.append(kernelfilename) | ||
1253 | |||
    def copy_attachment(self):
        """Hook for subclasses to copy attachment files into the image.

        NOTE: This needs to be called before unmounting the instroot.
        """
        pass
1259 | |||
    def get_pkg_manager(self):
        """Instantiate the configured backend package manager for instroot."""
        return self.pkgmgr(target_arch = self.target_arch,
                           instroot = self._instroot,
                           cachedir = self.cachedir)
diff --git a/scripts/lib/mic/imager/direct.py b/scripts/lib/mic/imager/direct.py new file mode 100644 index 0000000000..fef9d0ed32 --- /dev/null +++ b/scripts/lib/mic/imager/direct.py | |||
@@ -0,0 +1,384 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2013, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This implements the 'direct' image creator class for 'wic', based | ||
22 | # loosely on the raw image creator from 'mic' | ||
23 | # | ||
24 | # AUTHORS | ||
25 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
26 | # | ||
27 | |||
28 | import os | ||
29 | import stat | ||
30 | import shutil | ||
31 | |||
32 | from mic import kickstart, msger | ||
33 | from mic.utils import fs_related, runner, misc | ||
34 | from mic.utils.partitionedfs import PartitionedMount | ||
35 | from mic.utils.errors import CreatorError, MountError | ||
36 | from mic.imager.baseimager import BaseImageCreator | ||
37 | from mic.utils.oe.misc import * | ||
38 | from mic.plugin import pluginmgr | ||
39 | |||
# Hooks a source plugin may provide for whole-disk postprocessing;
# pluginmgr.get_source_plugin_methods() replaces the None values with
# the plugin's callables.
disk_methods = {
    "do_install_disk":None,
}
43 | |||
44 | class DirectImageCreator(BaseImageCreator): | ||
45 | """ | ||
46 | Installs a system into a file containing a partitioned disk image. | ||
47 | |||
48 | DirectImageCreator is an advanced ImageCreator subclass; an image | ||
49 | file is formatted with a partition table, each partition created | ||
50 | from a rootfs or other OpenEmbedded build artifact and dd'ed into | ||
51 | the virtual disk. The disk image can subsequently be dd'ed onto | ||
52 | media and used on actual hardware. | ||
53 | """ | ||
54 | |||
55 | def __init__(self, oe_builddir, image_output_dir, rootfs_dir, bootimg_dir, | ||
56 | kernel_dir, native_sysroot, hdddir, staging_data_dir, | ||
57 | creatoropts=None, pkgmgr=None, compress_image=None, | ||
58 | generate_bmap=None, fstab_entry="uuid"): | ||
59 | """ | ||
60 | Initialize a DirectImageCreator instance. | ||
61 | |||
62 | This method takes the same arguments as ImageCreator.__init__() | ||
63 | """ | ||
64 | BaseImageCreator.__init__(self, creatoropts, pkgmgr) | ||
65 | |||
66 | self.__instimage = None | ||
67 | self.__imgdir = None | ||
68 | self.__disks = {} | ||
69 | self.__disk_format = "direct" | ||
70 | self._disk_names = [] | ||
71 | self._ptable_format = self.ks.handler.bootloader.ptable | ||
72 | self.use_uuid = fstab_entry == "uuid" | ||
73 | self.compress_image = compress_image | ||
74 | self.bmap_needed = generate_bmap | ||
75 | |||
76 | self.oe_builddir = oe_builddir | ||
77 | if image_output_dir: | ||
78 | self.tmpdir = image_output_dir | ||
79 | self.cachedir = "%s/cache" % image_output_dir | ||
80 | self.rootfs_dir = rootfs_dir | ||
81 | self.bootimg_dir = bootimg_dir | ||
82 | self.kernel_dir = kernel_dir | ||
83 | self.native_sysroot = native_sysroot | ||
84 | self.hdddir = hdddir | ||
85 | self.staging_data_dir = staging_data_dir | ||
86 | |||
87 | def __write_fstab(self, image_rootfs): | ||
88 | """overriden to generate fstab (temporarily) in rootfs. This | ||
89 | is called from mount_instroot, make sure it doesn't get called | ||
90 | from BaseImage.mount()""" | ||
91 | if image_rootfs is None: | ||
92 | return None | ||
93 | |||
94 | fstab = image_rootfs + "/etc/fstab" | ||
95 | if not os.path.isfile(fstab): | ||
96 | return None | ||
97 | |||
98 | parts = self._get_parts() | ||
99 | |||
100 | self._save_fstab(fstab) | ||
101 | fstab_lines = self._get_fstab(fstab, parts) | ||
102 | self._update_fstab(fstab_lines, parts) | ||
103 | self._write_fstab(fstab, fstab_lines) | ||
104 | |||
105 | return fstab | ||
106 | |||
107 | def _update_fstab(self, fstab_lines, parts): | ||
108 | """Assume partition order same as in wks""" | ||
109 | for num, p in enumerate(parts, 1): | ||
110 | if not p.mountpoint or p.mountpoint == "/" or p.mountpoint == "/boot": | ||
111 | continue | ||
112 | if self._ptable_format == 'msdos' and num > 3: | ||
113 | device_name = "/dev/" + p.disk + str(num + 1) | ||
114 | else: | ||
115 | device_name = "/dev/" + p.disk + str(num) | ||
116 | fstab_entry = device_name + "\t" + p.mountpoint + "\t" + p.fstype + "\tdefaults\t0\t0\n" | ||
117 | fstab_lines.append(fstab_entry) | ||
118 | |||
119 | def _write_fstab(self, fstab, fstab_lines): | ||
120 | fstab = open(fstab, "w") | ||
121 | for line in fstab_lines: | ||
122 | fstab.write(line) | ||
123 | fstab.close() | ||
124 | |||
125 | def _save_fstab(self, fstab): | ||
126 | """Save the current fstab in rootfs""" | ||
127 | shutil.copyfile(fstab, fstab + ".orig") | ||
128 | |||
129 | def _restore_fstab(self, fstab): | ||
130 | """Restore the saved fstab in rootfs""" | ||
131 | if fstab is None: | ||
132 | return | ||
133 | shutil.move(fstab + ".orig", fstab) | ||
134 | |||
135 | def _get_fstab(self, fstab, parts): | ||
136 | """Return the desired contents of /etc/fstab.""" | ||
137 | f = open(fstab, "r") | ||
138 | fstab_contents = f.readlines() | ||
139 | f.close() | ||
140 | |||
141 | return fstab_contents | ||
142 | |||
143 | def set_bootimg_dir(self, bootimg_dir): | ||
144 | """ | ||
145 | Accessor for bootimg_dir, the actual location used for the source | ||
146 | of the bootimg. Should be set by source plugins (only if they | ||
147 | change the default bootimg source) so the correct info gets | ||
148 | displayed for print_outimage_info(). | ||
149 | """ | ||
150 | self.bootimg_dir = bootimg_dir | ||
151 | |||
152 | def _get_parts(self): | ||
153 | if not self.ks: | ||
154 | raise CreatorError("Failed to get partition info, " | ||
155 | "please check your kickstart setting.") | ||
156 | |||
157 | # Set a default partition if no partition is given out | ||
158 | if not self.ks.handler.partition.partitions: | ||
159 | partstr = "part / --size 1900 --ondisk sda --fstype=ext3" | ||
160 | args = partstr.split() | ||
161 | pd = self.ks.handler.partition.parse(args[1:]) | ||
162 | if pd not in self.ks.handler.partition.partitions: | ||
163 | self.ks.handler.partition.partitions.append(pd) | ||
164 | |||
165 | # partitions list from kickstart file | ||
166 | return kickstart.get_partitions(self.ks) | ||
167 | |||
168 | def get_disk_names(self): | ||
169 | """ Returns a list of physical target disk names (e.g., 'sdb') which | ||
170 | will be created. """ | ||
171 | |||
172 | if self._disk_names: | ||
173 | return self._disk_names | ||
174 | |||
175 | #get partition info from ks handler | ||
176 | parts = self._get_parts() | ||
177 | |||
178 | for i in range(len(parts)): | ||
179 | if parts[i].disk: | ||
180 | disk_name = parts[i].disk | ||
181 | else: | ||
182 | raise CreatorError("Failed to create disks, no --ondisk " | ||
183 | "specified in partition line of ks file") | ||
184 | |||
185 | if parts[i].mountpoint and not parts[i].fstype: | ||
186 | raise CreatorError("Failed to create disks, no --fstype " | ||
187 | "specified for partition with mountpoint " | ||
188 | "'%s' in the ks file") | ||
189 | |||
190 | self._disk_names.append(disk_name) | ||
191 | |||
192 | return self._disk_names | ||
193 | |||
194 | def _full_name(self, name, extention): | ||
195 | """ Construct full file name for a file we generate. """ | ||
196 | return "%s-%s.%s" % (self.name, name, extention) | ||
197 | |||
198 | def _full_path(self, path, name, extention): | ||
199 | """ Construct full file path to a file we generate. """ | ||
200 | return os.path.join(path, self._full_name(name, extention)) | ||
201 | |||
202 | def get_default_source_plugin(self): | ||
203 | """ | ||
204 | The default source plugin i.e. the plugin that's consulted for | ||
205 | overall image generation tasks outside of any particular | ||
206 | partition. For convenience, we just hang it off the | ||
207 | bootloader handler since it's the one non-partition object in | ||
208 | any setup. By default the default plugin is set to the same | ||
209 | plugin as the /boot partition; since we hang it off the | ||
210 | bootloader object, the default can be explicitly set using the | ||
211 | --source bootloader param. | ||
212 | """ | ||
213 | return self.ks.handler.bootloader.source | ||
214 | |||
215 | # | ||
216 | # Actual implemention | ||
217 | # | ||
218 | def _mount_instroot(self, base_on = None): | ||
219 | """ | ||
220 | For 'wic', we already have our build artifacts and don't want | ||
221 | to loop mount anything to install into, we just create | ||
222 | filesystems from the artifacts directly and combine them into | ||
223 | a partitioned image. | ||
224 | |||
225 | We still want to reuse as much of the basic mic machinery | ||
226 | though; despite the fact that we don't actually do loop or any | ||
227 | other kind of mounting we still want to do many of the same | ||
228 | things to prepare images, so we basically just adapt to the | ||
229 | basic framework and reinterpret what 'mounting' means in our | ||
230 | context. | ||
231 | |||
232 | _instroot would normally be something like | ||
233 | /var/tmp/wic/build/imgcreate-s_9AKQ/install_root, for | ||
234 | installing packages, etc. We don't currently need to do that, | ||
235 | so we simplify life by just using /var/tmp/wic/build as our | ||
236 | workdir. | ||
237 | """ | ||
238 | parts = self._get_parts() | ||
239 | |||
240 | self.__instimage = PartitionedMount(self._instroot) | ||
241 | |||
242 | for p in parts: | ||
243 | # as a convenience, set source to the boot partition source | ||
244 | # instead of forcing it to be set via bootloader --source | ||
245 | if not self.ks.handler.bootloader.source and p.mountpoint == "/boot": | ||
246 | self.ks.handler.bootloader.source = p.source | ||
247 | |||
248 | for p in parts: | ||
249 | # need to create the filesystems in order to get their | ||
250 | # sizes before we can add them and do the layout. | ||
251 | # PartitionedMount.mount() actually calls __format_disks() | ||
252 | # to create the disk images and carve out the partitions, | ||
253 | # then self.install() calls PartitionedMount.install() | ||
254 | # which calls __install_partitition() for each partition | ||
255 | # to dd the fs into the partitions. It would be nice to | ||
256 | # be able to use e.g. ExtDiskMount etc to create the | ||
257 | # filesystems, since that's where existing e.g. mkfs code | ||
258 | # is, but those are only created after __format_disks() | ||
259 | # which needs the partition sizes so needs them created | ||
260 | # before its called. Well, the existing setup is geared | ||
261 | # to installing packages into mounted filesystems - maybe | ||
262 | # when/if we need to actually do package selection we | ||
263 | # should modify things to use those objects, but for now | ||
264 | # we can avoid that. | ||
265 | |||
266 | p.install_pkgs(self, self.workdir, self.oe_builddir, self.rootfs_dir, | ||
267 | self.bootimg_dir, self.kernel_dir, self.native_sysroot) | ||
268 | |||
269 | p.prepare(self, self.workdir, self.oe_builddir, self.rootfs_dir, | ||
270 | self.bootimg_dir, self.kernel_dir, self.native_sysroot) | ||
271 | |||
272 | fstab = self.__write_fstab(p.get_rootfs()) | ||
273 | self._restore_fstab(fstab) | ||
274 | |||
275 | self.__instimage.add_partition(int(p.size), | ||
276 | p.disk, | ||
277 | p.mountpoint, | ||
278 | p.source_file, | ||
279 | p.fstype, | ||
280 | p.label, | ||
281 | fsopts = p.fsopts, | ||
282 | boot = p.active, | ||
283 | align = p.align, | ||
284 | part_type = p.part_type) | ||
285 | self.__instimage.layout_partitions(self._ptable_format) | ||
286 | |||
287 | self.__imgdir = self.workdir | ||
288 | for disk_name, disk in self.__instimage.disks.items(): | ||
289 | full_path = self._full_path(self.__imgdir, disk_name, "direct") | ||
290 | msger.debug("Adding disk %s as %s with size %s bytes" \ | ||
291 | % (disk_name, full_path, disk['min_size'])) | ||
292 | disk_obj = fs_related.DiskImage(full_path, disk['min_size']) | ||
293 | self.__disks[disk_name] = disk_obj | ||
294 | self.__instimage.add_disk(disk_name, disk_obj) | ||
295 | |||
296 | self.__instimage.mount() | ||
297 | |||
298 | def install(self, repo_urls=None): | ||
299 | """ | ||
300 | Install fs images into partitions | ||
301 | """ | ||
302 | for disk_name, disk in self.__instimage.disks.items(): | ||
303 | full_path = self._full_path(self.__imgdir, disk_name, "direct") | ||
304 | msger.debug("Installing disk %s as %s with size %s bytes" \ | ||
305 | % (disk_name, full_path, disk['min_size'])) | ||
306 | self.__instimage.install(full_path) | ||
307 | |||
308 | def configure(self, repodata = None): | ||
309 | """ | ||
310 | Configure the system image according to kickstart. | ||
311 | |||
312 | For now, it just prepares the image to be bootable by e.g. | ||
313 | creating and installing a bootloader configuration. | ||
314 | """ | ||
315 | source_plugin = self.get_default_source_plugin() | ||
316 | if source_plugin: | ||
317 | self._source_methods = pluginmgr.get_source_plugin_methods(source_plugin, disk_methods) | ||
318 | for disk_name, disk in self.__instimage.disks.items(): | ||
319 | self._source_methods["do_install_disk"](disk, disk_name, self, | ||
320 | self.workdir, | ||
321 | self.oe_builddir, | ||
322 | self.bootimg_dir, | ||
323 | self.kernel_dir, | ||
324 | self.native_sysroot) | ||
325 | |||
326 | def print_outimage_info(self): | ||
327 | """ | ||
328 | Print the image(s) and artifacts used, for the user. | ||
329 | """ | ||
330 | msg = "The new image(s) can be found here:\n" | ||
331 | |||
332 | parts = self._get_parts() | ||
333 | |||
334 | for disk_name, disk in self.__instimage.disks.items(): | ||
335 | full_path = self._full_path(self.__imgdir, disk_name, "direct") | ||
336 | msg += ' %s\n\n' % full_path | ||
337 | |||
338 | msg += 'The following build artifacts were used to create the image(s):\n' | ||
339 | for p in parts: | ||
340 | if p.get_rootfs() is None: | ||
341 | continue | ||
342 | if p.mountpoint == '/': | ||
343 | str = ':' | ||
344 | else: | ||
345 | str = '["%s"]:' % p.label | ||
346 | msg += ' ROOTFS_DIR%s%s\n' % (str.ljust(20), p.get_rootfs()) | ||
347 | |||
348 | msg += ' BOOTIMG_DIR: %s\n' % self.bootimg_dir | ||
349 | msg += ' KERNEL_DIR: %s\n' % self.kernel_dir | ||
350 | msg += ' NATIVE_SYSROOT: %s\n' % self.native_sysroot | ||
351 | |||
352 | msger.info(msg) | ||
353 | |||
354 | def _get_boot_config(self): | ||
355 | """ | ||
356 | Return the rootdev/root_part_uuid (if specified by | ||
357 | --part-type) | ||
358 | |||
359 | Assume partition order same as in wks | ||
360 | """ | ||
361 | rootdev = None | ||
362 | root_part_uuid = None | ||
363 | parts = self._get_parts() | ||
364 | for num, p in enumerate(parts, 1): | ||
365 | if p.mountpoint == "/": | ||
366 | part = '' | ||
367 | if p.disk.startswith('mmcblk'): | ||
368 | part = 'p' | ||
369 | |||
370 | if self._ptable_format == 'msdos' and num > 3: | ||
371 | rootdev = "/dev/%s%s%-d" % (p.disk, part, num + 1) | ||
372 | else: | ||
373 | rootdev = "/dev/%s%s%-d" % (p.disk, part, num) | ||
374 | root_part_uuid = p.part_type | ||
375 | |||
376 | return (rootdev, root_part_uuid) | ||
377 | |||
378 | def _unmount_instroot(self): | ||
379 | if not self.__instimage is None: | ||
380 | try: | ||
381 | self.__instimage.cleanup() | ||
382 | except MountError, err: | ||
383 | msger.warning("%s" % err) | ||
384 | |||
diff --git a/scripts/lib/mic/imager/fs.py b/scripts/lib/mic/imager/fs.py new file mode 100644 index 0000000000..d53b29cb47 --- /dev/null +++ b/scripts/lib/mic/imager/fs.py | |||
@@ -0,0 +1,99 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | |||
20 | from mic import msger | ||
21 | from mic.utils import runner, misc | ||
22 | from mic.utils.errors import CreatorError | ||
23 | from mic.utils.fs_related import find_binary_path | ||
24 | from mic.imager.baseimager import BaseImageCreator | ||
25 | |||
class FsImageCreator(BaseImageCreator):
    """Image creator that delivers the install root as a plain
    filesystem tree.

    Unlike the loop/raw creators, no filesystem image is built: the
    rootfs directory is either copied verbatim into the destination
    directory or packed into a tar archive.
    """

    def __init__(self, cfgmgr = None, pkgmgr = None):
        # Supported pack formats (suffix -> extra options); kept for
        # interface compatibility with the other creators.
        self.zips = {
            "tar.bz2" : ""
        }
        BaseImageCreator.__init__(self, cfgmgr, pkgmgr)
        self._fstype = None
        self._fsopts = None
        self._include_src = False

    def package(self, destdir = "."):
        """Copy or pack the install root into *destdir*.

        If self.pack_to is unset, the rootfs is copied to
        destdir/<name>; otherwise it is archived into destdir/<pack_to>
        with tar.  Raises CreatorError for an unsupported archive
        suffix or a failing tar run.
        """
        # Host-specific entries that must never end up in the image.
        ignores = ["/dev/fd",
                   "/dev/stdin",
                   "/dev/stdout",
                   "/dev/stderr",
                   "/etc/mtab"]

        if not os.path.exists(destdir):
            os.makedirs(destdir)

        if self._recording_pkgs:
            self._save_recording_pkgs(destdir)

        if not self.pack_to:
            fsdir = os.path.join(destdir, self.name)

            misc.check_space_pre_cp(self._instroot, destdir)
            msger.info("Copying %s to %s ..." % (self._instroot, fsdir))
            runner.show(['cp', "-af", self._instroot, fsdir])

            for exclude in ignores:
                if os.path.exists(fsdir + exclude):
                    os.unlink(fsdir + exclude)

            self.outimage.append(fsdir)

        else:
            # Map the archive suffix to the matching tar creation flag.
            # (Renamed from 'tar' to avoid shadowing the tar binary path
            # looked up below; the basename part of splitext is unused.)
            (archive, comp) = os.path.splitext(self.pack_to)
            try:
                tarcreat = {'.tar': '-cf',
                            '.gz': '-czf',
                            '.bz2': '-cjf',
                            '.tgz': '-czf',
                            '.tbz': '-cjf'}[comp]
            except KeyError:
                # Typo fix: "comression" -> "compression".
                raise CreatorError("Unsupported compression for this image type:"
                                   " '%s', try '.tar', '.tar.gz', etc" % comp)

            dst = os.path.join(destdir, self.pack_to)
            msger.info("Pack rootfs to %s. Please wait..." % dst)

            tar = find_binary_path('tar')
            tar_cmdline = [tar, "--numeric-owner",
                           "--preserve-permissions",
                           "--preserve-order",
                           "--one-file-system",
                           "--directory",
                           self._instroot]
            # tar excludes are relative to --directory, so strip the
            # leading slash from each ignore entry.
            for ignore_entry in ignores:
                if ignore_entry.startswith('/'):
                    ignore_entry = ignore_entry[1:]

                tar_cmdline.append("--exclude=%s" % (ignore_entry))

            tar_cmdline.extend([tarcreat, dst, "."])

            rc = runner.show(tar_cmdline)
            if rc:
                # Message fix: the archive is not necessarily tar.bz2.
                raise CreatorError("Failed to compress image with tar. "
                                   "Cmdline: %s" % (" ".join(tar_cmdline)))

            self.outimage.append(dst)
99 | |||
diff --git a/scripts/lib/mic/imager/livecd.py b/scripts/lib/mic/imager/livecd.py new file mode 100644 index 0000000000..e36f4a76c6 --- /dev/null +++ b/scripts/lib/mic/imager/livecd.py | |||
@@ -0,0 +1,750 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os, sys | ||
19 | import glob | ||
20 | import shutil | ||
21 | |||
22 | from mic import kickstart, msger | ||
23 | from mic.utils import fs_related, runner, misc | ||
24 | from mic.utils.errors import CreatorError | ||
25 | from mic.imager.loop import LoopImageCreator | ||
26 | |||
27 | |||
class LiveImageCreatorBase(LoopImageCreator):
    """A base class for LiveCD image creators.

    This class serves as a base class for the architecture-specific LiveCD
    image creator subclass, LiveImageCreator.

    LiveImageCreator creates a bootable ISO containing the system image,
    bootloader, bootloader configuration, kernel and initramfs.
    """

    def __init__(self, creatoropts = None, pkgmgr = None):
        """Initialise a LiveImageCreator instance.

        This method takes the same arguments as ImageCreator.__init__().
        """
        LoopImageCreator.__init__(self, creatoropts, pkgmgr)

        #Controls whether to use squashfs to compress the image.
        self.skip_compression = False

        #Controls whether an image minimizing snapshot should be created.
        #
        #This snapshot can be used when copying the system image from the ISO in
        #order to minimize the amount of data that needs to be copied; simply,
        #it makes it possible to create a version of the image's filesystem with
        #no spare space.
        self.skip_minimize = False

        # Flag indicating whether this creator acts as a converter
        # (default False).
        self.actasconvertor = False

        #The bootloader timeout from kickstart (seconds; default 10).
        if self.ks:
            self._timeout = kickstart.get_timeout(self.ks, 10)
        else:
            self._timeout = 10

        #The default kernel type from kickstart.
        if self.ks:
            self._default_kernel = kickstart.get_default_kernel(self.ks,
                                                                "kernel")
        else:
            self._default_kernel = None

        # Live images are restricted to a single-partition layout.
        if self.ks:
            parts = kickstart.get_partitions(self.ks)
            if len(parts) > 1:
                raise CreatorError("Can't support multi partitions in ks file "
                                   "for this image type")
            # FIXME: rename rootfs img to self.name,
            # else can't find files when create iso
            self._instloops[0]['name'] = self.name + ".img"

        # Staging directory for the ISO contents; created lazily by
        # __ensure_isodir().
        self.__isodir = None

        # Kernel modules to force into the initrd so common boot media
        # (ATA/USB/firewire/MMC/PCMCIA) are usable; "=" entries are
        # whole-subsystem globs expanded in __write_initrd_conf().
        self.__modules = ["=ata",
                          "sym53c8xx",
                          "aic7xxx",
                          "=usb",
                          "=firewire",
                          "=mmc",
                          "=pcmcia",
                          "mptsas"]
        if self.ks:
            self.__modules.extend(kickstart.get_modules(self.ks))

        # Host tools required to assemble the bootable ISO.
        self._dep_checks.extend(["isohybrid",
                                 "unsquashfs",
                                 "mksquashfs",
                                 "dd",
                                 "genisoimage"])

    #
    # Hooks for subclasses
    #
    def _configure_bootloader(self, isodir):
        """Create the architecture specific booloader configuration.

        This is the hook where subclasses must create the booloader
        configuration in order to allow a bootable ISO to be built.

        isodir -- the directory where the contents of the ISO are to
                  be staged
        """
        raise CreatorError("Bootloader configuration is arch-specific, "
                           "but not implemented for this arch!")

    def _get_menu_options(self):
        """Return a menu options string for syslinux configuration.

        Falls back to "liveinst autoinst" when no kickstart is loaded.
        """
        if self.ks is None:
            return "liveinst autoinst"
        r = kickstart.get_menu_args(self.ks)
        return r

    def _get_kernel_options(self):
        """Return a kernel options string for bootloader configuration.

        This is the hook where subclasses may specify a set of kernel
        options which should be included in the images bootloader
        configuration.

        A sensible default implementation is provided.
        """
        if self.ks is None:
            r = "ro rd.live.image"
        else:
            r = kickstart.get_kernel_args(self.ks)

        return r

    def _get_mkisofs_options(self, isodir):
        """Return the architecture specific mkisosfs options.

        This is the hook where subclasses may specify additional arguments
        to mkisofs, e.g. to enable a bootable ISO to be built.

        By default, an empty list is returned.
        """
        return []

    #
    # Helpers for subclasses
    #
    def _has_checkisomd5(self):
        """Check whether checkisomd5 is available in the install root."""
        def _exists(path):
            return os.path.exists(self._instroot + path)

        # checkisomd5 is looked up in the install root, but
        # implantisomd5 must exist on the build host.
        if _exists("/usr/bin/checkisomd5") and os.path.exists("/usr/bin/implantisomd5"):
            return True

        return False

    def __restore_file(self,path):
        """Undo a config tweak: drop *path* and restore any .rpmnew copy."""
        try:
            os.unlink(path)
        except:
            pass
        if os.path.exists(path + '.rpmnew'):
            os.rename(path + '.rpmnew', path)

    def _mount_instroot(self, base_on = None):
        """Mount the install root and write live-boot initrd/dracut config."""
        LoopImageCreator._mount_instroot(self, base_on)
        self.__write_initrd_conf(self._instroot + "/etc/sysconfig/mkinitrd")
        self.__write_dracut_conf(self._instroot + "/etc/dracut.conf.d/02livecd.conf")

    def _unmount_instroot(self):
        """Remove the live-boot config files, then unmount the install root."""
        self.__restore_file(self._instroot + "/etc/sysconfig/mkinitrd")
        self.__restore_file(self._instroot + "/etc/dracut.conf.d/02livecd.conf")
        LoopImageCreator._unmount_instroot(self)

    def __ensure_isodir(self):
        """Return the ISO staging directory, creating it on first use."""
        if self.__isodir is None:
            self.__isodir = self._mkdtemp("iso-")
        return self.__isodir

    def _get_isodir(self):
        return self.__ensure_isodir()

    def _set_isodir(self, isodir = None):
        self.__isodir = isodir

    def _create_bootconfig(self):
        """Configure the image so that it's bootable."""
        self._configure_bootloader(self.__ensure_isodir())

    def _get_post_scripts_env(self, in_chroot):
        """Extend the %post script environment with LIVE_ROOT (host side only)."""
        env = LoopImageCreator._get_post_scripts_env(self, in_chroot)

        if not in_chroot:
            env["LIVE_ROOT"] = self.__ensure_isodir()

        return env

    def __write_dracut_conf(self, path):
        """Append the live-image dracut modules to the config at *path*."""
        if not os.path.exists(os.path.dirname(path)):
            fs_related.makedirs(os.path.dirname(path))
        f = open(path, "a")
        f.write('add_dracutmodules+=" dmsquash-live pollcdrom "')
        f.close()

    def __write_initrd_conf(self, path):
        """Write a mkinitrd config at *path* listing the live-boot modules.

        The "=<subsystem>" entries from self.__modules are expanded to
        concrete driver lists here.
        """
        content = ""
        if not os.path.exists(os.path.dirname(path)):
            fs_related.makedirs(os.path.dirname(path))
        f = open(path, "w")

        content += 'LIVEOS="yes"\n'
        content += 'PROBE="no"\n'
        content += 'MODULES+="squashfs ext3 ext2 vfat msdos "\n'
        content += 'MODULES+="sr_mod sd_mod ide-cd cdrom "\n'

        for module in self.__modules:
            if module == "=usb":
                content += 'MODULES+="ehci_hcd uhci_hcd ohci_hcd "\n'
                content += 'MODULES+="usb_storage usbhid "\n'
            elif module == "=firewire":
                content += 'MODULES+="firewire-sbp2 firewire-ohci "\n'
                content += 'MODULES+="sbp2 ohci1394 ieee1394 "\n'
            elif module == "=mmc":
                content += 'MODULES+="mmc_block sdhci sdhci-pci "\n'
            elif module == "=pcmcia":
                content += 'MODULES+="pata_pcmcia "\n'
            else:
                content += 'MODULES+="' + module + ' "\n'
        f.write(content)
        f.close()

    def __create_iso(self, isodir):
        """Build <outdir>/<name>.iso from *isodir* with genisoimage.

        Also makes it a hybrid (USB-bootable) image when isohybrid is
        available, and implants an md5 checksum for mediacheck.
        """
        iso = self._outdir + "/" + self.name + ".iso"
        genisoimage = fs_related.find_binary_path("genisoimage")
        args = [genisoimage,
                "-J", "-r",
                "-hide-rr-moved", "-hide-joliet-trans-tbl",
                "-V", self.fslabel,
                "-o", iso]

        args.extend(self._get_mkisofs_options(isodir))

        args.append(isodir)

        if runner.show(args) != 0:
            raise CreatorError("ISO creation failed!")

        """ It should be ok still even if you haven't isohybrid """
        isohybrid = None
        try:
            isohybrid = fs_related.find_binary_path("isohybrid")
        except:
            pass

        if isohybrid:
            args = [isohybrid, "-partok", iso ]
            if runner.show(args) != 0:
                raise CreatorError("Hybrid ISO creation failed!")

        self.__implant_md5sum(iso)

    def __implant_md5sum(self, iso):
        """Implant an isomd5sum."""
        if os.path.exists("/usr/bin/implantisomd5"):
            implantisomd5 = "/usr/bin/implantisomd5"
        else:
            msger.warning("isomd5sum not installed; not setting up mediacheck")
            # NOTE(review): this assignment is dead — the function
            # returns on the next line without using it.
            implantisomd5 = ""
            return

        runner.show([implantisomd5, iso])

    def _stage_final_image(self):
        """Assemble the LiveOS tree, (optionally) compress it, and build the ISO.

        The staging directory is always removed afterwards, even on
        failure.
        """
        try:
            fs_related.makedirs(self.__ensure_isodir() + "/LiveOS")

            minimal_size = self._resparse()

            if not self.skip_minimize:
                fs_related.create_image_minimizer(self.__isodir + \
                                                      "/LiveOS/osmin.img",
                                                  self._image,
                                                  minimal_size)

            if self.skip_compression:
                shutil.move(self._image, self.__isodir + "/LiveOS/ext3fs.img")
            else:
                # squashfs wraps a LiveOS/ext3fs.img directory layout,
                # as expected by the dmsquash-live dracut module.
                fs_related.makedirs(os.path.join(
                                        os.path.dirname(self._image),
                                        "LiveOS"))
                shutil.move(self._image,
                            os.path.join(os.path.dirname(self._image),
                                         "LiveOS", "ext3fs.img"))
                fs_related.mksquashfs(os.path.dirname(self._image),
                                      self.__isodir + "/LiveOS/squashfs.img")

            self.__create_iso(self.__isodir)

            if self.pack_to:
                isoimg = os.path.join(self._outdir, self.name + ".iso")
                packimg = os.path.join(self._outdir, self.pack_to)
                misc.packing(packimg, isoimg)
                os.unlink(isoimg)

        finally:
            shutil.rmtree(self.__isodir, ignore_errors = True)
            self.__isodir = None
class x86LiveImageCreator(LiveImageCreatorBase):
    """ImageCreator for x86 machines"""
    def _get_mkisofs_options(self, isodir):
        """Return mkisofs options that make the ISO BIOS-bootable via isolinux."""
        return [ "-b", "isolinux/isolinux.bin",
                 "-c", "isolinux/boot.cat",
                 "-no-emul-boot", "-boot-info-table",
                 "-boot-load-size", "4" ]

    def _get_required_packages(self):
        """Add the syslinux bootloader packages to the base requirements."""
        return ["syslinux", "syslinux-extlinux"] + \
               LiveImageCreatorBase._get_required_packages(self)

    def _get_isolinux_stanzas(self, isodir):
        """Hook for extra isolinux.cfg stanzas; none by default."""
        return ""

    def __find_syslinux_menu(self):
        """Return the first available syslinux menu module, vesamenu preferred."""
        for menu in ["vesamenu.c32", "menu.c32"]:
            if os.path.isfile(self._instroot + "/usr/share/syslinux/" + menu):
                return menu

        raise CreatorError("syslinux not installed : "
                           "no suitable /usr/share/syslinux/*menu.c32 found")

    def __find_syslinux_mboot(self):
        #
        # We only need the mboot module if we have any xen hypervisors
        #
        if not glob.glob(self._instroot + "/boot/xen.gz*"):
            return None

        return "mboot.c32"

    def __copy_syslinux_files(self, isodir, menu, mboot = None):
        """Copy isolinux.bin, the menu module and (optionally) mboot into the ISO."""
        files = ["isolinux.bin", menu]
        if mboot:
            files += [mboot]

        for f in files:
            path = self._instroot + "/usr/share/syslinux/" + f

            if not os.path.isfile(path):
                raise CreatorError("syslinux not installed : "
                                   "%s not found" % path)

            shutil.copy(path, isodir + "/isolinux/")

    def __copy_syslinux_background(self, isodest):
        """Copy the branding splash image to *isodest*; False if not present."""
        background_path = self._instroot + \
                          "/usr/share/branding/default/syslinux/syslinux-vesa-splash.jpg"

        if not os.path.exists(background_path):
            return False

        shutil.copyfile(background_path, isodest)

        return True

    def __copy_kernel_and_initramfs(self, isodir, version, index):
        """Copy kernel/initrd (and xen, if present) for *version* into the ISO.

        Returns (is_xen, isDracut); isDracut is True when a dracut-built
        initramfs-<version>.img was found rather than a legacy initrd.
        """
        bootdir = self._instroot + "/boot"
        isDracut = False

        if self._alt_initrd_name:
            src_initrd_path = os.path.join(bootdir, self._alt_initrd_name)
        else:
            if os.path.exists(bootdir + "/initramfs-" + version + ".img"):
                src_initrd_path = os.path.join(bootdir, "initramfs-" +version+ ".img")
                isDracut = True
            else:
                src_initrd_path = os.path.join(bootdir, "initrd-" +version+ ".img")

        try:
            msger.debug("copy %s to %s" % (bootdir + "/vmlinuz-" + version, isodir + "/isolinux/vmlinuz" + index))
            shutil.copyfile(bootdir + "/vmlinuz-" + version,
                            isodir + "/isolinux/vmlinuz" + index)

            msger.debug("copy %s to %s" % (src_initrd_path, isodir + "/isolinux/initrd" + index + ".img"))
            shutil.copyfile(src_initrd_path,
                            isodir + "/isolinux/initrd" + index + ".img")
        except:
            raise CreatorError("Unable to copy valid kernels or initrds, "
                               "please check the repo.")

        is_xen = False
        # version[:-3] strips a trailing suffix from the kernel version
        # when looking for the matching xen image.
        if os.path.exists(bootdir + "/xen.gz-" + version[:-3]):
            shutil.copyfile(bootdir + "/xen.gz-" + version[:-3],
                            isodir + "/isolinux/xen" + index + ".gz")
            is_xen = True

        return (is_xen,isDracut)

    def __is_default_kernel(self, kernel, kernels):
        """Return True if *kernel* should be the default boot entry."""
        if len(kernels) == 1:
            return True

        if kernel == self._default_kernel:
            return True

        # kickstart may name the default without the "kernel-" prefix.
        if kernel.startswith("kernel-") and kernel[7:] == self._default_kernel:
            return True

        return False

    def __get_basic_syslinux_config(self, **args):
        """Return the syslinux.cfg header (menu module, timeout, colors)."""
        return """
default %(menu)s
timeout %(timeout)d

%(background)s
menu title Welcome to %(distroname)s!
menu color border 0 #ffffffff #00000000
menu color sel 7 #ff000000 #ffffffff
menu color title 0 #ffffffff #00000000
menu color tabmsg 0 #ffffffff #00000000
menu color unsel 0 #ffffffff #00000000
menu color hotsel 0 #ff000000 #ffffffff
menu color hotkey 7 #ffffffff #ff000000
menu color timeout_msg 0 #ffffffff #00000000
menu color timeout 0 #ffffffff #00000000
menu color cmdline 0 #ffffffff #00000000
menu hidden
menu clear
""" % args

    def __get_image_stanza(self, is_xen, isDracut, **args):
        """Return one syslinux label stanza (xen uses mboot.c32 chaining)."""
        if isDracut:
            args["rootlabel"] = "live:CDLABEL=%(fslabel)s" % args
        else:
            args["rootlabel"] = "CDLABEL=%(fslabel)s" % args
        if not is_xen:
            template = """label %(short)s
  menu label %(long)s
  kernel vmlinuz%(index)s
  append initrd=initrd%(index)s.img root=%(rootlabel)s rootfstype=iso9660 %(liveargs)s %(extra)s
"""
        else:
            template = """label %(short)s
  menu label %(long)s
  kernel mboot.c32
  append xen%(index)s.gz --- vmlinuz%(index)s root=%(rootlabel)s rootfstype=iso9660 %(liveargs)s %(extra)s --- initrd%(index)s.img
"""
        return template % args

    def __get_image_stanzas(self, isodir):
        """Build the full set of syslinux boot stanzas for every kernel found."""
        versions = []
        kernels = self._get_kernel_versions()
        for kernel in kernels:
            for version in kernels[kernel]:
                versions.append(version)

        if not versions:
            raise CreatorError("Unable to find valid kernels, "
                               "please check the repo")

        kernel_options = self._get_kernel_options()

        """ menu can be customized highly, the format is:

              short_name1:long_name1:extra_opts1;short_name2:long_name2:extra_opts2

            e.g.: autoinst:InstallationOnly:systemd.unit=installer-graphical.service
            but in order to keep compatible with old format, these are still ok:

              liveinst autoinst
              liveinst;autoinst
              liveinst::;autoinst::
        """
        oldmenus = {"basic": {
                        "short": "basic",
                        "long": "Installation Only (Text based)",
                        "extra": "basic nosplash 4"
                    },
                    "liveinst": {
                        "short": "liveinst",
                        "long": "Installation Only",
                        "extra": "liveinst nosplash 4"
                    },
                    "autoinst": {
                        "short": "autoinst",
                        "long": "Autoinstall (Deletes all existing content)",
                        "extra": "autoinst nosplash 4"
                    },
                    "netinst": {
                        "short": "netinst",
                        "long": "Network Installation",
                        "extra": "netinst 4"
                    },
                    "verify": {
                        "short": "check",
                        "long": "Verify and",
                        "extra": "check"
                    }
                   }
        menu_options = self._get_menu_options()
        menus = menu_options.split(";")
        for i in range(len(menus)):
            menus[i] = menus[i].split(":")
        if len(menus) == 1 and len(menus[0]) == 1:
            """ Keep compatible with the old usage way """
            menus = menu_options.split()
            for i in range(len(menus)):
                menus[i] = [menus[i]]

        cfg = ""

        default_version = None
        default_index = None
        index = "0"
        netinst = None
        for version in versions:
            (is_xen, isDracut) = self.__copy_kernel_and_initramfs(isodir, version, index)
            if index == "0":
                self._isDracut = isDracut

            # NOTE(review): 'kernel' here is the stale loop variable left
            # over from the flattening loop above (i.e. the last kernel
            # name), not the kernel that owns 'version' — looks like a
            # bug when multiple kernels are installed; confirm.
            default = self.__is_default_kernel(kernel, kernels)

            if default:
                long = "Boot %s" % self.distro_name
            elif kernel.startswith("kernel-"):
                long = "Boot %s(%s)" % (self.name, kernel[7:])
            else:
                long = "Boot %s(%s)" % (self.name, kernel)

            oldmenus["verify"]["long"] = "%s %s" % (oldmenus["verify"]["long"],
                                                    long)
            # tell dracut not to ask for LUKS passwords or activate mdraid sets
            if isDracut:
                kern_opts = kernel_options + " rd.luks=0 rd.md=0 rd.dm=0"
            else:
                kern_opts = kernel_options

            cfg += self.__get_image_stanza(is_xen, isDracut,
                                           fslabel = self.fslabel,
                                           liveargs = kern_opts,
                                           long = long,
                                           short = "linux" + index,
                                           extra = "",
                                           index = index)

            if default:
                cfg += "menu default\n"
                default_version = version
                default_index = index

            for menu in menus:
                if not menu[0]:
                    continue
                short = menu[0] + index

                if len(menu) >= 2:
                    long = menu[1]
                else:
                    if menu[0] in oldmenus.keys():
                        if menu[0] == "verify" and not self._has_checkisomd5():
                            continue
                        if menu[0] == "netinst":
                            # netinst is emitted once, after the loop,
                            # against the default kernel.
                            netinst = oldmenus[menu[0]]
                            continue
                        long = oldmenus[menu[0]]["long"]
                        extra = oldmenus[menu[0]]["extra"]
                    else:
                        long = short.upper() + " X" + index
                        extra = ""

                if len(menu) >= 3:
                    extra = menu[2]

                cfg += self.__get_image_stanza(is_xen, isDracut,
                                               fslabel = self.fslabel,
                                               liveargs = kernel_options,
                                               long = long,
                                               short = short,
                                               extra = extra,
                                               index = index)

            index = str(int(index) + 1)

        if not default_version:
            default_version = versions[0]
        if not default_index:
            default_index = "0"

        if netinst:
            cfg += self.__get_image_stanza(is_xen, isDracut,
                                           fslabel = self.fslabel,
                                           liveargs = kernel_options,
                                           long = netinst["long"],
                                           short = netinst["short"],
                                           extra = netinst["extra"],
                                           index = default_index)

        return cfg

    def __get_memtest_stanza(self, isodir):
        """Add a memtest entry if a memtest86 binary exists in the rootfs."""
        memtest = glob.glob(self._instroot + "/boot/memtest86*")
        if not memtest:
            return ""

        shutil.copyfile(memtest[0], isodir + "/isolinux/memtest")

        return """label memtest
  menu label Memory Test
  kernel memtest
"""

    def __get_local_stanza(self, isodir):
        """Return the 'boot from local drive' stanza."""
        return """label local
  menu label Boot from local drive
  localboot 0xffff
"""

    def _configure_syslinux_bootloader(self, isodir):
        """configure the boot loader"""
        fs_related.makedirs(isodir + "/isolinux")

        menu = self.__find_syslinux_menu()

        self.__copy_syslinux_files(isodir, menu,
                                   self.__find_syslinux_mboot())

        background = ""
        if self.__copy_syslinux_background(isodir + "/isolinux/splash.jpg"):
            background = "menu background splash.jpg"

        # syslinux timeout is in tenths of a second, ours is in seconds.
        cfg = self.__get_basic_syslinux_config(menu = menu,
                                               background = background,
                                               name = self.name,
                                               timeout = self._timeout * 10,
                                               distroname = self.distro_name)

        cfg += self.__get_image_stanzas(isodir)
        cfg += self.__get_memtest_stanza(isodir)
        cfg += self.__get_local_stanza(isodir)
        cfg += self._get_isolinux_stanzas(isodir)

        cfgf = open(isodir + "/isolinux/isolinux.cfg", "w")
        cfgf.write(cfg)
        cfgf.close()

    def __copy_efi_files(self, isodir):
        """Copy grub.efi and splash into EFI/boot; False if grub.efi is absent."""
        if not os.path.exists(self._instroot + "/boot/efi/EFI/redhat/grub.efi"):
            return False
        shutil.copy(self._instroot + "/boot/efi/EFI/redhat/grub.efi",
                    isodir + "/EFI/boot/grub.efi")
        shutil.copy(self._instroot + "/boot/grub/splash.xpm.gz",
                    isodir + "/EFI/boot/splash.xpm.gz")

        return True

    def __get_basic_efi_config(self, **args):
        """Return the grub.conf header for EFI boot."""
        return """
default=0
splashimage=/EFI/boot/splash.xpm.gz
timeout %(timeout)d
hiddenmenu

""" %args

    def __get_efi_image_stanza(self, **args):
        """Return one grub.conf title stanza for EFI boot."""
        return """title %(long)s
  kernel /EFI/boot/vmlinuz%(index)s root=CDLABEL=%(fslabel)s rootfstype=iso9660 %(liveargs)s %(extra)s
  initrd /EFI/boot/initrd%(index)s.img
""" %args

    def __get_efi_image_stanzas(self, isodir, name):
        # FIXME: this only supports one kernel right now...

        kernel_options = self._get_kernel_options()
        checkisomd5 = self._has_checkisomd5()

        cfg = ""

        for index in range(0, 9):
            # we don't support xen kernels
            if os.path.exists("%s/EFI/boot/xen%d.gz" %(isodir, index)):
                continue
            cfg += self.__get_efi_image_stanza(fslabel = self.fslabel,
                                               liveargs = kernel_options,
                                               long = name,
                                               extra = "", index = index)
            if checkisomd5:
                cfg += self.__get_efi_image_stanza(
                                               fslabel = self.fslabel,
                                               liveargs = kernel_options,
                                               long = "Verify and Boot " + name,
                                               extra = "check",
                                               index = index)
            break

        return cfg

    def _configure_efi_bootloader(self, isodir):
        """Set up the configuration for an EFI bootloader"""
        fs_related.makedirs(isodir + "/EFI/boot")

        if not self.__copy_efi_files(isodir):
            shutil.rmtree(isodir + "/EFI")
            return

        # Hard-link the kernels/initrds staged for isolinux into EFI/boot.
        for f in os.listdir(isodir + "/isolinux"):
            os.link("%s/isolinux/%s" %(isodir, f),
                    "%s/EFI/boot/%s" %(isodir, f))


        cfg = self.__get_basic_efi_config(name = self.name,
                                          timeout = self._timeout)
        cfg += self.__get_efi_image_stanzas(isodir, self.name)

        cfgf = open(isodir + "/EFI/boot/grub.conf", "w")
        cfgf.write(cfg)
        cfgf.close()

        # first gen mactel machines get the bootloader name wrong apparently
        # NOTE(review): 'rpmmisc' is not imported in this module's visible
        # import list — this call will raise NameError if reached; confirm
        # whether 'from mic.utils import rpmmisc' is missing.
        if rpmmisc.getBaseArch() == "i386":
            os.link(isodir + "/EFI/boot/grub.efi",
                    isodir + "/EFI/boot/boot.efi")
            os.link(isodir + "/EFI/boot/grub.conf",
                    isodir + "/EFI/boot/boot.conf")

        # for most things, we want them named boot$efiarch
        efiarch = {"i386": "ia32", "x86_64": "x64"}
        efiname = efiarch[rpmmisc.getBaseArch()]
        os.rename(isodir + "/EFI/boot/grub.efi",
                  isodir + "/EFI/boot/boot%s.efi" %(efiname,))
        os.link(isodir + "/EFI/boot/grub.conf",
                isodir + "/EFI/boot/boot%s.conf" %(efiname,))


    def _configure_bootloader(self, isodir):
        """Configure both the BIOS (syslinux) and EFI bootloaders."""
        self._configure_syslinux_bootloader(isodir)
        self._configure_efi_bootloader(isodir)
743 | |||
# Module-level selection of the concrete LiveCD creator class.
# NOTE(review): the architecture is hard-coded to "i386" rather than
# detected from the host or the build target — confirm this is intentional.
arch = "i386"
if arch in ("i386", "x86_64"):
    LiveCDImageCreator = x86LiveImageCreator
elif arch.startswith("arm"):
    # No arch-specific bootloader support for ARM; fall back to the base
    # class (which cannot produce a bootable ISO on its own).
    LiveCDImageCreator = LiveImageCreatorBase
else:
    raise CreatorError("Architecture not supported!")
diff --git a/scripts/lib/mic/imager/liveusb.py b/scripts/lib/mic/imager/liveusb.py new file mode 100644 index 0000000000..a909928a4c --- /dev/null +++ b/scripts/lib/mic/imager/liveusb.py | |||
@@ -0,0 +1,308 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import shutil | ||
20 | import re | ||
21 | |||
22 | from mic import msger | ||
23 | from mic.utils import misc, fs_related, runner | ||
24 | from mic.utils.errors import CreatorError, MountError | ||
25 | from mic.utils.partitionedfs import PartitionedMount | ||
26 | from mic.imager.livecd import LiveCDImageCreator | ||
27 | |||
28 | |||
class LiveUSBImageCreator(LiveCDImageCreator):
    """Creates a bootable USB-stick image from a live CD directory tree.

    The parent class populates an iso-style layout (LiveOS/ + isolinux/);
    this subclass packs that tree into a single-partition disk image named
    <name>.usbimg and installs syslinux (vfat/msdos) or extlinux
    (ext2/ext3) plus an MBR so the image boots directly from USB media.
    """

    def __init__(self, *args):
        # Same arguments as LiveCDImageCreator; only the external host-tool
        # dependency list differs.
        LiveCDImageCreator.__init__(self, *args)

        # extra host tools needed for partitioned-image handling
        self._dep_checks.extend(["kpartx", "parted"])

        # remove dependency of genisoimage in parent class
        if "genisoimage" in self._dep_checks:
            self._dep_checks.remove("genisoimage")

    def _create_usbimg(self, isodir):
        """Build <outdir>/<name>.usbimg from the populated iso directory.

        isodir -- directory holding the LiveOS/ and isolinux/ trees
                  produced by _stage_final_image().

        Raises CreatorError on any tool failure or size-limit violation.
        """
        # Fixed build-time knobs; not currently exposed to callers.
        overlaysizemb = 64 #default
        #skipcompress = self.skip_compression?
        fstype = "vfat"
        homesizemb=0
        swapsizemb=0
        homefile="home.img"
        plussize=128
        kernelargs=None

        if fstype == 'vfat':
            # VFAT cannot hold a single file of 2GB or larger.
            if overlaysizemb > 2047:
                raise CreatorError("Can't have an overlay of 2048MB or "
                                   "greater on VFAT")

            if homesizemb > 2047:
                raise CreatorError("Can't have an home overlay of 2048MB or "
                                   "greater on VFAT")

            if swapsizemb > 2047:
                raise CreatorError("Can't have an swap overlay of 2048MB or "
                                   "greater on VFAT")

        livesize = misc.get_file_size(isodir + "/LiveOS")

        # Total image size: payload plus overlay/home/swap plus slack.
        usbimgsize = (overlaysizemb + \
                      homesizemb + \
                      swapsizemb + \
                      livesize + \
                      plussize) * 1024L * 1024L

        disk = fs_related.SparseLoopbackDisk("%s/%s.usbimg" \
                                             % (self._outdir, self.name),
                                             usbimgsize)
        usbmnt = self._mkdtemp("usb-mnt")
        usbloop = PartitionedMount(usbmnt)
        usbloop.add_disk('/dev/sdb', disk)

        # One bootable partition spanning the whole image.
        usbloop.add_partition(usbimgsize/1024/1024,
                              "/dev/sdb",
                              "/",
                              fstype,
                              boot=True)

        usbloop.mount()

        try:
            fs_related.makedirs(usbmnt + "/LiveOS")

            # Reuse an existing squashfs payload when the live-CD staging
            # already produced one; otherwise squash the rootfs now.
            if os.path.exists(isodir + "/LiveOS/squashfs.img"):
                shutil.copyfile(isodir + "/LiveOS/squashfs.img",
                                usbmnt + "/LiveOS/squashfs.img")
            else:
                fs_related.mksquashfs(os.path.dirname(self._image),
                                      usbmnt + "/LiveOS/squashfs.img")

            if os.path.exists(isodir + "/LiveOS/osmin.img"):
                shutil.copyfile(isodir + "/LiveOS/osmin.img",
                                usbmnt + "/LiveOS/osmin.img")

            # The kernel command line identifies the boot device by
            # filesystem UUID; VFAT "UUIDs" are 8 hex digits split 4-4.
            if fstype == "vfat" or fstype == "msdos":
                uuid = usbloop.partitions[0]['mount'].uuid
                label = usbloop.partitions[0]['mount'].fslabel
                usblabel = "UUID=%s-%s" % (uuid[0:4], uuid[4:8])
                overlaysuffix = "-%s-%s-%s" % (label, uuid[0:4], uuid[4:8])
            else:
                diskmount = usbloop.partitions[0]['mount']
                usblabel = "UUID=%s" % diskmount.uuid
                overlaysuffix = "-%s-%s" % (diskmount.fslabel, diskmount.uuid)

            # Carry the isolinux tree over as the syslinux directory.
            args = ['cp', "-Rf", isodir + "/isolinux", usbmnt + "/syslinux"]
            rc = runner.show(args)
            if rc:
                raise CreatorError("Can't copy isolinux directory %s" \
                                   % (isodir + "/isolinux/*"))

            # Locate the host's syslinux data files (path varies by distro).
            if os.path.isfile("/usr/share/syslinux/isolinux.bin"):
                syslinux_path = "/usr/share/syslinux"
            elif os.path.isfile("/usr/lib/syslinux/isolinux.bin"):
                syslinux_path = "/usr/lib/syslinux"
            else:
                raise CreatorError("syslinux not installed : "
                                   "cannot find syslinux installation path")

            for f in ("isolinux.bin", "vesamenu.c32"):
                path = os.path.join(syslinux_path, f)
                if os.path.isfile(path):
                    args = ['cp', path, usbmnt + "/syslinux/"]
                    rc = runner.show(args)
                    if rc:
                        raise CreatorError("Can't copy syslinux file " + path)
                else:
                    raise CreatorError("syslinux not installed: "
                                       "syslinux file %s not found" % path)

            # Rewrite the isolinux config for USB boot: point the root at
            # the partition UUID and the actual filesystem type.
            fd = open(isodir + "/isolinux/isolinux.cfg", "r")
            text = fd.read()
            fd.close()
            pattern = re.compile('CDLABEL=[^ ]*')
            text = pattern.sub(usblabel, text)
            pattern = re.compile('rootfstype=[^ ]*')
            text = pattern.sub("rootfstype=" + fstype, text)
            if kernelargs:
                text = text.replace("rd.live.image", "rd.live.image " + kernelargs)

            if overlaysizemb > 0:
                msger.info("Initializing persistent overlay file")
                overfile = "overlay" + overlaysuffix
                if fstype == "vfat":
                    # VFAT has no sparse files; write real zero blocks.
                    args = ['dd',
                            "if=/dev/zero",
                            "of=" + usbmnt + "/LiveOS/" + overfile,
                            "count=%d" % overlaysizemb,
                            "bs=1M"]
                else:
                    # Other filesystems: create a sparse file by seeking.
                    args = ['dd',
                            "if=/dev/null",
                            "of=" + usbmnt + "/LiveOS/" + overfile,
                            "count=1",
                            "bs=1M",
                            "seek=%d" % overlaysizemb]
                rc = runner.show(args)
                if rc:
                    raise CreatorError("Can't create overlay file")
                # With a persistent overlay the root must be writable.
                text = text.replace("rd.live.image", "rd.live.image rd.live.overlay=" + usblabel)
                text = text.replace(" ro ", " rw ")

            if swapsizemb > 0:
                msger.info("Initializing swap file")
                swapfile = usbmnt + "/LiveOS/" + "swap.img"
                args = ['dd',
                        "if=/dev/zero",
                        "of=" + swapfile,
                        "count=%d" % swapsizemb,
                        "bs=1M"]
                rc = runner.show(args)
                if rc:
                    raise CreatorError("Can't create swap file")
                args = ["mkswap", "-f", swapfile]
                rc = runner.show(args)
                if rc:
                    raise CreatorError("Can't mkswap on swap file")

            if homesizemb > 0:
                msger.info("Initializing persistent /home")
                homefile = usbmnt + "/LiveOS/" + homefile
                if fstype == "vfat":
                    args = ['dd',
                            "if=/dev/zero",
                            "of=" + homefile,
                            "count=%d" % homesizemb,
                            "bs=1M"]
                else:
                    args = ['dd',
                            "if=/dev/null",
                            "of=" + homefile,
                            "count=1",
                            "bs=1M",
                            "seek=%d" % homesizemb]
                rc = runner.show(args)
                if rc:
                    raise CreatorError("Can't create home file")

                mkfscmd = fs_related.find_binary_path("/sbin/mkfs." + fstype)
                if fstype == "ext2" or fstype == "ext3":
                    args = [mkfscmd, "-F", "-j", homefile]
                else:
                    args = [mkfscmd, homefile]
                rc = runner.show(args)
                if rc:
                    raise CreatorError("Can't mke2fs home file")
                if fstype == "ext2" or fstype == "ext3":
                    # Disable periodic fsck; enable xattrs/ACLs on /home.
                    tune2fs = fs_related.find_binary_path("tune2fs")
                    args = [tune2fs,
                            "-c0",
                            "-i0",
                            "-ouser_xattr,acl",
                            homefile]
                    rc = runner.show(args)
                    if rc:
                        raise CreatorError("Can't tune2fs home file")

            # Pick the bootloader matching the partition's filesystem.
            if fstype == "vfat" or fstype == "msdos":
                syslinuxcmd = fs_related.find_binary_path("syslinux")
                syslinuxcfg = usbmnt + "/syslinux/syslinux.cfg"
                args = [syslinuxcmd,
                        "-d",
                        "syslinux",
                        usbloop.partitions[0]["device"]]

            elif fstype == "ext2" or fstype == "ext3":
                extlinuxcmd = fs_related.find_binary_path("extlinux")
                syslinuxcfg = usbmnt + "/syslinux/extlinux.conf"
                args = [extlinuxcmd,
                        "-i",
                        usbmnt + "/syslinux"]

            else:
                raise CreatorError("Invalid file system type: %s" % (fstype))

            # Replace the CD config with the rewritten USB one, then run
            # the bootloader installer chosen above.
            os.unlink(usbmnt + "/syslinux/isolinux.cfg")
            fd = open(syslinuxcfg, "w")
            fd.write(text)
            fd.close()
            rc = runner.show(args)
            if rc:
                raise CreatorError("Can't install boot loader.")

        finally:
            usbloop.unmount()
            usbloop.cleanup()

        # Need to do this after image is unmounted and device mapper is closed
        msger.info("set MBR")
        mbrfile = "/usr/lib/syslinux/mbr.bin"
        if not os.path.exists(mbrfile):
            mbrfile = "/usr/share/syslinux/mbr.bin"
            if not os.path.exists(mbrfile):
                raise CreatorError("mbr.bin file didn't exist.")
        mbrsize = os.path.getsize(mbrfile)
        outimg = "%s/%s.usbimg" % (self._outdir, self.name)

        # Write the MBR boot code into the first bytes of the image.
        args = ['dd',
                "if=" + mbrfile,
                "of=" + outimg,
                "seek=0",
                "conv=notrunc",
                "bs=1",
                "count=%d" % (mbrsize)]
        rc = runner.show(args)
        if rc:
            raise CreatorError("Can't set MBR.")

    def _stage_final_image(self):
        """Assemble the final USB image, optionally packing it.

        Overrides the live-CD staging: the LiveOS payload is prepared in
        the iso scratch directory and handed to _create_usbimg() instead
        of being burned into an iso.
        """
        try:
            isodir = self._get_isodir()
            fs_related.makedirs(isodir + "/LiveOS")

            minimal_size = self._resparse()

            if not self.skip_minimize:
                fs_related.create_image_minimizer(isodir + "/LiveOS/osmin.img",
                                                  self._image,
                                                  minimal_size)

            if self.skip_compression:
                shutil.move(self._image,
                            isodir + "/LiveOS/ext3fs.img")
            else:
                # Squash the rootfs from a LiveOS/ subdirectory so the
                # squashfs contains the expected LiveOS/ext3fs.img layout.
                fs_related.makedirs(os.path.join(
                                        os.path.dirname(self._image),
                                        "LiveOS"))
                shutil.move(self._image,
                            os.path.join(os.path.dirname(self._image),
                                         "LiveOS", "ext3fs.img"))
                fs_related.mksquashfs(os.path.dirname(self._image),
                                      isodir + "/LiveOS/squashfs.img")

            self._create_usbimg(isodir)

            if self.pack_to:
                usbimg = os.path.join(self._outdir, self.name + ".usbimg")
                packimg = os.path.join(self._outdir, self.pack_to)
                misc.packing(packimg, usbimg)
                os.unlink(usbimg)

        finally:
            # NOTE(review): if _get_isodir() itself raises, isodir is
            # unbound here and this cleanup would NameError -- confirm.
            shutil.rmtree(isodir, ignore_errors = True)
            self._set_isodir(None)
308 | |||
diff --git a/scripts/lib/mic/imager/loop.py b/scripts/lib/mic/imager/loop.py new file mode 100644 index 0000000000..4d05ef271d --- /dev/null +++ b/scripts/lib/mic/imager/loop.py | |||
@@ -0,0 +1,418 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import glob | ||
20 | import shutil | ||
21 | |||
22 | from mic import kickstart, msger | ||
23 | from mic.utils.errors import CreatorError, MountError | ||
24 | from mic.utils import misc, runner, fs_related as fs | ||
25 | from mic.imager.baseimager import BaseImageCreator | ||
26 | |||
27 | |||
# The maximum string length supported for LoopImageCreator.fslabel
# (values assigned to the fslabel property are silently truncated to
# this many characters).
FSLABEL_MAXLEN = 32
30 | |||
31 | |||
def save_mountpoints(fpath, loops, arch = None):
    """Persist the partition/mount-point mapping as a small XML file.

    :fpath, the xml file to store partition info
    :loops, dict of partition info
    :arch, image arch
    """
    # Nothing to record without both a destination and partition data.
    if not fpath or not loops:
        return

    from xml.dom import minidom

    document = minidom.Document()
    image_node = document.createElement("image")
    document.appendChild(image_node)
    if arch:
        image_node.setAttribute('arch', arch)

    for entry in loops:
        part_node = document.createElement("partition")
        image_node.appendChild(part_node)
        for (attr_name, attr_val) in entry.items():
            # Mount objects are live handles, not serializable metadata.
            if isinstance(attr_val, fs.Mount):
                continue
            part_node.setAttribute(attr_name, str(attr_val))

    with open(fpath, 'w') as out:
        out.write(document.toprettyxml(indent='  '))

    return
61 | |||
def load_mountpoints(fpath):
    """Read back a mount-point mapping written by save_mountpoints().

    :fpath, file path to load

    Returns a list of (mountpoint, label, name, size, fstype) tuples;
    returns None when fpath is empty. Malformed partition entries are
    warned about and skipped.
    """
    if not fpath:
        return

    from xml.dom import minidom

    result = []
    with open(fpath, 'r') as xml_file:
        dom = minidom.parse(xml_file)
    imgroot = dom.documentElement

    for part_node in imgroot.getElementsByTagName("partition"):
        attrs = dict(part_node.attributes.items())
        try:
            entry = (attrs['mountpoint'], attrs['label'], attrs['name'],
                     int(attrs['size']), attrs['fstype'])
        except KeyError:
            msger.warning("Wrong format line in file: %s" % fpath)
        except ValueError:
            msger.warning("Invalid size '%s' in file: %s" % (attrs['size'], fpath))
        else:
            result.append(entry)

    return result
90 | |||
class LoopImageCreator(BaseImageCreator):
    """Installs a system into a loopback-mountable filesystem image.

    LoopImageCreator is a straightforward ImageCreator subclass; the system
    is installed into an ext3 filesystem on a sparse file which can be
    subsequently loopback-mounted.

    When specifying multiple partitions in kickstart file, each partition
    will be created as a separated loop image.
    """

    def __init__(self, creatoropts=None, pkgmgr=None,
                 compress_image=None,
                 shrink_image=False):
        """Initialize a LoopImageCreator instance.

        This method takes the same arguments as ImageCreator.__init__()
        with the addition of:

        fslabel -- A string used as a label for any filesystems created.
        """

        BaseImageCreator.__init__(self, creatoropts, pkgmgr)

        # optional post-processing flags applied in _stage_final_image()
        self.compress_image = compress_image
        self.shrink_image = shrink_image

        self.__fslabel = None
        self.fslabel = self.name

        self.__blocksize = 4096
        if self.ks:
            self.__fstype = kickstart.get_image_fstype(self.ks,
                                                       "ext3")
            self.__fsopts = kickstart.get_image_fsopts(self.ks,
                                                       "defaults,noatime")

            # Build one loop-image record per non-swap kickstart partition.
            allloops = []
            for part in sorted(kickstart.get_partitions(self.ks),
                               key=lambda p: p.mountpoint):
                if part.fstype == "swap":
                    continue

                label = part.label
                mp = part.mountpoint
                if mp == '/':
                    # the base image
                    if not label:
                        label = self.name
                else:
                    mp = mp.rstrip('/')
                    if not label:
                        msger.warning('no "label" specified for loop img at %s'
                                      ', use the mountpoint as the name' % mp)
                        label = mp.split('/')[-1]

                imgname = misc.strip_end(label, '.img') + '.img'
                allloops.append({
                    'mountpoint': mp,
                    'label': label,
                    'name': imgname,
                    'size': part.size or 4096L * 1024 * 1024,
                    'fstype': part.fstype or 'ext3',
                    'extopts': part.extopts or None,
                    'loop': None,  # to be created in _mount_instroot
                    })
            self._instloops = allloops

        else:
            # No kickstart: defaults get filled in by _mount_instroot()
            # when a base image is supplied.
            self.__fstype = None
            self.__fsopts = None
            self._instloops = []

        self.__imgdir = None

        if self.ks:
            self.__image_size = kickstart.get_image_size(self.ks,
                                                         4096L * 1024 * 1024)
        else:
            self.__image_size = 0

        self._img_name = self.name + ".img"

    def get_image_names(self):
        """Return the loop image file names, or None when there are none."""
        if not self._instloops:
            return None

        return [lo['name'] for lo in self._instloops]

    def _set_fstype(self, fstype):
        # Internal setter used by subclasses; bypasses the property check.
        self.__fstype = fstype

    def _set_image_size(self, imgsize):
        self.__image_size = imgsize


    #
    # Properties
    #
    def __get_fslabel(self):
        if self.__fslabel is None:
            return self.name
        else:
            return self.__fslabel
    def __set_fslabel(self, val):
        if val is None:
            self.__fslabel = None
        else:
            self.__fslabel = val[:FSLABEL_MAXLEN]
    #A string used to label any filesystems created.
    #
    #Some filesystems impose a constraint on the maximum allowed size of the
    #filesystem label. In the case of ext3 it's 16 characters, but in the case
    #of ISO9660 it's 32 characters.
    #
    #mke2fs silently truncates the label, but mkisofs aborts if the label is
    #too long. So, for convenience sake, any string assigned to this attribute
    #is silently truncated to FSLABEL_MAXLEN (32) characters.
    fslabel = property(__get_fslabel, __set_fslabel)

    def __get_image(self):
        if self.__imgdir is None:
            raise CreatorError("_image is not valid before calling mount()")
        return os.path.join(self.__imgdir, self._img_name)
    #The location of the image file.
    #
    #This is the path to the filesystem image. Subclasses may use this path
    #in order to package the image in _stage_final_image().
    #
    #Note, this directory does not exist before ImageCreator.mount() is called.
    #
    #Note also, this is a read-only attribute.
    _image = property(__get_image)

    def __get_blocksize(self):
        return self.__blocksize
    def __set_blocksize(self, val):
        if self._instloops:
            raise CreatorError("_blocksize must be set before calling mount()")
        try:
            self.__blocksize = int(val)
        except ValueError:
            raise CreatorError("'%s' is not a valid integer value "
                               "for _blocksize" % val)
    #The block size used by the image's filesystem.
    #
    #This is the block size used when creating the filesystem image. Subclasses
    #may change this if they wish to use something other than a 4k block size.
    #
    #Note, this attribute may only be set before calling mount().
    _blocksize = property(__get_blocksize, __set_blocksize)

    def __get_fstype(self):
        return self.__fstype
    def __set_fstype(self, val):
        if val != "ext2" and val != "ext3":
            raise CreatorError("Unknown _fstype '%s' supplied" % val)
        self.__fstype = val
    #The type of filesystem used for the image.
    #
    #This is the filesystem type used when creating the filesystem image.
    #Subclasses may change this if they wish to use something other ext3.
    #
    #Note, only ext2 and ext3 are currently supported.
    #
    #Note also, this attribute may only be set before calling mount().
    _fstype = property(__get_fstype, __set_fstype)

    def __get_fsopts(self):
        return self.__fsopts
    def __set_fsopts(self, val):
        self.__fsopts = val
    #Mount options of filesystem used for the image.
    #
    #This can be specified by --fsoptions="xxx,yyy" in part command in
    #kickstart file.
    _fsopts = property(__get_fsopts, __set_fsopts)


    #
    # Helpers for subclasses
    #
    def _resparse(self, size=None):
        """Rebuild the filesystem image to be as sparse as possible.

        This method should be used by subclasses when staging the final image
        in order to reduce the actual space taken up by the sparse image file
        to be as little as possible.

        This is done by resizing the filesystem to the minimal size (thereby
        eliminating any space taken up by deleted files) and then resizing it
        back to the supplied size.

        size -- the size in, in bytes, which the filesystem image should be
                resized to after it has been minimized; this defaults to None,
                causing the original size specified by the kickstart file to
                be used (or 4GiB if not specified in the kickstart).
        """
        # Only the minimized size of the base image is reported back.
        minsize = 0
        for item in self._instloops:
            if item['name'] == self._img_name:
                minsize = item['loop'].resparse(size)
            else:
                item['loop'].resparse(size)

        return minsize

    def _base_on(self, base_on=None):
        """Seed the image file from an existing base image, if any."""
        if base_on and self._image != base_on:
            shutil.copyfile(base_on, self._image)

    def _check_imgdir(self):
        # Lazily create the scratch directory holding the image files.
        if self.__imgdir is None:
            self.__imgdir = self._mkdtemp()


    #
    # Actual implementation
    #
    def _mount_instroot(self, base_on=None):
        """Create/mount every loop image under the install root.

        base_on -- optional existing image file to start from; when given,
                   a single root entry is synthesized in _instloops.
        """

        if base_on and os.path.isfile(base_on):
            self.__imgdir = os.path.dirname(base_on)
            imgname = os.path.basename(base_on)
            self._base_on(base_on)
            self._set_image_size(misc.get_file_size(self._image))

            # here, self._instloops must be []
            self._instloops.append({
                "mountpoint": "/",
                "label": self.name,
                "name": imgname,
                "size": self.__image_size or 4096L,
                "fstype": self.__fstype or "ext3",
                "extopts": None,
                "loop": None
                })

        self._check_imgdir()

        for loop in self._instloops:
            fstype = loop['fstype']
            mp = os.path.join(self._instroot, loop['mountpoint'].lstrip('/'))
            size = loop['size'] * 1024L * 1024L
            imgname = loop['name']

            # Map the filesystem type to the matching mount helper class.
            if fstype in ("ext2", "ext3", "ext4"):
                MyDiskMount = fs.ExtDiskMount
            elif fstype == "btrfs":
                MyDiskMount = fs.BtrfsDiskMount
            elif fstype in ("vfat", "msdos"):
                MyDiskMount = fs.VfatDiskMount
            else:
                msger.error('Cannot support fstype: %s' % fstype)

            loop['loop'] = MyDiskMount(fs.SparseLoopbackDisk(
                                           os.path.join(self.__imgdir, imgname),
                                           size),
                                       mp,
                                       fstype,
                                       self._blocksize,
                                       loop['label'])

            if fstype in ("ext2", "ext3", "ext4"):
                loop['loop'].extopts = loop['extopts']

            try:
                msger.verbose('Mounting image "%s" on "%s"' % (imgname, mp))
                fs.makedirs(mp)
                loop['loop'].mount()
            # NOTE(review): re-raised unchanged; 'e' is unused.
            except MountError, e:
                raise

    def _unmount_instroot(self):
        """Tear down all partition mounts, ignoring individual failures."""
        for item in reversed(self._instloops):
            try:
                item['loop'].cleanup()
            except:
                pass

    def _stage_final_image(self):
        """Resparse, post-process and move/pack the loop images to outdir."""

        if self.pack_to or self.shrink_image:
            # Shrink to minimal size when packing or explicitly requested.
            self._resparse(0)
        else:
            self._resparse()

        for item in self._instloops:
            imgfile = os.path.join(self.__imgdir, item['name'])
            if item['fstype'] == "ext4":
                runner.show('/sbin/tune2fs -O ^huge_file,extents,uninit_bg %s '
                            % imgfile)
            if self.compress_image:
                misc.compressing(imgfile, self.compress_image)

        if not self.pack_to:
            for item in os.listdir(self.__imgdir):
                shutil.move(os.path.join(self.__imgdir, item),
                            os.path.join(self._outdir, item))
        else:
            msger.info("Pack all loop images together to %s" % self.pack_to)
            dstfile = os.path.join(self._outdir, self.pack_to)
            misc.packing(dstfile, self.__imgdir)

        # Name the mapping file after the pack archive (minus .tar[.xz]
        # style suffixes) or after the image itself.
        if self.pack_to:
            mountfp_xml = os.path.splitext(self.pack_to)[0]
            mountfp_xml = misc.strip_end(mountfp_xml, '.tar') + ".xml"
        else:
            mountfp_xml = self.name + ".xml"
        # save mount points mapping file to xml
        save_mountpoints(os.path.join(self._outdir, mountfp_xml),
                         self._instloops,
                         self.target_arch)

    def copy_attachment(self):
        """Copy any attachment files listed in self._attachment into the
        image scratch directory (missing sources are silently skipped)."""
        if not hasattr(self, '_attachment') or not self._attachment:
            return

        self._check_imgdir()

        msger.info("Copying attachment files...")
        for item in self._attachment:
            if not os.path.exists(item):
                continue
            dpath = os.path.join(self.__imgdir, os.path.basename(item))
            msger.verbose("Copy attachment %s to %s" % (item, dpath))
            shutil.copy(item, dpath)
418 | |||
diff --git a/scripts/lib/mic/imager/raw.py b/scripts/lib/mic/imager/raw.py new file mode 100644 index 0000000000..838191a6f1 --- /dev/null +++ b/scripts/lib/mic/imager/raw.py | |||
@@ -0,0 +1,501 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import stat | ||
20 | import shutil | ||
21 | |||
22 | from mic import kickstart, msger | ||
23 | from mic.utils import fs_related, runner, misc | ||
24 | from mic.utils.partitionedfs import PartitionedMount | ||
25 | from mic.utils.errors import CreatorError, MountError | ||
26 | from mic.imager.baseimager import BaseImageCreator | ||
27 | |||
28 | |||
class RawImageCreator(BaseImageCreator):
    """Installs a system into a file containing a partitioned disk image.

    A sparse file per target disk is formatted with a partition table,
    each partition is loopback mounted and the system is installed into
    the resulting virtual disk.  The disk image can subsequently be
    booted in a virtual machine or accessed with kpartx.
    """

    def __init__(self, creatoropts=None, pkgmgr=None, compress_image=None,
                 generate_bmap=None, fstab_entry="uuid"):
        """Initialize a RawImageCreator instance.

        Takes the same arguments as ImageCreator.__init__(), plus:
        compress_image -- compression method applied to generated images
        generate_bmap  -- whether to create block map (.bmap) files
        fstab_entry    -- "uuid" to reference devices by UUID in fstab,
                          anything else to use /dev/<disk><num> paths
        """
        BaseImageCreator.__init__(self, creatoropts, pkgmgr)

        self.__instloop = None      # PartitionedMount covering all disks
        self.__imgdir = None        # temp directory holding the raw files
        self.__disks = {}           # disk name -> SparseLoopbackDisk
        self.__disk_format = "raw"
        self._disk_names = []
        self._ptable_format = self.ks.handler.bootloader.ptable
        self.vmem = 512             # defaults written into the image XML
        self.vcpu = 1
        self.checksum = False
        self.use_uuid = fstab_entry == "uuid"
        self.appliance_version = None
        self.appliance_release = None
        self.compress_image = compress_image
        self.bmap_needed = generate_bmap
        # extlinux is only required when the installer framework does not
        # handle the bootloader itself.
        self._need_extlinux = not kickstart.use_installerfw(self.ks, "extlinux")

        self._dep_checks.extend(["sync", "kpartx", "parted"])
        if self._need_extlinux:
            self._dep_checks.extend(["extlinux"])

    def configure(self, repodata=None):
        """Run base configuration; make Xorg setuid if it is installed."""
        import subprocess

        def chroot():
            os.chroot(self._instroot)
            os.chdir("/")

        if os.path.exists(self._instroot + "/usr/bin/Xorg"):
            subprocess.call(["/bin/chmod", "u+s", "/usr/bin/Xorg"],
                            preexec_fn=chroot)

        BaseImageCreator.configure(self, repodata)

    def _get_fstab(self):
        """Build the contents of the image's /etc/fstab.

        Returns None when the installer framework generates fstab itself.
        """
        if kickstart.use_installerfw(self.ks, "fstab"):
            # The fstab file will be generated by installer framework
            # scripts instead.
            return None

        s = ""
        for mp in self.__instloop.mountOrder:
            # Every entry in mountOrder is expected to have a matching
            # partition record; 'p' stays None otherwise and the lookup
            # below would fail loudly.
            p = None
            for p1 in self.__instloop.partitions:
                if p1['mountpoint'] == mp:
                    p = p1
                    break

            if self.use_uuid and p['uuid']:
                device = "UUID=%s" % p['uuid']
            else:
                device = "/dev/%s%-d" % (p['disk_name'], p['num'])

            s += "%(device)s %(mountpoint)s %(fstype)s %(fsopts)s 0 0\n" % {
                'device': device,
                'mountpoint': p['mountpoint'],
                'fstype': p['fstype'],
                'fsopts': "defaults,noatime" if not p['fsopts'] else p['fsopts']}

            if p['mountpoint'] == "/":
                # Subvolumes of the root partition get their own rows.
                for subvol in self.__instloop.subvolumes:
                    if subvol['mountpoint'] == "/":
                        continue
                    s += "%(device)s %(mountpoint)s %(fstype)s %(fsopts)s 0 0\n" % {
                        'device': "/dev/%s%-d" % (p['disk_name'], p['num']),
                        'mountpoint': subvol['mountpoint'],
                        'fstype': p['fstype'],
                        'fsopts': "defaults,noatime" if not subvol['fsopts'] else subvol['fsopts']}

        s += "devpts /dev/pts devpts gid=5,mode=620 0 0\n"
        s += "tmpfs /dev/shm tmpfs defaults 0 0\n"
        s += "proc /proc proc defaults 0 0\n"
        s += "sysfs /sys sysfs defaults 0 0\n"
        return s

    def _create_mkinitrd_config(self):
        """write to tell which modules to be included in initrd"""

        mkinitrd = ""
        mkinitrd += "PROBE=\"no\"\n"
        mkinitrd += "MODULES+=\"ext3 ata_piix sd_mod libata scsi_mod\"\n"
        mkinitrd += "rootfs=\"ext3\"\n"
        mkinitrd += "rootopts=\"defaults\"\n"

        msger.debug("Writing mkinitrd config %s/etc/sysconfig/mkinitrd" \
                    % self._instroot)
        # NOTE(review): mode=644 is decimal (0o1204); 0o755 was probably
        # intended -- kept as-is to preserve existing behavior. Confirm.
        os.makedirs(self._instroot + "/etc/sysconfig/", mode=644)
        with open(self._instroot + "/etc/sysconfig/mkinitrd", "w") as cfg:
            cfg.write(mkinitrd)

    def _get_parts(self):
        """Return the kickstart partition list, injecting a default root
        partition when the ks file defines none."""
        if not self.ks:
            raise CreatorError("Failed to get partition info, "
                               "please check your kickstart setting.")

        # Set a default partition if no partition is given out
        if not self.ks.handler.partition.partitions:
            partstr = "part / --size 1900 --ondisk sda --fstype=ext3"
            args = partstr.split()
            pd = self.ks.handler.partition.parse(args[1:])
            if pd not in self.ks.handler.partition.partitions:
                self.ks.handler.partition.partitions.append(pd)

        # partitions list from kickstart file
        return kickstart.get_partitions(self.ks)

    def get_disk_names(self):
        """ Returns a list of physical target disk names (e.g., 'sdb') which
        will be created. """

        if self._disk_names:
            return self._disk_names

        # get partition info from ks handler
        parts = self._get_parts()

        for part in parts:
            if part.disk:
                disk_name = part.disk
            else:
                raise CreatorError("Failed to create disks, no --ondisk "
                                   "specified in partition line of ks file")

            if part.mountpoint and not part.fstype:
                # Fix: the '%s' placeholder was previously never filled in.
                raise CreatorError("Failed to create disks, no --fstype "
                                   "specified for partition with mountpoint "
                                   "'%s' in the ks file" % part.mountpoint)

            self._disk_names.append(disk_name)

        return self._disk_names

    def _full_name(self, name, extension):
        """ Construct full file name for a file we generate. """
        return "%s-%s.%s" % (self.name, name, extension)

    def _full_path(self, path, name, extension):
        """ Construct full file path to a file we generate. """
        return os.path.join(path, self._full_name(name, extension))

    #
    # Actual implementation
    #
    def _mount_instroot(self, base_on=None):
        """Lay out partitions, create sparse disk files, and mount the
        whole stack at the install root."""
        parts = self._get_parts()
        self.__instloop = PartitionedMount(self._instroot)

        for p in parts:
            self.__instloop.add_partition(int(p.size),
                                          p.disk,
                                          p.mountpoint,
                                          p.fstype,
                                          p.label,
                                          fsopts=p.fsopts,
                                          boot=p.active,
                                          align=p.align,
                                          part_type=p.part_type)

        self.__instloop.layout_partitions(self._ptable_format)

        # Create the disks
        self.__imgdir = self._mkdtemp()
        for disk_name, disk in self.__instloop.disks.items():
            full_path = self._full_path(self.__imgdir, disk_name, "raw")
            msger.debug("Adding disk %s as %s with size %s bytes" \
                        % (disk_name, full_path, disk['min_size']))

            disk_obj = fs_related.SparseLoopbackDisk(full_path,
                                                     disk['min_size'])
            self.__disks[disk_name] = disk_obj
            self.__instloop.add_disk(disk_name, disk_obj)

        self.__instloop.mount()
        self._create_mkinitrd_config()

    def _get_required_packages(self):
        """Add the syslinux packages when extlinux must be installed
        (not needed on ARM targets)."""
        required_packages = BaseImageCreator._get_required_packages(self)
        if self._need_extlinux:
            if not self.target_arch or not self.target_arch.startswith("arm"):
                required_packages += ["syslinux", "syslinux-extlinux"]
        return required_packages

    def _get_excluded_packages(self):
        return BaseImageCreator._get_excluded_packages(self)

    def _get_syslinux_boot_config(self):
        """Return (root device path, root partition UUID) of the '/'
        partition; both None when no root partition exists."""
        rootdev = None
        root_part_uuid = None
        for p in self.__instloop.partitions:
            if p['mountpoint'] == "/":
                rootdev = "/dev/%s%-d" % (p['disk_name'], p['num'])
                root_part_uuid = p['partuuid']

        return (rootdev, root_part_uuid)

    def _create_syslinux_config(self):
        """Write boot/extlinux/extlinux.conf for the installed kernel(s)."""

        splash = os.path.join(self._instroot, "boot/extlinux")
        if os.path.exists(splash):
            splashline = "menu background splash.jpg"
        else:
            splashline = ""

        (rootdev, root_part_uuid) = self._get_syslinux_boot_config()
        options = self.ks.handler.bootloader.appendLine

        # XXX don't hardcode default kernel - see livecd code
        syslinux_conf = ""
        syslinux_conf += "prompt 0\n"
        syslinux_conf += "timeout 1\n"
        syslinux_conf += "\n"
        syslinux_conf += "default vesamenu.c32\n"
        syslinux_conf += "menu autoboot Starting %s...\n" % self.distro_name
        syslinux_conf += "menu hidden\n"
        syslinux_conf += "\n"
        syslinux_conf += "%s\n" % splashline
        syslinux_conf += "menu title Welcome to %s!\n" % self.distro_name
        syslinux_conf += "menu color border 0 #ffffffff #00000000\n"
        syslinux_conf += "menu color sel 7 #ffffffff #ff000000\n"
        syslinux_conf += "menu color title 0 #ffffffff #00000000\n"
        syslinux_conf += "menu color tabmsg 0 #ffffffff #00000000\n"
        syslinux_conf += "menu color unsel 0 #ffffffff #00000000\n"
        syslinux_conf += "menu color hotsel 0 #ff000000 #ffffffff\n"
        syslinux_conf += "menu color hotkey 7 #ffffffff #ff000000\n"
        syslinux_conf += "menu color timeout_msg 0 #ffffffff #00000000\n"
        syslinux_conf += "menu color timeout 0 #ffffffff #00000000\n"
        syslinux_conf += "menu color cmdline 0 #ffffffff #00000000\n"

        versions = []
        kernels = self._get_kernel_versions()
        symkern = "%s/boot/vmlinuz" % self._instroot

        if os.path.lexists(symkern):
            # A /boot/vmlinuz symlink exists: emit one default entry for it.
            v = os.path.realpath(symkern).replace('%s-' % symkern, "")
            syslinux_conf += "label %s\n" % self.distro_name.lower()
            syslinux_conf += "\tmenu label %s (%s)\n" % (self.distro_name, v)
            syslinux_conf += "\tlinux ../vmlinuz\n"
            if self._ptable_format == 'msdos':
                rootstr = rootdev
            else:
                # GPT disks must boot by partition UUID.
                if not root_part_uuid:
                    raise MountError("Cannot find the root GPT partition UUID")
                rootstr = "PARTUUID=%s" % root_part_uuid
            syslinux_conf += "\tappend ro root=%s %s\n" % (rootstr, options)
            syslinux_conf += "\tmenu default\n"
        else:
            # One entry per installed kernel version; the first is default.
            for kernel in kernels:
                for version in kernels[kernel]:
                    versions.append(version)

            footlabel = 0
            for v in versions:
                syslinux_conf += "label %s%d\n" \
                                 % (self.distro_name.lower(), footlabel)
                syslinux_conf += "\tmenu label %s (%s)\n" % (self.distro_name, v)
                syslinux_conf += "\tlinux ../vmlinuz-%s\n" % v
                syslinux_conf += "\tappend ro root=%s %s\n" \
                                 % (rootdev, options)
                if footlabel == 0:
                    syslinux_conf += "\tmenu default\n"
                footlabel += 1

        msger.debug("Writing syslinux config %s/boot/extlinux/extlinux.conf" \
                    % self._instroot)
        with open(self._instroot + "/boot/extlinux/extlinux.conf", "w") as cfg:
            cfg.write(syslinux_conf)

    def _install_syslinux(self):
        """Write the MBR of every disk and run extlinux on /boot/extlinux."""
        for name in self.__disks.keys():
            loopdev = self.__disks[name].device

            # Set MBR
            mbrfile = "%s/usr/share/syslinux/" % self._instroot
            if self._ptable_format == 'gpt':
                mbrfile += "gptmbr.bin"
            else:
                mbrfile += "mbr.bin"

            msger.debug("Installing syslinux bootloader '%s' to %s" % \
                        (mbrfile, loopdev))

            # Fail early (OSError) if the MBR file does not exist.
            os.stat(mbrfile)
            rc = runner.show(['dd', 'if=%s' % mbrfile, 'of=' + loopdev])
            if rc != 0:
                raise MountError("Unable to set MBR to %s" % loopdev)

            # Ensure all data is flushed to disk before doing syslinux install
            runner.quiet('sync')

            fullpathsyslinux = fs_related.find_binary_path("extlinux")
            rc = runner.show([fullpathsyslinux,
                              "-i",
                              "%s/boot/extlinux" % self._instroot])
            if rc != 0:
                raise MountError("Unable to install syslinux bootloader to %s" \
                                 % loopdev)

    def _create_bootconfig(self):
        """Configure and install extlinux, but only when it is needed and
        the syslinux files are actually present in the image."""
        if self._need_extlinux \
           and os.path.exists("%s/usr/share/syslinux/" % (self._instroot)) \
           and os.path.exists("%s/boot/extlinux/" % (self._instroot)):
            self._create_syslinux_config()
            self._install_syslinux()

    def _unmount_instroot(self):
        """Tear down the partitioned mount; unmount failures only warn."""
        if self.__instloop is not None:
            try:
                self.__instloop.cleanup()
            except MountError as err:
                msger.warning("%s" % err)

    def _resparse(self, size=None):
        """Rebuild sparseness of the disk files; size=0 shrinks them to
        their minimum size."""
        return self.__instloop.resparse(size)

    def _get_post_scripts_env(self, in_chroot):
        """Extend the %post script environment with per-partition IDs."""
        env = BaseImageCreator._get_post_scripts_env(self, in_chroot)

        # Export the file-system UUIDs and partition UUIDs (AKA PARTUUIDs)
        for p in self.__instloop.partitions:
            env.update(self._set_part_env(p['ks_pnum'], "UUID", p['uuid']))
            env.update(self._set_part_env(p['ks_pnum'], "PARTUUID", p['partuuid']))

        return env

    def _stage_final_image(self):
        """Stage the final system image in _outdir.
        write meta data
        """
        self._resparse()

        if self.compress_image:
            for imgfile in os.listdir(self.__imgdir):
                if imgfile.endswith('.raw') or imgfile.endswith('bin'):
                    imgpath = os.path.join(self.__imgdir, imgfile)
                    misc.compressing(imgpath, self.compress_image)

        if self.pack_to:
            dst = os.path.join(self._outdir, self.pack_to)
            msger.info("Pack all raw images to %s" % dst)
            misc.packing(dst, self.__imgdir)
        else:
            msger.debug("moving disks to stage location")
            for imgfile in os.listdir(self.__imgdir):
                src = os.path.join(self.__imgdir, imgfile)
                dst = os.path.join(self._outdir, imgfile)
                msger.debug("moving %s to %s" % (src, dst))
                shutil.move(src, dst)
        self._write_image_xml()

    def _write_image_xml(self):
        """Write <name>.xml describing the generated appliance image
        (domain settings plus per-disk storage records)."""
        imgarch = "i686"
        if self.target_arch and self.target_arch.startswith("arm"):
            imgarch = "arm"
        xml = "<image>\n"

        name_attributes = ""
        if self.appliance_version:
            name_attributes += " version='%s'" % self.appliance_version
        if self.appliance_release:
            name_attributes += " release='%s'" % self.appliance_release
        xml += " <name%s>%s</name>\n" % (name_attributes, self.name)
        xml += " <domain>\n"
        # XXX don't hardcode - determine based on the kernel we installed for
        # grub baremetal vs xen
        xml += " <boot type='hvm'>\n"
        xml += " <guest>\n"
        xml += " <arch>%s</arch>\n" % imgarch
        xml += " </guest>\n"
        xml += " <os>\n"
        xml += " <loader dev='hd'/>\n"
        xml += " </os>\n"

        # Map the disks to hda, hdb, ... in iteration order.
        i = 0
        for name in self.__disks.keys():
            full_name = self._full_name(name, self.__disk_format)
            xml += " <drive disk='%s' target='hd%s'/>\n" \
                   % (full_name, chr(ord('a') + i))
            i = i + 1

        xml += " </boot>\n"
        xml += " <devices>\n"
        xml += " <vcpu>%s</vcpu>\n" % self.vcpu
        xml += " <memory>%d</memory>\n" % (self.vmem * 1024)
        for network in self.ks.handler.network.network:
            xml += " <interface/>\n"
        xml += " <graphics/>\n"
        xml += " </devices>\n"
        xml += " </domain>\n"
        xml += " <storage>\n"

        if self.checksum is True:
            for name in self.__disks.keys():
                diskpath = self._full_path(self._outdir, name, \
                                           self.__disk_format)
                full_name = self._full_name(name, self.__disk_format)

                msger.debug("Generating disk signature for %s" % full_name)

                xml += " <disk file='%s' use='system' format='%s'>\n" \
                       % (full_name, self.__disk_format)

                hashes = misc.calc_hashes(diskpath, ('sha1', 'sha256'))

                xml += " <checksum type='sha1'>%s</checksum>\n" \
                       % hashes[0]
                xml += " <checksum type='sha256'>%s</checksum>\n" \
                       % hashes[1]
                xml += " </disk>\n"
        else:
            for name in self.__disks.keys():
                full_name = self._full_name(name, self.__disk_format)
                xml += " <disk file='%s' use='system' format='%s'/>\n" \
                       % (full_name, self.__disk_format)

        xml += " </storage>\n"
        xml += "</image>\n"

        msger.debug("writing image XML to %s/%s.xml" % (self._outdir, self.name))
        with open("%s/%s.xml" % (self._outdir, self.name), "w") as cfg:
            cfg.write(xml)

    def generate_bmap(self):
        """ Generate block map file for the image. The idea is that while disk
        images we generate may be large (e.g., 4GiB), they may actually contain
        only little real data, e.g., 512MiB. This data are files, directories,
        file-system meta-data, partition table, etc. In other words, when
        flashing the image to the target device, you do not have to copy all the
        4GiB of data, you can copy only 512MiB of it, which is 4 times faster.

        This function generates the block map file for an arbitrary image that
        mic has generated. The block map file is basically an XML file which
        contains a list of blocks which have to be copied to the target device.
        The other blocks are not used and there is no need to copy them. """

        if self.bmap_needed is None:
            return

        from mic.utils import BmapCreate
        msger.info("Generating the map file(s)")

        for name in self.__disks.keys():
            image = self._full_path(self.__imgdir, name, self.__disk_format)
            bmap_file = self._full_path(self._outdir, name, "bmap")

            msger.debug("Generating block map file '%s'" % bmap_file)

            try:
                creator = BmapCreate.BmapCreate(image, bmap_file)
                creator.generate()
                del creator
            except BmapCreate.Error as err:
                raise CreatorError("Failed to create bmap file: %s" % str(err))
diff --git a/scripts/lib/mic/kickstart/__init__.py b/scripts/lib/mic/kickstart/__init__.py new file mode 100644 index 0000000000..72f3ca6849 --- /dev/null +++ b/scripts/lib/mic/kickstart/__init__.py | |||
@@ -0,0 +1,892 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2007 Red Hat, Inc. | ||
4 | # Copyright (c) 2009, 2010, 2011 Intel, Inc. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify it | ||
7 | # under the terms of the GNU General Public License as published by the Free | ||
8 | # Software Foundation; version 2 of the License | ||
9 | # | ||
10 | # This program is distributed in the hope that it will be useful, but | ||
11 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
12 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
13 | # for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along | ||
16 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
17 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | |||
19 | import os, sys, re | ||
20 | import shutil | ||
21 | import subprocess | ||
22 | import string | ||
23 | |||
24 | import pykickstart.sections as kssections | ||
25 | import pykickstart.commands as kscommands | ||
26 | import pykickstart.constants as ksconstants | ||
27 | import pykickstart.errors as kserrors | ||
28 | import pykickstart.parser as ksparser | ||
29 | import pykickstart.version as ksversion | ||
30 | from pykickstart.handlers.control import commandMap | ||
31 | from pykickstart.handlers.control import dataMap | ||
32 | |||
33 | from mic import msger | ||
34 | from mic.utils import errors, misc, runner, fs_related as fs | ||
35 | from custom_commands import desktop, micrepo, wicboot, partition, installerfw | ||
36 | |||
37 | |||
38 | AUTH_URL_PTN = r"(?P<scheme>.*)://(?P<username>.*)(:?P<password>.*)?@(?P<url>.*)" | ||
39 | |||
40 | |||
class PrepackageSection(kssections.Section):
    """Custom kickstart %prepackages section.

    Each non-comment line names a package to be recorded in the
    handler's 'prepackages' set.
    """
    sectionOpen = "%prepackages"

    def handleLine(self, line):
        """Strip a trailing '#' comment and record the remaining text."""
        if not self.handler:
            return

        (h, s, t) = line.partition('#')
        line = h.rstrip()

        self.handler.prepackages.add([line])

    def handleHeader(self, lineno, args):
        kssections.Section.handleHeader(self, lineno, args)
55 | |||
class AttachmentSection(kssections.Section):
    """Custom kickstart %attachment section.

    Each non-comment line names a file to be recorded in the handler's
    'attachment' set (later copied into the image by copy_attachment).
    """
    sectionOpen = "%attachment"

    def handleLine(self, line):
        """Strip a trailing '#' comment and record the remaining text."""
        if not self.handler:
            return

        (h, s, t) = line.partition('#')
        line = h.rstrip()

        self.handler.attachment.add([line])

    def handleHeader(self, lineno, args):
        kssections.Section.handleHeader(self, lineno, args)
70 | |||
def apply_wrapper(func):
    """Decorator for KickstartConfig.apply() methods.

    Catches OSError/IOError/KsError from the wrapped call and asks the
    user whether to skip the failed configuration step; re-raises when
    the user declines.  The wrapped function's return value is discarded
    (apply() methods return nothing).
    """
    def wrapper(*kargs, **kwargs):
        try:
            func(*kargs, **kwargs)
        except (OSError, IOError, errors.KsError), err:
            # kargs[0] is 'self' -- name the config class in the prompt.
            cfgcls = kargs[0].__class__.__name__
            if msger.ask("Failed to apply %s, skip and continue?" % cfgcls):
                msger.warning("%s" % err)
                pass
            else:
                # just throw out the exception
                raise
    return wrapper
84 | |||
def read_kickstart(path):
    """Parse a kickstart file and return a KickstartParser instance.

    This is a simple utility function which takes a path to a kickstart file,
    parses it and returns a pykickstart KickstartParser instance which can
    be then passed to an ImageCreator constructor.

    If an error occurs and the user chooses not to continue, a KsError
    exception is raised.
    """

    #version = ksversion.makeVersion()
    #ks = ksparser.KickstartParser(version)

    # Register the mic/wic-specific command and data classes on top of
    # the development (latest) pykickstart command set.
    using_version = ksversion.DEVEL
    commandMap[using_version]["desktop"] = desktop.Mic_Desktop
    commandMap[using_version]["repo"] = micrepo.Mic_Repo
    commandMap[using_version]["bootloader"] = wicboot.Wic_Bootloader
    commandMap[using_version]["part"] = partition.Wic_Partition
    commandMap[using_version]["partition"] = partition.Wic_Partition
    commandMap[using_version]["installerfw"] = installerfw.Mic_installerfw
    dataMap[using_version]["RepoData"] = micrepo.Mic_RepoData
    dataMap[using_version]["PartData"] = partition.Wic_PartData
    superclass = ksversion.returnClassForVersion(version=using_version)

    class KSHandlers(superclass):
        # Handler extended with containers for the custom %prepackages
        # and %attachment sections.
        def __init__(self):
            superclass.__init__(self, mapping=commandMap[using_version])
            self.prepackages = ksparser.Packages()
            self.attachment = ksparser.Packages()

    ks = ksparser.KickstartParser(KSHandlers(), errorsAreFatal=False)
    ks.registerSection(PrepackageSection(ks.handler))
    ks.registerSection(AttachmentSection(ks.handler))

    try:
        ks.readKickstart(path)
    except (kserrors.KickstartParseError, kserrors.KickstartError), err:
        # Parse errors are non-fatal if the user agrees to continue.
        if msger.ask("Errors occured on kickstart file, skip and continue?"):
            msger.warning("%s" % err)
            pass
        else:
            raise errors.KsError("%s" % err)

    return ks
129 | |||
class KickstartConfig(object):
    """Base class for objects that apply one kickstart setting to the
    system installed under ``instroot``."""

    def __init__(self, instroot):
        self.instroot = instroot

    def path(self, subpath):
        """Map an absolute in-image path to its location on the host."""
        return self.instroot + subpath

    def _check_sysconfig(self):
        """Create /etc/sysconfig inside the image if it is missing."""
        sysconfig = self.path("/etc/sysconfig")
        if not os.path.exists(sysconfig):
            fs.makedirs(sysconfig)

    def chroot(self):
        """Enter the image root; intended as a subprocess preexec_fn."""
        os.chroot(self.instroot)
        os.chdir("/")

    def call(self, args):
        """Run args inside the image via chroot, after verifying that
        the executable exists there."""
        if not os.path.exists(self.instroot + "/" + args[0]):
            raise errors.KsError("Can't find %s in chroot" % args[0])
        subprocess.call(args, preexec_fn=self.chroot)

    def apply(self):
        """Subclasses override this to apply their configuration."""
        pass
153 | |||
class LanguageConfig(KickstartConfig):
    """A class to apply a kickstart language configuration to a system."""
    @apply_wrapper
    def apply(self, kslang):
        """Write LANG to /etc/sysconfig/i18n when a language was given."""
        self._check_sysconfig()
        if kslang.lang:
            f = open(self.path("/etc/sysconfig/i18n"), "w+")
            f.write("LANG=\"" + kslang.lang + "\"\n")
            f.close()
163 | |||
class KeyboardConfig(KickstartConfig):
    """A class to apply a kickstart keyboard configuration to a system."""
    @apply_wrapper
    def apply(self, kskeyboard):
        # Intentionally a no-op: console/X keymap handling was never
        # implemented (see FIXME below).
        #
        # FIXME:
        # should this impact the X keyboard config too?
        # or do we want to make X be able to do this mapping?
        #
        #k = rhpl.keyboard.Keyboard()
        #if kskeyboard.keyboard:
        #    k.set(kskeyboard.keyboard)
        #k.write(self.instroot)
        pass
178 | |||
class TimezoneConfig(KickstartConfig):
    """A class to apply a kickstart timezone configuration to a system."""
    @apply_wrapper
    def apply(self, kstimezone):
        """Write /etc/sysconfig/clock and install /etc/localtime."""
        self._check_sysconfig()
        tz = kstimezone.timezone or "America/New_York"
        utc = str(kstimezone.isUtc)

        f = open(self.path("/etc/sysconfig/clock"), "w+")
        f.write("ZONE=\"" + tz + "\"\n")
        f.write("UTC=" + utc + "\n")
        f.close()
        tz_source = "/usr/share/zoneinfo/%s" % (tz)
        tz_dest = "/etc/localtime"
        try:
            # Prefer a 'cp' from inside the chroot so the copy uses the
            # image's binaries; fall back to the host's 'cp' on host paths.
            cpcmd = fs.find_binary_inchroot('cp', self.instroot)
            if cpcmd:
                self.call([cpcmd, "-f", tz_source, tz_dest])
            else:
                cpcmd = fs.find_binary_path('cp')
                subprocess.call([cpcmd, "-f",
                                 self.path(tz_source),
                                 self.path(tz_dest)])
        except (IOError, OSError), (errno, msg):
            # Python 2 tuple unpacking of exception args; note 'errno'
            # shadows any module of the same name inside this handler.
            raise errors.KsError("Timezone setting error: %s" % msg)
204 | |||
class AuthConfig(KickstartConfig):
    """A class to apply a kickstart authconfig configuration to a system."""
    @apply_wrapper
    def apply(self, ksauthconfig):
        """Run authconfig in the chroot; defaults to shadow+md5 when the
        kickstart gives no options."""
        auth = ksauthconfig.authconfig or "--useshadow --enablemd5"
        args = ["/usr/share/authconfig/authconfig.py", "--update", "--nostart"]
        self.call(args + auth.split())
212 | |||
class FirewallConfig(KickstartConfig):
    """A class to apply a kickstart firewall configuration to a system."""
    @apply_wrapper
    def apply(self, ksfirewall):
        """Enable/disable the firewall via lokkit; no-op when lokkit is
        not installed in the image."""
        #
        # FIXME: should handle the rest of the options
        #
        if not os.path.exists(self.path("/usr/sbin/lokkit")):
            return
        if ksfirewall.enabled:
            status = "--enabled"
        else:
            status = "--disabled"

        self.call(["/usr/sbin/lokkit",
                   "-f", "--quiet", "--nostart", status])
229 | |||
class RootPasswordConfig(KickstartConfig):
    """A class to apply a kickstart root password configuration to a system."""
    def unset(self):
        # Delete the root password (passwordless login).
        self.call(["/usr/bin/passwd", "-d", "root"])

    def set_encrypted(self, password):
        # 'password' is already a crypt hash; install it verbatim.
        self.call(["/usr/sbin/usermod", "-p", password, "root"])

    def set_unencrypted(self, password):
        """Set a plaintext password by piping 'root:<pw>' to chpasswd -m
        inside the chroot."""
        for p in ("/bin/echo", "/usr/sbin/chpasswd"):
            if not os.path.exists("%s/%s" %(self.instroot, p)):
                raise errors.KsError("Unable to set unencrypted password due "
                                     "to lack of %s" % p)

        p1 = subprocess.Popen(["/bin/echo", "root:%s" %password],
                              stdout = subprocess.PIPE,
                              preexec_fn = self.chroot)
        p2 = subprocess.Popen(["/usr/sbin/chpasswd", "-m"],
                              stdin = p1.stdout,
                              stdout = subprocess.PIPE,
                              preexec_fn = self.chroot)
        p2.communicate()

    @apply_wrapper
    def apply(self, ksrootpw):
        """Dispatch on the kickstart 'rootpw' flavor."""
        if ksrootpw.isCrypted:
            self.set_encrypted(ksrootpw.password)
        elif ksrootpw.password != "":
            self.set_unencrypted(ksrootpw.password)
        else:
            self.unset()
261 | |||
class UserConfig(KickstartConfig):
    """Apply kickstart 'user' commands: create users and set passwords."""
    def set_empty_passwd(self, user):
        # Delete the user's password (passwordless login).
        self.call(["/usr/bin/passwd", "-d", user])

    def set_encrypted_passwd(self, user, password):
        # 'password' is already a crypt hash; install it verbatim.
        self.call(["/usr/sbin/usermod", "-p", "%s" % password, user])

    def set_unencrypted_passwd(self, user, password):
        """Set a plaintext password by piping 'user:<pw>' to chpasswd -m
        inside the chroot."""
        for p in ("/bin/echo", "/usr/sbin/chpasswd"):
            if not os.path.exists("%s/%s" %(self.instroot, p)):
                raise errors.KsError("Unable to set unencrypted password due "
                                     "to lack of %s" % p)

        p1 = subprocess.Popen(["/bin/echo", "%s:%s" %(user, password)],
                              stdout = subprocess.PIPE,
                              preexec_fn = self.chroot)
        p2 = subprocess.Popen(["/usr/sbin/chpasswd", "-m"],
                              stdin = p1.stdout,
                              stdout = subprocess.PIPE,
                              preexec_fn = self.chroot)
        p2.communicate()

    def addUser(self, userconfig):
        """Create one user from a kickstart 'user' entry (best effort:
        useradd failures only warn) and set its password."""
        args = [ "/usr/sbin/useradd" ]
        if userconfig.groups:
            args += [ "--groups", string.join(userconfig.groups, ",") ]
        if userconfig.name:
            # Always create a home directory under /home/<name>.
            args += [ "-m"]
            args += [ "-d", "/home/%s" % userconfig.name ]
            args.append(userconfig.name)
            try:
                dev_null = os.open("/dev/null", os.O_WRONLY)
                msger.debug('adding user with %s' % args)
                subprocess.call(args,
                                stdout = dev_null,
                                stderr = dev_null,
                                preexec_fn = self.chroot)
                os.close(dev_null)
            except:
                msger.warning('Cannot add user using "useradd"')

            if userconfig.password not in (None, ""):
                if userconfig.isCrypted:
                    self.set_encrypted_passwd(userconfig.name,
                                              userconfig.password)
                else:
                    self.set_unencrypted_passwd(userconfig.name,
                                                userconfig.password)
            else:
                self.set_empty_passwd(userconfig.name)
        else:
            # A 'user' command without a name is invalid.
            raise errors.KsError("Invalid kickstart command: %s" \
                                 % userconfig.__str__())

    @apply_wrapper
    def apply(self, user):
        """Create every user listed in the kickstart file."""
        for userconfig in user.userList:
            self.addUser(userconfig)
320 | |||
class ServicesConfig(KickstartConfig):
    """A class to apply a kickstart services configuration to a system."""
    @apply_wrapper
    def apply(self, ksservices):
        # Nothing to do on images without SysV chkconfig.
        if not os.path.exists(self.path("/sbin/chkconfig")):
            return
        # Enable first, then disable, matching kickstart semantics.
        for state, service_list in (("on", ksservices.enabled),
                                    ("off", ksservices.disabled)):
            for service in service_list:
                self.call(["/sbin/chkconfig", service, state])
331 | |||
class XConfig(KickstartConfig):
    """A class to apply a kickstart X configuration to a system."""
    @apply_wrapper
    def apply(self, ksxconfig):
        if ksxconfig.startX and os.path.exists(self.path("/etc/inittab")):
            # Switch the default runlevel from 3 to 5 so X starts on boot.
            # Bug fix: the mode was "rw+", which is not a valid stdio mode
            # string ("r" followed by a stray "w"); "r+" is the correct
            # mode for an in-place read-modify-write.
            f = open(self.path("/etc/inittab"), "r+")
            buf = f.read()
            # Same-length replacement, so rewriting from offset 0 without
            # truncating is safe.
            buf = buf.replace("id:3:initdefault", "id:5:initdefault")
            f.seek(0)
            f.write(buf)
            f.close()
        if ksxconfig.defaultdesktop:
            # Record the preferred desktop in /etc/sysconfig/desktop.
            self._check_sysconfig()
            f = open(self.path("/etc/sysconfig/desktop"), "w")
            f.write("DESKTOP="+ksxconfig.defaultdesktop+"\n")
            f.close()
348 | |||
class DesktopConfig(KickstartConfig):
    """A class to apply a kickstart desktop configuration to a system."""
    @apply_wrapper
    def apply(self, ksdesktop):
        if ksdesktop.defaultdesktop:
            # Record the default desktop in /etc/sysconfig/desktop.
            self._check_sysconfig()
            f = open(self.path("/etc/sysconfig/desktop"), "w")
            f.write("DESKTOP="+ksdesktop.defaultdesktop+"\n")
            f.close()
            # Presence of gdm's custom.conf is used as the "GDM is the
            # display manager" signal; seed new users' session choice.
            if os.path.exists(self.path("/etc/gdm/custom.conf")):
                f = open(self.path("/etc/skel/.dmrc"), "w")
                f.write("[Desktop]\n")
                f.write("Session="+ksdesktop.defaultdesktop.lower()+"\n")
                f.close()
        if ksdesktop.session:
            # uxlaunch-based images keep the session in its own config.
            if os.path.exists(self.path("/etc/sysconfig/uxlaunch")):
                f = open(self.path("/etc/sysconfig/uxlaunch"), "a+")
                f.write("session="+ksdesktop.session.lower()+"\n")
                f.close()
        if ksdesktop.autologinuser:
            self._check_sysconfig()
            f = open(self.path("/etc/sysconfig/desktop"), "a+")
            f.write("AUTOLOGIN_USER=" + ksdesktop.autologinuser + "\n")
            f.close()
            # NOTE(review): this truncates ("w") any existing gdm
            # custom.conf rather than merging into it -- confirm intended.
            if os.path.exists(self.path("/etc/gdm/custom.conf")):
                f = open(self.path("/etc/gdm/custom.conf"), "w")
                f.write("[daemon]\n")
                f.write("AutomaticLoginEnable=true\n")
                f.write("AutomaticLogin=" + ksdesktop.autologinuser + "\n")
                f.close()
379 | |||
class MoblinRepoConfig(KickstartConfig):
    """A class to save kickstart repo configuration into the image."""
    def __create_repo_section(self, repo, type, fd):
        """Write one .repo section ("base", "debuginfo" or "source") for
        "repo" to the already-open file object "fd"."""
        baseurl = None
        mirrorlist = None
        reposuffix = {"base":"", "debuginfo":"-debuginfo", "source":"-source"}
        reponame = repo.name + reposuffix[type]
        if type == "base":
            if repo.baseurl:
                baseurl = repo.baseurl
            if repo.mirrorlist:
                mirrorlist = repo.mirrorlist

        elif type == "debuginfo":
            # Debuginfo URL convention: strip the last path component of
            # the base URL and append "/debug".
            if repo.baseurl:
                if repo.baseurl.endswith("/"):
                    baseurl = os.path.dirname(os.path.dirname(repo.baseurl))
                else:
                    baseurl = os.path.dirname(repo.baseurl)
                baseurl += "/debug"

            if repo.mirrorlist:
                # Splice "debug-" in before the "$..." variable suffix.
                variant = repo.mirrorlist[repo.mirrorlist.find("$"):]
                mirrorlist = repo.mirrorlist[0:repo.mirrorlist.find("$")]
                mirrorlist += "debug" + "-" + variant

        elif type == "source":
            # Source URL convention: two path components up, then "/source".
            if repo.baseurl:
                if repo.baseurl.endswith("/"):
                    baseurl = os.path.dirname(
                                  os.path.dirname(
                                      os.path.dirname(repo.baseurl)))
                else:
                    baseurl = os.path.dirname(os.path.dirname(repo.baseurl))
                baseurl += "/source"

            if repo.mirrorlist:
                variant = repo.mirrorlist[repo.mirrorlist.find("$"):]
                mirrorlist = repo.mirrorlist[0:repo.mirrorlist.find("$")]
                mirrorlist += "source" + "-" + variant

        fd.write("[" + reponame + "]\n")
        fd.write("name=" + reponame + "\n")
        fd.write("failovermethod=priority\n")
        if baseurl:
            # Strip embedded user:password credentials from the URL so they
            # are not persisted into the image.
            auth_url = re.compile(AUTH_URL_PTN)
            m = auth_url.match(baseurl)
            if m:
                baseurl = "%s://%s" % (m.group('scheme'), m.group('url'))
            fd.write("baseurl=" + baseurl + "\n")
        if mirrorlist:
            fd.write("mirrorlist=" + mirrorlist + "\n")
        """ Skip saving proxy settings """
        #if repo.proxy:
        #    fd.write("proxy=" + repo.proxy + "\n")
        #if repo.proxy_username:
        #    fd.write("proxy_username=" + repo.proxy_username + "\n")
        #if repo.proxy_password:
        #    fd.write("proxy_password=" + repo.proxy_password + "\n")
        if repo.gpgkey:
            fd.write("gpgkey=" + repo.gpgkey + "\n")
            fd.write("gpgcheck=1\n")
        else:
            fd.write("gpgcheck=0\n")
        # debuginfo/source sections are written but left disabled.
        if type == "source" or type == "debuginfo" or repo.disable:
            fd.write("enabled=0\n")
        else:
            fd.write("enabled=1\n")
        fd.write("\n")

    def __create_repo_file(self, repo, repodir):
        """Create <repodir>/<name>.repo with a base section plus optional
        debuginfo/source sections."""
        fs.makedirs(self.path(repodir))
        f = open(self.path(repodir + "/" + repo.name + ".repo"), "w")
        self.__create_repo_section(repo, "base", f)
        if repo.debuginfo:
            self.__create_repo_section(repo, "debuginfo", f)
        if repo.source:
            self.__create_repo_section(repo, "source", f)
        f.close()

    @apply_wrapper
    def apply(self, ksrepo, repodata, repourl):
        """Save repos marked --save into /etc/zypp/repos.d and import
        their GPG keys into the image's RPM database."""
        for repo in ksrepo.repoList:
            # Allow the caller to override baseurls by repo name.
            if repo.name in repourl:
                repo.baseurl = repourl[repo.name]
            if repo.save:
                #self.__create_repo_file(repo, "/etc/yum.repos.d")
                self.__create_repo_file(repo, "/etc/zypp/repos.d")
        """ Import repo gpg keys """
        if repodata:
            for repo in repodata:
                if repo['repokey']:
                    runner.quiet(['rpm',
                                  "--root=%s" % self.instroot,
                                  "--import",
                                  repo['repokey']])
476 | |||
class RPMMacroConfig(KickstartConfig):
    """A class to apply the specified rpm macros to the filesystem"""
    @apply_wrapper
    def apply(self, ks):
        """Write /etc/rpm/macros.imgcreate reflecting the kickstart's
        %packages --excludedocs / --instLangs settings."""
        if not ks:
            return
        if not os.path.exists(self.path("/etc/rpm")):
            os.mkdir(self.path("/etc/rpm"))
        f = open(self.path("/etc/rpm/macros.imgcreate"), "w+")
        if exclude_docs(ks):
            f.write("%_excludedocs 1\n")
        # Disable SELinux file-context labeling during rpm installs.
        f.write("%__file_context_path %{nil}\n")
        # NOTE(review): inst_langs() returns "" when unset, so this
        # condition is effectively always true -- confirm intended.
        if inst_langs(ks) != None:
            f.write("%_install_langs ")
            f.write(inst_langs(ks))
            f.write("\n")
        f.close()
494 | |||
class NetworkConfig(KickstartConfig):
    """A class to apply a kickstart network configuration to a system."""
    def write_ifcfg(self, network):
        """Write /etc/sysconfig/network-scripts/ifcfg-<device> for one
        kickstart "network" entry."""
        p = self.path("/etc/sysconfig/network-scripts/ifcfg-" + network.device)

        f = file(p, "w+")
        os.chmod(p, 0644)

        f.write("DEVICE=%s\n" % network.device)
        f.write("BOOTPROTO=%s\n" % network.bootProto)

        if network.bootProto.lower() == "static":
            if network.ip:
                f.write("IPADDR=%s\n" % network.ip)
            if network.netmask:
                f.write("NETMASK=%s\n" % network.netmask)

        if network.onboot:
            f.write("ONBOOT=on\n")
        else:
            f.write("ONBOOT=off\n")

        if network.essid:
            f.write("ESSID=%s\n" % network.essid)

        if network.ethtool:
            # Default to autonegotiation off unless the user set it.
            if network.ethtool.find("autoneg") == -1:
                network.ethtool = "autoneg off " + network.ethtool
            f.write("ETHTOOL_OPTS=%s\n" % network.ethtool)

        if network.bootProto.lower() == "dhcp":
            if network.hostname:
                f.write("DHCP_HOSTNAME=%s\n" % network.hostname)
            if network.dhcpclass:
                f.write("DHCP_CLASSID=%s\n" % network.dhcpclass)

        if network.mtu:
            f.write("MTU=%s\n" % network.mtu)

        f.close()

    def write_wepkey(self, network):
        """Write the WEP key file for a wireless device, if a key was
        given."""
        if not network.wepkey:
            return

        p = self.path("/etc/sysconfig/network-scripts/keys-" + network.device)
        f = file(p, "w+")
        # Key material: owner read/write only.
        os.chmod(p, 0600)
        f.write("KEY=%s\n" % network.wepkey)
        f.close()

    def write_sysconfig(self, useipv6, hostname, gateway):
        """Write the global /etc/sysconfig/network file."""
        path = self.path("/etc/sysconfig/network")
        f = file(path, "w+")
        os.chmod(path, 0644)

        f.write("NETWORKING=yes\n")

        if useipv6:
            f.write("NETWORKING_IPV6=yes\n")
        else:
            f.write("NETWORKING_IPV6=no\n")

        if hostname:
            f.write("HOSTNAME=%s\n" % hostname)
        else:
            f.write("HOSTNAME=localhost.localdomain\n")

        if gateway:
            f.write("GATEWAY=%s\n" % gateway)

        f.close()

    def write_hosts(self, hostname):
        """Write /etc/hosts mapping 127.0.0.1 to the host name (plus its
        unqualified form) and the standard localhost aliases."""
        localline = ""
        if hostname and hostname != "localhost.localdomain":
            localline += hostname + " "
            l = hostname.split(".")
            if len(l) > 1:
                # Also map the short (unqualified) host name.
                localline += l[0] + " "
        localline += "localhost.localdomain localhost"

        path = self.path("/etc/hosts")
        f = file(path, "w+")
        os.chmod(path, 0644)
        f.write("127.0.0.1\t\t%s\n" % localline)
        f.write("::1\t\tlocalhost6.localdomain6 localhost6\n")
        f.close()

    def write_resolv(self, nodns, nameservers):
        """Write /etc/resolv.conf unless DNS was disabled (--nodns) or no
        nameservers were configured."""
        if nodns or not nameservers:
            return

        path = self.path("/etc/resolv.conf")
        f = file(path, "w+")
        os.chmod(path, 0644)

        for ns in (nameservers):
            if ns:
                f.write("nameserver %s\n" % ns)

        f.close()

    @apply_wrapper
    def apply(self, ksnet):
        """Apply all kickstart "network" commands: per-device ifcfg and
        key files plus the global sysconfig/hosts/resolv.conf files.
        Raises errors.KsError for invalid network commands."""
        fs.makedirs(self.path("/etc/sysconfig/network-scripts"))

        useipv6 = False
        nodns = False
        hostname = None
        gateway = None
        nameservers = None

        for network in ksnet.network:
            if not network.device:
                raise errors.KsError("No --device specified with "
                                     "network kickstart command")

            # Static configuration needs both an address and a netmask.
            if (network.onboot and network.bootProto.lower() != "dhcp" and
                not (network.ip and network.netmask)):
                raise errors.KsError("No IP address and/or netmask "
                                     "specified with static "
                                     "configuration for '%s'" %
                                     network.device)

            self.write_ifcfg(network)
            self.write_wepkey(network)

            # Global settings: the last network line supplying a value wins.
            if network.ipv6:
                useipv6 = True
            if network.nodns:
                nodns = True

            if network.hostname:
                hostname = network.hostname
            if network.gateway:
                gateway = network.gateway

            if network.nameserver:
                nameservers = network.nameserver.split(",")

        self.write_sysconfig(useipv6, hostname, gateway)
        self.write_hosts(hostname)
        self.write_resolv(nodns, nameservers)
639 | |||
def use_installerfw(ks, feature):
    """ Check if the installer framework has to be used for a feature
    "feature". """

    features = ks.handler.installerfw.features
    if not features:
        return False
    # "all" delegates every feature to the installer framework.
    return feature in features or "all" in features
649 | |||
def get_image_size(ks, default = None):
    """Return the size in bytes of the kickstart root ("/") partition,
    or "default" when none is defined.  The last matching partition
    line wins; sizes in the kickstart are megabytes."""
    size_mb = 0
    for part in ks.handler.partition.partitions:
        if part.mountpoint == "/" and part.size:
            size_mb = part.size
    if size_mb > 0:
        return int(size_mb) * 1024 * 1024
    return default
659 | |||
def get_image_fstype(ks, default = None):
    """Return the filesystem type of the kickstart root ("/") partition,
    or "default" when no root partition with an fstype exists."""
    for part in ks.handler.partition.partitions:
        if part.mountpoint == "/" and part.fstype:
            return part.fstype
    return default
665 | |||
def get_image_fsopts(ks, default = None):
    """Return the mount options of the kickstart root ("/") partition,
    or "default" when none are set."""
    for part in ks.handler.partition.partitions:
        if part.mountpoint == "/" and part.fsopts:
            return part.fsopts
    return default
671 | |||
def get_modules(ks):
    """Collect kernel module names from the kickstart "device" commands.

    Module names may be ":"-separated lists within a single device."""
    if isinstance(ks.handler.device, kscommands.device.FC3_Device):
        # Old single-device handler: the command object itself is the device.
        devices = [ks.handler.device]
    else:
        devices = list(ks.handler.device.deviceList)

    modules = []
    for device in devices:
        if device.moduleName:
            modules.extend(device.moduleName.split(":"))

    return modules
686 | |||
def get_timeout(ks, default = None):
    """Return the bootloader timeout as an int, or "default" when the
    kickstart does not set one."""
    timeout = getattr(ks.handler.bootloader, "timeout", None)
    if timeout is None:
        return default
    return int(timeout)
693 | |||
def get_kernel_args(ks, default = "ro rd.live.image"):
    """Return the kernel command line: "default" plus any bootloader
    --append arguments from the kickstart."""
    append = getattr(ks.handler.bootloader, "appendLine", None)
    if append is None:
        return default
    return "%s %s" % (default, append)
700 | |||
def get_menu_args(ks, default = ""):
    """Return the bootloader --menus value from the kickstart, or
    "default" when unset or empty."""
    menus = getattr(ks.handler.bootloader, "menus", None)
    if menus in (None, ""):
        return default
    return "%s" % menus
707 | |||
def get_default_kernel(ks, default = None):
    """Return the bootloader default kernel from the kickstart, or
    "default" when unset or empty."""
    kernel = getattr(ks.handler.bootloader, "default", None)
    if not kernel:
        return default
    return kernel
714 | |||
def get_repos(ks, repo_urls=None):
    """Collect repository definitions from the kickstart.

    Returns the collected values: one tuple per uniquely-named repo of
    (name, baseurl, mirrorlist, includepkgs, excludepkgs, proxy,
    proxy_username, proxy_password, debuginfo, source, gpgkey, disable,
    ssl_verify, nocache, cost, priority).  "repo_urls" optionally maps
    repo names to replacement baseurls (which also clears mirrorlist).
    A later repo with the same name overrides an earlier one."""
    repos = {}
    for repo in ks.handler.repo.repoList:
        inc = []
        if hasattr(repo, "includepkgs"):
            inc.extend(repo.includepkgs)

        exc = []
        if hasattr(repo, "excludepkgs"):
            exc.extend(repo.excludepkgs)

        baseurl = repo.baseurl
        mirrorlist = repo.mirrorlist

        if repo_urls and repo.name in repo_urls:
            baseurl = repo_urls[repo.name]
            mirrorlist = None

        if repo.name in repos:
            msger.warning("Overriding already specified repo %s" %(repo.name,))

        # Bug fix: proxy*, debuginfo, source, gpgkey and disable used to be
        # assigned only inside hasattr() guards, so a repo missing one of
        # those attributes raised NameError (first iteration) or silently
        # inherited the previous repo's value (later iterations).
        proxy = getattr(repo, "proxy", None)
        proxy_username = getattr(repo, "proxy_username", None)
        proxy_password = getattr(repo, "proxy_password", None)
        debuginfo = getattr(repo, "debuginfo", None)
        source = getattr(repo, "source", None)
        gpgkey = getattr(repo, "gpgkey", None)
        disable = getattr(repo, "disable", None)
        ssl_verify = True
        if hasattr(repo, "ssl_verify"):
            ssl_verify = repo.ssl_verify == "yes"
        nocache = getattr(repo, "nocache", False)
        cost = getattr(repo, "cost", None)
        priority = getattr(repo, "priority", None)

        repos[repo.name] = (repo.name, baseurl, mirrorlist, inc, exc,
                            proxy, proxy_username, proxy_password, debuginfo,
                            source, gpgkey, disable, ssl_verify, nocache,
                            cost, priority)

    return repos.values()
772 | |||
def convert_method_to_repo(ks):
    """Best-effort conversion of an old-style install-method line into a
    repo entry; silently ignores kickstarts with nothing to convert."""
    try:
        ks.handler.repo.methodToRepo()
    except (AttributeError, kserrors.KickstartError):
        pass
778 | |||
def get_attachment(ks, required=()):
    """Return the kickstart %attachment package list plus "required"."""
    return ks.handler.attachment.packageList + list(required)
781 | |||
def get_pre_packages(ks, required=()):
    """Return the kickstart %prepackages package list plus "required"."""
    return ks.handler.prepackages.packageList + list(required)
784 | |||
def get_packages(ks, required=()):
    """Return the kickstart %packages package list plus "required"."""
    return ks.handler.packages.packageList + list(required)
787 | |||
def get_groups(ks, required=()):
    """Return the kickstart %packages group list plus "required"."""
    return ks.handler.packages.groupList + list(required)
790 | |||
def get_excluded(ks, required=()):
    """Return the kickstart excluded-package list plus "required"."""
    return ks.handler.packages.excludedList + list(required)
793 | |||
def get_partitions(ks):
    """Return the kickstart partition list (the live list, not a copy)."""
    return ks.handler.partition.partitions
796 | |||
def ignore_missing(ks):
    """True when %packages was given --ignoremissing."""
    return ks.handler.packages.handleMissing == ksconstants.KS_MISSING_IGNORE
799 | |||
def exclude_docs(ks):
    """True when %packages was given --excludedocs."""
    return ks.handler.packages.excludeDocs
802 | |||
def inst_langs(ks):
    """Return the %packages --instLangs value, or "" when unset.

    The misspelled "instLange" attribute is checked first for
    compatibility with older pykickstart versions that used that name."""
    packages = ks.handler.packages
    for attr in ("instLange", "instLangs"):
        if hasattr(packages, attr):
            return getattr(packages, attr)
    return ""
809 | |||
def get_post_scripts(ks):
    """Return the %post script sections of the kickstart."""
    return [script for script in ks.handler.scripts
            if script.type == ksparser.KS_SCRIPT_POST]
817 | |||
def add_repo(ks, repostr):
    """Parse a kickstart "repo ..." line and append the resulting repo
    object to the repo list, skipping exact duplicates."""
    tokens = repostr.split()
    repoobj = ks.handler.repo.parse(tokens[1:])
    if repoobj and repoobj not in ks.handler.repo.repoList:
        ks.handler.repo.repoList.append(repoobj)
823 | |||
def remove_all_repos(ks):
    """Empty the kickstart repo list in place."""
    del ks.handler.repo.repoList[:]
827 | |||
def remove_duplicate_repos(ks):
    """Remove repos sharing a name or baseurl with an earlier repo,
    keeping the first occurrence.  Works in place on
    ks.handler.repo.repoList with two indices: i is the current keeper,
    j the candidate being compared against it."""
    i = 0
    j = i + 1
    while True:
        if len(ks.handler.repo.repoList) < 2:
            break
        if i >= len(ks.handler.repo.repoList) - 1:
            break
        name = ks.handler.repo.repoList[i].name
        baseurl = ks.handler.repo.repoList[i].baseurl
        if j < len(ks.handler.repo.repoList):
            if (ks.handler.repo.repoList[j].name == name or \
                ks.handler.repo.repoList[j].baseurl == baseurl):
                # Duplicate of repo i: delete it; j now addresses the next
                # unexamined entry, so it is not advanced.
                del ks.handler.repo.repoList[j]
            else:
                j += 1
            if j >= len(ks.handler.repo.repoList):
                # Repo i compared against all later entries: advance keeper.
                i += 1
                j = i + 1
        else:
            i += 1
            j = i + 1
850 | |||
def resolve_groups(creatoropts, repometadata):
    """Replace kickstart package groups with explicit package lists when
    the repo does not provide group data in the format the selected
    package manager understands (zypp wants patterns, yum wants comps)."""
    iszypp = False
    if 'zypp' == creatoropts['pkgmgr']:
        iszypp = True
    ks = creatoropts['ks']

    for repo in repometadata:
        """ Mustn't replace group with package list if repo is ready for the
            corresponding package manager.
        """

        if iszypp and repo["patterns"]:
            continue
        if not iszypp and repo["comps"]:
            continue

        # But we also must handle such cases, use zypp but repo only has comps,
        # use yum but repo only has patterns, use zypp but use_comps is true,
        # use yum but use_comps is false.
        groupfile = None
        if iszypp and repo["comps"]:
            groupfile = repo["comps"]
            get_pkglist_handler = misc.get_pkglist_in_comps
        if not iszypp and repo["patterns"]:
            groupfile = repo["patterns"]
            get_pkglist_handler = misc.get_pkglist_in_patterns

        if groupfile:
            # Walk the group list by index since entries are deleted while
            # iterating; only advance when the current group is kept.
            i = 0
            while True:
                if i >= len(ks.handler.packages.groupList):
                    break
                pkglist = get_pkglist_handler(
                    ks.handler.packages.groupList[i].name,
                    groupfile)
                if pkglist:
                    # Replace the group with its packages (de-duplicated).
                    del ks.handler.packages.groupList[i]
                    for pkg in pkglist:
                        if pkg not in ks.handler.packages.packageList:
                            ks.handler.packages.packageList.append(pkg)
                else:
                    i = i + 1
diff --git a/scripts/lib/mic/kickstart/custom_commands/__init__.py b/scripts/lib/mic/kickstart/custom_commands/__init__.py new file mode 100644 index 0000000000..6aed0ff6fa --- /dev/null +++ b/scripts/lib/mic/kickstart/custom_commands/__init__.py | |||
@@ -0,0 +1,17 @@ | |||
# Custom kickstart command/data classes exported by this package.
from desktop import Mic_Desktop
from micrepo import Mic_Repo, Mic_RepoData
from micpartition import Mic_Partition
from micpartition import Mic_PartData
from installerfw import Mic_installerfw
from partition import Wic_Partition

__all__ = (
    "Mic_Desktop",
    "Mic_Repo",
    "Mic_RepoData",
    "Mic_Partition",
    "Mic_PartData",
    "Mic_installerfw",
    "Wic_Partition",
    # NOTE(review): "Wic_PartData" is listed here but never imported above,
    # so "from ... import *" would fail on it -- confirm whether
    # "from partition import Wic_PartData" is missing.
    "Wic_PartData",
)
diff --git a/scripts/lib/mic/kickstart/custom_commands/desktop.py b/scripts/lib/mic/kickstart/custom_commands/desktop.py new file mode 100644 index 0000000000..c8bd647ae3 --- /dev/null +++ b/scripts/lib/mic/kickstart/custom_commands/desktop.py | |||
@@ -0,0 +1,95 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2008, 2009, 2010 Intel, Inc. | ||
4 | # | ||
5 | # Yi Yang <yi.y.yang@intel.com> | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify it | ||
8 | # under the terms of the GNU General Public License as published by the Free | ||
9 | # Software Foundation; version 2 of the License | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, but | ||
12 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
13 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
14 | # for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
18 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | |||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | |||
class Mic_Desktop(KickstartCommand):
    """Custom kickstart "desktop" command: default desktop, session,
    display manager and autologin user settings."""
    def __init__(self, writePriority=0,
                       defaultdesktop=None,
                       defaultdm=None,
                       autologinuser=None,
                       session=None):

        KickstartCommand.__init__(self, writePriority)

        # True when KSOptionParser no longer accepts "lineno" at
        # construction time (newer pykickstart passes it to parse_args).
        self.__new_version = False
        self.op = self._getParser()

        self.defaultdesktop = defaultdesktop
        self.autologinuser = autologinuser
        self.defaultdm = defaultdm
        self.session = session

    def __str__(self):
        """Serialize back to kickstart syntax; empty string when no
        option was set."""
        retval = ""

        if self.defaultdesktop != None:
            retval += " --defaultdesktop=%s" % self.defaultdesktop
        if self.session != None:
            retval += " --session=\"%s\"" % self.session
        if self.autologinuser != None:
            retval += " --autologinuser=%s" % self.autologinuser
        if self.defaultdm != None:
            retval += " --defaultdm=%s" % self.defaultdm

        if retval != "":
            retval = "# Default Desktop Settings\ndesktop %s\n" % retval

        return retval

    def _getParser(self):
        """Build the option parser, probing which KSOptionParser
        constructor signature this pykickstart version provides."""
        try:
            op = KSOptionParser(lineno=self.lineno)
        except TypeError:
            # Newer KSOptionParser has no "lineno" constructor argument.
            op = KSOptionParser()
            self.__new_version = True

        op.add_option("--defaultdesktop", dest="defaultdesktop",
                      action="store",
                      type="string",
                      nargs=1)
        op.add_option("--autologinuser", dest="autologinuser",
                      action="store",
                      type="string",
                      nargs=1)
        op.add_option("--defaultdm", dest="defaultdm",
                      action="store",
                      type="string",
                      nargs=1)
        op.add_option("--session", dest="session",
                      action="store",
                      type="string",
                      nargs=1)
        return op

    def parse(self, args):
        """Parse "desktop" command arguments; positional arguments are
        rejected with a KickstartValueError."""
        if self.__new_version:
            (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        else:
            (opts, extra) = self.op.parse_args(args=args)

        if extra:
            m = _("Unexpected arguments to %(command)s command: %(options)s") \
                % {"command": "desktop", "options": extra}
            raise KickstartValueError, formatErrorMsg(self.lineno, msg=m)

        self._setToSelf(self.op, opts)
diff --git a/scripts/lib/mic/kickstart/custom_commands/installerfw.py b/scripts/lib/mic/kickstart/custom_commands/installerfw.py new file mode 100644 index 0000000000..2466f1dc07 --- /dev/null +++ b/scripts/lib/mic/kickstart/custom_commands/installerfw.py | |||
@@ -0,0 +1,63 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2013 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | from pykickstart.base import * | ||
19 | from pykickstart.options import * | ||
20 | |||
21 | class Mic_installerfw(KickstartCommand): | ||
22 | """ This class implements the "installerfw" KS option. The argument | ||
23 | of the option is a comman-separated list of MIC features which have to be | ||
24 | disabled and instead, will be done in the installer. For example, | ||
25 | "installerfw=extlinux" disables all the MIC code which installs extlinux to | ||
26 | the target images, and instead, the extlinux or whatever boot-loader will | ||
27 | be installed by the installer instead. | ||
28 | |||
29 | The installer is a tool which is external to MIC, it comes from the | ||
30 | installation repositories and can be executed by MIC in order to perform | ||
31 | various configuration actions. The main point here is to make sure MIC has | ||
32 | no hard-wired knoledge about the target OS configuration. """ | ||
33 | |||
34 | removedKeywords = KickstartCommand.removedKeywords | ||
35 | removedAttrs = KickstartCommand.removedAttrs | ||
36 | |||
37 | def __init__(self, *args, **kwargs): | ||
38 | KickstartCommand.__init__(self, *args, **kwargs) | ||
39 | self.op = self._getParser() | ||
40 | self.features = kwargs.get("installerfw", None) | ||
41 | |||
42 | def __str__(self): | ||
43 | retval = KickstartCommand.__str__(self) | ||
44 | |||
45 | if self.features: | ||
46 | retval += "# Enable installer framework features\ninstallerfw\n" | ||
47 | |||
48 | return retval | ||
49 | |||
50 | def _getParser(self): | ||
51 | op = KSOptionParser() | ||
52 | return op | ||
53 | |||
54 | def parse(self, args): | ||
55 | (_, extra) = self.op.parse_args(args=args, lineno=self.lineno) | ||
56 | |||
57 | if len(extra) != 1: | ||
58 | msg = "Kickstart command \"installerfw\" requires one " \ | ||
59 | "argumet - a list of legacy features to disable" | ||
60 | raise KickstartValueError, formatErrorMsg(self.lineno, msg = msg) | ||
61 | |||
62 | self.features = extra[0].split(",") | ||
63 | return self | ||
diff --git a/scripts/lib/mic/kickstart/custom_commands/micboot.py b/scripts/lib/mic/kickstart/custom_commands/micboot.py new file mode 100644 index 0000000000..66d1678aa7 --- /dev/null +++ b/scripts/lib/mic/kickstart/custom_commands/micboot.py | |||
@@ -0,0 +1,49 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2008, 2009, 2010 Intel, Inc. | ||
4 | # | ||
5 | # Anas Nashif | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify it | ||
8 | # under the terms of the GNU General Public License as published by the Free | ||
9 | # Software Foundation; version 2 of the License | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, but | ||
12 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
13 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
14 | # for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
18 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | |||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | from pykickstart.commands.bootloader import * | ||
24 | |||
class Mic_Bootloader(F8_Bootloader):
    """F8 bootloader command extended with --menus and --ptable options."""
    def __init__(self, writePriority=10, appendLine="", driveorder=None,
                 forceLBA=False, location="", md5pass="", password="",
                 upgrade=False, menus=""):
        F8_Bootloader.__init__(self, writePriority, appendLine, driveorder,
                               forceLBA, location, md5pass, password, upgrade)

        # Bug fix: the constructor previously discarded the "menus"
        # argument by hard-coding self.menus = "".
        self.menus = menus
        # Default partition table type for the generated image.
        self.ptable = "msdos"

    def _getArgsAsStr(self):
        """Serialize, appending the extra flags when they have values."""
        ret = F8_Bootloader._getArgsAsStr(self)

        # Bug fix: the original tested "self.menus == \"\"", emitting a
        # bogus empty "--menus=" and dropping any real value.
        if self.menus:
            ret += " --menus=%s" %(self.menus,)
        if self.ptable:
            ret += " --ptable=\"%s\"" %(self.ptable,)
        return ret

    def _getParser(self):
        op = F8_Bootloader._getParser(self)
        op.add_option("--menus", dest="menus")
        op.add_option("--ptable", dest="ptable", type="string")
        return op
49 | |||
diff --git a/scripts/lib/mic/kickstart/custom_commands/micpartition.py b/scripts/lib/mic/kickstart/custom_commands/micpartition.py new file mode 100644 index 0000000000..59a87fb486 --- /dev/null +++ b/scripts/lib/mic/kickstart/custom_commands/micpartition.py | |||
@@ -0,0 +1,57 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Marko Saukko <marko.saukko@cybercom.com> | ||
4 | # | ||
5 | # Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies). | ||
6 | # | ||
7 | # This copyrighted material is made available to anyone wishing to use, modify, | ||
8 | # copy, or redistribute it subject to the terms and conditions of the GNU | ||
9 | # General Public License v.2. This program is distributed in the hope that it | ||
10 | # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the | ||
11 | # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | # See the GNU General Public License for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along with | ||
15 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
16 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | |||
18 | from pykickstart.commands.partition import * | ||
19 | |||
class Mic_PartData(FC4_PartData):
    """Partition data carrying the mic-specific extras (--align,
    --extoptions, --part-type) on top of FC4_PartData."""
    removedKeywords = FC4_PartData.removedKeywords
    removedAttrs = FC4_PartData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC4_PartData.__init__(self, *args, **kwargs)
        self.deleteRemovedAttrs()
        # Pick up the extra keywords parsed by Mic_Partition._getParser();
        # each defaults to None when absent.
        for extra in ("align", "extopts", "part_type"):
            setattr(self, extra, kwargs.get(extra, None))

    def _getArgsAsStr(self):
        """Serialize the part line, appending the mic-specific options."""
        extras = []

        if self.align:
            extras.append(" --align")
        if self.extopts:
            extras.append(" --extoptions=%s" % self.extopts)
        if self.part_type:
            extras.append(" --part-type=%s" % self.part_type)

        return FC4_PartData._getArgsAsStr(self) + "".join(extras)
42 | |||
class Mic_Partition(FC4_Partition):
    """Kickstart 'part' command accepting the extra options stored on
    Mic_PartData."""
    removedKeywords = FC4_Partition.removedKeywords
    removedAttrs = FC4_Partition.removedAttrs

    def _getParser(self):
        """Extend the FC4 partition parser with the mic options."""
        parser = FC4_Partition._getParser(self)
        # --align is given in kBytes, e.g. a value of 8 aligns the
        # partition start to an 8192-byte boundary.
        parser.add_option("--align", type="int", action="store",
                          dest="align", default=None)
        # Extra options passed through to the mkfs invocation.
        parser.add_option("--extoptions", type="string", action="store",
                          dest="extopts", default=None)
        # Explicit partition type identifier.
        parser.add_option("--part-type", type="string", action="store",
                          dest="part_type", default=None)
        return parser
diff --git a/scripts/lib/mic/kickstart/custom_commands/micrepo.py b/scripts/lib/mic/kickstart/custom_commands/micrepo.py new file mode 100644 index 0000000000..b31576e400 --- /dev/null +++ b/scripts/lib/mic/kickstart/custom_commands/micrepo.py | |||
@@ -0,0 +1,127 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2008, 2009, 2010 Intel, Inc. | ||
4 | # | ||
5 | # Yi Yang <yi.y.yang@intel.com> | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify it | ||
8 | # under the terms of the GNU General Public License as published by the Free | ||
9 | # Software Foundation; version 2 of the License | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, but | ||
12 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
13 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
14 | # for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
18 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | |||
20 | from pykickstart.base import * | ||
21 | from pykickstart.errors import * | ||
22 | from pykickstart.options import * | ||
23 | from pykickstart.commands.repo import * | ||
24 | |||
class Mic_RepoData(F8_RepoData):
    """Repository data extended with the mic options: proxy credentials,
    gpg key, ssl verification, priority and cache control."""

    def __init__(self, baseurl="", mirrorlist=None, name="", priority=None,
                 includepkgs=(), excludepkgs=(), save=False, proxy=None,
                 proxy_username=None, proxy_password=None, debuginfo=False,
                 source=False, gpgkey=None, disable=False, ssl_verify="yes",
                 nocache=False):
        # Forward to the base class only the keywords it understands, and
        # only when they actually carry a value.
        base_kwargs = {}
        # F8_RepoData keywords
        if includepkgs:
            base_kwargs['includepkgs'] = includepkgs
        if excludepkgs:
            base_kwargs['excludepkgs'] = excludepkgs
        # FC6_RepoData keywords
        if baseurl:
            base_kwargs['baseurl'] = baseurl
        if mirrorlist:
            base_kwargs['mirrorlist'] = mirrorlist
        if name:
            base_kwargs['name'] = name

        F8_RepoData.__init__(self, **base_kwargs)

        self.save = save
        self.proxy = proxy
        self.proxy_username = proxy_username
        self.proxy_password = proxy_password
        self.debuginfo = debuginfo
        self.disable = disable
        self.source = source
        self.gpgkey = gpgkey
        # Normalized to lower case once so later checks are case-insensitive.
        self.ssl_verify = ssl_verify.lower()
        self.priority = priority
        self.nocache = nocache

    def _getArgsAsStr(self):
        """Serialize the repo line, appending each mic-specific option."""
        pieces = [F8_RepoData._getArgsAsStr(self)]

        if self.save:
            pieces.append(" --save")
        if self.proxy:
            pieces.append(" --proxy=%s" % self.proxy)
        if self.proxy_username:
            pieces.append(" --proxyuser=%s" % self.proxy_username)
        if self.proxy_password:
            pieces.append(" --proxypasswd=%s" % self.proxy_password)
        if self.debuginfo:
            pieces.append(" --debuginfo")
        if self.source:
            pieces.append(" --source")
        if self.gpgkey:
            pieces.append(" --gpgkey=%s" % self.gpgkey)
        if self.disable:
            pieces.append(" --disable")
        if self.ssl_verify:
            pieces.append(" --ssl_verify=%s" % self.ssl_verify)
        if self.priority:
            pieces.append(" --priority=%s" % self.priority)
        if self.nocache:
            pieces.append(" --nocache")

        return "".join(pieces)
87 | |||
class Mic_Repo(F8_Repo):
    """Kickstart 'repo' command using Mic_RepoData and the extended
    mic option set."""

    def __init__(self, writePriority=0, repoList=None):
        F8_Repo.__init__(self, writePriority, repoList)

    def __str__(self):
        # str.join instead of the original repeated '+=' concatenation,
        # which is quadratic in the number of repos.
        return "".join(str(repo) for repo in self.repoList)

    def _getParser(self):
        """Extend the F8 repo parser with the mic-specific options.

        Note: the original defined an unused optparse callback
        ('list_cb') here; it was dead code and has been removed.
        """
        op = F8_Repo._getParser(self)
        op.add_option("--save", action="store_true", dest="save",
                      default=False)
        op.add_option("--proxy", type="string", action="store", dest="proxy",
                      default=None, nargs=1)
        op.add_option("--proxyuser", type="string", action="store",
                      dest="proxy_username", default=None, nargs=1)
        op.add_option("--proxypasswd", type="string", action="store",
                      dest="proxy_password", default=None, nargs=1)
        op.add_option("--debuginfo", action="store_true", dest="debuginfo",
                      default=False)
        op.add_option("--source", action="store_true", dest="source",
                      default=False)
        op.add_option("--disable", action="store_true", dest="disable",
                      default=False)
        op.add_option("--gpgkey", type="string", action="store", dest="gpgkey",
                      default=None, nargs=1)
        op.add_option("--ssl_verify", type="string", action="store",
                      dest="ssl_verify", default="yes")
        op.add_option("--priority", type="int", action="store", dest="priority",
                      default=None)
        op.add_option("--nocache", action="store_true", dest="nocache",
                      default=False)
        return op
diff --git a/scripts/lib/mic/kickstart/custom_commands/partition.py b/scripts/lib/mic/kickstart/custom_commands/partition.py new file mode 100644 index 0000000000..450d2d492d --- /dev/null +++ b/scripts/lib/mic/kickstart/custom_commands/partition.py | |||
@@ -0,0 +1,482 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2013, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This module provides the OpenEmbedded partition object definitions. | ||
22 | # | ||
23 | # AUTHORS | ||
24 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
25 | # | ||
26 | |||
27 | import shutil | ||
28 | |||
29 | from pykickstart.commands.partition import * | ||
30 | from mic.utils.oe.misc import * | ||
31 | from mic.kickstart.custom_commands import * | ||
32 | from mic.plugin import pluginmgr | ||
33 | |||
34 | import os | ||
35 | from mic.utils.oe.package_manager import * | ||
36 | |||
37 | partition_methods = { | ||
38 | "do_install_pkgs":None, | ||
39 | "do_stage_partition":None, | ||
40 | "do_prepare_partition":None, | ||
41 | "do_configure_partition":None, | ||
42 | } | ||
43 | |||
class Wic_PartData(Mic_PartData):
    """Partition data for wic images.

    Extends Mic_PartData with a --source plugin name and a --rootfs-dir
    override, plus the helpers that source plugins use to generate,
    size and record the partition content (self.source_file/self.size).
    """
    removedKeywords = Mic_PartData.removedKeywords
    removedAttrs = Mic_PartData.removedAttrs

    def __init__(self, *args, **kwargs):
        Mic_PartData.__init__(self, *args, **kwargs)
        self.deleteRemovedAttrs()
        self.source = kwargs.get("source", None)
        self.rootfs = kwargs.get("rootfs-dir", None)
        # Location of the generated partition image; set by prepare_*().
        self.source_file = ""
        # Actual partition size in MB; 0 until set by prepare_*().
        self.size = 0

    def _getArgsAsStr(self):
        """Serialize the part line, appending the wic-specific options."""
        retval = Mic_PartData._getArgsAsStr(self)

        if self.source:
            retval += " --source=%s" % self.source
        if self.rootfs:
            retval += " --rootfs-dir=%s" % self.rootfs

        return retval

    def get_rootfs(self):
        """
        Accessor for rootfs dir.
        """
        return self.rootfs

    def set_rootfs(self, rootfs):
        """
        Accessor for actual rootfs dir, which must be set by source
        plugins.
        """
        self.rootfs = rootfs

    def get_size(self):
        """
        Accessor for partition size, 0 or --size before set_size().
        """
        return self.size

    def set_size(self, size):
        """
        Accessor for actual partition size, which must be set by source
        plugins.
        """
        self.size = size

    def set_source_file(self, source_file):
        """
        Accessor for source_file, the location of the generated partition
        image, which must be set by source plugins.
        """
        self.source_file = source_file

    def get_extra_block_count(self, current_blocks):
        """
        The --size param is reflected in self.size (in MB), and we already
        have current_blocks (1k) blocks, calculate and return the
        number of (1k) blocks we need to add to get to --size, 0 if
        we're already there or beyond.
        """
        msger.debug("Requested partition size for %s: %d" % \
                    (self.mountpoint, self.size))

        if not self.size:
            return 0

        # self.size is MB; 1 MB == 1024 1k-blocks.
        requested_blocks = self.size * 1024

        msger.debug("Requested blocks %d, current_blocks %d" % \
                    (requested_blocks, current_blocks))

        if requested_blocks > current_blocks:
            return requested_blocks - current_blocks
        else:
            return 0

    def install_pkgs(self, creator, cr_workdir, oe_builddir, rootfs_dir,
                     bootimg_dir, kernel_dir, native_sysroot):
        """
        Prepare content for individual partitions, installing packages
        via the partition's source plugin (no-op when no --source given).
        """
        if not self.source:
            return

        self._source_methods = pluginmgr.get_source_plugin_methods(self.source, partition_methods)
        self._source_methods["do_install_pkgs"](self, creator,
                                                cr_workdir,
                                                oe_builddir,
                                                rootfs_dir,
                                                bootimg_dir,
                                                kernel_dir,
                                                native_sysroot)

    def install_pkgs_ipk(self, cr_workdir, oe_builddir, rootfs_dir,
                         native_sysroot, packages, repourl):
        """
        Install packages specified in the wks file using the opkg package
        manager.  This method is dependent on the bb module.
        """
        # Minimal variable environment handed to WicOpkgPM in place of a
        # full bitbake datastore.
        gVar = {}
        gVar["DEPLOY_DIR_IPK"] = os.path.join(oe_builddir, "tmp/deploy/ipk")

        # Run postinstall scripts even in offline mode
        # Use the arch priority package rather than higher version one if more than one candidate is found.
        #d.setVar("OPKG_ARGS", "--force_postinstall --prefer-arch-to-version")
        gVar["OPKG_ARGS"] = "--force_postinstall"

        # OPKG path relative to /output_path
        gVar["OPKGLIBDIR"] = "var/lib"

        source_url = repourl.split()

        # Generate feed uri names; it doesn't seem to matter what name
        # they have.  The last path component of each URL doubles as the
        # architecture list handed to the package manager.
        feed_uris = ""
        cnt = 0
        archs = ""
        for url in source_url:
            feed_uris += "cl_def_feed%d##%s\n" % (cnt, url)
            cnt += 1
            head, tail = os.path.split(url)
            archs += " " + tail

        # IPK_FEED_URIS with special formatting defines the URIs used as
        # the package source.
        gVar['IPK_FEED_URIS'] = feed_uris

        gVar['BUILD_IMAGES_FROM_FEEDS'] = "1"

        # We need to provide sysroot for utilities
        gVar['STAGING_DIR_NATIVE'] = native_sysroot

        # Set WORKDIR for output
        gVar['WORKDIR'] = cr_workdir

        # Set TMPDIR for output
        gVar['TMPDIR'] = os.path.join(cr_workdir, "tmp")

        # rootfs_dir may be a mapping (keyed by connection name) or a
        # plain path.
        if 'ROOTFS_DIR' in rootfs_dir:
            target_dir = rootfs_dir['ROOTFS_DIR']
        elif os.path.isdir(rootfs_dir):
            target_dir = rootfs_dir
        else:
            msg = "Couldn't find --rootfs-dir=%s connection"
            msg += " or it is not a valid path, exiting"
            msger.error(msg % rootfs_dir)

        # Need native sysroot /usr/bin/ for opkg-cl; change PATH to avoid
        # issues with host tools, saving the old value for restoration.
        defpath = os.environ['PATH']
        os.environ['PATH'] = native_sysroot + "/usr/bin/" + ":/bin:/usr/bin:"

        pseudo = "export PSEUDO_PREFIX=%s/usr;" % native_sysroot
        pseudo += "export PSEUDO_LOCALSTATEDIR=%s/../pseudo;" % target_dir
        pseudo += "export PSEUDO_PASSWD=%s;" % target_dir
        pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
        pseudo += "%s/usr/bin/pseudo " % native_sysroot

        pm = WicOpkgPM(gVar,
                       target_dir,
                       'opkg.conf',
                       archs,
                       pseudo,
                       native_sysroot)

        pm.update()

        pm.install(packages)

        # BUG FIX: the original did "os.environ['PATH'] += defpath + ..."
        # which appended the saved value to the already-modified PATH
        # instead of restoring it; restore the saved value as intended.
        os.environ['PATH'] = defpath


    def prepare(self, cr, cr_workdir, oe_builddir, rootfs_dir, bootimg_dir,
                kernel_dir, native_sysroot):
        """
        Prepare content for individual partitions, depending on
        partition command parameters.
        """
        if not self.source:
            # No source plugin: generate a swap or empty filesystem
            # directly, when a --fstype was given.
            if self.fstype and self.fstype == "swap":
                self.prepare_swap_partition(cr_workdir, oe_builddir,
                                            native_sysroot)
            elif self.fstype:
                self.prepare_empty_partition(cr_workdir, oe_builddir,
                                             native_sysroot)
            return

        # Delegate to the source plugin: configure, stage, then prepare.
        self._source_methods = pluginmgr.get_source_plugin_methods(self.source, partition_methods)
        self._source_methods["do_configure_partition"](self, cr, cr_workdir,
                                                       oe_builddir,
                                                       bootimg_dir,
                                                       kernel_dir,
                                                       native_sysroot)
        self._source_methods["do_stage_partition"](self, cr, cr_workdir,
                                                   oe_builddir,
                                                   bootimg_dir, kernel_dir,
                                                   native_sysroot)
        self._source_methods["do_prepare_partition"](self, cr, cr_workdir,
                                                     oe_builddir,
                                                     bootimg_dir, kernel_dir, rootfs_dir,
                                                     native_sysroot)

    def prepare_rootfs_from_fs_image(self, cr_workdir, oe_builddir,
                                     rootfs_dir):
        """
        Handle an already-created partition e.g. xxx.ext3, recording its
        location and size (in MB, as reported by 'du -Lbms').
        """
        rootfs = oe_builddir
        du_cmd = "du -Lbms %s" % rootfs
        rc, out = exec_cmd(du_cmd)
        rootfs_size = out.split()[0]

        self.size = rootfs_size
        self.source_file = rootfs

    def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir,
                       native_sysroot):
        """
        Prepare content for a rootfs partition i.e. create a partition
        and fill it from a /rootfs dir.

        Currently handles ext2/3/4 and btrfs.
        """
        # pseudo preserves rootfs ownership/permissions when running the
        # native mkfs tools.
        pseudo = "export PSEUDO_PREFIX=%s/usr;" % native_sysroot
        pseudo += "export PSEUDO_LOCALSTATEDIR=%s/../pseudo;" % rootfs_dir
        pseudo += "export PSEUDO_PASSWD=%s;" % rootfs_dir
        pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
        pseudo += "%s/usr/bin/pseudo " % native_sysroot

        if self.fstype.startswith("ext"):
            return self.prepare_rootfs_ext(cr_workdir, oe_builddir,
                                           rootfs_dir, native_sysroot,
                                           pseudo)
        elif self.fstype.startswith("btrfs"):
            return self.prepare_rootfs_btrfs(cr_workdir, oe_builddir,
                                             rootfs_dir, native_sysroot,
                                             pseudo)

    def prepare_rootfs_ext(self, cr_workdir, oe_builddir, rootfs_dir,
                           native_sysroot, pseudo):
        """
        Prepare content for an ext2/3/4 rootfs partition.
        """
        image_rootfs = rootfs_dir
        rootfs = "%s/rootfs_%s.%s" % (cr_workdir, self.label, self.fstype)

        # Current rootfs size in 1k blocks.
        du_cmd = "du -ks %s" % image_rootfs
        rc, out = exec_cmd(du_cmd)
        actual_rootfs_size = int(out.split()[0])

        extra_blocks = self.get_extra_block_count(actual_rootfs_size)

        if extra_blocks < IMAGE_EXTRA_SPACE:
            extra_blocks = IMAGE_EXTRA_SPACE

        rootfs_size = actual_rootfs_size + extra_blocks

        msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
                    (extra_blocks, self.mountpoint, rootfs_size))

        # Create a sparse file of the right size (the original command
        # redundantly passed "bs=" twice: "bs=1024 ... bs=1k").
        dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=0" % \
            (rootfs, rootfs_size)
        rc, out = exec_cmd(dd_cmd)

        # One inode per 8192 bytes of filesystem.
        extra_imagecmd = "-i 8192"

        mkfs_cmd = "mkfs.%s -F %s %s -d %s" % \
            (self.fstype, extra_imagecmd, rootfs, image_rootfs)
        rc, out = exec_native_cmd(pseudo + mkfs_cmd, native_sysroot)

        # get the rootfs size in the right units for kickstart (Mb)
        du_cmd = "du -Lbms %s" % rootfs
        rc, out = exec_cmd(du_cmd)
        rootfs_size = out.split()[0]

        self.size = rootfs_size
        self.source_file = rootfs

        return 0

    def prepare_for_uboot(self, arch, cr_workdir, oe_builddir, rootfs_dir,
                          native_sysroot):
        """
        Generates u-boot image from source_file (ext2/3/4).
        """
        pseudo = "export PSEUDO_PREFIX=%s/usr;" % native_sysroot
        pseudo += "export PSEUDO_LOCALSTATEDIR=%s/../pseudo;" % rootfs_dir
        pseudo += "export PSEUDO_PASSWD=%s;" % rootfs_dir
        pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
        pseudo += "%s/usr/bin/pseudo " % native_sysroot

        # 1) compress image
        rootfs = self.source_file
        rootfs_gzip = "%s.gz" % rootfs
        gzip_cmd = "gzip -f -9 -c %s > %s" % (rootfs, rootfs_gzip)
        rc, out = exec_native_cmd(pseudo + gzip_cmd, native_sysroot)

        # 2) image for U-Boot
        rootfs_uboot = "%s.u-boot" % rootfs_gzip
        mkimage_cmd = "mkimage -A %s -O linux -T ramdisk -C gzip -n %s -d %s %s" % \
            (arch, self.label, rootfs_gzip, rootfs_uboot)
        rc, out = exec_native_cmd(pseudo + mkimage_cmd, native_sysroot)

        msger.info("\n\n\tThe new U-Boot ramdisk image can be found here:\n\t\t%s\n\n" % rootfs_uboot)

        return 0

    def prepare_rootfs_btrfs(self, cr_workdir, oe_builddir, rootfs_dir,
                             native_sysroot, pseudo):
        """
        Prepare content for a btrfs rootfs partition.
        """
        image_rootfs = rootfs_dir
        rootfs = "%s/rootfs_%s.%s" % (cr_workdir, self.label, self.fstype)

        # Current rootfs size in 1k blocks.
        du_cmd = "du -ks %s" % image_rootfs
        rc, out = exec_cmd(du_cmd)
        actual_rootfs_size = int(out.split()[0])

        extra_blocks = self.get_extra_block_count(actual_rootfs_size)

        if extra_blocks < IMAGE_EXTRA_SPACE:
            extra_blocks = IMAGE_EXTRA_SPACE

        rootfs_size = actual_rootfs_size + extra_blocks

        msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
                    (extra_blocks, self.mountpoint, rootfs_size))

        # Create a sparse file of the right size (the original command
        # redundantly passed "bs=" twice: "bs=1024 ... bs=1k").
        dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=0" % \
            (rootfs, rootfs_size)
        rc, out = exec_cmd(dd_cmd)

        # -b takes a byte count; rootfs_size is in 1k blocks.
        mkfs_cmd = "mkfs.%s -b %d -r %s %s" % \
            (self.fstype, rootfs_size * 1024, image_rootfs, rootfs)
        rc, out = exec_native_cmd(pseudo + mkfs_cmd, native_sysroot)

        # get the rootfs size in the right units for kickstart (Mb)
        du_cmd = "du -Lbms %s" % rootfs
        rc, out = exec_cmd(du_cmd)
        rootfs_size = out.split()[0]

        self.size = rootfs_size
        self.source_file = rootfs

    def prepare_empty_partition(self, cr_workdir, oe_builddir, native_sysroot):
        """
        Prepare an empty partition, dispatching on --fstype.
        """
        if self.fstype.startswith("ext"):
            return self.prepare_empty_partition_ext(cr_workdir, oe_builddir,
                                                    native_sysroot)
        elif self.fstype.startswith("btrfs"):
            return self.prepare_empty_partition_btrfs(cr_workdir, oe_builddir,
                                                      native_sysroot)

    def prepare_empty_partition_ext(self, cr_workdir, oe_builddir,
                                    native_sysroot):
        """
        Prepare an empty ext2/3/4 partition.
        """
        fs = "%s/fs.%s" % (cr_workdir, self.fstype)

        # Create a sparse file of self.size MB.
        dd_cmd = "dd if=/dev/zero of=%s bs=1M seek=%d count=0" % \
            (fs, self.size)
        rc, out = exec_cmd(dd_cmd)

        # One inode per 8192 bytes of filesystem.
        extra_imagecmd = "-i 8192"

        mkfs_cmd = "mkfs.%s -F %s %s" % (self.fstype, extra_imagecmd, fs)
        rc, out = exec_native_cmd(mkfs_cmd, native_sysroot)

        self.source_file = fs

        return 0

    def prepare_empty_partition_btrfs(self, cr_workdir, oe_builddir,
                                      native_sysroot):
        """
        Prepare an empty btrfs partition.
        """
        fs = "%s/fs.%s" % (cr_workdir, self.fstype)

        # Create a sparse file of self.size MB.
        dd_cmd = "dd if=/dev/zero of=%s bs=1M seek=%d count=0" % \
            (fs, self.size)
        rc, out = exec_cmd(dd_cmd)

        # BUG FIX: the original referenced the undefined name 'rootfs'
        # (NameError) instead of 'fs', passed a byte count 1024x too small
        # (self.size is in MB, so bytes = size * 1024 * 1024), and then ran
        # a second ext-style "mkfs -F" command referencing another
        # undefined name ('extra_imagecmd') -- copy/paste residue from
        # prepare_empty_partition_ext().
        mkfs_cmd = "mkfs.%s -b %d %s" % \
            (self.fstype, self.size * 1024 * 1024, fs)
        rc, out = exec_native_cmd(mkfs_cmd, native_sysroot)

        self.source_file = fs

        return 0

    def prepare_swap_partition(self, cr_workdir, oe_builddir, native_sysroot):
        """
        Prepare a swap partition.
        """
        fs = "%s/fs.%s" % (cr_workdir, self.fstype)

        # Create a sparse file of self.size MB.
        dd_cmd = "dd if=/dev/zero of=%s bs=1M seek=%d count=0" % \
            (fs, self.size)
        rc, out = exec_cmd(dd_cmd)

        import uuid
        label_str = ""
        if self.label:
            label_str = "-L %s" % self.label
        mkswap_cmd = "mkswap %s -U %s %s" % (label_str, str(uuid.uuid1()), fs)
        rc, out = exec_native_cmd(mkswap_cmd, native_sysroot)

        self.source_file = fs

        return 0
468 | |||
class Wic_Partition(Mic_Partition):
    """Kickstart 'part' command extended with wic's --source and
    --rootfs-dir options."""
    removedKeywords = Mic_Partition.removedKeywords
    removedAttrs = Mic_Partition.removedAttrs

    def _getParser(self):
        """Extend the mic partition parser with the wic options."""
        parser = Mic_Partition._getParser(self)
        # --source: source plugin used to fill the partition and
        # calculate its size.
        parser.add_option("--source", type="string", action="store",
                          dest="source", default=None)
        # --rootfs-dir: explicit rootfs path used to fill the partition.
        parser.add_option("--rootfs-dir", type="string", action="store",
                          dest="rootfs", default=None)
        return parser
diff --git a/scripts/lib/mic/kickstart/custom_commands/wicboot.py b/scripts/lib/mic/kickstart/custom_commands/wicboot.py new file mode 100644 index 0000000000..ab8871de4e --- /dev/null +++ b/scripts/lib/mic/kickstart/custom_commands/wicboot.py | |||
@@ -0,0 +1,57 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2014, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This module provides the OpenEmbedded bootloader object definitions. | ||
22 | # | ||
23 | # AUTHORS | ||
24 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
25 | # | ||
26 | |||
27 | from pykickstart.base import * | ||
28 | from pykickstart.errors import * | ||
29 | from pykickstart.options import * | ||
30 | from pykickstart.commands.bootloader import * | ||
31 | |||
32 | from mic.kickstart.custom_commands.micboot import * | ||
33 | |||
class Wic_Bootloader(Mic_Bootloader):
    """Kickstart 'bootloader' command extended with wic's --source
    option for selecting a bootloader source plugin."""

    def __init__(self, writePriority=10, appendLine="", driveorder=None,
                 forceLBA=False, location="", md5pass="", password="",
                 upgrade=False, menus=""):
        Mic_Bootloader.__init__(self, writePriority, appendLine, driveorder,
                                forceLBA, location, md5pass, password, upgrade)

        # Name of the source plugin implementing bootloader-specific steps.
        self.source = ""

    def _getArgsAsStr(self):
        """Serialize the bootloader line, appending --source when set."""
        serialized = Mic_Bootloader._getArgsAsStr(self)

        if not self.source:
            return serialized
        return serialized + " --source=%s" % self.source

    def _getParser(self):
        """Extend the mic bootloader parser with the --source option."""
        parser = Mic_Bootloader._getParser(self)
        # use specified source plugin to implement bootloader-specific methods
        parser.add_option("--source", type="string", action="store",
                          dest="source", default=None)
        return parser
57 | |||
diff --git a/scripts/lib/mic/msger.py b/scripts/lib/mic/msger.py new file mode 100644 index 0000000000..9afc85be93 --- /dev/null +++ b/scripts/lib/mic/msger.py | |||
@@ -0,0 +1,309 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # vim: ai ts=4 sts=4 et sw=4 | ||
3 | # | ||
4 | # Copyright (c) 2009, 2010, 2011 Intel, Inc. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify it | ||
7 | # under the terms of the GNU General Public License as published by the Free | ||
8 | # Software Foundation; version 2 of the License | ||
9 | # | ||
10 | # This program is distributed in the hope that it will be useful, but | ||
11 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
12 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
13 | # for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along | ||
16 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
17 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | |||
19 | import os,sys | ||
20 | import re | ||
21 | import time | ||
22 | |||
# NOTE(review): the conventional spelling is __all__ (lowercase); as written
# this name has no effect on "from msger import *".  Renaming it would narrow
# the module's star-import surface, so it is only flagged here.
# NOTE(review): 'set_mode' is listed but no such function is defined in this
# module ('set_interactive' is) -- confirm which name was intended.
__ALL__ = ['set_mode',
           'get_loglevel',
           'set_loglevel',
           'set_logfile',
           'raw',
           'debug',
           'verbose',
           'info',
           'warning',
           'error',
           'ask',
           'pause',
          ]
36 | |||
# COLORs in ANSI
INFO_COLOR = 32 # green
WARN_COLOR = 33 # yellow
ERR_COLOR = 31 # red
ASK_COLOR = 34 # blue
NO_COLOR = 0

# matches an optional "<prefix>" marker at the start of a message;
# _split_msg() folds the prefix into the message head
PREFIX_RE = re.compile('^<(.*?)>\s*(.*)', re.S)

# whether ask()/choice()/pause() may prompt on the terminal
INTERACTIVE = True

# current verbosity threshold; messages above it are dropped
LOG_LEVEL = 1
LOG_LEVELS = {
                'quiet': 0,
                'normal': 1,
                'verbose': 2,
                'debug': 3,
                'never': 4,
             }

# in-memory log buffer plus the path it is flushed to at exit (set_logfile)
LOG_FILE_FP = None
LOG_CONTENT = ''
# stderr-capture state (enable_logstderr/disable_logstderr): scratch-file
# fd, its path, and the saved real stderr fd
CATCHERR_BUFFILE_FD = -1
CATCHERR_BUFFILE_PATH = None
CATCHERR_SAVED_2 = -1
62 | |||
def _general_print(head, color, msg = None, stream = None, level = 'normal'):
    """Core output routine: filter by level, append to the log, then print.

    Any stderr output captured via enable_logstderr() is drained first and
    emitted (and logged) ahead of the message itself.
    """
    global LOG_CONTENT
    if not stream:
        stream = sys.stdout

    # drop the message entirely when it is above the current verbosity
    if LOG_LEVELS[level] > LOG_LEVEL:
        # skip
        return

    # encode raw 'unicode' str to utf8 encoded str
    if msg and isinstance(msg, unicode):
        msg = msg.encode('utf-8', 'ignore')

    # drain whatever was written to the captured stderr since the last print
    errormsg = ''
    if CATCHERR_BUFFILE_FD > 0:
        size = os.lseek(CATCHERR_BUFFILE_FD , 0, os.SEEK_END)
        os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_SET)
        errormsg = os.read(CATCHERR_BUFFILE_FD, size)
        os.ftruncate(CATCHERR_BUFFILE_FD, 0)

    # append error msg to LOG
    if errormsg:
        LOG_CONTENT += errormsg

    # append normal msg to LOG (timestamped, one line per message)
    save_msg = msg.strip() if msg else None
    if save_msg:
        timestr = time.strftime("[%m/%d %H:%M:%S %Z] ", time.localtime())
        LOG_CONTENT += timestr + save_msg + '\n'

    # captured stderr is printed before the message, uncolored
    if errormsg:
        _color_print('', NO_COLOR, errormsg, stream, level)

    _color_print(head, color, msg, stream, level)
97 | |||
def _color_print(head, color, msg, stream, level):
    """Write a (possibly colorized) head plus message body to *stream*."""
    use_color = (color != NO_COLOR
                 and stream.isatty()
                 and os.getenv('ANSI_COLORS_DISABLED') is None)

    # a head starting with '\r' means "redraw in place": no trailing newline
    newline = not head.startswith('\r')

    if use_color:
        head = '\033[%dm%s:\033[0m ' %(color, head)
        if not newline:
            # ESC cmd to clear line before redrawing over it
            head = '\033[2K' + head
    else:
        if head:
            head += ': '
        if head.startswith('\r'):
            head = head.lstrip()
            newline = True

    if msg is not None:
        if isinstance(msg, unicode):
            msg = msg.encode('utf8', 'ignore')

        stream.write('%s%s' % (head, msg))
        if newline:
            stream.write('\n')

    stream.flush()
132 | |||
def _color_perror(head, color, msg, level = 'normal'):
    """Print an error-ish message to stderr (stdout while stderr is captured)."""
    out = sys.stdout if CATCHERR_BUFFILE_FD > 0 else sys.stderr
    _general_print(head, color, msg, out, level)
138 | |||
def _split_msg(head, msg):
    """Normalize *msg*; fold leading control chars and <prefix> into *head*."""
    if isinstance(msg, list):
        msg = '\n'.join(map(str, msg))

    # hoist a leading newline / carriage-return onto the head so the
    # message body itself starts clean
    for ctrl in ('\n', '\r'):
        if msg.startswith(ctrl):
            msg = msg.lstrip()
            head = ctrl + head
            break

    matched = PREFIX_RE.match(msg)
    if matched:
        head += ' <%s>' % matched.group(1)
        msg = matched.group(2)

    return head, msg
159 | |||
def get_loglevel():
    """Return the symbolic name of the current log level."""
    for name, value in LOG_LEVELS.items():
        if value == LOG_LEVEL:
            return name

def set_loglevel(level):
    """Set the global log level by name; unknown names are silently ignored."""
    global LOG_LEVEL
    try:
        LOG_LEVEL = LOG_LEVELS[level]
    except KeyError:
        # no effect for unrecognized level names
        pass

def set_interactive(mode=True):
    """Turn interactive prompting (ask/choice/pause) on or off."""
    global INTERACTIVE
    INTERACTIVE = True if mode else False

def log(msg=''):
    """Append *msg* to the in-memory log buffer flushed by set_logfile()."""
    global LOG_CONTENT
    if not msg:
        return
    LOG_CONTENT += msg
183 | |||
def raw(msg=''):
    """Print *msg* verbatim: no prefix, no color."""
    _general_print('', NO_COLOR, msg)

def info(msg):
    """Print an informational message at 'normal' level."""
    prefix, body = _split_msg('Info', msg)
    _general_print(prefix, INFO_COLOR, body)

def verbose(msg):
    """Print a message only when the log level is 'verbose' or higher."""
    prefix, body = _split_msg('Verbose', msg)
    _general_print(prefix, INFO_COLOR, body, level = 'verbose')

def warning(msg):
    """Print a warning to stderr (or stdout while stderr is captured)."""
    prefix, body = _split_msg('Warning', msg)
    _color_perror(prefix, WARN_COLOR, body)

def debug(msg):
    """Print a debug message when the log level is 'debug'.

    NOTE(review): uses ERR_COLOR (red), same as error() -- confirm before
    relying on the color to distinguish debug output from errors.
    """
    prefix, body = _split_msg('Debug', msg)
    _color_perror(prefix, ERR_COLOR, body, level = 'debug')

def error(msg):
    """Print an error message and terminate the process with exit status 1."""
    prefix, body = _split_msg('Error', msg)
    _color_perror(prefix, ERR_COLOR, body)
    sys.exit(1)
207 | |||
def ask(msg, default=True):
    """Ask a yes/no question and return the answer as a bool.

    Non-interactive mode echoes and returns *default*; Ctrl-C exits the
    process with status 2.
    """
    _general_print('\rQ', ASK_COLOR, '')
    try:
        msg += '(Y/n) ' if default else '(y/N) '

        if not INTERACTIVE:
            msg += ' Y' if default else ' N'
            _general_print('', NO_COLOR, msg)
            return default

        while True:
            reply = raw_input(msg)
            lowered = reply.lower()
            if lowered == 'y':
                return True
            if lowered == 'n':
                return False
            if not reply.strip():
                # bare <Enter> accepts the default
                return default
            # anything else: re-prompt
    except KeyboardInterrupt:
        sys.stdout.write('\n')
        sys.exit(2)
238 | |||
def choice(msg, choices, default=0):
    """Ask the user to pick one of *choices*; return the chosen string.

    Returns None when *default* is out of range.  Non-interactive mode
    echoes and returns the default choice; Ctrl-C exits with status 2.
    """
    if default >= len(choices):
        return None
    _general_print('\rQ', ASK_COLOR, '')
    try:
        msg += " [%s] " % '/'.join(choices)

        if not INTERACTIVE:
            msg += choices[default]
            _general_print('', NO_COLOR, msg)
            return choices[default]

        while True:
            reply = raw_input(msg)
            if reply in choices:
                return reply
            if not reply.strip():
                # bare <Enter> accepts the default choice
                return choices[default]
    except KeyboardInterrupt:
        sys.stdout.write('\n')
        sys.exit(2)
260 | |||
def pause(msg=None):
    """Block until the user presses <ENTER>; no-op when non-interactive."""
    if not INTERACTIVE:
        return
    _general_print('\rQ', ASK_COLOR, '')
    raw_input('press <ENTER> to continue ...' if msg is None else msg)
267 | |||
def set_logfile(fpath):
    """Arrange for the accumulated log buffer to be written to *fpath* at exit."""
    global LOG_FILE_FP

    def _savelogf():
        # dump everything collected in LOG_CONTENT to the configured file
        if LOG_FILE_FP:
            with open(LOG_FILE_FP, 'w') as fobj:
                fobj.write(LOG_CONTENT)

    if LOG_FILE_FP is not None:
        warning('duplicate log file configuration')

    LOG_FILE_FP = fpath

    import atexit
    atexit.register(_savelogf)
284 | |||
def enable_logstderr(fpath):
    """Redirect fd 2 (stderr) into a scratch file at *fpath* for capture.

    The captured text is drained and re-emitted by _general_print().
    """
    global CATCHERR_BUFFILE_FD
    global CATCHERR_BUFFILE_PATH
    global CATCHERR_SAVED_2

    # always start from an empty capture file
    if os.path.exists(fpath):
        os.remove(fpath)
    CATCHERR_BUFFILE_PATH = fpath
    CATCHERR_BUFFILE_FD = os.open(CATCHERR_BUFFILE_PATH, os.O_RDWR|os.O_CREAT)
    # remember the real stderr so disable_logstderr() can restore it
    CATCHERR_SAVED_2 = os.dup(2)
    os.dup2(CATCHERR_BUFFILE_FD, 2)

def disable_logstderr():
    """Undo enable_logstderr(): flush captured text, restore the real stderr."""
    global CATCHERR_BUFFILE_FD
    global CATCHERR_BUFFILE_PATH
    global CATCHERR_SAVED_2

    raw(msg = None) # flush message buffer and print it.
    os.dup2(CATCHERR_SAVED_2, 2)
    os.close(CATCHERR_SAVED_2)
    os.close(CATCHERR_BUFFILE_FD)
    os.unlink(CATCHERR_BUFFILE_PATH)
    # reset the module-level capture state to "not capturing"
    CATCHERR_BUFFILE_FD = -1
    CATCHERR_BUFFILE_PATH = None
    CATCHERR_SAVED_2 = -1
diff --git a/scripts/lib/mic/plugin.py b/scripts/lib/mic/plugin.py new file mode 100644 index 0000000000..df03c15081 --- /dev/null +++ b/scripts/lib/mic/plugin.py | |||
@@ -0,0 +1,121 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os, sys | ||
19 | |||
20 | from mic import msger | ||
21 | from mic import pluginbase | ||
22 | from mic.utils import errors | ||
23 | |||
24 | |||
# NOTE(review): conventional spelling is __all__ (lowercase); as written this
# has no effect on star-imports -- flagged only, since renaming would change
# the module's exported surface.
__ALL__ = ['PluginMgr', 'pluginmgr']

# plugin categories accepted by PluginMgr.get_plugins()
PLUGIN_TYPES = ["imager", "source"] # TODO "hook"
28 | |||
29 | |||
class PluginMgr(object):
    """Singleton that discovers and imports mic plugin modules.

    Plugin directories are registered lazily; the modules in them are
    imported once and register themselves through the _Plugin metaclass
    in mic.pluginbase.
    """
    # registered plugin dirs; maps path -> loaded flag (see _add_plugindir)
    plugin_dirs = {}

    # make the manager class as singleton
    _instance = None
    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(PluginMgr, cls).__new__(cls, *args, **kwargs)

        return cls._instance

    def __init__(self):
        # derive the .../scripts prefix from this module's own location,
        # then point at the canonical in-tree plugin directory
        mic_path = os.path.dirname(__file__)
        eos = mic_path.find('scripts') + len('scripts')
        scripts_path = mic_path[:eos]

        self.plugin_dir = scripts_path + "/lib/mic/plugins"

    def append_dirs(self, dirs):
        """Register extra plugin directories and (re)load everything."""
        for path in dirs:
            self._add_plugindir(path)

        # load all the plugins AGAIN
        self._load_all()

    def _add_plugindir(self, path):
        """Record *path* as a plugin dir (not yet loaded); skip non-dirs."""
        path = os.path.abspath(os.path.expanduser(path))

        if not os.path.isdir(path):
            msger.warning("Plugin dir is not a directory or does not exist: %s"\
                          % path)
            return

        if path not in self.plugin_dirs:
            self.plugin_dirs[path] = False
            # the value True/False means "loaded"

    def _load_all(self):
        """Import every .py module found in each not-yet-loaded plugin dir."""
        for (pdir, loaded) in self.plugin_dirs.iteritems():
            if loaded: continue

            # temporarily put the plugin dir first on sys.path so a plain
            # __import__(mod) resolves to it
            sys.path.insert(0, pdir)
            for mod in [x[:-3] for x in os.listdir(pdir) if x.endswith(".py")]:
                if mod and mod != '__init__':
                    if mod in sys.modules:
                        #self.plugin_dirs[pdir] = True
                        msger.warning("Module %s already exists, skip" % mod)
                    else:
                        try:
                            pymod = __import__(mod)
                            self.plugin_dirs[pdir] = True
                            msger.debug("Plugin module %s:%s imported"\
                                        % (mod, pymod.__file__))
                        except ImportError, err:
                            msg = 'Failed to load plugin %s/%s: %s' \
                                  % (os.path.basename(pdir), mod, err)
                            msger.warning(msg)

            del(sys.path[0])

    def get_plugins(self, ptype):
        """ the return value is dict of name:class pairs """

        if ptype not in PLUGIN_TYPES:
            raise errors.CreatorError('%s is not valid plugin type' % ptype)

        # make sure the in-tree dir for this type is registered and loaded
        self._add_plugindir(os.path.join(self.plugin_dir, ptype))
        self._load_all()

        return pluginbase.get_plugins(ptype)

    def get_source_plugin_methods(self, source_name, methods):
        """
        The methods param is a dict with the method names to find. On
        return, the dict values will be filled in with pointers to the
        corresponding methods. If one or more methods are not found,
        None is returned.
        """
        return_methods = None
        # find the named source plugin and resolve each requested method
        for _source_name, klass in self.get_plugins('source').iteritems():
            if _source_name == source_name:
                for _method_name in methods.keys():
                    if not hasattr(klass, _method_name):
                        msger.warning("Unimplemented %s source interface for: %s"\
                                      % (_method_name, _source_name))
                        return None
                    func = getattr(klass, _method_name)
                    methods[_method_name] = func
                    return_methods = methods
        return return_methods

# module-level singleton instance used throughout mic
pluginmgr = PluginMgr()
diff --git a/scripts/lib/mic/pluginbase.py b/scripts/lib/mic/pluginbase.py new file mode 100644 index 0000000000..881d9969c6 --- /dev/null +++ b/scripts/lib/mic/pluginbase.py | |||
@@ -0,0 +1,158 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import shutil | ||
20 | from mic import msger | ||
21 | from mic.utils import errors | ||
22 | |||
class _Plugin(object):
    """Registration base: subclasses self-register via the metaclass below.

    A subclass defining 'mic_plugin_type' opens a new plugin category; a
    further subclass that also defines 'name' is recorded in that category.
    """
    class __metaclass__(type):
        def __init__(cls, name, bases, attrs):
            # first class seen (_Plugin itself): create the registry
            if not hasattr(cls, 'plugins'):
                cls.plugins = {}

            # a category base class: open its bucket in the registry
            elif 'mic_plugin_type' in attrs:
                if attrs['mic_plugin_type'] not in cls.plugins:
                    cls.plugins[attrs['mic_plugin_type']] = {}

            # a concrete plugin (has both a type and a 'name'): register it
            elif hasattr(cls, 'mic_plugin_type') and 'name' in attrs:
                cls.plugins[cls.mic_plugin_type][attrs['name']] = cls

        def show_plugins(cls):
            # print the registered plugin names for this class's category
            for cls in cls.plugins[cls.mic_plugin_type]:
                print cls

        def get_plugins(cls):
            # full registry: {plugin_type: {name: class}}
            return cls.plugins
42 | |||
class ImagerPlugin(_Plugin):
    """Base class for imager plugins (registered under type 'imager')."""
    mic_plugin_type = "imager"

    @classmethod
    def check_image_exists(self, destdir, apacking=None,
                           images=(),
                           release=None):
        """Ensure none of the target images/dirs already exist.

        For each name in *images* (or just *apacking* when given) that
        exists under *destdir*, interactively offer to remove it; abort
        the run if the user declines.  When *release* is not None, the
        last path component of *destdir* itself is treated as the image
        name instead.

        Raises:
            errors.Abort: the user declined the cleanup prompt.
        """
        # if it's a packing file, reset images
        if apacking:
            images = [apacking]

        # release option will override images
        if release is not None:
            images = [os.path.basename(destdir.rstrip('/'))]
            destdir = os.path.dirname(destdir.rstrip('/'))

        for name in images:
            if not name:
                continue

            image = os.path.join(destdir, name)
            if not os.path.exists(image):
                continue

            if msger.ask("Target image/dir: %s already exists, "
                         "clean up and continue?" % image):
                if os.path.isdir(image):
                    shutil.rmtree(image)
                else:
                    os.unlink(image)
            else:
                # fixed typo in the user-facing message ("Cancled")
                raise errors.Abort("Canceled")

    def do_create(self):
        """Image-creation entry point; concrete imager plugins override this."""
        pass

    def do_chroot(self):
        """Chroot entry point; concrete imager plugins override this."""
        pass
82 | |||
class SourcePlugin(_Plugin):
    """
    The methods that can be implemented by --source plugins.

    Any methods not implemented in a subclass inherit these.
    """
    mic_plugin_type = "source"

    @classmethod
    def do_install_pkgs(self, part, creator, cr_workdir, oe_builddir, rootfs_dir,
                        bootimg_dir, kernel_dir, native_sysroot):
        """
        Called before partitions have been prepared and assembled into a
        disk image. Install packages into rootfs.
        """
        msger.debug("SourcePlugin: do_install_pkgs: part %s" % part)

    @classmethod
    def do_install_disk(self, disk, disk_name, cr, workdir, oe_builddir,
                        bootimg_dir, kernel_dir, native_sysroot):
        """
        Called after all partitions have been prepared and assembled into a
        disk image. This provides a hook to allow finalization of a
        disk image e.g. to write an MBR to it.
        """
        msger.debug("SourcePlugin: do_install_disk: disk: %s" % disk_name)

    @classmethod
    def do_stage_partition(self, part, cr, workdir, oe_builddir, bootimg_dir,
                           kernel_dir, native_sysroot):
        """
        Special content staging hook called before do_prepare_partition(),
        normally empty.

        Typically, a partition will just use the passed-in params e.g.
        straight bootimg_dir, etc, but in some cases, things need to
        be more tailored e.g. to use a deploy dir + /boot, etc. This
        hook allows those files to be staged in a customized fashion.
        Note that get_bitbake_var() allows you to access non-standard
        variables that you might want to use for this.
        """
        msger.debug("SourcePlugin: do_stage_partition: part: %s" % part)

    @classmethod
    def do_configure_partition(self, part, cr, cr_workdir, oe_builddir,
                               bootimg_dir, kernel_dir, native_sysroot):
        """
        Called before do_prepare_partition(), typically used to create
        custom configuration files for a partition, for example
        syslinux or grub config files.
        """
        msger.debug("SourcePlugin: do_configure_partition: part: %s" % part)

    @classmethod
    def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir,
                             kernel_dir, rootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        """
        msger.debug("SourcePlugin: do_prepare_partition: part: %s" % part)
144 | |||
class BackendPlugin(_Plugin):
    """Base class for package-backend plugins (registered under 'backend')."""
    mic_plugin_type="backend"

    def addRepository(self):
        # concrete backends override this to register a package repository
        pass
150 | |||
def get_plugins(typen):
    """Return the {name: class} dict of registered plugins of type *typen*.

    Returns None when no plugin of that type has been registered.
    """
    return ImagerPlugin.get_plugins().get(typen)

__all__ = ['ImagerPlugin', 'BackendPlugin', 'SourcePlugin', 'get_plugins']
diff --git a/scripts/lib/mic/plugins/backend/yumpkgmgr.py b/scripts/lib/mic/plugins/backend/yumpkgmgr.py new file mode 100644 index 0000000000..955f813109 --- /dev/null +++ b/scripts/lib/mic/plugins/backend/yumpkgmgr.py | |||
@@ -0,0 +1,490 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2007 Red Hat Inc. | ||
4 | # Copyright (c) 2010, 2011 Intel, Inc. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify it | ||
7 | # under the terms of the GNU General Public License as published by the Free | ||
8 | # Software Foundation; version 2 of the License | ||
9 | # | ||
10 | # This program is distributed in the hope that it will be useful, but | ||
11 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
12 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
13 | # for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along | ||
16 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
17 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | |||
19 | import os, sys | ||
20 | import re | ||
21 | import tempfile | ||
22 | import glob | ||
23 | from string import Template | ||
24 | |||
25 | import rpmUtils | ||
26 | import yum | ||
27 | |||
28 | from mic import msger | ||
29 | from mic.kickstart import ksparser | ||
30 | from mic.utils import misc, rpmmisc | ||
31 | from mic.utils.grabber import TextProgress | ||
32 | from mic.utils.proxy import get_proxy_for | ||
33 | from mic.utils.errors import CreatorError | ||
34 | from mic.imager.baseimager import BaseImageCreator | ||
35 | |||
# string.Template source for the generated yum.conf; $installroot is
# substituted by Yum._writeConf()
YUMCONF_TEMP = """[main]
installroot=$installroot
cachedir=/var/cache/yum
persistdir=/var/lib/yum
plugins=0
reposdir=
failovermethod=priority
http_caching=packages
sslverify=1
"""
46 | |||
class MyYumRepository(yum.yumRepo.YumRepository):
    """YumRepository with a relocated package cache dir, proxy lookup per
    URL, and optional SSL-verification bypass for downloads."""

    def __del__(self):
        # intentionally empty: skip the base-class destructor behavior
        # (presumably cleanup that must not run here -- TODO confirm)
        pass

    def dirSetup(self):
        super(MyYumRepository, self).dirSetup()
        # relocate package dir
        pkgdir = os.path.join(self.basecachedir, 'packages', self.id)
        self.setAttribute('_dir_setup_pkgdir', pkgdir)
        self._dirSetupMkdir_p(self.pkgdir)

    def _getFile(self, url=None,
                       relative=None,
                       local=None,
                       start=None,
                       end=None,
                       copy_local=None,
                       checkfunc=None,
                       text=None,
                       reget='simple',
                       cache=True,
                       size=None):
        """Download a file, honoring per-URL proxies; when sslverify is off,
        temporarily disable M2Crypto's post-connection certificate check."""

        # save M2Crypto's check so it can be restored after the download
        m2c_connection = None
        if not self.sslverify:
            try:
                import M2Crypto
                m2c_connection = M2Crypto.SSL.Connection.clientPostConnectionCheck
                M2Crypto.SSL.Connection.clientPostConnectionCheck = None
            except ImportError, err:
                raise CreatorError("%s, please try to install python-m2crypto" % str(err))

        # pick the proxy for the explicit URL, or for the repo's first URL
        proxy = None
        if url:
            proxy = get_proxy_for(url)
        else:
            proxy = get_proxy_for(self.urls[0])

        if proxy:
            self.proxy = str(proxy)

        size = int(size) if size else None
        rvalue = super(MyYumRepository, self)._getFile(url,
                                                      relative,
                                                      local,
                                                      start,
                                                      end,
                                                      copy_local,
                                                      checkfunc,
                                                      text,
                                                      reget,
                                                      cache,
                                                      size)

        # restore the certificate check if it was disabled above
        if m2c_connection and \
           not M2Crypto.SSL.Connection.clientPostConnectionCheck:
            M2Crypto.SSL.Connection.clientPostConnectionCheck = m2c_connection

        return rvalue
106 | |||
107 | from mic.pluginbase import BackendPlugin | ||
class Yum(BackendPlugin, yum.YumBase):
    name = 'yum'

    def __init__(self, target_arch, instroot, cachedir):
        """Set up a yum backend installing into *instroot* for *target_arch*,
        caching downloads under *cachedir*."""
        yum.YumBase.__init__(self)

        self.cachedir = cachedir
        self.instroot  = instroot
        self.target_arch = target_arch

        if self.target_arch:
            if not rpmUtils.arch.arches.has_key(self.target_arch):
                # teach rpmUtils about ARM hard-float arch fallbacks
                # NOTE(review): "armv7tnhl" is assigned twice; the second
                # assignment overwrites the first -- confirm intended mapping
                rpmUtils.arch.arches["armv7hl"] = "noarch"
                rpmUtils.arch.arches["armv7tnhl"] = "armv7nhl"
                rpmUtils.arch.arches["armv7tnhl"] = "armv7thl"
                rpmUtils.arch.arches["armv7thl"] = "armv7hl"
                rpmUtils.arch.arches["armv7nhl"] = "armv7hl"
            self.arch.setup_arch(self.target_arch)

        # accounting of what was installed: license map, file lists, vcs info
        self.__pkgs_license = {}
        self.__pkgs_content = {}
        self.__pkgs_vcsinfo = {}

        self.install_debuginfo = False
132 | |||
    def doFileLogSetup(self, uid, logfile):
        # don't do the file log for the livecd as it can lead to open fds
        # being left and an inability to clean up after ourself
        pass

    def close(self):
        """Tear down yum state: remove temp configs, close the transaction
        set, repos, sacks and rpmdb; finally close stray fds (best-effort)."""
        # best-effort removal of the temporary config files
        try:
            os.unlink(self.confpath)
            os.unlink(self.conf.installroot + "/yum.conf")
        except:
            pass

        if self.ts:
            self.ts.close()
        self._delRepos()
        self._delSacks()
        yum.YumBase.close(self)
        self.closeRpmDB()

        # on non-Fedora/MeeGo hosts, close every fd above stderr to avoid
        # leaked descriptors (best-effort; fds may already be closed)
        if not os.path.exists("/etc/fedora-release") and \
           not os.path.exists("/etc/meego-release"):
            for i in range(3, os.sysconf("SC_OPEN_MAX")):
                try:
                    os.close(i)
                except:
                    pass

    def __del__(self):
        # intentionally empty: skip base-class __del__ behavior during GC
        # (presumably to avoid cleanup side effects -- TODO confirm)
        pass
162 | |||
163 | def _writeConf(self, confpath, installroot): | ||
164 | conf = Template(YUMCONF_TEMP).safe_substitute(installroot=installroot) | ||
165 | |||
166 | f = file(confpath, "w+") | ||
167 | f.write(conf) | ||
168 | f.close() | ||
169 | |||
170 | os.chmod(confpath, 0644) | ||
171 | |||
172 | def _cleanupRpmdbLocks(self, installroot): | ||
173 | # cleans up temporary files left by bdb so that differing | ||
174 | # versions of rpm don't cause problems | ||
175 | for f in glob.glob(installroot + "/var/lib/rpm/__db*"): | ||
176 | os.unlink(f) | ||
177 | |||
    def setup(self):
        """Write a temporary yum.conf and run the full yum setup sequence."""
        # create yum.conf
        (fn, self.confpath) = tempfile.mkstemp(dir=self.cachedir,
                                               prefix='yum.conf-')
        os.close(fn)
        self._writeConf(self.confpath, self.instroot)
        self._cleanupRpmdbLocks(self.instroot)
        # do setup -- order matters: config, ts, rpmdb, repos, then sacks
        self.doConfigSetup(fn = self.confpath, root = self.instroot)
        self.conf.cache = 0
        self.doTsSetup()
        self.doRpmDBSetup()
        self.doRepoSetup()
        self.doSackSetup()

    def preInstall(self, pkg):
        # FIXME: handle pre-install package
        return None
196 | |||
    def selectPackage(self, pkg):
        """Select a given package.
        Can be specified with name.arch or name*

        Returns None on success or an error string when nothing matched;
        raises CreatorError on repo/download or other yum failures.
        """

        try:
            self.install(pattern = pkg)
            return None
        except yum.Errors.InstallError:
            return "No package(s) available to install"
        except yum.Errors.RepoError, e:
            raise CreatorError("Unable to download from repo : %s" % (e,))
        except yum.Errors.YumBaseError, e:
            raise CreatorError("Unable to install: %s" % (e,))

    def deselectPackage(self, pkg):
        """Deselect package. Can be specified as name.arch or name*
        """

        # NOTE(review): rsplit(".", 2) yields 3 parts for names with two
        # dots; only the exactly-one-dot case is treated as name.arch here
        sp = pkg.rsplit(".", 2)
        txmbrs = []
        if len(sp) == 2:
            txmbrs = self.tsInfo.matchNaevr(name=sp[0], arch=sp[1])

        if len(txmbrs) == 0:
            # fall back to wildcard matching over the whole package sack
            exact, match, unmatch = yum.packages.parsePackages(
                                            self.pkgSack.returnPackages(),
                                            [pkg],
                                            casematch=1)
            for p in exact + match:
                txmbrs.append(p)

        if len(txmbrs) > 0:
            for x in txmbrs:
                self.tsInfo.remove(x.pkgtup)
                # we also need to remove from the conditionals
                # dict so that things don't get pulled back in as a result
                # of them. yes, this is ugly. conditionals should die.
                for req, pkgs in self.tsInfo.conditionals.iteritems():
                    if x in pkgs:
                        pkgs.remove(x)
                        self.tsInfo.conditionals[req] = pkgs
        else:
            msger.warning("No such package %s to remove" %(pkg,))

    def selectGroup(self, grp, include = ksparser.GROUP_DEFAULT):
        """Select a package group at the requested inclusion level.

        GROUP_REQUIRED drops the group's default packages; GROUP_ALL also
        pulls in its optional packages.
        """
        try:
            yum.YumBase.selectGroup(self, grp)
            if include == ksparser.GROUP_REQUIRED:
                for p in grp.default_packages.keys():
                    self.deselectPackage(p)

            elif include == ksparser.GROUP_ALL:
                for p in grp.optional_packages.keys():
                    self.selectPackage(p)

            return None
        except (yum.Errors.InstallError, yum.Errors.GroupsError), e:
            return e
        except yum.Errors.RepoError, e:
            raise CreatorError("Unable to download from repo : %s" % (e,))
        except yum.Errors.YumBaseError, e:
            raise CreatorError("Unable to install: %s" % (e,))
260 | |||
    def addRepository(self, name, url = None, mirrorlist = None, proxy = None,
                      proxy_username = None, proxy_password = None,
                      inc = None, exc = None, ssl_verify=True, nocache=False,
                      cost = None, priority=None):
        """Register a yum repository and return it.

        Either *url* (baseurl) or *mirrorlist* may be given; $basearch and
        $arch variables are substituted as yum configs do.  Returns None
        when the repository's EULA is not confirmed.
        """
        # TODO: Handle priority attribute for repos
        def _varSubstitute(option):
            # takes a variable and substitutes like yum configs do
            option = option.replace("$basearch", rpmUtils.arch.getBaseArch())
            option = option.replace("$arch", rpmUtils.arch.getCanonArch())
            return option

        repo = MyYumRepository(name)

        # Set proxy
        repo.proxy = proxy
        repo.proxy_username = proxy_username
        repo.proxy_password = proxy_password

        if url:
            repo.baseurl.append(_varSubstitute(url))

        # check LICENSE files
        if not rpmmisc.checkRepositoryEULA(name, repo):
            msger.warning('skip repo:%s for failed EULA confirmation' % name)
            return None

        if mirrorlist:
            repo.mirrorlist = _varSubstitute(mirrorlist)

        # seed the repo with default RepoConf values it does not have yet
        conf = yum.config.RepoConf()
        for k, v in conf.iteritems():
            if v or not hasattr(repo, k):
                repo.setAttribute(k, v)

        repo.sslverify = ssl_verify
        repo.cache = not nocache

        repo.basecachedir = self.cachedir
        repo.base_persistdir = self.conf.persistdir
        repo.failovermethod = "priority"
        repo.metadata_expire = 0
        # Enable gpg check for verifying corrupt packages
        repo.gpgcheck = 1
        repo.enable()
        repo.setup(0)
        self.repos.add(repo)
        if cost:
            repo.cost = cost

        msger.verbose('repo: %s was added' % name)
        return repo
312 | |||
    def installLocal(self, pkg, po=None, updateonly=False):
        """Queue a local rpm file for installation, first deselecting any
        repo package of the same name so the local file wins."""
        ts = rpmUtils.transaction.initReadOnlyTransaction()
        try:
            hdr = rpmUtils.miscutils.hdrFromPackage(ts, pkg)
        except rpmUtils.RpmUtilsError, e:
            raise yum.Errors.MiscError, \
                  'Could not open local rpm file: %s: %s' % (pkg, e)

        self.deselectPackage(hdr['name'])
        yum.YumBase.installLocal(self, pkg, po, updateonly)
323 | |||
def installHasFile(self, file):
    """Return True if any package queued for install/update ("i"/"u"
    transaction state) provides *file*, else False.
    """
    providers = self.whatProvides(file, None, None)

    # Packages about to be installed or updated in this transaction.
    pending = [txmbr.po for txmbr in self.tsInfo.getMembers()
               if txmbr.ts_state in ("i", "u")]

    for candidate in pending:
        if candidate in providers:
            return True

    return False
338 | |||
def runInstall(self, checksize = 0):
    """Resolve, download and install every selected package.

    checksize -- when non-zero, abort if the summed installed size of the
                 transaction exceeds it (space check for the target).

    Records per-package file lists and licenses into the private caches
    read back by getAllContent()/getPkgsLicense().  Raises CreatorError
    on resolver, disk-space, download or install failure.
    """
    # NOTE(review): presumably neutralized so rpm scriptlets don't pick
    # these up from the build host — verify.
    os.environ["HOME"] = "/"
    os.environ["LD_PRELOAD"] = ""
    try:
        (res, resmsg) = self.buildTransaction()
    except yum.Errors.RepoError, e:
        raise CreatorError("Unable to download from repo : %s" %(e,))

    # yum returns resultcode 2 for "transaction built, ready to run".
    if res != 2:
        raise CreatorError("Failed to build transaction : %s" \
                           % str.join("\n", resmsg))

    # Packages that will be installed or updated by this transaction.
    dlpkgs = map(
        lambda x: x.po,
        filter(
            lambda txmbr: txmbr.ts_state in ("i", "u"),
            self.tsInfo.getMembers()))

    # record all pkg and the content
    for pkg in dlpkgs:
        pkg_long_name = misc.RPM_FMT % {
            'name': pkg.name,
            'arch': pkg.arch,
            'version': pkg.version,
            'release': pkg.release
        }
        self.__pkgs_content[pkg_long_name] = pkg.files
        license = pkg.license
        if license in self.__pkgs_license.keys():
            self.__pkgs_license[license].append(pkg_long_name)
        else:
            self.__pkgs_license[license] = [pkg_long_name]

    total_count = len(dlpkgs)
    cached_count = 0
    download_total_size = sum(map(lambda x: int(x.packagesize), dlpkgs))

    # Subtract already-cached, checksum-valid packages from the download
    # budget; purge cached copies for no-cache repos or damaged files.
    msger.info("\nChecking packages cached ...")
    for po in dlpkgs:
        local = po.localPkg()
        repo = filter(lambda r: r.id == po.repoid, self.repos.listEnabled())[0]
        if not repo.cache and os.path.exists(local):
            os.unlink(local)
        if not os.path.exists(local):
            continue
        if not self.verifyPkg(local, po, False):
            msger.warning("Package %s is damaged: %s" \
                          % (os.path.basename(local), local))
        else:
            download_total_size -= int(po.packagesize)
            cached_count +=1

    cache_avail_size = misc.get_filesystem_avail(self.cachedir)
    if cache_avail_size < download_total_size:
        raise CreatorError("No enough space used for downloading.")

    # record the total size of installed pkgs
    pkgs_total_size = 0L
    for x in dlpkgs:
        if hasattr(x, 'installedsize'):
            pkgs_total_size += int(x.installedsize)
        else:
            pkgs_total_size += int(x.size)

    # check needed size before actually download and install
    if checksize and pkgs_total_size > checksize:
        raise CreatorError("No enough space used for installing, "
                           "please resize partition size in ks file")

    msger.info("Packages: %d Total, %d Cached, %d Missed" \
               % (total_count, cached_count, total_count - cached_count))

    try:
        repos = self.repos.listEnabled()
        for repo in repos:
            repo.setCallback(TextProgress(total_count - cached_count))

        self.downloadPkgs(dlpkgs)
        # FIXME: sigcheck?

        self.initActionTs()
        self.populateTs(keepold=0)

        deps = self.ts.check()
        if len(deps) != 0:
            # This isn't fatal, Ubuntu has this issue but it is ok.
            msger.debug(deps)
            msger.warning("Dependency check failed!")

        rc = self.ts.order()
        if rc != 0:
            raise CreatorError("ordering packages for installation failed")

        # FIXME: callback should be refactored a little in yum
        cb = rpmmisc.RPMInstallCallback(self.ts)
        cb.tsInfo = self.tsInfo
        cb.filelog = False

        msger.warning('\nCaution, do NOT interrupt the installation, '
                      'else mic cannot finish the cleanup.')

        # Capture librpm's stderr into a log file for the duration of
        # the transaction.
        installlogfile = "%s/__catched_stderr.buf" % (self.instroot)
        msger.enable_logstderr(installlogfile)
        self.runTransaction(cb)
        self._cleanupRpmdbLocks(self.conf.installroot)

    except rpmUtils.RpmUtilsError, e:
        raise CreatorError("mic does NOT support delta rpm: %s" % e)
    except yum.Errors.RepoError, e:
        raise CreatorError("Unable to download from repo : %s" % e)
    except yum.Errors.YumBaseError, e:
        raise CreatorError("Unable to install: %s" % e)
    finally:
        msger.disable_logstderr()
453 | |||
def getVcsInfo(self):
    """Return the {package: vcs-info} mapping collected so far (may be empty)."""
    return self.__pkgs_vcsinfo
456 | |||
def getAllContent(self):
    """Return {long-name: [files]} recorded for the transaction by runInstall()."""
    return self.__pkgs_content
459 | |||
def getPkgsLicense(self):
    """Return {license: [package long names]} recorded by runInstall()."""
    return self.__pkgs_license
462 | |||
def getFilelist(self, pkgname):
    """Return the file list of the first transaction member named
    *pkgname*, or None when pkgname is empty or not in the transaction.
    """
    if not pkgname:
        return None

    for txmbr in self.tsInfo.getMembers():
        if txmbr.po.name == pkgname:
            return txmbr.po.filelist
    return None
471 | |||
def package_url(self, pkgname):
    """Resolve *pkgname* to (remote_url, proxies-dict-or-None).

    The proxy is taken from the owning enabled repo when set, otherwise
    from the environment via get_proxy_for().  Returns (None, None) when
    the package is unknown.
    """
    matches = self.pkgSack.searchNevra(name=pkgname)
    if not matches:
        return (None, None)

    pkg = matches[0]
    url = pkg.remote_url

    proxy = None
    for repo in self.repos.listEnabled():
        if repo.id == pkg.repoid:
            proxy = repo.proxy
            break
    if not proxy:
        proxy = get_proxy_for(url)

    proxies = None
    if proxy:
        # Key the proxy map by the URL scheme, as urlgrabber expects.
        proxies = {str(url.split(':')[0]): str(proxy)}

    return (url, proxies)
diff --git a/scripts/lib/mic/plugins/backend/zypppkgmgr.py b/scripts/lib/mic/plugins/backend/zypppkgmgr.py new file mode 100755 index 0000000000..c760859832 --- /dev/null +++ b/scripts/lib/mic/plugins/backend/zypppkgmgr.py | |||
@@ -0,0 +1,973 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2010, 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import shutil | ||
20 | import urlparse | ||
21 | import rpm | ||
22 | |||
import zypp
# Fail fast when the host's python-zypp bindings are too old: this
# backend relies on PoolQuery (package searching) and
# RepoManager.loadSolvFile (feeding local rpm metadata into the pool),
# which only exist in the enhanced Tizen build of python-zypp.
if not hasattr(zypp, 'PoolQuery') or \
   not hasattr(zypp.RepoManager, 'loadSolvFile'):
    raise ImportError("python-zypp in host system cannot support PoolQuery or "
                      "loadSolvFile interface, please update it to enhanced "
                      "version which can be found in download.tizen.org/tools")
29 | |||
30 | from mic import msger | ||
31 | from mic.kickstart import ksparser | ||
32 | from mic.utils import misc, rpmmisc, runner, fs_related | ||
33 | from mic.utils.grabber import myurlgrab, TextProgress | ||
34 | from mic.utils.proxy import get_proxy_for | ||
35 | from mic.utils.errors import CreatorError, RepoError, RpmError | ||
36 | from mic.imager.baseimager import BaseImageCreator | ||
37 | |||
class RepositoryStub:
    """Plain value object describing one repository before it is handed
    to libzypp (see Zypp.addRepository, which fills these fields in).
    """

    def __init__(self):
        # Identification and location.
        self.name = self.mirrorlist = None
        self.baseurl = []
        # Proxy settings.
        self.proxy = self.proxy_username = self.proxy_password = None
        self.nocache = False
        # zypp repository flags.
        self.enabled = self.autorefresh = self.keeppackages = True
        self.priority = None
52 | |||
53 | from mic.pluginbase import BackendPlugin | ||
54 | class Zypp(BackendPlugin): | ||
55 | name = 'zypp' | ||
56 | |||
def __init__(self, target_arch, instroot, cachedir):
    """zypp-based package backend.

    target_arch -- rpm architecture of the image being built
    instroot    -- install root the image is assembled into
    cachedir    -- directory holding repo metadata and downloaded rpms
    """
    self.cachedir = cachedir
    self.instroot  = instroot
    self.target_arch = target_arch

    # Bookkeeping caches filled by runInstall()/getVcsInfo()/etc.
    self.__pkgs_license = {}
    self.__pkgs_content = {}
    self.__pkgs_vcsinfo = {}
    self.repos = []
    self.to_deselect = []
    self.localpkgs = {}
    # Lazily created zypp / rpm handles.
    self.repo_manager = None
    self.repo_manager_options = None
    self.Z = None
    self.ts = None
    self.ts_pre = None
    # Per-repo include/exclude pins: {package name: repo name}.
    self.incpkgs = {}
    self.excpkgs = {}
    self.pre_pkgs = []
    # Let rpm accept downgrades (OLDPACKAGE) and reinstalls (REPLACEPKG).
    self.probFilterFlags = [ rpm.RPMPROB_FILTER_OLDPACKAGE,
                             rpm.RPMPROB_FILTER_REPLACEPKG ]

    self.has_prov_query = True
    self.install_debuginfo = False
81 | |||
def doFileLogSetup(self, uid, logfile):
    """Override: deliberately disable file logging (see comment)."""
    # don't do the file log for the livecd as it can lead to open fds
    # being left and an inability to clean up after ourself
    pass
86 | |||
def closeRpmDB(self):
    """No-op: the rpm database handles are closed via the transaction
    sets in close()."""
    pass
89 | |||
def close(self):
    """Release both rpm transaction sets, then — on hosts that are not
    Fedora/MeeGo — force-close any file descriptor above stderr that
    librpm/libzypp may have leaked.
    """
    if self.ts:
        self.ts.closeDB()
        self.ts = None

    if self.ts_pre:
        self.ts_pre.closeDB()
        # BUGFIX: this previously reset self.ts (already None here)
        # instead of self.ts_pre, leaving self.ts_pre pointing at a
        # closed transaction set.
        self.ts_pre = None

    self.closeRpmDB()

    if not os.path.exists("/etc/fedora-release") and \
       not os.path.exists("/etc/meego-release"):
        for i in range(3, os.sysconf("SC_OPEN_MAX")):
            try:
                os.close(i)
            except:
                # best-effort: most descriptors in the range are not open
                pass
108 | |||
def __del__(self):
    # Ensure transaction sets (and leaked fds) are released on GC.
    self.close()
111 | |||
112 | def _cleanupRpmdbLocks(self, installroot): | ||
113 | # cleans up temporary files left by bdb so that differing | ||
114 | # versions of rpm don't cause problems | ||
115 | import glob | ||
116 | for f in glob.glob(installroot + "/var/lib/rpm/__db*"): | ||
117 | os.unlink(f) | ||
118 | |||
119 | def _cleanupZyppJunk(self, installroot): | ||
120 | try: | ||
121 | shutil.rmtree(os.path.join(installroot, '.zypp')) | ||
122 | except: | ||
123 | pass | ||
124 | |||
def setup(self):
    """Backend hook run before use: clear stale rpmdb lock files from
    the install root."""
    self._cleanupRpmdbLocks(self.instroot)
127 | |||
def whatObsolete(self, pkg):
    """Return the first package pool item whose 'obsoletes' list names
    *pkg* exactly, or None when nothing obsoletes it.
    """
    q = zypp.PoolQuery()
    q.addKind(zypp.ResKind.package)
    q.addAttribute(zypp.SolvAttr.obsoletes, pkg)
    q.setMatchExact()
    return next(iter(q.queryResults(self.Z.pool())), None)
136 | |||
def _zyppQueryPackage(self, pkg):
    """Return the first package pool item whose name is exactly *pkg*,
    or None when no such package exists in the pool.
    """
    q = zypp.PoolQuery()
    q.addKind(zypp.ResKind.package)
    q.addAttribute(zypp.SolvAttr.name, pkg)
    q.setMatchExact()
    return next(iter(q.queryResults(self.Z.pool())), None)
145 | |||
def _splitPkgString(self, pkg):
    """Split a 'name.arch' spec into (name, arch).

    The trailing token after the last '.' is treated as an architecture
    only when it is compatible with the target arch; otherwise the whole
    string is the name and arch is None.
    """
    if "." not in pkg:
        return pkg, None

    name, arch = pkg.rsplit(".", 1)
    sysarch = zypp.Arch(self.target_arch)
    if zypp.Arch(arch).compatible_with(sysarch):
        return name, arch

    # Trailing token is not a usable arch: the '.' is part of the name.
    return pkg, None
157 | |||
def selectPackage(self, pkg):
    """Select a given package or package pattern, can be specified
    with name.arch or name* or *name

    Candidates are ranked by cmpEVR (arch compatibility, then repo
    priority, then epoch-version-release) and the best one is marked
    to-be-installed; a falling-back search by 'provides' is attempted
    for exact names.  Raises CreatorError when nothing matches.
    """

    if not self.Z:
        self.__initialize_zypp()

    # Mark the obsoleting package instead of the obsoleted one, when an
    # obsoleter exists.
    def markPoolItem(obs, pi):
        if obs == None:
            pi.status().setToBeInstalled (zypp.ResStatus.USER)
        else:
            obs.status().setToBeInstalled (zypp.ResStatus.USER)

    def cmpEVR(p1, p2):
        # compare criterion: arch compatibility first, then repo
        # priority, and version last
        a1 = p1.arch()
        a2 = p2.arch()
        if str(a1) != str(a2):
            if a1.compatible_with(a2):
                return -1
            else:
                return 1
        # Priority of a repository is an integer value between 0 (the
        # highest priority) and 99 (the lowest priority)
        pr1 = int(p1.repoInfo().priority())
        pr2 = int(p2.repoInfo().priority())
        if pr1 > pr2:
            return -1
        elif pr1 < pr2:
            return 1

        ed1 = p1.edition()
        ed2 = p2.edition()
        (e1, v1, r1) = map(str, [ed1.epoch(), ed1.version(), ed1.release()])
        (e2, v2, r2) = map(str, [ed2.epoch(), ed2.version(), ed2.release()])
        return rpm.labelCompare((e1, v1, r1), (e2, v2, r2))

    found = False
    startx = pkg.startswith("*")
    endx = pkg.endswith("*")
    ispattern = startx or endx
    name, arch = self._splitPkgString(pkg)

    q = zypp.PoolQuery()
    q.addKind(zypp.ResKind.package)

    if ispattern:
        # Translate the '*' glob position into an anchored regex.
        if startx and not endx:
            pattern = '%s$' % (pkg[1:])
        if endx and not startx:
            pattern = '^%s' % (pkg[0:-1])
        if endx and startx:
            pattern = '%s' % (pkg[1:-1])
        q.setMatchRegex()
        q.addAttribute(zypp.SolvAttr.name,pattern)

    elif arch:
        q.setMatchExact()
        q.addAttribute(zypp.SolvAttr.name,name)

    else:
        q.setMatchExact()
        q.addAttribute(zypp.SolvAttr.name,pkg)

    # Iterate best-first; honour per-repo include/exclude pins.
    for pitem in sorted(
            q.queryResults(self.Z.pool()),
            cmp=lambda x,y: cmpEVR(zypp.asKindPackage(x), zypp.asKindPackage(y)),
            reverse=True):
        item = zypp.asKindPackage(pitem)
        if item.name() in self.excpkgs.keys() and \
           self.excpkgs[item.name()] == item.repoInfo().name():
            continue
        if item.name() in self.incpkgs.keys() and \
           self.incpkgs[item.name()] != item.repoInfo().name():
            continue

        found = True
        obspkg = self.whatObsolete(item.name())
        if arch:
            if arch == str(item.arch()):
                item.status().setToBeInstalled (zypp.ResStatus.USER)
        else:
            markPoolItem(obspkg, pitem)
        if not ispattern:
            break

    # Can't match using package name, then search from packge
    # provides infomation
    if found == False and not ispattern:
        q.addAttribute(zypp.SolvAttr.provides, pkg)
        q.addAttribute(zypp.SolvAttr.name,'')

        for pitem in sorted(
                q.queryResults(self.Z.pool()),
                cmp=lambda x,y: cmpEVR(zypp.asKindPackage(x), zypp.asKindPackage(y)),
                reverse=True):
            item = zypp.asKindPackage(pitem)
            if item.name() in self.excpkgs.keys() and \
               self.excpkgs[item.name()] == item.repoInfo().name():
                continue
            if item.name() in self.incpkgs.keys() and \
               self.incpkgs[item.name()] != item.repoInfo().name():
                continue

            found = True
            obspkg = self.whatObsolete(item.name())
            markPoolItem(obspkg, pitem)
            break

    if found:
        return None
    else:
        raise CreatorError("Unable to find package: %s" % (pkg,))
273 | |||
def inDeselectPackages(self, pitem):
    """Return True when *pitem* matches an entry recorded via
    deselectPackage(): an exact name, a 'name.arch' pair, or a
    pattern with a leading/trailing '*'.
    """
    item = zypp.asKindPackage(pitem)
    name = item.name()

    for spec in self.to_deselect:
        leading = spec.startswith("*")
        trailing = spec.endswith("*")
        specname, specarch = self._splitPkgString(spec)

        if leading or trailing:
            # Wildcard: match on suffix and/or prefix of the name.
            if leading and name.endswith(spec[1:]):
                return True
            if trailing and name.startswith(spec[:-1]):
                return True
        elif name == specname:
            # Exact entry; honour the arch qualifier when present.
            if not specarch or str(item.arch()) == specarch:
                return True

    return False
298 | |||
def deselectPackage(self, pkg):
    """Record *pkg* (exact name, 'name.arch', or '*'-pattern) as not to
    be installed; matching is performed later by inDeselectPackages()."""
    self.to_deselect.append(pkg)
302 | |||
def selectGroup(self, grp, include = ksparser.GROUP_DEFAULT):
    """Mark the zypp pattern whose name or summary equals *grp* for
    installation.  Raises CreatorError when no pattern matches."""
    if not self.Z:
        self.__initialize_zypp()
    found = False
    q=zypp.PoolQuery()
    q.addKind(zypp.ResKind.pattern)
    for pitem in q.queryResults(self.Z.pool()):
        item = zypp.asKindPattern(pitem)
        summary = "%s" % item.summary()
        name = "%s" % item.name()
        if name == grp or summary == grp:
            found = True
            pitem.status().setToBeInstalled (zypp.ResStatus.USER)
            break

    if found:
        if include == ksparser.GROUP_REQUIRED:
            # NOTE(review): *grp* is a name string above, yet this branch
            # accesses grp.default_packages as if it were a group object;
            # it looks like it would raise AttributeError — verify callers.
            map(
                lambda p: self.deselectPackage(p),
                grp.default_packages.keys())

        return None
    else:
        raise CreatorError("Unable to find pattern: %s" % (grp,))
327 | |||
def addRepository(self, name,
                  url = None,
                  mirrorlist = None,
                  proxy = None,
                  proxy_username = None,
                  proxy_password = None,
                  inc = None,
                  exc = None,
                  ssl_verify = True,
                  nocache = False,
                  cost=None,
                  priority=None):
    """Register repository *name* with libzypp and refresh its metadata.

    Builds a RepositoryStub, encodes proxy/ssl options as query
    parameters on the zypp URL, records inc/exc package pins, and
    returns the stub — or None when the repo's EULA is declined.

    Raises CreatorError when libzypp rejects the repository.
    """
    # TODO: Handle cost attribute for repos

    if not self.repo_manager:
        self.__initialize_repo_manager()

    if not proxy and url:
        proxy = get_proxy_for(url)

    repo = RepositoryStub()
    repo.name = name
    repo.id = name
    repo.proxy = proxy
    repo.proxy_username = proxy_username
    repo.proxy_password = proxy_password
    repo.ssl_verify = ssl_verify
    repo.nocache = nocache
    repo.baseurl.append(url)
    # Pin included/excluded packages to this repository by name.
    if inc:
        for pkg in inc:
            self.incpkgs[pkg] = name
    if exc:
        for pkg in exc:
            self.excpkgs[pkg] = name

    # check LICENSE files
    if not rpmmisc.checkRepositoryEULA(name, repo):
        msger.warning('skip repo:%s for failed EULA confirmation' % name)
        return None

    if mirrorlist:
        repo.mirrorlist = mirrorlist

    # Enable gpg check for verifying corrupt packages
    repo.gpgcheck = 1
    if priority is not None:
        # priority 0 has issue in RepoInfo.setPriority
        repo.priority = priority + 1

    try:
        repo_info = zypp.RepoInfo()
        repo_info.setAlias(repo.name)
        repo_info.setName(repo.name)
        repo_info.setEnabled(repo.enabled)
        repo_info.setAutorefresh(repo.autorefresh)
        repo_info.setKeepPackages(repo.keeppackages)
        baseurl = zypp.Url(repo.baseurl[0])
        if not ssl_verify:
            baseurl.setQueryParam("ssl_verify", "no")
        if proxy:
            # Encode the proxy host/port as zypp URL query parameters.
            scheme, host, path, parm, query, frag = urlparse.urlparse(proxy)

            proxyinfo = host.split(":")
            host = proxyinfo[0]

            port = "80"
            if len(proxyinfo) > 1:
                port = proxyinfo[1]

            # socks proxies carry host:port in the raw string, not in
            # the urlparse netloc.
            if proxy.startswith("socks") and len(proxy.rsplit(':', 1)) == 2:
                host = proxy.rsplit(':', 1)[0]
                port = proxy.rsplit(':', 1)[1]

            baseurl.setQueryParam ("proxy", host)
            baseurl.setQueryParam ("proxyport", port)

            repo.baseurl[0] = baseurl.asCompleteString()
        self.repos.append(repo)

        repo_info.addBaseUrl(baseurl)

        if repo.priority is not None:
            repo_info.setPriority(repo.priority)

        # this hack is used to change zypp credential file location
        # the default one is $HOME/.zypp, which cause conflicts when
        # installing some basic packages, and the location doesn't
        # have any interface actually, so use a tricky way anyway
        homedir = None
        if 'HOME' in os.environ:
            homedir = os.environ['HOME']
            os.environ['HOME'] = '/'
        else:
            os.environ['HOME'] = '/'

        self.repo_manager.addRepository(repo_info)

        # save back the $HOME env
        if homedir:
            os.environ['HOME'] = homedir
        else:
            del os.environ['HOME']

        self.__build_repo_cache(name)

    except RuntimeError, e:
        raise CreatorError(str(e))

    msger.verbose('repo: %s was added' % name)
    return repo
439 | |||
def installHasFile(self, file):
    """Interface-parity stub: the zypp backend never reports that the
    pending transaction provides *file*."""
    return False
442 | |||
def preInstall(self, pkg):
    """Queue *pkg* onto self.pre_pkgs for the separate preinstall
    transaction pass."""
    self.pre_pkgs.append(pkg)
445 | |||
def runInstall(self, checksize = 0):
    """Resolve the transaction, then download and install the selected
    packages (plus matching -debuginfo rpms when enabled).

    checksize is accepted for interface parity; the space check that
    used it is currently disabled (see FIXME below).  Raises
    RepoError/RpmError as-is and wraps anything else in CreatorError.
    """
    # NOTE(review): presumably neutralized so rpm scriptlets don't pick
    # these up from the build host — verify.
    os.environ["HOME"] = "/"
    os.environ["LD_PRELOAD"] = ""
    self.buildTransaction()

    todo = zypp.GetResolvablesToInsDel(self.Z.pool())
    installed_pkgs = todo._toInstall
    dlpkgs = []
    # Collect real packages (skip patterns and deselected entries).
    for pitem in installed_pkgs:
        if not zypp.isKindPattern(pitem) and \
           not self.inDeselectPackages(pitem):
            item = zypp.asKindPackage(pitem)
            dlpkgs.append(item)

            if not self.install_debuginfo or str(item.arch()) == "noarch":
                continue

            # Pull the matching -debuginfo package alongside, if any.
            dipkg = self._zyppQueryPackage("%s-debuginfo" % item.name())
            if dipkg:
                ditem = zypp.asKindPackage(dipkg)
                dlpkgs.append(ditem)
            else:
                msger.warning("No debuginfo rpm found for: %s" \
                              % item.name())

    # record all pkg and the content
    localpkgs = self.localpkgs.keys()
    for pkg in dlpkgs:
        license = ''
        if pkg.name() in localpkgs:
            # Local rpm: take the metadata from the file's own header.
            hdr = rpmmisc.readRpmHeader(self.ts, self.localpkgs[pkg.name()])
            pkg_long_name = misc.RPM_FMT % {
                'name': hdr['name'],
                'arch': hdr['arch'],
                'version': hdr['version'],
                'release': hdr['release']
            }
            license = hdr['license']

        else:
            pkg_long_name = misc.RPM_FMT % {
                'name': pkg.name(),
                'arch': pkg.arch(),
                'version': pkg.edition().version(),
                'release': pkg.edition().release()
            }

            license = pkg.license()

        if license in self.__pkgs_license.keys():
            self.__pkgs_license[license].append(pkg_long_name)
        else:
            self.__pkgs_license[license] = [pkg_long_name]

    total_count = len(dlpkgs)
    cached_count = 0
    download_total_size = sum(map(lambda x: int(x.downloadSize()), dlpkgs))
    localpkgs = self.localpkgs.keys()

    msger.info("Checking packages cached ...")
    for po in dlpkgs:
        # Check if it is cached locally
        if po.name() in localpkgs:
            cached_count += 1
        else:
            local = self.getLocalPkgPath(po)
            name = str(po.repoInfo().name())
            try:
                repo = filter(lambda r: r.name == name, self.repos)[0]
            except IndexError:
                repo = None
            nocache = repo.nocache if repo else False

            # Keep checksum-valid cached copies (unless no-cache repo);
            # purge everything else so it gets re-downloaded.
            if os.path.exists(local):
                if nocache or self.checkPkg(local) !=0:
                    os.unlink(local)
                else:
                    download_total_size -= int(po.downloadSize())
                    cached_count += 1
    cache_avail_size = misc.get_filesystem_avail(self.cachedir)
    if cache_avail_size < download_total_size:
        raise CreatorError("No enough space used for downloading.")

    # record the total size of installed pkgs
    install_total_size = sum(map(lambda x: int(x.installSize()), dlpkgs))
    # check needed size before actually download and install

    # FIXME: for multiple partitions for loop type, check fails
    # skip the check temporarily
    #if checksize and install_total_size > checksize:
    #    raise CreatorError("No enough space used for installing, "
    #                       "please resize partition size in ks file")

    download_count = total_count - cached_count
    msger.info("Packages: %d Total, %d Cached, %d Missed" \
               % (total_count, cached_count, download_count))

    try:
        if download_count > 0:
            msger.info("Downloading packages ...")
            self.downloadPkgs(dlpkgs, download_count)

        self.installPkgs(dlpkgs)

    except (RepoError, RpmError):
        raise
    except Exception, e:
        raise CreatorError("Package installation failed: %s" % (e,))
554 | |||
def getVcsInfo(self):
    """Return {long-name: VCS tag} for every package in the target rpmdb.

    The mapping is computed once from the rpm database and cached in
    self.__pkgs_vcsinfo.
    """
    if self.__pkgs_vcsinfo:
        # BUGFIX: previously a bare `return` (i.e. None) — every call
        # after the first lost the cached data.  Return the cache, the
        # same way getAllContent() does.
        return self.__pkgs_vcsinfo

    if not self.ts:
        self.__initialize_transaction()

    mi = self.ts.dbMatch()
    for hdr in mi:
        lname = misc.RPM_FMT % {
            'name': hdr['name'],
            'arch': hdr['arch'],
            'version': hdr['version'],
            'release': hdr['release']
        }
        self.__pkgs_vcsinfo[lname] = hdr['VCS']

    return self.__pkgs_vcsinfo
573 | |||
def getAllContent(self):
    """Return {long-name: [files]} for every package in the target rpmdb,
    computing and caching the mapping on first call."""
    if self.__pkgs_content:
        return self.__pkgs_content

    if not self.ts:
        self.__initialize_transaction()

    mi = self.ts.dbMatch()
    for hdr in mi:
        lname = misc.RPM_FMT % {
            'name': hdr['name'],
            'arch': hdr['arch'],
            'version': hdr['version'],
            'release': hdr['release']
        }
        self.__pkgs_content[lname] = hdr['FILENAMES']

    return self.__pkgs_content
592 | |||
def getPkgsLicense(self):
    """Return {license: [package long names]} recorded by runInstall()."""
    return self.__pkgs_license
595 | |||
def getFilelist(self, pkgname):
    """Return the installed file list for *pkgname* from the target
    rpmdb, or None when pkgname is empty or not installed."""
    if not pkgname:
        return None

    if not self.ts:
        self.__initialize_transaction()

    mi = self.ts.dbMatch('name', pkgname)
    for header in mi:
        # First matching header wins; falls through to implicit None.
        return header['FILENAMES']
606 | |||
def __initialize_repo_manager(self):
    """Create the zypp RepoManager rooted at the install root, with all
    of its caches redirected under self.cachedir.  Idempotent."""
    if self.repo_manager:
        return

    # Clean up repo metadata
    shutil.rmtree(self.cachedir + "/etc", ignore_errors = True)
    shutil.rmtree(self.cachedir + "/solv", ignore_errors = True)
    shutil.rmtree(self.cachedir + "/raw", ignore_errors = True)

    # Accept unsigned / unverifiable / unknown-key repo metadata
    # without prompting.
    zypp.KeyRing.setDefaultAccept( zypp.KeyRing.ACCEPT_UNSIGNED_FILE
                                 | zypp.KeyRing.ACCEPT_VERIFICATION_FAILED
                                 | zypp.KeyRing.ACCEPT_UNKNOWNKEY
                                 | zypp.KeyRing.TRUST_KEY_TEMPORARILY
                                 )

    self.repo_manager_options = \
        zypp.RepoManagerOptions(zypp.Pathname(self.instroot))

    # Redirect every repo cache path away from the install root and
    # into the shared cache directory.
    self.repo_manager_options.knownReposPath = \
        zypp.Pathname(self.cachedir + "/etc/zypp/repos.d")

    self.repo_manager_options.repoCachePath = \
        zypp.Pathname(self.cachedir)

    self.repo_manager_options.repoRawCachePath = \
        zypp.Pathname(self.cachedir + "/raw")

    self.repo_manager_options.repoSolvCachePath = \
        zypp.Pathname(self.cachedir + "/solv")

    self.repo_manager_options.repoPackagesCachePath = \
        zypp.Pathname(self.cachedir + "/packages")

    self.repo_manager = zypp.RepoManager(self.repo_manager_options)
641 | |||
def __build_repo_cache(self, name):
    """Build (refresh) the solv cache for repository *name*, unless it
    is already cached or disabled."""
    repo = self.repo_manager.getRepositoryInfo(name)
    if self.repo_manager.isCached(repo) or not repo.enabled():
        return

    msger.info('Refreshing repository: %s ...' % name)
    self.repo_manager.buildCache(repo, zypp.RepoManager.BuildIfNeeded)
649 | |||
def __initialize_zypp(self):
    """Bring up the ZYpp instance: set the target architecture, load
    every enabled repo from its solv cache into the pool, then load the
    target rpmdb.  Idempotent."""
    if self.Z:
        return

    zconfig = zypp.ZConfig_instance()

    # Set system architecture
    if self.target_arch:
        zconfig.setSystemArchitecture(zypp.Arch(self.target_arch))

    msger.info("zypp architecture is <%s>" % zconfig.systemArchitecture())

    # repoPackagesCachePath is corrected by this
    self.repo_manager = zypp.RepoManager(self.repo_manager_options)
    repos = self.repo_manager.knownRepositories()
    for repo in repos:
        if not repo.enabled():
            continue
        self.repo_manager.loadFromCache(repo)

    self.Z = zypp.ZYppFactory_instance().getZYpp()
    self.Z.initializeTarget(zypp.Pathname(self.instroot))
    self.Z.target().load()
673 | |||
def buildTransaction(self):
    """Run the zypp dependency resolver over the pool.

    On failure, logs every resolver problem and raises RepoError.
    """
    if not self.Z.resolver().resolvePool():
        probs = self.Z.resolver().problems()

        for problem in probs:
            msger.warning("repo problem: %s, %s" \
                          % (problem.description().decode("utf-8"),
                             problem.details().decode("utf-8")))

        raise RepoError("found %d resolver problem, abort!" \
                        % len(probs))
685 | |||
def getLocalPkgPath(self, po):
    """Return the path inside the owning repo's package cache where
    *po*'s rpm file is (or will be) stored.
    """
    cacheroot = po.repoInfo().packagesPath()
    rpmname = os.path.basename(str(po.location().filename()))
    return "%s/%s" % (cacheroot, rpmname)
693 | |||
def installLocal(self, pkg, po=None, updateonly=False):
    """Feed the local rpm file *pkg* into the zypp pool and select it.

    The rpm's metadata is converted to a temporary .solv file with the
    external 'rpms2solv' tool and loaded into the repo manager; then,
    if the rpm's architecture is compatible with the target, the file
    is recorded in self.localpkgs and selected by name.  Incompatible
    or unconvertible rpms only produce warnings.
    """
    if not self.ts:
        self.__initialize_transaction()

    solvfile = "%s/.solv" % (self.cachedir)

    rc, out = runner.runtool([fs_related.find_binary_path("rpms2solv"),
                              pkg])
    if rc == 0:
        f = open(solvfile, "w+")
        f.write(out)
        f.close()

        warnmsg = self.repo_manager.loadSolvFile(solvfile,
                                                 os.path.basename(pkg))
        if warnmsg:
            msger.warning(warnmsg)

        os.unlink(solvfile)
    else:
        msger.warning('Can not get %s solv data.' % pkg)

    hdr = rpmmisc.readRpmHeader(self.ts, pkg)
    arch = zypp.Arch(hdr['arch'])
    sysarch = zypp.Arch(self.target_arch)

    if arch.compatible_with (sysarch):
        pkgname = hdr['name']
        self.localpkgs[pkgname] = pkg
        self.selectPackage(pkgname)
        msger.info("Marking %s to be installed" % (pkg))

    else:
        msger.warning("Cannot add package %s to transaction. "
                      "Not a compatible architecture: %s" \
                      % (pkg, hdr['arch']))
730 | |||
    def downloadPkgs(self, package_objects, count):
        """Download the rpm files for *package_objects* into the local cache.

        package_objects -- zypp package objects selected for install
        count           -- total number of packages (drives the progress bar)

        Skips packages registered as local files and cached rpms that pass
        the integrity check. Raises CreatorError (after closing the
        backend) if a download fails.
        """
        localpkgs = self.localpkgs.keys()
        progress_obj = TextProgress(count)

        for po in package_objects:
            # Locally-provided rpms never need downloading.
            if po.name() in localpkgs:
                continue

            filename = self.getLocalPkgPath(po)
            if os.path.exists(filename):
                # Reuse the cached rpm only if it passes the integrity check.
                if self.checkPkg(filename) == 0:
                    continue

            dirn = os.path.dirname(filename)
            if not os.path.exists(dirn):
                os.makedirs(dirn)

            url = self.get_url(po)
            proxies = self.get_proxies(po)

            try:
                filename = myurlgrab(url, filename, proxies, progress_obj)
            except CreatorError:
                self.close()
                raise
756 | |||
    def preinstallPkgs(self):
        """Run the pre-install rpm transaction (unpack-only, no scriptlets).

        Orders and executes self.ts_pre, capturing librpm's stderr into a
        log file inside the install root. The transaction set is consumed:
        ts_pre is closed and reset to None afterwards. Raises RepoError on
        fatal transaction errors; scriptlet-level problems only warn.
        """
        if not self.ts_pre:
            self.__initialize_transaction()

        self.ts_pre.order()
        cb = rpmmisc.RPMInstallCallback(self.ts_pre)
        cb.headmsg = "Preinstall"
        installlogfile = "%s/__catched_stderr.buf" % (self.instroot)

        # start to catch stderr output from librpm
        msger.enable_logstderr(installlogfile)

        errors = self.ts_pre.run(cb.callback, '')
        # stop catch
        msger.disable_logstderr()
        self.ts_pre.closeDB()
        self.ts_pre = None

        # ts.run() returns None on success; an empty error list means only
        # non-fatal (e.g. scriptlet) problems occurred.
        if errors is not None:
            if len(errors) == 0:
                msger.warning('scriptlet or other non-fatal errors occurred '
                              'during transaction.')

            else:
                for e in errors:
                    msger.warning(e[0])
                raise RepoError('Could not run transaction.')
784 | |||
    def installPkgs(self, package_objects):
        """Install the resolved *package_objects* into the install root.

        Builds the main rpm transaction (and a parallel unpack-only
        pre-install transaction for packages listed in self.pre_pkgs),
        checks dependencies, then runs the transaction(s) with librpm
        stderr captured to a log file. Raises RpmError if an rpm file is
        missing and RepoError on unresolved dependencies or transaction
        failure. The main transaction set is consumed (reset to None).
        """
        if not self.ts:
            self.__initialize_transaction()

        # clean rpm lock
        self._cleanupRpmdbLocks(self.instroot)
        self._cleanupZyppJunk(self.instroot)
        # Set filters
        probfilter = 0
        for flag in self.probFilterFlags:
            probfilter |= flag
        self.ts.setProbFilter(probfilter)
        self.ts_pre.setProbFilter(probfilter)

        localpkgs = self.localpkgs.keys()

        for po in package_objects:
            pkgname = po.name()
            # Prefer the locally supplied rpm over the download cache.
            if pkgname in localpkgs:
                rpmpath = self.localpkgs[pkgname]
            else:
                rpmpath = self.getLocalPkgPath(po)

            if not os.path.exists(rpmpath):
                # Maybe it is a local repo
                rpmuri = self.get_url(po)
                if rpmuri.startswith("file:/"):
                    rpmpath = rpmuri[5:]

            if not os.path.exists(rpmpath):
                raise RpmError("Error: %s doesn't exist" % rpmpath)

            h = rpmmisc.readRpmHeader(self.ts, rpmpath)

            # Packages in pre_pkgs are also unpacked by the pre-install
            # transaction before the main transaction runs.
            if pkgname in self.pre_pkgs:
                msger.verbose("pre-install package added: %s" % pkgname)
                self.ts_pre.addInstall(h, rpmpath, 'u')

            self.ts.addInstall(h, rpmpath, 'u')

        unresolved_dependencies = self.ts.check()
        if not unresolved_dependencies:
            if self.pre_pkgs:
                self.preinstallPkgs()

            self.ts.order()
            cb = rpmmisc.RPMInstallCallback(self.ts)
            installlogfile = "%s/__catched_stderr.buf" % (self.instroot)

            # start to catch stderr output from librpm
            msger.enable_logstderr(installlogfile)

            errors = self.ts.run(cb.callback, '')
            # stop catch
            msger.disable_logstderr()
            self.ts.closeDB()
            self.ts = None

            # ts.run() returns None on success; an empty list means only
            # non-fatal (e.g. scriptlet) errors occurred.
            if errors is not None:
                if len(errors) == 0:
                    msger.warning('scriptlet or other non-fatal errors occurred '
                                  'during transaction.')

                else:
                    for e in errors:
                        msger.warning(e[0])
                    raise RepoError('Could not run transaction.')

        else:
            # Pretty-print each unresolved dependency before aborting.
            for pkg, need, needflags, sense, key in unresolved_dependencies:
                package = '-'.join(pkg)

                if needflags == rpm.RPMSENSE_LESS:
                    deppkg = ' < '.join(need)
                elif needflags == rpm.RPMSENSE_EQUAL:
                    deppkg = ' = '.join(need)
                elif needflags == rpm.RPMSENSE_GREATER:
                    deppkg = ' > '.join(need)
                else:
                    deppkg = '-'.join(need)

                if sense == rpm.RPMDEP_SENSE_REQUIRES:
                    msger.warning("[%s] Requires [%s], which is not provided" \
                                  % (package, deppkg))

                elif sense == rpm.RPMDEP_SENSE_CONFLICTS:
                    msger.warning("[%s] Conflicts with [%s]" %(package,deppkg))

            raise RepoError("Unresolved dependencies, transaction failed.")
874 | |||
875 | def __initialize_transaction(self): | ||
876 | if not self.ts: | ||
877 | self.ts = rpm.TransactionSet(self.instroot) | ||
878 | # Set to not verify DSA signatures. | ||
879 | self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS) | ||
880 | |||
881 | if not self.ts_pre: | ||
882 | self.ts_pre = rpm.TransactionSet(self.instroot) | ||
883 | # Just unpack the files, don't run scripts | ||
884 | self.ts_pre.setFlags(rpm.RPMTRANS_FLAG_ALLFILES | rpm.RPMTRANS_FLAG_NOSCRIPTS) | ||
885 | # Set to not verify DSA signatures. | ||
886 | self.ts_pre.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS) | ||
887 | |||
888 | def checkPkg(self, pkg): | ||
889 | ret = 1 | ||
890 | if not os.path.exists(pkg): | ||
891 | return ret | ||
892 | ret = rpmmisc.checkRpmIntegrity('rpm', pkg) | ||
893 | if ret != 0: | ||
894 | msger.warning("package %s is damaged: %s" \ | ||
895 | % (os.path.basename(pkg), pkg)) | ||
896 | |||
897 | return ret | ||
898 | |||
899 | def _add_prob_flags(self, *flags): | ||
900 | for flag in flags: | ||
901 | if flag not in self.probFilterFlags: | ||
902 | self.probFilterFlags.append(flag) | ||
903 | |||
904 | def get_proxies(self, pobj): | ||
905 | if not pobj: | ||
906 | return None | ||
907 | |||
908 | proxy = None | ||
909 | proxies = None | ||
910 | repoinfo = pobj.repoInfo() | ||
911 | reponame = "%s" % repoinfo.name() | ||
912 | repos = filter(lambda r: r.name == reponame, self.repos) | ||
913 | repourl = str(repoinfo.baseUrls()[0]) | ||
914 | |||
915 | if repos: | ||
916 | proxy = repos[0].proxy | ||
917 | if not proxy: | ||
918 | proxy = get_proxy_for(repourl) | ||
919 | if proxy: | ||
920 | proxies = {str(repourl.split(':')[0]): str(proxy)} | ||
921 | |||
922 | return proxies | ||
923 | |||
924 | def get_url(self, pobj): | ||
925 | if not pobj: | ||
926 | return None | ||
927 | |||
928 | name = str(pobj.repoInfo().name()) | ||
929 | try: | ||
930 | repo = filter(lambda r: r.name == name, self.repos)[0] | ||
931 | except IndexError: | ||
932 | return None | ||
933 | |||
934 | baseurl = repo.baseurl[0] | ||
935 | |||
936 | index = baseurl.find("?") | ||
937 | if index > -1: | ||
938 | baseurl = baseurl[:index] | ||
939 | |||
940 | location = pobj.location() | ||
941 | location = str(location.filename()) | ||
942 | if location.startswith("./"): | ||
943 | location = location[2:] | ||
944 | |||
945 | return os.path.join(baseurl, location) | ||
946 | |||
    def package_url(self, pkgname):
        """Look up the newest available package named *pkgname* and return
        a (url, proxies) tuple for downloading it, or (None, None) if the
        package is not found in any repository.
        """

        # Compare two zypp packages by (epoch, version, release) using
        # rpm's label comparison rules.
        def cmpEVR(p1, p2):
            ed1 = p1.edition()
            ed2 = p2.edition()
            (e1, v1, r1) = map(str, [ed1.epoch(), ed1.version(), ed1.release()])
            (e2, v2, r2) = map(str, [ed2.epoch(), ed2.version(), ed2.release()])
            return rpm.labelCompare((e1, v1, r1), (e2, v2, r2))

        if not self.Z:
            self.__initialize_zypp()

        # Exact-name query over all package kinds in the pool.
        q = zypp.PoolQuery()
        q.addKind(zypp.ResKind.package)
        q.setMatchExact()
        q.addAttribute(zypp.SolvAttr.name, pkgname)
        # Python-2-only sorted(cmp=...); newest version first.
        items = sorted(q.queryResults(self.Z.pool()),
                       cmp=lambda x,y: cmpEVR(zypp.asKindPackage(x), zypp.asKindPackage(y)),
                       reverse=True)

        if items:
            item = zypp.asKindPackage(items[0])
            url = self.get_url(item)
            proxies = self.get_proxies(item)
            return (url, proxies)

        return (None, None)
diff --git a/scripts/lib/mic/plugins/hook/.py b/scripts/lib/mic/plugins/hook/.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/mic/plugins/hook/.py | |||
diff --git a/scripts/lib/mic/plugins/hook/empty_hook.py b/scripts/lib/mic/plugins/hook/empty_hook.py new file mode 100644 index 0000000000..397585d8c1 --- /dev/null +++ b/scripts/lib/mic/plugins/hook/empty_hook.py | |||
@@ -0,0 +1,3 @@ | |||
1 | #!/usr/bin/python | ||
2 | |||
3 | # TODO: plugin base for hooks | ||
diff --git a/scripts/lib/mic/plugins/imager/direct_plugin.py b/scripts/lib/mic/plugins/imager/direct_plugin.py new file mode 100644 index 0000000000..fc7c10c3df --- /dev/null +++ b/scripts/lib/mic/plugins/imager/direct_plugin.py | |||
@@ -0,0 +1,107 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2013, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This implements the 'direct' imager plugin class for 'wic', based | ||
22 | # loosely on the raw imager plugin from 'mic' | ||
23 | # | ||
24 | # AUTHORS | ||
25 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
26 | # | ||
27 | |||
28 | import os | ||
29 | import shutil | ||
30 | import re | ||
31 | import tempfile | ||
32 | |||
33 | from mic import chroot, msger | ||
34 | from mic.utils import misc, fs_related, errors, runner, cmdln | ||
35 | from mic.conf import configmgr | ||
36 | from mic.plugin import pluginmgr | ||
37 | from mic.utils.partitionedfs import PartitionedMount | ||
38 | |||
39 | import mic.imager.direct as direct | ||
40 | from mic.pluginbase import ImagerPlugin | ||
41 | |||
class DirectPlugin(ImagerPlugin):
    """Imager plugin implementing the 'direct' image creation approach
    for wic, based loosely on the raw imager plugin from mic.
    """
    name = 'direct'

    @classmethod
    def __rootfs_dir_to_dict(self, rootfs_dirs):
        """
        Gets a string that contain 'connection=dir' splitted by
        space and return a dict
        """
        krootfs_dir = {}
        for rootfs_dir in rootfs_dirs.split(' '):
            k, v = rootfs_dir.split('=')
            krootfs_dir[k] = v

        return krootfs_dir

    @classmethod
    def do_create(self, subcmd, opts, *args):
        """
        Create direct image, called from creator as 'direct' cmd.

        Expects exactly 9 positional args:
            staging_data_dir, hdddir, native_sysroot, kernel_dir,
            bootimg_dir, rootfs_dir, ksconf, image_output_dir, oe_builddir
        """
        if len(args) != 9:
            # Fixed: the old message "Extra arguments given" was misleading
            # when too FEW arguments were supplied.
            raise errors.Usage("Wrong number of arguments given (expected 9)")

        staging_data_dir = args[0]
        hdddir = args[1]
        native_sysroot = args[2]
        kernel_dir = args[3]
        bootimg_dir = args[4]
        rootfs_dir = args[5]

        creatoropts = configmgr.create
        ksconf = args[6]

        image_output_dir = args[7]
        oe_builddir = args[8]

        # rootfs_dir is a space-separated "connection=dir" list.
        krootfs_dir = self.__rootfs_dir_to_dict(rootfs_dir)

        # Setting _ksconf resets creatoropts['name'] as a side effect.
        configmgr._ksconf = ksconf

        creator = direct.DirectImageCreator(oe_builddir,
                                            image_output_dir,
                                            krootfs_dir,
                                            bootimg_dir,
                                            kernel_dir,
                                            native_sysroot,
                                            hdddir,
                                            staging_data_dir,
                                            creatoropts,
                                            None,
                                            None,
                                            None)

        try:
            creator.mount(None, creatoropts["cachedir"])
            creator.install()
            creator.configure(creatoropts["repomd"])
            creator.print_outimage_info()

        except errors.CreatorError:
            raise
        finally:
            # Always tear down mounts/temp state, even on failure.
            creator.cleanup()

        return 0
diff --git a/scripts/lib/mic/plugins/imager/fs_plugin.py b/scripts/lib/mic/plugins/imager/fs_plugin.py new file mode 100644 index 0000000000..6bcaf00729 --- /dev/null +++ b/scripts/lib/mic/plugins/imager/fs_plugin.py | |||
@@ -0,0 +1,143 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import sys | ||
20 | |||
21 | from mic import chroot, msger | ||
22 | from mic.utils import cmdln, misc, errors, fs_related | ||
23 | from mic.imager import fs | ||
24 | from mic.conf import configmgr | ||
25 | from mic.plugin import pluginmgr | ||
26 | |||
27 | from mic.pluginbase import ImagerPlugin | ||
class FsPlugin(ImagerPlugin):
    """Imager plugin that creates a plain filesystem (directory) image."""
    name = 'fs'

    @classmethod
    @cmdln.option("--include-src",
                  dest="include_src",
                  action="store_true",
                  default=False,
                  help="Generate a image with source rpms included")
    def do_create(self, subcmd, opts, *args):
        """${cmd_name}: create fs image

        Usage:
            ${name} ${cmd_name} <ksfile> [OPTS]

        ${cmd_option_list}
        """

        if len(args) != 1:
            raise errors.Usage("Extra arguments given")

        creatoropts = configmgr.create
        ksconf = args[0]

        # NOTE(review): rt_util is not imported in this file — this branch
        # would raise NameError if taken; verify against the mic package.
        if creatoropts['runtime'] == 'bootstrap':
            configmgr._ksconf = ksconf
            rt_util.bootstrap_mic()

        recording_pkgs = []
        if len(creatoropts['record_pkgs']) > 0:
            recording_pkgs = creatoropts['record_pkgs']

        if creatoropts['release'] is not None:
            if 'name' not in recording_pkgs:
                recording_pkgs.append('name')
            if 'vcs' not in recording_pkgs:
                recording_pkgs.append('vcs')

        configmgr._ksconf = ksconf

        # Called After setting the configmgr._ksconf as the creatoropts['name'] is reset there.
        if creatoropts['release'] is not None:
            creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'], creatoropts['release'], creatoropts['name'])

        # try to find the pkgmgr
        pkgmgr = None
        backends = pluginmgr.get_plugins('backend')
        if 'auto' == creatoropts['pkgmgr']:
            # 'auto': first preferred backend that is actually available.
            for key in configmgr.prefer_backends:
                if key in backends:
                    pkgmgr = backends[key]
                    break
        else:
            for key in backends.keys():
                if key == creatoropts['pkgmgr']:
                    pkgmgr = backends[key]
                    break

        if not pkgmgr:
            raise errors.CreatorError("Can't find backend: %s, "
                                      "available choices: %s" %
                                      (creatoropts['pkgmgr'],
                                       ','.join(backends.keys())))

        creator = fs.FsImageCreator(creatoropts, pkgmgr)
        creator._include_src = opts.include_src

        if len(recording_pkgs) > 0:
            creator._recording_pkgs = recording_pkgs

        self.check_image_exists(creator.destdir,
                                creator.pack_to,
                                [creator.name],
                                creatoropts['release'])

        try:
            creator.check_depend_tools()
            creator.mount(None, creatoropts["cachedir"])
            creator.install()
            #Download the source packages ###private options
            if opts.include_src:
                installed_pkgs = creator.get_installed_packages()
                msger.info('--------------------------------------------------')
                msger.info('Generating the image with source rpms included ...')
                if not misc.SrcpkgsDownload(installed_pkgs, creatoropts["repomd"], creator._instroot, creatoropts["cachedir"]):
                    msger.warning("Source packages can't be downloaded")

            creator.configure(creatoropts["repomd"])
            creator.copy_kernel()
            creator.unmount()
            creator.package(creatoropts["outdir"])
            if creatoropts['release'] is not None:
                creator.release_output(ksconf, creatoropts['outdir'], creatoropts['release'])
            creator.print_outimage_info()
        except errors.CreatorError:
            raise
        finally:
            creator.cleanup()

        msger.info("Finished.")
        return 0

    @classmethod
    def do_chroot(self, target, cmd=[]):#chroot.py parse opts&args
        """Chroot into the fs image directory *target* and run *cmd*
        (or an interactive /bin/bash when *cmd* is empty).
        """
        try:
            if len(cmd) != 0:
                cmdline = ' '.join(cmd)
            else:
                cmdline = "/bin/bash"
            envcmd = fs_related.find_binary_inchroot("env", target)
            if envcmd:
                cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
            chroot.chroot(target, None, cmdline)
        finally:
            chroot.cleanup_after_chroot("dir", None, None, None)
            # NOTE(review): return inside finally swallows any in-flight
            # exception from chroot() — presumably intentional; confirm.
            return 1
diff --git a/scripts/lib/mic/plugins/imager/livecd_plugin.py b/scripts/lib/mic/plugins/imager/livecd_plugin.py new file mode 100644 index 0000000000..82cb1af7dc --- /dev/null +++ b/scripts/lib/mic/plugins/imager/livecd_plugin.py | |||
@@ -0,0 +1,255 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import shutil | ||
20 | import tempfile | ||
21 | |||
22 | from mic import chroot, msger | ||
23 | from mic.utils import misc, fs_related, errors | ||
24 | from mic.conf import configmgr | ||
25 | import mic.imager.livecd as livecd | ||
26 | from mic.plugin import pluginmgr | ||
27 | |||
28 | from mic.pluginbase import ImagerPlugin | ||
class LiveCDPlugin(ImagerPlugin):
    """Imager plugin that creates, packs, unpacks and chroots into
    live CD (ISO) images. Python 2 only (old-style except, long literals).
    """
    name = 'livecd'

    @classmethod
    def do_create(self, subcmd, opts, *args):
        """${cmd_name}: create livecd image

        Usage:
            ${name} ${cmd_name} <ksfile> [OPTS]

        ${cmd_option_list}
        """

        if len(args) != 1:
            raise errors.Usage("Extra arguments given")

        creatoropts = configmgr.create
        ksconf = args[0]

        # NOTE(review): rt_util is not imported in this file — this branch
        # would raise NameError if taken; verify against the mic package.
        if creatoropts['runtime'] == 'bootstrap':
            configmgr._ksconf = ksconf
            rt_util.bootstrap_mic()

        # livecd images are not supported on ARM targets.
        if creatoropts['arch'] and creatoropts['arch'].startswith('arm'):
            msger.warning('livecd cannot support arm images, Quit')
            return

        recording_pkgs = []
        if len(creatoropts['record_pkgs']) > 0:
            recording_pkgs = creatoropts['record_pkgs']

        if creatoropts['release'] is not None:
            if 'name' not in recording_pkgs:
                recording_pkgs.append('name')
            if 'vcs' not in recording_pkgs:
                recording_pkgs.append('vcs')

        configmgr._ksconf = ksconf

        # Called After setting the configmgr._ksconf as the creatoropts['name'] is reset there.
        if creatoropts['release'] is not None:
            creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'], creatoropts['release'], creatoropts['name'])

        # try to find the pkgmgr
        pkgmgr = None
        backends = pluginmgr.get_plugins('backend')
        if 'auto' == creatoropts['pkgmgr']:
            # 'auto': first preferred backend that is actually available.
            for key in configmgr.prefer_backends:
                if key in backends:
                    pkgmgr = backends[key]
                    break
        else:
            for key in backends.keys():
                if key == creatoropts['pkgmgr']:
                    pkgmgr = backends[key]
                    break

        if not pkgmgr:
            raise errors.CreatorError("Can't find backend: %s, "
                                      "available choices: %s" %
                                      (creatoropts['pkgmgr'],
                                       ','.join(backends.keys())))

        creator = livecd.LiveCDImageCreator(creatoropts, pkgmgr)

        if len(recording_pkgs) > 0:
            creator._recording_pkgs = recording_pkgs

        self.check_image_exists(creator.destdir,
                                creator.pack_to,
                                [creator.name + ".iso"],
                                creatoropts['release'])

        try:
            creator.check_depend_tools()
            creator.mount(None, creatoropts["cachedir"])
            creator.install()
            creator.configure(creatoropts["repomd"])
            creator.copy_kernel()
            creator.unmount()
            creator.package(creatoropts["outdir"])
            if creatoropts['release'] is not None:
                creator.release_output(ksconf, creatoropts['outdir'], creatoropts['release'])
            creator.print_outimage_info()

        except errors.CreatorError:
            raise
        finally:
            creator.cleanup()

        msger.info("Finished.")
        return 0

    @classmethod
    def do_chroot(cls, target, cmd=[]):
        """Unpack the live CD ISO *target*, loop-mount the embedded os
        image, and chroot into it running *cmd* (default /bin/bash).
        """
        os_image = cls.do_unpack(target)
        os_image_dir = os.path.dirname(os_image)

        # unpack image to target dir
        imgsize = misc.get_file_size(os_image) * 1024L * 1024L
        imgtype = misc.get_image_type(os_image)
        if imgtype == "btrfsimg":
            fstype = "btrfs"
            myDiskMount = fs_related.BtrfsDiskMount
        elif imgtype in ("ext3fsimg", "ext4fsimg"):
            fstype = imgtype[:4]
            myDiskMount = fs_related.ExtDiskMount
        else:
            # NOTE(review): fstype is unbound on this path; the %-format
            # itself would raise NameError. Same pattern in do_pack.
            raise errors.CreatorError("Unsupported filesystem type: %s" % fstype)

        extmnt = misc.mkdtemp()
        extloop = myDiskMount(fs_related.SparseLoopbackDisk(os_image, imgsize),
                              extmnt,
                              fstype,
                              4096,
                              "%s label" % fstype)
        try:
            extloop.mount()

        except errors.MountError:
            extloop.cleanup()
            shutil.rmtree(extmnt, ignore_errors = True)
            shutil.rmtree(os_image_dir, ignore_errors = True)
            raise

        try:
            if len(cmd) != 0:
                cmdline = ' '.join(cmd)
            else:
                cmdline = "/bin/bash"
            envcmd = fs_related.find_binary_inchroot("env", extmnt)
            if envcmd:
                cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
            chroot.chroot(extmnt, None, cmdline)
        except:
            raise errors.CreatorError("Failed to chroot to %s." %target)
        finally:
            chroot.cleanup_after_chroot("img", extloop, os_image_dir, extmnt)

    @classmethod
    def do_pack(cls, base_on):
        """Convert the raw os image *base_on* into a packaged live CD:
        build an initrd and boot config inside it, then package as ISO.
        The directory containing *base_on* is removed afterwards.
        """
        import subprocess

        # Build the live initrd inside the image via chroot'd subprocess.
        def __mkinitrd(instance):
            kernelver = instance._get_kernel_versions().values()[0][0]
            args = [ "/usr/libexec/mkliveinitrd", "/boot/initrd-%s.img" % kernelver, "%s" % kernelver ]
            try:
                subprocess.call(args, preexec_fn = instance._chroot)
            except OSError, (err, msg):
                raise errors.CreatorError("Failed to execute /usr/libexec/mkliveinitrd: %s" % msg)

        # Remove the generated initrd from the image after boot config.
        def __run_post_cleanups(instance):
            kernelver = instance._get_kernel_versions().values()[0][0]
            args = ["rm", "-f", "/boot/initrd-%s.img" % kernelver]

            try:
                subprocess.call(args, preexec_fn = instance._chroot)
            except OSError, (err, msg):
                raise errors.CreatorError("Failed to run post cleanups: %s" % msg)

        convertoropts = configmgr.convert
        convertoropts['name'] = os.path.splitext(os.path.basename(base_on))[0]
        convertor = livecd.LiveCDImageCreator(convertoropts)
        imgtype = misc.get_image_type(base_on)
        if imgtype == "btrfsimg":
            fstype = "btrfs"
        elif imgtype in ("ext3fsimg", "ext4fsimg"):
            fstype = imgtype[:4]
        else:
            raise errors.CreatorError("Unsupported filesystem type: %s" % fstype)
        convertor._set_fstype(fstype)
        try:
            convertor.mount(base_on)
            __mkinitrd(convertor)
            convertor._create_bootconfig()
            __run_post_cleanups(convertor)
            convertor.launch_shell(convertoropts['shell'])
            convertor.unmount()
            convertor.package()
            convertor.print_outimage_info()
        finally:
            shutil.rmtree(os.path.dirname(base_on), ignore_errors = True)

    @classmethod
    def do_unpack(cls, srcimg):
        """Mount the live CD ISO *srcimg*, extract its squashfs, and copy
        the embedded os image to a fresh temp dir; return that path.
        """
        img = srcimg
        imgmnt = misc.mkdtemp()
        imgloop = fs_related.DiskMount(fs_related.LoopbackDisk(img, 0), imgmnt)
        try:
            imgloop.mount()
        except errors.MountError:
            imgloop.cleanup()
            raise

        # legacy LiveOS filesystem layout support, remove for F9 or F10
        if os.path.exists(imgmnt + "/squashfs.img"):
            squashimg = imgmnt + "/squashfs.img"
        else:
            squashimg = imgmnt + "/LiveOS/squashfs.img"

        tmpoutdir = misc.mkdtemp()
        # unsquashfs requires outdir mustn't exist
        shutil.rmtree(tmpoutdir, ignore_errors = True)
        misc.uncompress_squashfs(squashimg, tmpoutdir)

        try:
            # legacy LiveOS filesystem layout support, remove for F9 or F10
            if os.path.exists(tmpoutdir + "/os.img"):
                os_image = tmpoutdir + "/os.img"
            else:
                os_image = tmpoutdir + "/LiveOS/ext3fs.img"

            if not os.path.exists(os_image):
                raise errors.CreatorError("'%s' is not a valid live CD ISO : neither "
                                          "LiveOS/ext3fs.img nor os.img exist" %img)

            imgname = os.path.basename(srcimg)
            imgname = os.path.splitext(imgname)[0] + ".img"
            rtimage = os.path.join(tempfile.mkdtemp(dir = "/var/tmp", prefix = "tmp"), imgname)
            shutil.copyfile(os_image, rtimage)

        finally:
            # Always unmount the ISO and drop the temporary extraction dir.
            imgloop.cleanup()
            shutil.rmtree(tmpoutdir, ignore_errors = True)
            shutil.rmtree(imgmnt, ignore_errors = True)

        return rtimage
diff --git a/scripts/lib/mic/plugins/imager/liveusb_plugin.py b/scripts/lib/mic/plugins/imager/liveusb_plugin.py new file mode 100644 index 0000000000..3d53c84410 --- /dev/null +++ b/scripts/lib/mic/plugins/imager/liveusb_plugin.py | |||
@@ -0,0 +1,260 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import shutil | ||
20 | import tempfile | ||
21 | |||
22 | from mic import chroot, msger | ||
23 | from mic.utils import misc, fs_related, errors | ||
24 | from mic.utils.partitionedfs import PartitionedMount | ||
25 | from mic.conf import configmgr | ||
26 | from mic.plugin import pluginmgr | ||
27 | |||
28 | import mic.imager.liveusb as liveusb | ||
29 | |||
30 | from mic.pluginbase import ImagerPlugin | ||
31 | class LiveUSBPlugin(ImagerPlugin): | ||
32 | name = 'liveusb' | ||
33 | |||
34 | @classmethod | ||
35 | def do_create(self, subcmd, opts, *args): | ||
36 | """${cmd_name}: create liveusb image | ||
37 | |||
38 | Usage: | ||
39 | ${name} ${cmd_name} <ksfile> [OPTS] | ||
40 | |||
41 | ${cmd_option_list} | ||
42 | """ | ||
43 | |||
44 | if len(args) != 1: | ||
45 | raise errors.Usage("Extra arguments given") | ||
46 | |||
47 | creatoropts = configmgr.create | ||
48 | ksconf = args[0] | ||
49 | |||
50 | if creatoropts['runtime'] == "bootstrap": | ||
51 | configmgr._ksconf = ksconf | ||
52 | rt_util.bootstrap_mic() | ||
53 | |||
54 | if creatoropts['arch'] and creatoropts['arch'].startswith('arm'): | ||
55 | msger.warning('liveusb cannot support arm images, Quit') | ||
56 | return | ||
57 | |||
58 | recording_pkgs = [] | ||
59 | if len(creatoropts['record_pkgs']) > 0: | ||
60 | recording_pkgs = creatoropts['record_pkgs'] | ||
61 | |||
62 | if creatoropts['release'] is not None: | ||
63 | if 'name' not in recording_pkgs: | ||
64 | recording_pkgs.append('name') | ||
65 | if 'vcs' not in recording_pkgs: | ||
66 | recording_pkgs.append('vcs') | ||
67 | |||
68 | configmgr._ksconf = ksconf | ||
69 | |||
70 | # Called After setting the configmgr._ksconf as the creatoropts['name'] is reset there. | ||
71 | if creatoropts['release'] is not None: | ||
72 | creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'], creatoropts['release'], creatoropts['name']) | ||
73 | |||
74 | # try to find the pkgmgr | ||
75 | pkgmgr = None | ||
76 | backends = pluginmgr.get_plugins('backend') | ||
77 | if 'auto' == creatoropts['pkgmgr']: | ||
78 | for key in configmgr.prefer_backends: | ||
79 | if key in backends: | ||
80 | pkgmgr = backends[key] | ||
81 | break | ||
82 | else: | ||
83 | for key in backends.keys(): | ||
84 | if key == creatoropts['pkgmgr']: | ||
85 | pkgmgr = backends[key] | ||
86 | break | ||
87 | |||
88 | if not pkgmgr: | ||
89 | raise errors.CreatorError("Can't find backend: %s, " | ||
90 | "available choices: %s" % | ||
91 | (creatoropts['pkgmgr'], | ||
92 | ','.join(backends.keys()))) | ||
93 | |||
94 | creator = liveusb.LiveUSBImageCreator(creatoropts, pkgmgr) | ||
95 | |||
96 | if len(recording_pkgs) > 0: | ||
97 | creator._recording_pkgs = recording_pkgs | ||
98 | |||
99 | self.check_image_exists(creator.destdir, | ||
100 | creator.pack_to, | ||
101 | [creator.name + ".usbimg"], | ||
102 | creatoropts['release']) | ||
103 | try: | ||
104 | creator.check_depend_tools() | ||
105 | creator.mount(None, creatoropts["cachedir"]) | ||
106 | creator.install() | ||
107 | creator.configure(creatoropts["repomd"]) | ||
108 | creator.copy_kernel() | ||
109 | creator.unmount() | ||
110 | creator.package(creatoropts["outdir"]) | ||
111 | if creatoropts['release'] is not None: | ||
112 | creator.release_output(ksconf, creatoropts['outdir'], creatoropts['release']) | ||
113 | creator.print_outimage_info() | ||
114 | |||
115 | except errors.CreatorError: | ||
116 | raise | ||
117 | finally: | ||
118 | creator.cleanup() | ||
119 | |||
120 | msger.info("Finished.") | ||
121 | return 0 | ||
122 | |||
123 | @classmethod | ||
124 | def do_chroot(cls, target, cmd=[]): | ||
125 | os_image = cls.do_unpack(target) | ||
126 | os_image_dir = os.path.dirname(os_image) | ||
127 | |||
128 | # unpack image to target dir | ||
129 | imgsize = misc.get_file_size(os_image) * 1024L * 1024L | ||
130 | imgtype = misc.get_image_type(os_image) | ||
131 | if imgtype == "btrfsimg": | ||
132 | fstype = "btrfs" | ||
133 | myDiskMount = fs_related.BtrfsDiskMount | ||
134 | elif imgtype in ("ext3fsimg", "ext4fsimg"): | ||
135 | fstype = imgtype[:4] | ||
136 | myDiskMount = fs_related.ExtDiskMount | ||
137 | else: | ||
138 | raise errors.CreatorError("Unsupported filesystem type: %s" % fstype) | ||
139 | |||
140 | extmnt = misc.mkdtemp() | ||
141 | extloop = myDiskMount(fs_related.SparseLoopbackDisk(os_image, imgsize), | ||
142 | extmnt, | ||
143 | fstype, | ||
144 | 4096, | ||
145 | "%s label" % fstype) | ||
146 | |||
147 | try: | ||
148 | extloop.mount() | ||
149 | |||
150 | except errors.MountError: | ||
151 | extloop.cleanup() | ||
152 | shutil.rmtree(extmnt, ignore_errors = True) | ||
153 | raise | ||
154 | |||
155 | try: | ||
156 | if len(cmd) != 0: | ||
157 | cmdline = ' '.join(cmd) | ||
158 | else: | ||
159 | cmdline = "/bin/bash" | ||
160 | envcmd = fs_related.find_binary_inchroot("env", extmnt) | ||
161 | if envcmd: | ||
162 | cmdline = "%s HOME=/root %s" % (envcmd, cmdline) | ||
163 | chroot.chroot(extmnt, None, cmdline) | ||
164 | except: | ||
165 | raise errors.CreatorError("Failed to chroot to %s." %target) | ||
166 | finally: | ||
167 | chroot.cleanup_after_chroot("img", extloop, os_image_dir, extmnt) | ||
168 | |||
169 | @classmethod | ||
170 | def do_pack(cls, base_on): | ||
171 | import subprocess | ||
172 | |||
173 | def __mkinitrd(instance): | ||
174 | kernelver = instance._get_kernel_versions().values()[0][0] | ||
175 | args = [ "/usr/libexec/mkliveinitrd", "/boot/initrd-%s.img" % kernelver, "%s" % kernelver ] | ||
176 | try: | ||
177 | subprocess.call(args, preexec_fn = instance._chroot) | ||
178 | |||
179 | except OSError, (err, msg): | ||
180 | raise errors.CreatorError("Failed to execute /usr/libexec/mkliveinitrd: %s" % msg) | ||
181 | |||
182 | def __run_post_cleanups(instance): | ||
183 | kernelver = instance._get_kernel_versions().values()[0][0] | ||
184 | args = ["rm", "-f", "/boot/initrd-%s.img" % kernelver] | ||
185 | |||
186 | try: | ||
187 | subprocess.call(args, preexec_fn = instance._chroot) | ||
188 | except OSError, (err, msg): | ||
189 | raise errors.CreatorError("Failed to run post cleanups: %s" % msg) | ||
190 | |||
191 | convertoropts = configmgr.convert | ||
192 | convertoropts['name'] = os.path.splitext(os.path.basename(base_on))[0] | ||
193 | convertor = liveusb.LiveUSBImageCreator(convertoropts) | ||
194 | imgtype = misc.get_image_type(base_on) | ||
195 | if imgtype == "btrfsimg": | ||
196 | fstype = "btrfs" | ||
197 | elif imgtype in ("ext3fsimg", "ext4fsimg"): | ||
198 | fstype = imgtype[:4] | ||
199 | else: | ||
200 | raise errors.CreatorError("Unsupported filesystem type: %s" % fstyp) | ||
201 | convertor._set_fstype(fstype) | ||
202 | try: | ||
203 | convertor.mount(base_on) | ||
204 | __mkinitrd(convertor) | ||
205 | convertor._create_bootconfig() | ||
206 | __run_post_cleanups(convertor) | ||
207 | convertor.launch_shell(convertoropts['shell']) | ||
208 | convertor.unmount() | ||
209 | convertor.package() | ||
210 | convertor.print_outimage_info() | ||
211 | finally: | ||
212 | shutil.rmtree(os.path.dirname(base_on), ignore_errors = True) | ||
213 | |||
214 | @classmethod | ||
215 | def do_unpack(cls, srcimg): | ||
216 | img = srcimg | ||
217 | imgsize = misc.get_file_size(img) * 1024L * 1024L | ||
218 | imgmnt = misc.mkdtemp() | ||
219 | disk = fs_related.SparseLoopbackDisk(img, imgsize) | ||
220 | imgloop = PartitionedMount(imgmnt, skipformat = True) | ||
221 | imgloop.add_disk('/dev/sdb', disk) | ||
222 | imgloop.add_partition(imgsize/1024/1024, "/dev/sdb", "/", "vfat", boot=False) | ||
223 | try: | ||
224 | imgloop.mount() | ||
225 | except errors.MountError: | ||
226 | imgloop.cleanup() | ||
227 | raise | ||
228 | |||
229 | # legacy LiveOS filesystem layout support, remove for F9 or F10 | ||
230 | if os.path.exists(imgmnt + "/squashfs.img"): | ||
231 | squashimg = imgmnt + "/squashfs.img" | ||
232 | else: | ||
233 | squashimg = imgmnt + "/LiveOS/squashfs.img" | ||
234 | |||
235 | tmpoutdir = misc.mkdtemp() | ||
236 | # unsquashfs requires outdir mustn't exist | ||
237 | shutil.rmtree(tmpoutdir, ignore_errors = True) | ||
238 | misc.uncompress_squashfs(squashimg, tmpoutdir) | ||
239 | |||
240 | try: | ||
241 | # legacy LiveOS filesystem layout support, remove for F9 or F10 | ||
242 | if os.path.exists(tmpoutdir + "/os.img"): | ||
243 | os_image = tmpoutdir + "/os.img" | ||
244 | else: | ||
245 | os_image = tmpoutdir + "/LiveOS/ext3fs.img" | ||
246 | |||
247 | if not os.path.exists(os_image): | ||
248 | raise errors.CreatorError("'%s' is not a valid live CD ISO : neither " | ||
249 | "LiveOS/ext3fs.img nor os.img exist" %img) | ||
250 | imgname = os.path.basename(srcimg) | ||
251 | imgname = os.path.splitext(imgname)[0] + ".img" | ||
252 | rtimage = os.path.join(tempfile.mkdtemp(dir = "/var/tmp", prefix = "tmp"), imgname) | ||
253 | shutil.copyfile(os_image, rtimage) | ||
254 | |||
255 | finally: | ||
256 | imgloop.cleanup() | ||
257 | shutil.rmtree(tmpoutdir, ignore_errors = True) | ||
258 | shutil.rmtree(imgmnt, ignore_errors = True) | ||
259 | |||
260 | return rtimage | ||
diff --git a/scripts/lib/mic/plugins/imager/loop_plugin.py b/scripts/lib/mic/plugins/imager/loop_plugin.py new file mode 100644 index 0000000000..2a05b3c238 --- /dev/null +++ b/scripts/lib/mic/plugins/imager/loop_plugin.py | |||
@@ -0,0 +1,255 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import shutil | ||
20 | import tempfile | ||
21 | |||
22 | from mic import chroot, msger | ||
23 | from mic.utils import misc, fs_related, errors, cmdln | ||
24 | from mic.conf import configmgr | ||
25 | from mic.plugin import pluginmgr | ||
26 | from mic.imager.loop import LoopImageCreator, load_mountpoints | ||
27 | |||
28 | from mic.pluginbase import ImagerPlugin | ||
class LoopPlugin(ImagerPlugin):
    # Imager plugin producing loop(back) filesystem images, optionally
    # packed into a tarball, from a kickstart file.
    name = 'loop'

    @classmethod
    @cmdln.option("--compress-disk-image", dest="compress_image",
                  type='choice', choices=("gz", "bz2"), default=None,
                  help="Same with --compress-image")
    # alias to compress-image for compatibility
    @cmdln.option("--compress-image", dest="compress_image",
                  type='choice', choices=("gz", "bz2"), default=None,
                  help="Compress all loop images with 'gz' or 'bz2'")
    @cmdln.option("--shrink", action='store_true', default=False,
                  help="Whether to shrink loop images to minimal size")
    def do_create(self, subcmd, opts, *args):
        """${cmd_name}: create loop image

        Usage:
            ${name} ${cmd_name} <ksfile> [OPTS]

        ${cmd_option_list}
        """

        if len(args) != 1:
            raise errors.Usage("Extra arguments given")

        creatoropts = configmgr.create
        ksconf = args[0]

        if creatoropts['runtime'] == "bootstrap":
            configmgr._ksconf = ksconf
            # NOTE(review): rt_util is not imported in this module; this
            # branch looks like it would raise NameError -- confirm the
            # intended import.
            rt_util.bootstrap_mic()

        # Packages whose info should be recorded into the release output.
        recording_pkgs = []
        if len(creatoropts['record_pkgs']) > 0:
            recording_pkgs = creatoropts['record_pkgs']

        if creatoropts['release'] is not None:
            if 'name' not in recording_pkgs:
                recording_pkgs.append('name')
            if 'vcs' not in recording_pkgs:
                recording_pkgs.append('vcs')

        configmgr._ksconf = ksconf

        # Called After setting the configmgr._ksconf
        # as the creatoropts['name'] is reset there.
        if creatoropts['release'] is not None:
            creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'],
                                                          creatoropts['release'],
                                                          creatoropts['name'])
        # try to find the pkgmgr
        pkgmgr = None
        backends = pluginmgr.get_plugins('backend')
        if 'auto' == creatoropts['pkgmgr']:
            for key in configmgr.prefer_backends:
                if key in backends:
                    pkgmgr = backends[key]
                    break
        else:
            for key in backends.keys():
                if key == creatoropts['pkgmgr']:
                    pkgmgr = backends[key]
                    break

        if not pkgmgr:
            raise errors.CreatorError("Can't find backend: %s, "
                                      "available choices: %s" %
                                      (creatoropts['pkgmgr'],
                                       ','.join(backends.keys())))

        creator = LoopImageCreator(creatoropts,
                                   pkgmgr,
                                   opts.compress_image,
                                   opts.shrink)

        if len(recording_pkgs) > 0:
            creator._recording_pkgs = recording_pkgs

        image_names = [creator.name + ".img"]
        image_names.extend(creator.get_image_names())
        self.check_image_exists(creator.destdir,
                                creator.pack_to,
                                image_names,
                                creatoropts['release'])

        try:
            creator.check_depend_tools()
            creator.mount(None, creatoropts["cachedir"])
            creator.install()
            creator.configure(creatoropts["repomd"])
            creator.copy_kernel()
            creator.unmount()
            creator.package(creatoropts["outdir"])

            if creatoropts['release'] is not None:
                creator.release_output(ksconf,
                                       creatoropts['outdir'],
                                       creatoropts['release'])
            creator.print_outimage_info()

        except errors.CreatorError:
            raise
        finally:
            creator.cleanup()

        msger.info("Finished.")
        return 0

    @classmethod
    def _do_chroot_tar(cls, target, cmd=[]):
        # Chroot into a tar-packed loop image: extract the tarball, mount
        # every contained loop image at the mount point recorded in the
        # companion .xml file, then chroot into the assembled tree.
        mountfp_xml = os.path.splitext(target)[0] + '.xml'
        if not os.path.exists(mountfp_xml):
            raise errors.CreatorError("No mount point file found for this tar "
                                      "image, please check %s" % mountfp_xml)

        import tarfile
        tar = tarfile.open(target, 'r')
        tmpdir = misc.mkdtemp()
        tar.extractall(path=tmpdir)
        tar.close()

        mntdir = misc.mkdtemp()

        # Mounted loops so far, in mount order; unwound in reverse below.
        loops = []
        for (mp, label, name, size, fstype) in load_mountpoints(mountfp_xml):
            if fstype in ("ext2", "ext3", "ext4"):
                myDiskMount = fs_related.ExtDiskMount
            elif fstype == "btrfs":
                myDiskMount = fs_related.BtrfsDiskMount
            elif fstype in ("vfat", "msdos"):
                myDiskMount = fs_related.VfatDiskMount
            else:
                msger.error("Cannot support fstype: %s" % fstype)

            name = os.path.join(tmpdir, name)
            size = size * 1024L * 1024L
            loop = myDiskMount(fs_related.SparseLoopbackDisk(name, size),
                               os.path.join(mntdir, mp.lstrip('/')),
                               fstype, size, label)

            try:
                msger.verbose("Mount %s to %s" % (mp, mntdir + mp))
                fs_related.makedirs(os.path.join(mntdir, mp.lstrip('/')))
                loop.mount()

            except:
                # One mount failed: clean up that loop plus everything
                # already mounted (reverse order) before re-raising.
                loop.cleanup()
                for lp in reversed(loops):
                    chroot.cleanup_after_chroot("img", lp, None, mntdir)

                shutil.rmtree(tmpdir, ignore_errors=True)
                raise

            loops.append(loop)

        try:
            if len(cmd) != 0:
                cmdline = "/usr/bin/env HOME=/root " + ' '.join(cmd)
            else:
                cmdline = "/usr/bin/env HOME=/root /bin/bash"
            chroot.chroot(mntdir, None, cmdline)
        except:
            raise errors.CreatorError("Failed to chroot to %s." % target)
        finally:
            # Unmount in reverse of mount order so nested mount points
            # are released before their parents.
            for loop in reversed(loops):
                chroot.cleanup_after_chroot("img", loop, None, mntdir)

            shutil.rmtree(tmpdir, ignore_errors=True)

    @classmethod
    def do_chroot(cls, target, cmd=[]):
        # Chroot into a loop image; tarballs are delegated to
        # _do_chroot_tar, plain filesystem images are loop-mounted here.
        if target.endswith('.tar'):
            import tarfile
            if tarfile.is_tarfile(target):
                LoopPlugin._do_chroot_tar(target, cmd)
                return
            else:
                raise errors.CreatorError("damaged tarball for loop images")

        img = target
        imgsize = misc.get_file_size(img) * 1024L * 1024L
        imgtype = misc.get_image_type(img)
        if imgtype == "btrfsimg":
            fstype = "btrfs"
            myDiskMount = fs_related.BtrfsDiskMount
        elif imgtype in ("ext3fsimg", "ext4fsimg"):
            fstype = imgtype[:4]
            myDiskMount = fs_related.ExtDiskMount
        else:
            raise errors.CreatorError("Unsupported filesystem type: %s" \
                                      % imgtype)

        extmnt = misc.mkdtemp()
        extloop = myDiskMount(fs_related.SparseLoopbackDisk(img, imgsize),
                              extmnt,
                              fstype,
                              4096,
                              "%s label" % fstype)
        try:
            extloop.mount()

        except errors.MountError:
            extloop.cleanup()
            shutil.rmtree(extmnt, ignore_errors=True)
            raise

        try:
            if len(cmd) != 0:
                cmdline = ' '.join(cmd)
            else:
                cmdline = "/bin/bash"
            # Run through env (if present in the chroot) so HOME is sane.
            envcmd = fs_related.find_binary_inchroot("env", extmnt)
            if envcmd:
                cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
            chroot.chroot(extmnt, None, cmdline)
        except:
            raise errors.CreatorError("Failed to chroot to %s." % img)
        finally:
            chroot.cleanup_after_chroot("img", extloop, None, extmnt)

    @classmethod
    def do_unpack(cls, srcimg):
        # Copy the loop image into a fresh /var/tmp dir as "target.img"
        # for later conversion/chroot use; returns the copy's path.
        image = os.path.join(tempfile.mkdtemp(dir="/var/tmp", prefix="tmp"),
                             "target.img")
        msger.info("Copying file system ...")
        shutil.copyfile(srcimg, image)
        return image
diff --git a/scripts/lib/mic/plugins/imager/raw_plugin.py b/scripts/lib/mic/plugins/imager/raw_plugin.py new file mode 100644 index 0000000000..f9625b87e8 --- /dev/null +++ b/scripts/lib/mic/plugins/imager/raw_plugin.py | |||
@@ -0,0 +1,275 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import shutil | ||
20 | import re | ||
21 | import tempfile | ||
22 | |||
23 | from mic import chroot, msger | ||
24 | from mic.utils import misc, fs_related, errors, runner, cmdln | ||
25 | from mic.conf import configmgr | ||
26 | from mic.plugin import pluginmgr | ||
27 | from mic.utils.partitionedfs import PartitionedMount | ||
28 | |||
29 | import mic.imager.raw as raw | ||
30 | |||
31 | from mic.pluginbase import ImagerPlugin | ||
class RawPlugin(ImagerPlugin):
    # Imager plugin producing raw (partitioned disk) images from a
    # kickstart file.
    name = 'raw'

    @classmethod
    @cmdln.option("--compress-disk-image", dest="compress_image", type='choice',
                  choices=("gz", "bz2"), default=None,
                  help="Same with --compress-image")
    @cmdln.option("--compress-image", dest="compress_image", type='choice',
                  choices=("gz", "bz2"), default = None,
                  help="Compress all raw images before package")
    @cmdln.option("--generate-bmap", action="store_true", default = None,
                  help="also generate the block map file")
    @cmdln.option("--fstab-entry", dest="fstab_entry", type='choice',
                  choices=("name", "uuid"), default="uuid",
                  help="Set fstab entry, 'name' means using device names, "
                       "'uuid' means using filesystem uuid")
    def do_create(self, subcmd, opts, *args):
        """${cmd_name}: create raw image

        Usage:
            ${name} ${cmd_name} <ksfile> [OPTS]

        ${cmd_option_list}
        """

        if len(args) != 1:
            raise errors.Usage("Extra arguments given")

        creatoropts = configmgr.create
        ksconf = args[0]

        if creatoropts['runtime'] == "bootstrap":
            configmgr._ksconf = ksconf
            # NOTE(review): rt_util is not imported in this module; this
            # branch looks like it would raise NameError -- confirm the
            # intended import.
            rt_util.bootstrap_mic()

        # Packages whose info should be recorded into the release output.
        recording_pkgs = []
        if len(creatoropts['record_pkgs']) > 0:
            recording_pkgs = creatoropts['record_pkgs']

        if creatoropts['release'] is not None:
            if 'name' not in recording_pkgs:
                recording_pkgs.append('name')
            if 'vcs' not in recording_pkgs:
                recording_pkgs.append('vcs')

        configmgr._ksconf = ksconf

        # Called After setting the configmgr._ksconf as the creatoropts['name'] is reset there.
        if creatoropts['release'] is not None:
            creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'], creatoropts['release'], creatoropts['name'])

        # try to find the pkgmgr
        pkgmgr = None
        backends = pluginmgr.get_plugins('backend')
        if 'auto' == creatoropts['pkgmgr']:
            for key in configmgr.prefer_backends:
                if key in backends:
                    pkgmgr = backends[key]
                    break
        else:
            for key in backends.keys():
                if key == creatoropts['pkgmgr']:
                    pkgmgr = backends[key]
                    break

        if not pkgmgr:
            raise errors.CreatorError("Can't find backend: %s, "
                                      "available choices: %s" %
                                      (creatoropts['pkgmgr'],
                                       ','.join(backends.keys())))

        creator = raw.RawImageCreator(creatoropts, pkgmgr, opts.compress_image,
                                      opts.generate_bmap, opts.fstab_entry)

        if len(recording_pkgs) > 0:
            creator._recording_pkgs = recording_pkgs

        # One "<name>-<disk>.raw" output per configured disk.
        images = ["%s-%s.raw" % (creator.name, disk_name)
                  for disk_name in creator.get_disk_names()]
        self.check_image_exists(creator.destdir,
                                creator.pack_to,
                                images,
                                creatoropts['release'])

        try:
            creator.check_depend_tools()
            creator.mount(None, creatoropts["cachedir"])
            creator.install()
            creator.configure(creatoropts["repomd"])
            creator.copy_kernel()
            creator.unmount()
            creator.generate_bmap()
            creator.package(creatoropts["outdir"])
            if creatoropts['release'] is not None:
                creator.release_output(ksconf, creatoropts['outdir'], creatoropts['release'])
            creator.print_outimage_info()

        except errors.CreatorError:
            raise
        finally:
            creator.cleanup()

        msger.info("Finished.")
        return 0

    @classmethod
    def do_chroot(cls, target, cmd=[]):
        # Chroot into a raw disk image: parse the partition table with
        # parted, loop-mount each recognized partition, then chroot into
        # the assembled tree.
        img = target
        imgsize = misc.get_file_size(img) * 1024L * 1024L
        partedcmd = fs_related.find_binary_path("parted")
        disk = fs_related.SparseLoopbackDisk(img, imgsize)
        imgmnt = misc.mkdtemp()
        imgloop = PartitionedMount(imgmnt, skipformat = True)
        imgloop.add_disk('/dev/sdb', disk)
        img_fstype = "ext3"

        msger.info("Partition Table:")
        partnum = []
        for line in runner.outs([partedcmd, "-s", img, "print"]).splitlines():
            # no use strip to keep line output here
            if "Number" in line:
                msger.raw(line)
            if line.strip() and line.strip()[0].isdigit():
                # NOTE(review): only the first character of the partition
                # number is recorded, so disks with more than 9 partitions
                # would be mis-handled -- confirm upstream behaviour.
                partnum.append(line.strip()[0])
                msger.raw(line)

        rootpart = None
        if len(partnum) > 1:
            # Multiple partitions: ask the user which one is root.
            rootpart = msger.choice("please choose root partition", partnum)

        # Check the partitions from raw disk.
        # if choose root part, the mark it as mounted
        if rootpart:
            root_mounted = True
        else:
            root_mounted = False
        partition_mounts = 0
        for line in runner.outs([partedcmd,"-s",img,"unit","B","print"]).splitlines():
            line = line.strip()

            # Lines that start with number are the partitions,
            # because parted can be translated we can't refer to any text lines.
            if not line or not line[0].isdigit():
                continue

            # Some vars have extra , as list seperator.
            line = line.replace(",","")

            # Example of parted output lines that are handled:
            # Number  Start        End          Size         Type     File system  Flags
            #  1      512B         3400000511B  3400000000B  primary
            #  2      3400531968B  3656384511B  255852544B   primary  linux-swap(v1)
            #  3      3656384512B  3720347647B  63963136B    primary  fat16        boot, lba

            partition_info = re.split("\s+",line)

            # Size column is "<bytes>B"; keep just the number (as string).
            size = partition_info[3].split("B")[0]

            if len(partition_info) < 6 or partition_info[5] in ["boot"]:
                # No filesystem can be found from partition line. Assuming
                # btrfs, because that is the only MeeGo fs that parted does
                # not recognize properly.
                # TODO: Can we make better assumption?
                fstype = "btrfs"
            elif partition_info[5] in ["ext2","ext3","ext4","btrfs"]:
                fstype = partition_info[5]
            elif partition_info[5] in ["fat16","fat32"]:
                fstype = "vfat"
            elif "swap" in partition_info[5]:
                fstype = "swap"
            else:
                raise errors.CreatorError("Could not recognize partition fs type '%s'." % partition_info[5])

            if rootpart and rootpart == line[0]:
                # User-chosen root partition.
                mountpoint = '/'
            elif not root_mounted and fstype in ["ext2","ext3","ext4","btrfs"]:
                # TODO: Check that this is actually the valid root partition from /etc/fstab
                mountpoint = "/"
                root_mounted = True
            elif fstype == "swap":
                mountpoint = "swap"
            else:
                # TODO: Assing better mount points for the rest of the partitions.
                partition_mounts += 1
                mountpoint = "/media/partition_%d" % partition_mounts

            if "boot" in partition_info:
                boot = True
            else:
                boot = False

            msger.verbose("Size: %s Bytes, fstype: %s, mountpoint: %s, boot: %s" % (size, fstype, mountpoint, boot))
            # TODO: add_partition should take bytes as size parameter.
            imgloop.add_partition((int)(size)/1024/1024, "/dev/sdb", mountpoint, fstype = fstype, boot = boot)

        try:
            imgloop.mount()

        except errors.MountError:
            imgloop.cleanup()
            raise

        try:
            if len(cmd) != 0:
                cmdline = ' '.join(cmd)
            else:
                cmdline = "/bin/bash"
            # Run through env (if present in the chroot) so HOME is sane.
            envcmd = fs_related.find_binary_inchroot("env", imgmnt)
            if envcmd:
                cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
            chroot.chroot(imgmnt, None, cmdline)
        except:
            raise errors.CreatorError("Failed to chroot to %s." %img)
        finally:
            chroot.cleanup_after_chroot("img", imgloop, None, imgmnt)

    @classmethod
    def do_unpack(cls, srcimg):
        # Mount the raw image's first partition and `dd` its contents
        # into a standalone /var/tmp/.../target.img; returns its path.
        srcimgsize = (misc.get_file_size(srcimg)) * 1024L * 1024L
        srcmnt = misc.mkdtemp("srcmnt")
        disk = fs_related.SparseLoopbackDisk(srcimg, srcimgsize)
        srcloop = PartitionedMount(srcmnt, skipformat = True)

        srcloop.add_disk('/dev/sdb', disk)
        srcloop.add_partition(srcimgsize/1024/1024, "/dev/sdb", "/", "ext3", boot=False)
        try:
            srcloop.mount()

        except errors.MountError:
            srcloop.cleanup()
            raise

        image = os.path.join(tempfile.mkdtemp(dir = "/var/tmp", prefix = "tmp"), "target.img")
        args = ['dd', "if=%s" % srcloop.partitions[0]['device'], "of=%s" % image]

        msger.info("`dd` image ...")
        rc = runner.show(args)
        srcloop.cleanup()
        shutil.rmtree(os.path.dirname(srcmnt), ignore_errors = True)

        if rc != 0:
            raise errors.CreatorError("Failed to dd")
        else:
            return image
diff --git a/scripts/lib/mic/plugins/source/bootimg-efi.py b/scripts/lib/mic/plugins/source/bootimg-efi.py new file mode 100644 index 0000000000..2cc179a337 --- /dev/null +++ b/scripts/lib/mic/plugins/source/bootimg-efi.py | |||
@@ -0,0 +1,169 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2014, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This implements the 'bootimg-efi' source plugin class for 'wic' | ||
22 | # | ||
23 | # AUTHORS | ||
24 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
25 | # | ||
26 | |||
27 | import os | ||
28 | import shutil | ||
29 | import re | ||
30 | import tempfile | ||
31 | |||
32 | from mic import kickstart, chroot, msger | ||
33 | from mic.utils import misc, fs_related, errors, runner, cmdln | ||
34 | from mic.conf import configmgr | ||
35 | from mic.plugin import pluginmgr | ||
36 | from mic.utils.partitionedfs import PartitionedMount | ||
37 | import mic.imager.direct as direct | ||
38 | from mic.pluginbase import SourcePlugin | ||
39 | from mic.utils.oe.misc import * | ||
40 | from mic.imager.direct import DirectImageCreator | ||
41 | |||
class BootimgEFIPlugin(SourcePlugin):
    """
    Source plugin implementing the 'bootimg-efi' partition type for wic:
    populates a FAT boot partition with a kernel and a grub-efi config.
    """
    name = 'bootimg-efi'

    @classmethod
    def do_configure_partition(self, part, cr, cr_workdir, oe_builddir,
                               bootimg_dir, kernel_dir, native_sysroot):
        """
        Called before do_prepare_partition(), creates the grub-efi config
        (grub.cfg) under <cr_workdir>/hdd/boot/EFI/BOOT.
        """
        hdddir = "%s/hdd/boot" % cr_workdir
        # start from a clean workdir
        rm_cmd = "rm -rf %s" % cr_workdir
        exec_cmd(rm_cmd)

        install_cmd = "install -d %s/EFI/BOOT" % hdddir
        tmp = exec_cmd(install_cmd)

        # BUGFIX: with a leading '/' in the second component,
        # os.path.join() discarded cr_workdir and always checked the
        # host's /EFI/boot; use a relative component so the workdir is
        # actually searched.
        splash = os.path.join(cr_workdir, "EFI/boot/splash.jpg")
        if os.path.exists(splash):
            splashline = "menu background splash.jpg"
        else:
            splashline = ""

        (rootdev, root_part_uuid) = cr._get_boot_config()
        options = cr.ks.handler.bootloader.appendLine

        grubefi_conf = ""
        grubefi_conf += "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1\n"
        grubefi_conf += "default=boot\n"
        timeout = kickstart.get_timeout(cr.ks)
        if not timeout:
            timeout = 0
        grubefi_conf += "timeout=%s\n" % timeout
        grubefi_conf += "menuentry 'boot'{\n"

        kernel = "/vmlinuz"

        if cr._ptable_format == 'msdos':
            rootstr = rootdev
        else:
            # GPT partitions are identified by PARTUUID on the cmdline
            if not root_part_uuid:
                raise MountError("Cannot find the root GPT partition UUID")
            rootstr = "PARTUUID=%s" % root_part_uuid

        grubefi_conf += "linux %s root=%s rootwait %s\n" \
            % (kernel, rootstr, options)
        grubefi_conf += "}\n"
        if splashline:
            # BUGFIX: was appended to the undefined name 'syslinux_conf'
            # (copy-paste from the pcbios plugin), which raised NameError
            # whenever a splash image was present.
            grubefi_conf += "%s\n" % splashline

        msger.debug("Writing grubefi config %s/hdd/boot/EFI/BOOT/grub.cfg" \
                    % cr_workdir)
        cfg = open("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir, "w")
        cfg.write(grubefi_conf)
        cfg.close()

    @classmethod
    def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir,
                             kernel_dir, rootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        In this case, prepare content for an EFI (grub) boot partition.
        """
        if not bootimg_dir:
            bootimg_dir = get_bitbake_var("HDDDIR")
            if not bootimg_dir:
                msger.error("Couldn't find HDDDIR, exiting\n")
            # just so the result notes display it
            cr.set_bootimg_dir(bootimg_dir)

        staging_kernel_dir = kernel_dir
        staging_data_dir = bootimg_dir

        hdddir = "%s/hdd" % cr_workdir

        install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
            (staging_kernel_dir, hdddir)
        tmp = exec_cmd(install_cmd)

        # stash grub.cfg aside while the staging EFI dir is copied over
        shutil.copyfile("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir,
                        "%s/grub.cfg" % cr_workdir)

        cp_cmd = "cp %s/EFI/BOOT/* %s/EFI/BOOT" % (staging_data_dir, hdddir)
        exec_cmd(cp_cmd, True)

        shutil.move("%s/grub.cfg" % cr_workdir,
                    "%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir)

        du_cmd = "du -bks %s" % hdddir
        rc, out = exec_cmd(du_cmd)
        blocks = int(out.split()[0])

        extra_blocks = part.get_extra_block_count(blocks)

        if extra_blocks < BOOTDD_EXTRA_SPACE:
            extra_blocks = BOOTDD_EXTRA_SPACE

        blocks += extra_blocks

        msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
                    (extra_blocks, part.mountpoint, blocks))

        # Ensure total sectors is an integral number of sectors per
        # track or mcopy will complain. Sectors are 512 bytes, and we
        # generate images with 32 sectors per track. This calculation is
        # done in blocks, thus the mod by 16 instead of 32.
        blocks += (16 - (blocks % 16))

        # dosfs image, created by mkdosfs
        bootimg = "%s/boot.img" % cr_workdir

        dosfs_cmd = "mkdosfs -n efi -C %s %d" % (bootimg, blocks)
        exec_native_cmd(dosfs_cmd, native_sysroot)

        mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
        exec_native_cmd(mcopy_cmd, native_sysroot)

        chmod_cmd = "chmod 644 %s" % bootimg
        exec_cmd(chmod_cmd)

        du_cmd = "du -Lbms %s" % bootimg
        rc, out = exec_cmd(du_cmd)
        bootimg_size = out.split()[0]

        part.set_size(bootimg_size)
        part.set_source_file(bootimg)
168 | |||
169 | |||
diff --git a/scripts/lib/mic/plugins/source/bootimg-pcbios.py b/scripts/lib/mic/plugins/source/bootimg-pcbios.py new file mode 100644 index 0000000000..1211e5c93b --- /dev/null +++ b/scripts/lib/mic/plugins/source/bootimg-pcbios.py | |||
@@ -0,0 +1,195 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2014, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This implements the 'bootimg-pcbios' source plugin class for 'wic' | ||
22 | # | ||
23 | # AUTHORS | ||
24 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
25 | # | ||
26 | |||
27 | import os | ||
28 | import shutil | ||
29 | import re | ||
30 | import tempfile | ||
31 | |||
32 | from mic import kickstart, chroot, msger | ||
33 | from mic.utils import misc, fs_related, errors, runner, cmdln | ||
34 | from mic.conf import configmgr | ||
35 | from mic.plugin import pluginmgr | ||
36 | from mic.utils.partitionedfs import PartitionedMount | ||
37 | import mic.imager.direct as direct | ||
38 | from mic.pluginbase import SourcePlugin | ||
39 | from mic.utils.oe.misc import * | ||
40 | from mic.imager.direct import DirectImageCreator | ||
41 | |||
class BootimgPcbiosPlugin(SourcePlugin):
    """
    Source plugin implementing the 'bootimg-pcbios' partition type for
    wic: populates a FAT boot partition with a kernel and syslinux, and
    installs the MBR into the assembled disk image.
    """
    name = 'bootimg-pcbios'

    @classmethod
    def do_install_disk(self, disk, disk_name, cr, workdir, oe_builddir,
                        bootimg_dir, kernel_dir, native_sysroot):
        """
        Called after all partitions have been prepared and assembled into a
        disk image. In this case, we install the MBR.
        """
        # pick the MBR flavor matching the partition table format
        mbrfile = "%s/syslinux/" % bootimg_dir
        if cr._ptable_format == 'gpt':
            mbrfile += "gptmbr.bin"
        else:
            mbrfile += "mbr.bin"

        if not os.path.exists(mbrfile):
            msger.error("Couldn't find %s. If using the -e option, do you have the right MACHINE set in local.conf? If not, is the bootimg_dir path correct?" % mbrfile)

        full_path = cr._full_path(workdir, disk_name, "direct")
        msger.debug("Installing MBR on disk %s as %s with size %s bytes" \
                    % (disk_name, full_path, disk['min_size']))

        # overwrite the first sector without truncating the image
        rc = runner.show(['dd', 'if=%s' % mbrfile,
                          'of=%s' % full_path, 'conv=notrunc'])
        if rc != 0:
            raise MountError("Unable to set MBR to %s" % full_path)

    @classmethod
    def do_configure_partition(self, part, cr, cr_workdir, oe_builddir,
                               bootimg_dir, kernel_dir, native_sysroot):
        """
        Called before do_prepare_partition(), creates syslinux config
        """
        hdddir = "%s/hdd/boot" % cr_workdir
        # start from a clean workdir
        rm_cmd = "rm -rf " + cr_workdir
        exec_cmd(rm_cmd)

        install_cmd = "install -d %s" % hdddir
        tmp = exec_cmd(install_cmd)

        # BUGFIX: with a leading '/' in the second component,
        # os.path.join() discarded cr_workdir and always checked the
        # host's /hdd/boot; use a relative component so the workdir is
        # actually searched.
        splash = os.path.join(cr_workdir, "hdd/boot/splash.jpg")
        if os.path.exists(splash):
            splashline = "menu background splash.jpg"
        else:
            splashline = ""

        (rootdev, root_part_uuid) = cr._get_boot_config()
        options = cr.ks.handler.bootloader.appendLine

        syslinux_conf = ""
        syslinux_conf += "PROMPT 0\n"
        timeout = kickstart.get_timeout(cr.ks)
        if not timeout:
            timeout = 0
        syslinux_conf += "TIMEOUT " + str(timeout) + "\n"
        syslinux_conf += "\n"
        syslinux_conf += "ALLOWOPTIONS 1\n"
        syslinux_conf += "SERIAL 0 115200\n"
        syslinux_conf += "\n"
        if splashline:
            syslinux_conf += "%s\n" % splashline
        syslinux_conf += "DEFAULT boot\n"
        syslinux_conf += "LABEL boot\n"

        kernel = "/vmlinuz"
        syslinux_conf += "KERNEL " + kernel + "\n"

        if cr._ptable_format == 'msdos':
            rootstr = rootdev
        else:
            # GPT partitions are identified by PARTUUID on the cmdline
            if not root_part_uuid:
                raise MountError("Cannot find the root GPT partition UUID")
            rootstr = "PARTUUID=%s" % root_part_uuid

        syslinux_conf += "APPEND label=boot root=%s %s\n" % (rootstr, options)

        msger.debug("Writing syslinux config %s/hdd/boot/syslinux.cfg" \
                    % cr_workdir)
        cfg = open("%s/hdd/boot/syslinux.cfg" % cr_workdir, "w")
        cfg.write(syslinux_conf)
        cfg.close()

    @classmethod
    def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir,
                             kernel_dir, rootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        In this case, prepare content for legacy bios boot partition.
        """
        if not bootimg_dir:
            bootimg_dir = get_bitbake_var("STAGING_DATADIR")
            if not bootimg_dir:
                msger.error("Couldn't find STAGING_DATADIR, exiting\n")
            # just so the result notes display it
            cr.set_bootimg_dir(bootimg_dir)

        staging_kernel_dir = kernel_dir
        staging_data_dir = bootimg_dir

        hdddir = "%s/hdd/boot" % cr_workdir

        install_cmd = "install -m 0644 %s/bzImage %s/vmlinuz" \
            % (staging_kernel_dir, hdddir)
        tmp = exec_cmd(install_cmd)

        install_cmd = "install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" \
            % (staging_data_dir, hdddir)
        tmp = exec_cmd(install_cmd)

        du_cmd = "du -bks %s" % hdddir
        rc, out = exec_cmd(du_cmd)
        blocks = int(out.split()[0])

        extra_blocks = part.get_extra_block_count(blocks)

        if extra_blocks < BOOTDD_EXTRA_SPACE:
            extra_blocks = BOOTDD_EXTRA_SPACE

        blocks += extra_blocks

        msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
                    (extra_blocks, part.mountpoint, blocks))

        # Ensure total sectors is an integral number of sectors per
        # track or mcopy will complain. Sectors are 512 bytes, and we
        # generate images with 32 sectors per track. This calculation is
        # done in blocks, thus the mod by 16 instead of 32.
        blocks += (16 - (blocks % 16))

        # dosfs image, created by mkdosfs
        bootimg = "%s/boot.img" % cr_workdir

        dosfs_cmd = "mkdosfs -n boot -S 512 -C %s %d" % (bootimg, blocks)
        exec_native_cmd(dosfs_cmd, native_sysroot)

        mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
        exec_native_cmd(mcopy_cmd, native_sysroot)

        # make the filesystem bootable
        syslinux_cmd = "syslinux %s" % bootimg
        exec_native_cmd(syslinux_cmd, native_sysroot)

        chmod_cmd = "chmod 644 %s" % bootimg
        exec_cmd(chmod_cmd)

        du_cmd = "du -Lbms %s" % bootimg
        rc, out = exec_cmd(du_cmd)
        bootimg_size = out.split()[0]

        part.set_size(bootimg_size)
        part.set_source_file(bootimg)
194 | |||
195 | |||
diff --git a/scripts/lib/mic/plugins/source/rootfs.py b/scripts/lib/mic/plugins/source/rootfs.py new file mode 100644 index 0000000000..75999e03d2 --- /dev/null +++ b/scripts/lib/mic/plugins/source/rootfs.py | |||
@@ -0,0 +1,71 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2014, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This implements the 'rootfs' source plugin class for 'wic' | ||
22 | # | ||
23 | # AUTHORS | ||
24 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
25 | # Joao Henrique Ferreira de Freitas <joaohf (at] gmail.com> | ||
26 | # | ||
27 | |||
28 | import os | ||
29 | import shutil | ||
30 | import re | ||
31 | import tempfile | ||
32 | |||
33 | from mic import kickstart, chroot, msger | ||
34 | from mic.utils import misc, fs_related, errors, runner, cmdln | ||
35 | from mic.conf import configmgr | ||
36 | from mic.plugin import pluginmgr | ||
37 | from mic.utils.partitionedfs import PartitionedMount | ||
38 | import mic.imager.direct as direct | ||
39 | from mic.pluginbase import SourcePlugin | ||
40 | from mic.utils.oe.misc import * | ||
41 | from mic.imager.direct import DirectImageCreator | ||
42 | |||
class RootfsPlugin(SourcePlugin):
    """
    Source plugin that populates a partition from an existing rootfs
    directory, resolved from --rootfs-dir (a connection name or a path).
    """
    name = 'rootfs'

    @classmethod
    def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir,
                             kernel_dir, krootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        In this case, prepare content for a rootfs partition.
        """
        if part.rootfs is None:
            # no --rootfs-dir connection named on the partition line:
            # fall back to the default ROOTFS_DIR
            if 'ROOTFS_DIR' not in krootfs_dir:
                msg = "Couldn't find --rootfs-dir, exiting"
                msger.error(msg)
            rootfs_dir = krootfs_dir['ROOTFS_DIR']
        else:
            if part.rootfs in krootfs_dir:
                rootfs_dir = krootfs_dir[part.rootfs]
            elif os.path.isdir(part.rootfs):
                # the value is not a known connection; treat it as a path
                rootfs_dir = part.rootfs
            else:
                msg = "Couldn't find --rootfs-dir=%s connection"
                msg += " or it is not a valid path, exiting"
                msger.error(msg % part.rootfs)

        part.set_rootfs(rootfs_dir)
        part.prepare_rootfs(cr_workdir, oe_builddir, rootfs_dir, native_sysroot)
71 | |||
diff --git a/scripts/lib/mic/plugins/source/uboot.py b/scripts/lib/mic/plugins/source/uboot.py new file mode 100644 index 0000000000..57cb3cf8fe --- /dev/null +++ b/scripts/lib/mic/plugins/source/uboot.py | |||
@@ -0,0 +1,173 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2014, Enea AB. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This implements the 'uboot' source plugin class for 'wic' | ||
22 | # | ||
23 | # AUTHORS | ||
24 | # Adrian Calianu <adrian.calianu (at] enea.com> | ||
25 | # | ||
26 | |||
27 | import os | ||
28 | import shutil | ||
29 | import re | ||
30 | import tempfile | ||
31 | |||
32 | from mic import kickstart, chroot, msger | ||
33 | from mic.utils import misc, fs_related, errors, runner, cmdln | ||
34 | from mic.conf import configmgr | ||
35 | from mic.plugin import pluginmgr | ||
36 | from mic.utils.partitionedfs import PartitionedMount | ||
37 | import mic.imager.direct as direct | ||
38 | from mic.pluginbase import SourcePlugin | ||
39 | from mic.utils.oe.misc import * | ||
40 | from mic.imager.direct import DirectImageCreator | ||
41 | |||
def create_local_rootfs(part, creator, cr_workdir, krootfs_dir, native_sysroot):
    """
    Make a private copy of the partition's rootfs under cr_workdir and
    point the creator's rootfs bookkeeping at the copy.

    In order to have full control over the rootfs we make a local copy
    under the workdir and change rootfs_dir to the new location.  In this
    way we can install more than one ROOTFS_DIRs and/or use an empty
    rootfs to install packages, so a rootfs could be generated only from
    pkgs.

    Returns the path of the new (copied) rootfs directory.
    """
    # TBD: create workdir/rootfs ; copy rootfs-> workdir/rootfs; set rootfs=workdir/rootfs

    cr_workdir = os.path.abspath(cr_workdir)
    new_rootfs_dir = "%s/rootfs_%s" % (cr_workdir, creator.name)

    rootfs_exists = 1
    if part.rootfs is None:
        if 'ROOTFS_DIR' not in krootfs_dir:
            msg = "Couldn't find --rootfs-dir, exiting, "
            msger.info(msg)
            rootfs_exists = 0
        # NOTE(review): when ROOTFS_DIR is missing, this lookup raises
        # KeyError before rootfs_exists is ever consulted -- confirm
        # whether it should be guarded by the check above.
        rootfs_dir = krootfs_dir['ROOTFS_DIR']
        creator.rootfs_dir['ROOTFS_DIR'] = new_rootfs_dir
    else:
        if part.rootfs in krootfs_dir:
            rootfs_dir = krootfs_dir[part.rootfs]
            creator.rootfs_dir[part.rootfs] = new_rootfs_dir
        elif os.path.isdir(part.rootfs):
            rootfs_dir = part.rootfs
            part.rootfs = new_rootfs_dir
        else:
            msg = "Couldn't find --rootfs-dir=%s connection"
            msg += " or it is not a valid path, exiting"
            msger.info(msg % part.rootfs)
            rootfs_exists = 0
            creator.rootfs_dir['ROOTFS_DIR'] = new_rootfs_dir

    # run the copy under pseudo so file ownership and permissions survive
    pseudox = "export PSEUDO_PREFIX=%s/usr;" % native_sysroot
    pseudox += "export PSEUDO_LOCALSTATEDIR=%s/../pseudo;" % new_rootfs_dir
    pseudox += "export PSEUDO_PASSWD=%s;" % new_rootfs_dir
    pseudox += "export PSEUDO_NOSYMLINKEXP=1;"
    pseudox += "%s/usr/bin/pseudo " % native_sysroot

    mkdir_cmd = "mkdir %s" % (new_rootfs_dir)
    # rc, out = exec_native_cmd(pseudox + mkdir_cmd, native_sysroot)
    rc, out = exec_cmd(mkdir_cmd, True)

    if rootfs_exists == 1 and os.path.isdir(rootfs_dir):
        # temporarily put the native tools first in PATH
        defpath = os.environ['PATH']
        os.environ['PATH'] = native_sysroot + "/usr/bin/" + ":/bin:/usr/bin:"

        rootfs_dir = os.path.abspath(rootfs_dir)

        pseudoc = "export PSEUDO_PREFIX=%s/usr;" % native_sysroot
        pseudoc += "export PSEUDO_LOCALSTATEDIR=%s/../pseudo;" % rootfs_dir
        pseudoc += "export PSEUDO_PASSWD=%s;" % rootfs_dir
        pseudoc += "export PSEUDO_NOSYMLINKEXP=1;"
        pseudoc += "%s/usr/bin/pseudo " % native_sysroot

        # copy via tar to preserve special files and ownership
        tarc_cmd = "tar cvpf %s/rootfs.tar -C %s ." % (cr_workdir, rootfs_dir)
        rc, out = exec_native_cmd(pseudoc + tarc_cmd, native_sysroot)

        tarx_cmd = "tar xpvf %s/rootfs.tar -C %s" % (cr_workdir, new_rootfs_dir)
        rc, out = exec_native_cmd(pseudox + tarx_cmd, native_sysroot)

        rm_cmd = "rm %s/rootfs.tar" % cr_workdir
        rc, out = exec_cmd(rm_cmd, True)

        # BUGFIX: restore the saved PATH.  The original appended defpath
        # to the already-modified PATH ('+='), leaving the native-sysroot
        # prefix in place and growing PATH on every call.
        os.environ['PATH'] = defpath

    return new_rootfs_dir
108 | |||
class UBootPlugin(SourcePlugin):
    """
    Source plugin implementing the 'uboot' partition type for wic:
    populates a rootfs partition and post-processes it for U-Boot,
    optionally installing extra packages into a local rootfs copy first.
    """
    name = 'uboot'

    @classmethod
    def do_install_pkgs(self, part, creator, cr_workdir, oe_builddir, krootfs_dir,
                        bootimg_dir, kernel_dir, native_sysroot):
        """
        Called before all partitions have been prepared and assembled into a
        disk image. Install packages based on the wic configuration.
        """

        # set new rootfs_dir (a local copy under the workdir)
        rootfs_dir = create_local_rootfs(part, creator, cr_workdir, krootfs_dir, native_sysroot)

        # wks file parsing
        packages = kickstart.get_packages(creator.ks)

        # wic.conf file parsing = found under 'creator'
        local_pkgs_path = creator._local_pkgs_path
        repourl = creator.repourl
        pkgmgr = creator.pkgmgr_name

        # install packages -- only the opkg backend is supported here
        if packages and pkgmgr in ["opkg"]:
            if len(repourl) > 0:
                part.install_pkgs_ipk(cr_workdir, oe_builddir, rootfs_dir, native_sysroot,
                                      packages, repourl)
            else:
                msger.error("No packages repository provided in wic.conf")

    @classmethod
    def do_prepare_partition(self, part, cr, cr_workdir, oe_builddir, bootimg_dir,
                             kernel_dir, krootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        In this case, prepare content for a rootfs partition processed
        for U-Boot.
        """
        if part.rootfs is None:
            if 'ROOTFS_DIR' not in krootfs_dir:
                msg = "Couldn't find --rootfs-dir, exiting"
                msger.error(msg)
            rootfs_dir = krootfs_dir['ROOTFS_DIR']
        else:
            if part.rootfs in krootfs_dir:
                rootfs_dir = krootfs_dir[part.rootfs]
            elif os.path.isdir(part.rootfs):
                rootfs_dir = part.rootfs
            else:
                msg = "Couldn't find --rootfs-dir=%s connection"
                msg += " or it is not a valid path, exiting"
                msger.error(msg % part.rootfs)

        part.set_rootfs(rootfs_dir)

        # change partition label which will reflect into the final rootfs
        # image name
        part.label = "%s_%s" % (part.label, cr.name)

        # temporarily put the native tools first in PATH
        defpath = os.environ['PATH']
        os.environ['PATH'] = native_sysroot + "/usr/bin/" + ":/bin:/usr/bin:"

        part.prepare_rootfs(cr_workdir, oe_builddir, rootfs_dir, native_sysroot)
        part.prepare_for_uboot(cr.target_arch, cr_workdir, oe_builddir, rootfs_dir, native_sysroot)

        # BUGFIX: restore the saved PATH.  The original appended defpath
        # to the already-modified PATH ('+='), leaving the native-sysroot
        # prefix in place and growing PATH on every call.
        os.environ['PATH'] = defpath
diff --git a/scripts/lib/mic/rt_util.py b/scripts/lib/mic/rt_util.py new file mode 100644 index 0000000000..2a31f4a218 --- /dev/null +++ b/scripts/lib/mic/rt_util.py | |||
@@ -0,0 +1,223 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2009, 2010, 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | from __future__ import with_statement | ||
19 | import os | ||
20 | import sys | ||
21 | import glob | ||
22 | import re | ||
23 | import shutil | ||
24 | import subprocess | ||
25 | |||
26 | from mic import bootstrap, msger | ||
27 | from mic.conf import configmgr | ||
28 | from mic.utils import errors, proxy | ||
29 | from mic.utils.fs_related import find_binary_path, makedirs | ||
30 | from mic.chroot import setup_chrootenv, cleanup_chrootenv | ||
31 | |||
32 | expath = lambda p: os.path.abspath(os.path.expanduser(p)) | ||
33 | |||
def bootstrap_mic(argv=None):
    """
    Create a distro bootstrap chroot and re-run mic inside it.

    argv defaults to sys.argv (with argv[0] forced to /usr/bin/mic so the
    command resolves inside the bootstrap).  On bootstrap failure the user
    may choose to fall back to native mode (the function then returns);
    on success the bootstrapped mic's exit status is propagated through
    sys.exit().
    """

    # NOTE(review): this nested helper is never called in this function --
    # confirm whether it is dead code before removing it.
    def mychroot():
        os.chroot(rootdir)
        os.chdir(cwd)

    # by default, sys.argv is used to run mic in bootstrap
    if not argv:
        argv = sys.argv
        if argv[0] not in ('/usr/bin/mic', 'mic'):
            argv[0] = '/usr/bin/mic'

    cropts = configmgr.create
    bsopts = configmgr.bootstrap
    distro = bsopts['distro_name'].lower()

    rootdir = bsopts['rootdir']
    pkglist = bsopts['packages']
    cwd = os.getcwd()

    # create bootstrap and run mic in bootstrap
    bsenv = bootstrap.Bootstrap(rootdir, distro, cropts['arch'])
    bsenv.logfile = cropts['logfile']
    # rootdir is regenerated as a temp dir
    rootdir = bsenv.rootdir

    if 'optional' in bsopts:
        optlist = bsopts['optional']
    else:
        optlist = []

    try:
        msger.info("Creating %s bootstrap ..." % distro)
        bsenv.create(cropts['repomd'], pkglist, optlist)

        # bootstrap is relocated under "bootstrap"
        if os.path.exists(os.path.join(rootdir, "bootstrap")):
            rootdir = os.path.join(rootdir, "bootstrap")

        bsenv.dirsetup(rootdir)
        # copy the host mic installation into the bootstrap
        sync_mic(rootdir)

        #FIXME: sync the ks file to bootstrap
        if "/" == os.path.dirname(os.path.abspath(configmgr._ksconf)):
            safecopy(configmgr._ksconf, rootdir)

        msger.info("Start mic in bootstrap: %s\n" % rootdir)
        bindmounts = get_bindmounts(cropts)
        ret = bsenv.run(argv, cwd, rootdir, bindmounts)

    except errors.BootstrapError, err:
        # bootstrap failed: offer to continue with the native mic instead
        msger.warning('\n%s' % err)
        if msger.ask("Switch to native mode and continue?"):
            return
        raise
    except RuntimeError, err:
        #change exception type but keep the trace back
        value, tb = sys.exc_info()[1:]
        raise errors.BootstrapError, value, tb
    else:
        sys.exit(ret)
    finally:
        bsenv.cleanup()
def get_bindmounts(cropts):
    """
    Build the ';'-separated, sorted, de-duplicated list of host paths to
    bind-mount into the bootstrap, from the creator options and the
    kickstart file location.
    """
    dir_candidates = [
        os.getcwd(),
        cropts['tmpdir'],
        cropts['cachedir'],
        cropts['outdir'],
        cropts['local_pkgs_path'],
    ]
    dir_candidates.extend(cropts['localrepos'])

    file_candidates = [
        cropts['logfile'],
        configmgr._ksconf,
    ]

    # directories are mounted as-is; for files, mount the parent directory
    mounts = [expath(d) for d in dir_candidates if d]
    mounts += [os.path.dirname(expath(f)) for f in file_candidates if f]

    return ';'.join(sorted(set(mounts)))
120 | |||
121 | |||
def get_mic_binpath():
    """
    Return the path of the 'mic' executable on the host.

    Prefers the egg-info script location reported by setuptools; falls
    back to searching the standard binary paths.  Raises BootstrapError
    when mic cannot be found at all.
    """
    fp = None
    try:
        import pkg_resources # depends on 'setuptools'
    except ImportError:
        pass
    else:
        # NOTE(review): get_distribution() raises DistributionNotFound if
        # mic isn't egg-installed -- confirm that is intended to propagate.
        dist = pkg_resources.get_distribution('mic')
        # the real script is under EGG_INFO/scripts
        if dist.has_metadata('scripts/mic'):
            fp = os.path.join(dist.egg_info, "scripts/mic")

    if fp:
        return fp

    # not found script if 'flat' egg installed
    try:
        return find_binary_path('mic')
    except errors.CreatorError:
        raise errors.BootstrapError("Can't find mic binary in host OS")
142 | |||
143 | |||
def get_mic_modpath():
    """
    Return the directory holding the importable 'mic' python package,
    raising BootstrapError when the module is not available.
    """
    try:
        import mic
    except ImportError:
        raise errors.BootstrapError("Can't find mic module in host OS")

    return os.path.dirname(os.path.abspath(mic.__file__))
151 | |||
def get_mic_libpath():
    """Return the mic library path used inside the bootstrap."""
    # TBD: so far mic lib path is hard coded
    return "/usr/lib/mic"
155 | |||
156 | # the hard code path is prepared for bootstrap | ||
def sync_mic(bootstrap, binpth = '/usr/bin/mic',
             libpth='/usr/lib',
             pylib = '/usr/lib/python2.7/site-packages',
             conf = '/etc/mic/mic.conf'):
    """
    Copy the host's mic installation (binary, libs, python package and
    config) into the bootstrap rootfs and patch it so it runs there.

    bootstrap: path of the bootstrap root directory.  The keyword
    arguments give the default in-bootstrap destination paths.
    """
    # map a bootstrap-relative path to its absolute location on the host
    _path = lambda p: os.path.join(bootstrap, p.lstrip('/'))

    # host-side sources, keyed by the *name* of the matching keyword
    # argument -- see the eval(key) destination lookup below
    micpaths = {
        'binpth': get_mic_binpath(),
        'libpth': get_mic_libpath(),
        'pylib': get_mic_modpath(),
        'conf': '/etc/mic/mic.conf',
    }

    # locate the bootstrap's site-packages dir when the default
    # python2.7 path does not exist inside it
    if not os.path.exists(_path(pylib)):
        pyptn = '/usr/lib/python?.?/site-packages'
        pylibs = glob.glob(_path(pyptn))
        if pylibs:
            pylib = pylibs[0].replace(bootstrap, '')
        else:
            raise errors.BootstrapError("Can't find python site dir in: %s" %
                                        bootstrap)

    for key, value in micpaths.items():
        try:
            # eval(key) resolves the destination from the like-named
            # local variable (binpth/libpth/pylib/conf)
            safecopy(value, _path(eval(key)), False, ["*.pyc", "*.pyo"])
        except (OSError, IOError), err:
            raise errors.BootstrapError(err)

    # auto select backend
    conf_str = file(_path(conf)).read()
    conf_str = re.sub("pkgmgr\s*=\s*.*", "pkgmgr=auto", conf_str)
    with open(_path(conf), 'w') as wf:
        wf.write(conf_str)

    # chmod +x /usr/bin/mic
    os.chmod(_path(binpth), 0777)

    # correct python interpreter
    mic_cont = file(_path(binpth)).read()
    mic_cont = "#!/usr/bin/python\n" + mic_cont
    with open(_path(binpth), 'w') as wf:
        wf.write(mic_cont)
199 | |||
200 | |||
def safecopy(src, dst, symlinks=False, ignore_ptns=()):
    """
    Copy a file or a whole directory tree from src to dst.

    For a directory, an existing target is removed first and entries
    matching ignore_ptns are skipped; for a plain file the parent of dst
    is created on demand.  On copy failure the partial target tree is
    removed and the exception re-raised.
    """
    if not os.path.isdir(src):
        # plain file: make sure the destination directory exists
        if not os.path.isdir(dst):
            makedirs(os.path.dirname(dst))
        shutil.copy2(src, dst)
        return

    # directory tree: copy *into* an existing target directory
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
    if os.path.exists(dst):
        shutil.rmtree(dst, ignore_errors=True)

    src = src.rstrip('/')
    # check common prefix to avoid copying the tree into itself; the
    # guard ignores entries named like src's basename (approximate)
    if dst.startswith(src + '/'):
        ignore_ptns = list(ignore_ptns) + [os.path.basename(src)]

    try:
        shutil.copytree(src, dst, symlinks, shutil.ignore_patterns(*ignore_ptns))
    except (OSError, IOError):
        shutil.rmtree(dst, ignore_errors=True)
        raise
diff --git a/scripts/lib/mic/test b/scripts/lib/mic/test new file mode 100644 index 0000000000..9daeafb986 --- /dev/null +++ b/scripts/lib/mic/test | |||
@@ -0,0 +1 @@ | |||
test | |||
diff --git a/scripts/lib/mic/utils/BmapCreate.py b/scripts/lib/mic/utils/BmapCreate.py new file mode 100644 index 0000000000..65b19a5f46 --- /dev/null +++ b/scripts/lib/mic/utils/BmapCreate.py | |||
@@ -0,0 +1,298 @@ | |||
1 | """ This module implements the block map (bmap) creation functionality and | ||
2 | provides the corresponding API in form of the 'BmapCreate' class. | ||
3 | |||
4 | The idea is that while images files may generally be very large (e.g., 4GiB), | ||
5 | they may nevertheless contain only little real data, e.g., 512MiB. This data | ||
6 | are files, directories, file-system meta-data, partition table, etc. When | ||
7 | copying the image to the target device, you do not have to copy all the 4GiB of | ||
8 | data, you can copy only 512MiB of it, which is 4 times less, so copying should | ||
9 | presumably be 4 times faster. | ||
10 | |||
11 | The block map file is an XML file which contains a list of blocks which have to | ||
12 | be copied to the target device. The other blocks are not used and there is no | ||
13 | need to copy them. The XML file also contains some additional information like | ||
14 | block size, image size, count of mapped blocks, etc. There are also many | ||
15 | commentaries, so it is human-readable. | ||
16 | |||
17 | The image has to be a sparse file. Generally, this means that when you generate | ||
18 | this image file, you should start with a huge sparse file which contains a | ||
19 | single hole spanning the entire file. Then you should partition it, write all | ||
20 | the data (probably by means of loop-back mounting the image or parts of it), | ||
21 | etc. The end result should be a sparse file where mapped areas represent useful | ||
22 | parts of the image and holes represent useless parts of the image, which do not | ||
23 | have to be copied when copying the image to the target device. | ||
24 | |||
25 | This module uses the FIBMAP ioctl to detect holes. """ | ||
26 | |||
27 | # Disable the following pylint recommendations: | ||
28 | # * Too many instance attributes - R0902 | ||
29 | # * Too few public methods - R0903 | ||
30 | # pylint: disable=R0902,R0903 | ||
31 | |||
32 | import hashlib | ||
33 | from mic.utils.misc import human_size | ||
34 | from mic.utils import Fiemap | ||
35 | |||
36 | # The bmap format version we generate | ||
37 | SUPPORTED_BMAP_VERSION = "1.3" | ||
38 | |||
39 | _BMAP_START_TEMPLATE = \ | ||
40 | """<?xml version="1.0" ?> | ||
41 | <!-- This file contains the block map for an image file, which is basically | ||
42 | a list of useful (mapped) block numbers in the image file. In other words, | ||
43 | it lists only those blocks which contain data (boot sector, partition | ||
44 | table, file-system metadata, files, directories, extents, etc). These | ||
45 | blocks have to be copied to the target device. The other blocks do not | ||
46 | contain any useful data and do not have to be copied to the target | ||
47 | device. | ||
48 | |||
49 | The block map an optimization which allows to copy or flash the image to | ||
50 | the image quicker than copying of flashing the entire image. This is | ||
51 | because with bmap less data is copied: <MappedBlocksCount> blocks instead | ||
52 | of <BlocksCount> blocks. | ||
53 | |||
54 | Besides the machine-readable data, this file contains useful commentaries | ||
55 | which contain human-readable information like image size, percentage of | ||
56 | mapped data, etc. | ||
57 | |||
58 | The 'version' attribute is the block map file format version in the | ||
59 | 'major.minor' format. The version major number is increased whenever an | ||
60 | incompatible block map format change is made. The minor number changes | ||
61 | in case of minor backward-compatible changes. --> | ||
62 | |||
63 | <bmap version="%s"> | ||
64 | <!-- Image size in bytes: %s --> | ||
65 | <ImageSize> %u </ImageSize> | ||
66 | |||
67 | <!-- Size of a block in bytes --> | ||
68 | <BlockSize> %u </BlockSize> | ||
69 | |||
70 | <!-- Count of blocks in the image file --> | ||
71 | <BlocksCount> %u </BlocksCount> | ||
72 | |||
73 | """ | ||
74 | |||
class Error(Exception):
    """ The single exception type raised by this module. The exception
    argument is always a human-readable description of the problem. """
    pass
80 | |||
class BmapCreate:
    """ This class implements the bmap creation functionality. To generate a
    bmap for an image (which is supposedly a sparse file), you should first
    create an instance of 'BmapCreate' and provide:

    * full path or a file-like object of the image to create bmap for
    * full path or a file object to use for writing the results to

    Then you should invoke the 'generate()' method of this class. It will use
    the FIEMAP ioctl to generate the bmap. """

    def _open_image_file(self):
        """ Open the image file read-only and remember that this instance owns
        the file object (so the destructor knows to close it). """

        try:
            self._f_image = open(self._image_path, 'rb')
        except IOError as err:
            raise Error("cannot open image file '%s': %s" \
                        % (self._image_path, err))

        self._f_image_needs_close = True

    def _open_bmap_file(self):
        """ Open the bmap file in 'w+' mode: it is written sequentially and
        then re-read in full to compute its SHA1 checksum. """

        try:
            self._f_bmap = open(self._bmap_path, 'w+')
        except IOError as err:
            raise Error("cannot open bmap file '%s': %s" \
                        % (self._bmap_path, err))

        self._f_bmap_needs_close = True

    def __init__(self, image, bmap):
        """ Initialize a class instance:
        * image - full path or a file-like object of the image to create bmap
          for
        * bmap - full path or a file object to use for writing the resulting
          bmap to

        Raises 'Error' if a file cannot be opened or the image is empty. """

        # Public statistics about the image; the 'mapped_*' ones are only
        # filled in by 'generate()'.
        self.image_size = None
        self.image_size_human = None
        self.block_size = None
        self.blocks_cnt = None
        self.mapped_cnt = None
        self.mapped_size = None
        self.mapped_size_human = None
        self.mapped_percent = None

        # File offsets of placeholder fields in the bmap file; they are
        # written as spaces first and patched with the real values by
        # '_bmap_file_end()' once the block map has been walked.
        self._mapped_count_pos1 = None
        self._mapped_count_pos2 = None
        self._sha1_pos = None

        # Track whether the destructor should close the files (only when this
        # class opened them itself rather than being handed file objects).
        self._f_image_needs_close = False
        self._f_bmap_needs_close = False

        if hasattr(image, "read"):
            # 'image' is a file-like object
            self._f_image = image
            self._image_path = image.name
        else:
            self._image_path = image
            self._open_image_file()

        if hasattr(bmap, "read"):
            # 'bmap' is a file-like object
            self._f_bmap = bmap
            self._bmap_path = bmap.name
        else:
            self._bmap_path = bmap
            self._open_bmap_file()

        # The Fiemap helper supplies image size, block size and the mapped
        # block ranges via the FIEMAP ioctl.
        self.fiemap = Fiemap.Fiemap(self._f_image)

        self.image_size = self.fiemap.image_size
        self.image_size_human = human_size(self.image_size)
        if self.image_size == 0:
            raise Error("cannot generate bmap for zero-sized image file '%s'" \
                        % self._image_path)

        self.block_size = self.fiemap.block_size
        self.blocks_cnt = self.fiemap.blocks_cnt

    def _bmap_file_start(self):
        """ A helper function which generates the starting contents of the
        block map file: the header comment, image size, block size, etc. """

        # We do not know the amount of mapped blocks at the moment, so just put
        # whitespaces instead of real numbers. Assume the longest possible
        # numbers.
        mapped_count = ' ' * len(str(self.image_size))
        mapped_size_human = ' ' * len(self.image_size_human)

        xml = _BMAP_START_TEMPLATE \
              % (SUPPORTED_BMAP_VERSION, self.image_size_human,
                 self.image_size, self.block_size, self.blocks_cnt)
        xml += "    <!-- Count of mapped blocks: "

        self._f_bmap.write(xml)
        # Remember the offset so '_bmap_file_end()' can seek back and patch
        # the placeholder in place without rewriting the file.
        self._mapped_count_pos1 = self._f_bmap.tell()

        # Just put white-spaces instead of real information about mapped blocks
        xml = "%s or %.1f -->\n" % (mapped_size_human, 100.0)
        xml += "    <MappedBlocksCount> "

        self._f_bmap.write(xml)
        self._mapped_count_pos2 = self._f_bmap.tell()

        xml = "%s </MappedBlocksCount>\n\n" % mapped_count

        # pylint: disable=C0301
        # NOTE(review): "has be zeoro" below is a typo in the *emitted* XML
        # comment ("has to be zero"); it is part of the program's output, not
        # of this source's comments, so it is deliberately left untouched.
        xml += "    <!-- The checksum of this bmap file. When it is calculated, the value of\n"
        xml += "         the SHA1 checksum has be zeoro (40 ASCII \"0\" symbols). -->\n"
        xml += "    <BmapFileSHA1> "

        self._f_bmap.write(xml)
        self._sha1_pos = self._f_bmap.tell()

        xml = "0" * 40 + " </BmapFileSHA1>\n\n"
        xml += "    <!-- The block map which consists of elements which may either be a\n"
        xml += "         range of blocks or a single block. The 'sha1' attribute (if present)\n"
        xml += "         is the SHA1 checksum of this blocks range. -->\n"
        xml += "    <BlockMap>\n"
        # pylint: enable=C0301

        self._f_bmap.write(xml)

    def _bmap_file_end(self):
        """ A helper function which generates the final parts of the block map
        file: the ending tags and the information about the amount of mapped
        blocks. """

        xml = "    </BlockMap>\n"
        xml += "</bmap>\n"

        self._f_bmap.write(xml)

        # Patch the placeholders written by '_bmap_file_start()' now that the
        # real values are known.
        self._f_bmap.seek(self._mapped_count_pos1)
        self._f_bmap.write("%s or %.1f%%" % \
                           (self.mapped_size_human, self.mapped_percent))

        self._f_bmap.seek(self._mapped_count_pos2)
        self._f_bmap.write("%u" % self.mapped_cnt)

        # The checksum is computed over the whole file while the SHA1 field
        # still holds its 40 "0" placeholder characters, then written there.
        self._f_bmap.seek(0)
        sha1 = hashlib.sha1(self._f_bmap.read()).hexdigest()
        self._f_bmap.seek(self._sha1_pos)
        self._f_bmap.write("%s" % sha1)

    def _calculate_sha1(self, first, last):
        """ A helper function which calculates SHA1 checksum for the range of
        blocks of the image file: from block 'first' to block 'last'. """

        start = first * self.block_size
        end = (last + 1) * self.block_size

        self._f_image.seek(start)
        hash_obj = hashlib.new("sha1")

        # Read in 1 MiB chunks to keep memory usage bounded; the final chunk
        # is trimmed so only the requested range is hashed.
        chunk_size = 1024*1024
        to_read = end - start
        read = 0

        while read < to_read:
            if read + chunk_size > to_read:
                chunk_size = to_read - read
            chunk = self._f_image.read(chunk_size)
            hash_obj.update(chunk)
            read += chunk_size

        return hash_obj.hexdigest()

    def generate(self, include_checksums = True):
        """ Generate bmap for the image file. If 'include_checksums' is 'True',
        also generate SHA1 checksums for block ranges. """

        # Save image file position in order to restore it at the end
        image_pos = self._f_image.tell()

        self._bmap_file_start()

        # Generate the block map and write it to the XML block map
        # file as we go.
        self.mapped_cnt = 0
        for first, last in self.fiemap.get_mapped_ranges(0, self.blocks_cnt):
            self.mapped_cnt += last - first + 1
            if include_checksums:
                sha1 = self._calculate_sha1(first, last)
                sha1 = " sha1=\"%s\"" % sha1
            else:
                sha1 = ""

            # A multi-block range is written as "first-last", a single block
            # as just "first".
            if first != last:
                self._f_bmap.write("        <Range%s> %s-%s </Range>\n" \
                                   % (sha1, first, last))
            else:
                self._f_bmap.write("        <Range%s> %s </Range>\n" \
                                   % (sha1, first))

        self.mapped_size = self.mapped_cnt * self.block_size
        self.mapped_size_human = human_size(self.mapped_size)
        self.mapped_percent = (self.mapped_cnt * 100.0) / self.blocks_cnt

        self._bmap_file_end()

        try:
            self._f_bmap.flush()
        except IOError as err:
            raise Error("cannot flush the bmap file '%s': %s" \
                        % (self._bmap_path, err))

        self._f_image.seek(image_pos)

    def __del__(self):
        """ The class destructor which closes the opened files. """

        # Only close the files this instance opened itself.
        if self._f_image_needs_close:
            self._f_image.close()
        if self._f_bmap_needs_close:
            self._f_bmap.close()
diff --git a/scripts/lib/mic/utils/Fiemap.py b/scripts/lib/mic/utils/Fiemap.py new file mode 100644 index 0000000000..f2db6ff0b8 --- /dev/null +++ b/scripts/lib/mic/utils/Fiemap.py | |||
@@ -0,0 +1,252 @@ | |||
1 | """ This module implements python API for the FIEMAP ioctl. The FIEMAP ioctl | ||
2 | allows to find holes and mapped areas in a file. """ | ||
3 | |||
4 | # Note, a lot of code in this module is not very readable, because it deals | ||
5 | # with the rather complex FIEMAP ioctl. To understand the code, you need to | ||
6 | # know the FIEMAP interface, which is documented in the | ||
7 | # Documentation/filesystems/fiemap.txt file in the Linux kernel sources. | ||
8 | |||
9 | # Disable the following pylint recommendations: | ||
10 | # * Too many instance attributes (R0902) | ||
11 | # pylint: disable=R0902 | ||
12 | |||
13 | import os | ||
14 | import struct | ||
15 | import array | ||
16 | import fcntl | ||
17 | from mic.utils.misc import get_block_size | ||
18 | |||
19 | # Format string for 'struct fiemap' | ||
20 | _FIEMAP_FORMAT = "=QQLLLL" | ||
21 | # sizeof(struct fiemap) | ||
22 | _FIEMAP_SIZE = struct.calcsize(_FIEMAP_FORMAT) | ||
23 | # Format string for 'struct fiemap_extent' | ||
24 | _FIEMAP_EXTENT_FORMAT = "=QQQQQLLLL" | ||
25 | # sizeof(struct fiemap_extent) | ||
26 | _FIEMAP_EXTENT_SIZE = struct.calcsize(_FIEMAP_EXTENT_FORMAT) | ||
27 | # The FIEMAP ioctl number | ||
28 | _FIEMAP_IOCTL = 0xC020660B | ||
29 | |||
30 | # Minimum buffer which is required for 'class Fiemap' to operate | ||
31 | MIN_BUFFER_SIZE = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE | ||
32 | # The default buffer size for 'class Fiemap' | ||
33 | DEFAULT_BUFFER_SIZE = 256 * 1024 | ||
34 | |||
class Error(Exception):
    """ The only exception type this module raises; it simply carries a
    human-readable description of the problem that occurred. """
    pass
40 | |||
class Fiemap:
    """ This class provides API to the FIEMAP ioctl. Namely, it allows to
    iterate over all mapped blocks and over all holes. """

    def _open_image_file(self):
        """ Open the image file read-only and remember that this instance
        owns the file object (so '__del__' knows to close it). """

        try:
            self._f_image = open(self._image_path, 'rb')
        except IOError as err:
            raise Error("cannot open image file '%s': %s" \
                        % (self._image_path, err))

        self._f_image_needs_close = True

    def __init__(self, image, buf_size = DEFAULT_BUFFER_SIZE):
        """ Initialize a class instance. The 'image' argument is full path to
        the file to operate on, or a file object to operate on.

        The 'buf_size' argument is the size of the buffer for 'struct
        fiemap_extent' elements which will be used when invoking the FIEMAP
        ioctl. The larger is the buffer, the less times the FIEMAP ioctl will
        be invoked.

        Raises 'Error' if the buffer is too small, if the file cannot be
        opened or synchronized, or if the kernel rejects the FIEMAP ioctl. """

        self._f_image_needs_close = False

        if hasattr(image, "fileno"):
            # 'image' is already an open file object
            self._f_image = image
            self._image_path = image.name
        else:
            self._image_path = image
            self._open_image_file()

        # Validate 'buf_size'
        if buf_size < MIN_BUFFER_SIZE:
            raise Error("too small buffer (%d bytes), minimum is %d bytes" \
                        % (buf_size, MIN_BUFFER_SIZE))

        # How many 'struct fiemap_extent' elements fit the buffer.
        # (Python 2: '/' between ints is floor division.)
        buf_size -= _FIEMAP_SIZE
        self._fiemap_extent_cnt = buf_size / _FIEMAP_EXTENT_SIZE
        self._buf_size = self._fiemap_extent_cnt * _FIEMAP_EXTENT_SIZE
        self._buf_size += _FIEMAP_SIZE

        # Allocate a mutable buffer for the FIEMAP ioctl
        self._buf = array.array('B', [0] * self._buf_size)

        self.image_size = os.fstat(self._f_image.fileno()).st_size

        try:
            self.block_size = get_block_size(self._f_image)
        except IOError as err:
            raise Error("cannot get block size for '%s': %s" \
                        % (self._image_path, err))

        # Round the image size up to a whole number of blocks
        self.blocks_cnt = self.image_size + self.block_size - 1
        self.blocks_cnt /= self.block_size

        # Synchronize the image file to make sure FIEMAP returns correct values
        try:
            self._f_image.flush()
        except IOError as err:
            raise Error("cannot flush image file '%s': %s" \
                        % (self._image_path, err))
        try:
            # NOTE(review): the trailing comma makes this statement a 1-tuple
            # expression; harmless at runtime, but presumably unintended.
            os.fsync(self._f_image.fileno()),
        except OSError as err:
            raise Error("cannot synchronize image file '%s': %s " \
                        % (self._image_path, err.strerror))

        # Check if the FIEMAP ioctl is supported
        self.block_is_mapped(0)

    def __del__(self):
        """ The class destructor which closes the opened files. """

        # Only close the file if this instance opened it itself.
        if self._f_image_needs_close:
            self._f_image.close()

    def _invoke_fiemap(self, block, count):
        """ Invoke the FIEMAP ioctl for 'count' blocks of the file starting from
        block number 'block'.

        The full result of the operation is stored in 'self._buf' on exit.
        Returns the unpacked 'struct fiemap' data structure in form of a python
        list (just like 'struct.upack()'). """

        if block < 0 or block >= self.blocks_cnt:
            raise Error("bad block number %d, should be within [0, %d]" \
                        % (block, self.blocks_cnt))

        # Initialize the 'struct fiemap' part of the buffer
        struct.pack_into(_FIEMAP_FORMAT, self._buf, 0, block * self.block_size,
                         count * self.block_size, 0, 0,
                         self._fiemap_extent_cnt, 0)

        try:
            # Mutate-in-place ioctl: the kernel fills 'self._buf' with the
            # extent data.
            fcntl.ioctl(self._f_image, _FIEMAP_IOCTL, self._buf, 1)
        except IOError as err:
            error_msg = "the FIEMAP ioctl failed for '%s': %s" \
                        % (self._image_path, err)
            if err.errno == os.errno.EPERM or err.errno == os.errno.EACCES:
                # The FIEMAP ioctl was added in kernel version 2.6.28 in 2008
                error_msg += " (looks like your kernel does not support FIEMAP)"

            raise Error(error_msg)

        return struct.unpack(_FIEMAP_FORMAT, self._buf[:_FIEMAP_SIZE])

    def block_is_mapped(self, block):
        """ This function returns 'True' if block number 'block' of the image
        file is mapped and 'False' otherwise. """

        struct_fiemap = self._invoke_fiemap(block, 1)

        # The 3rd element of 'struct_fiemap' is the 'fm_mapped_extents' field.
        # If it contains zero, the block is not mapped, otherwise it is
        # mapped.
        return bool(struct_fiemap[3])

    def block_is_unmapped(self, block):
        """ This function returns 'True' if block number 'block' of the image
        file is not mapped (hole) and 'False' otherwise. """

        return not self.block_is_mapped(block)

    def _unpack_fiemap_extent(self, index):
        """ Unpack a 'struct fiemap_extent' structure object number 'index'
        from the internal 'self._buf' buffer. """

        # Extents are laid out back-to-back right after 'struct fiemap'.
        offset = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE * index
        return struct.unpack(_FIEMAP_EXTENT_FORMAT,
                             self._buf[offset : offset + _FIEMAP_EXTENT_SIZE])

    def _do_get_mapped_ranges(self, start, count):
        """ Implements most the functionality for the 'get_mapped_ranges()'
        generator: invokes the FIEMAP ioctl, walks through the mapped
        extents and yields mapped block ranges. However, the ranges may be
        consecutive (e.g., (1, 100), (100, 200)) and 'get_mapped_ranges()'
        simply merges them. """

        block = start
        while block < start + count:
            struct_fiemap = self._invoke_fiemap(block, count)

            mapped_extents = struct_fiemap[3]
            if mapped_extents == 0:
                # No more mapped blocks
                return

            extent = 0
            while extent < mapped_extents:
                fiemap_extent = self._unpack_fiemap_extent(extent)

                # Start of the extent
                extent_start = fiemap_extent[0]
                # Starting block number of the extent
                extent_block = extent_start / self.block_size
                # Length of the extent
                extent_len = fiemap_extent[2]
                # Count of blocks in the extent
                extent_count = extent_len / self.block_size

                # Extent length and offset have to be block-aligned
                assert extent_start % self.block_size == 0
                assert extent_len % self.block_size == 0

                if extent_block > start + count - 1:
                    return

                # Clamp the extent to the requested [start, start+count) area
                first = max(extent_block, block)
                last = min(extent_block + extent_count, start + count) - 1
                yield (first, last)

                extent += 1

            # Continue the next ioctl right after the last extent seen
            block = extent_block + extent_count

    def get_mapped_ranges(self, start, count):
        """ A generator which yields ranges of mapped blocks in the file. The
        ranges are tuples of 2 elements: [first, last], where 'first' is the
        first mapped block and 'last' is the last mapped block.

        The ranges are yielded for the area of the file of size 'count' blocks,
        starting from block 'start'. """

        iterator = self._do_get_mapped_ranges(start, count)

        # Python 2 iterator protocol ('.next()'). If there are no mapped
        # ranges at all, the StopIteration raised here simply ends this
        # generator too (pre-PEP 479 semantics).
        first_prev, last_prev = iterator.next()

        # Merge adjacent ranges, e.g. (1, 100) and (101, 200) -> (1, 200)
        for first, last in iterator:
            if last_prev == first - 1:
                last_prev = last
            else:
                yield (first_prev, last_prev)
                first_prev, last_prev = first, last

        yield (first_prev, last_prev)

    def get_unmapped_ranges(self, start, count):
        """ Just like 'get_mapped_ranges()', but yields unmapped block ranges
        instead (holes). """

        hole_first = start
        for first, last in self._do_get_mapped_ranges(start, count):
            # Anything between the previous mapped range and this one is a hole
            if first > hole_first:
                yield (hole_first, first - 1)

            hole_first = last + 1

        # Trailing hole after the last mapped range, if any
        if hole_first < start + count:
            yield (hole_first, start + count - 1)
diff --git a/scripts/lib/mic/utils/__init__.py b/scripts/lib/mic/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/mic/utils/__init__.py | |||
diff --git a/scripts/lib/mic/utils/cmdln.py b/scripts/lib/mic/utils/cmdln.py new file mode 100644 index 0000000000..b099473ee4 --- /dev/null +++ b/scripts/lib/mic/utils/cmdln.py | |||
@@ -0,0 +1,1586 @@ | |||
1 | #!/usr/bin/env python | ||
2 | # Copyright (c) 2002-2007 ActiveState Software Inc. | ||
3 | # License: MIT (see LICENSE.txt for license details) | ||
4 | # Author: Trent Mick | ||
5 | # Home: http://trentm.com/projects/cmdln/ | ||
6 | |||
7 | """An improvement on Python's standard cmd.py module. | ||
8 | |||
9 | As with cmd.py, this module provides "a simple framework for writing | ||
10 | line-oriented command intepreters." This module provides a 'RawCmdln' | ||
11 | class that fixes some design flaws in cmd.Cmd, making it more scalable | ||
12 | and nicer to use for good 'cvs'- or 'svn'-style command line interfaces | ||
13 | or simple shells. And it provides a 'Cmdln' class that add | ||
14 | optparse-based option processing. Basically you use it like this: | ||
15 | |||
16 | import cmdln | ||
17 | |||
18 | class MySVN(cmdln.Cmdln): | ||
19 | name = "svn" | ||
20 | |||
21 | @cmdln.alias('stat', 'st') | ||
22 | @cmdln.option('-v', '--verbose', action='store_true' | ||
23 | help='print verbose information') | ||
24 | def do_status(self, subcmd, opts, *paths): | ||
25 | print "handle 'svn status' command" | ||
26 | |||
27 | #... | ||
28 | |||
29 | if __name__ == "__main__": | ||
30 | shell = MySVN() | ||
31 | retval = shell.main() | ||
32 | sys.exit(retval) | ||
33 | |||
34 | See the README.txt or <http://trentm.com/projects/cmdln/> for more | ||
35 | details. | ||
36 | """ | ||
37 | |||
38 | __version_info__ = (1, 1, 2) | ||
39 | __version__ = '.'.join(map(str, __version_info__)) | ||
40 | |||
41 | import os | ||
42 | import sys | ||
43 | import re | ||
44 | import cmd | ||
45 | import optparse | ||
46 | from pprint import pprint | ||
47 | import sys | ||
48 | |||
49 | |||
50 | |||
51 | |||
#---- globals

# Constants for the 'loop' argument of 'RawCmdln.main()': always start the
# interactive loop, never start it, or start it only when no command was
# given on the command line.
LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3)

# An unspecified optional argument when None is a meaningful value.
_NOT_SPECIFIED = ("Not", "Specified")

# Pattern to match a TypeError message from a call that
# failed because of incorrect number of arguments (see
# Python/getargs.c).
_INCORRECT_NUM_ARGS_RE = re.compile(
    r"(takes [\w ]+ )(\d+)( arguments? \()(\d+)( given\))")
64 | |||
65 | |||
66 | |||
67 | #---- exceptions | ||
68 | |||
class CmdlnError(Exception):
    """Raised for incorrect usage of the cmdln.py framework itself
    (a programming error, as opposed to a user error)."""

    def __init__(self, msg):
        # Keep the message on the instance so __str__ can hand it back.
        self.msg = msg

    def __str__(self):
        return self.msg
75 | |||
class CmdlnUserError(Exception):
    """Raised when the *user* of a cmdln-based tool/shell makes a
    mistake (bad arguments, unknown command, etc.)."""
    pass
79 | |||
80 | |||
81 | |||
82 | #---- public methods and classes | ||
83 | |||
def alias(*aliases):
    """Decorator to add aliases for Cmdln.do_* command handlers.

    Example:
        class MyShell(cmdln.Cmdln):
            @cmdln.alias("!", "sh")
            def do_shell(self, argv):
                #...implement 'shell' command
    """
    def decorate(func):
        # Accumulate onto any aliases added by an earlier decorator
        # application rather than replacing them.
        if not hasattr(func, "aliases"):
            func.aliases = []
        func.aliases.extend(aliases)
        return func
    return decorate
99 | |||
100 | |||
101 | class RawCmdln(cmd.Cmd): | ||
102 | """An improved (on cmd.Cmd) framework for building multi-subcommand | ||
103 | scripts (think "svn" & "cvs") and simple shells (think "pdb" and | ||
104 | "gdb"). | ||
105 | |||
106 | A simple example: | ||
107 | |||
108 | import cmdln | ||
109 | |||
110 | class MySVN(cmdln.RawCmdln): | ||
111 | name = "svn" | ||
112 | |||
113 | @cmdln.aliases('stat', 'st') | ||
114 | def do_status(self, argv): | ||
115 | print "handle 'svn status' command" | ||
116 | |||
117 | if __name__ == "__main__": | ||
118 | shell = MySVN() | ||
119 | retval = shell.main() | ||
120 | sys.exit(retval) | ||
121 | |||
122 | See <http://trentm.com/projects/cmdln> for more information. | ||
123 | """ | ||
124 | name = None # if unset, defaults basename(sys.argv[0]) | ||
125 | prompt = None # if unset, defaults to self.name+"> " | ||
126 | version = None # if set, default top-level options include --version | ||
127 | |||
128 | # Default messages for some 'help' command error cases. | ||
129 | # They are interpolated with one arg: the command. | ||
130 | nohelp = "no help on '%s'" | ||
131 | unknowncmd = "unknown command: '%s'" | ||
132 | |||
133 | helpindent = '' # string with which to indent help output | ||
134 | |||
135 | def __init__(self, completekey='tab', | ||
136 | stdin=None, stdout=None, stderr=None): | ||
137 | """Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None) | ||
138 | |||
139 | The optional argument 'completekey' is the readline name of a | ||
140 | completion key; it defaults to the Tab key. If completekey is | ||
141 | not None and the readline module is available, command completion | ||
142 | is done automatically. | ||
143 | |||
144 | The optional arguments 'stdin', 'stdout' and 'stderr' specify | ||
145 | alternate input, output and error output file objects; if not | ||
146 | specified, sys.* are used. | ||
147 | |||
148 | If 'stdout' but not 'stderr' is specified, stdout is used for | ||
149 | error output. This is to provide least surprise for users used | ||
150 | to only the 'stdin' and 'stdout' options with cmd.Cmd. | ||
151 | """ | ||
152 | import sys | ||
153 | if self.name is None: | ||
154 | self.name = os.path.basename(sys.argv[0]) | ||
155 | if self.prompt is None: | ||
156 | self.prompt = self.name+"> " | ||
157 | self._name_str = self._str(self.name) | ||
158 | self._prompt_str = self._str(self.prompt) | ||
159 | if stdin is not None: | ||
160 | self.stdin = stdin | ||
161 | else: | ||
162 | self.stdin = sys.stdin | ||
163 | if stdout is not None: | ||
164 | self.stdout = stdout | ||
165 | else: | ||
166 | self.stdout = sys.stdout | ||
167 | if stderr is not None: | ||
168 | self.stderr = stderr | ||
169 | elif stdout is not None: | ||
170 | self.stderr = stdout | ||
171 | else: | ||
172 | self.stderr = sys.stderr | ||
173 | self.cmdqueue = [] | ||
174 | self.completekey = completekey | ||
175 | self.cmdlooping = False | ||
176 | |||
177 | def get_optparser(self): | ||
178 | """Hook for subclasses to set the option parser for the | ||
179 | top-level command/shell. | ||
180 | |||
181 | This option parser is used retrieved and used by `.main()' to | ||
182 | handle top-level options. | ||
183 | |||
184 | The default implements a single '-h|--help' option. Sub-classes | ||
185 | can return None to have no options at the top-level. Typically | ||
186 | an instance of CmdlnOptionParser should be returned. | ||
187 | """ | ||
188 | version = (self.version is not None | ||
189 | and "%s %s" % (self._name_str, self.version) | ||
190 | or None) | ||
191 | return CmdlnOptionParser(self, version=version) | ||
192 | |||
193 | def postoptparse(self): | ||
194 | """Hook method executed just after `.main()' parses top-level | ||
195 | options. | ||
196 | |||
197 | When called `self.options' holds the results of the option parse. | ||
198 | """ | ||
199 | pass | ||
200 | |||
    def main(self, argv=None, loop=LOOP_NEVER):
        """A possible mainline handler for a script, like so:

            import cmdln
            class MyCmd(cmdln.Cmdln):
                name = "mycmd"
                ...

            if __name__ == "__main__":
                MyCmd().main()

        By default this will use sys.argv to issue a single command to
        'MyCmd', then exit. The 'loop' argument can be used to control
        interactive shell behaviour.

        Arguments:
            "argv" (optional, default sys.argv) is the command to run.
                It must be a sequence, where the first element is the
                command name and subsequent elements the args for that
                command.
            "loop" (optional, default LOOP_NEVER) is a constant
                indicating if a command loop should be started (i.e. an
                interactive shell). Valid values (constants on this module):
                    LOOP_ALWAYS     start loop and run "argv", if any
                    LOOP_NEVER      run "argv" (or .emptyline()) and exit
                    LOOP_IF_EMPTY   run "argv", if given, and exit;
                                    otherwise, start loop

        Returns the command handler's return value, 1 on a usage error,
        or 0 when option processing was stopped early.
        """
        if argv is None:
            import sys
            argv = sys.argv
        else:
            argv = argv[:] # don't modify caller's list

        self.optparser = self.get_optparser()
        if self.optparser: # i.e. optparser=None means don't process for opts
            try:
                self.options, args = self.optparser.parse_args(argv[1:])
            # Python 2 'except <type>, <var>' syntax (this file is Python 2).
            except CmdlnUserError, ex:
                msg = "%s: %s\nTry '%s help' for info.\n"\
                      % (self.name, ex, self.name)
                self.stderr.write(self._str(msg))
                self.stderr.flush()
                return 1
            except StopOptionProcessing, ex:
                # Defined elsewhere in this module; presumably signals that
                # the parser fully handled the request (e.g. printed help).
                return 0
        else:
            self.options, args = None, argv[1:]
        self.postoptparse()

        if loop == LOOP_ALWAYS:
            # Queue the given command (if any) and enter the shell.
            if args:
                self.cmdqueue.append(args)
            return self.cmdloop()
        elif loop == LOOP_NEVER:
            if args:
                return self.cmd(args)
            else:
                return self.emptyline()
        elif loop == LOOP_IF_EMPTY:
            if args:
                return self.cmd(args)
            else:
                return self.cmdloop()
265 | |||
266 | def cmd(self, argv): | ||
267 | """Run one command and exit. | ||
268 | |||
269 | "argv" is the arglist for the command to run. argv[0] is the | ||
270 | command to run. If argv is an empty list then the | ||
271 | 'emptyline' handler is run. | ||
272 | |||
273 | Returns the return value from the command handler. | ||
274 | """ | ||
275 | assert isinstance(argv, (list, tuple)), \ | ||
276 | "'argv' is not a sequence: %r" % argv | ||
277 | retval = None | ||
278 | try: | ||
279 | argv = self.precmd(argv) | ||
280 | retval = self.onecmd(argv) | ||
281 | self.postcmd(argv) | ||
282 | except: | ||
283 | if not self.cmdexc(argv): | ||
284 | raise | ||
285 | retval = 1 | ||
286 | return retval | ||
287 | |||
    def _str(self, s):
        """Safely convert the given str/unicode to a string for printing."""
        try:
            return str(s)
        except UnicodeError:
            # str() raises for unicode objects holding characters that
            # the default codec cannot represent; fall back to an
            # explicit UTF-8 encode with replacement.
            #XXX What is the proper encoding to use here? 'utf-8' seems
            # to work better than "getdefaultencoding" (usually
            # 'ascii'), on OS X at least.
            #import sys
            #return s.encode(sys.getdefaultencoding(), "replace")
            return s.encode("utf-8", "replace")
299 | |||
    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse into an argv, and
        dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
        the argv. In other words, start a shell.

            "intro" (optional) is a introductory message to print when
                starting the command loop. This overrides the class
                "intro" attribute, if any.

        Returns the return value of the last command dispatched.
        """
        self.cmdlooping = True
        self.preloop()
        # Install tab-completion via readline, if available; remember
        # the previous completer so it can be restored on exit.
        if self.use_rawinput and self.completekey:
            try:
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey+": complete")
            except ImportError:
                pass
        try:
            if intro is None:
                intro = self.intro
            if intro:
                intro_str = self._str(intro)
                self.stdout.write(intro_str+'\n')
            self.stop = False
            retval = None
            while not self.stop:
                # Queued commands (e.g. from main()) take precedence
                # over prompting for input.
                if self.cmdqueue:
                    argv = self.cmdqueue.pop(0)
                    assert isinstance(argv, (list, tuple)), \
                        "item on 'cmdqueue' is not a sequence: %r" % argv
                else:
                    if self.use_rawinput:
                        try:
                            line = raw_input(self._prompt_str)
                        except EOFError:
                            # An EOF on input becomes the 'EOF'
                            # pseudo-command (see _do_EOF).
                            line = 'EOF'
                    else:
                        self.stdout.write(self._prompt_str)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            line = 'EOF'
                        else:
                            line = line[:-1] # chop '\n'
                    argv = line2argv(line)
                try:
                    argv = self.precmd(argv)
                    retval = self.onecmd(argv)
                    self.postcmd(argv)
                except:
                    # Let .cmdexc() decide whether the exception was a
                    # handled user error; otherwise re-raise.
                    if not self.cmdexc(argv):
                        raise
                    retval = 1
                self.lastretval = retval
            self.postloop()
        finally:
            # Restore the completer that was active before the loop.
            if self.use_rawinput and self.completekey:
                try:
                    import readline
                    readline.set_completer(self.old_completer)
                except ImportError:
                    pass
        self.cmdlooping = False
        return retval
366 | |||
367 | def precmd(self, argv): | ||
368 | """Hook method executed just before the command argv is | ||
369 | interpreted, but after the input prompt is generated and issued. | ||
370 | |||
371 | "argv" is the cmd to run. | ||
372 | |||
373 | Returns an argv to run (i.e. this method can modify the command | ||
374 | to run). | ||
375 | """ | ||
376 | return argv | ||
377 | |||
378 | def postcmd(self, argv): | ||
379 | """Hook method executed just after a command dispatch is finished. | ||
380 | |||
381 | "argv" is the command that was run. | ||
382 | """ | ||
383 | pass | ||
384 | |||
385 | def cmdexc(self, argv): | ||
386 | """Called if an exception is raised in any of precmd(), onecmd(), | ||
387 | or postcmd(). If True is returned, the exception is deemed to have | ||
388 | been dealt with. Otherwise, the exception is re-raised. | ||
389 | |||
390 | The default implementation handles CmdlnUserError's, which | ||
391 | typically correspond to user error in calling commands (as | ||
392 | opposed to programmer error in the design of the script using | ||
393 | cmdln.py). | ||
394 | """ | ||
395 | import sys | ||
396 | type, exc, traceback = sys.exc_info() | ||
397 | if isinstance(exc, CmdlnUserError): | ||
398 | msg = "%s %s: %s\nTry '%s help %s' for info.\n"\ | ||
399 | % (self.name, argv[0], exc, self.name, argv[0]) | ||
400 | self.stderr.write(self._str(msg)) | ||
401 | self.stderr.flush() | ||
402 | return True | ||
403 | |||
404 | def onecmd(self, argv): | ||
405 | if not argv: | ||
406 | return self.emptyline() | ||
407 | self.lastcmd = argv | ||
408 | cmdname = self._get_canonical_cmd_name(argv[0]) | ||
409 | if cmdname: | ||
410 | handler = self._get_cmd_handler(cmdname) | ||
411 | if handler: | ||
412 | return self._dispatch_cmd(handler, argv) | ||
413 | return self.default(argv) | ||
414 | |||
415 | def _dispatch_cmd(self, handler, argv): | ||
416 | return handler(argv) | ||
417 | |||
418 | def default(self, argv): | ||
419 | """Hook called to handle a command for which there is no handler. | ||
420 | |||
421 | "argv" is the command and arguments to run. | ||
422 | |||
423 | The default implementation writes and error message to stderr | ||
424 | and returns an error exit status. | ||
425 | |||
426 | Returns a numeric command exit status. | ||
427 | """ | ||
428 | errmsg = self._str(self.unknowncmd % (argv[0],)) | ||
429 | if self.cmdlooping: | ||
430 | self.stderr.write(errmsg+"\n") | ||
431 | else: | ||
432 | self.stderr.write("%s: %s\nTry '%s help' for info.\n" | ||
433 | % (self._name_str, errmsg, self._name_str)) | ||
434 | self.stderr.flush() | ||
435 | return 1 | ||
436 | |||
437 | def parseline(self, line): | ||
438 | # This is used by Cmd.complete (readline completer function) to | ||
439 | # massage the current line buffer before completion processing. | ||
440 | # We override to drop special '!' handling. | ||
441 | line = line.strip() | ||
442 | if not line: | ||
443 | return None, None, line | ||
444 | elif line[0] == '?': | ||
445 | line = 'help ' + line[1:] | ||
446 | i, n = 0, len(line) | ||
447 | while i < n and line[i] in self.identchars: i = i+1 | ||
448 | cmd, arg = line[:i], line[i:].strip() | ||
449 | return cmd, arg, line | ||
450 | |||
451 | def helpdefault(self, cmd, known): | ||
452 | """Hook called to handle help on a command for which there is no | ||
453 | help handler. | ||
454 | |||
455 | "cmd" is the command name on which help was requested. | ||
456 | "known" is a boolean indicating if this command is known | ||
457 | (i.e. if there is a handler for it). | ||
458 | |||
459 | Returns a return code. | ||
460 | """ | ||
461 | if known: | ||
462 | msg = self._str(self.nohelp % (cmd,)) | ||
463 | if self.cmdlooping: | ||
464 | self.stderr.write(msg + '\n') | ||
465 | else: | ||
466 | self.stderr.write("%s: %s\n" % (self.name, msg)) | ||
467 | else: | ||
468 | msg = self.unknowncmd % (cmd,) | ||
469 | if self.cmdlooping: | ||
470 | self.stderr.write(msg + '\n') | ||
471 | else: | ||
472 | self.stderr.write("%s: %s\n" | ||
473 | "Try '%s help' for info.\n" | ||
474 | % (self.name, msg, self.name)) | ||
475 | self.stderr.flush() | ||
476 | return 1 | ||
477 | |||
    def do_help(self, argv):
        """${cmd_name}: give detailed help on a specific sub-command

        Usage:
            ${name} help [COMMAND]
        """
        if len(argv) > 1: # asking for help on a particular command
            doc = None
            cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
            # NOTE(review): because of the 'or argv[1]' fallback above,
            # cmdname is truthy whenever argv[1] is non-empty, so this
            # branch appears unreachable in practice -- confirm before
            # relying on it.
            if not cmdname:
                return self.helpdefault(argv[1], False)
            else:
                # Prefer an explicit help_FOO() handler; otherwise fall
                # back to the do_FOO handler's docstring.
                helpfunc = getattr(self, "help_"+cmdname, None)
                if helpfunc:
                    doc = helpfunc()
                else:
                    handler = self._get_cmd_handler(cmdname)
                    if handler:
                        doc = handler.__doc__
                    if doc is None:
                        return self.helpdefault(argv[1], handler != None)
        else: # bare "help" command
            doc = self.__class__.__doc__  # try class docstring
            if doc is None:
                # Try to provide some reasonable useful default help.
                if self.cmdlooping: prefix = ""
                else:               prefix = self.name+' '
                doc = """Usage:
                    %sCOMMAND [ARGS...]
                    %shelp [COMMAND]

                ${option_list}
                ${command_list}
                ${help_list}
                """ % (prefix, prefix)
            cmdname = None

        if doc: # *do* have help content, massage and print that
            doc = self._help_reindent(doc)
            doc = self._help_preprocess(doc, cmdname)
            doc = doc.rstrip() + '\n' # trim down trailing space
            self.stdout.write(self._str(doc))
            self.stdout.flush()
    # "?" is a shorthand for "help" (see _get_canonical_map()).
    do_help.aliases = ["?"]
522 | |||
523 | def _help_reindent(self, help, indent=None): | ||
524 | """Hook to re-indent help strings before writing to stdout. | ||
525 | |||
526 | "help" is the help content to re-indent | ||
527 | "indent" is a string with which to indent each line of the | ||
528 | help content after normalizing. If unspecified or None | ||
529 | then the default is use: the 'self.helpindent' class | ||
530 | attribute. By default this is the empty string, i.e. | ||
531 | no indentation. | ||
532 | |||
533 | By default, all common leading whitespace is removed and then | ||
534 | the lot is indented by 'self.helpindent'. When calculating the | ||
535 | common leading whitespace the first line is ignored -- hence | ||
536 | help content for Conan can be written as follows and have the | ||
537 | expected indentation: | ||
538 | |||
539 | def do_crush(self, ...): | ||
540 | '''${cmd_name}: crush your enemies, see them driven before you... | ||
541 | |||
542 | c.f. Conan the Barbarian''' | ||
543 | """ | ||
544 | if indent is None: | ||
545 | indent = self.helpindent | ||
546 | lines = help.splitlines(0) | ||
547 | _dedentlines(lines, skip_first_line=True) | ||
548 | lines = [(indent+line).rstrip() for line in lines] | ||
549 | return '\n'.join(lines) | ||
550 | |||
551 | def _help_preprocess(self, help, cmdname): | ||
552 | """Hook to preprocess a help string before writing to stdout. | ||
553 | |||
554 | "help" is the help string to process. | ||
555 | "cmdname" is the canonical sub-command name for which help | ||
556 | is being given, or None if the help is not specific to a | ||
557 | command. | ||
558 | |||
559 | By default the following template variables are interpolated in | ||
560 | help content. (Note: these are similar to Python 2.4's | ||
561 | string.Template interpolation but not quite.) | ||
562 | |||
563 | ${name} | ||
564 | The tool's/shell's name, i.e. 'self.name'. | ||
565 | ${option_list} | ||
566 | A formatted table of options for this shell/tool. | ||
567 | ${command_list} | ||
568 | A formatted table of available sub-commands. | ||
569 | ${help_list} | ||
570 | A formatted table of additional help topics (i.e. 'help_*' | ||
571 | methods with no matching 'do_*' method). | ||
572 | ${cmd_name} | ||
573 | The name (and aliases) for this sub-command formatted as: | ||
574 | "NAME (ALIAS1, ALIAS2, ...)". | ||
575 | ${cmd_usage} | ||
576 | A formatted usage block inferred from the command function | ||
577 | signature. | ||
578 | ${cmd_option_list} | ||
579 | A formatted table of options for this sub-command. (This is | ||
580 | only available for commands using the optparse integration, | ||
581 | i.e. using @cmdln.option decorators or manually setting the | ||
582 | 'optparser' attribute on the 'do_*' method.) | ||
583 | |||
584 | Returns the processed help. | ||
585 | """ | ||
586 | preprocessors = { | ||
587 | "${name}": self._help_preprocess_name, | ||
588 | "${option_list}": self._help_preprocess_option_list, | ||
589 | "${command_list}": self._help_preprocess_command_list, | ||
590 | "${help_list}": self._help_preprocess_help_list, | ||
591 | "${cmd_name}": self._help_preprocess_cmd_name, | ||
592 | "${cmd_usage}": self._help_preprocess_cmd_usage, | ||
593 | "${cmd_option_list}": self._help_preprocess_cmd_option_list, | ||
594 | } | ||
595 | |||
596 | for marker, preprocessor in preprocessors.items(): | ||
597 | if marker in help: | ||
598 | help = preprocessor(help, cmdname) | ||
599 | return help | ||
600 | |||
601 | def _help_preprocess_name(self, help, cmdname=None): | ||
602 | return help.replace("${name}", self.name) | ||
603 | |||
604 | def _help_preprocess_option_list(self, help, cmdname=None): | ||
605 | marker = "${option_list}" | ||
606 | indent, indent_width = _get_indent(marker, help) | ||
607 | suffix = _get_trailing_whitespace(marker, help) | ||
608 | |||
609 | if self.optparser: | ||
610 | # Setup formatting options and format. | ||
611 | # - Indentation of 4 is better than optparse default of 2. | ||
612 | # C.f. Damian Conway's discussion of this in Perl Best | ||
613 | # Practices. | ||
614 | self.optparser.formatter.indent_increment = 4 | ||
615 | self.optparser.formatter.current_indent = indent_width | ||
616 | block = self.optparser.format_option_help() + '\n' | ||
617 | else: | ||
618 | block = "" | ||
619 | |||
620 | help = help.replace(indent+marker+suffix, block, 1) | ||
621 | return help | ||
622 | |||
623 | |||
    def _help_preprocess_command_list(self, help, cmdname=None):
        # Expand the ${command_list} template var with a formatted
        # table of the available (non-hidden) sub-commands and their
        # one-line docs.
        marker = "${command_list}"
        indent, indent_width = _get_indent(marker, help)
        suffix = _get_trailing_whitespace(marker, help)

        # Find any aliases for commands.
        # NOTE(review): this loop rebinds the 'cmdname' parameter; the
        # original value is never used afterwards, but keep that in
        # mind when modifying this method.
        token2canonical = self._get_canonical_map()
        aliases = {}
        for token, cmdname in token2canonical.items():
            if token == cmdname: continue
            aliases.setdefault(cmdname, []).append(token)

        # Get the list of (non-hidden) commands and their
        # documentation, if any.
        cmdnames = {} # use a dict to strip duplicates
        for attr in self.get_names():
            if attr.startswith("do_"):
                cmdnames[attr[3:]] = True
        cmdnames = cmdnames.keys()
        cmdnames.sort()
        linedata = []
        for cmdname in cmdnames:
            if aliases.get(cmdname):
                a = aliases[cmdname]
                a.sort()
                cmdstr = "%s (%s)" % (cmdname, ", ".join(a))
            else:
                cmdstr = cmdname
            doc = None
            # Prefer the help_FOO() handler's return value; fall back
            # to the do_FOO handler's docstring (try/else: the 'else'
            # runs only when getattr succeeded).
            try:
                helpfunc = getattr(self, 'help_'+cmdname)
            except AttributeError:
                handler = self._get_cmd_handler(cmdname)
                if handler:
                    doc = handler.__doc__
            else:
                doc = helpfunc()

            # Strip "${cmd_name}: " from the start of a command's doc. Best
            # practice dictates that command help strings begin with this, but
            # it isn't at all wanted for the command list.
            to_strip = "${cmd_name}:"
            if doc and doc.startswith(to_strip):
                #log.debug("stripping %r from start of %s's help string",
                #          to_strip, cmdname)
                doc = doc[len(to_strip):].lstrip()
            linedata.append( (cmdstr, doc) )

        if linedata:
            subindent = indent + ' '*4
            lines = _format_linedata(linedata, subindent, indent_width+4)
            block = indent + "Commands:\n" \
                    + '\n'.join(lines) + "\n\n"
            help = help.replace(indent+marker+suffix, block, 1)
        return help
679 | |||
680 | def _gen_names_and_attrs(self): | ||
681 | # Inheritance says we have to look in class and | ||
682 | # base classes; order is not important. | ||
683 | names = [] | ||
684 | classes = [self.__class__] | ||
685 | while classes: | ||
686 | aclass = classes.pop(0) | ||
687 | if aclass.__bases__: | ||
688 | classes = classes + list(aclass.__bases__) | ||
689 | for name in dir(aclass): | ||
690 | yield (name, getattr(aclass, name)) | ||
691 | |||
692 | def _help_preprocess_help_list(self, help, cmdname=None): | ||
693 | marker = "${help_list}" | ||
694 | indent, indent_width = _get_indent(marker, help) | ||
695 | suffix = _get_trailing_whitespace(marker, help) | ||
696 | |||
697 | # Determine the additional help topics, if any. | ||
698 | helpnames = {} | ||
699 | token2cmdname = self._get_canonical_map() | ||
700 | for attrname, attr in self._gen_names_and_attrs(): | ||
701 | if not attrname.startswith("help_"): continue | ||
702 | helpname = attrname[5:] | ||
703 | if helpname not in token2cmdname: | ||
704 | helpnames[helpname] = attr | ||
705 | |||
706 | if helpnames: | ||
707 | linedata = [(n, a.__doc__ or "") for n, a in helpnames.items()] | ||
708 | linedata.sort() | ||
709 | |||
710 | subindent = indent + ' '*4 | ||
711 | lines = _format_linedata(linedata, subindent, indent_width+4) | ||
712 | block = (indent | ||
713 | + "Additional help topics (run `%s help TOPIC'):\n" % self.name | ||
714 | + '\n'.join(lines) | ||
715 | + "\n\n") | ||
716 | else: | ||
717 | block = '' | ||
718 | help = help.replace(indent+marker+suffix, block, 1) | ||
719 | return help | ||
720 | |||
721 | def _help_preprocess_cmd_name(self, help, cmdname=None): | ||
722 | marker = "${cmd_name}" | ||
723 | handler = self._get_cmd_handler(cmdname) | ||
724 | if not handler: | ||
725 | raise CmdlnError("cannot preprocess '%s' into help string: " | ||
726 | "could not find command handler for %r" | ||
727 | % (marker, cmdname)) | ||
728 | s = cmdname | ||
729 | if hasattr(handler, "aliases"): | ||
730 | s += " (%s)" % (", ".join(handler.aliases)) | ||
731 | help = help.replace(marker, s) | ||
732 | return help | ||
733 | |||
    #TODO: this only makes sense as part of the Cmdln class.
    #      Add hooks to add help preprocessing template vars and put
    #      this one on that class.
    def _help_preprocess_cmd_usage(self, help, cmdname=None):
        # Expand the ${cmd_usage} template var with a usage block
        # inferred from the command handler's function signature.
        # NOTE: relies on Python 2 introspection attribute names
        # (im_func, func_code, func_defaults, func_name).
        marker = "${cmd_usage}"
        handler = self._get_cmd_handler(cmdname)
        if not handler:
            raise CmdlnError("cannot preprocess '%s' into help string: "
                             "could not find command handler for %r"
                             % (marker, cmdname))
        indent, indent_width = _get_indent(marker, help)
        suffix = _get_trailing_whitespace(marker, help)

        # Extract the introspection bits we need.
        func = handler.im_func
        if func.func_defaults:
            func_defaults = list(func.func_defaults)
        else:
            func_defaults = []
        co_argcount = func.func_code.co_argcount
        co_varnames = func.func_code.co_varnames
        co_flags = func.func_code.co_flags
        # CPython code-object flag bits for *args / **kwargs.
        CO_FLAGS_ARGS = 4
        CO_FLAGS_KWARGS = 8

        # Adjust argcount for possible *args and **kwargs arguments.
        argcount = co_argcount
        if co_flags & CO_FLAGS_ARGS: argcount += 1
        if co_flags & CO_FLAGS_KWARGS: argcount += 1

        # Determine the usage string.
        usage = "%s %s" % (self.name, cmdname)
        if argcount <= 2: # handler ::= do_FOO(self, argv)
            usage += " [ARGS...]"
        elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...)
            argnames = list(co_varnames[3:argcount])
            tail = ""
            if co_flags & CO_FLAGS_KWARGS:
                name = argnames.pop(-1)
                import warnings
                # There is no generally accepted mechanism for passing
                # keyword arguments from the command line. Could
                # *perhaps* consider: arg=value arg2=value2 ...
                warnings.warn("argument '**%s' on '%s.%s' command "
                              "handler will never get values"
                              % (name, self.__class__.__name__,
                                 func.func_name))
            if co_flags & CO_FLAGS_ARGS:
                name = argnames.pop(-1)
                tail = "[%s...]" % name.upper()
            # Arguments with defaults render as optional "[NAME]";
            # the remainder are mandatory "NAME" (built right-to-left).
            while func_defaults:
                func_defaults.pop(-1)
                name = argnames.pop(-1)
                tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail)
            while argnames:
                name = argnames.pop(-1)
                tail = "%s %s" % (name.upper(), tail)
            usage += ' ' + tail

        block_lines = [
            self.helpindent + "Usage:",
            self.helpindent + ' '*4 + usage
        ]
        block = '\n'.join(block_lines) + '\n\n'

        help = help.replace(indent+marker+suffix, block, 1)
        return help
801 | |||
802 | #TODO: this only makes sense as part of the Cmdln class. | ||
803 | # Add hooks to add help preprocessing template vars and put | ||
804 | # this one on that class. | ||
805 | def _help_preprocess_cmd_option_list(self, help, cmdname=None): | ||
806 | marker = "${cmd_option_list}" | ||
807 | handler = self._get_cmd_handler(cmdname) | ||
808 | if not handler: | ||
809 | raise CmdlnError("cannot preprocess '%s' into help string: " | ||
810 | "could not find command handler for %r" | ||
811 | % (marker, cmdname)) | ||
812 | indent, indent_width = _get_indent(marker, help) | ||
813 | suffix = _get_trailing_whitespace(marker, help) | ||
814 | if hasattr(handler, "optparser"): | ||
815 | # Setup formatting options and format. | ||
816 | # - Indentation of 4 is better than optparse default of 2. | ||
817 | # C.f. Damian Conway's discussion of this in Perl Best | ||
818 | # Practices. | ||
819 | handler.optparser.formatter.indent_increment = 4 | ||
820 | handler.optparser.formatter.current_indent = indent_width | ||
821 | block = handler.optparser.format_option_help() + '\n' | ||
822 | else: | ||
823 | block = "" | ||
824 | |||
825 | help = help.replace(indent+marker+suffix, block, 1) | ||
826 | return help | ||
827 | |||
828 | def _get_canonical_cmd_name(self, token): | ||
829 | map = self._get_canonical_map() | ||
830 | return map.get(token, None) | ||
831 | |||
    def _get_canonical_map(self):
        """Return a mapping of available command names and aliases to
        their canonical command name.
        """
        # The mapping is computed once and cached on the instance.
        cacheattr = "_token2canonical"
        if not hasattr(self, cacheattr):
            # Get the list of commands and their aliases, if any.
            token2canonical = {}
            cmd2funcname = {} # use a dict to strip duplicates
            for attr in self.get_names():
                # Both public "do_FOO" and private "_do_FOO" handlers
                # define the command "FOO".
                if attr.startswith("do_"): cmdname = attr[3:]
                elif attr.startswith("_do_"): cmdname = attr[4:]
                else:
                    continue
                cmd2funcname[cmdname] = attr
                token2canonical[cmdname] = cmdname
            for cmdname, funcname in cmd2funcname.items(): # add aliases
                func = getattr(self, funcname)
                aliases = getattr(func, "aliases", [])
                for alias in aliases:
                    # An alias that collides with a real command name
                    # is ignored (with a warning); the command wins.
                    if alias in cmd2funcname:
                        import warnings
                        warnings.warn("'%s' alias for '%s' command conflicts "
                                      "with '%s' handler"
                                      % (alias, cmdname, cmd2funcname[alias]))
                        continue
                    token2canonical[alias] = cmdname
            setattr(self, cacheattr, token2canonical)
        return getattr(self, cacheattr)
861 | |||
862 | def _get_cmd_handler(self, cmdname): | ||
863 | handler = None | ||
864 | try: | ||
865 | handler = getattr(self, 'do_' + cmdname) | ||
866 | except AttributeError: | ||
867 | try: | ||
868 | # Private command handlers begin with "_do_". | ||
869 | handler = getattr(self, '_do_' + cmdname) | ||
870 | except AttributeError: | ||
871 | pass | ||
872 | return handler | ||
873 | |||
874 | def _do_EOF(self, argv): | ||
875 | # Default EOF handler | ||
876 | # Note: an actual EOF is redirected to this command. | ||
877 | #TODO: separate name for this. Currently it is available from | ||
878 | # command-line. Is that okay? | ||
879 | self.stdout.write('\n') | ||
880 | self.stdout.flush() | ||
881 | self.stop = True | ||
882 | |||
883 | def emptyline(self): | ||
884 | # Different from cmd.Cmd: don't repeat the last command for an | ||
885 | # emptyline. | ||
886 | if self.cmdlooping: | ||
887 | pass | ||
888 | else: | ||
889 | return self.do_help(["help"]) | ||
890 | |||
891 | |||
892 | #---- optparse.py extension to fix (IMO) some deficiencies | ||
893 | # | ||
894 | # See the class _OptionParserEx docstring for details. | ||
895 | # | ||
896 | |||
class StopOptionProcessing(Exception):
    """Indicate that option *and argument* processing should stop
    cleanly. This is not an error condition. It is similar in spirit
    to StopIteration. This is raised by _OptionParserEx's default
    "help" and "version" option actions and can be raised by custom
    option callbacks too.

    Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx)
    usage is:

        parser = CmdlnOptionParser(mycmd)
        parser.add_option("-f", "--force", dest="force")
        ...
        try:
            opts, args = parser.parse_args()
        except StopOptionProcessing:
            # normal termination, "--help" was probably given
            sys.exit(0)
    """
916 | |||
917 | class _OptionParserEx(optparse.OptionParser): | ||
918 | """An optparse.OptionParser that uses exceptions instead of sys.exit. | ||
919 | |||
920 | This class is an extension of optparse.OptionParser that differs | ||
921 | as follows: | ||
922 | - Correct (IMO) the default OptionParser error handling to never | ||
923 | sys.exit(). Instead OptParseError exceptions are passed through. | ||
924 | - Add the StopOptionProcessing exception (a la StopIteration) to | ||
925 | indicate normal termination of option processing. | ||
926 | See StopOptionProcessing's docstring for details. | ||
927 | |||
928 | I'd also like to see the following in the core optparse.py, perhaps | ||
929 | as a RawOptionParser which would serve as a base class for the more | ||
930 | generally used OptionParser (that works as current): | ||
931 | - Remove the implicit addition of the -h|--help and --version | ||
932 | options. They can get in the way (e.g. if want '-?' and '-V' for | ||
933 | these as well) and it is not hard to do: | ||
934 | optparser.add_option("-h", "--help", action="help") | ||
935 | optparser.add_option("--version", action="version") | ||
936 | These are good practices, just not valid defaults if they can | ||
937 | get in the way. | ||
938 | """ | ||
939 | def error(self, msg): | ||
940 | raise optparse.OptParseError(msg) | ||
941 | |||
942 | def exit(self, status=0, msg=None): | ||
943 | if status == 0: | ||
944 | raise StopOptionProcessing(msg) | ||
945 | else: | ||
946 | #TODO: don't lose status info here | ||
947 | raise optparse.OptParseError(msg) | ||
948 | |||
949 | |||
950 | |||
951 | #---- optparse.py-based option processing support | ||
952 | |||
class CmdlnOptionParser(_OptionParserEx):
    """An option parser suited to *top-level* Cmdln options. (For
    sub-command options see SubCmdOptionParser.)

    Changes relative to a plain optparse.OptionParser:
    - disable_interspersed_args() by default: a Cmdln instance has
      sub-commands which may themselves take options, so top-level
      parsing must stop at the first positional argument.
    - print_help() is redirected to Cmdln.do_help(), which is better
      equipped to render help.
    - error() raises CmdlnUserError: OptionParser.error() is meant to
      be called for user errors, and a well-known exception type makes
      error handling clearer.
    - Also see the changes in _OptionParserEx.
    """
    def __init__(self, cmdln, **kwargs):
        self.cmdln = cmdln
        # The program name shown in usage/help is the shell's name.
        kwargs["prog"] = self.cmdln.name
        _OptionParserEx.__init__(self, **kwargs)
        self.disable_interspersed_args()

    def print_help(self, file=None):
        # Delegate to the shell's own "help" command.
        self.cmdln.onecmd(["help"])

    def error(self, msg):
        raise CmdlnUserError(msg)
979 | |||
980 | |||
class SubCmdOptionParser(_OptionParserEx):
    def set_cmdln_info(self, cmdln, subcmd):
        """Called by Cmdln to pass along the info needed for
        print_help(): the shell instance and this sub-command's name.
        """
        self.cmdln = cmdln
        self.subcmd = subcmd

    def print_help(self, file=None):
        # Render help via the shell's own "help SUBCMD" command.
        self.cmdln.onecmd(["help", self.subcmd])

    def error(self, msg):
        # Option-parse failures for a sub-command are user errors.
        raise CmdlnUserError(msg)
993 | raise CmdlnUserError(msg) | ||
994 | |||
995 | |||
def option(*args, **kwargs):
    """Decorator to add an option to the optparser argument of a Cmdln
    subcommand. The arguments are forwarded to
    SubCmdOptionParser.add_option().

    Example:
        class MyShell(cmdln.Cmdln):
            @cmdln.option("-f", "--force", help="force removal")
            def do_remove(self, subcmd, opts, *args):
                #...
    """
    #XXX Is there a possible optimization for many options to not have a
    #    large stack depth here?
    def add_to(handler):
        # Lazily attach a parser to the handler on first use, then
        # register this option on it.
        if not hasattr(handler, "optparser"):
            handler.optparser = SubCmdOptionParser()
        handler.optparser.add_option(*args, **kwargs)
        return handler
    return add_to
1014 | |||
1015 | |||
class Cmdln(RawCmdln):
    """An improved (on cmd.Cmd) framework for building multi-subcommand
    scripts (think "svn" & "cvs") and simple shells (think "pdb" and
    "gdb").

    A simple example:

        import cmdln

        class MySVN(cmdln.Cmdln):
            name = "svn"

            @cmdln.aliases('stat', 'st')
            @cmdln.option('-v', '--verbose', action='store_true',
                          help='print verbose information')
            def do_status(self, subcmd, opts, *paths):
                print "handle 'svn status' command"

            #...

        if __name__ == "__main__":
            shell = MySVN()
            retval = shell.main()
            sys.exit(retval)

    'Cmdln' extends 'RawCmdln' by providing optparse option processing
    integration. See this class' _dispatch_cmd() docstring and
    <http://trentm.com/projects/cmdln> for more information.
    """
    def _dispatch_cmd(self, handler, argv):
        """Introspect sub-command handler signature to determine how to
        dispatch the command. The raw handler provided by the base
        'RawCmdln' class is still supported:

            def do_foo(self, argv):
                # 'argv' is the vector of command line args, argv[0] is
                # the command name itself (i.e. "foo" or an alias)
                pass

        In addition, if the handler has more than 2 arguments option
        processing is automatically done (using optparse):

            @cmdln.option('-v', '--verbose', action='store_true')
            def do_bar(self, subcmd, opts, *args):
                # subcmd = <"bar" or an alias>
                # opts = <an optparse.Values instance>
                if opts.verbose:
                    print "lots of debugging output..."
                # args = <tuple of arguments>
                for arg in args:
                    bar(arg)

        TODO: explain that "*args" can be other signatures as well.

        The `cmdln.option` decorator corresponds to an `add_option()`
        method call on an `optparse.OptionParser` instance.

        You can declare a specific number of arguments:

            @cmdln.option('-v', '--verbose', action='store_true')
            def do_bar2(self, subcmd, opts, bar_one, bar_two):
                #...

        and an appropriate error message will be raised/printed if the
        command is called with a different number of args.
        """
        # Python 2 introspection: the number of positional parameters
        # the handler was defined with (includes 'self').
        co_argcount = handler.im_func.func_code.co_argcount
        if co_argcount == 2:   # handler ::= do_foo(self, argv)
            return handler(argv)
        elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
            try:
                optparser = handler.optparser
            except AttributeError:
                # No @cmdln.option decorators were used on this handler:
                # lazily attach an empty parser so option processing
                # (e.g. "-h") behaves uniformly.
                optparser = handler.im_func.optparser = SubCmdOptionParser()
            assert isinstance(optparser, SubCmdOptionParser)
            optparser.set_cmdln_info(self, argv[0])
            try:
                opts, args = optparser.parse_args(argv[1:])
            except StopOptionProcessing:
                #TODO: this doesn't really fly for a replacement of
                #      optparse.py behaviour, does it?
                return 0 # Normal command termination

            try:
                return handler(argv[0], opts, *args)
            except TypeError, ex:
                # Some TypeError's are user errors:
                #   do_foo() takes at least 4 arguments (3 given)
                #   do_foo() takes at most 5 arguments (6 given)
                #   do_foo() takes exactly 5 arguments (6 given)
                # Raise CmdlnUserError for these with a suitably
                # massaged error message.
                import sys
                tb = sys.exc_info()[2] # the traceback object
                if tb.tb_next is not None:
                    # If the traceback is more than one level deep, then the
                    # TypeError do *not* happen on the "handler(...)" call
                    # above. In that we don't want to handle it specially
                    # here: it would falsely mask deeper code errors.
                    raise
                msg = ex.args[0]
                match = _INCORRECT_NUM_ARGS_RE.search(msg)
                if match:
                    # Subtract the three non-user parameters
                    # (self, subcmd, opts) from the reported counts so
                    # the message reflects what the user actually typed.
                    msg = list(match.groups())
                    msg[1] = int(msg[1]) - 3
                    if msg[1] == 1:
                        msg[2] = msg[2].replace("arguments", "argument")
                    msg[3] = int(msg[3]) - 3
                    msg = ''.join(map(str, msg))
                    raise CmdlnUserError(msg)
                else:
                    raise
        else:
            raise CmdlnError("incorrect argcount for %s(): takes %d, must "
                            "take 2 for 'argv' signature or 3+ for 'opts' "
                            "signature" % (handler.__name__, co_argcount))
1132 | |||
1133 | |||
1134 | |||
1135 | #---- internal support functions | ||
1136 | |||
def _format_linedata(linedata, indent, indent_width):
    """Lay out (name, docstring) pairs as aligned two-column help lines.

    "linedata" is a list of 2-tuples: (display string, docstring).
    "indent" is the string prepended to every emitted line.
    "indent_width" is the column width that indent occupies when
    printed (used to budget the remaining width out of 78 columns).

    Returns the formatted lines as a list of strings.
    """
    spacing = 2
    total_width = 78 - indent_width
    longest_name = max([len(name) for name, _doc in linedata])
    # The name column is clamped: pad short collections up to 13
    # columns, otherwise use a fixed 16-column field.
    if longest_name < 13:
        name_width = 13
    else:
        name_width = 16
    doc_width = total_width - name_width - spacing

    formatted = []
    for name, doc in linedata:
        row = indent + name
        if len(name) <= name_width:
            row += ' ' * (name_width + spacing - len(name))
        else:
            # Name overflows its column: emit it on its own line and
            # start the summary on a fresh, fully indented line.
            formatted.append(row)
            row = indent + ' ' * (name_width + spacing)
        row += _summarize_doc(doc, doc_width)
        formatted.append(row.rstrip())
    return formatted
1170 | |||
def _summarize_doc(doc, length=60):
    r"""Parse out a short one line summary from the given doclines.

    "doc" is the doc string to summarize.
    "length" is the max length for the summary

    >>> _summarize_doc("this function does this")
    'this function does this'
    >>> _summarize_doc("this function does this", 10)
    'this fu...'
    >>> _summarize_doc("this function does this\nand that")
    'this function does this and that'
    >>> _summarize_doc("this function does this\n\nand that")
    'this function does this'
    """
    # (Cleanup: dropped an unused `import re` and an unused enumerate()
    # index from the original.)
    if doc is None:
        return ""
    assert length > 3, "length <= 3 is absurdly short for a doc summary"
    doclines = doc.strip().splitlines(0)
    if not doclines:
        return ""

    # Gather the first paragraph, stopping at a blank line or once
    # enough text has been accumulated to fill the summary.
    summlines = []
    for line in doclines:
        stripped = line.strip()
        if not stripped:
            break
        summlines.append(stripped)
        if len(''.join(summlines)) >= length:
            break

    summary = ' '.join(summlines)
    if len(summary) > length:
        summary = summary[:length-3] + "..."
    return summary
1207 | |||
1208 | |||
def line2argv(line):
    r"""Parse the given line into an argument vector.

    "line" is the line of input to parse.

    Handles whitespace splitting, single/double quoting and backslash
    escaping in a shell-like (though not exhaustive) manner.  On
    non-Windows platforms an unterminated quoted segment raises
    ValueError.

    >>> line2argv("foo bar")
    ['foo', 'bar']
    >>> line2argv("'foo bar' spam")
    ['foo bar', 'spam']
    >>> line2argv('a "more complex" test')
    ['a', 'more complex', 'test']
    >>> line2argv(r'"foo\"bar"')
    ['foo"bar']
    """
    import string
    text = line.strip()
    argv = []
    current = None      # argument being accumulated (None = none open)
    state = "default"   # one of: "default", "single-quoted", "double-quoted"
    pos = 0
    end = len(text)
    while pos < end:
        ch = text[pos]

        if ch == "\\" and pos + 1 < end:
            # Escape: the next character is taken literally.  The
            # backslash itself is preserved on Windows and inside
            # quotes, except when it escapes a quote character.
            if current is None:
                current = ""
            in_quotes = state in ("double-quoted", "single-quoted")
            if (sys.platform == "win32" or in_quotes) \
               and text[pos+1] not in tuple('"\''):
                current += ch
            pos += 1
            current += text[pos]
            pos += 1
            continue

        if state == "single-quoted":
            if ch == "'":
                state = "default"
            else:
                current += ch
        elif state == "double-quoted":
            if ch == '"':
                state = "default"
            else:
                current += ch
        else:  # state == "default"
            if ch == '"':
                if current is None:
                    current = ""
                state = "double-quoted"
            elif ch == "'":
                if current is None:
                    current = ""
                state = "single-quoted"
            elif ch in string.whitespace:
                if current is not None:
                    argv.append(current)
                current = None
            else:
                if current is None:
                    current = ""
                current += ch
        pos += 1

    if current is not None:
        argv.append(current)
    if sys.platform != "win32" and state != "default":
        raise ValueError("command line is not terminated: unfinished %s "
                         "segment" % state)
    return argv
1328 | |||
1329 | |||
def argv2line(argv):
    r"""Join an argument vector back into a single command line string.

    "argv" is the argument vector to process.

    An argument containing spaces is double-quoted when it holds no
    double quote, single-quoted when it holds no single quote, and
    otherwise double-quoted with embedded double quotes escaped.

    >>> argv2line(['foo', 'bar baz'])
    'foo "bar baz"'
    >>> argv2line(['foo"bar'])
    'foo"bar'
    """
    quoted = []
    for arg in argv:
        if ' ' in arg:
            if '"' not in arg:
                arg = '"' + arg + '"'
            elif "'" not in arg:
                arg = "'" + arg + "'"
            else:
                arg = '"' + arg.replace('"', r'\"') + '"'
        quoted.append(arg)
    return ' '.join(quoted)
1362 | |||
1363 | |||
1364 | # Recipe: dedent (0.1) in /Users/trentm/tm/recipes/cookbook | ||
def _dedentlines(lines, tabsize=8, skip_first_line=False):
    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines

    "lines" is a list of lines to dedent.
    "tabsize" is the tab width to use for indent width calculations.
    "skip_first_line" is a boolean indicating if the first line should
        be skipped for calculating the indent width and for dedenting.
        This is sometimes useful for docstrings and similar.

    Same as dedent() except operates on a sequence of lines. Note: the
    lines list is modified **in-place**.

    (Cleanup vs. the original: dropped the unused `indents` local and
    parenthesized the dead-code DEBUG prints, which is behaviour-
    identical single-argument printing under Python 2.)
    """
    DEBUG = False
    if DEBUG:
        print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"
              % (tabsize, skip_first_line))
    # Pass 1: find the smallest indent width (in columns) over all
    # non-blank lines.
    margin = None
    for i, line in enumerate(lines):
        if i == 0 and skip_first_line: continue
        indent = 0
        for ch in line:
            if ch == ' ':
                indent += 1
            elif ch == '\t':
                indent += tabsize - (indent % tabsize)
            elif ch in '\r\n':
                continue # skip all-whitespace lines
            else:
                break
        else:
            continue # skip all-whitespace lines
        if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
        if margin is None:
            margin = indent
        else:
            margin = min(margin, indent)
    if DEBUG: print("dedent: margin=%r" % margin)

    # Pass 2: strip `margin` columns of leading whitespace from each
    # line, honouring tab stops; a tab that straddles the margin is
    # replaced by the spaces that extend past it.
    if margin is not None and margin > 0:
        for i, line in enumerate(lines):
            if i == 0 and skip_first_line: continue
            removed = 0
            for j, ch in enumerate(line):
                if ch == ' ':
                    removed += 1
                elif ch == '\t':
                    removed += tabsize - (removed % tabsize)
                elif ch in '\r\n':
                    # All-whitespace line: drop everything before EOL.
                    if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
                    lines[i] = lines[i][j:]
                    break
                else:
                    raise ValueError("unexpected non-whitespace char %r in "
                                     "line %r while removing %d-space margin"
                                     % (ch, line, margin))
                if DEBUG:
                    print("dedent: %r: %r -> removed %d/%d"
                          % (line, ch, removed, margin))
                if removed == margin:
                    lines[i] = lines[i][j+1:]
                    break
                elif removed > margin:
                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
                    break
    return lines
1431 | |||
def _dedent(text, tabsize=8, skip_first_line=False):
    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text

    "text" is the text to dedent.
    "tabsize" is the tab width to use for indent width calculations.
    "skip_first_line" is a boolean indicating if the first line should
        be skipped for calculating the indent width and for dedenting.
        This is sometimes useful for docstrings and similar.

    Like textwrap.dedent(s), but does not expand tabs to spaces.
    """
    # splitlines(1) keeps the line endings so join() reassembles the
    # text exactly; _dedentlines mutates the list in place.
    split_lines = text.splitlines(1)
    _dedentlines(split_lines, tabsize=tabsize,
                 skip_first_line=skip_first_line)
    return ''.join(split_lines)
1446 | |||
1447 | |||
def _get_indent(marker, s, tab_width=8):
    """_get_indent(marker, s, tab_width=8) ->
        (<indentation-of-'marker'>, <indentation-width>)

    Return the run of spaces/tabs that immediately precedes `marker`
    in `s`, together with its printed width (tabs advance to the next
    tab stop).  Raises ValueError if `marker` is not found.
    """
    end = s.index(marker)
    begin = end
    # Walk backwards over the whitespace run leading up to the marker.
    while begin > 0 and s[begin-1] in (' ', '\t'):
        begin -= 1
    indent = s[begin:end]
    indent_width = 0
    for ch in indent:
        if ch == '\t':
            indent_width += tab_width - (indent_width % tab_width)
        elif ch == ' ':
            indent_width += 1
    return indent, indent_width
1467 | |||
def _get_trailing_whitespace(marker, s):
    """Return the whitespace content trailing the given 'marker' in string 's',
    up to and including a newline.  A "\\r\\n" pair is kept together.
    """
    pos = s.index(marker) + len(marker)
    suffix = ''
    while pos < len(s):
        ch = s[pos]
        if ch in ' \t':
            suffix += ch
            pos += 1
            continue
        if ch in '\r\n':
            suffix += ch
            # Treat CRLF as a single line ending.
            if ch == '\r' and pos + 1 < len(s) and s[pos+1] == '\n':
                suffix += '\n'
        break
    return suffix
1487 | |||
1488 | |||
1489 | |||
1490 | #---- bash completion support | ||
1491 | # Note: This is still experimental. I expect to change this | ||
1492 | # significantly. | ||
1493 | # | ||
1494 | # To get Bash completion for a cmdln.Cmdln class, run the following | ||
1495 | # bash command: | ||
1496 | # $ complete -C 'python -m cmdln /path/to/script.py CmdlnClass' cmdname | ||
1497 | # For example: | ||
1498 | # $ complete -C 'python -m cmdln ~/bin/svn.py SVN' svn | ||
1499 | # | ||
1500 | #TODO: Simplify the above so don't have to given path to script (try to | ||
1501 | # find it on PATH, if possible). Could also make class name | ||
1502 | # optional if there is only one in the module (common case). | ||
1503 | |||
1504 | if __name__ == "__main__" and len(sys.argv) == 6: | ||
1505 | def _log(s): | ||
1506 | return # no-op, comment out for debugging | ||
1507 | from os.path import expanduser | ||
1508 | fout = open(expanduser("~/tmp/bashcpln.log"), 'a') | ||
1509 | fout.write(str(s) + '\n') | ||
1510 | fout.close() | ||
1511 | |||
1512 | # Recipe: module_from_path (1.0.1+) | ||
1513 | def _module_from_path(path): | ||
1514 | import imp, os, sys | ||
1515 | path = os.path.expanduser(path) | ||
1516 | dir = os.path.dirname(path) or os.curdir | ||
1517 | name = os.path.splitext(os.path.basename(path))[0] | ||
1518 | sys.path.insert(0, dir) | ||
1519 | try: | ||
1520 | iinfo = imp.find_module(name, [dir]) | ||
1521 | return imp.load_module(name, *iinfo) | ||
1522 | finally: | ||
1523 | sys.path.remove(dir) | ||
1524 | |||
1525 | def _get_bash_cplns(script_path, class_name, cmd_name, | ||
1526 | token, preceding_token): | ||
1527 | _log('--') | ||
1528 | _log('get_cplns(%r, %r, %r, %r, %r)' | ||
1529 | % (script_path, class_name, cmd_name, token, preceding_token)) | ||
1530 | comp_line = os.environ["COMP_LINE"] | ||
1531 | comp_point = int(os.environ["COMP_POINT"]) | ||
1532 | _log("COMP_LINE: %r" % comp_line) | ||
1533 | _log("COMP_POINT: %r" % comp_point) | ||
1534 | |||
1535 | try: | ||
1536 | script = _module_from_path(script_path) | ||
1537 | except ImportError, ex: | ||
1538 | _log("error importing `%s': %s" % (script_path, ex)) | ||
1539 | return [] | ||
1540 | shell = getattr(script, class_name)() | ||
1541 | cmd_map = shell._get_canonical_map() | ||
1542 | del cmd_map["EOF"] | ||
1543 | |||
1544 | # Determine if completing the sub-command name. | ||
1545 | parts = comp_line[:comp_point].split(None, 1) | ||
1546 | _log(parts) | ||
1547 | if len(parts) == 1 or not (' ' in parts[1] or '\t' in parts[1]): | ||
1548 | #TODO: if parts[1].startswith('-'): handle top-level opts | ||
1549 | _log("complete sub-command names") | ||
1550 | matches = {} | ||
1551 | for name, canon_name in cmd_map.items(): | ||
1552 | if name.startswith(token): | ||
1553 | matches[name] = canon_name | ||
1554 | if not matches: | ||
1555 | return [] | ||
1556 | elif len(matches) == 1: | ||
1557 | return matches.keys() | ||
1558 | elif len(set(matches.values())) == 1: | ||
1559 | return [matches.values()[0]] | ||
1560 | else: | ||
1561 | return matches.keys() | ||
1562 | |||
1563 | # Otherwise, complete options for the given sub-command. | ||
1564 | #TODO: refine this so it does the right thing with option args | ||
1565 | if token.startswith('-'): | ||
1566 | cmd_name = comp_line.split(None, 2)[1] | ||
1567 | try: | ||
1568 | cmd_canon_name = cmd_map[cmd_name] | ||
1569 | except KeyError: | ||
1570 | return [] | ||
1571 | handler = shell._get_cmd_handler(cmd_canon_name) | ||
1572 | optparser = getattr(handler, "optparser", None) | ||
1573 | if optparser is None: | ||
1574 | optparser = SubCmdOptionParser() | ||
1575 | opt_strs = [] | ||
1576 | for option in optparser.option_list: | ||
1577 | for opt_str in option._short_opts + option._long_opts: | ||
1578 | if opt_str.startswith(token): | ||
1579 | opt_strs.append(opt_str) | ||
1580 | return opt_strs | ||
1581 | |||
1582 | return [] | ||
1583 | |||
1584 | for cpln in _get_bash_cplns(*sys.argv[1:]): | ||
1585 | print cpln | ||
1586 | |||
diff --git a/scripts/lib/mic/utils/errors.py b/scripts/lib/mic/utils/errors.py new file mode 100644 index 0000000000..8d720f9080 --- /dev/null +++ b/scripts/lib/mic/utils/errors.py | |||
@@ -0,0 +1,71 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2007 Red Hat, Inc. | ||
4 | # Copyright (c) 2011 Intel, Inc. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify it | ||
7 | # under the terms of the GNU General Public License as published by the Free | ||
8 | # Software Foundation; version 2 of the License | ||
9 | # | ||
10 | # This program is distributed in the hope that it will be useful, but | ||
11 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
12 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
13 | # for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along | ||
16 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
17 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | |||
class CreatorError(Exception):
    """An exception base class for all imgcreate errors."""
    # Subclasses override `keyword` to tag messages with their error
    # category, e.g. '<mount>', '<repo>'.
    keyword = '<creator>'

    def __init__(self, msg):
        # msg may be str or unicode; it is normalized lazily in __str__.
        self.msg = msg

    def __str__(self):
        # Python 2 only: `unicode` does not exist on Python 3.  Unicode
        # messages are UTF-8 encoded (undecodable chars dropped) so the
        # result is a byte string; anything else is stringified.  Note
        # this intentionally rewrites self.msg in place.
        if isinstance(self.msg, unicode):
            self.msg = self.msg.encode('utf-8', 'ignore')
        else:
            self.msg = str(self.msg)
        return self.keyword + self.msg
32 | |||
class Usage(CreatorError):
    """Raised for command-line usage errors; the rendered message
    appends a hint to consult "--help"."""
    keyword = '<usage>'

    def __str__(self):
        # Same message normalization as CreatorError.__str__ (Python 2
        # `unicode` handling), with the usage hint appended.
        if isinstance(self.msg, unicode):
            self.msg = self.msg.encode('utf-8', 'ignore')
        else:
            self.msg = str(self.msg)
        return self.keyword + self.msg + ', please use "--help" for more info'
42 | |||
# Category-specific errors: each subclass only overrides `keyword`,
# which CreatorError.__str__ prefixes to the message.

class Abort(CreatorError):
    # Empty keyword: abort messages are shown without a category tag.
    keyword = ''

class ConfigError(CreatorError):
    keyword = '<config>'

class KsError(CreatorError):
    keyword = '<kickstart>'

class RepoError(CreatorError):
    keyword = '<repo>'

class RpmError(CreatorError):
    keyword = '<rpm>'

class MountError(CreatorError):
    keyword = '<mount>'

class SnapshotError(CreatorError):
    keyword = '<snapshot>'

class SquashfsError(CreatorError):
    keyword = '<squashfs>'

class BootstrapError(CreatorError):
    keyword = '<bootstrap>'

# NOTE(review): this shadows the builtin RuntimeError inside this
# module -- `except RuntimeError` here will no longer catch the
# builtin exception.  Renaming would change the module's public API,
# so it is only flagged.
class RuntimeError(CreatorError):
    keyword = '<runtime>'
diff --git a/scripts/lib/mic/utils/fs_related.py b/scripts/lib/mic/utils/fs_related.py new file mode 100644 index 0000000000..dd420e88dc --- /dev/null +++ b/scripts/lib/mic/utils/fs_related.py | |||
@@ -0,0 +1,1060 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2007, Red Hat, Inc. | ||
4 | # Copyright (c) 2009, 2010, 2011 Intel, Inc. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify it | ||
7 | # under the terms of the GNU General Public License as published by the Free | ||
8 | # Software Foundation; version 2 of the License | ||
9 | # | ||
10 | # This program is distributed in the hope that it will be useful, but | ||
11 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
12 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
13 | # for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along | ||
16 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
17 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | |||
19 | from __future__ import with_statement | ||
20 | import os | ||
21 | import sys | ||
22 | import errno | ||
23 | import stat | ||
24 | import random | ||
25 | import string | ||
26 | import time | ||
27 | import uuid | ||
28 | |||
29 | from mic import msger | ||
30 | from mic.utils import runner | ||
31 | from mic.utils.errors import * | ||
32 | from mic.utils.oe.misc import * | ||
33 | |||
def find_binary_inchroot(binary, chroot):
    """Look for `binary` in the standard bin directories of `chroot`.

    Returns the chroot-relative path (e.g. "/usr/bin/foo") when found,
    otherwise None.
    """
    for prefix in ("/usr/sbin", "/usr/bin", "/sbin", "/bin"):
        candidate = "%s/%s" % (prefix, binary)
        # The existence check is done on the host-side path.
        if os.path.exists("%s/%s" % (chroot, candidate)):
            return candidate
    return None
46 | |||
def find_binary_path(binary):
    """Search $PATH, $HOME/bin and the standard sbin/bin directories
    for `binary` and return the first matching path.

    If the binary cannot be found, print an installation hint and exit
    the process with status 1 -- callers rely on this terminating
    behaviour rather than on an exception.
    """
    # os.environ.has_key() is deprecated; the `in` membership test is
    # the idiomatic (and Python 3 safe) spelling.
    if "PATH" in os.environ:
        paths = os.environ["PATH"].split(":")
    else:
        paths = []
    if "HOME" in os.environ:
        paths += [os.environ["HOME"] + "/bin"]
    paths += ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin",
              "/sbin", "/bin"]

    for path in paths:
        bin_path = "%s/%s" % (path, binary)
        if os.path.exists(bin_path):
            return bin_path

    # Parenthesized single-argument print behaves identically under
    # Python 2; the user-facing text is unchanged.
    print("External command '%s' not found, exiting." % binary)
    print(" (Please install '%s' on your host system)" % binary)
    sys.exit(1)
64 | |||
def makedirs(dirname):
    """A version of os.makedirs() that doesn't throw an
    exception if the leaf directory already exists.

    Any other OSError (e.g. permission denied, missing parent
    permissions) still propagates to the caller.
    """
    try:
        os.makedirs(dirname)
    except OSError as err:
        # "except OSError as err" is the Python 2.6+/3-compatible form
        # of the original "except OSError, err".
        if err.errno != errno.EEXIST:
            raise
74 | |||
def mksquashfs(in_img, out_img):
    """Pack `in_img` into the squashfs image `out_img` using the
    external mksquashfs tool.

    Raises SquashfsError when the command exits non-zero.
    """
    cmd = [find_binary_path("mksquashfs"), in_img, out_img]
    # Suppress the interactive progress bar when not attached to a tty.
    if not sys.stdout.isatty():
        cmd.append("-no-progress")

    ret = runner.show(cmd)
    if ret != 0:
        raise SquashfsError("'%s' exited with error (%d)" % (' '.join(cmd), ret))
85 | |||
def resize2fs(fs, size):
    """Resize the ext filesystem image `fs` to `size` bytes.

    A size of 0 shrinks the filesystem to its minimum possible size
    (resize2fs -M).  Returns the tool's exit status.
    """
    tool = find_binary_path("resize2fs")
    if size == 0:
        return runner.show([tool, '-M', fs])
    return runner.show([tool, fs, "%sK" % (size / 1024,)])
93 | |||
def my_fuser(fp):
    """Return True if some /bin/bash process currently has `fp` open.

    Used to detect a live interactive chroot session by probing its
    lock file with fuser(1).
    """
    fuser = find_binary_path("fuser")
    if not os.path.exists(fp):
        return False

    if runner.quiet([fuser, "-s", fp]) != 0:
        # No process holds the file open.
        return False
    for pid in runner.outs([fuser, fp]).split():
        with open("/proc/%s/cmdline" % pid, "r") as fd:
            cmdline = fd.read()
        # /proc cmdline is NUL-terminated; drop the final byte.
        if cmdline[:-1] == "/bin/bash":
            return True

    # not found
    return False
110 | |||
class BindChrootMount:
    """Represents a bind mount of a directory into a chroot."""
    def __init__(self, src, chroot, dest = None, option = None):
        # `option` (e.g. "ro") is applied with a bind remount in mount().
        self.root = os.path.abspath(os.path.expanduser(chroot))
        self.option = option

        # Resolve a symlinked source to its real path (relative link
        # targets made absolute against the link's directory), but keep
        # the original path so mount() can mirror the symlink inside
        # the chroot.
        self.orig_src = self.src = src
        if os.path.islink(src):
            self.src = os.readlink(src)
            if not self.src.startswith('/'):
                self.src = os.path.abspath(os.path.join(os.path.dirname(src),
                                                        self.src))

        if not dest:
            dest = self.src
        self.dest = os.path.join(self.root, dest.lstrip('/'))

        self.mounted = False
        self.mountcmd = find_binary_path("mount")
        self.umountcmd = find_binary_path("umount")

    def ismounted(self):
        # Consult /proc/mounts rather than trusting self.mounted, since
        # the mount can outlive this object or be created elsewhere.
        with open('/proc/mounts') as f:
            for line in f:
                if line.split()[1] == os.path.abspath(self.dest):
                    return True

        return False

    def has_chroot_instance(self):
        # A live chroot session keeps the lock file open (see my_fuser).
        lock = os.path.join(self.root, ".chroot.lock")
        return my_fuser(lock)

    def mount(self):
        """Bind-mount src into the chroot (idempotent)."""
        if self.mounted or self.ismounted():
            return

        makedirs(self.dest)
        rc = runner.show([self.mountcmd, "--bind", self.src, self.dest])
        if rc != 0:
            raise MountError("Bind-mounting '%s' to '%s' failed" %
                             (self.src, self.dest))
        # Mount options such as "ro" can only be applied via a second,
        # remounting bind mount.
        if self.option:
            rc = runner.show([self.mountcmd, "--bind", "-o", "remount,%s" % self.option, self.dest])
            if rc != 0:
                raise MountError("Bind-remounting '%s' failed" % self.dest)

        self.mounted = True
        # Recreate a symlinked source as a symlink inside the chroot so
        # the original path also resolves there.
        if os.path.islink(self.orig_src):
            dest = os.path.join(self.root, self.orig_src.lstrip('/'))
            if not os.path.exists(dest):
                os.symlink(self.src, dest)

    def unmount(self):
        """Lazily unmount, unless a chroot session is still active."""
        if self.has_chroot_instance():
            return

        if self.ismounted():
            # -l: lazy unmount, detaches even if the mount is busy.
            runner.show([self.umountcmd, "-l", self.dest])
        self.mounted = False
171 | |||
class LoopbackMount:
    """LoopbackMount compatibility layer for old API"""
    def __init__(self, lofile, mountdir, fstype = None):
        self.diskmount = DiskMount(LoopbackDisk(lofile,size = 0),mountdir,fstype,rmmountdir = True)
        # Bug fix: loopsetup() reads self.lofile, but the original
        # __init__ never stored it, so loopsetup() always raised
        # AttributeError on this class.
        self.lofile = lofile
        self.losetup = False
        self.losetupcmd = find_binary_path("losetup")

    def cleanup(self):
        """Tear down the underlying DiskMount."""
        self.diskmount.cleanup()

    def unmount(self):
        self.diskmount.unmount()

    def lounsetup(self):
        """Detach the loop device previously attached by loopsetup()."""
        if self.losetup:
            runner.show([self.losetupcmd, "-d", self.loopdev])
            self.losetup = False
            self.loopdev = None

    def loopsetup(self):
        """Attach self.lofile to a free loop device (idempotent)."""
        if self.losetup:
            return

        self.loopdev = get_loop_device(self.losetupcmd, self.lofile)
        self.losetup = True

    def mount(self):
        self.diskmount.mount()
200 | |||
class SparseLoopbackMount(LoopbackMount):
    """SparseLoopbackMount compatibility layer for old API"""
    # NOTE(review): this __init__ replaces LoopbackMount.__init__
    # without setting self.losetup / self.losetupcmd / self.lofile, so
    # the inherited loopsetup()/lounsetup() would fail on instances of
    # this class -- apparently only the diskmount-based methods are
    # ever used.  TODO confirm before relying on the loop API here.
    def __init__(self, lofile, mountdir, size, fstype = None):
        self.diskmount = DiskMount(SparseLoopbackDisk(lofile,size),mountdir,fstype,rmmountdir = True)

    def expand(self, create = False, size = None):
        # Grow the sparse backing file (optionally creating it).
        self.diskmount.disk.expand(create, size)

    def truncate(self, size = None):
        self.diskmount.disk.truncate(size)

    def create(self):
        self.diskmount.disk.create()
214 | |||
class SparseExtLoopbackMount(SparseLoopbackMount):
    """SparseExtLoopbackMount compatibility layer for old API"""
    def __init__(self, lofile, mountdir, size, fstype, blocksize, fslabel):
        self.diskmount = ExtDiskMount(SparseLoopbackDisk(lofile,size), mountdir, fstype, blocksize, fslabel, rmmountdir = True)


    # NOTE(review): the double-underscore calls below are name-mangled
    # in *this* class (to _SparseExtLoopbackMount__...), so they cannot
    # reach ExtDiskMount's private methods and would raise
    # AttributeError if ever invoked; __fsck() additionally references
    # self.extdiskmount, which is never assigned.  These look like dead
    # compatibility stubs kept for API parity -- left untouched.
    def __format_filesystem(self):
        self.diskmount.__format_filesystem()

    def create(self):
        self.diskmount.disk.create()

    def resize(self, size = None):
        return self.diskmount.__resize_filesystem(size)

    def mount(self):
        self.diskmount.mount()

    def __fsck(self):
        self.extdiskmount.__fsck()

    def __get_size_from_filesystem(self):
        return self.diskmount.__get_size_from_filesystem()

    def __resize_to_minimal(self):
        return self.diskmount.__resize_to_minimal()

    def resparse(self, size = None):
        return self.diskmount.resparse(size)
244 | |||
class Disk:
    """Generic base object for a disk.

    The 'create' method must make the disk visible as a block device - eg
    by calling losetup. For RawDisk, this is obviously a no-op. The 'cleanup'
    method must undo the 'create' operation.
    """
    def __init__(self, size, device = None):
        # Size in bytes; exposed read-only via the 'size' property.
        self._size = size
        # Block-device path; set by subclasses once the disk is visible.
        self._device = device

    def create(self):
        # Base class does nothing; subclasses attach a block device here.
        pass

    def cleanup(self):
        # Base class does nothing; subclasses undo create() here.
        pass

    def get_device(self):
        return self._device
    def set_device(self, path):
        self._device = path
    device = property(get_device, set_device)

    def get_size(self):
        return self._size
    size = property(get_size)
271 | |||
272 | |||
class RawDisk(Disk):
    """A Disk that wraps an already-existing block device.

    create() is inherited as a no-op: the device is present from the start.
    """
    def __init__(self, size, device):
        Disk.__init__(self, size, device)

    def fixed(self):
        # A real block device cannot be grown or shrunk.
        return True

    def exists(self):
        # The underlying block device is present by definition.
        return True
285 | |||
286 | |||
class DiskImage(Disk):
    """A Disk backed by a plain image file (used directly, no loop device)."""
    def __init__(self, image_file, size):
        Disk.__init__(self, size)
        self.image_file = image_file

    def exists(self):
        return os.path.exists(self.image_file)

    def create(self):
        # Already materialized?  Nothing to do.
        if self.device is not None:
            return

        # Round the size up to whole 1 KiB blocks.
        blocks = (self.size + 1023) // 1024

        # create disk image
        dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=1" % \
            (self.image_file, blocks)
        rc, out = exec_cmd(dd_cmd)

        # The image file itself acts as the "device".
        self.device = self.image_file
312 | |||
313 | |||
class LoopbackDisk(Disk):
    """A Disk backed by a file via the loop module."""
    def __init__(self, lofile, size):
        Disk.__init__(self, size)
        self.losetupcmd = find_binary_path("losetup")
        self.lofile = lofile

    def fixed(self):
        # A loopback file can be resized, so the size is not fixed.
        return False

    def exists(self):
        return os.path.exists(self.lofile)

    def create(self):
        # Idempotent: skip if a loop device is already attached.
        if self.device is None:
            self.device = get_loop_device(self.losetupcmd, self.lofile)

    def cleanup(self):
        if self.device is None:
            return
        msger.debug("Losetup remove %s" % self.device)
        runner.show([self.losetupcmd, "-d", self.device])
        self.device = None
339 | |||
class SparseLoopbackDisk(LoopbackDisk):
    """A Disk backed by a sparse file via the loop module."""
    def __init__(self, lofile, size):
        LoopbackDisk.__init__(self, lofile, size)

    def expand(self, create = False, size = None):
        # Grow the sparse backing file to 'size' bytes (default: the
        # configured disk size).  With create=True the file and any missing
        # parent directories are created first.
        flags = os.O_WRONLY
        if create:
            flags |= os.O_CREAT
            if not os.path.exists(self.lofile):
                makedirs(os.path.dirname(self.lofile))

        if size is None:
            size = self.size

        msger.debug("Extending sparse file %s to %d" % (self.lofile, size))
        if create:
            fd = os.open(self.lofile, flags, 0644)
        else:
            fd = os.open(self.lofile, flags)

        # NOTE(review): size is clamped to at least 1 byte — presumably so
        # the resulting file is never zero-length; confirm against callers.
        if size <= 0:
            size = 1
        try:
            os.ftruncate(fd, size)
        except:
            # may be limited by 2G in 32bit env
            os.ftruncate(fd, 2**31L)

        os.close(fd)

    def truncate(self, size = None):
        # Shrink (or extend) the backing file to exactly 'size' bytes,
        # defaulting to the configured disk size.
        if size is None:
            size = self.size

        msger.debug("Truncating sparse file %s to %d" % (self.lofile, size))
        fd = os.open(self.lofile, os.O_WRONLY)
        os.ftruncate(fd, size)
        os.close(fd)

    def create(self):
        # Materialize the sparse file, then attach it to a loop device
        # via the parent class.
        self.expand(create = True)
        LoopbackDisk.create(self)
383 | |||
class Mount:
    """Abstract base for anything that can be mounted and unmounted."""
    def __init__(self, mountdir):
        # Directory this object mounts onto.
        self.mountdir = mountdir

    def cleanup(self):
        # Default cleanup is simply to unmount.
        self.unmount()

    def mount(self, options = None):
        pass

    def unmount(self):
        pass
397 | |||
class DiskMount(Mount):
    """A Mount object that handles mounting of a Disk."""
    def __init__(self, disk, mountdir, fstype = None, rmmountdir = True):
        Mount.__init__(self, mountdir)

        self.disk = disk
        self.fstype = fstype
        self.rmmountdir = rmmountdir

        self.mounted = False
        self.rmdir = False
        # mkfs is only resolvable when a filesystem type was requested.
        self.mkfscmd = find_binary_path("mkfs." + self.fstype) if fstype else None
        self.mountcmd = find_binary_path("mount")
        self.umountcmd = find_binary_path("umount")

    def cleanup(self):
        # Unmount first (via Mount.cleanup), then release the disk.
        Mount.cleanup(self)
        self.disk.cleanup()

    def unmount(self):
        if self.mounted:
            msger.debug("Unmounting directory %s" % self.mountdir)
            runner.quiet('sync') # sync the data on this mount point
            # Lazy unmount (-l) so busy mounts detach once idle.
            if runner.show([self.umountcmd, "-l", self.mountdir]) != 0:
                raise MountError("Failed to umount %s" % self.mountdir)
            self.mounted = False
        if self.rmdir and not self.mounted:
            # Best effort: remove the mount point we created ourselves.
            try:
                os.rmdir(self.mountdir)
            except OSError:
                pass
            self.rmdir = False


    def __create(self):
        self.disk.create()


    def mount(self, options = None):
        if self.mounted:
            return

        if not os.path.isdir(self.mountdir):
            msger.debug("Creating mount point %s" % self.mountdir)
            os.makedirs(self.mountdir)
            # Only remove the directory later if we created it here.
            self.rmdir = self.rmmountdir

        self.__create()

        msger.debug("Mounting %s at %s" % (self.disk.device, self.mountdir))
        args = [ self.mountcmd ]
        if options:
            args.extend(["-o", options])
        args.extend([self.disk.device, self.mountdir])
        if self.fstype:
            args.extend(["-t", self.fstype])

        rc = runner.show(args)
        if rc != 0:
            raise MountError("Failed to mount '%s' to '%s' with command '%s'. Retval: %s" %
                             (self.disk.device, self.mountdir, " ".join(args), rc))

        self.mounted = True
466 | |||
class ExtDiskMount(DiskMount):
    """A DiskMount object that is able to format/resize ext[23] filesystems."""
    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
        self.blocksize = blocksize
        # "/" is stripped: it is not usable inside a filesystem label.
        self.fslabel = fslabel.replace("/", "")
        self.uuid = str(uuid.uuid4())
        self.skipformat = skipformat
        self.fsopts = fsopts
        self.extopts = None
        self.dumpe2fs = find_binary_path("dumpe2fs")
        self.tune2fs = find_binary_path("tune2fs")

    def __parse_field(self, output, field):
        """Return the value of a '<field>: <value>' line from dumpe2fs-style
        output.  Raises KeyError when the field is absent."""
        for line in output.split("\n"):
            if line.startswith(field + ":"):
                return line[len(field) + 1:].strip()

        raise KeyError("Failed to find field '%s' in output" % field)

    def __format_filesystem(self):
        """mkfs the backing device, then apply default tuning."""
        if self.skipformat:
            msger.debug("Skip filesystem format.")
            return

        msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
        cmdlist = [self.mkfscmd, "-F", "-L", self.fslabel, "-m", "1", "-b",
                   str(self.blocksize), "-U", self.uuid]
        if self.extopts:
            cmdlist.extend(self.extopts.split())
        cmdlist.extend([self.disk.device])

        rc, errout = runner.runtool(cmdlist, catch=2)
        if rc != 0:
            raise MountError("Error creating %s filesystem on disk %s:\n%s" %
                             (self.fstype, self.disk.device, errout))

        if not self.extopts:
            # No caller-supplied options: disable periodic fsck and enable
            # dir_index plus user_xattr/acl by default.
            msger.debug("Tuning filesystem on %s" % self.disk.device)
            runner.show([self.tune2fs, "-c0", "-i0", "-Odir_index", "-ouser_xattr,acl", self.disk.device])

    def __resize_filesystem(self, size = None):
        """Resize the backing file and filesystem to 'size' bytes (default:
        the configured disk size).  Returns the new size, or None when no
        resize was needed."""
        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]

        if size is None:
            size = self.disk.size

        if size == current_size:
            return

        if size > current_size:
            # BUGFIX: SparseLoopbackDisk.expand has signature
            # (create=False, size=None); passing 'size' positionally bound
            # it to 'create', so the file was expanded to the default disk
            # size instead of the requested one.  Pass it by keyword.
            self.disk.expand(size = size)

        self.__fsck()

        resize2fs(self.disk.lofile, size)
        return size

    def __create(self):
        # Resize an existing (non-fixed) image to the target size, or
        # create and format a fresh one.
        resize = False
        if not self.disk.fixed() and self.disk.exists():
            resize = True

        self.disk.create()

        if resize:
            self.__resize_filesystem()
        else:
            self.__format_filesystem()

    def mount(self, options = None):
        self.__create()
        DiskMount.mount(self, options)

    def __fsck(self):
        msger.info("Checking filesystem %s" % self.disk.lofile)
        runner.quiet(["/sbin/e2fsck", "-f", "-y", self.disk.lofile])

    def __get_size_from_filesystem(self):
        # Filesystem size = block count reported by dumpe2fs * block size.
        return int(self.__parse_field(runner.outs([self.dumpe2fs, '-h', self.disk.lofile]),
                                      "Block count")) * self.blocksize

    def __resize_to_minimal(self):
        """Binary-search the smallest size resize2fs accepts; return it."""
        self.__fsck()

        bot = 0
        top = self.__get_size_from_filesystem()
        while top != (bot + 1):
            # '//' keeps the midpoint integral on python 3 as well.
            t = bot + ((top - bot) // 2)

            if not resize2fs(self.disk.lofile, t):
                top = t
            else:
                bot = t
        return top

    def resparse(self, size = None):
        """Shrink the image to its minimal size, truncate the sparse file,
        then grow it back to 'size'.  Returns the minimal size found."""
        self.cleanup()
        if size == 0:
            minsize = 0
        else:
            minsize = self.__resize_to_minimal()
            self.disk.truncate(minsize)

        self.__resize_filesystem(size)
        return minsize
577 | |||
class VfatDiskMount(DiskMount):
    """A DiskMount object that is able to format vfat/msdos filesystems."""
    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
        self.blocksize = blocksize
        # "/" is stripped: it is not usable inside a filesystem label.
        self.fslabel = fslabel.replace("/", "")
        # Build a random 32-bit volume ID from two 16-bit halves.
        rand1 = random.randint(0, 2**16 - 1)
        rand2 = random.randint(0, 2**16 - 1)
        self.uuid = "%04X-%04X" % (rand1, rand2)
        self.skipformat = skipformat
        self.fsopts = fsopts
        self.fsckcmd = find_binary_path("fsck." + self.fstype)

    def __format_filesystem(self):
        """mkfs the backing device with the configured label and volume ID."""
        if self.skipformat:
            msger.debug("Skip filesystem format.")
            return

        msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
        rc = runner.show([self.mkfscmd, "-n", self.fslabel,
                          "-i", self.uuid.replace("-", ""), self.disk.device])
        if rc != 0:
            raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))

        msger.verbose("Tuning filesystem on %s" % self.disk.device)

    def __resize_filesystem(self, size = None):
        """Grow the backing file to 'size' bytes.  Note that there is no
        in-place resize tool for vfat, so only the file is grown."""
        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]

        if size is None:
            size = self.disk.size

        if size == current_size:
            return

        if size > current_size:
            # BUGFIX: expand has signature (create=False, size=None);
            # passing 'size' positionally bound it to 'create', expanding
            # the file to the default disk size instead.  Use a keyword.
            self.disk.expand(size = size)

        self.__fsck()

        return size

    def __create(self):
        # Resize an existing (non-fixed) image, or create and format one.
        resize = False
        if not self.disk.fixed() and self.disk.exists():
            resize = True

        self.disk.create()

        if resize:
            self.__resize_filesystem()
        else:
            self.__format_filesystem()

    def mount(self, options = None):
        self.__create()
        DiskMount.mount(self, options)

    def __fsck(self):
        msger.debug("Checking filesystem %s" % self.disk.lofile)
        runner.show([self.fsckcmd, "-y", self.disk.lofile])

    def __get_size_from_filesystem(self):
        return self.disk.size

    def __resize_to_minimal(self):
        # vfat cannot be shrunk in place, so the "minimal" size is simply
        # the current filesystem size.
        self.__fsck()
        return self.__get_size_from_filesystem()

    def resparse(self, size = None):
        """Truncate the backing file to its minimal size, then grow it back
        to 'size'.  Returns the minimal size."""
        self.cleanup()
        minsize = self.__resize_to_minimal()
        self.disk.truncate(minsize)
        self.__resize_filesystem(size)
        return minsize
661 | |||
class BtrfsDiskMount(DiskMount):
    """A DiskMount object that is able to format/resize btrfs filesystems."""
    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
        # Verify btrfs support before doing anything else.
        self.__check_btrfs()
        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
        self.blocksize = blocksize
        # "/" is stripped: it is not usable inside a filesystem label.
        self.fslabel = fslabel.replace("/", "")
        # Filled in by __format_filesystem from blkid output.
        self.uuid = None
        self.skipformat = skipformat
        self.fsopts = fsopts
        self.blkidcmd = find_binary_path("blkid")
        self.btrfsckcmd = find_binary_path("btrfsck")

    def __check_btrfs(self):
        """Ensure the running kernel can mount btrfs; raise MountError if not."""
        found = False
        # Need to load the btrfs module before it shows in /proc/filesystems.
        load_module("btrfs")
        # file.xreadlines() is deprecated; iterating the file object is
        # equivalent (and also works on python 3).
        for line in open("/proc/filesystems"):
            if line.find("btrfs") > -1:
                found = True
                break
        if not found:
            raise MountError("Your system can't mount btrfs filesystem, please make sure your kernel has btrfs support and the module btrfs.ko has been loaded.")

        # disable selinux, selinux will block write
        if os.path.exists("/usr/sbin/setenforce"):
            runner.show(["/usr/sbin/setenforce", "0"])

    def __parse_field(self, output, field):
        """Return the value of '<field>="<value>"' from blkid-style output.

        Raises KeyError when the field is absent."""
        for line in output.split(" "):
            if line.startswith(field + "="):
                return line[len(field) + 1:].strip().replace("\"", "")

        raise KeyError("Failed to find field '%s' in output" % field)

    def __format_filesystem(self):
        """mkfs the backing device and record the generated UUID."""
        if self.skipformat:
            msger.debug("Skip filesystem format.")
            return

        msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
        rc = runner.show([self.mkfscmd, "-L", self.fslabel, self.disk.device])
        if rc != 0:
            raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))

        self.uuid = self.__parse_field(runner.outs([self.blkidcmd, self.disk.device]), "UUID")

    def __resize_filesystem(self, size = None):
        """Grow the backing file to 'size' bytes (default: the configured
        disk size); the filesystem itself is only fsck'd."""
        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]

        if size is None:
            size = self.disk.size

        if size == current_size:
            return

        if size > current_size:
            # BUGFIX: expand has signature (create=False, size=None);
            # passing 'size' positionally bound it to 'create', expanding
            # the file to the default disk size instead.  Use a keyword.
            self.disk.expand(size = size)

        self.__fsck()
        return size

    def __create(self):
        # Resize an existing (non-fixed) image, or create and format one.
        resize = False
        if not self.disk.fixed() and self.disk.exists():
            resize = True

        self.disk.create()

        if resize:
            self.__resize_filesystem()
        else:
            self.__format_filesystem()

    def mount(self, options = None):
        self.__create()
        DiskMount.mount(self, options)

    def __fsck(self):
        msger.debug("Checking filesystem %s" % self.disk.lofile)
        runner.quiet([self.btrfsckcmd, self.disk.lofile])

    def __get_size_from_filesystem(self):
        return self.disk.size

    def __resize_to_minimal(self):
        # btrfs is not shrunk here; the minimal size is the current size.
        self.__fsck()

        return self.__get_size_from_filesystem()

    def resparse(self, size = None):
        """Truncate the backing file to its minimal size, then grow it back
        to 'size'.  Returns the minimal size."""
        self.cleanup()
        minsize = self.__resize_to_minimal()
        self.disk.truncate(minsize)
        self.__resize_filesystem(size)
        return minsize
758 | |||
class DeviceMapperSnapshot(object):
    """A device-mapper snapshot of 'imgloop' whose writes land in 'cowloop'."""
    def __init__(self, imgloop, cowloop):
        self.imgloop = imgloop
        self.cowloop = cowloop

        self.__created = False
        self.__name = None
        self.dmsetupcmd = find_binary_path("dmsetup")

        # Make sure the dm_snapshot kernel module is available.
        load_module("dm_snapshot")

    def get_path(self):
        # No path until create() has named the mapper device.
        if self.__name is None:
            return None
        return os.path.join("/dev/mapper", self.__name)
    path = property(get_path)

    def create(self):
        """Attach both loop devices and build the snapshot mapping (idempotent)."""
        if self.__created:
            return

        self.imgloop.create()
        self.cowloop.create()

        self.__name = "imgcreate-%d-%d" % (os.getpid(),
                                           random.randint(0, 2**16))

        size = os.stat(self.imgloop.lofile)[stat.ST_SIZE]

        # dmsetup table: start, length in 512-byte sectors, target, origin,
        # COW device, persistent flag, chunk size.
        table = "0 %d snapshot %s %s p 8" % (size / 512,
                                             self.imgloop.device,
                                             self.cowloop.device)

        args = [self.dmsetupcmd, "create", self.__name, "--table", table]
        if runner.show(args) != 0:
            # Roll back the loop devices before reporting failure.
            self.cowloop.cleanup()
            self.imgloop.cleanup()
            raise SnapshotError("Could not create snapshot device using: " + ' '.join(args))

        self.__created = True

    def remove(self, ignore_errors = False):
        """Tear down the snapshot mapping and release both loop devices."""
        if not self.__created:
            return

        # Give the device a moment to settle before removing it.
        time.sleep(2)
        rc = runner.show([self.dmsetupcmd, "remove", self.__name])
        if not ignore_errors and rc != 0:
            raise SnapshotError("Could not remove snapshot device")

        self.__name = None
        self.__created = False

        self.cowloop.cleanup()
        self.imgloop.cleanup()

    def get_cow_used(self):
        """Return the number of bytes currently used in the COW device."""
        if not self.__created:
            return 0

        #
        # dmsetup status on a snapshot returns e.g.
        # "0 8388608 snapshot 416/1048576"
        # or, more generally:
        # "A B snapshot C/D"
        # where C is the number of 512 byte sectors in use
        #
        out = runner.outs([self.dmsetupcmd, "status", self.__name])
        try:
            sectors_used = int(out.split()[3].split('/')[0])
        except ValueError:
            raise SnapshotError("Failed to parse dmsetup status: " + out)
        return sectors_used * 512
832 | |||
def create_image_minimizer(path, image, minimal_size):
    """
    Builds a copy-on-write image which can be used to
    create a device-mapper snapshot of an image where
    the image's filesystem is as small as possible

    The steps taken are:
      1) Create a sparse COW
      2) Loopback mount the image and the COW
      3) Create a device-mapper snapshot of the image
         using the COW
      4) Resize the filesystem to the minimal size
      5) Determine the amount of space used in the COW
      6) Destroy the device-mapper snapshot
      7) Truncate the COW, removing unused space
      8) Create a squashfs of the COW
    """
    imgloop = LoopbackDisk(image, None) # Passing bogus size - doesn't matter

    # 64 MiB sparse COW file next to 'path' ('L': python 2 long literals).
    cowloop = SparseLoopbackDisk(os.path.join(os.path.dirname(path), "osmin"),
                                 64L * 1024L * 1024L)

    snapshot = DeviceMapperSnapshot(imgloop, cowloop)

    try:
        snapshot.create()

        # Shrink the filesystem via the snapshot; the writes land in the COW.
        resize2fs(snapshot.path, minimal_size)

        cow_used = snapshot.get_cow_used()
    finally:
        # Ignore removal errors only if we are already propagating an exception.
        snapshot.remove(ignore_errors = (not sys.exc_info()[0] is None))

    cowloop.truncate(cow_used)

    mksquashfs(cowloop.lofile, path)

    os.unlink(cowloop.lofile)
871 | |||
def load_module(module):
    """Ensure kernel module 'module' is loaded, modprobing it if missing."""
    found = False
    # /proc/modules lines start with "<name> <size> ...".
    # file.xreadlines() is deprecated; iterating the file object is
    # equivalent (and also works on python 3).
    for line in open('/proc/modules'):
        if line.startswith("%s " % module):
            found = True
            break
    if not found:
        msger.info("Loading %s..." % module)
        runner.quiet(['modprobe', module])
881 | |||
class LoopDevice(object):
    """Management of a single /dev/loopN device node."""
    def __init__(self, loopid=None):
        self.device = None
        self.loopid = loopid
        self.created = False
        self.kpartxcmd = find_binary_path("kpartx")
        self.losetupcmd = find_binary_path("losetup")

    def register(self, device):
        """Adopt an existing device node instead of creating one."""
        self.device = device
        self.loopid = None
        self.created = True

    def reg_atexit(self):
        # Guarantee the device is released when the process exits.
        import atexit
        atexit.register(self.close)

    def _genloopid(self):
        """Pick an unused loop id in the range [10, 100)."""
        import glob
        if not glob.glob("/dev/loop[0-9]*"):
            return 10

        # Extract the numeric suffix of each /dev/loopN path (0 otherwise).
        fint = lambda x: x[9:].isdigit() and int(x[9:]) or 0
        maxid = 1 + max(filter(lambda x: x<100,
                               map(fint, glob.glob("/dev/loop[0-9]*"))))
        if maxid < 10: maxid = 10
        if maxid >= 100:
            # BUGFIX: a bare 'raise' with no active exception is itself an
            # error (TypeError in python 2); raise something meaningful.
            raise MountError("Failed to allocate a loop device id: all ids below 100 are in use")
        return maxid

    def _kpseek(self, device):
        """Return True if kpartx reports partition mappings on 'device'."""
        rc, out = runner.runtool([self.kpartxcmd, '-l', '-v', device])
        if rc != 0:
            raise MountError("Can't query dm snapshot on %s" % device)
        for line in out.splitlines():
            if line and line.startswith("loop"):
                return True
        return False

    def _loseek(self, device):
        """Return True if 'device' shows up in 'losetup -a' (i.e. is attached)."""
        import re
        rc, out = runner.runtool([self.losetupcmd, '-a'])
        if rc != 0:
            raise MountError("Failed to run 'losetup -a'")
        for line in out.splitlines():
            m = re.match("([^:]+): .*", line)
            if m and m.group(1) == device:
                return True
        return False

    def create(self):
        """Ensure a free loop device node exists, creating it if necessary."""
        if not self.created:
            if not self.loopid:
                self.loopid = self._genloopid()
            self.device = "/dev/loop%d" % self.loopid
            if os.path.exists(self.device):
                if self._loseek(self.device):
                    raise MountError("Device busy: %s" % self.device)
                else:
                    self.created = True
                    return

            # Node does not exist yet; create it (major 7 == loop).
            mknod = find_binary_path('mknod')
            rc = runner.show([mknod, '-m664', self.device, 'b', '7', str(self.loopid)])
            if rc != 0:
                raise MountError("Failed to create device %s" % self.device)
            else:
                self.created = True

    def close(self):
        if self.created:
            try:
                self.cleanup()
                self.device = None
            except MountError as e:
                msger.error("%s" % e)

    def cleanup(self):
        """Detach partition mappings and the loop device itself (best effort)."""
        if self.device is None:
            return

        if self._kpseek(self.device):
            if self.created:
                # Close inherited descriptors so kpartx can release the device.
                for i in range(3, os.sysconf("SC_OPEN_MAX")):
                    try:
                        os.close(i)
                    except:
                        pass
            runner.quiet([self.kpartxcmd, "-d", self.device])
        if self._loseek(self.device):
            runner.quiet([self.losetupcmd, "-d", self.device])
        # FIXME: should sleep a while between two loseek
        if self._loseek(self.device):
            msger.warning("Can't cleanup loop device %s" % self.device)
        elif self.loopid:
            os.unlink(self.device)
979 | |||
# Directory holding one pidfile per allocated loop device, used by
# clean_loop_devices() to detect devices whose owner process has exited.
DEVICE_PIDFILE_DIR = "/var/tmp/mic/device"
# Lock file serializing loop-device allocation across mic processes.
DEVICE_LOCKFILE = "/var/lock/__mic_loopdev.lock"
982 | |||
def get_loop_device(losetupcmd, lofile):
    """Allocate a loop device, attach 'lofile' to it and return its path."""
    global DEVICE_PIDFILE_DIR
    global DEVICE_LOCKFILE

    import fcntl
    # Serialize allocation across processes with an exclusive file lock.
    makedirs(os.path.dirname(DEVICE_LOCKFILE))
    fp = open(DEVICE_LOCKFILE, 'w')
    fcntl.flock(fp, fcntl.LOCK_EX)
    try:
        loopdev = None
        devinst = LoopDevice()

        # clean up left loop device first
        clean_loop_devices()

        # Ask losetup for an unused loop device.
        rc, out = runner.runtool([losetupcmd, "--find"])
        if rc == 0:
            loopdev = out.split()[0]
            devinst.register(loopdev)
        if not loopdev or not os.path.exists(loopdev):
            devinst.create()
            loopdev = devinst.device

        # Attach the image file to the loop device.
        if runner.show([losetupcmd, loopdev, lofile]) != 0:
            raise MountError("Failed to setup loop device for '%s'" % lofile)

        devinst.reg_atexit()

        # Record which pid owns this loop device.
        makedirs(DEVICE_PIDFILE_DIR)
        pidfile = os.path.join(DEVICE_PIDFILE_DIR, os.path.basename(loopdev))
        if os.path.exists(pidfile):
            os.unlink(pidfile)
        with open(pidfile, 'w') as wf:
            wf.write(str(os.getpid()))

    except MountError as err:
        raise CreatorError("%s" % str(err))
    finally:
        # Best effort: always drop the lock and remove the lock file.
        try:
            fcntl.flock(fp, fcntl.LOCK_UN)
            fp.close()
            os.unlink(DEVICE_LOCKFILE)
        except:
            pass

    return loopdev
1035 | |||
def clean_loop_devices(piddir=DEVICE_PIDFILE_DIR):
    """Release loop devices whose owning process (per pidfile) has exited."""
    # os.path.isdir() is False for missing paths, so this covers both checks.
    if not os.path.isdir(piddir):
        return

    for loopdev in os.listdir(piddir):
        pidfile = os.path.join(piddir, loopdev)
        devpid = None
        try:
            with open(pidfile, 'r') as rf:
                devpid = int(rf.read())
        except:
            devpid = None

        # Skip devices whose owning process is still alive.
        if not devpid or os.path.exists(os.path.join('/proc', str(devpid))):
            continue

        # Best-effort teardown of the stale device.
        try:
            devinst = LoopDevice()
            devinst.register(os.path.join('/dev', loopdev))
            devinst.cleanup()
            os.unlink(pidfile)
        except:
            pass
1060 | |||
diff --git a/scripts/lib/mic/utils/gpt_parser.py b/scripts/lib/mic/utils/gpt_parser.py new file mode 100644 index 0000000000..5d43b70778 --- /dev/null +++ b/scripts/lib/mic/utils/gpt_parser.py | |||
@@ -0,0 +1,331 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2013 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | """ This module implements a simple GPT partitions parser which can read the | ||
19 | GPT header and the GPT partition table. """ | ||
20 | |||
21 | import struct | ||
22 | import uuid | ||
23 | import binascii | ||
24 | from mic.utils.errors import MountError | ||
25 | |||
26 | _GPT_HEADER_FORMAT = "<8s4sIIIQQQQ16sQIII" | ||
27 | _GPT_HEADER_SIZE = struct.calcsize(_GPT_HEADER_FORMAT) | ||
28 | _GPT_ENTRY_FORMAT = "<16s16sQQQ72s" | ||
29 | _GPT_ENTRY_SIZE = struct.calcsize(_GPT_ENTRY_FORMAT) | ||
30 | _SUPPORTED_GPT_REVISION = '\x00\x00\x01\x00' | ||
31 | |||
32 | def _stringify_uuid(binary_uuid): | ||
33 | """ A small helper function to transform a binary UUID into a string | ||
34 | format. """ | ||
35 | |||
36 | uuid_str = str(uuid.UUID(bytes_le = binary_uuid)) | ||
37 | |||
38 | return uuid_str.upper() | ||
39 | |||
def _calc_header_crc(raw_hdr):
    """ Calculate GPT header CRC32 checksum. The 'raw_hdr' parameter has to
    be a list or a tuple containing all the elements of the GPT header in a
    "raw" form, meaning that it should simply contain "unpacked" disk data.
    """

    # The CRC field itself (element 3) must be zeroed while checksumming
    fields = list(raw_hdr)
    fields[3] = 0
    packed = struct.pack(_GPT_HEADER_FORMAT, *fields)

    return binascii.crc32(packed) & 0xFFFFFFFF
51 | |||
def _validate_header(raw_hdr):
    """ Validate the GPT header. The 'raw_hdr' parameter has to be a list or a
    tuple containing all the elements of the GPT header in a "raw" form,
    meaning that it should simply contain "unpacked" disk data.
    Raises 'MountError' when any of the checks fails. """

    # Validate the signature
    if raw_hdr[0] != 'EFI PART':
        raise MountError("GPT partition table not found")

    # Validate the revision
    if raw_hdr[1] != _SUPPORTED_GPT_REVISION:
        raise MountError("Unsupported GPT revision '%s', supported revision " \
                         "is '%s'" % \
                          (binascii.hexlify(raw_hdr[1]),
                           binascii.hexlify(_SUPPORTED_GPT_REVISION)))

    # Validate header size
    if raw_hdr[2] != _GPT_HEADER_SIZE:
        raise MountError("Bad GPT header size: %d bytes, expected %d" % \
                         (raw_hdr[2], _GPT_HEADER_SIZE))

    crc = _calc_header_crc(raw_hdr)
    if raw_hdr[3] != crc:
        # Report the on-disk value first and the computed (expected) value
        # second -- the original code had the arguments swapped, so the
        # message contradicted its own "is X, should be Y" wording.
        raise MountError("GPT header crc mismatch: %#x, should be %#x" % \
                         (raw_hdr[3], crc))
77 | |||
class GptParser:
    """ GPT partition table parser. Allows reading the GPT header and the
    partition table, as well as modifying the partition table records. """

    def __init__(self, disk_path, sector_size = 512):
        """ The class constructor which accepts the following parameters:
            * disk_path - full path to the disk image or device node
            * sector_size - size of a disk sector in bytes """

        self.sector_size = sector_size
        self.disk_path = disk_path

        try:
            self._disk_obj = open(disk_path, 'r+b')
        except IOError as err:
            raise MountError("Cannot open file '%s' for reading GPT " \
                             "partitions: %s" % (disk_path, err))

    def __del__(self):
        """ The class destructor. Close the disk file if it was opened;
        '_disk_obj' is missing when '__init__' failed, and the original
        unconditional close raised AttributeError in that case. """

        disk_obj = getattr(self, '_disk_obj', None)
        if disk_obj is not None:
            disk_obj.close()

    def _read_disk(self, offset, size):
        """ A helper function which reads 'size' bytes from offset 'offset' of
        the disk and checks all the error conditions. """

        self._disk_obj.seek(offset)
        try:
            data = self._disk_obj.read(size)
        except IOError as err:
            raise MountError("cannot read from '%s': %s" % \
                             (self.disk_path, err))

        # A short read means the requested range runs past the end of the disk
        if len(data) != size:
            raise MountError("cannot read %d bytes from offset '%d' of '%s', " \
                             "read only %d bytes" % \
                             (size, offset, self.disk_path, len(data)))

        return data

    def _write_disk(self, offset, buf):
        """ A helper function which writes buffer 'buf' to offset 'offset' of
        the disk. This function takes care of unaligned writes and checks all
        the error conditions. """

        # Since we may be dealing with a block device, we only can write in
        # 'self.sector_size' chunks. Find the aligned starting and ending
        # disk offsets to read.  Floor division ('//') keeps the semantics
        # of the original Python 2 '/' on ints and stays correct on Python 3.
        start = (offset // self.sector_size) * self.sector_size
        end = ((start + len(buf)) // self.sector_size + 1) * self.sector_size

        # Read-modify-write: splice 'buf' into the aligned window
        data = self._read_disk(start, end - start)
        off = offset - start
        data = data[:off] + buf + data[off + len(buf):]

        self._disk_obj.seek(start)
        try:
            self._disk_obj.write(data)
        except IOError as err:
            raise MountError("cannot write to '%s': %s" % (self.disk_path, err))

    def read_header(self, primary = True):
        """ Read and verify the GPT header and return a dictionary containing
        the following elements:

        'signature'   : header signature
        'revision'    : header revision
        'hdr_size'    : header size in bytes
        'hdr_crc'     : header CRC32
        'hdr_lba'     : LBA of this header
        'hdr_offs'    : byte disk offset of this header
        'backup_lba'  : backup header LBA
        'backup_offs' : byte disk offset of backup header
        'first_lba'   : first usable LBA for partitions
        'first_offs'  : first usable byte disk offset for partitions
        'last_lba'    : last usable LBA for partitions
        'last_offs'   : last usable byte disk offset for partitions
        'disk_uuid'   : UUID of the disk
        'ptable_lba'  : starting LBA of array of partition entries
        'ptable_offs' : disk byte offset of the start of the partition table
        'ptable_size' : partition table size in bytes
        'entries_cnt' : number of available partition table entries
        'entry_size'  : size of a single partition entry
        'ptable_crc'  : CRC32 of the partition table
        'primary'     : a boolean, if 'True', this is the primary GPT header,
                        if 'False' - the secondary
        'primary_str' : contains string "primary" if this is the primary GPT
                        header, and "backup" otherwise

        This dictionary corresponds to the GPT header format. Please, see the
        UEFI standard for the description of these fields.

        If the 'primary' parameter is 'True', the primary GPT header is read,
        otherwise the backup GPT header is read instead. """

        # Read and validate the primary GPT header (always needed, since the
        # backup header's location comes from the primary one)
        raw_hdr = self._read_disk(self.sector_size, _GPT_HEADER_SIZE)
        raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
        _validate_header(raw_hdr)
        primary_str = "primary"

        if not primary:
            # Read and validate the backup GPT header, located at the LBA
            # recorded in the primary header's 'backup_lba' field (index 6)
            raw_hdr = self._read_disk(raw_hdr[6] * self.sector_size, _GPT_HEADER_SIZE)
            raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
            _validate_header(raw_hdr)
            primary_str = "backup"

        return { 'signature'   : raw_hdr[0],
                 'revision'    : raw_hdr[1],
                 'hdr_size'    : raw_hdr[2],
                 'hdr_crc'     : raw_hdr[3],
                 'hdr_lba'     : raw_hdr[5],
                 'hdr_offs'    : raw_hdr[5] * self.sector_size,
                 'backup_lba'  : raw_hdr[6],
                 'backup_offs' : raw_hdr[6] * self.sector_size,
                 'first_lba'   : raw_hdr[7],
                 'first_offs'  : raw_hdr[7] * self.sector_size,
                 'last_lba'    : raw_hdr[8],
                 'last_offs'   : raw_hdr[8] * self.sector_size,
                 'disk_uuid'   :_stringify_uuid(raw_hdr[9]),
                 'ptable_lba'  : raw_hdr[10],
                 'ptable_offs' : raw_hdr[10] * self.sector_size,
                 'ptable_size' : raw_hdr[11] * raw_hdr[12],
                 'entries_cnt' : raw_hdr[11],
                 'entry_size'  : raw_hdr[12],
                 'ptable_crc'  : raw_hdr[13],
                 'primary'     : primary,
                 'primary_str' : primary_str }

    def _read_raw_ptable(self, header):
        """ Read and validate primary or backup partition table. The 'header'
        argument is the GPT header. If it is the primary GPT header, then the
        primary partition table is read and validated, otherwise - the backup
        one. The 'header' argument is a dictionary which is returned by the
        'read_header()' method. """

        raw_ptable = self._read_disk(header['ptable_offs'],
                                     header['ptable_size'])

        crc = binascii.crc32(raw_ptable) & 0xFFFFFFFF
        if crc != header['ptable_crc']:
            raise MountError("Partition table at LBA %d (%s) is corrupted" % \
                             (header['ptable_lba'], header['primary_str']))

        return raw_ptable

    def get_partitions(self, primary = True):
        """ This is a generator which parses the GPT partition table and
        generates the following dictionary for each partition:

        'index'       : the index of the partition table entry
        'offs'        : byte disk offset of the partition table entry
        'type_uuid'   : partition type UUID
        'part_uuid'   : partition UUID
        'first_lba'   : the first LBA
        'last_lba'    : the last LBA
        'flags'       : attribute flags
        'name'        : partition name
        'primary'     : a boolean, if 'True', this is the primary partition
                        table, if 'False' - the secondary
        'primary_str' : contains string "primary" if this is the primary GPT
                        header, and "backup" otherwise

        This dictionary corresponds to the GPT header format. Please, see the
        UEFI standard for the description of these fields.

        If the 'primary' parameter is 'True', partitions from the primary GPT
        partition table are generated, otherwise partitions from the backup GPT
        partition table are generated. """

        if primary:
            primary_str = "primary"
        else:
            primary_str = "backup"

        header = self.read_header(primary)
        raw_ptable = self._read_raw_ptable(header)

        for index in xrange(0, header['entries_cnt']):
            start = header['entry_size'] * index
            end = start + header['entry_size']
            raw_entry = struct.unpack(_GPT_ENTRY_FORMAT, raw_ptable[start:end])

            # Zero first/last LBA marks an unused partition table slot
            if raw_entry[2] == 0 or raw_entry[3] == 0:
                continue

            # The on-disk name is UTF-16, NUL-padded to the field width
            part_name = str(raw_entry[5].decode('UTF-16').split('\0', 1)[0])

            yield { 'index'       : index,
                    'offs'        : header['ptable_offs'] + start,
                    'type_uuid'   : _stringify_uuid(raw_entry[0]),
                    'part_uuid'   : _stringify_uuid(raw_entry[1]),
                    'first_lba'   : raw_entry[2],
                    'last_lba'    : raw_entry[3],
                    'flags'       : raw_entry[4],
                    'name'        : part_name,
                    'primary'     : primary,
                    'primary_str' : primary_str }

    def _change_partition(self, header, entry):
        """ A helper function for 'change_partition()' which changes a
        particular instance of the partition table (primary or backup). """

        if entry['index'] >= header['entries_cnt']:
            # Bug fix: the format string needs 3 values but the original
            # code supplied only 2, so raising it crashed with TypeError
            # instead of reporting the real problem.
            raise MountError("Partition table at LBA %d has only %d " \
                             "records cannot change record number %d" % \
                             (header['ptable_lba'], header['entries_cnt'],
                              entry['index']))
        # Read raw GPT header
        raw_hdr = self._read_disk(header['hdr_offs'], _GPT_HEADER_SIZE)
        raw_hdr = list(struct.unpack(_GPT_HEADER_FORMAT, raw_hdr))
        _validate_header(raw_hdr)

        # Prepare the new partition table entry
        raw_entry = struct.pack(_GPT_ENTRY_FORMAT,
                                uuid.UUID(entry['type_uuid']).bytes_le,
                                uuid.UUID(entry['part_uuid']).bytes_le,
                                entry['first_lba'],
                                entry['last_lba'],
                                entry['flags'],
                                entry['name'].encode('UTF-16'))

        # Write the updated entry to the disk
        entry_offs = header['ptable_offs'] + \
                     header['entry_size'] * entry['index']
        self._write_disk(entry_offs, raw_entry)

        # Calculate and update partition table CRC32
        raw_ptable = self._read_disk(header['ptable_offs'],
                                     header['ptable_size'])
        raw_hdr[13] = binascii.crc32(raw_ptable) & 0xFFFFFFFF

        # Calculate and update the GPT header CRC
        raw_hdr[3] = _calc_header_crc(raw_hdr)

        # Write the updated header to the disk
        raw_hdr = struct.pack(_GPT_HEADER_FORMAT, *raw_hdr)
        self._write_disk(header['hdr_offs'], raw_hdr)

    def change_partition(self, entry):
        """ Change a GPT partition. The 'entry' argument has the same format as
        'get_partitions()' returns. This function simply changes the partition
        table record corresponding to 'entry' in both, the primary and the
        backup GPT partition tables. The partition table CRC is re-calculated
        and the GPT headers are modified accordingly. """

        # Change the primary partition table
        header = self.read_header(True)
        self._change_partition(header, entry)

        # Change the backup partition table
        header = self.read_header(False)
        self._change_partition(header, entry)
diff --git a/scripts/lib/mic/utils/grabber.py b/scripts/lib/mic/utils/grabber.py new file mode 100644 index 0000000000..45e30b4fb0 --- /dev/null +++ b/scripts/lib/mic/utils/grabber.py | |||
@@ -0,0 +1,97 @@ | |||
1 | #!/usr/bin/python | ||
2 | |||
3 | import os | ||
4 | import sys | ||
5 | import rpm | ||
6 | import fcntl | ||
7 | import struct | ||
8 | import termios | ||
9 | |||
10 | from mic import msger | ||
11 | from mic.utils import runner | ||
12 | from mic.utils.errors import CreatorError | ||
13 | |||
14 | from urlgrabber import grabber | ||
15 | from urlgrabber import __version__ as grabber_version | ||
16 | |||
# Warn once at import time when the installed python-urlgrabber predates
# 3.9.0; rpm.labelCompare() returns -1 when the first version is older.
if rpm.labelCompare(grabber_version.split('.'), '3.9.0'.split('.')) == -1:
    msger.warning("Version of python-urlgrabber is %s, lower than '3.9.0', "
                  "you may encounter some network issues" % grabber_version)
20 | |||
def myurlgrab(url, filename, proxies, progress_obj = None):
    """Fetch 'url' into 'filename' and return the resulting local path.

    'file:/' URLs are served from the local filesystem: .rpm files are
    returned in place, everything else is copied to 'filename'.  Any other
    URL is downloaded with urlgrabber through 'proxies'; download errors
    are re-raised as CreatorError with the URL appended for context.
    """
    g = grabber.URLGrabber()
    if progress_obj is None:
        progress_obj = TextProgress()

    if url.startswith("file:/"):
        filepath = "/%s" % url.replace("file:", "").lstrip('/')
        if not os.path.exists(filepath):
            raise CreatorError("URLGrabber error: can't find file %s" % url)
        if url.endswith('.rpm'):
            return filepath
        # untouch repometadata in source path
        runner.show(['cp', '-f', filepath, filename])
        return filename

    try:
        filename = g.urlgrab(url=str(url),
                             filename=filename,
                             ssl_verify_host=False,
                             ssl_verify_peer=False,
                             proxies=proxies,
                             http_headers=(('Pragma', 'no-cache'),),
                             quote=0,
                             progress_obj=progress_obj)
    except grabber.URLGrabError as err:
        msg = str(err)
        if url not in msg:
            msg += ' on %s' % url
        raise CreatorError(msg)

    return filename
53 | |||
def terminal_width(fd=1):
    """ Get the real terminal width """
    try:
        # TIOCGWINSZ fills a struct winsize: 4 shorts, columns is the 2nd
        winsize = fcntl.ioctl(fd, termios.TIOCGWINSZ, 'abcdefgh')
        return struct.unpack('hhhh', winsize)[1]
    except:  # IOError
        # fall back to the classic 80-column default
        return 80
62 | |||
def truncate_url(url, width):
    """Return at most 'width' leading characters of the URL's basename."""
    basename = os.path.basename(url)
    return basename[:width]
65 | |||
class TextProgress(object):
    """Singleton urlgrabber progress object printing one status line per
    download via msger.  Note that '__init__' re-runs on every
    'TextProgress()' call, so 'total'/'counter' are reset each time the
    singleton is requested."""

    # make the class as singleton
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # Bug fix: object.__new__ must not receive the constructor
            # arguments -- forwarding them raises TypeError on Python 3
            # (and was deprecated since Python 2.6).
            cls._instance = super(TextProgress, cls).__new__(cls)

        return cls._instance

    def __init__(self, totalnum = None):
        # total number of files to fetch (None = unknown), 1-based counter
        self.total = totalnum
        self.counter = 1

    def start(self, filename, url, *args, **kwargs):
        """urlgrabber callback: print the 'Retrieving ...' status line."""
        self.url = url
        self.termwidth = terminal_width()
        msger.info("\r%-*s" % (self.termwidth, " "))
        if self.total is None:
            msger.info("\rRetrieving %s ..." % truncate_url(self.url, self.termwidth - 15))
        else:
            msger.info("\rRetrieving %s [%d/%d] ..." % (truncate_url(self.url, self.termwidth - 25), self.counter, self.total))

    def update(self, *args):
        """urlgrabber callback: per-chunk progress (intentionally silent)."""
        pass

    def end(self, *args):
        """urlgrabber callback: advance the file counter, newline when done."""
        if self.counter == self.total:
            msger.raw("\n")

        if self.total is not None:
            self.counter += 1
97 | |||
diff --git a/scripts/lib/mic/utils/misc.py b/scripts/lib/mic/utils/misc.py new file mode 100644 index 0000000000..95241d7f15 --- /dev/null +++ b/scripts/lib/mic/utils/misc.py | |||
@@ -0,0 +1,1065 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2010, 2011 Intel Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import sys | ||
20 | import time | ||
21 | import tempfile | ||
22 | import re | ||
23 | import shutil | ||
24 | import glob | ||
25 | import hashlib | ||
26 | import subprocess | ||
27 | import platform | ||
28 | import traceback | ||
29 | |||
30 | |||
31 | try: | ||
32 | import sqlite3 as sqlite | ||
33 | except ImportError: | ||
34 | import sqlite | ||
35 | |||
36 | try: | ||
37 | from xml.etree import cElementTree | ||
38 | except ImportError: | ||
39 | import cElementTree | ||
40 | xmlparse = cElementTree.parse | ||
41 | |||
42 | from mic import msger | ||
43 | from mic.utils.errors import CreatorError, SquashfsError | ||
44 | from mic.utils.fs_related import find_binary_path, makedirs | ||
45 | from mic.utils.proxy import get_proxy_for | ||
46 | from mic.utils import runner | ||
47 | |||
48 | |||
49 | RPM_RE = re.compile("(.*)\.(.*) (.*)-(.*)") | ||
50 | RPM_FMT = "%(name)s.%(arch)s %(version)s-%(release)s" | ||
51 | SRPM_RE = re.compile("(.*)-(\d+.*)-(\d+\.\d+).src.rpm") | ||
52 | |||
53 | |||
54 | def build_name(kscfg, release=None, prefix = None, suffix = None): | ||
55 | """Construct and return an image name string. | ||
56 | |||
57 | This is a utility function to help create sensible name and fslabel | ||
58 | strings. The name is constructed using the sans-prefix-and-extension | ||
59 | kickstart filename and the supplied prefix and suffix. | ||
60 | |||
61 | kscfg -- a path to a kickstart file | ||
62 | release -- a replacement to suffix for image release | ||
63 | prefix -- a prefix to prepend to the name; defaults to None, which causes | ||
64 | no prefix to be used | ||
65 | suffix -- a suffix to append to the name; defaults to None, which causes | ||
66 | a YYYYMMDDHHMM suffix to be used | ||
67 | |||
68 | Note, if maxlen is less then the len(suffix), you get to keep both pieces. | ||
69 | |||
70 | """ | ||
71 | name = os.path.basename(kscfg) | ||
72 | idx = name.rfind('.') | ||
73 | if idx >= 0: | ||
74 | name = name[:idx] | ||
75 | |||
76 | if release is not None: | ||
77 | suffix = "" | ||
78 | if prefix is None: | ||
79 | prefix = "" | ||
80 | if suffix is None: | ||
81 | suffix = time.strftime("%Y%m%d%H%M") | ||
82 | |||
83 | if name.startswith(prefix): | ||
84 | name = name[len(prefix):] | ||
85 | |||
86 | prefix = "%s-" % prefix if prefix else "" | ||
87 | suffix = "-%s" % suffix if suffix else "" | ||
88 | |||
89 | ret = prefix + name + suffix | ||
90 | return ret | ||
91 | |||
def get_distro():
    """Detect the host Linux distribution, support "meego".

    Returns a (dist, version, id) tuple.  Every element is '' when the
    distribution cannot be determined -- in particular on Python builds
    that ship neither platform.linux_distribution() nor the older
    platform.dist() (both were removed in Python 3.8); the original code
    crashed with AttributeError in that case.
    """

    support_dists = ('SuSE',
                     'debian',
                     'fedora',
                     'redhat',
                     'centos',
                     'meego',
                     'moblin',
                     'tizen')
    try:
        (dist, ver, dist_id) = platform.linux_distribution( \
                                   supported_dists = support_dists)
    except:
        try:
            # pre-2.6 fallback API
            (dist, ver, dist_id) = platform.dist( \
                                       supported_dists = support_dists)
        except:
            # neither detection API is available -- report unknown
            (dist, ver, dist_id) = ('', '', '')

    return (dist, ver, dist_id)
112 | |||
def get_distro_str():
    """Return a single human-readable string describing the host distro,
    e.g. "fedora 20 heisenbug", or 'Unknown Linux Distro' when detection
    fails."""
    (dist, ver, dist_id) = get_distro()

    if not dist:
        return 'Unknown Linux Distro'

    return ' '.join(map(str.strip, (dist, ver, dist_id))).strip()
def hide_loopdev_presentation():
    """Install a udev rule that stops UDisks from presenting loop devices
    to the desktop while an image is being built.  The rule path is kept in
    the module-global _LOOP_RULE_PTH so unhide_loopdev_presentation() can
    remove it later.  Best effort: failures are silently ignored."""
    udev_rules = "80-prevent-loop-present.rules"
    candidate_dirs = [
                       '/usr/lib/udev/rules.d/',
                       '/lib/udev/rules.d/',
                       '/etc/udev/rules.d/'
                     ]

    global _LOOP_RULE_PTH

    # pick the last existing rules directory (same choice as before)
    for rdir in candidate_dirs:
        if os.path.exists(rdir):
            _LOOP_RULE_PTH = os.path.join(rdir, udev_rules)

    if not _LOOP_RULE_PTH:
        return

    try:
        with open(_LOOP_RULE_PTH, 'w') as wf:
            wf.write('KERNEL=="loop*", ENV{UDISKS_PRESENTATION_HIDE}="1"')

        runner.quiet('udevadm trigger')
    except:
        pass
150 | |||
def unhide_loopdev_presentation():
    """Remove the udev rule installed by hide_loopdev_presentation() and
    re-trigger udev.  No-op when no rule was installed; failures are
    silently ignored (best effort)."""
    global _LOOP_RULE_PTH

    if not _LOOP_RULE_PTH:
        return

    try:
        os.unlink(_LOOP_RULE_PTH)
        runner.quiet('udevadm trigger')
    except:
        pass
162 | |||
def extract_rpm(rpmfile, targetdir):
    """Extract the payload of 'rpmfile' into 'targetdir' via rpm2cpio | cpio.

    cpio extracts into the current directory, so we chdir into the target;
    the original code leaked that chdir on any exception -- the restore now
    lives in a 'finally' clause.
    """
    rpm2cpio = find_binary_path("rpm2cpio")
    cpio = find_binary_path("cpio")

    olddir = os.getcwd()
    os.chdir(targetdir)
    try:
        msger.verbose("Extract rpm file with cpio: %s" % rpmfile)
        p1 = subprocess.Popen([rpm2cpio, rpmfile], stdout=subprocess.PIPE)
        p2 = subprocess.Popen([cpio, "-idv"], stdin=p1.stdout,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # let rpm2cpio receive SIGPIPE if cpio exits early
        p1.stdout.close()
        (sout, serr) = p2.communicate()
        msger.verbose(sout or serr)
    finally:
        # always restore the previous working directory
        os.chdir(olddir)
178 | |||
def compressing(fpath, method):
    """Compress 'fpath' in place with gzip or bzip2 ('method' is 'gz' or
    'bz2').  Raises CreatorError on an unknown method or when the
    compressor exits non-zero."""
    comp_map = {
        "gz": "gzip",
        "bz2": "bzip2"
    }
    if method not in comp_map:
        raise CreatorError("Unsupport compress format: %s, valid values: %s"
                           % (method, ','.join(comp_map.keys())))

    cmd = find_binary_path(comp_map[method])
    if runner.show([cmd, "-f", fpath]) != 0:
        raise CreatorError("Failed to %s file: %s" % (comp_map[method], fpath))
191 | |||
def taring(dstfile, target):
    """Archive 'target' (a file, or a directory's contents) into the
    tarball 'dstfile'; the extension (.tar/.tar.gz/.tar.bz2/.tgz/.tbz)
    selects the compression applied afterwards."""
    import tarfile
    basen, ext = os.path.splitext(dstfile)
    comp = {".tar": None,
            ".gz": "gz",     # for .tar.gz
            ".bz2": "bz2",   # for .tar.bz2
            ".tgz": "gz",
            ".tbz": "bz2"}[ext]

    # specify tarball file path
    if not comp:
        tarpath = dstfile
    elif basen.endswith(".tar"):
        tarpath = basen
    else:
        tarpath = basen + ".tar"
    tar = tarfile.open(tarpath, 'w')

    if os.path.isdir(target):
        # archive the directory's entries at the archive root
        for entry in os.listdir(target):
            tar.add(os.path.join(target, entry), entry)
    else:
        tar.add(target, os.path.basename(target))
    tar.close()

    if comp:
        compressing(tarpath, comp)
        # when dstfile ext is ".tgz" and ".tbz", should rename
        if not basen.endswith(".tar"):
            shutil.move("%s.%s" % (tarpath, comp), dstfile)
222 | |||
def ziping(dstfile, target):
    """Pack 'target' into the DEFLATE-compressed zip archive 'dstfile'.
    For a directory only its top-level regular files are included
    (subdirectories are skipped)."""
    import zipfile
    zf = zipfile.ZipFile(dstfile, 'w', compression=zipfile.ZIP_DEFLATED)
    if os.path.isdir(target):
        for entry in os.listdir(target):
            fpath = os.path.join(target, entry)
            if os.path.isfile(fpath):
                zf.write(fpath, entry, zipfile.ZIP_DEFLATED)
    else:
        zf.write(target, os.path.basename(target), zipfile.ZIP_DEFLATED)
    zf.close()
235 | |||
# Dispatch table used by packing(): maps a destination-file extension to
# the archiver that produces it (taring for tar-based formats, ziping for
# zip).  packing() normalizes ".tar.gz"/".tar.bz2" before the lookup.
pack_formats = {
    ".tar": taring,
    ".tar.gz": taring,
    ".tar.bz2": taring,
    ".tgz": taring,
    ".tbz": taring,
    ".zip": ziping,
}
244 | |||
def packing(dstfile, target):
    """Pack 'target' into 'dstfile', dispatching on the destination file
    extension via the pack_formats table.  Raises CreatorError for an
    unsupported extension."""
    (base, ext) = os.path.splitext(dstfile)
    # fold the two-part extensions ".tar.gz" / ".tar.bz2" into one key
    if ext in (".gz", ".bz2") and base.endswith(".tar"):
        ext = ".tar" + ext

    if ext not in pack_formats:
        raise CreatorError("Unsupport pack format: %s, valid values: %s"
                           % (ext, ','.join(pack_formats.keys())))

    # func should be callable
    pack_formats[ext](dstfile, target)
255 | |||
def human_size(size):
    """Return a human readable string for a size in bytes, e.g. "1.5K".
    Non-positive sizes collapse to "0M"."""
    if size <= 0:
        return "0M"

    import math
    units = ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
    # log-based exponent kept on purpose: it reproduces the exact float
    # behaviour of the original implementation
    expo = int(math.log(size, 1024))
    mant = float(size / math.pow(1024, expo))
    return "{0:.1f}{1:s}".format(mant, units[expo])
267 | |||
def get_block_size(file_obj):
    """ Returns block size for file object 'file_obj'. Errors are indicated by
    the 'IOError' exception. """

    from fcntl import ioctl
    import struct

    # FIGETBSZ (ioctl number 2) asks the host file-system for the block
    # size backing the image file
    packed = ioctl(file_obj, 2, struct.pack('I', 0))
    return struct.unpack('I', packed)[0]
279 | |||
def check_space_pre_cp(src, dst):
    """Check whether disk space is enough before 'cp' like
    operations, else exception will be raised.
    """

    # get_file_size() reports megabytes, get_filesystem_avail() bytes
    needed = get_file_size(src) * 1024 * 1024
    avail = get_filesystem_avail(dst)
    if needed <= avail:
        return
    raise CreatorError("space on %s(%s) is not enough for about %s files"
                       % (dst, human_size(avail), human_size(needed)))
290 | |||
def calc_hashes(file_path, hash_names, start = 0, end = None):
    """ Calculate hashes for a file. The 'file_path' argument is the file
    to calculate hash functions for, 'start' and 'end' are the starting and
    ending file offset to calculate the hash functions for. The 'hash_names'
    argument is a list of hash names to calculate. Returns the list
    of calculated hash values in the hexadecimal form in the same order
    as 'hash_names'.
    """
    # idiom fix: 'is None', not '== None'
    if end is None:
        end = os.path.getsize(file_path)

    chunk_size = 65536
    to_read = end - start
    read = 0

    hashes = [hashlib.new(name) for name in hash_names]

    with open(file_path, "rb") as f:
        f.seek(start)

        while read < to_read:
            # shrink the final chunk so we never read past 'end'
            if read + chunk_size > to_read:
                chunk_size = to_read - read
            chunk = f.read(chunk_size)
            for hash_obj in hashes:
                hash_obj.update(chunk)
            read += chunk_size

    return [hash_obj.hexdigest() for hash_obj in hashes]
326 | |||
def get_md5sum(fpath):
    """Return the hex MD5 digest of the whole file 'fpath'."""
    md5sums = calc_hashes(fpath, ('md5', ))
    return md5sums[0]
329 | |||
330 | |||
def normalize_ksfile(ksconf, release, arch):
    '''
    Return the name of a normalized ks file in which the macro variables
    @BUILD_ID@ and @ARCH@ are replaced with real values.

    The original ks file is returned if no special macro is used, otherwise
    a temp file is created and returned, which will be deleted when the
    program exits normally.
    '''

    release = release or "latest"
    if not arch or re.match(r'i.86', arch):
        arch = "ia32"

    with open(ksconf) as f:
        content = f.read()

    # no macros -> hand back the original file untouched
    if "@ARCH@" not in content and "@BUILD_ID@" not in content:
        return ksconf

    msger.info("Substitute macro variable @BUILD_ID@/@ARCH@ in ks: %s" % ksconf)
    content = content.replace("@ARCH@", arch).replace("@BUILD_ID@", release)

    # write the substituted content to a temp file and hand that back
    fd, ksconf = tempfile.mkstemp(prefix=os.path.basename(ksconf))
    os.write(fd, content)
    os.close(fd)

    msger.debug('normalized ks file:%s' % ksconf)

    def remove_temp_ks():
        try:
            os.unlink(ksconf)
        except OSError as err:
            msger.warning('Failed to remove temp ks file:%s:%s' % (ksconf, err))

    import atexit
    atexit.register(remove_temp_ks)

    return ksconf
372 | |||
373 | |||
def _check_mic_chroot(rootdir):
    """Warn when 'rootdir' does not look like a MeeGo/Tizen chroot
    environment (no distro release file, or no kernel image in /boot)."""
    root = rootdir.rstrip('/')
    release_files = [root + name for name in ("/etc/moblin-release",
                                              "/etc/meego-release",
                                              "/etc/tizen-release")]

    if not any(os.path.exists(path) for path in release_files):
        msger.warning("Dir %s is not a MeeGo/Tizen chroot env" % rootdir)

    if not glob.glob(rootdir + "/boot/vmlinuz-*"):
        msger.warning("Failed to find kernel module under %s" % rootdir)

    return
389 | |||
def selinux_check(arch, fstypes):
    """
    Refuse to build arm or btrfs images while SELinux is enforcing.

    Returns silently when the 'getenforce' tool is unavailable (SELinux
    is then assumed to be absent).

    Raises:
        CreatorError: if SELinux is enforcing and either an arm arch or
            a btrfs fstype was requested.
    """
    try:
        getenforce = find_binary_path('getenforce')
    except CreatorError:
        return

    selinux_status = runner.outs([getenforce])
    if arch and arch.startswith("arm") and selinux_status == "Enforcing":
        raise CreatorError("Can't create arm image if selinux is enabled, "
                           "please run 'setenforce 0' to disable selinux")

    # Membership test instead of filter(): identical behavior on py2 and
    # still correct on py3, where a filter object is always truthy.
    if 'btrfs' in fstypes and selinux_status == "Enforcing":
        raise CreatorError("Can't create btrfs image if selinux is enabled,"
                           " please run 'setenforce 0' to disable selinux")
405 | |||
def get_image_type(path):
    """
    Classify the image at *path*, returning a mic image-type string.

    Detection order: directory -> "fs"; known filename extension;
    VirtualBox VDI header sniff; finally the output of file(1).

    Raises:
        CreatorError: if no heuristic recognizes the image.
    """
    def _get_extension_name(path):
        # Word characters after the final '.', or None if no extension.
        match = re.search("(?<=\.)\w+$", path)
        if match:
            return match.group(0)
        else:
            return None

    # A directory is treated as an unpacked root filesystem.
    if os.path.isdir(path):
        _check_mic_chroot(path)
        return "fs"

    # Well-known extensions map straight to image types.
    maptab = {
              "tar": "loop",
              "raw":"raw",
              "vmdk":"vmdk",
              "vdi":"vdi",
              "iso":"livecd",
              "usbimg":"liveusb",
             }

    extension = _get_extension_name(path)
    if extension in maptab:
        return maptab[extension]

    # No usable extension: sniff the first 1K for a VDI signature ...
    fd = open(path, "rb")
    file_header = fd.read(1024)
    fd.close()
    vdi_flag = "<<< Sun VirtualBox Disk Image >>>"
    if file_header[0:len(vdi_flag)] == vdi_flag:
        return maptab["vdi"]

    # ... then fall back to pattern-matching file(1)'s description.
    output = runner.outs(['file', path])
    isoptn = re.compile(r".*ISO 9660 CD-ROM filesystem.*(bootable).*")
    usbimgptn = re.compile(r".*x86 boot sector.*active.*")
    rawptn = re.compile(r".*x86 boot sector.*")
    vmdkptn = re.compile(r".*VMware. disk image.*")
    ext3fsimgptn = re.compile(r".*Linux.*ext3 filesystem data.*")
    ext4fsimgptn = re.compile(r".*Linux.*ext4 filesystem data.*")
    btrfsimgptn = re.compile(r".*BTRFS.*")
    # Order matters: the usbimg pattern is a stricter form of rawptn.
    if isoptn.match(output):
        return maptab["iso"]
    elif usbimgptn.match(output):
        return maptab["usbimg"]
    elif rawptn.match(output):
        return maptab["raw"]
    elif vmdkptn.match(output):
        return maptab["vmdk"]
    elif ext3fsimgptn.match(output):
        return "ext3fsimg"
    elif ext4fsimgptn.match(output):
        return "ext4fsimg"
    elif btrfsimgptn.match(output):
        return "btrfsimg"
    else:
        raise CreatorError("Cannot detect the type of image: %s" % path)
462 | |||
463 | |||
def get_file_size(filename):
    """ Return size in MB unit """
    def _du_megabytes(extra_opts):
        # Run 'du -s [extra_opts] -B 1M filename' and parse the size.
        cmd = ['du', "-s"] + extra_opts + ["-B", "1M", filename]
        rc, output = runner.runtool(cmd)
        if rc != 0:
            raise CreatorError("Failed to run: %s" % ' '.join(cmd))
        return int(output.split()[0])

    # Report whichever is larger: apparent size (-b) or disk usage.
    apparent = _du_megabytes(["-b"])
    on_disk = _du_megabytes([])
    return max(apparent, on_disk)
479 | |||
480 | |||
def get_filesystem_avail(fs):
    """Return the free space, in bytes, available to unprivileged users on fs."""
    stat = os.statvfs(fs)
    return stat.f_bsize * stat.f_bavail
484 | |||
def convert_image(srcimg, srcfmt, dstimg, dstfmt):
    """
    Convert disk image *srcimg* (vmdk or vdi) into raw image *dstimg*.

    Raises:
        CreatorError: on an unsupported format or a failed conversion.
    """
    # Only raw output is supported.
    if dstfmt != "raw":
        raise CreatorError("Invalid destination image format: %s" % dstfmt)
    msger.debug("converting %s image to %s" % (srcimg, dstimg))
    if srcfmt == "vmdk":
        path = find_binary_path("qemu-img")
        argv = [path, "convert", "-f", "vmdk", srcimg, "-O", dstfmt, dstimg]
    elif srcfmt == "vdi":
        path = find_binary_path("VBoxManage")
        argv = [path, "internalcommands", "converttoraw", srcimg, dstimg]
    else:
        raise CreatorError("Invalid source image format: %s" % srcfmt)

    rc = runner.show(argv)
    # Merged the redundant rc==0 / rc!=0 double test into one branch.
    if rc == 0:
        msger.debug("convert successful")
    else:
        raise CreatorError("Unable to convert disk to %s" % dstfmt)
504 | |||
def uncompress_squashfs(squashfsimg, outdir):
    """Unpack the filesystem from a squashfs image into outdir."""
    unsquashfs = find_binary_path("unsquashfs")
    rc = runner.show([unsquashfs, "-d", outdir, squashfsimg])
    if rc != 0:
        raise SquashfsError("Failed to uncompress %s." % squashfsimg)
512 | |||
def mkdtemp(dir = "/var/tmp", prefix = "wic-tmp-"):
    """ FIXME: use the dir in wic.conf instead """
    # Ensure the parent exists before delegating to tempfile.
    makedirs(dir)
    return tempfile.mkdtemp(prefix=prefix, dir=dir)
518 | |||
def get_repostrs_from_ks(ks):
    """
    Extract repo definitions from a parsed kickstart object.

    Returns a list of dicts, one per 'repo' command, containing only the
    attributes that are present and non-empty.  A repo without a name
    gets a temporary one derived from the md5 of its baseurl.
    """
    def _get_temp_reponame(baseurl):
        return "%s" % hashlib.md5(baseurl).hexdigest()

    kickstart_repos = []

    for repodata in ks.handler.repo.repoList:
        repo = {}
        # NOTE: 'proxypasswd' was listed twice upstream; duplicate dropped.
        for attr in ('name',
                     'baseurl',
                     'mirrorlist',
                     'includepkgs', # val is list
                     'excludepkgs', # val is list
                     'cost',    # int
                     'priority',# int
                     'save',
                     'proxy',
                     'proxyuser',
                     'proxypasswd',
                     'debuginfo',
                     'source',
                     'gpgkey',
                     'ssl_verify'):
            if hasattr(repodata, attr) and getattr(repodata, attr):
                repo[attr] = getattr(repodata, attr)

        if 'name' not in repo:
            repo['name'] = _get_temp_reponame(repodata.baseurl)

        kickstart_repos.append(repo)

    return kickstart_repos
554 | |||
def _get_uncompressed_data_from_url(url, filename, proxies):
    """Download url to filename, decompressing .gz/.bz2 files in place."""
    filename = myurlgrab(url, filename, proxies)
    for suffix, tool in ((".gz", 'gunzip'), (".bz2", 'bunzip2')):
        if filename.endswith(suffix):
            runner.quiet([tool, "-f", filename])
            filename = filename.replace(suffix, "")
            break
    return filename
567 | |||
def _get_metadata_from_repo(baseurl, proxies, cachedir, reponame, filename,
                            sumtype=None, checksum=None):
    # Fetch one repodata file into the cache, reusing the cached copy
    # when its checksum still matches; returns the local (uncompressed)
    # file path.
    url = os.path.join(baseurl, filename)
    filename_tmp = str("%s/%s/%s" % (cachedir, reponame, os.path.basename(filename)))
    # Cached files are stored uncompressed, so the cache lookup name has
    # any .gz/.bz2 suffix stripped.
    if os.path.splitext(filename_tmp)[1] in (".gz", ".bz2"):
        filename = os.path.splitext(filename_tmp)[0]
    else:
        filename = filename_tmp
    if sumtype and checksum and os.path.exists(filename):
        try:
            # e.g. "sha256sum"; if the tool is missing, just re-download.
            sumcmd = find_binary_path("%ssum" % sumtype)
        except:
            file_checksum = None
        else:
            file_checksum = runner.outs([sumcmd, filename]).split()[0]

        if file_checksum and file_checksum == checksum:
            return filename

    return _get_uncompressed_data_from_url(url,filename_tmp,proxies)
588 | |||
def get_metadata_from_repos(repos, cachedir):
    """
    Download and cache repomd plus primary/patterns/comps metadata for
    each repo dict in *repos*.  Returns a list of per-repo dicts holding
    local file paths; repos without primary metadata are skipped.
    """
    my_repo_metadata = []
    for repo in repos:
        reponame = repo['name']
        baseurl = repo['baseurl']


        if 'proxy' in repo:
            proxy = repo['proxy']
        else:
            proxy = get_proxy_for(baseurl)

        proxies = None
        if proxy:
            # Keyed by URL scheme, e.g. {"http": "http://proxy:port"}.
            proxies = {str(baseurl.split(":")[0]):str(proxy)}

        makedirs(os.path.join(cachedir, reponame))
        url = os.path.join(baseurl, "repodata/repomd.xml")
        filename = os.path.join(cachedir, reponame, 'repomd.xml')
        repomd = myurlgrab(url, filename, proxies)
        try:
            root = xmlparse(repomd)
        except SyntaxError:
            raise CreatorError("repomd.xml syntax error.")

        # XML namespace prefix "{...}" shared by all elements below.
        ns = root.getroot().tag
        ns = ns[0:ns.rindex("}")+1]

        filepaths = {}
        checksums = {}
        sumtypes = {}

        # Record location/checksum of the patterns metadata, if present.
        for elm in root.getiterator("%sdata" % ns):
            if elm.attrib["type"] == "patterns":
                filepaths['patterns'] = elm.find("%slocation" % ns).attrib['href']
                checksums['patterns'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['patterns'] = elm.find("%sopen-checksum" % ns).attrib['type']
                break

        # Same for the comps (package group) metadata.
        for elm in root.getiterator("%sdata" % ns):
            if elm.attrib["type"] in ("group_gz", "group"):
                filepaths['comps'] = elm.find("%slocation" % ns).attrib['href']
                checksums['comps'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['comps'] = elm.find("%sopen-checksum" % ns).attrib['type']
                break

        # Primary metadata (xml or sqlite db) is mandatory.
        primary_type = None
        for elm in root.getiterator("%sdata" % ns):
            if elm.attrib["type"] in ("primary_db", "primary"):
                primary_type = elm.attrib["type"]
                filepaths['primary'] = elm.find("%slocation" % ns).attrib['href']
                checksums['primary'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['primary'] = elm.find("%sopen-checksum" % ns).attrib['type']
                break

        if not primary_type:
            continue

        # Download each referenced metadata file (checksum-aware cache).
        for item in ("primary", "patterns", "comps"):
            if item not in filepaths:
                filepaths[item] = None
                continue
            if not filepaths[item]:
                continue
            filepaths[item] = _get_metadata_from_repo(baseurl,
                                                      proxies,
                                                      cachedir,
                                                      reponame,
                                                      filepaths[item],
                                                      sumtypes[item],
                                                      checksums[item])

        """ Get repo key """
        try:
            repokey = _get_metadata_from_repo(baseurl,
                                              proxies,
                                              cachedir,
                                              reponame,
                                              "repodata/repomd.xml.key")
        except CreatorError:
            # The signing key is optional; just note its absence.
            repokey = None
            msger.debug("\ncan't get %s/%s" % (baseurl, "repodata/repomd.xml.key"))

        my_repo_metadata.append({"name":reponame,
                                 "baseurl":baseurl,
                                 "repomd":repomd,
                                 "primary":filepaths['primary'],
                                 "cachedir":cachedir,
                                 "proxies":proxies,
                                 "patterns":filepaths['patterns'],
                                 "comps":filepaths['comps'],
                                 "repokey":repokey})

    return my_repo_metadata
683 | |||
def get_rpmver_in_repo(repometadata):
    # Return the newest 'rpm' package version found in any repo's primary
    # metadata (xml or sqlite), or None if rpm is not present anywhere.
    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            # Namespace prefix "{...}" reused by all find() calls below.
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]

            versionlist = []
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sname" % ns).text == 'rpm':
                    for node in elm.getchildren():
                        if node.tag == "%sversion" % ns:
                            versionlist.append(node.attrib['ver'])

            if versionlist:
                # Highest version wins, comparing dotted parts numerically.
                # NOTE(review): .next() and map-as-sort-key are python2-only.
                return reversed(
                    sorted(
                        versionlist,
                        key = lambda ver: map(int, ver.split('.')))).next()

        elif repo["primary"].endswith(".sqlite"):
            con = sqlite.connect(repo["primary"])
            # First row of the DESC ordering is the highest version.
            for row in con.execute("select version from packages where "
                                   "name=\"rpm\" ORDER by version DESC"):
                con.close()
                return row[0]

    return None
712 | |||
def get_arch(repometadata):
    # Collect the package architectures present in the repos' primary
    # metadata (excluding noarch/src) and return (uniq_arch, archlist),
    # where uniq_arch keeps only the most specific arch of each
    # compatible family according to rpmmisc.archPolicies.
    archlist = []
    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sarch" % ns).text not in ("noarch", "src"):
                    arch = elm.find("%sarch" % ns).text
                    if arch not in archlist:
                        archlist.append(arch)
        elif repo["primary"].endswith(".sqlite"):
            con = sqlite.connect(repo["primary"])
            for row in con.execute("select arch from packages where arch not in (\"src\", \"noarch\")"):
                if row[0] not in archlist:
                    archlist.append(row[0])

            con.close()

    # Deduplicate compatible arches: an arch is dropped when another
    # arch's policy string (colon-separated compat list) covers it.
    uniq_arch = []
    for i in range(len(archlist)):
        if archlist[i] not in rpmmisc.archPolicies.keys():
            continue
        need_append = True
        j = 0
        while j < len(uniq_arch):
            if archlist[i] in rpmmisc.archPolicies[uniq_arch[j]].split(':'):
                # Already covered by a more capable arch in uniq_arch.
                need_append = False
                break
            if uniq_arch[j] in rpmmisc.archPolicies[archlist[i]].split(':'):
                # New arch supersedes this entry: replace it once, remove
                # any further superseded entries.
                if need_append:
                    uniq_arch[j] = archlist[i]
                    need_append = False
                else:
                    uniq_arch.remove(uniq_arch[j])
                    continue
            j += 1
        if need_append:
            uniq_arch.append(archlist[i])

    return uniq_arch, archlist
755 | |||
def get_package(pkg, repometadata, arch = None):
    # Download package *pkg* from the repo holding its highest version
    # and return the local rpm path (cache-aware), or None if not found.
    # A concrete *arch* is expanded through rpmmisc.archPolicies into all
    # compatible arches plus 'noarch'.
    ver = ""
    target_repo = None
    if not arch:
        arches = []
    elif arch not in rpmmisc.archPolicies:
        arches = [arch]
    else:
        arches = rpmmisc.archPolicies[arch].split(':')
        arches.append('noarch')

    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sname" % ns).text == pkg:
                    if elm.find("%sarch" % ns).text in arches:
                        version = elm.find("%sversion" % ns)
                        tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
                        # NOTE(review): plain string comparison of
                        # "version-release", not true rpm version ordering.
                        if tmpver > ver:
                            ver = tmpver
                            location = elm.find("%slocation" % ns)
                            pkgpath = "%s" % location.attrib['href']
                            target_repo = repo
                        break
        if repo["primary"].endswith(".sqlite"):
            con = sqlite.connect(repo["primary"])
            if arch:
                sql = 'select version, release, location_href from packages ' \
                      'where name = "%s" and arch IN ("%s")' % \
                      (pkg, '","'.join(arches))
                for row in con.execute(sql):
                    tmpver = "%s-%s" % (row[0], row[1])
                    if tmpver > ver:
                        ver = tmpver
                        pkgpath = "%s" % row[2]
                        target_repo = repo
                    break
            else:
                sql = 'select version, release, location_href from packages ' \
                      'where name = "%s"' % pkg
                for row in con.execute(sql):
                    tmpver = "%s-%s" % (row[0], row[1])
                    if tmpver > ver:
                        ver = tmpver
                        pkgpath = "%s" % row[2]
                        target_repo = repo
                    break
            con.close()
    if target_repo:
        # Reuse a cached rpm when it passes integrity check, else refetch.
        makedirs("%s/packages/%s" % (target_repo["cachedir"], target_repo["name"]))
        url = os.path.join(target_repo["baseurl"], pkgpath)
        filename = str("%s/packages/%s/%s" % (target_repo["cachedir"], target_repo["name"], os.path.basename(pkgpath)))
        if os.path.exists(filename):
            ret = rpmmisc.checkRpmIntegrity('rpm', filename)
            if ret == 0:
                return filename

            msger.warning("package %s is damaged: %s" %
                          (os.path.basename(filename), filename))
            os.unlink(filename)

        pkg = myurlgrab(str(url), filename, target_repo["proxies"])
        return pkg
    else:
        return None
824 | |||
def get_source_name(pkg, repometadata):
    # Map a binary rpm filename to its source package name using the
    # repos' primary metadata; returns None when it cannot be resolved.

    def get_bin_name(pkg):
        # "name-ver-rel.arch.rpm" -> "name" (module-level RPM_RE).
        m = RPM_RE.match(pkg)
        if m:
            return m.group(1)
        return None

    def get_src_name(srpm):
        # "name-ver-rel.src.rpm" -> "name" (module-level SRPM_RE).
        m = SRPM_RE.match(srpm)
        if m:
            return m.group(1)
        return None

    ver = ""
    target_repo = None

    pkg_name = get_bin_name(pkg)
    if not pkg_name:
        return None

    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sname" % ns).text == pkg_name:
                    if elm.find("%sarch" % ns).text != "src":
                        version = elm.find("%sversion" % ns)
                        tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
                        # NOTE(review): string compare, not rpm ordering.
                        if tmpver > ver:
                            ver = tmpver
                            fmt = elm.find("%sformat" % ns)
                            if fmt:
                                # <format> children carry their own ns.
                                fns = fmt.getchildren()[0].tag
                                fns = fns[0:fns.rindex("}")+1]
                                pkgpath = fmt.find("%ssourcerpm" % fns).text
                                target_repo = repo
                        break

        if repo["primary"].endswith(".sqlite"):
            con = sqlite.connect(repo["primary"])
            for row in con.execute("select version, release, rpm_sourcerpm from packages where name = \"%s\" and arch != \"src\"" % pkg_name):
                tmpver = "%s-%s" % (row[0], row[1])
                if tmpver > ver:
                    pkgpath = "%s" % row[2]
                    target_repo = repo
                break
            con.close()
    if target_repo:
        return get_src_name(pkgpath)
    else:
        return None
879 | |||
def get_pkglist_in_patterns(group, patterns):
    """
    Return the package names required by pattern *group* in a patterns
    XML file; an empty list when the group or its requires are missing.
    """
    try:
        root = xmlparse(patterns)
    except SyntaxError:
        raise SyntaxError("%s syntax error." % patterns)

    # Locate the pattern whose <name> or <summary> matches the group.
    matched = None
    for elm in list(root.getroot()):
        ns = elm.tag
        ns = ns[0:ns.rindex("}")+1]
        if group in (elm.find("%sname" % ns).text,
                     elm.find("%ssummary" % ns).text):
            matched = elm
            break

    if matched is None:
        return []

    # Its first <*requires> child lists the member packages.
    requires = None
    for child in list(matched):
        if child.tag.endswith("requires"):
            requires = child
            break

    if requires is None:
        return []

    pkglist = []
    for entry in list(requires):
        pkgname = entry.attrib["name"]
        if pkgname not in pkglist:
            pkglist.append(pkgname)

    return pkglist
915 | |||
def get_pkglist_in_comps(group, comps):
    """
    Return the package names belonging to *group* in a comps XML file.

    The group is matched by its <id> or <name> element; an empty list is
    returned when the group is not found.

    Raises:
        SyntaxError: if the comps file cannot be parsed.
    """
    pkglist = []
    try:
        root = xmlparse(comps)
    except SyntaxError:
        raise SyntaxError("%s syntax error." % comps)

    group_elm = None
    for elm in root.getiterator("group"):
        # Avoid shadowing the builtin 'id' (was a local in the original).
        if group in (elm.find("id").text, elm.find("name").text):
            group_elm = elm
            break

    if group_elm is None:
        return pkglist

    # getiterator() already filters on the tag, so the original
    # endswith("packagereq") re-check (always true) is dropped; the
    # unused 'packagelist' lookup is dropped as well.
    for require in group_elm.getiterator("packagereq"):
        pkgname = require.text
        if pkgname not in pkglist:
            pkglist.append(pkgname)

    return pkglist
942 | |||
def is_statically_linked(binary):
    """True when file(1) reports *binary* as statically linked."""
    description = runner.outs(['file', binary])
    return ", statically linked, " in description
945 | |||
def setup_qemu_emulator(rootdir, arch):
    """
    Register a statically-linked qemu-arm with binfmt_misc and copy it
    into *rootdir* so arm binaries can run inside the chroot.

    Returns the emulator path as seen from inside the chroot.

    Raises:
        CreatorError: if no statically-linked qemu-arm is found, or it
            is too old for an armv7 target.
    """
    # mount binfmt_misc if it doesn't exist
    if not os.path.exists("/proc/sys/fs/binfmt_misc"):
        modprobecmd = find_binary_path("modprobe")
        runner.show([modprobecmd, "binfmt_misc"])
    if not os.path.exists("/proc/sys/fs/binfmt_misc/register"):
        mountcmd = find_binary_path("mount")
        runner.show([mountcmd, "-t", "binfmt_misc", "none", "/proc/sys/fs/binfmt_misc"])

    # qemu_emulator is a special case, we can't use find_binary_path
    # qemu emulator should be a statically-linked executable file
    qemu_emulator = "/usr/bin/qemu-arm"
    if not os.path.exists(qemu_emulator) or not is_statically_linked(qemu_emulator):
        qemu_emulator = "/usr/bin/qemu-arm-static"
    if not os.path.exists(qemu_emulator):
        raise CreatorError("Please install a statically-linked qemu-arm")

    # qemu emulator version check
    armv7_list = [arch for arch in rpmmisc.archPolicies.keys() if arch.startswith('armv7')]
    if arch in armv7_list:  # need qemu (>=0.13.0)
        qemuout = runner.outs([qemu_emulator, "-h"])
        m = re.search("version\s*([.\d]+)", qemuout)
        if m:
            qemu_version = m.group(1)
            # NOTE(review): string comparison of versions; fine for the
            # 0.x range this targets.
            if qemu_version < "0.13":
                raise CreatorError("Requires %s version >=0.13 for %s" % (qemu_emulator, arch))
        else:
            msger.warning("Can't get version info of %s, please make sure it's higher than 0.13.0" % qemu_emulator)

    # Copy the emulator into the chroot so the binfmt interpreter path
    # resolves from inside it.
    if not os.path.exists(rootdir + "/usr/bin"):
        makedirs(rootdir + "/usr/bin")
    shutil.copy(qemu_emulator, rootdir + "/usr/bin/qemu-arm-static")
    qemu_emulator = "/usr/bin/qemu-arm-static"

    # disable selinux, selinux will block qemu emulator to run
    if os.path.exists("/usr/sbin/setenforce"):
        msger.info('Try to disable selinux')
        runner.show(["/usr/sbin/setenforce", "0"])

    # unregister it if it has been registered and is a dynamically-linked executable
    node = "/proc/sys/fs/binfmt_misc/arm"
    if os.path.exists(node):
        # Writing "-1" removes an existing binfmt_misc entry.
        qemu_unregister_string = "-1\n"
        fd = open("/proc/sys/fs/binfmt_misc/arm", "w")
        fd.write(qemu_unregister_string)
        fd.close()

    # register qemu emulator for interpreting other arch executable file
    if not os.path.exists(node):
        # ELF magic/mask for 32-bit little-endian arm, interpreter set to
        # the static qemu binary inside the chroot.
        qemu_arm_string = ":arm:M::\\x7fELF\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00:\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfa\\xff\\xff\\xff:%s:\n" % qemu_emulator
        fd = open("/proc/sys/fs/binfmt_misc/register", "w")
        fd.write(qemu_arm_string)
        fd.close()

    return qemu_emulator
1001 | |||
def SrcpkgsDownload(pkgs, repometadata, instroot, cachedir):
    """
    Download the source rpms for *pkgs* into instroot/usr/src/SRPMS,
    preferring copies already present in *cachedir*.  Returns the list
    of srpm names/paths, or None when no "-source" repo is configured.
    """
    def get_source_repometadata(repometadata):
        # Only repos named "*-source" carry source rpms.
        src_repometadata=[]
        for repo in repometadata:
            if repo["name"].endswith("-source"):
                src_repometadata.append(repo)
        if src_repometadata:
            return src_repometadata
        return None

    def get_src_name(srpm):
        # "name-ver-rel.src.rpm" -> "name" (module-level SRPM_RE).
        m = SRPM_RE.match(srpm)
        if m:
            return m.group(1)
        return None

    src_repometadata = get_source_repometadata(repometadata)

    if not src_repometadata:
        msger.warning("No source repo found")
        return None

    src_pkgs = []
    lpkgs_dict = {}
    lpkgs_path = []
    # Index srpms already in the local cache by their source name.
    for repo in src_repometadata:
        cachepath = "%s/%s/packages/*.src.rpm" %(cachedir, repo["name"])
        lpkgs_path += glob.glob(cachepath)

    for lpkg in lpkgs_path:
        lpkg_name = get_src_name(os.path.basename(lpkg))
        lpkgs_dict[lpkg_name] = lpkg
    localpkgs = lpkgs_dict.keys()

    cached_count = 0
    destdir = instroot+'/usr/src/SRPMS'
    if not os.path.exists(destdir):
        os.makedirs(destdir)

    # Resolve each binary package to its (deduplicated) source name.
    srcpkgset = set()
    for _pkg in pkgs:
        srcpkg_name = get_source_name(_pkg, repometadata)
        if not srcpkg_name:
            continue
        srcpkgset.add(srcpkg_name)

    for pkg in list(srcpkgset):
        if pkg in localpkgs:
            # Cache hit: copy the cached srpm into the image tree.
            cached_count += 1
            shutil.copy(lpkgs_dict[pkg], destdir)
            src_pkgs.append(os.path.basename(lpkgs_dict[pkg]))
        else:
            # Cache miss: fetch from the source repos ('src' arch).
            src_pkg = get_package(pkg, src_repometadata, 'src')
            if src_pkg:
                shutil.copy(src_pkg, destdir)
                src_pkgs.append(src_pkg)
    msger.info("%d source packages gotten from cache" % cached_count)

    return src_pkgs
1061 | |||
def strip_end(text, suffix):
    """
    Return *text* with a trailing *suffix* removed, if present.

    An empty suffix now leaves the text unchanged; the previous
    unconditional text[:-len(suffix)] slice became text[:0] for an
    empty suffix and wrongly returned "".
    """
    if not suffix or not text.endswith(suffix):
        return text
    return text[:-len(suffix)]
diff --git a/scripts/lib/mic/utils/oe/__init__.py b/scripts/lib/mic/utils/oe/__init__.py new file mode 100644 index 0000000000..d10e802116 --- /dev/null +++ b/scripts/lib/mic/utils/oe/__init__.py | |||
@@ -0,0 +1,22 @@ | |||
1 | # | ||
2 | # OpenEmbedded mic utils library | ||
3 | # | ||
4 | # Copyright (c) 2013, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # AUTHORS | ||
21 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
22 | # | ||
diff --git a/scripts/lib/mic/utils/oe/misc.py b/scripts/lib/mic/utils/oe/misc.py new file mode 100644 index 0000000000..7ad3aa9685 --- /dev/null +++ b/scripts/lib/mic/utils/oe/misc.py | |||
@@ -0,0 +1,144 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2013, Intel Corporation. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This module provides a place to collect various mic-related utils | ||
22 | # for the OpenEmbedded Image Tools. | ||
23 | # | ||
24 | # AUTHORS | ||
25 | # Tom Zanussi <tom.zanussi (at] linux.intel.com> | ||
26 | # | ||
27 | |||
28 | from mic import msger | ||
29 | from mic.utils import runner | ||
30 | |||
def exec_cmd(cmd_and_args, as_shell = False, catch = 3):
    """
    Execute command, catching stderr, stdout

    Need to execute as_shell if the command uses wildcards
    """
    msger.debug("exec_cmd: %s" % cmd_and_args)
    args = cmd_and_args.split()
    msger.debug(args)

    # Shell form gets the raw string; otherwise pass the argv list.
    target = cmd_and_args if as_shell else args
    rc, out = runner.runtool(target, catch)
    out = out.strip()
    msger.debug("exec_cmd: output for %s (rc = %d): %s" % \
                (cmd_and_args, rc, out))

    if rc != 0:
        # We don't throw exception when return code is not 0, because
        # parted always fails to reload part table with loop devices. This
        # prevents us from distinguishing real errors based on return
        # code.
        msger.warning("WARNING: %s returned '%s' instead of 0" % (cmd_and_args, rc))

    return (rc, out)
57 | |||
58 | |||
def exec_cmd_quiet(cmd_and_args, as_shell = False):
    """
    Execute a command without capturing any output (catch level 0).

    Need to execute as_shell if the command uses wildcards
    """
    return exec_cmd(cmd_and_args, as_shell, catch = 0)
66 | |||
67 | |||
def exec_native_cmd(cmd_and_args, native_sysroot, catch = 3):
    """
    Execute native command, catching stderr, stdout

    Always executed through the shell, with the native sysroot's sbin,
    usr/sbin and usr/bin directories prepended to PATH so sysroot tools
    win over host ones.
    """
    sysroot_path = \
        "export PATH=%s/sbin:%s/usr/sbin:%s/usr/bin:$PATH" % \
        (native_sysroot, native_sysroot, native_sysroot)
    wrapped_cmd = "%s;%s" % (sysroot_path, cmd_and_args)
    msger.debug("exec_native_cmd: %s" % cmd_and_args)
    msger.debug(cmd_and_args.split())

    rc, out = exec_cmd(wrapped_cmd, True, catch)

    if rc == 127: # shell command-not-found
        msger.error("A native (host) program required to build the image "
                    "was not found (see details above). Please make sure "
                    "it's installed and try again.")

    return (rc, out)
93 | |||
94 | |||
def exec_native_cmd_quiet(cmd_and_args, native_sysroot):
    """
    Execute a native command without capturing output (catch level 0).

    Always need to execute native commands as_shell
    """
    return exec_native_cmd(cmd_and_args, native_sysroot, catch = 0)
104 | |||
105 | |||
# kickstart doesn't support variable substitution in commands, so this
107 | # is our current simplistic scheme for supporting that | ||
108 | |||
wks_vars = {}

def get_wks_var(key):
    """Return the value recorded for *key* (raises KeyError if absent)."""
    return wks_vars[key]

def add_wks_var(key, val):
    """Record *val* under *key* for later wks command substitution."""
    wks_vars[key] = val
116 | |||
117 | BOOTDD_EXTRA_SPACE = 16384 | ||
118 | IMAGE_EXTRA_SPACE = 10240 | ||
119 | |||
__bitbake_env_lines = ""

def set_bitbake_env_lines(bitbake_env_lines):
    """Cache the raw 'bitbake -e' output for later variable lookups."""
    global __bitbake_env_lines
    __bitbake_env_lines = bitbake_env_lines

def get_bitbake_env_lines():
    """Return the cached 'bitbake -e' output (empty string by default)."""
    return __bitbake_env_lines
128 | |||
def get_line_val(line, key):
    """
    Extract the value from a KEY="val" line.

    Returns None when the line does not define *key*.  The split is
    limited to the first '=' so values that themselves contain '='
    (e.g. PATH entries) are no longer truncated.
    """
    if line.startswith(key + "="):
        stripped_line = line.split('=', 1)[1]
        stripped_line = stripped_line.replace('\"', '')
        return stripped_line
    return None
138 | |||
def get_bitbake_var(key):
    """
    Return the value of *key* from the cached 'bitbake -e' output, or
    None when the variable is not defined.
    """
    for line in __bitbake_env_lines.split('\n'):
        # Evaluate once per line (the original parsed each line twice).
        val = get_line_val(line, key)
        if val:
            return val
    return None
diff --git a/scripts/lib/mic/utils/oe/package_manager.py b/scripts/lib/mic/utils/oe/package_manager.py new file mode 100644 index 0000000000..92ce98e2ce --- /dev/null +++ b/scripts/lib/mic/utils/oe/package_manager.py | |||
@@ -0,0 +1,810 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2014, Enea AB. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This implements the opkg package manager wrapper as a combination of | ||
22 | # meta/lib/oe/package_manager.py and bitbake/lib/bb/utils.py files and | ||
23 | # adaptation of those files to 'wic'. | ||
24 | # | ||
25 | # AUTHORS | ||
26 | # Adrian Calianu <adrian.calianu (at] enea.com> | ||
27 | # | ||
28 | # This file incorporates work covered by the following copyright and | ||
29 | # permission notice: | ||
30 | # | ||
31 | # meta/COPYING.GPLv2 (GPLv2) | ||
32 | # meta/COPYING.MIT (MIT) | ||
33 | # | ||
34 | # Copyright (C) 2004 Michael Lauer | ||
35 | # | ||
36 | # Permission to use, copy, modify, and/or distribute this software | ||
37 | # for any purpose with or without fee is hereby granted, provided | ||
38 | # that the above copyright notice and this permission notice appear | ||
39 | # in all copies. | ||
40 | # | ||
41 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL | ||
42 | # WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED | ||
43 | # WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE | ||
44 | # AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR | ||
45 | # CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS | ||
46 | # OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, | ||
47 | # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN | ||
48 | # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
49 | |||
50 | |||
51 | from abc import ABCMeta, abstractmethod | ||
52 | import os | ||
53 | import glob | ||
54 | import subprocess | ||
55 | import shutil | ||
56 | import multiprocessing | ||
57 | import re | ||
58 | import errno | ||
59 | import fcntl | ||
60 | |||
61 | from mic.utils.oe.misc import * | ||
62 | from mic import msger | ||
63 | |||
def mkdirhier(directory):
    """Create *directory* like 'mkdir -p': missing parents are created
    and an already-existing directory is not an error (unlike os.makedirs).
    """
    try:
        os.makedirs(directory)
    except OSError as err:
        # Swallow only "already exists"; propagate every other failure.
        if err.errno == errno.EEXIST:
            return
        raise err
74 | |||
def remove(path, recurse=False):
    """'rm -f' (or 'rm -rf' when recurse=True) over a glob pattern.

    A falsy *path* is a no-op; files that vanished already are ignored.
    """
    if not path:
        return

    if recurse:
        # shutil.rmtree(name) would be ideal but it is too slow.
        subprocess.call(['rm', '-rf'] + glob.glob(path))
        return

    for victim in glob.glob(path):
        try:
            os.unlink(victim)
        except OSError as err:
            # '-f' semantics: a file that is already gone is not an error.
            if err.errno == errno.ENOENT:
                continue
            raise
89 | |||
def lockfile(name, shared=False, retry=True):
    """
    Use the file *name* as a lock file, return when the lock has been
    acquired.  Returns the open file object to pass to unlockfile(), or
    None when retry=False and the lock could not be taken immediately.

    shared -- take a shared (read) lock instead of an exclusive one
    retry  -- block until the lock is acquired; otherwise fail fast
    """
    dirname = os.path.dirname(name)
    mkdirhier(dirname)

    if not os.access(dirname, os.W_OK):
        # BUGFIX: this used logger.error() + sys.exit(1), but neither
        # 'logger' nor 'sys' exists in this module (NameError).  Use the
        # module's msger.error(), which aborts on its own.
        msger.error("Unable to acquire lock '%s', directory is not writable" %
                    name)

    op = fcntl.LOCK_EX
    if shared:
        op = fcntl.LOCK_SH
    if not retry:
        op = op | fcntl.LOCK_NB

    while True:
        # If we leave the lockfiles lying around there is no problem
        # but we should clean up after ourselves. This gives potential
        # for races though. To work around this, when we acquire the lock
        # we check the file we locked was still the lock file on disk,
        # by comparing inode numbers. If they don't match or the lockfile
        # no longer exists, we start again.

        # This implementation is unfair since the last person to request the
        # lock is the most likely to win it.

        try:
            lf = open(name, 'a+')
            fileno = lf.fileno()
            fcntl.flock(fileno, op)
            statinfo = os.fstat(fileno)
            if os.path.exists(lf.name):
                statinfo2 = os.stat(lf.name)
                if statinfo.st_ino == statinfo2.st_ino:
                    return lf
            lf.close()
        except Exception:
            try:
                lf.close()
            except Exception:
                pass
            pass
        if not retry:
            return None
138 | |||
def unlockfile(lf):
    """Release a lock taken with lockfile() and delete the lock file."""
    try:
        # A shared lock must be promoted to exclusive before the lockfile
        # is unlinked, so nobody else holds it while the file disappears.
        # Failure to promote (or unlink) is deliberately ignored.
        fcntl.flock(lf.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        os.unlink(lf.name)
    except (IOError, OSError):
        pass
    fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
    lf.close()
152 | |||
def which(path, item, direction = 0, history = False):
    """
    Locate *item* in the ':'-separated *path*.

    direction -- non-zero searches the components last-to-first
    history   -- also return the list of every candidate tried

    Returns the match (made absolute if it was relative), or "" when not
    found; with history=True a (match, candidates) tuple is returned.
    """
    candidates = []
    dirs = (path or "").split(':')
    if direction != 0:
        dirs.reverse()

    found = ""
    for directory in dirs:
        candidate = os.path.join(directory, item)
        candidates.append(candidate)
        if os.path.exists(candidate):
            if not os.path.isabs(candidate):
                candidate = os.path.abspath(candidate)
            found = candidate
            break

    if history:
        return found, candidates
    return found
176 | |||
177 | |||
178 | |||
# Shared by all PM backends so index files can be created in parallel
# (it is handed to multiprocessing.Pool, hence the single argument).
def wic_create_index(arg):
    """Run one index-creation shell command; return an error string or None."""
    index_cmd = arg

    try:
        msger.info("Executing '%s' ..." % index_cmd)
        subprocess.check_output(index_cmd, stderr=subprocess.STDOUT,
                                shell=True)
    except subprocess.CalledProcessError as err:
        return ("Index creation command '%s' failed with return code %d:\n%s" %
                (err.cmd, err.returncode, err.output))

    return None
191 | |||
192 | |||
class WicIndexer(object):
    """Abstract base for package-index writers (one subclass per backend)."""
    __metaclass__ = ABCMeta

    def __init__(self, d, deploy_dir):
        # d          -- mapping of configuration variables (has_key/[] access)
        # deploy_dir -- directory holding the deployed packages
        self.d = d
        self.deploy_dir = deploy_dir

    @abstractmethod
    def write_index(self):
        """Create/update the package index files under deploy_dir."""
        pass
203 | |||
class WicOpkgIndexer(WicIndexer):
    """Builds opkg 'Packages' index files for every known package arch."""

    def write_index(self):
        """Generate the indexes in parallel; return an error string or None."""
        opkg_index_cmd = which(os.getenv('PATH'), "opkg-make-index")

        # A top-level Packages file must always exist, even if empty.
        top_index = os.path.join(self.deploy_dir, "Packages")
        if not os.path.exists(top_index):
            open(top_index, "w").close()

        index_cmds = []
        for arch_var in ("ALL_MULTILIB_PACKAGE_ARCHS",
                         "SDK_PACKAGE_ARCHS",
                         "MULTILIB_ARCHS"):
            if not self.d.has_key(arch_var):
                continue
            archs = self.d[arch_var]
            if archs is None:
                continue

            for arch in archs.split():
                pkgs_dir = os.path.join(self.deploy_dir, arch)
                pkgs_file = os.path.join(pkgs_dir, "Packages")

                if not os.path.isdir(pkgs_dir):
                    continue

                if not os.path.exists(pkgs_file):
                    open(pkgs_file, "w").close()

                index_cmds.append('%s -r %s -p %s -m %s' %
                                  (opkg_index_cmd, pkgs_file, pkgs_file,
                                   pkgs_dir))

        if not index_cmds:
            msger.info("There are no packages in %s!" % self.deploy_dir)
            return

        # Fan the index commands out over all CPUs.
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        results = list(pool.imap(wic_create_index, index_cmds))
        pool.close()
        pool.join()

        for result in results:
            if result is not None:
                return result
251 | |||
class WicPkgsList(object):
    """Abstract base for listing the packages installed in a rootfs."""
    __metaclass__ = ABCMeta

    def __init__(self, d, rootfs_dir):
        # d          -- mapping of configuration variables (has_key/[] access)
        # rootfs_dir -- root of the target filesystem to inspect
        self.d = d
        self.rootfs_dir = rootfs_dir

    @abstractmethod
    def list(self, format=None):
        """Return the installed-package listing in the requested format."""
        pass
262 | |||
263 | |||
class WicOpkgPkgsList(WicPkgsList):
    """Queries an opkg-managed rootfs for its installed packages."""

    def __init__(self, d, rootfs_dir, config_file):
        super(WicOpkgPkgsList, self).__init__(d, rootfs_dir)

        self.opkg_cmd = which(os.getenv('PATH'), "opkg-cl")
        self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
        if self.d.has_key("OPKG_ARGS"):
            self.opkg_args += self.d["OPKG_ARGS"]

    def list(self, format=None):
        """
        Return the installed packages, one per line.

        format -- "arch", "file", "ver" or "deps" pipe the opkg status
                  output through opkg-query-helper.py; anything else
                  yields just the bare package names.
        """
        opkg_query_cmd = which(os.getenv('PATH'), "opkg-query-helper.py")

        # Flag handed to the query helper for each supported format
        # ("deps" runs the helper with no flag at all).
        helper_flag = {"arch": " -a",
                       "file": " -f",
                       "ver": " -v",
                       "deps": ""}
        if format in helper_flag:
            cmd = "%s %s status | %s%s" % (self.opkg_cmd, self.opkg_args,
                                           opkg_query_cmd,
                                           helper_flag[format])
        else:
            cmd = "%s %s list_installed | cut -d' ' -f1" % \
                (self.opkg_cmd, self.opkg_args)

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
                                             shell=True).strip()
        except subprocess.CalledProcessError as e:
            msger.error("Cannot get the installed packages list. Command '%s' "
                        "returned %d:\n%s" % (cmd, e.returncode, e.output))

        if output and format == "file":
            # Rewrite "pkg file arch" lines so the file column becomes an
            # absolute path whenever the package file exists in the rootfs.
            rewritten = []
            for line in output.split('\n'):
                pkg, pkg_file, pkg_arch = line.split()
                full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
                if os.path.exists(full_path):
                    rewritten.append("%s %s %s\n" % (pkg, full_path, pkg_arch))
                else:
                    rewritten.append("%s %s %s\n" % (pkg, pkg_file, pkg_arch))

            output = ''.join(rewritten)

        return output
311 | |||
312 | |||
class WicPackageManager(object):
    """
    Abstract base class for wic package managers; do not instantiate
    this directly, use a concrete backend such as WicOpkgPM.

    d              -- mapping of configuration variables (has_key/[] access)
    pseudo         -- pseudo command prefix prepended to native commands
    native_sysroot -- path to the native sysroot holding the tools
    """
    __metaclass__ = ABCMeta

    def __init__(self, d, pseudo, native_sysroot):
        self.d = d
        self.deploy_dir = None
        self.deploy_lock = None
        if self.d.has_key('PACKAGE_FEED_URIS'):
            self.feed_uris = self.d['PACKAGE_FEED_URIS']
        else:
            self.feed_uris = ""
        self.pseudo = pseudo
        self.native_sysroot = native_sysroot

    @abstractmethod
    def update(self):
        """Update the package manager package database."""
        pass

    @abstractmethod
    def install(self, pkgs, attempt_only=False):
        """
        Install a list of packages. 'pkgs' is a list object. If
        'attempt_only' is True, installation failures are ignored.
        """
        pass

    @abstractmethod
    def remove(self, pkgs, with_dependencies=True):
        """
        Remove a list of packages. 'pkgs' is a list object. If
        'with_dependencies' is False, any dependencies are left in place.
        """
        pass

    @abstractmethod
    def write_index(self):
        """Create the package index files."""
        pass

    @abstractmethod
    def remove_packaging_data(self):
        """Wipe the backend's packaging state from the rootfs."""
        pass

    @abstractmethod
    def list_installed(self, format=None):
        """Return the installed-package listing in the requested format."""
        pass

    @abstractmethod
    def insert_feeds_uris(self):
        """Write the configured package feed URIs into the rootfs config."""
        pass

    def install_complementary(self, globs=None):
        """
        Install complementary packages based upon the list of currently
        installed packages e.g. locales, *-dev, *-dbg, etc. This will only
        attempt to install these packages, if they don't exist then no
        error will occur.  Note: every backend needs to call this function
        explicitly after the normal package installation.
        """
        # we need to write the list of installed packages to a file because
        # the oe-pkgdata-util reads it from a file
        if self.d.has_key('WORKDIR'):
            installed_pkgs_file = os.path.join(self.d['WORKDIR'],
                                               "installed_pkgs.txt")
        else:
            msger.error("No WORKDIR provided!")

        with open(installed_pkgs_file, "w+") as installed_pkgs:
            installed_pkgs.write(self.list_installed("arch"))

        if globs is None:
            if self.d.has_key('IMAGE_INSTALL_COMPLEMENTARY'):
                globs = self.d['IMAGE_INSTALL_COMPLEMENTARY']
            split_linguas = set()

            if self.d.has_key('IMAGE_LINGUAS'):
                for translation in self.d['IMAGE_LINGUAS'].split():
                    split_linguas.add(translation)
                    split_linguas.add(translation.split('-')[0])

            split_linguas = sorted(split_linguas)

            # BUGFIX: 'globs' can still be None here (when
            # IMAGE_INSTALL_COMPLEMENTARY is unset); appending the locale
            # globs to None used to raise TypeError whenever IMAGE_LINGUAS
            # was set.
            if globs is not None:
                for lang in split_linguas:
                    globs += " *-locale-%s" % lang

        if globs is None:
            return

        if not self.d.has_key('PKGDATA_DIR'):
            msger.error("No PKGDATA_DIR provided!")

        # BUGFIX: the command used to be built as a *list*, but
        # exec_native_cmd() takes a single shell string and
        # 'self.pseudo + cmd' raised TypeError (str + list).  Build a
        # string like every other call site; quote the globs so they stay
        # one argument to oe-pkgdata-util.
        cmd = "%s glob %s %s '%s'" % \
            (which(os.getenv('PATH'), "oe-pkgdata-util"),
             self.d['PKGDATA_DIR'], installed_pkgs_file, globs)

        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
        if rc != 0:
            msger.error("Could not compute complementary packages list. "
                        "Command '%s' returned %d" % (cmd, rc))

        self.install(out.split(), attempt_only=True)

    def deploy_dir_lock(self):
        """Take the exclusive deploy-directory lock (deploy.lock)."""
        if self.deploy_dir is None:
            raise RuntimeError("deploy_dir is not set!")

        lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")

        self.deploy_lock = lockfile(lock_file_name)

    def deploy_dir_unlock(self):
        """Release the deploy-directory lock, if held."""
        if self.deploy_lock is None:
            return

        unlockfile(self.deploy_lock)

        self.deploy_lock = None
440 | |||
441 | |||
class WicOpkgPM(WicPackageManager):
    """
    opkg-based package manager backend for wic.

    target_rootfs  -- rootfs the packages are installed into
    config_file    -- opkg configuration file to generate and use
    archs          -- space-separated package architectures, lowest
                      priority first
    pseudo         -- pseudo command prefix for native commands
    native_sysroot -- native sysroot containing opkg-cl etc.
    task_name      -- tag used for the saved packaging-data directory
    """

    def __init__(self, d, target_rootfs, config_file, archs, pseudo,
                 native_sysroot, task_name='target'):
        super(WicOpkgPM, self).__init__(d, pseudo, native_sysroot)

        self.target_rootfs = target_rootfs
        self.config_file = config_file
        self.pkg_archs = archs
        self.task_name = task_name

        if self.d.has_key("DEPLOY_DIR_IPK"):
            self.deploy_dir = self.d["DEPLOY_DIR_IPK"]

        self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
        self.opkg_cmd = which(os.getenv('PATH'), "opkg-cl")
        self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs)
        if self.d.has_key("OPKG_ARGS"):
            self.opkg_args += self.d["OPKG_ARGS"]

        if self.d.has_key('OPKGLIBDIR'):
            opkg_lib_dir = self.d['OPKGLIBDIR']
        else:
            opkg_lib_dir = ""

        # BUGFIX: opkg_lib_dir[0] raised IndexError when OPKGLIBDIR was
        # unset (empty string); startswith() is safe on "".
        if opkg_lib_dir.startswith("/"):
            opkg_lib_dir = opkg_lib_dir[1:]

        self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")

        mkdirhier(self.opkg_dir)

        if self.d.has_key("TMPDIR"):
            tmp_dir = self.d["TMPDIR"]
        else:
            tmp_dir = ""

        self.saved_opkg_dir = '%s/saved/%s' % (tmp_dir, self.task_name)
        if not os.path.exists('%s/saved' % tmp_dir):
            mkdirhier('%s/saved' % tmp_dir)

        # BUGFIX: the custom (build-from-feeds) config is only meant for
        # BUILD_IMAGES_FROM_FEEDS == "1"; the original condition also
        # selected it when the variable was merely unset.
        if self.d.has_key('BUILD_IMAGES_FROM_FEEDS') and \
           self.d['BUILD_IMAGES_FROM_FEEDS'] == "1":
            self._create_custom_config()
        else:
            self._create_config()

        self.indexer = WicOpkgIndexer(self.d, self.deploy_dir)

    def mark_packages(self, status_tag, packages=None):
        """
        Change a package's status in the /var/lib/opkg/status file.  If
        'packages' is None then the new status is applied to all packages.
        """
        status_file = os.path.join(self.opkg_dir, "status")

        with open(status_file, "r") as sf:
            with open(status_file + ".tmp", "w+") as tmp_sf:
                if packages is None:
                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                                        r"Package: \1\n\2Status: \3%s" % status_tag,
                                        sf.read()))
                else:
                    if type(packages).__name__ != "list":
                        raise TypeError("'packages' should be a list object")

                    status = sf.read()
                    for pkg in packages:
                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                                        status)

                    tmp_sf.write(status)

        os.rename(status_file + ".tmp", status_file)

    def _create_custom_config(self):
        """Write an opkg config that installs from remote/deploy feeds."""
        msger.info("Building from feeds activated!")

        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            if self.d.has_key('IPK_FEED_URIS'):
                ipk_feed_uris = self.d['IPK_FEED_URIS']
            else:
                ipk_feed_uris = ""

            for line in ipk_feed_uris.split():
                feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)

                if feed_match is not None:
                    feed_name = feed_match.group(1)
                    feed_uri = feed_match.group(2)

                    msger.info("Add %s feed with URL %s" % (feed_name, feed_uri))

                    config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))

            # Allow to use package deploy directory contents as quick
            # devel-testing feed. This creates individual feed configs for
            # each arch subdir of those specified as compatible for the
            # current machine.
            # NOTE: Development-helper feature, NOT a full-fledged feed.
            if self.d.has_key('FEED_DEPLOYDIR_BASE_URI'):
                feed_deploydir_base_dir = self.d['FEED_DEPLOYDIR_BASE_URI']
            else:
                feed_deploydir_base_dir = ""

            if feed_deploydir_base_dir != "":
                for arch in self.pkg_archs.split():
                    if self.d.has_key("sysconfdir"):
                        sysconfdir = self.d["sysconfdir"]
                    else:
                        sysconfdir = None

                    cfg_file_name = os.path.join(self.target_rootfs,
                                                 sysconfdir,
                                                 "opkg",
                                                 "local-%s-feed.conf" % arch)

                    with open(cfg_file_name, "w+") as cfg_file:
                        # BUGFIX: the format arguments were passed as
                        # extra arguments to write() instead of to the %
                        # operator, raising TypeError at runtime; also
                        # terminate the config line properly.
                        cfg_file.write("src/gz local-%s %s/%s\n" %
                                       (arch,
                                        feed_deploydir_base_dir,
                                        arch))

    def _create_config(self):
        """Write the standard opkg config pointing at the deploy directory."""
        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            config_file.write("src oe file:%s\n" % self.deploy_dir)

            for arch in self.pkg_archs.split():
                pkgs_dir = os.path.join(self.deploy_dir, arch)
                if os.path.isdir(pkgs_dir):
                    config_file.write("src oe-%s file:%s\n" %
                                      (arch, pkgs_dir))

    def insert_feeds_uris(self):
        """Append PACKAGE_FEED_URIS-derived channels to the rootfs config."""
        if self.feed_uris == "":
            return

        rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
                                     % self.target_rootfs)

        with open(rootfs_config, "w+") as config_file:
            uri_iterator = 0
            for uri in self.feed_uris.split():
                config_file.write("src/gz url-%d %s/ipk\n" %
                                  (uri_iterator, uri))

                for arch in self.pkg_archs.split():
                    if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                        continue
                    msger.info('Note: adding opkg channel url-%s-%d (%s)' %
                               (arch, uri_iterator, uri))

                    config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" %
                                      (arch, uri_iterator, uri, arch))
                uri_iterator += 1

    def update(self):
        """Refresh the opkg package index ('opkg update'), under lock."""
        self.deploy_dir_lock()

        cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)

        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
        if rc != 0:
            self.deploy_dir_unlock()
            msger.error("Unable to update the package index files. Command '%s' "
                        "returned %d" % (cmd, rc))

        self.deploy_dir_unlock()

    def install(self, pkgs, attempt_only=False):
        """Install 'pkgs' (list) into the target rootfs via opkg-cl."""
        if attempt_only and len(pkgs) == 0:
            return

        cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args,
                                    ' '.join(pkgs))

        # Environment consumed by the offline postinstall/intercept
        # machinery that opkg triggers.
        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        if self.d.has_key('WORKDIR'):
            os.environ['INTERCEPT_DIR'] = os.path.join(self.d['WORKDIR'],
                                                       "intercept_scripts")
        else:
            os.environ['INTERCEPT_DIR'] = "."
            msger.warning("No WORKDIR provided!")

        if self.d.has_key('STAGING_DIR_NATIVE'):
            os.environ['NATIVE_ROOT'] = self.d['STAGING_DIR_NATIVE']
        else:
            msger.error("No STAGING_DIR_NATIVE provided!")

        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
        if rc != 0:
            msger.error("Unable to install packages. "
                        "Command '%s' returned %d" % (cmd, rc))

    def remove(self, pkgs, with_dependencies=True):
        """Remove 'pkgs' (list); optionally also their dependent packages."""
        if with_dependencies:
            cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
        else:
            cmd = "%s %s --force-depends remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))

        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
        if rc != 0:
            msger.error("Unable to remove packages. Command '%s' "
                        "returned %d" % (cmd, rc))

    def write_index(self):
        """Regenerate the ipk package index, under the deploy-dir lock."""
        self.deploy_dir_lock()

        result = self.indexer.write_index()

        self.deploy_dir_unlock()

        if result is not None:
            msger.error(result)

    def remove_packaging_data(self):
        """Wipe the opkg state directory (recreated empty for the PM lock)."""
        remove(self.opkg_dir, True)
        # create the directory back, it's needed by PM lock
        mkdirhier(self.opkg_dir)

    def list_installed(self, format=None):
        """Return the installed-package listing (see WicOpkgPkgsList.list)."""
        return WicOpkgPkgsList(self.d, self.target_rootfs,
                               self.config_file).list(format)

    def handle_bad_recommendations(self):
        """Pre-seed the status file so BAD_RECOMMENDATIONS stay uninstalled."""
        if self.d.has_key("BAD_RECOMMENDATIONS"):
            bad_recommendations = self.d["BAD_RECOMMENDATIONS"]
        else:
            bad_recommendations = ""

        if bad_recommendations.strip() == "":
            return

        status_file = os.path.join(self.opkg_dir, "status")

        # If status file existed, it means the bad recommendations has already
        # been handled
        if os.path.exists(status_file):
            return

        cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)

        with open(status_file, "w+") as status:
            for pkg in bad_recommendations.split():
                pkg_info = cmd + pkg

                try:
                    output = subprocess.check_output(pkg_info.split(),
                                                     stderr=subprocess.STDOUT).strip()
                except subprocess.CalledProcessError as e:
                    msger.error("Cannot get package info. Command '%s' "
                                "returned %d:\n%s" % (pkg_info, e.returncode,
                                                      e.output))

                if output == "":
                    msger.info("Ignored bad recommendation: '%s' is "
                               "not a package" % pkg)
                    continue

                for line in output.split('\n'):
                    if line.startswith("Status:"):
                        status.write("Status: deinstall hold not-installed\n")
                    else:
                        status.write(line + "\n")

    def dummy_install(self, pkgs):
        """
        Dummy-install 'pkgs' ('opkg --noaction') in a scratch rootfs and
        return the log of the output.
        """
        if len(pkgs) == 0:
            return

        # Create an temp dir as opkg root for dummy installation
        if self.d.has_key("TMPDIR"):
            tmp_dir = self.d["TMPDIR"]
        else:
            tmp_dir = "."
            msger.warning("No TMPDIR provided!")

        temp_rootfs = '%s/opkg' % tmp_dir
        temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
        mkdirhier(temp_opkg_dir)

        opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
        if self.d.has_key("OPKG_ARGS"):
            opkg_args += self.d["OPKG_ARGS"]

        cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            msger.error("Unable to update. Command '%s' "
                        "returned %d:\n%s" % (cmd, e.returncode, e.output))

        # Dummy installation
        cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
                                                opkg_args,
                                                ' '.join(pkgs))
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
                                             shell=True)
        except subprocess.CalledProcessError as e:
            msger.error("Unable to dummy install packages. Command '%s' "
                        "returned %d:\n%s" % (cmd, e.returncode, e.output))

        remove(temp_rootfs, True)

        return output

    def backup_packaging_data(self):
        """Snapshot the opkg state dir for incremental ipk image generation."""
        if os.path.exists(self.saved_opkg_dir):
            remove(self.saved_opkg_dir, True)
        shutil.copytree(self.opkg_dir,
                        self.saved_opkg_dir,
                        symlinks=True)

    def recover_packaging_data(self):
        """Restore the opkg state dir from the last backup, if one exists."""
        if os.path.exists(self.saved_opkg_dir):
            if os.path.exists(self.opkg_dir):
                remove(self.opkg_dir, True)

            msger.info('Recover packaging data')
            shutil.copytree(self.saved_opkg_dir,
                            self.opkg_dir,
                            symlinks=True)
781 | |||
782 | |||
def wic_generate_index_files(d):
    """
    Generate package index files for every configured package backend.

    d -- mapping of configuration variables (has_key/[] access);
         PACKAGE_CLASSES selects the backends, DEPLOY_DIR_IPK locates
         the ipk deploy directory.
    """
    if d.has_key('PACKAGE_CLASSES'):
        classes = d['PACKAGE_CLASSES'].replace("package_", "").split()
    else:
        # BUGFIX: was "" (a str) -- keep the type consistent with the
        # split() branch above.
        classes = []
        msger.warning("No PACKAGE_CLASSES provided!")

    if d.has_key('DEPLOY_DIR_IPK'):
        deploy_dir_ipk = d['DEPLOY_DIR_IPK']
    else:
        deploy_dir_ipk = None
        msger.warning("No DEPLOY_DIR_IPK provided!")

    indexer_map = {
        "ipk": (WicOpkgIndexer, deploy_dir_ipk)
    }

    result = None

    for pkg_class in classes:
        if not pkg_class in indexer_map:
            continue

        indexer, deploy_dir = indexer_map[pkg_class]
        # BUGFIX: deploy_dir may be None (missing DEPLOY_DIR_IPK), which
        # os.path.exists() raises TypeError on -- skip such backends.
        if deploy_dir and os.path.exists(deploy_dir):
            result = indexer(d, deploy_dir).write_index()

        if result is not None:
            msger.error(result)
diff --git a/scripts/lib/mic/utils/partitionedfs.py b/scripts/lib/mic/utils/partitionedfs.py new file mode 100644 index 0000000000..6607466a83 --- /dev/null +++ b/scripts/lib/mic/utils/partitionedfs.py | |||
@@ -0,0 +1,782 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2009, 2010, 2011 Intel, Inc. | ||
4 | # Copyright (c) 2007, 2008 Red Hat, Inc. | ||
5 | # Copyright (c) 2008 Daniel P. Berrange | ||
6 | # Copyright (c) 2008 David P. Huff | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or modify it | ||
9 | # under the terms of the GNU General Public License as published by the Free | ||
10 | # Software Foundation; version 2 of the License | ||
11 | # | ||
12 | # This program is distributed in the hope that it will be useful, but | ||
13 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
14 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
15 | # for more details. | ||
16 | # | ||
17 | # You should have received a copy of the GNU General Public License along | ||
18 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
19 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | |||
21 | import os | ||
22 | |||
23 | from mic import msger | ||
24 | from mic.utils import runner | ||
25 | from mic.utils.errors import MountError | ||
26 | from mic.utils.fs_related import * | ||
27 | from mic.utils.gpt_parser import GptParser | ||
28 | from mic.utils.oe.misc import * | ||
29 | |||
30 | # Overhead of the MBR partitioning scheme (just one sector) | ||
31 | MBR_OVERHEAD = 1 | ||
32 | # Overhead of the GPT partitioning scheme | ||
33 | GPT_OVERHEAD = 34 | ||
34 | |||
35 | # Size of a sector in bytes | ||
36 | SECTOR_SIZE = 512 | ||
37 | |||
38 | class PartitionedMount(Mount): | ||
    def __init__(self, mountdir, skipformat = False):
        """Mount implementation that carves one or more disk images into
        partitions, maps them with kpartx and mounts them under
        'mountdir'.  If 'skipformat' is set, existing partition tables
        and filesystems are reused instead of re-created."""
        Mount.__init__(self, mountdir)
        self.disks = {}          # disk name -> bookkeeping dict (see __add_disk)
        self.partitions = []     # partition dicts in .ks order (see add_partition)
        self.subvolumes = []     # btrfs subvolume dicts
        self.mapped = False      # True once kpartx mappings exist
        self.mountOrder = []     # mountpoints in mount order
        self.unmountOrder = []   # mountpoints in reverse (unmount) order
        self.parted = find_binary_path("parted")
        self.btrfscmd=None       # path to the btrfs tool, resolved lazily
        self.skipformat = skipformat
        # When re-using an already formatted image, snapshots already exist.
        self.snapshot_created = self.skipformat
        # Size of a sector used in calculations
        self.sector_size = SECTOR_SIZE
        self._partitions_layed_out = False  # guards against late add_* calls
54 | |||
55 | def __add_disk(self, disk_name): | ||
56 | """ Add a disk 'disk_name' to the internal list of disks. Note, | ||
57 | 'disk_name' is the name of the disk in the target system | ||
58 | (e.g., sdb). """ | ||
59 | |||
60 | if disk_name in self.disks: | ||
61 | # We already have this disk | ||
62 | return | ||
63 | |||
64 | assert not self._partitions_layed_out | ||
65 | |||
66 | self.disks[disk_name] = \ | ||
67 | { 'disk': None, # Disk object | ||
68 | 'mapped': False, # True if kpartx mapping exists | ||
69 | 'numpart': 0, # Number of allocate partitions | ||
70 | 'partitions': [], # Indexes to self.partitions | ||
71 | 'offset': 0, # Offset of next partition (in sectors) | ||
72 | # Minimum required disk size to fit all partitions (in bytes) | ||
73 | 'min_size': 0, | ||
74 | 'ptable_format': "msdos" } # Partition table format | ||
75 | |||
76 | def add_disk(self, disk_name, disk_obj): | ||
77 | """ Add a disk object which have to be partitioned. More than one disk | ||
78 | can be added. In case of multiple disks, disk partitions have to be | ||
79 | added for each disk separately with 'add_partition()". """ | ||
80 | |||
81 | self.__add_disk(disk_name) | ||
82 | self.disks[disk_name]['disk'] = disk_obj | ||
83 | |||
84 | def __add_partition(self, part): | ||
85 | """ This is a helper function for 'add_partition()' which adds a | ||
86 | partition to the internal list of partitions. """ | ||
87 | |||
88 | assert not self._partitions_layed_out | ||
89 | |||
90 | self.partitions.append(part) | ||
91 | self.__add_disk(part['disk_name']) | ||
92 | |||
    def add_partition(self, size, disk_name, mountpoint, source_file = None, fstype = None,
                      label=None, fsopts = None, boot = False, align = None,
                      part_type = None):
        """ Add the next partition. Partitions have to be added in the
        first-to-last order.

        size        -- partition size in MB (converted to sectors below)
        disk_name   -- target disk holding the partition (e.g. 'sdb')
        mountpoint  -- mountpoint relative to the chroot
        source_file -- optional file whose contents populate the partition
        fstype      -- filesystem type; btrfs with a 'subvol=' entry in
                       'fsopts' additionally records a subvolume
        label       -- filesystem label
        fsopts      -- filesystem mount options
        boot        -- mark the partition bootable
        align       -- partition alignment in KB
        part_type   -- custom partition type UUID (GPT only)
        """

        ks_pnum = len(self.partitions)

        # Converting MB to sectors for parted
        size = size * 1024 * 1024 / self.sector_size

        # We need to handle subvolumes for btrfs
        if fstype == "btrfs" and fsopts and fsopts.find("subvol=") != -1:
            self.btrfscmd=find_binary_path("btrfs")
            subvol = None
            opts = fsopts.split(",")
            for opt in opts:
                if opt.find("subvol=") != -1:
                    subvol = opt.replace("subvol=", "").strip()
                    break
            if not subvol:
                raise MountError("No subvolume: %s" % fsopts)
            self.subvolumes.append({'size': size, # In sectors
                                    'mountpoint': mountpoint, # Mount relative to chroot
                                    'fstype': fstype, # Filesystem type
                                    'fsopts': fsopts, # Filesystem mount options
                                    'disk_name': disk_name, # physical disk name holding partition
                                    'device': None, # kpartx device node for partition
                                    'mount': None, # Mount object
                                    'subvol': subvol, # Subvolume name
                                    'boot': boot, # Bootable flag
                                    'mounted': False # Mount flag
                                   })

        # We still need partition for "/" or non-subvolume
        if mountpoint == "/" or not fsopts or fsopts.find("subvol=") == -1:
            # Don't need subvolume for "/" because it will be set as default subvolume
            if fsopts and fsopts.find("subvol=") != -1:
                # Strip the 'subvol=' entry from the mount options kept on
                # the partition itself.
                opts = fsopts.split(",")
                for opt in opts:
                    if opt.strip().startswith("subvol="):
                        opts.remove(opt)
                        break
                fsopts = ",".join(opts)

            part = { 'ks_pnum' : ks_pnum, # Partition number in the KS file
                     'size': size, # In sectors
                     'mountpoint': mountpoint, # Mount relative to chroot
                     'source_file': source_file, # partition contents
                     'fstype': fstype, # Filesystem type
                     'fsopts': fsopts, # Filesystem mount options
                     'label': label, # Partition label
                     'disk_name': disk_name, # physical disk name holding partition
                     'device': None, # kpartx device node for partition
                     'mount': None, # Mount object
                     'num': None, # Partition number
                     'boot': boot, # Bootable flag
                     'align': align, # Partition alignment
                     'part_type' : part_type, # Partition type
                     'partuuid': None } # Partition UUID (GPT-only)

            self.__add_partition(part)
155 | |||
    def layout_partitions(self, ptable_format = "msdos"):
        """ Layout the partitions, meaning calculate the position of every
        partition on the disk. The 'ptable_format' parameter defines the
        partition table format, and may be either "msdos" or "gpt".

        Idempotent: a second call returns immediately.  Raises MountError
        for an unknown table format, for a partition whose disk was never
        added, or for a custom partition type on a non-GPT table. """

        msger.debug("Assigning %s partitions to disks" % ptable_format)

        if ptable_format not in ('msdos', 'gpt'):
            raise MountError("Unknown partition table format '%s', supported " \
                             "formats are: 'msdos' and 'gpt'" % ptable_format)

        if self._partitions_layed_out:
            return

        self._partitions_layed_out = True

        # Go through partitions in the order they are added in .ks file
        for n in range(len(self.partitions)):
            p = self.partitions[n]

            if not self.disks.has_key(p['disk_name']):
                raise MountError("No disk %s for partition %s" \
                                 % (p['disk_name'], p['mountpoint']))

            if p['part_type'] and ptable_format != 'gpt':
                # The --part-type can also be implemented for MBR partitions,
                # in which case it would map to the 1-byte "partition type"
                # field at offset 3 of the partition entry.
                raise MountError("setting custom partition type is only " \
                                 "imlemented for GPT partitions")

            # Get the disk where the partition is located
            d = self.disks[p['disk_name']]
            d['numpart'] += 1
            d['ptable_format'] = ptable_format

            if d['numpart'] == 1:
                if ptable_format == "msdos":
                    overhead = MBR_OVERHEAD
                else:
                    overhead = GPT_OVERHEAD

                # Skip one sector required for the partitioning scheme overhead
                d['offset'] += overhead
                # Steal few sectors from the first partition to offset for the
                # partitioning overhead
                p['size'] -= overhead

            if p['align']:
                # If not first partition and we do have alignment set we need
                # to align the partition.
                # FIXME: This leaves empty space on the disk. To fill the
                # gaps we could enlarge the previous partition?

                # Calc how much the alignment is off.
                align_sectors = d['offset'] % (p['align'] * 1024 / self.sector_size)
                # We need to move forward to the next alignment point
                align_sectors = (p['align'] * 1024 / self.sector_size) - align_sectors

                msger.debug("Realignment for %s%s with %s sectors, original"
                            " offset %s, target alignment is %sK." %
                            (p['disk_name'], d['numpart'], align_sectors,
                             d['offset'], p['align']))

                # increase the offset so we actually start the partition on right alignment
                d['offset'] += align_sectors

            p['start'] = d['offset']
            d['offset'] += p['size']

            # Every partition starts out primary; msdos logical adjustment
            # happens below.
            p['type'] = 'primary'
            p['num'] = d['numpart']

            if d['ptable_format'] == "msdos":
                if d['numpart'] > 2:
                    # Every logical partition requires an additional sector for
                    # the EBR, so steal the last sector from the end of each
                    # partition starting from the 3rd one for the EBR. This
                    # will make sure the logical partitions are aligned
                    # correctly.
                    p['size'] -= 1

                if d['numpart'] > 3:
                    # Partitions beyond the 3rd become logical; parted numbers
                    # logical partitions from 5, hence the +1.
                    p['type'] = 'logical'
                    p['num'] = d['numpart'] + 1

            d['partitions'].append(n)
            msger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
                        "sectors (%d bytes)." \
                        % (p['mountpoint'], p['disk_name'], p['num'],
                           p['start'], p['start'] + p['size'] - 1,
                           p['size'], p['size'] * self.sector_size))

        # Once all the partitions have been layed out, we can calculate the
        # minumim disk sizes.
        for disk_name, d in self.disks.items():
            d['min_size'] = d['offset']
            if d['ptable_format'] == 'gpt':
                # Account for the backup partition table at the end of the disk
                d['min_size'] += GPT_OVERHEAD

            d['min_size'] *= self.sector_size
258 | |||
259 | def __run_parted(self, args): | ||
260 | """ Run parted with arguments specified in the 'args' list. """ | ||
261 | |||
262 | args.insert(0, self.parted) | ||
263 | msger.debug(args) | ||
264 | |||
265 | rc, out = runner.runtool(args, catch = 3) | ||
266 | out = out.strip() | ||
267 | if out: | ||
268 | msger.debug('"parted" output: %s' % out) | ||
269 | |||
270 | if rc != 0: | ||
271 | # We don't throw exception when return code is not 0, because | ||
272 | # parted always fails to reload part table with loop devices. This | ||
273 | # prevents us from distinguishing real errors based on return | ||
274 | # code. | ||
275 | msger.debug("WARNING: parted returned '%s' instead of 0" % rc) | ||
276 | |||
277 | def __create_partition(self, device, parttype, fstype, start, size): | ||
278 | """ Create a partition on an image described by the 'device' object. """ | ||
279 | |||
280 | # Start is included to the size so we need to substract one from the end. | ||
281 | end = start + size - 1 | ||
282 | msger.debug("Added '%s' partition, sectors %d-%d, size %d sectors" % | ||
283 | (parttype, start, end, size)) | ||
284 | |||
285 | args = ["-s", device, "unit", "s", "mkpart", parttype] | ||
286 | if fstype: | ||
287 | args.extend([fstype]) | ||
288 | args.extend(["%d" % start, "%d" % end]) | ||
289 | |||
290 | return self.__run_parted(args) | ||
291 | |||
292 | def __format_disks(self): | ||
293 | self.layout_partitions() | ||
294 | |||
295 | if self.skipformat: | ||
296 | msger.debug("Skipping disk format, because skipformat flag is set.") | ||
297 | return | ||
298 | |||
299 | for dev in self.disks.keys(): | ||
300 | d = self.disks[dev] | ||
301 | msger.debug("Initializing partition table for %s" % \ | ||
302 | (d['disk'].device)) | ||
303 | self.__run_parted(["-s", d['disk'].device, "mklabel", | ||
304 | d['ptable_format']]) | ||
305 | |||
306 | msger.debug("Creating partitions") | ||
307 | |||
308 | for p in self.partitions: | ||
309 | d = self.disks[p['disk_name']] | ||
310 | if d['ptable_format'] == "msdos" and p['num'] == 5: | ||
311 | # The last sector of the 3rd partition was reserved for the EBR | ||
312 | # of the first _logical_ partition. This is why the extended | ||
313 | # partition should start one sector before the first logical | ||
314 | # partition. | ||
315 | self.__create_partition(d['disk'].device, "extended", | ||
316 | None, p['start'] - 1, | ||
317 | d['offset'] - p['start']) | ||
318 | |||
319 | if p['fstype'] == "swap": | ||
320 | parted_fs_type = "linux-swap" | ||
321 | elif p['fstype'] == "vfat": | ||
322 | parted_fs_type = "fat32" | ||
323 | elif p['fstype'] == "msdos": | ||
324 | parted_fs_type = "fat16" | ||
325 | else: | ||
326 | # Type for ext2/ext3/ext4/btrfs | ||
327 | parted_fs_type = "ext2" | ||
328 | |||
329 | # Boot ROM of OMAP boards require vfat boot partition to have an | ||
330 | # even number of sectors. | ||
331 | if p['mountpoint'] == "/boot" and p['fstype'] in ["vfat", "msdos"] \ | ||
332 | and p['size'] % 2: | ||
333 | msger.debug("Substracting one sector from '%s' partition to " \ | ||
334 | "get even number of sectors for the partition" % \ | ||
335 | p['mountpoint']) | ||
336 | p['size'] -= 1 | ||
337 | |||
338 | self.__create_partition(d['disk'].device, p['type'], | ||
339 | parted_fs_type, p['start'], p['size']) | ||
340 | |||
341 | if p['boot']: | ||
342 | if d['ptable_format'] == 'gpt': | ||
343 | flag_name = "legacy_boot" | ||
344 | else: | ||
345 | flag_name = "boot" | ||
346 | msger.debug("Set '%s' flag for partition '%s' on disk '%s'" % \ | ||
347 | (flag_name, p['num'], d['disk'].device)) | ||
348 | self.__run_parted(["-s", d['disk'].device, "set", | ||
349 | "%d" % p['num'], flag_name, "on"]) | ||
350 | |||
351 | # Parted defaults to enabling the lba flag for fat16 partitions, | ||
352 | # which causes compatibility issues with some firmware (and really | ||
353 | # isn't necessary). | ||
354 | if parted_fs_type == "fat16": | ||
355 | if d['ptable_format'] == 'msdos': | ||
356 | msger.debug("Disable 'lba' flag for partition '%s' on disk '%s'" % \ | ||
357 | (p['num'], d['disk'].device)) | ||
358 | self.__run_parted(["-s", d['disk'].device, "set", | ||
359 | "%d" % p['num'], "lba", "off"]) | ||
360 | |||
361 | # If the partition table format is "gpt", find out PARTUUIDs for all | ||
362 | # the partitions. And if users specified custom parition type UUIDs, | ||
363 | # set them. | ||
364 | for disk_name, disk in self.disks.items(): | ||
365 | if disk['ptable_format'] != 'gpt': | ||
366 | continue | ||
367 | |||
368 | pnum = 0 | ||
369 | gpt_parser = GptParser(d['disk'].device, SECTOR_SIZE) | ||
370 | # Iterate over all GPT partitions on this disk | ||
371 | for entry in gpt_parser.get_partitions(): | ||
372 | pnum += 1 | ||
373 | # Find the matching partition in the 'self.partitions' list | ||
374 | for n in d['partitions']: | ||
375 | p = self.partitions[n] | ||
376 | if p['num'] == pnum: | ||
377 | # Found, fetch PARTUUID (partition's unique ID) | ||
378 | p['partuuid'] = entry['part_uuid'] | ||
379 | msger.debug("PARTUUID for partition %d on disk '%s' " \ | ||
380 | "(mount point '%s') is '%s'" % (pnum, \ | ||
381 | disk_name, p['mountpoint'], p['partuuid'])) | ||
382 | if p['part_type']: | ||
383 | entry['type_uuid'] = p['part_type'] | ||
384 | msger.debug("Change type of partition %d on disk " \ | ||
385 | "'%s' (mount point '%s') to '%s'" % \ | ||
386 | (pnum, disk_name, p['mountpoint'], | ||
387 | p['part_type'])) | ||
388 | gpt_parser.change_partition(entry) | ||
389 | |||
390 | del gpt_parser | ||
391 | |||
    def __map_partitions(self):
        """Create kpartx device mappings (plus grub-friendly compat
        symlinks) for every disk that is not mapped yet.

        NOTE(review): self.kpartx is referenced here but never assigned in
        __init__ -- presumably set elsewhere (e.g. in a base class or by
        the caller); verify.
        """
        # Load dm_snapshot if it isn't loaded already.
        load_module("dm_snapshot")

        for dev in self.disks.keys():
            d = self.disks[dev]
            if d['mapped']:
                continue

            msger.debug("Running kpartx on %s" % d['disk'].device )
            rc, kpartxOutput = runner.runtool([self.kpartx, "-l", "-v", d['disk'].device])
            kpartxOutput = kpartxOutput.splitlines()

            if rc != 0:
                raise MountError("Failed to query partition mapping for '%s'" %
                                 d['disk'].device)

            # Strip trailing blank and mask verbose output
            i = 0
            while i < len(kpartxOutput) and kpartxOutput[i][0:4] != "loop":
                i = i + 1
            kpartxOutput = kpartxOutput[i:]

            # Make sure kpartx reported the right count of partitions
            if len(kpartxOutput) != d['numpart']:
                # If this disk has more than 3 partitions, then in case of MBR
                # paritions there is an extended parition. Different versions
                # of kpartx behave differently WRT the extended partition -
                # some map it, some ignore it. This is why we do the below hack
                # - if kpartx reported one more partition and the partition
                # table type is "msdos" and the amount of partitions is more
                # than 3, we just assume kpartx mapped the extended parition
                # and we remove it.
                if len(kpartxOutput) == d['numpart'] + 1 \
                   and d['ptable_format'] == 'msdos' and len(kpartxOutput) > 3:
                    kpartxOutput.pop(3)
                else:
                    raise MountError("Unexpected number of partitions from " \
                                     "kpartx: %d != %d" % \
                                     (len(kpartxOutput), d['numpart']))

            for i in range(len(kpartxOutput)):
                line = kpartxOutput[i]
                newdev = line.split()[0]
                mapperdev = "/dev/mapper/" + newdev
                loopdev = d['disk'].device + newdev[-1]

                msger.debug("Dev %s: %s -> %s" % (newdev, loopdev, mapperdev))
                pnum = d['partitions'][i]
                self.partitions[pnum]['device'] = loopdev

                # grub's install wants partitions to be named
                # to match their parent device + partition num
                # kpartx doesn't work like this, so we add compat
                # symlinks to point to /dev/mapper
                if os.path.lexists(loopdev):
                    os.unlink(loopdev)
                os.symlink(mapperdev, loopdev)

            msger.debug("Adding partx mapping for %s" % d['disk'].device)
            rc = runner.show([self.kpartx, "-v", "-a", d['disk'].device])

            if rc != 0:
                # Make sure that the device maps are also removed on error case.
                # The d['mapped'] isn't set to True if the kpartx fails so
                # failed mapping will not be cleaned on cleanup either.
                runner.quiet([self.kpartx, "-d", d['disk'].device])
                raise MountError("Failed to map partitions for '%s'" %
                                 d['disk'].device)

            # FIXME: multipath device setup is asynchronous, so wait for it.
            # NOTE(review): this sleeps 10 *seconds*, while the original
            # comment claimed 10 ms -- confirm the intended duration.
            import time
            time.sleep(10)
            d['mapped'] = True
467 | |||
468 | def __unmap_partitions(self): | ||
469 | for dev in self.disks.keys(): | ||
470 | d = self.disks[dev] | ||
471 | if not d['mapped']: | ||
472 | continue | ||
473 | |||
474 | msger.debug("Removing compat symlinks") | ||
475 | for pnum in d['partitions']: | ||
476 | if self.partitions[pnum]['device'] != None: | ||
477 | os.unlink(self.partitions[pnum]['device']) | ||
478 | self.partitions[pnum]['device'] = None | ||
479 | |||
480 | msger.debug("Unmapping %s" % d['disk'].device) | ||
481 | rc = runner.quiet([self.kpartx, "-d", d['disk'].device]) | ||
482 | if rc != 0: | ||
483 | raise MountError("Failed to unmap partitions for '%s'" % | ||
484 | d['disk'].device) | ||
485 | |||
486 | d['mapped'] = False | ||
487 | |||
488 | def __calculate_mountorder(self): | ||
489 | msger.debug("Calculating mount order") | ||
490 | for p in self.partitions: | ||
491 | if p['mountpoint']: | ||
492 | self.mountOrder.append(p['mountpoint']) | ||
493 | self.unmountOrder.append(p['mountpoint']) | ||
494 | |||
495 | self.mountOrder.sort() | ||
496 | self.unmountOrder.sort() | ||
497 | self.unmountOrder.reverse() | ||
498 | |||
499 | def cleanup(self): | ||
500 | Mount.cleanup(self) | ||
501 | if self.disks: | ||
502 | self.__unmap_partitions() | ||
503 | for dev in self.disks.keys(): | ||
504 | d = self.disks[dev] | ||
505 | try: | ||
506 | d['disk'].cleanup() | ||
507 | except: | ||
508 | pass | ||
509 | |||
510 | def unmount(self): | ||
511 | self.__unmount_subvolumes() | ||
512 | for mp in self.unmountOrder: | ||
513 | if mp == 'swap': | ||
514 | continue | ||
515 | p = None | ||
516 | for p1 in self.partitions: | ||
517 | if p1['mountpoint'] == mp: | ||
518 | p = p1 | ||
519 | break | ||
520 | |||
521 | if p['mount'] != None: | ||
522 | try: | ||
523 | # Create subvolume snapshot here | ||
524 | if p['fstype'] == "btrfs" and p['mountpoint'] == "/" and not self.snapshot_created: | ||
525 | self.__create_subvolume_snapshots(p, p["mount"]) | ||
526 | p['mount'].cleanup() | ||
527 | except: | ||
528 | pass | ||
529 | p['mount'] = None | ||
530 | |||
531 | # Only for btrfs | ||
532 | def __get_subvolume_id(self, rootpath, subvol): | ||
533 | if not self.btrfscmd: | ||
534 | self.btrfscmd=find_binary_path("btrfs") | ||
535 | argv = [ self.btrfscmd, "subvolume", "list", rootpath ] | ||
536 | |||
537 | rc, out = runner.runtool(argv) | ||
538 | msger.debug(out) | ||
539 | |||
540 | if rc != 0: | ||
541 | raise MountError("Failed to get subvolume id from %s', return code: %d." % (rootpath, rc)) | ||
542 | |||
543 | subvolid = -1 | ||
544 | for line in out.splitlines(): | ||
545 | if line.endswith(" path %s" % subvol): | ||
546 | subvolid = line.split()[1] | ||
547 | if not subvolid.isdigit(): | ||
548 | raise MountError("Invalid subvolume id: %s" % subvolid) | ||
549 | subvolid = int(subvolid) | ||
550 | break | ||
551 | return subvolid | ||
552 | |||
    def __create_subvolume_metadata(self, p, pdisk):
        """Record id, name, mountpoint and mount options of every btrfs
        subvolume in a '.subvolume_metadata' file at the root of 'pdisk',
        so they can be rediscovered later by __get_subvolume_metadata()."""
        if len(self.subvolumes) == 0:
            return

        argv = [ self.btrfscmd, "subvolume", "list", pdisk.mountdir ]
        rc, out = runner.runtool(argv)
        msger.debug(out)

        if rc != 0:
            raise MountError("Failed to get subvolume id from %s', return code: %d." % (pdisk.mountdir, rc))

        subvolid_items = out.splitlines()
        subvolume_metadata = ""
        for subvol in self.subvolumes:
            # Match each known subvolume against the 'btrfs subvolume list'
            # output to learn its numeric id.
            for line in subvolid_items:
                if line.endswith(" path %s" % subvol["subvol"]):
                    subvolid = line.split()[1]
                    if not subvolid.isdigit():
                        raise MountError("Invalid subvolume id: %s" % subvolid)

                    subvolid = int(subvolid)
                    # Strip the 'subvol=' entry before recording the options.
                    opts = subvol["fsopts"].split(",")
                    for opt in opts:
                        if opt.strip().startswith("subvol="):
                            opts.remove(opt)
                            break
                    fsopts = ",".join(opts)
                    # One tab-separated record per subvolume.
                    subvolume_metadata += "%d\t%s\t%s\t%s\n" % (subvolid, subvol["subvol"], subvol['mountpoint'], fsopts)

        if subvolume_metadata:
            fd = open("%s/.subvolume_metadata" % pdisk.mountdir, "w")
            fd.write(subvolume_metadata)
            fd.close()
586 | |||
    def __get_subvolume_metadata(self, p, pdisk):
        """Re-populate self.subvolumes from the '.subvolume_metadata' file
        written by __create_subvolume_metadata(); no-op when the file does
        not exist."""
        subvolume_metadata_file = "%s/.subvolume_metadata" % pdisk.mountdir
        if not os.path.exists(subvolume_metadata_file):
            return

        fd = open(subvolume_metadata_file, "r")
        content = fd.read()
        fd.close()

        # Each record: "<id>\t<subvol>\t<mountpoint>\t<fsopts>"
        for line in content.splitlines():
            items = line.split("\t")
            if items and len(items) == 4:
                self.subvolumes.append({'size': 0, # In sectors
                                        'mountpoint': items[2], # Mount relative to chroot
                                        'fstype': "btrfs", # Filesystem type
                                        'fsopts': items[3] + ",subvol=%s" % items[1], # Filesystem mount options
                                        'disk_name': p['disk_name'], # physical disk name holding partition
                                        'device': None, # kpartx device node for partition
                                        'mount': None, # Mount object
                                        'subvol': items[1], # Subvolume name
                                        'boot': False, # Bootable flag
                                        'mounted': False # Mount flag
                                       })
610 | |||
    def __create_subvolumes(self, p, pdisk):
        """ Create all the subvolumes, make the one mounted at "/" (on the
        same disk as partition 'p') the default subvolume, and persist the
        subvolume metadata file. """

        for subvol in self.subvolumes:
            argv = [ self.btrfscmd, "subvolume", "create", pdisk.mountdir + "/" + subvol["subvol"]]

            rc = runner.show(argv)
            if rc != 0:
                raise MountError("Failed to create subvolume '%s', return code: %d." % (subvol["subvol"], rc))

        # Set default subvolume, subvolume for "/" is default
        subvol = None
        for subvolume in self.subvolumes:
            if subvolume["mountpoint"] == "/" and p['disk_name'] == subvolume['disk_name']:
                subvol = subvolume
                break

        if subvol:
            # Get default subvolume id
            subvolid = self. __get_subvolume_id(pdisk.mountdir, subvol["subvol"])
            # Set default subvolume
            if subvolid != -1:
                rc = runner.show([ self.btrfscmd, "subvolume", "set-default", "%d" % subvolid, pdisk.mountdir])
                if rc != 0:
                    raise MountError("Failed to set default subvolume id: %d', return code: %d." % (subvolid, rc))

        self.__create_subvolume_metadata(p, pdisk)
638 | |||
639 | def __mount_subvolumes(self, p, pdisk): | ||
640 | if self.skipformat: | ||
641 | # Get subvolume info | ||
642 | self.__get_subvolume_metadata(p, pdisk) | ||
643 | # Set default mount options | ||
644 | if len(self.subvolumes) != 0: | ||
645 | for subvol in self.subvolumes: | ||
646 | if subvol["mountpoint"] == p["mountpoint"] == "/": | ||
647 | opts = subvol["fsopts"].split(",") | ||
648 | for opt in opts: | ||
649 | if opt.strip().startswith("subvol="): | ||
650 | opts.remove(opt) | ||
651 | break | ||
652 | pdisk.fsopts = ",".join(opts) | ||
653 | break | ||
654 | |||
655 | if len(self.subvolumes) == 0: | ||
656 | # Return directly if no subvolumes | ||
657 | return | ||
658 | |||
659 | # Remount to make default subvolume mounted | ||
660 | rc = runner.show([self.umountcmd, pdisk.mountdir]) | ||
661 | if rc != 0: | ||
662 | raise MountError("Failed to umount %s" % pdisk.mountdir) | ||
663 | |||
664 | rc = runner.show([self.mountcmd, "-o", pdisk.fsopts, pdisk.disk.device, pdisk.mountdir]) | ||
665 | if rc != 0: | ||
666 | raise MountError("Failed to umount %s" % pdisk.mountdir) | ||
667 | |||
668 | for subvol in self.subvolumes: | ||
669 | if subvol["mountpoint"] == "/": | ||
670 | continue | ||
671 | subvolid = self. __get_subvolume_id(pdisk.mountdir, subvol["subvol"]) | ||
672 | if subvolid == -1: | ||
673 | msger.debug("WARNING: invalid subvolume %s" % subvol["subvol"]) | ||
674 | continue | ||
675 | # Replace subvolume name with subvolume ID | ||
676 | opts = subvol["fsopts"].split(",") | ||
677 | for opt in opts: | ||
678 | if opt.strip().startswith("subvol="): | ||
679 | opts.remove(opt) | ||
680 | break | ||
681 | |||
682 | opts.extend(["subvolrootid=0", "subvol=%s" % subvol["subvol"]]) | ||
683 | fsopts = ",".join(opts) | ||
684 | subvol['fsopts'] = fsopts | ||
685 | mountpoint = self.mountdir + subvol['mountpoint'] | ||
686 | makedirs(mountpoint) | ||
687 | rc = runner.show([self.mountcmd, "-o", fsopts, pdisk.disk.device, mountpoint]) | ||
688 | if rc != 0: | ||
689 | raise MountError("Failed to mount subvolume %s to %s" % (subvol["subvol"], mountpoint)) | ||
690 | subvol["mounted"] = True | ||
691 | |||
692 | def __unmount_subvolumes(self): | ||
693 | """ It may be called multiple times, so we need to chekc if it is still mounted. """ | ||
694 | for subvol in self.subvolumes: | ||
695 | if subvol["mountpoint"] == "/": | ||
696 | continue | ||
697 | if not subvol["mounted"]: | ||
698 | continue | ||
699 | mountpoint = self.mountdir + subvol['mountpoint'] | ||
700 | rc = runner.show([self.umountcmd, mountpoint]) | ||
701 | if rc != 0: | ||
702 | raise MountError("Failed to unmount subvolume %s from %s" % (subvol["subvol"], mountpoint)) | ||
703 | subvol["mounted"] = False | ||
704 | |||
705 | def __create_subvolume_snapshots(self, p, pdisk): | ||
706 | import time | ||
707 | |||
708 | if self.snapshot_created: | ||
709 | return | ||
710 | |||
711 | # Remount with subvolid=0 | ||
712 | rc = runner.show([self.umountcmd, pdisk.mountdir]) | ||
713 | if rc != 0: | ||
714 | raise MountError("Failed to umount %s" % pdisk.mountdir) | ||
715 | if pdisk.fsopts: | ||
716 | mountopts = pdisk.fsopts + ",subvolid=0" | ||
717 | else: | ||
718 | mountopts = "subvolid=0" | ||
719 | rc = runner.show([self.mountcmd, "-o", mountopts, pdisk.disk.device, pdisk.mountdir]) | ||
720 | if rc != 0: | ||
721 | raise MountError("Failed to umount %s" % pdisk.mountdir) | ||
722 | |||
723 | # Create all the subvolume snapshots | ||
724 | snapshotts = time.strftime("%Y%m%d-%H%M") | ||
725 | for subvol in self.subvolumes: | ||
726 | subvolpath = pdisk.mountdir + "/" + subvol["subvol"] | ||
727 | snapshotpath = subvolpath + "_%s-1" % snapshotts | ||
728 | rc = runner.show([ self.btrfscmd, "subvolume", "snapshot", subvolpath, snapshotpath ]) | ||
729 | if rc != 0: | ||
730 | raise MountError("Failed to create subvolume snapshot '%s' for '%s', return code: %d." % (snapshotpath, subvolpath, rc)) | ||
731 | |||
732 | self.snapshot_created = True | ||
733 | |||
    def __install_partition(self, num, source_file, start, size):
        """
        Install source_file contents into a partition.

        num         -- partition number (used for logging only)
        source_file -- file to write; when falsy, nothing is installed
        start       -- first sector of the partition
        size        -- number of sectors to write
        """
        if not source_file: # nothing to install
            return

        # Start is included in the size so need to substract one from the end.
        end = start + size - 1
        msger.debug("Installed %s in partition %d, sectors %d-%d, size %d sectors" % (source_file, num, start, end, size))

        dd_cmd = "dd if=%s of=%s bs=%d seek=%d count=%d conv=notrunc" % \
                 (source_file, self.image_file, self.sector_size, start, size)
        # NOTE(review): exec_cmd comes from 'mic.utils.oe.misc' via a
        # wildcard import; it appears to return (rc, output), but 'rc' is
        # never checked here -- verify that exec_cmd aborts on failure.
        rc, out = exec_cmd(dd_cmd)
748 | |||
749 | |||
    def install(self, image_file):
        """Write each partition's source file into 'image_file' at the
        sector offsets computed by layout_partitions()."""
        msger.debug("Installing partitions")

        # Remembered for __install_partition's dd invocation.
        self.image_file = image_file

        for p in self.partitions:
            d = self.disks[p['disk_name']]
            if d['ptable_format'] == "msdos" and p['num'] == 5:
                # The last sector of the 3rd partition was reserved for the EBR
                # of the first _logical_ partition. This is why the extended
                # partition should start one sector before the first logical
                # partition.
                # NOTE(review): this writes p['source_file'] a second time,
                # over the extended-partition region -- looks suspicious;
                # verify against the matching branch in __format_disks().
                self.__install_partition(p['num'], p['source_file'],
                                         p['start'] - 1,
                                         d['offset'] - p['start'])

            self.__install_partition(p['num'], p['source_file'],
                                     p['start'], p['size'])
768 | |||
769 | def mount(self): | ||
770 | for dev in self.disks.keys(): | ||
771 | d = self.disks[dev] | ||
772 | d['disk'].create() | ||
773 | |||
774 | self.__format_disks() | ||
775 | |||
776 | self.__calculate_mountorder() | ||
777 | |||
778 | return | ||
779 | |||
    def resparse(self, size = None):
        """Intentional no-op: re-sparsifying a partitioned disk image is
        not supported; the method exists for interface compatibility."""
        # Can't re-sparse a disk image - too hard
        pass
diff --git a/scripts/lib/mic/utils/proxy.py b/scripts/lib/mic/utils/proxy.py new file mode 100644 index 0000000000..91451a2d01 --- /dev/null +++ b/scripts/lib/mic/utils/proxy.py | |||
@@ -0,0 +1,183 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2010, 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import urlparse | ||
20 | |||
# Module-level proxy state shared by the helpers below:
_my_proxies = {}       # scheme (e.g. "http") -> proxy server URL
_my_noproxy = None     # raw comma-separated no_proxy string, or None
_my_noproxy_list = []  # parsed no_proxy entries (built by _set_noproxy_list)
24 | |||
def set_proxy_environ():
    """Export the cached proxy settings back into os.environ."""
    global _my_noproxy, _my_proxies
    if not _my_proxies:
        return
    for scheme, url in _my_proxies.items():
        os.environ[scheme + "_proxy"] = url
    if _my_noproxy:
        os.environ["no_proxy"] = _my_noproxy
34 | |||
def unset_proxy_environ():
    """Drop every *_proxy variable (lower- and upper-case) from os.environ."""
    for scheme in ('http', 'https', 'ftp', 'all'):
        for var in (scheme + '_proxy', scheme.upper() + '_PROXY'):
            os.environ.pop(var, None)
46 | |||
def _set_proxies(proxy = None, no_proxy = None):
    """Populate the module proxy state (scheme -> proxy URL mappings).

    Explicit arguments win; otherwise settings are harvested from the
    environment. The *_proxy environment variables themselves are removed
    because urllib2 can't handle them correctly.
    """

    global _my_noproxy, _my_proxies
    _my_proxies = {}
    _my_noproxy = None

    if proxy or no_proxy:
        candidates = []
        if proxy:
            candidates.append(("http_proxy", proxy))
        if no_proxy:
            candidates.append(("no_proxy", no_proxy))
    else:
        # Get proxy settings from environment if not provided
        candidates = os.environ.items()

    # Remove proxy env variables, urllib2 can't handle them correctly
    unset_proxy_environ()

    for name, value in candidates:
        name = name.lower()
        if not value or not name.endswith('_proxy'):
            continue
        if name.startswith("no"):
            _my_noproxy = value
        else:
            _my_proxies[name[:-6]] = value
74 | |||
75 | def _ip_to_int(ip): | ||
76 | ipint=0 | ||
77 | shift=24 | ||
78 | for dec in ip.split("."): | ||
79 | ipint |= int(dec) << shift | ||
80 | shift -= 8 | ||
81 | return ipint | ||
82 | |||
83 | def _int_to_ip(val): | ||
84 | ipaddr="" | ||
85 | shift=0 | ||
86 | for i in range(4): | ||
87 | dec = val >> shift | ||
88 | dec &= 0xff | ||
89 | ipaddr = ".%d%s" % (dec, ipaddr) | ||
90 | shift += 8 | ||
91 | return ipaddr[1:] | ||
92 | |||
93 | def _isip(host): | ||
94 | if host.replace(".", "").isdigit(): | ||
95 | return True | ||
96 | return False | ||
97 | |||
def _set_noproxy_list():
    """Parse the cached comma-separated no_proxy string into
    _my_noproxy_list.

    Each entry is a dict whose "match" key selects the rule type:
      0 -- exact hostname match ("needle" is the hostname)
      1 -- domain-suffix match ("needle" starts with '.')
      2 -- network match ("needle" is the masked network address as an
           int, "netmask" the mask applied to candidate hosts)
    """
    global _my_noproxy, _my_noproxy_list
    _my_noproxy_list = []
    if not _my_noproxy:
        return
    for item in _my_noproxy.split(","):
        item = item.strip()
        if not item:
            continue

        if item[0] != '.' and item.find("/") == -1:
            # Need to match it
            _my_noproxy_list.append({"match":0,"needle":item})

        elif item[0] == '.':
            # Need to match at tail
            _my_noproxy_list.append({"match":1,"needle":item})

        elif item.find("/") > 3:
            # IP/MASK, need to match at head
            needle = item[0:item.find("/")].strip()
            ip = _ip_to_int(needle)
            netmask = 0
            mask = item[item.find("/")+1:].strip()

            if mask.isdigit():
                # CIDR prefix length, e.g. "10.0.0.0/8"
                netmask = int(mask)
                netmask = ~((1<<(32-netmask)) - 1)
                ip &= netmask
            else:
                # dotted-quad netmask, e.g. "10.0.0.0/255.0.0.0"
                shift=24
                netmask=0
                for dec in mask.split("."):
                    netmask |= int(dec) << shift
                    shift -= 8
                ip &= netmask

            _my_noproxy_list.append({"match":2,"needle":ip,"netmask":netmask})
136 | |||
def _isnoproxy(url):
    """Return True if url's host matches any parsed no_proxy entry."""
    (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)

    # Strip credentials and port from the netloc to get the bare host.
    if '@' in host:
        user_pass, host = host.split('@', 1)

    if ':' in host:
        host, port = host.split(':', 1)

    hostisip = _isip(host)
    for item in _my_noproxy_list:
        # Hostname rules (match 0/1) never apply to numeric IPs.
        if hostisip and item["match"] <= 1:
            continue

        if item["match"] == 2 and hostisip:
            # Network rule: compare the masked address against the needle.
            if (_ip_to_int(host) & item["netmask"]) == item["needle"]:
                return True

        if item["match"] == 0:
            # Exact hostname match.
            if host == item["needle"]:
                return True

        if item["match"] == 1:
            # Domain-suffix rule (needle like ".example.com"): the host
            # must actually end with the needle. The previous
            # `host.rfind(needle) > 0` test also matched needles occurring
            # in the middle of the host (e.g. "a.example.com.evil.org"),
            # wrongly bypassing the proxy for such hosts.
            if host.endswith(item["needle"]):
                return True

    return False
164 | |||
def set_proxies(proxy = None, no_proxy = None):
    """Public entry point: cache the proxy settings, parse the no_proxy
    rules, then export everything back into the environment.
    (Order matters: _set_proxies fills the state the other two read.)
    """
    _set_proxies(proxy, no_proxy)
    _set_noproxy_list()
    set_proxy_environ()
169 | |||
def get_proxy_for(url):
    """Return the proxy server URL to use for url, or None.

    None is returned for file: URLs and hosts matching the no_proxy
    rules. Unknown schemes fall back to the "http" proxy when one is
    configured.
    """
    if url.startswith('file:') or _isnoproxy(url):
        return None

    # raises ValueError when the URL has no scheme separator, as before
    scheme = url[0:url.index(":")]

    # dict.has_key() is deprecated; .get() also folds away the dead
    # trailing `else: proxy = None` branch of the original.
    return _my_proxies.get(scheme, _my_proxies.get("http"))
diff --git a/scripts/lib/mic/utils/rpmmisc.py b/scripts/lib/mic/utils/rpmmisc.py new file mode 100644 index 0000000000..af15763e18 --- /dev/null +++ b/scripts/lib/mic/utils/rpmmisc.py | |||
@@ -0,0 +1,600 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2008, 2009, 2010, 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import sys | ||
20 | import re | ||
21 | import rpm | ||
22 | |||
23 | from mic import msger | ||
24 | from mic.utils.errors import CreatorError | ||
25 | from mic.utils.proxy import get_proxy_for | ||
26 | from mic.utils import runner | ||
27 | |||
28 | |||
class RPMInstallCallback:
    """ Command line callback class for callbacks from the RPM library.
    """

    def __init__(self, ts, output=1):
        # ts     -- rpm transaction set used to (re-)read package headers
        # output -- truthy to print progress via msger, 0/false for quiet
        self.output = output
        self.callbackfilehandles = {}   # handle string -> open package fd
        self.total_actions = 0          # set from the TRANS_START callback
        self.total_installed = 0
        self.installed_pkg_names = []
        self.total_removed = 0
        self.mark = "+"                 # character used for the progress bar
        self.marks = 40                 # progress bar width budget
        self.lastmsg = None             # last printed line, to avoid repeats
        self.tsInfo = None # this needs to be set for anything else to work
        self.ts = ts
        self.filelog = False
        self.logString = []             # per-package summary lines
        self.headmsg = "Installing"

    def _dopkgtup(self, hdr):
        """Return the (name, arch, epoch, version, release) tuple for hdr,
        normalizing a missing epoch to '0'."""
        tmpepoch = hdr['epoch']
        if tmpepoch is None: epoch = '0'
        else: epoch = str(tmpepoch)

        return (hdr['name'], hdr['arch'], epoch, hdr['version'], hdr['release'])

    def _makeHandle(self, hdr):
        """Build the unique key used to track the open fd for a package."""
        handle = '%s:%s.%s-%s-%s' % (hdr['epoch'], hdr['name'], hdr['version'],
                                     hdr['release'], hdr['arch'])

        return handle

    def _localprint(self, msg):
        # print only when output is enabled
        if self.output:
            msger.info(msg)

    def _makefmt(self, percent, progress = True):
        """Return the %-format string for one progress line:
        leading \r + label + package name + bar + [done/total] counter."""
        l = len(str(self.total_actions))
        size = "%s.%s" % (l, l)
        fmt_done = "[%" + size + "s/%" + size + "s]"
        done = fmt_done % (self.total_installed + self.total_removed,
                           self.total_actions)
        # bar width shrinks as the counter field grows
        marks = self.marks - (2 * l)
        width = "%s.%s" % (marks, marks)
        fmt_bar = "%-" + width + "s"
        if progress:
            bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), )
            fmt = "\r %-10.10s: %-20.20s " + bar + " " + done
        else:
            bar = fmt_bar % (self.mark * marks, )
            fmt = " %-10.10s: %-20.20s " + bar + " " + done
        return fmt

    def _logPkgString(self, hdr):
        """return nice representation of the package for the log"""
        (n,a,e,v,r) = self._dopkgtup(hdr)
        if e == '0':
            pkg = '%s.%s %s-%s' % (n, a, v, r)
        else:
            pkg = '%s.%s %s:%s-%s' % (n, a, e, v, r)

        return pkg

    def callback(self, what, bytes, total, h, user):
        """Dispatcher for rpm transaction callbacks.

        `h` is either a (header, package-path) pair or just the path, in
        which case the header is re-read from the file. INST_OPEN_FILE
        must return an open fd for rpm to read the package from;
        INST_CLOSE_FILE closes it again.
        """
        if what == rpm.RPMCALLBACK_TRANS_START:
            # bytes == 6 identifies the install-phase TRANS_START; total
            # is then the number of actions in the transaction
            if bytes == 6:
                self.total_actions = total

        elif what == rpm.RPMCALLBACK_TRANS_PROGRESS:
            pass

        elif what == rpm.RPMCALLBACK_TRANS_STOP:
            pass

        elif what == rpm.RPMCALLBACK_INST_OPEN_FILE:
            self.lastmsg = None
            hdr = None
            if h is not None:
                try:
                    hdr, rpmloc = h
                except:
                    # h was just the package path; read the header ourselves
                    rpmloc = h
                    hdr = readRpmHeader(self.ts, h)

                handle = self._makeHandle(hdr)
                fd = os.open(rpmloc, os.O_RDONLY)
                self.callbackfilehandles[handle]=fd
                if hdr['name'] not in self.installed_pkg_names:
                    self.installed_pkg_names.append(hdr['name'])
                self.total_installed += 1
                return fd
            else:
                self._localprint("No header - huh?")

        elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE:
            hdr = None
            if h is not None:
                try:
                    hdr, rpmloc = h
                except:
                    rpmloc = h
                    hdr = readRpmHeader(self.ts, h)

                handle = self._makeHandle(hdr)
                os.close(self.callbackfilehandles[handle])
                fd = 0

                # log stuff
                #pkgtup = self._dopkgtup(hdr)
                self.logString.append(self._logPkgString(hdr))

        elif what == rpm.RPMCALLBACK_INST_PROGRESS:
            if h is not None:
                percent = (self.total_installed*100L)/self.total_actions
                if total > 0:
                    try:
                        hdr, rpmloc = h
                    except:
                        rpmloc = h

                    # strip version/release/arch from the file name to get
                    # a short package name for the progress line
                    m = re.match("(.*)-(\d+.*)-(\d+\.\d+)\.(.+)\.rpm", os.path.basename(rpmloc))
                    if m:
                        pkgname = m.group(1)
                    else:
                        pkgname = os.path.basename(rpmloc)
                if self.output:
                    fmt = self._makefmt(percent)
                    msg = fmt % (self.headmsg, pkgname)
                    if msg != self.lastmsg:
                        self.lastmsg = msg

                        msger.info(msg)

                    # transaction finished: dump the per-package summary
                    if self.total_installed == self.total_actions:
                        msger.raw('')
                        msger.verbose('\n'.join(self.logString))

        elif what == rpm.RPMCALLBACK_UNINST_START:
            pass

        elif what == rpm.RPMCALLBACK_UNINST_PROGRESS:
            pass

        elif what == rpm.RPMCALLBACK_UNINST_STOP:
            self.total_removed += 1

        elif what == rpm.RPMCALLBACK_REPACKAGE_START:
            pass

        elif what == rpm.RPMCALLBACK_REPACKAGE_STOP:
            pass

        elif what == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
            pass
184 | |||
def readRpmHeader(ts, filename):
    """ Read an rpm header.

    ts       -- rpm transaction set providing hdrFromFdno()
    filename -- path of the rpm package file
    """
    fd = os.open(filename, os.O_RDONLY)
    try:
        h = ts.hdrFromFdno(fd)
    finally:
        # don't leak the descriptor when header parsing raises
        # (e.g. rpm.error on a corrupt package)
        os.close(fd)
    return h
192 | |||
def splitFilename(filename):
    """ Pass in a standard style rpm fullname

    Return a name, version, release, epoch, arch, e.g.::
        foo-1.0-1.i386.rpm returns foo, 1.0, 1, i386
        1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
    """

    if filename.endswith('.rpm'):
        filename = filename[:-4]

    # work backwards: <epoch:>name-version-release.arch
    arch_dot = filename.rfind('.')
    arch = filename[arch_dot + 1:]

    rel_dash = filename.rfind('-', 0, arch_dot)
    rel = filename[rel_dash + 1:arch_dot]

    ver_dash = filename.rfind('-', 0, rel_dash)
    ver = filename[ver_dash + 1:rel_dash]

    colon = filename.find(':')
    if colon == -1:
        epoch = ''
    else:
        epoch = filename[:colon]

    # colon == -1 conveniently makes this slice start at 0
    name = filename[colon + 1:ver_dash]
    return name, ver, rel, epoch, arch
221 | |||
def getCanonX86Arch(arch):
    """Canonicalize a 32-bit x86 arch string, consulting /proc/cpuinfo
    for the cases uname alone can't distinguish."""
    if arch == "i586":
        # Geode CPUs report i586 but have their own rpm arch.
        cpuinfo = open("/proc/cpuinfo", "r")
        lines = cpuinfo.readlines()
        cpuinfo.close()
        for line in lines:
            if line.startswith("model name") and "Geode(TM)" in line:
                return "geode"
        return arch

    # only athlon vs i686 isn't handled with uname currently
    if arch != "i686":
        return arch

    # if we're i686 and AuthenticAMD, then we should be an athlon
    cpuinfo = open("/proc/cpuinfo", "r")
    lines = cpuinfo.readlines()
    cpuinfo.close()
    for line in lines:
        if line.startswith("vendor") and "AuthenticAMD" in line:
            return "athlon"
        # i686 doesn't guarantee cmov, but we depend on it
        if line.startswith("flags") and "cmov" not in line:
            return "i586"

    return arch
248 | |||
def getCanonX86_64Arch(arch):
    """Canonicalize x86_64 to amd64/ia32e depending on the CPU vendor;
    anything other than x86_64 passes through unchanged."""
    if arch != "x86_64":
        return arch

    cpuinfo = open("/proc/cpuinfo", "r")
    lines = cpuinfo.readlines()
    cpuinfo.close()

    vendor = None
    for line in lines:
        if line.startswith("vendor_id"):
            vendor = line.split(':')[1]
            break

    if vendor is None:
        return arch
    if "Authentic AMD" in vendor or "AuthenticAMD" in vendor:
        return "amd64"
    if "GenuineIntel" in vendor:
        return "ia32e"
    return arch
269 | |||
def getCanonArch():
    """Return the canonical rpm arch of the running machine."""
    machine = os.uname()[4]

    # iX86 (i386/i486/i586/i686 and friends)
    if len(machine) == 4 and machine.startswith("i") and machine.endswith("86"):
        return getCanonX86Arch(machine)

    if machine == "x86_64":
        return getCanonX86_64Arch(machine)

    return machine
280 | |||
# Copy from libsatsolver:poolarch.c, with cleanup
# arch -> colon-separated list of arches whose packages it can install,
# strongest first.
archPolicies = {
    "x86_64": "x86_64:i686:i586:i486:i386",
    "i686": "i686:i586:i486:i386",
    "i586": "i586:i486:i386",
    "ia64": "ia64:i686:i586:i486:i386",
    "armv7tnhl": "armv7tnhl:armv7thl:armv7nhl:armv7hl",
    "armv7thl": "armv7thl:armv7hl",
    "armv7nhl": "armv7nhl:armv7hl",
    "armv7hl": "armv7hl",
    "armv7l": "armv7l:armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
    "armv6l": "armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
    "armv5tejl": "armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
    "armv5tel": "armv5tel:armv5l:armv4tl:armv4l:armv3l",
    "armv5l": "armv5l:armv4tl:armv4l:armv3l",
}

# dict mapping arch -> ( multicompat, best personality, biarch personality )
multilibArches = {
    "x86_64": ( "athlon", "x86_64", "athlon" ),
}

# from yumUtils.py
# arch -> next arch down the compatibility chain, ending in "noarch"
arches = {
    # ia32
    "athlon": "i686",
    "i686": "i586",
    "geode": "i586",
    "i586": "i486",
    "i486": "i386",
    "i386": "noarch",

    # amd64
    "x86_64": "athlon",
    "amd64": "x86_64",
    "ia32e": "x86_64",

    # arm
    "armv7tnhl": "armv7nhl",
    "armv7nhl": "armv7hl",
    "armv7hl": "noarch",
    "armv7l": "armv6l",
    "armv6l": "armv5tejl",
    "armv5tejl": "armv5tel",
    "armv5tel": "noarch",

    #itanium
    "ia64": "noarch",
}
330 | |||
def isMultiLibArch(arch=None):
    """returns true if arch is a multilib arch, false if not

    arch -- arch string to test; defaults to the canonical host arch
    """
    if arch is None:
        arch = getCanonArch()

    # dict.has_key() is deprecated; use the 'in' operator instead
    if arch not in arches:  # or we could check if it is noarch
        return False

    if arch in multilibArches:
        return True

    # the arch one step down the compat chain may be the multilib one
    if arches[arch] in multilibArches:
        return True

    return False
346 | |||
def getBaseArch():
    """Return the base arch for the running machine, e.g. i386 for i686.

    Walks the compat chain in `arches` down to the entry just above
    'noarch'; multilib arches map to their best personality.
    """
    myarch = getCanonArch()
    if myarch not in arches:
        # unknown to the compat table: already a base arch
        return myarch

    if isMultiLibArch(arch=myarch):
        if myarch in multilibArches:
            return myarch
        else:
            return arches[myarch]

    # myarch is known to be in `arches` here, so the redundant
    # has_key() re-check of the original is dropped
    basearch = myarch
    value = arches[basearch]
    while value != 'noarch':
        basearch = value
        value = arches[basearch]

    return basearch
366 | |||
def checkRpmIntegrity(bin_rpm, package):
    """Run `rpm -K --nosignature` on package quietly; return its exit
    status (0 means the digests check out)."""
    return runner.quiet([bin_rpm, "-K", "--nosignature", package])
369 | |||
def checkSig(ts, package):
    """ Takes a transaction set and a package, check it's sigs,
    return 0 if they are all fine
    return 1 if the gpg key can't be found
    return 2 if the header is in someway damaged
    return 3 if the key is not trusted
    return 4 if the pkg is not gpg or pgp signed
    """

    value = 0
    # temporarily enable full signature verification
    currentflags = ts.setVSFlags(0)
    fdno = os.open(package, os.O_RDONLY)
    try:
        hdr = ts.hdrFromFdno(fdno)

    except rpm.error, e:
        # Map known rpm error strings onto our return codes. The first
        # check covers a misspelled variant of the "public key not
        # available" message emitted by some rpm versions.
        if str(e) == "public key not availaiable":
            value = 1
        if str(e) == "public key not available":
            value = 1
        if str(e) == "public key not trusted":
            value = 3
        if str(e) == "error reading package header":
            value = 2
    else:
        # Header read fine; now inspect the signature info itself.
        error, siginfo = getSigInfo(hdr)
        if error == 101:
            os.close(fdno)
            del hdr
            value = 4
        else:
            del hdr

    # fdno may already be closed in the value == 4 path above
    try:
        os.close(fdno)
    except OSError:
        pass

    ts.setVSFlags(currentflags) # put things back like they were before
    return value
410 | |||
def getSigInfo(hdr):
    """ checks signature from an hdr hand back signature information and/or
    an error code

    Returns (error, (sigtype, sigdate, sigid)); error 101 means the
    package carries no gpg/pgp signature at all.
    """

    import locale
    locale.setlocale(locale.LC_ALL, 'C')

    query = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
    siginfo = hdr.sprintf(query)

    if siginfo == '(none)':
        return 101, ('MD5', 'None', 'None')

    sigtype, sigdate, sigid = siginfo.split(',')
    return 0, (sigtype, sigdate, sigid)
432 | |||
def checkRepositoryEULA(name, repo):
    """ This function is to check the EULA file if provided.
    return True: no EULA or accepted
    return False: user declined the EULA

    name -- display name of the repository
    repo -- repo object providing proxy settings, baseurl list and id
    """

    import tempfile
    import shutil
    import urlparse
    import urllib2 as u2
    import httplib
    from mic.utils.errors import CreatorError

    def _check_and_download_url(u2opener, url, savepath):
        # Fetch url (through the opener when one is given) and save it to
        # savepath. Returns savepath, or None when the file isn't there.
        try:
            if u2opener:
                f = u2opener.open(url)
            else:
                f = u2.urlopen(url)
        except u2.HTTPError, httperror:
            if httperror.code in (404, 503):
                return None
            else:
                raise CreatorError(httperror)
        except OSError, oserr:
            if oserr.errno == 2:
                return None
            else:
                raise CreatorError(oserr)
        except IOError, oserr:
            if hasattr(oserr, "reason") and oserr.reason.errno == 2:
                return None
            else:
                raise CreatorError(oserr)
        except u2.URLError, err:
            raise CreatorError(err)
        except httplib.HTTPException, e:
            raise CreatorError(e)

        # save to file
        licf = open(savepath, "w")
        licf.write(f.read())
        licf.close()
        f.close()

        return savepath

    def _pager_file(savepath):
        # Display savepath with the first pager that works; fall back to
        # dumping the contents through msger.

        if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
            pagers = ('w3m', 'links', 'lynx', 'less', 'more')
        else:
            pagers = ('less', 'more')

        file_showed = False
        for pager in pagers:
            cmd = "%s %s" % (pager, savepath)
            try:
                os.system(cmd)
            except OSError:
                continue
            else:
                file_showed = True
                break

        if not file_showed:
            f = open(savepath)
            msger.raw(f.read())
            f.close()
            msger.pause()

    # when proxy needed, make urllib2 follow it
    proxy = repo.proxy
    proxy_username = repo.proxy_username
    proxy_password = repo.proxy_password

    if not proxy:
        proxy = get_proxy_for(repo.baseurl[0])

    handlers = []
    auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
    u2opener = None
    if proxy:
        if proxy_username:
            proxy_netloc = urlparse.urlsplit(proxy).netloc
            if proxy_password:
                proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
            else:
                proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
        else:
            proxy_url = proxy

        proxy_support = u2.ProxyHandler({'http': proxy_url,
                                         'https': proxy_url,
                                         'ftp': proxy_url})
        handlers.append(proxy_support)

    # download all remote files to one temp dir
    baseurl = None
    repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')

    for url in repo.baseurl:
        tmphandlers = handlers[:]

        (scheme, host, path, parm, query, frag) = urlparse.urlparse(url.rstrip('/') + '/')
        if scheme not in ("http", "https", "ftp", "ftps", "file"):
            raise CreatorError("Error: invalid url %s" % url)

        if '@' in host:
            try:
                user_pass, host = host.split('@', 1)
                if ':' in user_pass:
                    user, password = user_pass.split(':', 1)
            except ValueError, e:
                raise CreatorError('Bad URL: %s' % url)

            # NOTE(review): when user_pass contains no ':', `user` and
            # `password` are never bound and the calls below raise
            # NameError -- confirm whether such URLs can reach this code
            msger.verbose("adding HTTP auth: %s, XXXXXXXX" %(user))
            auth_handler.add_password(None, host, user, password)
            tmphandlers.append(auth_handler)
            url = scheme + "://" + host + path + parm + query + frag

        if tmphandlers:
            u2opener = u2.build_opener(*tmphandlers)

        # try to download
        repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
        repo_eula_path = _check_and_download_url(
                                u2opener,
                                repo_eula_url,
                                os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
        if repo_eula_path:
            # found
            baseurl = url
            break

    if not baseurl:
        # no EULA anywhere: nothing to accept
        shutil.rmtree(repo_lic_dir) #cleanup
        return True

    # show the license file
    msger.info('For the software packages in this yum repo:')
    msger.info(' %s: %s' % (name, baseurl))
    msger.info('There is an "End User License Agreement" file that need to be checked.')
    msger.info('Please read the terms and conditions outlined in it and answer the followed qustions.')
    msger.pause()

    _pager_file(repo_eula_path)

    # Asking for the "Accept/Decline"
    if not msger.ask('Would you agree to the terms and conditions outlined in the above End User License Agreement?'):
        msger.warning('Will not install pkgs from this repo.')
        shutil.rmtree(repo_lic_dir) #cleanup
        return False

    # try to find support_info.html for extra infomation
    repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
    repo_info_path = _check_and_download_url(
                            u2opener,
                            repo_info_url,
                            os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
    if repo_info_path:
        msger.info('There is one more file in the repo for additional support information, please read it')
        msger.pause()
        _pager_file(repo_info_path)

    #cleanup
    shutil.rmtree(repo_lic_dir)
    return True
diff --git a/scripts/lib/mic/utils/runner.py b/scripts/lib/mic/utils/runner.py new file mode 100644 index 0000000000..fded3c93fa --- /dev/null +++ b/scripts/lib/mic/utils/runner.py | |||
@@ -0,0 +1,109 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2011 Intel, Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import subprocess | ||
20 | |||
21 | from mic import msger | ||
22 | |||
def runtool(cmdln_or_args, catch=1):
    """ wrapper for most of the subprocess calls
    input:
        cmdln_or_args: can be both args and cmdln str (shell=True)
        catch: 0, quitely run
               1, only STDOUT
               2, only STDERR
               3, both STDOUT and STDERR
    return:
        (rc, output)
        if catch==0: the output will always None
    """

    if catch not in (0, 1, 2, 3):
        # invalid catch selection, will cause exception, that's good
        return None

    if isinstance(cmdln_or_args, list):
        cmd = cmdln_or_args[0]
        shell = False
    else:
        import shlex
        cmd = shlex.split(cmdln_or_args)[0]
        shell = True

    # for catch != 3 at least one stream is discarded into /dev/null
    if catch != 3:
        dev_null = os.open("/dev/null", os.O_WRONLY)

    if catch == 0:
        sout = dev_null
        serr = dev_null
    elif catch == 1:
        sout = subprocess.PIPE
        serr = dev_null
    elif catch == 2:
        sout = dev_null
        serr = subprocess.PIPE
    elif catch == 3:
        # merge stderr into stdout so one pipe carries everything
        sout = subprocess.PIPE
        serr = subprocess.STDOUT

    try:
        p = subprocess.Popen(cmdln_or_args, stdout=sout,
                             stderr=serr, shell=shell)
        (sout, serr) = p.communicate()
        # combine stdout and stderr, filter None out
        out = ''.join(filter(None, [sout, serr]))
    except OSError, e:
        if e.errno == 2:
            # [Errno 2] No such file or directory
            # NOTE(review): presumably msger.error() terminates the
            # process; otherwise `p`/`out` below would be unbound -- confirm
            msger.error('Cannot run command: %s, lost dependency?' % cmd)
        else:
            raise # relay
    finally:
        if catch != 3:
            os.close(dev_null)

    return (p.returncode, out)
81 | |||
def show(cmdln_or_args):
    """Run a command capturing stdout+stderr and report both the command
    and its output through msger.verbose; return the exit code."""
    rc, out = runtool(cmdln_or_args, catch=3)

    if isinstance(cmdln_or_args, list):
        cmd = ' '.join(cmdln_or_args)
    else:
        cmd = cmdln_or_args

    if out:
        out = out.strip()

    msg = 'running command: "%s"' % cmd
    if out:
        boxed = ['', ' +----------------']
        boxed.extend(' | %s' % line for line in out.splitlines())
        boxed.append(' +----------------')
        msg += ', with output::' + '\n'.join(boxed)

    msger.verbose(msg)
    return rc
103 | |||
def outs(cmdln_or_args, catch=1):
    # get the outputs of tools
    # NOTE(review): with catch=0 runtool returns (rc, None) and .strip()
    # would raise AttributeError -- callers must pass catch >= 1
    return runtool(cmdln_or_args, catch)[1].strip()
107 | |||
def quiet(cmdln_or_args):
    # run fully silenced (catch=0) and return only the exit code
    return runtool(cmdln_or_args, catch=0)[0]