diff options
Diffstat (limited to 'scripts/lib/mic/utils/misc.py')
-rw-r--r-- | scripts/lib/mic/utils/misc.py | 1067 |
1 files changed, 1067 insertions, 0 deletions
diff --git a/scripts/lib/mic/utils/misc.py b/scripts/lib/mic/utils/misc.py new file mode 100644 index 0000000000..63024346a9 --- /dev/null +++ b/scripts/lib/mic/utils/misc.py | |||
@@ -0,0 +1,1067 @@ | |||
1 | #!/usr/bin/python -tt | ||
2 | # | ||
3 | # Copyright (c) 2010, 2011 Intel Inc. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms of the GNU General Public License as published by the Free | ||
7 | # Software Foundation; version 2 of the License | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, but | ||
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | # for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | import os | ||
19 | import sys | ||
20 | import time | ||
21 | import tempfile | ||
22 | import re | ||
23 | import shutil | ||
24 | import glob | ||
25 | import hashlib | ||
26 | import subprocess | ||
27 | import platform | ||
28 | import traceback | ||
29 | |||
30 | |||
31 | try: | ||
32 | import sqlite3 as sqlite | ||
33 | except ImportError: | ||
34 | import sqlite | ||
35 | |||
36 | try: | ||
37 | from xml.etree import cElementTree | ||
38 | except ImportError: | ||
39 | import cElementTree | ||
40 | xmlparse = cElementTree.parse | ||
41 | |||
42 | from mic import msger | ||
43 | from mic.utils.errors import CreatorError, SquashfsError | ||
44 | from mic.utils.fs_related import find_binary_path, makedirs | ||
45 | from mic.utils.grabber import myurlgrab | ||
46 | from mic.utils.proxy import get_proxy_for | ||
47 | from mic.utils import runner | ||
48 | from mic.utils import rpmmisc | ||
49 | |||
50 | |||
# Pattern for binary rpm strings such as "name.arch version-release".
RPM_RE = re.compile(r"(.*)\.(.*) (.*)-(.*)")
RPM_FMT = "%(name)s.%(arch)s %(version)s-%(release)s"
# Pattern for source rpm file names such as "name-version-release.src.rpm".
# The dots before "src" and "rpm" are escaped so they match literal dots
# instead of any character.
SRPM_RE = re.compile(r"(.*)-(\d+.*)-(\d+\.\d+)\.src\.rpm")
55 | |||
def build_name(kscfg, release=None, prefix = None, suffix = None):
    """Derive an image name string from a kickstart file path.

    The base name is the kickstart file name, stripped of its directory
    and extension, decorated with the given prefix and suffix.

    kscfg   -- path to a kickstart file
    release -- when not None, forces an empty suffix
    prefix  -- text prepended as "<prefix>-"; None means no prefix
    suffix  -- text appended as "-<suffix>"; None means a YYYYMMDDHHMM
               timestamp is appended
    """
    stem = os.path.basename(kscfg)
    dot = stem.rfind('.')
    if dot >= 0:
        stem = stem[:dot]

    if release is not None:
        suffix = ""
    if prefix is None:
        prefix = ""
    if suffix is None:
        suffix = time.strftime("%Y%m%d%H%M")

    # Avoid doubling the prefix when the ks name already carries it.
    if stem.startswith(prefix):
        stem = stem[len(prefix):]

    parts = []
    if prefix:
        parts.append("%s-" % prefix)
    parts.append(stem)
    if suffix:
        parts.append("-%s" % suffix)
    return "".join(parts)
93 | |||
def get_distro():
    """Detect the linux distribution, supporting "meego" and friends.

    Returns a (dist, ver, id) tuple; items are empty strings when
    detection is impossible on the running platform.
    """

    support_dists = ('SuSE',
                     'debian',
                     'fedora',
                     'redhat',
                     'centos',
                     'meego',
                     'moblin',
                     'tizen')

    dist = ver = id = ''
    # platform.linux_distribution() and platform.dist() were both removed
    # in Python 3.8; probe for them so callers always get a 3-tuple
    # instead of an AttributeError.
    for probe in ('linux_distribution', 'dist'):
        func = getattr(platform, probe, None)
        if func is None:
            continue
        try:
            (dist, ver, id) = func(supported_dists = support_dists)
            break
        except:
            continue

    return (dist, ver, id)
114 | |||
def get_distro_str():
    """Return a single human-readable string describing the distro."""
    (dist, ver, id) = get_distro()

    if not dist:
        return 'Unknown Linux Distro'
    parts = [item.strip() for item in (dist, ver, id)]
    return ' '.join(parts).strip()
125 | |||
# Path of the udev rule file written by hide_loopdev_presentation();
# stays None until a rule has been installed (module-level state shared
# with unhide_loopdev_presentation()).
_LOOP_RULE_PTH = None
127 | |||
def hide_loopdev_presentation():
    """Install a udev rule hiding loop devices from udisks.

    The chosen rule path is remembered in the module-global
    _LOOP_RULE_PTH so unhide_loopdev_presentation() can undo it later.
    Failures are swallowed on purpose: this is a best-effort tweak.
    """
    global _LOOP_RULE_PTH

    rule_name = "80-prevent-loop-present.rules"
    candidate_dirs = ['/usr/lib/udev/rules.d/',
                      '/lib/udev/rules.d/',
                      '/etc/udev/rules.d/']

    # The last existing rules directory wins.
    for candidate in candidate_dirs:
        if os.path.exists(candidate):
            _LOOP_RULE_PTH = os.path.join(candidate, rule_name)

    if not _LOOP_RULE_PTH:
        return

    try:
        with open(_LOOP_RULE_PTH, 'w') as rule_file:
            rule_file.write(
                'KERNEL=="loop*", ENV{UDISKS_PRESENTATION_HIDE}="1"')
        runner.quiet('udevadm trigger')
    except:
        pass
152 | |||
def unhide_loopdev_presentation():
    """Remove the udev rule written by hide_loopdev_presentation().

    A no-op when no rule was installed; errors are deliberately ignored
    (best-effort cleanup).
    """
    global _LOOP_RULE_PTH

    rule_path = _LOOP_RULE_PTH
    if not rule_path:
        return

    try:
        os.unlink(rule_path)
        runner.quiet('udevadm trigger')
    except:
        pass
164 | |||
def extract_rpm(rpmfile, targetdir):
    """Extract the payload of an rpm package into 'targetdir'.

    Pipes rpm2cpio output into 'cpio -idv' while temporarily chdir'ed
    into the target directory.

    rpmfile   -- path of the rpm package to unpack
    targetdir -- existing directory to extract into
    """
    rpm2cpio = find_binary_path("rpm2cpio")
    cpio = find_binary_path("cpio")

    olddir = os.getcwd()
    os.chdir(targetdir)
    try:
        msger.verbose("Extract rpm file with cpio: %s" % rpmfile)
        p1 = subprocess.Popen([rpm2cpio, rpmfile], stdout=subprocess.PIPE)
        p2 = subprocess.Popen([cpio, "-idv"], stdin=p1.stdout,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Close our copy of the pipe so p1 gets SIGPIPE if p2 exits early.
        p1.stdout.close()
        (sout, serr) = p2.communicate()
        p1.wait()
        msger.verbose(sout or serr)
    finally:
        # Always restore the previous working directory, even when the
        # extraction pipeline raises (the old code leaked the chdir).
        os.chdir(olddir)
180 | |||
def compressing(fpath, method):
    """Compress the file at 'fpath' in place.

    method -- "gz" (gzip) or "bz2" (bzip2); anything else, or a failing
              compressor, raises CreatorError.
    """
    comp_map = {
        "gz": "gzip",
        "bz2": "bzip2"
    }
    try:
        tool = comp_map[method]
    except KeyError:
        raise CreatorError("Unsupport compress format: %s, valid values: %s"
                           % (method, ','.join(comp_map.keys())))
    cmd = find_binary_path(tool)
    if runner.show([cmd, "-f", fpath]):
        raise CreatorError("Failed to %s file: %s" % (tool, fpath))
193 | |||
def taring(dstfile, target):
    """Pack 'target' into a tarball at 'dstfile'.

    A directory contributes its top-level entries; a file is stored
    under its base name.  The compression (none, gzip or bzip2) is
    picked from the dstfile extension; compressed archives are created
    by writing a plain tar first and compressing it afterwards.
    """
    import tarfile
    basen, ext = os.path.splitext(dstfile)
    comp = {".tar": None,
            ".gz": "gz",    # for .tar.gz
            ".bz2": "bz2",  # for .tar.bz2
            ".tgz": "gz",
            ".tbz": "bz2"}[ext]

    # Decide where the intermediate (or final) plain tar lives.
    if not comp:
        tarpath = dstfile
    elif basen.endswith(".tar"):
        tarpath = basen
    else:
        tarpath = basen + ".tar"

    archive = tarfile.open(tarpath, 'w')
    if os.path.isdir(target):
        for entry in os.listdir(target):
            archive.add(os.path.join(target, entry), entry)
    else:
        archive.add(target, os.path.basename(target))
    archive.close()

    if comp:
        compressing(tarpath, comp)
        # compressing() appends the suffix itself; rename when dstfile
        # carries a short extension such as .tgz or .tbz
        if not basen.endswith(".tar"):
            shutil.move("%s.%s" % (tarpath, comp), dstfile)
224 | |||
def ziping(dstfile, target):
    """Pack 'target' into a zip archive at 'dstfile'.

    For a directory only its regular top-level files are added
    (subdirectories are skipped); a single file is stored under its
    base name.
    """
    import zipfile
    archive = zipfile.ZipFile(dstfile, 'w', compression=zipfile.ZIP_DEFLATED)
    if not os.path.isdir(target):
        archive.write(target, os.path.basename(target), zipfile.ZIP_DEFLATED)
    else:
        for entry in os.listdir(target):
            fullpath = os.path.join(target, entry)
            if os.path.isfile(fullpath):
                archive.write(fullpath, entry, zipfile.ZIP_DEFLATED)
    archive.close()
237 | |||
# Dispatch table mapping a (normalized) archive extension to the routine
# that creates it; consulted by packing() after it has rewritten ".gz" /
# ".bz2" into ".tar.gz" / ".tar.bz2" for *.tar.* names.
pack_formats = {
    ".tar": taring,
    ".tar.gz": taring,
    ".tar.bz2": taring,
    ".tgz": taring,
    ".tbz": taring,
    ".zip": ziping,
}
246 | |||
def packing(dstfile, target):
    """Pack 'target' into 'dstfile', picking the format from its extension.

    Raises CreatorError for extensions not listed in pack_formats.
    """
    (base, ext) = os.path.splitext(dstfile)
    # Normalize two-part extensions such as ".tar.gz".
    if base.endswith(".tar") and ext in (".gz", ".bz2"):
        ext = ".tar" + ext
    try:
        handler = pack_formats[ext]
    except KeyError:
        raise CreatorError("Unsupport pack format: %s, valid values: %s"
                           % (ext, ','.join(pack_formats.keys())))
    handler(dstfile, target)
257 | |||
def human_size(size):
    """Render a byte count as a short human-readable string (e.g. "1.5K").

    Non-positive sizes collapse to "0M".
    """
    if size <= 0:
        return "0M"
    import math
    units = ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
    exponent = int(math.log(size, 1024))
    mantissa = float(size / math.pow(1024, exponent))
    return "{0:.1f}{1:s}".format(mantissa, units[exponent])
269 | |||
def get_block_size(file_obj):
    """ Return the block size of the file-system backing 'file_obj'.
    Errors are reported through the 'IOError' exception. """

    from fcntl import ioctl
    import struct

    # FIGETBSZ (ioctl number 2) asks the host file-system for the block
    # size of the file; the kernel overwrites the packed unsigned int.
    request = struct.pack('I', 0)
    answer = ioctl(file_obj, 2, request)
    return struct.unpack('I', answer)[0]
281 | |||
def check_space_pre_cp(src, dst):
    """Raise CreatorError when 'dst' lacks room for a copy of 'src'.

    get_file_size() reports MB, so the source size is converted to
    bytes before comparing with the free space on the destination.
    """
    needed = get_file_size(src) * 1024 * 1024
    available = get_filesystem_avail(dst)
    if needed > available:
        raise CreatorError("space on %s(%s) is not enough for about %s files"
                           % (dst, human_size(available), human_size(needed)))
292 | |||
def calc_hashes(file_path, hash_names, start = 0, end = None):
    """ Calculate hashes over a byte range of a file.

    file_path  -- path of the file to hash
    hash_names -- iterable of hashlib algorithm names (e.g. 'md5')
    start, end -- byte offsets delimiting the hashed region; 'end'
                  defaults to the file size
    Returns the hex digests in the same order as 'hash_names'.
    """
    if end is None:
        end = os.path.getsize(file_path)

    hashers = [hashlib.new(name) for name in hash_names]

    remaining = end - start
    with open(file_path, "rb") as src:
        src.seek(start)
        while remaining > 0:
            # Never read past 'end'; 64 KiB chunks keep memory flat.
            data = src.read(min(65536, remaining))
            if not data:
                break
            for hasher in hashers:
                hasher.update(data)
            remaining -= len(data)

    return [hasher.hexdigest() for hasher in hashers]
328 | |||
def get_md5sum(fpath):
    """Return the md5 hex digest of the whole file at 'fpath'."""
    (digest, ) = calc_hashes(fpath, ('md5', ))
    return digest
331 | |||
332 | |||
def normalize_ksfile(ksconf, release, arch):
    '''
    Return the name of a normalized ks file in which macro variables
    @BUILD_ID@ and @ARCH@ are replace with real values.

    The original ks file is returned if no special macro is used, otherwise
    a temp file is created and returned, which will be deleted when program
    exits normally.

    ksconf  -- path to the kickstart file
    release -- build id substituted for @BUILD_ID@; falsy means "latest"
    arch    -- target arch substituted for @ARCH@; falsy or i?86 maps
               to "ia32"
    '''

    if not release:
        release = "latest"
    # All ia32 variants (i386, i586, i686, ...) collapse to "ia32".
    if not arch or re.match(r'i.86', arch):
        arch = "ia32"

    with open(ksconf) as f:
        ksc = f.read()

    # Fast path: nothing to substitute, hand back the original file.
    if "@ARCH@" not in ksc and "@BUILD_ID@" not in ksc:
        return ksconf

    msger.info("Substitute macro variable @BUILD_ID@/@ARCH@ in ks: %s" % ksconf)
    ksc = ksc.replace("@ARCH@", arch)
    ksc = ksc.replace("@BUILD_ID@", release)

    # NOTE: 'ksconf' is rebound here to the temp file path that is
    # returned to the caller (and captured by remove_temp_ks below).
    fd, ksconf = tempfile.mkstemp(prefix=os.path.basename(ksconf))
    os.write(fd, ksc)
    os.close(fd)

    msger.debug('normalized ks file:%s' % ksconf)

    # Clean the temp file up on normal interpreter exit; uses the
    # Python-2-only "except ExcType, name" syntax.
    def remove_temp_ks():
        try:
            os.unlink(ksconf)
        except OSError, err:
            msger.warning('Failed to remove temp ks file:%s:%s' % (ksconf, err))

    import atexit
    atexit.register(remove_temp_ks)

    return ksconf
374 | |||
375 | |||
def _check_mic_chroot(rootdir):
    """Warn when 'rootdir' does not look like a MeeGo/Tizen chroot.

    Looks for a known *-release file and an installed kernel image;
    problems are reported as warnings only (never raises).
    """
    chroot = rootdir.rstrip('/')

    release_files = [chroot + relpath
                     for relpath in ("/etc/moblin-release",
                                     "/etc/meego-release",
                                     "/etc/tizen-release")]

    if not any(os.path.exists(path) for path in release_files):
        msger.warning("Dir %s is not a MeeGo/Tizen chroot env" % rootdir)

    if not glob.glob(rootdir + "/boot/vmlinuz-*"):
        msger.warning("Failed to find kernel module under %s" % rootdir)

    return
391 | |||
def selinux_check(arch, fstypes):
    """Refuse image configurations that break under SELinux enforcing.

    arch    -- target architecture string (may be None/empty)
    fstypes -- iterable of filesystem types used by the image

    Raises CreatorError for arm images or btrfs filesystems while
    SELinux reports "Enforcing"; silently returns when the
    'getenforce' tool is not installed.
    """
    try:
        getenforce = find_binary_path('getenforce')
    except CreatorError:
        return

    selinux_status = runner.outs([getenforce])
    if arch and arch.startswith("arm") and selinux_status == "Enforcing":
        raise CreatorError("Can't create arm image if selinux is enabled, "
                           "please run 'setenforce 0' to disable selinux")

    # A plain membership test replaces the old filter() call whose
    # result would be a lazily-evaluated, always-truthy object on
    # Python 3 (equivalent to the truthy-list check on Python 2).
    if 'btrfs' in fstypes and selinux_status == "Enforcing":
        raise CreatorError("Can't create btrfs image if selinux is enabled,"
                           " please run 'setenforce 0' to disable selinux")
407 | |||
def get_image_type(path):
    """Guess the image type of 'path'.

    A directory is treated as a chroot-style "fs" image; otherwise the
    file extension is consulted first, then the file contents (the
    VirtualBox header, then the output of file(1)).

    Raises CreatorError when nothing matches.
    """
    if os.path.isdir(path):
        _check_mic_chroot(path)
        return "fs"

    maptab = {
        "tar": "loop",
        "raw":"raw",
        "vmdk":"vmdk",
        "vdi":"vdi",
        "iso":"livecd",
        "usbimg":"liveusb",
    }

    match = re.search("(?<=\.)\w+$", path)
    extension = match.group(0) if match else None
    if extension in maptab:
        return maptab[extension]

    with open(path, "rb") as img:
        file_header = img.read(1024)
    vdi_flag = "<<< Sun VirtualBox Disk Image >>>"
    if file_header[0:len(vdi_flag)] == vdi_flag:
        return maptab["vdi"]

    # Fall back to file(1) sniffing; the first matching pattern wins,
    # so the more specific patterns come first.
    output = runner.outs(['file', path])
    sniff_rules = (
        (r".*ISO 9660 CD-ROM filesystem.*(bootable).*", maptab["iso"]),
        (r".*x86 boot sector.*active.*", maptab["usbimg"]),
        (r".*x86 boot sector.*", maptab["raw"]),
        (r".*VMware. disk image.*", maptab["vmdk"]),
        (r".*Linux.*ext3 filesystem data.*", "ext3fsimg"),
        (r".*Linux.*ext4 filesystem data.*", "ext4fsimg"),
        (r".*BTRFS.*", "btrfsimg"),
    )
    for pattern, image_type in sniff_rules:
        if re.compile(pattern).match(output):
            return image_type

    raise CreatorError("Cannot detect the type of image: %s" % path)
464 | |||
465 | |||
def get_file_size(filename):
    """ Return the size of 'filename' in MB.

    Takes the larger of the apparent size (du -b) and the actual disk
    usage, both rounded up to 1M blocks by du. """

    def _du_megabytes(extra_opts):
        # One du(1) invocation; raises CreatorError on failure.
        cmd = ['du', "-s"] + extra_opts + ["-B", "1M", filename]
        rc, duOutput = runner.runtool(cmd)
        if rc != 0:
            raise CreatorError("Failed to run: %s" % ' '.join(cmd))
        return int(duOutput.split()[0])

    return max(_du_megabytes(["-b"]), _du_megabytes([]))
481 | |||
482 | |||
def get_filesystem_avail(fs):
    """Return the free space, in bytes, of the filesystem holding 'fs'."""
    stats = os.statvfs(fs)
    return stats.f_bsize * stats.f_bavail
486 | |||
def convert_image(srcimg, srcfmt, dstimg, dstfmt):
    """Convert a disk image to raw format.

    srcimg, srcfmt -- source image path and its format ("vmdk" or "vdi")
    dstimg, dstfmt -- destination image path and format (must be "raw")

    Raises CreatorError for unsupported formats or a failing converter.
    """
    # Only raw output is implemented.
    if dstfmt != "raw":
        raise CreatorError("Invalid destination image format: %s" % dstfmt)
    msger.debug("converting %s image to %s" % (srcimg, dstimg))
    if srcfmt == "vmdk":
        path = find_binary_path("qemu-img")
        argv = [path, "convert", "-f", "vmdk", srcimg, "-O", dstfmt, dstimg]
    elif srcfmt == "vdi":
        path = find_binary_path("VBoxManage")
        argv = [path, "internalcommands", "converttoraw", srcimg, dstimg]
    else:
        # message typo fixed: "soure" -> "source"
        raise CreatorError("Invalid source image format: %s" % srcfmt)

    rc = runner.show(argv)
    if rc != 0:
        raise CreatorError("Unable to convert disk to %s" % dstfmt)
    msger.debug("convert successful")
506 | |||
def uncompress_squashfs(squashfsimg, outdir):
    """Unpack a squashfs image into 'outdir' via unsquashfs.

    Raises SquashfsError when the tool exits non-zero.
    """
    unsquashfs = find_binary_path("unsquashfs")
    if runner.show([unsquashfs, "-d", outdir, squashfsimg]) != 0:
        raise SquashfsError("Failed to uncompress %s." % squashfsimg)
514 | |||
def mkdtemp(dir = "/var/tmp", prefix = "mic-tmp-"):
    """ Create a temp directory under 'dir' and return its path.

    The parent directory is created first when missing.  Note the
    parameter deliberately shadows the 'dir' builtin to mirror
    tempfile.mkdtemp's keyword.
    FIXME: use the dir in mic.conf instead """

    makedirs(dir)
    return tempfile.mkdtemp(dir = dir, prefix = prefix)
520 | |||
def get_repostrs_from_ks(ks):
    """Collect repo definitions from a parsed kickstart object.

    ks -- an object exposing ks.handler.repo.repoList (pykickstart style)

    Returns a list of dicts, one per kickstart repo, holding only the
    attributes that are present and truthy; a repo without a name gets
    a temporary one derived from the md5 of its baseurl.
    """
    def _get_temp_reponame(baseurl):
        return "%s" % hashlib.md5(baseurl).hexdigest()

    # NOTE: the original tuple listed 'proxypasswd' twice; the duplicate
    # has been removed.
    known_attrs = ('name',
                   'baseurl',
                   'mirrorlist',
                   'includepkgs', # val is list
                   'excludepkgs', # val is list
                   'cost',    # int
                   'priority',# int
                   'save',
                   'proxy',
                   'proxyuser',
                   'proxypasswd',
                   'debuginfo',
                   'source',
                   'gpgkey',
                   'ssl_verify')

    kickstart_repos = []

    for repodata in ks.handler.repo.repoList:
        repo = {}
        for attr in known_attrs:
            if hasattr(repodata, attr) and getattr(repodata, attr):
                repo[attr] = getattr(repodata, attr)

        if 'name' not in repo:
            repo['name'] = _get_temp_reponame(repodata.baseurl)

        kickstart_repos.append(repo)

    return kickstart_repos
556 | |||
def _get_uncompressed_data_from_url(url, filename, proxies):
    """Download 'url' to 'filename' and decompress .gz/.bz2 payloads.

    Returns the path of the (possibly decompressed) local file.
    """
    filename = myurlgrab(url, filename, proxies)
    suffix = None
    if filename.endswith(".gz"):
        suffix = ".gz"
        runner.quiet(['gunzip', "-f", filename])
    elif filename.endswith(".bz2"):
        suffix = ".bz2"
        runner.quiet(['bunzip2', "-f", filename])
    if suffix:
        # Strip the suffix from the end only; the old str.replace()
        # removed the first occurrence anywhere in the path (wrong for
        # names like "a.gz.d/file.gz").
        filename = filename[:-len(suffix)]
    return filename
569 | |||
def _get_metadata_from_repo(baseurl, proxies, cachedir, reponame, filename,
                            sumtype=None, checksum=None):
    """Fetch one repodata file, reusing the cached copy when its
    checksum still matches.

    baseurl/filename  -- remote repo location and relative file path
    cachedir/reponame -- local cache layout (<cachedir>/<reponame>/...)
    sumtype/checksum  -- optional "<sumtype>sum" tool name and expected
                         digest used to validate the cached copy

    Returns the local (uncompressed) file path.
    """
    url = os.path.join(baseurl, filename)
    filename_tmp = str("%s/%s/%s" % (cachedir, reponame,
                                     os.path.basename(filename)))
    # The cache stores the uncompressed name.
    root, ext = os.path.splitext(filename_tmp)
    cached = root if ext in (".gz", ".bz2") else filename_tmp

    if sumtype and checksum and os.path.exists(cached):
        try:
            sumcmd = find_binary_path("%ssum" % sumtype)
        except:
            file_checksum = None
        else:
            file_checksum = runner.outs([sumcmd, cached]).split()[0]

        if file_checksum and file_checksum == checksum:
            return cached

    return _get_uncompressed_data_from_url(url, filename_tmp, proxies)
590 | |||
def get_metadata_from_repos(repos, cachedir):
    """Download and parse repomd.xml for every repo and fetch the
    primary/patterns/comps metadata files it references.

    repos    -- list of repo dicts (as returned by get_repostrs_from_ks)
    cachedir -- local cache root; one subdirectory is used per repo

    Returns a list of dicts with the local metadata file paths per repo;
    repos whose repomd lists no primary data are skipped.  Raises
    CreatorError when a repomd.xml cannot be parsed.
    """
    my_repo_metadata = []
    for repo in repos:
        reponame = repo['name']
        baseurl = repo['baseurl']


        # Prefer the per-repo proxy, else whatever the global proxy
        # configuration yields for this URL.
        if 'proxy' in repo:
            proxy = repo['proxy']
        else:
            proxy = get_proxy_for(baseurl)

        proxies = None
        if proxy:
            # Keyed by URL scheme, e.g. {"http": "<proxy>"}.
            proxies = {str(baseurl.split(":")[0]):str(proxy)}

        makedirs(os.path.join(cachedir, reponame))
        url = os.path.join(baseurl, "repodata/repomd.xml")
        filename = os.path.join(cachedir, reponame, 'repomd.xml')
        repomd = myurlgrab(url, filename, proxies)
        try:
            root = xmlparse(repomd)
        except SyntaxError:
            raise CreatorError("repomd.xml syntax error.")

        # Extract the xml namespace prefix, e.g. "{http://...}".
        ns = root.getroot().tag
        ns = ns[0:ns.rindex("}")+1]

        filepaths = {}
        checksums = {}
        sumtypes = {}

        # Optional: patterns metadata entry.
        for elm in root.getiterator("%sdata" % ns):
            if elm.attrib["type"] == "patterns":
                filepaths['patterns'] = elm.find("%slocation" % ns).attrib['href']
                checksums['patterns'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['patterns'] = elm.find("%sopen-checksum" % ns).attrib['type']
                break

        # Optional: package group (comps) entry.
        for elm in root.getiterator("%sdata" % ns):
            if elm.attrib["type"] in ("group_gz", "group"):
                filepaths['comps'] = elm.find("%slocation" % ns).attrib['href']
                checksums['comps'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['comps'] = elm.find("%sopen-checksum" % ns).attrib['type']
                break

        # Mandatory: primary package metadata, either sqlite or xml.
        primary_type = None
        for elm in root.getiterator("%sdata" % ns):
            if elm.attrib["type"] in ("primary_db", "primary"):
                primary_type = elm.attrib["type"]
                filepaths['primary'] = elm.find("%slocation" % ns).attrib['href']
                checksums['primary'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['primary'] = elm.find("%sopen-checksum" % ns).attrib['type']
                break

        if not primary_type:
            # No usable package metadata in this repo; skip it entirely.
            continue

        # Download each referenced metadata file (checksum-aware cache).
        for item in ("primary", "patterns", "comps"):
            if item not in filepaths:
                filepaths[item] = None
                continue
            if not filepaths[item]:
                continue
            filepaths[item] = _get_metadata_from_repo(baseurl,
                                                      proxies,
                                                      cachedir,
                                                      reponame,
                                                      filepaths[item],
                                                      sumtypes[item],
                                                      checksums[item])

        """ Get repo key """
        try:
            repokey = _get_metadata_from_repo(baseurl,
                                              proxies,
                                              cachedir,
                                              reponame,
                                              "repodata/repomd.xml.key")
        except CreatorError:
            # The signing key is optional; record its absence only.
            repokey = None
            msger.debug("\ncan't get %s/%s" % (baseurl, "repodata/repomd.xml.key"))

        my_repo_metadata.append({"name":reponame,
                                 "baseurl":baseurl,
                                 "repomd":repomd,
                                 "primary":filepaths['primary'],
                                 "cachedir":cachedir,
                                 "proxies":proxies,
                                 "patterns":filepaths['patterns'],
                                 "comps":filepaths['comps'],
                                 "repokey":repokey})

    return my_repo_metadata
685 | |||
def get_rpmver_in_repo(repometadata):
    """Return the newest version of the 'rpm' package found in the repos.

    repometadata -- list of repo dicts whose "primary" entry points at
                    either a primary.xml or a primary.sqlite file

    Returns the version string from the first repo that carries 'rpm',
    or None when no repo does.
    """
    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]

            versionlist = []
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sname" % ns).text == 'rpm':
                    for node in elm.getchildren():
                        if node.tag == "%sversion" % ns:
                            versionlist.append(node.attrib['ver'])

            if versionlist:
                # max() with a numeric key replaces the Python-2-only
                # reversed(sorted(...)).next() construct.
                return max(versionlist,
                           key=lambda ver: [int(p) for p in ver.split('.')])

        elif repo["primary"].endswith(".sqlite"):
            con = sqlite.connect(repo["primary"])
            try:
                # NOTE: ORDER BY on the text column is lexicographic,
                # matching the original behaviour.
                for row in con.execute("select version from packages where "
                                       "name=\"rpm\" ORDER by version DESC"):
                    return row[0]
            finally:
                # The old code leaked the connection when no row matched.
                con.close()

    return None
714 | |||
def get_arch(repometadata):
    """Collect the package architectures offered by the repos.

    repometadata -- list of repo dicts with a "primary" metadata path
                    (primary.xml or primary.sqlite)

    Returns (uniq_arch, archlist): 'archlist' is every non-noarch,
    non-src arch seen; 'uniq_arch' keeps, per compatible family from
    rpmmisc.archPolicies, only the arch whose policy covers the others.
    """
    archlist = []
    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            # Extract the xml namespace prefix, e.g. "{http://...}".
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sarch" % ns).text not in ("noarch", "src"):
                    arch = elm.find("%sarch" % ns).text
                    if arch not in archlist:
                        archlist.append(arch)
        elif repo["primary"].endswith(".sqlite"):
            con = sqlite.connect(repo["primary"])
            for row in con.execute("select arch from packages where arch not in (\"src\", \"noarch\")"):
                if row[0] not in archlist:
                    archlist.append(row[0])

            con.close()

    uniq_arch = []
    # Deduplicate compatible arches: skip a candidate that an already
    # kept arch's policy covers; let a candidate replace (or evict)
    # kept arches that its own policy covers.
    for i in range(len(archlist)):
        if archlist[i] not in rpmmisc.archPolicies.keys():
            continue
        need_append = True
        j = 0
        while j < len(uniq_arch):
            if archlist[i] in rpmmisc.archPolicies[uniq_arch[j]].split(':'):
                # Covered by a kept arch: drop the candidate.
                need_append = False
                break
            if uniq_arch[j] in rpmmisc.archPolicies[archlist[i]].split(':'):
                if need_append:
                    uniq_arch[j] = archlist[i]
                    need_append = False
                else:
                    # Candidate already placed earlier; remove the now
                    # redundant entry (no j += 1: the list shifted).
                    uniq_arch.remove(uniq_arch[j])
                    continue
            j += 1
        if need_append:
            uniq_arch.append(archlist[i])

    return uniq_arch, archlist
757 | |||
def get_package(pkg, repometadata, arch = None):
    """Download the binary package 'pkg' from the repos.

    pkg          -- package name
    repometadata -- list of repo metadata dicts (see
                    get_metadata_from_repos)
    arch         -- target arch; when it has an rpmmisc.archPolicies
                    entry, every compatible arch plus "noarch" is
                    accepted; falsy means "any arch" for sqlite repos

    Returns the local path of the (integrity-checked) rpm file, or None
    when no repo carries the package.
    """
    ver = ""
    target_repo = None
    pkgpath = None
    if not arch:
        arches = []
    elif arch not in rpmmisc.archPolicies:
        arches = [arch]
    else:
        arches = rpmmisc.archPolicies[arch].split(':')
        arches.append('noarch')

    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sname" % ns).text == pkg:
                    if elm.find("%sarch" % ns).text in arches:
                        version = elm.find("%sversion" % ns)
                        tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
                        if tmpver > ver:
                            ver = tmpver
                            location = elm.find("%slocation" % ns)
                            pkgpath = "%s" % location.attrib['href']
                            target_repo = repo
                        break
        if repo["primary"].endswith(".sqlite"):
            con = sqlite.connect(repo["primary"])
            # Parameterized queries instead of string interpolation so
            # package/arch values cannot be injected into the SQL.
            if arch:
                placeholders = ",".join("?" * len(arches))
                sql = ('select version, release, location_href from packages '
                       'where name = ? and arch IN (%s)' % placeholders)
                rows = con.execute(sql, [pkg] + arches)
            else:
                rows = con.execute('select version, release, location_href '
                                   'from packages where name = ?', (pkg,))
            # As in the original, only the first returned row is
            # considered per repo.
            for row in rows:
                tmpver = "%s-%s" % (row[0], row[1])
                if tmpver > ver:
                    ver = tmpver
                    pkgpath = "%s" % row[2]
                    target_repo = repo
                break
            con.close()
    if target_repo:
        makedirs("%s/packages/%s" % (target_repo["cachedir"], target_repo["name"]))
        url = os.path.join(target_repo["baseurl"], pkgpath)
        filename = str("%s/packages/%s/%s" % (target_repo["cachedir"], target_repo["name"], os.path.basename(pkgpath)))
        if os.path.exists(filename):
            # Reuse the cached rpm only when it passes integrity check.
            ret = rpmmisc.checkRpmIntegrity('rpm', filename)
            if ret == 0:
                return filename

            msger.warning("package %s is damaged: %s" %
                          (os.path.basename(filename), filename))
            os.unlink(filename)

        pkg = myurlgrab(str(url), filename, target_repo["proxies"])
        return pkg
    else:
        return None
826 | |||
def get_source_name(pkg, repometadata):
    """Map a binary rpm string to its source package name.

    pkg          -- binary package string matching RPM_RE
                    ("name.arch version-release")
    repometadata -- list of repo metadata dicts

    Returns the source package name, or None when the package (or its
    sourcerpm entry) cannot be found.
    """

    def get_bin_name(pkg):
        # Binary package name per RPM_RE, or None.
        m = RPM_RE.match(pkg)
        if m:
            return m.group(1)
        return None

    def get_src_name(srpm):
        # Source package name per SRPM_RE, or None.
        m = SRPM_RE.match(srpm)
        if m:
            return m.group(1)
        return None

    ver = ""
    target_repo = None
    pkgpath = None

    pkg_name = get_bin_name(pkg)
    if not pkg_name:
        return None

    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sname" % ns).text == pkg_name:
                    if elm.find("%sarch" % ns).text != "src":
                        version = elm.find("%sversion" % ns)
                        tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
                        if tmpver > ver:
                            ver = tmpver
                            fmt = elm.find("%sformat" % ns)
                            # "is not None" avoids ElementTree's
                            # children-count-based element truthiness.
                            if fmt is not None:
                                fns = fmt.getchildren()[0].tag
                                fns = fns[0:fns.rindex("}")+1]
                                pkgpath = fmt.find("%ssourcerpm" % fns).text
                                target_repo = repo
                        break

        if repo["primary"].endswith(".sqlite"):
            con = sqlite.connect(repo["primary"])
            # Parameterized query instead of string interpolation.
            for row in con.execute("select version, release, rpm_sourcerpm "
                                   "from packages where name = ? and arch != \"src\"",
                                   (pkg_name,)):
                tmpver = "%s-%s" % (row[0], row[1])
                if tmpver > ver:
                    # Keep 'ver' in sync; the old code compared against
                    # a stale value and effectively took the last row.
                    ver = tmpver
                    pkgpath = "%s" % row[2]
                    target_repo = repo
                break
            con.close()
    if target_repo:
        return get_src_name(pkgpath)
    else:
        return None
881 | |||
def get_pkglist_in_patterns(group, patterns):
    """Return the list of package names required by pattern `group`.

    `patterns` is the path to a patterns XML file; `group` is matched
    against each pattern's <name> or <summary>. Returns an empty list
    when the group, or its <requires> section, is missing.

    Raises SyntaxError when the XML file cannot be parsed.
    """
    pkglist = []
    try:
        tree = xmlparse(patterns)
    except SyntaxError:
        raise SyntaxError("%s syntax error." % patterns)

    # Locate the pattern whose <name> or <summary> matches the group.
    matched = None
    for pattern in list(tree.getroot()):
        ns = pattern.tag
        ns = ns[0:ns.rindex("}")+1]
        if pattern.find("%sname" % ns).text == group or \
           pattern.find("%ssummary" % ns).text == group:
            matched = pattern
            break

    if matched is None:
        return pkglist

    # Locate the <requires> child of the matched pattern.
    requires = None
    for child in list(matched):
        if child.tag.endswith("requires"):
            requires = child
            break

    if requires is None:
        return pkglist

    # Collect unique package names, preserving first-seen order.
    for entry in list(requires):
        pkgname = entry.attrib["name"]
        if pkgname not in pkglist:
            pkglist.append(pkgname)

    return pkglist
917 | |||
def get_pkglist_in_comps(group, comps):
    """Return the list of packages belonging to comps group `group`.

    `comps` is the path to a comps XML file; `group` is matched against
    each <group>'s <id> or <name>. Returns an empty list when the group
    is not found.

    Raises SyntaxError when the XML file cannot be parsed.
    """
    pkglist = []
    try:
        root = xmlparse(comps)
    except SyntaxError:
        raise SyntaxError("%s syntax error." % comps)

    # Find the matching <group> element.
    # .iter() replaces .getiterator(), which was removed in Python 3.9
    # (available since 2.7).
    matched = None
    for elm in root.iter("group"):
        group_id = elm.find("id")
        name = elm.find("name")
        if group_id.text == group or name.text == group:
            matched = elm
            break

    if matched is None:
        return pkglist

    # Collect unique package names from every <packagereq> entry;
    # iter() already filters by tag, so no extra endswith() check.
    for require in matched.iter("packagereq"):
        pkgname = require.text
        if pkgname not in pkglist:
            pkglist.append(pkgname)

    return pkglist
944 | |||
def is_statically_linked(binary):
    """Return True if file(1) reports `binary` as statically linked."""
    file_report = runner.outs(['file', binary])
    return ", statically linked, " in file_report
947 | |||
def setup_qemu_emulator(rootdir, arch):
    """Install and register a static qemu-arm emulator for `rootdir`.

    Mounts binfmt_misc if needed, locates a statically-linked qemu-arm
    binary on the host, copies it into <rootdir>/usr/bin, disables
    SELinux (which blocks the emulator), and registers the emulator
    with the kernel's binfmt_misc so ARM ELF binaries inside the root
    run transparently.

    Returns the in-root emulator path ("/usr/bin/qemu-arm-static").
    Raises CreatorError when no statically-linked qemu-arm is found, or
    its version is too old for an armv7 target arch.
    """
    # mount binfmt_misc if it doesn't exist
    if not os.path.exists("/proc/sys/fs/binfmt_misc"):
        modprobecmd = find_binary_path("modprobe")
        runner.show([modprobecmd, "binfmt_misc"])
    if not os.path.exists("/proc/sys/fs/binfmt_misc/register"):
        mountcmd = find_binary_path("mount")
        runner.show([mountcmd, "-t", "binfmt_misc", "none", "/proc/sys/fs/binfmt_misc"])

    # qemu_emulator is a special case, we can't use find_binary_path
    # qemu emulator should be a statically-linked executable file
    qemu_emulator = "/usr/bin/qemu-arm"
    if not os.path.exists(qemu_emulator) or not is_statically_linked(qemu_emulator):
        qemu_emulator = "/usr/bin/qemu-arm-static"
    if not os.path.exists(qemu_emulator):
        raise CreatorError("Please install a statically-linked qemu-arm")

    # qemu emulator version check
    # BUGFIX: the comprehension variable was previously named 'arch';
    # under Python 2 a list-comprehension variable leaks into the
    # enclosing scope, so it clobbered the 'arch' parameter before the
    # membership test below.
    armv7_list = [a for a in rpmmisc.archPolicies.keys() if a.startswith('armv7')]
    if arch in armv7_list: # need qemu (>=0.13.0)
        qemuout = runner.outs([qemu_emulator, "-h"])
        m = re.search(r"version\s*([.\d]+)", qemuout)
        if m:
            qemu_version = m.group(1)
            # NOTE: lexicographic compare on the version string,
            # preserved from the original.
            if qemu_version < "0.13":
                raise CreatorError("Requires %s version >=0.13 for %s" % (qemu_emulator, arch))
        else:
            msger.warning("Can't get version info of %s, please make sure it's higher than 0.13.0" % qemu_emulator)

    if not os.path.exists(rootdir + "/usr/bin"):
        makedirs(rootdir + "/usr/bin")
    shutil.copy(qemu_emulator, rootdir + "/usr/bin/qemu-arm-static")
    qemu_emulator = "/usr/bin/qemu-arm-static"

    # disable selinux, selinux will block qemu emulator to run
    if os.path.exists("/usr/sbin/setenforce"):
        msger.info('Try to disable selinux')
        runner.show(["/usr/sbin/setenforce", "0"])

    # unregister it if it has been registered and is a dynamically-linked executable
    node = "/proc/sys/fs/binfmt_misc/arm"
    if os.path.exists(node):
        # Writing "-1" to a binfmt node removes that registration.
        with open(node, "w") as fd:
            fd.write("-1\n")

    # register qemu emulator for interpreting other arch executable file
    if not os.path.exists(node):
        qemu_arm_string = ":arm:M::\\x7fELF\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00:\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfa\\xff\\xff\\xff:%s:\n" % qemu_emulator
        with open("/proc/sys/fs/binfmt_misc/register", "w") as fd:
            fd.write(qemu_arm_string)

    return qemu_emulator
1003 | |||
def SrcpkgsDownload(pkgs, repometadata, instroot, cachedir):
    """Fetch the source rpms for the binary packages in `pkgs`.

    Each package's SRPM is copied into <instroot>/usr/src/SRPMS, taken
    from the local cache under `cachedir` when available and downloaded
    from the "-source" repos otherwise. Returns the list of fetched
    SRPMs, or None when no source repo is configured.
    """

    def get_source_repometadata(repometadata):
        # Keep only repos whose name marks them as source repos.
        src_repometadata = [repo for repo in repometadata
                            if repo["name"].endswith("-source")]
        return src_repometadata or None

    def get_src_name(srpm):
        # Source rpm filename -> package name, via the shared SRPM_RE.
        m = SRPM_RE.match(srpm)
        return m.group(1) if m else None

    src_repometadata = get_source_repometadata(repometadata)
    if not src_repometadata:
        msger.warning("No source repo found")
        return None

    # Map cached SRPM names to their on-disk paths.
    cached_paths = []
    for repo in src_repometadata:
        pattern = "%s/%s/packages/*.src.rpm" % (cachedir, repo["name"])
        cached_paths += glob.glob(pattern)

    lpkgs_dict = {}
    for path in cached_paths:
        lpkgs_dict[get_src_name(os.path.basename(path))] = path
    localpkgs = lpkgs_dict.keys()

    destdir = instroot + '/usr/src/SRPMS'
    if not os.path.exists(destdir):
        os.makedirs(destdir)

    # Resolve each binary package to its source package name (deduped).
    srcpkgset = set()
    for _pkg in pkgs:
        srcpkg_name = get_source_name(_pkg, repometadata)
        if srcpkg_name:
            srcpkgset.add(srcpkg_name)

    src_pkgs = []
    cached_count = 0
    for pkg in list(srcpkgset):
        if pkg in localpkgs:
            # Served straight from the local cache.
            cached_count += 1
            shutil.copy(lpkgs_dict[pkg], destdir)
            src_pkgs.append(os.path.basename(lpkgs_dict[pkg]))
        else:
            # Download from a source repo.
            src_pkg = get_package(pkg, src_repometadata, 'src')
            if src_pkg:
                shutil.copy(src_pkg, destdir)
                src_pkgs.append(src_pkg)
    msger.info("%d source packages gotten from cache" % cached_count)

    return src_pkgs
1063 | |||
def strip_end(text, suffix):
    """Return `text` with `suffix` removed from its end, if present.

    Fixes the empty-suffix edge case: `text[:-len("")]` is `text[:0]`,
    so the old code returned "" whenever `suffix` was empty.
    """
    if not suffix or not text.endswith(suffix):
        return text
    return text[:-len(suffix)]