diff options
author | Tudor Florea <tudor.florea@enea.com> | 2015-10-09 22:59:03 +0200 |
---|---|---|
committer | Tudor Florea <tudor.florea@enea.com> | 2015-10-09 22:59:03 +0200 |
commit | 972dcfcdbfe75dcfeb777150c136576cf1a71e99 (patch) | |
tree | 97a61cd7e293d7ae9d56ef7ed0f81253365bb026 /scripts/lib/wic/utils/oe/package_manager.py | |
download | poky-972dcfcdbfe75dcfeb777150c136576cf1a71e99.tar.gz |
initial commit for Enea Linux 5.0 arm
Signed-off-by: Tudor Florea <tudor.florea@enea.com>
Diffstat (limited to 'scripts/lib/wic/utils/oe/package_manager.py')
-rw-r--r-- | scripts/lib/wic/utils/oe/package_manager.py | 810 |
1 files changed, 810 insertions, 0 deletions
diff --git a/scripts/lib/wic/utils/oe/package_manager.py b/scripts/lib/wic/utils/oe/package_manager.py new file mode 100644 index 0000000000..92ce98e2ce --- /dev/null +++ b/scripts/lib/wic/utils/oe/package_manager.py | |||
@@ -0,0 +1,810 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (c) 2014, Enea AB. | ||
5 | # All rights reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | # | ||
20 | # DESCRIPTION | ||
21 | # This implements the opkg package manager wrapper as a combination of | ||
22 | # meta/lib/oe/package_manager.py and bitbake/lib/bb/utils.py files and | ||
23 | # adaptation of those files to 'wic'. | ||
24 | # | ||
25 | # AUTHORS | ||
26 | # Adrian Calianu <adrian.calianu (at] enea.com> | ||
27 | # | ||
28 | # This file incorporates work covered by the following copyright and | ||
29 | # permission notice: | ||
30 | # | ||
31 | # meta/COPYING.GPLv2 (GPLv2) | ||
32 | # meta/COPYING.MIT (MIT) | ||
33 | # | ||
34 | # Copyright (C) 2004 Michael Lauer | ||
35 | # | ||
36 | # Permission to use, copy, modify, and/or distribute this software | ||
37 | # for any purpose with or without fee is hereby granted, provided | ||
38 | # that the above copyright notice and this permission notice appear | ||
39 | # in all copies. | ||
40 | # | ||
41 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL | ||
42 | # WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED | ||
43 | # WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE | ||
44 | # AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR | ||
45 | # CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS | ||
46 | # OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, | ||
47 | # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN | ||
48 | # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
49 | |||
50 | |||
51 | from abc import ABCMeta, abstractmethod | ||
52 | import os | ||
53 | import glob | ||
54 | import subprocess | ||
55 | import shutil | ||
56 | import multiprocessing | ||
57 | import re | ||
58 | import errno | ||
59 | import fcntl | ||
60 | |||
61 | from mic.utils.oe.misc import * | ||
62 | from mic import msger | ||
63 | |||
def mkdirhier(directory):
    """Create a directory like 'mkdir -p', but does not complain if
    directory already exists like os.makedirs

    :param directory: path of the directory tree to create
    :raises OSError: for any failure other than the path already existing
    """
    try:
        os.makedirs(directory)
    except OSError as e:
        # EEXIST means the directory is already there (possibly created
        # concurrently) -- that is exactly the "mkdir -p" contract.
        if e.errno != errno.EEXIST:
            # Bare 'raise' keeps the original traceback; the original
            # 'raise e' reset it under Python 2.
            raise
74 | |||
def remove(path, recurse=False):
    """Delete *path* (a glob pattern) like ``rm -f`` / ``rm -rf``.

    A false-y *path* is a silent no-op.  With *recurse* the matching
    entries are removed via an external ``rm -rf``; otherwise each match
    is unlinked individually and an already-missing file is ignored.
    """
    if not path:
        return
    matches = glob.glob(path)
    if recurse:
        # shutil.rmtree(name) would be ideal but its too slow
        subprocess.call(['rm', '-rf'] + matches)
    else:
        for entry in matches:
            try:
                os.unlink(entry)
            except OSError as err:
                # A file that vanished underneath us is fine; anything
                # else is a real error worth propagating.
                if err.errno != errno.ENOENT:
                    raise
89 | |||
def lockfile(name, shared=False, retry=True):
    """
    Use the file fn as a lock file, return when the lock has been acquired.
    Returns a variable to pass to unlockfile().

    :param name: path of the lock file (its directory is created if needed)
    :param shared: take a shared (read) lock instead of an exclusive one
    :param retry: block until acquired; when False, return None if the
                  lock cannot be taken immediately
    """
    dirname = os.path.dirname(name)
    mkdirhier(dirname)

    if not os.access(dirname, os.W_OK):
        # BUGFIX: the original referenced the undefined names 'logger' and
        # 'sys' here, so this branch raised NameError instead of reporting
        # the problem.  msger.error is the fatal-error channel used
        # everywhere else in this module.
        msger.error("Unable to acquire lock '%s', directory is not writable"
                    % name)

    op = fcntl.LOCK_EX
    if shared:
        op = fcntl.LOCK_SH
    if not retry:
        op = op | fcntl.LOCK_NB

    while True:
        # If we leave the lockfiles lying around there is no problem
        # but we should clean up after ourselves. This gives potential
        # for races though. To work around this, when we acquire the lock
        # we check the file we locked was still the lock file on disk.
        # by comparing inode numbers. If they don't match or the lockfile
        # no longer exists, we start again.

        # This implementation is unfair since the last person to request the
        # lock is the most likely to win it.

        try:
            lf = open(name, 'a+')
            fileno = lf.fileno()
            fcntl.flock(fileno, op)
            statinfo = os.fstat(fileno)
            if os.path.exists(lf.name):
                statinfo2 = os.stat(lf.name)
                if statinfo.st_ino == statinfo2.st_ino:
                    return lf
            # The file we locked was unlinked/replaced meanwhile -- retry.
            lf.close()
        except Exception:
            try:
                lf.close()
            except Exception:
                pass
            pass
        if not retry:
            return None
138 | |||
def unlockfile(lf):
    """Release a lock obtained from lockfile() and clean up its file."""
    fd = lf.fileno()
    try:
        # If we held a shared lock, promote it to exclusive before
        # removing the lock file; failures here are harmless (the stale
        # file is simply left behind), so they are deliberately ignored.
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        os.unlink(lf.name)
    except (IOError, OSError):
        pass
    fcntl.flock(fd, fcntl.LOCK_UN)
    lf.close()
152 | |||
def which(path, item, direction = 0, history = False):
    """
    Locate a file in a PATH

    :param path: colon-separated search path (None is treated as empty)
    :param item: file name to look for
    :param direction: 0 searches front-to-back, anything else reverses
    :param history: when True, also return the list of candidates tried
    :returns: absolute path of the first match, or "" when not found;
              a (result, history) tuple when *history* is set
    """
    hist = []
    paths = (path or "").split(':')
    if direction != 0:
        paths.reverse()

    for p in paths:
        # Renamed from 'next', which shadowed the 'next' builtin.
        candidate = os.path.join(p, item)
        hist.append(candidate)
        if os.path.exists(candidate):
            if not os.path.isabs(candidate):
                candidate = os.path.abspath(candidate)
            if history:
                return candidate, hist
            return candidate

    if history:
        return "", hist
    return ""
176 | |||
177 | |||
178 | |||
# this can be used by all PM backends to create the index files in parallel
def wic_create_index(arg):
    """Execute one index-creation shell command.

    Returns None on success, or a human-readable error string when the
    command exits non-zero -- a plain return value (rather than an
    exception) so a multiprocessing pool can collect worker failures.
    """
    index_cmd = arg

    try:
        msger.info("Executing '%s' ..." % index_cmd)
        subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as err:
        return ("Index creation command '%s' failed with return code %d:\n%s"
                % (err.cmd, err.returncode, err.output))

    return None
191 | |||
192 | |||
class WicIndexer(object):
    """Abstract base class for package-index writers.

    Concrete subclasses implement write_index() for one package format
    (e.g. opkg).  'd' is the configuration mapping and 'deploy_dir' the
    package deploy directory the index is generated for.
    """
    __metaclass__ = ABCMeta

    def __init__(self, d, deploy_dir):
        self.d = d
        self.deploy_dir = deploy_dir

    @abstractmethod
    def write_index(self):
        """Generate the index files; return an error string or None."""
        pass
203 | |||
class WicOpkgIndexer(WicIndexer):
    """Create opkg 'Packages' index files, one per architecture, in parallel."""

    def write_index(self):
        arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
                     "SDK_PACKAGE_ARCHS",
                     "MULTILIB_ARCHS"]

        opkg_index_cmd = which(os.getenv('PATH'), "opkg-make-index")

        # opkg-make-index expects the top-level Packages file to exist,
        # even when empty.
        top_pkgs = os.path.join(self.deploy_dir, "Packages")
        if not os.path.exists(top_pkgs):
            open(top_pkgs, "w").close()

        index_cmds = []
        for arch_var in arch_vars:
            if not self.d.has_key(arch_var):
                continue
            archs = self.d[arch_var]
            if archs is None:
                continue

            for arch in archs.split():
                pkgs_dir = os.path.join(self.deploy_dir, arch)
                if not os.path.isdir(pkgs_dir):
                    continue

                pkgs_file = os.path.join(pkgs_dir, "Packages")
                if not os.path.exists(pkgs_file):
                    open(pkgs_file, "w").close()

                index_cmds.append('%s -r %s -p %s -m %s' %
                                  (opkg_index_cmd, pkgs_file, pkgs_file,
                                   pkgs_dir))

        if len(index_cmds) == 0:
            msger.info("There are no packages in %s!" % self.deploy_dir)
            return

        # Fan the per-arch index commands out across all CPUs and gather
        # the results (None on success, an error string on failure).
        nproc = multiprocessing.cpu_count()
        pool = multiprocessing.Pool(nproc)
        results = list(pool.imap(wic_create_index, index_cmds))
        pool.close()
        pool.join()

        # Report the first failure, if any.
        for result in results:
            if result is not None:
                return result
251 | |||
class WicPkgsList(object):
    """Abstract base class for listing packages installed in a rootfs.

    'd' is the configuration mapping; 'rootfs_dir' is the root
    filesystem whose package database is queried.
    """
    __metaclass__ = ABCMeta

    def __init__(self, d, rootfs_dir):
        self.d = d
        self.rootfs_dir = rootfs_dir

    @abstractmethod
    def list(self, format=None):
        """Return the installed-package listing in the requested format."""
        pass
262 | |||
263 | |||
class WicOpkgPkgsList(WicPkgsList):
    """List installed packages by querying opkg-cl against a target rootfs."""

    def __init__(self, d, rootfs_dir, config_file):
        super(WicOpkgPkgsList, self).__init__(d, rootfs_dir)

        self.opkg_cmd = which(os.getenv('PATH'), "opkg-cl")
        self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
        if self.d.has_key("OPKG_ARGS"):
            self.opkg_args += self.d["OPKG_ARGS"]

    def list(self, format=None):
        """Return the package listing.

        format: "arch", "file", "ver" or "deps" pipe the status output
        through opkg-query-helper.py; anything else yields the plain
        list of installed package names.
        """
        opkg_query_cmd = which(os.getenv('PATH'), "opkg-query-helper.py")

        # Map each status-based format onto its opkg-query-helper flag
        # ("deps" uses the helper with no flag).
        helper_flags = {"arch": " -a", "file": " -f",
                        "ver": " -v", "deps": ""}

        if format in helper_flags:
            cmd = "%s %s status | %s%s" % \
                (self.opkg_cmd, self.opkg_args,
                 opkg_query_cmd, helper_flags[format])
        else:
            cmd = "%s %s list_installed | cut -d' ' -f1" % \
                (self.opkg_cmd, self.opkg_args)

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
                                             shell=True).strip()
        except subprocess.CalledProcessError as e:
            msger.error("Cannot get the installed packages list. Command '%s' "
                        "returned %d:\n%s" % (cmd, e.returncode, e.output))

        if output and format == "file":
            # Rewrite each package's file name into a full path under the
            # rootfs when that file actually exists there.
            rewritten = []
            for line in output.split('\n'):
                pkg, pkg_file, pkg_arch = line.split()
                full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
                if os.path.exists(full_path):
                    rewritten.append("%s %s %s\n" % (pkg, full_path, pkg_arch))
                else:
                    rewritten.append("%s %s %s\n" % (pkg, pkg_file, pkg_arch))

            output = "".join(rewritten)

        return output
311 | |||
312 | |||
class WicPackageManager(object):
    """
    This is an abstract class. Do not instantiate this directly.

    Concrete backends (e.g. WicOpkgPM) implement the abstract methods
    for one package format.  'pseudo' is the command prefix used to run
    native tools under pseudo; 'native_sysroot' locates those tools.
    """
    __metaclass__ = ABCMeta

    def __init__(self, d, pseudo, native_sysroot):
        self.d = d
        self.deploy_dir = None
        self.deploy_lock = None
        if self.d.has_key('PACKAGE_FEED_URIS'):
            self.feed_uris = self.d['PACKAGE_FEED_URIS']
        else:
            self.feed_uris = ""
        self.pseudo = pseudo
        self.native_sysroot = native_sysroot

    @abstractmethod
    def update(self):
        """Update the package manager package database."""
        pass

    @abstractmethod
    def install(self, pkgs, attempt_only=False):
        """Install a list of packages. 'pkgs' is a list object. If
        'attempt_only' is True, installation failures are ignored.
        """
        pass

    @abstractmethod
    def remove(self, pkgs, with_dependencies=True):
        """Remove a list of packages. 'pkgs' is a list object. If
        'with_dependencies' is False, the any dependencies are left in
        place.
        """
        pass

    @abstractmethod
    def write_index(self):
        """This function creates the index files."""
        pass

    @abstractmethod
    def remove_packaging_data(self):
        pass

    @abstractmethod
    def list_installed(self, format=None):
        pass

    @abstractmethod
    def insert_feeds_uris(self):
        pass

    def install_complementary(self, globs=None):
        """
        Install complementary packages based upon the list of currently
        installed packages e.g. locales, *-dev, *-dbg, etc. This will only
        attempt to install these packages, if they don't exist then no
        error will occur.  Note: every backend needs to call this function
        explicitly after the normal package installation.
        """
        # we need to write the list of installed packages to a file because the
        # oe-pkgdata-util reads it from a file
        if self.d.has_key('WORKDIR'):
            installed_pkgs_file = os.path.join(self.d['WORKDIR'],
                                               "installed_pkgs.txt")
        else:
            # msger.error aborts, matching the rest of this module's
            # fatal-error handling.
            msger.error("No WORKDIR provided!")

        with open(installed_pkgs_file, "w+") as installed_pkgs:
            installed_pkgs.write(self.list_installed("arch"))

        if globs is None:
            if self.d.has_key('IMAGE_INSTALL_COMPLEMENTARY'):
                globs = self.d['IMAGE_INSTALL_COMPLEMENTARY']

            # BUGFIX: only append locale globs when we actually obtained a
            # globs string.  The original appended unconditionally, so an
            # unset IMAGE_INSTALL_COMPLEMENTARY combined with a set
            # IMAGE_LINGUAS raised TypeError (None += str).
            if globs is not None:
                split_linguas = set()

                if self.d.has_key('IMAGE_LINGUAS'):
                    for translation in self.d['IMAGE_LINGUAS'].split():
                        split_linguas.add(translation)
                        split_linguas.add(translation.split('-')[0])

                split_linguas = sorted(split_linguas)

                for lang in split_linguas:
                    globs += " *-locale-%s" % lang

        if globs is None:
            return

        if not self.d.has_key('PKGDATA_DIR'):
            msger.error("No PKGDATA_DIR provided!")

        # BUGFIX: the original built 'cmd' as a *list* and concatenated it
        # onto self.pseudo (a string), raising TypeError.  Every other
        # exec_native_cmd call site in this file passes a string command.
        cmd = ' '.join([which(os.getenv('PATH'), "oe-pkgdata-util"),
                        "glob", self.d['PKGDATA_DIR'], installed_pkgs_file,
                        globs])

        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
        if rc != 0:
            msger.error("Could not compute complementary packages list. Command "
                        "'%s' returned %d" %
                        (cmd, rc))

        self.install(out.split(), attempt_only=True)


    def deploy_dir_lock(self):
        """Take the deploy-directory lock; requires deploy_dir to be set."""
        if self.deploy_dir is None:
            raise RuntimeError("deploy_dir is not set!")

        lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")

        self.deploy_lock = lockfile(lock_file_name)

    def deploy_dir_unlock(self):
        """Release the deploy-directory lock if held (no-op otherwise)."""
        if self.deploy_lock is None:
            return

        unlockfile(self.deploy_lock)

        self.deploy_lock = None
440 | |||
441 | |||
class WicOpkgPM(WicPackageManager):
    """opkg-based package manager backend used by wic image creation."""

    def __init__(self, d, target_rootfs, config_file, archs, pseudo, native_sysroot, task_name='target'):
        super(WicOpkgPM, self).__init__(d, pseudo, native_sysroot)

        self.target_rootfs = target_rootfs
        self.config_file = config_file
        self.pkg_archs = archs
        self.task_name = task_name

        if self.d.has_key("DEPLOY_DIR_IPK"):
            self.deploy_dir = self.d["DEPLOY_DIR_IPK"]
        # NOTE(review): when DEPLOY_DIR_IPK is unset, self.deploy_dir stays
        # None and the join below fails -- confirm callers always set it.

        self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
        self.opkg_cmd = which(os.getenv('PATH'), "opkg-cl")
        self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs)
        if self.d.has_key("OPKG_ARGS"):
            self.opkg_args += self.d["OPKG_ARGS"]

        if self.d.has_key('OPKGLIBDIR'):
            opkg_lib_dir = self.d['OPKGLIBDIR']
        else:
            opkg_lib_dir = ""

        # BUGFIX: use startswith() -- the original indexed opkg_lib_dir[0]
        # unconditionally and raised IndexError when OPKGLIBDIR was unset
        # (empty string).
        if opkg_lib_dir.startswith("/"):
            opkg_lib_dir = opkg_lib_dir[1:]

        self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")

        mkdirhier(self.opkg_dir)

        if self.d.has_key("TMPDIR"):
            tmp_dir = self.d["TMPDIR"]
        else:
            tmp_dir = ""

        # Directory used by backup/recover_packaging_data for incremental
        # ipk image generation.
        self.saved_opkg_dir = '%s/saved/%s' % (tmp_dir, self.task_name)
        if not os.path.exists('%s/saved' % tmp_dir):
            mkdirhier('%s/saved' % tmp_dir)

        if self.d.has_key('BUILD_IMAGES_FROM_FEEDS') and self.d['BUILD_IMAGES_FROM_FEEDS'] != "1":
            self._create_config()
        else:
            self._create_custom_config()

        self.indexer = WicOpkgIndexer(self.d, self.deploy_dir)

    def mark_packages(self, status_tag, packages=None):
        """
        This function will change a package's status in /var/lib/opkg/status
        file.  If 'packages' is None then the new_status will be applied to
        all packages.
        """
        status_file = os.path.join(self.opkg_dir, "status")

        with open(status_file, "r") as sf:
            with open(status_file + ".tmp", "w+") as tmp_sf:
                if packages is None:
                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                                        r"Package: \1\n\2Status: \3%s" % status_tag,
                                        sf.read()))
                else:
                    if type(packages).__name__ != "list":
                        raise TypeError("'packages' should be a list object")

                    status = sf.read()
                    for pkg in packages:
                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                                        status)

                    tmp_sf.write(status)

        # Atomically replace the status file with the rewritten copy.
        os.rename(status_file + ".tmp", status_file)

    def _create_custom_config(self):
        """Write an opkg config pointing at remote feeds (IPK_FEED_URIS)
        instead of the local deploy directory."""
        msger.info("Building from feeds activated!")

        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            if self.d.has_key('IPK_FEED_URIS'):
                ipk_feed_uris = self.d['IPK_FEED_URIS']
            else:
                ipk_feed_uris = ""

            # Each entry has the form "<name>##<uri>".
            for line in ipk_feed_uris.split():
                feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)

                if feed_match is not None:
                    feed_name = feed_match.group(1)
                    feed_uri = feed_match.group(2)

                    msger.info("Add %s feed with URL %s" % (feed_name, feed_uri))

                    config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))

            # Allow to use package deploy directory contents as quick
            # devel-testing feed.  This creates individual feed configs for
            # each arch subdir of those specified as compatible for the
            # current machine.
            # NOTE: Development-helper feature, NOT a full-fledged feed.
            if self.d.has_key('FEED_DEPLOYDIR_BASE_URI'):
                feed_deploydir_base_dir = self.d['FEED_DEPLOYDIR_BASE_URI']
            else:
                feed_deploydir_base_dir = ""

            if feed_deploydir_base_dir != "":
                for arch in self.pkg_archs.split():
                    if self.d.has_key("sysconfdir"):
                        sysconfdir = self.d["sysconfdir"]
                    else:
                        # NOTE(review): a missing sysconfdir leaves None
                        # here and os.path.join below raises -- confirm
                        # callers always provide it.
                        sysconfdir = None

                    cfg_file_name = os.path.join(self.target_rootfs,
                                                 sysconfdir,
                                                 "opkg",
                                                 "local-%s-feed.conf" % arch)

                    with open(cfg_file_name, "w+") as cfg_file:
                        # BUGFIX: the original passed the format values as
                        # extra positional arguments to write() instead of
                        # to the '%' operator (missing tuple parentheses),
                        # raising TypeError at runtime.
                        cfg_file.write("src/gz local-%s %s/%s" %
                                       (arch,
                                        feed_deploydir_base_dir,
                                        arch))

    def _create_config(self):
        """Write an opkg config that sources the local deploy directory."""
        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            config_file.write("src oe file:%s\n" % self.deploy_dir)

            for arch in self.pkg_archs.split():
                pkgs_dir = os.path.join(self.deploy_dir, arch)
                if os.path.isdir(pkgs_dir):
                    config_file.write("src oe-%s file:%s\n" %
                                      (arch, pkgs_dir))

    def insert_feeds_uris(self):
        """Write PACKAGE_FEED_URIS entries into the rootfs opkg config."""
        if self.feed_uris == "":
            return

        rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
                                     % self.target_rootfs)

        with open(rootfs_config, "w+") as config_file:
            uri_iterator = 0
            for uri in self.feed_uris.split():
                config_file.write("src/gz url-%d %s/ipk\n" %
                                  (uri_iterator, uri))

                # One extra channel per architecture that has packages
                # in the deploy dir.
                for arch in self.pkg_archs.split():
                    if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                        continue
                    msger.info('Note: adding opkg channel url-%s-%d (%s)' %
                               (arch, uri_iterator, uri))

                    config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" %
                                      (arch, uri_iterator, uri, arch))
                uri_iterator += 1

    def update(self):
        """Run 'opkg update' under the deploy-directory lock."""
        self.deploy_dir_lock()

        cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)

        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
        if rc != 0:
            self.deploy_dir_unlock()
            msger.error("Unable to update the package index files. Command '%s' "
                        "returned %d" % (cmd, rc))

        self.deploy_dir_unlock()

    def install(self, pkgs, attempt_only=False):
        """Install 'pkgs' into the target rootfs via opkg-cl."""
        if attempt_only and len(pkgs) == 0:
            return

        cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))

        # Environment consumed by opkg's offline-install hooks and the
        # postinstall intercept scripts.
        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        if self.d.has_key('WORKDIR'):
            os.environ['INTERCEPT_DIR'] = os.path.join(self.d['WORKDIR'],
                                                       "intercept_scripts")
        else:
            os.environ['INTERCEPT_DIR'] = "."
            msger.warning("No WORKDIR provided!")

        if self.d.has_key('STAGING_DIR_NATIVE'):
            os.environ['NATIVE_ROOT'] = self.d['STAGING_DIR_NATIVE']
        else:
            msger.error("No STAGING_DIR_NATIVE provided!")

        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
        if rc != 0:
            msger.error("Unable to install packages. "
                        "Command '%s' returned %d" % (cmd, rc))


    def remove(self, pkgs, with_dependencies=True):
        """Remove 'pkgs'; with_dependencies also removes dependent packages."""
        if with_dependencies:
            cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
        else:
            cmd = "%s %s --force-depends remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))

        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
        if rc != 0:
            msger.error("Unable to remove packages. Command '%s' "
                        "returned %d" % (cmd, rc))


    def write_index(self):
        """Regenerate the package index under the deploy-directory lock."""
        self.deploy_dir_lock()

        result = self.indexer.write_index()

        self.deploy_dir_unlock()

        if result is not None:
            msger.error(result)

    def remove_packaging_data(self):
        """Wipe the opkg state directory in the rootfs."""
        remove(self.opkg_dir, True)
        # create the directory back, it's needed by PM lock
        mkdirhier(self.opkg_dir)

    def list_installed(self, format=None):
        """Return the installed-package listing (see WicOpkgPkgsList.list)."""
        return WicOpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format)

    def handle_bad_recommendations(self):
        """Pre-seed the opkg status file so BAD_RECOMMENDATIONS packages
        are held back from installation."""
        if self.d.has_key("BAD_RECOMMENDATIONS"):
            bad_recommendations = self.d["BAD_RECOMMENDATIONS"]
        else:
            bad_recommendations = ""

        if bad_recommendations.strip() == "":
            return

        status_file = os.path.join(self.opkg_dir, "status")

        # If status file existed, it means the bad recommendations has already
        # been handled
        if os.path.exists(status_file):
            return

        cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)

        with open(status_file, "w+") as status:
            for pkg in bad_recommendations.split():
                pkg_info = cmd + pkg

                try:
                    output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip()
                except subprocess.CalledProcessError as e:
                    msger.error("Cannot get package info. Command '%s' "
                                "returned %d:\n%s" % (pkg_info, e.returncode, e.output))

                if output == "":
                    msger.info("Ignored bad recommendation: '%s' is "
                               "not a package" % pkg)
                    continue

                # Copy the package stanza, forcing its status to
                # "deinstall hold not-installed".
                for line in output.split('\n'):
                    if line.startswith("Status:"):
                        status.write("Status: deinstall hold not-installed\n")
                    else:
                        status.write(line + "\n")

    def dummy_install(self, pkgs):
        """
        The following function dummy installs pkgs and returns the log of
        output (via opkg --noaction against a throw-away rootfs).
        """
        if len(pkgs) == 0:
            return

        # Create an temp dir as opkg root for dummy installation
        if self.d.has_key("TMPDIR"):
            tmp_dir = self.d["TMPDIR"]
        else:
            tmp_dir = "."
            msger.warning("No TMPDIR provided!")

        temp_rootfs = '%s/opkg' % tmp_dir
        temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
        mkdirhier(temp_opkg_dir)

        opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
        if self.d.has_key("OPKG_ARGS"):
            opkg_args += self.d["OPKG_ARGS"]

        cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            msger.error("Unable to update. Command '%s' "
                        "returned %d:\n%s" % (cmd, e.returncode, e.output))

        # Dummy installation
        cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
                                                opkg_args,
                                                ' '.join(pkgs))
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            msger.error("Unable to dummy install packages. Command '%s' "
                        "returned %d:\n%s" % (cmd, e.returncode, e.output))

        remove(temp_rootfs, True)

        return output

    def backup_packaging_data(self):
        """Save the opkg state dir for incremental ipk image generation."""
        if os.path.exists(self.saved_opkg_dir):
            remove(self.saved_opkg_dir, True)
        shutil.copytree(self.opkg_dir,
                        self.saved_opkg_dir,
                        symlinks=True)

    def recover_packaging_data(self):
        """Restore the opkg state dir saved by backup_packaging_data."""
        if os.path.exists(self.saved_opkg_dir):
            if os.path.exists(self.opkg_dir):
                remove(self.opkg_dir, True)

            msger.info('Recover packaging data')
            shutil.copytree(self.saved_opkg_dir,
                            self.opkg_dir,
                            symlinks=True)
781 | |||
782 | |||
def wic_generate_index_files(d):
    """(Re)generate package index files for every configured package class.

    Only the ipk backend is currently mapped.  Aborts via msger.error
    when an indexer reports a failure.

    :param d: configuration mapping (has_key/[] access)
    """
    if d.has_key('PACKAGE_CLASSES'):
        classes = d['PACKAGE_CLASSES'].replace("package_", "").split()
    else:
        # BUGFIX: use an empty list (split() returns a list); the original
        # used "" as the no-classes sentinel, a type-confused value.
        classes = []
        msger.warning("No PACKAGE_CLASSES provided!")

    if d.has_key('DEPLOY_DIR_IPK'):
        deploy_dir_ipk = d['DEPLOY_DIR_IPK']
    else:
        deploy_dir_ipk = None
        msger.warning("No DEPLOY_DIR_IPK provided!")

    indexer_map = {
        "ipk": (WicOpkgIndexer, deploy_dir_ipk)
    }

    result = None

    for pkg_class in classes:
        if not pkg_class in indexer_map:
            continue

        indexer_cls, deploy_dir = indexer_map[pkg_class]
        # BUGFIX: guard against a missing deploy dir -- the original called
        # os.path.exists(None), which raises TypeError.
        if deploy_dir and os.path.exists(deploy_dir):
            result = indexer_cls(d, deploy_dir).write_index()

    if result is not None:
        msger.error(result)