From 972dcfcdbfe75dcfeb777150c136576cf1a71e99 Mon Sep 17 00:00:00 2001
From: Tudor Florea
Date: Fri, 9 Oct 2015 22:59:03 +0200
Subject: initial commit for Enea Linux 5.0 arm

Signed-off-by: Tudor Florea
---
 scripts/lib/wic/utils/oe/package_manager.py | 810 ++++++++++++++++++++++++++++
 1 file changed, 810 insertions(+)
 create mode 100644 scripts/lib/wic/utils/oe/package_manager.py

diff --git a/scripts/lib/wic/utils/oe/package_manager.py b/scripts/lib/wic/utils/oe/package_manager.py
new file mode 100644
index 0000000000..92ce98e2ce
--- /dev/null
+++ b/scripts/lib/wic/utils/oe/package_manager.py
@@ -0,0 +1,810 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2014, Enea AB.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements an opkg package manager wrapper for 'wic'. It combines
+# code from meta/lib/oe/package_manager.py and bitbake/lib/bb/utils.py and
+# adapts it so that it can run outside of bitbake.
+#
+# AUTHORS
+# Adrian Calianu
+#
+# This file incorporates work covered by the following copyright and
+# permission notice:
+#
+# meta/COPYING.GPLv2 (GPLv2)
+# meta/COPYING.MIT (MIT)
+#
+# Copyright (C) 2004 Michael Lauer
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+# AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+from abc import ABCMeta, abstractmethod
+import os
+import glob
+import subprocess
+import shutil
+import multiprocessing
+import re
+import errno
+import fcntl
+
+from mic.utils.oe.misc import *
+from mic import msger
+
+def mkdirhier(directory):
+    """Create a directory like 'mkdir -p', but does not complain if
+    directory already exists like os.makedirs
+    """
+
+    try:
+        os.makedirs(directory)
+    except OSError as e:
+        if e.errno != errno.EEXIST:
+            raise e
+
+def remove(path, recurse=False):
+    """Equivalent to rm -f or rm -rf"""
+    if not path:
+        return
+    if recurse:
+        # shutil.rmtree(name) would be ideal but it's too slow
+        subprocess.call(['rm', '-rf'] + glob.glob(path))
+        return
+    for name in glob.glob(path):
+        try:
+            os.unlink(name)
+        except OSError as exc:
+            if exc.errno != errno.ENOENT:
+                raise
+
+def lockfile(name, shared=False, retry=True):
+    """
+    Use the file 'name' as a lock file, return when the lock has been
+    acquired. Returns a variable to pass to unlockfile().
+    """
+    dirname = os.path.dirname(name)
+    mkdirhier(dirname)
+
+    if not os.access(dirname, os.W_OK):
+        # msger.error() is fatal, so there is no fall-through here
+        msger.error("Unable to acquire lock '%s', directory is not writable" % name)
+
+    op = fcntl.LOCK_EX
+    if shared:
+        op = fcntl.LOCK_SH
+    if not retry:
+        op = op | fcntl.LOCK_NB
+
+    while True:
+        # If we leave the lockfiles lying around there is no problem
+        # but we should clean up after ourselves. This gives potential
+        # for races though. To work around this, when we acquire the lock
+        # we check the file we locked was still the lock file on disk
+        # by comparing inode numbers. If they don't match or the lockfile
+        # no longer exists, we start again.
+
+        # This implementation is unfair since the last person to request the
+        # lock is the most likely to win it.
+
+        try:
+            lf = open(name, 'a+')
+            fileno = lf.fileno()
+            fcntl.flock(fileno, op)
+            statinfo = os.fstat(fileno)
+            if os.path.exists(lf.name):
+                statinfo2 = os.stat(lf.name)
+                if statinfo.st_ino == statinfo2.st_ino:
+                    return lf
+            lf.close()
+        except Exception:
+            try:
+                lf.close()
+            except Exception:
+                pass
+            pass
+        if not retry:
+            return None
+
+def unlockfile(lf):
+    """
+    Unlock a file locked using lockfile()
+    """
+    try:
+        # If we had a shared lock, we need to promote to exclusive before
+        # removing the lockfile. Attempt this, ignore failures.
+        fcntl.flock(lf.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
+        os.unlink(lf.name)
+    except (IOError, OSError):
+        pass
+    fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
+    lf.close()
+
+def which(path, item, direction = 0, history = False):
+    """
+    Locate a file in a PATH
+    """
+
+    hist = []
+    paths = (path or "").split(':')
+    if direction != 0:
+        paths.reverse()
+
+    for p in paths:
+        next = os.path.join(p, item)
+        hist.append(next)
+        if os.path.exists(next):
+            if not os.path.isabs(next):
+                next = os.path.abspath(next)
+            if history:
+                return next, hist
+            return next
+
+    if history:
+        return "", hist
+    return ""
+
+
+
+# this can be used by all PM backends to create the index files in parallel
+def wic_create_index(arg):
+    index_cmd = arg
+
+    try:
+        msger.info("Executing '%s' ..."
% index_cmd) + subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + return("Index creation command '%s' failed with return code %d:\n%s" % + (e.cmd, e.returncode, e.output)) + + return None + + +class WicIndexer(object): + __metaclass__ = ABCMeta + + def __init__(self, d, deploy_dir): + self.d = d + self.deploy_dir = deploy_dir + + @abstractmethod + def write_index(self): + pass + +class WicOpkgIndexer(WicIndexer): + def write_index(self): + arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS", + "SDK_PACKAGE_ARCHS", + "MULTILIB_ARCHS"] + + opkg_index_cmd = which(os.getenv('PATH'), "opkg-make-index") + + if not os.path.exists(os.path.join(self.deploy_dir, "Packages")): + open(os.path.join(self.deploy_dir, "Packages"), "w").close() + + index_cmds = [] + for arch_var in arch_vars: + if self.d.has_key(arch_var): + archs = self.d[arch_var] + else: + archs = None + + if archs is None: + continue + + for arch in archs.split(): + pkgs_dir = os.path.join(self.deploy_dir, arch) + pkgs_file = os.path.join(pkgs_dir, "Packages") + + if not os.path.isdir(pkgs_dir): + continue + + if not os.path.exists(pkgs_file): + open(pkgs_file, "w").close() + + index_cmds.append('%s -r %s -p %s -m %s' % + (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir)) + + if len(index_cmds) == 0: + msger.info("There are no packages in %s!" % self.deploy_dir) + return + + nproc = multiprocessing.cpu_count() + pool = multiprocessing.Pool(nproc) + results = list(pool.imap(wic_create_index, index_cmds)) + pool.close() + pool.join() + + for result in results: + if result is not None: + return(result) + +class WicPkgsList(object): + __metaclass__ = ABCMeta + + def __init__(self, d, rootfs_dir): + self.d = d + self.rootfs_dir = rootfs_dir + + @abstractmethod + def list(self, format=None): + pass + + +class WicOpkgPkgsList(WicPkgsList): + def __init__(self, d, rootfs_dir, config_file): + super(WicOpkgPkgsList, self).__init__(d, rootfs_dir) + + self.opkg_cmd = which(os.getenv('PATH'), "opkg-cl") + self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir) + if self.d.has_key("OPKG_ARGS"): + self.opkg_args += self.d["OPKG_ARGS"] + + def list(self, format=None): + opkg_query_cmd = which(os.getenv('PATH'), "opkg-query-helper.py") + + if format == "arch": + cmd = "%s %s status | %s -a" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + elif format == "file": + cmd = "%s %s status | %s -f" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + elif format == "ver": + cmd = "%s %s status | %s -v" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + elif format == "deps": + cmd = "%s %s status | %s" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + else: + cmd = "%s %s list_installed | cut -d' ' -f1" % \ + (self.opkg_cmd, self.opkg_args) + + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip() + except subprocess.CalledProcessError as e: + msger.error("Cannot get the installed packages list. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + if output and format == "file": + tmp_output = "" + for line in output.split('\n'): + pkg, pkg_file, pkg_arch = line.split() + full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file) + if os.path.exists(full_path): + tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch) + else: + tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch) + + output = tmp_output + + return output + + +class WicPackageManager(object): + """ + This is an abstract class. 
Do not instantiate this directly.
+    """
+    __metaclass__ = ABCMeta
+
+    def __init__(self, d, pseudo, native_sysroot):
+        self.d = d
+        self.deploy_dir = None
+        self.deploy_lock = None
+        if self.d.has_key('PACKAGE_FEED_URIS'):
+            self.feed_uris = self.d['PACKAGE_FEED_URIS']
+        else:
+            self.feed_uris = ""
+        self.pseudo = pseudo
+        self.native_sysroot = native_sysroot
+
+    """
+    Update the package manager package database.
+    """
+    @abstractmethod
+    def update(self):
+        pass
+
+    """
+    Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
+    True, installation failures are ignored.
+    """
+    @abstractmethod
+    def install(self, pkgs, attempt_only=False):
+        pass
+
+    """
+    Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
+    is False, any dependencies are left in place.
+    """
+    @abstractmethod
+    def remove(self, pkgs, with_dependencies=True):
+        pass
+
+    """
+    This function creates the index files
+    """
+    @abstractmethod
+    def write_index(self):
+        pass
+
+    @abstractmethod
+    def remove_packaging_data(self):
+        pass
+
+    @abstractmethod
+    def list_installed(self, format=None):
+        pass
+
+    @abstractmethod
+    def insert_feeds_uris(self):
+        pass
+
+    """
+    Install complementary packages based upon the list of currently installed
+    packages, e.g. locales, *-dev, *-dbg, etc. This will only attempt to
+    install these packages; if they do not exist, no error will occur.
+    Note: every backend needs to call this function explicitly after the
+    normal package installation.
+    """
+    def install_complementary(self, globs=None):
+        # we need to write the list of installed packages to a file because
+        # oe-pkgdata-util reads it from a file
+        if self.d.has_key('WORKDIR'):
+            installed_pkgs_file = os.path.join(self.d['WORKDIR'],
+                                               "installed_pkgs.txt")
+        else:
+            msger.error("No WORKDIR provided!")
+
+        with open(installed_pkgs_file, "w+") as installed_pkgs:
+            installed_pkgs.write(self.list_installed("arch"))
+
+        if globs is None:
+            if self.d.has_key('IMAGE_INSTALL_COMPLEMENTARY'):
+                globs = self.d['IMAGE_INSTALL_COMPLEMENTARY']
+            split_linguas = set()
+
+            if self.d.has_key('IMAGE_LINGUAS'):
+                for translation in self.d['IMAGE_LINGUAS'].split():
+                    split_linguas.add(translation)
+                    split_linguas.add(translation.split('-')[0])
+
+            split_linguas = sorted(split_linguas)
+
+            # start from an empty glob list when only locale globs are
+            # needed, so the += below does not fail on None
+            if split_linguas and globs is None:
+                globs = ""
+
+            for lang in split_linguas:
+                globs += " *-locale-%s" % lang
+
+        if globs is None:
+            return
+
+        if not self.d.has_key('PKGDATA_DIR'):
+            msger.error("No PKGDATA_DIR provided!")
+
+        # build the command as a single string (self.pseudo is a string
+        # prefix) and pass the glob patterns as one quoted argument
+        cmd = "%s glob %s %s '%s'" % (which(os.getenv('PATH'), "oe-pkgdata-util"),
+                                      self.d['PKGDATA_DIR'], installed_pkgs_file,
+                                      globs)
+
+        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
+        if rc != 0:
+            msger.error("Could not compute complementary packages list. "
+                        "Command '%s' returned %d" % (cmd, rc))
+
+        self.install(out.split(), attempt_only=True)
+
+
+    def deploy_dir_lock(self):
+        if self.deploy_dir is None:
+            raise RuntimeError("deploy_dir is not set!")
+
+        lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
+
+        self.deploy_lock = lockfile(lock_file_name)
+
+    def deploy_dir_unlock(self):
+        if self.deploy_lock is None:
+            return
+
+        unlockfile(self.deploy_lock)
+
+        self.deploy_lock = None
+
+
+class WicOpkgPM(WicPackageManager):
+    def __init__(self, d, target_rootfs, config_file, archs, pseudo, native_sysroot, task_name='target'):
+        super(WicOpkgPM, self).__init__(d, pseudo, native_sysroot)
+
+        self.target_rootfs = target_rootfs
+        self.config_file = config_file
+        self.pkg_archs = archs
+        self.task_name = task_name
+
+        if self.d.has_key("DEPLOY_DIR_IPK"):
+            self.deploy_dir = self.d["DEPLOY_DIR_IPK"]
+        else:
+            msger.error("No DEPLOY_DIR_IPK provided!")
+
+        self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
+        self.opkg_cmd = which(os.getenv('PATH'), "opkg-cl")
+        self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs)
+        if self.d.has_key("OPKG_ARGS"):
+            self.opkg_args += self.d["OPKG_ARGS"]
+
+        if self.d.has_key('OPKGLIBDIR'):
+            opkg_lib_dir = self.d['OPKGLIBDIR']
+        else:
+            opkg_lib_dir = ""
+
+        # strip a leading '/' so os.path.join() does not discard target_rootfs
+        if opkg_lib_dir.startswith("/"):
+            opkg_lib_dir = opkg_lib_dir[1:]
+
+        self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
+
+        mkdirhier(self.opkg_dir)
+
+        if self.d.has_key("TMPDIR"):
+            tmp_dir = self.d["TMPDIR"]
+        else:
+            tmp_dir = ""
+
+        self.saved_opkg_dir = '%s/saved/%s' % (tmp_dir, self.task_name)
+        if not os.path.exists('%s/saved' % tmp_dir):
+            mkdirhier('%s/saved' % tmp_dir)
+
+        if self.d.has_key('BUILD_IMAGES_FROM_FEEDS') and self.d['BUILD_IMAGES_FROM_FEEDS'] == "1":
+            self._create_custom_config()
+        else:
+            self._create_config()
+
+        self.indexer = WicOpkgIndexer(self.d, self.deploy_dir)
+
+    """
+    This function will change a package's status in the /var/lib/opkg/status file.
+ If 'packages' is None then the new_status will be applied to all + packages + """ + def mark_packages(self, status_tag, packages=None): + status_file = os.path.join(self.opkg_dir, "status") + + with open(status_file, "r") as sf: + with open(status_file + ".tmp", "w+") as tmp_sf: + if packages is None: + tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", + r"Package: \1\n\2Status: \3%s" % status_tag, + sf.read())) + else: + if type(packages).__name__ != "list": + raise TypeError("'packages' should be a list object") + + status = sf.read() + for pkg in packages: + status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, + r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), + status) + + tmp_sf.write(status) + + os.rename(status_file + ".tmp", status_file) + + def _create_custom_config(self): + msger.info("Building from feeds activated!") + + with open(self.config_file, "w+") as config_file: + priority = 1 + for arch in self.pkg_archs.split(): + config_file.write("arch %s %d\n" % (arch, priority)) + priority += 5 + + if self.d.has_key('IPK_FEED_URIS'): + ipk_feed_uris = self.d['IPK_FEED_URIS'] + else: + ipk_feed_uris = "" + + for line in ipk_feed_uris.split(): + feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line) + + if feed_match is not None: + feed_name = feed_match.group(1) + feed_uri = feed_match.group(2) + + msger.info("Add %s feed with URL %s" % (feed_name, feed_uri)) + + config_file.write("src/gz %s %s\n" % (feed_name, feed_uri)) + + """ + Allow to use package deploy directory contents as quick devel-testing + feed. This creates individual feed configs for each arch subdir of those + specified as compatible for the current machine. + NOTE: Development-helper feature, NOT a full-fledged feed. 
+        """
+        if self.d.has_key('FEED_DEPLOYDIR_BASE_URI'):
+            feed_deploydir_base_dir = self.d['FEED_DEPLOYDIR_BASE_URI']
+        else:
+            feed_deploydir_base_dir = ""
+
+        if feed_deploydir_base_dir != "":
+            for arch in self.pkg_archs.split():
+                if self.d.has_key("sysconfdir"):
+                    sysconfdir = self.d["sysconfdir"]
+                else:
+                    sysconfdir = None
+
+                cfg_file_name = os.path.join(self.target_rootfs,
+                                             sysconfdir,
+                                             "opkg",
+                                             "local-%s-feed.conf" % arch)
+
+                with open(cfg_file_name, "w+") as cfg_file:
+                    cfg_file.write("src/gz local-%s %s/%s" %
+                                   (arch,
+                                    feed_deploydir_base_dir,
+                                    arch))
+
+    def _create_config(self):
+        with open(self.config_file, "w+") as config_file:
+            priority = 1
+            for arch in self.pkg_archs.split():
+                config_file.write("arch %s %d\n" % (arch, priority))
+                priority += 5
+
+            config_file.write("src oe file:%s\n" % self.deploy_dir)
+
+            for arch in self.pkg_archs.split():
+                pkgs_dir = os.path.join(self.deploy_dir, arch)
+                if os.path.isdir(pkgs_dir):
+                    config_file.write("src oe-%s file:%s\n" %
+                                      (arch, pkgs_dir))
+
+    def insert_feeds_uris(self):
+        if self.feed_uris == "":
+            return
+
+        rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
+                                     % self.target_rootfs)
+
+        with open(rootfs_config, "w+") as config_file:
+            uri_iterator = 0
+            for uri in self.feed_uris.split():
+                config_file.write("src/gz url-%d %s/ipk\n" %
+                                  (uri_iterator, uri))
+
+                for arch in self.pkg_archs.split():
+                    if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+                        continue
+                    msger.info('Note: adding opkg channel url-%s-%d (%s)' %
+                               (arch, uri_iterator, uri))
+
+                    config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" %
+                                      (arch, uri_iterator, uri, arch))
+                uri_iterator += 1
+
+    def update(self):
+        self.deploy_dir_lock()
+
+        cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
+
+        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
+        if rc != 0:
+            self.deploy_dir_unlock()
+            msger.error("Unable to update the package index files. Command '%s' "
+                        "returned %d" % (cmd, rc))
+
+        self.deploy_dir_unlock()
+
+    def install(self, pkgs, attempt_only=False):
+        if attempt_only and len(pkgs) == 0:
+            return
+
+        cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+
+        os.environ['D'] = self.target_rootfs
+        os.environ['OFFLINE_ROOT'] = self.target_rootfs
+        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+        if self.d.has_key('WORKDIR'):
+            os.environ['INTERCEPT_DIR'] = os.path.join(self.d['WORKDIR'],
+                                                       "intercept_scripts")
+        else:
+            os.environ['INTERCEPT_DIR'] = "."
+            msger.warning("No WORKDIR provided!")
+
+        if self.d.has_key('STAGING_DIR_NATIVE'):
+            os.environ['NATIVE_ROOT'] = self.d['STAGING_DIR_NATIVE']
+        else:
+            msger.error("No STAGING_DIR_NATIVE provided!")
+
+        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
+        if rc != 0:
+            msger.error("Unable to install packages. "
+                        "Command '%s' returned %d" % (cmd, rc))
+
+
+    def remove(self, pkgs, with_dependencies=True):
+        if with_dependencies:
+            cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
+                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+        else:
+            cmd = "%s %s --force-depends remove %s" % \
+                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+
+        rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
+        if rc != 0:
+            msger.error("Unable to remove packages. 
Command '%s' " + "returned %d" % (cmd, rc)) + + + def write_index(self): + self.deploy_dir_lock() + + result = self.indexer.write_index() + + self.deploy_dir_unlock() + + if result is not None: + msger.error(result) + + def remove_packaging_data(self): + remove(self.opkg_dir, True) + # create the directory back, it's needed by PM lock + mkdirhier(self.opkg_dir) + + def list_installed(self, format=None): + return WicOpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format) + + def handle_bad_recommendations(self): + if self.d.has_key("BAD_RECOMMENDATIONS"): + bad_recommendations = self.d["BAD_RECOMMENDATIONS"] + else: + bad_recommendations = "" + + if bad_recommendations.strip() == "": + return + + status_file = os.path.join(self.opkg_dir, "status") + + # If status file existed, it means the bad recommendations has already + # been handled + if os.path.exists(status_file): + return + + cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args) + + with open(status_file, "w+") as status: + for pkg in bad_recommendations.split(): + pkg_info = cmd + pkg + + try: + output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip() + except subprocess.CalledProcessError as e: + msger.error("Cannot get package info. Command '%s' " + "returned %d:\n%s" % (pkg_info, e.returncode, e.output)) + + if output == "": + msger.info("Ignored bad recommendation: '%s' is " + "not a package" % pkg) + continue + + for line in output.split('\n'): + if line.startswith("Status:"): + status.write("Status: deinstall hold not-installed\n") + else: + status.write(line + "\n") + + ''' + The following function dummy installs pkgs and returns the log of output. + ''' + def dummy_install(self, pkgs): + if len(pkgs) == 0: + return + + # Create an temp dir as opkg root for dummy installation + if self.d.has_key("TMPDIR"): + tmp_dir = self.d["TMPDIR"] + else: + tmp_dir = "." + msger.warning("No TMPDIR provided!") + + temp_rootfs = '%s/opkg' % tmp_dir + temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg') + mkdirhier(temp_opkg_dir) + + opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs) + if self.d.has_key("OPKG_ARGS"): + opkg_args += self.d["OPKG_ARGS"] + + cmd = "%s %s update" % (self.opkg_cmd, opkg_args) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + msger.error("Unable to update. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + # Dummy installation + cmd = "%s %s --noaction install %s " % (self.opkg_cmd, + opkg_args, + ' '.join(pkgs)) + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + msger.error("Unable to dummy install packages. 
Command '%s' "
+                        "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+        remove(temp_rootfs, True)
+
+        return output
+
+    def backup_packaging_data(self):
+        # Save the opkglib for incremental ipk image generation
+        if os.path.exists(self.saved_opkg_dir):
+            remove(self.saved_opkg_dir, True)
+        shutil.copytree(self.opkg_dir,
+                        self.saved_opkg_dir,
+                        symlinks=True)
+
+    def recover_packaging_data(self):
+        # Copy the saved opkglib back
+        if os.path.exists(self.saved_opkg_dir):
+            if os.path.exists(self.opkg_dir):
+                remove(self.opkg_dir, True)
+
+            msger.info('Recover packaging data')
+            shutil.copytree(self.saved_opkg_dir,
+                            self.opkg_dir,
+                            symlinks=True)
+
+
+def wic_generate_index_files(d):
+    if d.has_key('PACKAGE_CLASSES'):
+        classes = d['PACKAGE_CLASSES'].replace("package_", "").split()
+    else:
+        # use an empty list, not an empty string, so the loop below does
+        # not iterate over single characters
+        classes = []
+        msger.warning("No PACKAGE_CLASSES provided!")
+
+    if d.has_key('DEPLOY_DIR_IPK'):
+        deploy_dir_ipk = d['DEPLOY_DIR_IPK']
+    else:
+        deploy_dir_ipk = None
+        msger.warning("No DEPLOY_DIR_IPK provided!")
+
+    indexer_map = {
+        "ipk": (WicOpkgIndexer, deploy_dir_ipk)
+    }
+
+    result = None
+
+    for pkg_class in classes:
+        if pkg_class not in indexer_map:
+            continue
+
+        deploy_dir = indexer_map[pkg_class][1]
+        if deploy_dir and os.path.exists(deploy_dir):
+            result = indexer_map[pkg_class][0](d, deploy_dir).write_index()
+
+    if result is not None:
+        msger.error(result)
-- 
cgit v1.2.3-54-g00ecf
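
For readers who want to try the new module outside of a full image build, the sketch below shows one way the wrapper might be driven once the patch is applied. It is illustrative only and not part of the commit: the import path assumes scripts/lib is on sys.path, the rootfs path, opkg config path, architecture list and package names are placeholder values, and the dictionary passed as 'd' is expected to carry the BitBake variables the classes read (DEPLOY_DIR_IPK, OPKGLIBDIR, TMPDIR, WORKDIR, PKGDATA_DIR, STAGING_DIR_NATIVE, ...).

    # Example driver for WicOpkgPM -- placeholder values throughout.
    from wic.utils.oe.package_manager import WicOpkgPM, wic_generate_index_files

    def example_populate_rootfs(bb_vars):
        # bb_vars: plain dict of BitBake variables (queried via has_key() above)
        target_rootfs = "/tmp/wic-rootfs"              # example path
        config_file = "/tmp/wic-rootfs/opkg.conf"      # example path
        archs = "all armv5te qemuarm"                  # example arch priority list
        pseudo = ""      # or an "export PSEUDO_PREFIX=...; pseudo " prefix string
        native_sysroot = bb_vars.get("STAGING_DIR_NATIVE", "")

        pm = WicOpkgPM(bb_vars, target_rootfs, config_file, archs,
                       pseudo, native_sysroot)

        pm.write_index()                    # index the ipk deploy directory
        pm.update()                         # refresh the opkg package lists
        pm.install(["base-files", "busybox"])
        pm.install_complementary()          # *-dev, *-dbg, locale globs, etc.
        pm.remove_packaging_data()          # drop /var/lib/opkg state again

        # Index generation can also be run on its own:
        wic_generate_index_files(bb_vars)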