diff options
Diffstat (limited to 'meta/lib/oe/package_manager.py')
-rw-r--r-- | meta/lib/oe/package_manager.py | 1797 |
1 files changed, 1797 insertions, 0 deletions
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py new file mode 100644 index 0000000000..505509543d --- /dev/null +++ b/meta/lib/oe/package_manager.py | |||
@@ -0,0 +1,1797 @@ | |||
1 | from abc import ABCMeta, abstractmethod | ||
2 | import os | ||
3 | import glob | ||
4 | import subprocess | ||
5 | import shutil | ||
6 | import multiprocessing | ||
7 | import re | ||
8 | import bb | ||
9 | import tempfile | ||
10 | import oe.utils | ||
11 | |||
12 | |||
# Worker shared by all package-manager backends: runs one index-creation
# command (suitable as the callable for parallel index generation).
def create_index(arg):
    """Execute the shell command 'arg'.

    Returns None on success, or an error-message string on failure so the
    caller can collect failures from a pool of workers.
    """
    try:
        bb.note("Executing '%s' ..." % arg)
        output = subprocess.check_output(arg, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        return ("Index creation command '%s' failed with return code %d:\n%s" %
                (e.cmd, e.returncode, e.output))

    if output:
        bb.note(output)

    return None
28 | |||
29 | |||
class Indexer(object):
    """Abstract base class for backend-specific package-index writers."""
    __metaclass__ = ABCMeta

    def __init__(self, d, deploy_dir):
        """Remember the datastore and the package deploy directory."""
        self.d = d
        self.deploy_dir = deploy_dir

    @abstractmethod
    def write_index(self):
        """Generate the package index files under self.deploy_dir."""
40 | |||
41 | |||
class RpmIndexer(Indexer):
    def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
        """Collect per-multilib package-arch lists and target-OS strings.

        Returns a tuple (ml_prefix_list, target_os):
          - ml_prefix_list maps 'default' plus each multilib name to its
            (reversed) PACKAGE_ARCHS list; non-default entries get a
            "<mlib>_" prefix except the arch-independent names
            'all'/'noarch'/'any'.
          - target_os maps the same keys to the TARGET_OS value.
        When both arch_var and os_var are given, the 'default' entries come
        from those variables instead and no multilib expansion is done.
        """
        package_archs = {
            'default': [],
        }

        target_os = {
            'default': "",
        }

        if arch_var is not None and os_var is not None:
            package_archs['default'] = self.d.getVar(arch_var, True).split()
            package_archs['default'].reverse()
            target_os['default'] = self.d.getVar(os_var, True).strip()
        else:
            package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
            # arch order is reversed.  This ensures the -best- match is
            # listed first!
            package_archs['default'].reverse()
            target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
            multilibs = self.d.getVar('MULTILIBS', True) or ""
            # MULTILIBS entries look like "multilib:<name>"; re-evaluate
            # PACKAGE_ARCHS/TARGET_OS under each multilib's DEFAULTTUNE.
            for ext in multilibs.split():
                eext = ext.split(':')
                if len(eext) > 1 and eext[0] == 'multilib':
                    localdata = bb.data.createCopy(self.d)
                    default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
                    default_tune = localdata.getVar(default_tune_key, False)
                    if default_tune is None:
                        # Fall back to the older DEFAULTTUNE_ML_<mlib> name.
                        default_tune_key = "DEFAULTTUNE_ML_" + eext[1]
                        default_tune = localdata.getVar(default_tune_key, False)
                    if default_tune:
                        localdata.setVar("DEFAULTTUNE", default_tune)
                        bb.data.update_data(localdata)
                        package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
                                                                  True).split()
                        package_archs[eext[1]].reverse()
                        target_os[eext[1]] = localdata.getVar("TARGET_OS",
                                                              True).strip()

        ml_prefix_list = dict()
        for mlib in package_archs:
            if mlib == 'default':
                ml_prefix_list[mlib] = package_archs[mlib]
            else:
                ml_prefix_list[mlib] = list()
                for arch in package_archs[mlib]:
                    if arch in ['all', 'noarch', 'any']:
                        # Arch-independent packages keep their plain name.
                        ml_prefix_list[mlib].append(arch)
                    else:
                        ml_prefix_list[mlib].append(mlib + "_" + arch)

        return (ml_prefix_list, target_os)

    def write_index(self):
        """Run createrepo over every existing arch dir under deploy_dir."""
        sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
        all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()

        mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]

        # Normalize '-' to '_' to match the on-disk deploy dir names.
        archs = set()
        for item in mlb_prefix_list:
            archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))

        if len(archs) == 0:
            # No multilib-derived archs; fall back to every multilib arch.
            archs = archs.union(set(all_mlb_pkg_archs))

        archs = archs.union(set(sdk_pkg_archs))

        rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
        index_cmds = []
        rpm_dirs_found = False
        for arch in archs:
            arch_dir = os.path.join(self.deploy_dir, arch)
            if not os.path.isdir(arch_dir):
                continue

            index_cmds.append("%s --update -q %s" % (rpm_createrepo, arch_dir))

            rpm_dirs_found = True

        if not rpm_dirs_found:
            bb.note("There are no packages in %s" % self.deploy_dir)
            return

        # Indexes are generated in parallel; collect any failure messages.
        result = oe.utils.multiprocess_exec(index_cmds, create_index)
        if result:
            bb.fatal('%s' % ('\n'.join(result)))
129 | |||
130 | |||
class OpkgIndexer(Indexer):
    """Writes opkg 'Packages' index files using opkg-make-index."""

    def write_index(self):
        """Regenerate the Packages index for every known package-arch dir."""
        opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")

        # Ensure the top-level index file exists, even if empty.
        top_index = os.path.join(self.deploy_dir, "Packages")
        if not os.path.exists(top_index):
            open(top_index, "w").close()

        index_cmds = []
        for arch_var in ["ALL_MULTILIB_PACKAGE_ARCHS",
                         "SDK_PACKAGE_ARCHS",
                         "MULTILIB_ARCHS"]:
            archs = self.d.getVar(arch_var, True)
            if archs is None:
                continue

            for arch in archs.split():
                pkgs_dir = os.path.join(self.deploy_dir, arch)
                if not os.path.isdir(pkgs_dir):
                    continue

                # opkg-make-index needs the per-arch Packages file present.
                pkgs_file = os.path.join(pkgs_dir, "Packages")
                if not os.path.exists(pkgs_file):
                    open(pkgs_file, "w").close()

                index_cmds.append('%s -r %s -p %s -m %s' %
                                  (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))

        if not index_cmds:
            bb.note("There are no packages in %s!" % self.deploy_dir)
            return

        # Run the index commands in parallel; abort on any failure.
        result = oe.utils.multiprocess_exec(index_cmds, create_index)
        if result:
            bb.fatal('%s' % ('\n'.join(result)))
168 | |||
169 | |||
170 | |||
class DpkgIndexer(Indexer):
    """Writes apt Packages/Packages.gz/Release files using apt-ftparchive."""

    def write_index(self):
        """Regenerate the apt index files for each existing arch directory.

        Aborts the build via bb.fatal() if any index command fails.
        """
        # Start empty so an unset PACKAGE_ARCHS no longer leaves arch_list
        # unbound (NameError) further down.
        arch_list = []

        pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
        if pkg_archs is not None:
            arch_list = pkg_archs.split()

        sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
        if sdk_pkg_archs is not None:
            for a in sdk_pkg_archs.split():
                # Compare against the arch list, not the raw string: the old
                # substring test wrongly dropped e.g. "x86" whenever
                # "x86_64" was present.
                if a not in arch_list:
                    arch_list.append(a)

        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
        arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)

        apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
        gzip = bb.utils.which(os.getenv('PATH'), "gzip")

        index_cmds = []
        deb_dirs_found = False
        for arch in arch_list:
            arch_dir = os.path.join(self.deploy_dir, arch)
            if not os.path.isdir(arch_dir):
                continue

            cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)

            cmd += "%s -fc Packages > Packages.gz;" % gzip

            # Seed the Release file with the arch label; apt-ftparchive
            # appends the checksum stanzas below it (note the '>>').
            with open(os.path.join(arch_dir, "Release"), "w+") as release:
                release.write("Label: %s\n" % arch)

            cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive

            index_cmds.append(cmd)

            deb_dirs_found = True

        if not deb_dirs_found:
            bb.note("There are no packages in %s" % self.deploy_dir)
            return

        # Run the per-arch commands in parallel; abort on any failure.
        result = oe.utils.multiprocess_exec(index_cmds, create_index)
        if result:
            bb.fatal('%s' % ('\n'.join(result)))
215 | |||
216 | |||
217 | |||
class PkgsList(object):
    """Abstract base class for listing packages installed in a rootfs."""
    __metaclass__ = ABCMeta

    def __init__(self, d, rootfs_dir):
        """Remember the datastore and the target root filesystem path."""
        self.d = d
        self.rootfs_dir = rootfs_dir

    @abstractmethod
    def list(self, format=None):
        """Return the installed-package listing in the requested format."""
228 | |||
229 | |||
class RpmPkgsList(PkgsList):
    def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
        """Probe the rpm binary and the multilib arch/OS mapping.

        Sets self.rpm_version to the rpm major version (4 or 5), which
        selects query formats and feature availability later on.
        """
        super(RpmPkgsList, self).__init__(d, rootfs_dir)

        self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
        # Path of the rpm database inside the target image.
        self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')

        self.ml_prefix_list, self.ml_os_list = \
            RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)

        # Determine rpm version
        cmd = "%s --version" % self.rpm_cmd
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            bb.fatal("Getting rpm version failed. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
        # Keep only the major version, e.g. "RPM version 5.4.14" -> 5.
        self.rpm_version = int(output.split()[-1].split('.')[0])

    '''
    Translate the RPM/Smart format names to the OE multilib format names
    '''
    def _pkg_translate_smart_to_oe(self, pkg, arch):
        new_pkg = pkg
        new_arch = arch
        fixed_arch = arch.replace('_', '-')
        found = 0
        for mlib in self.ml_prefix_list:
            for cmp_arch in self.ml_prefix_list[mlib]:
                fixed_cmp_arch = cmp_arch.replace('_', '-')
                if fixed_arch == fixed_cmp_arch:
                    if mlib == 'default':
                        new_pkg = pkg
                        new_arch = cmp_arch
                    else:
                        new_pkg = mlib + '-' + pkg
                        # We need to strip off the ${mlib}_ prefix on the arch
                        new_arch = cmp_arch.replace(mlib + '_', '')

                    # Workaround for bug 3565. Simply look to see if we
                    # know of a package with that name, if not try again!
                    filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
                                            'runtime-reverse',
                                            new_pkg)
                    if os.path.exists(filename):
                        found = 1
                        break

            # Only stop searching once the arch matched AND pkgdata
            # confirmed the translated package name exists.
            if found == 1 and fixed_arch == fixed_cmp_arch:
                break
        #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
        return new_pkg, new_arch

    def _list_pkg_deps(self):
        """Dump the dependency tree of the installed packages (rpm 5 only)."""
        cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
               "-t", self.image_rpmlib]

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the package dependencies. Command '%s' "
                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))

        return output

    def list(self, format=None):
        """List installed packages.

        format is one of None (names only), 'arch', 'file', 'ver' or 'deps'.
        """
        if format == "deps":
            if self.rpm_version == 4:
                bb.fatal("'deps' format dependency listings are not supported with rpm 4 since rpmresolve does not work")
            return self._list_pkg_deps()

        cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
        cmd += ' -D "_dbpath /var/lib/rpm" -qa'
        if self.rpm_version == 4:
            # rpm 4 has no PACKAGEORIGIN tag.
            cmd += " --qf '[%{NAME} %{ARCH} %{VERSION}\n]'"
        else:
            cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"

        try:
            # bb.note(cmd)
            tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()

        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        output = list()
        for line in tmp_output.split('\n'):
            if len(line.strip()) == 0:
                continue
            pkg = line.split()[0]
            arch = line.split()[1]
            ver = line.split()[2]
            if self.rpm_version == 4:
                # No PACKAGEORIGIN field was queried on rpm 4.
                pkgorigin = "unknown"
            else:
                pkgorigin = line.split()[3]
            new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)

            if format == "arch":
                output.append('%s %s' % (new_pkg, new_arch))
            elif format == "file":
                output.append('%s %s %s' % (new_pkg, pkgorigin, new_arch))
            elif format == "ver":
                output.append('%s %s %s' % (new_pkg, new_arch, ver))
            else:
                output.append('%s' % (new_pkg))

        output.sort()

        return '\n'.join(output)
341 | |||
342 | |||
class OpkgPkgsList(PkgsList):
    """Lists the packages installed in an opkg-managed root filesystem."""

    def __init__(self, d, rootfs_dir, config_file):
        super(OpkgPkgsList, self).__init__(d, rootfs_dir)

        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
        # Point opkg at the given config file and the target rootfs.
        self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
        self.opkg_args += self.d.getVar("OPKG_ARGS", True)

    def list(self, format=None):
        """Return installed packages in the requested format
        (None, 'arch', 'file', 'ver' or 'deps')."""
        opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")

        # Every non-default format pipes 'opkg-cl status' through the query
        # helper; the formats differ only in the helper's flag.
        helper_flag = {"arch": " -a", "file": " -f", "ver": " -v", "deps": ""}
        if format in helper_flag:
            cmd = "%s %s status | %s%s" % \
                (self.opkg_cmd, self.opkg_args, opkg_query_cmd,
                 helper_flag[format])
        else:
            cmd = "%s %s list_installed | cut -d' ' -f1" % \
                (self.opkg_cmd, self.opkg_args)

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        if output and format == "file":
            # Swap the bare package file name for its full path whenever the
            # file actually exists under the rootfs layout.
            rewritten = []
            for line in output.split('\n'):
                pkg, pkg_file, pkg_arch = line.split()
                full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
                if os.path.exists(full_path):
                    rewritten.append("%s %s %s\n" % (pkg, full_path, pkg_arch))
                else:
                    rewritten.append("%s %s %s\n" % (pkg, pkg_file, pkg_arch))

            output = ''.join(rewritten)

        return output
389 | |||
390 | |||
class DpkgPkgsList(PkgsList):
    """Lists the packages installed in a dpkg-managed root filesystem."""

    def list(self, format=None):
        """Return installed packages, queried via dpkg-query.

        format is one of None (names only), 'arch', 'file', 'ver' or
        'deps'; 'deps' post-processes the output through the opkg query
        helper to get a flat dependency listing.
        """
        cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
               "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
               "-W"]

        if format == "arch":
            cmd.append("-f=${Package} ${PackageArch}\n")
        elif format == "file":
            cmd.append("-f=${Package} ${Package}_${Version}_${Architecture}.deb ${PackageArch}\n")
        elif format == "ver":
            cmd.append("-f=${Package} ${PackageArch} ${Version}\n")
        elif format == "deps":
            cmd.append("-f=Package: ${Package}\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
        else:
            cmd.append("-f=${Package}\n")

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))

        # Guard on non-empty output (matches OpkgPkgsList): the old code
        # crashed unpacking an empty line when no packages were installed.
        if output and format == "file":
            tmp_output = ""
            for line in output.split('\n'):
                if not line.strip():
                    continue
                pkg, pkg_file, pkg_arch = line.split()
                full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
                # Prefer the on-disk path when the .deb is actually present.
                if os.path.exists(full_path):
                    tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
                else:
                    tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)

            output = tmp_output
        elif output and format == "deps":
            opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
            # Context manager guarantees the temp file is removed even when
            # bb.fatal() raises (the old code leaked it on the error path
            # after close(), and relied on manual close() otherwise).
            with tempfile.NamedTemporaryFile() as file_out:
                file_out.write(output)
                file_out.flush()

                try:
                    output = subprocess.check_output("cat %s | %s" %
                                                     (file_out.name, opkg_query_cmd),
                                                     stderr=subprocess.STDOUT,
                                                     shell=True)
                except subprocess.CalledProcessError as e:
                    bb.fatal("Cannot compute packages dependencies. Command '%s' "
                             "returned %d:\n%s" % (e.cmd, e.returncode, e.output))

        return output
444 | |||
445 | |||
class PackageManager(object):
    """
    This is an abstract class. Do not instantiate this directly.
    """
    __metaclass__ = ABCMeta

    def __init__(self, d):
        self.d = d
        # Subclasses must set deploy_dir before using the deploy lock.
        self.deploy_dir = None
        self.deploy_lock = None
        self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""

    @abstractmethod
    def update(self):
        """Update the package manager package database."""

    @abstractmethod
    def install(self, pkgs, attempt_only=False):
        """Install a list of packages. 'pkgs' is a list object. If
        'attempt_only' is True, installation failures are ignored."""

    @abstractmethod
    def remove(self, pkgs, with_dependencies=True):
        """Remove a list of packages. 'pkgs' is a list object. If
        'with_dependencies' is False, any dependencies are left in place."""

    @abstractmethod
    def write_index(self):
        """This function creates the index files."""

    @abstractmethod
    def remove_packaging_data(self):
        """Remove the package manager's own metadata from the rootfs."""

    @abstractmethod
    def list_installed(self, format=None):
        """Return the installed packages in the requested format."""

    @abstractmethod
    def insert_feeds_uris(self):
        """Add the PACKAGE_FEED_URIS feeds to the backend configuration."""

    def install_complementary(self, globs=None):
        """Install complementary packages based upon the list of currently
        installed packages e.g. locales, *-dev, *-dbg, etc. This will only
        attempt to install these packages, if they don't exist then no
        error will occur. Note: every backend needs to call this function
        explicitly after the normal package installation.
        """
        # we need to write the list of installed packages to a file because
        # the oe-pkgdata-util reads it from a file
        installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
                                           "installed_pkgs.txt")
        with open(installed_pkgs_file, "w+") as installed_pkgs:
            installed_pkgs.write(self.list_installed("arch"))

        if globs is None:
            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True)
            if globs is None:
                # Nothing to install.  This check must come BEFORE the
                # locale-glob appending below: the old code did
                # 'globs += ...' first and raised TypeError on None
                # whenever IMAGE_INSTALL_COMPLEMENTARY was unset.
                return

            split_linguas = set()

            for translation in self.d.getVar('IMAGE_LINGUAS', True).split():
                split_linguas.add(translation)
                # "en-gb" also pulls in the plain "en" locale packages.
                split_linguas.add(translation.split('-')[0])

            for lang in sorted(split_linguas):
                globs += " *-locale-%s" % lang

        cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
               "glob", self.d.getVar('PKGDATA_DIR', True), installed_pkgs_file,
               globs]
        exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY', True)
        if exclude:
            cmd.extend(['-x', exclude])
        try:
            bb.note("Installing complementary packages ...")
            complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Could not compute complementary packages list. Command "
                     "'%s' returned %d:\n%s" %
                     (' '.join(cmd), e.returncode, e.output))

        # Best-effort installation: missing complementary packages are fine.
        self.install(complementary_pkgs.split(), attempt_only=True)

    def deploy_dir_lock(self):
        """Take the deploy-directory lock; deploy_dir must be set first."""
        if self.deploy_dir is None:
            raise RuntimeError("deploy_dir is not set!")

        lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")

        self.deploy_lock = bb.utils.lockfile(lock_file_name)

    def deploy_dir_unlock(self):
        """Release the deploy-directory lock, if currently held."""
        if self.deploy_lock is None:
            return

        bb.utils.unlockfile(self.deploy_lock)

        self.deploy_lock = None
562 | |||
563 | |||
564 | class RpmPM(PackageManager): | ||
    def __init__(self,
                 d,
                 target_rootfs,
                 target_vendor,
                 task_name='target',
                 providename=None,
                 arch_var=None,
                 os_var=None):
        """Set up paths and tool locations for rpm/smart package management.

        providename, if given, lists extra RPM "Providename" entries to be
        written into the sysinfo configuration later.
        """
        super(RpmPM, self).__init__(d)
        self.target_rootfs = target_rootfs
        self.target_vendor = target_vendor
        self.task_name = task_name
        self.providename = providename
        self.fullpkglist = list()
        self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
        self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
        self.install_dir = os.path.join(self.target_rootfs, "install")
        self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
        self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
        # Smart keeps its state inside the target rootfs.
        self.smart_opt = "--quiet --data-dir=" + os.path.join(target_rootfs,
                                                              'var/lib/smart')
        self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
        # Where this task's dependency-solver manifest is saved.
        self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
                                               self.task_name)
        self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
        self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')

        if not os.path.exists(self.d.expand('${T}/saved')):
            bb.utils.mkdirhier(self.d.expand('${T}/saved'))

        self.indexer = RpmIndexer(self.d, self.deploy_dir)
        self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)
        # RpmPkgsList already probed the rpm major version (4 or 5).
        self.rpm_version = self.pkgs_list.rpm_version

        self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
600 | |||
601 | def insert_feeds_uris(self): | ||
602 | if self.feed_uris == "": | ||
603 | return | ||
604 | |||
605 | # List must be prefered to least preferred order | ||
606 | default_platform_extra = set() | ||
607 | platform_extra = set() | ||
608 | bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or "" | ||
609 | for mlib in self.ml_os_list: | ||
610 | for arch in self.ml_prefix_list[mlib]: | ||
611 | plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib] | ||
612 | if mlib == bbextendvariant: | ||
613 | default_platform_extra.add(plt) | ||
614 | else: | ||
615 | platform_extra.add(plt) | ||
616 | |||
617 | platform_extra = platform_extra.union(default_platform_extra) | ||
618 | |||
619 | arch_list = [] | ||
620 | for canonical_arch in platform_extra: | ||
621 | arch = canonical_arch.split('-')[0] | ||
622 | if not os.path.exists(os.path.join(self.deploy_dir, arch)): | ||
623 | continue | ||
624 | arch_list.append(arch) | ||
625 | |||
626 | uri_iterator = 0 | ||
627 | channel_priority = 10 + 5 * len(self.feed_uris.split()) * len(arch_list) | ||
628 | |||
629 | for uri in self.feed_uris.split(): | ||
630 | for arch in arch_list: | ||
631 | bb.note('Note: adding Smart channel url%d%s (%s)' % | ||
632 | (uri_iterator, arch, channel_priority)) | ||
633 | self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/rpm/%s -y' | ||
634 | % (uri_iterator, arch, uri, arch)) | ||
635 | self._invoke_smart('channel --set url%d-%s priority=%d' % | ||
636 | (uri_iterator, arch, channel_priority)) | ||
637 | channel_priority -= 5 | ||
638 | uri_iterator += 1 | ||
639 | |||
640 | ''' | ||
641 | Create configs for rpm and smart, and multilib is supported | ||
642 | ''' | ||
643 | def create_configs(self): | ||
644 | target_arch = self.d.getVar('TARGET_ARCH', True) | ||
645 | platform = '%s%s-%s' % (target_arch.replace('-', '_'), | ||
646 | self.target_vendor, | ||
647 | self.ml_os_list['default']) | ||
648 | |||
649 | # List must be prefered to least preferred order | ||
650 | default_platform_extra = list() | ||
651 | platform_extra = list() | ||
652 | bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or "" | ||
653 | for mlib in self.ml_os_list: | ||
654 | for arch in self.ml_prefix_list[mlib]: | ||
655 | plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib] | ||
656 | if mlib == bbextendvariant: | ||
657 | if plt not in default_platform_extra: | ||
658 | default_platform_extra.append(plt) | ||
659 | else: | ||
660 | if plt not in platform_extra: | ||
661 | platform_extra.append(plt) | ||
662 | platform_extra = default_platform_extra + platform_extra | ||
663 | |||
664 | self._create_configs(platform, platform_extra) | ||
665 | |||
666 | def _invoke_smart(self, args): | ||
667 | cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args) | ||
668 | # bb.note(cmd) | ||
669 | try: | ||
670 | complementary_pkgs = subprocess.check_output(cmd, | ||
671 | stderr=subprocess.STDOUT, | ||
672 | shell=True) | ||
673 | # bb.note(complementary_pkgs) | ||
674 | return complementary_pkgs | ||
675 | except subprocess.CalledProcessError as e: | ||
676 | bb.fatal("Could not invoke smart. Command " | ||
677 | "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
678 | |||
679 | def _search_pkg_name_in_feeds(self, pkg, feed_archs): | ||
680 | for arch in feed_archs: | ||
681 | arch = arch.replace('-', '_') | ||
682 | for p in self.fullpkglist: | ||
683 | regex_match = r"^%s-[^-]*-[^-]*@%s$" % \ | ||
684 | (re.escape(pkg), re.escape(arch)) | ||
685 | if re.match(regex_match, p) is not None: | ||
686 | # First found is best match | ||
687 | # bb.note('%s -> %s' % (pkg, pkg + '@' + arch)) | ||
688 | return pkg + '@' + arch | ||
689 | |||
690 | return "" | ||
691 | |||
    '''
    Translate the OE multilib format names to the RPM/Smart format names
    It searched the RPM/Smart format names in probable multilib feeds first,
    and then searched the default base feed.
    '''
    def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
        new_pkgs = list()

        for pkg in pkgs:
            new_pkg = pkg
            # Search new_pkg in probable multilibs first
            for mlib in self.ml_prefix_list:
                # Jump the default archs
                if mlib == 'default':
                    continue

                subst = pkg.replace(mlib + '-', '')
                # if the pkg in this multilib feed
                if subst != pkg:
                    feed_archs = self.ml_prefix_list[mlib]
                    new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
                    if not new_pkg:
                        # Failed to translate, package not found!
                        err_msg = '%s not found in the %s feeds (%s).\n' % \
                                  (pkg, mlib, " ".join(feed_archs))
                        if not attempt_only:
                            err_msg += " ".join(self.fullpkglist)
                            bb.fatal(err_msg)
                        # bb.fatal() raises, so the warning below only runs
                        # for attempt-only (best-effort) installs.
                        bb.warn(err_msg)
                    else:
                        new_pkgs.append(new_pkg)

                    break

            # Apparently not a multilib package...
            if pkg == new_pkg:
                # Search new_pkg in default archs
                default_archs = self.ml_prefix_list['default']
                new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
                if not new_pkg:
                    err_msg = '%s not found in the base feeds (%s).\n' % \
                              (pkg, ' '.join(default_archs))
                    if not attempt_only:
                        err_msg += " ".join(self.fullpkglist)
                        bb.fatal(err_msg)
                    # Best-effort installs only warn about missing packages.
                    bb.warn(err_msg)
                else:
                    new_pkgs.append(new_pkg)

        return new_pkgs
742 | |||
743 | def _create_configs(self, platform, platform_extra): | ||
744 | # Setup base system configuration | ||
745 | bb.note("configuring RPM platform settings") | ||
746 | |||
747 | # Configure internal RPM environment when using Smart | ||
748 | os.environ['RPM_ETCRPM'] = self.etcrpm_dir | ||
749 | bb.utils.mkdirhier(self.etcrpm_dir) | ||
750 | |||
751 | # Setup temporary directory -- install... | ||
752 | if os.path.exists(self.install_dir): | ||
753 | bb.utils.remove(self.install_dir, True) | ||
754 | bb.utils.mkdirhier(os.path.join(self.install_dir, 'tmp')) | ||
755 | |||
756 | channel_priority = 5 | ||
757 | platform_dir = os.path.join(self.etcrpm_dir, "platform") | ||
758 | sdkos = self.d.getVar("SDK_OS", True) | ||
759 | with open(platform_dir, "w+") as platform_fd: | ||
760 | platform_fd.write(platform + '\n') | ||
761 | for pt in platform_extra: | ||
762 | channel_priority += 5 | ||
763 | if sdkos: | ||
764 | tmp = re.sub("-%s$" % sdkos, "-%s\n" % sdkos, pt) | ||
765 | tmp = re.sub("-linux.*$", "-linux.*\n", tmp) | ||
766 | platform_fd.write(tmp) | ||
767 | |||
768 | # Tell RPM that the "/" directory exist and is available | ||
769 | bb.note("configuring RPM system provides") | ||
770 | sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo") | ||
771 | bb.utils.mkdirhier(sysinfo_dir) | ||
772 | with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames: | ||
773 | dirnames.write("/\n") | ||
774 | |||
775 | if self.providename: | ||
776 | providename_dir = os.path.join(sysinfo_dir, "Providename") | ||
777 | if not os.path.exists(providename_dir): | ||
778 | providename_content = '\n'.join(self.providename) | ||
779 | providename_content += '\n' | ||
780 | open(providename_dir, "w+").write(providename_content) | ||
781 | |||
782 | # Configure RPM... we enforce these settings! | ||
783 | bb.note("configuring RPM DB settings") | ||
784 | # After change the __db.* cache size, log file will not be | ||
785 | # generated automatically, that will raise some warnings, | ||
786 | # so touch a bare log for rpm write into it. | ||
787 | if self.rpm_version == 5: | ||
788 | rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001') | ||
789 | if not os.path.exists(rpmlib_log): | ||
790 | bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log')) | ||
791 | open(rpmlib_log, 'w+').close() | ||
792 | |||
793 | DB_CONFIG_CONTENT = "# ================ Environment\n" \ | ||
794 | "set_data_dir .\n" \ | ||
795 | "set_create_dir .\n" \ | ||
796 | "set_lg_dir ./log\n" \ | ||
797 | "set_tmp_dir ./tmp\n" \ | ||
798 | "set_flags db_log_autoremove on\n" \ | ||
799 | "\n" \ | ||
800 | "# -- thread_count must be >= 8\n" \ | ||
801 | "set_thread_count 64\n" \ | ||
802 | "\n" \ | ||
803 | "# ================ Logging\n" \ | ||
804 | "\n" \ | ||
805 | "# ================ Memory Pool\n" \ | ||
806 | "set_cachesize 0 1048576 0\n" \ | ||
807 | "set_mp_mmapsize 268435456\n" \ | ||
808 | "\n" \ | ||
809 | "# ================ Locking\n" \ | ||
810 | "set_lk_max_locks 16384\n" \ | ||
811 | "set_lk_max_lockers 16384\n" \ | ||
812 | "set_lk_max_objects 16384\n" \ | ||
813 | "mutex_set_max 163840\n" \ | ||
814 | "\n" \ | ||
815 | "# ================ Replication\n" | ||
816 | |||
817 | db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG') | ||
818 | if not os.path.exists(db_config_dir): | ||
819 | open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT) | ||
820 | |||
821 | # Create database so that smart doesn't complain (lazy init) | ||
822 | opt = "-qa" | ||
823 | if self.rpm_version == 4: | ||
824 | opt = "--initdb" | ||
825 | cmd = "%s --root %s --dbpath /var/lib/rpm %s > /dev/null" % ( | ||
826 | self.rpm_cmd, self.target_rootfs, opt) | ||
827 | try: | ||
828 | subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) | ||
829 | except subprocess.CalledProcessError as e: | ||
830 | bb.fatal("Create rpm database failed. Command '%s' " | ||
831 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
832 | |||
833 | # Configure smart | ||
834 | bb.note("configuring Smart settings") | ||
835 | bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), | ||
836 | True) | ||
837 | self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs) | ||
838 | self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm') | ||
839 | self._invoke_smart('config --set rpm-extra-macros._var=%s' % | ||
840 | self.d.getVar('localstatedir', True)) | ||
841 | cmd = 'config --set rpm-extra-macros._tmppath=/install/tmp' | ||
842 | |||
843 | prefer_color = self.d.getVar('RPM_PREFER_ELF_ARCH', True) | ||
844 | if prefer_color: | ||
845 | if prefer_color not in ['0', '1', '2', '4']: | ||
846 | bb.fatal("Invalid RPM_PREFER_ELF_ARCH: %s, it should be one of:\n" | ||
847 | "\t1: ELF32 wins\n" | ||
848 | "\t2: ELF64 wins\n" | ||
849 | "\t4: ELF64 N32 wins (mips64 or mips64el only)" % | ||
850 | prefer_color) | ||
851 | if prefer_color == "4" and self.d.getVar("TUNE_ARCH", True) not in \ | ||
852 | ['mips64', 'mips64el']: | ||
853 | bb.fatal("RPM_PREFER_ELF_ARCH = \"4\" is for mips64 or mips64el " | ||
854 | "only.") | ||
855 | self._invoke_smart('config --set rpm-extra-macros._prefer_color=%s' | ||
856 | % prefer_color) | ||
857 | |||
858 | self._invoke_smart(cmd) | ||
859 | |||
860 | # Write common configuration for host and target usage | ||
861 | self._invoke_smart('config --set rpm-nolinktos=1') | ||
862 | self._invoke_smart('config --set rpm-noparentdirs=1') | ||
863 | check_signature = self.d.getVar('RPM_CHECK_SIGNATURES', True) | ||
864 | if check_signature and check_signature.strip() == "0": | ||
865 | self._invoke_smart('config --set rpm-check-signatures=false') | ||
866 | for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split(): | ||
867 | self._invoke_smart('flag --set ignore-recommends %s' % i) | ||
868 | |||
869 | # Do the following configurations here, to avoid them being | ||
870 | # saved for field upgrade | ||
871 | if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1": | ||
872 | self._invoke_smart('config --set ignore-all-recommends=1') | ||
873 | pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or "" | ||
874 | for i in pkg_exclude.split(): | ||
875 | self._invoke_smart('flag --set exclude-packages %s' % i) | ||
876 | |||
877 | # Optional debugging | ||
878 | # self._invoke_smart('config --set rpm-log-level=debug') | ||
879 | # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile' | ||
880 | # self._invoke_smart(cmd) | ||
881 | ch_already_added = [] | ||
882 | for canonical_arch in platform_extra: | ||
883 | arch = canonical_arch.split('-')[0] | ||
884 | arch_channel = os.path.join(self.deploy_dir, arch) | ||
885 | if os.path.exists(arch_channel) and not arch in ch_already_added: | ||
886 | bb.note('Note: adding Smart channel %s (%s)' % | ||
887 | (arch, channel_priority)) | ||
888 | self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y' | ||
889 | % (arch, arch_channel)) | ||
890 | self._invoke_smart('channel --set %s priority=%d' % | ||
891 | (arch, channel_priority)) | ||
892 | channel_priority -= 5 | ||
893 | |||
894 | ch_already_added.append(arch) | ||
895 | |||
896 | bb.note('adding Smart RPM DB channel') | ||
897 | self._invoke_smart('channel --add rpmsys type=rpm-sys -y') | ||
898 | |||
899 | # Construct install scriptlet wrapper. | ||
900 | # Scripts need to be ordered when executed, this ensures numeric order. | ||
901 | # If we ever run into needing more the 899 scripts, we'll have to. | ||
902 | # change num to start with 1000. | ||
903 | # | ||
904 | if self.rpm_version == 4: | ||
905 | scriptletcmd = "$2 $3 $4\n" | ||
906 | else: | ||
907 | scriptletcmd = "$2 $1/$3 $4\n" | ||
908 | |||
909 | SCRIPTLET_FORMAT = "#!/bin/bash\n" \ | ||
910 | "\n" \ | ||
911 | "export PATH=%s\n" \ | ||
912 | "export D=%s\n" \ | ||
913 | 'export OFFLINE_ROOT="$D"\n' \ | ||
914 | 'export IPKG_OFFLINE_ROOT="$D"\n' \ | ||
915 | 'export OPKG_OFFLINE_ROOT="$D"\n' \ | ||
916 | "export INTERCEPT_DIR=%s\n" \ | ||
917 | "export NATIVE_ROOT=%s\n" \ | ||
918 | "\n" \ | ||
919 | + scriptletcmd + \ | ||
920 | "if [ $? -ne 0 ]; then\n" \ | ||
921 | " if [ $4 -eq 1 ]; then\n" \ | ||
922 | " mkdir -p $1/etc/rpm-postinsts\n" \ | ||
923 | " num=100\n" \ | ||
924 | " while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \ | ||
925 | " name=`head -1 $1/$3 | cut -d\' \' -f 2`\n" \ | ||
926 | ' echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \ | ||
927 | ' echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \ | ||
928 | " cat $1/$3 >> $1/etc/rpm-postinsts/${num}-${name}\n" \ | ||
929 | " chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \ | ||
930 | " else\n" \ | ||
931 | ' echo "Error: pre/post remove scriptlet failed"\n' \ | ||
932 | " fi\n" \ | ||
933 | "fi\n" | ||
934 | |||
935 | intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts') | ||
936 | native_root = self.d.getVar('STAGING_DIR_NATIVE', True) | ||
937 | scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'], | ||
938 | self.target_rootfs, | ||
939 | intercept_dir, | ||
940 | native_root) | ||
941 | open(self.scriptlet_wrapper, 'w+').write(scriptlet_content) | ||
942 | |||
943 | bb.note("Note: configuring RPM cross-install scriptlet_wrapper") | ||
944 | os.chmod(self.scriptlet_wrapper, 0755) | ||
945 | cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \ | ||
946 | self.scriptlet_wrapper | ||
947 | self._invoke_smart(cmd) | ||
948 | |||
949 | # Debug to show smart config info | ||
950 | # bb.note(self._invoke_smart('config --show')) | ||
951 | |||
952 | def update(self): | ||
953 | self._invoke_smart('update rpmsys') | ||
954 | |||
955 | ''' | ||
956 | Install pkgs with smart, the pkg name is oe format | ||
957 | ''' | ||
958 | def install(self, pkgs, attempt_only=False): | ||
959 | |||
960 | bb.note("Installing the following packages: %s" % ' '.join(pkgs)) | ||
961 | if attempt_only and len(pkgs) == 0: | ||
962 | return | ||
963 | pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only) | ||
964 | |||
965 | if not attempt_only: | ||
966 | bb.note('to be installed: %s' % ' '.join(pkgs)) | ||
967 | cmd = "%s %s install -y %s" % \ | ||
968 | (self.smart_cmd, self.smart_opt, ' '.join(pkgs)) | ||
969 | bb.note(cmd) | ||
970 | else: | ||
971 | bb.note('installing attempt only packages...') | ||
972 | bb.note('Attempting %s' % ' '.join(pkgs)) | ||
973 | cmd = "%s %s install --attempt -y %s" % \ | ||
974 | (self.smart_cmd, self.smart_opt, ' '.join(pkgs)) | ||
975 | try: | ||
976 | output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) | ||
977 | bb.note(output) | ||
978 | except subprocess.CalledProcessError as e: | ||
979 | bb.fatal("Unable to install packages. Command '%s' " | ||
980 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
981 | |||
982 | ''' | ||
983 | Remove pkgs with smart, the pkg name is smart/rpm format | ||
984 | ''' | ||
985 | def remove(self, pkgs, with_dependencies=True): | ||
986 | bb.note('to be removed: ' + ' '.join(pkgs)) | ||
987 | |||
988 | if not with_dependencies: | ||
989 | cmd = "%s -e --nodeps " % self.rpm_cmd | ||
990 | cmd += "--root=%s " % self.target_rootfs | ||
991 | cmd += "--dbpath=/var/lib/rpm " | ||
992 | cmd += "--define='_cross_scriptlet_wrapper %s' " % \ | ||
993 | self.scriptlet_wrapper | ||
994 | cmd += "--define='_tmppath /install/tmp' %s" % ' '.join(pkgs) | ||
995 | else: | ||
996 | # for pkg in pkgs: | ||
997 | # bb.note('Debug: What required: %s' % pkg) | ||
998 | # bb.note(self._invoke_smart('query %s --show-requiredby' % pkg)) | ||
999 | |||
1000 | cmd = "%s %s remove -y %s" % (self.smart_cmd, | ||
1001 | self.smart_opt, | ||
1002 | ' '.join(pkgs)) | ||
1003 | |||
1004 | try: | ||
1005 | bb.note(cmd) | ||
1006 | output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) | ||
1007 | bb.note(output) | ||
1008 | except subprocess.CalledProcessError as e: | ||
1009 | bb.note("Unable to remove packages. Command '%s' " | ||
1010 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
1011 | |||
1012 | def upgrade(self): | ||
1013 | bb.note('smart upgrade') | ||
1014 | self._invoke_smart('upgrade') | ||
1015 | |||
1016 | def write_index(self): | ||
1017 | result = self.indexer.write_index() | ||
1018 | |||
1019 | if result is not None: | ||
1020 | bb.fatal(result) | ||
1021 | |||
1022 | def remove_packaging_data(self): | ||
1023 | bb.utils.remove(self.image_rpmlib, True) | ||
1024 | bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), | ||
1025 | True) | ||
1026 | bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True) | ||
1027 | |||
1028 | # remove temp directory | ||
1029 | bb.utils.remove(self.d.expand('${IMAGE_ROOTFS}/install'), True) | ||
1030 | |||
1031 | def backup_packaging_data(self): | ||
1032 | # Save the rpmlib for increment rpm image generation | ||
1033 | if os.path.exists(self.saved_rpmlib): | ||
1034 | bb.utils.remove(self.saved_rpmlib, True) | ||
1035 | shutil.copytree(self.image_rpmlib, | ||
1036 | self.saved_rpmlib, | ||
1037 | symlinks=True) | ||
1038 | |||
1039 | def recovery_packaging_data(self): | ||
1040 | # Move the rpmlib back | ||
1041 | if os.path.exists(self.saved_rpmlib): | ||
1042 | if os.path.exists(self.image_rpmlib): | ||
1043 | bb.utils.remove(self.image_rpmlib, True) | ||
1044 | |||
1045 | bb.note('Recovery packaging data') | ||
1046 | shutil.copytree(self.saved_rpmlib, | ||
1047 | self.image_rpmlib, | ||
1048 | symlinks=True) | ||
1049 | |||
1050 | def list_installed(self, format=None): | ||
1051 | return self.pkgs_list.list(format) | ||
1052 | |||
1053 | ''' | ||
1054 | If incremental install, we need to determine what we've got, | ||
1055 | what we need to add, and what to remove... | ||
1056 | The dump_install_solution will dump and save the new install | ||
1057 | solution. | ||
1058 | ''' | ||
1059 | def dump_install_solution(self, pkgs): | ||
1060 | bb.note('creating new install solution for incremental install') | ||
1061 | if len(pkgs) == 0: | ||
1062 | return | ||
1063 | |||
1064 | pkgs = self._pkg_translate_oe_to_smart(pkgs, False) | ||
1065 | install_pkgs = list() | ||
1066 | |||
1067 | cmd = "%s %s install -y --dump %s 2>%s" % \ | ||
1068 | (self.smart_cmd, | ||
1069 | self.smart_opt, | ||
1070 | ' '.join(pkgs), | ||
1071 | self.solution_manifest) | ||
1072 | try: | ||
1073 | # Disable rpmsys channel for the fake install | ||
1074 | self._invoke_smart('channel --disable rpmsys') | ||
1075 | |||
1076 | subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) | ||
1077 | with open(self.solution_manifest, 'r') as manifest: | ||
1078 | for pkg in manifest.read().split('\n'): | ||
1079 | if '@' in pkg: | ||
1080 | install_pkgs.append(pkg) | ||
1081 | except subprocess.CalledProcessError as e: | ||
1082 | bb.note("Unable to dump install packages. Command '%s' " | ||
1083 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
1084 | # Recovery rpmsys channel | ||
1085 | self._invoke_smart('channel --enable rpmsys') | ||
1086 | return install_pkgs | ||
1087 | |||
1088 | ''' | ||
1089 | If incremental install, we need to determine what we've got, | ||
1090 | what we need to add, and what to remove... | ||
1091 | The load_old_install_solution will load the previous install | ||
1092 | solution | ||
1093 | ''' | ||
1094 | def load_old_install_solution(self): | ||
1095 | bb.note('load old install solution for incremental install') | ||
1096 | installed_pkgs = list() | ||
1097 | if not os.path.exists(self.solution_manifest): | ||
1098 | bb.note('old install solution not exist') | ||
1099 | return installed_pkgs | ||
1100 | |||
1101 | with open(self.solution_manifest, 'r') as manifest: | ||
1102 | for pkg in manifest.read().split('\n'): | ||
1103 | if '@' in pkg: | ||
1104 | installed_pkgs.append(pkg.strip()) | ||
1105 | |||
1106 | return installed_pkgs | ||
1107 | |||
1108 | ''' | ||
1109 | Dump all available packages in feeds, it should be invoked after the | ||
1110 | newest rpm index was created | ||
1111 | ''' | ||
1112 | def dump_all_available_pkgs(self): | ||
1113 | available_manifest = self.d.expand('${T}/saved/available_pkgs.txt') | ||
1114 | available_pkgs = list() | ||
1115 | cmd = "%s %s query --output %s" % \ | ||
1116 | (self.smart_cmd, self.smart_opt, available_manifest) | ||
1117 | try: | ||
1118 | subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) | ||
1119 | with open(available_manifest, 'r') as manifest: | ||
1120 | for pkg in manifest.read().split('\n'): | ||
1121 | if '@' in pkg: | ||
1122 | available_pkgs.append(pkg.strip()) | ||
1123 | except subprocess.CalledProcessError as e: | ||
1124 | bb.note("Unable to list all available packages. Command '%s' " | ||
1125 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
1126 | |||
1127 | self.fullpkglist = available_pkgs | ||
1128 | |||
1129 | return | ||
1130 | |||
1131 | def save_rpmpostinst(self, pkg): | ||
1132 | mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split() | ||
1133 | |||
1134 | new_pkg = pkg | ||
1135 | # Remove any multilib prefix from the package name | ||
1136 | for mlib in mlibs: | ||
1137 | if mlib in pkg: | ||
1138 | new_pkg = pkg.replace(mlib + '-', '') | ||
1139 | break | ||
1140 | |||
1141 | bb.note(' * postponing %s' % new_pkg) | ||
1142 | saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg | ||
1143 | |||
1144 | cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs | ||
1145 | cmd += ' --dbpath=/var/lib/rpm ' + new_pkg | ||
1146 | cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"' | ||
1147 | cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"' | ||
1148 | cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir | ||
1149 | |||
1150 | try: | ||
1151 | bb.note(cmd) | ||
1152 | output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip() | ||
1153 | bb.note(output) | ||
1154 | os.chmod(saved_dir, 0755) | ||
1155 | except subprocess.CalledProcessError as e: | ||
1156 | bb.fatal("Invoke save_rpmpostinst failed. Command '%s' " | ||
1157 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
1158 | |||
1159 | '''Write common configuration for target usage''' | ||
1160 | def rpm_setup_smart_target_config(self): | ||
1161 | bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), | ||
1162 | True) | ||
1163 | |||
1164 | self._invoke_smart('config --set rpm-nolinktos=1') | ||
1165 | self._invoke_smart('config --set rpm-noparentdirs=1') | ||
1166 | for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split(): | ||
1167 | self._invoke_smart('flag --set ignore-recommends %s' % i) | ||
1168 | self._invoke_smart('channel --add rpmsys type=rpm-sys -y') | ||
1169 | |||
1170 | ''' | ||
1171 | The rpm db lock files were produced after invoking rpm to query on | ||
1172 | build system, and they caused the rpm on target didn't work, so we | ||
1173 | need to unlock the rpm db by removing the lock files. | ||
1174 | ''' | ||
1175 | def unlock_rpm_db(self): | ||
1176 | # Remove rpm db lock files | ||
1177 | rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs) | ||
1178 | for f in rpm_db_locks: | ||
1179 | bb.utils.remove(f, True) | ||
1180 | |||
1181 | |||
class OpkgPM(PackageManager):
    """ipk backend of PackageManager: drives opkg-cl against an image
    rootfs, using the package feed deployed in DEPLOY_DIR_IPK."""

    def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
        """Prepare opkg command/arguments, working directories and the opkg
        configuration for installing into target_rootfs.

        archs is a space-separated, priority-ordered list of package
        architectures; task_name tags the saved state under ${T}/saved.
        """
        super(OpkgPM, self).__init__(d)

        self.target_rootfs = target_rootfs
        self.config_file = config_file
        self.pkg_archs = archs
        self.task_name = task_name

        self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
        self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
        self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs)
        self.opkg_args += self.d.getVar("OPKG_ARGS", True)

        opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
        # strip a leading "/" so os.path.join below keeps the rootfs prefix
        if opkg_lib_dir[0] == "/":
            opkg_lib_dir = opkg_lib_dir[1:]

        self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")

        bb.utils.mkdirhier(self.opkg_dir)

        self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
        if not os.path.exists(self.d.expand('${T}/saved')):
            bb.utils.mkdirhier(self.d.expand('${T}/saved'))

        # BUILD_IMAGES_FROM_FEEDS selects the remote-feed config over the
        # local deploy-dir based one.
        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
            self._create_config()
        else:
            self._create_custom_config()

        self.indexer = OpkgIndexer(self.d, self.deploy_dir)

    """
    This function will change a package's status in /var/lib/opkg/status file.
    If 'packages' is None then the new_status will be applied to all
    packages
    """
    def mark_packages(self, status_tag, packages=None):
        # The edit is done on a .tmp copy and atomically renamed into place.
        status_file = os.path.join(self.opkg_dir, "status")

        with open(status_file, "r") as sf:
            with open(status_file + ".tmp", "w+") as tmp_sf:
                if packages is None:
                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                                        r"Package: \1\n\2Status: \3%s" % status_tag,
                                        sf.read()))
                else:
                    # NOTE(review): isinstance(packages, list) would be the
                    # idiomatic check here.
                    if type(packages).__name__ != "list":
                        raise TypeError("'packages' should be a list object")

                    status = sf.read()
                    for pkg in packages:
                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                                        status)

                    tmp_sf.write(status)

        os.rename(status_file + ".tmp", status_file)

    def _create_custom_config(self):
        """Write an opkg config that pulls packages from the remote feeds
        listed in IPK_FEED_URIS (used with BUILD_IMAGES_FROM_FEEDS)."""
        bb.note("Building from feeds activated!")

        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            # IPK_FEED_URIS entries have the form "name##uri".
            for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
                feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)

                if feed_match is not None:
                    feed_name = feed_match.group(1)
                    feed_uri = feed_match.group(2)

                    bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))

                    config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))

            """
            Allow to use package deploy directory contents as quick devel-testing
            feed. This creates individual feed configs for each arch subdir of those
            specified as compatible for the current machine.
            NOTE: Development-helper feature, NOT a full-fledged feed.
            """
            if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
                for arch in self.pkg_archs.split():
                    cfg_file_name = os.path.join(self.target_rootfs,
                                                 self.d.getVar("sysconfdir", True),
                                                 "opkg",
                                                 "local-%s-feed.conf" % arch)

                    with open(cfg_file_name, "w+") as cfg_file:
                        cfg_file.write("src/gz local-%s %s/%s" %
                                       (arch,
                                        self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
                                        arch))

    def _create_config(self):
        """Write an opkg config whose feeds point at the local deploy
        directory (one 'src oe-<arch>' entry per existing arch subdir)."""
        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            config_file.write("src oe file:%s\n" % self.deploy_dir)

            for arch in self.pkg_archs.split():
                pkgs_dir = os.path.join(self.deploy_dir, arch)
                if os.path.isdir(pkgs_dir):
                    config_file.write("src oe-%s file:%s\n" %
                                      (arch, pkgs_dir))

    def insert_feeds_uris(self):
        """Write /etc/opkg/base-feeds.conf in the rootfs so the image itself
        can fetch packages from self.feed_uris at runtime."""
        if self.feed_uris == "":
            return

        rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
                                  % self.target_rootfs)

        with open(rootfs_config, "w+") as config_file:
            uri_iterator = 0
            for uri in self.feed_uris.split():
                config_file.write("src/gz url-%d %s/ipk\n" %
                                  (uri_iterator, uri))

                for arch in self.pkg_archs.split():
                    if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                        continue
                    bb.note('Note: adding opkg channel url-%s-%d (%s)' %
                        (arch, uri_iterator, uri))

                    # NOTE(review): the channel written here is named
                    # "uri-%s-%d" while the base feed above and the log line
                    # use "url-..." -- confirm the inconsistency is intended.
                    config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" %
                                      (arch, uri_iterator, uri, arch))
                uri_iterator += 1

    def update(self):
        """Run 'opkg-cl update' while holding the deploy-dir lock; the lock
        is released before aborting on failure."""
        self.deploy_dir_lock()

        cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            self.deploy_dir_unlock()
            bb.fatal("Unable to update the package index files. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        self.deploy_dir_unlock()

    def install(self, pkgs, attempt_only=False):
        """Install pkgs into the rootfs; with attempt_only, failures are
        only logged instead of aborting the build."""
        if attempt_only and len(pkgs) == 0:
            return

        cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))

        # Environment consumed by package scriptlets / intercept scripts so
        # they operate on the offline rootfs instead of the build host.
        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
                                                   "intercept_scripts")
        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)

        try:
            bb.note("Installing the following packages: %s" % ' '.join(pkgs))
            bb.note(cmd)
            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
            bb.note(output)
        except subprocess.CalledProcessError as e:
            # attempt_only picks bb.note (index 1), otherwise bb.fatal.
            (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
                                              "Command '%s' returned %d:\n%s" %
                                              (cmd, e.returncode, e.output))

    def remove(self, pkgs, with_dependencies=True):
        """Remove pkgs; with_dependencies also removes dependent packages
        (--force-removal-of-dependent-packages)."""
        if with_dependencies:
            cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
        else:
            cmd = "%s %s --force-depends remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))

        try:
            bb.note(cmd)
            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
            bb.note(output)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to remove packages. Command '%s' "
                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output))

    def write_index(self):
        """Regenerate the ipk feed index under the deploy-dir lock; abort
        the build on failure."""
        self.deploy_dir_lock()

        result = self.indexer.write_index()

        self.deploy_dir_unlock()

        if result is not None:
            bb.fatal(result)

    def remove_packaging_data(self):
        """Delete opkg's state directory from the rootfs."""
        bb.utils.remove(self.opkg_dir, True)
        # create the directory back, it's needed by PM lock
        bb.utils.mkdirhier(self.opkg_dir)

    def list_installed(self, format=None):
        """Return the installed-package listing in the requested format."""
        return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format)

    def handle_bad_recommendations(self):
        """Pre-seed the opkg status file so packages listed in
        BAD_RECOMMENDATIONS are held as not-installed."""
        bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or ""
        if bad_recommendations.strip() == "":
            return

        status_file = os.path.join(self.opkg_dir, "status")

        # If status file existed, it means the bad recommendations has already
        # been handled
        if os.path.exists(status_file):
            return

        cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)

        with open(status_file, "w+") as status:
            for pkg in bad_recommendations.split():
                pkg_info = cmd + pkg

                try:
                    output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip()
                except subprocess.CalledProcessError as e:
                    bb.fatal("Cannot get package info. Command '%s' "
                             "returned %d:\n%s" % (pkg_info, e.returncode, e.output))

                if output == "":
                    bb.note("Ignored bad recommendation: '%s' is "
                            "not a package" % pkg)
                    continue

                # Copy the package stanza, forcing its Status line to a
                # held, not-installed state.
                for line in output.split('\n'):
                    if line.startswith("Status:"):
                        status.write("Status: deinstall hold not-installed\n")
                    else:
                        status.write(line + "\n")

                # Append a blank line after each package entry to ensure that it
                # is separated from the following entry
                status.write("\n")

    '''
    The following function dummy installs pkgs and returns the log of output.
    '''
    def dummy_install(self, pkgs):
        if len(pkgs) == 0:
            return

        # Create an temp dir as opkg root for dummy installation
        temp_rootfs = self.d.expand('${T}/opkg')
        temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
        bb.utils.mkdirhier(temp_opkg_dir)

        opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
        opkg_args += self.d.getVar("OPKG_ARGS", True)

        cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to update. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        # Dummy installation
        cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
                                                opkg_args,
                                                ' '.join(pkgs))
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to dummy install packages. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        bb.utils.remove(temp_rootfs, True)

        return output

    def backup_packaging_data(self):
        # Save the opkglib for increment ipk image generation
        if os.path.exists(self.saved_opkg_dir):
            bb.utils.remove(self.saved_opkg_dir, True)
        shutil.copytree(self.opkg_dir,
                        self.saved_opkg_dir,
                        symlinks=True)

    def recover_packaging_data(self):
        # Move the opkglib back
        if os.path.exists(self.saved_opkg_dir):
            if os.path.exists(self.opkg_dir):
                bb.utils.remove(self.opkg_dir, True)

            bb.note('Recover packaging data')
            shutil.copytree(self.saved_opkg_dir,
                            self.opkg_dir,
                            symlinks=True)
1486 | |||
1487 | |||
1488 | class DpkgPM(PackageManager): | ||
1489 | def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None): | ||
1490 | super(DpkgPM, self).__init__(d) | ||
1491 | self.target_rootfs = target_rootfs | ||
1492 | self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True) | ||
1493 | if apt_conf_dir is None: | ||
1494 | self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt") | ||
1495 | else: | ||
1496 | self.apt_conf_dir = apt_conf_dir | ||
1497 | self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf") | ||
1498 | self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get") | ||
1499 | |||
1500 | self.apt_args = d.getVar("APT_ARGS", True) | ||
1501 | |||
1502 | self.all_arch_list = archs.split() | ||
1503 | all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split() | ||
1504 | self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list) | ||
1505 | |||
1506 | self._create_configs(archs, base_archs) | ||
1507 | |||
1508 | self.indexer = DpkgIndexer(self.d, self.deploy_dir) | ||
1509 | |||
1510 | """ | ||
1511 | This function will change a package's status in /var/lib/dpkg/status file. | ||
1512 | If 'packages' is None then the new_status will be applied to all | ||
1513 | packages | ||
1514 | """ | ||
1515 | def mark_packages(self, status_tag, packages=None): | ||
1516 | status_file = self.target_rootfs + "/var/lib/dpkg/status" | ||
1517 | |||
1518 | with open(status_file, "r") as sf: | ||
1519 | with open(status_file + ".tmp", "w+") as tmp_sf: | ||
1520 | if packages is None: | ||
1521 | tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", | ||
1522 | r"Package: \1\n\2Status: \3%s" % status_tag, | ||
1523 | sf.read())) | ||
1524 | else: | ||
1525 | if type(packages).__name__ != "list": | ||
1526 | raise TypeError("'packages' should be a list object") | ||
1527 | |||
1528 | status = sf.read() | ||
1529 | for pkg in packages: | ||
1530 | status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, | ||
1531 | r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), | ||
1532 | status) | ||
1533 | |||
1534 | tmp_sf.write(status) | ||
1535 | |||
1536 | os.rename(status_file + ".tmp", status_file) | ||
1537 | |||
1538 | """ | ||
1539 | Run the pre/post installs for package "package_name". If package_name is | ||
1540 | None, then run all pre/post install scriptlets. | ||
1541 | """ | ||
    def run_pre_post_installs(self, package_name=None):
        """
        Run the pre/post install scriptlets found in var/lib/dpkg/info.

        package_name -- if given and not present in the status file, return
        without doing anything.  NOTE(review): when package_name *is*
        installed, the loop below still runs the scriptlets of every
        installed package, not only package_name's — confirm this matches
        the documented intent.

        Packages whose scriptlet fails are re-marked "unpacked" in the dpkg
        status file.
        """
        info_dir = self.target_rootfs + "/var/lib/dpkg/info"
        # (file suffix, human-readable label) pairs, executed in this order.
        suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")]
        status_file = self.target_rootfs + "/var/lib/dpkg/status"
        installed_pkgs = []

        # Collect the names of all packages recorded in the status file.
        with open(status_file, "r") as status:
            for line in status.read().split('\n'):
                m = re.match("^Package: (.*)", line)
                if m is not None:
                    installed_pkgs.append(m.group(1))

        if package_name is not None and not package_name in installed_pkgs:
            return

        # Environment the scriptlets expect for offline (in-rootfs) runs.
        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
                                                   "intercept_scripts")
        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)

        failed_pkgs = []
        for pkg_name in installed_pkgs:
            for suffix in suffixes:
                p_full = os.path.join(info_dir, pkg_name + suffix[0])
                if os.path.exists(p_full):
                    try:
                        bb.note("Executing %s for package: %s ..." %
                                (suffix[1].lower(), pkg_name))
                        subprocess.check_output(p_full, stderr=subprocess.STDOUT)
                    except subprocess.CalledProcessError as e:
                        bb.note("%s for package %s failed with %d:\n%s" %
                                (suffix[1], pkg_name, e.returncode, e.output))
                        # Skip this package's remaining scriptlets on failure.
                        failed_pkgs.append(pkg_name)
                        break

        if len(failed_pkgs):
            # Revert failed packages to "unpacked" so they can be retried.
            self.mark_packages("unpacked", failed_pkgs)
1582 | |||
1583 | def update(self): | ||
1584 | os.environ['APT_CONFIG'] = self.apt_conf_file | ||
1585 | |||
1586 | self.deploy_dir_lock() | ||
1587 | |||
1588 | cmd = "%s update" % self.apt_get_cmd | ||
1589 | |||
1590 | try: | ||
1591 | subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) | ||
1592 | except subprocess.CalledProcessError as e: | ||
1593 | bb.fatal("Unable to update the package index files. Command '%s' " | ||
1594 | "returned %d:\n%s" % (e.cmd, e.returncode, e.output)) | ||
1595 | |||
1596 | self.deploy_dir_unlock() | ||
1597 | |||
1598 | def install(self, pkgs, attempt_only=False): | ||
1599 | if attempt_only and len(pkgs) == 0: | ||
1600 | return | ||
1601 | |||
1602 | os.environ['APT_CONFIG'] = self.apt_conf_file | ||
1603 | |||
1604 | cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \ | ||
1605 | (self.apt_get_cmd, self.apt_args, ' '.join(pkgs)) | ||
1606 | |||
1607 | try: | ||
1608 | bb.note("Installing the following packages: %s" % ' '.join(pkgs)) | ||
1609 | subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) | ||
1610 | except subprocess.CalledProcessError as e: | ||
1611 | (bb.fatal, bb.note)[attempt_only]("Unable to install packages. " | ||
1612 | "Command '%s' returned %d:\n%s" % | ||
1613 | (cmd, e.returncode, e.output)) | ||
1614 | |||
1615 | # rename *.dpkg-new files/dirs | ||
1616 | for root, dirs, files in os.walk(self.target_rootfs): | ||
1617 | for dir in dirs: | ||
1618 | new_dir = re.sub("\.dpkg-new", "", dir) | ||
1619 | if dir != new_dir: | ||
1620 | os.rename(os.path.join(root, dir), | ||
1621 | os.path.join(root, new_dir)) | ||
1622 | |||
1623 | for file in files: | ||
1624 | new_file = re.sub("\.dpkg-new", "", file) | ||
1625 | if file != new_file: | ||
1626 | os.rename(os.path.join(root, file), | ||
1627 | os.path.join(root, new_file)) | ||
1628 | |||
1629 | |||
1630 | def remove(self, pkgs, with_dependencies=True): | ||
1631 | if with_dependencies: | ||
1632 | os.environ['APT_CONFIG'] = self.apt_conf_file | ||
1633 | cmd = "%s remove %s" % (self.apt_get_cmd, ' '.join(pkgs)) | ||
1634 | else: | ||
1635 | cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \ | ||
1636 | " -r --force-depends %s" % \ | ||
1637 | (bb.utils.which(os.getenv('PATH'), "dpkg"), | ||
1638 | self.target_rootfs, self.target_rootfs, ' '.join(pkgs)) | ||
1639 | |||
1640 | try: | ||
1641 | subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) | ||
1642 | except subprocess.CalledProcessError as e: | ||
1643 | bb.fatal("Unable to remove packages. Command '%s' " | ||
1644 | "returned %d:\n%s" % (e.cmd, e.returncode, e.output)) | ||
1645 | |||
1646 | def write_index(self): | ||
1647 | self.deploy_dir_lock() | ||
1648 | |||
1649 | result = self.indexer.write_index() | ||
1650 | |||
1651 | self.deploy_dir_unlock() | ||
1652 | |||
1653 | if result is not None: | ||
1654 | bb.fatal(result) | ||
1655 | |||
1656 | def insert_feeds_uris(self): | ||
1657 | if self.feed_uris == "": | ||
1658 | return | ||
1659 | |||
1660 | sources_conf = os.path.join("%s/etc/apt/sources.list" | ||
1661 | % self.target_rootfs) | ||
1662 | arch_list = [] | ||
1663 | |||
1664 | for arch in self.all_arch_list: | ||
1665 | if not os.path.exists(os.path.join(self.deploy_dir, arch)): | ||
1666 | continue | ||
1667 | arch_list.append(arch) | ||
1668 | |||
1669 | with open(sources_conf, "w+") as sources_file: | ||
1670 | for uri in self.feed_uris.split(): | ||
1671 | for arch in arch_list: | ||
1672 | bb.note('Note: adding dpkg channel at (%s)' % uri) | ||
1673 | sources_file.write("deb %s/deb/%s ./\n" % | ||
1674 | (uri, arch)) | ||
1675 | |||
    def _create_configs(self, archs, base_archs):
        """
        Create the apt configuration tree used for offline installs:
        apt preferences, sources.list, apt.conf (from the native sample)
        and an initial /var/lib/dpkg layout in the target rootfs.

        archs      -- unused here; kept for interface symmetry.  TODO confirm
                      against callers before removing.
        base_archs -- space-separated base architecture string; underscores
                      are normalized to dashes for dpkg/apt.
        """
        base_archs = re.sub("_", "-", base_archs)

        # Start from a clean apt configuration directory.
        if os.path.exists(self.apt_conf_dir):
            bb.utils.remove(self.apt_conf_dir, True)

        bb.utils.mkdirhier(self.apt_conf_dir)
        bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
        bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")

        # Only arches that actually have a deploy directory.
        arch_list = []
        for arch in self.all_arch_list:
            if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                continue
            arch_list.append(arch)

        # Pin each arch feed with increasing priority (801, 806, ...) so
        # later arches in the list win over earlier ones.
        with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
            priority = 801
            for arch in arch_list:
                prefs_file.write(
                    "Package: *\n"
                    "Pin: release l=%s\n"
                    "Pin-Priority: %d\n\n" % (arch, priority))

                priority += 5

            # Negative pin makes PACKAGE_EXCLUDE entries uninstallable.
            pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
            for pkg in pkg_exclude.split():
                prefs_file.write(
                    "Package: %s\n"
                    "Pin: release *\n"
                    "Pin-Priority: -1\n\n" % pkg)

        # sources.list is written in reverse arch order.
        arch_list.reverse()

        with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
            for arch in arch_list:
                sources_file.write("deb file:%s/ ./\n" %
                                   os.path.join(self.deploy_dir, arch))

        # Map multilib variants onto additional dpkg architectures.
        base_arch_list = base_archs.split()
        multilib_variants = self.d.getVar("MULTILIB_VARIANTS", True);
        for variant in multilib_variants.split():
            if variant == "lib32":
                base_arch_list.append("i386")
            elif variant == "lib64":
                base_arch_list.append("amd64")

        # Instantiate apt.conf from the native sample, expanding the
        # Architecture line and the #ROOTFS#/#APTCONF# placeholders.
        with open(self.apt_conf_file, "w+") as apt_conf:
            with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
                for line in apt_conf_sample.read().split("\n"):
                    match_arch = re.match(" Architecture \".*\";$", line)
                    architectures = ""
                    if match_arch:
                        for base_arch in base_arch_list:
                            architectures += "\"%s\";" % base_arch
                        apt_conf.write(" Architectures {%s};\n" % architectures);
                        apt_conf.write(" Architecture \"%s\";\n" % base_archs)
                    else:
                        line = re.sub("#ROOTFS#", self.target_rootfs, line)
                        line = re.sub("#APTCONF#", self.apt_conf_dir, line)
                        apt_conf.write(line + "\n")

        # Minimal dpkg database layout expected by dpkg/apt in the rootfs.
        target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))

        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))

        # Empty status/available files must exist for dpkg to operate.
        if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
            open(os.path.join(target_dpkg_dir, "status"), "w+").close()
        if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
            open(os.path.join(target_dpkg_dir, "available"), "w+").close()
1748 | |||
1749 | def remove_packaging_data(self): | ||
1750 | bb.utils.remove(os.path.join(self.target_rootfs, | ||
1751 | self.d.getVar('opkglibdir', True)), True) | ||
1752 | bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True) | ||
1753 | |||
1754 | def fix_broken_dependencies(self): | ||
1755 | os.environ['APT_CONFIG'] = self.apt_conf_file | ||
1756 | |||
1757 | cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args) | ||
1758 | |||
1759 | try: | ||
1760 | subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) | ||
1761 | except subprocess.CalledProcessError as e: | ||
1762 | bb.fatal("Cannot fix broken dependencies. Command '%s' " | ||
1763 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
1764 | |||
1765 | def list_installed(self, format=None): | ||
1766 | return DpkgPkgsList(self.d, self.target_rootfs).list() | ||
1767 | |||
1768 | |||
def generate_index_files(d):
    """Create package index files for every backend named in PACKAGE_CLASSES
    (rpm/ipk/deb) whose deploy directory exists.  Aborts the build via
    bb.fatal() on the first indexing error."""
    classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split()

    indexer_map = {
        "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)),
        "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)),
        "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True))
    }

    for pkg_class in classes:
        if pkg_class not in indexer_map:
            continue

        indexer_class, deploy_dir = indexer_map[pkg_class]

        if os.path.exists(deploy_dir):
            result = indexer_class(d, deploy_dir).write_index()

            if result is not None:
                bb.fatal(result)
1789 | |||
1790 | if __name__ == "__main__": | ||
1791 | """ | ||
1792 | We should be able to run this as a standalone script, from outside bitbake | ||
1793 | environment. | ||
1794 | """ | ||
1795 | """ | ||
1796 | TBD | ||
1797 | """ | ||