| Field | Value | Date |
|---|---|---|
| author | Richard Purdie <richard.purdie@linuxfoundation.org> | 2022-08-10 14:35:29 +0100 |
| committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2022-08-12 15:27:17 +0100 |
| commit | fd1517e2b51a170f2427122c6b95396db251d827 (patch) | |
| tree | dabfe3e631339c2fc99a9ee7febb0f9c128e325e /meta/classes/image.bbclass | |
| parent | 10317912ee319ccf7f83605d438b5cbf9663f296 (diff) | |
| download | poky-fd1517e2b51a170f2427122c6b95396db251d827.tar.gz | |
classes: Update classes to match new bitbake class scope functionality
Move classes to classes-global or classes-recipe as appropriate to take
advantage of new bitbake functionality to check class scope/usage.
(From OE-Core rev: f5c128008365e141082c129417eb72d2751e8045)
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'meta/classes/image.bbclass')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | meta/classes/image.bbclass | 684 |
1 file changed, 0 insertions, 684 deletions
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
deleted file mode 100644
index 433172378a..0000000000
--- a/meta/classes/image.bbclass
+++ /dev/null
@@ -1,684 +0,0 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

IMAGE_CLASSES ??= ""

# rootfs bootstrap install
# warning - image-container resets this
ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"

# Handle inherits of any of the image classes we need
IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}"
# Only Linux SDKs support populate_sdk_ext, fall back to populate_sdk_base
# in the non-Linux SDK_OS case, such as mingw32
IMGCLASSES += "${@['populate_sdk_base', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
IMGCLASSES += "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso hddimg', 'image-live', '', d)}"
IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-container', '', d)}"
IMGCLASSES += "image_types_wic"
IMGCLASSES += "rootfs-postcommands"
IMGCLASSES += "image-postinst-intercepts"
IMGCLASSES += "overlayfs-etc"
inherit ${IMGCLASSES}
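
# Illustrative only (the values below are examples, not defaults): the inherit
# above is driven by configuration, so a local.conf or image recipe can pull
# in extra classes and image types, e.g.:
#
#   IMAGE_CLASSES += "testimage"
#   IMAGE_FSTYPES += "wic.gz container"
#
# With settings like these, the checks above would add image-container (and
# image-live for live/iso/hddimg types) to the inherited class list.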

TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "

LICENSE ?= "MIT"
PACKAGES = ""
DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
PATH:prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"

INHIBIT_DEFAULT_DEPS = "1"

# IMAGE_FEATURES may contain any available package group
IMAGE_FEATURES ?= ""
IMAGE_FEATURES[type] = "list"
IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging overlayfs-etc"
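
# Illustrative usage (the feature names shown are examples): image recipes
# extend IMAGE_FEATURES directly, while local.conf typically uses
# EXTRA_IMAGE_FEATURES, which is folded back into IMAGE_FEATURES elsewhere
# (e.g. by core-image.bbclass):
#
#   IMAGE_FEATURES += "read-only-rootfs"
#   EXTRA_IMAGE_FEATURES ?= "debug-tweaks allow-root-login"
#
# Anything not in validitems above, or provided via a FEATURE_PACKAGES_* /
# COMPLEMENTARY_GLOB entry, is rejected by check_image_features() below.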

# Generate companion debugfs?
IMAGE_GEN_DEBUGFS ?= "0"

# These packages will additionally be installed into the debug rootfs
IMAGE_INSTALL_DEBUGFS ?= ""

# These packages will be removed from a read-only rootfs after all other
# packages have been installed
ROOTFS_RO_UNNEEDED ??= "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"

# packages to install from features
FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
FEATURE_INSTALL[vardepvalue] = "${FEATURE_INSTALL}"
FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
FEATURE_INSTALL_OPTIONAL[vardepvalue] = "${FEATURE_INSTALL_OPTIONAL}"

# Define some very basic feature package groups
FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
SPLASH ?= "${@bb.utils.contains("MACHINE_FEATURES", "screen", "psplash", "", d)}"
FEATURE_PACKAGES_splash = "${SPLASH}"

IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'

def check_image_features(d):
    valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems') or "").split()
    valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
    for var in d:
        if var.startswith("FEATURE_PACKAGES_"):
            valid_features.append(var[17:])
    valid_features.sort()

    features = set(oe.data.typed_value('IMAGE_FEATURES', d))
    for feature in features:
        if feature not in valid_features:
            if bb.utils.contains('EXTRA_IMAGE_FEATURES', feature, True, False, d):
                raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES (added via EXTRA_IMAGE_FEATURES) is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
            else:
                raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))

IMAGE_INSTALL ?= ""
IMAGE_INSTALL[type] = "list"
export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"

IMGDEPLOYDIR = "${WORKDIR}/deploy-${PN}-image-complete"

# Images are generally built explicitly, do not need to be part of world.
EXCLUDE_FROM_WORLD = "1"

USE_DEVFS ?= "1"
USE_DEPMOD ?= "1"

PID = "${@os.getpid()}"

PACKAGE_ARCH = "${MACHINE_ARCH}"

LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
LDCONFIGDEPEND:libc-musl = ""

# This is needed to have depmod data in PKGDATA_DIR,
# but if you're building a small initramfs image, e.g. to
# include it in your kernel, you probably don't want this
# dependency, as it causes a dependency loop.
KERNELDEPMODDEPEND ?= "virtual/kernel:do_packagedata"
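
# Illustrative: for a small initramfs image bundled into the kernel, the
# dependency loop mentioned above can be avoided by clearing the variable in
# the image recipe, e.g.:
#
#   KERNELDEPMODDEPEND = ""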

do_rootfs[depends] += " \
    makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND} \
    virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot \
    ${KERNELDEPMODDEPEND} \
"
do_rootfs[recrdeptask] += "do_packagedata"

def rootfs_command_variables(d):
    return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','ROOTFS_POSTUNINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND',
            'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS']

python () {
    variables = rootfs_command_variables(d)
    for var in variables:
        if d.getVar(var, False):
            d.setVarFlag(var, 'func', '1')
}

def rootfs_variables(d):
    from oe.rootfs import variable_depends
    variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
                 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE',
                 'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
                 'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
                 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
    variables.extend(rootfs_command_variables(d))
    variables.extend(variable_depends(d))
    return " ".join(variables)

do_rootfs[vardeps] += "${@rootfs_variables(d)}"

# This is needed to have the kernel image in DEPLOY_DIR.
# This follows many common use cases and user expectations.
# But if you are building an image which doesn't need the kernel image at all,
# you can unset this variable manually.
KERNEL_DEPLOY_DEPEND ?= "virtual/kernel:do_deploy"
do_build[depends] += "${KERNEL_DEPLOY_DEPEND}"
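
# Illustrative: an image that never needs the kernel artifacts in
# DEPLOY_DIR_IMAGE can drop the dependency in its recipe, e.g.:
#
#   KERNEL_DEPLOY_DEPEND = ""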


python () {
    def extraimage_getdepends(task):
        deps = ""
        for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
            if ":" in dep:
                deps += " %s " % (dep)
            else:
                deps += " %s:%s" % (dep, task)
        return deps

    d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))

    deps = " " + imagetypes_getdepends(d)
    d.appendVarFlag('do_rootfs', 'depends', deps)

    # Process IMAGE_FEATURES; we must do this before runtime_mapping_rename
    # Check for replaced image features
    features = set(oe.data.typed_value('IMAGE_FEATURES', d))
    remain_features = features.copy()
    for feature in features:
        replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature) or "").split())
        remain_features -= replaces

    # Check for conflicting image features
    for feature in remain_features:
        conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature) or "").split())
        temp = conflicts & remain_features
        if temp:
            bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN'), feature, ' '.join(list(temp))))

    d.setVar('IMAGE_FEATURES', ' '.join(sorted(list(remain_features))))

    check_image_features(d)
}
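
# Illustrative sketch of the variables consumed by the anonymous python above
# (the ssh-server pair mirrors common OE usage; the foo/bar names are
# hypothetical):
#
#   IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
#   IMAGE_FEATURES_CONFLICTS_foo = "bar"
#
# With these set, requesting ssh-server-openssh silently drops
# ssh-server-dropbear, while requesting foo and bar together is a fatal error.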

IMAGE_POSTPROCESS_COMMAND ?= ""

# some default locales
IMAGE_LINGUAS ?= "de-de fr-fr en-gb"

LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"

# by default, create a locale archive
IMAGE_LOCALES_ARCHIVE ?= '1'
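
# Illustrative: the locale defaults above are commonly overridden per image or
# distro, e.g.:
#
#   IMAGE_LINGUAS = "en-us"
#   IMAGE_LINGUAS = ""            # install no extra locale packages at all
#   IMAGE_LOCALES_ARCHIVE = "0"   # skip building the locale archive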

# Prefer image, but use the fallback files for lookups if the image ones
# aren't yet available.
PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"

PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/intercept_scripts,${WORKDIR}/oe-rootfs-repo,${WORKDIR}/sstate-build-image_complete"

PACKAGE_EXCLUDE ??= ""
PACKAGE_EXCLUDE[type] = "list"

fakeroot python do_rootfs () {
    from oe.rootfs import create_rootfs
    from oe.manifest import create_manifest
    import logging

    logger = d.getVar('BB_TASK_LOGGER', False)
    if logger:
        logcatcher = bb.utils.LogCatcher()
        logger.addHandler(logcatcher)
    else:
        logcatcher = None

    # NOTE: if you add, remove or significantly refactor the stages of this
    # process then you should recalculate the weightings here. This is quite
    # easy to do - just change the MultiStageProgressReporter line temporarily
    # to pass debug=True as the last parameter and you'll get a printout of
    # the weightings as well as a map to the lines where next_stage() was
    # called. Of course this isn't critical, but it helps to keep the progress
    # reporting accurate.
    stage_weights = [1, 203, 354, 186, 65, 4228, 1, 353, 49, 330, 382, 23, 1]
    progress_reporter = bb.progress.MultiStageProgressReporter(d, stage_weights)
    progress_reporter.next_stage()

    # Handle package exclusions
    excl_pkgs = d.getVar("PACKAGE_EXCLUDE").split()
    inst_pkgs = d.getVar("PACKAGE_INSTALL").split()
    inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY").split()

    d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
    d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))

    for pkg in excl_pkgs:
        if pkg in inst_pkgs:
            bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
            inst_pkgs.remove(pkg)

        if pkg in inst_attempt_pkgs:
| 239 | bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs)) | ||
            inst_attempt_pkgs.remove(pkg)

    d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
    d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))

    # Ensure we handle package name remapping
    # We have to delay the runtime_mapping_rename until just before rootfs runs
    # otherwise, the multilib renaming could step in and squash any fixups that
    # may have occurred.
    pn = d.getVar('PN')
    runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
    runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
    runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)

    # Generate the initial manifest
    create_manifest(d)

    progress_reporter.next_stage()

    # generate rootfs
    d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
    create_rootfs(d, progress_reporter=progress_reporter, logcatcher=logcatcher)

    progress_reporter.finish()
}
do_rootfs[dirs] = "${TOPDIR}"
do_rootfs[cleandirs] += "${IMAGE_ROOTFS} ${IMGDEPLOYDIR} ${S}"
do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
addtask rootfs after do_prepare_recipe_sysroot

fakeroot python do_image () {
    from oe.utils import execute_pre_post_process

    d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
    pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND")

    execute_pre_post_process(d, pre_process_cmds)
}
do_image[dirs] = "${TOPDIR}"
addtask do_image after do_rootfs

fakeroot python do_image_complete () {
    from oe.utils import execute_pre_post_process

    post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND")

    execute_pre_post_process(d, post_process_cmds)
}
do_image_complete[dirs] = "${TOPDIR}"
SSTATETASKS += "do_image_complete"
SSTATE_SKIP_CREATION:task-image-complete = '1'
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
addtask do_image_complete after do_image before do_build
python do_image_complete_setscene () {
    sstate_setscene(d)
}
addtask do_image_complete_setscene

# Add image-level QA/sanity checks to IMAGE_QA_COMMANDS
#
# IMAGE_QA_COMMANDS += " \
#     image_check_everything_ok \
# "
# This task runs all functions in IMAGE_QA_COMMANDS after the rootfs
# construction has completed in order to validate the resulting image.
#
# The functions should use ${IMAGE_ROOTFS} to find the unpacked rootfs
# directory, which if QA passes will be the basis for the images.
fakeroot python do_image_qa () {
    from oe.utils import ImageQAFailed

    qa_cmds = (d.getVar('IMAGE_QA_COMMANDS') or '').split()
    qamsg = ""

    for cmd in qa_cmds:
        try:
            bb.build.exec_func(cmd, d)
        except oe.utils.ImageQAFailed as e:
            qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
        except Exception as e:
            qamsg = qamsg + '\tImage QA function %s failed\n' % cmd

    if qamsg:
        imgname = d.getVar('IMAGE_NAME')
        bb.fatal("QA errors found whilst validating image: %s\n%s" % (imgname, qamsg))
}
addtask do_image_qa after do_rootfs before do_image

SSTATETASKS += "do_image_qa"
SSTATE_SKIP_CREATION:task-image-qa = '1'
do_image_qa[sstate-inputdirs] = ""
do_image_qa[sstate-outputdirs] = ""
python do_image_qa_setscene () {
    sstate_setscene(d)
}
addtask do_image_qa_setscene
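
# Illustrative sketch (function name is hypothetical): a QA check hooked into
# IMAGE_QA_COMMANDS inspects ${IMAGE_ROOTFS} and raises oe.utils.ImageQAFailed
# so do_image_qa above can aggregate the failure:
#
#   python image_check_no_empty_etc () {
#       etc = d.expand("${IMAGE_ROOTFS}${sysconfdir}")
#       if not os.listdir(etc):
#           raise oe.utils.ImageQAFailed("rootfs %s is empty" % etc, "image_check_no_empty_etc")
#   }
#   IMAGE_QA_COMMANDS += "image_check_no_empty_etc"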

def setup_debugfs_variables(d):
    d.appendVar('IMAGE_ROOTFS', '-dbg')
    if d.getVar('IMAGE_LINK_NAME'):
        d.appendVar('IMAGE_LINK_NAME', '-dbg')
    d.appendVar('IMAGE_NAME','-dbg')
    d.setVar('IMAGE_BUILDING_DEBUGFS', 'true')
    debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS')
    if debugfs_image_fstypes:
        d.setVar('IMAGE_FSTYPES', debugfs_image_fstypes)

python setup_debugfs () {
    setup_debugfs_variables(d)
}

python () {
    vardeps = set()
    # We allow CONVERSIONTYPES to have duplicates. That avoids breaking
    # derived distros when OE-core or some other layer independently adds
    # the same type. There is still only one command for each type, but
    # presumably the commands will do the same when the type is the same,
    # even when added in different places.
    #
    # Without de-duplication, gen_conversion_cmds() below
    # would create the same compression command multiple times.
    ctypes = set(d.getVar('CONVERSIONTYPES').split())
    old_overrides = d.getVar('OVERRIDES', False)

    def _image_base_type(type):
        basetype = type
        for ctype in ctypes:
            if type.endswith("." + ctype):
                basetype = type[:-len("." + ctype)]
                break

        if basetype != type:
            # New base type itself might be generated by a conversion command.
            basetype = _image_base_type(basetype)

        return basetype

    basetypes = {}
    alltypes = d.getVar('IMAGE_FSTYPES').split()
    typedeps = {}

    if d.getVar('IMAGE_GEN_DEBUGFS') == "1":
        debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS').split()
        for t in debugfs_fstypes:
            alltypes.append("debugfs_" + t)

    def _add_type(t):
        baset = _image_base_type(t)
        input_t = t
        if baset not in basetypes:
            basetypes[baset] = []
        if t not in basetypes[baset]:
            basetypes[baset].append(t)
        debug = ""
        if t.startswith("debugfs_"):
            t = t[8:]
            debug = "debugfs_"
        deps = (d.getVar('IMAGE_TYPEDEP:' + t) or "").split()
        vardeps.add('IMAGE_TYPEDEP:' + t)
        if baset not in typedeps:
            typedeps[baset] = set()
        deps = [debug + dep for dep in deps]
        for dep in deps:
            if dep not in alltypes:
                alltypes.append(dep)
            _add_type(dep)
            basedep = _image_base_type(dep)
            typedeps[baset].add(basedep)

        if baset != input_t:
            _add_type(baset)

    for t in alltypes[:]:
        _add_type(t)

    d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps))

    maskedtypes = (d.getVar('IMAGE_TYPES_MASKED') or "").split()
    maskedtypes = [dbg + t for t in maskedtypes for dbg in ("", "debugfs_")]

    for t in basetypes:
        vardeps = set()
        cmds = []
        subimages = []
        realt = t

        if t in maskedtypes:
            continue

        localdata = bb.data.createCopy(d)
        debug = ""
        if t.startswith("debugfs_"):
            setup_debugfs_variables(localdata)
            debug = "setup_debugfs "
            realt = t[8:]
        localdata.setVar('OVERRIDES', '%s:%s' % (realt, old_overrides))
        localdata.setVar('type', realt)
        # Delete DATETIME so we don't expand any references to it now
        # This means the task's hash can be stable rather than having hardcoded
        # date/time values. It will get expanded at execution time.
        # Similarly TMPDIR since otherwise we see QA stamp comparison problems
        # Expand PV else it can trigger get_srcrev which can fail due to these variables being unset
        localdata.setVar('PV', d.getVar('PV'))
        localdata.delVar('DATETIME')
        localdata.delVar('DATE')
        localdata.delVar('TMPDIR')
        localdata.delVar('IMAGE_VERSION_SUFFIX')
        vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude', True) or '').split()
        for dep in vardepsexclude:
            localdata.delVar(dep)

        image_cmd = localdata.getVar("IMAGE_CMD")
        vardeps.add('IMAGE_CMD:' + realt)
        if image_cmd:
            cmds.append("\t" + image_cmd)
        else:
            bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
        cmds.append(localdata.expand("\tcd ${IMGDEPLOYDIR}"))

        # Since a copy of IMAGE_CMD:xxx will be inlined within do_image_xxx,
        # prevent a redundant copy of IMAGE_CMD:xxx being emitted as a function.
        d.delVarFlag('IMAGE_CMD:' + realt, 'func')

        rm_tmp_images = set()
        def gen_conversion_cmds(bt):
            for ctype in sorted(ctypes):
                if bt.endswith("." + ctype):
                    type = bt[0:-len(ctype) - 1]
                    if type.startswith("debugfs_"):
                        type = type[8:]
                    # Create input image first.
                    gen_conversion_cmds(type)
                    localdata.setVar('type', type)
                    cmd = "\t" + localdata.getVar("CONVERSION_CMD:" + ctype)
                    if cmd not in cmds:
                        cmds.append(cmd)
                    vardeps.add('CONVERSION_CMD:' + ctype)
                    subimage = type + "." + ctype
                    if subimage not in subimages:
                        subimages.append(subimage)
                    if type not in alltypes:
                        rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))

        for bt in basetypes[t]:
            gen_conversion_cmds(bt)

        localdata.setVar('type', realt)
        if t not in alltypes:
            rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
        else:
            subimages.append(realt)

        # Clean up after applying all conversion commands. Some of them might
        # use the same input, therefore we cannot delete sooner without applying
        # some complex dependency analysis.
        for image in sorted(rm_tmp_images):
            cmds.append("\trm " + image)

        after = 'do_image'
        for dep in typedeps[t]:
            after += ' do_image_%s' % dep.replace("-", "_").replace(".", "_")

        task = "do_image_%s" % t.replace("-", "_").replace(".", "_")

        d.setVar(task, '\n'.join(cmds))
        d.setVarFlag(task, 'func', '1')
        d.setVarFlag(task, 'fakeroot', '1')

        d.appendVarFlag(task, 'prefuncs', ' ' + debug + ' set_image_size')
        d.prependVarFlag(task, 'postfuncs', 'create_symlinks ')
        d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages))
        d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps))
        d.appendVarFlag(task, 'vardepsexclude', ' DATETIME DATE ' + ' '.join(vardepsexclude))

        bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after))
        bb.build.addtask(task, 'do_image_complete', after, d)
}
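
# Illustrative sketch of what the loop above consumes (the "mytype" name and
# command are hypothetical): an image type class declares its dependencies and
# creation command, and a do_image_mytype task is generated from them:
#
#   IMAGE_TYPEDEP:mytype = "ext4"
#   IMAGE_CMD:mytype = "mytool ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ext4 \
#                       ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.mytype"
#
# Requesting IMAGE_FSTYPES += "mytype" would then schedule do_image_ext4
# before do_image_mytype, with both running before do_image_complete.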

#
# Compute the rootfs size
#
def get_rootfs_size(d):
    import subprocess, oe.utils

    rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT'))
    overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR'))
    rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE'))
    rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE'))
    rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE')
    image_fstypes = d.getVar('IMAGE_FSTYPES') or ''
    initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES') or ''
    initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE')

    size_kb = oe.utils.directory_size(d.getVar("IMAGE_ROOTFS")) / 1024

    base_size = size_kb * overhead_factor
    bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor))
    base_size2 = max(base_size, rootfs_req_size) + rootfs_extra_space
    bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), rootfs_extra_space))

    base_size = base_size2
    if base_size != int(base_size):
        base_size = int(base_size + 1)
    else:
        base_size = int(base_size)
    bb.debug(1, '%f = int(%f)' % (base_size, base_size2))

    base_size_saved = base_size
    base_size += rootfs_alignment - 1
    base_size -= base_size % rootfs_alignment
    bb.debug(1, '%d = aligned(%d)' % (base_size, base_size_saved))

    # Do not check the image size of the debugfs image. It is not meant to be
    # deployed, so it doesn't make sense to limit the size of the debug rootfs.
    if (d.getVar('IMAGE_BUILDING_DEBUGFS') or "") == "true":
        bb.debug(1, 'returning debugfs size %d' % (base_size))
        return base_size

    # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
    if rootfs_maxsize:
        rootfs_maxsize_int = int(rootfs_maxsize)
        if base_size > rootfs_maxsize_int:
            bb.fatal("The rootfs size %d(K) exceeds IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
                (base_size, rootfs_maxsize_int))

    # Check the initramfs size against INITRAMFS_MAXSIZE (if set)
    if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize:
        initramfs_maxsize_int = int(initramfs_maxsize)
        if base_size > initramfs_maxsize_int:
            bb.error("The initramfs size %d(K) exceeds INITRAMFS_MAXSIZE: %d(K)" % \
                (base_size, initramfs_maxsize_int))
            bb.error("You can set INITRAMFS_MAXSIZE to a larger value. Usually, it should")
            bb.fatal("be less than 1/2 of the RAM size, or the image may fail to boot.\n")

    bb.debug(1, 'returning %d' % (base_size))
    return base_size

python set_image_size () {
    rootfs_size = get_rootfs_size(d)
    d.setVar('ROOTFS_SIZE', str(rootfs_size))
    d.setVarFlag('ROOTFS_SIZE', 'export', '1')
}
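
# Worked example of get_rootfs_size() above, with illustrative numbers:
# a 100000 kB rootfs, IMAGE_OVERHEAD_FACTOR = 1.3, IMAGE_ROOTFS_SIZE = 65536,
# IMAGE_ROOTFS_EXTRA_SPACE = 0 and IMAGE_ROOTFS_ALIGNMENT = 1024 gives
# 100000 * 1.3 = 130000, then max(130000, 65536) + 0 = 130000, which rounds up
# to the next 1024 kB boundary: ROOTFS_SIZE = "130048".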

#
# Create symlinks to the newly created image
#
python create_symlinks() {

    deploy_dir = d.getVar('IMGDEPLOYDIR')
    img_name = d.getVar('IMAGE_NAME')
    link_name = d.getVar('IMAGE_LINK_NAME')
    manifest_name = d.getVar('IMAGE_MANIFEST')
    taskname = d.getVar("BB_CURRENTTASK")
    subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
    imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix') or d.expand("${IMAGE_NAME_SUFFIX}.")

    if not link_name:
        return
    for type in subimages:
        dst = os.path.join(deploy_dir, link_name + "." + type)
        src = img_name + imgsuffix + type
        if os.path.exists(os.path.join(deploy_dir, src)):
            bb.note("Creating symlink: %s -> %s" % (dst, src))
            if os.path.islink(dst):
                os.remove(dst)
            os.symlink(src, dst)
        else:
            bb.note("Skipping symlink, source does not exist: %s -> %s" % (dst, src))
}

MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|"
MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"

do_fetch[noexec] = "1"
do_unpack[noexec] = "1"
do_patch[noexec] = "1"
do_configure[noexec] = "1"
do_compile[noexec] = "1"
do_install[noexec] = "1"
deltask do_populate_lic
deltask do_populate_sysroot
do_package[noexec] = "1"
deltask do_package_qa
deltask do_packagedata
deltask do_package_write_ipk
deltask do_package_write_deb
deltask do_package_write_rpm

# Prepare the root links to point to the /usr counterparts.
create_merged_usr_symlinks() {
    root="$1"
    install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
    ln -rs $root${base_bindir} $root/bin
    ln -rs $root${base_sbindir} $root/sbin
    ln -rs $root${base_libdir} $root/${baselib}

    if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
        install -d $root${nonarch_base_libdir}
        ln -rs $root${nonarch_base_libdir} $root/lib
    fi

    # create base links for multilibs
    multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
    for d in $multi_libdirs; do
        install -d $root${exec_prefix}/$d
        ln -rs $root${exec_prefix}/$d $root/$d
    done
}

create_merged_usr_symlinks_rootfs() {
    create_merged_usr_symlinks ${IMAGE_ROOTFS}
}

create_merged_usr_symlinks_sdk() {
    create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT}
}

ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs; ', '',d)}"
POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"

reproducible_final_image_task () {
    if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
        REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
        if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
            REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
        fi
    fi
    # Set mtime of all files to a reproducible value
    bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
    find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS
}
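
# Illustrative: builds that want a fixed, known timestamp instead of the
# fallback logic above can set it explicitly, e.g. in local.conf:
#
#   REPRODUCIBLE_TIMESTAMP_ROOTFS = "1658214400"
#
# (a Unix epoch value in seconds, exported to the rootfs and image tasks by
# do_rootfs above).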

systemd_preset_all () {
    if [ -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then
        systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all
    fi
}

IMAGE_PREPROCESS_COMMAND:append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "

CVE_PRODUCT = ""
