Diffstat (limited to 'meta/classes')
58 files changed, 2164 insertions, 455 deletions
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index 7ca35a573b..6ead010fe1 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -54,9 +54,10 @@ ARCHIVER_MODE[mirror] ?= "split"
 
 DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
 ARCHIVER_TOPDIR ?= "${WORKDIR}/archiver-sources"
-ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_ARCH = "${TARGET_SYS}"
+ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${ARCHIVER_ARCH}/${PF}/"
 ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
-ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${ARCHIVER_ARCH}/${PF}/"
 ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
 
 # When producing a combined mirror directory, allow duplicates for the case
@@ -100,6 +101,10 @@ python () {
         bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
         return
 
+    # TARGET_SYS in ARCHIVER_ARCH will break the stamp for gcc-source in multiconfig
+    if pn.startswith('gcc-source'):
+        d.setVar('ARCHIVER_ARCH', "allarch")
+
     def hasTask(task):
         return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
 
@@ -281,7 +286,10 @@ python do_ar_configured() {
         # ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run the
         # do_configure, we archive the already configured ${S} to
         # instead of.
-        elif pn != 'libtool-native':
+        # The kernel class functions require it to be on work-shared, we
+        # don't unpack, patch, configure again, just archive the already
+        # configured ${S}
+        elif not (pn == 'libtool-native' or is_work_shared(d)):
             def runTask(task):
                 prefuncs = d.getVarFlag(task, 'prefuncs') or ''
                 for func in prefuncs.split():
@@ -484,6 +492,9 @@ python do_unpack_and_patch() {
         src_orig = '%s.orig' % src
         oe.path.copytree(src, src_orig)
 
+    if bb.data.inherits_class('dos2unix', d):
+        bb.build.exec_func('do_convert_crlf_to_lf', d)
+
     # Make sure gcc and kernel sources are patched only once
     if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
         bb.build.exec_func('do_patch', d)
@@ -572,7 +583,7 @@ python do_dumpdata () {
 
 SSTATETASKS += "do_deploy_archives"
 do_deploy_archives () {
-    echo "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
+    bbnote "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
 }
 python do_deploy_archives_setscene () {
     sstate_setscene(d)
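
The ARCHIVER_ARCH indirection introduced above lets a single recipe neutralize the TARGET_SYS component of the archiver output path and its stamp. A minimal sketch of the idea, using hypothetical values rather than a real BitBake datastore:

    # Sketch only: gcc-source is shared between multiconfig targets, so any
    # stamp or path that embeds TARGET_SYS would needlessly diverge per target.
    def archiver_arch(pn, target_sys):
        if pn.startswith('gcc-source'):
            return "allarch"          # one shared archive for all targets
        return target_sys             # e.g. "aarch64-poky-linux"

    assert archiver_arch("gcc-source-12.2", "aarch64-poky-linux") == "allarch"
    assert archiver_arch("zlib", "aarch64-poky-linux") == "aarch64-poky-linux"
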
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 8a1b5f79c1..3cae577a0e 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -122,6 +122,10 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
     tools = d.getVar(toolsvar).split()
     origbbenv = d.getVar("BB_ORIGENV", False)
     path = origbbenv.getVar("PATH")
+    # Need to ignore our own scripts directories to avoid circular links
+    for p in path.split(":"):
+        if p.endswith("/scripts"):
+            path = path.replace(p, "/ignoreme")
     bb.utils.mkdirhier(dest)
     notfound = []
     for tool in tools:
@@ -135,7 +139,7 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
             # /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
             # would return /usr/local/bin/ccache/gcc, but what we need is
             # /usr/bin/gcc, this code can check and fix that.
-            if "ccache" in srctool:
+            if os.path.islink(srctool) and os.path.basename(os.readlink(srctool)) == 'ccache':
                 srctool = bb.utils.which(path, tool, executable=True, direction=1)
             if srctool:
                 os.symlink(srctool, desttool)
@@ -153,14 +157,14 @@ do_fetch[vardeps] += "SRCREV"
 python base_do_fetch() {
 
     src_uri = (d.getVar('SRC_URI') or "").split()
-    if len(src_uri) == 0:
+    if not src_uri:
         return
 
     try:
         fetcher = bb.fetch2.Fetch(src_uri, d)
         fetcher.download()
     except bb.fetch2.BBFetchException as e:
-        bb.fatal(str(e))
+        bb.fatal("Bitbake Fetcher Error: " + repr(e))
 }
 
 addtask unpack after do_fetch
@@ -170,14 +174,14 @@ do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != o
 
 python base_do_unpack() {
     src_uri = (d.getVar('SRC_URI') or "").split()
-    if len(src_uri) == 0:
+    if not src_uri:
         return
 
     try:
         fetcher = bb.fetch2.Fetch(src_uri, d)
         fetcher.unpack(d.getVar('WORKDIR'))
     except bb.fetch2.BBFetchException as e:
-        bb.fatal(str(e))
+        bb.fatal("Bitbake Fetcher Error: " + repr(e))
 }
 
 def get_layers_branch_rev(d):
@@ -688,7 +692,7 @@ python () {
             if os.path.basename(p) == machine and os.path.isdir(p):
                 paths.append(p)
 
-        if len(paths) != 0:
+        if paths:
             for s in srcuri.split():
                 if not s.startswith("file://"):
                     continue
@@ -721,7 +725,7 @@ do_cleansstate[nostamp] = "1"
 
 python do_cleanall() {
     src_uri = (d.getVar('SRC_URI') or "").split()
-    if len(src_uri) == 0:
+    if not src_uri:
         return
 
     try:
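
The new ccache test in setup_hosttools_dir is stricter than the old substring match, which also fired on unrelated paths that merely contained "ccache". A small sketch of the difference, using hypothetical paths:

    import os

    def is_ccache_shim_old(srctool):
        return "ccache" in srctool    # false positive for e.g. "/opt/ccache-tests/bin/gcc"

    def is_ccache_shim_new(srctool):
        # Only a symlink whose target's basename is literally "ccache" is a shim.
        return os.path.islink(srctool) and os.path.basename(os.readlink(srctool)) == 'ccache'
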
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
index cbc9b1fa13..c1954243ee 100644
--- a/meta/classes/bin_package.bbclass
+++ b/meta/classes/bin_package.bbclass
@@ -30,8 +30,9 @@ bin_package_do_install () {
         bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
     fi
     cd ${S}
+    install -d ${D}${base_prefix}
     tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
-        | tar --no-same-owner -xpf - -C ${D}
+        | tar --no-same-owner -xpf - -C ${D}${base_prefix}
 }
 
 FILES_${PN} = "/"
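
With ${base_prefix} appended, the tar pipe now lands the payload under the distro's base prefix instead of the image root. A sketch of the resulting paths, with hypothetical values:

    # Sketch only: D and base_prefix are illustrative values, not from a build.
    def install_root(D, base_prefix):
        return D + base_prefix

    assert install_root("/work/pkg/image", "") == "/work/pkg/image"          # default OE layout
    assert install_root("/work/pkg/image", "/usr") == "/work/pkg/image/usr"  # non-empty base_prefix
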
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
index 8a1359acbe..6a1a20653a 100644
--- a/meta/classes/buildhistory.bbclass
+++ b/meta/classes/buildhistory.bbclass
@@ -671,13 +671,16 @@ IMAGE_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_imageinfo"
 POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target;"
 POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_get_sdk_installed_target;"
 POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target;| buildhistory_get_sdk_installed_target;"
+POPULATE_SDK_POST_TARGET_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_target buildhistory_get_sdk_installed_target"
 
 POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host;"
 POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_get_sdk_installed_host;"
 POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host;| buildhistory_get_sdk_installed_host;"
+POPULATE_SDK_POST_HOST_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_host buildhistory_get_sdk_installed_host"
 
 SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
 SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+SDK_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
 
 python buildhistory_write_sigs() {
     if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
@@ -862,6 +865,7 @@ python buildhistory_eventhandler() {
                 if os.path.isdir(olddir):
                     shutil.rmtree(olddir)
                 rootdir = e.data.getVar("BUILDHISTORY_DIR")
+                bb.utils.mkdirhier(rootdir)
                 entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
                 bb.utils.mkdirhier(olddir)
                 for entry in entries:
@@ -950,23 +954,19 @@ def write_latest_srcrev(d, pkghistdir):
                         value = value.replace('"', '').strip()
                         old_tag_srcrevs[key] = value
         with open(srcrevfile, 'w') as f:
-            orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
-            if orig_srcrev != 'INVALID':
-                f.write('# SRCREV = "%s"\n' % orig_srcrev)
-            if len(srcrevs) > 1:
-                for name, srcrev in sorted(srcrevs.items()):
-                    orig_srcrev = d.getVar('SRCREV_%s' % name, False)
-                    if orig_srcrev:
-                        f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
-                    f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
-            else:
-                f.write('SRCREV = "%s"\n' % next(iter(srcrevs.values())))
-            if len(tag_srcrevs) > 0:
-                for name, srcrev in sorted(tag_srcrevs.items()):
-                    f.write('# tag_%s = "%s"\n' % (name, srcrev))
-                    if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
-                        pkg = d.getVar('PN')
-                        bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
+            for name, srcrev in sorted(srcrevs.items()):
+                suffix = "_" + name
+                if name == "default":
+                    suffix = ""
+                orig_srcrev = d.getVar('SRCREV%s' % suffix, False)
+                if orig_srcrev:
+                    f.write('# SRCREV%s = "%s"\n' % (suffix, orig_srcrev))
+                f.write('SRCREV%s = "%s"\n' % (suffix, srcrev))
+            for name, srcrev in sorted(tag_srcrevs.items()):
+                f.write('# tag_%s = "%s"\n' % (name, srcrev))
+                if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
+                    pkg = d.getVar('PN')
+                    bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
 
     else:
         if os.path.exists(srcrevfile):
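
The rewritten write_latest_srcrev loop treats the unnamed revision as the name "default" and drops the suffix only for it, which removes the old two-branch logic. A sketch with sample data:

    # Sketch only: srcrevs is sample data in the shape _get_srcrev_values() returns.
    def format_srcrevs(srcrevs):
        lines = []
        for name, srcrev in sorted(srcrevs.items()):
            suffix = "" if name == "default" else "_" + name
            lines.append('SRCREV%s = "%s"' % (suffix, srcrev))
        return lines

    assert format_srcrevs({"default": "abc123"}) == ['SRCREV = "abc123"']
    assert format_srcrevs({"rt": "abc1", "dev": "def2"}) == ['SRCREV_dev = "def2"', 'SRCREV_rt = "abc1"']
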
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index 8243f7ce8c..af6a8c4395 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -102,7 +102,8 @@ set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
 set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
 set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} )
 set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
-set( CMAKE_AR ${OECMAKE_AR} CACHE FILEPATH "Archiver" )
+find_program( CMAKE_AR ${OECMAKE_AR} DOC "Archiver" REQUIRED )
+
 set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
 set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
 set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
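
Switching from set(... CACHE FILEPATH) to find_program(... REQUIRED) means CMake resolves the bare tool name on PATH and aborts configuration early when it is missing, instead of caching a possibly unresolved value. The behaviour is roughly analogous to this Python sketch (illustrative only, not the CMake implementation):

    import shutil

    def find_program(name):
        path = shutil.which(name)     # resolve bare name to an absolute path
        if path is None:              # REQUIRED: fail at configure time, not later
            raise RuntimeError("required program %r not found" % name)
        return path
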
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
index 8ab240589a..46a19fce32 100644
--- a/meta/classes/cml1.bbclass
+++ b/meta/classes/cml1.bbclass
@@ -36,6 +36,14 @@ python do_menuconfig() {
     except OSError:
         mtime = 0
 
+    # setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
+    d.setVar("PKG_CONFIG_DIR", "${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig")
+    d.setVar("PKG_CONFIG_PATH", "${PKG_CONFIG_DIR}:${STAGING_DATADIR_NATIVE}/pkgconfig")
+    d.setVar("PKG_CONFIG_LIBDIR", "${PKG_CONFIG_DIR}")
+    d.setVarFlag("PKG_CONFIG_SYSROOT_DIR", "unexport", "1")
+    # ensure that environment variables are overwritten with this task's 'd' values
+    d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
+
     oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
                 d.getVar('PN') + ' Configuration', d)
 
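
The pkg-config variables set above only take effect because oe_terminal exports every name listed in OE_TERMINAL_EXPORTS into the spawned shell. A simplified sketch of that hand-off, assuming a plain dict in place of the datastore:

    # Sketch only: values are hypothetical native-sysroot paths.
    def build_terminal_env(d_vars, exports):
        env = {}
        for name in exports.split():
            value = d_vars.get(name)
            if value is not None:
                env[name] = value
        return env

    d_vars = {"PKG_CONFIG_LIBDIR": "/sysroot-native/usr/lib/pkgconfig"}
    env = build_terminal_env(d_vars, "PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
    assert env == {"PKG_CONFIG_LIBDIR": "/sysroot-native/usr/lib/pkgconfig"}
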
diff --git a/meta/classes/create-spdx-2.2.bbclass b/meta/classes/create-spdx-2.2.bbclass
new file mode 100644
index 0000000000..42b693d586
--- /dev/null
+++ b/meta/classes/create-spdx-2.2.bbclass
@@ -0,0 +1,1067 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+DEPLOY_DIR_SPDX ??= "${DEPLOY_DIR}/spdx/${MACHINE}"
+
+# The product name that the CVE database uses. Defaults to BPN, but may need to
+# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
+CVE_PRODUCT ??= "${BPN}"
+CVE_VERSION ??= "${PV}"
+
+SPDXDIR ??= "${WORKDIR}/spdx"
+SPDXDEPLOY = "${SPDXDIR}/deploy"
+SPDXWORK = "${SPDXDIR}/work"
+SPDXIMAGEWORK = "${SPDXDIR}/image-work"
+SPDXSDKWORK = "${SPDXDIR}/sdk-work"
+
+SPDX_TOOL_NAME ??= "oe-spdx-creator"
+SPDX_TOOL_VERSION ??= "1.0"
+
+SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
+
+SPDX_INCLUDE_SOURCES ??= "0"
+SPDX_ARCHIVE_SOURCES ??= "0"
+SPDX_ARCHIVE_PACKAGED ??= "0"
+
+SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
+SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdoc"
+SPDX_PRETTY ??= "0"
+
+SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
+
+SPDX_CUSTOM_ANNOTATION_VARS ??= ""
+
+SPDX_ORG ??= "OpenEmbedded ()"
+SPDX_SUPPLIER ??= "Organization: ${SPDX_ORG}"
+SPDX_SUPPLIER[doc] = "The SPDX PackageSupplier field for SPDX packages created from \
+    this recipe. For SPDX documents created using this class during the build, this \
+    is the contact information for the person or organization who is doing the \
+    build."
+def extract_licenses(filename):
+    import re
+
+    lic_regex = re.compile(rb'^\W*SPDX-License-Identifier:\s*([ \w\d.()+-]+?)(?:\s+\W*)?$', re.MULTILINE)
+
+    try:
+        with open(filename, 'rb') as f:
+            size = min(15000, os.stat(filename).st_size)
+            txt = f.read(size)
+            licenses = re.findall(lic_regex, txt)
+            if licenses:
+                ascii_licenses = [lic.decode('ascii') for lic in licenses]
+                return ascii_licenses
+    except Exception as e:
+        bb.warn(f"Exception reading {filename}: {e}")
+    return None
+
+def get_doc_namespace(d, doc):
+    import uuid
+    namespace_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, d.getVar("SPDX_UUID_NAMESPACE"))
+    return "%s/%s-%s" % (d.getVar("SPDX_NAMESPACE_PREFIX"), doc.name, str(uuid.uuid5(namespace_uuid, doc.name)))
+
+def create_annotation(d, comment):
+    from datetime import datetime, timezone
+
+    creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+    annotation = oe.spdx.SPDXAnnotation()
+    annotation.annotationDate = creation_time
+    annotation.annotationType = "OTHER"
+    annotation.annotator = "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION"))
+    annotation.comment = comment
+    return annotation
+
+def recipe_spdx_is_native(d, recipe):
+    return any(a.annotationType == "OTHER" and
+      a.annotator == "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION")) and
+      a.comment == "isNative" for a in recipe.annotations)
+
+def is_work_shared_spdx(d):
+    return bb.data.inherits_class('kernel', d) or ('work-shared' in d.getVar('WORKDIR'))
+
+def get_json_indent(d):
+    if d.getVar("SPDX_PRETTY") == "1":
+        return 2
+    return None
+
+python() {
+    import json
+    if d.getVar("SPDX_LICENSE_DATA"):
+        return
+
+    with open(d.getVar("SPDX_LICENSES"), "r") as f:
+        data = json.load(f)
+        # Transform the license array to a dictionary
+        data["licenses"] = {l["licenseId"]: l for l in data["licenses"]}
+        d.setVar("SPDX_LICENSE_DATA", data)
+}
101 | |||
102 | def convert_license_to_spdx(lic, document, d, existing={}): | ||
103 | from pathlib import Path | ||
104 | import oe.spdx | ||
105 | |||
106 | license_data = d.getVar("SPDX_LICENSE_DATA") | ||
107 | extracted = {} | ||
108 | |||
109 | def add_extracted_license(ident, name): | ||
110 | nonlocal document | ||
111 | |||
112 | if name in extracted: | ||
113 | return | ||
114 | |||
115 | extracted_info = oe.spdx.SPDXExtractedLicensingInfo() | ||
116 | extracted_info.name = name | ||
117 | extracted_info.licenseId = ident | ||
118 | extracted_info.extractedText = None | ||
119 | |||
120 | if name == "PD": | ||
121 | # Special-case this. | ||
122 | extracted_info.extractedText = "Software released to the public domain" | ||
123 | else: | ||
124 | # Seach for the license in COMMON_LICENSE_DIR and LICENSE_PATH | ||
125 | for directory in [d.getVar('COMMON_LICENSE_DIR')] + (d.getVar('LICENSE_PATH') or '').split(): | ||
126 | try: | ||
127 | with (Path(directory) / name).open(errors="replace") as f: | ||
128 | extracted_info.extractedText = f.read() | ||
129 | break | ||
130 | except FileNotFoundError: | ||
131 | pass | ||
132 | if extracted_info.extractedText is None: | ||
133 | # If it's not SPDX or PD, then NO_GENERIC_LICENSE must be set | ||
134 | filename = d.getVarFlag('NO_GENERIC_LICENSE', name) | ||
135 | if filename: | ||
136 | filename = d.expand("${S}/" + filename) | ||
137 | with open(filename, errors="replace") as f: | ||
138 | extracted_info.extractedText = f.read() | ||
139 | else: | ||
140 | bb.error("Cannot find any text for license %s" % name) | ||
141 | |||
142 | extracted[name] = extracted_info | ||
143 | document.hasExtractedLicensingInfos.append(extracted_info) | ||
144 | |||
145 | def convert(l): | ||
146 | if l == "(" or l == ")": | ||
147 | return l | ||
148 | |||
149 | if l == "&": | ||
150 | return "AND" | ||
151 | |||
152 | if l == "|": | ||
153 | return "OR" | ||
154 | |||
155 | if l == "CLOSED": | ||
156 | return "NONE" | ||
157 | |||
158 | spdx_license = d.getVarFlag("SPDXLICENSEMAP", l) or l | ||
159 | if spdx_license in license_data["licenses"]: | ||
160 | return spdx_license | ||
161 | |||
162 | try: | ||
163 | spdx_license = existing[l] | ||
164 | except KeyError: | ||
165 | spdx_license = "LicenseRef-" + l | ||
166 | add_extracted_license(spdx_license, l) | ||
167 | |||
168 | return spdx_license | ||
169 | |||
170 | lic_split = lic.replace("(", " ( ").replace(")", " ) ").split() | ||
171 | |||
172 | return ' '.join(convert(l) for l in lic_split) | ||
173 | |||
+def process_sources(d):
+    pn = d.getVar('PN')
+    assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
+    if pn in assume_provided:
+        for p in d.getVar("PROVIDES").split():
+            if p != pn:
+                pn = p
+                break
+
+    # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
+    # so avoid archiving source here.
+    if pn.startswith('glibc-locale'):
+        return False
+    if d.getVar('PN') == "libtool-cross":
+        return False
+    if d.getVar('PN') == "libgcc-initial":
+        return False
+    if d.getVar('PN') == "shadow-sysroot":
+        return False
+
+    # We just archive gcc-source for all the gcc related recipes
+    if d.getVar('BPN') in ['gcc', 'libgcc']:
+        bb.debug(1, 'spdx: there is a bug in the scan of %s, do nothing' % pn)
+        return False
+
+    return True
+
+
+def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archive=None, ignore_dirs=[], ignore_top_level_dirs=[]):
+    from pathlib import Path
+    import oe.spdx
+    import hashlib
+
+    source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+    if source_date_epoch:
+        source_date_epoch = int(source_date_epoch)
+
+    sha1s = []
+    spdx_files = []
+
+    file_counter = 1
+    for subdir, dirs, files in os.walk(topdir):
+        dirs[:] = [d for d in dirs if d not in ignore_dirs]
+        if subdir == str(topdir):
+            dirs[:] = [d for d in dirs if d not in ignore_top_level_dirs]
+
+        for file in files:
+            filepath = Path(subdir) / file
+            filename = str(filepath.relative_to(topdir))
+
+            if not filepath.is_symlink() and filepath.is_file():
+                spdx_file = oe.spdx.SPDXFile()
+                spdx_file.SPDXID = get_spdxid(file_counter)
+                for t in get_types(filepath):
+                    spdx_file.fileTypes.append(t)
+                spdx_file.fileName = filename
+
+                if archive is not None:
+                    with filepath.open("rb") as f:
+                        info = archive.gettarinfo(fileobj=f)
+                        info.name = filename
+                        info.uid = 0
+                        info.gid = 0
+                        info.uname = "root"
+                        info.gname = "root"
+
+                        if source_date_epoch is not None and info.mtime > source_date_epoch:
+                            info.mtime = source_date_epoch
+
+                        archive.addfile(info, f)
+
+                sha1 = bb.utils.sha1_file(filepath)
+                sha1s.append(sha1)
+                spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+                    algorithm="SHA1",
+                    checksumValue=sha1,
+                ))
+                spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+                    algorithm="SHA256",
+                    checksumValue=bb.utils.sha256_file(filepath),
+                ))
+
+                if "SOURCE" in spdx_file.fileTypes:
+                    extracted_lics = extract_licenses(filepath)
+                    if extracted_lics:
+                        spdx_file.licenseInfoInFiles = extracted_lics
+
+                doc.files.append(spdx_file)
+                doc.add_relationship(spdx_pkg, "CONTAINS", spdx_file)
+                spdx_pkg.hasFiles.append(spdx_file.SPDXID)
+
+                spdx_files.append(spdx_file)
+
+                file_counter += 1
+
+    sha1s.sort()
+    verifier = hashlib.sha1()
+    for v in sha1s:
+        verifier.update(v.encode("utf-8"))
+    spdx_pkg.packageVerificationCode.packageVerificationCodeValue = verifier.hexdigest()
+
+    return spdx_files
+
+
+def add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources):
+    from pathlib import Path
+    import hashlib
+    import oe.packagedata
+    import oe.spdx
+
+    debug_search_paths = [
+        Path(d.getVar('PKGD')),
+        Path(d.getVar('STAGING_DIR_TARGET')),
+        Path(d.getVar('STAGING_DIR_NATIVE')),
+        Path(d.getVar('STAGING_KERNEL_DIR')),
+    ]
+
+    pkg_data = oe.packagedata.read_subpkgdata_extended(package, d)
+
+    if pkg_data is None:
+        return
+
+    for file_path, file_data in pkg_data["files_info"].items():
+        if not "debugsrc" in file_data:
+            continue
+
+        for pkg_file in package_files:
+            if file_path.lstrip("/") == pkg_file.fileName.lstrip("/"):
+                break
+        else:
+            bb.fatal("No package file found for %s" % str(file_path))
+            continue
+
+        for debugsrc in file_data["debugsrc"]:
+            ref_id = "NOASSERTION"
+            for search in debug_search_paths:
+                if debugsrc.startswith("/usr/src/kernel"):
+                    debugsrc_path = search / debugsrc.replace('/usr/src/kernel/', '')
+                else:
+                    debugsrc_path = search / debugsrc.lstrip("/")
+                if not debugsrc_path.exists():
+                    continue
+
+                file_sha256 = bb.utils.sha256_file(debugsrc_path)
+
+                if file_sha256 in sources:
+                    source_file = sources[file_sha256]
+
+                    doc_ref = package_doc.find_external_document_ref(source_file.doc.documentNamespace)
+                    if doc_ref is None:
+                        doc_ref = oe.spdx.SPDXExternalDocumentRef()
+                        doc_ref.externalDocumentId = "DocumentRef-dependency-" + source_file.doc.name
+                        doc_ref.spdxDocument = source_file.doc.documentNamespace
+                        doc_ref.checksum.algorithm = "SHA1"
+                        doc_ref.checksum.checksumValue = source_file.doc_sha1
+                        package_doc.externalDocumentRefs.append(doc_ref)
+
+                    ref_id = "%s:%s" % (doc_ref.externalDocumentId, source_file.file.SPDXID)
+                else:
+                    bb.debug(1, "Debug source %s with SHA256 %s not found in any dependency" % (str(debugsrc_path), file_sha256))
+                break
+            else:
+                bb.debug(1, "Debug source %s not found" % debugsrc)
+
+            package_doc.add_relationship(pkg_file, "GENERATED_FROM", ref_id, comment=debugsrc)
+
+def collect_dep_recipes(d, doc, spdx_recipe):
+    from pathlib import Path
+    import oe.sbom
+    import oe.spdx
+
+    deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+    dep_recipes = []
+    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+    deps = sorted(set(
+        dep[0] for dep in taskdepdata.values() if
+        dep[1] == "do_create_spdx" and dep[0] != d.getVar("PN")
+    ))
+    for dep_pn in deps:
+        dep_recipe_path = deploy_dir_spdx / "recipes" / ("recipe-%s.spdx.json" % dep_pn)
+
+        spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_recipe_path)
+
+        for pkg in spdx_dep_doc.packages:
+            if pkg.name == dep_pn:
+                spdx_dep_recipe = pkg
+                break
+        else:
+            continue
+
+        dep_recipes.append(oe.sbom.DepRecipe(spdx_dep_doc, spdx_dep_sha1, spdx_dep_recipe))
+
+        dep_recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+        dep_recipe_ref.externalDocumentId = "DocumentRef-dependency-" + spdx_dep_doc.name
+        dep_recipe_ref.spdxDocument = spdx_dep_doc.documentNamespace
+        dep_recipe_ref.checksum.algorithm = "SHA1"
+        dep_recipe_ref.checksum.checksumValue = spdx_dep_sha1
+
+        doc.externalDocumentRefs.append(dep_recipe_ref)
+
+        doc.add_relationship(
+            "%s:%s" % (dep_recipe_ref.externalDocumentId, spdx_dep_recipe.SPDXID),
+            "BUILD_DEPENDENCY_OF",
+            spdx_recipe
+        )
+
+    return dep_recipes
+
+collect_dep_recipes[vardepsexclude] += "BB_TASKDEPDATA"
+collect_dep_recipes[vardeps] += "DEPENDS"
+
+def collect_dep_sources(d, dep_recipes):
+    import oe.sbom
+
+    sources = {}
+    for dep in dep_recipes:
+        # Don't collect sources from native recipes as they
+        # match non-native sources also.
+        if recipe_spdx_is_native(d, dep.recipe):
+            continue
+        recipe_files = set(dep.recipe.hasFiles)
+
+        for spdx_file in dep.doc.files:
+            if spdx_file.SPDXID not in recipe_files:
+                continue
+
+            if "SOURCE" in spdx_file.fileTypes:
+                for checksum in spdx_file.checksums:
+                    if checksum.algorithm == "SHA256":
+                        sources[checksum.checksumValue] = oe.sbom.DepSource(dep.doc, dep.doc_sha1, dep.recipe, spdx_file)
+                        break
+
+    return sources
+
+def add_download_packages(d, doc, recipe):
+    import os.path
+    from bb.fetch2 import decodeurl, CHECKSUM_LIST
+    import bb.process
+    import oe.spdx
+    import oe.sbom
+
+    for download_idx, src_uri in enumerate(d.getVar('SRC_URI').split()):
+        f = bb.fetch2.FetchData(src_uri, d)
+
+        for name in f.names:
+            package = oe.spdx.SPDXPackage()
+            package.name = "%s-source-%d" % (d.getVar("PN"), download_idx + 1)
+            package.SPDXID = oe.sbom.get_download_spdxid(d, download_idx + 1)
+
+            if f.type == "file":
+                continue
+
+            uri = f.type
+            proto = getattr(f, "proto", None)
+            if proto is not None:
+                uri = uri + "+" + proto
+            uri = uri + "://" + f.host + f.path
+
+            if f.method.supports_srcrev():
+                uri = uri + "@" + f.revisions[name]
+
+            if f.method.supports_checksum(f):
+                for checksum_id in CHECKSUM_LIST:
+                    if checksum_id.upper() not in oe.spdx.SPDXPackage.ALLOWED_CHECKSUMS:
+                        continue
+
+                    expected_checksum = getattr(f, "%s_expected" % checksum_id)
+                    if expected_checksum is None:
+                        continue
+
+                    c = oe.spdx.SPDXChecksum()
+                    c.algorithm = checksum_id.upper()
+                    c.checksumValue = expected_checksum
+                    package.checksums.append(c)
+
+            package.downloadLocation = uri
+            doc.packages.append(package)
+            doc.add_relationship(doc, "DESCRIBES", package)
+            # In the future, we might be able to do more fancy dependencies,
+            # but this should be sufficient for now
+            doc.add_relationship(package, "BUILD_DEPENDENCY_OF", recipe)
+
+python do_create_spdx() {
+    from datetime import datetime, timezone
+    import oe.sbom
+    import oe.spdx
+    import uuid
+    from pathlib import Path
+    from contextlib import contextmanager
+    import oe.cve_check
+
+    @contextmanager
+    def optional_tarfile(name, guard, mode="w"):
+        import tarfile
+        import gzip
+
+        if guard:
+            name.parent.mkdir(parents=True, exist_ok=True)
+            with gzip.open(name, mode=mode + "b") as f:
+                with tarfile.open(fileobj=f, mode=mode + "|") as tf:
+                    yield tf
+        else:
+            yield None
+
+
+    deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+    spdx_workdir = Path(d.getVar("SPDXWORK"))
+    include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
+    archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
+    archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
+
+    creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+    doc = oe.spdx.SPDXDocument()
+
+    doc.name = "recipe-" + d.getVar("PN")
+    doc.documentNamespace = get_doc_namespace(d, doc)
+    doc.creationInfo.created = creation_time
+    doc.creationInfo.comment = "This document was created by analyzing recipe files during the build."
+    doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+    doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+    doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+    doc.creationInfo.creators.append("Person: N/A ()")
+
+    recipe = oe.spdx.SPDXPackage()
+    recipe.name = d.getVar("PN")
+    recipe.versionInfo = d.getVar("PV")
+    recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
+    recipe.supplier = d.getVar("SPDX_SUPPLIER")
+    if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
+        recipe.annotations.append(create_annotation(d, "isNative"))
+
+    homepage = d.getVar("HOMEPAGE")
+    if homepage:
+        recipe.homepage = homepage
+
+    license = d.getVar("LICENSE")
+    if license:
+        recipe.licenseDeclared = convert_license_to_spdx(license, doc, d)
+
+    summary = d.getVar("SUMMARY")
+    if summary:
+        recipe.summary = summary
+
+    description = d.getVar("DESCRIPTION")
+    if description:
+        recipe.description = description
+
+    if d.getVar("SPDX_CUSTOM_ANNOTATION_VARS"):
+        for var in d.getVar('SPDX_CUSTOM_ANNOTATION_VARS').split():
+            recipe.annotations.append(create_annotation(d, var + "=" + d.getVar(var)))
+
+    # Some CVEs may be patched during the build process without incrementing the version number,
+    # so querying for CVEs based on the CPE id can lead to false positives. To account for this,
+    # save the CVEs fixed by patches to source information field in the SPDX.
+    patched_cves = oe.cve_check.get_patched_cves(d)
+    patched_cves = list(patched_cves)
+    patched_cves = ' '.join(patched_cves)
+    if patched_cves:
+        recipe.sourceInfo = "CVEs fixed: " + patched_cves
+
+    cpe_ids = oe.cve_check.get_cpe_ids(d.getVar("CVE_PRODUCT"), d.getVar("CVE_VERSION"))
+    if cpe_ids:
+        for cpe_id in cpe_ids:
+            cpe = oe.spdx.SPDXExternalReference()
+            cpe.referenceCategory = "SECURITY"
+            cpe.referenceType = "http://spdx.org/rdf/references/cpe23Type"
+            cpe.referenceLocator = cpe_id
+            recipe.externalRefs.append(cpe)
+
+    doc.packages.append(recipe)
+    doc.add_relationship(doc, "DESCRIBES", recipe)
+
+    add_download_packages(d, doc, recipe)
+
+    if process_sources(d) and include_sources:
+        recipe_archive = deploy_dir_spdx / "recipes" / (doc.name + ".tar.gz")
+        with optional_tarfile(recipe_archive, archive_sources) as archive:
+            spdx_get_src(d)
+
+            add_package_files(
+                d,
+                doc,
+                recipe,
+                spdx_workdir,
+                lambda file_counter: "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), file_counter),
+                lambda filepath: ["SOURCE"],
+                ignore_dirs=[".git"],
+                ignore_top_level_dirs=["temp"],
+                archive=archive,
+            )
+
+            if archive is not None:
+                recipe.packageFileName = str(recipe_archive.name)
+
+    dep_recipes = collect_dep_recipes(d, doc, recipe)
+
+    doc_sha1 = oe.sbom.write_doc(d, doc, "recipes", indent=get_json_indent(d))
+    dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
+
+    recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+    recipe_ref.externalDocumentId = "DocumentRef-recipe-" + recipe.name
+    recipe_ref.spdxDocument = doc.documentNamespace
+    recipe_ref.checksum.algorithm = "SHA1"
+    recipe_ref.checksum.checksumValue = doc_sha1
+
+    sources = collect_dep_sources(d, dep_recipes)
+    found_licenses = {license.name:recipe_ref.externalDocumentId + ":" + license.licenseId for license in doc.hasExtractedLicensingInfos}
+
+    if not recipe_spdx_is_native(d, recipe):
+        bb.build.exec_func("read_subpackage_metadata", d)
+
+        pkgdest = Path(d.getVar("PKGDEST"))
+        for package in d.getVar("PACKAGES").split():
+            if not oe.packagedata.packaged(package, d):
+                continue
+
+            package_doc = oe.spdx.SPDXDocument()
+            pkg_name = d.getVar("PKG:%s" % package) or package
+            package_doc.name = pkg_name
+            package_doc.documentNamespace = get_doc_namespace(d, package_doc)
+            package_doc.creationInfo.created = creation_time
+            package_doc.creationInfo.comment = "This document was created by analyzing packages created during the build."
+            package_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+            package_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+            package_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+            package_doc.creationInfo.creators.append("Person: N/A ()")
+            package_doc.externalDocumentRefs.append(recipe_ref)
+
+            package_license = d.getVar("LICENSE:%s" % package) or d.getVar("LICENSE")
+
+            spdx_package = oe.spdx.SPDXPackage()
+
+            spdx_package.SPDXID = oe.sbom.get_package_spdxid(pkg_name)
+            spdx_package.name = pkg_name
+            spdx_package.versionInfo = d.getVar("PV")
+            spdx_package.licenseDeclared = convert_license_to_spdx(package_license, package_doc, d, found_licenses)
+            spdx_package.supplier = d.getVar("SPDX_SUPPLIER")
+
+            package_doc.packages.append(spdx_package)
+
+            package_doc.add_relationship(spdx_package, "GENERATED_FROM", "%s:%s" % (recipe_ref.externalDocumentId, recipe.SPDXID))
+            package_doc.add_relationship(package_doc, "DESCRIBES", spdx_package)
+
+            package_archive = deploy_dir_spdx / "packages" / (package_doc.name + ".tar.gz")
+            with optional_tarfile(package_archive, archive_packaged) as archive:
+                package_files = add_package_files(
+                    d,
+                    package_doc,
+                    spdx_package,
+                    pkgdest / package,
+                    lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
+                    lambda filepath: ["BINARY"],
+                    ignore_top_level_dirs=['CONTROL', 'DEBIAN'],
+                    archive=archive,
+                )
+
+                if archive is not None:
+                    spdx_package.packageFileName = str(package_archive.name)
+
+            add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
+
+            oe.sbom.write_doc(d, package_doc, "packages", indent=get_json_indent(d))
+}
+# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies for archiving the source
+addtask do_create_spdx after do_package do_packagedata do_unpack before do_populate_sdk do_build do_rm_work
+
+SSTATETASKS += "do_create_spdx"
+do_create_spdx[sstate-inputdirs] = "${SPDXDEPLOY}"
+do_create_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_spdx_setscene () {
+    sstate_setscene(d)
+}
+addtask do_create_spdx_setscene
+
+do_create_spdx[dirs] = "${SPDXWORK}"
+do_create_spdx[cleandirs] = "${SPDXDEPLOY} ${SPDXWORK}"
+do_create_spdx[depends] += "${PATCHDEPENDENCY}"
+do_create_spdx[deptask] = "do_create_spdx"
+
+def collect_package_providers(d):
+    from pathlib import Path
+    import oe.sbom
+    import oe.spdx
+    import json
+
+    deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+    providers = {}
+
+    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+    deps = sorted(set(
+        dep[0] for dep in taskdepdata.values() if dep[0] != d.getVar("PN")
+    ))
+    deps.append(d.getVar("PN"))
+
+    for dep_pn in deps:
+        recipe_data = oe.packagedata.read_pkgdata(dep_pn, d)
+
+        for pkg in recipe_data.get("PACKAGES", "").split():
+
+            pkg_data = oe.packagedata.read_subpkgdata_dict(pkg, d)
+            rprovides = set(n for n, _ in bb.utils.explode_dep_versions2(pkg_data.get("RPROVIDES", "")).items())
+            rprovides.add(pkg)
+
+            for r in rprovides:
+                providers[r] = pkg
+
+    return providers
+
+collect_package_providers[vardepsexclude] += "BB_TASKDEPDATA"
+
+python do_create_runtime_spdx() {
+    from datetime import datetime, timezone
+    import oe.sbom
+    import oe.spdx
+    import oe.packagedata
+    from pathlib import Path
+
+    deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+    spdx_deploy = Path(d.getVar("SPDXRUNTIMEDEPLOY"))
+    is_native = bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d)
+
+    creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+    providers = collect_package_providers(d)
+
+    if not is_native:
+        bb.build.exec_func("read_subpackage_metadata", d)
+
+        dep_package_cache = {}
+
+        pkgdest = Path(d.getVar("PKGDEST"))
+        for package in d.getVar("PACKAGES").split():
+            localdata = bb.data.createCopy(d)
+            pkg_name = d.getVar("PKG:%s" % package) or package
+            localdata.setVar("PKG", pkg_name)
+            localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + package)
+
+            if not oe.packagedata.packaged(package, localdata):
+                continue
+
+            pkg_spdx_path = deploy_dir_spdx / "packages" / (pkg_name + ".spdx.json")
+
+            package_doc, package_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+            for p in package_doc.packages:
+                if p.name == pkg_name:
+                    spdx_package = p
+                    break
+            else:
+                bb.fatal("Package '%s' not found in %s" % (pkg_name, pkg_spdx_path))
+
+            runtime_doc = oe.spdx.SPDXDocument()
+            runtime_doc.name = "runtime-" + pkg_name
+            runtime_doc.documentNamespace = get_doc_namespace(localdata, runtime_doc)
+            runtime_doc.creationInfo.created = creation_time
+            runtime_doc.creationInfo.comment = "This document was created by analyzing package runtime dependencies."
+            runtime_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+            runtime_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+            runtime_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+            runtime_doc.creationInfo.creators.append("Person: N/A ()")
+
+            package_ref = oe.spdx.SPDXExternalDocumentRef()
+            package_ref.externalDocumentId = "DocumentRef-package-" + package
+            package_ref.spdxDocument = package_doc.documentNamespace
+            package_ref.checksum.algorithm = "SHA1"
+            package_ref.checksum.checksumValue = package_doc_sha1
+
+            runtime_doc.externalDocumentRefs.append(package_ref)
+
+            runtime_doc.add_relationship(
+                runtime_doc.SPDXID,
+                "AMENDS",
+                "%s:%s" % (package_ref.externalDocumentId, package_doc.SPDXID)
+            )
+
+            deps = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
+            seen_deps = set()
+            for dep, _ in deps.items():
+                if dep in seen_deps:
+                    continue
+
+                if dep not in providers:
+                    continue
+
+                dep = providers[dep]
+
+                if not oe.packagedata.packaged(dep, localdata):
+                    continue
+
+                dep_pkg_data = oe.packagedata.read_subpkgdata_dict(dep, d)
+                dep_pkg = dep_pkg_data["PKG"]
+
+                if dep in dep_package_cache:
+                    (dep_spdx_package, dep_package_ref) = dep_package_cache[dep]
+                else:
+                    dep_path = deploy_dir_spdx / "packages" / ("%s.spdx.json" % dep_pkg)
+
+                    spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_path)
+
+                    for pkg in spdx_dep_doc.packages:
+                        if pkg.name == dep_pkg:
+                            dep_spdx_package = pkg
+                            break
+                    else:
+                        bb.fatal("Package '%s' not found in %s" % (dep_pkg, dep_path))
+
+                    dep_package_ref = oe.spdx.SPDXExternalDocumentRef()
+                    dep_package_ref.externalDocumentId = "DocumentRef-runtime-dependency-" + spdx_dep_doc.name
+                    dep_package_ref.spdxDocument = spdx_dep_doc.documentNamespace
+                    dep_package_ref.checksum.algorithm = "SHA1"
+                    dep_package_ref.checksum.checksumValue = spdx_dep_sha1
+
+                    dep_package_cache[dep] = (dep_spdx_package, dep_package_ref)
+
+                runtime_doc.externalDocumentRefs.append(dep_package_ref)
+
+                runtime_doc.add_relationship(
+                    "%s:%s" % (dep_package_ref.externalDocumentId, dep_spdx_package.SPDXID),
+                    "RUNTIME_DEPENDENCY_OF",
+                    "%s:%s" % (package_ref.externalDocumentId, spdx_package.SPDXID)
+                )
+                seen_deps.add(dep)
+
+            oe.sbom.write_doc(d, runtime_doc, "runtime", spdx_deploy, indent=get_json_indent(d))
+}
+
+addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
+SSTATETASKS += "do_create_runtime_spdx"
+do_create_runtime_spdx[sstate-inputdirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_runtime_spdx_setscene () {
+    sstate_setscene(d)
+}
+addtask do_create_runtime_spdx_setscene
+
+do_create_runtime_spdx[dirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[cleandirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[rdeptask] = "do_create_spdx"
+
+def spdx_get_src(d):
+    """
+    save patched source of the recipe in SPDXWORK.
+    """
+    import shutil
+    spdx_workdir = d.getVar('SPDXWORK')
+    spdx_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
+    pn = d.getVar('PN')
+
+    workdir = d.getVar("WORKDIR")
+
+    try:
+        # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
+        if not is_work_shared_spdx(d):
+            # Change the WORKDIR to make do_unpack do_patch run in another dir.
+            d.setVar('WORKDIR', spdx_workdir)
+            # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
+            d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+
+            # The changed 'WORKDIR' also changed 'B', so create dir 'B' for the
+            # following tasks that may require it (for example, some recipes'
+            # do_patch requires 'B' to exist).
+            bb.utils.mkdirhier(d.getVar('B'))
+
+            bb.build.exec_func('do_unpack', d)
+        # Copy source of kernel to spdx_workdir
+        if is_work_shared_spdx(d):
+            share_src = d.getVar('WORKDIR')
+            d.setVar('WORKDIR', spdx_workdir)
+            d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+            src_dir = spdx_workdir + "/" + d.getVar('PN') + "-" + d.getVar('PV') + "-" + d.getVar('PR')
+            bb.utils.mkdirhier(src_dir)
+            if bb.data.inherits_class('kernel', d):
+                share_src = d.getVar('STAGING_KERNEL_DIR')
+            cmd_copy_share = "cp -rf " + share_src + "/* " + src_dir + "/"
+            cmd_copy_shared_res = os.popen(cmd_copy_share).read()
+            bb.note("cmd_copy_shared_result = " + cmd_copy_shared_res)
+
+            git_path = src_dir + "/.git"
+            if os.path.exists(git_path):
+                shutil.rmtree(git_path)
+
+        # Make sure gcc and kernel sources are patched only once
+        if not (d.getVar('SRC_URI') == "" or is_work_shared_spdx(d)):
+            bb.build.exec_func('do_patch', d)
+
+        # Some userland has no source.
+        if not os.path.exists(spdx_workdir):
+            bb.utils.mkdirhier(spdx_workdir)
+    finally:
+        d.setVar("WORKDIR", workdir)
+
+do_rootfs[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+do_rootfs[cleandirs] += "${SPDXIMAGEWORK}"
+
+ROOTFS_POSTUNINSTALL_COMMAND =+ "image_combine_spdx ; "
+
+do_populate_sdk[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+do_populate_sdk[cleandirs] += "${SPDXSDKWORK}"
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_combine_spdx; "
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_combine_spdx; "
+
+python image_combine_spdx() {
+    import os
+    import oe.sbom
+    from pathlib import Path
+    from oe.rootfs import image_list_installed_packages
+
+    image_name = d.getVar("IMAGE_NAME")
+    image_link_name = d.getVar("IMAGE_LINK_NAME")
+    imgdeploydir = Path(d.getVar("IMGDEPLOYDIR"))
+    img_spdxid = oe.sbom.get_image_spdxid(image_name)
+    packages = image_list_installed_packages(d)
+
+    combine_spdx(d, image_name, imgdeploydir, img_spdxid, packages, Path(d.getVar("SPDXIMAGEWORK")))
+
+    def make_image_link(target_path, suffix):
+        if image_link_name:
+            link = imgdeploydir / (image_link_name + suffix)
+            if link != target_path:
+                link.symlink_to(os.path.relpath(target_path, link.parent))
+
+    spdx_tar_path = imgdeploydir / (image_name + ".spdx.tar.gz")
+    make_image_link(spdx_tar_path, ".spdx.tar.gz")
+}
+
+python sdk_host_combine_spdx() {
+    sdk_combine_spdx(d, "host")
+}
+
+python sdk_target_combine_spdx() {
+    sdk_combine_spdx(d, "target")
+}
+
+def sdk_combine_spdx(d, sdk_type):
+    import oe.sbom
+    from pathlib import Path
+    from oe.sdk import sdk_list_installed_packages
+
+    sdk_name = d.getVar("SDK_NAME") + "-" + sdk_type
+    sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR"))
+    sdk_spdxid = oe.sbom.get_sdk_spdxid(sdk_name)
+    sdk_packages = sdk_list_installed_packages(d, sdk_type == "target")
+    combine_spdx(d, sdk_name, sdk_deploydir, sdk_spdxid, sdk_packages, Path(d.getVar('SPDXSDKWORK')))
+
+def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages, spdx_workdir):
+    import os
+    import oe.spdx
+    import oe.sbom
+    import io
+    import json
+    from datetime import timezone, datetime
+    from pathlib import Path
+    import tarfile
+    import gzip
+
+    creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+    deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+    source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+
+    doc = oe.spdx.SPDXDocument()
+    doc.name = rootfs_name
+    doc.documentNamespace = get_doc_namespace(d, doc)
+    doc.creationInfo.created = creation_time
+    doc.creationInfo.comment = "This document was created by analyzing the source of the Yocto recipe during the build."
+    doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+    doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+    doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+    doc.creationInfo.creators.append("Person: N/A ()")
+
+    image = oe.spdx.SPDXPackage()
+    image.name = d.getVar("PN")
+    image.versionInfo = d.getVar("PV")
+    image.SPDXID = rootfs_spdxid
+    image.supplier = d.getVar("SPDX_SUPPLIER")
+
+    doc.packages.append(image)
+
+    for name in sorted(packages.keys()):
+        pkg_spdx_path = deploy_dir_spdx / "packages" / (name + ".spdx.json")
+        pkg_doc, pkg_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+        for p in pkg_doc.packages:
+            if p.name == name:
+                pkg_ref = oe.spdx.SPDXExternalDocumentRef()
+                pkg_ref.externalDocumentId = "DocumentRef-%s" % pkg_doc.name
+                pkg_ref.spdxDocument = pkg_doc.documentNamespace
+                pkg_ref.checksum.algorithm = "SHA1"
+                pkg_ref.checksum.checksumValue = pkg_doc_sha1
+
+                doc.externalDocumentRefs.append(pkg_ref)
+                doc.add_relationship(image, "CONTAINS", "%s:%s" % (pkg_ref.externalDocumentId, p.SPDXID))
+                break
+        else:
+            bb.fatal("Unable to find package with name '%s' in SPDX file %s" % (name, pkg_spdx_path))
+
+        runtime_spdx_path = deploy_dir_spdx / "runtime" / ("runtime-" + name + ".spdx.json")
+        runtime_doc, runtime_doc_sha1 = oe.sbom.read_doc(runtime_spdx_path)
+
+        runtime_ref = oe.spdx.SPDXExternalDocumentRef()
+        runtime_ref.externalDocumentId = "DocumentRef-%s" % runtime_doc.name
+        runtime_ref.spdxDocument = runtime_doc.documentNamespace
+        runtime_ref.checksum.algorithm = "SHA1"
+        runtime_ref.checksum.checksumValue = runtime_doc_sha1
+
+        # "OTHER" isn't ideal here, but I can't find a relationship that makes sense
+        doc.externalDocumentRefs.append(runtime_ref)
+        doc.add_relationship(
+            image,
+            "OTHER",
+            "%s:%s" % (runtime_ref.externalDocumentId, runtime_doc.SPDXID),
+            comment="Runtime dependencies for %s" % name
+        )
+
+    image_spdx_path = spdx_workdir / (rootfs_name + ".spdx.json")
+
+    with image_spdx_path.open("wb") as f:
+        doc.to_json(f, sort_keys=True, indent=get_json_indent(d))
+
+    num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+    visited_docs = set()
+
+    index = {"documents": []}
+
+    spdx_tar_path = rootfs_deploydir / (rootfs_name + ".spdx.tar.gz")
+    with gzip.open(spdx_tar_path, "w") as f:
+        with tarfile.open(fileobj=f, mode="w|") as tar:
+            def collect_spdx_document(path):
+                nonlocal tar
+                nonlocal deploy_dir_spdx
+                nonlocal source_date_epoch
+                nonlocal index
+
+                if path in visited_docs:
+                    return
+
+                visited_docs.add(path)
+
+                with path.open("rb") as f:
+                    doc, sha1 = oe.sbom.read_doc(f)
+                    f.seek(0)
+
+                    if doc.documentNamespace in visited_docs:
+                        return
+
+                    bb.note("Adding SPDX document %s" % path)
+                    visited_docs.add(doc.documentNamespace)
+                    info = tar.gettarinfo(fileobj=f)
+
+                    info.name = doc.name + ".spdx.json"
+                    info.uid = 0
+                    info.gid = 0
+                    info.uname = "root"
+                    info.gname = "root"
+
+                    if source_date_epoch is not None and info.mtime > int(source_date_epoch):
+                        info.mtime = int(source_date_epoch)
+
+                    tar.addfile(info, f)
+
+                    index["documents"].append({
+                        "filename": info.name,
+                        "documentNamespace": doc.documentNamespace,
1042 | "sha1": sha1, | ||
1043 | }) | ||
1044 | |||
1045 | for ref in doc.externalDocumentRefs: | ||
1046 | ref_path = deploy_dir_spdx / "by-namespace" / ref.spdxDocument.replace("/", "_") | ||
1047 | collect_spdx_document(ref_path) | ||
1048 | |||
1049 | collect_spdx_document(image_spdx_path) | ||
1050 | |||
1051 | index["documents"].sort(key=lambda x: x["filename"]) | ||
1052 | |||
1053 | index_str = io.BytesIO(json.dumps( | ||
1054 | index, | ||
1055 | sort_keys=True, | ||
1056 | indent=get_json_indent(d), | ||
1057 | ).encode("utf-8")) | ||
1058 | |||
1059 | info = tarfile.TarInfo() | ||
1060 | info.name = "index.json" | ||
1061 | info.size = len(index_str.getvalue()) | ||
1062 | info.uid = 0 | ||
1063 | info.gid = 0 | ||
1064 | info.uname = "root" | ||
1065 | info.gname = "root" | ||
1066 | |||
1067 | tar.addfile(info, fileobj=index_str) | ||
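
The tarball assembled above carries every reachable SPDX document plus an index.json recording each document's filename, namespace and SHA1. A minimal consumer-side sketch for checking such an archive against its own index, assuming the recorded sha1 is the digest of the document bytes (verify_spdx_archive is a hypothetical helper, standard library only):

    import hashlib
    import json
    import tarfile

    def verify_spdx_archive(path):
        # Check every document listed in index.json against its recorded SHA1.
        with tarfile.open(path, "r:gz") as tar:
            index = json.load(tar.extractfile("index.json"))
            for entry in index["documents"]:
                data = tar.extractfile(entry["filename"]).read()
                if hashlib.sha1(data).hexdigest() != entry["sha1"]:
                    raise ValueError("checksum mismatch: %s" % entry["filename"])
        return index
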
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass new file mode 100644 index 0000000000..19c6c0ff0b --- /dev/null +++ b/meta/classes/create-spdx.bbclass | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Copyright OpenEmbedded Contributors | ||
3 | # | ||
4 | # SPDX-License-Identifier: GPL-2.0-only | ||
5 | # | ||
6 | # Include this class when you don't care what version of SPDX you get; it will | ||
7 | # be updated to the latest stable version that is supported | ||
8 | inherit create-spdx-2.2 | ||
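
Recipes do not normally inherit this class directly; SBOM generation is switched on globally from configuration. A minimal sketch, assuming a conventional local.conf (inherit create-spdx-2.2 instead if a build must not follow future version bumps):

    INHERIT += "create-spdx"
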
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass index 8086cf05e9..5e6bae1757 100644 --- a/meta/classes/cve-check.bbclass +++ b/meta/classes/cve-check.bbclass | |||
@@ -20,13 +20,13 @@ | |||
20 | # the only method to check against CVEs. Running this tool | 20 | # the only method to check against CVEs. Running this tool |
21 | # doesn't guarantee your packages are free of CVEs. | 21 | # doesn't guarantee your packages are free of CVEs. |
22 | 22 | ||
23 | # The product name that the CVE database uses. Defaults to BPN, but may need to | 23 | # The product name that the CVE database uses defaults to BPN, but may need to |
24 | # be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff). | 24 | # be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff). 
25 | CVE_PRODUCT ??= "${BPN}" | 25 | CVE_PRODUCT ??= "${BPN}" |
26 | CVE_VERSION ??= "${PV}" | 26 | CVE_VERSION ??= "${PV}" |
27 | 27 | ||
28 | CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK" | 28 | CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK" |
29 | CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db" | 29 | CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_2.db" |
30 | CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock" | 30 | CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock" |
31 | 31 | ||
32 | CVE_CHECK_LOG ?= "${T}/cve.log" | 32 | CVE_CHECK_LOG ?= "${T}/cve.log" |
@@ -34,15 +34,33 @@ CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check" | |||
34 | CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve" | 34 | CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve" |
35 | CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary" | 35 | CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary" |
36 | CVE_CHECK_SUMMARY_FILE ?= "${CVE_CHECK_SUMMARY_DIR}/${CVE_CHECK_SUMMARY_FILE_NAME}" | 36 | CVE_CHECK_SUMMARY_FILE ?= "${CVE_CHECK_SUMMARY_DIR}/${CVE_CHECK_SUMMARY_FILE_NAME}" |
37 | CVE_CHECK_SUMMARY_FILE_NAME_JSON = "cve-summary.json" | ||
38 | CVE_CHECK_SUMMARY_INDEX_PATH = "${CVE_CHECK_SUMMARY_DIR}/cve-summary-index.txt" | ||
39 | |||
40 | CVE_CHECK_LOG_JSON ?= "${T}/cve.json" | ||
37 | 41 | ||
38 | CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve" | 42 | CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve" |
39 | CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}" | 43 | CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}" |
40 | CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve" | 44 | CVE_CHECK_RECIPE_FILE_JSON ?= "${CVE_CHECK_DIR}/${PN}_cve.json" |
45 | CVE_CHECK_MANIFEST ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve" | ||
46 | CVE_CHECK_MANIFEST_JSON ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.json" | ||
41 | CVE_CHECK_COPY_FILES ??= "1" | 47 | CVE_CHECK_COPY_FILES ??= "1" |
42 | CVE_CHECK_CREATE_MANIFEST ??= "1" | 48 | CVE_CHECK_CREATE_MANIFEST ??= "1" |
43 | 49 | ||
50 | # Report Patched or Ignored/Whitelisted CVEs | ||
44 | CVE_CHECK_REPORT_PATCHED ??= "1" | 51 | CVE_CHECK_REPORT_PATCHED ??= "1" |
45 | 52 | ||
53 | CVE_CHECK_SHOW_WARNINGS ??= "1" | ||
54 | |||
55 | # Provide text output | ||
56 | CVE_CHECK_FORMAT_TEXT ??= "1" | ||
57 | |||
58 | # Provide JSON output - disabled by default for backward compatibility | ||
59 | CVE_CHECK_FORMAT_JSON ??= "0" | ||
60 | |||
61 | # Check for packages without CVEs (no issues or missing product name) | ||
62 | CVE_CHECK_COVERAGE ??= "1" | ||
63 | |||
46 | # Whitelist for packages (PN) | 64 | # Whitelist for packages (PN) |
47 | CVE_CHECK_PN_WHITELIST ?= "" | 65 | CVE_CHECK_PN_WHITELIST ?= "" |
48 | 66 | ||
@@ -53,12 +71,43 @@ CVE_CHECK_PN_WHITELIST ?= "" | |||
53 | # | 71 | # |
54 | CVE_CHECK_WHITELIST ?= "" | 72 | CVE_CHECK_WHITELIST ?= "" |
55 | 73 | ||
56 | # set to "alphabetical" for version using single alphabetical character as increament release | 74 | # Layers to be excluded |
75 | CVE_CHECK_LAYER_EXCLUDELIST ??= "" | ||
76 | |||
77 | # Layers to be included | ||
78 | CVE_CHECK_LAYER_INCLUDELIST ??= "" | ||
79 | |||
80 | |||
81 | # set to "alphabetical" for versions that use a single alphabetical character as the release increment | ||
57 | CVE_VERSION_SUFFIX ??= "" | 82 | CVE_VERSION_SUFFIX ??= "" |
58 | 83 | ||
84 | def generate_json_report(d, out_path, link_path): | ||
85 | if os.path.exists(d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")): | ||
86 | import json | ||
87 | from oe.cve_check import cve_check_merge_jsons, update_symlinks | ||
88 | |||
89 | bb.note("Generating JSON CVE summary") | ||
90 | index_file = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH") | ||
91 | summary = {"version":"1", "package": []} | ||
92 | with open(index_file) as f: | ||
93 | filename = f.readline() | ||
94 | while filename: | ||
95 | with open(filename.rstrip()) as j: | ||
96 | data = json.load(j) | ||
97 | cve_check_merge_jsons(summary, data) | ||
98 | filename = f.readline() | ||
99 | |||
100 | summary["package"].sort(key=lambda p: p['name']) | ||
101 | |||
102 | with open(out_path, "w") as f: | ||
103 | json.dump(summary, f, indent=2) | ||
104 | |||
105 | update_symlinks(out_path, link_path) | ||
106 | |||
59 | python cve_save_summary_handler () { | 107 | python cve_save_summary_handler () { |
60 | import shutil | 108 | import shutil |
61 | import datetime | 109 | import datetime |
110 | from oe.cve_check import update_symlinks | ||
62 | 111 | ||
63 | cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE") | 112 | cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE") |
64 | 113 | ||
@@ -71,13 +120,15 @@ python cve_save_summary_handler () { | |||
71 | 120 | ||
72 | if os.path.exists(cve_tmp_file): | 121 | if os.path.exists(cve_tmp_file): |
73 | shutil.copyfile(cve_tmp_file, cve_summary_file) | 122 | shutil.copyfile(cve_tmp_file, cve_summary_file) |
74 | 123 | cvefile_link = os.path.join(cvelogpath, cve_summary_name) | |
75 | if cve_summary_file and os.path.exists(cve_summary_file): | 124 | update_symlinks(cve_summary_file, cvefile_link) |
76 | cvefile_link = os.path.join(cvelogpath, cve_summary_name) | 125 | bb.plain("Complete CVE report summary created at: %s" % cvefile_link) |
77 | 126 | ||
78 | if os.path.exists(os.path.realpath(cvefile_link)): | 127 | if d.getVar("CVE_CHECK_FORMAT_JSON") == "1": |
79 | os.remove(cvefile_link) | 128 | json_summary_link_name = os.path.join(cvelogpath, d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON")) |
80 | os.symlink(os.path.basename(cve_summary_file), cvefile_link) | 129 | json_summary_name = os.path.join(cvelogpath, "%s-%s.json" % (cve_summary_name, timestamp)) |
130 | generate_json_report(d, json_summary_name, json_summary_link_name) | ||
131 | bb.plain("Complete CVE JSON report summary created at: %s" % json_summary_link_name) | ||
81 | } | 132 | } |
82 | 133 | ||
83 | addhandler cve_save_summary_handler | 134 | addhandler cve_save_summary_handler |
@@ -87,23 +138,25 @@ python do_cve_check () { | |||
87 | """ | 138 | """ |
88 | Check recipe for patched and unpatched CVEs | 139 | Check recipe for patched and unpatched CVEs |
89 | """ | 140 | """ |
141 | from oe.cve_check import get_patched_cves | ||
90 | 142 | ||
91 | if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")): | 143 | with bb.utils.fileslocked([d.getVar("CVE_CHECK_DB_FILE_LOCK")], shared=True): |
92 | try: | 144 | if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")): |
93 | patched_cves = get_patches_cves(d) | 145 | try: |
94 | except FileNotFoundError: | 146 | patched_cves = get_patched_cves(d) |
95 | bb.fatal("Failure in searching patches") | 147 | except FileNotFoundError: |
96 | whitelisted, patched, unpatched = check_cves(d, patched_cves) | 148 | bb.fatal("Failure in searching patches") |
97 | if patched or unpatched: | 149 | ignored, patched, unpatched, status = check_cves(d, patched_cves) |
98 | cve_data = get_cve_info(d, patched + unpatched) | 150 | if patched or unpatched or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status): |
99 | cve_write_data(d, patched, unpatched, whitelisted, cve_data) | 151 | cve_data = get_cve_info(d, patched + unpatched + ignored) |
100 | else: | 152 | cve_write_data(d, patched, unpatched, ignored, cve_data, status) |
101 | bb.note("No CVE database found, skipping CVE check") | 153 | else: |
154 | bb.note("No CVE database found, skipping CVE check") | ||
102 | 155 | ||
103 | } | 156 | } |
104 | 157 | ||
105 | addtask cve_check before do_build after do_fetch | 158 | addtask cve_check before do_build |
106 | do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db" | 159 | do_cve_check[depends] = "cve-update-nvd2-native:do_fetch" |
107 | do_cve_check[nostamp] = "1" | 160 | do_cve_check[nostamp] = "1" |
108 | 161 | ||
109 | python cve_check_cleanup () { | 162 | python cve_check_cleanup () { |
@@ -111,10 +164,11 @@ python cve_check_cleanup () { | |||
111 | Delete the file used to gather all the CVE information. | 164 | Delete the file used to gather all the CVE information. |
112 | """ | 165 | """ |
113 | bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE")) | 166 | bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE")) |
167 | bb.utils.remove(e.data.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")) | ||
114 | } | 168 | } |
115 | 169 | ||
116 | addhandler cve_check_cleanup | 170 | addhandler cve_check_cleanup |
117 | cve_check_cleanup[eventmask] = "bb.cooker.CookerExit" | 171 | cve_check_cleanup[eventmask] = "bb.event.BuildCompleted" |
118 | 172 | ||
119 | python cve_check_write_rootfs_manifest () { | 173 | python cve_check_write_rootfs_manifest () { |
120 | """ | 174 | """ |
@@ -122,115 +176,107 @@ python cve_check_write_rootfs_manifest () { | |||
122 | """ | 176 | """ |
123 | 177 | ||
124 | import shutil | 178 | import shutil |
179 | import json | ||
180 | from oe.rootfs import image_list_installed_packages | ||
181 | from oe.cve_check import cve_check_merge_jsons, update_symlinks | ||
125 | 182 | ||
126 | if d.getVar("CVE_CHECK_COPY_FILES") == "1": | 183 | if d.getVar("CVE_CHECK_COPY_FILES") == "1": |
127 | deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE") | 184 | deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE") |
128 | if os.path.exists(deploy_file): | 185 | if os.path.exists(deploy_file): |
129 | bb.utils.remove(deploy_file) | 186 | bb.utils.remove(deploy_file) |
130 | 187 | deploy_file_json = d.getVar("CVE_CHECK_RECIPE_FILE_JSON") | |
131 | if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE")): | 188 | if os.path.exists(deploy_file_json): |
132 | bb.note("Writing rootfs CVE manifest") | 189 | bb.utils.remove(deploy_file_json) |
133 | deploy_dir = d.getVar("DEPLOY_DIR_IMAGE") | 190 | |
134 | link_name = d.getVar("IMAGE_LINK_NAME") | 191 | # Create a list of relevant recipies |
192 | recipies = set() | ||
193 | for pkg in list(image_list_installed_packages(d)): | ||
194 | pkg_info = os.path.join(d.getVar('PKGDATA_DIR'), | ||
195 | 'runtime-reverse', pkg) | ||
196 | pkg_data = oe.packagedata.read_pkgdatafile(pkg_info) | ||
197 | recipies.add(pkg_data["PN"]) | ||
198 | |||
199 | bb.note("Writing rootfs CVE manifest") | ||
200 | deploy_dir = d.getVar("IMGDEPLOYDIR") | ||
201 | link_name = d.getVar("IMAGE_LINK_NAME") | ||
202 | |||
203 | json_data = {"version":"1", "package": []} | ||
204 | text_data = "" | ||
205 | enable_json = d.getVar("CVE_CHECK_FORMAT_JSON") == "1" | ||
206 | enable_text = d.getVar("CVE_CHECK_FORMAT_TEXT") == "1" | ||
207 | |||
208 | save_pn = d.getVar("PN") | ||
209 | |||
210 | for pkg in recipies: | ||
211 | # To be able to use the CVE_CHECK_RECIPE_FILE variable we have to evaluate | ||
212 | # it with each recipe's PN set in turn. | ||
213 | d.setVar("PN", pkg) | ||
214 | if enable_text: | ||
215 | pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE") | ||
216 | if os.path.exists(pkgfilepath): | ||
217 | with open(pkgfilepath) as pfile: | ||
218 | text_data += pfile.read() | ||
219 | |||
220 | if enable_json: | ||
221 | pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE_JSON") | ||
222 | if os.path.exists(pkgfilepath): | ||
223 | with open(pkgfilepath) as j: | ||
224 | data = json.load(j) | ||
225 | cve_check_merge_jsons(json_data, data) | ||
226 | |||
227 | d.setVar("PN", save_pn) | ||
228 | |||
229 | if enable_text: | ||
230 | link_path = os.path.join(deploy_dir, "%s.cve" % link_name) | ||
135 | manifest_name = d.getVar("CVE_CHECK_MANIFEST") | 231 | manifest_name = d.getVar("CVE_CHECK_MANIFEST") |
136 | cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE") | ||
137 | |||
138 | shutil.copyfile(cve_tmp_file, manifest_name) | ||
139 | 232 | ||
140 | if manifest_name and os.path.exists(manifest_name): | 233 | with open(manifest_name, "w") as f: |
141 | manifest_link = os.path.join(deploy_dir, "%s.cve" % link_name) | 234 | f.write(text_data) |
142 | # If we already have another manifest, update symlinks | ||
143 | if os.path.exists(os.path.realpath(manifest_link)): | ||
144 | os.remove(manifest_link) | ||
145 | os.symlink(os.path.basename(manifest_name), manifest_link) | ||
146 | bb.plain("Image CVE report stored in: %s" % manifest_name) | ||
147 | } | ||
148 | |||
149 | ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}" | ||
150 | do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}" | ||
151 | 235 | ||
152 | def get_patches_cves(d): | 236 | update_symlinks(manifest_name, link_path) |
153 | """ | 237 | bb.plain("Image CVE report stored in: %s" % manifest_name) |
154 | Get patches that solve CVEs using the "CVE: " tag. | ||
155 | """ | ||
156 | 238 | ||
157 | import re | 239 | if enable_json: |
240 | link_path = os.path.join(deploy_dir, "%s.json" % link_name) | ||
241 | manifest_name = d.getVar("CVE_CHECK_MANIFEST_JSON") | ||
158 | 242 | ||
159 | pn = d.getVar("PN") | 243 | with open(manifest_name, "w") as f: |
160 | cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+") | 244 | json.dump(json_data, f, indent=2) |
161 | |||
162 | # Matches last CVE-1234-211432 in the file name, also if written | ||
163 | # with small letters. Not supporting multiple CVE id's in a single | ||
164 | # file name. | ||
165 | cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)") | ||
166 | |||
167 | patched_cves = set() | ||
168 | bb.debug(2, "Looking for patches that solves CVEs for %s" % pn) | ||
169 | for url in src_patches(d): | ||
170 | patch_file = bb.fetch.decodeurl(url)[2] | ||
171 | |||
172 | if not os.path.isfile(patch_file): | ||
173 | bb.error("File Not found: %s" % patch_file) | ||
174 | raise FileNotFoundError | ||
175 | |||
176 | # Check patch file name for CVE ID | ||
177 | fname_match = cve_file_name_match.search(patch_file) | ||
178 | if fname_match: | ||
179 | cve = fname_match.group(1).upper() | ||
180 | patched_cves.add(cve) | ||
181 | bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file)) | ||
182 | |||
183 | with open(patch_file, "r", encoding="utf-8") as f: | ||
184 | try: | ||
185 | patch_text = f.read() | ||
186 | except UnicodeDecodeError: | ||
187 | bb.debug(1, "Failed to read patch %s using UTF-8 encoding" | ||
188 | " trying with iso8859-1" % patch_file) | ||
189 | f.close() | ||
190 | with open(patch_file, "r", encoding="iso8859-1") as f: | ||
191 | patch_text = f.read() | ||
192 | |||
193 | # Search for one or more "CVE: " lines | ||
194 | text_match = False | ||
195 | for match in cve_match.finditer(patch_text): | ||
196 | # Get only the CVEs without the "CVE: " tag | ||
197 | cves = patch_text[match.start()+5:match.end()] | ||
198 | for cve in cves.split(): | ||
199 | bb.debug(2, "Patch %s solves %s" % (patch_file, cve)) | ||
200 | patched_cves.add(cve) | ||
201 | text_match = True | ||
202 | 245 | ||
203 | if not fname_match and not text_match: | 246 | update_symlinks(manifest_name, link_path) |
204 | bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file) | 247 | bb.plain("Image CVE JSON report stored in: %s" % manifest_name) |
248 | } | ||
205 | 249 | ||
206 | return patched_cves | 250 | ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}" |
251 | do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}" | ||
252 | do_populate_sdk[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}" | ||
207 | 253 | ||
208 | def check_cves(d, patched_cves): | 254 | def check_cves(d, patched_cves): |
209 | """ | 255 | """ |
210 | Connect to the NVD database and find unpatched cves. | 256 | Connect to the NVD database and find unpatched cves. |
211 | """ | 257 | """ |
212 | from oe.cve_check import Version | 258 | from oe.cve_check import Version, convert_cve_version |
213 | 259 | ||
214 | pn = d.getVar("PN") | 260 | pn = d.getVar("PN") |
215 | real_pv = d.getVar("PV") | 261 | real_pv = d.getVar("PV") |
216 | suffix = d.getVar("CVE_VERSION_SUFFIX") | 262 | suffix = d.getVar("CVE_VERSION_SUFFIX") |
217 | 263 | ||
218 | cves_unpatched = [] | 264 | cves_unpatched = [] |
265 | cves_ignored = [] | ||
266 | cves_status = [] | ||
267 | cves_in_recipe = False | ||
219 | # CVE_PRODUCT can contain more than one product (eg. curl/libcurl) | 268 | # CVE_PRODUCT can contain more than one product (eg. curl/libcurl) |
220 | products = d.getVar("CVE_PRODUCT").split() | 269 | products = d.getVar("CVE_PRODUCT").split() |
221 | # If this has been unset then we're not scanning for CVEs here (for example, image recipes) | 270 | # If this has been unset then we're not scanning for CVEs here (for example, image recipes) |
222 | if not products: | 271 | if not products: |
223 | return ([], [], []) | 272 | return ([], [], [], []) |
224 | pv = d.getVar("CVE_VERSION").split("+git")[0] | 273 | pv = d.getVar("CVE_VERSION").split("+git")[0] |
225 | 274 | ||
226 | # If the recipe has been whitlisted we return empty lists | 275 | # If the recipe has been whitelisted we return empty lists |
227 | if pn in d.getVar("CVE_CHECK_PN_WHITELIST").split(): | 276 | if pn in d.getVar("CVE_CHECK_PN_WHITELIST").split(): |
228 | bb.note("Recipe has been whitelisted, skipping check") | 277 | bb.note("Recipe has been whitelisted, skipping check") |
229 | return ([], [], []) | 278 | return ([], [], [], []) |
230 | 279 | ||
231 | old_cve_whitelist = d.getVar("CVE_CHECK_CVE_WHITELIST") | ||
232 | if old_cve_whitelist: | ||
233 | bb.warn("CVE_CHECK_CVE_WHITELIST is deprecated, please use CVE_CHECK_WHITELIST.") | ||
234 | cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split() | 280 | cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split() |
235 | 281 | ||
236 | import sqlite3 | 282 | import sqlite3 |
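
generate_json_report above walks CVE_CHECK_SUMMARY_INDEX_PATH and folds each per-recipe fragment into one summary through oe.cve_check.cve_check_merge_jsons, which this diff does not show. A plausible sketch of such a merge, under a hypothetical helper name and assuming the version-1 layout written later by cve_write_data_json:

    def merge_cve_fragment(summary, fragment):
        # Each fragment carries one recipe's "package" entries; append any
        # entry whose name is not already present in the summary.
        known = {p["name"] for p in summary["package"]}
        for pkg in fragment["package"]:
            if pkg["name"] not in known:
                summary["package"].append(pkg)
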
@@ -239,28 +285,42 @@ def check_cves(d, patched_cves): | |||
239 | 285 | ||
240 | # For each of the known product names (e.g. curl has CPEs using curl and libcurl)... | 286 | # For each of the known product names (e.g. curl has CPEs using curl and libcurl)... |
241 | for product in products: | 287 | for product in products: |
288 | cves_in_product = False | ||
242 | if ":" in product: | 289 | if ":" in product: |
243 | vendor, product = product.split(":", 1) | 290 | vendor, product = product.split(":", 1) |
244 | else: | 291 | else: |
245 | vendor = "%" | 292 | vendor = "%" |
246 | 293 | ||
247 | # Find all relevant CVE IDs. | 294 | # Find all relevant CVE IDs. |
248 | for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)): | 295 | cve_cursor = conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)) |
296 | for cverow in cve_cursor: | ||
249 | cve = cverow[0] | 297 | cve = cverow[0] |
250 | 298 | ||
251 | if cve in cve_whitelist: | 299 | if cve in cve_whitelist: |
252 | bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve)) | 300 | bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve)) |
253 | # TODO: this should be in the report as 'whitelisted' | 301 | cves_ignored.append(cve) |
254 | patched_cves.add(cve) | ||
255 | continue | 302 | continue |
256 | elif cve in patched_cves: | 303 | elif cve in patched_cves: |
257 | bb.note("%s has been patched" % (cve)) | 304 | bb.note("%s has been patched" % (cve)) |
258 | continue | 305 | continue |
306 | # Write status once only for each product | ||
307 | if not cves_in_product: | ||
308 | cves_status.append([product, True]) | ||
309 | cves_in_product = True | ||
310 | cves_in_recipe = True | ||
259 | 311 | ||
260 | vulnerable = False | 312 | vulnerable = False |
261 | for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)): | 313 | ignored = False |
314 | |||
315 | product_cursor = conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)) | ||
316 | for row in product_cursor: | ||
262 | (_, _, _, version_start, operator_start, version_end, operator_end) = row | 317 | (_, _, _, version_start, operator_start, version_end, operator_end) = row |
263 | #bb.debug(2, "Evaluating row " + str(row)) | 318 | #bb.debug(2, "Evaluating row " + str(row)) |
319 | if cve in cve_whitelist: | ||
320 | ignored = True | ||
321 | |||
322 | version_start = convert_cve_version(version_start) | ||
323 | version_end = convert_cve_version(version_end) | ||
264 | 324 | ||
265 | if (operator_start == '=' and pv == version_start) or version_start == '-': | 325 | if (operator_start == '=' and pv == version_start) or version_start == '-': |
266 | vulnerable = True | 326 | vulnerable = True |
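
The vendor/product split above means each CVE_PRODUCT entry may carry an optional vendor qualifier; without one, the vendor matches anything ("%"). A hedged recipe sketch (the vendor example is illustrative):

    # Multiple CPE product names for one recipe, as with curl/libcurl:
    CVE_PRODUCT = "curl libcurl"
    # A vendor qualifier narrows the match when several vendors ship a
    # product with the same name:
    #CVE_PRODUCT = "gnu:gcc"
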
@@ -293,18 +353,27 @@ def check_cves(d, patched_cves): | |||
293 | vulnerable = vulnerable_start or vulnerable_end | 353 | vulnerable = vulnerable_start or vulnerable_end |
294 | 354 | ||
295 | if vulnerable: | 355 | if vulnerable: |
296 | bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve)) | 356 | if ignored: |
297 | cves_unpatched.append(cve) | 357 | bb.note("%s is ignored in %s-%s" % (cve, pn, real_pv)) |
358 | cves_ignored.append(cve) | ||
359 | else: | ||
360 | bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve)) | ||
361 | cves_unpatched.append(cve) | ||
298 | break | 362 | break |
363 | product_cursor.close() | ||
299 | 364 | ||
300 | if not vulnerable: | 365 | if not vulnerable: |
301 | bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve)) | 366 | bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve)) |
302 | # TODO: not patched but not vulnerable | ||
303 | patched_cves.add(cve) | 367 | patched_cves.add(cve) |
368 | cve_cursor.close() | ||
369 | |||
370 | if not cves_in_product: | ||
371 | bb.note("No CVE records found for product %s, pn %s" % (product, pn)) | ||
372 | cves_status.append([product, False]) | ||
304 | 373 | ||
305 | conn.close() | 374 | conn.close() |
306 | 375 | ||
307 | return (list(cve_whitelist), list(patched_cves), cves_unpatched) | 376 | return (list(cves_ignored), list(patched_cves), cves_unpatched, cves_status) |
308 | 377 | ||
309 | def get_cve_info(d, cves): | 378 | def get_cve_info(d, cves): |
310 | """ | 379 | """ |
@@ -314,21 +383,23 @@ def get_cve_info(d, cves): | |||
314 | import sqlite3 | 383 | import sqlite3 |
315 | 384 | ||
316 | cve_data = {} | 385 | cve_data = {} |
317 | conn = sqlite3.connect(d.getVar("CVE_CHECK_DB_FILE")) | 386 | db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro") |
387 | conn = sqlite3.connect(db_file, uri=True) | ||
318 | 388 | ||
319 | for cve in cves: | 389 | for cve in cves: |
320 | for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)): | 390 | cursor = conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)) |
391 | for row in cursor: | ||
321 | cve_data[row[0]] = {} | 392 | cve_data[row[0]] = {} |
322 | cve_data[row[0]]["summary"] = row[1] | 393 | cve_data[row[0]]["summary"] = row[1] |
323 | cve_data[row[0]]["scorev2"] = row[2] | 394 | cve_data[row[0]]["scorev2"] = row[2] |
324 | cve_data[row[0]]["scorev3"] = row[3] | 395 | cve_data[row[0]]["scorev3"] = row[3] |
325 | cve_data[row[0]]["modified"] = row[4] | 396 | cve_data[row[0]]["modified"] = row[4] |
326 | cve_data[row[0]]["vector"] = row[5] | 397 | cve_data[row[0]]["vector"] = row[5] |
327 | 398 | cursor.close() | |
328 | conn.close() | 399 | conn.close() |
329 | return cve_data | 400 | return cve_data |
330 | 401 | ||
331 | def cve_write_data(d, patched, unpatched, whitelisted, cve_data): | 402 | def cve_write_data_text(d, patched, unpatched, whitelisted, cve_data): |
332 | """ | 403 | """ |
333 | Write CVE information to WORKDIR, to CVE_CHECK_DIR, and to the | 404 | Write CVE information to WORKDIR, to CVE_CHECK_DIR, and to the 
334 | CVE manifest if enabled. | 405 | CVE manifest if enabled. 
@@ -338,20 +409,38 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data): | |||
338 | fdir_name = d.getVar("FILE_DIRNAME") | 409 | fdir_name = d.getVar("FILE_DIRNAME") |
339 | layer = fdir_name.split("/")[-3] | 410 | layer = fdir_name.split("/")[-3] |
340 | 411 | ||
341 | nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId=" | 412 | include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split() |
413 | exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split() | ||
414 | |||
415 | report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1" | ||
416 | |||
417 | if exclude_layers and layer in exclude_layers: | ||
418 | return | ||
419 | |||
420 | if include_layers and layer not in include_layers: | ||
421 | return | ||
422 | |||
423 | # Early exit, the text format does not report packages without CVEs | ||
424 | if not patched+unpatched+whitelisted: | ||
425 | return | ||
426 | |||
427 | nvd_link = "https://nvd.nist.gov/vuln/detail/" | ||
342 | write_string = "" | 428 | write_string = "" |
343 | unpatched_cves = [] | 429 | unpatched_cves = [] |
344 | bb.utils.mkdirhier(os.path.dirname(cve_file)) | 430 | bb.utils.mkdirhier(os.path.dirname(cve_file)) |
345 | 431 | ||
346 | for cve in sorted(cve_data): | 432 | for cve in sorted(cve_data): |
347 | is_patched = cve in patched | 433 | is_patched = cve in patched |
348 | if is_patched and (d.getVar("CVE_CHECK_REPORT_PATCHED") != "1"): | 434 | is_ignored = cve in whitelisted |
435 | |||
436 | if (is_patched or is_ignored) and not report_all: | ||
349 | continue | 437 | continue |
438 | |||
350 | write_string += "LAYER: %s\n" % layer | 439 | write_string += "LAYER: %s\n" % layer |
351 | write_string += "PACKAGE NAME: %s\n" % d.getVar("PN") | 440 | write_string += "PACKAGE NAME: %s\n" % d.getVar("PN") |
352 | write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV")) | 441 | write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV")) |
353 | write_string += "CVE: %s\n" % cve | 442 | write_string += "CVE: %s\n" % cve |
354 | if cve in whitelisted: | 443 | if is_ignored: |
355 | write_string += "CVE STATUS: Whitelisted\n" | 444 | write_string += "CVE STATUS: Whitelisted\n" |
356 | elif is_patched: | 445 | elif is_patched: |
357 | write_string += "CVE STATUS: Patched\n" | 446 | write_string += "CVE STATUS: Patched\n" |
@@ -364,23 +453,138 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data): | |||
364 | write_string += "VECTOR: %s\n" % cve_data[cve]["vector"] | 453 | write_string += "VECTOR: %s\n" % cve_data[cve]["vector"] |
365 | write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve) | 454 | write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve) |
366 | 455 | ||
367 | if unpatched_cves: | 456 | if unpatched_cves and d.getVar("CVE_CHECK_SHOW_WARNINGS") == "1": |
368 | bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file)) | 457 | bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file)) |
369 | 458 | ||
370 | if write_string: | 459 | with open(cve_file, "w") as f: |
371 | with open(cve_file, "w") as f: | 460 | bb.note("Writing file %s with CVE information" % cve_file) |
372 | bb.note("Writing file %s with CVE information" % cve_file) | 461 | f.write(write_string) |
462 | |||
463 | if d.getVar("CVE_CHECK_COPY_FILES") == "1": | ||
464 | deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE") | ||
465 | bb.utils.mkdirhier(os.path.dirname(deploy_file)) | ||
466 | with open(deploy_file, "w") as f: | ||
467 | f.write(write_string) | ||
468 | |||
469 | if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1": | ||
470 | cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR") | ||
471 | bb.utils.mkdirhier(cvelogpath) | ||
472 | |||
473 | with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f: | ||
474 | f.write("%s" % write_string) | ||
475 | |||
476 | def cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file): | ||
477 | """ | ||
478 | Write CVE information in the JSON format: to WORKDIR and to | ||
479 | CVE_CHECK_DIR; if the CVE manifest is enabled, also write fragment | ||
480 | files that will be assembled at the end in cve_check_write_rootfs_manifest. | ||
481 | """ | ||
482 | |||
483 | import json | ||
484 | |||
485 | write_string = json.dumps(output, indent=2) | ||
486 | with open(direct_file, "w") as f: | ||
487 | bb.note("Writing file %s with CVE information" % direct_file) | ||
488 | f.write(write_string) | ||
489 | |||
490 | if d.getVar("CVE_CHECK_COPY_FILES") == "1": | ||
491 | bb.utils.mkdirhier(os.path.dirname(deploy_file)) | ||
492 | with open(deploy_file, "w") as f: | ||
493 | f.write(write_string) | ||
494 | |||
495 | if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1": | ||
496 | cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR") | ||
497 | index_path = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH") | ||
498 | bb.utils.mkdirhier(cvelogpath) | ||
499 | fragment_file = os.path.basename(deploy_file) | ||
500 | fragment_path = os.path.join(cvelogpath, fragment_file) | ||
501 | with open(fragment_path, "w") as f: | ||
373 | f.write(write_string) | 502 | f.write(write_string) |
503 | with open(index_path, "a+") as f: | ||
504 | f.write("%s\n" % fragment_path) | ||
505 | |||
506 | def cve_write_data_json(d, patched, unpatched, ignored, cve_data, cve_status): | ||
507 | """ | ||
508 | Prepare CVE data for the JSON format, then write it. | ||
509 | """ | ||
510 | |||
511 | output = {"version":"1", "package": []} | ||
512 | nvd_link = "https://nvd.nist.gov/vuln/detail/" | ||
513 | |||
514 | fdir_name = d.getVar("FILE_DIRNAME") | ||
515 | layer = fdir_name.split("/")[-3] | ||
516 | |||
517 | include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split() | ||
518 | exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split() | ||
519 | |||
520 | report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1" | ||
521 | |||
522 | if exclude_layers and layer in exclude_layers: | ||
523 | return | ||
524 | |||
525 | if include_layers and layer not in include_layers: | ||
526 | return | ||
527 | |||
528 | unpatched_cves = [] | ||
529 | |||
530 | product_data = [] | ||
531 | for s in cve_status: | ||
532 | p = {"product": s[0], "cvesInRecord": "Yes"} | ||
533 | if not s[1]: | ||
534 | p["cvesInRecord"] = "No" | ||
535 | product_data.append(p) | ||
536 | |||
537 | package_version = "%s%s" % (d.getVar("EXTENDPE"), d.getVar("PV")) | ||
538 | package_data = { | ||
539 | "name" : d.getVar("PN"), | ||
540 | "layer" : layer, | ||
541 | "version" : package_version, | ||
542 | "products": product_data | ||
543 | } | ||
544 | cve_list = [] | ||
545 | |||
546 | for cve in sorted(cve_data): | ||
547 | is_patched = cve in patched | ||
548 | is_ignored = cve in ignored | ||
549 | status = "Unpatched" | ||
550 | if (is_patched or is_ignored) and not report_all: | ||
551 | continue | ||
552 | if is_ignored: | ||
553 | status = "Ignored" | ||
554 | elif is_patched: | ||
555 | status = "Patched" | ||
556 | else: | ||
557 | # default value of status is Unpatched | ||
558 | unpatched_cves.append(cve) | ||
559 | |||
560 | issue_link = "%s%s" % (nvd_link, cve) | ||
374 | 561 | ||
375 | if d.getVar("CVE_CHECK_COPY_FILES") == "1": | 562 | cve_item = { |
376 | deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE") | 563 | "id" : cve, |
377 | bb.utils.mkdirhier(os.path.dirname(deploy_file)) | 564 | "summary" : cve_data[cve]["summary"], |
378 | with open(deploy_file, "w") as f: | 565 | "scorev2" : cve_data[cve]["scorev2"], |
379 | f.write(write_string) | 566 | "scorev3" : cve_data[cve]["scorev3"], |
567 | "vector" : cve_data[cve]["vector"], | ||
568 | "status" : status, | ||
569 | "link": issue_link | ||
570 | } | ||
571 | cve_list.append(cve_item) | ||
380 | 572 | ||
381 | if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1": | 573 | package_data["issue"] = cve_list |
382 | cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR") | 574 | output["package"].append(package_data) |
383 | bb.utils.mkdirhier(cvelogpath) | 575 | |
576 | direct_file = d.getVar("CVE_CHECK_LOG_JSON") | ||
577 | deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE_JSON") | ||
578 | manifest_file = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON") | ||
579 | |||
580 | cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file) | ||
581 | |||
582 | def cve_write_data(d, patched, unpatched, ignored, cve_data, status): | ||
583 | """ | ||
584 | Write CVE data in each enabled format. | ||
585 | """ | ||
384 | 586 | ||
385 | with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f: | 587 | if d.getVar("CVE_CHECK_FORMAT_TEXT") == "1": |
386 | f.write("%s" % write_string) | 588 | cve_write_data_text(d, patched, unpatched, ignored, cve_data) |
589 | if d.getVar("CVE_CHECK_FORMAT_JSON") == "1": | ||
590 | cve_write_data_json(d, patched, unpatched, ignored, cve_data, status) | ||
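
Downstream tooling can consume the version-1 JSON written above directly, whether from a per-recipe file, the summary, or the image manifest. A minimal sketch that prints the unpatched issues (list_unpatched is a hypothetical helper):

    import json

    def list_unpatched(report_path):
        # "package" entries follow the layout built in cve_write_data_json.
        with open(report_path) as f:
            report = json.load(f)
        for pkg in report["package"]:
            for issue in pkg.get("issue", []):
                if issue["status"] == "Unpatched":
                    print("%s %s: %s (%s)" % (
                        pkg["name"], pkg["version"], issue["id"], issue["link"]))
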
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass index fdf7dc100f..76dd0b42ee 100644 --- a/meta/classes/devshell.bbclass +++ b/meta/classes/devshell.bbclass | |||
@@ -128,6 +128,7 @@ def devpyshell(d): | |||
128 | more = i.runsource(source, "<pyshell>") | 128 | more = i.runsource(source, "<pyshell>") |
129 | if not more: | 129 | if not more: |
130 | buf = [] | 130 | buf = [] |
131 | sys.stderr.flush() | ||
131 | prompt(more) | 132 | prompt(more) |
132 | except KeyboardInterrupt: | 133 | except KeyboardInterrupt: |
133 | i.write("\nKeyboardInterrupt\n") | 134 | i.write("\nKeyboardInterrupt\n") |
diff --git a/meta/classes/devtool-source.bbclass b/meta/classes/devtool-source.bbclass index 280d6009f3..41900e651f 100644 --- a/meta/classes/devtool-source.bbclass +++ b/meta/classes/devtool-source.bbclass | |||
@@ -199,6 +199,7 @@ python devtool_post_patch() { | |||
199 | # Run do_patch function with the override applied | 199 | # Run do_patch function with the override applied |
200 | localdata = bb.data.createCopy(d) | 200 | localdata = bb.data.createCopy(d) |
201 | localdata.setVar('OVERRIDES', ':'.join(no_overrides)) | 201 | localdata.setVar('OVERRIDES', ':'.join(no_overrides)) |
202 | localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides)) | ||
202 | bb.build.exec_func('do_patch', localdata) | 203 | bb.build.exec_func('do_patch', localdata) |
203 | rm_patches() | 204 | rm_patches() |
204 | # Now we need to reconcile the dev branch with the no-overrides one | 205 | # Now we need to reconcile the dev branch with the no-overrides one |
@@ -216,7 +217,8 @@ python devtool_post_patch() { | |||
216 | # Reset back to the initial commit on a new branch | 217 | # Reset back to the initial commit on a new branch |
217 | bb.process.run('git checkout %s -b devtool-override-%s' % (initial_rev, override), cwd=srcsubdir) | 218 | bb.process.run('git checkout %s -b devtool-override-%s' % (initial_rev, override), cwd=srcsubdir) |
218 | # Run do_patch function with the override applied | 219 | # Run do_patch function with the override applied |
219 | localdata.appendVar('OVERRIDES', ':%s' % override) | 220 | localdata.setVar('OVERRIDES', ':'.join(no_overrides + [override])) |
221 | localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides + [override])) | ||
220 | bb.build.exec_func('do_patch', localdata) | 222 | bb.build.exec_func('do_patch', localdata) |
221 | rm_patches() | 223 | rm_patches() |
222 | # Now we need to reconcile the new branch with the no-overrides one | 224 | # Now we need to reconcile the new branch with the no-overrides one |
diff --git a/meta/classes/devupstream.bbclass b/meta/classes/devupstream.bbclass index 7780c5482c..97e137cb40 100644 --- a/meta/classes/devupstream.bbclass +++ b/meta/classes/devupstream.bbclass | |||
@@ -4,7 +4,7 @@ | |||
4 | # | 4 | # |
5 | # Usage: | 5 | # Usage: |
6 | # BBCLASSEXTEND = "devupstream:target" | 6 | # BBCLASSEXTEND = "devupstream:target" |
7 | # SRC_URI_class-devupstream = "git://git.example.com/example" | 7 | # SRC_URI_class-devupstream = "git://git.example.com/example;branch=master" |
8 | # SRCREV_class-devupstream = "abcdef" | 8 | # SRCREV_class-devupstream = "abcdef" |
9 | # | 9 | # |
10 | # If the first entry in SRC_URI is a git: URL then S is rewritten to | 10 | # If the first entry in SRC_URI is a git: URL then S is rewritten to |
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass index 1d7300d65b..9c9451e528 100644 --- a/meta/classes/externalsrc.bbclass +++ b/meta/classes/externalsrc.bbclass | |||
@@ -60,7 +60,7 @@ python () { | |||
60 | if externalsrcbuild: | 60 | if externalsrcbuild: |
61 | d.setVar('B', externalsrcbuild) | 61 | d.setVar('B', externalsrcbuild) |
62 | else: | 62 | else: |
63 | d.setVar('B', '${WORKDIR}/${BPN}-${PV}/') | 63 | d.setVar('B', '${WORKDIR}/${BPN}-${PV}') |
64 | 64 | ||
65 | local_srcuri = [] | 65 | local_srcuri = [] |
66 | fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d) | 66 | fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d) |
@@ -108,6 +108,15 @@ python () { | |||
108 | if local_srcuri and task in fetch_tasks: | 108 | if local_srcuri and task in fetch_tasks: |
109 | continue | 109 | continue |
110 | bb.build.deltask(task, d) | 110 | bb.build.deltask(task, d) |
111 | if bb.data.inherits_class('reproducible_build', d) and task == 'do_unpack': | ||
112 | # The reproducible_build's create_source_date_epoch_stamp function must | ||
113 | # be run after the source is available and before the | ||
114 | # do_deploy_source_date_epoch task. In the normal case, it's attached | ||
115 | # to do_unpack as a postfuncs, but since we removed do_unpack (above) | ||
116 | # we need to move the function elsewhere. The easiest thing to do is | ||
117 | # move it into the prefuncs of the do_deploy_source_date_epoch task. | ||
118 | # This is safe, as externalsrc runs with the source already unpacked. | ||
119 | d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ') | ||
111 | 120 | ||
112 | d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ") | 121 | d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ") |
113 | d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ") | 122 | d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ") |
@@ -198,8 +207,8 @@ def srctree_hash_files(d, srcdir=None): | |||
198 | try: | 207 | try: |
199 | git_dir = os.path.join(s_dir, | 208 | git_dir = os.path.join(s_dir, |
200 | subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip()) | 209 | subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip()) |
201 | top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], | 210 | top_git_dir = os.path.join(d.getVar("TOPDIR"), |
202 | stderr=subprocess.DEVNULL).decode("utf-8").rstrip()) | 211 | subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip()) |
203 | if git_dir == top_git_dir: | 212 | if git_dir == top_git_dir: |
204 | git_dir = None | 213 | git_dir = None |
205 | except subprocess.CalledProcessError: | 214 | except subprocess.CalledProcessError: |
@@ -216,14 +225,16 @@ def srctree_hash_files(d, srcdir=None): | |||
216 | env['GIT_INDEX_FILE'] = tmp_index.name | 225 | env['GIT_INDEX_FILE'] = tmp_index.name |
217 | subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env) | 226 | subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env) |
218 | git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8") | 227 | git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8") |
219 | submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8") | 228 | if os.path.exists(os.path.join(s_dir, ".gitmodules")) and os.path.getsize(os.path.join(s_dir, ".gitmodules")) > 0: |
220 | for line in submodule_helper.splitlines(): | 229 | submodule_helper = subprocess.check_output(["git", "config", "--file", ".gitmodules", "--get-regexp", "path"], cwd=s_dir, env=env).decode("utf-8") |
221 | module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1]) | 230 | for line in submodule_helper.splitlines(): |
222 | proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) | 231 | module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1]) |
223 | proc.communicate() | 232 | if os.path.isdir(module_dir): |
224 | proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) | 233 | proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) |
225 | stdout, _ = proc.communicate() | 234 | proc.communicate() |
226 | git_sha1 += stdout.decode("utf-8") | 235 | proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) |
236 | stdout, _ = proc.communicate() | ||
237 | git_sha1 += stdout.decode("utf-8") | ||
227 | sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest() | 238 | sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest() |
228 | with open(oe_hash_file, 'w') as fobj: | 239 | with open(oe_hash_file, 'w') as fobj: |
229 | fobj.write(sha1) | 240 | fobj.write(sha1) |
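
The replacement submodule enumeration reads .gitmodules with git config --get-regexp, which prints one "submodule.<name>.path <path>" pair per line; rsplit(maxsplit=1) keeps just the trailing path field. A hedged illustration of the parsing:

    # Typical output of: git config --file .gitmodules --get-regexp path
    #   submodule.libfoo.path lib/foo
    line = "submodule.libfoo.path lib/foo"
    module_dir = line.rsplit(maxsplit=1)[1]   # -> "lib/foo"
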
diff --git a/meta/classes/fs-uuid.bbclass b/meta/classes/fs-uuid.bbclass index 9b53dfba7a..731ea575bd 100644 --- a/meta/classes/fs-uuid.bbclass +++ b/meta/classes/fs-uuid.bbclass | |||
@@ -4,7 +4,7 @@ | |||
4 | def get_rootfs_uuid(d): | 4 | def get_rootfs_uuid(d): |
5 | import subprocess | 5 | import subprocess |
6 | rootfs = d.getVar('ROOTFS') | 6 | rootfs = d.getVar('ROOTFS') |
7 | output = subprocess.check_output(['tune2fs', '-l', rootfs]) | 7 | output = subprocess.check_output(['tune2fs', '-l', rootfs], text=True) |
8 | for line in output.split('\n'): | 8 | for line in output.split('\n'): |
9 | if line.startswith('Filesystem UUID:'): | 9 | if line.startswith('Filesystem UUID:'): |
10 | uuid = line.split()[-1] | 10 | uuid = line.split()[-1] |
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass index e6c3591479..21b1a0271e 100644 --- a/meta/classes/go.bbclass +++ b/meta/classes/go.bbclass | |||
@@ -118,7 +118,7 @@ go_do_install() { | |||
118 | tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \ | 118 | tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \ |
119 | tar -C ${D}${libdir}/go --no-same-owner -xf - | 119 | tar -C ${D}${libdir}/go --no-same-owner -xf - |
120 | 120 | ||
121 | if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then | 121 | if ls ${B}/${GO_BUILD_BINDIR}/* >/dev/null 2>/dev/null ; then |
122 | install -d ${D}${bindir} | 122 | install -d ${D}${bindir} |
123 | install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/ | 123 | install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/ |
124 | fi | 124 | fi |
@@ -145,11 +145,11 @@ FILES_${PN}-staticdev = "${libdir}/go/pkg" | |||
145 | 145 | ||
146 | INSANE_SKIP_${PN} += "ldflags" | 146 | INSANE_SKIP_${PN} += "ldflags" |
147 | 147 | ||
148 | # Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips | 148 | # Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but |
149 | # doesn't support -buildmode=pie, so skip the QA checking for mips and its | 149 | # windows/mips/riscv don't support -buildmode=pie, so skip the QA checking 
150 | # variants. | 150 | # for windows/mips/riscv and their variants. |
151 | python() { | 151 | python() { |
152 | if 'mips' in d.getVar('TARGET_ARCH') or 'riscv' in d.getVar('TARGET_ARCH'): | 152 | if 'mips' in d.getVar('TARGET_ARCH') or 'riscv' in d.getVar('TARGET_ARCH') or 'windows' in d.getVar('TARGET_GOOS'): |
153 | d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel") | 153 | d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel") |
154 | else: | 154 | else: |
155 | d.appendVar('GOBUILDFLAGS', ' -buildmode=pie') | 155 | d.appendVar('GOBUILDFLAGS', ' -buildmode=pie') |
diff --git a/meta/classes/goarch.bbclass b/meta/classes/goarch.bbclass index 1099b95769..ecd3044edd 100644 --- a/meta/classes/goarch.bbclass +++ b/meta/classes/goarch.bbclass | |||
@@ -114,6 +114,8 @@ def go_map_mips(a, f, d): | |||
114 | def go_map_os(o, d): | 114 | def go_map_os(o, d): |
115 | if o.startswith('linux'): | 115 | if o.startswith('linux'): |
116 | return 'linux' | 116 | return 'linux' |
117 | elif o.startswith('mingw'): | ||
118 | return 'windows' | ||
117 | return o | 119 | return o |
118 | 120 | ||
119 | 121 | ||
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass index 54058b350d..2fa839b0de 100644 --- a/meta/classes/image-live.bbclass +++ b/meta/classes/image-live.bbclass | |||
@@ -30,7 +30,7 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \ | |||
30 | virtual/kernel:do_deploy \ | 30 | virtual/kernel:do_deploy \ |
31 | ${MLPREFIX}syslinux:do_populate_sysroot \ | 31 | ${MLPREFIX}syslinux:do_populate_sysroot \ |
32 | syslinux-native:do_populate_sysroot \ | 32 | syslinux-native:do_populate_sysroot \ |
33 | ${PN}:do_image_${@d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')} \ | 33 | ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')) if d.getVar('ROOTFS') else ''} \ |
34 | " | 34 | " |
35 | 35 | ||
36 | 36 | ||
@@ -261,4 +261,4 @@ python do_bootimg() { | |||
261 | do_bootimg[subimages] = "hddimg iso" | 261 | do_bootimg[subimages] = "hddimg iso" |
262 | do_bootimg[imgsuffix] = "." | 262 | do_bootimg[imgsuffix] = "." |
263 | 263 | ||
264 | addtask bootimg before do_image_complete | 264 | addtask bootimg before do_image_complete after do_rootfs |
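
The reworked dependency relies on BitBake's inline-Python expansion: the ${@...} expression collapses to an empty string when ROOTFS is unset, so images without a root filesystem no longer pull in a nonexistent do_image_* task. The same idiom in a minimal, hypothetical form:

    # Depend on another recipe's task only when a driving variable is set:
    do_mytask[depends] += "${@'myrecipe:do_deploy' if d.getVar('MY_SWITCH') else ''}"
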
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass index 459d872b4a..fbf7206d04 100644 --- a/meta/classes/image.bbclass +++ b/meta/classes/image.bbclass | |||
@@ -38,7 +38,7 @@ IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs stateless-rootfs em | |||
38 | # Generate companion debugfs? | 38 | # Generate companion debugfs? |
39 | IMAGE_GEN_DEBUGFS ?= "0" | 39 | IMAGE_GEN_DEBUGFS ?= "0" |
40 | 40 | ||
41 | # These pacackages will be installed as additional into debug rootfs | 41 | # These packages will additionally be installed into the debug rootfs 
42 | IMAGE_INSTALL_DEBUGFS ?= "" | 42 | IMAGE_INSTALL_DEBUGFS ?= "" |
43 | 43 | ||
44 | # These packages will be removed from a read-only rootfs after all other | 44 | # These packages will be removed from a read-only rootfs after all other |
@@ -115,7 +115,7 @@ def rootfs_command_variables(d): | |||
115 | 'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS'] | 115 | 'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS'] |
116 | 116 | ||
117 | python () { | 117 | python () { |
118 | variables = rootfs_command_variables(d) + sdk_command_variables(d) | 118 | variables = rootfs_command_variables(d) |
119 | for var in variables: | 119 | for var in variables: |
120 | if d.getVar(var, False): | 120 | if d.getVar(var, False): |
121 | d.setVarFlag(var, 'func', '1') | 121 | d.setVarFlag(var, 'func', '1') |
@@ -124,7 +124,7 @@ python () { | |||
124 | def rootfs_variables(d): | 124 | def rootfs_variables(d): |
125 | from oe.rootfs import variable_depends | 125 | from oe.rootfs import variable_depends |
126 | variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE', | 126 | variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE', |
127 | 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', | 127 | 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE', |
128 | 'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS', | 128 | 'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS', |
129 | 'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS', | 129 | 'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS', |
130 | 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS'] | 130 | 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS'] |
@@ -176,10 +176,15 @@ IMAGE_LINGUAS ?= "de-de fr-fr en-gb" | |||
176 | 176 | ||
177 | LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}" | 177 | LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}" |
178 | 178 | ||
179 | # By default, create a locale archive | ||
180 | IMAGE_LOCALES_ARCHIVE ?= '1' | ||
181 | |||
179 | # Prefer image, but use the fallback files for lookups if the image ones | 182 | # Prefer image, but use the fallback files for lookups if the image ones |
180 | # aren't yet available. | 183 | # aren't yet available. |
181 | PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}" | 184 | PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}" |
182 | 185 | ||
186 | PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/intercept_scripts,${WORKDIR}/oe-rootfs-repo,${WORKDIR}/sstate-build-image_complete" | ||
187 | |||
183 | PACKAGE_EXCLUDE ??= "" | 188 | PACKAGE_EXCLUDE ??= "" |
184 | PACKAGE_EXCLUDE[type] = "list" | 189 | PACKAGE_EXCLUDE[type] = "list" |
185 | 190 | ||
@@ -306,7 +311,7 @@ fakeroot python do_image_qa () { | |||
306 | except oe.utils.ImageQAFailed as e: | 311 | except oe.utils.ImageQAFailed as e: |
307 | qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description) | 312 | qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description) |
308 | except Exception as e: | 313 | except Exception as e: |
309 | qamsg = qamsg + '\tImage QA function %s failed\n' % cmd | 314 | qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (cmd, e) |
310 | 315 | ||
311 | if qamsg: | 316 | if qamsg: |
312 | imgname = d.getVar('IMAGE_NAME') | 317 | imgname = d.getVar('IMAGE_NAME') |
@@ -432,7 +437,7 @@ python () { | |||
432 | localdata.delVar('DATETIME') | 437 | localdata.delVar('DATETIME') |
433 | localdata.delVar('DATE') | 438 | localdata.delVar('DATE') |
434 | localdata.delVar('TMPDIR') | 439 | localdata.delVar('TMPDIR') |
435 | vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude', True) or '').split() | 440 | vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude') or '').split() |
436 | for dep in vardepsexclude: | 441 | for dep in vardepsexclude: |
437 | localdata.delVar(dep) | 442 | localdata.delVar(dep) |
438 | 443 | ||
@@ -660,7 +665,7 @@ reproducible_final_image_task () { | |||
660 | fi | 665 | fi |
661 | # Set mtime of all files to a reproducible value | 666 | # Set mtime of all files to a reproducible value |
662 | bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS" | 667 | bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS" |
663 | find ${IMAGE_ROOTFS} -exec touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS {} \; | 668 | find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS |
664 | fi | 669 | fi |
665 | } | 670 | } |
666 | 671 | ||
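
The reproducible_final_image_task hunk above replaces one touch process per file (find -exec ... \;) with a single batched invocation via xargs -0, which is markedly faster on large root filesystems. A standalone sketch of the same idiom, with the rootfs path and epoch value as placeholder assumptions:

    # slow form: forks one touch per file
    find /path/to/rootfs -exec touch -h --date=@1600000000 {} \;
    # batched form, as the class now does; -print0/-0 keeps odd filenames safe
    find /path/to/rootfs -print0 | xargs -0 touch -h --date=@1600000000
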
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass index ff42ac9423..6dc0e094d0 100644 --- a/meta/classes/image_types.bbclass +++ b/meta/classes/image_types.bbclass | |||
@@ -240,7 +240,7 @@ EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLO | |||
240 | EXTRA_IMAGECMD_ext2 ?= "-i 4096" | 240 | EXTRA_IMAGECMD_ext2 ?= "-i 4096" |
241 | EXTRA_IMAGECMD_ext3 ?= "-i 4096" | 241 | EXTRA_IMAGECMD_ext3 ?= "-i 4096" |
242 | EXTRA_IMAGECMD_ext4 ?= "-i 4096" | 242 | EXTRA_IMAGECMD_ext4 ?= "-i 4096" |
243 | EXTRA_IMAGECMD_btrfs ?= "-n 4096" | 243 | EXTRA_IMAGECMD_btrfs ?= "-n 4096 --shrink" |
244 | EXTRA_IMAGECMD_f2fs ?= "" | 244 | EXTRA_IMAGECMD_f2fs ?= "" |
245 | 245 | ||
246 | do_image_cpio[depends] += "cpio-native:do_populate_sysroot" | 246 | do_image_cpio[depends] += "cpio-native:do_populate_sysroot" |
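
The btrfs change adds --shrink, which tells mkfs.btrfs to trim the filesystem to its minimal size after populating it. Since EXTRA_IMAGECMD_btrfs remains a weak default, a build that wants the old fixed-size behaviour can override it; a sketch, assuming a btrfs-progs recent enough to know the option at all:

    # local.conf: keep the 4096-byte node size but drop the new shrinking
    EXTRA_IMAGECMD_btrfs = "-n 4096"
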
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass index b5c6b2186f..d6da53252f 100644 --- a/meta/classes/insane.bbclass +++ b/meta/classes/insane.bbclass | |||
@@ -174,7 +174,7 @@ def package_qa_check_useless_rpaths(file, name, d, elf, messages): | |||
174 | if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir): | 174 | if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir): |
175 | # The dynamic linker searches both these places anyway. There is no point in | 175 | # The dynamic linker searches both these places anyway. There is no point in |
176 | # looking there again. | 176 | # looking there again. |
177 | package_qa_add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath)) | 177 | package_qa_add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d, name), rpath)) |
178 | 178 | ||
179 | QAPATHTEST[dev-so] = "package_qa_check_dev" | 179 | QAPATHTEST[dev-so] = "package_qa_check_dev" |
180 | def package_qa_check_dev(path, name, d, elf, messages): | 180 | def package_qa_check_dev(path, name, d, elf, messages): |
@@ -183,8 +183,8 @@ def package_qa_check_dev(path, name, d, elf, messages): | |||
183 | """ | 183 | """ |
184 | 184 | ||
185 | if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path): | 185 | if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path): |
186 | package_qa_add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package contains symlink .so: %s path '%s'" % \ | 186 | package_qa_add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package %s contains symlink .so '%s'" % \ |
187 | (name, package_qa_clean_path(path,d))) | 187 | (name, package_qa_clean_path(path, d, name))) |
188 | 188 | ||
189 | QAPATHTEST[dev-elf] = "package_qa_check_dev_elf" | 189 | QAPATHTEST[dev-elf] = "package_qa_check_dev_elf" |
190 | def package_qa_check_dev_elf(path, name, d, elf, messages): | 190 | def package_qa_check_dev_elf(path, name, d, elf, messages): |
@@ -194,8 +194,8 @@ def package_qa_check_dev_elf(path, name, d, elf, messages): | |||
194 | install link-time .so files that are linker scripts. | 194 | install link-time .so files that are linker scripts. |
195 | """ | 195 | """ |
196 | if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf: | 196 | if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf: |
197 | package_qa_add_message(messages, "dev-elf", "-dev package contains non-symlink .so: %s path '%s'" % \ | 197 | package_qa_add_message(messages, "dev-elf", "-dev package %s contains non-symlink .so '%s'" % \ |
198 | (name, package_qa_clean_path(path,d))) | 198 | (name, package_qa_clean_path(path, d, name))) |
199 | 199 | ||
200 | QAPATHTEST[staticdev] = "package_qa_check_staticdev" | 200 | QAPATHTEST[staticdev] = "package_qa_check_staticdev" |
201 | def package_qa_check_staticdev(path, name, d, elf, messages): | 201 | def package_qa_check_staticdev(path, name, d, elf, messages): |
@@ -208,7 +208,7 @@ def package_qa_check_staticdev(path, name, d, elf, messages): | |||
208 | 208 | ||
209 | if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path: | 209 | if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path: |
210 | package_qa_add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \ | 210 | package_qa_add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \ |
211 | (name, package_qa_clean_path(path,d))) | 211 | (name, package_qa_clean_path(path,d, name))) |
212 | 212 | ||
213 | QAPATHTEST[mime] = "package_qa_check_mime" | 213 | QAPATHTEST[mime] = "package_qa_check_mime" |
214 | def package_qa_check_mime(path, name, d, elf, messages): | 214 | def package_qa_check_mime(path, name, d, elf, messages): |
@@ -452,12 +452,14 @@ def package_qa_check_buildpaths(path, name, d, elf, messages): | |||
452 | """ | 452 | """ |
453 | Check for build paths inside target files and error if not found in the whitelist | 453 | Check for build paths inside target files and error if not found in the whitelist |
454 | """ | 454 | """ |
455 | import stat | ||
455 | # Ignore .debug files, not interesting | 456 | # Ignore .debug files, not interesting |
456 | if path.find(".debug") != -1: | 457 | if path.find(".debug") != -1: |
457 | return | 458 | return |
458 | 459 | ||
459 | # Ignore symlinks | 460 | # Ignore symlinks/devs/fifos |
460 | if os.path.islink(path): | 461 | mode = os.lstat(path).st_mode |
462 | if stat.S_ISLNK(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode) or stat.S_ISCHR(mode) or stat.S_ISSOCK(mode): | ||
461 | return | 463 | return |
462 | 464 | ||
463 | tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8") | 465 | tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8") |
@@ -945,7 +947,7 @@ def package_qa_check_host_user(path, name, d, elf, messages): | |||
945 | 947 | ||
946 | dest = d.getVar('PKGDEST') | 948 | dest = d.getVar('PKGDEST') |
947 | pn = d.getVar('PN') | 949 | pn = d.getVar('PN') |
948 | home = os.path.join(dest, 'home') | 950 | home = os.path.join(dest, name, 'home') |
949 | if path == home or path.startswith(home + os.sep): | 951 | if path == home or path.startswith(home + os.sep): |
950 | return | 952 | return |
951 | 953 | ||
@@ -1012,26 +1014,6 @@ python do_package_qa () { | |||
1012 | logdir = d.getVar('T') | 1014 | logdir = d.getVar('T') |
1013 | pn = d.getVar('PN') | 1015 | pn = d.getVar('PN') |
1014 | 1016 | ||
1015 | # Check the compile log for host contamination | ||
1016 | compilelog = os.path.join(logdir,"log.do_compile") | ||
1017 | |||
1018 | if os.path.exists(compilelog): | ||
1019 | statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog | ||
1020 | if subprocess.call(statement, shell=True) == 0: | ||
1021 | msg = "%s: The compile log indicates that host include and/or library paths were used.\n \ | ||
1022 | Please check the log '%s' for more information." % (pn, compilelog) | ||
1023 | package_qa_handle_error("compile-host-path", msg, d) | ||
1024 | |||
1025 | # Check the install log for host contamination | ||
1026 | installlog = os.path.join(logdir,"log.do_install") | ||
1027 | |||
1028 | if os.path.exists(installlog): | ||
1029 | statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog | ||
1030 | if subprocess.call(statement, shell=True) == 0: | ||
1031 | msg = "%s: The install log indicates that host include and/or library paths were used.\n \ | ||
1032 | Please check the log '%s' for more information." % (pn, installlog) | ||
1033 | package_qa_handle_error("install-host-path", msg, d) | ||
1034 | |||
1035 | # Scan the packages... | 1017 | # Scan the packages... |
1036 | pkgdest = d.getVar('PKGDEST') | 1018 | pkgdest = d.getVar('PKGDEST') |
1037 | packages = set((d.getVar('PACKAGES') or '').split()) | 1019 | packages = set((d.getVar('PACKAGES') or '').split()) |
@@ -1210,7 +1192,7 @@ python do_qa_configure() { | |||
1210 | if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe: | 1192 | if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe: |
1211 | bb.note("Checking autotools environment for common misconfiguration") | 1193 | bb.note("Checking autotools environment for common misconfiguration") |
1212 | for root, dirs, files in os.walk(workdir): | 1194 | for root, dirs, files in os.walk(workdir): |
1213 | statement = "grep -q -F -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s" % \ | 1195 | statement = "grep -q -F -e 'is unsafe for cross-compilation' %s" % \ |
1214 | os.path.join(root,"config.log") | 1196 | os.path.join(root,"config.log") |
1215 | if "config.log" in files: | 1197 | if "config.log" in files: |
1216 | if subprocess.call(statement, shell=True) == 0: | 1198 | if subprocess.call(statement, shell=True) == 0: |
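
With the compile/install log greps removed from do_package_qa above (the 'CROSS COMPILE Badness' pattern is gone entirely, and do_qa_configure now only matches 'is unsafe for cross-compilation'), an equivalent manual spot-check is still possible from the shell; a sketch, with the log path glob as an example only:

    # roughly what the deleted do_package_qa code used to do
    grep -e 'is unsafe for cross-compilation' tmp/work/*/*/*/temp/log.do_compile
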
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass index 07ec242e63..4cd08b96fb 100644 --- a/meta/classes/kernel-arch.bbclass +++ b/meta/classes/kernel-arch.bbclass | |||
@@ -61,8 +61,8 @@ HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}" | |||
61 | TARGET_AR_KERNEL_ARCH ?= "" | 61 | TARGET_AR_KERNEL_ARCH ?= "" |
62 | HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}" | 62 | HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}" |
63 | 63 | ||
64 | KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH}" | 64 | KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} -fdebug-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH}" |
65 | KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}" | 65 | KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}" |
66 | KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}" | 66 | KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}" |
67 | TOOLCHAIN = "gcc" | 67 | TOOLCHAIN ?= "gcc" |
68 | 68 | ||
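
Turning TOOLCHAIN into a weak default (?=) means a distro or recipe can now pick a different kernel toolchain without fighting the hard assignment; a sketch, assuming the chosen value is actually provided by your layers:

    # local.conf or kernel .bbappend: select an alternative toolchain
    TOOLCHAIN = "clang"
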
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass index 81dda8003f..27a4905ac6 100644 --- a/meta/classes/kernel-devicetree.bbclass +++ b/meta/classes/kernel-devicetree.bbclass | |||
@@ -1,14 +1,20 @@ | |||
1 | # Support for device tree generation | 1 | # Support for device tree generation |
2 | PACKAGES_append = " \ | 2 | python () { |
3 | ${KERNEL_PACKAGE_NAME}-devicetree \ | 3 | if not bb.data.inherits_class('nopackages', d): |
4 | ${@[d.getVar('KERNEL_PACKAGE_NAME') + '-image-zimage-bundle', ''][d.getVar('KERNEL_DEVICETREE_BUNDLE') != '1']} \ | 4 | d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-devicetree") |
5 | " | 5 | if d.getVar('KERNEL_DEVICETREE_BUNDLE') == '1': |
6 | d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle") | ||
7 | } | ||
8 | |||
6 | FILES_${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo" | 9 | FILES_${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo" |
7 | FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin" | 10 | FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin" |
8 | 11 | ||
9 | # Generate kernel+devicetree bundle | 12 | # Generate kernel+devicetree bundle |
10 | KERNEL_DEVICETREE_BUNDLE ?= "0" | 13 | KERNEL_DEVICETREE_BUNDLE ?= "0" |
11 | 14 | ||
15 | # dtc flags passed via DTC_FLAGS env variable | ||
16 | KERNEL_DTC_FLAGS ?= "" | ||
17 | |||
12 | normalize_dtb () { | 18 | normalize_dtb () { |
13 | dtb="$1" | 19 | dtb="$1" |
14 | if echo $dtb | grep -q '/dts/'; then | 20 | if echo $dtb | grep -q '/dts/'; then |
@@ -50,6 +56,10 @@ do_configure_append() { | |||
50 | } | 56 | } |
51 | 57 | ||
52 | do_compile_append() { | 58 | do_compile_append() { |
59 | if [ -n "${KERNEL_DTC_FLAGS}" ]; then | ||
60 | export DTC_FLAGS="${KERNEL_DTC_FLAGS}" | ||
61 | fi | ||
62 | |||
53 | for dtbf in ${KERNEL_DEVICETREE}; do | 63 | for dtbf in ${KERNEL_DEVICETREE}; do |
54 | dtb=`normalize_dtb "$dtbf"` | 64 | dtb=`normalize_dtb "$dtbf"` |
55 | oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} | 65 | oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} |
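
KERNEL_DTC_FLAGS is simply exported as DTC_FLAGS before the device trees are compiled, so any dtc option can be injected. A sketch, where "-@" (emit a __symbols__ node so overlays can be applied) is an illustrative value:

    # machine .conf: pass extra flags to dtc when building KERNEL_DEVICETREE
    KERNEL_DTC_FLAGS = "-@"
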
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass index 72b05ff8d1..7c7bcd3fc0 100644 --- a/meta/classes/kernel-fitimage.bbclass +++ b/meta/classes/kernel-fitimage.bbclass | |||
@@ -1,5 +1,7 @@ | |||
1 | inherit kernel-uboot kernel-artifact-names uboot-sign | 1 | inherit kernel-uboot kernel-artifact-names uboot-sign |
2 | 2 | ||
3 | KERNEL_IMAGETYPE_REPLACEMENT = "" | ||
4 | |||
3 | python __anonymous () { | 5 | python __anonymous () { |
4 | kerneltypes = d.getVar('KERNEL_IMAGETYPES') or "" | 6 | kerneltypes = d.getVar('KERNEL_IMAGETYPES') or "" |
5 | if 'fitImage' in kerneltypes.split(): | 7 | if 'fitImage' in kerneltypes.split(): |
@@ -21,6 +23,8 @@ python __anonymous () { | |||
21 | else: | 23 | else: |
22 | replacementtype = "zImage" | 24 | replacementtype = "zImage" |
23 | 25 | ||
26 | d.setVar("KERNEL_IMAGETYPE_REPLACEMENT", replacementtype) | ||
27 | |||
24 | # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal | 28 | # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal |
25 | # to kernel.bbclass . We have to override it, since we pack zImage | 29 | # to kernel.bbclass . We have to override it, since we pack zImage |
26 | # (at least for now) into the fitImage . | 30 | # (at least for now) into the fitImage . |
@@ -45,6 +49,8 @@ python __anonymous () { | |||
45 | if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'): | 49 | if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'): |
46 | uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot' | 50 | uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot' |
47 | d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn) | 51 | d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn) |
52 | if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1": | ||
53 | d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' %s:do_populate_sysroot' % uboot_pn) | ||
48 | } | 54 | } |
49 | 55 | ||
50 | # Options for the device tree compiler passed to mkimage '-D' feature: | 56 | # Options for the device tree compiler passed to mkimage '-D' feature: |
@@ -56,6 +62,12 @@ FIT_HASH_ALG ?= "sha256" | |||
56 | # fitImage Signature Algo | 62 | # fitImage Signature Algo |
57 | FIT_SIGN_ALG ?= "rsa2048" | 63 | FIT_SIGN_ALG ?= "rsa2048" |
58 | 64 | ||
65 | # fitImage Padding Algo | ||
66 | FIT_PAD_ALG ?= "pkcs-1.5" | ||
67 | |||
68 | # Arguments passed to mkimage for signing | ||
69 | UBOOT_MKIMAGE_SIGN_ARGS ?= "" | ||
70 | |||
59 | # | 71 | # |
60 | # Emit the fitImage ITS header | 72 | # Emit the fitImage ITS header |
61 | # | 73 | # |
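
The hunk above introduces two new signing knobs: FIT_PAD_ALG ends up as the padding property of the configuration signature node, and UBOOT_MKIMAGE_SIGN_ARGS is appended verbatim to the mkimage re-sign invocation. A local.conf sketch, where the comment string is purely illustrative:

    FIT_PAD_ALG = "pkcs-1.5"
    # mkimage's -c attaches a comment to the signature node
    UBOOT_MKIMAGE_SIGN_ARGS = "-c 'built by example CI'"
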
@@ -124,7 +136,7 @@ fitimage_emit_section_kernel() { | |||
124 | fi | 136 | fi |
125 | 137 | ||
126 | cat << EOF >> ${1} | 138 | cat << EOF >> ${1} |
127 | kernel@${2} { | 139 | kernel-${2} { |
128 | description = "Linux kernel"; | 140 | description = "Linux kernel"; |
129 | data = /incbin/("${3}"); | 141 | data = /incbin/("${3}"); |
130 | type = "kernel"; | 142 | type = "kernel"; |
@@ -133,7 +145,7 @@ fitimage_emit_section_kernel() { | |||
133 | compression = "${4}"; | 145 | compression = "${4}"; |
134 | load = <${UBOOT_LOADADDRESS}>; | 146 | load = <${UBOOT_LOADADDRESS}>; |
135 | entry = <${ENTRYPOINT}>; | 147 | entry = <${ENTRYPOINT}>; |
136 | hash@1 { | 148 | hash-1 { |
137 | algo = "${kernel_csum}"; | 149 | algo = "${kernel_csum}"; |
138 | }; | 150 | }; |
139 | }; | 151 | }; |
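
This hunk, and the matching fdt/ramdisk/setup/conf/signature hunks below, rename the ITS nodes from the node@N unit-address form to node-N; recent dtc warns about unit addresses on nodes without a reg property, and newer U-Boot tooling expects the dash form. An abbreviated example of the emitted ITS after the change (values are placeholders):

    kernel-1 {
            description = "Linux kernel";
            hash-1 {
                    algo = "sha256";
            };
    };
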
@@ -160,14 +172,14 @@ fitimage_emit_section_dtb() { | |||
160 | dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;" | 172 | dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;" |
161 | fi | 173 | fi |
162 | cat << EOF >> ${1} | 174 | cat << EOF >> ${1} |
163 | fdt@${2} { | 175 | fdt-${2} { |
164 | description = "Flattened Device Tree blob"; | 176 | description = "Flattened Device Tree blob"; |
165 | data = /incbin/("${3}"); | 177 | data = /incbin/("${3}"); |
166 | type = "flat_dt"; | 178 | type = "flat_dt"; |
167 | arch = "${UBOOT_ARCH}"; | 179 | arch = "${UBOOT_ARCH}"; |
168 | compression = "none"; | 180 | compression = "none"; |
169 | ${dtb_loadline} | 181 | ${dtb_loadline} |
170 | hash@1 { | 182 | hash-1 { |
171 | algo = "${dtb_csum}"; | 183 | algo = "${dtb_csum}"; |
172 | }; | 184 | }; |
173 | }; | 185 | }; |
@@ -175,6 +187,43 @@ EOF | |||
175 | } | 187 | } |
176 | 188 | ||
177 | # | 189 | # |
190 | # Emit the fitImage ITS u-boot script section | ||
191 | # | ||
192 | # $1 ... .its filename | ||
193 | # $2 ... Image counter | ||
194 | # $3 ... Path to boot script image | ||
195 | fitimage_emit_section_boot_script() { | ||
196 | |||
197 | bootscr_csum="${FIT_HASH_ALG}" | ||
198 | bootscr_sign_algo="${FIT_SIGN_ALG}" | ||
199 | bootscr_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}" | ||
200 | |||
201 | cat << EOF >> $1 | ||
202 | bootscr-$2 { | ||
203 | description = "U-boot script"; | ||
204 | data = /incbin/("$3"); | ||
205 | type = "script"; | ||
206 | arch = "${UBOOT_ARCH}"; | ||
207 | compression = "none"; | ||
208 | hash-1 { | ||
209 | algo = "$bootscr_csum"; | ||
210 | }; | ||
211 | }; | ||
212 | EOF | ||
213 | |||
214 | if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$bootscr_sign_keyname" ] ; then | ||
215 | sed -i '$ d' $1 | ||
216 | cat << EOF >> $1 | ||
217 | signature-1 { | ||
218 | algo = "$bootscr_csum,$bootscr_sign_algo"; | ||
219 | key-name-hint = "$bootscr_sign_keyname"; | ||
220 | }; | ||
221 | }; | ||
222 | EOF | ||
223 | fi | ||
224 | } | ||
225 | |||
226 | # | ||
178 | # Emit the fitImage ITS setup section | 227 | # Emit the fitImage ITS setup section |
179 | # | 228 | # |
180 | # $1 ... .its filename | 229 | # $1 ... .its filename |
@@ -185,7 +234,7 @@ fitimage_emit_section_setup() { | |||
185 | setup_csum="${FIT_HASH_ALG}" | 234 | setup_csum="${FIT_HASH_ALG}" |
186 | 235 | ||
187 | cat << EOF >> ${1} | 236 | cat << EOF >> ${1} |
188 | setup@${2} { | 237 | setup-${2} { |
189 | description = "Linux setup.bin"; | 238 | description = "Linux setup.bin"; |
190 | data = /incbin/("${3}"); | 239 | data = /incbin/("${3}"); |
191 | type = "x86_setup"; | 240 | type = "x86_setup"; |
@@ -194,7 +243,7 @@ fitimage_emit_section_setup() { | |||
194 | compression = "none"; | 243 | compression = "none"; |
195 | load = <0x00090000>; | 244 | load = <0x00090000>; |
196 | entry = <0x00090000>; | 245 | entry = <0x00090000>; |
197 | hash@1 { | 246 | hash-1 { |
198 | algo = "${setup_csum}"; | 247 | algo = "${setup_csum}"; |
199 | }; | 248 | }; |
200 | }; | 249 | }; |
@@ -221,7 +270,7 @@ fitimage_emit_section_ramdisk() { | |||
221 | fi | 270 | fi |
222 | 271 | ||
223 | cat << EOF >> ${1} | 272 | cat << EOF >> ${1} |
224 | ramdisk@${2} { | 273 | ramdisk-${2} { |
225 | description = "${INITRAMFS_IMAGE}"; | 274 | description = "${INITRAMFS_IMAGE}"; |
226 | data = /incbin/("${3}"); | 275 | data = /incbin/("${3}"); |
227 | type = "ramdisk"; | 276 | type = "ramdisk"; |
@@ -230,7 +279,7 @@ fitimage_emit_section_ramdisk() { | |||
230 | compression = "none"; | 279 | compression = "none"; |
231 | ${ramdisk_loadline} | 280 | ${ramdisk_loadline} |
232 | ${ramdisk_entryline} | 281 | ${ramdisk_entryline} |
233 | hash@1 { | 282 | hash-1 { |
234 | algo = "${ramdisk_csum}"; | 283 | algo = "${ramdisk_csum}"; |
235 | }; | 284 | }; |
236 | }; | 285 | }; |
@@ -244,13 +293,15 @@ EOF | |||
244 | # $2 ... Linux kernel ID | 293 | # $2 ... Linux kernel ID |
245 | # $3 ... DTB image name | 294 | # $3 ... DTB image name |
246 | # $4 ... ramdisk ID | 295 | # $4 ... ramdisk ID |
247 | # $5 ... config ID | 296 | # $5 ... u-boot script ID |
248 | # $6 ... default flag | 297 | # $6 ... config ID |
298 | # $7 ... default flag | ||
249 | fitimage_emit_section_config() { | 299 | fitimage_emit_section_config() { |
250 | 300 | ||
251 | conf_csum="${FIT_HASH_ALG}" | 301 | conf_csum="${FIT_HASH_ALG}" |
252 | conf_sign_algo="${FIT_SIGN_ALG}" | 302 | conf_sign_algo="${FIT_SIGN_ALG}" |
253 | if [ -n "${UBOOT_SIGN_ENABLE}" ] ; then | 303 | conf_padding_algo="${FIT_PAD_ALG}" |
304 | if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then | ||
254 | conf_sign_keyname="${UBOOT_SIGN_KEYNAME}" | 305 | conf_sign_keyname="${UBOOT_SIGN_KEYNAME}" |
255 | fi | 306 | fi |
256 | 307 | ||
@@ -260,45 +311,53 @@ fitimage_emit_section_config() { | |||
260 | kernel_line="" | 311 | kernel_line="" |
261 | fdt_line="" | 312 | fdt_line="" |
262 | ramdisk_line="" | 313 | ramdisk_line="" |
314 | bootscr_line="" | ||
263 | setup_line="" | 315 | setup_line="" |
264 | default_line="" | 316 | default_line="" |
265 | 317 | ||
266 | if [ -n "${2}" ]; then | 318 | if [ -n "${2}" ]; then |
267 | conf_desc="Linux kernel" | 319 | conf_desc="Linux kernel" |
268 | sep=", " | 320 | sep=", " |
269 | kernel_line="kernel = \"kernel@${2}\";" | 321 | kernel_line="kernel = \"kernel-${2}\";" |
270 | fi | 322 | fi |
271 | 323 | ||
272 | if [ -n "${3}" ]; then | 324 | if [ -n "${3}" ]; then |
273 | conf_desc="${conf_desc}${sep}FDT blob" | 325 | conf_desc="${conf_desc}${sep}FDT blob" |
274 | sep=", " | 326 | sep=", " |
275 | fdt_line="fdt = \"fdt@${3}\";" | 327 | fdt_line="fdt = \"fdt-${3}\";" |
276 | fi | 328 | fi |
277 | 329 | ||
278 | if [ -n "${4}" ]; then | 330 | if [ -n "${4}" ]; then |
279 | conf_desc="${conf_desc}${sep}ramdisk" | 331 | conf_desc="${conf_desc}${sep}ramdisk" |
280 | sep=", " | 332 | sep=", " |
281 | ramdisk_line="ramdisk = \"ramdisk@${4}\";" | 333 | ramdisk_line="ramdisk = \"ramdisk-${4}\";" |
282 | fi | 334 | fi |
283 | 335 | ||
284 | if [ -n "${5}" ]; then | 336 | if [ -n "${5}" ]; then |
337 | conf_desc="${conf_desc}${sep}u-boot script" | ||
338 | sep=", " | ||
339 | bootscr_line="bootscr = \"bootscr-${5}\";" | ||
340 | fi | ||
341 | |||
342 | if [ -n "${6}" ]; then | ||
285 | conf_desc="${conf_desc}${sep}setup" | 343 | conf_desc="${conf_desc}${sep}setup" |
286 | setup_line="setup = \"setup@${5}\";" | 344 | setup_line="setup = \"setup-${6}\";" |
287 | fi | 345 | fi |
288 | 346 | ||
289 | if [ "${6}" = "1" ]; then | 347 | if [ "${7}" = "1" ]; then |
290 | default_line="default = \"conf@${3}\";" | 348 | default_line="default = \"conf-${3}\";" |
291 | fi | 349 | fi |
292 | 350 | ||
293 | cat << EOF >> ${1} | 351 | cat << EOF >> ${1} |
294 | ${default_line} | 352 | ${default_line} |
295 | conf@${3} { | 353 | conf-${3} { |
296 | description = "${6} ${conf_desc}"; | 354 | description = "${7} ${conf_desc}"; |
297 | ${kernel_line} | 355 | ${kernel_line} |
298 | ${fdt_line} | 356 | ${fdt_line} |
299 | ${ramdisk_line} | 357 | ${ramdisk_line} |
358 | ${bootscr_line} | ||
300 | ${setup_line} | 359 | ${setup_line} |
301 | hash@1 { | 360 | hash-1 { |
302 | algo = "${conf_csum}"; | 361 | algo = "${conf_csum}"; |
303 | }; | 362 | }; |
304 | EOF | 363 | EOF |
@@ -324,15 +383,21 @@ EOF | |||
324 | fi | 383 | fi |
325 | 384 | ||
326 | if [ -n "${5}" ]; then | 385 | if [ -n "${5}" ]; then |
386 | sign_line="${sign_line}${sep}\"bootscr\"" | ||
387 | sep=", " | ||
388 | fi | ||
389 | |||
390 | if [ -n "${6}" ]; then | ||
327 | sign_line="${sign_line}${sep}\"setup\"" | 391 | sign_line="${sign_line}${sep}\"setup\"" |
328 | fi | 392 | fi |
329 | 393 | ||
330 | sign_line="${sign_line};" | 394 | sign_line="${sign_line};" |
331 | 395 | ||
332 | cat << EOF >> ${1} | 396 | cat << EOF >> ${1} |
333 | signature@1 { | 397 | signature-1 { |
334 | algo = "${conf_csum},${conf_sign_algo}"; | 398 | algo = "${conf_csum},${conf_sign_algo}"; |
335 | key-name-hint = "${conf_sign_keyname}"; | 399 | key-name-hint = "${conf_sign_keyname}"; |
400 | padding = "${conf_padding_algo}"; | ||
336 | ${sign_line} | 401 | ${sign_line} |
337 | }; | 402 | }; |
338 | EOF | 403 | EOF |
@@ -355,6 +420,7 @@ fitimage_assemble() { | |||
355 | DTBS="" | 420 | DTBS="" |
356 | ramdiskcount=${3} | 421 | ramdiskcount=${3} |
357 | setupcount="" | 422 | setupcount="" |
423 | bootscr_id="" | ||
358 | rm -f ${1} arch/${ARCH}/boot/${2} | 424 | rm -f ${1} arch/${ARCH}/boot/${2} |
359 | 425 | ||
360 | fitimage_emit_fit_header ${1} | 426 | fitimage_emit_fit_header ${1} |
@@ -365,7 +431,7 @@ fitimage_assemble() { | |||
365 | fitimage_emit_section_maint ${1} imagestart | 431 | fitimage_emit_section_maint ${1} imagestart |
366 | 432 | ||
367 | uboot_prep_kimage | 433 | uboot_prep_kimage |
368 | fitimage_emit_section_kernel ${1} "${kernelcount}" linux.bin "${linux_comp}" | 434 | fitimage_emit_section_kernel $1 $kernelcount linux.bin "$linux_comp" |
369 | 435 | ||
370 | # | 436 | # |
371 | # Step 2: Prepare a DTB image section | 437 | # Step 2: Prepare a DTB image section |
@@ -399,7 +465,21 @@ fitimage_assemble() { | |||
399 | fi | 465 | fi |
400 | 466 | ||
401 | # | 467 | # |
402 | # Step 3: Prepare a setup section. (For x86) | 468 | # Step 3: Prepare a u-boot script section |
469 | # | ||
470 | |||
471 | if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then | ||
472 | if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then | ||
473 | cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B} | ||
474 | bootscr_id="${UBOOT_ENV_BINARY}" | ||
475 | fitimage_emit_section_boot_script ${1} "${bootscr_id}" ${UBOOT_ENV_BINARY} | ||
476 | else | ||
477 | bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found." | ||
478 | fi | ||
479 | fi | ||
480 | |||
481 | # | ||
482 | # Step 4: Prepare a setup section. (For x86) | ||
403 | # | 483 | # |
404 | if [ -e arch/${ARCH}/boot/setup.bin ]; then | 484 | if [ -e arch/${ARCH}/boot/setup.bin ]; then |
405 | setupcount=1 | 485 | setupcount=1 |
@@ -407,9 +487,9 @@ fitimage_assemble() { | |||
407 | fi | 487 | fi |
408 | 488 | ||
409 | # | 489 | # |
410 | # Step 4: Prepare a ramdisk section. | 490 | # Step 5: Prepare a ramdisk section. |
411 | # | 491 | # |
412 | if [ "x${ramdiskcount}" = "x1" ] ; then | 492 | if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then |
413 | # Find and use the first initramfs image archive type we find | 493 | # Find and use the first initramfs image archive type we find |
414 | for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do | 494 | for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do |
415 | initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}" | 495 | initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}" |
@@ -430,7 +510,7 @@ fitimage_assemble() { | |||
430 | fi | 510 | fi |
431 | 511 | ||
432 | # | 512 | # |
433 | # Step 5: Prepare a configurations section | 513 | # Step 6: Prepare a configurations section |
434 | # | 514 | # |
435 | fitimage_emit_section_maint ${1} confstart | 515 | fitimage_emit_section_maint ${1} confstart |
436 | 516 | ||
@@ -439,9 +519,9 @@ fitimage_assemble() { | |||
439 | for DTB in ${DTBS}; do | 519 | for DTB in ${DTBS}; do |
440 | dtb_ext=${DTB##*.} | 520 | dtb_ext=${DTB##*.} |
441 | if [ "${dtb_ext}" = "dtbo" ]; then | 521 | if [ "${dtb_ext}" = "dtbo" ]; then |
442 | fitimage_emit_section_config ${1} "" "${DTB}" "" "" "`expr ${i} = ${dtbcount}`" | 522 | fitimage_emit_section_config ${1} "" "${DTB}" "" "${bootscr_id}" "" "`expr ${i} = ${dtbcount}`" |
443 | else | 523 | else |
444 | fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`" | 524 | fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${bootscr_id}" "${setupcount}" "`expr ${i} = ${dtbcount}`" |
445 | fi | 525 | fi |
446 | i=`expr ${i} + 1` | 526 | i=`expr ${i} + 1` |
447 | done | 527 | done |
@@ -452,7 +532,7 @@ fitimage_assemble() { | |||
452 | fitimage_emit_section_maint ${1} fitend | 532 | fitimage_emit_section_maint ${1} fitend |
453 | 533 | ||
454 | # | 534 | # |
455 | # Step 6: Assemble the image | 535 | # Step 7: Assemble the image |
456 | # | 536 | # |
457 | uboot-mkimage \ | 537 | uboot-mkimage \ |
458 | ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \ | 538 | ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \ |
@@ -460,7 +540,7 @@ fitimage_assemble() { | |||
460 | arch/${ARCH}/boot/${2} | 540 | arch/${ARCH}/boot/${2} |
461 | 541 | ||
462 | # | 542 | # |
463 | # Step 7: Sign the image and add public key to U-Boot dtb | 543 | # Step 8: Sign the image and add public key to U-Boot dtb |
464 | # | 544 | # |
465 | if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then | 545 | if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then |
466 | add_key_to_u_boot="" | 546 | add_key_to_u_boot="" |
@@ -474,7 +554,8 @@ fitimage_assemble() { | |||
474 | ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \ | 554 | ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \ |
475 | -F -k "${UBOOT_SIGN_KEYDIR}" \ | 555 | -F -k "${UBOOT_SIGN_KEYDIR}" \ |
476 | $add_key_to_u_boot \ | 556 | $add_key_to_u_boot \ |
477 | -r arch/${ARCH}/boot/${2} | 557 | -r arch/${ARCH}/boot/${2} \ |
558 | ${UBOOT_MKIMAGE_SIGN_ARGS} | ||
478 | fi | 559 | fi |
479 | } | 560 | } |
480 | 561 | ||
@@ -491,7 +572,11 @@ do_assemble_fitimage_initramfs() { | |||
491 | if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \ | 572 | if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \ |
492 | test -n "${INITRAMFS_IMAGE}" ; then | 573 | test -n "${INITRAMFS_IMAGE}" ; then |
493 | cd ${B} | 574 | cd ${B} |
494 | fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1 | 575 | if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then |
576 | fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage "" | ||
577 | else | ||
578 | fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1 | ||
579 | fi | ||
495 | fi | 580 | fi |
496 | } | 581 | } |
497 | 582 | ||
@@ -502,22 +587,32 @@ kernel_do_deploy[vardepsexclude] = "DATETIME" | |||
502 | kernel_do_deploy_append() { | 587 | kernel_do_deploy_append() { |
503 | # Update deploy directory | 588 | # Update deploy directory |
504 | if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then | 589 | if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then |
505 | echo "Copying fit-image.its source file..." | 590 | if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then |
506 | install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its" | 591 | echo "Copying fit-image.its source file..." |
507 | ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}" | 592 | install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its" |
593 | if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then | ||
594 | ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}" | ||
595 | fi | ||
508 | 596 | ||
509 | echo "Copying linux.bin file..." | 597 | echo "Copying linux.bin file..." |
510 | install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin | 598 | install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin |
511 | ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}" | 599 | if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then |
600 | ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}" | ||
601 | fi | ||
602 | fi | ||
512 | 603 | ||
513 | if [ -n "${INITRAMFS_IMAGE}" ]; then | 604 | if [ -n "${INITRAMFS_IMAGE}" ]; then |
514 | echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..." | 605 | echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..." |
515 | install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its" | 606 | install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its" |
516 | ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}" | 607 | ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}" |
517 | 608 | ||
518 | echo "Copying fitImage-${INITRAMFS_IMAGE} file..." | 609 | if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then |
519 | install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin" | 610 | echo "Copying fitImage-${INITRAMFS_IMAGE} file..." |
520 | ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}" | 611 | install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin" |
612 | if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then | ||
613 | ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}" | ||
614 | fi | ||
615 | fi | ||
521 | fi | 616 | fi |
522 | if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then | 617 | if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then |
523 | # UBOOT_DTB_IMAGE is a realfile, but we can't use | 618 | # UBOOT_DTB_IMAGE is a realfile, but we can't use |
@@ -527,3 +622,13 @@ kernel_do_deploy_append() { | |||
527 | fi | 622 | fi |
528 | fi | 623 | fi |
529 | } | 624 | } |
625 | |||
626 | # For initramfs bundles, the anonymous function below removes | ||
627 | # do_assemble_fitimage: FIT generation is handled entirely by | ||
628 | # do_assemble_fitimage_initramfs, so the separate task is unneeded | ||
629 | # and must not be part of the tasks to be executed. | ||
630 | python () { | ||
631 | d.appendVarFlag('do_compile', 'vardeps', ' INITRAMFS_IMAGE_BUNDLE') | ||
632 | if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1": | ||
633 | bb.build.deltask('do_assemble_fitimage', d) | ||
634 | } | ||
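
Taken together, the kernel-fitimage changes make INITRAMFS_IMAGE_BUNDLE = "1" produce a single fitImage whose kernel already contains the initramfs: do_assemble_fitimage is deleted, do_assemble_fitimage_initramfs assembles the bundled image after do_bundle_initramfs, and do_install/do_deploy skip the not-yet-existing artifact accordingly. A typical configuration sketch (the image name is an example):

    KERNEL_IMAGETYPES = "fitImage"
    INITRAMFS_IMAGE = "core-image-minimal-initramfs"
    INITRAMFS_IMAGE_BUNDLE = "1"
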
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass index ec5fb7b1de..2abbc2ff66 100644 --- a/meta/classes/kernel-yocto.bbclass +++ b/meta/classes/kernel-yocto.bbclass | |||
@@ -105,6 +105,8 @@ do_kernel_metadata() { | |||
105 | cd ${S} | 105 | cd ${S} |
106 | export KMETA=${KMETA} | 106 | export KMETA=${KMETA} |
107 | 107 | ||
108 | bbnote "do_kernel_metadata: for summary/debug, set KCONF_AUDIT_LEVEL > 0" | ||
109 | |||
108 | # if kernel tools are available in-tree, they are preferred | 110 | # if kernel tools are available in-tree, they are preferred |
109 | # and are placed on the path before any external tools. Unless | 111 | # and are placed on the path before any external tools. Unless |
110 | # the external tools flag is set, in that case we do nothing. | 112 | # the external tools flag is set, in that case we do nothing. |
@@ -192,7 +194,7 @@ do_kernel_metadata() { | |||
192 | # SRC_URI. If they were supplied, we convert them into include directives | 194 | # SRC_URI. If they were supplied, we convert them into include directives |
193 | # for the update part of the process | 195 | # for the update part of the process |
194 | for f in ${feat_dirs}; do | 196 | for f in ${feat_dirs}; do |
195 | if [ -d "${WORKDIR}/$f/meta" ]; then | 197 | if [ -d "${WORKDIR}/$f/kernel-meta" ]; then |
196 | includes="$includes -I${WORKDIR}/$f/kernel-meta" | 198 | includes="$includes -I${WORKDIR}/$f/kernel-meta" |
197 | elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then | 199 | elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then |
198 | includes="$includes -I${WORKDIR}/../oe-local-files/$f" | 200 | includes="$includes -I${WORKDIR}/../oe-local-files/$f" |
@@ -252,6 +254,23 @@ do_kernel_metadata() { | |||
252 | bbfatal_log "Could not generate configuration queue for ${KMACHINE}." | 254 | bbfatal_log "Could not generate configuration queue for ${KMACHINE}." |
253 | fi | 255 | fi |
254 | fi | 256 | fi |
257 | |||
258 | if [ ${KCONF_AUDIT_LEVEL} -gt 0 ]; then | ||
259 | bbnote "kernel meta data summary for ${KMACHINE} (${LINUX_KERNEL_TYPE}):" | ||
260 | bbnote "======================================================================" | ||
261 | if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then | ||
262 | bbnote "Non kernel-cache (external) bsp" | ||
263 | fi | ||
264 | bbnote "BSP entry point / definition: $bsp_definition" | ||
265 | if [ -n "$in_tree_defconfig" ]; then | ||
266 | bbnote "KBUILD_DEFCONFIG: ${KBUILD_DEFCONFIG}" | ||
267 | fi | ||
268 | bbnote "Fragments from SRC_URI: $sccs_from_src_uri" | ||
269 | bbnote "KERNEL_FEATURES: $KERNEL_FEATURES_FINAL" | ||
270 | bbnote "Final scc/cfg list: $sccs_defconfig $bsp_definition $sccs $KERNEL_FEATURES_FINAL" | ||
271 | fi | ||
272 | |||
273 | set -e | ||
255 | } | 274 | } |
256 | 275 | ||
257 | do_patch() { | 276 | do_patch() { |
@@ -281,6 +300,8 @@ do_patch() { | |||
281 | fi | 300 | fi |
282 | done | 301 | done |
283 | fi | 302 | fi |
303 | |||
304 | set -e | ||
284 | } | 305 | } |
285 | 306 | ||
286 | do_kernel_checkout() { | 307 | do_kernel_checkout() { |
@@ -303,6 +324,21 @@ do_kernel_checkout() { | |||
303 | fi | 324 | fi |
304 | fi | 325 | fi |
305 | cd ${S} | 326 | cd ${S} |
327 | |||
328 | # convert any remote branches to local tracking ones | ||
329 | for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do | ||
330 | b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`; | ||
331 | git show-ref --quiet --verify -- "refs/heads/$b" | ||
332 | if [ $? -ne 0 ]; then | ||
333 | git branch $b $i > /dev/null | ||
334 | fi | ||
335 | done | ||
336 | |||
337 | # Create a working tree copy of the kernel by checking out a branch | ||
338 | machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}" | ||
339 | |||
340 | # checkout and clobber any unimportant files | ||
341 | git checkout -f ${machine_branch} | ||
306 | else | 342 | else |
307 | # case: we have no git repository at all. | 343 | # case: we have no git repository at all. |
308 | # To support low bandwidth options for building the kernel, we'll just | 344 | # To support low bandwidth options for building the kernel, we'll just |
@@ -325,20 +361,7 @@ do_kernel_checkout() { | |||
325 | git clean -d -f | 361 | git clean -d -f |
326 | fi | 362 | fi |
327 | 363 | ||
328 | # convert any remote branches to local tracking ones | 364 | set -e |
329 | for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do | ||
330 | b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`; | ||
331 | git show-ref --quiet --verify -- "refs/heads/$b" | ||
332 | if [ $? -ne 0 ]; then | ||
333 | git branch $b $i > /dev/null | ||
334 | fi | ||
335 | done | ||
336 | |||
337 | # Create a working tree copy of the kernel by checking out a branch | ||
338 | machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}" | ||
339 | |||
340 | # checkout and clobber any unimportant files | ||
341 | git checkout -f ${machine_branch} | ||
342 | } | 365 | } |
343 | do_kernel_checkout[dirs] = "${S}" | 366 | do_kernel_checkout[dirs] = "${S}" |
344 | 367 | ||
@@ -506,6 +529,8 @@ do_validate_branches() { | |||
506 | kgit-s2q --clean | 529 | kgit-s2q --clean |
507 | fi | 530 | fi |
508 | fi | 531 | fi |
532 | |||
533 | set -e | ||
509 | } | 534 | } |
510 | 535 | ||
511 | OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT" | 536 | OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT" |
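
The new metadata summary only fires when KCONF_AUDIT_LEVEL is greater than zero, as the bbnote hint at the top of do_kernel_metadata says. A one-line sketch to enable it:

    # local.conf: print BSP entry point, fragment and KERNEL_FEATURES summary
    KCONF_AUDIT_LEVEL = "1"
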
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass index 83a574efcd..ca7530095e 100644 --- a/meta/classes/kernel.bbclass +++ b/meta/classes/kernel.bbclass | |||
@@ -75,7 +75,7 @@ python __anonymous () { | |||
75 | # KERNEL_IMAGETYPES may contain a mixture of image types supported directly | 75 | # KERNEL_IMAGETYPES may contain a mixture of image types supported directly |
76 | # by the kernel build system and types which are created by post-processing | 76 | # by the kernel build system and types which are created by post-processing |
77 | # the output of the kernel build system (e.g. compressing vmlinux -> | 77 | # the output of the kernel build system (e.g. compressing vmlinux -> |
78 | # vmlinux.gz in kernel_do_compile()). | 78 | # vmlinux.gz in kernel_do_transform_kernel()). |
79 | # KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported | 79 | # KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported |
80 | # directly by the kernel build system. | 80 | # directly by the kernel build system. |
81 | if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'): | 81 | if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'): |
@@ -91,6 +91,8 @@ python __anonymous () { | |||
91 | imagedest = d.getVar('KERNEL_IMAGEDEST') | 91 | imagedest = d.getVar('KERNEL_IMAGEDEST') |
92 | 92 | ||
93 | for type in types.split(): | 93 | for type in types.split(): |
94 | if bb.data.inherits_class('nopackages', d): | ||
95 | continue | ||
94 | typelower = type.lower() | 96 | typelower = type.lower() |
95 | d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower)) | 97 | d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower)) |
96 | d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type) | 98 | d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type) |
@@ -104,6 +106,8 @@ python __anonymous () { | |||
104 | # standalone for use by wic and other tools. | 106 | # standalone for use by wic and other tools. |
105 | if image: | 107 | if image: |
106 | d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete') | 108 | d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete') |
109 | if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')): | ||
110 | bb.build.addtask('do_transform_bundled_initramfs', 'do_deploy', 'do_bundle_initramfs', d) | ||
107 | 111 | ||
108 | # NOTE: setting INITRAMFS_TASK is for backward compatibility | 112 | # NOTE: setting INITRAMFS_TASK is for backward compatibility |
109 | # The preferred method is to set INITRAMFS_IMAGE, because | 113 | # The preferred method is to set INITRAMFS_IMAGE, because |
@@ -139,13 +143,14 @@ do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILD | |||
139 | do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}" | 143 | do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}" |
140 | python do_symlink_kernsrc () { | 144 | python do_symlink_kernsrc () { |
141 | s = d.getVar("S") | 145 | s = d.getVar("S") |
142 | if s[-1] == '/': | ||
143 | # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail | ||
144 | s=s[:-1] | ||
145 | kernsrc = d.getVar("STAGING_KERNEL_DIR") | 146 | kernsrc = d.getVar("STAGING_KERNEL_DIR") |
146 | if s != kernsrc: | 147 | if s != kernsrc: |
147 | bb.utils.mkdirhier(kernsrc) | 148 | bb.utils.mkdirhier(kernsrc) |
148 | bb.utils.remove(kernsrc, recurse=True) | 149 | bb.utils.remove(kernsrc, recurse=True) |
150 | if s[-1] == '/': | ||
151 | # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as | ||
152 | # directory name and fail | ||
153 | s = s[:-1] | ||
149 | if d.getVar("EXTERNALSRC"): | 154 | if d.getVar("EXTERNALSRC"): |
150 | # With EXTERNALSRC S will not be wiped so we can symlink to it | 155 | # With EXTERNALSRC S will not be wiped so we can symlink to it |
151 | os.symlink(s, kernsrc) | 156 | os.symlink(s, kernsrc) |
@@ -194,6 +199,8 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}" | |||
194 | KERNEL_EXTRA_ARGS ?= "" | 199 | KERNEL_EXTRA_ARGS ?= "" |
195 | 200 | ||
196 | EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}"" | 201 | EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}"" |
202 | EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX} ${BUILD_CXXFLAGS} ${BUILD_LDFLAGS}"" | ||
203 | |||
197 | KERNEL_ALT_IMAGETYPE ??= "" | 204 | KERNEL_ALT_IMAGETYPE ??= "" |
198 | 205 | ||
199 | copy_initramfs() { | 206 | copy_initramfs() { |
@@ -276,6 +283,14 @@ do_bundle_initramfs () { | |||
276 | } | 283 | } |
277 | do_bundle_initramfs[dirs] = "${B}" | 284 | do_bundle_initramfs[dirs] = "${B}" |
278 | 285 | ||
286 | kernel_do_transform_bundled_initramfs() { | ||
287 | # vmlinux.gz is not built by kernel | ||
288 | if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then | ||
289 | gzip -9cn < ${KERNEL_OUTPUT_DIR}/vmlinux.initramfs > ${KERNEL_OUTPUT_DIR}/vmlinux.gz.initramfs | ||
290 | fi | ||
291 | } | ||
292 | do_transform_bundled_initramfs[dirs] = "${B}" | ||
293 | |||
279 | python do_devshell_prepend () { | 294 | python do_devshell_prepend () { |
280 | os.environ["LDFLAGS"] = '' | 295 | os.environ["LDFLAGS"] = '' |
281 | } | 296 | } |
@@ -307,6 +322,10 @@ kernel_do_compile() { | |||
307 | export KBUILD_BUILD_TIMESTAMP="$ts" | 322 | export KBUILD_BUILD_TIMESTAMP="$ts" |
308 | export KCONFIG_NOTIMESTAMP=1 | 323 | export KCONFIG_NOTIMESTAMP=1 |
309 | bbnote "KBUILD_BUILD_TIMESTAMP: $ts" | 324 | bbnote "KBUILD_BUILD_TIMESTAMP: $ts" |
325 | else | ||
326 | ts=`LC_ALL=C date` | ||
327 | export KBUILD_BUILD_TIMESTAMP="$ts" | ||
328 | bbnote "KBUILD_BUILD_TIMESTAMP: $ts" | ||
310 | fi | 329 | fi |
311 | # The $use_alternate_initrd is only set from | 330 | # The $use_alternate_initrd is only set from |
312 | # do_bundle_initramfs() This variable is specifically for the | 331 | # do_bundle_initramfs() This variable is specifically for the |
@@ -325,12 +344,17 @@ kernel_do_compile() { | |||
325 | for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do | 344 | for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do |
326 | oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd | 345 | oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd |
327 | done | 346 | done |
347 | } | ||
348 | |||
349 | kernel_do_transform_kernel() { | ||
328 | # vmlinux.gz is not built by kernel | 350 | # vmlinux.gz is not built by kernel |
329 | if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then | 351 | if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then |
330 | mkdir -p "${KERNEL_OUTPUT_DIR}" | 352 | mkdir -p "${KERNEL_OUTPUT_DIR}" |
331 | gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz" | 353 | gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz" |
332 | fi | 354 | fi |
333 | } | 355 | } |
356 | do_transform_kernel[dirs] = "${B}" | ||
357 | addtask transform_kernel after do_compile before do_install | ||
334 | 358 | ||
335 | do_compile_kernelmodules() { | 359 | do_compile_kernelmodules() { |
336 | unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE | 360 | unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE |
@@ -348,6 +372,10 @@ do_compile_kernelmodules() { | |||
348 | export KBUILD_BUILD_TIMESTAMP="$ts" | 372 | export KBUILD_BUILD_TIMESTAMP="$ts" |
349 | export KCONFIG_NOTIMESTAMP=1 | 373 | export KCONFIG_NOTIMESTAMP=1 |
350 | bbnote "KBUILD_BUILD_TIMESTAMP: $ts" | 374 | bbnote "KBUILD_BUILD_TIMESTAMP: $ts" |
375 | else | ||
376 | ts=`LC_ALL=C date` | ||
377 | export KBUILD_BUILD_TIMESTAMP="$ts" | ||
378 | bbnote "KBUILD_BUILD_TIMESTAMP: $ts" | ||
351 | fi | 379 | fi |
352 | if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then | 380 | if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then |
353 | cc_extra=$(get_cc_option) | 381 | cc_extra=$(get_cc_option) |
@@ -377,8 +405,8 @@ kernel_do_install() { | |||
377 | unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE | 405 | unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE |
378 | if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then | 406 | if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then |
379 | oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install | 407 | oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install |
380 | rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build" | 408 | rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build" |
381 | rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source" | 409 | rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source" |
382 | # If the kernel/ directory is empty remove it to prevent QA issues | 410 | # If the kernel/ directory is empty remove it to prevent QA issues |
383 | rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel" | 411 | rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel" |
384 | else | 412 | else |
@@ -390,12 +418,26 @@ kernel_do_install() { | |||
390 | # | 418 | # |
391 | install -d ${D}/${KERNEL_IMAGEDEST} | 419 | install -d ${D}/${KERNEL_IMAGEDEST} |
392 | install -d ${D}/boot | 420 | install -d ${D}/boot |
421 | |||
422 | # | ||
423 | # When an initramfs bundle is included inside a FIT image, the fitImage is only created after the install | ||
424 | # task, by do_assemble_fitimage_initramfs. | ||
425 | # That in turn runs after the initramfs bundle has been generated (by do_bundle_initramfs). | ||
426 | # The install task therefore must not try to install the fitImage, because at that point it has not | ||
427 | # been generated yet. | ||
428 | # Once the fitImage has been generated, the deploy task copies it from the build directory to the | ||
429 | # deploy folder. | ||
430 | # | ||
431 | |||
393 | for imageType in ${KERNEL_IMAGETYPES} ; do | 432 | for imageType in ${KERNEL_IMAGETYPES} ; do |
394 | install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION} | 433 | if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then |
395 | if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then | 434 | install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION} |
396 | ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType} | 435 | if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then |
436 | ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType} | ||
437 | fi | ||
397 | fi | 438 | fi |
398 | done | 439 | done |
440 | |||
399 | install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION} | 441 | install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION} |
400 | install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION} | 442 | install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION} |
401 | install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION} | 443 | install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION} |
@@ -403,7 +445,6 @@ kernel_do_install() { | |||
403 | install -d ${D}${sysconfdir}/modules-load.d | 445 | install -d ${D}${sysconfdir}/modules-load.d |
404 | install -d ${D}${sysconfdir}/modprobe.d | 446 | install -d ${D}${sysconfdir}/modprobe.d |
405 | } | 447 | } |
406 | do_install[prefuncs] += "package_get_auto_pr" | ||
407 | 448 | ||
408 | # Must be run no earlier than do_kernel_checkout or else the Makefile won't be in ${S}/Makefile | 449 | # Must be run no earlier than do_kernel_checkout or else the Makefile won't be in ${S}/Makefile
409 | do_kernel_version_sanity_check() { | 450 | do_kernel_version_sanity_check() { |
@@ -569,11 +610,11 @@ do_savedefconfig() { | |||
569 | do_savedefconfig[nostamp] = "1" | 610 | do_savedefconfig[nostamp] = "1" |
570 | addtask savedefconfig after do_configure | 611 | addtask savedefconfig after do_configure |
571 | 612 | ||
572 | inherit cml1 | 613 | inherit cml1 pkgconfig |
573 | 614 | ||
574 | KCONFIG_CONFIG_COMMAND_append = " LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'" | 615 | KCONFIG_CONFIG_COMMAND_append = " LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'" |
575 | 616 | ||
576 | EXPORT_FUNCTIONS do_compile do_install do_configure | 617 | EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure |
577 | 618 | ||
578 | # kernel-base becomes kernel-${KERNEL_VERSION} | 619 | # kernel-base becomes kernel-${KERNEL_VERSION} |
579 | # kernel-image becomes kernel-image-${KERNEL_VERSION} | 620 | # kernel-image becomes kernel-image-${KERNEL_VERSION} |
@@ -679,7 +720,7 @@ do_sizecheck() { | |||
679 | at_least_one_fits= | 720 | at_least_one_fits= |
680 | for imageType in ${KERNEL_IMAGETYPES} ; do | 721 | for imageType in ${KERNEL_IMAGETYPES} ; do |
681 | size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'` | 722 | size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'` |
682 | if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then | 723 | if [ $size -gt ${KERNEL_IMAGE_MAXSIZE} ]; then |
683 | bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device." | 724 | bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device." |
684 | else | 725 | else |
685 | at_least_one_fits=y | 726 | at_least_one_fits=y |
@@ -718,7 +759,7 @@ kernel_do_deploy() { | |||
718 | fi | 759 | fi |
719 | 760 | ||
720 | if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then | 761 | if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then |
721 | for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do | 762 | for imageType in ${KERNEL_IMAGETYPES} ; do |
722 | if [ "$imageType" = "fitImage" ] ; then | 763 | if [ "$imageType" = "fitImage" ] ; then |
723 | continue | 764 | continue |
724 | fi | 765 | fi |
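
The do_sizecheck comparison loosens from -ge to -gt, so a kernel image of exactly KERNEL_IMAGE_MAXSIZE kilobytes no longer triggers the "too big for your device" warning. The limit itself is set per machine as before; a sketch with an illustrative value:

    # machine .conf: warn only when an image exceeds 8192 KiB
    KERNEL_IMAGE_MAXSIZE = "8192"
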
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass index de3b4250c7..72f489d673 100644 --- a/meta/classes/libc-package.bbclass +++ b/meta/classes/libc-package.bbclass | |||
@@ -45,6 +45,7 @@ PACKAGE_NO_GCONV ?= "0" | |||
45 | OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}" | 45 | OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}" |
46 | 46 | ||
47 | locale_base_postinst_ontarget() { | 47 | locale_base_postinst_ontarget() { |
48 | mkdir ${libdir}/locale | ||
48 | localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s | 49 | localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s |
49 | } | 50 | } |
50 | 51 | ||
@@ -355,7 +356,7 @@ python package_do_split_gconvs () { | |||
355 | m.write("\t@echo 'Progress %d/%d'\n" % (i, total)) | 356 | m.write("\t@echo 'Progress %d/%d'\n" % (i, total)) |
356 | m.write("\t" + makerecipe + "\n\n") | 357 | m.write("\t" + makerecipe + "\n\n") |
357 | d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile))) | 358 | d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile))) |
358 | d.setVarFlag("oe_runmake", "progress", "outof:Progress\s(\d+)/(\d+)") | 359 | d.setVarFlag("oe_runmake", "progress", r"outof:Progress\s(\d+)/(\d+)") |
359 | bb.note("Executing binary locale generation makefile") | 360 | bb.note("Executing binary locale generation makefile") |
360 | bb.build.exec_func("oe_runmake", d) | 361 | bb.build.exec_func("oe_runmake", d) |
361 | bb.note("collecting binary locales from locale tree") | 362 | bb.note("collecting binary locales from locale tree") |
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass index dc91118340..806b5069fd 100644 --- a/meta/classes/license.bbclass +++ b/meta/classes/license.bbclass | |||
@@ -31,8 +31,8 @@ python do_populate_lic() { | |||
31 | f.write("%s: %s\n" % (key, info[key])) | 31 | f.write("%s: %s\n" % (key, info[key])) |
32 | } | 32 | } |
33 | 33 | ||
34 | PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '')).split())}" | 34 | PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '') + ' ' + d.getVar('COREBASE') + '/meta/COPYING').split())}" |
35 | # it would be better to copy them in do_install_append, but find_license_files is python | 35 | # it would be better to copy them in do_install:append, but find_license_files is python |
36 | python perform_packagecopy_prepend () { | 36 | python perform_packagecopy_prepend () { |
37 | enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d) | 37 | enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d) |
38 | if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled: | 38 | if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled: |
@@ -91,17 +91,17 @@ def copy_license_files(lic_files_paths, destdir): | |||
91 | os.link(src, dst) | 91 | os.link(src, dst) |
92 | except OSError as err: | 92 | except OSError as err: |
93 | if err.errno == errno.EXDEV: | 93 | if err.errno == errno.EXDEV: |
94 | # Copy license files if hard-link is not possible even if st_dev is the | 94 | # Copy license files if hardlink is not possible even if st_dev is the |
95 | # same on source and destination (docker container with device-mapper?) | 95 | # same on source and destination (docker container with device-mapper?) |
96 | canlink = False | 96 | canlink = False |
97 | else: | 97 | else: |
98 | raise | 98 | raise |
99 | # Only chown if we did hardling, and, we're running under pseudo | 99 | # Only chown if we did hardlink and we're running under pseudo |
100 | if canlink and os.environ.get('PSEUDO_DISABLED') == '0': | 100 | if canlink and os.environ.get('PSEUDO_DISABLED') == '0': |
101 | os.chown(dst,0,0) | 101 | os.chown(dst,0,0) |
102 | if not canlink: | 102 | if not canlink: |
103 | begin_idx = int(beginline)-1 if beginline is not None else None | 103 | begin_idx = max(0, int(beginline) - 1) if beginline is not None else None |
104 | end_idx = int(endline) if endline is not None else None | 104 | end_idx = max(0, int(endline)) if endline is not None else None |
105 | if begin_idx is None and end_idx is None: | 105 | if begin_idx is None and end_idx is None: |
106 | shutil.copyfile(src, dst) | 106 | shutil.copyfile(src, dst) |
107 | else: | 107 | else: |
@@ -153,6 +153,10 @@ def find_license_files(d): | |||
153 | find_license(node.s.replace("+", "").replace("*", "")) | 153 | find_license(node.s.replace("+", "").replace("*", "")) |
154 | self.generic_visit(node) | 154 | self.generic_visit(node) |
155 | 155 | ||
156 | def visit_Constant(self, node): | ||
157 | find_license(node.value.replace("+", "").replace("*", "")) | ||
158 | self.generic_visit(node) | ||
159 | |||
156 | def find_license(license_type): | 160 | def find_license(license_type): |
157 | try: | 161 | try: |
158 | bb.utils.mkdirhier(gen_lic_dest) | 162 | bb.utils.mkdirhier(gen_lic_dest) |
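The new visit_Constant handler mirrors visit_Str because Python 3.8 started emitting ast.Constant nodes where older parsers emitted ast.Str. A self-contained sketch of the dual-handler pattern (the visitor and the parsed expression are illustrative, not the class's own):

```python
import ast

class StringCollector(ast.NodeVisitor):
    """Collect string literals under both the old and new AST node types."""
    def __init__(self):
        self.found = []

    def visit_Str(self, node):        # parsers before Python 3.8
        self.found.append(node.s)
        self.generic_visit(node)

    def visit_Constant(self, node):   # Python 3.8 and later
        if isinstance(node.value, str):
            self.found.append(node.value)
        self.generic_visit(node)

collector = StringCollector()
collector.visit(ast.parse('"GPL-2.0-only" and "MIT"'))
print(collector.found)                # ['GPL-2.0-only', 'MIT']
```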
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass index a69cc5f065..325b3cbba7 100644 --- a/meta/classes/license_image.bbclass +++ b/meta/classes/license_image.bbclass | |||
@@ -1,3 +1,5 @@ | |||
1 | ROOTFS_LICENSE_DIR = "${IMAGE_ROOTFS}/usr/share/common-licenses" | ||
2 | |||
1 | python write_package_manifest() { | 3 | python write_package_manifest() { |
2 | # Get list of installed packages | 4 | # Get list of installed packages |
3 | license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}') | 5 | license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}') |
@@ -7,8 +9,8 @@ python write_package_manifest() { | |||
7 | 9 | ||
8 | pkgs = image_list_installed_packages(d) | 10 | pkgs = image_list_installed_packages(d) |
9 | output = format_pkg_list(pkgs) | 11 | output = format_pkg_list(pkgs) |
10 | open(os.path.join(license_image_dir, 'package.manifest'), | 12 | with open(os.path.join(license_image_dir, 'package.manifest'), "w+") as package_manifest: |
11 | 'w+').write(output) | 13 | package_manifest.write(output) |
12 | } | 14 | } |
13 | 15 | ||
14 | python license_create_manifest() { | 16 | python license_create_manifest() { |
@@ -105,8 +107,7 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True): | |||
105 | copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST') | 107 | copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST') |
106 | copy_lic_dirs = d.getVar('COPY_LIC_DIRS') | 108 | copy_lic_dirs = d.getVar('COPY_LIC_DIRS') |
107 | if rootfs and copy_lic_manifest == "1": | 109 | if rootfs and copy_lic_manifest == "1": |
108 | rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS'), | 110 | rootfs_license_dir = d.getVar('ROOTFS_LICENSE_DIR') |
109 | 'usr', 'share', 'common-licenses') | ||
110 | bb.utils.mkdirhier(rootfs_license_dir) | 111 | bb.utils.mkdirhier(rootfs_license_dir) |
111 | rootfs_license_manifest = os.path.join(rootfs_license_dir, | 112 | rootfs_license_manifest = os.path.join(rootfs_license_dir, |
112 | os.path.split(license_manifest)[1]) | 113 | os.path.split(license_manifest)[1]) |
@@ -144,12 +145,13 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True): | |||
144 | continue | 145 | continue |
145 | 146 | ||
146 | # Make sure we use only canonical name for the license file | 147 | # Make sure we use only canonical name for the license file |
147 | rootfs_license = os.path.join(rootfs_license_dir, "generic_%s" % generic_lic) | 148 | generic_lic_file = "generic_%s" % generic_lic |
149 | rootfs_license = os.path.join(rootfs_license_dir, generic_lic_file) | ||
148 | if not os.path.exists(rootfs_license): | 150 | if not os.path.exists(rootfs_license): |
149 | oe.path.copyhardlink(pkg_license, rootfs_license) | 151 | oe.path.copyhardlink(pkg_license, rootfs_license) |
150 | 152 | ||
151 | if not os.path.exists(pkg_rootfs_license): | 153 | if not os.path.exists(pkg_rootfs_license): |
152 | os.symlink(os.path.join('..', lic), pkg_rootfs_license) | 154 | os.symlink(os.path.join('..', generic_lic_file), pkg_rootfs_license) |
153 | else: | 155 | else: |
154 | if (oe.license.license_ok(canonical_license(d, | 156 | if (oe.license.license_ok(canonical_license(d, |
155 | lic), bad_licenses) == False or | 157 | lic), bad_licenses) == False or |
@@ -209,7 +211,7 @@ def get_deployed_dependencies(d): | |||
209 | deploy = {} | 211 | deploy = {} |
210 | # Get all the dependencies for the current task (rootfs). | 212 | # Get all the dependencies for the current task (rootfs). |
211 | taskdata = d.getVar("BB_TASKDEPDATA", False) | 213 | taskdata = d.getVar("BB_TASKDEPDATA", False) |
212 | pn = d.getVar("PN", True) | 214 | pn = d.getVar("PN") |
213 | depends = list(set([dep[0] for dep | 215 | depends = list(set([dep[0] for dep |
214 | in list(taskdata.values()) | 216 | in list(taskdata.values()) |
215 | if not dep[0].endswith("-native") and not dep[0] == pn])) | 217 | if not dep[0].endswith("-native") and not dep[0] == pn])) |
@@ -256,3 +258,13 @@ python do_populate_lic_deploy() { | |||
256 | addtask populate_lic_deploy before do_build after do_image_complete | 258 | addtask populate_lic_deploy before do_build after do_image_complete |
257 | do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy" | 259 | do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy" |
258 | 260 | ||
261 | python license_qa_dead_symlink() { | ||
262 | import os | ||
263 | |||
264 | for root, dirs, files in os.walk(d.getVar('ROOTFS_LICENSE_DIR')): | ||
265 | for file in files: | ||
266 | full_path = root + "/" + file | ||
267 | if os.path.islink(full_path) and not os.path.exists(full_path): | ||
268 | bb.error("broken symlink: " + full_path) | ||
269 | } | ||
270 | IMAGE_QA_COMMANDS += "license_qa_dead_symlink" | ||
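The core of the new license_qa_dead_symlink check is that os.path.exists() follows links, so islink-and-not-exists identifies a dangling symlink. A runnable sketch of the same walk, using a throwaway directory:

```python
import os
import tempfile

def dead_symlinks(topdir):
    # os.path.exists() follows the link target, so a link whose target is
    # missing is exactly what the QA check flags as broken
    for root, dirs, files in os.walk(topdir):
        for name in files:
            path = os.path.join(root, name)
            if os.path.islink(path) and not os.path.exists(path):
                yield path

with tempfile.TemporaryDirectory() as td:
    os.symlink(os.path.join(td, "missing-target"), os.path.join(td, "dangling"))
    print(list(dead_symlinks(td)))    # [.../dangling]
```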
diff --git a/meta/classes/linux-dummy.bbclass b/meta/classes/linux-dummy.bbclass new file mode 100644 index 0000000000..cd8791557d --- /dev/null +++ b/meta/classes/linux-dummy.bbclass | |||
@@ -0,0 +1,26 @@ | |||
1 | |||
2 | python __anonymous () { | ||
3 | if d.getVar('PREFERRED_PROVIDER_virtual/kernel') == 'linux-dummy': | ||
4 | # copy parts of the code from kernel.bbclass | ||
5 | kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel" | ||
6 | |||
7 | # add an empty kernel-devicetree package | ||
8 | d.appendVar('PACKAGES', ' %s-devicetree' % kname) | ||
9 | d.setVar('ALLOW_EMPTY_%s-devicetree' % kname, '1') | ||
10 | |||
11 | # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES | ||
12 | type = d.getVar('KERNEL_IMAGETYPE') or "" | ||
13 | alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or "" | ||
14 | types = d.getVar('KERNEL_IMAGETYPES') or "" | ||
15 | if type not in types.split(): | ||
16 | types = (type + ' ' + types).strip() | ||
17 | if alttype not in types.split(): | ||
18 | types = (alttype + ' ' + types).strip() | ||
19 | |||
20 | # add empty kernel-image-* packages | ||
21 | for type in types.split(): | ||
22 | typelower = type.lower() | ||
23 | d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower)) | ||
24 | d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1') | ||
25 | } | ||
26 | |||
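A standalone rehearsal of the image-type merge in the anonymous function above, with made-up variable contents, shows which empty packages end up declared:

```python
# Sample values standing in for KERNEL_IMAGETYPE, KERNEL_ALT_IMAGETYPE
# and KERNEL_IMAGETYPES; only the merge logic is taken from the class.
imagetype, alt_imagetype = "zImage", "uImage"
types = "zImage fitImage"

for t in (imagetype, alt_imagetype):
    if t not in types.split():
        types = (t + ' ' + types).strip()

print(['kernel-image-%s' % t.lower() for t in types.split()])
# ['kernel-image-uimage', 'kernel-image-zimage', 'kernel-image-fitimage']
```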
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass index 2608a7ef7b..47cb969b8d 100644 --- a/meta/classes/metadata_scm.bbclass +++ b/meta/classes/metadata_scm.bbclass | |||
@@ -1,8 +1,3 @@ | |||
1 | METADATA_BRANCH ?= "${@base_detect_branch(d)}" | ||
2 | METADATA_BRANCH[vardepvalue] = "${METADATA_BRANCH}" | ||
3 | METADATA_REVISION ?= "${@base_detect_revision(d)}" | ||
4 | METADATA_REVISION[vardepvalue] = "${METADATA_REVISION}" | ||
5 | |||
6 | def base_detect_revision(d): | 1 | def base_detect_revision(d): |
7 | path = base_get_scmbasepath(d) | 2 | path = base_get_scmbasepath(d) |
8 | return base_get_metadata_git_revision(path, d) | 3 | return base_get_metadata_git_revision(path, d) |
@@ -42,3 +37,8 @@ def base_get_metadata_git_revision(path, d): | |||
42 | except bb.process.ExecutionError: | 37 | except bb.process.ExecutionError: |
43 | rev = '<unknown>' | 38 | rev = '<unknown>' |
44 | return rev.strip() | 39 | return rev.strip() |
40 | |||
41 | METADATA_BRANCH := "${@base_detect_branch(d)}" | ||
42 | METADATA_BRANCH[vardepvalue] = "${METADATA_BRANCH}" | ||
43 | METADATA_REVISION := "${@base_detect_revision(d)}" | ||
44 | METADATA_REVISION[vardepvalue] = "${METADATA_REVISION}" | ||
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass index 87bba41472..669d0cc8ff 100644 --- a/meta/classes/mirrors.bbclass +++ b/meta/classes/mirrors.bbclass | |||
@@ -29,7 +29,6 @@ ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \ | |||
29 | ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \ | 29 | ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \ |
30 | ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \ | 30 | ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \ |
31 | ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \ | 31 | ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \ |
32 | http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \ | ||
33 | http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \ | 32 | http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \ |
34 | http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \n \ | 33 | http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \n \ |
35 | ${APACHE_MIRROR} http://www.us.apache.org/dist \n \ | 34 | ${APACHE_MIRROR} http://www.us.apache.org/dist \n \ |
@@ -43,6 +42,7 @@ ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourcew | |||
43 | cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ | 42 | cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ |
44 | svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ | 43 | svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ |
45 | git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ | 44 | git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ |
45 | gitsm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ | ||
46 | hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ | 46 | hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ |
47 | bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ | 47 | bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ |
48 | p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ | 48 | p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ |
@@ -53,6 +53,7 @@ npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \n \ | |||
53 | cvs://.*/.* http://sources.openembedded.org/ \n \ | 53 | cvs://.*/.* http://sources.openembedded.org/ \n \ |
54 | svn://.*/.* http://sources.openembedded.org/ \n \ | 54 | svn://.*/.* http://sources.openembedded.org/ \n \ |
55 | git://.*/.* http://sources.openembedded.org/ \n \ | 55 | git://.*/.* http://sources.openembedded.org/ \n \ |
56 | gitsm://.*/.* http://sources.openembedded.org/ \n \ | ||
56 | hg://.*/.* http://sources.openembedded.org/ \n \ | 57 | hg://.*/.* http://sources.openembedded.org/ \n \ |
57 | bzr://.*/.* http://sources.openembedded.org/ \n \ | 58 | bzr://.*/.* http://sources.openembedded.org/ \n \ |
58 | p4://.*/.* http://sources.openembedded.org/ \n \ | 59 | p4://.*/.* http://sources.openembedded.org/ \n \ |
@@ -62,6 +63,8 @@ ftp://.*/.* http://sources.openembedded.org/ \n \ | |||
62 | npm://.*/?.* http://sources.openembedded.org/ \n \ | 63 | npm://.*/?.* http://sources.openembedded.org/ \n \ |
63 | ${CPAN_MIRROR} http://cpan.metacpan.org/ \n \ | 64 | ${CPAN_MIRROR} http://cpan.metacpan.org/ \n \ |
64 | ${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \ | 65 | ${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \ |
66 | https?$://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \n \ | ||
67 | https?$://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \n \ | ||
65 | " | 68 | " |
66 | 69 | ||
67 | # Use MIRRORS to provide git repo fallbacks using the https protocol, for cases | 70 | # Use MIRRORS to provide git repo fallbacks using the https protocol, for cases |
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass index ee677da1e2..b5c59ac593 100644 --- a/meta/classes/multilib.bbclass +++ b/meta/classes/multilib.bbclass | |||
@@ -45,6 +45,7 @@ python multilib_virtclass_handler () { | |||
45 | e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot") | 45 | e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot") |
46 | e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot") | 46 | e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot") |
47 | e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot") | 47 | e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot") |
48 | e.data.setVar("RECIPE_SYSROOT_MANIFEST_SUBDIR", "nativesdk-" + variant) | ||
48 | e.data.setVar("MLPREFIX", variant + "-") | 49 | e.data.setVar("MLPREFIX", variant + "-") |
49 | override = ":virtclass-multilib-" + variant | 50 | override = ":virtclass-multilib-" + variant |
50 | e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override) | 51 | e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override) |
@@ -106,7 +107,6 @@ python __anonymous () { | |||
106 | d.setVar("LINGUAS_INSTALL", "") | 107 | d.setVar("LINGUAS_INSTALL", "") |
107 | # FIXME, we need to map this to something, not delete it! | 108 | # FIXME, we need to map this to something, not delete it! |
108 | d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "") | 109 | d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "") |
109 | bb.build.deltask('do_populate_sdk', d) | ||
110 | bb.build.deltask('do_populate_sdk_ext', d) | 110 | bb.build.deltask('do_populate_sdk_ext', d) |
111 | return | 111 | return |
112 | 112 | ||
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass index 7f2692c51a..dc5a9756b6 100644 --- a/meta/classes/nativesdk.bbclass +++ b/meta/classes/nativesdk.bbclass | |||
@@ -113,3 +113,5 @@ do_packagedata[stamp-extra-info] = "" | |||
113 | USE_NLS = "${SDKUSE_NLS}" | 113 | USE_NLS = "${SDKUSE_NLS}" |
114 | 114 | ||
115 | OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}" | 115 | OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}" |
116 | |||
117 | PATH_prepend = "${COREBASE}/scripts/nativesdk-intercept:" | ||
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass index 15bff9c778..49d30caef7 100644 --- a/meta/classes/package.bbclass +++ b/meta/classes/package.bbclass | |||
@@ -1140,6 +1140,14 @@ python split_and_strip_files () { | |||
1140 | # Modified the file so clear the cache | 1140 | # Modified the file so clear the cache |
1141 | cpath.updatecache(file) | 1141 | cpath.updatecache(file) |
1142 | 1142 | ||
1143 | def strip_pkgd_prefix(f): | ||
1144 | nonlocal dvar | ||
1145 | |||
1146 | if f.startswith(dvar): | ||
1147 | return f[len(dvar):] | ||
1148 | |||
1149 | return f | ||
1150 | |||
1143 | # | 1151 | # |
1144 | # First lets process debug splitting | 1152 | # First lets process debug splitting |
1145 | # | 1153 | # |
@@ -1153,6 +1161,8 @@ python split_and_strip_files () { | |||
1153 | for file in staticlibs: | 1161 | for file in staticlibs: |
1154 | results.append( (file,source_info(file, d)) ) | 1162 | results.append( (file,source_info(file, d)) ) |
1155 | 1163 | ||
1164 | d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results}) | ||
1165 | |||
1156 | sources = set() | 1166 | sources = set() |
1157 | for r in results: | 1167 | for r in results: |
1158 | sources.update(r[1]) | 1168 | sources.update(r[1]) |
@@ -1460,6 +1470,7 @@ PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS | |||
1460 | python emit_pkgdata() { | 1470 | python emit_pkgdata() { |
1461 | from glob import glob | 1471 | from glob import glob |
1462 | import json | 1472 | import json |
1473 | import gzip | ||
1463 | 1474 | ||
1464 | def process_postinst_on_target(pkg, mlprefix): | 1475 | def process_postinst_on_target(pkg, mlprefix): |
1465 | pkgval = d.getVar('PKG_%s' % pkg) | 1476 | pkgval = d.getVar('PKG_%s' % pkg) |
@@ -1532,6 +1543,8 @@ fi | |||
1532 | with open(data_file, 'w') as fd: | 1543 | with open(data_file, 'w') as fd: |
1533 | fd.write("PACKAGES: %s\n" % packages) | 1544 | fd.write("PACKAGES: %s\n" % packages) |
1534 | 1545 | ||
1546 | pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or [] | ||
1547 | |||
1535 | pn = d.getVar('PN') | 1548 | pn = d.getVar('PN') |
1536 | global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split() | 1549 | global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split() |
1537 | variants = (d.getVar('MULTILIB_VARIANTS') or "").split() | 1550 | variants = (d.getVar('MULTILIB_VARIANTS') or "").split() |
@@ -1551,17 +1564,32 @@ fi | |||
1551 | pkgval = pkg | 1564 | pkgval = pkg |
1552 | d.setVar('PKG_%s' % pkg, pkg) | 1565 | d.setVar('PKG_%s' % pkg, pkg) |
1553 | 1566 | ||
1567 | extended_data = { | ||
1568 | "files_info": {} | ||
1569 | } | ||
1570 | |||
1554 | pkgdestpkg = os.path.join(pkgdest, pkg) | 1571 | pkgdestpkg = os.path.join(pkgdest, pkg) |
1555 | files = {} | 1572 | files = {} |
1573 | files_extra = {} | ||
1556 | total_size = 0 | 1574 | total_size = 0 |
1557 | seen = set() | 1575 | seen = set() |
1558 | for f in pkgfiles[pkg]: | 1576 | for f in pkgfiles[pkg]: |
1559 | relpth = os.path.relpath(f, pkgdestpkg) | 1577 | fpath = os.sep + os.path.relpath(f, pkgdestpkg) |
1578 | |||
1560 | fstat = os.lstat(f) | 1579 | fstat = os.lstat(f) |
1561 | files[os.sep + relpth] = fstat.st_size | 1580 | files[fpath] = fstat.st_size |
1581 | |||
1582 | extended_data["files_info"].setdefault(fpath, {}) | ||
1583 | extended_data["files_info"][fpath]['size'] = fstat.st_size | ||
1584 | |||
1562 | if fstat.st_ino not in seen: | 1585 | if fstat.st_ino not in seen: |
1563 | seen.add(fstat.st_ino) | 1586 | seen.add(fstat.st_ino) |
1564 | total_size += fstat.st_size | 1587 | total_size += fstat.st_size |
1588 | |||
1589 | if fpath in pkgdebugsource: | ||
1590 | extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath] | ||
1591 | del pkgdebugsource[fpath] | ||
1592 | |||
1565 | d.setVar('FILES_INFO', json.dumps(files, sort_keys=True)) | 1593 | d.setVar('FILES_INFO', json.dumps(files, sort_keys=True)) |
1566 | 1594 | ||
1567 | process_postinst_on_target(pkg, d.getVar("MLPREFIX")) | 1595 | process_postinst_on_target(pkg, d.getVar("MLPREFIX")) |
@@ -1582,6 +1610,10 @@ fi | |||
1582 | 1610 | ||
1583 | sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size)) | 1611 | sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size)) |
1584 | 1612 | ||
1613 | subdata_extended_file = pkgdatadir + "/extended/%s.json.gz" % pkg | ||
1614 | with gzip.open(subdata_extended_file, "wt", encoding="utf-8") as f: | ||
1615 | json.dump(extended_data, f, sort_keys=True, separators=(",", ":")) | ||
1616 | |||
1585 | # Symlinks needed for rprovides lookup | 1617 | # Symlinks needed for rprovides lookup |
1586 | rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES') | 1618 | rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES') |
1587 | if rprov: | 1619 | if rprov: |
@@ -1612,7 +1644,8 @@ fi | |||
1612 | write_extra_runtime_pkgs(global_variants, packages, pkgdatadir) | 1644 | write_extra_runtime_pkgs(global_variants, packages, pkgdatadir) |
1613 | 1645 | ||
1614 | } | 1646 | } |
1615 | emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides" | 1647 | emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended" |
1648 | emit_pkgdata[vardepsexclude] = "BB_NUMBER_THREADS" | ||
1616 | 1649 | ||
1617 | ldconfig_postinst_fragment() { | 1650 | ldconfig_postinst_fragment() { |
1618 | if [ x"$D" = "x" ]; then | 1651 | if [ x"$D" = "x" ]; then |
@@ -1620,7 +1653,7 @@ if [ x"$D" = "x" ]; then | |||
1620 | fi | 1653 | fi |
1621 | } | 1654 | } |
1622 | 1655 | ||
1623 | RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps" | 1656 | RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provides %{nil}'" |
1624 | 1657 | ||
1625 | # Collect perfile run-time dependency metadata | 1658 | # Collect perfile run-time dependency metadata |
1626 | # Output: | 1659 | # Output: |
@@ -1989,12 +2022,12 @@ python package_do_pkgconfig () { | |||
1989 | for pkg in packages.split(): | 2022 | for pkg in packages.split(): |
1990 | pkgconfig_provided[pkg] = [] | 2023 | pkgconfig_provided[pkg] = [] |
1991 | pkgconfig_needed[pkg] = [] | 2024 | pkgconfig_needed[pkg] = [] |
1992 | for file in pkgfiles[pkg]: | 2025 | for file in sorted(pkgfiles[pkg]): |
1993 | m = pc_re.match(file) | 2026 | m = pc_re.match(file) |
1994 | if m: | 2027 | if m: |
1995 | pd = bb.data.init() | 2028 | pd = bb.data.init() |
1996 | name = m.group(1) | 2029 | name = m.group(1) |
1997 | pkgconfig_provided[pkg].append(name) | 2030 | pkgconfig_provided[pkg].append(os.path.basename(name)) |
1998 | if not os.access(file, os.R_OK): | 2031 | if not os.access(file, os.R_OK): |
1999 | continue | 2032 | continue |
2000 | with open(file, 'r') as f: | 2033 | with open(file, 'r') as f: |
@@ -2017,7 +2050,7 @@ python package_do_pkgconfig () { | |||
2017 | pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist") | 2050 | pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist") |
2018 | if pkgconfig_provided[pkg] != []: | 2051 | if pkgconfig_provided[pkg] != []: |
2019 | with open(pkgs_file, 'w') as f: | 2052 | with open(pkgs_file, 'w') as f: |
2020 | for p in pkgconfig_provided[pkg]: | 2053 | for p in sorted(pkgconfig_provided[pkg]): |
2021 | f.write('%s\n' % p) | 2054 | f.write('%s\n' % p) |
2022 | 2055 | ||
2023 | # Go from least to most specific since the last one found wins | 2056 | # Go from least to most specific since the last one found wins |
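With the changes above, emit_pkgdata also writes one gzipped JSON file per package under the new extended/ pkgdata directory, recording per-file sizes and, where found, debug sources. A hypothetical consumer sketch; a stand-in file is generated so it runs standalone (real files live under the pkgdata directory as extended/<pkg>.json.gz):

```python
import gzip
import json
import os
import tempfile

# Shape of the extended data as written above: files_info maps each
# packaged path to its size and, optionally, its debug sources.
sample = {"files_info": {"/bin/example": {"size": 593928,
                                          "debugsrc": ["/usr/src/debug/example/1.0/src.c"]}}}

with tempfile.TemporaryDirectory() as td:
    path = os.path.join(td, "example.json.gz")
    with gzip.open(path, "wt", encoding="utf-8") as f:
        json.dump(sample, f, sort_keys=True, separators=(",", ":"))

    with gzip.open(path, "rt", encoding="utf-8") as f:
        extended = json.load(f)
    for fpath, info in extended["files_info"].items():
        line = "%s: %d bytes" % (fpath, info["size"])
        if "debugsrc" in info:        # present only when debug sources exist
            line += ", %d debug source(s)" % len(info["debugsrc"])
        print(line)
```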
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass index 790b26aef2..fa8c6c82ff 100644 --- a/meta/classes/package_deb.bbclass +++ b/meta/classes/package_deb.bbclass | |||
@@ -315,8 +315,8 @@ do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}" | |||
315 | do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}" | 315 | do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}" |
316 | do_package_write_deb[umask] = "022" | 316 | do_package_write_deb[umask] = "022" |
317 | do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}" | 317 | do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}" |
318 | addtask package_write_deb after do_packagedata do_package | 318 | EPOCHTASK ??= "" |
319 | 319 | addtask package_write_deb after do_packagedata do_package ${EPOCHTASK} | |
320 | 320 | ||
321 | PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot" | 321 | PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot" |
322 | PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot" | 322 | PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot" |
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass index c008559e4a..4927cfba00 100644 --- a/meta/classes/package_ipk.bbclass +++ b/meta/classes/package_ipk.bbclass | |||
@@ -274,7 +274,8 @@ do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}" | |||
274 | do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}" | 274 | do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}" |
275 | do_package_write_ipk[umask] = "022" | 275 | do_package_write_ipk[umask] = "022" |
276 | do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}" | 276 | do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}" |
277 | addtask package_write_ipk after do_packagedata do_package | 277 | EPOCHTASK ??= "" |
278 | addtask package_write_ipk after do_packagedata do_package ${EPOCHTASK} | ||
278 | 279 | ||
279 | PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot" | 280 | PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot" |
280 | PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot" | 281 | PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot" |
diff --git a/meta/classes/package_pkgdata.bbclass b/meta/classes/package_pkgdata.bbclass index 18b7ed62e0..a1ea8fc041 100644 --- a/meta/classes/package_pkgdata.bbclass +++ b/meta/classes/package_pkgdata.bbclass | |||
@@ -162,6 +162,6 @@ python package_prepare_pkgdata() { | |||
162 | 162 | ||
163 | } | 163 | } |
164 | package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}" | 164 | package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}" |
165 | package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA" | 165 | package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA SSTATETASKS" |
166 | 166 | ||
167 | 167 | ||
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass index 95731c7d8d..65587d228b 100644 --- a/meta/classes/package_rpm.bbclass +++ b/meta/classes/package_rpm.bbclass | |||
@@ -678,11 +678,12 @@ python do_package_rpm () { | |||
678 | cmd = cmd + " --define '_use_internal_dependency_generator 0'" | 678 | cmd = cmd + " --define '_use_internal_dependency_generator 0'" |
679 | cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'" | 679 | cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'" |
680 | cmd = cmd + " --define '_build_id_links none'" | 680 | cmd = cmd + " --define '_build_id_links none'" |
681 | cmd = cmd + " --define '_binary_payload w6T.xzdio'" | 681 | cmd = cmd + " --define '_binary_payload w6T%d.xzdio'" % int(d.getVar("XZ_THREADS")) |
682 | cmd = cmd + " --define '_source_payload w6T.xzdio'" | 682 | cmd = cmd + " --define '_source_payload w6T%d.xzdio'" % int(d.getVar("XZ_THREADS")) |
683 | cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'" | 683 | cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'" |
684 | cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'" | 684 | cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'" |
685 | cmd = cmd + " --define '_buildhost reproducible'" | 685 | cmd = cmd + " --define '_buildhost reproducible'" |
686 | cmd = cmd + " --define '__font_provides %{nil}'" | ||
686 | if perfiledeps: | 687 | if perfiledeps: |
687 | cmd = cmd + " --define '__find_requires " + outdepends + "'" | 688 | cmd = cmd + " --define '__find_requires " + outdepends + "'" |
688 | cmd = cmd + " --define '__find_provides " + outprovides + "'" | 689 | cmd = cmd + " --define '__find_provides " + outprovides + "'" |
@@ -742,7 +743,8 @@ do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}" | |||
742 | do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}" | 743 | do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}" |
743 | do_package_write_rpm[umask] = "022" | 744 | do_package_write_rpm[umask] = "022" |
744 | do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}" | 745 | do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}" |
745 | addtask package_write_rpm after do_packagedata do_package | 746 | EPOCHTASK ??= "" |
747 | addtask package_write_rpm after do_packagedata do_package ${EPOCHTASK} | ||
746 | 748 | ||
747 | PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot" | 749 | PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot" |
748 | PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot" | 750 | PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot" |
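The payload macro string appears to pack the compressor settings: 'w6' for xz preset 6, 'T<N>' for an explicit thread count, '.xzdio' for the xz backend. A trivial sketch of how the define is assembled (the thread count is an example; the class reads XZ_THREADS):

```python
# 'w6T4.xzdio' = xz preset 6, 4 threads, xz payload backend
xz_threads = 4
define = "--define '_binary_payload w6T%d.xzdio'" % xz_threads
print(define)   # --define '_binary_payload w6T4.xzdio'
```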
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass index 25ec089ae1..484d27ac76 100644 --- a/meta/classes/patch.bbclass +++ b/meta/classes/patch.bbclass | |||
@@ -131,6 +131,9 @@ python patch_do_patch() { | |||
131 | patchdir = parm["patchdir"] | 131 | patchdir = parm["patchdir"] |
132 | if not os.path.isabs(patchdir): | 132 | if not os.path.isabs(patchdir): |
133 | patchdir = os.path.join(s, patchdir) | 133 | patchdir = os.path.join(s, patchdir) |
134 | if not os.path.isdir(patchdir): | ||
135 | bb.fatal("Target directory '%s' not found, patchdir '%s' is incorrect in patch file '%s'" % | ||
136 | (patchdir, parm["patchdir"], parm['patchname'])) | ||
134 | else: | 137 | else: |
135 | patchdir = s | 138 | patchdir = s |
136 | 139 | ||
@@ -147,12 +150,12 @@ python patch_do_patch() { | |||
147 | patchset.Import({"file":local, "strippath": parm['striplevel']}, True) | 150 | patchset.Import({"file":local, "strippath": parm['striplevel']}, True) |
148 | except Exception as exc: | 151 | except Exception as exc: |
149 | bb.utils.remove(process_tmpdir, True) | 152 | bb.utils.remove(process_tmpdir, True) |
150 | bb.fatal(str(exc)) | 153 | bb.fatal("Importing patch '%s' with striplevel '%s'\n%s" % (parm['patchname'], parm['striplevel'], str(exc))) |
151 | try: | 154 | try: |
152 | resolver.Resolve() | 155 | resolver.Resolve() |
153 | except bb.BBHandledException as e: | 156 | except bb.BBHandledException as e: |
154 | bb.utils.remove(process_tmpdir, True) | 157 | bb.utils.remove(process_tmpdir, True) |
155 | bb.fatal(str(e)) | 158 | bb.fatal("Applying patch '%s' on target directory '%s'\n%s" % (parm['patchname'], patchdir, str(e))) |
156 | 159 | ||
157 | bb.utils.remove(process_tmpdir, True) | 160 | bb.utils.remove(process_tmpdir, True) |
158 | del os.environ['TMPDIR'] | 161 | del os.environ['TMPDIR'] |
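The new patchdir check anchors a relative patchdir at ${S} and fails early with a readable message instead of a raw patch-tool error. A minimal sketch of that resolution, with illustrative names:

```python
import os

def resolve_patchdir(s, patchdir):
    # relative patchdir values are taken relative to ${S}
    if not os.path.isabs(patchdir):
        patchdir = os.path.join(s, patchdir)
    if not os.path.isdir(patchdir):
        raise FileNotFoundError("Target directory '%s' not found" % patchdir)
    return patchdir

print(resolve_patchdir("/tmp", "."))   # /tmp/.
```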
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass index dea272c441..49fdfaa93d 100644 --- a/meta/classes/populate_sdk_base.bbclass +++ b/meta/classes/populate_sdk_base.bbclass | |||
@@ -51,6 +51,8 @@ TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}" | |||
51 | SDK_ARCHIVE_TYPE ?= "tar.xz" | 51 | SDK_ARCHIVE_TYPE ?= "tar.xz" |
52 | SDK_XZ_COMPRESSION_LEVEL ?= "-9" | 52 | SDK_XZ_COMPRESSION_LEVEL ?= "-9" |
53 | SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}" | 53 | SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}" |
54 | SDK_ZIP_OPTIONS ?= "-y" | ||
55 | |||
54 | 56 | ||
55 | # To support different sdk type according to SDK_ARCHIVE_TYPE, now support zip and tar.xz | 57 | # To support different sdk type according to SDK_ARCHIVE_TYPE, now support zip and tar.xz |
56 | python () { | 58 | python () { |
@@ -58,7 +60,7 @@ python () { | |||
58 | d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native') | 60 | d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native') |
59 | # SDK_ARCHIVE_CMD used to generate archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from input dir ${SDK_OUTPUT}/${SDKPATH} to output dir ${SDKDEPLOYDIR} | 61 | # SDK_ARCHIVE_CMD used to generate archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from input dir ${SDK_OUTPUT}/${SDKPATH} to output dir ${SDKDEPLOYDIR} |
60 | # recommended to cd into input dir first to avoid archiving the buildpath | 62 | # recommended to cd into input dir first to avoid archiving the buildpath |
61 | d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .') | 63 | d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r ${SDK_ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .') |
62 | else: | 64 | else: |
63 | d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native') | 65 | d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native') |
64 | d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}') | 66 | d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}') |
@@ -66,7 +68,7 @@ python () { | |||
66 | 68 | ||
67 | SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}" | 69 | SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}" |
68 | SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross" | 70 | SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross" |
69 | PATH_prepend = "${STAGING_DIR_HOST}${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:" | 71 | PATH_prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:" |
70 | SDK_DEPENDS += "nativesdk-glibc-locale" | 72 | SDK_DEPENDS += "nativesdk-glibc-locale" |
71 | 73 | ||
72 | # We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it | 74 | # We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it |
@@ -178,7 +180,7 @@ do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}" | |||
178 | do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}" | 180 | do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}" |
179 | do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}" | 181 | do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}" |
180 | 182 | ||
181 | PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR}" | 183 | PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR},${WORKDIR}/oe-sdk-repo,${WORKDIR}/sstate-build-populate_sdk" |
182 | 184 | ||
183 | fakeroot create_sdk_files() { | 185 | fakeroot create_sdk_files() { |
184 | cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/ | 186 | cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/ |
@@ -275,6 +277,7 @@ EOF | |||
275 | # substitute variables | 277 | # substitute variables |
276 | sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \ | 278 | sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \ |
277 | -e 's#@SDKPATH@#${SDKPATH}#g' \ | 279 | -e 's#@SDKPATH@#${SDKPATH}#g' \ |
280 | -e 's#@SDKPATHINSTALL@#${SDKPATHINSTALL}#g' \ | ||
278 | -e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \ | 281 | -e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \ |
279 | -e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \ | 282 | -e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \ |
280 | -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \ | 283 | -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \ |
@@ -324,6 +327,13 @@ def sdk_variables(d): | |||
324 | 327 | ||
325 | do_populate_sdk[vardeps] += "${@sdk_variables(d)}" | 328 | do_populate_sdk[vardeps] += "${@sdk_variables(d)}" |
326 | 329 | ||
330 | python () { | ||
331 | variables = sdk_command_variables(d) | ||
332 | for var in variables: | ||
333 | if d.getVar(var, False): | ||
334 | d.setVarFlag(var, 'func', '1') | ||
335 | } | ||
336 | |||
327 | do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \ | 337 | do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \ |
328 | ${TOOLCHAIN_SHAR_EXT_TMPL}:True" | 338 | ${TOOLCHAIN_SHAR_EXT_TMPL}:True" |
329 | 339 | ||
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass index 71686bc993..1bdfd92847 100644 --- a/meta/classes/populate_sdk_ext.bbclass +++ b/meta/classes/populate_sdk_ext.bbclass | |||
@@ -117,7 +117,7 @@ python write_host_sdk_ext_manifest () { | |||
117 | f.write("%s %s %s\n" % (info[1], info[2], info[3])) | 117 | f.write("%s %s %s\n" % (info[1], info[2], info[3])) |
118 | } | 118 | } |
119 | 119 | ||
120 | SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; " | 120 | SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = " write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; " |
121 | 121 | ||
122 | SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK" | 122 | SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK" |
123 | 123 | ||
@@ -247,7 +247,9 @@ python copy_buildsystem () { | |||
247 | 247 | ||
248 | # Create a layer for new recipes / appends | 248 | # Create a layer for new recipes / appends |
249 | bbpath = d.getVar('BBPATH') | 249 | bbpath = d.getVar('BBPATH') |
250 | bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')]) | 250 | env = os.environ.copy() |
251 | env['PYTHONDONTWRITEBYTECODE'] = '1' | ||
252 | bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')], env=env) | ||
251 | 253 | ||
252 | # Create bblayers.conf | 254 | # Create bblayers.conf |
253 | bb.utils.mkdirhier(baseoutpath + '/conf') | 255 | bb.utils.mkdirhier(baseoutpath + '/conf') |
@@ -360,6 +362,10 @@ python copy_buildsystem () { | |||
360 | # Hide the config information from bitbake output (since it's fixed within the SDK) | 362 | # Hide the config information from bitbake output (since it's fixed within the SDK) |
361 | f.write('BUILDCFG_HEADER = ""\n\n') | 363 | f.write('BUILDCFG_HEADER = ""\n\n') |
362 | 364 | ||
365 | # Write METADATA_REVISION | ||
366 | # Needs distro override so it can override the value set in the bbclass code (later than local.conf) | ||
367 | f.write('METADATA_REVISION:%s = "%s"\n\n' % (d.getVar('DISTRO'), d.getVar('METADATA_REVISION'))) | ||
368 | |||
363 | f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n') | 369 | f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n') |
364 | f.write('WITHIN_EXT_SDK = "1"\n\n') | 370 | f.write('WITHIN_EXT_SDK = "1"\n\n') |
365 | 371 | ||
@@ -664,7 +670,7 @@ sdk_ext_postinst() { | |||
664 | 670 | ||
665 | # A bit of another hack, but we need this in the path only for devtool | 671 | # A bit of another hack, but we need this in the path only for devtool |
666 | # so put it at the end of $PATH. | 672 | # so put it at the end of $PATH. |
667 | echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script | 673 | echo "export PATH=\"$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH\"" >> $env_setup_script |
668 | 674 | ||
669 | echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script | 675 | echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script |
670 | 676 | ||
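The copy_buildsystem change runs devtool with a copy of the environment plus PYTHONDONTWRITEBYTECODE=1, so the child interpreter leaves no __pycache__ litter in the SDK output. The same pattern in miniature (python3 stands in for devtool):

```python
import os
import subprocess

env = os.environ.copy()
env["PYTHONDONTWRITEBYTECODE"] = "1"   # child writes no .pyc files

# A child interpreter sees the flag as sys.dont_write_bytecode == True
subprocess.run(["python3", "-c",
                "import sys; print(sys.dont_write_bytecode)"],
               env=env, check=True)    # prints: True
```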
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass index 87b4c85fc0..c68367449a 100644 --- a/meta/classes/pypi.bbclass +++ b/meta/classes/pypi.bbclass | |||
@@ -24,3 +24,5 @@ S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}" | |||
24 | 24 | ||
25 | UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/" | 25 | UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/" |
26 | UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/" | 26 | UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/" |
27 | |||
28 | CVE_PRODUCT ?= "python:${PYPI_PACKAGE}" | ||
diff --git a/meta/classes/python3targetconfig.bbclass b/meta/classes/python3targetconfig.bbclass index fc1025c207..a6e67f1bf8 100644 --- a/meta/classes/python3targetconfig.bbclass +++ b/meta/classes/python3targetconfig.bbclass | |||
@@ -15,3 +15,15 @@ do_compile_prepend_class-target() { | |||
15 | do_install_prepend_class-target() { | 15 | do_install_prepend_class-target() { |
16 | export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata" | 16 | export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata" |
17 | } | 17 | } |
18 | |||
19 | do_configure:prepend:class-nativesdk() { | ||
20 | export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata" | ||
21 | } | ||
22 | |||
23 | do_compile:prepend:class-nativesdk() { | ||
24 | export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata" | ||
25 | } | ||
26 | |||
27 | do_install:prepend:class-nativesdk() { | ||
28 | export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata" | ||
29 | } | ||
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass index 648af09b6e..92ae69d9f2 100644 --- a/meta/classes/qemuboot.bbclass +++ b/meta/classes/qemuboot.bbclass | |||
@@ -7,6 +7,7 @@ | |||
7 | # QB_OPT_APPEND: options to append to qemu, e.g., "-show-cursor" | 7 | # QB_OPT_APPEND: options to append to qemu, e.g., "-show-cursor" |
8 | # | 8 | # |
9 | # QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage" | 9 | # QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage" |
10 | # e.g., "bzImage-initramfs-qemux86-64.bin" if INITRAMFS_IMAGE_BUNDLE is set to 1. | ||
10 | # | 11 | # |
11 | # QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4" | 12 | # QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4" |
12 | # | 13 | # |
@@ -75,7 +76,7 @@ | |||
75 | 76 | ||
76 | QB_MEM ?= "-m 256" | 77 | QB_MEM ?= "-m 256" |
77 | QB_SERIAL_OPT ?= "-serial mon:stdio -serial null" | 78 | QB_SERIAL_OPT ?= "-serial mon:stdio -serial null" |
78 | QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}" | 79 | QB_DEFAULT_KERNEL ?= "${@bb.utils.contains("INITRAMFS_IMAGE_BUNDLE", "1", "${KERNEL_IMAGETYPE}-${INITRAMFS_LINK_NAME}.bin", "${KERNEL_IMAGETYPE}", d)}" |
79 | QB_DEFAULT_FSTYPE ?= "ext4" | 80 | QB_DEFAULT_FSTYPE ?= "ext4" |
80 | QB_OPT_APPEND ?= "-show-cursor" | 81 | QB_OPT_APPEND ?= "-show-cursor" |
81 | QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@" | 82 | QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@" |
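QB_DEFAULT_KERNEL now switches on INITRAMFS_IMAGE_BUNDLE via bb.utils.contains. Roughly, that helper reduces to the check below (a simplified sketch, not the real API, with example values):

```python
def contains(var_value, checkvalue, truevalue, falsevalue):
    # simplified: bb.utils.contains() tests space-separated membership
    return truevalue if checkvalue in var_value.split() else falsevalue

initramfs_image_bundle = "1"           # example setting
kernel = contains(initramfs_image_bundle, "1",
                  "bzImage-initramfs-qemux86-64.bin",  # bundled initramfs
                  "bzImage")                           # plain kernel
print(kernel)   # bzImage-initramfs-qemux86-64.bin
```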
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass index 1a12db1206..de48e4ff0f 100644 --- a/meta/classes/report-error.bbclass +++ b/meta/classes/report-error.bbclass | |||
@@ -64,6 +64,8 @@ python errorreport_handler () { | |||
64 | data['failures'] = [] | 64 | data['failures'] = [] |
65 | data['component'] = " ".join(e.getPkgs()) | 65 | data['component'] = " ".join(e.getPkgs()) |
66 | data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data)) | 66 | data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data)) |
67 | data['bitbake_version'] = e.data.getVar("BB_VERSION") | ||
68 | data['layer_version'] = get_layers_branch_rev(e.data) | ||
67 | data['local_conf'] = get_conf_data(e, 'local.conf') | 69 | data['local_conf'] = get_conf_data(e, 'local.conf') |
68 | data['auto_conf'] = get_conf_data(e, 'auto.conf') | 70 | data['auto_conf'] = get_conf_data(e, 'auto.conf') |
69 | lock = bb.utils.lockfile(datafile + '.lock') | 71 | lock = bb.utils.lockfile(datafile + '.lock') |
diff --git a/meta/classes/reproducible_build.bbclass b/meta/classes/reproducible_build.bbclass index 2f3bd90b07..3c01dbd5b3 100644 --- a/meta/classes/reproducible_build.bbclass +++ b/meta/classes/reproducible_build.bbclass | |||
@@ -1,17 +1,38 @@ | |||
1 | # reproducible_build.bbclass | 1 | # reproducible_build.bbclass |
2 | # | 2 | # |
3 | # Sets SOURCE_DATE_EPOCH in each component's build environment. | 3 | # Sets the default SOURCE_DATE_EPOCH in each component's build environment. |
4 | # The format is the number of seconds since the system epoch. | ||
5 | # | ||
4 | # Upstream components (generally) respect this environment variable, | 6 | # Upstream components (generally) respect this environment variable, |
5 | # using it in place of the "current" date and time. | 7 | # using it in place of the "current" date and time. |
6 | # See https://reproducible-builds.org/specs/source-date-epoch/ | 8 | # See https://reproducible-builds.org/specs/source-date-epoch/ |
7 | # | 9 | # |
8 | # After sources are unpacked but before they are patched, we set a reproducible value for SOURCE_DATE_EPOCH. | 10 | # The default value of SOURCE_DATE_EPOCH comes from the function |
9 | # This value should be reproducible for anyone who builds the same revision from the same sources. | 11 | # get_source_date_epoch_value which reads from the SDE_FILE, or if the file |
12 | # is not available (or set to 0) will use the fallback of | ||
13 | # SOURCE_DATE_EPOCH_FALLBACK. | ||
14 | # | ||
15 | # The SDE_FILE is normally constructed from the function | ||
16 | # create_source_date_epoch_stamp which is typically added as a postfunc to | ||
17 | # the do_unpack task. If a recipe does NOT have do_unpack, it should be added | ||
18 | # to a task that runs after the source is available and before the | ||
19 | # do_deploy_source_date_epoch task is executed. | ||
20 | # | ||
21 | # If a recipe wishes to override the default behavior it should set its own | ||
22 | # SOURCE_DATE_EPOCH or override the do_deploy_source_date_epoch_stamp task | ||
23 | # with recipe-specific functionality to write the appropriate | ||
24 | # SOURCE_DATE_EPOCH into the SDE_FILE. | ||
25 | # | ||
26 | # SOURCE_DATE_EPOCH is intended to be a reproducible value. This value should | ||
27 | # be reproducible for anyone who builds the same revision from the same | ||
28 | # sources. | ||
10 | # | 29 | # |
11 | # There are 4 ways we determine SOURCE_DATE_EPOCH: | 30 | # There are 4 ways the create_source_date_epoch_stamp function determines what |
31 | # becomes SOURCE_DATE_EPOCH: | ||
12 | # | 32 | # |
13 | # 1. Use the value from __source_date_epoch.txt file if this file exists. | 33 | # 1. Use the value from __source_date_epoch.txt file if this file exists. |
14 | # This file was most likely created in the previous build by one of the following methods 2,3,4. | 34 | # This file was most likely created in the previous build by one of the |
35 | # following methods 2,3,4. | ||
15 | # Alternatively, it can be provided by a recipe via SRC_URI. | 36 | # Alternatively, it can be provided by a recipe via SRC_URI. |
16 | # | 37 | # |
17 | # If the file does not exist: | 38 | # If the file does not exist: |
@@ -22,25 +43,24 @@ | |||
22 | # 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ... | 43 | # 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ... |
23 | # This works for well-kept repositories distributed via tarball. | 44 | # This works for well-kept repositories distributed via tarball. |
24 | # | 45 | # |
25 | # 4. Use the modification time of the youngest file in the source tree, if there is one. | 46 | # 4. Use the modification time of the youngest file in the source tree, if |
47 | # there is one. | ||
26 | # This will be the newest file from the distribution tarball, if any. | 48 | # This will be the newest file from the distribution tarball, if any. |
27 | # | 49 | # |
28 | # 5. Fall back to a fixed timestamp. | 50 | # 5. Fall back to a fixed timestamp (SOURCE_DATE_EPOCH_FALLBACK). |
29 | # | 51 | # |
30 | # Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe's SDE_FILE. | 52 | # Once the value is determined, it is stored in the recipe's SDE_FILE. |
31 | # If none of these mechanisms are suitable, replace the do_deploy_source_date_epoch task | ||
32 | # with recipe-specific functionality to write the appropriate SOURCE_DATE_EPOCH into the SDE_FILE. | ||
33 | # | ||
34 | # If this file is found by other tasks, the value is exported in the SOURCE_DATE_EPOCH variable. | ||
35 | # SOURCE_DATE_EPOCH is set for all tasks that might use it (do_configure, do_compile, do_package, ...) | ||
36 | 53 | ||
37 | BUILD_REPRODUCIBLE_BINARIES ??= '1' | 54 | BUILD_REPRODUCIBLE_BINARIES ??= '1' |
38 | inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')} | 55 | inherit reproducible_build_simple |
39 | 56 | ||
40 | SDE_DIR ="${WORKDIR}/source-date-epoch" | 57 | SDE_DIR = "${WORKDIR}/source-date-epoch" |
41 | SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt" | 58 | SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt" |
42 | SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch" | 59 | SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch" |
43 | 60 | ||
61 | # A SOURCE_DATE_EPOCH of '0' might be misinterpreted as no SDE | ||
62 | export SOURCE_DATE_EPOCH_FALLBACK ??= "1302044400" | ||
63 | |||
44 | SSTATETASKS += "do_deploy_source_date_epoch" | 64 | SSTATETASKS += "do_deploy_source_date_epoch" |
45 | 65 | ||
46 | do_deploy_source_date_epoch () { | 66 | do_deploy_source_date_epoch () { |
@@ -74,45 +94,47 @@ python create_source_date_epoch_stamp() { | |||
74 | import oe.reproducible | 94 | import oe.reproducible |
75 | 95 | ||
76 | epochfile = d.getVar('SDE_FILE') | 96 | epochfile = d.getVar('SDE_FILE') |
77 | # If it exists we need to regenerate as the sources may have changed | 97 | tmp_file = "%s.new" % epochfile |
78 | if os.path.isfile(epochfile): | ||
79 | bb.debug(1, "Deleting existing SOURCE_DATE_EPOCH from: %s" % epochfile) | ||
80 | os.remove(epochfile) | ||
81 | 98 | ||
82 | source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S')) | 99 | source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S')) |
83 | 100 | ||
84 | bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch) | 101 | bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch) |
85 | bb.utils.mkdirhier(d.getVar('SDE_DIR')) | 102 | bb.utils.mkdirhier(d.getVar('SDE_DIR')) |
86 | with open(epochfile, 'w') as f: | 103 | with open(tmp_file, 'w') as f: |
87 | f.write(str(source_date_epoch)) | 104 | f.write(str(source_date_epoch)) |
105 | |||
106 | os.rename(tmp_file, epochfile) | ||
88 | } | 107 | } |
89 | 108 | ||
109 | EPOCHTASK = "do_deploy_source_date_epoch" | ||
110 | |||
111 | # Generate the stamp after do_unpack runs | ||
112 | do_unpack[postfuncs] += "create_source_date_epoch_stamp" | ||
113 | |||
90 | def get_source_date_epoch_value(d): | 114 | def get_source_date_epoch_value(d): |
91 | cached = d.getVar('__CACHED_SOURCE_DATE_EPOCH') | 115 | epochfile = d.getVar('SDE_FILE') |
92 | if cached: | 116 | cached, efile = d.getVar('__CACHED_SOURCE_DATE_EPOCH') or (None, None) |
117 | if cached and efile == epochfile: | ||
93 | return cached | 118 | return cached |
94 | 119 | ||
95 | epochfile = d.getVar('SDE_FILE') | 120 | if cached and epochfile != efile: |
96 | source_date_epoch = 0 | 121 | bb.debug(1, "Epoch file changed from %s to %s" % (efile, epochfile)) |
97 | if os.path.isfile(epochfile): | 122 | |
123 | source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK')) | ||
124 | try: | ||
98 | with open(epochfile, 'r') as f: | 125 | with open(epochfile, 'r') as f: |
99 | s = f.read() | 126 | s = f.read() |
100 | try: | 127 | try: |
101 | source_date_epoch = int(s) | 128 | source_date_epoch = int(s) |
102 | except ValueError: | 129 | except ValueError: |
103 | bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to 0" % s) | 130 | bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % s) |
104 | source_date_epoch = 0 | 131 | source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK')) |
105 | bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch) | 132 | bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch) |
106 | else: | 133 | except FileNotFoundError: |
107 | bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch)) | 134 | bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch)) |
108 | 135 | ||
109 | d.setVar('__CACHED_SOURCE_DATE_EPOCH', str(source_date_epoch)) | 136 | d.setVar('__CACHED_SOURCE_DATE_EPOCH', (str(source_date_epoch), epochfile)) |
110 | return str(source_date_epoch) | 137 | return str(source_date_epoch) |
111 | 138 | ||
112 | export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}" | 139 | export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}" |
113 | BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH" | 140 | BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH" |
114 | |||
115 | python () { | ||
116 | if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1': | ||
117 | d.appendVarFlag("do_unpack", "postfuncs", " create_source_date_epoch_stamp") | ||
118 | } | ||
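Two behaviors change above: the stamp is written to a temporary file and renamed so readers never see a partial SDE_FILE, and the cached value is keyed by the file it came from so a different SDE_FILE invalidates the cache. A condensed sketch of both, with illustrative names and the documented fallback value:

```python
import os

_cache = (None, None)                  # (cached value, file it came from)

def write_epoch(epochfile, epoch):
    tmp = "%s.new" % epochfile
    with open(tmp, "w") as f:
        f.write(str(epoch))
    os.rename(tmp, epochfile)          # atomic on POSIX filesystems

def read_epoch(epochfile, fallback=1302044400):
    global _cache
    value, efile = _cache
    if value is not None and efile == epochfile:
        return value                   # cache hit for this exact file
    try:
        with open(epochfile) as f:
            value = int(f.read())
    except (FileNotFoundError, ValueError):
        value = fallback               # missing or malformed stamp
    _cache = (value, epochfile)
    return value
```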
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass index 01c2ab1c78..24051aa378 100644 --- a/meta/classes/rm_work.bbclass +++ b/meta/classes/rm_work.bbclass | |||
@@ -27,6 +27,13 @@ BB_SCHEDULER ?= "completion" | |||
27 | BB_TASK_IONICE_LEVEL_task-rm_work = "3.0" | 27 | BB_TASK_IONICE_LEVEL_task-rm_work = "3.0" |
28 | 28 | ||
29 | do_rm_work () { | 29 | do_rm_work () { |
30 | # Force using the HOSTTOOLS 'rm' - otherwise the SYSROOT_NATIVE 'rm' can be selected depending on PATH | ||
31 | # Avoids a race condition accessing 'rm' when deleting WORKDIR folders at the end of this function | ||
32 | RM_BIN="$(PATH=${HOSTTOOLS_DIR} command -v rm)" | ||
33 | if [ -z "${RM_BIN}" ]; then | ||
34 | bbfatal "Binary 'rm' not found in HOSTTOOLS_DIR, cannot remove WORKDIR data." | ||
35 | fi | ||
36 | |||
30 | # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe. | 37 | # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe. |
31 | for p in ${RM_WORK_EXCLUDE}; do | 38 | for p in ${RM_WORK_EXCLUDE}; do |
32 | if [ "$p" = "${PN}" ]; then | 39 | if [ "$p" = "${PN}" ]; then |
@@ -73,7 +80,7 @@ do_rm_work () { | |||
73 | # sstate version since otherwise we'd need to leave 'plaindirs' around | 80 | # sstate version since otherwise we'd need to leave 'plaindirs' around |
74 | # such as 'packages' and 'packages-split' and these can be large. No end | 81 | # such as 'packages' and 'packages-split' and these can be large. No end |
75 | # of chain tasks depend directly on do_package anymore. | 82 | # of chain tasks depend directly on do_package anymore. |
76 | rm -f $i; | 83 | "${RM_BIN}" -f -- $i; |
77 | ;; | 84 | ;; |
78 | *_setscene*) | 85 | *_setscene*) |
79 | # Skip stamps which are already setscene versions | 86 | # Skip stamps which are already setscene versions |
@@ -90,7 +97,7 @@ do_rm_work () { | |||
90 | ;; | 97 | ;; |
91 | esac | 98 | esac |
92 | done | 99 | done |
93 | rm -f $i | 100 | "${RM_BIN}" -f -- $i |
94 | esac | 101 | esac |
95 | done | 102 | done |
96 | 103 | ||
@@ -100,9 +107,9 @@ do_rm_work () { | |||
100 | # Retain only logs and other files in temp, safely ignore | 107 | # Retain only logs and other files in temp, safely ignore |
101 | # failures of removing pseudo folders on NFS2/3 server. | 108 | # failures of removing pseudo folders on NFS2/3 server. |
102 | if [ $dir = 'pseudo' ]; then | 109 | if [ $dir = 'pseudo' ]; then |
103 | rm -rf $dir 2> /dev/null || true | 110 | "${RM_BIN}" -rf -- $dir 2> /dev/null || true |
104 | elif ! echo "$excludes" | grep -q -w "$dir"; then | 111 | elif ! echo "$excludes" | grep -q -w "$dir"; then |
105 | rm -rf $dir | 112 | "${RM_BIN}" -rf -- $dir |
106 | fi | 113 | fi |
107 | done | 114 | done |
108 | } | 115 | } |
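The same pin-to-HOSTTOOLS idea can be expressed in Python; a sketch, with hosttools_dir standing in for the value of ${HOSTTOOLS_DIR} (hosttools_binary is an illustrative name, not a helper in the class):

    import shutil

    def hosttools_binary(name, hosttools_dir):
        # Search only the given directory, ignoring the task's normal PATH,
        # so a sysroot that is being deleted can never supply the binary.
        path = shutil.which(name, path=hosttools_dir)
        if path is None:
            raise FileNotFoundError("'%s' not found in %s" % (name, hosttools_dir))
        return path

Usage would follow the shell version: resolve once at the top of the task, e.g. hosttools_binary("rm", d.getVar("HOSTTOOLS_DIR")), then use the absolute path for every removal.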
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass index c43b9a9823..943534c57a 100644 --- a/meta/classes/rootfs-postcommands.bbclass +++ b/meta/classes/rootfs-postcommands.bbclass | |||
@@ -1,6 +1,6 @@ | |||
1 | 1 | ||
2 | # Zap the root password if debug-tweaks feature is not enabled | 2 | # Zap the root password if debug-tweaks feature is not enabled |
3 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}' | 3 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}' |
4 | 4 | ||
5 | # Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled | 5 | # Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled |
6 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}' | 6 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}' |
@@ -12,7 +12,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'deb | |||
12 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}' | 12 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}' |
13 | 13 | ||
14 | # Create /etc/timestamp during image construction to give a reasonably sane default time setting | 14 | # Create /etc/timestamp during image construction to give a reasonably sane default time setting |
15 | ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; " | 15 | ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; " |
16 | 16 | ||
17 | # Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled | 17 | # Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled |
18 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}' | 18 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}' |
@@ -26,7 +26,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only | |||
26 | APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}' | 26 | APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}' |
27 | 27 | ||
28 | # Generates test data file with data store variables expanded in json format | 28 | # Generates test data file with data store variables expanded in json format |
29 | ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; " | 29 | ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; " |
30 | 30 | ||
31 | # Write manifest | 31 | # Write manifest |
32 | IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest" | 32 | IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest" |
@@ -267,9 +267,10 @@ python write_image_manifest () { | |||
267 | 267 | ||
268 | if os.path.exists(manifest_name) and link_name: | 268 | if os.path.exists(manifest_name) and link_name: |
269 | manifest_link = deploy_dir + "/" + link_name + ".manifest" | 269 | manifest_link = deploy_dir + "/" + link_name + ".manifest" |
270 | if os.path.lexists(manifest_link): | 270 | if manifest_link != manifest_name: |
271 | os.remove(manifest_link) | 271 | if os.path.lexists(manifest_link): |
272 | os.symlink(os.path.basename(manifest_name), manifest_link) | 272 | os.remove(manifest_link) |
273 | os.symlink(os.path.basename(manifest_name), manifest_link) | ||
273 | } | 274 | } |
274 | 275 | ||
275 | # Can be used to create /etc/timestamp during image construction to give a reasonably | 276 | # Can be used to create /etc/timestamp during image construction to give a reasonably |
@@ -304,7 +305,7 @@ rootfs_trim_schemas () { | |||
304 | } | 305 | } |
305 | 306 | ||
306 | rootfs_check_host_user_contaminated () { | 307 | rootfs_check_host_user_contaminated () { |
307 | contaminated="${WORKDIR}/host-user-contaminated.txt" | 308 | contaminated="${S}/host-user-contaminated.txt" |
308 | HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)" | 309 | HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)" |
309 | HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)" | 310 | HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)" |
310 | 311 | ||
@@ -339,9 +340,10 @@ python write_image_test_data() { | |||
339 | 340 | ||
340 | if os.path.exists(testdata_name) and link_name: | 341 | if os.path.exists(testdata_name) and link_name: |
341 | testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name) | 342 | testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name) |
342 | if os.path.lexists(testdata_link): | 343 | if testdata_link != testdata_name: |
343 | os.remove(testdata_link) | 344 | if os.path.lexists(testdata_link): |
344 | os.symlink(os.path.basename(testdata_name), testdata_link) | 345 | os.remove(testdata_link) |
346 | os.symlink(os.path.basename(testdata_name), testdata_link) | ||
345 | } | 347 | } |
346 | write_image_test_data[vardepsexclude] += "TOPDIR" | 348 | write_image_test_data[vardepsexclude] += "TOPDIR" |
347 | 349 | ||
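Both hunks above add the same guard: never delete and re-create the link when the link path and the target path are one and the same, which happens when IMAGE_LINK_NAME resolves to the same name as IMAGE_NAME. A sketch of the shared pattern (update_symlink is an illustrative name):

    import os

    def update_symlink(target_path, link_path):
        if link_path == target_path:
            # The "link" is the target itself; removing it first would
            # delete the only copy of the file.
            return
        if os.path.lexists(link_path):   # also true for dangling links
            os.remove(link_path)
        os.symlink(os.path.basename(target_path), link_path)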
diff --git a/meta/classes/rootfsdebugfiles.bbclass b/meta/classes/rootfsdebugfiles.bbclass index e2ba4e3647..85c7ec7434 100644 --- a/meta/classes/rootfsdebugfiles.bbclass +++ b/meta/classes/rootfsdebugfiles.bbclass | |||
@@ -28,7 +28,7 @@ | |||
28 | ROOTFS_DEBUG_FILES ?= "" | 28 | ROOTFS_DEBUG_FILES ?= "" |
29 | ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'" | 29 | ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'" |
30 | 30 | ||
31 | ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;" | 31 | ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;" |
32 | rootfs_debug_files () { | 32 | rootfs_debug_files () { |
33 | #!/bin/sh -e | 33 | #!/bin/sh -e |
34 | echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do | 34 | echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do |
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass index 866d066288..33e5e5952f 100644 --- a/meta/classes/sanity.bbclass +++ b/meta/classes/sanity.bbclass | |||
@@ -392,9 +392,12 @@ def check_connectivity(d): | |||
392 | msg = data.getVar('CONNECTIVITY_CHECK_MSG') or "" | 392 | msg = data.getVar('CONNECTIVITY_CHECK_MSG') or "" |
393 | if len(msg) == 0: | 393 | if len(msg) == 0: |
394 | msg = "%s.\n" % err | 394 | msg = "%s.\n" % err |
395 | msg += " Please ensure your host's network is configured correctly,\n" | 395 | msg += " Please ensure your host's network is configured correctly.\n" |
396 | msg += " or set BB_NO_NETWORK = \"1\" to disable network access if\n" | 396 | msg += " If your ISP or network is blocking the above URL,\n" |
397 | msg += " all required sources are on local disk.\n" | 397 | msg += " try with another domain name, for example by setting:\n" |
398 | msg += " CONNECTIVITY_CHECK_URIS = \"https://www.example.com/\"" | ||
399 | msg += " You could also set BB_NO_NETWORK = \"1\" to disable network\n" | ||
400 | msg += " access if all required sources are on local disk.\n" | ||
398 | retval = msg | 401 | retval = msg |
399 | 402 | ||
400 | return retval | 403 | return retval |
@@ -558,6 +561,14 @@ def check_tar_version(sanity_data): | |||
558 | version = result.split()[3] | 561 | version = result.split()[3] |
559 | if LooseVersion(version) < LooseVersion("1.28"): | 562 | if LooseVersion(version) < LooseVersion("1.28"): |
560 | return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n" | 563 | return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n" |
564 | |||
565 | try: | ||
566 | result = subprocess.check_output(["tar", "--help"], stderr=subprocess.STDOUT).decode('utf-8') | ||
567 | if "--xattrs" not in result: | ||
568 | return "Your tar doesn't support --xattrs, please use GNU tar.\n" | ||
569 | except subprocess.CalledProcessError as e: | ||
570 | return "Unable to execute tar --help, exit code %d\n%s\n" % (e.returncode, e.output) | ||
571 | |||
561 | return None | 572 | return None |
562 | 573 | ||
563 | # We use git parameters and functionality only found in 1.7.8 or later | 574 | # We use git parameters and functionality only found in 1.7.8 or later |
@@ -882,13 +893,18 @@ def check_sanity_everybuild(status, d): | |||
882 | except: | 893 | except: |
883 | pass | 894 | pass |
884 | 895 | ||
885 | oeroot = d.getVar('COREBASE') | 896 | for checkdir in ['COREBASE', 'TMPDIR']: |
886 | if oeroot.find('+') != -1: | 897 | val = d.getVar(checkdir) |
887 | status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.") | 898 | if val.find('..') != -1: |
888 | if oeroot.find('@') != -1: | 899 | status.addresult("Error, you have '..' in your %s directory path. Please ensure the variable contains an absolute path as this can break some recipe builds in obtuse ways." % checkdir) |
889 | status.addresult("Error, you have an invalid character (@) in your COREBASE directory path. Please move the installation to a directory which doesn't include any @ characters.") | 900 | if val.find('+') != -1: |
890 | if oeroot.find(' ') != -1: | 901 | status.addresult("Error, you have an invalid character (+) in your %s directory path. Please move the installation to a directory which doesn't include any + characters." % checkdir) |
891 | status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.") | 902 | if val.find('@') != -1: |
903 | status.addresult("Error, you have an invalid character (@) in your %s directory path. Please move the installation to a directory which doesn't include any @ characters." % checkdir) | ||
904 | if val.find(' ') != -1: | ||
905 | status.addresult("Error, you have a space in your %s directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this." % checkdir) | ||
906 | if val.find('%') != -1: | ||
907 | status.addresult("Error, you have an invalid character (%) in your %s directory path which causes problems with python string formatting. Please move the installation to a directory which doesn't include any % characters." % checkdir) | ||
892 | 908 | ||
893 | # Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS | 909 | # Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS |
894 | import re | 910 | import re |
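Collapsing the per-character checks into one loop over COREBASE and TMPDIR also makes the rule set easy to state on its own; a condensed sketch with messages abbreviated from the class's wording:

    def path_problems(name, val):
        # Characters the sanity check now rejects, with the reason each breaks builds.
        problems = []
        if '..' in val:
            problems.append("'..' in %s: use an absolute, normalized path" % name)
        if '+' in val or '@' in val:
            problems.append("'+' or '@' in %s: breaks some recipe builds" % name)
        if ' ' in val:
            problems.append("space in %s: autotools cannot handle it" % name)
        if '%' in val:
            problems.append("'%%' in %s: breaks python string formatting" % name)
        return problems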
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass index a8e169a10b..1058778980 100644 --- a/meta/classes/sstate.bbclass +++ b/meta/classes/sstate.bbclass | |||
@@ -20,7 +20,7 @@ def generate_sstatefn(spec, hash, taskname, siginfo, d): | |||
20 | components = spec.split(":") | 20 | components = spec.split(":") |
21 | # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information | 21 | # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information |
22 | # 7 is for the separators | 22 | # 7 is for the separators |
23 | avail = (254 - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3 | 23 | avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3 |
24 | components[2] = components[2][:avail] | 24 | components[2] = components[2][:avail] |
25 | components[3] = components[3][:avail] | 25 | components[3] = components[3][:avail] |
26 | components[4] = components[4][:avail] | 26 | components[4] = components[4][:avail] |
@@ -123,8 +123,6 @@ SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \ | |||
123 | python () { | 123 | python () { |
124 | if bb.data.inherits_class('native', d): | 124 | if bb.data.inherits_class('native', d): |
125 | d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False)) | 125 | d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False)) |
126 | if d.getVar("PN") == "pseudo-native": | ||
127 | d.appendVar('SSTATE_PKGARCH', '_${ORIGNATIVELSBSTRING}') | ||
128 | elif bb.data.inherits_class('crosssdk', d): | 126 | elif bb.data.inherits_class('crosssdk', d): |
129 | d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}")) | 127 | d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}")) |
130 | elif bb.data.inherits_class('cross', d): | 128 | elif bb.data.inherits_class('cross', d): |
@@ -319,6 +317,8 @@ def sstate_install(ss, d): | |||
319 | if os.path.exists(i): | 317 | if os.path.exists(i): |
320 | with open(i, "r") as f: | 318 | with open(i, "r") as f: |
321 | manifests = f.readlines() | 319 | manifests = f.readlines() |
320 | # We append new entries, we don't remove older entries which may have the same | ||
321 | # manifest name but different versions from stamp/workdir. See below. | ||
322 | if filedata not in manifests: | 322 | if filedata not in manifests: |
323 | with open(i, "a+") as f: | 323 | with open(i, "a+") as f: |
324 | f.write(filedata) | 324 | f.write(filedata) |
@@ -481,7 +481,7 @@ def sstate_clean_cachefiles(d): | |||
481 | ss = sstate_state_fromvars(ld, task) | 481 | ss = sstate_state_fromvars(ld, task) |
482 | sstate_clean_cachefile(ss, ld) | 482 | sstate_clean_cachefile(ss, ld) |
483 | 483 | ||
484 | def sstate_clean_manifest(manifest, d, prefix=None): | 484 | def sstate_clean_manifest(manifest, d, canrace=False, prefix=None): |
485 | import oe.path | 485 | import oe.path |
486 | 486 | ||
487 | mfile = open(manifest) | 487 | mfile = open(manifest) |
@@ -499,7 +499,9 @@ def sstate_clean_manifest(manifest, d, prefix=None): | |||
499 | if entry.endswith("/"): | 499 | if entry.endswith("/"): |
500 | if os.path.islink(entry[:-1]): | 500 | if os.path.islink(entry[:-1]): |
501 | os.remove(entry[:-1]) | 501 | os.remove(entry[:-1]) |
502 | elif os.path.exists(entry) and len(os.listdir(entry)) == 0: | 502 | elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace: |
503 | # Removing directories whilst builds are in progress exposes a race. Only | ||
504 | # do it in contexts where it is safe to do so. | ||
503 | os.rmdir(entry[:-1]) | 505 | os.rmdir(entry[:-1]) |
504 | else: | 506 | else: |
505 | os.remove(entry) | 507 | os.remove(entry) |
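A condensed sketch of the per-entry removal logic with the new canrace parameter, assuming entry is one manifest line and directories are recorded with a trailing '/':

    import os

    def remove_manifest_entry(entry, canrace):
        if entry.endswith("/"):
            if os.path.islink(entry[:-1]):
                os.remove(entry[:-1])
            elif os.path.exists(entry) and not os.listdir(entry) and not canrace:
                # Pruning empty directories is only safe when no concurrent
                # build can be repopulating them.
                os.rmdir(entry[:-1])
        else:
            os.remove(entry)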
@@ -537,7 +539,7 @@ def sstate_clean(ss, d): | |||
537 | for lock in ss['lockfiles']: | 539 | for lock in ss['lockfiles']: |
538 | locks.append(bb.utils.lockfile(lock)) | 540 | locks.append(bb.utils.lockfile(lock)) |
539 | 541 | ||
540 | sstate_clean_manifest(manifest, d) | 542 | sstate_clean_manifest(manifest, d, canrace=True) |
541 | 543 | ||
542 | for lock in locks: | 544 | for lock in locks: |
543 | bb.utils.unlockfile(lock) | 545 | bb.utils.unlockfile(lock) |
@@ -638,10 +640,21 @@ python sstate_hardcode_path () { | |||
638 | 640 | ||
639 | def sstate_package(ss, d): | 641 | def sstate_package(ss, d): |
640 | import oe.path | 642 | import oe.path |
643 | import time | ||
641 | 644 | ||
642 | tmpdir = d.getVar('TMPDIR') | 645 | tmpdir = d.getVar('TMPDIR') |
643 | 646 | ||
647 | fixtime = False | ||
648 | if ss['task'] == "package": | ||
649 | fixtime = True | ||
650 | |||
651 | def fixtimestamp(root, path): | ||
652 | f = os.path.join(root, path) | ||
653 | if os.lstat(f).st_mtime > sde: | ||
654 | os.utime(f, (sde, sde), follow_symlinks=False) | ||
655 | |||
644 | sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task']) | 656 | sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task']) |
657 | sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time()) | ||
645 | d.setVar("SSTATE_CURRTASK", ss['task']) | 658 | d.setVar("SSTATE_CURRTASK", ss['task']) |
646 | bb.utils.remove(sstatebuild, recurse=True) | 659 | bb.utils.remove(sstatebuild, recurse=True) |
647 | bb.utils.mkdirhier(sstatebuild) | 660 | bb.utils.mkdirhier(sstatebuild) |
@@ -654,6 +667,8 @@ def sstate_package(ss, d): | |||
654 | # to sstate tasks but there aren't many of these so better just avoid them entirely. | 667 | # to sstate tasks but there aren't many of these so better just avoid them entirely. |
655 | for walkroot, dirs, files in os.walk(state[1]): | 668 | for walkroot, dirs, files in os.walk(state[1]): |
656 | for file in files + dirs: | 669 | for file in files + dirs: |
670 | if fixtime: | ||
671 | fixtimestamp(walkroot, file) | ||
657 | srcpath = os.path.join(walkroot, file) | 672 | srcpath = os.path.join(walkroot, file) |
658 | if not os.path.islink(srcpath): | 673 | if not os.path.islink(srcpath): |
659 | continue | 674 | continue |
@@ -675,6 +690,11 @@ def sstate_package(ss, d): | |||
675 | bb.utils.mkdirhier(plain) | 690 | bb.utils.mkdirhier(plain) |
676 | bb.utils.mkdirhier(pdir) | 691 | bb.utils.mkdirhier(pdir) |
677 | os.rename(plain, pdir) | 692 | os.rename(plain, pdir) |
693 | if fixtime: | ||
694 | fixtimestamp(pdir, "") | ||
695 | for walkroot, dirs, files in os.walk(pdir): | ||
696 | for file in files + dirs: | ||
697 | fixtimestamp(walkroot, file) | ||
678 | 698 | ||
679 | d.setVar('SSTATE_BUILDDIR', sstatebuild) | 699 | d.setVar('SSTATE_BUILDDIR', sstatebuild) |
680 | d.setVar('SSTATE_INSTDIR', sstatebuild) | 700 | d.setVar('SSTATE_INSTDIR', sstatebuild) |
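Applied over a whole tree, the fixtimestamp helper amounts to the following sketch, assuming sde already holds the integer SOURCE_DATE_EPOCH:

    import os

    def clamp_mtimes(tree, sde):
        # Pull any timestamp newer than SOURCE_DATE_EPOCH back to it so
        # do_package output is byte-for-byte reproducible. Never follow
        # symlinks: the link itself, not its target, gets the new time.
        for root, dirs, files in os.walk(tree):
            for name in files + dirs:
                p = os.path.join(root, name)
                if os.lstat(p).st_mtime > sde:
                    os.utime(p, (sde, sde), follow_symlinks=False)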
@@ -701,9 +721,16 @@ def sstate_package(ss, d): | |||
701 | os.utime(siginfo, None) | 721 | os.utime(siginfo, None) |
702 | except PermissionError: | 722 | except PermissionError: |
703 | pass | 723 | pass |
724 | except OSError as e: | ||
725 | # Handle read-only file systems gracefully | ||
726 | import errno | ||
727 | if e.errno != errno.EROFS: | ||
728 | raise e | ||
704 | 729 | ||
705 | return | 730 | return |
706 | 731 | ||
732 | sstate_package[vardepsexclude] += "SSTATE_SIG_KEY" | ||
733 | |||
707 | def pstaging_fetch(sstatefetch, d): | 734 | def pstaging_fetch(sstatefetch, d): |
708 | import bb.fetch2 | 735 | import bb.fetch2 |
709 | 736 | ||
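The same tolerant-touch idiom is added twice in this file (here and in sstate_eventhandler below); extracted, it looks like this sketch:

    import errno
    import os

    def touch_tolerant(path):
        # Refresh the timestamp, but treat the two expected failure modes
        # on shared sstate mirrors as non-fatal: files we don't own
        # (PermissionError) and read-only filesystems (EROFS).
        try:
            os.utime(path, None)
        except PermissionError:
            pass
        except OSError as e:
            if e.errno != errno.EROFS:
                raise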
@@ -787,7 +814,7 @@ sstate_task_postfunc[dirs] = "${WORKDIR}" | |||
787 | sstate_create_package () { | 814 | sstate_create_package () { |
788 | # Exit early if it already exists | 815 | # Exit early if it already exists |
789 | if [ -e ${SSTATE_PKG} ]; then | 816 | if [ -e ${SSTATE_PKG} ]; then |
790 | [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG} | 817 | touch ${SSTATE_PKG} 2>/dev/null || true |
791 | return | 818 | return |
792 | fi | 819 | fi |
793 | 820 | ||
@@ -814,14 +841,18 @@ sstate_create_package () { | |||
814 | fi | 841 | fi |
815 | chmod 0664 $TFILE | 842 | chmod 0664 $TFILE |
816 | # Skip if it was already created by some other process | 843 | # Skip if it was already created by some other process |
817 | if [ ! -e ${SSTATE_PKG} ]; then | 844 | if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then |
845 | # There is a symbolic link, but it links to nothing. | ||
846 | # Forcefully replace it with the new file. | ||
847 | ln -f $TFILE ${SSTATE_PKG} || true | ||
848 | elif [ ! -e ${SSTATE_PKG} ]; then | ||
818 | # Move into place using ln to attempt an atomic op. | 849 | # Move into place using ln to attempt an atomic op. |
819 | # Abort if it already exists | 850 | # Abort if it already exists |
820 | ln $TFILE ${SSTATE_PKG} && rm $TFILE | 851 | ln $TFILE ${SSTATE_PKG} || true |
821 | else | 852 | else |
822 | rm $TFILE | 853 | touch ${SSTATE_PKG} 2>/dev/null || true |
823 | fi | 854 | fi |
824 | [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG} | 855 | rm $TFILE |
825 | } | 856 | } |
826 | 857 | ||
827 | python sstate_sign_package () { | 858 | python sstate_sign_package () { |
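The reworked shell logic above distinguishes three cases: a dangling symlink (a stale mirror entry), no file at all, and a file already published by a racing build. A Python rendering of the same decision tree, non-atomic where noted (the shell version relies on ln for atomicity):

    import os

    def publish_sstate(tmpfile, pkg):
        if os.path.islink(pkg) and not os.path.exists(pkg):
            # Dangling symlink: replace it outright ('ln -f' in the shell
            # version; remove+link here is not atomic, only illustrative).
            os.remove(pkg)
            os.link(tmpfile, pkg)
        elif not os.path.exists(pkg):
            try:
                os.link(tmpfile, pkg)  # atomic; losing the race is fine
            except FileExistsError:
                pass
        else:
            os.utime(pkg, None)  # someone else won; just freshen its mtime
        os.remove(tmpfile)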
@@ -850,12 +881,12 @@ python sstate_report_unihash() { | |||
850 | # | 881 | # |
851 | sstate_unpack_package () { | 882 | sstate_unpack_package () { |
852 | tar -xvzf ${SSTATE_PKG} | 883 | tar -xvzf ${SSTATE_PKG} |
853 | # update .siginfo atime on local/NFS mirror | 884 | # update .siginfo atime on local/NFS mirror if it is a symbolic link |
854 | [ -O ${SSTATE_PKG}.siginfo ] && [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo | 885 | [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true |
855 | # Use "! -w ||" to return true for read only files | 886 | # update each symbolic link instead of any referenced file |
856 | [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG} | 887 | touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true |
857 | [ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig | 888 | [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true |
858 | [ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo | 889 | [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true |
859 | } | 890 | } |
860 | 891 | ||
861 | BB_HASHCHECK_FUNCTION = "sstate_checkhashes" | 892 | BB_HASHCHECK_FUNCTION = "sstate_checkhashes" |
@@ -930,7 +961,7 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, | |||
930 | 961 | ||
931 | localdata2 = bb.data.createCopy(localdata) | 962 | localdata2 = bb.data.createCopy(localdata) |
932 | srcuri = "file://" + sstatefile | 963 | srcuri = "file://" + sstatefile |
933 | localdata.setVar('SRC_URI', srcuri) | 964 | localdata2.setVar('SRC_URI', srcuri) |
934 | bb.debug(2, "SState: Attempting to fetch %s" % srcuri) | 965 | bb.debug(2, "SState: Attempting to fetch %s" % srcuri) |
935 | 966 | ||
936 | try: | 967 | try: |
@@ -941,10 +972,11 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, | |||
941 | found.add(tid) | 972 | found.add(tid) |
942 | if tid in missed: | 973 | if tid in missed: |
943 | missed.remove(tid) | 974 | missed.remove(tid) |
944 | except: | 975 | except bb.fetch2.FetchError as e: |
945 | missed.add(tid) | 976 | missed.add(tid) |
946 | bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri) | 977 | bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)" % (srcuri, e)) |
947 | pass | 978 | except Exception as e: |
979 | bb.error("SState: cannot test %s: %s" % (srcuri, e)) | ||
948 | if len(tasklist) >= min_tasks: | 980 | if len(tasklist) >= min_tasks: |
949 | bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d) | 981 | bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d) |
950 | 982 | ||
@@ -1006,6 +1038,7 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, | |||
1006 | bb.parse.siggen.checkhashes(sq_data, missed, found, d) | 1038 | bb.parse.siggen.checkhashes(sq_data, missed, found, d) |
1007 | 1039 | ||
1008 | return found | 1040 | return found |
1041 | setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT" | ||
1009 | 1042 | ||
1010 | BB_SETSCENE_DEPVALID = "setscene_depvalid" | 1043 | BB_SETSCENE_DEPVALID = "setscene_depvalid" |
1011 | 1044 | ||
@@ -1031,6 +1064,10 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None): | |||
1031 | if taskdependees[task][1] == "do_populate_lic": | 1064 | if taskdependees[task][1] == "do_populate_lic": |
1032 | return True | 1065 | return True |
1033 | 1066 | ||
1067 | # We only need to trigger deploy_source_date_epoch through direct dependencies | ||
1068 | if taskdependees[task][1] == "do_deploy_source_date_epoch": | ||
1069 | return True | ||
1070 | |||
1034 | # stash_locale and gcc_stash_builddir are never needed as a dependency for built objects | 1071 | # stash_locale and gcc_stash_builddir are never needed as a dependency for built objects |
1035 | if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir": | 1072 | if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir": |
1036 | return True | 1073 | return True |
@@ -1137,6 +1174,11 @@ python sstate_eventhandler() { | |||
1137 | os.utime(siginfo, None) | 1174 | os.utime(siginfo, None) |
1138 | except PermissionError: | 1175 | except PermissionError: |
1139 | pass | 1176 | pass |
1177 | except OSError as e: | ||
1178 | # Handle read-only file systems gracefully | ||
1179 | import errno | ||
1180 | if e.errno != errno.EROFS: | ||
1181 | raise e | ||
1140 | 1182 | ||
1141 | } | 1183 | } |
1142 | 1184 | ||
@@ -1175,11 +1217,21 @@ python sstate_eventhandler2() { | |||
1175 | i = d.expand("${SSTATE_MANIFESTS}/index-" + a) | 1217 | i = d.expand("${SSTATE_MANIFESTS}/index-" + a) |
1176 | if not os.path.exists(i): | 1218 | if not os.path.exists(i): |
1177 | continue | 1219 | continue |
1220 | manseen = set() | ||
1221 | ignore = [] | ||
1178 | with open(i, "r") as f: | 1222 | with open(i, "r") as f: |
1179 | lines = f.readlines() | 1223 | lines = f.readlines() |
1180 | for l in lines: | 1224 | for l in reversed(lines): |
1181 | try: | 1225 | try: |
1182 | (stamp, manifest, workdir) = l.split() | 1226 | (stamp, manifest, workdir) = l.split() |
1227 | # The index may have multiple entries for the same manifest as the code above only appends | ||
1228 | # new entries and there may be an entry with matching manifest but differing version in stamp/workdir. | ||
1229 | # The last entry in the list is the valid one; any earlier entries with matching manifests | ||
1230 | # should be ignored. | ||
1231 | if manifest in manseen: | ||
1232 | ignore.append(l) | ||
1233 | continue | ||
1234 | manseen.add(manifest) | ||
1183 | if stamp not in stamps and stamp not in preservestamps and stamp in machineindex: | 1235 | if stamp not in stamps and stamp not in preservestamps and stamp in machineindex: |
1184 | toremove.append(l) | 1236 | toremove.append(l) |
1185 | if stamp not in seen: | 1237 | if stamp not in seen: |
@@ -1210,6 +1262,8 @@ python sstate_eventhandler2() { | |||
1210 | 1262 | ||
1211 | with open(i, "w") as f: | 1263 | with open(i, "w") as f: |
1212 | for l in lines: | 1264 | for l in lines: |
1265 | if l in ignore: | ||
1266 | continue | ||
1213 | f.write(l) | 1267 | f.write(l) |
1214 | machineindex |= set(stamps) | 1268 | machineindex |= set(stamps) |
1215 | with open(mi, "w") as f: | 1269 | with open(mi, "w") as f: |
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass index 506ce0665e..21523c8f75 100644 --- a/meta/classes/staging.bbclass +++ b/meta/classes/staging.bbclass | |||
@@ -267,6 +267,10 @@ python extend_recipe_sysroot() { | |||
267 | pn = d.getVar("PN") | 267 | pn = d.getVar("PN") |
268 | stagingdir = d.getVar("STAGING_DIR") | 268 | stagingdir = d.getVar("STAGING_DIR") |
269 | sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests" | 269 | sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests" |
270 | # only needed by multilib cross-canadian since it redefines RECIPE_SYSROOT | ||
271 | manifestprefix = d.getVar("RECIPE_SYSROOT_MANIFEST_SUBDIR") | ||
272 | if manifestprefix: | ||
273 | sharedmanifests = sharedmanifests + "/" + manifestprefix | ||
270 | recipesysroot = d.getVar("RECIPE_SYSROOT") | 274 | recipesysroot = d.getVar("RECIPE_SYSROOT") |
271 | recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE") | 275 | recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE") |
272 | 276 | ||
@@ -408,7 +412,7 @@ python extend_recipe_sysroot() { | |||
408 | if os.path.islink(f) and not os.path.exists(f): | 412 | if os.path.islink(f) and not os.path.exists(f): |
409 | bb.note("%s no longer exists, removing from sysroot" % f) | 413 | bb.note("%s no longer exists, removing from sysroot" % f) |
410 | lnk = os.readlink(f.replace(".complete", "")) | 414 | lnk = os.readlink(f.replace(".complete", "")) |
411 | sstate_clean_manifest(depdir + "/" + lnk, d, workdir) | 415 | sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir) |
412 | os.unlink(f) | 416 | os.unlink(f) |
413 | os.unlink(f.replace(".complete", "")) | 417 | os.unlink(f.replace(".complete", "")) |
414 | 418 | ||
@@ -453,7 +457,7 @@ python extend_recipe_sysroot() { | |||
453 | fl = depdir + "/" + l | 457 | fl = depdir + "/" + l |
454 | bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l)) | 458 | bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l)) |
455 | lnk = os.readlink(fl) | 459 | lnk = os.readlink(fl) |
456 | sstate_clean_manifest(depdir + "/" + lnk, d, workdir) | 460 | sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir) |
457 | os.unlink(fl) | 461 | os.unlink(fl) |
458 | os.unlink(fl + ".complete") | 462 | os.unlink(fl + ".complete") |
459 | 463 | ||
@@ -474,7 +478,7 @@ python extend_recipe_sysroot() { | |||
474 | continue | 478 | continue |
475 | else: | 479 | else: |
476 | bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash)) | 480 | bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash)) |
477 | sstate_clean_manifest(depdir + "/" + lnk, d, workdir) | 481 | sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir) |
478 | os.unlink(depdir + "/" + c) | 482 | os.unlink(depdir + "/" + c) |
479 | if os.path.lexists(depdir + "/" + c + ".complete"): | 483 | if os.path.lexists(depdir + "/" + c + ".complete"): |
480 | os.unlink(depdir + "/" + c + ".complete") | 484 | os.unlink(depdir + "/" + c + ".complete") |
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass index c709384b91..7c8b2b30a1 100644 --- a/meta/classes/testimage.bbclass +++ b/meta/classes/testimage.bbclass | |||
@@ -99,30 +99,9 @@ TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/" | |||
99 | TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR" | 99 | TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR" |
100 | 100 | ||
101 | testimage_dump_target () { | 101 | testimage_dump_target () { |
102 | top -bn1 | ||
103 | ps | ||
104 | free | ||
105 | df | ||
106 | # The next command will export the default gateway IP | ||
107 | export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}') | ||
108 | ping -c3 $DEFAULT_GATEWAY | ||
109 | dmesg | ||
110 | netstat -an | ||
111 | ip address | ||
112 | # Next command will dump logs from /var/log/ | ||
113 | find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \; | ||
114 | } | 102 | } |
115 | 103 | ||
116 | testimage_dump_host () { | 104 | testimage_dump_host () { |
117 | top -bn1 | ||
118 | iostat -x -z -N -d -p ALL 20 2 | ||
119 | ps -ef | ||
120 | free | ||
121 | df | ||
122 | memstat | ||
123 | dmesg | ||
124 | ip -s link | ||
125 | netstat -an | ||
126 | } | 105 | } |
127 | 106 | ||
128 | python do_testimage() { | 107 | python do_testimage() { |
@@ -193,6 +172,7 @@ def testimage_main(d): | |||
193 | import json | 172 | import json |
194 | import signal | 173 | import signal |
195 | import logging | 174 | import logging |
175 | import shutil | ||
196 | 176 | ||
197 | from bb.utils import export_proxies | 177 | from bb.utils import export_proxies |
198 | from oeqa.core.utils.misc import updateTestData | 178 | from oeqa.core.utils.misc import updateTestData |
@@ -228,9 +208,10 @@ def testimage_main(d): | |||
228 | 208 | ||
229 | tdname = "%s.testdata.json" % image_name | 209 | tdname = "%s.testdata.json" % image_name |
230 | try: | 210 | try: |
231 | td = json.load(open(tdname, "r")) | 211 | with open(tdname, "r") as f: |
232 | except (FileNotFoundError) as err: | 212 | td = json.load(f) |
233 | bb.fatal('File %s Not Found. Have you built the image with INHERIT+="testimage" in the conf/local.conf?' % tdname) | 213 | except FileNotFoundError as err: |
214 | bb.fatal('File %s not found (%s).\nHave you built the image with INHERIT += "testimage" in the conf/local.conf?' % (tdname, err)) | ||
234 | 215 | ||
235 | # Some variables need to be updated (mostly paths) with the | 216 | # Some variables need to be updated (mostly paths) with the |
236 | # ones of the current environment because some tests require them. | 217 | # ones of the current environment because some tests require them. |
@@ -397,10 +378,17 @@ def testimage_main(d): | |||
397 | get_testimage_result_id(configuration), | 378 | get_testimage_result_id(configuration), |
398 | dump_streams=d.getVar('TESTREPORT_FULLLOGS')) | 379 | dump_streams=d.getVar('TESTREPORT_FULLLOGS')) |
399 | results.logSummary(pn) | 380 | results.logSummary(pn) |
381 | |||
382 | # Copy additional logs to tmp/log/oeqa so it's easier to find them | ||
383 | targetdir = os.path.join(get_testimage_json_result_dir(d), d.getVar("PN")) | ||
384 | os.makedirs(targetdir, exist_ok=True) | ||
385 | os.symlink(bootlog, os.path.join(targetdir, os.path.basename(bootlog))) | ||
386 | os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME')))) | ||
387 | |||
400 | if not results or not complete: | 388 | if not results or not complete: |
401 | bb.fatal('%s - FAILED - tests were interrupted during execution' % pn, forcelog=True) | 389 | bb.fatal('%s - FAILED - tests were interrupted during execution, check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True) |
402 | if not results.wasSuccessful(): | 390 | if not results.wasSuccessful(): |
403 | bb.fatal('%s - FAILED - check the task log and the ssh log' % pn, forcelog=True) | 391 | bb.fatal('%s - FAILED - also check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True) |
404 | 392 | ||
405 | def get_runtime_paths(d): | 393 | def get_runtime_paths(d): |
406 | """ | 394 | """ |
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass index db1d3215ef..21762b803b 100644 --- a/meta/classes/toolchain-scripts.bbclass +++ b/meta/classes/toolchain-scripts.bbclass | |||
@@ -29,7 +29,7 @@ toolchain_create_sdk_env_script () { | |||
29 | echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script | 29 | echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script |
30 | echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script | 30 | echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script |
31 | echo '# Only disable this check if you are absolutely know what you are doing!' >> $script | 31 | echo '# Only disable this check if you are absolutely know what you are doing!' >> $script |
32 | echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script | 32 | echo 'if [ ! -z "${LD_LIBRARY_PATH:-}" ]; then' >> $script |
33 | echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script | 33 | echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script |
34 | echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script | 34 | echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script |
35 | echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script | 35 | echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script |
@@ -44,7 +44,7 @@ toolchain_create_sdk_env_script () { | |||
44 | for i in ${CANADIANEXTRAOS}; do | 44 | for i in ${CANADIANEXTRAOS}; do |
45 | EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i" | 45 | EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i" |
46 | done | 46 | done |
47 | echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script | 47 | echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':"$PATH"' >> $script |
48 | echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script | 48 | echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script |
49 | echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script | 49 | echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script |
50 | echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script | 50 | echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script |
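Both quoting fixes above serve the same goal: the generated environment script must survive shells running with set -u and PATH values containing spaces. A sketch of emitting the guard with the safe expansion, the echoed message abbreviated from the script's wording:

    def emit_ld_library_path_guard(script):
        # "${LD_LIBRARY_PATH:-}" expands to empty when unset, so the test
        # no longer aborts shells running under 'set -u'.
        with open(script, "a") as f:
            f.write('if [ ! -z "${LD_LIBRARY_PATH:-}" ]; then\n')
            f.write('    echo "Please unset LD_LIBRARY_PATH before sourcing this script"\n')
            f.write('    return 1\n')
            f.write('fi\n')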
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass index 1e19917a97..4d4f53ad4d 100644 --- a/meta/classes/uninative.bbclass +++ b/meta/classes/uninative.bbclass | |||
@@ -2,7 +2,7 @@ UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/ | |||
2 | UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}" | 2 | UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}" |
3 | 3 | ||
4 | UNINATIVE_URL ?= "unset" | 4 | UNINATIVE_URL ?= "unset" |
5 | UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.xz" | 5 | UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc-${UNINATIVE_VERSION}.tar.xz" |
6 | # Example checksums | 6 | # Example checksums |
7 | #UNINATIVE_CHECKSUM[aarch64] = "dead" | 7 | #UNINATIVE_CHECKSUM[aarch64] = "dead" |
8 | #UNINATIVE_CHECKSUM[i686] = "dead" | 8 | #UNINATIVE_CHECKSUM[i686] = "dead" |
@@ -34,6 +34,8 @@ python uninative_event_fetchloader() { | |||
34 | with open(loaderchksum, "r") as f: | 34 | with open(loaderchksum, "r") as f: |
35 | readchksum = f.read().strip() | 35 | readchksum = f.read().strip() |
36 | if readchksum == chksum: | 36 | if readchksum == chksum: |
37 | if "uninative" not in d.getVar("SSTATEPOSTUNPACKFUNCS"): | ||
38 | enable_uninative(d) | ||
37 | return | 39 | return |
38 | 40 | ||
39 | import subprocess | 41 | import subprocess |
@@ -100,7 +102,7 @@ ${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \ | |||
100 | ${UNINATIVE_LOADER} \ | 102 | ${UNINATIVE_LOADER} \ |
101 | ${UNINATIVE_LOADER} \ | 103 | ${UNINATIVE_LOADER} \ |
102 | ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \ | 104 | ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \ |
103 | ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum) | 105 | ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so*" % chksum) |
104 | subprocess.check_output(cmd, shell=True) | 106 | subprocess.check_output(cmd, shell=True) |
105 | 107 | ||
106 | with open(loaderchksum, "w") as f: | 108 | with open(loaderchksum, "w") as f: |
@@ -167,5 +169,7 @@ python uninative_changeinterp () { | |||
167 | if not elf.isDynamic(): | 169 | if not elf.isDynamic(): |
168 | continue | 170 | continue |
169 | 171 | ||
172 | os.chmod(f, s[stat.ST_MODE] | stat.S_IWUSR) | ||
170 | subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT) | 173 | subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT) |
174 | os.chmod(f, s[stat.ST_MODE]) | ||
171 | } | 175 | } |
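The chmod pair added around the patchelf call guarantees the loader can be rewritten even when the unpacked file arrived read-only, then restores the recorded mode. As a reusable sketch, with a try/finally added so a patchelf failure still restores the original permissions:

    import os
    import stat
    import subprocess

    def set_interpreter(f, loader):
        mode = os.stat(f).st_mode
        os.chmod(f, mode | stat.S_IWUSR)  # ensure the owner can write
        try:
            subprocess.check_output(
                ("patchelf-uninative", "--set-interpreter", loader, f),
                stderr=subprocess.STDOUT)
        finally:
            os.chmod(f, mode)  # put the original permissions back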
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass index 3a1b5f1320..908b24969f 100644 --- a/meta/classes/useradd-staticids.bbclass +++ b/meta/classes/useradd-staticids.bbclass | |||
@@ -41,7 +41,7 @@ def update_useradd_static_config(d): | |||
41 | def handle_missing_id(id, type, pkg, files, var, value): | 41 | def handle_missing_id(id, type, pkg, files, var, value): |
42 | # For backwards compatibility we accept "1" in addition to "error" | 42 | # For backwards compatibility we accept "1" in addition to "error" |
43 | error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC') | 43 | error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC') |
44 | msg = "%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id) | 44 | msg = 'Recipe %s, package %s: %sname "%s" does not have a static ID defined.' % (d.getVar('PN'), pkg, type, id) |
45 | if files: | 45 | if files: |
46 | msg += " Add %s to one of these files: %s" % (id, files) | 46 | msg += " Add %s to one of these files: %s" % (id, files) |
47 | else: | 47 | else: |
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass index e5f3ba24f9..0f0ed3446d 100644 --- a/meta/classes/useradd.bbclass +++ b/meta/classes/useradd.bbclass | |||
@@ -230,6 +230,10 @@ fakeroot python populate_packages_prepend () { | |||
230 | preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd') | 230 | preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd') |
231 | preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems') | 231 | preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems') |
232 | preinst += d.getVar('useradd_preinst') | 232 | preinst += d.getVar('useradd_preinst') |
233 | # Expand out the *_PARAM variables to the package specific versions | ||
234 | for rep in ["GROUPADD_PARAM", "USERADD_PARAM", "GROUPMEMS_PARAM"]: | ||
235 | val = d.getVar(rep + "_" + pkg) or "" | ||
236 | preinst = preinst.replace("${" + rep + "}", val) | ||
233 | d.setVar('pkg_preinst_%s' % pkg, preinst) | 237 | d.setVar('pkg_preinst_%s' % pkg, preinst) |
234 | 238 | ||
235 | # RDEPENDS setup | 239 | # RDEPENDS setup |
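The expansion loop makes each generated preinst self-contained; a sketch of what it does, assuming d is the usual datastore and pkg the package being emitted (expand_pkg_params is an illustrative name):

    def expand_pkg_params(preinst, pkg, d):
        # Replace generic ${GROUPADD_PARAM}-style references with the
        # package-specific _<pkg> override, so the preinst shipped in the
        # package no longer depends on build-time variable context.
        for rep in ("GROUPADD_PARAM", "USERADD_PARAM", "GROUPMEMS_PARAM"):
            val = d.getVar(rep + "_" + pkg) or ""
            preinst = preinst.replace("${" + rep + "}", val)
        return preinst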
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass index cd3d05709e..99f68f7505 100644 --- a/meta/classes/utils.bbclass +++ b/meta/classes/utils.bbclass | |||
@@ -233,7 +233,7 @@ create_cmdline_wrapper () { | |||
233 | #!/bin/bash | 233 | #!/bin/bash |
234 | realpath=\`readlink -fn \$0\` | 234 | realpath=\`readlink -fn \$0\` |
235 | realdir=\`dirname \$realpath\` | 235 | realdir=\`dirname \$realpath\` |
236 | exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $cmdoptions "\$@" | 236 | exec -a \$realdir/$cmdname \$realdir/$cmdname.real $cmdoptions "\$@" |
237 | END | 237 | END |
238 | chmod +x $cmd | 238 | chmod +x $cmd |
239 | } | 239 | } |
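The fix reuses the $realdir computed on the previous line instead of invoking dirname twice more per exec. A sketch of generating the corrected wrapper from Python (write_cmdline_wrapper is an illustrative name, not the class's shell function):

    import os
    import stat

    def write_cmdline_wrapper(cmd, cmdname, cmdoptions=""):
        # The generated script resolves its own location once, then uses
        # $realdir for both the exec name (-a) and the .real binary.
        with open(cmd, "w") as f:
            f.write("#!/bin/bash\n")
            f.write("realpath=`readlink -fn $0`\n")
            f.write("realdir=`dirname $realpath`\n")
            f.write('exec -a $realdir/%s $realdir/%s.real %s "$@"\n'
                    % (cmdname, cmdname, cmdoptions))
        os.chmod(cmd, os.stat(cmd).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)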