Diffstat (limited to 'meta-oe/classes')
-rw-r--r-- | meta-oe/classes/capnproto.bbclass | 8
-rw-r--r-- | meta-oe/classes/check-version-mismatch.bbclass | 471
-rw-r--r-- | meta-oe/classes/discoverable-disk-image.bbclass | 137
-rw-r--r-- | meta-oe/classes/fitimage.bbclass | 540
-rw-r--r-- | meta-oe/classes/gitpkgv.bbclass | 68
-rw-r--r-- | meta-oe/classes/gpe.bbclass | 17
-rw-r--r-- | meta-oe/classes/image_types_verity.bbclass | 43
-rw-r--r-- | meta-oe/classes/panel-mipi-dbi.bbclass | 48
-rw-r--r-- | meta-oe/classes/signing.bbclass | 159
-rw-r--r-- | meta-oe/classes/sysext-image.bbclass | 87
10 files changed, 1510 insertions, 68 deletions
diff --git a/meta-oe/classes/capnproto.bbclass b/meta-oe/classes/capnproto.bbclass
new file mode 100644
index 0000000000..a698f41c95
--- /dev/null
+++ b/meta-oe/classes/capnproto.bbclass
@@ -0,0 +1,8 @@
1 | # Inherit this class in your recipe to compile against | ||
2 | # Cap'n Proto (capnproto) with CMake | ||
3 | |||
4 | DEPENDS:append = " capnproto-native " | ||
5 | DEPENDS:append:class-target = " capnproto " | ||
6 | |||
7 | EXTRA_OECMAKE:append:class-target = " -DCAPNP_EXECUTABLE=${RECIPE_SYSROOT_NATIVE}${bindir}/capnp \ | ||
8 | -DCAPNPC_CXX_EXECUTABLE=${RECIPE_SYSROOT_NATIVE}${bindir}/capnpc-c++ " | ||
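A usage sketch (hypothetical recipe name and repo; the CMake cache entries above are what the class actually sets):

    # mytool_1.0.bb -- illustrative only
    inherit cmake capnproto
    SRC_URI = "git://example.com/mytool.git;protocol=https;branch=main"
    # the project's CMakeLists.txt can now call capnp_generate_cpp(),
    # which resolves capnp/capnpc-c++ from the native sysroot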
diff --git a/meta-oe/classes/check-version-mismatch.bbclass b/meta-oe/classes/check-version-mismatch.bbclass
new file mode 100644
index 0000000000..f735280d7a
--- /dev/null
+++ b/meta-oe/classes/check-version-mismatch.bbclass
@@ -0,0 +1,471 @@
1 | QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS:tune-%s" % d.getVar('TUNE_PKGARCH')) or ""}" | ||
2 | QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS:tune-${TUNE_PKGARCH}" | ||
3 | |||
4 | ENABLE_VERSION_MISMATCH_CHECK ?= "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', '1', '0', d)}" | ||
5 | DEBUG_VERSION_MISMATCH_CHECK ?= "1" | ||
6 | CHECK_VERSION_PV ?= "" | ||
7 | |||
8 | DEPENDS:append:class-target = "${@' qemu-native' if bb.utils.to_boolean(d.getVar('ENABLE_VERSION_MISMATCH_CHECK')) else ''}" | ||
9 | |||
10 | QEMU_EXEC ?= "${@oe.qemu.qemu_wrapper_cmdline(d, '${STAGING_DIR_HOST}', ['${STAGING_DIR_HOST}${libdir}','${STAGING_DIR_HOST}${base_libdir}', '${PKGD}${libdir}', '${PKGD}${base_libdir}'])}" | ||
11 | |||
12 | python do_package_check_version_mismatch() { | ||
13 | import re | ||
14 | import subprocess | ||
15 | import shutil | ||
16 | import signal | ||
17 | import glob | ||
18 | |||
19 | classes_skip = ["nopackage", "image", "native", "cross", "crosssdk", "cross-canadian"] | ||
20 | for cs in classes_skip: | ||
21 | if bb.data.inherits_class(cs, d): | ||
22 | bb.note(f"Skip do_package_check_version_mismatch as {cs} is inherited.") | ||
23 | return | ||
24 | |||
25 | if not bb.utils.to_boolean(d.getVar('ENABLE_VERSION_MISMATCH_CHECK')): | ||
26 | bb.note("Skip do_package_check_version_mismatch as ENABLE_VERSION_MISMATCH_CHECK is disabled.") | ||
27 | return | ||
28 | |||
29 | __regexp_version_broad_match__ = re.compile(r"(?:\s|^|-|_|/|=| go|\()" + | ||
30 | r"(?P<version>v?[0-9][0-9.][0-9+.\-_~\(\)]*?|UNKNOWN)" + | ||
31 | r"(?:[+\-]release.*|[+\-]stable.*|)" + | ||
32 | r"(?P<extra>[+\-]unknown|[+\-]dirty|[+\-]rc?\d{1,3}|\+cargo-[0-9.]+|" + | ||
33 | r"[a-z]|-?[pP][0-9]{1,3}|-?beta[^\s]*|-?alpha[^\s]*|)" + | ||
34 | r"(?P<extra2>[+\-]dev|[+\-]devel|)" + | ||
35 | r"(?:,|:|\.|\)|-[0-9a-g]{6,42}|)" + | ||
36 | r"(?=\s|$)" | ||
37 | ) | ||
38 | __regexp_exclude_year__ = re.compile(r"^(19|20)[0-9]{2}$") | ||
39 | __regexp_single_number_ending_with_dot__ = re.compile(r"^\d\.$") | ||
40 | |||
41 | def is_shared_library(filepath): | ||
42 | return re.match(r'.*\.so(\.\d+)*$', filepath) is not None | ||
43 | |||
44 | def get_possible_versions(output_contents, full_cmd=None, max_lines=None): | ||
45 | # | ||
46 | # Algorithm: | ||
47 | # 1. Check version line by line. | ||
48 | # 2. Skip some lines which we know do not contain version information, e.g., License, Copyright. | ||
49 | # 3. Do broad match, finding all possible versions. | ||
50 | # 4. If there's a version found by any match, do exclude match (e.g., exclude years) | ||
51 | # 5. If there's a valid version, do stripping and converting and then add to possible_versions. | ||
52 | # 6. Return possible_versions | ||
53 | # | ||
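# Illustrative behavior (not exhaustive): an output line such as
#   "foo version 1.2.3"
# should yield possible_versions == ["1.2.3"], while lines starting
# with e.g. "Copyright" or "License" are skipped entirely.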
54 | possible_versions = [] | ||
55 | content_lines = output_contents.split("\n") | ||
56 | if max_lines: | ||
57 | content_lines = content_lines[0:max_lines] | ||
58 | if full_cmd: | ||
59 | base_cmd = os.path.basename(full_cmd) | ||
60 | __regex_help_format__ = re.compile(r"-[^\s].*") | ||
61 | for line in content_lines: | ||
62 | line = line.strip() | ||
63 | # skip help lines | ||
64 | if __regex_help_format__.match(line): | ||
65 | continue | ||
66 | # avoid command itself affecting output | ||
67 | if full_cmd: | ||
68 | if line.startswith(base_cmd): | ||
69 | line = line[len(base_cmd):] | ||
70 | elif line.startswith(full_cmd): | ||
71 | line = line[len(full_cmd):] | ||
72 | # skip specific lines | ||
73 | skip_keywords_start = ["copyright", "license", "compiled", "build", "built"] | ||
74 | skip_line = False | ||
75 | for sks in skip_keywords_start: | ||
76 | if line.lower().startswith(sks): | ||
77 | skip_line = True | ||
78 | break | ||
79 | if skip_line: | ||
80 | continue | ||
81 | |||
82 | # try broad match | ||
83 | for match in __regexp_version_broad_match__.finditer(line): | ||
84 | version = match.group("version") | ||
85 | #print(f"version = {version}") | ||
86 | # do exclude match | ||
87 | exclude_match = __regexp_exclude_year__.match(version) | ||
88 | if exclude_match: | ||
89 | continue | ||
90 | exclude_match = __regexp_single_number_ending_with_dot__.match(version) | ||
91 | if exclude_match: | ||
92 | continue | ||
93 | # do some stripping and converting | ||
94 | if version.startswith("("): | ||
95 | version = version[1:-1] | ||
96 | if version.startswith("v"): | ||
97 | version = version[1:] | ||
98 | if version.endswith(")") and "(" not in version: | ||
99 | version = version[:-1] | ||
100 | if not version.endswith(")") and "(" in version: | ||
101 | version = version.split('(')[0] | ||
102 | # handle extra version info | ||
103 | version = version + match.group("extra") + match.group("extra2") | ||
104 | possible_versions.append(version) | ||
105 | return possible_versions | ||
106 | |||
107 | def is_version_mismatch(rvs, pv): | ||
108 | got_match = False | ||
109 | if pv.startswith("git"): | ||
110 | return False | ||
111 | if "-pre" in pv: | ||
112 | pv = pv.split("-pre")[0] | ||
113 | if pv.startswith("v"): | ||
114 | pv = pv[1:] | ||
115 | for rv in rvs: | ||
116 | if rv == pv: | ||
117 | got_match = True | ||
118 | break | ||
119 | pv = pv.split("+git")[0] | ||
120 | # handle % character in pv which means matching any chars | ||
121 | if '%' in pv: | ||
122 | escaped_pv = re.escape(pv) | ||
123 | regex_pattern = escaped_pv.replace('%', '.*') | ||
124 | regex_pattern = f'^{regex_pattern}$' | ||
125 | if re.fullmatch(regex_pattern, rv): | ||
126 | got_match = True | ||
127 | break | ||
128 | else: | ||
129 | continue | ||
130 | # handle cases such as 2.36.0-r0 vs. 2.36.0 | ||
131 | if "-r" in rv: | ||
132 | rv = rv.split("-r")[0] | ||
133 | chars_to_replace = ["-", "+", "_", "~"] | ||
134 | # convert to use "." as the version separator | ||
135 | for cr in chars_to_replace: | ||
136 | rv = rv.replace(cr, ".") | ||
137 | pv = pv.replace(cr, ".") | ||
138 | if rv == pv: | ||
139 | got_match = True | ||
140 | break | ||
141 | # handle case such as 5.2.37(1) vs. 5.2.37 | ||
142 | if "(" in rv: | ||
143 | rv = rv.split("(")[0] | ||
144 | if rv == pv: | ||
145 | got_match = True | ||
146 | break | ||
147 | # handle case such as 4.4.3p1 | ||
148 | if "p" in pv and "p" in rv.lower(): | ||
149 | pv = pv.lower().replace(".p", "p") | ||
150 | rv = rv.lower().replace(".p", "p") | ||
151 | if pv == rv: | ||
152 | got_match = True | ||
153 | break | ||
154 | # handle cases such as 6.00 vs. 6.0 | ||
155 | if rv.startswith(pv): | ||
156 | if rv == pv + "0" or rv == pv + ".0": | ||
157 | got_match = True | ||
158 | break | ||
159 | elif pv.startswith(rv): | ||
160 | if pv == rv + "0" or pv == rv + ".0": | ||
161 | got_match = True | ||
162 | break | ||
163 | # handle cases such as 21306 vs. 2.13.6 | ||
164 | if "." in pv and not "." in rv: | ||
165 | pv_components = pv.split(".") | ||
166 | if rv.startswith(pv_components[0]): | ||
167 | pv_num = 0 | ||
168 | for i in range(0, len(pv_components)): | ||
169 | pv_num = pv_num * 100 + int(pv_components[i]) | ||
170 | if pv_num == int(rv): | ||
171 | got_match = True | ||
172 | break | ||
173 | if got_match: | ||
174 | return False | ||
175 | else: | ||
176 | return True | ||
177 | |||
178 | def is_elf_binary(fexec): | ||
179 | fexec_real = os.path.realpath(fexec) | ||
180 | elf = oe.qa.ELFFile(fexec_real) | ||
181 | try: | ||
182 | elf.open() | ||
183 | elf.close() | ||
184 | return True | ||
185 | except: | ||
186 | return False | ||
187 | |||
188 | def get_shebang(fexec): | ||
189 | try: | ||
190 | with open(fexec, 'r') as f: | ||
191 | first_line = f.readline().strip() | ||
192 | if first_line.startswith("#!"): | ||
193 | return first_line | ||
194 | else: | ||
195 | return None | ||
196 | except Exception as e: | ||
197 | return None | ||
198 | |||
199 | def get_interpreter_from_shebang(shebang): | ||
200 | if not shebang: | ||
201 | return None | ||
202 | hosttools_path = d.getVar("TMPDIR") + "/hosttools" | ||
203 | if "/sh" in shebang: | ||
204 | return hosttools_path + "/sh" | ||
205 | elif "/bash" in shebang: | ||
206 | return hosttools_path + "/bash" | ||
207 | elif "python" in shebang: | ||
208 | return hosttools_path + "/python3" | ||
209 | elif "perl" in shebang: | ||
210 | return hosttools_path + "/perl" | ||
211 | else: | ||
212 | return None | ||
213 | |||
214 | # helper function to get PKGV, useful for recipes such as perf | ||
215 | def get_pkgv(pn): | ||
216 | pkgdestwork = d.getVar("PKGDESTWORK") | ||
217 | recipe_data_fn = pkgdestwork + "/" + pn | ||
218 | pn_data = oe.packagedata.read_pkgdatafile(recipe_data_fn) | ||
219 | if not "PACKAGES" in pn_data: | ||
220 | return d.getVar("PV") | ||
221 | packages = pn_data["PACKAGES"].split() | ||
222 | for pkg in packages: | ||
223 | pkg_fn = pkgdestwork + "/runtime/" + pkg | ||
224 | pkg_data = oe.packagedata.read_pkgdatafile(pkg_fn) | ||
225 | if "PKGV" in pkg_data: | ||
226 | return pkg_data["PKGV"] | ||
227 | |||
228 | # | ||
229 | # traverse PKGD, find executables, run them to collect runtime version information, and compare it with the recipe version | ||
230 | # | ||
231 | enable_debug = bb.utils.to_boolean(d.getVar("DEBUG_VERSION_MISMATCH_CHECK")) | ||
232 | pkgd = d.getVar("PKGD") | ||
233 | pn = d.getVar("PN") | ||
234 | pv = d.getVar("CHECK_VERSION_PV") | ||
235 | if not pv: | ||
236 | pv = get_pkgv(pn) | ||
237 | qemu_exec = d.getVar("QEMU_EXEC").strip() | ||
238 | executables = [] | ||
239 | possible_versions_all = [] | ||
240 | data_lines = [] | ||
241 | |||
242 | if enable_debug: | ||
243 | debug_directory = d.getVar("TMPDIR") + "/check-version-mismatch" | ||
244 | debug_data_file = debug_directory + "/" + pn | ||
245 | os.makedirs(debug_directory, exist_ok=True) | ||
246 | data_lines.append("pv: %s\n" % pv) | ||
247 | |||
248 | # handle a special case: a pure % means matching all, no point in further checking | ||
249 | if pv == "%": | ||
250 | if enable_debug: | ||
251 | data_lines.append("FINAL RESULT: MATCH (%s matches all, skipped)\n\n" % pv) | ||
252 | with open(debug_data_file, "w") as f: | ||
253 | f.writelines(data_lines) | ||
254 | return | ||
255 | |||
256 | got_quick_match_result = False | ||
257 | # handle python3-xxx recipes quickly | ||
258 | __regex_python_module_version__ = re.compile(r"(?:^|.*:)Version: (?P<version>.*)$") | ||
259 | if "python3-" in pn: | ||
260 | version_check_cmd = "find %s -name 'METADATA' | xargs grep '^Version: '" % pkgd | ||
261 | try: | ||
262 | output = subprocess.check_output(version_check_cmd, shell=True).decode("utf-8") | ||
263 | data_lines.append("version_check_cmd: %s\n" % version_check_cmd) | ||
264 | data_lines.append("output:\n'''\n%s'''\n" % output) | ||
265 | possible_versions = [] | ||
266 | for line in output.split("\n"): | ||
267 | match = __regex_python_module_version__.match(line) | ||
268 | if match: | ||
269 | possible_versions.append(match.group("version")) | ||
270 | possible_versions = sorted(set(possible_versions)) | ||
271 | data_lines.append("possible versions: %s\n" % possible_versions) | ||
272 | if is_version_mismatch(possible_versions, pv): | ||
273 | data_lines.append("FINAL RESULT: MISMATCH (%s v.s. %s)\n\n" % (possible_versions, pv)) | ||
274 | bb.warn("Possible runtime versions %s do not match recipe version %s" % (possible_versions, pv)) | ||
275 | else: | ||
276 | data_lines.append("FINAL RESULT: MATCH (%s v.s. %s)\n\n" % (possible_versions, pv)) | ||
277 | got_quick_match_result = True | ||
278 | except: | ||
279 | data_lines.append("version_check_cmd: %s\n" % version_check_cmd) | ||
280 | data_lines.append("result: RUN_FAILED\n\n") | ||
281 | if got_quick_match_result: | ||
282 | if enable_debug: | ||
283 | with open(debug_data_file, "w") as f: | ||
284 | f.writelines(data_lines) | ||
285 | return | ||
286 | |||
287 | # handle .pc files | ||
288 | version_check_cmd = "find %s -name '*.pc' | xargs grep -i version" % pkgd | ||
289 | try: | ||
290 | output = subprocess.check_output(version_check_cmd, shell=True).decode("utf-8") | ||
291 | data_lines.append("version_check_cmd: %s\n" % version_check_cmd) | ||
292 | data_lines.append("output:\n'''\n%s'''\n" % output) | ||
293 | possible_versions = get_possible_versions(output) | ||
294 | possible_versions = sorted(set(possible_versions)) | ||
295 | data_lines.append("possible versions: %s\n" % possible_versions) | ||
296 | if is_version_mismatch(possible_versions, pv): | ||
297 | if pn.startswith("lib"): | ||
298 | data_lines.append("FINAL RESULT: MISMATCH (%s v.s. %s)\n\n" % (possible_versions, pv)) | ||
299 | bb.warn("Possible runtime versions %s do not match recipe version %s" % (possible_versions, pv)) | ||
300 | got_quick_match_result = True | ||
301 | else: | ||
302 | data_lines.append("result: MISMATCH (%s v.s. %s)\n\n" % (possible_versions, pv)) | ||
303 | else: | ||
304 | data_lines.append("FINAL RESULT: MATCH (%s v.s. %s)\n\n" % (possible_versions, pv)) | ||
305 | got_quick_match_result = True | ||
306 | except: | ||
307 | data_lines.append("version_check_cmd: %s\n" % version_check_cmd) | ||
308 | data_lines.append("result: RUN_FAILED\n\n") | ||
309 | if got_quick_match_result: | ||
310 | if enable_debug: | ||
311 | with open(debug_data_file, "w") as f: | ||
312 | f.writelines(data_lines) | ||
313 | return | ||
314 | |||
315 | skipped_directories = [".debug", "ptest", "installed-tests", "tests", "test", "__pycache__", "testcases"] | ||
316 | # avoid checking configuration files: they don't give useful version information, and some init scripts | ||
317 | # will kill all processes | ||
318 | skipped_directories.append("etc") | ||
319 | skipped_directories.append("go/src") | ||
320 | pkgd_libdir = pkgd + d.getVar("libdir") | ||
321 | pkgd_base_libdir = pkgd + d.getVar("base_libdir") | ||
322 | extra_exec_libdirs = [] | ||
323 | for root, dirs, files in os.walk(pkgd): | ||
324 | for dname in dirs: | ||
325 | fdir = os.path.join(root, dname) | ||
326 | if os.path.isdir(fdir) and fdir != pkgd_libdir and fdir != pkgd_base_libdir: | ||
327 | if fdir.startswith(pkgd_libdir) or fdir.startswith(pkgd_base_libdir): | ||
328 | for sd in skipped_directories: | ||
329 | if fdir.endswith("/" + sd) or ("/" + sd + "/") in fdir: | ||
330 | break | ||
331 | else: | ||
332 | extra_exec_libdirs.append(fdir) | ||
333 | for fname in files: | ||
334 | fpath = os.path.join(root, fname) | ||
335 | if os.path.isfile(fpath) and os.access(fpath, os.X_OK): | ||
336 | for sd in skipped_directories: | ||
337 | if ("/" + sd + "/") in fpath: | ||
338 | break | ||
339 | else: | ||
340 | if is_shared_library(fpath): | ||
341 | # we don't check shared libraries | ||
342 | continue | ||
343 | else: | ||
344 | executables.append(fpath) | ||
345 | if enable_debug: | ||
346 | data_lines.append("executables: %s\n" % executables) | ||
347 | |||
348 | found_match = False | ||
349 | some_cmd_succeed = False | ||
350 | if not executables: | ||
351 | bb.debug(1, "No executable found for %s" % pn) | ||
352 | data_lines.append("FINAL RESULT: NO_EXECUTABLE_FOUND\n\n") | ||
353 | else: | ||
354 | # first we extend qemu_exec to include library path if needed | ||
355 | if extra_exec_libdirs: | ||
356 | qemu_exec += ":" + ":".join(extra_exec_libdirs) | ||
357 | orig_qemu_exec = qemu_exec | ||
358 | for fexec in executables: | ||
359 | qemu_exec = orig_qemu_exec | ||
360 | for version_option in ["--version", "-V", "-v", "--help"]: | ||
361 | if not is_elf_binary(fexec): | ||
362 | shebang = get_shebang(fexec) | ||
363 | interpreter = get_interpreter_from_shebang(shebang) | ||
364 | if not interpreter: | ||
365 | bb.debug(1, "file %s is not supported to run" % fexec) | ||
366 | elif interpreter.endswith("perl"): | ||
367 | perl5lib_extra = pkgd + d.getVar("libdir") + "/perl5/site_perl" | ||
368 | for p in glob.glob("%s/usr/share/*" % pkgd): | ||
369 | perl5lib_extra += ":%s" % p | ||
370 | qemu_exec += " -E PERL5LIB=%s:$PERL5LIB %s" % (perl5lib_extra, interpreter) | ||
371 | elif interpreter.endswith("python3"): | ||
372 | pythonpath_extra = glob.glob("%s%s/python3*/site-packages" % (pkgd, d.getVar("libdir"))) | ||
373 | if pythonpath_extra: | ||
374 | qemu_exec += " -E PYTHONPATH=%s:$PYTHONPATH %s" % (pythonpath_extra[0], interpreter) | ||
375 | else: | ||
376 | qemu_exec += " %s" % interpreter | ||
377 | # remove the '-E LD_LIBRARY_PATH=xxx' | ||
378 | qemu_exec = re.sub(r"-E\s+LD_LIBRARY_PATH=\S+", "", qemu_exec) | ||
379 | version_check_cmd_full = "%s %s %s" % (qemu_exec, fexec, version_option) | ||
380 | version_check_cmd = version_check_cmd_full | ||
381 | #version_check_cmd = "%s %s" % (os.path.relpath(fexec, pkgd), version_option) | ||
382 | |||
383 | try: | ||
384 | cwd_temp = d.getVar("TMPDIR") + "/check-version-mismatch/cwd-temp/" + pn | ||
385 | os.makedirs(cwd_temp, exist_ok=True) | ||
386 | # prevent pseudo from managing any files we create | ||
387 | sp_env = os.environ.copy() | ||
388 | sp_env["PSEUDO_UNLOAD"] = "1" | ||
389 | output = subprocess.check_output(version_check_cmd_full, | ||
390 | shell=True, | ||
391 | stderr=subprocess.STDOUT, | ||
392 | cwd=cwd_temp, | ||
393 | timeout=10, | ||
394 | env=sp_env).decode("utf-8") | ||
395 | some_cmd_succeed = True | ||
396 | data_lines.append("version_check_cmd: %s\n" % version_check_cmd) | ||
397 | data_lines.append("output:\n'''\n%s'''\n" % output) | ||
398 | if version_option == "--help": | ||
399 | max_lines = 5 | ||
400 | else: | ||
401 | max_lines = None | ||
402 | possible_versions = get_possible_versions(output, full_cmd=fexec, max_lines=max_lines) | ||
403 | if "." in pv: | ||
404 | possible_versions = [item for item in possible_versions if "." in item or item == "UNKNOWN"] | ||
405 | data_lines.append("possible versions: %s\n" % possible_versions) | ||
406 | if not possible_versions: | ||
407 | data_lines.append("result: NO_RUNTIME_VERSION_FOUND\n\n") | ||
408 | continue | ||
409 | possible_versions_all.extend(possible_versions) | ||
410 | possible_versions_all = sorted(set(possible_versions_all)) | ||
411 | if is_version_mismatch(possible_versions, pv): | ||
412 | data_lines.append("result: MISMATCH (%s v.s. %s)\n\n" % (possible_versions, pv)) | ||
413 | else: | ||
414 | found_match = True | ||
415 | data_lines.append("result: MATCH (%s v.s. %s)\n\n" % (possible_versions, pv)) | ||
416 | break | ||
417 | except: | ||
418 | data_lines.append("version_check_cmd: %s\n" % version_check_cmd) | ||
419 | data_lines.append("result: RUN_FAILED\n\n") | ||
420 | finally: | ||
421 | shutil.rmtree(cwd_temp) | ||
422 | if found_match: | ||
423 | break | ||
424 | if executables: | ||
425 | if found_match: | ||
426 | data_lines.append("FINAL RESULT: MATCH (%s v.s. %s)\n" % (possible_versions_all, pv)) | ||
427 | elif len(possible_versions_all) == 0: | ||
428 | if some_cmd_succeed: | ||
429 | bb.debug(1, "No valid runtime version found") | ||
430 | data_lines.append("FINAL RESULT: NO_VALID_RUNTIME_VERSION_FOUND\n") | ||
431 | else: | ||
432 | bb.debug(1, "All version check command failed") | ||
433 | data_lines.append("FINAL RESULT: RUN_FAILED\n") | ||
434 | else: | ||
435 | bb.warn("Possible runtime versions %s do not match recipe version %s" % (possible_versions_all, pv)) | ||
436 | data_lines.append("FINAL RESULT: MISMATCH (%s v.s. %s)\n" % (possible_versions_all, pv)) | ||
437 | |||
438 | if enable_debug: | ||
439 | with open(debug_data_file, "w") as f: | ||
440 | f.writelines(data_lines) | ||
441 | |||
442 | # clean up stale processes | ||
443 | process_name_common_prefix = "%s %s" % (' '.join(qemu_exec.split()[1:]), pkgd) | ||
444 | find_stale_process_cmd = "ps -e -o pid,args | grep -v grep | grep -F '%s'" % process_name_common_prefix | ||
445 | try: | ||
446 | stale_process_output = subprocess.check_output(find_stale_process_cmd, shell=True).decode("utf-8") | ||
447 | stale_process_pids = [] | ||
448 | for line in stale_process_output.split("\n"): | ||
449 | line = line.strip() | ||
450 | if not line: | ||
451 | continue | ||
452 | pid = line.split()[0] | ||
453 | stale_process_pids.append(pid) | ||
454 | for pid in stale_process_pids: | ||
455 | os.kill(int(pid), signal.SIGKILL) | ||
456 | except Exception as e: | ||
457 | bb.debug(1, "No stale process") | ||
458 | } | ||
459 | |||
460 | addtask do_package_check_version_mismatch after do_prepare_recipe_sysroot do_package before do_build | ||
461 | |||
462 | do_build[rdeptask] += "do_package_check_version_mismatch" | ||
463 | do_rootfs[recrdeptask] += "do_package_check_version_mismatch" | ||
464 | |||
465 | SSTATETASKS += "do_package_check_version_mismatch" | ||
466 | do_package_check_version_mismatch[sstate-inputdirs] = "" | ||
467 | do_package_check_version_mismatch[sstate-outputdirs] = "" | ||
468 | python do_package_check_version_mismatch_setscene () { | ||
469 | sstate_setscene(d) | ||
470 | } | ||
471 | addtask do_package_check_version_mismatch_setscene | ||
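A configuration sketch (illustrative values) for tuning the check via the variables defined at the top of the class:

    # local.conf: disable the check globally
    ENABLE_VERSION_MISMATCH_CHECK = "0"

    # some recipe: override the version compared against; '%' matches
    # any characters, as handled in is_version_mismatch() above
    CHECK_VERSION_PV = "2.1.%"

While DEBUG_VERSION_MISMATCH_CHECK is enabled, per-recipe reports are written to ${TMPDIR}/check-version-mismatch/${PN}.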
diff --git a/meta-oe/classes/discoverable-disk-image.bbclass b/meta-oe/classes/discoverable-disk-image.bbclass
new file mode 100644
index 0000000000..1f3a7b08e1
--- /dev/null
+++ b/meta-oe/classes/discoverable-disk-image.bbclass
@@ -0,0 +1,137 @@
1 | ## | ||
2 | # Copyright OpenEmbedded Contributors | ||
3 | # | ||
4 | # SPDX-License-Identifier: MIT | ||
5 | # | ||
6 | # | ||
7 | # Discoverable Disk Image (DDI) | ||
8 | # | ||
9 | # "DDIs (Discoverable Disk Images) are self-describing file system | ||
10 | # images that follow the DPS (Discoverable Partitions Specification), | ||
11 | # wrapped in a GPT partition table, that may contain root (or /usr/) | ||
12 | # filesystems for bootable OS images, system extensions, configuration | ||
13 | # extensions, portable services, containers and more, and shall be | ||
14 | # protected by signed dm-verity all combined into one. They are | ||
15 | # designed to be composable and stackable, and provide security by | ||
16 | # default." | ||
17 | # https://uapi-group.org/specifications/specs/discoverable_disk_image/ | ||
18 | # https://uapi-group.org/specifications/specs/discoverable_partitions_specification/ | ||
19 | # https://www.freedesktop.org/software/systemd/man/latest/systemd.image-policy.html | ||
20 | |||
21 | # To be able to use discoverable-disk-images with a | ||
22 | # root-verity-sig or usr-verity-sig configuration: | ||
23 | # - systemd needs to include the PACKAGECONFIG 'cryptsetup', and | ||
24 | # - the kernel needs the following features enabled: | ||
25 | # CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y | ||
26 | # CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_PLATFORM_KEYRING=y | ||
27 | # CONFIG_EROFS_FS=y | ||
28 | # CONFIG_EROFS_FS_XATTR=y | ||
29 | # CONFIG_EROFS_FS_ZIP=y | ||
30 | # CONFIG_EROFS_FS_ZIP_LZMA=y | ||
31 | # CONFIG_INTEGRITY_SIGNATURE=y | ||
32 | # CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y | ||
33 | # CONFIG_INTEGRITY_PLATFORM_KEYRING=y | ||
34 | # CONFIG_SYSTEM_BLACKLIST_KEYRING=y | ||
35 | # CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" | ||
36 | # CONFIG_SIGNATURE=y | ||
37 | |||
38 | # To sign DDIs, a key and certificate need to be provided by setting | ||
39 | # the variables: | ||
40 | # REPART_PRIVATE_KEY | ||
41 | # private key to sign the verity-hash | ||
42 | # REPART_PRIVATE_KEY_SOURCE | ||
43 | # optional, can be "engine:pkcs11" when using a (soft)hsm | ||
44 | # REPART_CERTIFICATE | ||
45 | # corresponding public certificate, in .pem format | ||
46 | # | ||
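# Illustrative example (paths and PKCS#11 URI are placeholders):
#
#   REPART_PRIVATE_KEY = "${TOPDIR}/keys/ddi-sign.key.pem"
#   REPART_CERTIFICATE = "${TOPDIR}/keys/ddi-sign.crt.pem"
#
# or, signing with a (soft)hsm:
#
#   REPART_PRIVATE_KEY_SOURCE = "engine:pkcs11"
#   REPART_PRIVATE_KEY = "pkcs11:token=ddi;object=ddi-sign"
#   REPART_CERTIFICATE = "${TOPDIR}/keys/ddi-sign.crt.pem"
#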
47 | |||
48 | # For signature verification, systemd-sysext expects the matching | ||
49 | # certificate to reside in /etc/verity.d as a PEM-formatted .crt file. | ||
50 | # | ||
51 | # To enforce loading of only signed extension images, an appropriate | ||
52 | # image policy has to be passed to systemd-sysext, e.g.: | ||
53 | # systemd-sysext --image-policy='root=signed+absent:usr=signed+absent:=unused+absent' merge | ||
54 | |||
55 | # 'systemd-dissect' can be used to inspect, manually mount, or otherwise work with a DDI. | ||
56 | |||
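# For example (illustrative invocations; see systemd-dissect(1)):
#
#   systemd-dissect extension.ddi              # inspect partitions/verity status
#   systemd-dissect --mount extension.ddi /mnt
#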
57 | inherit image | ||
58 | |||
59 | IMAGE_FSTYPES = "ddi" | ||
60 | |||
61 | DEPENDS += " \ | ||
62 | systemd-repart-native \ | ||
63 | erofs-utils-native \ | ||
64 | openssl-native \ | ||
65 | " | ||
66 | |||
67 | # systemd-repart --make-ddi takes one of "sysext", "confext" or "portable" | ||
68 | # and looks up the corresponding definitions in the host OS; we divert that | ||
69 | # lookup to the sysroot-native by setting '--definitions=' instead. | ||
70 | # The chosen DDI_TYPE influences which parts of the rootfs are copied into | ||
71 | # the ddi by systemd-repart: | ||
72 | # sysext: /usr (and if it exists: /opt) | ||
73 | # confext: /etc | ||
74 | # portable: / | ||
75 | # For details see systemd/repart/definitions/${REPART_DDI_TYPE}.repart.d/* | ||
76 | REPART_DDI_TYPE ?= "sysext" | ||
77 | |||
78 | REPART_DDI_EXTENSION ?= "ddi" | ||
79 | |||
80 | # systemd-repart creates temporary directories under /var/tmp/.#repartXXXXXXX/, | ||
81 | # to estimate partition size etc. Since files are copied there from the image/rootfs | ||
82 | # folder - which are owned by pseudo-root - this temporary location has to be | ||
83 | # added to the directories handled by pseudo; otherwise calls to e.g. | ||
84 | # fchown(0,0) inside systemd git/src/shared/copy.c end up failing. | ||
85 | PSEUDO_INCLUDE_PATHS .= ",/var/tmp/" | ||
86 | |||
87 | oe_image_systemd_repart_make_ddi() { | ||
88 | |||
89 | local additional_args="" | ||
90 | |||
91 | if [ -n "${REPART_PRIVATE_KEY}" ] | ||
92 | then | ||
93 | if [ -n "${REPART_PRIVATE_KEY_SOURCE}" ] | ||
94 | then | ||
95 | additional_args="$additional_args --private-key-source=${REPART_PRIVATE_KEY_SOURCE}" | ||
96 | fi | ||
97 | additional_args="$additional_args --private-key=${REPART_PRIVATE_KEY}" | ||
98 | fi | ||
99 | |||
100 | if [ -n "${REPART_CERTIFICATE}" ] | ||
101 | then | ||
102 | additional_args="$additional_args --certificate=${REPART_CERTIFICATE}" | ||
103 | fi | ||
104 | |||
105 | # map architectures to systemd's expected values | ||
106 | local systemd_arch="${TARGET_ARCH}" | ||
107 | case "${systemd_arch}" in | ||
108 | aarch64) | ||
109 | systemd_arch=arm64 | ||
110 | ;; | ||
111 | x86_64) | ||
112 | systemd_arch=x86-64 | ||
113 | ;; | ||
114 | esac | ||
115 | |||
116 | # prepare systemd-repart configuration | ||
117 | mkdir -p ${B}/definitions.repart.d | ||
118 | cp ${STAGING_LIBDIR_NATIVE}/systemd/repart/definitions/${REPART_DDI_TYPE}.repart.d/* ${B}/definitions.repart.d/ | ||
119 | # enable erofs compression | ||
120 | sed -i "/^Compression/d" ${B}/definitions.repart.d/10-root.conf | ||
121 | echo "Compression=lzma\nCompressionLevel=3" >> ${B}/definitions.repart.d/10-root.conf | ||
122 | # disable verity signature partition creation, if no key is provided | ||
123 | if [ -z "${REPART_PRIVATE_KEY}" ]; then | ||
124 | rm ${B}/definitions.repart.d/30-root-verity-sig.conf | ||
125 | fi | ||
126 | |||
127 | systemd-repart \ | ||
128 | --definitions="${B}/definitions.repart.d/" \ | ||
129 | --copy-source="${IMAGE_ROOTFS}" \ | ||
130 | --empty=create --size=auto --dry-run=no --offline=yes \ | ||
131 | --architecture="${systemd_arch}" \ | ||
132 | --json=pretty --no-pager $additional_args \ | ||
133 | "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${REPART_DDI_EXTENSION}" | ||
134 | } | ||
135 | |||
136 | IMAGE_CMD:ddi = "oe_image_systemd_repart_make_ddi" | ||
137 | do_image_ddi[deptask] += "do_unpack" | ||
diff --git a/meta-oe/classes/fitimage.bbclass b/meta-oe/classes/fitimage.bbclass
new file mode 100644
index 0000000000..03fa2fcd57
--- /dev/null
+++ b/meta-oe/classes/fitimage.bbclass
@@ -0,0 +1,540 @@
1 | # SPDX-License-Identifier: MIT | ||
2 | # | ||
3 | # Copyright PHYTEC Messtechnik GmbH | ||
4 | # Copyright (C) 2024 Pengutronix, <yocto@pengutronix.de> | ||
5 | # | ||
6 | # Class for creating (signed) FIT images | ||
7 | # Description: | ||
8 | # | ||
9 | # You have to define the 'images' to put in the FIT image in your recipe file | ||
10 | # following this example: | ||
11 | # | ||
12 | # FITIMAGE_IMAGES ?= "kernel fdt fdto setup ramdisk bootscript" | ||
13 | # | ||
14 | # FITIMAGE_IMAGE_kernel ?= "virtual/kernel" | ||
15 | # FITIMAGE_IMAGE_kernel[type] ?= "kernel" | ||
16 | # | ||
17 | # FITIMAGE_IMAGE_fdt ?= "virtual/dtb" # or "virtual/kernel" | ||
18 | # FITIMAGE_IMAGE_fdt[type] ?= "fdt" | ||
19 | # #FITIMAGE_IMAGE_fdt[file] ?= "hw-name.dtb" | ||
20 | # | ||
21 | # FITIMAGE_IMAGE_fdto ?= "virtual/kernel" | ||
22 | # FITIMAGE_IMAGE_fdto[type] ?= "fdto" | ||
23 | # FITIMAGE_IMAGE_fdto[file] ?= <list of all dtbo files from KERNEL_DEVICETREE> | ||
24 | # | ||
25 | # Add a devicetree created on-the-fly from a base dtb and several dtbo's | ||
26 | # FITIMAGE_IMAGE_fdtapply ?= "virtual/kernel" | ||
27 | # FITIMAGE_IMAGE_fdtapply[type] ?= "fdtapply" | ||
28 | # FITIMAGE_IMAGE_fdtapply[file] ?= "base.dtb overlay-1.dtbo overlay-2.dtbo" | ||
29 | # FITIMAGE_IMAGE_fdtapply[name] ?= "<name for new generated fdt>" | ||
30 | # | ||
31 | # FITIMAGE_IMAGE_ramdisk ?= "core-image-minimal" | ||
32 | # FITIMAGE_IMAGE_ramdisk[type] ?= "ramdisk" | ||
33 | # FITIMAGE_IMAGE_ramdisk[fstype] ?= "cpio.gz" | ||
34 | # | ||
35 | # FITIMAGE_IMAGE_bootscript ?= "bootscript" | ||
36 | # FITIMAGE_IMAGE_bootscript[type] ?= "bootscript" | ||
37 | # FITIMAGE_IMAGE_bootscript[file] ?= "boot.scr" | ||
38 | # | ||
39 | # Valid options for the [type] varflag are: "kernel", "fdt", "fdto", "fdtapply", "ramdisk", "bootscript". | ||
40 | # | ||
41 | # To enable signing, set | ||
42 | # | ||
43 | # FITIMAGE_SIGN = "1" | ||
44 | # | ||
45 | # and configure FITIMAGE_SIGN_KEYDIR (and FITIMAGE_SIGN_KEYNAME) according to | ||
46 | # your needs. | ||
47 | # | ||
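# For conventional key-directory signing, a sketch with illustrative
# values (mkimage resolves <FITIMAGE_SIGN_KEYNAME>.key/.crt inside
# FITIMAGE_SIGN_KEYDIR via the key-name-hint):
#
# FITIMAGE_SIGN = "1"
# FITIMAGE_SIGN_KEYDIR = "${TOPDIR}/keys"
# FITIMAGE_SIGN_KEYNAME = "dev"
#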
48 | # For signing via PKCS#11 URIs provided by the meta-oe signing.bbclass, add: | ||
49 | # | ||
50 | # inherit signing | ||
51 | # | ||
52 | # FITIMAGE_SIGNING_KEY_ROLE = "fit" | ||
53 | # | ||
54 | # do_fitimage:prepend() { | ||
55 | # signing_prepare | ||
56 | # signing_use_role "${FITIMAGE_SIGNING_KEY_ROLE}" | ||
57 | # } | ||
58 | # | ||
59 | # FITIMAGE_SIGN = "1" | ||
60 | # FITIMAGE_MKIMAGE_EXTRA_ARGS = "--engine pkcs11" | ||
61 | # FITIMAGE_SIGN_KEYDIR = "${PKCS11_URI}" | ||
62 | |||
63 | |||
64 | LICENSE ?= "MIT" | ||
65 | |||
66 | inherit deploy kernel-artifact-names image-artifact-names kernel-arch nopackages | ||
67 | |||
68 | do_patch[noexec] = "1" | ||
69 | do_compile[noexec] = "1" | ||
70 | do_install[noexec] = "1" | ||
71 | deltask do_populate_sysroot | ||
72 | |||
73 | INHIBIT_DEFAULT_DEPS = "1" | ||
74 | |||
75 | DEPENDS = "u-boot-mkimage-native dtc-native" | ||
76 | |||
77 | FITIMAGE_SIGN ?= "0" | ||
78 | FITIMAGE_SIGN[doc] = "Enable FIT image signing" | ||
79 | FITIMAGE_SIGN_KEYDIR ?= "" | ||
80 | FITIMAGE_SIGN_KEYDIR[doc] = "Key directory or pkcs#11 URI to use for signing configuration" | ||
81 | FITIMAGE_MKIMAGE_EXTRA_ARGS[doc] = "Extra arguments to pass to the uboot-mkimage call" | ||
82 | FITIMAGE_HASH_ALGO ?= "sha256" | ||
83 | FITIMAGE_HASH_ALGO[doc] = "Hash algorithm to use" | ||
84 | FITIMAGE_ENCRYPT_ALGO ?= "rsa2048" | ||
85 | FITIMAGE_ENCRYPT_ALGO[doc] = "Signature algorithm to use" | ||
86 | FITIMAGE_CONFIG_PREFIX ?= "conf-" | ||
87 | FITIMAGE_CONFIG_PREFIX[doc] = "Prefix to use for FIT configuration node name" | ||
88 | |||
89 | FITIMAGE_LOADADDRESS ??= "" | ||
90 | FITIMAGE_ENTRYPOINT ??= "" | ||
91 | FITIMAGE_DTB_LOADADDRESS ??= "" | ||
92 | FITIMAGE_DTB_OVERLAY_LOADADDRESS ??= "" | ||
93 | FITIMAGE_RD_LOADADDRESS ??= "" | ||
94 | FITIMAGE_RD_ENTRYPOINT ??= "" | ||
95 | |||
96 | PACKAGE_ARCH = "${MACHINE_ARCH}" | ||
97 | |||
98 | # Create dependency list from images | ||
99 | python __anonymous() { | ||
100 | for image in (d.getVar('FITIMAGE_IMAGES') or "").split(): | ||
101 | imageflags = d.getVarFlags('FITIMAGE_IMAGE_%s' % image, expand=['type', 'depends']) or {} | ||
102 | imgtype = imageflags.get('type') | ||
103 | if not imgtype: | ||
104 | bb.debug(1, "No [type] given for image '%s', defaulting to 'kernel'" % image) | ||
105 | imgtype = 'kernel' | ||
106 | recipe = d.getVar('FITIMAGE_IMAGE_%s' % image) | ||
107 | |||
108 | if not recipe: | ||
109 | bb.fatal(f"No recipe set for image '{image}'. Specify via 'FITIMAGE_IMAGE_{image} = \"<recipe-name>\"'") | ||
110 | return | ||
111 | |||
112 | d.appendVarFlag('do_unpack', 'vardeps', ' FITIMAGE_IMAGE_%s' % image) | ||
113 | depends = imageflags.get('depends') | ||
114 | if depends: | ||
115 | d.appendVarFlag('do_unpack', 'depends', ' ' + depends) | ||
116 | continue | ||
117 | |||
118 | if imgtype == 'ramdisk': | ||
119 | d.appendVarFlag('do_unpack', 'depends', ' ' + recipe + ':do_image_complete') | ||
120 | elif 'fdt' in imgtype: | ||
121 | d.appendVarFlag('do_unpack', 'depends', ' ' + recipe + ':do_populate_sysroot') | ||
122 | d.appendVarFlag('do_unpack', 'depends', ' ' + recipe + ':do_deploy') | ||
123 | else: | ||
124 | d.appendVarFlag('do_unpack', 'depends', ' ' + recipe + ':do_deploy') | ||
125 | |||
126 | if 'fdt' in imgtype and d.getVar('PREFERRED_PROVIDER_virtual/dtb'): | ||
127 | d.setVar('EXTERNAL_KERNEL_DEVICETREE', '${RECIPE_SYSROOT}/boot/devicetree') | ||
128 | } | ||
129 | |||
130 | S = "${UNPACKDIR}" | ||
131 | B = "${WORKDIR}/build" | ||
132 | |||
133 | # | ||
134 | # Emit the fitImage ITS header | ||
135 | # | ||
136 | def fitimage_emit_fit_header(d, fd): | ||
137 | fd.write('/dts-v1/;\n\n/ {\n') | ||
138 | fd.write(d.expand('\tdescription = "fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}";\n')) | ||
139 | fd.write('\t#address-cells = <1>;\n') | ||
140 | |||
141 | # | ||
142 | # Emit the fitImage ITS footer | ||
143 | # | ||
144 | def fitimage_emit_fit_footer(d, fd): | ||
145 | fd.write('};\n') | ||
146 | |||
147 | # | ||
148 | # Emit the fitImage section | ||
149 | # | ||
150 | def fitimage_emit_section_start(d, fd, section): | ||
151 | fd.write(f'\t{section} {{\n') | ||
152 | |||
153 | # | ||
154 | # Emit the fitImage section end | ||
155 | # | ||
156 | def fitimage_emit_section_end(d, fd): | ||
157 | fd.write('\t};\n') | ||
158 | |||
159 | def fitimage_emit_section_kernel(d, fd, imgpath, imgsource, imgcomp): | ||
160 | kernelcount = 1 | ||
161 | kernel_csum = d.getVar("FITIMAGE_HASH_ALGO") | ||
162 | arch = d.getVar("ARCH") | ||
163 | loadaddr = d.getVar("FITIMAGE_LOADADDRESS") | ||
164 | entryaddr = d.getVar("FITIMAGE_ENTRYPOINT") | ||
165 | |||
166 | bb.note(f"Adding kernel-{kernelcount} section to ITS file") | ||
167 | |||
168 | fd.write(f'\t\tkernel-{kernelcount} {{\n') | ||
169 | fd.write('\t\t\tdescription = "Linux kernel";\n') | ||
170 | fd.write(f'\t\t\tdata = /incbin/("{imgpath}/{imgsource}");\n') | ||
171 | fd.write('\t\t\ttype = "kernel";\n') | ||
172 | fd.write(f'\t\t\tarch = "{arch}";\n') | ||
173 | fd.write('\t\t\tos = "linux";\n') | ||
174 | fd.write(f'\t\t\tcompression = "{imgcomp}";\n') | ||
175 | if (loadaddr): | ||
176 | fd.write(f'\t\t\tload = <{loadaddr}>;\n') | ||
177 | if (entryaddr): | ||
178 | fd.write(f'\t\t\tentry = <{entryaddr}>;\n') | ||
179 | fd.write('\t\t\thash-1 {\n') | ||
180 | fd.write(f'\t\t\t\talgo = "{kernel_csum}";\n') | ||
181 | fd.write('\t\t\t};\n') | ||
182 | fd.write('\t\t};\n') | ||
183 | |||
184 | # | ||
185 | # Emit the fitImage ITS DTB section | ||
186 | # | ||
187 | def _fitimage_emit_section_dtb(d, fd, dtb_file, dtb_path, loadaddr, desc): | ||
188 | dtb_csum = d.getVar("FITIMAGE_HASH_ALGO") | ||
189 | arch = d.getVar("ARCH") | ||
190 | |||
191 | bb.note(f"Adding fdt-{dtb_file} section to ITS file") | ||
192 | |||
193 | fd.write(f'\t\tfdt-{dtb_file} {{\n') | ||
194 | fd.write(f'\t\t\tdescription = "{desc}";\n') | ||
195 | fd.write(f'\t\t\tdata = /incbin/("{dtb_path}/{dtb_file}");\n') | ||
196 | fd.write('\t\t\ttype = "flat_dt";\n') | ||
197 | fd.write(f'\t\t\tarch = "{arch}";\n') | ||
198 | fd.write('\t\t\tcompression = "none";\n') | ||
199 | if loadaddr: | ||
200 | fd.write(f'\t\t\tload = <{loadaddr}>;\n') | ||
201 | fd.write('\t\t\thash-1 {\n') | ||
202 | fd.write(f'\t\t\t\talgo = "{dtb_csum}";\n') | ||
203 | fd.write('\t\t\t};\n') | ||
204 | fd.write('\t\t};\n') | ||
205 | |||
206 | |||
207 | def fitimage_emit_section_dtb(d, fd, dtb_file, dtb_path): | ||
208 | loadaddr = d.getVar("FITIMAGE_DTB_LOADADDRESS") | ||
209 | |||
210 | _fitimage_emit_section_dtb(d, fd, dtb_file, dtb_path, loadaddr, "Flattened Device Tree blob") | ||
211 | |||
212 | # | ||
213 | # Emit the fitImage ITS DTB overlay section | ||
214 | # | ||
215 | def fitimage_emit_section_dtb_overlay(d, fd, dtb_file, dtb_path): | ||
216 | loadaddr = d.getVar("FITIMAGE_DTB_OVERLAY_LOADADDRESS") | ||
217 | |||
218 | _fitimage_emit_section_dtb(d, fd, dtb_file, dtb_path, loadaddr, "Flattened Device Tree Overlay blob") | ||
219 | |||
220 | |||
221 | # | ||
222 | # Emit the fitImage ITS ramdisk section | ||
223 | # | ||
224 | def fitimage_emit_section_ramdisk(d, fd, img_file, img_path): | ||
225 | ramdisk_count = "1" | ||
226 | ramdisk_csum = d.getVar("FITIMAGE_HASH_ALGO") | ||
227 | arch = d.getVar("ARCH") | ||
228 | loadaddr = d.getVar("FITIMAGE_RD_LOADADDRESS") | ||
229 | entryaddr = d.getVar("FITIMAGE_RD_ENTRYPOINT") | ||
230 | |||
231 | bb.note(f"Adding ramdisk-{ramdisk_count} section to ITS file") | ||
232 | |||
233 | fd.write(f'\t\tramdisk-{ramdisk_count} {{\n') | ||
234 | fd.write(f'\t\t\tdescription = "{img_file}";\n') | ||
235 | fd.write(f'\t\t\tdata = /incbin/("{img_path}/{img_file}");\n') | ||
236 | fd.write('\t\t\ttype = "ramdisk";\n') | ||
237 | fd.write(f'\t\t\tarch = "{arch}";\n') | ||
238 | fd.write('\t\t\tos = "linux";\n') | ||
239 | fd.write('\t\t\tcompression = "none";\n') | ||
240 | if (loadaddr): | ||
241 | fd.write(f'\t\t\tload = <{loadaddr}>;\n') | ||
242 | if (entryaddr): | ||
243 | fd.write(f'\t\t\tentry = <{entryaddr}>;\n') | ||
244 | fd.write('\t\t\thash-1 {\n') | ||
245 | fd.write(f'\t\t\t\talgo = "{ramdisk_csum}";\n') | ||
246 | fd.write('\t\t\t};\n') | ||
247 | fd.write('\t\t};\n') | ||
248 | |||
249 | def fitimage_emit_section_bootscript(d, fd, imgpath, imgsource): | ||
250 | hash_algo = d.getVar("FITIMAGE_HASH_ALGO") | ||
251 | arch = d.getVar("ARCH") | ||
252 | |||
253 | bb.note(f"Adding bootscr-{imgsource} section to ITS file") | ||
254 | |||
255 | fd.write(f'\t\tbootscr-{imgsource} {{\n') | ||
256 | fd.write('\t\t\tdescription = "U-boot script";\n') | ||
257 | fd.write(f'\t\t\tdata = /incbin/("{imgpath}/{imgsource}");\n') | ||
258 | fd.write('\t\t\ttype = "script";\n') | ||
259 | fd.write(f'\t\t\tarch = "{arch}";\n') | ||
260 | fd.write('\t\t\tos = "linux";\n') | ||
261 | fd.write('\t\t\tcompression = "none";\n') | ||
262 | fd.write('\t\t\thash-1 {\n') | ||
263 | fd.write(f'\t\t\t\talgo = "{hash_algo}";\n') | ||
264 | fd.write('\t\t\t};\n') | ||
265 | fd.write('\t\t};\n') | ||
266 | |||
267 | def fitimage_emit_subsection_signature(d, fd, sign_images_list): | ||
268 | hash_algo = d.getVar("FITIMAGE_HASH_ALGO") | ||
269 | encrypt_algo = d.getVar("FITIMAGE_ENCRYPT_ALGO") or "" | ||
270 | conf_sign_keyname = d.getVar("FITIMAGE_SIGN_KEYNAME") | ||
271 | signer_name = d.getVar("FITIMAGE_SIGNER") | ||
272 | signer_version = d.getVar("FITIMAGE_SIGNER_VERSION") | ||
273 | sign_images = ", ".join(f'"{s}"' for s in sign_images_list) | ||
274 | |||
275 | fd.write('\t\t\tsignature-1 {\n') | ||
276 | fd.write(f'\t\t\t\talgo = "{hash_algo},{encrypt_algo}";\n') | ||
277 | if conf_sign_keyname: | ||
278 | fd.write(f'\t\t\t\tkey-name-hint = "{conf_sign_keyname}";\n') | ||
279 | fd.write(f'\t\t\t\tsign-images = {sign_images};\n') | ||
280 | fd.write(f'\t\t\t\tsigner-name = "{signer_name}";\n') | ||
281 | fd.write(f'\t\t\t\tsigner-version = "{signer_version}";\n') | ||
282 | fd.write('\t\t\t};\n') | ||
283 | |||
284 | # | ||
285 | # Emit the fitImage ITS configuration section | ||
286 | # | ||
287 | def fitimage_emit_section_config(d, fd, dtb, kernelcount, ramdiskcount, setupcount, bootscriptid, compatible, dtbcount): | ||
288 | sign = d.getVar("FITIMAGE_SIGN") | ||
289 | conf_default = None | ||
290 | conf_prefix = d.getVar('FITIMAGE_CONFIG_PREFIX') or "" | ||
291 | |||
292 | bb.note(f"Adding {conf_prefix}{dtb} section to ITS file") | ||
293 | |||
294 | conf_desc="Linux kernel" | ||
295 | if dtb: | ||
296 | conf_desc += ", FDT blob" | ||
297 | if ramdiskcount: | ||
298 | conf_desc += ", ramdisk" | ||
299 | if setupcount: | ||
300 | conf_desc += ", setup" | ||
301 | if bootscriptid: | ||
302 | conf_desc += ", u-boot script" | ||
303 | if dtbcount == 1: | ||
304 | conf_default = d.getVar('FITIMAGE_DEFAULT_CONFIG') or f'{conf_prefix}{dtb}' | ||
305 | |||
306 | if conf_default: | ||
307 | fd.write(f'\t\tdefault = "{conf_default}";\n') | ||
308 | fd.write(f'\t\t{conf_prefix}{dtb} {{\n') | ||
309 | fd.write(f'\t\t\tdescription = "{dtbcount} {conf_desc}";\n') | ||
310 | if kernelcount: | ||
311 | fd.write('\t\t\tkernel = "kernel-1";\n') | ||
312 | fd.write(f'\t\t\tfdt = "fdt-{dtb}";\n') | ||
313 | if ramdiskcount: | ||
314 | fd.write(f'\t\t\tramdisk = "ramdisk-{ramdiskcount}";\n') | ||
315 | if bootscriptid: | ||
316 | fd.write(f'\t\t\tbootscr = "bootscr-{bootscriptid}";\n') | ||
317 | if compatible: | ||
318 | fd.write(f'\t\t\tcompatible = "{compatible}";\n') | ||
319 | |||
320 | if sign == "1": | ||
321 | sign_images = ["kernel"] | ||
322 | if dtb: | ||
323 | sign_images.append("fdt") | ||
324 | if ramdiskcount: | ||
325 | sign_images.append("ramdisk") | ||
326 | if setupcount: | ||
327 | sign_images.append("setup") | ||
328 | if bootscriptid: | ||
329 | sign_images.append("bootscr") | ||
330 | fitimage_emit_subsection_signature(d, fd, sign_images) | ||
331 | |||
332 | fd.write('\t\t' + '};\n') | ||
333 | |||
334 | # | ||
335 | # Emits a device tree overlay config section | ||
336 | # | ||
337 | def fitimage_emit_section_config_fdto(d, fd, dtb, compatible): | ||
338 | sign = d.getVar("FITIMAGE_SIGN") | ||
339 | bb.note("Adding overlay config section to ITS file") | ||
340 | |||
341 | fd.write(f'\t\t{dtb} {{\n') | ||
342 | fd.write(f'\t\t\tdescription = "Device Tree Overlay";\n') | ||
343 | fd.write(f'\t\t\tfdt = "fdt-{dtb}";\n') | ||
344 | if compatible: | ||
345 | fd.write(f'\t\t\tcompatible = "{compatible}";\n') | ||
346 | |||
347 | if sign == "1": | ||
348 | sign_images = ["fdt"] | ||
349 | fitimage_emit_subsection_signature(d, fd, sign_images) | ||
350 | |||
351 | fd.write('\t\t' + '};\n') | ||
352 | |||
353 | python write_manifest() { | ||
354 | machine = d.getVar('MACHINE') | ||
355 | kernelcount=1 | ||
356 | DTBS = "" | ||
357 | DTBOS = "" | ||
358 | ramdiskcount = "" | ||
359 | setupcount = "" | ||
360 | bootscriptid = "" | ||
361 | compatible = "" | ||
362 | |||
363 | def get_dtbs(d, dtb_suffix): | ||
364 | sysroot = d.getVar('RECIPE_SYSROOT') | ||
365 | deploydir = d.getVar('DEPLOY_DIR_IMAGE') | ||
366 | |||
367 | dtbs = (d.getVar('KERNEL_DEVICETREE') or '').split() | ||
368 | dtbs = [os.path.basename(x) for x in dtbs if x.endswith(dtb_suffix)] | ||
369 | ext_dtbs = os.listdir(d.getVar('EXTERNAL_KERNEL_DEVICETREE')) if d.getVar('EXTERNAL_KERNEL_DEVICETREE') else [] | ||
370 | ext_dtbs = [x for x in ext_dtbs if x.endswith(dtb_suffix)] | ||
371 | |||
372 | result = [] | ||
373 | # Prefer BSP dts if BSP and kernel provide the same dts | ||
374 | for dtb in sorted(set(dtbs + ext_dtbs)): | ||
375 | dtbpath = f'{sysroot}/boot/devicetree/{dtb}' if dtb in ext_dtbs else f'{deploydir}/{dtb}' | ||
376 | result.append(dtbpath) | ||
377 | |||
378 | return " ".join(result) | ||
379 | |||
380 | with open('%s/manifest.its' % d.getVar('B'), 'w') as fd: | ||
381 | images = d.getVar('FITIMAGE_IMAGES') | ||
382 | if not images: | ||
383 | bb.warn("No images specified in FITIMAGE_IMAGES. Generated FIT image will be empty") | ||
384 | |||
385 | fitimage_emit_fit_header(d, fd) | ||
386 | fitimage_emit_section_start(d, fd, 'images') | ||
387 | |||
388 | for image in (images or "").split(): | ||
389 | imageflags = d.getVarFlags('FITIMAGE_IMAGE_%s' % image, expand=['file', 'fstype', 'type', 'comp']) or {} | ||
390 | imgtype = imageflags.get('type', 'kernel') | ||
391 | if imgtype == 'kernel': | ||
392 | if d.getVar('KERNEL_IMAGETYPE') not in ('zImage', 'Image') and not imageflags.get('comp'): | ||
393 | bb.warn(f"KERNEL_IMAGETYPE is '{d.getVar('KERNEL_IMAGETYPE')}' but FITIMAGE_IMAGE_kernel[comp] is not set.") | ||
394 | default = "%s-%s%s" % (d.getVar('KERNEL_IMAGETYPE'), machine, d.getVar('KERNEL_IMAGE_BIN_EXT')) | ||
395 | imgsource = imageflags.get('file', default) | ||
396 | imgcomp = imageflags.get('comp', 'none') | ||
397 | imgpath = d.getVar("DEPLOY_DIR_IMAGE") | ||
398 | fitimage_emit_section_kernel(d, fd, imgpath, imgsource, imgcomp) | ||
399 | elif imgtype == 'fdt': | ||
400 | default = get_dtbs(d, "dtb") | ||
401 | dtbfiles = imageflags.get('file', default) | ||
402 | if not dtbfiles: | ||
403 | bb.fatal(f"No dtb file found for image '{image}'. Set KERNEL_DEVICETREE, [file] varflag, or reference devicetree.bbclass-based recipe.") | ||
404 | for dtb in dtbfiles.split(): | ||
405 | dtb_path, dtb_file = os.path.split(dtb) | ||
406 | DTBS += f" {dtb}" | ||
407 | fitimage_emit_section_dtb(d, fd, dtb_file, dtb_path) | ||
408 | elif imgtype == 'fdto': | ||
409 | default = get_dtbs(d, "dtbo") | ||
410 | dtbofiles = imageflags.get('file', default) | ||
411 | if not dtbofiles: | ||
412 | bb.fatal(f"No dtbo file found for image '{image}'. Set KERNEL_DEVICETREE, [file] varflag, or reference devicetree.bbclass-based recipe.") | ||
413 | for dtb in dtbofiles.split(): | ||
414 | dtb_path, dtb_file = os.path.split(dtb) | ||
415 | DTBOS = DTBOS + " " + dtb | ||
416 | fitimage_emit_section_dtb_overlay(d, fd, dtb_file, dtb_path) | ||
417 | elif imgtype == 'fdtapply': | ||
418 | import subprocess | ||
419 | dtbofiles = imageflags.get('file', None) | ||
420 | if not dtbofiles: | ||
421 | bb.fatal(f"No dtbo file found for image '{image}'. Set via [file] varflag.") | ||
422 | dtboutname = imageflags.get('name', None) | ||
423 | if not dtboutname: | ||
424 | bb.fatal(f"No dtb output name found for image '{image}'. Set via [name] varflag.") | ||
425 | dtbresult = "%s/%s" % (d.getVar('B'), dtboutname) | ||
426 | dtbcommand = "" | ||
427 | for dtb in dtbofiles.split(): | ||
428 | dtb_path, dtb_file = os.path.split(dtb) | ||
429 | if not dtb_path: | ||
430 | dtb_path = d.getVar("DEPLOY_DIR_IMAGE") | ||
431 | if not dtbcommand: | ||
432 | if not dtb_file.endswith('.dtb'): | ||
433 | bb.fatal(f"fdtapply failed: Expected (non-overlay) .dtb file as first element, but got {dtb_file}") | ||
434 | dtbcommand = f"fdtoverlay -i {dtb_path}/{dtb_file} -o {dtbresult}" | ||
435 | else: | ||
436 | if not dtb_file.endswith('.dtbo'): | ||
437 | bb.fatal(f"fdtapply failed: Expected .dtbo file, but got {dtb_file}") | ||
438 | dtbcommand += f" {dtb_path}/{dtb_file}" | ||
439 | result = subprocess.run(dtbcommand, stderr=subprocess.PIPE, shell=True, text=True) | ||
440 | if result.returncode != 0: | ||
441 | bb.fatal(f"Running {dtbcommand} failed: {result.stderr}") | ||
442 | dtb_path, dtb_file = os.path.split(dtbresult) | ||
443 | DTBS += f" {dtbresult}" | ||
444 | fitimage_emit_section_dtb(d, fd, dtb_file, dtb_path) | ||
445 | elif imgtype == 'ramdisk': | ||
446 | ramdiskcount = "1" | ||
447 | default_imgfstype = (d.getVar('INITRAMFS_FSTYPES') or "").split()[0] | ||
448 | img_fstype = imageflags.get('fstype', default_imgfstype) | ||
449 | img_file = "%s%s.%s" % (d.getVar('FITIMAGE_IMAGE_%s' % image), d.getVar('IMAGE_MACHINE_SUFFIX'), img_fstype) | ||
450 | img_path = d.getVar("DEPLOY_DIR_IMAGE") | ||
451 | fitimage_emit_section_ramdisk(d, fd, img_file, img_path) | ||
452 | elif imgtype == 'bootscript': | ||
453 | if bootscriptid: | ||
454 | bb.fatal("Only a single boot script is supported (already set to: %s)" % bootscriptid) | ||
455 | imgsource = imageflags.get('file', None) | ||
456 | imgpath = d.getVar("DEPLOY_DIR_IMAGE") | ||
457 | bootscriptid = imgsource | ||
458 | fitimage_emit_section_bootscript(d, fd, imgpath, imgsource) | ||
459 | else: | ||
460 | bb.fatal(f"Unsupported image type: '{imgtype}'") | ||
461 | fitimage_emit_section_end(d, fd) | ||
462 | # | ||
463 | # Step 5: Prepare a configurations section | ||
464 | # | ||
465 | fitimage_emit_section_start(d, fd, 'configurations') | ||
466 | confcount = 0 | ||
467 | dtbcount = 1 | ||
468 | for dtb in (DTBS or "").split(): | ||
469 | import subprocess | ||
470 | try: | ||
471 | cmd = "fdtget -t s {} / compatible".format(dtb) | ||
472 | compatible = subprocess.check_output(cmd, shell=True, text=True).split()[0] | ||
473 | except subprocess.CalledProcessError: | ||
474 | bb.fatal("Failed to find root-node compatible string in (%s)" % dtb) | ||
475 | |||
476 | dtb_path, dtb_file = os.path.split(dtb) | ||
477 | fitimage_emit_section_config(d, fd, dtb_file, kernelcount, ramdiskcount, setupcount, bootscriptid, compatible, dtbcount) | ||
478 | dtbcount += 1 | ||
479 | confcount += 1 | ||
480 | for dtb in (DTBOS or "").split(): | ||
481 | import subprocess | ||
482 | try: | ||
483 | cmd = "fdtget -t s {} / compatible".format(dtb) | ||
484 | compatible = subprocess.check_output(cmd, shell=True, text=True).split()[0] | ||
485 | except subprocess.CalledProcessError: | ||
486 | bb.note("Failed to find root-node compatible string in (%s)" % dtb) | ||
487 | compatible = None | ||
488 | |||
489 | dtb_path, dtb_file = os.path.split(dtb) | ||
490 | fitimage_emit_section_config_fdto(d, fd, dtb_file, compatible) | ||
491 | confcount += 1 | ||
492 | |||
493 | fitimage_emit_section_end(d, fd) | ||
494 | |||
495 | if confcount == 0: | ||
496 | bb.fatal("Empty 'configurations' node generated! At least one 'fdt' or 'fdto' type is required.") | ||
497 | |||
498 | fitimage_emit_fit_footer(d, fd) | ||
499 | } | ||
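# For orientation (illustrative, one kernel plus one dtb), the generated
# ${B}/manifest.its has roughly this shape:
#
#   /dts-v1/;
#   / {
#       description = "fitImage for <distro>/<pv>/<machine>";
#       images {
#           kernel-1 { ... };
#           fdt-<board>.dtb { ... };
#       };
#       configurations {
#           default = "conf-<board>.dtb";
#           conf-<board>.dtb {
#               kernel = "kernel-1";
#               fdt = "fdt-<board>.dtb";
#               compatible = "<root-node compatible of the dtb>";
#           };
#       };
#   };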
500 | |||
501 | do_configure[postfuncs] += "write_manifest" | ||
502 | |||
503 | do_fitimage () { | ||
504 | if [ "${FITIMAGE_SIGN}" = "1" ]; then | ||
505 | uboot-mkimage ${FITIMAGE_MKIMAGE_EXTRA_ARGS} \ | ||
506 | -k "${FITIMAGE_SIGN_KEYDIR}" -r \ | ||
507 | -f "${B}/manifest.its" \ | ||
508 | "${B}/fitImage" | ||
509 | else | ||
510 | uboot-mkimage ${FITIMAGE_MKIMAGE_EXTRA_ARGS} \ | ||
511 | -f "${B}/manifest.its" \ | ||
512 | "${B}/fitImage" | ||
513 | fi | ||
514 | } | ||
515 | addtask fitimage after do_configure | ||
516 | |||
517 | ITS_NAME ?= "${PN}-${KERNEL_ARTIFACT_NAME}" | ||
518 | ITS_LINK_NAME ?= "${PN}-${KERNEL_ARTIFACT_LINK_NAME}" | ||
519 | FITIMAGE_IMAGE_NAME ?= "fitImage-${PN}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}" | ||
520 | FITIMAGE_IMAGE_LINK_NAME ?= "fitImage-${PN}-${KERNEL_FIT_LINK_NAME}" | ||
521 | |||
522 | SSTATE_SKIP_CREATION:task-deploy = '1' | ||
523 | |||
524 | do_deploy() { | ||
525 | bbnote 'Copying fit-image.its source file...' | ||
526 | install -m 0644 ${B}/manifest.its ${DEPLOYDIR}/${ITS_NAME}.its | ||
527 | |||
528 | bbnote 'Copying all fdt files created by type fdtapply' | ||
529 | for DTB_FILE in `find ${B} -maxdepth 1 -name '*.dtb'`; do | ||
530 | install -m 0644 ${DTB_FILE} ${DEPLOYDIR}/ | ||
531 | done | ||
532 | |||
533 | bbnote 'Copying fitImage file...' | ||
534 | install -m 0644 ${B}/fitImage ${DEPLOYDIR}/${FITIMAGE_IMAGE_NAME} | ||
535 | |||
536 | cd ${DEPLOYDIR} | ||
537 | ln -sf ${ITS_NAME}.its ${ITS_LINK_NAME}.its | ||
538 | ln -sf ${FITIMAGE_IMAGE_NAME} ${FITIMAGE_IMAGE_LINK_NAME} | ||
539 | } | ||
540 | addtask deploy after do_fitimage before do_build | ||
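A minimal consumer sketch (hypothetical recipe, illustrative values), matching the variable documentation at the top of the class:

    # fitimage-example.bb
    inherit fitimage

    FITIMAGE_IMAGES = "kernel fdt"

    FITIMAGE_IMAGE_kernel = "virtual/kernel"
    FITIMAGE_IMAGE_kernel[type] = "kernel"

    FITIMAGE_IMAGE_fdt = "virtual/kernel"
    FITIMAGE_IMAGE_fdt[type] = "fdt"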
diff --git a/meta-oe/classes/gitpkgv.bbclass b/meta-oe/classes/gitpkgv.bbclass
index 5ab507969c..eb4b1eae9a 100644
--- a/meta-oe/classes/gitpkgv.bbclass
+++ b/meta-oe/classes/gitpkgv.bbclass
@@ -70,54 +70,52 @@ def get_git_pkgv(d, use_tags):
70 | names = [] | 70 | names = [] |
71 | for url in ud.values(): | 71 | for url in ud.values(): |
72 | if url.type == 'git' or url.type == 'gitsm': | 72 | if url.type == 'git' or url.type == 'gitsm': |
73 | names.extend(url.revisions.keys()) | 73 | names.append(url.name) |
74 | if len(names) > 0: | 74 | if len(names) > 0: |
75 | format = '_'.join(names) | 75 | format = '_'.join(names) |
76 | else: | 76 | else: |
77 | format = 'default' | 77 | format = 'default' |
78 | |||
79 | found = False | 78 | found = False |
80 | for url in ud.values(): | 79 | for url in ud.values(): |
81 | if url.type == 'git' or url.type == 'gitsm': | 80 | if url.type == 'git' or url.type == 'gitsm': |
82 | for name, rev in url.revisions.items(): | 81 | if not os.path.exists(url.localpath): |
83 | if not os.path.exists(url.localpath): | 82 | return None |
84 | return None | ||
85 | 83 | ||
86 | found = True | 84 | found = True |
87 | 85 | ||
88 | vars = { 'repodir' : quote(url.localpath), | 86 | vars = { 'repodir' : quote(url.localpath), |
89 | 'rev' : quote(rev) } | 87 | 'rev' : quote(url.revision) } |
90 | 88 | ||
91 | rev = bb.fetch2.get_srcrev(d).split('+')[1] | 89 | rev = bb.fetch2.get_srcrev(d).split('+')[1] |
92 | rev_file = os.path.join(url.localpath, "oe-gitpkgv_" + rev) | 90 | rev_file = os.path.join(url.localpath, "oe-gitpkgv_" + url.revision) |
93 | 91 | ||
94 | if not os.path.exists(rev_file) or os.path.getsize(rev_file)==0: | 92 | if not os.path.exists(rev_file) or os.path.getsize(rev_file)==0: |
95 | commits = bb.fetch2.runfetchcmd( | 93 | commits = bb.fetch2.runfetchcmd( |
96 | "git --git-dir=%(repodir)s rev-list %(rev)s -- 2>/dev/null | wc -l" | 94 | "git --git-dir=%(repodir)s rev-list %(rev)s -- 2>/dev/null | wc -l" |
97 | % vars, d, quiet=True).strip().lstrip('0') | 95 | % vars, d, quiet=True).strip().lstrip('0') |
98 | 96 | ||
99 | if commits != "": | 97 | if commits != "": |
100 | oe.path.remove(rev_file, recurse=False) | 98 | oe.path.remove(rev_file, recurse=False) |
101 | with open(rev_file, "w") as f: | 99 | with open(rev_file, "w") as f: |
102 | f.write("%d\n" % int(commits)) | 100 | f.write("%d\n" % int(commits)) |
103 | else: | ||
104 | commits = "0" | ||
105 | else: | ||
106 | with open(rev_file, "r") as f: | ||
107 | commits = f.readline(128).strip() | ||
108 | |||
109 | if use_tags: | ||
110 | try: | ||
111 | output = bb.fetch2.runfetchcmd( | ||
112 | "git --git-dir=%(repodir)s describe %(rev)s --tags --exact-match 2>/dev/null" | ||
113 | % vars, d, quiet=True).strip() | ||
114 | ver = gitpkgv_drop_tag_prefix(d, output) | ||
115 | except Exception: | ||
116 | ver = "0.0-%s-g%s" % (commits, vars['rev'][:7]) | ||
117 | else: | 101 | else: |
118 | ver = "%s+%s" % (commits, vars['rev'][:7]) | 102 | commits = "0" |
119 | 103 | else: | |
120 | format = format.replace(name, ver) | 104 | with open(rev_file, "r") as f: |
105 | commits = f.readline(128).strip() | ||
106 | |||
107 | if use_tags: | ||
108 | try: | ||
109 | output = bb.fetch2.runfetchcmd( | ||
110 | "git --git-dir=%(repodir)s describe %(rev)s --tags --exact-match 2>/dev/null" | ||
111 | % vars, d, quiet=True).strip() | ||
112 | ver = gitpkgv_drop_tag_prefix(d, output) | ||
113 | except Exception: | ||
114 | ver = "0.0-%s-g%s" % (commits, vars['rev'][:7]) | ||
115 | else: | ||
116 | ver = "%s+%s" % (commits, vars['rev'][:7]) | ||
117 | |||
118 | format = format.replace(url.name, ver) | ||
121 | 119 | ||
122 | if found: | 120 | if found: |
123 | return format | 121 | return format |
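For context (not changed by this hunk): the class exposes the computed value as ${GITPKGV}/${GITPKGVTAG}, typically consumed as

    inherit gitpkgv
    PKGV = "1.0+${GITPKGV}"

so the rework above only switches the per-URL data source from the removed url.revisions dict to url.name and url.revision.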
diff --git a/meta-oe/classes/gpe.bbclass b/meta-oe/classes/gpe.bbclass
deleted file mode 100644
index a9b1cd5a90..0000000000
--- a/meta-oe/classes/gpe.bbclass
+++ /dev/null
@@ -1,17 +0,0 @@
1 | DEPENDS:prepend = "virtual/libintl intltool-native " | ||
2 | GPE_TARBALL_SUFFIX ?= "gz" | ||
3 | SRC_URI = "${GPE_MIRROR}/${BP}.tar.${GPE_TARBALL_SUFFIX}" | ||
4 | FILES:${PN} += "${datadir}/gpe ${datadir}/application-registry" | ||
5 | SECTION ?= "gpe" | ||
6 | |||
7 | inherit gettext | ||
8 | |||
9 | gpe_do_compile() { | ||
10 | oe_runmake PREFIX=${prefix} | ||
11 | } | ||
12 | |||
13 | gpe_do_install() { | ||
14 | oe_runmake PREFIX=${prefix} DESTDIR=${D} install | ||
15 | } | ||
16 | |||
17 | EXPORT_FUNCTIONS do_compile do_install | ||
diff --git a/meta-oe/classes/image_types_verity.bbclass b/meta-oe/classes/image_types_verity.bbclass index b42217c453..d77bc20a13 100644 --- a/meta-oe/classes/image_types_verity.bbclass +++ b/meta-oe/classes/image_types_verity.bbclass | |||
@@ -26,6 +26,10 @@ | |||
26 | # should be the same blockdevice in the command shown above while <dm_dev_name> | 26 | # should be the same blockdevice in the command shown above while <dm_dev_name> |
27 | # is the name of the to be created dm-verity-device. | 27 | # is the name of the to be created dm-verity-device. |
28 | # | 28 | # |
29 | # By specifying a different VERITY_IMAGE_HASHDEV_SUFFIX, the hash tree data can | ||
30 | # be created in a separate file. In this case, <dev> is just zero padded to a | ||
31 | # multiple of VERITY_BLOCK_SIZE. <hash_dev> will be a separate file. | ||
32 | # | ||
29 | # The root hash is calculated using a salt to make attacks more difficult. Thus, | 33 | # The root hash is calculated using a salt to make attacks more difficult. Thus, |
30 | # please grant each image recipe its own salt which could be generated e.g. via | 34 | # please grant each image recipe its own salt which could be generated e.g. via |
31 | # | 35 | # |
@@ -42,6 +46,7 @@ VERITY_SALT ?= "${CLASS_VERITY_SALT}" | |||
42 | VERITY_BLOCK_SIZE ?= "4096" | 46 | VERITY_BLOCK_SIZE ?= "4096" |
43 | VERITY_IMAGE_FSTYPE ?= "ext4" | 47 | VERITY_IMAGE_FSTYPE ?= "ext4" |
44 | VERITY_IMAGE_SUFFIX ?= ".verity" | 48 | VERITY_IMAGE_SUFFIX ?= ".verity" |
49 | VERITY_IMAGE_HASHDEV_SUFFIX ?= "${VERITY_IMAGE_SUFFIX}" | ||
45 | VERITY_INPUT_IMAGE ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${VERITY_IMAGE_FSTYPE}" | 50 | VERITY_INPUT_IMAGE ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${VERITY_IMAGE_FSTYPE}" |
46 | 51 | ||
47 | IMAGE_TYPEDEP:verity = "${VERITY_IMAGE_FSTYPE}" | 52 | IMAGE_TYPEDEP:verity = "${VERITY_IMAGE_FSTYPE}" |
@@ -56,6 +61,7 @@ python __anonymous() { | |||
56 | } | 61 | } |
57 | 62 | ||
58 | python do_image_verity () { | 63 | python do_image_verity () { |
64 | import io | ||
59 | import os | 65 | import os |
60 | import subprocess | 66 | import subprocess |
61 | import shutil | 67 | import shutil |
@@ -66,6 +72,9 @@ python do_image_verity () { | |||
66 | verity_image_suffix = d.getVar('VERITY_IMAGE_SUFFIX') | 72 | verity_image_suffix = d.getVar('VERITY_IMAGE_SUFFIX') |
67 | verity = '{}{}'.format(image, verity_image_suffix) | 73 | verity = '{}{}'.format(image, verity_image_suffix) |
68 | 74 | ||
75 | verity_image_hashdev_suffix = d.getVar('VERITY_IMAGE_HASHDEV_SUFFIX') | ||
76 | verity_hashdev = '{}{}'.format(image, verity_image_hashdev_suffix) | ||
77 | |||
69 | # For better readability the parameter VERITY_BLOCK_SIZE is specified in | 78 | # For better readability the parameter VERITY_BLOCK_SIZE is specified in |
70 | # bytes. It must be a multiple of the logical sector size which is 512 bytes | 79 | # bytes. It must be a multiple of the logical sector size which is 512 bytes |
71 | # in Linux. Make sure that this is the case as otherwise the resulting | 80 | # in Linux. Make sure that this is the case as otherwise the resulting |
@@ -87,9 +96,9 @@ python do_image_verity () { | |||
87 | bb.debug(1, f"data_size_blocks: {data_size_blocks}, {data_size_rest}") | 96 | bb.debug(1, f"data_size_blocks: {data_size_blocks}, {data_size_rest}") |
88 | bb.debug(1, f"data_size: {data_size}") | 97 | bb.debug(1, f"data_size: {data_size}") |
89 | 98 | ||
90 | # Create verity image | 99 | if verity == verity_hashdev: |
91 | try: | 100 | # creating self-contained dm-verity image |
92 | output = subprocess.check_output([ | 101 | veritysetup_command = [ |
93 | 'veritysetup', 'format', | 102 | 'veritysetup', 'format', |
94 | '--no-superblock', | 103 | '--no-superblock', |
95 | '--salt={}'.format(salt), | 104 | '--salt={}'.format(salt), |
@@ -98,7 +107,27 @@ python do_image_verity () { | |||
98 | '--hash-block-size={}'.format(block_size), | 107 | '--hash-block-size={}'.format(block_size), |
99 | '--hash-offset={}'.format(data_size), | 108 | '--hash-offset={}'.format(data_size), |
100 | verity, verity, | 109 | verity, verity, |
101 | ]) | 110 | ] |
111 | else: | ||
112 | # creating separate dm-verity and hash device image | ||
113 | veritysetup_command = [ | ||
114 | 'veritysetup', 'format', | ||
115 | '--salt={}'.format(salt), | ||
116 | '--data-blocks={}'.format(data_blocks), | ||
117 | '--data-block-size={}'.format(block_size), | ||
118 | '--hash-block-size={}'.format(block_size), | ||
119 | verity, verity_hashdev, | ||
120 | ] | ||
121 | # veritysetup expects the data device size to be a multiple of block_size | ||
122 | # when creating a separate hashdev file; zero-pad the verity file if needed | ||
123 | if data_size_rest: | ||
124 | with open(verity, 'rb+') as verityfile: | ||
125 | verityfile.seek(0, io.SEEK_END) | ||
126 | verityfile.write(b'\x00' * (block_size - data_size_rest)) | ||
127 | |||
128 | # Create verity image | ||
129 | try: | ||
130 | output = subprocess.check_output(veritysetup_command) | ||
102 | except subprocess.CalledProcessError as err: | 131 | except subprocess.CalledProcessError as err: |
103 | bb.fatal('%s returned with %s (%s)' % (err.cmd, err.returncode, err.output)) | 132 | bb.fatal('%s returned with %s (%s)' % (err.cmd, err.returncode, err.output)) |
104 | 133 | ||
@@ -128,7 +157,11 @@ python do_image_verity () { | |||
128 | bb.fatal('Unexpected error %s' % err) | 157 | bb.fatal('Unexpected error %s' % err) |
129 | 158 | ||
130 | # Create symlinks | 159 | # Create symlinks |
131 | for suffix in [ verity_image_suffix, '.verity-info', '.verity-params' ]: | 160 | suffix_list = [ verity_image_suffix, '.verity-info', '.verity-params' ] |
161 | if verity != verity_hashdev: | ||
162 | suffix_list.append(verity_image_hashdev_suffix) | ||
163 | |||
164 | for suffix in suffix_list: | ||
132 | try: | 165 | try: |
133 | os.remove(link + suffix) | 166 | os.remove(link + suffix) |
134 | except FileNotFoundError: | 167 | except FileNotFoundError: |
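
To exercise the new split hash-device path, VERITY_IMAGE_HASHDEV_SUFFIX only needs to differ from VERITY_IMAGE_SUFFIX; the values below are illustrative, not defaults:

    # local.conf or image recipe (illustrative values)
    IMAGE_FSTYPES += "verity"
    VERITY_IMAGE_FSTYPE = "ext4"
    # keep the default data-image suffix ...
    VERITY_IMAGE_SUFFIX = ".verity"
    # ... while writing the hash tree to a separate <hash_dev> file
    VERITY_IMAGE_HASHDEV_SUFFIX = ".verity-hash"

With equal suffixes the class keeps the previous self-contained layout (hash tree appended at --hash-offset); with distinct suffixes the data image is zero-padded to a block-size multiple and the hash tree lands in its own deployed artifact.
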
diff --git a/meta-oe/classes/panel-mipi-dbi.bbclass b/meta-oe/classes/panel-mipi-dbi.bbclass new file mode 100644 index 0000000000..7ceebc72e0 --- /dev/null +++ b/meta-oe/classes/panel-mipi-dbi.bbclass | |||
@@ -0,0 +1,48 @@ | |||
1 | # SPDX-License-Identifier: MIT | ||
2 | # | ||
3 | # Copyright Pengutronix <yocto@pengutronix.de> | ||
4 | # | ||
5 | # Class to generate firmware files for use with the `panel-mipi-dbi` Linux | ||
6 | # driver. | ||
7 | # | ||
8 | # The firmware source file contains a list of commands to send to the display | ||
9 | # controller in order to initialize it: | ||
10 | # | ||
11 | # $ cat shineworld,lh133k.txt | ||
12 | # command 0x11 # exit sleep mode | ||
13 | # delay 120 | ||
14 | # | ||
15 | # # Enable color inversion | ||
16 | # command 0x21 # INVON | ||
17 | # ... | ||
18 | # | ||
19 | # A recipe to compile such a command list into a firmware blob for use with | ||
20 | # the `panel-mipi-dbi` driver looks something like this: | ||
21 | # | ||
22 | # $ cat panel-shineworld-lh133k.bb | ||
23 | # inherit panel-mipi-dbi | ||
24 | # | ||
25 | # SRC_URI = "file://${PANEL_FIRMWARE}" | ||
26 | # | ||
27 | # PANEL_FIRMWARE = "shineworld,lh133k.txt" | ||
28 | # ... | ||
29 | |||
30 | DEPENDS = "panel-mipi-dbi-native" | ||
31 | |||
32 | PANEL_FIRMWARE_BIN ?= "${@d.getVar('PANEL_FIRMWARE').removesuffix('.txt')}.bin" | ||
33 | |||
34 | do_configure[noexec] = "1" | ||
35 | |||
36 | do_compile () { | ||
37 | mipi-dbi-cmd \ | ||
38 | "${B}/${PANEL_FIRMWARE_BIN}" \ | ||
39 | "${UNPACKDIR}/${PANEL_FIRMWARE}" | ||
40 | } | ||
41 | |||
42 | do_install () { | ||
43 | install -m 0644 -D \ | ||
44 | "${B}/${PANEL_FIRMWARE_BIN}" \ | ||
45 | "${D}${nonarch_base_libdir}/firmware/${PANEL_FIRMWARE_BIN}" | ||
46 | } | ||
47 | |||
48 | FILES:${PN} = "${nonarch_base_libdir}/firmware/" | ||
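
Filling in the recipe sketch from the class header, a complete recipe could look like the following; the panel name is taken from the example above, the license checksum is the usual oe-core MIT one, and everything else is an assumption:

    # panel-shineworld-lh133k.bb (hypothetical)
    SUMMARY = "Init-command firmware for the shineworld,lh133k MIPI-DBI panel"
    LICENSE = "MIT"
    LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"

    inherit panel-mipi-dbi

    SRC_URI = "file://${PANEL_FIRMWARE}"
    PANEL_FIRMWARE = "shineworld,lh133k.txt"
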
diff --git a/meta-oe/classes/signing.bbclass b/meta-oe/classes/signing.bbclass index f52d861b76..5068360ca7 100644 --- a/meta-oe/classes/signing.bbclass +++ b/meta-oe/classes/signing.bbclass | |||
@@ -54,7 +54,7 @@ | |||
54 | SIGNING_PKCS11_URI ?= "" | 54 | SIGNING_PKCS11_URI ?= "" |
55 | SIGNING_PKCS11_MODULE ?= "" | 55 | SIGNING_PKCS11_MODULE ?= "" |
56 | 56 | ||
57 | DEPENDS += "softhsm-native libp11-native opensc-native openssl-native" | 57 | DEPENDS += "softhsm-native libp11-native opensc-native openssl-native extract-cert-native" |
58 | 58 | ||
59 | def signing_class_prepare(d): | 59 | def signing_class_prepare(d): |
60 | import os.path | 60 | import os.path |
@@ -87,11 +87,21 @@ def signing_class_prepare(d): | |||
87 | export(role, "SIGNING_PKCS11_URI_%s_", pkcs11_uri) | 87 | export(role, "SIGNING_PKCS11_URI_%s_", pkcs11_uri) |
88 | export(role, "SIGNING_PKCS11_MODULE_%s_", pkcs11_module) | 88 | export(role, "SIGNING_PKCS11_MODULE_%s_", pkcs11_module) |
89 | 89 | ||
90 | # there can be an optional CA associated with this role | ||
91 | ca_cert_name = d.getVarFlag("SIGNING_CA", role) or d.getVar("SIGNING_CA") | ||
92 | if ca_cert_name: | ||
93 | export(role, "SIGNING_CA_%s_", ca_cert_name) | ||
94 | |||
90 | signing_pkcs11_tool() { | 95 | signing_pkcs11_tool() { |
91 | pkcs11-tool --module "${STAGING_LIBDIR_NATIVE}/softhsm/libsofthsm2.so" --login --pin 1111 $* | 96 | pkcs11-tool --module "${STAGING_LIBDIR_NATIVE}/softhsm/libsofthsm2.so" --login --pin 1111 $* |
92 | } | 97 | } |
93 | 98 | ||
94 | signing_import_prepare() { | 99 | signing_import_prepare() { |
100 | # the $PN is used as 'label' in the softhsm, which is a "CK_UTF8CHAR | ||
101 | # paddedLabel[32]" in softhsm2-util.cpp, so it must not be longer. | ||
102 | LEN=$(echo -n ${PN} | wc -c) | ||
103 | test $LEN -le 32 || bbfatal "PN must not have a length greater than 32 chars." | ||
104 | |||
95 | export _SIGNING_ENV_FILE_="${B}/meta-signing.env" | 105 | export _SIGNING_ENV_FILE_="${B}/meta-signing.env" |
96 | rm -f "$_SIGNING_ENV_FILE_" | 106 | rm -f "$_SIGNING_ENV_FILE_" |
97 | 107 | ||
@@ -118,28 +128,131 @@ signing_import_define_role() { | |||
118 | echo "_SIGNING_PKCS11_MODULE_${role}_=\"softhsm\"" >> $_SIGNING_ENV_FILE_ | 128 | echo "_SIGNING_PKCS11_MODULE_${role}_=\"softhsm\"" >> $_SIGNING_ENV_FILE_ |
119 | } | 129 | } |
120 | 130 | ||
121 | # signing_import_cert_from_der <role> <der> | 131 | # signing_import_cert_from_der <cert_name> <der> |
122 | # | 132 | # |
123 | # Import a certificate from DER file to a role. To be used | 133 | # Import a certificate from a DER file under a cert_name. |
124 | # with SoftHSM. | 134 | # The <cert_name> can either be a role previously set up with |
135 | # signing_import_define_role, linking the certificate to a signing key, | ||
136 | # or a new identifier when dealing with a standalone certificate. | ||
137 | # | ||
138 | # To be used with SoftHSM. | ||
125 | signing_import_cert_from_der() { | 139 | signing_import_cert_from_der() { |
126 | local role="${1}" | 140 | local cert_name="${1}" |
127 | local der="${2}" | 141 | local der="${2}" |
128 | 142 | ||
129 | signing_pkcs11_tool --type cert --write-object "${der}" --label "${role}" | 143 | # check whether the cert_name/role needs to be defined first, |
144 | # and define it if necessary | ||
145 | local uri=$(signing_get_uri $cert_name) | ||
146 | if [ -z "$uri" ]; then | ||
147 | signing_import_define_role "$cert_name" | ||
148 | fi | ||
149 | |||
150 | signing_pkcs11_tool --type cert --write-object "${der}" --label "${cert_name}" | ||
130 | } | 151 | } |
131 | 152 | ||
132 | # signing_import_cert_from_pem <role> <pem> | 153 | # signing_import_set_ca <cert_name> <ca_cert_name> |
133 | # | 154 | # |
134 | # Import a certificate from PEM file to a role. To be used | 155 | # Link the certificate from <cert_name> to its issuer stored in |
135 | # with SoftHSM. | 156 | # <ca_cert_name>. By walking this linked list, a CA chain can later be |
157 | # reconstructed from the involved roles. | ||
158 | signing_import_set_ca() { | ||
159 | local cert_name="${1}" | ||
160 | local ca_cert_name="${2}" | ||
161 | |||
162 | echo "_SIGNING_CA_${cert_name}_=\"${ca_cert_name}\"" >> $_SIGNING_ENV_FILE_ | ||
163 | echo "added link from ${cert_name} to ${ca_cert_name}" | ||
164 | } | ||
165 | |||
166 | # signing_get_ca <cert_name> | ||
167 | # | ||
168 | # Returns the <ca_cert_name> that has been set previously, either | ||
169 | # through signing_import_set_ca or through a local.conf override | ||
170 | # SIGNING_CA[role] = ... | ||
171 | # If none was set, the empty string is returned. | ||
172 | signing_get_ca() { | ||
173 | local cert_name="${1}" | ||
174 | |||
175 | # prefer local configuration | ||
176 | eval local ca="\$SIGNING_CA_${cert_name}_" | ||
177 | if [ -n "$ca" ]; then | ||
178 | echo "$ca" | ||
179 | return | ||
180 | fi | ||
181 | |||
182 | # fall back to softhsm | ||
183 | eval echo "\$_SIGNING_CA_${cert_name}_" | ||
184 | } | ||
185 | |||
186 | # signing_has_ca <cert_name> | ||
187 | # | ||
188 | # check if the cert_name links to another cert_name that is its | ||
189 | # certificate authority/issuer. | ||
190 | signing_has_ca() { | ||
191 | local ca_cert_name="$(signing_get_ca ${1})" | ||
192 | |||
193 | test -n "$ca_cert_name" | ||
194 | return $? | ||
195 | } | ||
196 | |||
197 | # signing_get_intermediate_certs <cert_name> | ||
198 | # | ||
199 | # return a list of role/name intermediate CA certificates for a given | ||
200 | # <cert_name> by walking the chain set up with signing_import_set_ca. | ||
201 | # | ||
202 | # The returned list will not include the root CA, and can | ||
203 | # potentially be empty. | ||
204 | # | ||
205 | # To be used with SoftHSM. | ||
206 | signing_get_intermediate_certs() { | ||
207 | local cert_name="${1}" | ||
208 | local intermediary="" | ||
209 | while signing_has_ca "${cert_name}"; do | ||
210 | cert_name="$(signing_get_ca ${cert_name})" | ||
211 | if signing_has_ca "${cert_name}"; then | ||
212 | intermediary="${intermediary} ${cert_name}" | ||
213 | fi | ||
214 | done | ||
215 | echo "${intermediary}" | ||
216 | } | ||
217 | |||
218 | # signing_get_root_cert <cert_name> | ||
219 | # | ||
220 | # return the role/name of the CA root certificate for a given | ||
221 | # <cert_name>, by walking the chain set up with signing_import_set_ca | ||
222 | # all the way to the last certificate in line that has no CA set, | ||
223 | # which would be the root. | ||
224 | # | ||
225 | # To be used with SoftHSM. | ||
226 | signing_get_root_cert() { | ||
227 | local cert_name="${1}" | ||
228 | while signing_has_ca "${cert_name}"; do | ||
229 | cert_name="$(signing_get_ca ${cert_name})" | ||
230 | done | ||
231 | echo "${cert_name}" | ||
232 | } | ||
233 | |||
234 | # signing_import_cert_from_pem <cert_name> <pem> | ||
235 | # | ||
236 | # Import a certificate from a PEM file under a cert_name. | ||
237 | # The <cert_name> can either be a role previously set up with | ||
238 | # signing_import_define_role, linking the certificate to a signing key, | ||
239 | # or a new identifier when dealing with a standalone certificate. | ||
240 | # | ||
241 | # To be used with SoftHSM. | ||
136 | signing_import_cert_from_pem() { | 242 | signing_import_cert_from_pem() { |
137 | local role="${1}" | 243 | local cert_name="${1}" |
138 | local pem="${2}" | 244 | local pem="${2}" |
139 | 245 | ||
246 | # check whether the cert_name/role needs to be defined first, | ||
247 | # and define it if necessary | ||
248 | local uri=$(signing_get_uri $cert_name) | ||
249 | if [ -z "$uri" ]; then | ||
250 | signing_import_define_role "$cert_name" | ||
251 | fi | ||
252 | |||
140 | openssl x509 \ | 253 | openssl x509 \ |
141 | -in "${pem}" -inform pem -outform der | | 254 | -in "${pem}" -inform pem -outform der | |
142 | signing_pkcs11_tool --type cert --write-object /proc/self/fd/0 --label "${role}" | 255 | signing_pkcs11_tool --type cert --write-object /proc/self/fd/0 --label "${cert_name}" |
143 | } | 256 | } |
144 | 257 | ||
145 | # signing_import_pubkey_from_der <role> <der> | 258 | # signing_import_pubkey_from_der <role> <der> |
@@ -311,6 +424,30 @@ signing_get_module() { | |||
311 | fi | 424 | fi |
312 | } | 425 | } |
313 | 426 | ||
427 | # signing_extract_cert_der <role> <der> | ||
428 | # | ||
429 | # Export a certificate attached to a role into a DER file. | ||
430 | # To be used with SoftHSM. | ||
431 | signing_extract_cert_der() { | ||
432 | local role="${1}" | ||
433 | local output="${2}" | ||
434 | |||
435 | extract-cert "$(signing_get_uri $role)" "${output}" | ||
436 | } | ||
437 | |||
438 | # signing_extract_cert_pem <role> <pem> | ||
439 | # | ||
440 | # Export a certificate attached to a role into a PEM file. | ||
441 | # To be used with SoftHSM. | ||
442 | signing_extract_cert_pem() { | ||
443 | local role="${1}" | ||
444 | local output="${2}" | ||
445 | |||
446 | extract-cert "$(signing_get_uri $role)" "${output}.tmp-der" | ||
447 | openssl x509 -inform der -in "${output}.tmp-der" -out "${output}" | ||
448 | rm "${output}.tmp-der" | ||
449 | } | ||
450 | |||
314 | python () { | 451 | python () { |
315 | signing_class_prepare(d) | 452 | signing_class_prepare(d) |
316 | } | 453 | } |
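
Taken together, the new helpers let one role chain up to its issuers and walk that chain later. A sketch under assumed role names and certificate paths (none of them defined by the class):

    # after signing_import_prepare: import key-bearing and standalone certs
    signing_import_define_role "kernel"
    signing_import_cert_from_pem "kernel" "${WORKDIR}/kernel.crt.pem"
    signing_import_cert_from_pem "ca-intermediate" "${WORKDIR}/intermediate.crt.pem"
    signing_import_cert_from_pem "ca-root" "${WORKDIR}/root.crt.pem"

    # record issuer links: kernel <- ca-intermediate <- ca-root
    signing_import_set_ca "kernel" "ca-intermediate"
    signing_import_set_ca "ca-intermediate" "ca-root"

    # later, e.g. while assembling a keystore:
    signing_get_root_cert "kernel"              # prints: ca-root
    signing_get_intermediate_certs "kernel"     # prints: ca-intermediate
    signing_extract_cert_pem "ca-root" "${B}/ca-root.pem"
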
diff --git a/meta-oe/classes/sysext-image.bbclass b/meta-oe/classes/sysext-image.bbclass new file mode 100644 index 0000000000..3771236c6e --- /dev/null +++ b/meta-oe/classes/sysext-image.bbclass | |||
@@ -0,0 +1,87 @@ | |||
1 | # | ||
2 | # Copyright OpenEmbedded Contributors | ||
3 | # | ||
4 | # SPDX-License-Identifier: MIT | ||
5 | # | ||
6 | |||
7 | # System extension images may, dynamically at runtime, extend the | ||
8 | # /usr/ and /opt/ directory hierarchies with additional files. This is | ||
9 | # particularly useful on immutable system images where a /usr/ and/or | ||
10 | # /opt/ hierarchy residing on a read-only file system shall be | ||
11 | # extended temporarily at runtime without making any persistent | ||
12 | # modifications. | ||
13 | |||
14 | ## Example usage: | ||
15 | # extension-image-example.bb | ||
16 | #SUMMARY = "An example image to showcase a system extension image." | ||
17 | #LICENSE = "MIT" | ||
18 | #inherit discoverable-disk-image sysext-image | ||
19 | #IMAGE_FEATURES = "" | ||
20 | #IMAGE_LINGUAS = "" | ||
21 | #IMAGE_INSTALL = "gdb" | ||
22 | # | ||
23 | ## After building, the resulting 'extension-image-example-*sysext.rootfs.ddi' | ||
24 | # can be deployed to an embedded system (running from a RO rootfs) and | ||
25 | # 'merged' into the OS by the following steps: | ||
26 | ## 1. place a symlink into the systemd-sysext image search path: | ||
27 | # $> mkdir /run/extensions | ||
28 | # $> ln -s /tmp/extension-example.sysext.ddi /run/extensions/example.raw | ||
29 | ## 2. list all available extensions: | ||
30 | # $> systemd-sysext list | ||
31 | ## 3. and enable the found extensions: | ||
32 | # $> SYSTEMD_LOG_LEVEL=debug systemd-sysext merge | ||
33 | |||
34 | # Note: PACKAGECONFIG:pn-systemd needs to include 'sysext' | ||
35 | |||
36 | # systemd-sysext [1] has a simple mechanism for version compatibility: | ||
37 | # the extension to be loaded has to contain a file named | ||
38 | # /usr/lib/extension-release.d/extension-release.NAME | ||
39 | # with the "NAME" part *exactly* matching the filename of the | ||
40 | # extension's raw-device file. | ||
41 | # | ||
42 | # From the extension-release file the "ID" and "VERSION_ID" fields are | ||
43 | # matched against the same fields in `os-release`, and the extension | ||
44 | # is "merged" only if values in both fields from both files are an | ||
45 | # exact match. | ||
46 | # | ||
47 | # Link: https://www.freedesktop.org/software/systemd/man/latest/systemd-sysext.html | ||
48 | |||
49 | inherit image | ||
50 | |||
51 | # Include '.sysext' in the deployed image filename and symlink | ||
52 | IMAGE_NAME = "${IMAGE_BASENAME}${IMAGE_MACHINE_SUFFIX}${IMAGE_VERSION_SUFFIX}.sysext" | ||
53 | IMAGE_LINK_NAME = "${IMAGE_BASENAME}${IMAGE_MACHINE_SUFFIX}.sysext" | ||
54 | EXTENSION_NAME = "${IMAGE_LINK_NAME}.${IMAGE_FSTYPES}" | ||
55 | |||
56 | # Base extension identification fields | ||
57 | EXTENSION_ID_FIELD ?= "${DISTRO}" | ||
58 | EXTENSION_VERSION_FIELD ?= "${DISTRO_VERSION}" | ||
59 | |||
60 | sysext_image_add_version_identifier_file() { | ||
61 | # Use matching based on Distro name and version | ||
62 | echo 'ID=${EXTENSION_ID_FIELD}' > ${WORKDIR}/extension-release.base | ||
63 | # os-release.bb does "sanitise_value(ver)", which needs to be done here too | ||
64 | echo 'VERSION_ID=${EXTENSION_VERSION_FIELD}' \ | ||
65 | | sed 's,+,-,g;s, ,_,g' \ | ||
66 | >> ${WORKDIR}/extension-release.base | ||
67 | |||
68 | # Instruct `systemd-sysext` to perform re-load once extension image is verified | ||
69 | echo 'EXTENSION_RELOAD_MANAGER=1' >> ${WORKDIR}/extension-release.base | ||
70 | |||
71 | install -d ${IMAGE_ROOTFS}${nonarch_libdir}/extension-release.d | ||
72 | install -m 0644 ${WORKDIR}/extension-release.base \ | ||
73 | ${IMAGE_ROOTFS}${nonarch_libdir}/extension-release.d/extension-release.${EXTENSION_NAME} | ||
74 | |||
75 | # systemd-sysext expects an extension-release file of the exact same name as the image; | ||
76 | # by setting a xattr we allow renaming of the extension image file. | ||
77 | # (note: this requires kernel xattr support in the filesystem used) | ||
78 | setfattr -n user.extension-release.strict -v false \ | ||
79 | ${IMAGE_ROOTFS}${nonarch_libdir}/extension-release.d/extension-release.${EXTENSION_NAME} | ||
80 | } | ||
81 | |||
82 | ROOTFS_POSTPROCESS_COMMAND += "sysext_image_add_version_identifier_file" | ||
83 | |||
84 | # Remove 'os-release' from the packages to be installed into the image; | ||
85 | # systemd-sysext otherwise raises the error: | ||
86 | # Extension contains '/usr/lib/os-release', which is not allowed, refusing. | ||
87 | PACKAGE_EXCLUDE += "os-release" | ||
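
For illustration, with DISTRO "poky" and DISTRO_VERSION "5.0.3" (assumed values), the postprocess hook above produces roughly:

    # /usr/lib/extension-release.d/extension-release.<EXTENSION_NAME>
    ID=poky
    VERSION_ID=5.0.3
    EXTENSION_RELOAD_MANAGER=1

systemd-sysext merges the extension only when ID and VERSION_ID match the corresponding fields of the running image's os-release.
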