summaryrefslogtreecommitdiffstats
path: root/meta/lib/oe/utils.py
diff options
context:
space:
mode:
authorRichard Purdie <richard.purdie@linuxfoundation.org>2025-11-07 13:31:53 +0000
committerRichard Purdie <richard.purdie@linuxfoundation.org>2025-11-07 13:31:53 +0000
commit8c22ff0d8b70d9b12f0487ef696a7e915b9e3173 (patch)
treeefdc32587159d0050a69009bdf2330a531727d95 /meta/lib/oe/utils.py
parentd412d2747595c1cc4a5e3ca975e3adc31b2f7891 (diff)
downloadpoky-8c22ff0d8b70d9b12f0487ef696a7e915b9e3173.tar.gz
The poky repository master branch is no longer being updated.
You can either: a) switch to individual clones of bitbake, openembedded-core, meta-yocto and yocto-docs b) use the new bitbake-setup You can find information about either approach in our documentation: https://docs.yoctoproject.org/ Note that "poky" the distro setting is still available in meta-yocto as before and we continue to use and maintain that. Long live Poky! Some further information on the background of this change can be found in: https://lists.openembedded.org/g/openembedded-architecture/message/2179 Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'meta/lib/oe/utils.py')
-rw-r--r--meta/lib/oe/utils.py513
1 file changed, 0 insertions, 513 deletions
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
deleted file mode 100644
index 0128ee411d..0000000000
--- a/meta/lib/oe/utils.py
+++ /dev/null
@@ -1,513 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6
import errno
import os
import subprocess
import traceback

import bb.parse
from bb import multiprocessing
13
def read_file(filename):
    """
    Return the stripped contents of filename, or "" if it cannot be opened.

    Open errors are deliberately swallowed because of the RDEPENDS handling
    noted historically; callers rely on "" for missing/unreadable files.
    """
    try:
        f = open(filename, "r")
    except IOError:
        return ""
    # 'with' guarantees the handle is closed even if read() raises; the
    # original leaked the handle on a read error and had an unreachable
    # trailing 'return None'.
    with f:
        return f.read().strip()
24
def ifelse(condition, iftrue = True, iffalse = False):
    """Return iftrue when condition is truthy, otherwise iffalse."""
    return iftrue if condition else iffalse
30
def conditional(variable, checkvalue, truevalue, falsevalue, d):
    """Return truevalue if d.getVar(variable) equals checkvalue, else falsevalue."""
    matched = d.getVar(variable) == checkvalue
    return truevalue if matched else falsevalue
36
def vartrue(var, iftrue, iffalse, d):
    """Return iftrue when the datastore variable var evaluates as boolean-true."""
    import oe.types
    return iftrue if oe.types.boolean(d.getVar(var)) else iffalse
43
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Compare d.getVar(variable) against checkvalue as floats; truevalue when <=."""
    current = float(d.getVar(variable))
    return truevalue if current <= float(checkvalue) else falsevalue
49
def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Version-compare d.getVar(variable) against checkvalue; truevalue when not newer."""
    cmp = bb.utils.vercmp_string(d.getVar(variable), checkvalue)
    return falsevalue if cmp > 0 else truevalue
56
def both_contain(variable1, variable2, checkvalue, d):
    """
    Return checkvalue's items (space-joined) when every item is present in
    both variable1 and variable2; otherwise return "".
    """
    words1 = set(d.getVar(variable1).split())
    words2 = set(d.getVar(variable2).split())
    if isinstance(checkvalue, str):
        wanted = set(checkvalue.split())
    else:
        wanted = set(checkvalue)
    if wanted <= words1 and wanted <= words2:
        return " ".join(wanted)
    return ""
70
def set_intersect(variable1, variable2, d):
    """
    Expand both variables, interpret them as lists of strings, and return the
    intersection as a flattened string.

    For example:
    s1 = "a b c"
    s2 = "b c d"
    s3 = set_intersect(s1, s2)
    => s3 = "b c"
    """
    common = set(d.getVar(variable1).split()) & set(d.getVar(variable2).split())
    return " ".join(common)
85
def prune_suffix(var, suffixes, d):
    """Strip each listed suffix that var ends with, then any leading MLPREFIX."""
    for candidate in suffixes:
        if candidate and var.endswith(candidate):
            var = var[:-len(candidate)]

    mlprefix = d.getVar("MLPREFIX")
    if mlprefix and var.startswith(mlprefix):
        var = var[len(mlprefix):]

    return var
98
def str_filter(f, str, d):
    """Return the whitespace-separated words of str that match regex f at their start."""
    from re import match
    kept = (word for word in str.split() if match(f, word, 0))
    return " ".join(kept)
102
def str_filter_out(f, str, d):
    """Return the whitespace-separated words of str that do NOT match regex f at their start."""
    from re import match
    kept = (word for word in str.split() if not match(f, word, 0))
    return " ".join(kept)
106
def build_depends_string(depends, task):
    """Append a taskname to a string of dependencies as used by the [depends] flag"""
    entries = ["%s:%s" % (dep, task) for dep in depends.split()]
    return " ".join(entries)
110
def inherits(d, *classes):
    """Return True if the metadata inherits any of the specified classes"""
    for cls in classes:
        if bb.data.inherits_class(cls, d):
            return True
    return False
114
def features_backfill(var, d):
    # Backfill new features into the variable named by var (e.g.
    # "DISTRO_FEATURES") so that configurations which predate a feature do
    # not silently lose existing functionality. A feature listed in
    # <var>_BACKFILL is appended unless it is already present, or has been
    # explicitly opted out of via <var>_BACKFILL_CONSIDERED.
    current = (d.getVar(var) or "").split()
    backfill = (d.getVar(var + "_BACKFILL") or "").split()
    considered = (d.getVar(var + "_BACKFILL_CONSIDERED") or "").split()

    missing = [feat for feat in backfill
               if feat not in current and feat not in considered]

    if missing:
        d.appendVar(var, " " + " ".join(missing))
135
def all_distro_features(d, features, truevalue="1", falsevalue=""):
    """
    Return truevalue when *all* of the given features are present in
    DISTRO_FEATURES, otherwise falsevalue. features may be a single
    space-separated string or anything that can be turned into a set.

    Shorthand for
    bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d).

    With the default values it works directly as a boolean:
        if oe.utils.all_distro_features(d, "foo bar"):
            bb.fatal("foo and bar are mutually exclusive DISTRO_FEATURES")

    With just a truevalue it can gate an include file:
        require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc")
    """
    return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)
155
def any_distro_features(d, features, truevalue="1", falsevalue=""):
    """
    Return truevalue when at least *one* of the given features is present in
    DISTRO_FEATURES, otherwise falsevalue. features may be a single
    space-separated string or anything that can be turned into a set.

    Shorthand for
    bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d).

    With the default values it works directly as a boolean:
        if not oe.utils.any_distro_features(d, "foo bar"):
            bb.fatal("foo, bar or both must be set in DISTRO_FEATURES")

    With just a truevalue it can gate an include file:
        require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc")
    """
    return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
176
def parallel_make_value(pm):
    """
    Extract the parallel thread count from a list of make option tokens.

    pm is a list such as ["-j", "10"] or ["-j8", "-l", "20"] (the original
    docstring said "string", but callers pass a .split() list). Returns the
    value of the first '-j' option as an int, or the empty string when no
    '-j' option is present. Note pm is consumed (popped) by this function.

    e.g. for ["-j", "10"] this returns 10 as an integer.
    """
    # look for '-j' and throw other options (e.g. '-l') away
    while pm:
        opt = pm.pop(0)
        if opt == '-j':
            if not pm:
                # A bare trailing '-j' (make's "unlimited jobs") used to
                # raise IndexError here; treat it as "no explicit count".
                break
            v = pm.pop(0)
        elif opt.startswith('-j'):
            v = opt[2:].strip()
        else:
            continue

        return int(v)

    return ''
198
def parallel_make(d, makeinst=False):
    """
    Return the integer number of parallel build threads scraped from
    PARALLEL_MAKE (or PARALLEL_MAKEINST when makeinst is True). Returns the
    empty string when no parallelization option is found.

    e.g. if PARALLEL_MAKE = "-j 10", this returns 10 as an integer.
    """
    varname = 'PARALLEL_MAKEINST' if makeinst else 'PARALLEL_MAKE'
    options = (d.getVar(varname) or '').split()
    return parallel_make_value(options)
212
def parallel_make_argument(d, fmt, limit=None, makeinst=False):
    """
    Build a tool-specific parallelism argument from PARALLEL_MAKE.

    fmt is a format string whose single '%d' is expanded with the number of
    parallel threads; when limit is given the count is capped at it. Returns
    '' when PARALLEL_MAKE carries no parallelization option.

    e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d")
    returns "-n 10".
    """
    threads = parallel_make(d, makeinst)
    if not threads:
        return ''
    if limit:
        threads = min(limit, threads)
    return fmt % threads
232
def packages_filter_out_system(d):
    """
    Return a list of packages from PACKAGES with the "system" packages such as
    PN-dbg PN-doc PN-locale-eb-gb removed.
    """
    pn = d.getVar('PN')
    system_pkgs = {pn + suffix for suffix in
                   ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')}
    locale_marker = pn + "-locale-"

    return [pkg for pkg in d.getVar('PACKAGES').split()
            if pkg not in system_pkgs and locale_marker not in pkg]
247
def getstatusoutput(cmd):
    """Thin wrapper: run cmd in a shell, returning (exitstatus, output)."""
    return subprocess.getstatusoutput(cmd)
250
251
def trim_version(version, num_parts=2):
    """
    Return just the first <num_parts> of <version>, split by periods. For
    example, trim_version("1.2.3", 2) will return "1.2".

    Raises TypeError when version is not a string and ValueError when
    num_parts is less than 1.
    """
    # isinstance (rather than the original exact type() check) also accepts
    # str subclasses, which behave identically for split/join.
    if not isinstance(version, str):
        raise TypeError("Version should be a string")
    if num_parts < 1:
        raise ValueError("Cannot split to parts < 1")

    return ".".join(version.split(".")[:num_parts])
265
def cpu_count(at_least=1, at_most=64):
    """
    Return the number of CPUs available to this process, clamped to the
    inclusive range [at_least, at_most].
    """
    # sched_getaffinity respects CPU affinity masks (taskset/cgroups),
    # unlike os.cpu_count() which reports all installed CPUs.
    # NOTE: this relied on 'os' which the file never imported; fixed at the
    # top-of-file import block.
    cpus = len(os.sched_getaffinity(0))
    return max(min(cpus, at_most), at_least)
269
def execute_pre_post_process(d, cmds):
    """Run each function named in cmds via bb.build.exec_func; cmds may be None."""
    if cmds is None:
        return

    # Historical syntax allows ';' separators; normalise to whitespace.
    for cmd in cmds.replace(";", " ").split():
        bb.note("Executing %s ..." % cmd)
        bb.build.exec_func(cmd, d)
279
@bb.parse.vardepsexclude("BB_NUMBER_THREADS")
def get_bb_number_threads(d):
    """Return BB_NUMBER_THREADS as an int, falling back to the CPU count, then 1."""
    configured = d.getVar("BB_NUMBER_THREADS")
    return int(configured or os.cpu_count() or 1)
283
def multiprocess_launch(target, items, d, extraargs=None):
    """Run target over items in parallel, using BB_NUMBER_THREADS processes."""
    threads = get_bb_number_threads(d)
    return multiprocess_launch_mp(target, items, threads, extraargs)
287
# For each item in items, call the function 'target' with item as the first
# argument, extraargs as the other arguments and handle any exceptions in the
# parent thread
def multiprocess_launch_mp(target, items, max_process, extraargs=None):
    """
    Run target(item, *extraargs) for every item in items using up to
    max_process worker processes, collecting truthy results into a list.

    On the first failure, no further items are launched; already-running
    workers are drained and then bb.fatal() is called with all collected
    errors. Returns the list of truthy results from successful calls.
    """

    class ProcessLaunch(multiprocessing.Process):
        # Process subclass that ships the child's return value (or the
        # exception plus its formatted traceback) back to the parent via a
        # Pipe, since a raw Process has no return channel.
        def __init__(self, *args, **kwargs):
            multiprocessing.Process.__init__(self, *args, **kwargs)
            self._pconn, self._cconn = multiprocessing.Pipe()
            self._exception = None
            self._result = None

        def run(self):
            # Runs in the child: send (None, result) on success or
            # (exception, traceback-string) on failure.
            try:
                ret = self._target(*self._args, **self._kwargs)
                self._cconn.send((None, ret))
            except Exception as e:
                tb = traceback.format_exc()
                self._cconn.send((e, tb))

        def update(self):
            # Parent side: drain the pipe if data is ready. Must be called
            # regularly, see the deadlock note in the main loop below.
            if self._pconn.poll():
                (e, tb) = self._pconn.recv()
                if e is not None:
                    self._exception = (e, tb)
                else:
                    # On success the second tuple element is the result value.
                    self._result = tb

        @property
        def exception(self):
            self.update()
            return self._exception

        @property
        def result(self):
            self.update()
            return self._result

    launched = []
    errors = []
    results = []
    items = list(items)
    # Keep launching while work remains and nothing has failed; keep looping
    # while any worker is still outstanding so it can be drained and reaped.
    while (items and not errors) or launched:
        if not errors and items and len(launched) < max_process:
            args = items.pop()
            if not type(args) is tuple:
                args = (args,)
            if extraargs is not None:
                args = args + extraargs
            p = ProcessLaunch(target=target, args=args)
            p.start()
            launched.append(p)
        for q in launched:
            # Have to manually call update() to avoid deadlocks. The pipe can be full and
            # transfer stalled until we try and read the results object but the subprocess won't exit
            # as it still has data to write (https://bugs.python.org/issue8426)
            q.update()
            # The finished processes are joined when calling is_alive()
            if not q.is_alive():
                if q.exception:
                    errors.append(q.exception)
                if q.result:
                    results.append(q.result)
                # NOTE(review): removing from the list being iterated skips the
                # next element this pass; the outer while re-scans, so finished
                # workers are still reaped on a later iteration.
                launched.remove(q)
    # Paranoia doesn't hurt
    for p in launched:
        p.join()
    if errors:
        msg = ""
        for (e, tb) in errors:
            if isinstance(e, subprocess.CalledProcessError) and e.output:
                msg = msg + str(e) + "\n"
                msg = msg + "Subprocess output:"
                msg = msg + e.output.decode("utf-8", errors="ignore")
            else:
                msg = msg + str(e) + ": " + str(tb) + "\n"
        bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
    return results
366
def squashspaces(string):
    """Collapse whitespace runs to single spaces and trim leading/trailing whitespace."""
    return " ".join(string.split())
370
def rprovides_map(pkgdata_dir, pkg_dict):
    """
    Map each runtime-provided item to the list of packages providing it,
    read from the runtime-reverse pkgdata files of the packages in pkg_dict.
    Packages without a pkgdata file are skipped.
    """
    rprov_map = {}

    for pkg in pkg_dict:
        pkgfile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
        if not os.path.isfile(pkgfile):
            continue
        with open(pkgfile) as f:
            for line in f:
                if not (line.startswith('RPROVIDES') or line.startswith('FILERPROVIDES')):
                    continue
                # Everything after the key is a provided item; entries
                # starting with '(' are version strings, not names.
                for prov in line.split()[1:]:
                    if not prov.startswith('('):
                        rprov_map.setdefault(prov, []).append(pkg)

    return rprov_map
392
def format_pkg_list(pkg_dict, ret_format=None, pkgdata_dir=None):
    """
    Render pkg_dict as newline-terminated text in one of several formats:
    "arch", "file", "ver", "deps" (needs pkgdata_dir) or, by default,
    bare package names. Returns "" for an empty dict.
    """
    pkgs = sorted(pkg_dict)

    if ret_format == "arch":
        lines = ["%s %s" % (p, pkg_dict[p]["arch"]) for p in pkgs]
    elif ret_format == "file":
        lines = ["%s %s %s" % (p, pkg_dict[p]["filename"], pkg_dict[p]["arch"]) for p in pkgs]
    elif ret_format == "ver":
        lines = ["%s %s %s" % (p, pkg_dict[p]["arch"], pkg_dict[p]["ver"]) for p in pkgs]
    elif ret_format == "deps":
        lines = []
        rprov_map = rprovides_map(pkgdata_dir, pkg_dict)
        for p in pkgs:
            for dep in pkg_dict[p]["deps"]:
                if dep in rprov_map:
                    # There could be multiple providers within the image
                    for provider in rprov_map[dep]:
                        lines.append("%s|%s * %s [RPROVIDES]" % (p, provider, dep))
                else:
                    lines.append("%s|%s" % (p, dep))
    else:
        lines = list(pkgs)

    text = '\n'.join(lines)

    if text:
        # make sure last line is newline terminated
        text += '\n'

    return text
426
427
# Helper function to get the host gcc version
def get_host_gcc_version(d, taskcontextonly=False):
    """Return the host gcc "major.minor" version string, or None outside a task context when taskcontextonly is set."""
    import re, subprocess

    if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
        return

    env = os.environ.copy()
    # datastore PATH does not contain session PATH as set by environment-setup-...
    # this breaks the install-buildtools use-case
    # env["PATH"] = d.getVar("PATH")
    try:
        output = subprocess.check_output("gcc --version", shell=True, env=env,
                                         stderr=subprocess.STDOUT).decode("utf-8")
    except subprocess.CalledProcessError as e:
        bb.fatal("Error running gcc --version: %s" % (e.output.decode("utf-8")))

    match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
    if not match:
        bb.fatal("Can't get compiler version from gcc --version output")

    return match.group(1)
451
@bb.parse.vardepsexclude("DEFAULTTUNE_MULTILIB_ORIGINAL", "OVERRIDES")
def get_multilib_datastore(variant, d):
    """
    Return a copy of datastore d configured for the given multilib variant.

    A non-empty variant appends the virtclass-multilib-<variant> override and
    sets MLPREFIX to "<variant>-". An empty variant restores the original
    DEFAULTTUNE (when DEFAULTTUNE_MULTILIB_ORIGINAL is recorded), strips all
    virtclass-multilib-* overrides and clears MLPREFIX. d itself is untouched.
    """
    localdata = bb.data.createCopy(d)
    if variant:
        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
        localdata.setVar("OVERRIDES", overrides)
        localdata.setVar("MLPREFIX", variant + "-")
    else:
        origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
        if origdefault:
            localdata.setVar("DEFAULTTUNE", origdefault)
        # Drop any multilib overrides while preserving the rest verbatim.
        overrides = localdata.getVar("OVERRIDES", False).split(":")
        overrides = ":".join([x for x in overrides if not x.startswith("virtclass-multilib-")])
        localdata.setVar("OVERRIDES", overrides)
        localdata.setVar("MLPREFIX", "")
    return localdata
468
def sh_quote(string):
    """Return string quoted so it is safe as a single shell word."""
    import shlex
    return shlex.quote(string)
472
def directory_size(root, blocksize=4096):
    """
    Calculate the size of the directory, taking into account hard links,
    rounding up every size to multiples of the blocksize.
    """
    import math

    seen_inodes = set()

    def roundup(size):
        # Round up to the next multiple of blocksize.
        return math.ceil(size / blocksize) * blocksize

    def getsize(path):
        # lstat so symlinks are counted as links rather than their targets;
        # each inode is counted once so hard links are not double-counted.
        st = os.lstat(path)
        if st.st_ino in seen_inodes:
            return 0
        seen_inodes.add(st.st_ino)
        return st.st_size

    total = 0
    for dirpath, dirnames, filenames in os.walk(root):
        total += sum(roundup(getsize(os.path.join(dirpath, name))) for name in filenames)
        total += roundup(getsize(dirpath))
    return total
503
# Update the mtime of a file, skip if permission/read-only issues
def touch(filename):
    """Refresh filename's mtime, tolerating permission and read-only-fs errors."""
    try:
        os.utime(filename, None)
    except PermissionError:
        pass
    except OSError as e:
        # Read-only filesystems are tolerated; any other OSError propagates.
        if e.errno == errno.EROFS:
            pass
        else:
            raise e