summaryrefslogtreecommitdiffstats
path: root/meta/classes/libc-package.bbclass
diff options
context:
space:
mode:
authorChristopher Larson <kergoth@gmail.com>2012-04-26 23:03:55 -0500
committerRichard Purdie <richard.purdie@linuxfoundation.org>2012-05-03 15:48:03 +0100
commit9c62f635ab82f56821fa10a5d3bd7490f9f97987 (patch)
tree0a74423d8906d105de97fba41c1cdd1c9c2d935b /meta/classes/libc-package.bbclass
parentb9918a31cea3e3f831330c28fa3faee978048c75 (diff)
downloadpoky-9c62f635ab82f56821fa10a5d3bd7490f9f97987.tar.gz
libc-package: rework 'precompiled' locale handling
There were a couple of problems with the handling of precompiled locales. - it gathered the list of locales from the directories - this breaks due to the naming mismatch, e.g. en_US.UTF-8 vs en_US.utf8. - it retained its hardcoded assumption that the non-suffixed locale (en_US, as opposed to en_US.*) is UTF-8, while the others are otherwise. Hardcoding this is both inflexible and just plain wrong for some toolchains. It's most common in desktop distros for 'en_US' to be non-utf8, and 'en_US.UTF-8' to be utf8, and this is the case in some external toolchains as well. The code now uses the SUPPORTED file to hold the knowledge it needs. This file not only holds the list of locales to generate, but also maps the locale names to the charsets they correspond to. The code now uses this to assemble its charset map, falling back to the '.' suffix as charset when the locale is not in the map. For precompiled, it now uses the locale->charset knowledge it has, thereby allowing non-utf8 non-suffixed locale names, whereas for non-precompiled, it reverts to the previous assumption, renaming the utf8 locale and forcibly suffixing the others. So, a person maintaining an external toolchain recipe is responsible for ensuring that the SUPPORTED file they provide matches up with the compiled locales in the toolchain, if they want to utilize precompiled locales. I believe in the long term the compiled case should do the same thing precompiled does, and use SUPPORTED or a similar mechanism to encode the knowledge, and if people want all the non-suffixed names to be utf8, they can change that file to do so. This would avoid the hardcoded assumption in the code, as well as consolidating the behavior between the compiled and precompiled cases. (From OE-Core rev: 3f36058923ccda25a3dd85046542e65b6034c09e) Signed-off-by: Christopher Larson <kergoth@gmail.com> Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'meta/classes/libc-package.bbclass')
-rw-r--r--meta/classes/libc-package.bbclass96
1 file changed, 45 insertions, 51 deletions
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index bb4ba682dc..51edba2e3f 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -207,40 +207,30 @@ python package_do_split_gconvs () {
207 207
208 dot_re = re.compile("(.*)\.(.*)") 208 dot_re = re.compile("(.*)\.(.*)")
209 209
210#GLIBC_GENERATE_LOCALES var specifies which locales to be supported, empty or "all" means all locales 210 # Read in supported locales and associated encodings
211 if use_bin != "precompiled": 211 supported = {}
212 supported = d.getVar('GLIBC_GENERATE_LOCALES', True) 212 with open(base_path_join(d.getVar('WORKDIR', True), "SUPPORTED")) as f:
213 if not supported or supported == "all": 213 for line in f.readlines():
214 f = open(base_path_join(d.getVar('WORKDIR', True), "SUPPORTED"), "r") 214 try:
215 supported = f.readlines() 215 locale, charset = line.rstrip().split()
216 f.close() 216 except ValueError:
217 else: 217 continue
218 supported = supported.split() 218 supported[locale] = charset
219 supported = map(lambda s:s.replace(".", " ") + "\n", supported) 219
220 # GLIBC_GENERATE_LOCALES var specifies which locales to be generated. empty or "all" means all locales
221 to_generate = d.getVar('GLIBC_GENERATE_LOCALES', True)
222 if not to_generate or to_generate == 'all':
223 to_generate = supported.keys()
220 else: 224 else:
221 supported = [] 225 to_generate = to_generate.split()
222 full_bin_path = d.getVar('PKGD', True) + binary_locales_dir 226 for locale in to_generate:
223 for dir in os.listdir(full_bin_path): 227 if locale not in supported:
224 dbase = dir.split(".") 228 if '.' in locale:
225 d2 = " " 229 charset = locale.split('.')[1]
226 if len(dbase) > 1: 230 else:
227 d2 = "." + dbase[1].upper() + " " 231 charset = 'UTF-8'
228 supported.append(dbase[0] + d2) 232 bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset))
229 233 supported[locale] = charset
230 # Collate the locales by base and encoding
231 utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0)
232 encodings = {}
233 for l in supported:
234 l = l[:-1]
235 (locale, charset) = l.split(" ")
236 if utf8_only and charset != 'UTF-8':
237 continue
238 m = dot_re.match(locale)
239 if m:
240 locale = m.group(1)
241 if not encodings.has_key(locale):
242 encodings[locale] = []
243 encodings[locale].append(charset)
244 234
245 def output_locale_source(name, pkgname, locale, encoding): 235 def output_locale_source(name, pkgname, locale, encoding):
246 d.setVar('RDEPENDS_%s' % pkgname, 'localedef %s-localedata-%s %s-charmap-%s' % \ 236 d.setVar('RDEPENDS_%s' % pkgname, 'localedef %s-localedata-%s %s-charmap-%s' % \
@@ -271,7 +261,7 @@ python package_do_split_gconvs () {
271 261
272 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "0" 262 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "0"
273 if use_cross_localedef == "1": 263 if use_cross_localedef == "1":
274 target_arch = d.getVar('TARGET_ARCH', True) 264 target_arch = d.getVar('TARGET_ARCH', True)
275 locale_arch_options = { \ 265 locale_arch_options = { \
276 "arm": " --uint32-align=4 --little-endian ", \ 266 "arm": " --uint32-align=4 --little-endian ", \
277 "powerpc": " --uint32-align=4 --big-endian ", \ 267 "powerpc": " --uint32-align=4 --big-endian ", \
@@ -334,25 +324,29 @@ python package_do_split_gconvs () {
334 bb.note("preparing tree for binary locale generation") 324 bb.note("preparing tree for binary locale generation")
335 bb.build.exec_func("do_prep_locale_tree", d) 325 bb.build.exec_func("do_prep_locale_tree", d)
336 326
337 # Reshuffle names so that UTF-8 is preferred over other encodings 327 utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0)
338 non_utf8 = [] 328 encodings = {}
339 for l in encodings.keys(): 329 for locale in to_generate:
340 if len(encodings[l]) == 1: 330 charset = supported[locale]
341 output_locale(l, l, encodings[l][0]) 331 if utf8_only and charset != 'UTF-8':
342 if encodings[l][0] != "UTF-8": 332 continue
343 non_utf8.append(l) 333
334 m = dot_re.match(locale)
335 if m:
336 base = m.group(1)
344 else: 337 else:
345 if "UTF-8" in encodings[l]: 338 base = locale
346 output_locale(l, l, "UTF-8")
347 encodings[l].remove("UTF-8")
348 else:
349 non_utf8.append(l)
350 for e in encodings[l]:
351 output_locale('%s.%s' % (l, e), l, e)
352 339
353 if non_utf8 != [] and use_bin != "precompiled": 340 # Precompiled locales are kept as is, obeying SUPPORTED, while
354 bb.note("the following locales are supported only in legacy encodings:") 341 # others are adjusted, ensuring that the non-suffixed locales
355 bb.note(" " + " ".join(non_utf8)) 342 # are utf-8, while the suffixed are not.
343 if use_bin == "precompiled":
344 output_locale(locale, base, charset)
345 else:
346 if charset == 'UTF-8':
347 output_locale(base, base, charset)
348 else:
349 output_locale('%s.%s' % (base, charset), base, charset)
356 350
357 if use_bin == "compile": 351 if use_bin == "compile":
358 makefile = base_path_join(d.getVar("WORKDIR", True), "locale-tree", "Makefile") 352 makefile = base_path_join(d.getVar("WORKDIR", True), "locale-tree", "Makefile")