summaryrefslogtreecommitdiffstats
path: root/bitbake/lib/bb/codeparser.py
diff options
context:
space:
mode:
authorFrazer Clews <frazer.clews@codethink.co.uk>2020-01-16 16:55:18 +0000
committerRichard Purdie <richard.purdie@linuxfoundation.org>2020-01-19 13:31:05 +0000
commit0ac5174c7d39a3e49893df0d517d47bec1935555 (patch)
tree479496afb1da7814071e39e888e8926cd03bec57 /bitbake/lib/bb/codeparser.py
parent444bcb6cb6be8d5205fc88790360d864e633a555 (diff)
downloadpoky-0ac5174c7d39a3e49893df0d517d47bec1935555.tar.gz
bitbake: lib: remove unused imports
removed unused imports which made the code harder to read, and slightly less efficient (Bitbake rev: 4367692a932ac135c5aa4f9f2a4e4f0150f76697) Signed-off-by: Frazer Clews <frazer.clews@codethink.co.uk> Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'bitbake/lib/bb/codeparser.py')
-rw-r--r--bitbake/lib/bb/codeparser.py26
1 files changed, 2 insertions, 24 deletions
diff --git a/bitbake/lib/bb/codeparser.py b/bitbake/lib/bb/codeparser.py
index fd2c4734f0..25a7ac69d3 100644
--- a/bitbake/lib/bb/codeparser.py
+++ b/bitbake/lib/bb/codeparser.py
@@ -25,13 +25,11 @@ import ast
25import sys 25import sys
26import codegen 26import codegen
27import logging 27import logging
28import pickle
29import bb.pysh as pysh 28import bb.pysh as pysh
30import os.path
31import bb.utils, bb.data 29import bb.utils, bb.data
32import hashlib 30import hashlib
33from itertools import chain 31from itertools import chain
34from bb.pysh import pyshyacc, pyshlex, sherrors 32from bb.pysh import pyshyacc, pyshlex
35from bb.cache import MultiProcessCache 33from bb.cache import MultiProcessCache
36 34
37logger = logging.getLogger('BitBake.CodeParser') 35logger = logging.getLogger('BitBake.CodeParser')
@@ -58,30 +56,10 @@ def check_indent(codestr):
58 56
59 return codestr 57 return codestr
60 58
61
62# Basically pickle, in python 2.7.3 at least, does badly with data duplication
63# upon pickling and unpickling. Combine this with duplicate objects and things
64# are a mess.
65#
66# When the sets are originally created, python calls intern() on the set keys
67# which significantly improves memory usage. Sadly the pickle/unpickle process
68# doesn't call intern() on the keys and results in the same strings being duplicated
69# in memory. This also means pickle will save the same string multiple times in
70# the cache file.
71#
72# By having shell and python cacheline objects with setstate/getstate, we force
73# the object creation through our own routine where we can call intern (via internSet).
74#
75# We also use hashable frozensets and ensure we use references to these so that
76# duplicates can be removed, both in memory and in the resulting pickled data.
77#
78# By playing these games, the size of the cache file shrinks dramatically
79# meaning faster load times and the reloaded cache files also consume much less
80# memory. Smaller cache files, faster load times and lower memory usage is good.
81#
82# A custom getstate/setstate using tuples is actually worth 15% cachesize by 59# A custom getstate/setstate using tuples is actually worth 15% cachesize by
83# avoiding duplication of the attribute names! 60# avoiding duplication of the attribute names!
84 61
62
85class SetCache(object): 63class SetCache(object):
86 def __init__(self): 64 def __init__(self):
87 self.setcache = {} 65 self.setcache = {}