| field | value | date |
|---|---|---|
| author | Richard Purdie <richard.purdie@linuxfoundation.org> | 2024-10-08 13:36:23 +0100 |
| committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2024-10-09 13:04:30 +0100 |
| commit | 2aad7b988ef1feabf3c876060998d818ed01fb97 (patch) | |
| tree | 19a5db9e85761191fca2b7782a7c795940fba9e0 /bitbake | |
| parent | 76d24b00ff7db3f9b56bbca113e7fd1248c6a484 (diff) | |
| download | poky-2aad7b988ef1feabf3c876060998d818ed01fb97.tar.gz | |
bitbake: persist_data: Remove it
It was never a great solution for persisting data, and much better options exist
now. The last user has been replaced, so drop the code and tests.
(Bitbake rev: 681a7516e9f7027e0be6f489c54a7a5e19fa9f06)
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
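
For readers unfamiliar with the module being dropped: `bb.persist_data.persist(domain, d)` opened (or created) a table named after the domain inside `bb_persist_data.sqlite3` under `PERSISTENT_DIR` (or `CACHE`) and returned an `SQLTable`, a string-to-string `MutableMapping` backed by sqlite. A minimal usage sketch against a BitBake tree from before this commit; the domain name, cache directory and keys below are illustrative:

```python
# Sketch only: needs a BitBake checkout from before this commit on sys.path.
# The domain name, cache directory, and keys below are illustrative.
import bb.data
import bb.persist_data

d = bb.data.init()
d["PERSISTENT_DIR"] = "/tmp/bb-cache"             # persist() falls back to CACHE if unset

table = bb.persist_data.persist("MY_DOMAIN", d)   # SQLTable: a str -> str MutableMapping
table["upstream-rev"] = "abc123"                  # INSERT/UPDATE inside BEGIN EXCLUSIVE
print(table["upstream-rev"])                      # SELECT by key; KeyError if missing
print("upstream-rev" in table, len(table))        # __contains__ / __len__ run SQL queries
for value in table.get_by_pattern("upstream-%"):  # SQL LIKE match over keys
    print(value)
del table["upstream-rev"]
```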
Diffstat (limited to 'bitbake')
| mode | path | lines deleted |
|---|---|---|
| -rwxr-xr-x | bitbake/bin/bitbake-selftest | 1 |
| -rw-r--r-- | bitbake/lib/bb/persist_data.py | 271 |
| -rw-r--r-- | bitbake/lib/bb/tests/persist_data.py | 129 |
3 files changed, 0 insertions, 401 deletions
diff --git a/bitbake/bin/bitbake-selftest b/bitbake/bin/bitbake-selftest
index ce901232fe..1b7a783fdc 100755
--- a/bitbake/bin/bitbake-selftest
+++ b/bitbake/bin/bitbake-selftest
@@ -28,7 +28,6 @@ tests = ["bb.tests.codeparser",
          "bb.tests.event",
          "bb.tests.fetch",
          "bb.tests.parse",
-         "bb.tests.persist_data",
          "bb.tests.runqueue",
          "bb.tests.siggen",
          "bb.tests.utils",
diff --git a/bitbake/lib/bb/persist_data.py b/bitbake/lib/bb/persist_data.py
deleted file mode 100644
index bcca791edf..0000000000
--- a/bitbake/lib/bb/persist_data.py
+++ /dev/null
@@ -1,271 +0,0 @@
-"""BitBake Persistent Data Store
-
-Used to store data in a central location such that other threads/tasks can
-access them at some future date. Acts as a convenience wrapper around sqlite,
-currently, providing a key/value store accessed by 'domain'.
-"""
-
-# Copyright (C) 2007 Richard Purdie
-# Copyright (C) 2010 Chris Larson <chris_larson@mentor.com>
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import collections
-import collections.abc
-import contextlib
-import functools
-import logging
-import os.path
-import sqlite3
-import sys
-from collections.abc import Mapping
-
-sqlversion = sqlite3.sqlite_version_info
-if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
-    raise Exception("sqlite3 version 3.3.0 or later is required.")
-
-
-logger = logging.getLogger("BitBake.PersistData")
-
-@functools.total_ordering
-class SQLTable(collections.abc.MutableMapping):
-    class _Decorators(object):
-        @staticmethod
-        def retry(*, reconnect=True):
-            """
-            Decorator that restarts a function if a database locked sqlite
-            exception occurs. If reconnect is True, the database connection
-            will be closed and reopened each time a failure occurs
-            """
-            def retry_wrapper(f):
-                def wrap_func(self, *args, **kwargs):
-                    # Reconnect if necessary
-                    if self.connection is None and reconnect:
-                        self.reconnect()
-
-                    count = 0
-                    while True:
-                        try:
-                            return f(self, *args, **kwargs)
-                        except sqlite3.OperationalError as exc:
-                            if count < 500 and ('is locked' in str(exc) or 'locking protocol' in str(exc)):
-                                count = count + 1
-                                if reconnect:
-                                    self.reconnect()
-                                continue
-                            raise
-                return wrap_func
-            return retry_wrapper
-
-        @staticmethod
-        def transaction(f):
-            """
-            Decorator that starts a database transaction and creates a database
-            cursor for performing queries. If no exception is thrown, the
-            database results are committed. If an exception occurs, the database
-            is rolled back. In all cases, the cursor is closed after the
-            function ends.
-
-            Note that the cursor is passed as an extra argument to the function
-            after `self` and before any of the normal arguments
-            """
-            def wrap_func(self, *args, **kwargs):
-                # Context manager will COMMIT the database on success,
-                # or ROLLBACK on an exception
-                with self.connection:
-                    # Automatically close the cursor when done
-                    with contextlib.closing(self.connection.cursor()) as cursor:
-                        return f(self, cursor, *args, **kwargs)
-            return wrap_func
-
-    """Object representing a table/domain in the database"""
-    def __init__(self, cachefile, table):
-        self.cachefile = cachefile
-        self.table = table
-
-        self.connection = None
-        self._execute_single("CREATE TABLE IF NOT EXISTS %s(key TEXT PRIMARY KEY NOT NULL, value TEXT);" % table)
-
-    @_Decorators.retry(reconnect=False)
-    @_Decorators.transaction
-    def _setup_database(self, cursor):
-        cursor.execute("pragma synchronous = off;")
-        # Enable WAL and keep the autocheckpoint length small (the default is
-        # usually 1000). Persistent caches are usually read-mostly, so keeping
-        # this short will keep readers running quickly
-        cursor.execute("pragma journal_mode = WAL;")
-        cursor.execute("pragma wal_autocheckpoint = 100;")
-
-    def reconnect(self):
-        if self.connection is not None:
-            self.connection.close()
-        self.connection = sqlite3.connect(self.cachefile, timeout=5)
-        self.connection.text_factory = str
-        self._setup_database()
-
-    @_Decorators.retry()
-    @_Decorators.transaction
-    def _execute_single(self, cursor, *query):
-        """
-        Executes a single query and discards the results. This correctly closes
-        the database cursor when finished
-        """
-        cursor.execute(*query)
-
-    @_Decorators.retry()
-    def _row_iter(self, f, *query):
-        """
-        Helper function that returns a row iterator. Each time __next__ is
-        called on the iterator, the provided function is evaluated to determine
-        the return value
-        """
-        class CursorIter(object):
-            def __init__(self, cursor):
-                self.cursor = cursor
-
-            def __iter__(self):
-                return self
-
-            def __next__(self):
-                row = self.cursor.fetchone()
-                if row is None:
-                    self.cursor.close()
-                    raise StopIteration
-                return f(row)
-
-            def __enter__(self):
-                return self
-
-            def __exit__(self, typ, value, traceback):
-                self.cursor.close()
-                return False
-
-        cursor = self.connection.cursor()
-        try:
-            cursor.execute(*query)
-            return CursorIter(cursor)
-        except:
-            cursor.close()
-
-    def __enter__(self):
-        self.connection.__enter__()
-        return self
-
-    def __exit__(self, *excinfo):
-        self.connection.__exit__(*excinfo)
-
-    @_Decorators.retry()
-    @_Decorators.transaction
-    def __getitem__(self, cursor, key):
-        cursor.execute("SELECT * from %s where key=?;" % self.table, [key])
-        row = cursor.fetchone()
-        if row is not None:
-            return row[1]
-        raise KeyError(key)
-
-    @_Decorators.retry()
-    @_Decorators.transaction
-    def __delitem__(self, cursor, key):
-        if key not in self:
-            raise KeyError(key)
-        cursor.execute("DELETE from %s where key=?;" % self.table, [key])
-
-    @_Decorators.retry()
-    @_Decorators.transaction
-    def __setitem__(self, cursor, key, value):
-        if not isinstance(key, str):
-            raise TypeError('Only string keys are supported')
-        elif not isinstance(value, str):
-            raise TypeError('Only string values are supported')
-
-        # Ensure the entire transaction (including SELECT) executes under write lock
-        cursor.execute("BEGIN EXCLUSIVE")
-
-        cursor.execute("SELECT * from %s where key=?;" % self.table, [key])
-        row = cursor.fetchone()
-        if row is not None:
-            cursor.execute("UPDATE %s SET value=? WHERE key=?;" % self.table, [value, key])
-        else:
-            cursor.execute("INSERT into %s(key, value) values (?, ?);" % self.table, [key, value])
-
-    @_Decorators.retry()
-    @_Decorators.transaction
-    def __contains__(self, cursor, key):
-        cursor.execute('SELECT * from %s where key=?;' % self.table, [key])
-        return cursor.fetchone() is not None
-
-    @_Decorators.retry()
-    @_Decorators.transaction
-    def __len__(self, cursor):
-        cursor.execute("SELECT COUNT(key) FROM %s;" % self.table)
-        row = cursor.fetchone()
-        if row is not None:
-            return row[0]
-
-    def __iter__(self):
-        return self._row_iter(lambda row: row[0], "SELECT key from %s;" % self.table)
-
-    def __lt__(self, other):
-        if not isinstance(other, Mapping):
-            raise NotImplementedError()
-
-        return len(self) < len(other)
-
-    def get_by_pattern(self, pattern):
-        return self._row_iter(lambda row: row[1], "SELECT * FROM %s WHERE key LIKE ?;" %
-                              self.table, [pattern])
-
-    def values(self):
-        return list(self.itervalues())
-
-    def itervalues(self):
-        return self._row_iter(lambda row: row[0], "SELECT value FROM %s;" %
-                              self.table)
-
-    def items(self):
-        return list(self.iteritems())
-
-    def iteritems(self):
-        return self._row_iter(lambda row: (row[0], row[1]), "SELECT * FROM %s;" %
-                              self.table)
-
-    @_Decorators.retry()
-    @_Decorators.transaction
-    def clear(self, cursor):
-        cursor.execute("DELETE FROM %s;" % self.table)
-
-    def has_key(self, key):
-        return key in self
-
-def persist(domain, d):
-    """Convenience factory for SQLTable objects based upon metadata"""
-    import bb.utils
-    cachedir = (d.getVar("PERSISTENT_DIR") or
-                d.getVar("CACHE"))
-    if not cachedir:
-        logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
-        sys.exit(1)
-
-    bb.utils.mkdirhier(cachedir)
-    cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3")
-
-    try:
-        return SQLTable(cachefile, domain)
-    except sqlite3.OperationalError:
-        # Sqlite fails to open database when its path is too long.
-        # After testing, 504 is the biggest path length that can be opened by
-        # sqlite.
-        # Note: This code is called before sanity.bbclass and its path length
-        # check
-        max_len = 504
-        if len(cachefile) > max_len:
-            logger.critical("The path of the cache file is too long "
-                            "({0} chars > {1}) to be opened by sqlite! "
-                            "Your cache file is \"{2}\"".format(
-                                len(cachefile),
-                                max_len,
-                                cachefile))
-            sys.exit(1)
-        else:
-            raise
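
The `transaction` decorator in the deleted module relied on two standard-library behaviours rather than any BitBake machinery: a `sqlite3` connection used as a context manager commits on success and rolls back on an exception, and `contextlib.closing()` guarantees the cursor is closed either way. A standalone sketch of that pattern, with a throwaway in-memory database and made-up table/key names:

```python
# Standalone sketch of the commit/rollback behaviour the transaction decorator used.
# Table and key names are made up; ":memory:" keeps the demo self-contained.
import contextlib
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE demo(key TEXT PRIMARY KEY NOT NULL, value TEXT);")

with conn:  # commits on success
    with contextlib.closing(conn.cursor()) as cursor:  # cursor closed either way
        cursor.execute("INSERT OR REPLACE INTO demo(key, value) VALUES (?, ?);", ("A1", "1"))

try:
    with conn:  # rolls back because the block raises
        with contextlib.closing(conn.cursor()) as cursor:
            cursor.execute("UPDATE demo SET value=? WHERE key=?;", ("2", "A1"))
            raise RuntimeError("simulated failure")
except RuntimeError:
    pass

print(conn.execute("SELECT value FROM demo WHERE key=?;", ("A1",)).fetchone())  # ('1',)
```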
diff --git a/bitbake/lib/bb/tests/persist_data.py b/bitbake/lib/bb/tests/persist_data.py
deleted file mode 100644
index f641b5acbc..0000000000
--- a/bitbake/lib/bb/tests/persist_data.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#
-# BitBake Test for lib/bb/persist_data/
-#
-# Copyright (C) 2018 Garmin Ltd.
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import unittest
-import bb.data
-import bb.persist_data
-import tempfile
-import threading
-
-class PersistDataTest(unittest.TestCase):
-    def _create_data(self):
-        return bb.persist_data.persist('TEST_PERSIST_DATA', self.d)
-
-    def setUp(self):
-        self.d = bb.data.init()
-        self.tempdir = tempfile.TemporaryDirectory()
-        self.d['PERSISTENT_DIR'] = self.tempdir.name
-        self.data = self._create_data()
-        self.items = {
-            'A1': '1',
-            'B1': '2',
-            'C2': '3'
-        }
-        self.stress_count = 10000
-        self.thread_count = 5
-
-        for k,v in self.items.items():
-            self.data[k] = v
-
-    def tearDown(self):
-        self.tempdir.cleanup()
-
-    def _iter_helper(self, seen, iterator):
-        with iter(iterator):
-            for v in iterator:
-                self.assertTrue(v in seen)
-                seen.remove(v)
-        self.assertEqual(len(seen), 0, '%s not seen' % seen)
-
-    def test_get(self):
-        for k, v in self.items.items():
-            self.assertEqual(self.data[k], v)
-
-        self.assertIsNone(self.data.get('D'))
-        with self.assertRaises(KeyError):
-            self.data['D']
-
-    def test_set(self):
-        for k, v in self.items.items():
-            self.data[k] += '-foo'
-
-        for k, v in self.items.items():
-            self.assertEqual(self.data[k], v + '-foo')
-
-    def test_delete(self):
-        self.data['D'] = '4'
-        self.assertEqual(self.data['D'], '4')
-        del self.data['D']
-        self.assertIsNone(self.data.get('D'))
-        with self.assertRaises(KeyError):
-            self.data['D']
-
-    def test_contains(self):
-        for k in self.items:
-            self.assertTrue(k in self.data)
-            self.assertTrue(self.data.has_key(k))
-        self.assertFalse('NotFound' in self.data)
-        self.assertFalse(self.data.has_key('NotFound'))
-
-    def test_len(self):
-        self.assertEqual(len(self.data), len(self.items))
-
-    def test_iter(self):
-        self._iter_helper(set(self.items.keys()), self.data)
-
-    def test_itervalues(self):
-        self._iter_helper(set(self.items.values()), self.data.itervalues())
-
-    def test_iteritems(self):
-        self._iter_helper(set(self.items.items()), self.data.iteritems())
-
-    def test_get_by_pattern(self):
-        self._iter_helper({'1', '2'}, self.data.get_by_pattern('_1'))
-
-    def _stress_read(self, data):
-        for i in range(self.stress_count):
-            for k in self.items:
-                data[k]
-
-    def _stress_write(self, data):
-        for i in range(self.stress_count):
-            for k, v in self.items.items():
-                data[k] = v + str(i)
-
-    def _validate_stress(self):
-        for k, v in self.items.items():
-            self.assertEqual(self.data[k], v + str(self.stress_count - 1))
-
-    def test_stress(self):
-        self._stress_read(self.data)
-        self._stress_write(self.data)
-        self._validate_stress()
-
-    def test_stress_threads(self):
-        def read_thread():
-            data = self._create_data()
-            self._stress_read(data)
-
-        def write_thread():
-            data = self._create_data()
-            self._stress_write(data)
-
-        threads = []
-        for i in range(self.thread_count):
-            threads.append(threading.Thread(target=read_thread))
-            threads.append(threading.Thread(target=write_thread))
-
-        for t in threads:
-            t.start()
-        self._stress_read(self.data)
-        for t in threads:
-            t.join()
-        self._validate_stress()
-
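
The threaded stress test above is what the module's `retry()` decorator existed for: concurrent readers and writers routinely trip sqlite's "database is locked" error, and the module's answer was to catch `sqlite3.OperationalError`, optionally reconnect, and retry up to 500 times. A minimal standalone sketch of that policy, assuming nothing from BitBake; the file path, table name and small sleep are illustrative (the original retried immediately):

```python
# Standalone sketch of the 'database is locked' retry policy exercised by the
# threaded stress test. Path, table name and the small sleep are illustrative.
import sqlite3
import time

def make_conn():
    return sqlite3.connect("/tmp/retry_demo.sqlite3", timeout=5)

def execute_with_retry(query, args=(), max_retries=500):
    conn = make_conn()
    try:
        for attempt in range(max_retries):
            try:
                with conn:  # commit on success, roll back on error
                    return conn.execute(query, args).fetchall()
            except sqlite3.OperationalError as exc:
                if 'is locked' in str(exc) or 'locking protocol' in str(exc):
                    conn.close()
                    conn = make_conn()   # reconnect, as retry(reconnect=True) did
                    time.sleep(0.01)
                    continue
                raise
        raise RuntimeError("database stayed locked after %d attempts" % max_retries)
    finally:
        conn.close()

execute_with_retry("CREATE TABLE IF NOT EXISTS kv(key TEXT PRIMARY KEY, value TEXT);")
execute_with_retry("INSERT OR REPLACE INTO kv(key, value) VALUES (?, ?);", ("A1", "1"))
print(execute_with_retry("SELECT value FROM kv WHERE key=?;", ("A1",)))  # [('1',)]
```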
