diff options
author | Joshua Watt <JPEWhacker@gmail.com> | 2020-06-25 09:21:07 -0500 |
---|---|---|
committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2020-07-02 16:11:40 +0100 |
commit | 6ebf01bfd43b6d95a70699b1e58a42fd7d1002a6 (patch) | |
tree | 3f18afa2f1918dde70ced1013e5d859cc4c573e7 /bitbake/lib/hashserv/__init__.py | |
parent | b6e0f5889eb55d88276807407f75eaad9bf0a96a (diff) | |
download | poky-6ebf01bfd43b6d95a70699b1e58a42fd7d1002a6.tar.gz |
bitbake: hashserv: Chunkify large messages
The hash equivalence client and server can occasionally send messages
that are too large for the server to fit in the receive buffer (64 KB).
To prevent this, support is added to the protocol to "chunkify" the
stream and break it up into manageable pieces that each side can piece
back together.
Ideally, this would be negotiated by the client and server, but it's
currently hard coded to 32 KB to prevent the round-trip delay.
(Bitbake rev: 1a7bddb5471a02a744e7a441a3b4a6da693348b0)
Signed-off-by: Joshua Watt <JPEWhacker@gmail.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
(cherry picked from commit e27a28c1e40e886ee68ba4b99b537ffc9c3577d4)
Signed-off-by: Steve Sakoman <steve@sakoman.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'bitbake/lib/hashserv/__init__.py')
-rw-r--r-- | bitbake/lib/hashserv/__init__.py | 22 |
1 files changed, 22 insertions, 0 deletions
diff --git a/bitbake/lib/hashserv/__init__.py b/bitbake/lib/hashserv/__init__.py index c3318620f5..f95e8f43f1 100644 --- a/bitbake/lib/hashserv/__init__.py +++ b/bitbake/lib/hashserv/__init__.py | |||
@@ -6,12 +6,20 @@ | |||
6 | from contextlib import closing | 6 | from contextlib import closing |
7 | import re | 7 | import re |
8 | import sqlite3 | 8 | import sqlite3 |
9 | import itertools | ||
10 | import json | ||
9 | 11 | ||
10 | UNIX_PREFIX = "unix://" | 12 | UNIX_PREFIX = "unix://" |
11 | 13 | ||
12 | ADDR_TYPE_UNIX = 0 | 14 | ADDR_TYPE_UNIX = 0 |
13 | ADDR_TYPE_TCP = 1 | 15 | ADDR_TYPE_TCP = 1 |
14 | 16 | ||
17 | # The Python async server defaults to a 64K receive buffer, so we hardcode our | ||
18 | # maximum chunk size. It would be better if the client and server reported to | ||
19 | # each other what the maximum chunk sizes were, but that will slow down the | ||
20 | # connection setup with a round trip delay so I'd rather not do that unless it | ||
21 | # is necessary | ||
22 | DEFAULT_MAX_CHUNK = 32 * 1024 | ||
15 | 23 | ||
16 | def setup_database(database, sync=True): | 24 | def setup_database(database, sync=True): |
17 | db = sqlite3.connect(database) | 25 | db = sqlite3.connect(database) |
@@ -66,6 +74,20 @@ def parse_address(addr): | |||
66 | return (ADDR_TYPE_TCP, (host, int(port))) | 74 | return (ADDR_TYPE_TCP, (host, int(port))) |
67 | 75 | ||
68 | 76 | ||
def chunkify(msg, max_chunk):
    """Split *msg* into newline-terminated pieces no longer than *max_chunk*.

    Yields the message unchanged (plus a trailing newline) when it is short
    enough to fit in one chunk. Otherwise a JSON ``{"chunk-stream": null}``
    header line is yielded first, followed by successive slices of the
    message (each ``max_chunk - 1`` characters, newline-terminated), and
    finally a bare newline marking the end of the chunk stream.
    """
    # One character is reserved for the terminating newline of each piece.
    piece = max_chunk - 1

    if len(msg) < piece:
        # Fits in a single chunk: no chunk-stream framing needed.
        yield msg + "\n"
        return

    # Announce to the peer that a chunked stream follows.
    yield json.dumps({'chunk-stream': None}) + "\n"

    # Emit the payload in fixed-size slices; the final slice may be shorter.
    for start in range(0, len(msg), piece):
        yield msg[start:start + piece] + "\n"

    # Empty line terminates the chunk stream.
    yield "\n"
89 | |||
90 | |||
69 | def create_server(addr, dbname, *, sync=True): | 91 | def create_server(addr, dbname, *, sync=True): |
70 | from . import server | 92 | from . import server |
71 | db = setup_database(dbname, sync=sync) | 93 | db = setup_database(dbname, sync=sync) |