summaryrefslogtreecommitdiffstats
path: root/bitbake/lib/hashserv/__init__.py
diff options
context:
space:
mode:
authorJoshua Watt <JPEWhacker@gmail.com>2020-06-25 09:21:07 -0500
committerRichard Purdie <richard.purdie@linuxfoundation.org>2020-06-28 08:36:56 +0100
commit07a02b31fd80748ab11d7e30fbf2a3d2e59b1426 (patch)
tree6e7cc522bdf2374cdcd726faa7c5199bf883a5c1 /bitbake/lib/hashserv/__init__.py
parentb3f212d6bc43936d54f330372625456dad4c570a (diff)
downloadpoky-07a02b31fd80748ab11d7e30fbf2a3d2e59b1426.tar.gz
bitbake: hashserv: Chunkify large messages
The hash equivalence client and server can occasionally send messages that are too large to fit in the server's receive buffer (64 KB). To prevent this, support is added to the protocol to "chunkify" the stream and break it up into manageable pieces that each side can reassemble. Ideally, the maximum chunk size would be negotiated by the client and server, but it's currently hard-coded to 32 KB to avoid a round-trip delay during connection setup. (Bitbake rev: e27a28c1e40e886ee68ba4b99b537ffc9c3577d4) Signed-off-by: Joshua Watt <JPEWhacker@gmail.com> Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'bitbake/lib/hashserv/__init__.py')
-rw-r--r--bitbake/lib/hashserv/__init__.py22
1 files changed, 22 insertions, 0 deletions
diff --git a/bitbake/lib/hashserv/__init__.py b/bitbake/lib/hashserv/__init__.py
index c3318620f5..f95e8f43f1 100644
--- a/bitbake/lib/hashserv/__init__.py
+++ b/bitbake/lib/hashserv/__init__.py
@@ -6,12 +6,20 @@
6from contextlib import closing 6from contextlib import closing
7import re 7import re
8import sqlite3 8import sqlite3
9import itertools
10import json
9 11
10UNIX_PREFIX = "unix://" 12UNIX_PREFIX = "unix://"
11 13
12ADDR_TYPE_UNIX = 0 14ADDR_TYPE_UNIX = 0
13ADDR_TYPE_TCP = 1 15ADDR_TYPE_TCP = 1
14 16
17# The Python async server defaults to a 64K receive buffer, so we hardcode our
18# maximum chunk size. It would be better if the client and server reported to
19# each other what the maximum chunk sizes were, but that will slow down the
20# connection setup with a round trip delay so I'd rather not do that unless it
21# is necessary
22DEFAULT_MAX_CHUNK = 32 * 1024
15 23
16def setup_database(database, sync=True): 24def setup_database(database, sync=True):
17 db = sqlite3.connect(database) 25 db = sqlite3.connect(database)
@@ -66,6 +74,20 @@ def parse_address(addr):
66 return (ADDR_TYPE_TCP, (host, int(port))) 74 return (ADDR_TYPE_TCP, (host, int(port)))
67 75
68 76
def chunkify(msg, max_chunk):
    """Split *msg* into newline-terminated pieces no larger than *max_chunk*.

    A message short enough to fit in one chunk (payload plus its trailing
    newline) is yielded directly.  Otherwise a ``{"chunk-stream": null}``
    marker line is emitted first, followed by the payload sliced into
    ``max_chunk - 1``-character lines, and finally a bare newline that
    terminates the chunk stream.
    """
    # One byte of each chunk is reserved for the trailing newline.
    payload = max_chunk - 1

    if len(msg) < payload:
        yield msg + "\n"
        return

    # Announce that a chunked stream follows.
    yield json.dumps({'chunk-stream': None}) + "\n"

    # Grouper idiom: zipping N copies of the same iterator yields
    # fixed-size slices of the message, padded with '' on the last one.
    slices = [iter(msg)] * payload
    for piece in itertools.zip_longest(*slices, fillvalue=''):
        yield ''.join(piece) + "\n"

    # An empty line marks the end of the chunk stream.
    yield "\n"
89
90
69def create_server(addr, dbname, *, sync=True): 91def create_server(addr, dbname, *, sync=True):
70 from . import server 92 from . import server
71 db = setup_database(dbname, sync=sync) 93 db = setup_database(dbname, sync=sync)