summaryrefslogtreecommitdiffstats
path: root/meta/recipes-devtools
diff options
context:
space:
mode:
authorArchana Polampalli <archana.polampalli@windriver.com>2024-08-26 13:57:02 +0000
committerSteve Sakoman <steve@sakoman.com>2024-09-03 05:39:12 -0700
commit0069bab74877d78a2cf16021c65e6cdb2d06dda0 (patch)
tree79611164b8ba7317616d4e79a120edac905b536d /meta/recipes-devtools
parent0f869ed43b3ff92eeb0e8ccb2b639a415b58616d (diff)
downloadpoky-0069bab74877d78a2cf16021c65e6cdb2d06dda0.tar.gz
qemu: fix CVE-2024-7409
A flaw was found in the QEMU NBD Server. This vulnerability allows a denial of service (DoS) attack via improper synchronization during socket closure when a client keeps a socket open as the server is taken offline. (From OE-Core rev: 334f70c408ce5c95f145aa4657f343b023f7e1b4) Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com> Signed-off-by: Steve Sakoman <steve@sakoman.com>
Diffstat (limited to 'meta/recipes-devtools')
-rw-r--r--meta/recipes-devtools/qemu/qemu.inc4
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0001.patch167
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0002.patch175
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0003.patch126
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0004.patch164
5 files changed, 636 insertions, 0 deletions
diff --git a/meta/recipes-devtools/qemu/qemu.inc b/meta/recipes-devtools/qemu/qemu.inc
index 50d92b04bd..a1d8a309a0 100644
--- a/meta/recipes-devtools/qemu/qemu.inc
+++ b/meta/recipes-devtools/qemu/qemu.inc
@@ -45,6 +45,10 @@ SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \
45 file://CVE-2024-4467-0003.patch \ 45 file://CVE-2024-4467-0003.patch \
46 file://CVE-2024-4467-0004.patch \ 46 file://CVE-2024-4467-0004.patch \
47 file://CVE-2024-4467-0005.patch \ 47 file://CVE-2024-4467-0005.patch \
48 file://CVE-2024-7409-0001.patch \
49 file://CVE-2024-7409-0002.patch \
50 file://CVE-2024-7409-0003.patch \
51 file://CVE-2024-7409-0004.patch \
48 " 52 "
49UPSTREAM_CHECK_REGEX = "qemu-(?P<pver>\d+(\.\d+)+)\.tar" 53UPSTREAM_CHECK_REGEX = "qemu-(?P<pver>\d+(\.\d+)+)\.tar"
50 54
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0001.patch b/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0001.patch
new file mode 100644
index 0000000000..631e93a6d2
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0001.patch
@@ -0,0 +1,167 @@
1From fb1c2aaa981e0a2fa6362c9985f1296b74f055ac Mon Sep 17 00:00:00 2001
2From: Eric Blake <eblake@redhat.com>
3Date: Wed, 7 Aug 2024 08:50:01 -0500
4Subject: [PATCH] nbd/server: Plumb in new args to nbd_client_add()
5
6Upcoming patches to fix a CVE need to track an opaque pointer passed
7in by the owner of a client object, as well as request for a time
8limit on how fast negotiation must complete. Prepare for that by
9changing the signature of nbd_client_new() and adding an accessor to
10get at the opaque pointer, although for now the two servers
11(qemu-nbd.c and blockdev-nbd.c) do not change behavior even though
12they pass in a new default timeout value.
13
14Suggested-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
15Signed-off-by: Eric Blake <eblake@redhat.com>
16Message-ID: <20240807174943.771624-11-eblake@redhat.com>
17Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
18[eblake: s/LIMIT/MAX_SECS/ as suggested by Dan]
19Signed-off-by: Eric Blake <eblake@redhat.com>
20
21CVE: CVE-2024-7409
22
23Upstream-Status: Backport [https://github.com/qemu/qemu/commit/fb1c2aaa981e0a2fa6362c9985f1296b74f055ac]
24
25Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
26---
27 blockdev-nbd.c | 6 ++++--
28 include/block/nbd.h | 11 ++++++++++-
29 nbd/server.c | 20 +++++++++++++++++---
30 qemu-nbd.c | 4 +++-
31 4 files changed, 34 insertions(+), 7 deletions(-)
32
33diff --git a/blockdev-nbd.c b/blockdev-nbd.c
34index 213012435..267a1de90 100644
35--- a/blockdev-nbd.c
36+++ b/blockdev-nbd.c
37@@ -64,8 +64,10 @@ static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
38 nbd_update_server_watch(nbd_server);
39
40 qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server");
41- nbd_client_new(cioc, nbd_server->tlscreds, nbd_server->tlsauthz,
42- nbd_blockdev_client_closed);
43+ /* TODO - expose handshake timeout as QMP option */
44+ nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS,
45+ nbd_server->tlscreds, nbd_server->tlsauthz,
46+ nbd_blockdev_client_closed, NULL);
47 }
48
49 static void nbd_update_server_watch(NBDServerData *s)
50diff --git a/include/block/nbd.h b/include/block/nbd.h
51index 4e7bd6342..1d4d65922 100644
52--- a/include/block/nbd.h
53+++ b/include/block/nbd.h
54@@ -33,6 +33,12 @@ typedef struct NBDMetaContexts NBDMetaContexts;
55
56 extern const BlockExportDriver blk_exp_nbd;
57
58+/*
59+ * NBD_DEFAULT_HANDSHAKE_MAX_SECS: Number of seconds in which client must
60+ * succeed at NBD_OPT_GO before being forcefully dropped as too slow.
61+ */
62+#define NBD_DEFAULT_HANDSHAKE_MAX_SECS 10
63+
64 /* Handshake phase structs - this struct is passed on the wire */
65
66 typedef struct NBDOption {
67@@ -403,9 +409,12 @@ AioContext *nbd_export_aio_context(NBDExport *exp);
68 NBDExport *nbd_export_find(const char *name);
69
70 void nbd_client_new(QIOChannelSocket *sioc,
71+ uint32_t handshake_max_secs,
72 QCryptoTLSCreds *tlscreds,
73 const char *tlsauthz,
74- void (*close_fn)(NBDClient *, bool));
75+ void (*close_fn)(NBDClient *, bool),
76+ void *owner);
77+void *nbd_client_owner(NBDClient *client);
78 void nbd_client_get(NBDClient *client);
79 void nbd_client_put(NBDClient *client);
80
81diff --git a/nbd/server.c b/nbd/server.c
82index 091b57119..f8881936e 100644
83--- a/nbd/server.c
84+++ b/nbd/server.c
85@@ -124,12 +124,14 @@ struct NBDMetaContexts {
86 struct NBDClient {
87 int refcount; /* atomic */
88 void (*close_fn)(NBDClient *client, bool negotiated);
89+ void *owner;
90
91 QemuMutex lock;
92
93 NBDExport *exp;
94 QCryptoTLSCreds *tlscreds;
95 char *tlsauthz;
96+ uint32_t handshake_max_secs;
97 QIOChannelSocket *sioc; /* The underlying data channel */
98 QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
99
100@@ -3160,6 +3162,7 @@ static coroutine_fn void nbd_co_client_start(void *opaque)
101
102 qemu_co_mutex_init(&client->send_lock);
103
104+ /* TODO - utilize client->handshake_max_secs */
105 if (nbd_negotiate(client, &local_err)) {
106 if (local_err) {
107 error_report_err(local_err);
108@@ -3174,14 +3177,17 @@ static coroutine_fn void nbd_co_client_start(void *opaque)
109 }
110
111 /*
112- * Create a new client listener using the given channel @sioc.
113+ * Create a new client listener using the given channel @sioc and @owner.
114 * Begin servicing it in a coroutine. When the connection closes, call
115- * @close_fn with an indication of whether the client completed negotiation.
116+ * @close_fn with an indication of whether the client completed negotiation
117+ * within @handshake_max_secs seconds (0 for unbounded).
118 */
119 void nbd_client_new(QIOChannelSocket *sioc,
120+ uint32_t handshake_max_secs,
121 QCryptoTLSCreds *tlscreds,
122 const char *tlsauthz,
123- void (*close_fn)(NBDClient *, bool))
124+ void (*close_fn)(NBDClient *, bool),
125+ void *owner)
126 {
127 NBDClient *client;
128 Coroutine *co;
129@@ -3194,13 +3200,21 @@ void nbd_client_new(QIOChannelSocket *sioc,
130 object_ref(OBJECT(client->tlscreds));
131 }
132 client->tlsauthz = g_strdup(tlsauthz);
133+ client->handshake_max_secs = handshake_max_secs;
134 client->sioc = sioc;
135 qio_channel_set_delay(QIO_CHANNEL(sioc), false);
136 object_ref(OBJECT(client->sioc));
137 client->ioc = QIO_CHANNEL(sioc);
138 object_ref(OBJECT(client->ioc));
139 client->close_fn = close_fn;
140+ client->owner = owner;
141
142 co = qemu_coroutine_create(nbd_co_client_start, client);
143 qemu_coroutine_enter(co);
144 }
145+
146+void *
147+nbd_client_owner(NBDClient *client)
148+{
149+ return client->owner;
150+}
151diff --git a/qemu-nbd.c b/qemu-nbd.c
152index 186e6468b..5fa399c0b 100644
153--- a/qemu-nbd.c
154+++ b/qemu-nbd.c
155@@ -389,7 +389,9 @@ static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
156
157 nb_fds++;
158 nbd_update_server_watch();
159- nbd_client_new(cioc, tlscreds, tlsauthz, nbd_client_closed);
160+ /* TODO - expose handshake timeout as command line option */
161+ nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS,
162+ tlscreds, tlsauthz, nbd_client_closed, NULL);
163 }
164
165 static void nbd_update_server_watch(void)
166--
1672.40.0
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0002.patch b/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0002.patch
new file mode 100644
index 0000000000..ca8ef0b44d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0002.patch
@@ -0,0 +1,175 @@
1From c8a76dbd90c2f48df89b75bef74917f90a59b623 Mon Sep 17 00:00:00 2001
2From: Eric Blake <eblake@redhat.com>
3Date: Tue, 6 Aug 2024 13:53:00 -0500
4Subject: [PATCH] nbd/server: CVE-2024-7409: Cap default max-connections to 100
5
6Allowing an unlimited number of clients to any web service is a recipe
7for a rudimentary denial of service attack: the client merely needs to
8open lots of sockets without closing them, until qemu no longer has
9any more fds available to allocate.
10
11For qemu-nbd, we default to allowing only 1 connection unless more are
12explicitly asked for (-e or --shared); this was historically picked as
13a nice default (without an explicit -t, a non-persistent qemu-nbd goes
14away after a client disconnects, without needing any additional
15follow-up commands), and we are not going to change that interface now
16(besides, someday we want to point people towards qemu-storage-daemon
17instead of qemu-nbd).
18
19But for qemu proper, and the newer qemu-storage-daemon, the QMP
20nbd-server-start command has historically had a default of unlimited
21number of connections, in part because unlike qemu-nbd it is
22inherently persistent until nbd-server-stop. Allowing multiple client
23sockets is particularly useful for clients that can take advantage of
24MULTI_CONN (creating parallel sockets to increase throughput),
25although known clients that do so (such as libnbd's nbdcopy) typically
26use only 8 or 16 connections (the benefits of scaling diminish once
27more sockets are competing for kernel attention). Picking a number
28large enough for typical use cases, but not unlimited, makes it
29slightly harder for a malicious client to perform a denial of service
30 merely by opening lots of connections without progressing through the
31handshake.
32
33This change does not eliminate CVE-2024-7409 on its own, but reduces
34the chance for fd exhaustion or unlimited memory usage as an attack
35surface. On the other hand, by itself, it makes it more obvious that
36with a finite limit, we have the problem of an unauthenticated client
37holding 100 fds opened as a way to block out a legitimate client from
38being able to connect; thus, later patches will further add timeouts
39to reject clients that are not making progress.
40
41This is an INTENTIONAL change in behavior, and will break any client
42of nbd-server-start that was not passing an explicit max-connections
43parameter, yet expects more than 100 simultaneous connections. We are
44not aware of any such client (as stated above, most clients aware of
45MULTI_CONN get by just fine on 8 or 16 connections, and probably cope
46with later connections failing by relying on the earlier connections;
47libvirt has not yet been passing max-connections, but generally
48creates NBD servers with the intent for a single client for the sake
49of live storage migration; meanwhile, the KubeSAN project anticipates
50a large cluster sharing multiple clients [up to 8 per node, and up to
51100 nodes in a cluster], but it currently uses qemu-nbd with an
52explicit --shared=0 rather than qemu-storage-daemon with
53nbd-server-start).
54
55We considered using a deprecation period (declare that omitting
56max-parameters is deprecated, and make it mandatory in 3 releases -
57then we don't need to pick an arbitrary default); that has zero risk
58of breaking any apps that accidentally depended on more than 100
59connections, and where such breakage might not be noticed under unit
60testing but only under the larger loads of production usage. But it
61does not close the denial-of-service hole until far into the future,
62and requires all apps to change to add the parameter even if 100 was
63good enough. It also has a drawback that any app (like libvirt) that
64is accidentally relying on an unlimited default should seriously
65consider their own CVE now, at which point they are going to change to
66pass explicit max-connections sooner than waiting for 3 qemu releases.
67Finally, if our changed default breaks an app, that app can always
68pass in an explicit max-parameters with a larger value.
69
70It is also intentional that the HMP interface to nbd-server-start is
71not changed to expose max-connections (any client needing to fine-tune
72things should be using QMP).
73
74Suggested-by: Daniel P. Berrangé <berrange@redhat.com>
75Signed-off-by: Eric Blake <eblake@redhat.com>
76Message-ID: <20240807174943.771624-12-eblake@redhat.com>
77Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
78[ericb: Expand commit message to summarize Dan's argument for why we
79break corner-case back-compat behavior without a deprecation period]
80Signed-off-by: Eric Blake <eblake@redhat.com>
81
82CVE: CVE-2024-7409
83
84Upstream-Status: Backport [https://github.com/qemu/qemu/commit/c8a76dbd90c2f48df89b75bef74917f90a59b623]
85
86Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
87---
88 block/monitor/block-hmp-cmds.c | 3 ++-
89 blockdev-nbd.c | 8 ++++++++
90 include/block/nbd.h | 7 +++++++
91 qapi/block-export.json | 4 ++--
92 4 files changed, 19 insertions(+), 3 deletions(-)
93
94diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c
95index c729cbf1e..78a697585 100644
96--- a/block/monitor/block-hmp-cmds.c
97+++ b/block/monitor/block-hmp-cmds.c
98@@ -415,7 +415,8 @@ void hmp_nbd_server_start(Monitor *mon, const QDict *qdict)
99 goto exit;
100 }
101
102- nbd_server_start(addr, NULL, NULL, 0, &local_err);
103+ nbd_server_start(addr, NULL, NULL, NBD_DEFAULT_MAX_CONNECTIONS,
104+ &local_err);
105 qapi_free_SocketAddress(addr);
106 if (local_err != NULL) {
107 goto exit;
108diff --git a/blockdev-nbd.c b/blockdev-nbd.c
109index 267a1de90..24ba5382d 100644
110--- a/blockdev-nbd.c
111+++ b/blockdev-nbd.c
112@@ -170,6 +170,10 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds,
113
114 void nbd_server_start_options(NbdServerOptions *arg, Error **errp)
115 {
116+ if (!arg->has_max_connections) {
117+ arg->max_connections = NBD_DEFAULT_MAX_CONNECTIONS;
118+ }
119+
120 nbd_server_start(arg->addr, arg->tls_creds, arg->tls_authz,
121 arg->max_connections, errp);
122 }
123@@ -182,6 +186,10 @@ void qmp_nbd_server_start(SocketAddressLegacy *addr,
124 {
125 SocketAddress *addr_flat = socket_address_flatten(addr);
126
127+ if (!has_max_connections) {
128+ max_connections = NBD_DEFAULT_MAX_CONNECTIONS;
129+ }
130+
131 nbd_server_start(addr_flat, tls_creds, tls_authz, max_connections, errp);
132 qapi_free_SocketAddress(addr_flat);
133 }
134diff --git a/include/block/nbd.h b/include/block/nbd.h
135index 1d4d65922..d4f8b21ae 100644
136--- a/include/block/nbd.h
137+++ b/include/block/nbd.h
138@@ -39,6 +39,13 @@ extern const BlockExportDriver blk_exp_nbd;
139 */
140 #define NBD_DEFAULT_HANDSHAKE_MAX_SECS 10
141
142+/*
143+ * NBD_DEFAULT_MAX_CONNECTIONS: Number of client sockets to allow at
144+ * once; must be large enough to allow a MULTI_CONN-aware client like
145+ * nbdcopy to create its typical number of 8-16 sockets.
146+ */
147+#define NBD_DEFAULT_MAX_CONNECTIONS 100
148+
149 /* Handshake phase structs - this struct is passed on the wire */
150
151 typedef struct NBDOption {
152diff --git a/qapi/block-export.json b/qapi/block-export.json
153index 7874a49ba..1d255d77e 100644
154--- a/qapi/block-export.json
155+++ b/qapi/block-export.json
156@@ -28,7 +28,7 @@
157 # @max-connections: The maximum number of connections to allow at the
158 # same time, 0 for unlimited. Setting this to 1 also stops the
159 # server from advertising multiple client support (since 5.2;
160-# default: 0)
161+# default: 100)
162 #
163 # Since: 4.2
164 ##
165@@ -63,7 +63,7 @@
166 # @max-connections: The maximum number of connections to allow at the
167 # same time, 0 for unlimited. Setting this to 1 also stops the
168 # server from advertising multiple client support (since 5.2;
169-# default: 0).
170+# default: 100).
171 #
172 # Returns: error if the server is already running.
173 #
174--
1752.40.0
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0003.patch b/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0003.patch
new file mode 100644
index 0000000000..b2b9b15c54
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0003.patch
@@ -0,0 +1,126 @@
1From b9b72cb3ce15b693148bd09cef7e50110566d8a0 Mon Sep 17 00:00:00 2001
2From: Eric Blake <eblake@redhat.com>
3Date: Thu, 8 Aug 2024 16:05:08 -0500
4Subject: [PATCH] nbd/server: CVE-2024-7409: Drop non-negotiating clients
5
6A client that opens a socket but does not negotiate is merely hogging
7qemu's resources (an open fd and a small amount of memory); and a
8malicious client that can access the port where NBD is listening can
9attempt a denial of service attack by intentionally opening and
10abandoning lots of unfinished connections. The previous patch put a
11default bound on the number of such ongoing connections, but once that
12limit is hit, no more clients can connect (including legitimate ones).
13The solution is to insist that clients complete handshake within a
14reasonable time limit, defaulting to 10 seconds. A client that has
15not successfully completed NBD_OPT_GO by then (including the case of
16where the client didn't know TLS credentials to even reach the point
17of NBD_OPT_GO) is wasting our time and does not deserve to stay
18connected. Later patches will allow fine-tuning the limit away from
19the default value (including disabling it for doing integration
20testing of the handshake process itself).
21
22Note that this patch in isolation actually makes it more likely to see
23qemu SEGV after nbd-server-stop, as any client socket still connected
24when the server shuts down will now be closed after 10 seconds rather
25than at the client's whims. That will be addressed in the next patch.
26
27For a demo of this patch in action:
28$ qemu-nbd -f raw -r -t -e 10 file &
29$ nbdsh --opt-mode -c '
30H = list()
31for i in range(20):
32 print(i)
33 H.insert(i, nbd.NBD())
34 H[i].set_opt_mode(True)
35 H[i].connect_uri("nbd://localhost")
36'
37$ kill $!
38
39where later connections get to start progressing once earlier ones are
40forcefully dropped for taking too long, rather than hanging.
41
42Suggested-by: Daniel P. Berrangé <berrange@redhat.com>
43Signed-off-by: Eric Blake <eblake@redhat.com>
44Message-ID: <20240807174943.771624-13-eblake@redhat.com>
45Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
46[eblake: rebase to changes earlier in series, reduce scope of timer]
47Signed-off-by: Eric Blake <eblake@redhat.com>
48
49CVE: CVE-2024-7409
50
51Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/b9b72cb3ce15b693148bd09cef7e50110566d8a0]
52
53Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
54---
55 nbd/server.c | 28 +++++++++++++++++++++++++++-
56 nbd/trace-events | 1 +
57 2 files changed, 28 insertions(+), 1 deletion(-)
58
59diff --git a/nbd/server.c b/nbd/server.c
60index f8881936e..6155e329a 100644
61--- a/nbd/server.c
62+++ b/nbd/server.c
63@@ -3155,22 +3155,48 @@ static void nbd_client_receive_next_request(NBDClient *client)
64 }
65 }
66
67+static void nbd_handshake_timer_cb(void *opaque)
68+{
69+ QIOChannel *ioc = opaque;
70+
71+ trace_nbd_handshake_timer_cb();
72+ qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
73+}
74+
75 static coroutine_fn void nbd_co_client_start(void *opaque)
76 {
77 NBDClient *client = opaque;
78 Error *local_err = NULL;
79+ QEMUTimer *handshake_timer = NULL;
80
81 qemu_co_mutex_init(&client->send_lock);
82
83- /* TODO - utilize client->handshake_max_secs */
84+ /*
85+ * Create a timer to bound the time spent in negotiation. If the
86+ * timer expires, it is likely nbd_negotiate will fail because the
87+ * socket was shutdown.
88+ */
89+ if (client->handshake_max_secs > 0) {
90+ handshake_timer = aio_timer_new(qemu_get_aio_context(),
91+ QEMU_CLOCK_REALTIME,
92+ SCALE_NS,
93+ nbd_handshake_timer_cb,
94+ client->sioc);
95+ timer_mod(handshake_timer,
96+ qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
97+ client->handshake_max_secs * NANOSECONDS_PER_SECOND);
98+ }
99+
100 if (nbd_negotiate(client, &local_err)) {
101 if (local_err) {
102 error_report_err(local_err);
103 }
104+ timer_free(handshake_timer);
105 client_close(client, false);
106 return;
107 }
108
109+ timer_free(handshake_timer);
110 WITH_QEMU_LOCK_GUARD(&client->lock) {
111 nbd_client_receive_next_request(client);
112 }
113diff --git a/nbd/trace-events b/nbd/trace-events
114index 00ae3216a..cbd0a4ab7 100644
115--- a/nbd/trace-events
116+++ b/nbd/trace-events
117@@ -76,6 +76,7 @@ nbd_co_receive_request_payload_received(uint64_t cookie, uint64_t len) "Payload
118 nbd_co_receive_ext_payload_compliance(uint64_t from, uint64_t len) "client sent non-compliant write without payload flag: from=0x%" PRIx64 ", len=0x%" PRIx64
119 nbd_co_receive_align_compliance(const char *op, uint64_t from, uint64_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx64 ", align=0x%" PRIx32
120 nbd_trip(void) "Reading request"
121+nbd_handshake_timer_cb(void) "client took too long to negotiate"
122
123 # client-connection.c
124 nbd_connect_thread_sleep(uint64_t timeout) "timeout %" PRIu64
125--
1262.40.0
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0004.patch b/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0004.patch
new file mode 100644
index 0000000000..9515c631ad
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2024-7409-0004.patch
@@ -0,0 +1,164 @@
1From 3e7ef738c8462c45043a1d39f702a0990406a3b3 Mon Sep 17 00:00:00 2001
2From: Eric Blake <eblake@redhat.com>
3Date: Wed, 7 Aug 2024 12:23:13 -0500
4Subject: [PATCH] nbd/server: CVE-2024-7409: Close stray clients at server-stop
5
6A malicious client can attempt to connect to an NBD server, and then
7intentionally delay progress in the handshake, including if it does
8not know the TLS secrets. Although the previous two patches reduce
9this behavior by capping the default max-connections parameter and
10killing slow clients, they did not eliminate the possibility of a
11client waiting to close the socket until after the QMP nbd-server-stop
12command is executed, at which point qemu would SEGV when trying to
13dereference the NULL nbd_server global which is no longer present.
14This amounts to a denial of service attack. Worse, if another NBD
15server is started before the malicious client disconnects, I cannot
16rule out additional adverse effects when the old client interferes
17with the connection count of the new server (although the most likely
18is a crash due to an assertion failure when checking
19nbd_server->connections > 0).
20
21For environments without this patch, the CVE can be mitigated by
22ensuring (such as via a firewall) that only trusted clients can
23connect to an NBD server. Note that using frameworks like libvirt
24that ensure that TLS is used and that nbd-server-stop is not executed
25while any trusted clients are still connected will only help if there
26is also no possibility for an untrusted client to open a connection
27but then stall on the NBD handshake.
28
29Given the previous patches, it would be possible to guarantee that no
30clients remain connected by having nbd-server-stop sleep for longer
31than the default handshake deadline before finally freeing the global
32nbd_server object, but that could make QMP non-responsive for a long
33 time. So instead, this patch fixes the problem by tracking all client
34sockets opened while the server is running, and forcefully closing any
35such sockets remaining without a completed handshake at the time of
36nbd-server-stop, then waiting until the coroutines servicing those
37sockets notice the state change. nbd-server-stop now has a second
38AIO_WAIT_WHILE_UNLOCKED (the first is indirectly through the
39blk_exp_close_all_type() that disconnects all clients that completed
40handshakes), but forced socket shutdown is enough to progress the
41coroutines and quickly tear down all clients before the server is
42freed, thus finally fixing the CVE.
43
44This patch relies heavily on the fact that nbd/server.c guarantees
45that it only calls nbd_blockdev_client_closed() from the main loop
46(see the assertion in nbd_client_put() and the hoops used in
47nbd_client_put_nonzero() to achieve that); if we did not have that
48guarantee, we would also need a mutex protecting our accesses of the
49list of connections to survive re-entrancy from independent iothreads.
50
51Although I did not actually try to test old builds, it looks like this
52problem has existed since at least commit 862172f45c (v2.12.0, 2017) -
53even back when that patch started using a QIONetListener to handle
54listening on multiple sockets, nbd_server_free() was already unaware
55that the nbd_blockdev_client_closed callback can be reached later by a
56client thread that has not completed handshakes (and therefore the
57client's socket never got added to the list closed in
58nbd_export_close_all), despite that patch intentionally tearing down
59the QIONetListener to prevent new clients.
60
61Reported-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
62Fixes: CVE-2024-7409
63CC: qemu-stable@nongnu.org
64Signed-off-by: Eric Blake <eblake@redhat.com>
65Message-ID: <20240807174943.771624-14-eblake@redhat.com>
66Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
67
68CVE: CVE-2024-7409
69
70Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/3e7ef738c8462c45043a1d39f702a0990406a3b3]
71
72Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
73---
74 blockdev-nbd.c | 35 ++++++++++++++++++++++++++++++++++-
75 1 file changed, 34 insertions(+), 1 deletion(-)
76
77diff --git a/blockdev-nbd.c b/blockdev-nbd.c
78index 24ba5382d..f73409ae4 100644
79--- a/blockdev-nbd.c
80+++ b/blockdev-nbd.c
81@@ -21,12 +21,18 @@
82 #include "io/channel-socket.h"
83 #include "io/net-listener.h"
84
85+typedef struct NBDConn {
86+ QIOChannelSocket *cioc;
87+ QLIST_ENTRY(NBDConn) next;
88+} NBDConn;
89+
90 typedef struct NBDServerData {
91 QIONetListener *listener;
92 QCryptoTLSCreds *tlscreds;
93 char *tlsauthz;
94 uint32_t max_connections;
95 uint32_t connections;
96+ QLIST_HEAD(, NBDConn) conns;
97 } NBDServerData;
98
99 static NBDServerData *nbd_server;
100@@ -51,6 +57,14 @@ int nbd_server_max_connections(void)
101
102 static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
103 {
104+ NBDConn *conn = nbd_client_owner(client);
105+
106+ assert(qemu_in_main_thread() && nbd_server);
107+
108+ object_unref(OBJECT(conn->cioc));
109+ QLIST_REMOVE(conn, next);
110+ g_free(conn);
111+
112 nbd_client_put(client);
113 assert(nbd_server->connections > 0);
114 nbd_server->connections--;
115@@ -60,14 +74,20 @@ static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
116 static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
117 gpointer opaque)
118 {
119+ NBDConn *conn = g_new0(NBDConn, 1);
120+
121+ assert(qemu_in_main_thread() && nbd_server);
122 nbd_server->connections++;
123+ object_ref(OBJECT(cioc));
124+ conn->cioc = cioc;
125+ QLIST_INSERT_HEAD(&nbd_server->conns, conn, next);
126 nbd_update_server_watch(nbd_server);
127
128 qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server");
129 /* TODO - expose handshake timeout as QMP option */
130 nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS,
131 nbd_server->tlscreds, nbd_server->tlsauthz,
132- nbd_blockdev_client_closed, NULL);
133+ nbd_blockdev_client_closed, conn);
134 }
135
136 static void nbd_update_server_watch(NBDServerData *s)
137@@ -81,12 +101,25 @@ static void nbd_update_server_watch(NBDServerData *s)
138
139 static void nbd_server_free(NBDServerData *server)
140 {
141+ NBDConn *conn, *tmp;
142+
143 if (!server) {
144 return;
145 }
146
147+ /*
148+ * Forcefully close the listener socket, and any clients that have
149+ * not yet disconnected on their own.
150+ */
151 qio_net_listener_disconnect(server->listener);
152 object_unref(OBJECT(server->listener));
153+ QLIST_FOREACH_SAFE(conn, &server->conns, next, tmp) {
154+ qio_channel_shutdown(QIO_CHANNEL(conn->cioc), QIO_CHANNEL_SHUTDOWN_BOTH,
155+ NULL);
156+ }
157+
158+ AIO_WAIT_WHILE_UNLOCKED(NULL, server->connections > 0);
159+
160 if (server->tlscreds) {
161 object_unref(OBJECT(server->tlscreds));
162 }
163--
1642.40.0