author | Kai Kang <kai.kang@windriver.com> | 2019-10-23 15:49:39 +0800
committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-10-28 05:31:42 +0000
commit | 85903df1c1545da372c556cf12066d535355465a (patch)
tree | 295d876d4c9ec915aaeb9468f4a1595fc720b298 /meta/recipes-connectivity
parent | 37388b6899b1e9b410c1e7b041455cbc36a160a5 (diff)
download | poky-85903df1c1545da372c556cf12066d535355465a.tar.gz
bind: fix CVE-2019-6471 and CVE-2018-5743
Backport patches to fix CVE-2019-6471 and CVE-2018-5743 in bind.
CVE-2019-6471 is fixed by 0001-bind-fix-CVE-2019-6471.patch; the other
six patches address CVE-2018-5743. One additional patch is backported
to fix a compile error on ARM introduced by those six commits.
(From OE-Core rev: 3c39d4158677b97253df63f23b74c3a9dd5539f6)
Signed-off-by: Kai Kang <kai.kang@windriver.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'meta/recipes-connectivity')
9 files changed, 2723 insertions, 0 deletions
diff --git a/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch b/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch
new file mode 100644
index 0000000000..2fed99e1bb
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch
@@ -0,0 +1,64 @@
1 | Backport patch to fix CVE-2019-6471. | ||
2 | |||
3 | Ref: | ||
4 | https://security-tracker.debian.org/tracker/CVE-2019-6471 | ||
5 | |||
6 | CVE: CVE-2019-6471 | ||
7 | Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/3a9c7bb] | ||
8 | |||
9 | Signed-off-by: Kai Kang <kai.kang@windriver.com> | ||
10 | |||
11 | From 3a9c7bb80d4a609b86427406d9dd783199920b5b Mon Sep 17 00:00:00 2001 | ||
12 | From: Mark Andrews <marka@isc.org> | ||
13 | Date: Tue, 19 Mar 2019 14:14:21 +1100 | ||
14 | Subject: [PATCH] move item_out test inside lock in dns_dispatch_getnext() | ||
15 | |||
16 | (cherry picked from commit 60c42f849d520564ed42e5ed0ba46b4b69c07712) | ||
17 | --- | ||
18 | lib/dns/dispatch.c | 12 ++++++++---- | ||
19 | 1 file changed, 8 insertions(+), 4 deletions(-) | ||
20 | |||
21 | diff --git a/lib/dns/dispatch.c b/lib/dns/dispatch.c | ||
22 | index 408beda367..3278db4a07 100644 | ||
23 | --- a/lib/dns/dispatch.c | ||
24 | +++ b/lib/dns/dispatch.c | ||
25 | @@ -134,7 +134,7 @@ struct dns_dispentry { | ||
26 | isc_task_t *task; | ||
27 | isc_taskaction_t action; | ||
28 | void *arg; | ||
29 | - bool item_out; | ||
30 | + bool item_out; | ||
31 | dispsocket_t *dispsocket; | ||
32 | ISC_LIST(dns_dispatchevent_t) items; | ||
33 | ISC_LINK(dns_dispentry_t) link; | ||
34 | @@ -3422,13 +3422,14 @@ dns_dispatch_getnext(dns_dispentry_t *resp, dns_dispatchevent_t **sockevent) { | ||
35 | disp = resp->disp; | ||
36 | REQUIRE(VALID_DISPATCH(disp)); | ||
37 | |||
38 | - REQUIRE(resp->item_out == true); | ||
39 | - resp->item_out = false; | ||
40 | - | ||
41 | ev = *sockevent; | ||
42 | *sockevent = NULL; | ||
43 | |||
44 | LOCK(&disp->lock); | ||
45 | + | ||
46 | + REQUIRE(resp->item_out == true); | ||
47 | + resp->item_out = false; | ||
48 | + | ||
49 | if (ev->buffer.base != NULL) | ||
50 | free_buffer(disp, ev->buffer.base, ev->buffer.length); | ||
51 | free_devent(disp, ev); | ||
52 | @@ -3573,6 +3574,9 @@ dns_dispatch_removeresponse(dns_dispentry_t **resp, | ||
53 | isc_task_send(disp->task[0], &disp->ctlevent); | ||
54 | } | ||
55 | |||
56 | +/* | ||
57 | + * disp must be locked. | ||
58 | + */ | ||
59 | static void | ||
60 | do_cancel(dns_dispatch_t *disp) { | ||
61 | dns_dispatchevent_t *ev; | ||
62 | -- | ||
63 | 2.20.1 | ||
64 | |||
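The patch above is a classic lock-ordering correction: `resp->item_out` is shared dispatcher state, and testing or clearing it before acquiring `disp->lock` leaves a window in which a second thread can observe a stale value and trip the `REQUIRE` assertion, aborting named — the denial of service in CVE-2019-6471. Below is a minimal sketch of the pattern the fix enforces, using pthreads and hypothetical names (`dispatch_t`, `getnext_*`) rather than BIND's actual types:

```c
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-in for dns_dispentry_t/dns_dispatch_t. */
typedef struct {
    pthread_mutex_t lock;   /* guards item_out */
    bool item_out;          /* true while an event is handed out */
} dispatch_t;

/* Wrong: checks and clears shared state outside the lock. */
void getnext_racy(dispatch_t *d) {
    assert(d->item_out);    /* another thread may race us here */
    d->item_out = false;
    pthread_mutex_lock(&d->lock);
    /* ... recycle the event, hand out the next one ... */
    pthread_mutex_unlock(&d->lock);
}

/* Fixed, mirroring the patch: the test and the update happen
 * inside the critical section, so concurrent callers are
 * serialized and the assertion can no longer misfire. */
void getnext_fixed(dispatch_t *d) {
    pthread_mutex_lock(&d->lock);
    assert(d->item_out);
    d->item_out = false;
    /* ... recycle the event, hand out the next one ... */
    pthread_mutex_unlock(&d->lock);
}
```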
diff --git a/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch b/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch
new file mode 100644
index 0000000000..48ae125f84
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch
@@ -0,0 +1,60 @@
1 | Backport patch to fix CVE-2018-5743. | ||
2 | |||
3 | Ref: | ||
4 | https://security-tracker.debian.org/tracker/CVE-2018-5743 | ||
5 | |||
6 | CVE: CVE-2018-5743 | ||
7 | Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/ec2d50d] | ||
8 | |||
9 | Signed-off-by: Kai Kang <kai.kang@windriver.com> | ||
10 | |||
11 | From ec2d50da8d81814640e28593d912f4b96c7efece Mon Sep 17 00:00:00 2001 | ||
12 | From: =?UTF-8?q?Witold=20Kr=C4=99cicki?= <wpk@isc.org> | ||
13 | Date: Thu, 3 Jan 2019 14:17:43 +0100 | ||
14 | Subject: [PATCH 1/6] fix enforcement of tcp-clients (v1) | ||
15 | |||
16 | tcp-clients settings could be exceeded in some cases by | ||
17 | creating more and more active TCP clients that are over | ||
18 | the set quota limit, which in the end could lead to a | ||
19 | DoS attack by e.g. exhaustion of file descriptors. | ||
20 | |||
21 | If TCP client we're closing went over the quota (so it's | ||
22 | not attached to a quota) mark it as mortal - so that it | ||
23 | will be destroyed and not set up to listen for new | ||
24 | connections - unless it's the last client for a specific | ||
25 | interface. | ||
26 | |||
27 | (cherry picked from commit f97131d21b97381cef72b971b157345c1f9b4115) | ||
28 | (cherry picked from commit 9689ffc485df8f971f0ad81ab8ab1f5389493776) | ||
29 | --- | ||
30 | bin/named/client.c | 13 ++++++++++++- | ||
31 | 1 file changed, 12 insertions(+), 1 deletion(-) | ||
32 | |||
33 | diff --git a/bin/named/client.c b/bin/named/client.c | ||
34 | index d482da7121..0739dd48af 100644 | ||
35 | --- a/bin/named/client.c | ||
36 | +++ b/bin/named/client.c | ||
37 | @@ -421,8 +421,19 @@ exit_check(ns_client_t *client) { | ||
38 | isc_socket_detach(&client->tcpsocket); | ||
39 | } | ||
40 | |||
41 | - if (client->tcpquota != NULL) | ||
42 | + if (client->tcpquota != NULL) { | ||
43 | isc_quota_detach(&client->tcpquota); | ||
44 | + } else { | ||
45 | + /* | ||
46 | + * We went over quota with this client, we don't | ||
47 | + * want to restart listening unless this is the | ||
48 | + * last client on this interface, which is | ||
49 | + * checked later. | ||
50 | + */ | ||
51 | + if (TCP_CLIENT(client)) { | ||
52 | + client->mortal = true; | ||
53 | + } | ||
54 | + } | ||
55 | |||
56 | if (client->timerset) { | ||
57 | (void)isc_timer_reset(client->timer, | ||
58 | -- | ||
59 | 2.20.1 | ||
60 | |||
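The `tcp-clients` limit behind this patch is a counting quota: each TCP client attaches to the quota when it starts servicing a connection and detaches on teardown, and an attach fails once the configured maximum is reached. The patch's point is that a client which was never attached (it was admitted over quota) must not resume listening after finishing, unless it is the last client on its interface. A rough sketch of such a counting quota, with invented names (`quota_t`, `quota_attach`) — not ISC's actual `isc_quota` API:

```c
#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t lock;
    int used;   /* currently attached clients */
    int max;    /* the tcp-clients setting */
} quota_t;

/* Try to take one slot; fail when the quota is exhausted. */
bool quota_attach(quota_t *q) {
    bool ok;
    pthread_mutex_lock(&q->lock);
    ok = (q->used < q->max);
    if (ok) {
        q->used++;
    }
    pthread_mutex_unlock(&q->lock);
    return ok;
}

/* Release a slot taken by quota_attach(). Clients admitted over
 * quota were never attached and must not call this -- exactly the
 * accounting rule the patch enforces by marking such clients
 * mortal instead of letting them listen again. */
void quota_detach(quota_t *q) {
    pthread_mutex_lock(&q->lock);
    q->used--;
    pthread_mutex_unlock(&q->lock);
}
```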
diff --git a/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch b/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch
new file mode 100644
index 0000000000..ca4e8b1a66
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch
@@ -0,0 +1,670 @@
1 | Backport patch to fix CVE-2018-5743. | ||
2 | |||
3 | Ref: | ||
4 | https://security-tracker.debian.org/tracker/CVE-2018-5743 | ||
5 | |||
6 | CVE: CVE-2018-5743 | ||
7 | Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/719f604] | ||
8 | |||
9 | Signed-off-by: Kai Kang <kai.kang@windriver.com> | ||
10 | |||
11 | From 719f604e3fad5b7479bd14e2fa0ef4413f0a8fdc Mon Sep 17 00:00:00 2001 | ||
12 | From: =?UTF-8?q?Witold=20Kr=C4=99cicki?= <wpk@isc.org> | ||
13 | Date: Fri, 4 Jan 2019 12:50:51 +0100 | ||
14 | Subject: [PATCH 2/6] tcp-clients could still be exceeded (v2) | ||
15 | |||
16 | the TCP client quota could still be ineffective under some | ||
17 | circumstances. this change: | ||
18 | |||
19 | - improves quota accounting to ensure that TCP clients are | ||
20 | properly limited, while still guaranteeing that at least one client | ||
21 | is always available to serve TCP connections on each interface. | ||
22 | - uses more descriptive names and removes one (ntcptarget) that | ||
23 | was no longer needed | ||
24 | - adds comments | ||
25 | |||
26 | (cherry picked from commit 924651f1d5e605cd186d03f4f7340bcc54d77cc2) | ||
27 | (cherry picked from commit 55a7a458e30e47874d34bdf1079eb863a0512396) | ||
28 | --- | ||
29 | bin/named/client.c | 311 ++++++++++++++++++++----- | ||
30 | bin/named/include/named/client.h | 14 +- | ||
31 | bin/named/include/named/interfacemgr.h | 11 +- | ||
32 | bin/named/interfacemgr.c | 8 +- | ||
33 | 4 files changed, 267 insertions(+), 77 deletions(-) | ||
34 | |||
35 | diff --git a/bin/named/client.c b/bin/named/client.c | ||
36 | index 0739dd48af..a7b49a0f71 100644 | ||
37 | --- a/bin/named/client.c | ||
38 | +++ b/bin/named/client.c | ||
39 | @@ -246,10 +246,11 @@ static void ns_client_dumpmessage(ns_client_t *client, const char *reason); | ||
40 | static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
41 | dns_dispatch_t *disp, bool tcp); | ||
42 | static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
43 | - isc_socket_t *sock); | ||
44 | + isc_socket_t *sock, ns_client_t *oldclient); | ||
45 | static inline bool | ||
46 | -allowed(isc_netaddr_t *addr, dns_name_t *signer, isc_netaddr_t *ecs_addr, | ||
47 | - uint8_t ecs_addrlen, uint8_t *ecs_scope, dns_acl_t *acl); | ||
48 | +allowed(isc_netaddr_t *addr, dns_name_t *signer, | ||
49 | + isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen, | ||
50 | + uint8_t *ecs_scope, dns_acl_t *acl) | ||
51 | static void compute_cookie(ns_client_t *client, uint32_t when, | ||
52 | uint32_t nonce, const unsigned char *secret, | ||
53 | isc_buffer_t *buf); | ||
54 | @@ -405,8 +406,11 @@ exit_check(ns_client_t *client) { | ||
55 | */ | ||
56 | INSIST(client->recursionquota == NULL); | ||
57 | INSIST(client->newstate <= NS_CLIENTSTATE_READY); | ||
58 | - if (client->nreads > 0) | ||
59 | + | ||
60 | + if (client->nreads > 0) { | ||
61 | dns_tcpmsg_cancelread(&client->tcpmsg); | ||
62 | + } | ||
63 | + | ||
64 | if (client->nreads != 0) { | ||
65 | /* Still waiting for read cancel completion. */ | ||
66 | return (true); | ||
67 | @@ -416,25 +420,58 @@ exit_check(ns_client_t *client) { | ||
68 | dns_tcpmsg_invalidate(&client->tcpmsg); | ||
69 | client->tcpmsg_valid = false; | ||
70 | } | ||
71 | + | ||
72 | if (client->tcpsocket != NULL) { | ||
73 | CTRACE("closetcp"); | ||
74 | isc_socket_detach(&client->tcpsocket); | ||
75 | + | ||
76 | + if (client->tcpactive) { | ||
77 | + LOCK(&client->interface->lock); | ||
78 | + INSIST(client->interface->ntcpactive > 0); | ||
79 | + client->interface->ntcpactive--; | ||
80 | + UNLOCK(&client->interface->lock); | ||
81 | + client->tcpactive = false; | ||
82 | + } | ||
83 | } | ||
84 | |||
85 | if (client->tcpquota != NULL) { | ||
86 | - isc_quota_detach(&client->tcpquota); | ||
87 | - } else { | ||
88 | /* | ||
89 | - * We went over quota with this client, we don't | ||
90 | - * want to restart listening unless this is the | ||
91 | - * last client on this interface, which is | ||
92 | - * checked later. | ||
93 | + * If we are not in a pipeline group, or | ||
94 | + * we are the last client in the group, detach from | ||
95 | + * tcpquota; otherwise, transfer the quota to | ||
96 | + * another client in the same group. | ||
97 | */ | ||
98 | - if (TCP_CLIENT(client)) { | ||
99 | - client->mortal = true; | ||
100 | + if (!ISC_LINK_LINKED(client, glink) || | ||
101 | + (client->glink.next == NULL && | ||
102 | + client->glink.prev == NULL)) | ||
103 | + { | ||
104 | + isc_quota_detach(&client->tcpquota); | ||
105 | + } else if (client->glink.next != NULL) { | ||
106 | + INSIST(client->glink.next->tcpquota == NULL); | ||
107 | + client->glink.next->tcpquota = client->tcpquota; | ||
108 | + client->tcpquota = NULL; | ||
109 | + } else { | ||
110 | + INSIST(client->glink.prev->tcpquota == NULL); | ||
111 | + client->glink.prev->tcpquota = client->tcpquota; | ||
112 | + client->tcpquota = NULL; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | + /* | ||
117 | + * Unlink from pipeline group. | ||
118 | + */ | ||
119 | + if (ISC_LINK_LINKED(client, glink)) { | ||
120 | + if (client->glink.next != NULL) { | ||
121 | + client->glink.next->glink.prev = | ||
122 | + client->glink.prev; | ||
123 | + } | ||
124 | + if (client->glink.prev != NULL) { | ||
125 | + client->glink.prev->glink.next = | ||
126 | + client->glink.next; | ||
127 | + } | ||
128 | + ISC_LINK_INIT(client, glink); | ||
129 | + } | ||
130 | + | ||
131 | if (client->timerset) { | ||
132 | (void)isc_timer_reset(client->timer, | ||
133 | isc_timertype_inactive, | ||
134 | @@ -455,15 +492,16 @@ exit_check(ns_client_t *client) { | ||
135 | * that already. Check whether this client needs to remain | ||
136 | * active and force it to go inactive if not. | ||
137 | * | ||
138 | - * UDP clients go inactive at this point, but TCP clients | ||
139 | - * may remain active if we have fewer active TCP client | ||
140 | - * objects than desired due to an earlier quota exhaustion. | ||
141 | + * UDP clients go inactive at this point, but a TCP client | ||
142 | + * will needs to remain active if no other clients are | ||
143 | + * listening for TCP requests on this interface, to | ||
144 | + * prevent this interface from going nonresponsive. | ||
145 | */ | ||
146 | if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) { | ||
147 | LOCK(&client->interface->lock); | ||
148 | - if (client->interface->ntcpcurrent < | ||
149 | - client->interface->ntcptarget) | ||
150 | + if (client->interface->ntcpaccepting == 0) { | ||
151 | client->mortal = false; | ||
152 | + } | ||
153 | UNLOCK(&client->interface->lock); | ||
154 | } | ||
155 | |||
156 | @@ -472,15 +510,17 @@ exit_check(ns_client_t *client) { | ||
157 | * queue for recycling. | ||
158 | */ | ||
159 | if (client->mortal) { | ||
160 | - if (client->newstate > NS_CLIENTSTATE_INACTIVE) | ||
161 | + if (client->newstate > NS_CLIENTSTATE_INACTIVE) { | ||
162 | client->newstate = NS_CLIENTSTATE_INACTIVE; | ||
163 | + } | ||
164 | } | ||
165 | |||
166 | if (NS_CLIENTSTATE_READY == client->newstate) { | ||
167 | if (TCP_CLIENT(client)) { | ||
168 | client_accept(client); | ||
169 | - } else | ||
170 | + } else { | ||
171 | client_udprecv(client); | ||
172 | + } | ||
173 | client->newstate = NS_CLIENTSTATE_MAX; | ||
174 | return (true); | ||
175 | } | ||
176 | @@ -492,41 +532,57 @@ exit_check(ns_client_t *client) { | ||
177 | /* | ||
178 | * We are trying to enter the inactive state. | ||
179 | */ | ||
180 | - if (client->naccepts > 0) | ||
181 | + if (client->naccepts > 0) { | ||
182 | isc_socket_cancel(client->tcplistener, client->task, | ||
183 | ISC_SOCKCANCEL_ACCEPT); | ||
184 | + } | ||
185 | |||
186 | /* Still waiting for accept cancel completion. */ | ||
187 | - if (! (client->naccepts == 0)) | ||
188 | + if (! (client->naccepts == 0)) { | ||
189 | return (true); | ||
190 | + } | ||
191 | |||
192 | /* Accept cancel is complete. */ | ||
193 | - if (client->nrecvs > 0) | ||
194 | + if (client->nrecvs > 0) { | ||
195 | isc_socket_cancel(client->udpsocket, client->task, | ||
196 | ISC_SOCKCANCEL_RECV); | ||
197 | + } | ||
198 | |||
199 | /* Still waiting for recv cancel completion. */ | ||
200 | - if (! (client->nrecvs == 0)) | ||
201 | + if (! (client->nrecvs == 0)) { | ||
202 | return (true); | ||
203 | + } | ||
204 | |||
205 | /* Still waiting for control event to be delivered */ | ||
206 | - if (client->nctls > 0) | ||
207 | + if (client->nctls > 0) { | ||
208 | return (true); | ||
209 | - | ||
210 | - /* Deactivate the client. */ | ||
211 | - if (client->interface) | ||
212 | - ns_interface_detach(&client->interface); | ||
213 | + } | ||
214 | |||
215 | INSIST(client->naccepts == 0); | ||
216 | INSIST(client->recursionquota == NULL); | ||
217 | - if (client->tcplistener != NULL) | ||
218 | + if (client->tcplistener != NULL) { | ||
219 | isc_socket_detach(&client->tcplistener); | ||
220 | |||
221 | - if (client->udpsocket != NULL) | ||
222 | + if (client->tcpactive) { | ||
223 | + LOCK(&client->interface->lock); | ||
224 | + INSIST(client->interface->ntcpactive > 0); | ||
225 | + client->interface->ntcpactive--; | ||
226 | + UNLOCK(&client->interface->lock); | ||
227 | + client->tcpactive = false; | ||
228 | + } | ||
229 | + } | ||
230 | + if (client->udpsocket != NULL) { | ||
231 | isc_socket_detach(&client->udpsocket); | ||
232 | + } | ||
233 | |||
234 | - if (client->dispatch != NULL) | ||
235 | + /* Deactivate the client. */ | ||
236 | + if (client->interface != NULL) { | ||
237 | + ns_interface_detach(&client->interface); | ||
238 | + } | ||
239 | + | ||
240 | + if (client->dispatch != NULL) { | ||
241 | dns_dispatch_detach(&client->dispatch); | ||
242 | + } | ||
243 | |||
244 | client->attributes = 0; | ||
245 | client->mortal = false; | ||
246 | @@ -551,10 +607,13 @@ exit_check(ns_client_t *client) { | ||
247 | client->newstate = NS_CLIENTSTATE_MAX; | ||
248 | if (!ns_g_clienttest && manager != NULL && | ||
249 | !manager->exiting) | ||
250 | + { | ||
251 | ISC_QUEUE_PUSH(manager->inactive, client, | ||
252 | ilink); | ||
253 | - if (client->needshutdown) | ||
254 | + } | ||
255 | + if (client->needshutdown) { | ||
256 | isc_task_shutdown(client->task); | ||
257 | + } | ||
258 | return (true); | ||
259 | } | ||
260 | } | ||
261 | @@ -675,7 +734,6 @@ client_start(isc_task_t *task, isc_event_t *event) { | ||
262 | } | ||
263 | } | ||
264 | |||
265 | - | ||
266 | /*% | ||
267 | * The client's task has received a shutdown event. | ||
268 | */ | ||
269 | @@ -2507,17 +2565,12 @@ client_request(isc_task_t *task, isc_event_t *event) { | ||
270 | /* | ||
271 | * Pipeline TCP query processing. | ||
272 | */ | ||
273 | - if (client->message->opcode != dns_opcode_query) | ||
274 | + if (client->message->opcode != dns_opcode_query) { | ||
275 | client->pipelined = false; | ||
276 | + } | ||
277 | if (TCP_CLIENT(client) && client->pipelined) { | ||
278 | - result = isc_quota_reserve(&ns_g_server->tcpquota); | ||
279 | - if (result == ISC_R_SUCCESS) | ||
280 | - result = ns_client_replace(client); | ||
281 | + result = ns_client_replace(client); | ||
282 | if (result != ISC_R_SUCCESS) { | ||
283 | - ns_client_log(client, NS_LOGCATEGORY_CLIENT, | ||
284 | - NS_LOGMODULE_CLIENT, ISC_LOG_WARNING, | ||
285 | - "no more TCP clients(read): %s", | ||
286 | - isc_result_totext(result)); | ||
287 | client->pipelined = false; | ||
288 | } | ||
289 | } | ||
290 | @@ -3087,6 +3140,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) { | ||
291 | client->filter_aaaa = dns_aaaa_ok; | ||
292 | #endif | ||
293 | client->needshutdown = ns_g_clienttest; | ||
294 | + client->tcpactive = false; | ||
295 | |||
296 | ISC_EVENT_INIT(&client->ctlevent, sizeof(client->ctlevent), 0, NULL, | ||
297 | NS_EVENT_CLIENTCONTROL, client_start, client, client, | ||
298 | @@ -3100,6 +3154,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) { | ||
299 | client->formerrcache.id = 0; | ||
300 | ISC_LINK_INIT(client, link); | ||
301 | ISC_LINK_INIT(client, rlink); | ||
302 | + ISC_LINK_INIT(client, glink); | ||
303 | ISC_QLINK_INIT(client, ilink); | ||
304 | client->keytag = NULL; | ||
305 | client->keytag_len = 0; | ||
306 | @@ -3193,12 +3248,19 @@ client_newconn(isc_task_t *task, isc_event_t *event) { | ||
307 | |||
308 | INSIST(client->state == NS_CLIENTSTATE_READY); | ||
309 | |||
310 | + /* | ||
311 | + * The accept() was successful and we're now establishing a new | ||
312 | + * connection. We need to make note of it in the client and | ||
313 | + * interface objects so client objects can do the right thing | ||
314 | + * when going inactive in exit_check() (see comments in | ||
315 | + * client_accept() for details). | ||
316 | + */ | ||
317 | INSIST(client->naccepts == 1); | ||
318 | client->naccepts--; | ||
319 | |||
320 | LOCK(&client->interface->lock); | ||
321 | - INSIST(client->interface->ntcpcurrent > 0); | ||
322 | - client->interface->ntcpcurrent--; | ||
323 | + INSIST(client->interface->ntcpaccepting > 0); | ||
324 | + client->interface->ntcpaccepting--; | ||
325 | UNLOCK(&client->interface->lock); | ||
326 | |||
327 | /* | ||
328 | @@ -3232,6 +3294,9 @@ client_newconn(isc_task_t *task, isc_event_t *event) { | ||
329 | NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3), | ||
330 | "accept failed: %s", | ||
331 | isc_result_totext(nevent->result)); | ||
332 | + if (client->tcpquota != NULL) { | ||
333 | + isc_quota_detach(&client->tcpquota); | ||
334 | + } | ||
335 | } | ||
336 | |||
337 | if (exit_check(client)) | ||
338 | @@ -3270,18 +3335,12 @@ client_newconn(isc_task_t *task, isc_event_t *event) { | ||
339 | * deny service to legitimate TCP clients. | ||
340 | */ | ||
341 | client->pipelined = false; | ||
342 | - result = isc_quota_attach(&ns_g_server->tcpquota, | ||
343 | - &client->tcpquota); | ||
344 | - if (result == ISC_R_SUCCESS) | ||
345 | - result = ns_client_replace(client); | ||
346 | - if (result != ISC_R_SUCCESS) { | ||
347 | - ns_client_log(client, NS_LOGCATEGORY_CLIENT, | ||
348 | - NS_LOGMODULE_CLIENT, ISC_LOG_WARNING, | ||
349 | - "no more TCP clients(accept): %s", | ||
350 | - isc_result_totext(result)); | ||
351 | - } else if (ns_g_server->keepresporder == NULL || | ||
352 | - !allowed(&netaddr, NULL, NULL, 0, NULL, | ||
353 | - ns_g_server->keepresporder)) { | ||
354 | + result = ns_client_replace(client); | ||
355 | + if (result == ISC_R_SUCCESS && | ||
356 | + (client->sctx->keepresporder == NULL || | ||
357 | + !allowed(&netaddr, NULL, NULL, 0, NULL, | ||
358 | + ns_g_server->keepresporder))) | ||
359 | + { | ||
360 | client->pipelined = true; | ||
361 | } | ||
362 | |||
363 | @@ -3298,12 +3357,80 @@ client_accept(ns_client_t *client) { | ||
364 | |||
365 | CTRACE("accept"); | ||
366 | |||
367 | + /* | ||
368 | + * The tcpquota object can only be simultaneously referenced a | ||
369 | + * pre-defined number of times; this is configured by 'tcp-clients' | ||
370 | + * in named.conf. If we can't attach to it here, that means the TCP | ||
371 | + * client quota has been exceeded. | ||
372 | + */ | ||
373 | + result = isc_quota_attach(&client->sctx->tcpquota, | ||
374 | + &client->tcpquota); | ||
375 | + if (result != ISC_R_SUCCESS) { | ||
376 | + bool exit; | ||
377 | + | ||
378 | + ns_client_log(client, NS_LOGCATEGORY_CLIENT, | ||
379 | + NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1), | ||
380 | + "no more TCP clients: %s", | ||
381 | + isc_result_totext(result)); | ||
382 | + | ||
383 | + /* | ||
384 | + * We have exceeded the system-wide TCP client | ||
385 | + * quota. But, we can't just block this accept | ||
386 | + * in all cases, because if we did, a heavy TCP | ||
387 | + * load on other interfaces might cause this | ||
388 | + * interface to be starved, with no clients able | ||
389 | + * to accept new connections. | ||
390 | + * | ||
391 | + * So, we check here to see if any other client | ||
392 | + * is already servicing TCP queries on this | ||
393 | + * interface (whether accepting, reading, or | ||
394 | + * processing). | ||
395 | + * | ||
396 | + * If so, then it's okay *not* to call | ||
397 | + * accept - we can let this client to go inactive | ||
398 | + * and the other one handle the next connection | ||
399 | + * when it's ready. | ||
400 | + * | ||
401 | + * But if not, then we need to be a little bit | ||
402 | + * flexible about the quota. We allow *one* extra | ||
403 | + * TCP client through, to ensure we're listening on | ||
404 | + * every interface. | ||
405 | + * | ||
406 | + * (Note: In practice this means that the *real* | ||
407 | + * TCP client quota is tcp-clients plus the number | ||
408 | + * of interfaces.) | ||
409 | + */ | ||
410 | + LOCK(&client->interface->lock); | ||
411 | + exit = (client->interface->ntcpactive > 0); | ||
412 | + UNLOCK(&client->interface->lock); | ||
413 | + | ||
414 | + if (exit) { | ||
415 | + client->newstate = NS_CLIENTSTATE_INACTIVE; | ||
416 | + (void)exit_check(client); | ||
417 | + return; | ||
418 | + } | ||
419 | + } | ||
420 | + | ||
421 | + /* | ||
422 | + * By incrementing the interface's ntcpactive counter we signal | ||
423 | + * that there is at least one client servicing TCP queries for the | ||
424 | + * interface. | ||
425 | + * | ||
426 | + * We also make note of the fact in the client itself with the | ||
427 | + * tcpactive flag. This ensures proper accounting by preventing | ||
428 | + * us from accidentally incrementing or decrementing ntcpactive | ||
429 | + * more than once per client object. | ||
430 | + */ | ||
431 | + if (!client->tcpactive) { | ||
432 | + LOCK(&client->interface->lock); | ||
433 | + client->interface->ntcpactive++; | ||
434 | + UNLOCK(&client->interface->lock); | ||
435 | + client->tcpactive = true; | ||
436 | + } | ||
437 | + | ||
438 | result = isc_socket_accept(client->tcplistener, client->task, | ||
439 | client_newconn, client); | ||
440 | if (result != ISC_R_SUCCESS) { | ||
441 | - UNEXPECTED_ERROR(__FILE__, __LINE__, | ||
442 | - "isc_socket_accept() failed: %s", | ||
443 | - isc_result_totext(result)); | ||
444 | /* | ||
445 | * XXXRTH What should we do? We're trying to accept but | ||
446 | * it didn't work. If we just give up, then TCP | ||
447 | @@ -3311,12 +3438,39 @@ client_accept(ns_client_t *client) { | ||
448 | * | ||
449 | * For now, we just go idle. | ||
450 | */ | ||
451 | + UNEXPECTED_ERROR(__FILE__, __LINE__, | ||
452 | + "isc_socket_accept() failed: %s", | ||
453 | + isc_result_totext(result)); | ||
454 | + if (client->tcpquota != NULL) { | ||
455 | + isc_quota_detach(&client->tcpquota); | ||
456 | + } | ||
457 | return; | ||
458 | } | ||
459 | + | ||
460 | + /* | ||
461 | + * The client's 'naccepts' counter indicates that this client has | ||
462 | + * called accept() and is waiting for a new connection. It should | ||
463 | + * never exceed 1. | ||
464 | + */ | ||
465 | INSIST(client->naccepts == 0); | ||
466 | client->naccepts++; | ||
467 | + | ||
468 | + /* | ||
469 | + * The interface's 'ntcpaccepting' counter is incremented when | ||
470 | + * any client calls accept(), and decremented in client_newconn() | ||
471 | + * once the connection is established. | ||
472 | + * | ||
473 | + * When the client object is shutting down after handling a TCP | ||
474 | + * request (see exit_check()), it looks to see whether this value is | ||
475 | + * non-zero. If so, that means another client has already called | ||
476 | + * accept() and is waiting to establish the next connection, which | ||
477 | + * means the first client is free to go inactive. Otherwise, | ||
478 | + * the first client must come back and call accept() again; this | ||
479 | + * guarantees there will always be at least one client listening | ||
480 | + * for new TCP connections on each interface. | ||
481 | + */ | ||
482 | LOCK(&client->interface->lock); | ||
483 | - client->interface->ntcpcurrent++; | ||
484 | + client->interface->ntcpaccepting++; | ||
485 | UNLOCK(&client->interface->lock); | ||
486 | } | ||
487 | |||
488 | @@ -3390,13 +3544,14 @@ ns_client_replace(ns_client_t *client) { | ||
489 | tcp = TCP_CLIENT(client); | ||
490 | if (tcp && client->pipelined) { | ||
491 | result = get_worker(client->manager, client->interface, | ||
492 | - client->tcpsocket); | ||
493 | + client->tcpsocket, client); | ||
494 | } else { | ||
495 | result = get_client(client->manager, client->interface, | ||
496 | client->dispatch, tcp); | ||
497 | } | ||
498 | - if (result != ISC_R_SUCCESS) | ||
499 | + if (result != ISC_R_SUCCESS) { | ||
500 | return (result); | ||
501 | + } | ||
502 | |||
503 | /* | ||
504 | * The responsibility for listening for new requests is hereby | ||
505 | @@ -3585,6 +3740,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
506 | client->attributes |= NS_CLIENTATTR_TCP; | ||
507 | isc_socket_attach(ifp->tcpsocket, | ||
508 | &client->tcplistener); | ||
509 | + | ||
510 | } else { | ||
511 | isc_socket_t *sock; | ||
512 | |||
513 | @@ -3602,7 +3758,8 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
514 | } | ||
515 | |||
516 | static isc_result_t | ||
517 | -get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock) | ||
518 | +get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock, | ||
519 | + ns_client_t *oldclient) | ||
520 | { | ||
521 | isc_result_t result = ISC_R_SUCCESS; | ||
522 | isc_event_t *ev; | ||
523 | @@ -3610,6 +3767,7 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock) | ||
524 | MTRACE("get worker"); | ||
525 | |||
526 | REQUIRE(manager != NULL); | ||
527 | + REQUIRE(oldclient != NULL); | ||
528 | |||
529 | if (manager->exiting) | ||
530 | return (ISC_R_SHUTTINGDOWN); | ||
531 | @@ -3642,7 +3800,28 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock) | ||
532 | ns_interface_attach(ifp, &client->interface); | ||
533 | client->newstate = client->state = NS_CLIENTSTATE_WORKING; | ||
534 | INSIST(client->recursionquota == NULL); | ||
535 | - client->tcpquota = &ns_g_server->tcpquota; | ||
536 | + | ||
537 | + /* | ||
538 | + * Transfer TCP quota to the new client. | ||
539 | + */ | ||
540 | + INSIST(client->tcpquota == NULL); | ||
541 | + INSIST(oldclient->tcpquota != NULL); | ||
542 | + client->tcpquota = oldclient->tcpquota; | ||
543 | + oldclient->tcpquota = NULL; | ||
544 | + | ||
545 | + /* | ||
546 | + * Link to a pipeline group, creating it if needed. | ||
547 | + */ | ||
548 | + if (!ISC_LINK_LINKED(oldclient, glink)) { | ||
549 | + oldclient->glink.next = NULL; | ||
550 | + oldclient->glink.prev = NULL; | ||
551 | + } | ||
552 | + client->glink.next = oldclient->glink.next; | ||
553 | + client->glink.prev = oldclient; | ||
554 | + if (oldclient->glink.next != NULL) { | ||
555 | + oldclient->glink.next->glink.prev = client; | ||
556 | + } | ||
557 | + oldclient->glink.next = client; | ||
558 | |||
559 | client->dscp = ifp->dscp; | ||
560 | |||
561 | @@ -3656,6 +3835,12 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock) | ||
562 | (void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr); | ||
563 | client->peeraddr_valid = true; | ||
564 | |||
565 | + LOCK(&client->interface->lock); | ||
566 | + client->interface->ntcpactive++; | ||
567 | + UNLOCK(&client->interface->lock); | ||
568 | + | ||
569 | + client->tcpactive = true; | ||
570 | + | ||
571 | INSIST(client->tcpmsg_valid == false); | ||
572 | dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg); | ||
573 | client->tcpmsg_valid = true; | ||
574 | diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h | ||
575 | index b23a7b191d..1f7973f9c5 100644 | ||
576 | --- a/bin/named/include/named/client.h | ||
577 | +++ b/bin/named/include/named/client.h | ||
578 | @@ -94,7 +94,8 @@ struct ns_client { | ||
579 | int nupdates; | ||
580 | int nctls; | ||
581 | int references; | ||
582 | - bool needshutdown; /* | ||
583 | + bool tcpactive; | ||
584 | + bool needshutdown; /* | ||
585 | * Used by clienttest to get | ||
586 | * the client to go from | ||
587 | * inactive to free state | ||
588 | @@ -130,9 +131,9 @@ struct ns_client { | ||
589 | isc_stdtime_t now; | ||
590 | isc_time_t tnow; | ||
591 | dns_name_t signername; /*%< [T]SIG key name */ | ||
592 | - dns_name_t * signer; /*%< NULL if not valid sig */ | ||
593 | - bool mortal; /*%< Die after handling request */ | ||
594 | - bool pipelined; /*%< TCP queries not in sequence */ | ||
595 | + dns_name_t *signer; /*%< NULL if not valid sig */ | ||
596 | + bool mortal; /*%< Die after handling request */ | ||
597 | + bool pipelined; /*%< TCP queries not in sequence */ | ||
598 | isc_quota_t *tcpquota; | ||
599 | isc_quota_t *recursionquota; | ||
600 | ns_interface_t *interface; | ||
601 | @@ -143,8 +144,8 @@ struct ns_client { | ||
602 | isc_sockaddr_t destsockaddr; | ||
603 | |||
604 | isc_netaddr_t ecs_addr; /*%< EDNS client subnet */ | ||
605 | - uint8_t ecs_addrlen; | ||
606 | - uint8_t ecs_scope; | ||
607 | + uint8_t ecs_addrlen; | ||
608 | + uint8_t ecs_scope; | ||
609 | |||
610 | struct in6_pktinfo pktinfo; | ||
611 | isc_dscp_t dscp; | ||
612 | @@ -166,6 +167,7 @@ struct ns_client { | ||
613 | |||
614 | ISC_LINK(ns_client_t) link; | ||
615 | ISC_LINK(ns_client_t) rlink; | ||
616 | + ISC_LINK(ns_client_t) glink; | ||
617 | ISC_QLINK(ns_client_t) ilink; | ||
618 | unsigned char cookie[8]; | ||
619 | uint32_t expire; | ||
620 | diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h | ||
621 | index 7d1883e1e8..61b08826a6 100644 | ||
622 | --- a/bin/named/include/named/interfacemgr.h | ||
623 | +++ b/bin/named/include/named/interfacemgr.h | ||
624 | @@ -77,9 +77,14 @@ struct ns_interface { | ||
625 | /*%< UDP dispatchers. */ | ||
626 | isc_socket_t * tcpsocket; /*%< TCP socket. */ | ||
627 | isc_dscp_t dscp; /*%< "listen-on" DSCP value */ | ||
628 | - int ntcptarget; /*%< Desired number of concurrent | ||
629 | - TCP accepts */ | ||
630 | - int ntcpcurrent; /*%< Current ditto, locked */ | ||
631 | + int ntcpaccepting; /*%< Number of clients | ||
632 | + ready to accept new | ||
633 | + TCP connections on this | ||
634 | + interface */ | ||
635 | + int ntcpactive; /*%< Number of clients | ||
636 | + servicing TCP queries | ||
637 | + (whether accepting or | ||
638 | + connected) */ | ||
639 | int nudpdispatch; /*%< Number of UDP dispatches */ | ||
640 | ns_clientmgr_t * clientmgr; /*%< Client manager. */ | ||
641 | ISC_LINK(ns_interface_t) link; | ||
642 | diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c | ||
643 | index 419927bf54..955096ef47 100644 | ||
644 | --- a/bin/named/interfacemgr.c | ||
645 | +++ b/bin/named/interfacemgr.c | ||
646 | @@ -386,8 +386,8 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr, | ||
647 | * connections will be handled in parallel even though there is | ||
648 | * only one client initially. | ||
649 | */ | ||
650 | - ifp->ntcptarget = 1; | ||
651 | - ifp->ntcpcurrent = 0; | ||
652 | + ifp->ntcpaccepting = 0; | ||
653 | + ifp->ntcpactive = 0; | ||
654 | ifp->nudpdispatch = 0; | ||
655 | |||
656 | ifp->dscp = -1; | ||
657 | @@ -522,9 +522,7 @@ ns_interface_accepttcp(ns_interface_t *ifp) { | ||
658 | */ | ||
659 | (void)isc_socket_filter(ifp->tcpsocket, "dataready"); | ||
660 | |||
661 | - result = ns_clientmgr_createclients(ifp->clientmgr, | ||
662 | - ifp->ntcptarget, ifp, | ||
663 | - true); | ||
664 | + result = ns_clientmgr_createclients(ifp->clientmgr, 1, ifp, true); | ||
665 | if (result != ISC_R_SUCCESS) { | ||
666 | UNEXPECTED_ERROR(__FILE__, __LINE__, | ||
667 | "TCP ns_clientmgr_createclients(): %s", | ||
668 | -- | ||
669 | 2.20.1 | ||
670 | |||
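The v2 rework above hinges on two per-interface counters: `ntcpaccepting` (clients currently waiting in accept()) and `ntcpactive` (clients servicing TCP in any state). When the global quota is exhausted, an interface is still allowed one client so that heavy TCP load elsewhere cannot starve it — which, as the patch comment notes, makes the effective ceiling tcp-clients plus the number of interfaces. A condensed sketch of that accept-time decision, under simplified hypothetical types rather than BIND's real control flow:

```c
#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-in for ns_interface_t. */
typedef struct {
    pthread_mutex_t lock;
    int ntcpaccepting;  /* clients waiting in accept() */
    int ntcpactive;     /* clients servicing TCP at all */
} interface_t;

/* Decide whether a client that failed to attach to the global
 * quota may keep servicing TCP on 'ifp'. Returns true if the
 * client should go inactive instead. */
bool over_quota_should_exit(interface_t *ifp) {
    bool exit_now;
    pthread_mutex_lock(&ifp->lock);
    /* Someone else is already servicing TCP here, so the
     * interface stays responsive without us: back off. */
    exit_now = (ifp->ntcpactive > 0);
    pthread_mutex_unlock(&ifp->lock);
    return exit_now;
}
```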
diff --git a/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch b/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch
new file mode 100644
index 0000000000..032cfb8c44
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch
@@ -0,0 +1,278 @@
1 | Backport patch to fix CVE-2018-5743. | ||
2 | |||
3 | Ref: | ||
4 | https://security-tracker.debian.org/tracker/CVE-2018-5743 | ||
5 | |||
6 | CVE: CVE-2018-5743 | ||
7 | Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/366b4e1] | ||
8 | |||
9 | Signed-off-by: Kai Kang <kai.kang@windriver.com> | ||
10 | |||
11 | From 366b4e1ede8aed690e981e07137cb1cb77879c36 Mon Sep 17 00:00:00 2001 | ||
12 | From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= <michal@isc.org> | ||
13 | Date: Thu, 17 Jan 2019 15:53:38 +0100 | ||
14 | Subject: [PATCH 3/6] use reference counter for pipeline groups (v3) | ||
15 | |||
16 | Track pipeline groups using a shared reference counter | ||
17 | instead of a linked list. | ||
18 | |||
19 | (cherry picked from commit 513afd33eb17d5dc41a3f0d2d38204ef8c5f6f91) | ||
20 | (cherry picked from commit 9446629b730c59c4215f08d37fbaf810282fbccb) | ||
21 | --- | ||
22 | bin/named/client.c | 171 ++++++++++++++++++++----------- | ||
23 | bin/named/include/named/client.h | 2 +- | ||
24 | 2 files changed, 110 insertions(+), 63 deletions(-) | ||
25 | |||
26 | diff --git a/bin/named/client.c b/bin/named/client.c | ||
27 | index a7b49a0f71..277656cef0 100644 | ||
28 | --- a/bin/named/client.c | ||
29 | +++ b/bin/named/client.c | ||
30 | @@ -299,6 +299,75 @@ ns_client_settimeout(ns_client_t *client, unsigned int seconds) { | ||
31 | } | ||
32 | } | ||
33 | |||
34 | +/*% | ||
35 | + * Allocate a reference counter that will track the number of client structures | ||
36 | + * using the TCP connection that 'client' called accept() for. This counter | ||
37 | + * will be shared between all client structures associated with this TCP | ||
38 | + * connection. | ||
39 | + */ | ||
40 | +static void | ||
41 | +pipeline_init(ns_client_t *client) { | ||
42 | + isc_refcount_t *refs; | ||
43 | + | ||
44 | + REQUIRE(client->pipeline_refs == NULL); | ||
45 | + | ||
46 | + /* | ||
47 | + * A global memory context is used for the allocation as different | ||
48 | + * client structures may have different memory contexts assigned and a | ||
49 | + * reference counter allocated here might need to be freed by a | ||
50 | + * different client. The performance impact caused by memory context | ||
51 | + * contention here is expected to be negligible, given that this code | ||
52 | + * is only executed for TCP connections. | ||
53 | + */ | ||
54 | + refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs)); | ||
55 | + isc_refcount_init(refs, 1); | ||
56 | + client->pipeline_refs = refs; | ||
57 | +} | ||
58 | + | ||
59 | +/*% | ||
60 | + * Increase the count of client structures using the TCP connection that | ||
61 | + * 'source' is associated with and put a pointer to that count in 'target', | ||
62 | + * thus associating it with the same TCP connection. | ||
63 | + */ | ||
64 | +static void | ||
65 | +pipeline_attach(ns_client_t *source, ns_client_t *target) { | ||
66 | + int old_refs; | ||
67 | + | ||
68 | + REQUIRE(source->pipeline_refs != NULL); | ||
69 | + REQUIRE(target->pipeline_refs == NULL); | ||
70 | + | ||
71 | + old_refs = isc_refcount_increment(source->pipeline_refs); | ||
72 | + INSIST(old_refs > 0); | ||
73 | + target->pipeline_refs = source->pipeline_refs; | ||
74 | +} | ||
75 | + | ||
76 | +/*% | ||
77 | + * Decrease the count of client structures using the TCP connection that | ||
78 | + * 'client' is associated with. If this is the last client using this TCP | ||
79 | + * connection, free the reference counter and return true; otherwise, return | ||
80 | + * false. | ||
81 | + */ | ||
82 | +static bool | ||
83 | +pipeline_detach(ns_client_t *client) { | ||
84 | + isc_refcount_t *refs; | ||
85 | + int old_refs; | ||
86 | + | ||
87 | + REQUIRE(client->pipeline_refs != NULL); | ||
88 | + | ||
89 | + refs = client->pipeline_refs; | ||
90 | + client->pipeline_refs = NULL; | ||
91 | + | ||
92 | + old_refs = isc_refcount_decrement(refs); | ||
93 | + INSIST(old_refs > 0); | ||
94 | + | ||
95 | + if (old_refs == 1) { | ||
96 | + isc_mem_free(client->sctx->mctx, refs); | ||
97 | + return (true); | ||
98 | + } | ||
99 | + | ||
100 | + return (false); | ||
101 | +} | ||
102 | + | ||
103 | /*% | ||
104 | * Check for a deactivation or shutdown request and take appropriate | ||
105 | * action. Returns true if either is in progress; in this case | ||
106 | @@ -421,6 +490,40 @@ exit_check(ns_client_t *client) { | ||
107 | client->tcpmsg_valid = false; | ||
108 | } | ||
109 | |||
110 | + if (client->tcpquota != NULL) { | ||
111 | + if (client->pipeline_refs == NULL || | ||
112 | + pipeline_detach(client)) | ||
113 | + { | ||
114 | + /* | ||
115 | + * Only detach from the TCP client quota if | ||
116 | + * there are no more client structures using | ||
117 | + * this TCP connection. | ||
118 | + * | ||
119 | + * Note that we check 'pipeline_refs' and not | ||
120 | + * 'pipelined' because in some cases (e.g. | ||
121 | + * after receiving a request with an opcode | ||
122 | + * different than QUERY) 'pipelined' is set to | ||
123 | + * false after the reference counter gets | ||
124 | + * allocated in pipeline_init() and we must | ||
125 | + * still drop our reference as failing to do so | ||
126 | + * would prevent the reference counter itself | ||
127 | + * from being freed. | ||
128 | + */ | ||
129 | + isc_quota_detach(&client->tcpquota); | ||
130 | + } else { | ||
131 | + /* | ||
132 | + * There are other client structures using this | ||
133 | + * TCP connection, so we cannot detach from the | ||
134 | + * TCP client quota to prevent excess TCP | ||
135 | + * connections from being accepted. However, | ||
136 | + * this client structure might later be reused | ||
137 | + * for accepting new connections and thus must | ||
138 | + * have its 'tcpquota' field set to NULL. | ||
139 | + */ | ||
140 | + client->tcpquota = NULL; | ||
141 | + } | ||
142 | + } | ||
143 | + | ||
144 | if (client->tcpsocket != NULL) { | ||
145 | CTRACE("closetcp"); | ||
146 | isc_socket_detach(&client->tcpsocket); | ||
147 | @@ -434,44 +537,6 @@ exit_check(ns_client_t *client) { | ||
148 | } | ||
149 | } | ||
150 | |||
151 | - if (client->tcpquota != NULL) { | ||
152 | - /* | ||
153 | - * If we are not in a pipeline group, or | ||
154 | - * we are the last client in the group, detach from | ||
155 | - * tcpquota; otherwise, transfer the quota to | ||
156 | - * another client in the same group. | ||
157 | - */ | ||
158 | - if (!ISC_LINK_LINKED(client, glink) || | ||
159 | - (client->glink.next == NULL && | ||
160 | - client->glink.prev == NULL)) | ||
161 | - { | ||
162 | - isc_quota_detach(&client->tcpquota); | ||
163 | - } else if (client->glink.next != NULL) { | ||
164 | - INSIST(client->glink.next->tcpquota == NULL); | ||
165 | - client->glink.next->tcpquota = client->tcpquota; | ||
166 | - client->tcpquota = NULL; | ||
167 | - } else { | ||
168 | - INSIST(client->glink.prev->tcpquota == NULL); | ||
169 | - client->glink.prev->tcpquota = client->tcpquota; | ||
170 | - client->tcpquota = NULL; | ||
171 | - } | ||
172 | - } | ||
173 | - | ||
174 | - /* | ||
175 | - * Unlink from pipeline group. | ||
176 | - */ | ||
177 | - if (ISC_LINK_LINKED(client, glink)) { | ||
178 | - if (client->glink.next != NULL) { | ||
179 | - client->glink.next->glink.prev = | ||
180 | - client->glink.prev; | ||
181 | - } | ||
182 | - if (client->glink.prev != NULL) { | ||
183 | - client->glink.prev->glink.next = | ||
184 | - client->glink.next; | ||
185 | - } | ||
186 | - ISC_LINK_INIT(client, glink); | ||
187 | - } | ||
188 | - | ||
189 | if (client->timerset) { | ||
190 | (void)isc_timer_reset(client->timer, | ||
191 | isc_timertype_inactive, | ||
192 | @@ -3130,6 +3195,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) { | ||
193 | dns_name_init(&client->signername, NULL); | ||
194 | client->mortal = false; | ||
195 | client->pipelined = false; | ||
196 | + client->pipeline_refs = NULL; | ||
197 | client->tcpquota = NULL; | ||
198 | client->recursionquota = NULL; | ||
199 | client->interface = NULL; | ||
200 | @@ -3154,7 +3220,6 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) { | ||
201 | client->formerrcache.id = 0; | ||
202 | ISC_LINK_INIT(client, link); | ||
203 | ISC_LINK_INIT(client, rlink); | ||
204 | - ISC_LINK_INIT(client, glink); | ||
205 | ISC_QLINK_INIT(client, ilink); | ||
206 | client->keytag = NULL; | ||
207 | client->keytag_len = 0; | ||
208 | @@ -3341,6 +3406,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) { | ||
209 | !allowed(&netaddr, NULL, NULL, 0, NULL, | ||
210 | ns_g_server->keepresporder))) | ||
211 | { | ||
212 | + pipeline_init(client); | ||
213 | client->pipelined = true; | ||
214 | } | ||
215 | |||
216 | @@ -3800,35 +3866,16 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock, | ||
217 | ns_interface_attach(ifp, &client->interface); | ||
218 | client->newstate = client->state = NS_CLIENTSTATE_WORKING; | ||
219 | INSIST(client->recursionquota == NULL); | ||
220 | - | ||
221 | - /* | ||
222 | - * Transfer TCP quota to the new client. | ||
223 | - */ | ||
224 | - INSIST(client->tcpquota == NULL); | ||
225 | - INSIST(oldclient->tcpquota != NULL); | ||
226 | - client->tcpquota = oldclient->tcpquota; | ||
227 | - oldclient->tcpquota = NULL; | ||
228 | - | ||
229 | - /* | ||
230 | - * Link to a pipeline group, creating it if needed. | ||
231 | - */ | ||
232 | - if (!ISC_LINK_LINKED(oldclient, glink)) { | ||
233 | - oldclient->glink.next = NULL; | ||
234 | - oldclient->glink.prev = NULL; | ||
235 | - } | ||
236 | - client->glink.next = oldclient->glink.next; | ||
237 | - client->glink.prev = oldclient; | ||
238 | - if (oldclient->glink.next != NULL) { | ||
239 | - oldclient->glink.next->glink.prev = client; | ||
240 | - } | ||
241 | - oldclient->glink.next = client; | ||
242 | + client->tcpquota = &client->sctx->tcpquota; | ||
243 | |||
244 | client->dscp = ifp->dscp; | ||
245 | |||
246 | client->attributes |= NS_CLIENTATTR_TCP; | ||
247 | - client->pipelined = true; | ||
248 | client->mortal = true; | ||
249 | |||
250 | + pipeline_attach(oldclient, client); | ||
251 | + client->pipelined = true; | ||
252 | + | ||
253 | isc_socket_attach(ifp->tcpsocket, &client->tcplistener); | ||
254 | isc_socket_attach(sock, &client->tcpsocket); | ||
255 | isc_socket_setname(client->tcpsocket, "worker-tcp", NULL); | ||
256 | diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h | ||
257 | index 1f7973f9c5..aeed9ccdda 100644 | ||
258 | --- a/bin/named/include/named/client.h | ||
259 | +++ b/bin/named/include/named/client.h | ||
260 | @@ -134,6 +134,7 @@ struct ns_client { | ||
261 | dns_name_t *signer; /*%< NULL if not valid sig */ | ||
262 | bool mortal; /*%< Die after handling request */ | ||
263 | bool pipelined; /*%< TCP queries not in sequence */ | ||
264 | + isc_refcount_t *pipeline_refs; | ||
265 | isc_quota_t *tcpquota; | ||
266 | isc_quota_t *recursionquota; | ||
267 | ns_interface_t *interface; | ||
268 | @@ -167,7 +168,6 @@ struct ns_client { | ||
269 | |||
270 | ISC_LINK(ns_client_t) link; | ||
271 | ISC_LINK(ns_client_t) rlink; | ||
272 | - ISC_LINK(ns_client_t) glink; | ||
273 | ISC_QLINK(ns_client_t) ilink; | ||
274 | unsigned char cookie[8]; | ||
275 | uint32_t expire; | ||
276 | -- | ||
277 | 2.20.1 | ||
278 | |||
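This third patch replaces the doubly linked `glink` list with a heap-allocated reference counter shared by every client structure on one pipelined TCP connection; only the last client to drop its reference releases the quota slot. A compact sketch of that idea using C11 atomics and an invented `pipeline_t` type — the real code uses `isc_refcount_t` plus a shared memory context, and the function names below are taken from the patch:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
    atomic_int refs;    /* clients sharing this TCP connection */
} pipeline_t;

/* The first client on the connection allocates the counter. */
pipeline_t *pipeline_init(void) {
    pipeline_t *p = malloc(sizeof(*p));
    if (p != NULL) {
        atomic_init(&p->refs, 1);
    }
    return p;
}

/* Each additional pipelined client shares the same counter. */
void pipeline_attach(pipeline_t *p) {
    atomic_fetch_add(&p->refs, 1);
}

/* Returns true for the last detaching client, which is then the
 * one responsible for releasing the TCP quota slot. */
bool pipeline_detach(pipeline_t *p) {
    if (atomic_fetch_sub(&p->refs, 1) == 1) {
        free(p);
        return true;
    }
    return false;
}
```

A shared counter makes teardown order irrelevant: any client can exit at any time, whereas the linked-list scheme had to hand the quota pointer from one group member to another.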
diff --git a/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch b/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch
new file mode 100644
index 0000000000..034ab13303
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch
@@ -0,0 +1,512 @@
1 | Backport patch to fix CVE-2018-5743. | ||
2 | |||
3 | Ref: | ||
4 | https://security-tracker.debian.org/tracker/CVE-2018-5743 | ||
5 | |||
6 | CVE: CVE-2018-5743 | ||
7 | Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/2ab8a08] | ||
8 | |||
9 | Signed-off-by: Kai Kang <kai.kang@windriver.com> | ||
10 | |||
11 | From 2ab8a085b3c666f28f1f9229bd6ecb59915b26c3 Mon Sep 17 00:00:00 2001 | ||
12 | From: Evan Hunt <each@isc.org> | ||
13 | Date: Fri, 5 Apr 2019 16:12:18 -0700 | ||
14 | Subject: [PATCH 4/6] better tcpquota accounting and client mortality checks | ||
15 | |||
16 | - ensure that tcpactive is cleaned up correctly when accept() fails. | ||
17 | - set 'client->tcpattached' when the client is attached to the tcpquota. | ||
18 | carry this value on to new clients sharing the same pipeline group. | ||
19 | don't call isc_quota_detach() on the tcpquota unless tcpattached is | ||
20 | set. this way clients that were allowed to accept TCP connections | ||
21 | despite being over quota (and therefore, were never attached to the | ||
22 | quota) will not inadvertently detach from it and mess up the | ||
23 | accounting. | ||
24 | - simplify the code for tcpquota disconnection by using a new function | ||
25 | tcpquota_disconnect(). | ||
26 | - before deciding whether to reject a new connection due to quota | ||
27 | exhaustion, check to see whether there are at least two active | ||
28 | clients. previously, this was "at least one", but that could be | ||
29 | insufficient if there was one other client in READING state (waiting | ||
30 | for messages on an open connection) but none in READY (listening | ||
31 | for new connections). | ||
32 | - before deciding whether a TCP client object can to go inactive, we | ||
33 | must ensure there are enough other clients to maintain service | ||
34 | afterward -- both accepting new connections and reading/processing new | ||
35 | queries. A TCP client can't shut down unless at least one | ||
36 | client is accepting new connections and (in the case of pipelined | ||
37 | clients) at least one additional client is waiting to read. | ||
38 | |||
39 | (cherry picked from commit c7394738b2445c16f728a88394864dd61baad900) | ||
40 | (cherry picked from commit e965d5f11d3d0f6d59704e614fceca2093cb1856) | ||
41 | (cherry picked from commit 87d431161450777ea093821212abfb52d51b36e3) | ||
42 | --- | ||
43 | bin/named/client.c | 244 +++++++++++++++++++------------ | ||
44 | bin/named/include/named/client.h | 3 +- | ||
45 | 2 files changed, 152 insertions(+), 95 deletions(-) | ||
46 | |||
47 | diff --git a/bin/named/client.c b/bin/named/client.c | ||
48 | index 277656cef0..61e96dd28c 100644 | ||
49 | --- a/bin/named/client.c | ||
50 | +++ b/bin/named/client.c | ||
51 | @@ -244,13 +244,14 @@ static void client_start(isc_task_t *task, isc_event_t *event); | ||
52 | static void client_request(isc_task_t *task, isc_event_t *event); | ||
53 | static void ns_client_dumpmessage(ns_client_t *client, const char *reason); | ||
54 | static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
55 | - dns_dispatch_t *disp, bool tcp); | ||
56 | + dns_dispatch_t *disp, ns_client_t *oldclient, | ||
57 | + bool tcp); | ||
58 | static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
59 | isc_socket_t *sock, ns_client_t *oldclient); | ||
60 | static inline bool | ||
61 | allowed(isc_netaddr_t *addr, dns_name_t *signer, | ||
62 | isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen, | ||
63 | - uint8_t *ecs_scope, dns_acl_t *acl) | ||
64 | + uint8_t *ecs_scope, dns_acl_t *acl); | ||
65 | static void compute_cookie(ns_client_t *client, uint32_t when, | ||
66 | uint32_t nonce, const unsigned char *secret, | ||
67 | isc_buffer_t *buf); | ||
68 | @@ -319,7 +320,7 @@ pipeline_init(ns_client_t *client) { | ||
69 | * contention here is expected to be negligible, given that this code | ||
70 | * is only executed for TCP connections. | ||
71 | */ | ||
72 | - refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs)); | ||
73 | + refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs)); | ||
74 | isc_refcount_init(refs, 1); | ||
75 | client->pipeline_refs = refs; | ||
76 | } | ||
77 | @@ -331,13 +332,13 @@ pipeline_init(ns_client_t *client) { | ||
78 | */ | ||
79 | static void | ||
80 | pipeline_attach(ns_client_t *source, ns_client_t *target) { | ||
81 | - int old_refs; | ||
82 | + int refs; | ||
83 | |||
84 | REQUIRE(source->pipeline_refs != NULL); | ||
85 | REQUIRE(target->pipeline_refs == NULL); | ||
86 | |||
87 | - old_refs = isc_refcount_increment(source->pipeline_refs); | ||
88 | - INSIST(old_refs > 0); | ||
89 | + isc_refcount_increment(source->pipeline_refs, &refs); | ||
90 | + INSIST(refs > 1); | ||
91 | target->pipeline_refs = source->pipeline_refs; | ||
92 | } | ||
93 | |||
94 | @@ -349,25 +350,51 @@ pipeline_attach(ns_client_t *source, ns_client_t *target) { | ||
95 | */ | ||
96 | static bool | ||
97 | pipeline_detach(ns_client_t *client) { | ||
98 | - isc_refcount_t *refs; | ||
99 | - int old_refs; | ||
100 | + isc_refcount_t *refcount; | ||
101 | + int refs; | ||
102 | |||
103 | REQUIRE(client->pipeline_refs != NULL); | ||
104 | |||
105 | - refs = client->pipeline_refs; | ||
106 | + refcount = client->pipeline_refs; | ||
107 | client->pipeline_refs = NULL; | ||
108 | |||
109 | - old_refs = isc_refcount_decrement(refs); | ||
110 | - INSIST(old_refs > 0); | ||
111 | + isc_refcount_decrement(refcount, refs); | ||
112 | |||
113 | - if (old_refs == 1) { | ||
114 | - isc_mem_free(client->sctx->mctx, refs); | ||
115 | + if (refs == 0) { | ||
116 | + isc_mem_free(ns_g_mctx, refs); | ||
117 | return (true); | ||
118 | } | ||
119 | |||
120 | return (false); | ||
121 | } | ||
122 | |||
123 | +/* | ||
124 | + * Detach a client from the TCP client quota if appropriate, and set | ||
125 | + * the quota pointer to NULL. | ||
126 | + * | ||
127 | + * Sometimes when the TCP client quota is exhausted but there are no other | ||
128 | + * clients servicing the interface, a client will be allowed to continue | ||
129 | + * running despite not having been attached to the quota. In this event, | ||
130 | + * the TCP quota was never attached to the client, so when the client (or | ||
131 | + * associated pipeline group) shuts down, the quota must NOT be detached. | ||
132 | + * | ||
133 | + * Otherwise, if the quota pointer is set, it should be detached. If not | ||
134 | + * set at all, we just return without doing anything. | ||
135 | + */ | ||
136 | +static void | ||
137 | +tcpquota_disconnect(ns_client_t *client) { | ||
138 | + if (client->tcpquota == NULL) { | ||
139 | + return; | ||
140 | + } | ||
141 | + | ||
142 | + if (client->tcpattached) { | ||
143 | + isc_quota_detach(&client->tcpquota); | ||
144 | + client->tcpattached = false; | ||
145 | + } else { | ||
146 | + client->tcpquota = NULL; | ||
147 | + } | ||
148 | +} | ||
149 | + | ||
150 | /*% | ||
151 | * Check for a deactivation or shutdown request and take appropriate | ||
152 | * action. Returns true if either is in progress; in this case | ||
153 | @@ -490,38 +517,31 @@ exit_check(ns_client_t *client) { | ||
154 | client->tcpmsg_valid = false; | ||
155 | } | ||
156 | |||
157 | - if (client->tcpquota != NULL) { | ||
158 | - if (client->pipeline_refs == NULL || | ||
159 | - pipeline_detach(client)) | ||
160 | - { | ||
161 | - /* | ||
162 | - * Only detach from the TCP client quota if | ||
163 | - * there are no more client structures using | ||
164 | - * this TCP connection. | ||
165 | - * | ||
166 | - * Note that we check 'pipeline_refs' and not | ||
167 | - * 'pipelined' because in some cases (e.g. | ||
168 | - * after receiving a request with an opcode | ||
169 | - * different than QUERY) 'pipelined' is set to | ||
170 | - * false after the reference counter gets | ||
171 | - * allocated in pipeline_init() and we must | ||
172 | - * still drop our reference as failing to do so | ||
173 | - * would prevent the reference counter itself | ||
174 | - * from being freed. | ||
175 | - */ | ||
176 | - isc_quota_detach(&client->tcpquota); | ||
177 | - } else { | ||
178 | - /* | ||
179 | - * There are other client structures using this | ||
180 | - * TCP connection, so we cannot detach from the | ||
181 | - * TCP client quota to prevent excess TCP | ||
182 | - * connections from being accepted. However, | ||
183 | - * this client structure might later be reused | ||
184 | - * for accepting new connections and thus must | ||
185 | - * have its 'tcpquota' field set to NULL. | ||
186 | - */ | ||
187 | - client->tcpquota = NULL; | ||
188 | - } | ||
189 | + /* | ||
190 | + * Detach from pipeline group and from TCP client quota, | ||
191 | + * if appropriate. | ||
192 | + * | ||
193 | + * - If no pipeline group is active, attempt to | ||
194 | + * detach from the TCP client quota. | ||
195 | + * | ||
196 | + * - If a pipeline group is active, detach from it; | ||
197 | + * if the return code indicates that there no more | ||
198 | + * clients left if this pipeline group, we also detach | ||
199 | + * from the TCP client quota. | ||
200 | + * | ||
201 | + * - Otherwise we don't try to detach, we just set the | ||
202 | + * TCP quota pointer to NULL if it wasn't NULL already. | ||
203 | + * | ||
204 | + * tcpquota_disconnect() will set tcpquota to NULL, either | ||
205 | + * by detaching it or by assignment, depending on the | ||
206 | + * needs of the client. See the comments on that function | ||
207 | + * for further information. | ||
208 | + */ | ||
209 | + if (client->pipeline_refs == NULL || pipeline_detach(client)) { | ||
210 | + tcpquota_disconnect(client); | ||
211 | + } else { | ||
212 | + client->tcpquota = NULL; | ||
213 | + client->tcpattached = false; | ||
214 | } | ||
215 | |||
216 | if (client->tcpsocket != NULL) { | ||
217 | @@ -544,8 +564,6 @@ exit_check(ns_client_t *client) { | ||
218 | client->timerset = false; | ||
219 | } | ||
220 | |||
221 | - client->pipelined = false; | ||
222 | - | ||
223 | client->peeraddr_valid = false; | ||
224 | |||
225 | client->state = NS_CLIENTSTATE_READY; | ||
226 | @@ -558,18 +576,27 @@ exit_check(ns_client_t *client) { | ||
227 | * active and force it to go inactive if not. | ||
228 | * | ||
229 | * UDP clients go inactive at this point, but a TCP client | ||
230 | - * will needs to remain active if no other clients are | ||
231 | - * listening for TCP requests on this interface, to | ||
232 | - * prevent this interface from going nonresponsive. | ||
233 | + * may need to remain active and go into ready state if | ||
234 | + * no other clients are available to listen for TCP | ||
235 | + * requests on this interface or (in the case of pipelined | ||
236 | + * clients) to read for additional messages on the current | ||
237 | + * connection. | ||
238 | */ | ||
239 | if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) { | ||
240 | LOCK(&client->interface->lock); | ||
241 | - if (client->interface->ntcpaccepting == 0) { | ||
242 | + if ((client->interface->ntcpaccepting == 0 || | ||
243 | + (client->pipelined && | ||
244 | + client->interface->ntcpactive < 2)) && | ||
245 | + client->newstate != NS_CLIENTSTATE_FREED) | ||
246 | + { | ||
247 | client->mortal = false; | ||
248 | + client->newstate = NS_CLIENTSTATE_READY; | ||
249 | } | ||
250 | UNLOCK(&client->interface->lock); | ||
251 | } | ||
252 | |||
253 | + client->pipelined = false; | ||
254 | + | ||
255 | /* | ||
256 | * We don't need the client; send it to the inactive | ||
257 | * queue for recycling. | ||
258 | @@ -2634,6 +2661,18 @@ client_request(isc_task_t *task, isc_event_t *event) { | ||
259 | client->pipelined = false; | ||
260 | } | ||
261 | if (TCP_CLIENT(client) && client->pipelined) { | ||
262 | + /* | ||
263 | + * We're pipelining. Replace the client; the | ||
264 | + * the replacement can read the TCP socket looking | ||
265 | + * for new messages and this client can process the | ||
266 | + * current message asynchronously. | ||
267 | + * | ||
268 | + * There are now at least three clients using this | ||
269 | + * TCP socket - one accepting new connections, | ||
270 | + * one reading an existing connection to get new | ||
271 | + * messages, and one answering the message already | ||
272 | + * received. | ||
273 | + */ | ||
274 | result = ns_client_replace(client); | ||
275 | if (result != ISC_R_SUCCESS) { | ||
276 | client->pipelined = false; | ||
277 | @@ -3197,6 +3236,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) { | ||
278 | client->pipelined = false; | ||
279 | client->pipeline_refs = NULL; | ||
280 | client->tcpquota = NULL; | ||
281 | + client->tcpattached = false; | ||
282 | client->recursionquota = NULL; | ||
283 | client->interface = NULL; | ||
284 | client->peeraddr_valid = false; | ||
285 | @@ -3359,9 +3399,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) { | ||
286 | NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3), | ||
287 | "accept failed: %s", | ||
288 | isc_result_totext(nevent->result)); | ||
289 | - if (client->tcpquota != NULL) { | ||
290 | - isc_quota_detach(&client->tcpquota); | ||
291 | - } | ||
292 | + tcpquota_disconnect(client); | ||
293 | } | ||
294 | |||
295 | if (exit_check(client)) | ||
296 | @@ -3402,7 +3440,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) { | ||
297 | client->pipelined = false; | ||
298 | result = ns_client_replace(client); | ||
299 | if (result == ISC_R_SUCCESS && | ||
300 | - (client->sctx->keepresporder == NULL || | ||
301 | + (ns_g_server->keepresporder == NULL || | ||
302 | !allowed(&netaddr, NULL, NULL, 0, NULL, | ||
303 | ns_g_server->keepresporder))) | ||
304 | { | ||
305 | @@ -3429,7 +3467,7 @@ client_accept(ns_client_t *client) { | ||
306 | * in named.conf. If we can't attach to it here, that means the TCP | ||
307 | * client quota has been exceeded. | ||
308 | */ | ||
309 | - result = isc_quota_attach(&client->sctx->tcpquota, | ||
310 | + result = isc_quota_attach(&ns_g_server->tcpquota, | ||
311 | &client->tcpquota); | ||
312 | if (result != ISC_R_SUCCESS) { | ||
313 | bool exit; | ||
314 | @@ -3447,27 +3485,27 @@ client_accept(ns_client_t *client) { | ||
315 | * interface to be starved, with no clients able | ||
316 | * to accept new connections. | ||
317 | * | ||
318 | - * So, we check here to see if any other client | ||
319 | - * is already servicing TCP queries on this | ||
320 | + * So, we check here to see if any other clients | ||
321 | + * are already servicing TCP queries on this | ||
322 | * interface (whether accepting, reading, or | ||
323 | - * processing). | ||
324 | - * | ||
325 | - * If so, then it's okay *not* to call | ||
326 | - * accept - we can let this client to go inactive | ||
327 | - * and the other one handle the next connection | ||
328 | - * when it's ready. | ||
329 | + * processing). If there are at least two | ||
330 | + * (one reading and one processing a request) | ||
331 | + * then it's okay *not* to call accept - we | ||
332 | + * can let this client go inactive and another | ||
333 | + * one will resume accepting when it's done. | ||
334 | * | ||
335 | - * But if not, then we need to be a little bit | ||
336 | - * flexible about the quota. We allow *one* extra | ||
337 | - * TCP client through, to ensure we're listening on | ||
338 | - * every interface. | ||
339 | + * If there aren't enough active clients on the | ||
340 | + * interface, then we can be a little bit | ||
341 | + * flexible about the quota. We'll allow *one* | ||
342 | + * extra client through to ensure we're listening | ||
343 | + * on every interface. | ||
344 | * | ||
345 | - * (Note: In practice this means that the *real* | ||
346 | - * TCP client quota is tcp-clients plus the number | ||
347 | - * of interfaces.) | ||
348 | + * (Note: In practice this means that the real | ||
349 | + * TCP client quota is tcp-clients plus the | ||
350 | + * number of listening interfaces plus 2.) | ||
351 | */ | ||
352 | LOCK(&client->interface->lock); | ||
353 | - exit = (client->interface->ntcpactive > 0); | ||
354 | + exit = (client->interface->ntcpactive > 1); | ||
355 | UNLOCK(&client->interface->lock); | ||
356 | |||
357 | if (exit) { | ||
358 | @@ -3475,6 +3513,9 @@ client_accept(ns_client_t *client) { | ||
359 | (void)exit_check(client); | ||
360 | return; | ||
361 | } | ||
362 | + | ||
363 | + } else { | ||
364 | + client->tcpattached = true; | ||
365 | } | ||
366 | |||
367 | /* | ||
368 | @@ -3507,9 +3548,16 @@ client_accept(ns_client_t *client) { | ||
369 | UNEXPECTED_ERROR(__FILE__, __LINE__, | ||
370 | "isc_socket_accept() failed: %s", | ||
371 | isc_result_totext(result)); | ||
372 | - if (client->tcpquota != NULL) { | ||
373 | - isc_quota_detach(&client->tcpquota); | ||
374 | + | ||
375 | + tcpquota_disconnect(client); | ||
376 | + | ||
377 | + if (client->tcpactive) { | ||
378 | + LOCK(&client->interface->lock); | ||
379 | + client->interface->ntcpactive--; | ||
380 | + UNLOCK(&client->interface->lock); | ||
381 | + client->tcpactive = false; | ||
382 | } | ||
383 | + | ||
384 | return; | ||
385 | } | ||
386 | |||
387 | @@ -3527,13 +3575,12 @@ client_accept(ns_client_t *client) { | ||
388 | * once the connection is established. | ||
389 | * | ||
390 | * When the client object is shutting down after handling a TCP | ||
391 | - * request (see exit_check()), it looks to see whether this value is | ||
392 | - * non-zero. If so, that means another client has already called | ||
393 | - * accept() and is waiting to establish the next connection, which | ||
394 | - * means the first client is free to go inactive. Otherwise, | ||
395 | - * the first client must come back and call accept() again; this | ||
396 | - * guarantees there will always be at least one client listening | ||
397 | - * for new TCP connections on each interface. | ||
398 | + * request (see exit_check()), if this value is at least one, that | ||
399 | + * means another client has called accept() and is waiting to | ||
400 | + * establish the next connection. That means the client may be | ||
401 | + * be free to become inactive; otherwise it may need to start | ||
402 | + * listening for connections itself to prevent the interface | ||
403 | + * going dead. | ||
404 | */ | ||
405 | LOCK(&client->interface->lock); | ||
406 | client->interface->ntcpaccepting++; | ||
407 | @@ -3613,19 +3660,19 @@ ns_client_replace(ns_client_t *client) { | ||
408 | client->tcpsocket, client); | ||
409 | } else { | ||
410 | result = get_client(client->manager, client->interface, | ||
411 | - client->dispatch, tcp); | ||
412 | + client->dispatch, client, tcp); | ||
413 | + | ||
414 | + /* | ||
415 | + * The responsibility for listening for new requests is hereby | ||
416 | + * transferred to the new client. Therefore, the old client | ||
417 | + * should refrain from listening for any more requests. | ||
418 | + */ | ||
419 | + client->mortal = true; | ||
420 | } | ||
421 | if (result != ISC_R_SUCCESS) { | ||
422 | return (result); | ||
423 | } | ||
424 | |||
425 | - /* | ||
426 | - * The responsibility for listening for new requests is hereby | ||
427 | - * transferred to the new client. Therefore, the old client | ||
428 | - * should refrain from listening for any more requests. | ||
429 | - */ | ||
430 | - client->mortal = true; | ||
431 | - | ||
432 | return (ISC_R_SUCCESS); | ||
433 | } | ||
434 | |||
435 | @@ -3759,7 +3806,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) { | ||
436 | |||
437 | static isc_result_t | ||
438 | get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
439 | - dns_dispatch_t *disp, bool tcp) | ||
440 | + dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp) | ||
441 | { | ||
442 | isc_result_t result = ISC_R_SUCCESS; | ||
443 | isc_event_t *ev; | ||
444 | @@ -3803,6 +3850,16 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
445 | client->dscp = ifp->dscp; | ||
446 | |||
447 | if (tcp) { | ||
448 | + client->tcpattached = false; | ||
449 | + if (oldclient != NULL) { | ||
450 | + client->tcpattached = oldclient->tcpattached; | ||
451 | + } | ||
452 | + | ||
453 | + LOCK(&client->interface->lock); | ||
454 | + client->interface->ntcpactive++; | ||
455 | + UNLOCK(&client->interface->lock); | ||
456 | + client->tcpactive = true; | ||
457 | + | ||
458 | client->attributes |= NS_CLIENTATTR_TCP; | ||
459 | isc_socket_attach(ifp->tcpsocket, | ||
460 | &client->tcplistener); | ||
461 | @@ -3866,7 +3923,8 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock, | ||
462 | ns_interface_attach(ifp, &client->interface); | ||
463 | client->newstate = client->state = NS_CLIENTSTATE_WORKING; | ||
464 | INSIST(client->recursionquota == NULL); | ||
465 | - client->tcpquota = &client->sctx->tcpquota; | ||
466 | + client->tcpquota = &ns_g_server->tcpquota; | ||
467 | + client->tcpattached = oldclient->tcpattached; | ||
468 | |||
469 | client->dscp = ifp->dscp; | ||
470 | |||
471 | @@ -3885,7 +3943,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock, | ||
472 | LOCK(&client->interface->lock); | ||
473 | client->interface->ntcpactive++; | ||
474 | UNLOCK(&client->interface->lock); | ||
475 | - | ||
476 | client->tcpactive = true; | ||
477 | |||
478 | INSIST(client->tcpmsg_valid == false); | ||
479 | @@ -3913,7 +3970,8 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n, | ||
480 | MTRACE("createclients"); | ||
481 | |||
482 | for (disp = 0; disp < n; disp++) { | ||
483 | - result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp); | ||
484 | + result = get_client(manager, ifp, ifp->udpdispatch[disp], | ||
485 | + NULL, tcp); | ||
486 | if (result != ISC_R_SUCCESS) | ||
487 | break; | ||
488 | } | ||
489 | diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h | ||
490 | index aeed9ccdda..e2c40acd28 100644 | ||
491 | --- a/bin/named/include/named/client.h | ||
492 | +++ b/bin/named/include/named/client.h | ||
493 | @@ -9,8 +9,6 @@ | ||
494 | * information regarding copyright ownership. | ||
495 | */ | ||
496 | |||
497 | -/* $Id: client.h,v 1.96 2012/01/31 23:47:31 tbox Exp $ */ | ||
498 | - | ||
499 | #ifndef NAMED_CLIENT_H | ||
500 | #define NAMED_CLIENT_H 1 | ||
501 | |||
502 | @@ -136,6 +134,7 @@ struct ns_client { | ||
503 | bool pipelined; /*%< TCP queries not in sequence */ | ||
504 | isc_refcount_t *pipeline_refs; | ||
505 | isc_quota_t *tcpquota; | ||
506 | + bool tcpattached; | ||
507 | isc_quota_t *recursionquota; | ||
508 | ns_interface_t *interface; | ||
509 | |||
510 | -- | ||
511 | 2.20.1 | ||
512 | |||
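Editor's note: the pivotal helper in the patch above is tcpquota_disconnect(), whose body is only visible in fragments here (its tail at the top of this listing, the rest as removed lines in the next patch). The sketch below is reconstructed from those fragments for reference; the code follows the diff, the comments are editorial:

    static void
    tcpquota_disconnect(ns_client_t *client) {
            if (client->tcpquota == NULL) {
                    return;
            }

            if (client->tcpattached) {
                    /* This client holds a real quota reference: release it. */
                    isc_quota_detach(&client->tcpquota);
                    client->tcpattached = false;
            } else {
                    /*
                     * The quota pointer was only borrowed (e.g. the quota
                     * was exceeded but the client was let through anyway),
                     * so clear the pointer without detaching.
                     */
                    client->tcpquota = NULL;
            }
    }

exit_check() reaches this only when there is no pipeline group, or when pipeline_detach() reports that the last user of the connection is gone, so each TCP connection gives back at most one quota slot.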
diff --git a/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch b/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch new file mode 100644 index 0000000000..987e75bc0e --- /dev/null +++ b/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch | |||
@@ -0,0 +1,911 @@ | |||
1 | Backport patch to fix CVE-2018-5743. | ||
2 | |||
3 | Ref: | ||
4 | https://security-tracker.debian.org/tracker/CVE-2018-5743 | ||
5 | |||
6 | CVE: CVE-2018-5743 | ||
7 | Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/c47ccf6] | ||
8 | |||
9 | Signed-off-by: Kai Kang <kai.kang@windriver.com> | ||
10 | |||
11 | From c47ccf630f147378568b33e8fdb7b754f228c346 Mon Sep 17 00:00:00 2001 | ||
12 | From: Evan Hunt <each@isc.org> | ||
13 | Date: Fri, 5 Apr 2019 16:26:05 -0700 | ||
14 | Subject: [PATCH 5/6] refactor tcpquota and pipeline refs; allow special-case | ||
15 | overrun in isc_quota | ||
16 | |||
17 | - if the TCP quota has been exceeded but there are no clients listening | ||
18 | for new connections on the interface, we can now force attachment to the | ||
19 | quota using isc_quota_force(), instead of carrying on with the quota not | ||
20 | attached. | ||
21 | - the TCP client quota is now referenced via a reference-counted | ||
22 | 'ns_tcpconn' object, one of which is created whenever a client begins | ||
23 | listening for new connections, and attached to by members of that | ||
24 | client's pipeline group. when the last reference to the tcpconn | ||
25 | object is detached, it is freed and the TCP quota slot is released. | ||
26 | - reduce code duplication by adding mark_tcp_active() function. | ||
27 | - convert counters to atomic. | ||
28 | |||
29 | (cherry picked from commit 7e8222378ca24f1302a0c1c638565050ab04681b) | ||
30 | (cherry picked from commit 4939451275722bfda490ea86ca13e84f6bc71e46) | ||
31 | (cherry picked from commit 13f7c918b8720d890408f678bd73c20e634539d9) | ||
32 | --- | ||
33 | bin/named/client.c | 444 +++++++++++-------------- | ||
34 | bin/named/include/named/client.h | 12 +- | ||
35 | bin/named/include/named/interfacemgr.h | 6 +- | ||
36 | bin/named/interfacemgr.c | 1 + | ||
37 | lib/isc/include/isc/quota.h | 7 + | ||
38 | lib/isc/quota.c | 33 +- | ||
39 | lib/isc/win32/libisc.def.in | 1 + | ||
40 | 7 files changed, 236 insertions(+), 268 deletions(-) | ||
41 | |||
42 | diff --git a/bin/named/client.c b/bin/named/client.c | ||
43 | index 61e96dd28c..d826ab32bf 100644 | ||
44 | --- a/bin/named/client.c | ||
45 | +++ b/bin/named/client.c | ||
46 | @@ -244,8 +244,7 @@ static void client_start(isc_task_t *task, isc_event_t *event); | ||
47 | static void client_request(isc_task_t *task, isc_event_t *event); | ||
48 | static void ns_client_dumpmessage(ns_client_t *client, const char *reason); | ||
49 | static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
50 | - dns_dispatch_t *disp, ns_client_t *oldclient, | ||
51 | - bool tcp); | ||
52 | + dns_dispatch_t *disp, bool tcp); | ||
53 | static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
54 | isc_socket_t *sock, ns_client_t *oldclient); | ||
55 | static inline bool | ||
56 | @@ -301,16 +300,32 @@ ns_client_settimeout(ns_client_t *client, unsigned int seconds) { | ||
57 | } | ||
58 | |||
59 | /*% | ||
60 | - * Allocate a reference counter that will track the number of client structures | ||
61 | - * using the TCP connection that 'client' called accept() for. This counter | ||
62 | - * will be shared between all client structures associated with this TCP | ||
63 | - * connection. | ||
64 | + * Allocate a reference-counted object that will maintain a single pointer to | ||
65 | + * the (also reference-counted) TCP client quota, shared between all the | ||
66 | + * clients processing queries on a single TCP connection, so that all | ||
67 | + * clients sharing the one socket will together consume only one slot in | ||
68 | + * the 'tcp-clients' quota. | ||
69 | */ | ||
70 | -static void | ||
71 | -pipeline_init(ns_client_t *client) { | ||
72 | - isc_refcount_t *refs; | ||
73 | +static isc_result_t | ||
74 | +tcpconn_init(ns_client_t *client, bool force) { | ||
75 | + isc_result_t result; | ||
76 | + isc_quota_t *quota = NULL; | ||
77 | + ns_tcpconn_t *tconn = NULL; | ||
78 | |||
79 | - REQUIRE(client->pipeline_refs == NULL); | ||
80 | + REQUIRE(client->tcpconn == NULL); | ||
81 | + | ||
82 | + /* | ||
83 | + * Try to attach to the quota first, so we won't pointlessly | ||
84 | + * allocate memory for a tcpconn object if we can't get one. | ||
85 | + */ | ||
86 | + if (force) { | ||
87 | + result = isc_quota_force(&ns_g_server->tcpquota, &quota); | ||
88 | + } else { | ||
89 | + result = isc_quota_attach(&ns_g_server->tcpquota, &quota); | ||
90 | + } | ||
91 | + if (result != ISC_R_SUCCESS) { | ||
92 | + return (result); | ||
93 | + } | ||
94 | |||
95 | /* | ||
96 | * A global memory context is used for the allocation as different | ||
97 | @@ -320,78 +335,80 @@ pipeline_init(ns_client_t *client) { | ||
98 | * contention here is expected to be negligible, given that this code | ||
99 | * is only executed for TCP connections. | ||
100 | */ | ||
101 | - refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs)); | ||
102 | - isc_refcount_init(refs, 1); | ||
103 | - client->pipeline_refs = refs; | ||
104 | + tconn = isc_mem_allocate(ns_g_mctx, sizeof(*tconn)); | ||
105 | + | ||
106 | + isc_refcount_init(&tconn->refs, 1); | ||
107 | + tconn->tcpquota = quota; | ||
108 | + quota = NULL; | ||
109 | + tconn->pipelined = false; | ||
110 | + | ||
111 | + client->tcpconn = tconn; | ||
112 | + | ||
113 | + return (ISC_R_SUCCESS); | ||
114 | } | ||
115 | |||
116 | /*% | ||
117 | - * Increase the count of client structures using the TCP connection that | ||
118 | - * 'source' is associated with and put a pointer to that count in 'target', | ||
119 | - * thus associating it with the same TCP connection. | ||
120 | + * Increase the count of client structures sharing the TCP connection | ||
121 | + * that 'source' is associated with; add a pointer to the same tcpconn | ||
122 | + * to 'target', thus associating it with the same TCP connection. | ||
123 | */ | ||
124 | static void | ||
125 | -pipeline_attach(ns_client_t *source, ns_client_t *target) { | ||
126 | +tcpconn_attach(ns_client_t *source, ns_client_t *target) { | ||
127 | int refs; | ||
128 | |||
129 | - REQUIRE(source->pipeline_refs != NULL); | ||
130 | - REQUIRE(target->pipeline_refs == NULL); | ||
131 | + REQUIRE(source->tcpconn != NULL); | ||
132 | + REQUIRE(target->tcpconn == NULL); | ||
133 | + REQUIRE(source->tcpconn->pipelined); | ||
134 | |||
135 | - isc_refcount_increment(source->pipeline_refs, &refs); | ||
136 | + isc_refcount_increment(&source->tcpconn->refs, &refs); | ||
137 | INSIST(refs > 1); | ||
138 | - target->pipeline_refs = source->pipeline_refs; | ||
139 | + target->tcpconn = source->tcpconn; | ||
140 | } | ||
141 | |||
142 | /*% | ||
143 | - * Decrease the count of client structures using the TCP connection that | ||
144 | + * Decrease the count of client structures sharing the TCP connection that | ||
145 | * 'client' is associated with. If this is the last client using this TCP | ||
146 | - * connection, free the reference counter and return true; otherwise, return | ||
147 | - * false. | ||
148 | + * connection, we detach from the TCP quota and free the tcpconn | ||
149 | + * object. Either way, client->tcpconn is set to NULL. | ||
150 | */ | ||
151 | -static bool | ||
152 | -pipeline_detach(ns_client_t *client) { | ||
153 | - isc_refcount_t *refcount; | ||
154 | +static void | ||
155 | +tcpconn_detach(ns_client_t *client) { | ||
156 | + ns_tcpconn_t *tconn = NULL; | ||
157 | int refs; | ||
158 | |||
159 | - REQUIRE(client->pipeline_refs != NULL); | ||
160 | - | ||
161 | - refcount = client->pipeline_refs; | ||
162 | - client->pipeline_refs = NULL; | ||
163 | + REQUIRE(client->tcpconn != NULL); | ||
164 | |||
165 | - isc_refcount_decrement(refcount, refs); | ||
166 | + tconn = client->tcpconn; | ||
167 | + client->tcpconn = NULL; | ||
168 | |||
169 | + isc_refcount_decrement(&tconn->refs, &refs); | ||
170 | if (refs == 0) { | ||
171 | - isc_mem_free(ns_g_mctx, refs); | ||
172 | - return (true); | ||
173 | + isc_quota_detach(&tconn->tcpquota); | ||
174 | + isc_mem_free(ns_g_mctx, tconn); | ||
175 | } | ||
176 | - | ||
177 | - return (false); | ||
178 | } | ||
179 | |||
180 | -/* | ||
181 | - * Detach a client from the TCP client quota if appropriate, and set | ||
182 | - * the quota pointer to NULL. | ||
183 | - * | ||
184 | - * Sometimes when the TCP client quota is exhausted but there are no other | ||
185 | - * clients servicing the interface, a client will be allowed to continue | ||
186 | - * running despite not having been attached to the quota. In this event, | ||
187 | - * the TCP quota was never attached to the client, so when the client (or | ||
188 | - * associated pipeline group) shuts down, the quota must NOT be detached. | ||
189 | +/*% | ||
190 | + * Mark a client as active and increment the interface's 'ntcpactive' | ||
191 | + * counter, as a signal that there is at least one client servicing | ||
192 | + * TCP queries for the interface. If we reach the TCP client quota at | ||
193 | + * some point, this will be used to determine whether a quota overrun | ||
194 | + * should be permitted. | ||
195 | * | ||
196 | - * Otherwise, if the quota pointer is set, it should be detached. If not | ||
197 | - * set at all, we just return without doing anything. | ||
198 | + * Marking the client active with the 'tcpactive' flag ensures proper | ||
199 | + * accounting, by preventing us from incrementing or decrementing | ||
200 | + * 'ntcpactive' more than once per client. | ||
201 | */ | ||
202 | static void | ||
203 | -tcpquota_disconnect(ns_client_t *client) { | ||
204 | - if (client->tcpquota == NULL) { | ||
205 | - return; | ||
206 | - } | ||
207 | - | ||
208 | - if (client->tcpattached) { | ||
209 | - isc_quota_detach(&client->tcpquota); | ||
210 | - client->tcpattached = false; | ||
211 | - } else { | ||
212 | - client->tcpquota = NULL; | ||
213 | +mark_tcp_active(ns_client_t *client, bool active) { | ||
214 | + if (active && !client->tcpactive) { | ||
215 | + isc_atomic_xadd(&client->interface->ntcpactive, 1); | ||
216 | + client->tcpactive = active; | ||
217 | + } else if (!active && client->tcpactive) { | ||
218 | + uint32_t old = | ||
219 | + isc_atomic_xadd(&client->interface->ntcpactive, -1); | ||
220 | + INSIST(old > 0); | ||
221 | + client->tcpactive = active; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | @@ -484,7 +501,8 @@ exit_check(ns_client_t *client) { | ||
226 | INSIST(client->recursionquota == NULL); | ||
227 | |||
228 | if (NS_CLIENTSTATE_READING == client->newstate) { | ||
229 | - if (!client->pipelined) { | ||
230 | + INSIST(client->tcpconn != NULL); | ||
231 | + if (!client->tcpconn->pipelined) { | ||
232 | client_read(client); | ||
233 | client->newstate = NS_CLIENTSTATE_MAX; | ||
234 | return (true); /* We're done. */ | ||
235 | @@ -507,8 +525,8 @@ exit_check(ns_client_t *client) { | ||
236 | dns_tcpmsg_cancelread(&client->tcpmsg); | ||
237 | } | ||
238 | |||
239 | - if (client->nreads != 0) { | ||
240 | - /* Still waiting for read cancel completion. */ | ||
241 | + /* Still waiting for read cancel completion. */ | ||
242 | + if (client->nreads > 0) { | ||
243 | return (true); | ||
244 | } | ||
245 | |||
246 | @@ -518,43 +536,45 @@ exit_check(ns_client_t *client) { | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | - * Detach from pipeline group and from TCP client quota, | ||
251 | - * if appropriate. | ||
252 | + * Soon the client will be ready to accept a new TCP | ||
253 | + * connection or UDP request, but we may have enough | ||
254 | + * clients doing that already. Check whether this client | ||
255 | + * needs to remain active and allow it to go inactive if | ||
256 | + * not. | ||
257 | * | ||
258 | - * - If no pipeline group is active, attempt to | ||
259 | - * detach from the TCP client quota. | ||
260 | + * UDP clients always go inactive at this point, but a TCP | ||
261 | + * client may need to stay active and return to READY | ||
262 | + * state if no other clients are available to listen | ||
263 | + * for TCP requests on this interface. | ||
264 | * | ||
265 | - * - If a pipeline group is active, detach from it; | ||
266 | - * if the return code indicates that there are no more | ||
267 | - * clients left in this pipeline group, we also detach | ||
268 | - * from the TCP client quota. | ||
269 | - * | ||
270 | - * - Otherwise we don't try to detach, we just set the | ||
271 | - * TCP quota pointer to NULL if it wasn't NULL already. | ||
272 | - * | ||
273 | - * tcpquota_disconnect() will set tcpquota to NULL, either | ||
274 | - * by detaching it or by assignment, depending on the | ||
275 | - * needs of the client. See the comments on that function | ||
276 | - * for further information. | ||
277 | + * Regardless, if we're going to FREED state, that means | ||
278 | + * the system is shutting down and we don't need to | ||
279 | + * retain clients. | ||
280 | */ | ||
281 | - if (client->pipeline_refs == NULL || pipeline_detach(client)) { | ||
282 | - tcpquota_disconnect(client); | ||
283 | - } else { | ||
284 | - client->tcpquota = NULL; | ||
285 | - client->tcpattached = false; | ||
286 | + if (client->mortal && TCP_CLIENT(client) && | ||
287 | + client->newstate != NS_CLIENTSTATE_FREED && | ||
288 | + !ns_g_clienttest && | ||
289 | + isc_atomic_xadd(&client->interface->ntcpaccepting, 0) == 0) | ||
290 | + { | ||
291 | + /* Nobody else is accepting */ | ||
292 | + client->mortal = false; | ||
293 | + client->newstate = NS_CLIENTSTATE_READY; | ||
294 | + } | ||
295 | + | ||
296 | + /* | ||
297 | + * Detach from TCP connection and TCP client quota, | ||
298 | + * if appropriate. If this is the last reference to | ||
299 | + * the TCP connection in our pipeline group, the | ||
300 | + * TCP quota slot will be released. | ||
301 | + */ | ||
302 | + if (client->tcpconn) { | ||
303 | + tcpconn_detach(client); | ||
304 | } | ||
305 | |||
306 | if (client->tcpsocket != NULL) { | ||
307 | CTRACE("closetcp"); | ||
308 | isc_socket_detach(&client->tcpsocket); | ||
309 | - | ||
310 | - if (client->tcpactive) { | ||
311 | - LOCK(&client->interface->lock); | ||
312 | - INSIST(client->interface->ntcpactive > 0); | ||
313 | - client->interface->ntcpactive--; | ||
314 | - UNLOCK(&client->interface->lock); | ||
315 | - client->tcpactive = false; | ||
316 | - } | ||
317 | + mark_tcp_active(client, false); | ||
318 | } | ||
319 | |||
320 | if (client->timerset) { | ||
321 | @@ -567,35 +587,6 @@ exit_check(ns_client_t *client) { | ||
322 | client->peeraddr_valid = false; | ||
323 | |||
324 | client->state = NS_CLIENTSTATE_READY; | ||
325 | - INSIST(client->recursionquota == NULL); | ||
326 | - | ||
327 | - /* | ||
328 | - * Now the client is ready to accept a new TCP connection | ||
329 | - * or UDP request, but we may have enough clients doing | ||
330 | - * that already. Check whether this client needs to remain | ||
331 | - * active and force it to go inactive if not. | ||
332 | - * | ||
333 | - * UDP clients go inactive at this point, but a TCP client | ||
334 | - * may need to remain active and go into ready state if | ||
335 | - * no other clients are available to listen for TCP | ||
336 | - * requests on this interface or (in the case of pipelined | ||
337 | - * clients) to read for additional messages on the current | ||
338 | - * connection. | ||
339 | - */ | ||
340 | - if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) { | ||
341 | - LOCK(&client->interface->lock); | ||
342 | - if ((client->interface->ntcpaccepting == 0 || | ||
343 | - (client->pipelined && | ||
344 | - client->interface->ntcpactive < 2)) && | ||
345 | - client->newstate != NS_CLIENTSTATE_FREED) | ||
346 | - { | ||
347 | - client->mortal = false; | ||
348 | - client->newstate = NS_CLIENTSTATE_READY; | ||
349 | - } | ||
350 | - UNLOCK(&client->interface->lock); | ||
351 | - } | ||
352 | - | ||
353 | - client->pipelined = false; | ||
354 | |||
355 | /* | ||
356 | * We don't need the client; send it to the inactive | ||
357 | @@ -630,7 +621,7 @@ exit_check(ns_client_t *client) { | ||
358 | } | ||
359 | |||
360 | /* Still waiting for accept cancel completion. */ | ||
361 | - if (! (client->naccepts == 0)) { | ||
362 | + if (client->naccepts > 0) { | ||
363 | return (true); | ||
364 | } | ||
365 | |||
366 | @@ -641,7 +632,7 @@ exit_check(ns_client_t *client) { | ||
367 | } | ||
368 | |||
369 | /* Still waiting for recv cancel completion. */ | ||
370 | - if (! (client->nrecvs == 0)) { | ||
371 | + if (client->nrecvs > 0) { | ||
372 | return (true); | ||
373 | } | ||
374 | |||
375 | @@ -654,14 +645,7 @@ exit_check(ns_client_t *client) { | ||
376 | INSIST(client->recursionquota == NULL); | ||
377 | if (client->tcplistener != NULL) { | ||
378 | isc_socket_detach(&client->tcplistener); | ||
379 | - | ||
380 | - if (client->tcpactive) { | ||
381 | - LOCK(&client->interface->lock); | ||
382 | - INSIST(client->interface->ntcpactive > 0); | ||
383 | - client->interface->ntcpactive--; | ||
384 | - UNLOCK(&client->interface->lock); | ||
385 | - client->tcpactive = false; | ||
386 | - } | ||
387 | + mark_tcp_active(client, false); | ||
388 | } | ||
389 | if (client->udpsocket != NULL) { | ||
390 | isc_socket_detach(&client->udpsocket); | ||
391 | @@ -816,7 +800,7 @@ client_start(isc_task_t *task, isc_event_t *event) { | ||
392 | return; | ||
393 | |||
394 | if (TCP_CLIENT(client)) { | ||
395 | - if (client->pipelined) { | ||
396 | + if (client->tcpconn != NULL) { | ||
397 | client_read(client); | ||
398 | } else { | ||
399 | client_accept(client); | ||
400 | @@ -2470,6 +2454,7 @@ client_request(isc_task_t *task, isc_event_t *event) { | ||
401 | client->nrecvs--; | ||
402 | } else { | ||
403 | INSIST(TCP_CLIENT(client)); | ||
404 | + INSIST(client->tcpconn != NULL); | ||
405 | REQUIRE(event->ev_type == DNS_EVENT_TCPMSG); | ||
406 | REQUIRE(event->ev_sender == &client->tcpmsg); | ||
407 | buffer = &client->tcpmsg.buffer; | ||
408 | @@ -2657,17 +2642,19 @@ client_request(isc_task_t *task, isc_event_t *event) { | ||
409 | /* | ||
410 | * Pipeline TCP query processing. | ||
411 | */ | ||
412 | - if (client->message->opcode != dns_opcode_query) { | ||
413 | - client->pipelined = false; | ||
414 | + if (TCP_CLIENT(client) && | ||
415 | + client->message->opcode != dns_opcode_query) | ||
416 | + { | ||
417 | + client->tcpconn->pipelined = false; | ||
418 | } | ||
419 | - if (TCP_CLIENT(client) && client->pipelined) { | ||
420 | + if (TCP_CLIENT(client) && client->tcpconn->pipelined) { | ||
421 | /* | ||
422 | * We're pipelining. Replace the client; the | ||
423 | - * the replacement can read the TCP socket looking | ||
424 | - * for new messages and this client can process the | ||
425 | + * replacement can read the TCP socket looking | ||
426 | + * for new messages and this one can process the | ||
427 | * current message asynchronously. | ||
428 | * | ||
429 | - * There are now at least three clients using this | ||
430 | + * There will now be at least three clients using this | ||
431 | * TCP socket - one accepting new connections, | ||
432 | * one reading an existing connection to get new | ||
433 | * messages, and one answering the message already | ||
434 | @@ -2675,7 +2662,7 @@ client_request(isc_task_t *task, isc_event_t *event) { | ||
435 | */ | ||
436 | result = ns_client_replace(client); | ||
437 | if (result != ISC_R_SUCCESS) { | ||
438 | - client->pipelined = false; | ||
439 | + client->tcpconn->pipelined = false; | ||
440 | } | ||
441 | } | ||
442 | |||
443 | @@ -3233,10 +3220,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) { | ||
444 | client->signer = NULL; | ||
445 | dns_name_init(&client->signername, NULL); | ||
446 | client->mortal = false; | ||
447 | - client->pipelined = false; | ||
448 | - client->pipeline_refs = NULL; | ||
449 | - client->tcpquota = NULL; | ||
450 | - client->tcpattached = false; | ||
451 | + client->tcpconn = NULL; | ||
452 | client->recursionquota = NULL; | ||
453 | client->interface = NULL; | ||
454 | client->peeraddr_valid = false; | ||
455 | @@ -3341,9 +3325,10 @@ client_read(ns_client_t *client) { | ||
456 | |||
457 | static void | ||
458 | client_newconn(isc_task_t *task, isc_event_t *event) { | ||
459 | + isc_result_t result; | ||
460 | ns_client_t *client = event->ev_arg; | ||
461 | isc_socket_newconnev_t *nevent = (isc_socket_newconnev_t *)event; | ||
462 | - isc_result_t result; | ||
463 | + uint32_t old; | ||
464 | |||
465 | REQUIRE(event->ev_type == ISC_SOCKEVENT_NEWCONN); | ||
466 | REQUIRE(NS_CLIENT_VALID(client)); | ||
467 | @@ -3363,10 +3348,8 @@ client_newconn(isc_task_t *task, isc_event_t *event) { | ||
468 | INSIST(client->naccepts == 1); | ||
469 | client->naccepts--; | ||
470 | |||
471 | - LOCK(&client->interface->lock); | ||
472 | - INSIST(client->interface->ntcpaccepting > 0); | ||
473 | - client->interface->ntcpaccepting--; | ||
474 | - UNLOCK(&client->interface->lock); | ||
475 | + old = isc_atomic_xadd(&client->interface->ntcpaccepting, -1); | ||
476 | + INSIST(old > 0); | ||
477 | |||
478 | /* | ||
479 | * We must take ownership of the new socket before the exit | ||
480 | @@ -3399,7 +3382,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) { | ||
481 | NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3), | ||
482 | "accept failed: %s", | ||
483 | isc_result_totext(nevent->result)); | ||
484 | - tcpquota_disconnect(client); | ||
485 | + tcpconn_detach(client); | ||
486 | } | ||
487 | |||
488 | if (exit_check(client)) | ||
489 | @@ -3437,15 +3420,13 @@ client_newconn(isc_task_t *task, isc_event_t *event) { | ||
490 | * telnetting to port 53 (once per CPU) will | ||
491 | * deny service to legitimate TCP clients. | ||
492 | */ | ||
493 | - client->pipelined = false; | ||
494 | result = ns_client_replace(client); | ||
495 | if (result == ISC_R_SUCCESS && | ||
496 | (ns_g_server->keepresporder == NULL || | ||
497 | !allowed(&netaddr, NULL, NULL, 0, NULL, | ||
498 | ns_g_server->keepresporder))) | ||
499 | { | ||
500 | - pipeline_init(client); | ||
501 | - client->pipelined = true; | ||
502 | + client->tcpconn->pipelined = true; | ||
503 | } | ||
504 | |||
505 | client_read(client); | ||
506 | @@ -3462,78 +3443,59 @@ client_accept(ns_client_t *client) { | ||
507 | CTRACE("accept"); | ||
508 | |||
509 | /* | ||
510 | - * The tcpquota object can only be simultaneously referenced a | ||
511 | - * pre-defined number of times; this is configured by 'tcp-clients' | ||
512 | - * in named.conf. If we can't attach to it here, that means the TCP | ||
513 | - * client quota has been exceeded. | ||
514 | + * Set up a new TCP connection. This means try to attach to the | ||
515 | + * TCP client quota (tcp-clients), but fail if we're over quota. | ||
516 | */ | ||
517 | - result = isc_quota_attach(&ns_g_server->tcpquota, | ||
518 | - &client->tcpquota); | ||
519 | + result = tcpconn_init(client, false); | ||
520 | if (result != ISC_R_SUCCESS) { | ||
521 | - bool exit; | ||
522 | + bool exit; | ||
523 | |||
524 | - ns_client_log(client, NS_LOGCATEGORY_CLIENT, | ||
525 | - NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1), | ||
526 | - "no more TCP clients: %s", | ||
527 | - isc_result_totext(result)); | ||
528 | - | ||
529 | - /* | ||
530 | - * We have exceeded the system-wide TCP client | ||
531 | - * quota. But, we can't just block this accept | ||
532 | - * in all cases, because if we did, a heavy TCP | ||
533 | - * load on other interfaces might cause this | ||
534 | - * interface to be starved, with no clients able | ||
535 | - * to accept new connections. | ||
536 | - * | ||
537 | - * So, we check here to see if any other clients | ||
538 | - * are already servicing TCP queries on this | ||
539 | - * interface (whether accepting, reading, or | ||
540 | - * processing). If there are at least two | ||
541 | - * (one reading and one processing a request) | ||
542 | - * then it's okay *not* to call accept - we | ||
543 | - * can let this client go inactive and another | ||
544 | - * one will resume accepting when it's done. | ||
545 | - * | ||
546 | - * If there aren't enough active clients on the | ||
547 | - * interface, then we can be a little bit | ||
548 | - * flexible about the quota. We'll allow *one* | ||
549 | - * extra client through to ensure we're listening | ||
550 | - * on every interface. | ||
551 | - * | ||
552 | - * (Note: In practice this means that the real | ||
553 | - * TCP client quota is tcp-clients plus the | ||
554 | - * number of listening interfaces plus 2.) | ||
555 | - */ | ||
556 | - LOCK(&client->interface->lock); | ||
557 | - exit = (client->interface->ntcpactive > 1); | ||
558 | - UNLOCK(&client->interface->lock); | ||
559 | + ns_client_log(client, NS_LOGCATEGORY_CLIENT, | ||
560 | + NS_LOGMODULE_CLIENT, ISC_LOG_WARNING, | ||
561 | + "TCP client quota reached: %s", | ||
562 | + isc_result_totext(result)); | ||
563 | |||
564 | - if (exit) { | ||
565 | - client->newstate = NS_CLIENTSTATE_INACTIVE; | ||
566 | - (void)exit_check(client); | ||
567 | - return; | ||
568 | - } | ||
569 | + /* | ||
570 | + * We have exceeded the system-wide TCP client quota. But, | ||
571 | + * we can't just block this accept in all cases, because if | ||
572 | + * we did, a heavy TCP load on other interfaces might cause | ||
573 | + * this interface to be starved, with no clients able to | ||
574 | + * accept new connections. | ||
575 | + * | ||
576 | + * So, we check here to see if any other clients are | ||
577 | + * already servicing TCP queries on this interface (whether | ||
578 | + * accepting, reading, or processing). If we find at least | ||
579 | + * one, then it's okay *not* to call accept - we can let this | ||
580 | + * client go inactive and another will take over when it's | ||
581 | + * done. | ||
582 | + * | ||
583 | + * If there aren't enough active clients on the interface, | ||
584 | + * then we can be a little bit flexible about the quota. | ||
585 | + * We'll allow *one* extra client through to ensure we're | ||
586 | + * listening on every interface; we do this by setting the | ||
587 | + * 'force' option to tcpconn_init(). | ||
588 | + * | ||
589 | + * (Note: In practice this means that the real TCP client | ||
590 | + * quota is tcp-clients plus the number of listening | ||
591 | + * interfaces plus 1.) | ||
592 | + */ | ||
593 | + exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > 0); | ||
594 | + if (exit) { | ||
595 | + client->newstate = NS_CLIENTSTATE_INACTIVE; | ||
596 | + (void)exit_check(client); | ||
597 | + return; | ||
598 | + } | ||
599 | |||
600 | - } else { | ||
601 | - client->tcpattached = true; | ||
602 | + result = tcpconn_init(client, true); | ||
603 | + RUNTIME_CHECK(result == ISC_R_SUCCESS); | ||
604 | } | ||
605 | |||
606 | /* | ||
607 | - * By incrementing the interface's ntcpactive counter we signal | ||
608 | - * that there is at least one client servicing TCP queries for the | ||
609 | - * interface. | ||
610 | - * | ||
611 | - * We also make note of the fact in the client itself with the | ||
612 | - * tcpactive flag. This ensures proper accounting by preventing | ||
613 | - * us from accidentally incrementing or decrementing ntcpactive | ||
614 | - * more than once per client object. | ||
615 | + * If this client was set up using get_client() or get_worker(), | ||
616 | + * then TCP is already marked active. However, if it was restarted | ||
617 | + * from exit_check(), it might not be, so we take care of it now. | ||
618 | */ | ||
619 | - if (!client->tcpactive) { | ||
620 | - LOCK(&client->interface->lock); | ||
621 | - client->interface->ntcpactive++; | ||
622 | - UNLOCK(&client->interface->lock); | ||
623 | - client->tcpactive = true; | ||
624 | - } | ||
625 | + mark_tcp_active(client, true); | ||
626 | |||
627 | result = isc_socket_accept(client->tcplistener, client->task, | ||
628 | client_newconn, client); | ||
629 | @@ -3549,15 +3511,8 @@ client_accept(ns_client_t *client) { | ||
630 | "isc_socket_accept() failed: %s", | ||
631 | isc_result_totext(result)); | ||
632 | |||
633 | - tcpquota_disconnect(client); | ||
634 | - | ||
635 | - if (client->tcpactive) { | ||
636 | - LOCK(&client->interface->lock); | ||
637 | - client->interface->ntcpactive--; | ||
638 | - UNLOCK(&client->interface->lock); | ||
639 | - client->tcpactive = false; | ||
640 | - } | ||
641 | - | ||
642 | + tcpconn_detach(client); | ||
643 | + mark_tcp_active(client, false); | ||
644 | return; | ||
645 | } | ||
646 | |||
647 | @@ -3582,9 +3537,7 @@ client_accept(ns_client_t *client) { | ||
648 | * listening for connections itself to prevent the interface | ||
649 | * going dead. | ||
650 | */ | ||
651 | - LOCK(&client->interface->lock); | ||
652 | - client->interface->ntcpaccepting++; | ||
653 | - UNLOCK(&client->interface->lock); | ||
654 | + isc_atomic_xadd(&client->interface->ntcpaccepting, 1); | ||
655 | } | ||
656 | |||
657 | static void | ||
658 | @@ -3655,24 +3608,25 @@ ns_client_replace(ns_client_t *client) { | ||
659 | REQUIRE(client->manager != NULL); | ||
660 | |||
661 | tcp = TCP_CLIENT(client); | ||
662 | - if (tcp && client->pipelined) { | ||
663 | + if (tcp && client->tcpconn != NULL && client->tcpconn->pipelined) { | ||
664 | result = get_worker(client->manager, client->interface, | ||
665 | client->tcpsocket, client); | ||
666 | } else { | ||
667 | result = get_client(client->manager, client->interface, | ||
668 | - client->dispatch, client, tcp); | ||
669 | + client->dispatch, tcp); | ||
670 | |||
671 | - /* | ||
672 | - * The responsibility for listening for new requests is hereby | ||
673 | - * transferred to the new client. Therefore, the old client | ||
674 | - * should refrain from listening for any more requests. | ||
675 | - */ | ||
676 | - client->mortal = true; | ||
677 | } | ||
678 | if (result != ISC_R_SUCCESS) { | ||
679 | return (result); | ||
680 | } | ||
681 | |||
682 | + /* | ||
683 | + * The responsibility for listening for new requests is hereby | ||
684 | + * transferred to the new client. Therefore, the old client | ||
685 | + * should refrain from listening for any more requests. | ||
686 | + */ | ||
687 | + client->mortal = true; | ||
688 | + | ||
689 | return (ISC_R_SUCCESS); | ||
690 | } | ||
691 | |||
692 | @@ -3806,7 +3760,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) { | ||
693 | |||
694 | static isc_result_t | ||
695 | get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
696 | - dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp) | ||
697 | + dns_dispatch_t *disp, bool tcp) | ||
698 | { | ||
699 | isc_result_t result = ISC_R_SUCCESS; | ||
700 | isc_event_t *ev; | ||
701 | @@ -3850,15 +3804,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, | ||
702 | client->dscp = ifp->dscp; | ||
703 | |||
704 | if (tcp) { | ||
705 | - client->tcpattached = false; | ||
706 | - if (oldclient != NULL) { | ||
707 | - client->tcpattached = oldclient->tcpattached; | ||
708 | - } | ||
709 | - | ||
710 | - LOCK(&client->interface->lock); | ||
711 | - client->interface->ntcpactive++; | ||
712 | - UNLOCK(&client->interface->lock); | ||
713 | - client->tcpactive = true; | ||
714 | + mark_tcp_active(client, true); | ||
715 | |||
716 | client->attributes |= NS_CLIENTATTR_TCP; | ||
717 | isc_socket_attach(ifp->tcpsocket, | ||
718 | @@ -3923,16 +3869,14 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock, | ||
719 | ns_interface_attach(ifp, &client->interface); | ||
720 | client->newstate = client->state = NS_CLIENTSTATE_WORKING; | ||
721 | INSIST(client->recursionquota == NULL); | ||
722 | - client->tcpquota = &ns_g_server->tcpquota; | ||
723 | - client->tcpattached = oldclient->tcpattached; | ||
724 | |||
725 | client->dscp = ifp->dscp; | ||
726 | |||
727 | client->attributes |= NS_CLIENTATTR_TCP; | ||
728 | client->mortal = true; | ||
729 | |||
730 | - pipeline_attach(oldclient, client); | ||
731 | - client->pipelined = true; | ||
732 | + tcpconn_attach(oldclient, client); | ||
733 | + mark_tcp_active(client, true); | ||
734 | |||
735 | isc_socket_attach(ifp->tcpsocket, &client->tcplistener); | ||
736 | isc_socket_attach(sock, &client->tcpsocket); | ||
737 | @@ -3940,11 +3884,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock, | ||
738 | (void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr); | ||
739 | client->peeraddr_valid = true; | ||
740 | |||
741 | - LOCK(&client->interface->lock); | ||
742 | - client->interface->ntcpactive++; | ||
743 | - UNLOCK(&client->interface->lock); | ||
744 | - client->tcpactive = true; | ||
745 | - | ||
746 | INSIST(client->tcpmsg_valid == false); | ||
747 | dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg); | ||
748 | client->tcpmsg_valid = true; | ||
749 | @@ -3970,8 +3909,7 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n, | ||
750 | MTRACE("createclients"); | ||
751 | |||
752 | for (disp = 0; disp < n; disp++) { | ||
753 | - result = get_client(manager, ifp, ifp->udpdispatch[disp], | ||
754 | - NULL, tcp); | ||
755 | + result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp); | ||
756 | if (result != ISC_R_SUCCESS) | ||
757 | break; | ||
758 | } | ||
759 | diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h | ||
760 | index e2c40acd28..969ee4c08f 100644 | ||
761 | --- a/bin/named/include/named/client.h | ||
762 | +++ b/bin/named/include/named/client.h | ||
763 | @@ -78,6 +78,13 @@ | ||
764 | *** Types | ||
765 | ***/ | ||
766 | |||
767 | +/*% reference-counted TCP connection object */ | ||
768 | +typedef struct ns_tcpconn { | ||
769 | + isc_refcount_t refs; | ||
770 | + isc_quota_t *tcpquota; | ||
771 | + bool pipelined; | ||
772 | +} ns_tcpconn_t; | ||
773 | + | ||
774 | /*% nameserver client structure */ | ||
775 | struct ns_client { | ||
776 | unsigned int magic; | ||
777 | @@ -131,10 +138,7 @@ struct ns_client { | ||
778 | dns_name_t signername; /*%< [T]SIG key name */ | ||
779 | dns_name_t *signer; /*%< NULL if not valid sig */ | ||
780 | bool mortal; /*%< Die after handling request */ | ||
781 | - bool pipelined; /*%< TCP queries not in sequence */ | ||
782 | - isc_refcount_t *pipeline_refs; | ||
783 | - isc_quota_t *tcpquota; | ||
784 | - bool tcpattached; | ||
785 | + ns_tcpconn_t *tcpconn; | ||
786 | isc_quota_t *recursionquota; | ||
787 | ns_interface_t *interface; | ||
788 | |||
789 | diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h | ||
790 | index 61b08826a6..3535ef22a8 100644 | ||
791 | --- a/bin/named/include/named/interfacemgr.h | ||
792 | +++ b/bin/named/include/named/interfacemgr.h | ||
793 | @@ -9,8 +9,6 @@ | ||
794 | * information regarding copyright ownership. | ||
795 | */ | ||
796 | |||
797 | -/* $Id: interfacemgr.h,v 1.35 2011/07/28 23:47:58 tbox Exp $ */ | ||
798 | - | ||
799 | #ifndef NAMED_INTERFACEMGR_H | ||
800 | #define NAMED_INTERFACEMGR_H 1 | ||
801 | |||
802 | @@ -77,11 +75,11 @@ struct ns_interface { | ||
803 | /*%< UDP dispatchers. */ | ||
804 | isc_socket_t * tcpsocket; /*%< TCP socket. */ | ||
805 | isc_dscp_t dscp; /*%< "listen-on" DSCP value */ | ||
806 | - int ntcpaccepting; /*%< Number of clients | ||
807 | + int32_t ntcpaccepting; /*%< Number of clients | ||
808 | ready to accept new | ||
809 | TCP connections on this | ||
810 | interface */ | ||
811 | - int ntcpactive; /*%< Number of clients | ||
812 | + int32_t ntcpactive; /*%< Number of clients | ||
813 | servicing TCP queries | ||
814 | (whether accepting or | ||
815 | connected) */ | ||
816 | diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c | ||
817 | index 955096ef47..d9f6df5802 100644 | ||
818 | --- a/bin/named/interfacemgr.c | ||
819 | +++ b/bin/named/interfacemgr.c | ||
820 | @@ -388,6 +388,7 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr, | ||
821 | */ | ||
822 | ifp->ntcpaccepting = 0; | ||
823 | ifp->ntcpactive = 0; | ||
824 | + | ||
825 | ifp->nudpdispatch = 0; | ||
826 | |||
827 | ifp->dscp = -1; | ||
828 | diff --git a/lib/isc/include/isc/quota.h b/lib/isc/include/isc/quota.h | ||
829 | index b9bf59877a..36c5830242 100644 | ||
830 | --- a/lib/isc/include/isc/quota.h | ||
831 | +++ b/lib/isc/include/isc/quota.h | ||
832 | @@ -100,6 +100,13 @@ isc_quota_attach(isc_quota_t *quota, isc_quota_t **p); | ||
833 | * quota if successful (ISC_R_SUCCESS or ISC_R_SOFTQUOTA). | ||
834 | */ | ||
835 | |||
836 | +isc_result_t | ||
837 | +isc_quota_force(isc_quota_t *quota, isc_quota_t **p); | ||
838 | +/*%< | ||
839 | + * Like isc_quota_attach, but will attach '*p' to the quota | ||
840 | + * even if the hard quota has been exceeded. | ||
841 | + */ | ||
842 | + | ||
843 | void | ||
844 | isc_quota_detach(isc_quota_t **p); | ||
845 | /*%< | ||
846 | diff --git a/lib/isc/quota.c b/lib/isc/quota.c | ||
847 | index 3ddff0d875..556a61f21d 100644 | ||
848 | --- a/lib/isc/quota.c | ||
849 | +++ b/lib/isc/quota.c | ||
850 | @@ -74,20 +74,39 @@ isc_quota_release(isc_quota_t *quota) { | ||
851 | UNLOCK("a->lock); | ||
852 | } | ||
853 | |||
854 | -isc_result_t | ||
855 | -isc_quota_attach(isc_quota_t *quota, isc_quota_t **p) | ||
856 | -{ | ||
857 | +static isc_result_t | ||
858 | +doattach(isc_quota_t *quota, isc_quota_t **p, bool force) { | ||
859 | isc_result_t result; | ||
860 | - INSIST(p != NULL && *p == NULL); | ||
861 | + REQUIRE(p != NULL && *p == NULL); | ||
862 | + | ||
863 | result = isc_quota_reserve(quota); | ||
864 | - if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA) | ||
865 | + if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA) { | ||
866 | + *p = quota; | ||
867 | + } else if (result == ISC_R_QUOTA && force) { | ||
868 | + /* attach anyway */ | ||
869 | + LOCK("a->lock); | ||
870 | + quota->used++; | ||
871 | + UNLOCK("a->lock); | ||
872 | + | ||
873 | *p = quota; | ||
874 | + result = ISC_R_SUCCESS; | ||
875 | + } | ||
876 | + | ||
877 | return (result); | ||
878 | } | ||
879 | |||
880 | +isc_result_t | ||
881 | +isc_quota_attach(isc_quota_t *quota, isc_quota_t **p) { | ||
882 | + return (doattach(quota, p, false)); | ||
883 | +} | ||
884 | + | ||
885 | +isc_result_t | ||
886 | +isc_quota_force(isc_quota_t *quota, isc_quota_t **p) { | ||
887 | + return (doattach(quota, p, true)); | ||
888 | +} | ||
889 | + | ||
890 | void | ||
891 | -isc_quota_detach(isc_quota_t **p) | ||
892 | -{ | ||
893 | +isc_quota_detach(isc_quota_t **p) { | ||
894 | INSIST(p != NULL && *p != NULL); | ||
895 | isc_quota_release(*p); | ||
896 | *p = NULL; | ||
897 | diff --git a/lib/isc/win32/libisc.def.in b/lib/isc/win32/libisc.def.in | ||
898 | index a82facec0f..7b9f23d776 100644 | ||
899 | --- a/lib/isc/win32/libisc.def.in | ||
900 | +++ b/lib/isc/win32/libisc.def.in | ||
901 | @@ -519,6 +519,7 @@ isc_portset_removerange | ||
902 | isc_quota_attach | ||
903 | isc_quota_destroy | ||
904 | isc_quota_detach | ||
905 | +isc_quota_force | ||
906 | isc_quota_init | ||
907 | isc_quota_max | ||
908 | isc_quota_release | ||
909 | -- | ||
910 | 2.20.1 | ||
911 | |||
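Editor's note: the new per-connection bookkeeping is easier to read outside the diff. A condensed sketch of the ns_tcpconn_t lifecycle, taken directly from the hunks above (error paths and logging trimmed; comments editorial):

    typedef struct ns_tcpconn {
            isc_refcount_t refs;      /* clients sharing this TCP connection */
            isc_quota_t   *tcpquota;  /* the single quota slot they share */
            bool           pipelined;
    } ns_tcpconn_t;

    /* tcpconn_init(): the creator claims the quota slot first;
     * isc_quota_force() succeeds even over the hard limit, which is
     * how the one-extra-client-per-interface allowance is granted. */
    result = force ? isc_quota_force(&ns_g_server->tcpquota, &quota)
                   : isc_quota_attach(&ns_g_server->tcpquota, &quota);
    if (result != ISC_R_SUCCESS)
            return (result);
    tconn = isc_mem_allocate(ns_g_mctx, sizeof(*tconn));
    isc_refcount_init(&tconn->refs, 1);
    tconn->tcpquota = quota;

    /* tcpconn_detach(): the last client out releases the slot. */
    isc_refcount_decrement(&tconn->refs, &refs);
    if (refs == 0) {
            isc_quota_detach(&tconn->tcpquota);
            isc_mem_free(ns_g_mctx, tconn);
    }

Since every client in a pipeline group shares one tcpconn via tcpconn_attach(), a pipelined connection consumes exactly one 'tcp-clients' slot no matter how many client structures serve it - the accounting that CVE-2018-5743 showed was previously ineffective.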
diff --git a/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch b/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch new file mode 100644 index 0000000000..3821d18501 --- /dev/null +++ b/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch | |||
@@ -0,0 +1,80 @@ | |||
1 | Backport patch to fix CVE-2018-5743. | ||
2 | |||
3 | Ref: | ||
4 | https://security-tracker.debian.org/tracker/CVE-2018-5743 | ||
5 | |||
6 | CVE: CVE-2018-5743 | ||
7 | Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/59434b9] | ||
8 | |||
9 | Signed-off-by: Kai Kang <kai.kang@windriver.com> | ||
10 | |||
11 | From 59434b987e8eb436b08c24e559ee094c4e939daa Mon Sep 17 00:00:00 2001 | ||
12 | From: Evan Hunt <each@isc.org> | ||
13 | Date: Fri, 5 Apr 2019 16:26:19 -0700 | ||
14 | Subject: [PATCH 6/6] restore allowance for tcp-clients < interfaces | ||
15 | |||
16 | in the "refactor tcpquota and pipeline refs" commit, the counting | ||
17 | of active interfaces was tightened in such a way that named could | ||
18 | fail to listen on an interface if there were more interfaces than | ||
19 | tcp-clients. when checking the quota to start accepting on an | ||
20 | interface, if the number of active clients was above zero, then | ||
21 | it was presumed that some other client was able to handle accepting | ||
22 | new connections. this, however, ignored the fact that the current client | ||
23 | could be included in that count, so if the quota was already exceeded | ||
24 | before all the interfaces were listening, some interfaces would never | ||
25 | listen. | ||
26 | |||
27 | we now check whether the current client has been marked active; if so, | ||
28 | then the number of active clients on the interface must be greater | ||
29 | than 1, not 0. | ||
30 | |||
31 | (cherry picked from commit 0b4e2cd4c3192ba88569dd344f542a8cc43742b5) | ||
32 | (cherry picked from commit d01023aaac35543daffbdf48464e320150235d41) | ||
33 | --- | ||
34 | bin/named/client.c | 8 +++++--- | ||
35 | doc/arm/Bv9ARM-book.xml | 3 ++- | ||
36 | 2 files changed, 7 insertions(+), 4 deletions(-) | ||
37 | |||
38 | diff --git a/bin/named/client.c b/bin/named/client.c | ||
39 | index d826ab32bf..845326abc0 100644 | ||
40 | --- a/bin/named/client.c | ||
41 | +++ b/bin/named/client.c | ||
42 | @@ -3464,8 +3464,9 @@ client_accept(ns_client_t *client) { | ||
43 | * | ||
44 | * So, we check here to see if any other clients are | ||
45 | * already servicing TCP queries on this interface (whether | ||
46 | - * accepting, reading, or processing). If we find at least | ||
47 | - * one, then it's okay *not* to call accept - we can let this | ||
48 | + * accepting, reading, or processing). If we find that at | ||
49 | + * least one client other than this one is active, then | ||
50 | + * it's okay *not* to call accept - we can let this | ||
51 | * client go inactive and another will take over when it's | ||
52 | * done. | ||
53 | * | ||
54 | @@ -3479,7 +3480,8 @@ client_accept(ns_client_t *client) { | ||
55 | * quota is tcp-clients plus the number of listening | ||
56 | * interfaces plus 1.) | ||
57 | */ | ||
58 | - exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > 0); | ||
59 | + exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > | ||
60 | + (client->tcpactive ? 1 : 0)); | ||
61 | if (exit) { | ||
62 | client->newstate = NS_CLIENTSTATE_INACTIVE; | ||
63 | (void)exit_check(client); | ||
64 | diff --git a/doc/arm/Bv9ARM-book.xml b/doc/arm/Bv9ARM-book.xml | ||
65 | index 381768d540..9c76d3cd6f 100644 | ||
66 | --- a/doc/arm/Bv9ARM-book.xml | ||
67 | +++ b/doc/arm/Bv9ARM-book.xml | ||
68 | @@ -8493,7 +8493,8 @@ avoid-v6-udp-ports { 40000; range 50000 60000; }; | ||
69 | <para> | ||
70 | The number of file descriptors reserved for TCP, stdio, | ||
71 | etc. This needs to be big enough to cover the number of | ||
72 | - interfaces <command>named</command> listens on, <command>tcp-clients</command> as well as | ||
73 | + interfaces <command>named</command> listens on plus | ||
74 | + <command>tcp-clients</command>, as well as | ||
75 | to provide room for outgoing TCP queries and incoming zone | ||
76 | transfers. The default is <literal>512</literal>. | ||
77 | The minimum value is <literal>128</literal> and the | ||
78 | -- | ||
79 | 2.20.1 | ||
80 | |||
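Editor's note: the arithmetic behind this one-line fix is worth spelling out. A sketch of the corrected over-quota path, using the names from the patch (comments editorial):

    /*
     * ntcpactive can already include *this* client (tcpactive == true),
     * so "another client can cover the interface" must be judged
     * against our own contribution rather than against zero.
     */
    exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) >
            (client->tcpactive ? 1 : 0));
    if (!exit) {
            /* Nobody else is active here: force one extra quota
             * slot so this interface keeps a TCP listener. */
            result = tcpconn_init(client, true);
            RUNTIME_CHECK(result == ISC_R_SUCCESS);
    }

With the old "> 0" comparison, a server with more listening interfaces than 'tcp-clients' could count a client's own activity as proof that someone else was listening, leaving some interfaces with no TCP listener at all.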
diff --git a/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch b/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch new file mode 100644 index 0000000000..1a84eca58a --- /dev/null +++ b/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch | |||
@@ -0,0 +1,140 @@ | |||
1 | Backport commit to fix a compile error on arm caused by the commits | ||
2 | that fix CVE-2018-5743. | ||
3 | |||
4 | CVE: CVE-2018-5743 | ||
5 | Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/ef49780] | ||
6 | |||
7 | Signed-off-by: Kai Kang <kai.kang@windriver.com> | ||
8 | |||
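Editor's note: the patch below replaces raw isc_atomic_xadd() calls with the isc_refcount API. The note above records only "compile error on arm", so reading that as the isc_atomic primitives being unavailable on that target is an inference. The mapping applied in the hunks, in sketch form:

    isc_refcount_increment0(&client->interface->ntcpaccepting, NULL); /* was xadd(+1) */
    isc_refcount_decrement(&client->interface->ntcpaccepting, NULL);  /* was xadd(-1) */
    if (isc_refcount_current(&client->interface->ntcpactive) == 0)    /* was xadd(0)  */
            ...

The NULL out-parameter discards the post-operation value; note that the explicit INSIST(old > 0) checks are simply dropped in the conversion.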
9 | From ef49780d30d3ddc5735cfc32561b678a634fa72f Mon Sep 17 00:00:00 2001 | ||
10 | From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= <ondrej@sury.org> | ||
11 | Date: Wed, 17 Apr 2019 15:22:27 +0200 | ||
12 | Subject: [PATCH] Replace atomic operations in bin/named/client.c with | ||
13 | isc_refcount reference counting | ||
14 | |||
15 | --- | ||
16 | bin/named/client.c | 18 +++++++----------- | ||
17 | bin/named/include/named/interfacemgr.h | 5 +++-- | ||
18 | bin/named/interfacemgr.c | 7 +++++-- | ||
19 | 3 files changed, 15 insertions(+), 15 deletions(-) | ||
20 | |||
21 | diff --git a/bin/named/client.c b/bin/named/client.c | ||
22 | index 845326abc0..29fecadca8 100644 | ||
23 | --- a/bin/named/client.c | ||
24 | +++ b/bin/named/client.c | ||
25 | @@ -402,12 +402,10 @@ tcpconn_detach(ns_client_t *client) { | ||
26 | static void | ||
27 | mark_tcp_active(ns_client_t *client, bool active) { | ||
28 | if (active && !client->tcpactive) { | ||
29 | - isc_atomic_xadd(&client->interface->ntcpactive, 1); | ||
30 | + isc_refcount_increment0(&client->interface->ntcpactive, NULL); | ||
31 | client->tcpactive = active; | ||
32 | } else if (!active && client->tcpactive) { | ||
33 | - uint32_t old = | ||
34 | - isc_atomic_xadd(&client->interface->ntcpactive, -1); | ||
35 | - INSIST(old > 0); | ||
36 | + isc_refcount_decrement(&client->interface->ntcpactive, NULL); | ||
37 | client->tcpactive = active; | ||
38 | } | ||
39 | } | ||
40 | @@ -554,7 +552,7 @@ exit_check(ns_client_t *client) { | ||
41 | if (client->mortal && TCP_CLIENT(client) && | ||
42 | client->newstate != NS_CLIENTSTATE_FREED && | ||
43 | !ns_g_clienttest && | ||
44 | - isc_atomic_xadd(&client->interface->ntcpaccepting, 0) == 0) | ||
45 | + isc_refcount_current(&client->interface->ntcpaccepting) == 0) | ||
46 | { | ||
47 | /* Nobody else is accepting */ | ||
48 | client->mortal = false; | ||
49 | @@ -3328,7 +3326,6 @@ client_newconn(isc_task_t *task, isc_event_t *event) { | ||
50 | isc_result_t result; | ||
51 | ns_client_t *client = event->ev_arg; | ||
52 | isc_socket_newconnev_t *nevent = (isc_socket_newconnev_t *)event; | ||
53 | - uint32_t old; | ||
54 | |||
55 | REQUIRE(event->ev_type == ISC_SOCKEVENT_NEWCONN); | ||
56 | REQUIRE(NS_CLIENT_VALID(client)); | ||
57 | @@ -3348,8 +3345,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) { | ||
58 | INSIST(client->naccepts == 1); | ||
59 | client->naccepts--; | ||
60 | |||
61 | - old = isc_atomic_xadd(&client->interface->ntcpaccepting, -1); | ||
62 | - INSIST(old > 0); | ||
63 | + isc_refcount_decrement(&client->interface->ntcpaccepting, NULL); | ||
64 | |||
65 | /* | ||
66 | * We must take ownership of the new socket before the exit | ||
67 | @@ -3480,8 +3476,8 @@ client_accept(ns_client_t *client) { | ||
68 | * quota is tcp-clients plus the number of listening | ||
69 | * interfaces plus 1.) | ||
70 | */ | ||
71 | - exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > | ||
72 | - (client->tcpactive ? 1 : 0)); | ||
73 | + exit = (isc_refcount_current(&client->interface->ntcpactive) > | ||
74 | + (client->tcpactive ? 1U : 0U)); | ||
75 | if (exit) { | ||
76 | client->newstate = NS_CLIENTSTATE_INACTIVE; | ||
77 | (void)exit_check(client); | ||
78 | @@ -3539,7 +3535,7 @@ client_accept(ns_client_t *client) { | ||
79 | * listening for connections itself to prevent the interface | ||
80 | * going dead. | ||
81 | */ | ||
82 | - isc_atomic_xadd(&client->interface->ntcpaccepting, 1); | ||
83 | + isc_refcount_increment0(&client->interface->ntcpaccepting, NULL); | ||
84 | } | ||
85 | |||
86 | static void | ||
87 | diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h | ||
88 | index 3535ef22a8..6e10f210fd 100644 | ||
89 | --- a/bin/named/include/named/interfacemgr.h | ||
90 | +++ b/bin/named/include/named/interfacemgr.h | ||
91 | @@ -45,6 +45,7 @@ | ||
92 | #include <isc/magic.h> | ||
93 | #include <isc/mem.h> | ||
94 | #include <isc/socket.h> | ||
95 | +#include <isc/refcount.h> | ||
96 | |||
97 | #include <dns/result.h> | ||
98 | |||
99 | @@ -75,11 +76,11 @@ struct ns_interface { | ||
100 | /*%< UDP dispatchers. */ | ||
101 | isc_socket_t * tcpsocket; /*%< TCP socket. */ | ||
102 | isc_dscp_t dscp; /*%< "listen-on" DSCP value */ | ||
103 | - int32_t ntcpaccepting; /*%< Number of clients | ||
104 | + isc_refcount_t ntcpaccepting; /*%< Number of clients | ||
105 | ready to accept new | ||
106 | TCP connections on this | ||
107 | interface */ | ||
108 | - int32_t ntcpactive; /*%< Number of clients | ||
109 | + isc_refcount_t ntcpactive; /*%< Number of clients | ||
110 | servicing TCP queries | ||
111 | (whether accepting or | ||
112 | connected) */ | ||
113 | diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c | ||
114 | index d9f6df5802..135533be6b 100644 | ||
115 | --- a/bin/named/interfacemgr.c | ||
116 | +++ b/bin/named/interfacemgr.c | ||
117 | @@ -386,8 +386,8 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr, | ||
118 | * connections will be handled in parallel even though there is | ||
119 | * only one client initially. | ||
120 | */ | ||
121 | - ifp->ntcpaccepting = 0; | ||
122 | - ifp->ntcpactive = 0; | ||
123 | + isc_refcount_init(&ifp->ntcpaccepting, 0); | ||
124 | + isc_refcount_init(&ifp->ntcpactive, 0); | ||
125 | |||
126 | ifp->nudpdispatch = 0; | ||
127 | |||
128 | @@ -618,6 +618,9 @@ ns_interface_destroy(ns_interface_t *ifp) { | ||
129 | |||
130 | ns_interfacemgr_detach(&ifp->mgr); | ||
131 | |||
132 | + isc_refcount_destroy(&ifp->ntcpactive); | ||
133 | + isc_refcount_destroy(&ifp->ntcpaccepting); | ||
134 | + | ||
135 | ifp->magic = 0; | ||
136 | isc_mem_put(mctx, ifp, sizeof(*ifp)); | ||
137 | } | ||
138 | -- | ||
139 | 2.20.1 | ||
140 | |||
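[Editor's note] The patch above swaps the open-coded isc_atomic_xadd() calls for the isc_refcount counter API seen in the hunks (isc_refcount_init / _increment0 / _decrement / _current / _destroy). To make that API shape concrete, here is a sketch using C11 atomics as a stand-in: refcount_t and its helpers below are invented for illustration and are not ISC's implementation, which presumably also carries portable fallbacks for platforms without native atomics and thereby sidesteps the arm build failure the header mentions.

    #include <assert.h>
    #include <stdatomic.h>

    /* Invented stand-in for isc_refcount_t; not the ISC implementation. */
    typedef struct {
            atomic_uint count;
    } refcount_t;

    static void
    refcount_init(refcount_t *r, unsigned int n)
    {
            atomic_init(&r->count, n);
    }

    /*
     * "increment0": unlike a classic reference count, this counter is
     * allowed to grow from zero, which is why the hunks above use
     * isc_refcount_increment0() rather than a plain increment.
     */
    static void
    refcount_increment0(refcount_t *r)
    {
            atomic_fetch_add(&r->count, 1);
    }

    static void
    refcount_decrement(refcount_t *r)
    {
            unsigned int old = atomic_fetch_sub(&r->count, 1);
            assert(old > 0);    /* mirrors the removed INSIST(old > 0) */
    }

    static unsigned int
    refcount_current(refcount_t *r)
    {
            return atomic_load(&r->count);
    }

    static void
    refcount_destroy(refcount_t *r)
    {
            assert(refcount_current(r) == 0);   /* must be balanced */
    }

    int
    main(void)
    {
            refcount_t ntcpactive;

            refcount_init(&ntcpactive, 0);       /* ns_interface_create()  */
            refcount_increment0(&ntcpactive);    /* mark_tcp_active(true)  */
            refcount_decrement(&ntcpactive);     /* mark_tcp_active(false) */
            refcount_destroy(&ntcpactive);       /* ns_interface_destroy() */
            return 0;
    }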
diff --git a/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb b/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb
index f4e985036d..3e2412dfa4 100644
--- a/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb
+++ b/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb
@@ -20,6 +20,14 @@ SRC_URI = "https://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.gz \ | |||
20 | file://0001-configure.in-remove-useless-L-use_openssl-lib.patch \ | 20 | file://0001-configure.in-remove-useless-L-use_openssl-lib.patch \ |
21 | file://0001-named-lwresd-V-and-start-log-hide-build-options.patch \ | 21 | file://0001-named-lwresd-V-and-start-log-hide-build-options.patch \ |
22 | file://0001-avoid-start-failure-with-bind-user.patch \ | 22 | file://0001-avoid-start-failure-with-bind-user.patch \ |
23 | file://0001-bind-fix-CVE-2019-6471.patch \ | ||
24 | file://0001-fix-enforcement-of-tcp-clients-v1.patch \ | ||
25 | file://0002-tcp-clients-could-still-be-exceeded-v2.patch \ | ||
26 | file://0003-use-reference-counter-for-pipeline-groups-v3.patch \ | ||
27 | file://0004-better-tcpquota-accounting-and-client-mortality-chec.patch \ | ||
28 | file://0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch \ | ||
29 | file://0006-restore-allowance-for-tcp-clients-interfaces.patch \ | ||
30 | file://0007-Replace-atomic-operations-in-bin-named-client.c-with.patch \ | ||
23 | " | 31 | " |
24 | 32 | ||
25 | SRC_URI[md5sum] = "8ddab4b61fa4516fe404679c74e37960" | 33 | SRC_URI[md5sum] = "8ddab4b61fa4516fe404679c74e37960" |