Diffstat (limited to 'meta/recipes-extended')
-rw-r--r-- | meta/recipes-extended/libtirpc/libtirpc/0001-Add-missing-rwlock_unlocks-in-xprt_register.patch | 62
-rw-r--r-- | meta/recipes-extended/libtirpc/libtirpc/Use-netbsd-queue.h.patch | 1579
-rw-r--r-- | meta/recipes-extended/libtirpc/libtirpc/remove-des-functionality.patch | 144
-rw-r--r-- | meta/recipes-extended/libtirpc/libtirpc/remove-des-uclibc.patch | 38
-rw-r--r-- | meta/recipes-extended/libtirpc/libtirpc_1.0.1.bb (renamed from meta/recipes-extended/libtirpc/libtirpc_0.2.5.bb) | 11
5 files changed, 1793 insertions, 41 deletions
diff --git a/meta/recipes-extended/libtirpc/libtirpc/0001-Add-missing-rwlock_unlocks-in-xprt_register.patch b/meta/recipes-extended/libtirpc/libtirpc/0001-Add-missing-rwlock_unlocks-in-xprt_register.patch
new file mode 100644
index 0000000000..50613ba312
--- /dev/null
+++ b/meta/recipes-extended/libtirpc/libtirpc/0001-Add-missing-rwlock_unlocks-in-xprt_register.patch
@@ -0,0 +1,62 @@
1 | Subject: [PATCH] Add missing rwlock_unlocks in xprt_register | ||
2 | |||
3 | It looks like in b2c9430f46c4ac848957fb8adaac176a3f6ac03f when svc_run | ||
4 | switched to poll, an early return was added, but the rwlock was not | ||
5 | unlocked. | ||
6 | |||
7 | I observed that rpcbind built against libtirpc-1.0.1 would handle only | ||
8 | one request before hanging, and tracked it down to a missing | ||
9 | rwlock_unlock here. | ||
10 | |||
11 | Fixes: b2c9430f46c4 ('Use poll() instead of select() in svc_run()') | ||
12 | |||
13 | Upstream-Status: Backport | ||
14 | |||
15 | Signed-off-by: Michael Forney <mforney@mforney.org> | ||
16 | Signed-off-by: Steve Dickson <steved@redhat.com> | ||
17 | Signed-off-by: Maxin B. John <maxin.john@intel.com> | ||
18 | --- | ||
19 | src/svc.c | 7 ++++--- | ||
20 | 1 file changed, 4 insertions(+), 3 deletions(-) | ||
21 | |||
22 | diff --git a/src/svc.c b/src/svc.c | ||
23 | index 9c41445..b59467b 100644 | ||
24 | --- a/src/svc.c | ||
25 | +++ b/src/svc.c | ||
26 | @@ -99,7 +99,7 @@ xprt_register (xprt) | ||
27 | { | ||
28 | __svc_xports = (SVCXPRT **) calloc (_rpc_dtablesize(), sizeof (SVCXPRT *)); | ||
29 | if (__svc_xports == NULL) | ||
30 | - return; | ||
31 | + goto unlock; | ||
32 | } | ||
33 | if (sock < _rpc_dtablesize()) | ||
34 | { | ||
35 | @@ -120,14 +120,14 @@ xprt_register (xprt) | ||
36 | svc_pollfd[i].fd = sock; | ||
37 | svc_pollfd[i].events = (POLLIN | POLLPRI | | ||
38 | POLLRDNORM | POLLRDBAND); | ||
39 | - return; | ||
40 | + goto unlock; | ||
41 | } | ||
42 | |||
43 | new_svc_pollfd = (struct pollfd *) realloc (svc_pollfd, | ||
44 | sizeof (struct pollfd) | ||
45 | * (svc_max_pollfd + 1)); | ||
46 | if (new_svc_pollfd == NULL) /* Out of memory */ | ||
47 | - return; | ||
48 | + goto unlock; | ||
49 | svc_pollfd = new_svc_pollfd; | ||
50 | ++svc_max_pollfd; | ||
51 | |||
52 | @@ -135,6 +135,7 @@ xprt_register (xprt) | ||
53 | svc_pollfd[svc_max_pollfd - 1].events = (POLLIN | POLLPRI | | ||
54 | POLLRDNORM | POLLRDBAND); | ||
55 | } | ||
56 | +unlock: | ||
57 | rwlock_unlock (&svc_fd_lock); | ||
58 | } | ||
59 | |||
60 | -- | ||
61 | 2.5.3 | ||
62 | |||
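
The patch above applies the single-unlock cleanup pattern: every early return taken while svc_fd_lock is held becomes a goto to one unlock label, so the writer lock is always released. A minimal standalone sketch of that pattern in plain C with pthreads (the names table_register and table_lock are invented for illustration and are not libtirpc identifiers):

/* Sketch of the goto-unlock pattern from the patch above.
 * Build with: cc -pthread example.c */
#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static int *table;
static size_t table_size;

static void table_register(int value)
{
    pthread_rwlock_wrlock(&table_lock);

    if (table == NULL) {
        table = calloc(16, sizeof(int));
        if (table == NULL)
            goto unlock;   /* a bare "return" here would leak the lock */
        table_size = 16;
    }

    if (table_size == 0)
        goto unlock;       /* nothing to do, but still must unlock */

    table[0] = value;

unlock:
    pthread_rwlock_unlock(&table_lock);
}

int main(void)
{
    table_register(42);
    table_register(7);     /* would hang here if the first call leaked the lock */
    return 0;
}
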
diff --git a/meta/recipes-extended/libtirpc/libtirpc/Use-netbsd-queue.h.patch b/meta/recipes-extended/libtirpc/libtirpc/Use-netbsd-queue.h.patch
new file mode 100644
index 0000000000..21c6c53a85
--- /dev/null
+++ b/meta/recipes-extended/libtirpc/libtirpc/Use-netbsd-queue.h.patch
@@ -0,0 +1,1579 @@
1 | musl does not provide a sys/queue.h implementation. Borrow queue.h from | ||
2 | the NetBSD project: | ||
3 | http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/sys/queue.h?rev=1.68 | ||
4 | |||
5 | Upstream-Status: Inappropriate [musl specific] | ||
6 | |||
7 | Signed-off-by: Jörg Krause <joerg.krause@embedded.rocks> | ||
8 | Signed-off-by: Maxin B. John <maxin.john@intel.com> | ||
9 | --- | ||
10 | diff -Naur libtirpc-1.0.1-orig/src/clnt_bcast.c libtirpc-1.0.1/src/clnt_bcast.c | ||
11 | --- libtirpc-1.0.1-orig/src/clnt_bcast.c 2015-10-30 17:15:14.000000000 +0200 | ||
12 | +++ libtirpc-1.0.1/src/clnt_bcast.c 2015-12-21 17:03:52.066008311 +0200 | ||
13 | @@ -40,7 +40,6 @@ | ||
14 | */ | ||
15 | #include <sys/socket.h> | ||
16 | #include <sys/types.h> | ||
17 | -#include <sys/queue.h> | ||
18 | |||
19 | #include <net/if.h> | ||
20 | #include <netinet/in.h> | ||
21 | @@ -62,6 +61,7 @@ | ||
22 | #include <err.h> | ||
23 | #include <string.h> | ||
24 | |||
25 | +#include "queue.h" | ||
26 | #include "rpc_com.h" | ||
27 | #include "debug.h" | ||
28 | |||
29 | diff -Naur libtirpc-1.0.1-orig/src/clnt_bcast.c.orig libtirpc-1.0.1/src/clnt_bcast.c.orig | ||
30 | --- libtirpc-1.0.1-orig/src/clnt_bcast.c.orig 1970-01-01 02:00:00.000000000 +0200 | ||
31 | +++ libtirpc-1.0.1/src/clnt_bcast.c.orig 2015-10-30 17:15:14.000000000 +0200 | ||
32 | @@ -0,0 +1,697 @@ | ||
33 | +/* | ||
34 | + * Copyright (c) 2009, Sun Microsystems, Inc. | ||
35 | + * All rights reserved. | ||
36 | + * | ||
37 | + * Redistribution and use in source and binary forms, with or without | ||
38 | + * modification, are permitted provided that the following conditions are met: | ||
39 | + * - Redistributions of source code must retain the above copyright notice, | ||
40 | + * this list of conditions and the following disclaimer. | ||
41 | + * - Redistributions in binary form must reproduce the above copyright notice, | ||
42 | + * this list of conditions and the following disclaimer in the documentation | ||
43 | + * and/or other materials provided with the distribution. | ||
44 | + * - Neither the name of Sun Microsystems, Inc. nor the names of its | ||
45 | + * contributors may be used to endorse or promote products derived | ||
46 | + * from this software without specific prior written permission. | ||
47 | + * | ||
48 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
49 | + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
50 | + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
51 | + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE | ||
52 | + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
53 | + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
54 | + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
55 | + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
56 | + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
57 | + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
58 | + * POSSIBILITY OF SUCH DAMAGE. | ||
59 | + */ | ||
60 | +/* | ||
61 | + * Copyright (c) 1986-1991 by Sun Microsystems Inc. | ||
62 | + */ | ||
63 | + | ||
64 | +/* | ||
65 | + * clnt_bcast.c | ||
66 | + * Client interface to broadcast service. | ||
67 | + * | ||
68 | + * Copyright (C) 1988, Sun Microsystems, Inc. | ||
69 | + * | ||
70 | + * The following is kludged-up support for simple rpc broadcasts. | ||
71 | + * Someday a large, complicated system will replace these routines. | ||
72 | + */ | ||
73 | +#include <sys/socket.h> | ||
74 | +#include <sys/types.h> | ||
75 | +#include <sys/queue.h> | ||
76 | + | ||
77 | +#include <net/if.h> | ||
78 | +#include <netinet/in.h> | ||
79 | +#include <ifaddrs.h> | ||
80 | +#include <poll.h> | ||
81 | +#include <rpc/rpc.h> | ||
82 | +#ifdef PORTMAP | ||
83 | +#include <rpc/pmap_prot.h> | ||
84 | +#include <rpc/pmap_clnt.h> | ||
85 | +#include <rpc/pmap_rmt.h> | ||
86 | +#endif /* PORTMAP */ | ||
87 | +#include <rpc/nettype.h> | ||
88 | +#include <arpa/inet.h> | ||
89 | +#include <stdio.h> | ||
90 | +#include <errno.h> | ||
91 | +#include <stdlib.h> | ||
92 | +#include <unistd.h> | ||
93 | +#include <netdb.h> | ||
94 | +#include <err.h> | ||
95 | +#include <string.h> | ||
96 | + | ||
97 | +#include "rpc_com.h" | ||
98 | +#include "debug.h" | ||
99 | + | ||
100 | +#define MAXBCAST 20 /* Max no of broadcasting transports */ | ||
101 | +#define INITTIME 4000 /* Time to wait initially */ | ||
102 | +#define WAITTIME 8000 /* Maximum time to wait */ | ||
103 | + | ||
104 | +# define POLLRDNORM 0x040 /* Normal data may be read. */ | ||
105 | +# define POLLRDBAND 0x080 /* Priority data may be read. */ | ||
106 | + | ||
107 | + | ||
108 | + | ||
109 | +/* | ||
110 | + * If nettype is NULL, it broadcasts on all the available | ||
111 | + * datagram_n transports. May potentially lead to broadacst storms | ||
112 | + * and hence should be used with caution, care and courage. | ||
113 | + * | ||
114 | + * The current parameter xdr packet size is limited by the max tsdu | ||
115 | + * size of the transport. If the max tsdu size of any transport is | ||
116 | + * smaller than the parameter xdr packet, then broadcast is not | ||
117 | + * sent on that transport. | ||
118 | + * | ||
119 | + * Also, the packet size should be less the packet size of | ||
120 | + * the data link layer (for ethernet it is 1400 bytes). There is | ||
121 | + * no easy way to find out the max size of the data link layer and | ||
122 | + * we are assuming that the args would be smaller than that. | ||
123 | + * | ||
124 | + * The result size has to be smaller than the transport tsdu size. | ||
125 | + * | ||
126 | + * If PORTMAP has been defined, we send two packets for UDP, one for | ||
127 | + * rpcbind and one for portmap. For those machines which support | ||
128 | + * both rpcbind and portmap, it will cause them to reply twice, and | ||
129 | + * also here it will get two responses ... inefficient and clumsy. | ||
130 | + */ | ||
131 | + | ||
132 | +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) | ||
133 | + | ||
134 | +#define TAILQ_FIRST(head) ((head)->tqh_first) | ||
135 | + | ||
136 | + | ||
137 | +struct broadif { | ||
138 | + int index; | ||
139 | + struct sockaddr_storage broadaddr; | ||
140 | + TAILQ_ENTRY(broadif) link; | ||
141 | +}; | ||
142 | + | ||
143 | +typedef TAILQ_HEAD(, broadif) broadlist_t; | ||
144 | + | ||
145 | +int __rpc_getbroadifs(int, int, int, broadlist_t *); | ||
146 | +void __rpc_freebroadifs(broadlist_t *); | ||
147 | +int __rpc_broadenable(int, int, struct broadif *); | ||
148 | + | ||
149 | +int __rpc_lowvers = 0; | ||
150 | + | ||
151 | +int | ||
152 | +__rpc_getbroadifs(int af, int proto, int socktype, broadlist_t *list) | ||
153 | +{ | ||
154 | + int count = 0; | ||
155 | + struct broadif *bip; | ||
156 | + struct ifaddrs *ifap, *ifp; | ||
157 | +#ifdef INET6 | ||
158 | + struct sockaddr_in6 *sin6; | ||
159 | +#endif | ||
160 | + struct sockaddr_in *sin; | ||
161 | + struct addrinfo hints, *res; | ||
162 | + | ||
163 | + if (getifaddrs(&ifp) < 0) | ||
164 | + return 0; | ||
165 | + | ||
166 | + memset(&hints, 0, sizeof hints); | ||
167 | + | ||
168 | + hints.ai_family = af; | ||
169 | + hints.ai_protocol = proto; | ||
170 | + hints.ai_socktype = socktype; | ||
171 | + | ||
172 | + if (getaddrinfo(NULL, "sunrpc", &hints, &res) != 0) | ||
173 | + return 0; | ||
174 | + | ||
175 | + for (ifap = ifp; ifap != NULL; ifap = ifap->ifa_next) { | ||
176 | + if (ifap->ifa_addr == NULL || /* happens for eg tuntap devices */ | ||
177 | + ifap->ifa_addr->sa_family != af || | ||
178 | + !(ifap->ifa_flags & IFF_UP)) | ||
179 | + continue; | ||
180 | + bip = (struct broadif *)malloc(sizeof *bip); | ||
181 | + if (bip == NULL) | ||
182 | + break; | ||
183 | + bip->index = if_nametoindex(ifap->ifa_name); | ||
184 | + if ( | ||
185 | +#ifdef INET6 | ||
186 | + af != AF_INET6 && | ||
187 | +#endif | ||
188 | + (ifap->ifa_flags & IFF_BROADCAST) && | ||
189 | + ifap->ifa_broadaddr) { | ||
190 | + /* memcpy(&bip->broadaddr, ifap->ifa_broadaddr, | ||
191 | + (size_t)ifap->ifa_broadaddr->sa_len);*/ | ||
192 | + memcpy(&bip->broadaddr, ifap->ifa_broadaddr, | ||
193 | + sizeof(bip->broadaddr)); | ||
194 | + sin = (struct sockaddr_in *)(void *)&bip->broadaddr; | ||
195 | + sin->sin_port = | ||
196 | + ((struct sockaddr_in *) | ||
197 | + (void *)res->ai_addr)->sin_port; | ||
198 | + } else | ||
199 | +#ifdef INET6 | ||
200 | + if (af == AF_INET6 && (ifap->ifa_flags & IFF_MULTICAST)) { | ||
201 | + sin6 = (struct sockaddr_in6 *)(void *)&bip->broadaddr; | ||
202 | + inet_pton(af, RPCB_MULTICAST_ADDR, &sin6->sin6_addr); | ||
203 | + sin6->sin6_family = af; | ||
204 | + sin6->sin6_port = | ||
205 | + ((struct sockaddr_in6 *) | ||
206 | + (void *)res->ai_addr)->sin6_port; | ||
207 | + sin6->sin6_scope_id = bip->index; | ||
208 | + } else | ||
209 | +#endif | ||
210 | + { | ||
211 | + free(bip); | ||
212 | + continue; | ||
213 | + } | ||
214 | + TAILQ_INSERT_TAIL(list, bip, link); | ||
215 | + count++; | ||
216 | + } | ||
217 | + freeifaddrs(ifp); | ||
218 | + freeaddrinfo(res); | ||
219 | + | ||
220 | + return count; | ||
221 | +} | ||
222 | + | ||
223 | +void | ||
224 | +__rpc_freebroadifs(broadlist_t *list) | ||
225 | +{ | ||
226 | + struct broadif *bip, *next; | ||
227 | + | ||
228 | + bip = TAILQ_FIRST(list); | ||
229 | + | ||
230 | + while (bip != NULL) { | ||
231 | + next = TAILQ_NEXT(bip, link); | ||
232 | + free(bip); | ||
233 | + bip = next; | ||
234 | + } | ||
235 | +} | ||
236 | + | ||
237 | +int | ||
238 | +/*ARGSUSED*/ | ||
239 | +__rpc_broadenable(int af, int s, struct broadif *bip) | ||
240 | +{ | ||
241 | + int o = 1; | ||
242 | + | ||
243 | +#if 0 | ||
244 | + if (af == AF_INET6) { | ||
245 | + fprintf(stderr, "set v6 multicast if to %d\n", bip->index); | ||
246 | + if (setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_IF, &bip->index, | ||
247 | + sizeof bip->index) < 0) | ||
248 | + return -1; | ||
249 | + } else | ||
250 | +#endif | ||
251 | + if (setsockopt(s, SOL_SOCKET, SO_BROADCAST, &o, sizeof o) < 0) | ||
252 | + return -1; | ||
253 | + | ||
254 | + return 0; | ||
255 | +} | ||
256 | + | ||
257 | +/* | ||
258 | + * Some rpcbind implementations use an IPv6 socket to serve both | ||
259 | + * IPv4 and IPv6 messages, but neglect to check for the caller's | ||
260 | + * address family when sending broadcast replies. These rpcbind | ||
261 | + * implementations return an IPv6 address in reply to an IPv4 | ||
262 | + * broadcast. We can either ignore them, or try to patch them up. | ||
263 | + */ | ||
264 | +static struct netbuf * | ||
265 | +__ipv6v4_fixup(struct sockaddr_storage *ss, const char *uaddr) | ||
266 | +{ | ||
267 | + struct sockaddr_in sin; | ||
268 | + struct netbuf *np; | ||
269 | + | ||
270 | + /* ss is the remote rpcbind server's address */ | ||
271 | + if (ss->ss_family != AF_INET) | ||
272 | + return NULL; | ||
273 | + memcpy(&sin, ss, sizeof(sin)); | ||
274 | + | ||
275 | + np = __rpc_uaddr2taddr_af(AF_INET6, uaddr); | ||
276 | + if (np == NULL) | ||
277 | + return NULL; | ||
278 | + | ||
279 | + /* Overwrite the port with that of the service we | ||
280 | + * wanted to talk to. */ | ||
281 | + sin.sin_port = ((struct sockaddr_in6 *) np)->sin6_port; | ||
282 | + | ||
283 | + /* We know netbuf holds a sockaddr_in6, so it can easily | ||
284 | + * hold a sockaddr_in as well. */ | ||
285 | + memcpy(np->buf, &sin, sizeof(sin)); | ||
286 | + np->len = sizeof(sin); | ||
287 | + | ||
288 | + return np; | ||
289 | +} | ||
290 | + | ||
291 | +enum clnt_stat | ||
292 | +rpc_broadcast_exp(prog, vers, proc, xargs, argsp, xresults, resultsp, | ||
293 | + eachresult, inittime, waittime, nettype) | ||
294 | + rpcprog_t prog; /* program number */ | ||
295 | + rpcvers_t vers; /* version number */ | ||
296 | + rpcproc_t proc; /* procedure number */ | ||
297 | + xdrproc_t xargs; /* xdr routine for args */ | ||
298 | + caddr_t argsp; /* pointer to args */ | ||
299 | + xdrproc_t xresults; /* xdr routine for results */ | ||
300 | + caddr_t resultsp; /* pointer to results */ | ||
301 | + resultproc_t eachresult; /* call with each result obtained */ | ||
302 | + int inittime; /* how long to wait initially */ | ||
303 | + int waittime; /* maximum time to wait */ | ||
304 | + const char *nettype; /* transport type */ | ||
305 | +{ | ||
306 | + enum clnt_stat stat = RPC_SUCCESS; /* Return status */ | ||
307 | + XDR xdr_stream; /* XDR stream */ | ||
308 | + XDR *xdrs = &xdr_stream; | ||
309 | + struct rpc_msg msg; /* RPC message */ | ||
310 | + struct timeval t; | ||
311 | + char *outbuf = NULL; /* Broadcast msg buffer */ | ||
312 | + char *inbuf = NULL; /* Reply buf */ | ||
313 | + int inlen; | ||
314 | + u_int maxbufsize = 0; | ||
315 | + AUTH *sys_auth = authunix_create_default(); | ||
316 | + int i; | ||
317 | + void *handle; | ||
318 | + char uaddress[1024]; /* A self imposed limit */ | ||
319 | + char *uaddrp = uaddress; | ||
320 | + int pmap_reply_flag; /* reply recvd from PORTMAP */ | ||
321 | + /* An array of all the suitable broadcast transports */ | ||
322 | + struct { | ||
323 | + int fd; /* File descriptor */ | ||
324 | + int af; | ||
325 | + int proto; | ||
326 | + struct netconfig *nconf; /* Netconfig structure */ | ||
327 | + u_int asize; /* Size of the addr buf */ | ||
328 | + u_int dsize; /* Size of the data buf */ | ||
329 | + struct sockaddr_storage raddr; /* Remote address */ | ||
330 | + broadlist_t nal; | ||
331 | + } fdlist[MAXBCAST]; | ||
332 | + struct pollfd pfd[MAXBCAST]; | ||
333 | + size_t fdlistno = 0; | ||
334 | + struct r_rpcb_rmtcallargs barg; /* Remote arguments */ | ||
335 | + struct r_rpcb_rmtcallres bres; /* Remote results */ | ||
336 | + size_t outlen; | ||
337 | + struct netconfig *nconf; | ||
338 | + int msec; | ||
339 | + int pollretval; | ||
340 | + int fds_found; | ||
341 | + | ||
342 | +#ifdef PORTMAP | ||
343 | + size_t outlen_pmap = 0; | ||
344 | + u_long port; /* Remote port number */ | ||
345 | + int pmap_flag = 0; /* UDP exists ? */ | ||
346 | + char *outbuf_pmap = NULL; | ||
347 | + struct rmtcallargs barg_pmap; /* Remote arguments */ | ||
348 | + struct rmtcallres bres_pmap; /* Remote results */ | ||
349 | + u_int udpbufsz = 0; | ||
350 | +#endif /* PORTMAP */ | ||
351 | + | ||
352 | + if (sys_auth == NULL) { | ||
353 | + return (RPC_SYSTEMERROR); | ||
354 | + } | ||
355 | + /* | ||
356 | + * initialization: create a fd, a broadcast address, and send the | ||
357 | + * request on the broadcast transport. | ||
358 | + * Listen on all of them and on replies, call the user supplied | ||
359 | + * function. | ||
360 | + */ | ||
361 | + | ||
362 | + if (nettype == NULL) | ||
363 | + nettype = "datagram_n"; | ||
364 | + if ((handle = __rpc_setconf(nettype)) == NULL) { | ||
365 | + return (RPC_UNKNOWNPROTO); | ||
366 | + } | ||
367 | + while ((nconf = __rpc_getconf(handle)) != NULL) { | ||
368 | + int fd; | ||
369 | + struct __rpc_sockinfo si; | ||
370 | + | ||
371 | + if (nconf->nc_semantics != NC_TPI_CLTS) | ||
372 | + continue; | ||
373 | + if (fdlistno >= MAXBCAST) | ||
374 | + break; /* No more slots available */ | ||
375 | + if (!__rpc_nconf2sockinfo(nconf, &si)) | ||
376 | + continue; | ||
377 | + | ||
378 | + TAILQ_INIT(&fdlist[fdlistno].nal); | ||
379 | + if (__rpc_getbroadifs(si.si_af, si.si_proto, si.si_socktype, | ||
380 | + &fdlist[fdlistno].nal) == 0) | ||
381 | + continue; | ||
382 | + | ||
383 | + fd = socket(si.si_af, si.si_socktype, si.si_proto); | ||
384 | + if (fd < 0) { | ||
385 | + stat = RPC_CANTSEND; | ||
386 | + continue; | ||
387 | + } | ||
388 | + fdlist[fdlistno].af = si.si_af; | ||
389 | + fdlist[fdlistno].proto = si.si_proto; | ||
390 | + fdlist[fdlistno].fd = fd; | ||
391 | + fdlist[fdlistno].nconf = nconf; | ||
392 | + fdlist[fdlistno].asize = __rpc_get_a_size(si.si_af); | ||
393 | + pfd[fdlistno].events = POLLIN | POLLPRI | | ||
394 | + POLLRDNORM | POLLRDBAND; | ||
395 | + pfd[fdlistno].fd = fdlist[fdlistno].fd = fd; | ||
396 | + fdlist[fdlistno].dsize = __rpc_get_t_size(si.si_af, si.si_proto, | ||
397 | + 0); | ||
398 | + | ||
399 | + if (maxbufsize <= fdlist[fdlistno].dsize) | ||
400 | + maxbufsize = fdlist[fdlistno].dsize; | ||
401 | + | ||
402 | +#ifdef PORTMAP | ||
403 | + if (si.si_af == AF_INET && si.si_proto == IPPROTO_UDP) { | ||
404 | + udpbufsz = fdlist[fdlistno].dsize; | ||
405 | + if ((outbuf_pmap = malloc(udpbufsz)) == NULL) { | ||
406 | + close(fd); | ||
407 | + stat = RPC_SYSTEMERROR; | ||
408 | + goto done_broad; | ||
409 | + } | ||
410 | + pmap_flag = 1; | ||
411 | + } | ||
412 | +#endif /* PORTMAP */ | ||
413 | + fdlistno++; | ||
414 | + } | ||
415 | + | ||
416 | + if (fdlistno == 0) { | ||
417 | + if (stat == RPC_SUCCESS) | ||
418 | + stat = RPC_UNKNOWNPROTO; | ||
419 | + goto done_broad; | ||
420 | + } | ||
421 | + if (maxbufsize == 0) { | ||
422 | + if (stat == RPC_SUCCESS) | ||
423 | + stat = RPC_CANTSEND; | ||
424 | + goto done_broad; | ||
425 | + } | ||
426 | + inbuf = malloc(maxbufsize); | ||
427 | + outbuf = malloc(maxbufsize); | ||
428 | + if ((inbuf == NULL) || (outbuf == NULL)) { | ||
429 | + stat = RPC_SYSTEMERROR; | ||
430 | + goto done_broad; | ||
431 | + } | ||
432 | + | ||
433 | + /* Serialize all the arguments which have to be sent */ | ||
434 | + (void) gettimeofday(&t, NULL); | ||
435 | + msg.rm_xid = __RPC_GETXID(&t); | ||
436 | + msg.rm_direction = CALL; | ||
437 | + msg.rm_call.cb_rpcvers = RPC_MSG_VERSION; | ||
438 | + msg.rm_call.cb_prog = RPCBPROG; | ||
439 | + msg.rm_call.cb_vers = RPCBVERS; | ||
440 | + msg.rm_call.cb_proc = RPCBPROC_CALLIT; | ||
441 | + barg.prog = prog; | ||
442 | + barg.vers = vers; | ||
443 | + barg.proc = proc; | ||
444 | + barg.args.args_val = argsp; | ||
445 | + barg.xdr_args = xargs; | ||
446 | + bres.addr = uaddrp; | ||
447 | + bres.results.results_val = resultsp; | ||
448 | + bres.xdr_res = xresults; | ||
449 | + msg.rm_call.cb_cred = sys_auth->ah_cred; | ||
450 | + msg.rm_call.cb_verf = sys_auth->ah_verf; | ||
451 | + xdrmem_create(xdrs, outbuf, maxbufsize, XDR_ENCODE); | ||
452 | + if ((!xdr_callmsg(xdrs, &msg)) || | ||
453 | + (!xdr_rpcb_rmtcallargs(xdrs, | ||
454 | + (struct rpcb_rmtcallargs *)(void *)&barg))) { | ||
455 | + stat = RPC_CANTENCODEARGS; | ||
456 | + goto done_broad; | ||
457 | + } | ||
458 | + outlen = xdr_getpos(xdrs); | ||
459 | + xdr_destroy(xdrs); | ||
460 | + | ||
461 | +#ifdef PORTMAP | ||
462 | + /* Prepare the packet for version 2 PORTMAP */ | ||
463 | + if (pmap_flag) { | ||
464 | + msg.rm_xid++; /* One way to distinguish */ | ||
465 | + msg.rm_call.cb_prog = PMAPPROG; | ||
466 | + msg.rm_call.cb_vers = PMAPVERS; | ||
467 | + msg.rm_call.cb_proc = PMAPPROC_CALLIT; | ||
468 | + barg_pmap.prog = prog; | ||
469 | + barg_pmap.vers = vers; | ||
470 | + barg_pmap.proc = proc; | ||
471 | + barg_pmap.args_ptr = argsp; | ||
472 | + barg_pmap.xdr_args = xargs; | ||
473 | + bres_pmap.port_ptr = &port; | ||
474 | + bres_pmap.xdr_results = xresults; | ||
475 | + bres_pmap.results_ptr = resultsp; | ||
476 | + xdrmem_create(xdrs, outbuf_pmap, udpbufsz, XDR_ENCODE); | ||
477 | + if ((! xdr_callmsg(xdrs, &msg)) || | ||
478 | + (! xdr_rmtcall_args(xdrs, &barg_pmap))) { | ||
479 | + stat = RPC_CANTENCODEARGS; | ||
480 | + goto done_broad; | ||
481 | + } | ||
482 | + outlen_pmap = xdr_getpos(xdrs); | ||
483 | + xdr_destroy(xdrs); | ||
484 | + } | ||
485 | +#endif /* PORTMAP */ | ||
486 | + | ||
487 | + /* | ||
488 | + * Basic loop: broadcast the packets to transports which | ||
489 | + * support data packets of size such that one can encode | ||
490 | + * all the arguments. | ||
491 | + * Wait a while for response(s). | ||
492 | + * The response timeout grows larger per iteration. | ||
493 | + */ | ||
494 | + for (msec = inittime; msec <= waittime; msec += msec) { | ||
495 | + struct broadif *bip; | ||
496 | + | ||
497 | + /* Broadcast all the packets now */ | ||
498 | + for (i = 0; i < fdlistno; i++) { | ||
499 | + if (fdlist[i].dsize < outlen) { | ||
500 | + stat = RPC_CANTSEND; | ||
501 | + continue; | ||
502 | + } | ||
503 | + for (bip = TAILQ_FIRST(&fdlist[i].nal); bip != NULL; | ||
504 | + bip = TAILQ_NEXT(bip, link)) { | ||
505 | + void *addr; | ||
506 | + | ||
507 | + addr = &bip->broadaddr; | ||
508 | + | ||
509 | + __rpc_broadenable(fdlist[i].af, fdlist[i].fd, | ||
510 | + bip); | ||
511 | + | ||
512 | + /* | ||
513 | + * Only use version 3 if lowvers is not set | ||
514 | + */ | ||
515 | + | ||
516 | + if (!__rpc_lowvers) | ||
517 | + if (sendto(fdlist[i].fd, outbuf, | ||
518 | + outlen, 0, (struct sockaddr*)addr, | ||
519 | + (size_t)fdlist[i].asize) != | ||
520 | + outlen) { | ||
521 | + LIBTIRPC_DEBUG(1, | ||
522 | + ("rpc_broadcast_exp: sendto failed: errno %d", errno)); | ||
523 | + warnx("rpc_broadcast_exp: cannot send broadcast packet"); | ||
524 | + stat = RPC_CANTSEND; | ||
525 | + continue; | ||
526 | + }; | ||
527 | + if (!__rpc_lowvers) | ||
528 | + LIBTIRPC_DEBUG(3, ("rpc_broadcast_exp: Broadcast packet sent for %s\n", | ||
529 | + fdlist[i].nconf->nc_netid)); | ||
530 | +#ifdef PORTMAP | ||
531 | + /* | ||
532 | + * Send the version 2 packet also | ||
533 | + * for UDP/IP | ||
534 | + */ | ||
535 | + if (pmap_flag && | ||
536 | + fdlist[i].proto == IPPROTO_UDP) { | ||
537 | + if (sendto(fdlist[i].fd, outbuf_pmap, | ||
538 | + outlen_pmap, 0, addr, | ||
539 | + (size_t)fdlist[i].asize) != | ||
540 | + outlen_pmap) { | ||
541 | + warnx("clnt_bcast: " | ||
542 | + "Cannot send broadcast packet"); | ||
543 | + stat = RPC_CANTSEND; | ||
544 | + continue; | ||
545 | + } | ||
546 | + } | ||
547 | + LIBTIRPC_DEBUG(3, ("rpc_broadcast_exp: PMAP Broadcast packet sent for %s\n", | ||
548 | + fdlist[i].nconf->nc_netid)); | ||
549 | +#endif /* PORTMAP */ | ||
550 | + } | ||
551 | + /* End for sending all packets on this transport */ | ||
552 | + } /* End for sending on all transports */ | ||
553 | + | ||
554 | + if (eachresult == NULL) { | ||
555 | + stat = RPC_SUCCESS; | ||
556 | + goto done_broad; | ||
557 | + } | ||
558 | + | ||
559 | + /* | ||
560 | + * Get all the replies from these broadcast requests | ||
561 | + */ | ||
562 | + recv_again: | ||
563 | + | ||
564 | + switch (pollretval = poll(pfd, fdlistno, msec)) { | ||
565 | + case 0: /* timed out */ | ||
566 | + stat = RPC_TIMEDOUT; | ||
567 | + continue; | ||
568 | + case -1: /* some kind of error - we ignore it */ | ||
569 | + goto recv_again; | ||
570 | + } /* end of poll results switch */ | ||
571 | + | ||
572 | + for (i = fds_found = 0; | ||
573 | + i < fdlistno && fds_found < pollretval; i++) { | ||
574 | + bool_t done = FALSE; | ||
575 | + | ||
576 | + if (pfd[i].revents == 0) | ||
577 | + continue; | ||
578 | + else if (pfd[i].revents & POLLNVAL) { | ||
579 | + /* | ||
580 | + * Something bad has happened to this descri- | ||
581 | + * ptor. We can cause _poll() to ignore | ||
582 | + * it simply by using a negative fd. We do that | ||
583 | + * rather than compacting the pfd[] and fdlist[] | ||
584 | + * arrays. | ||
585 | + */ | ||
586 | + pfd[i].fd = -1; | ||
587 | + fds_found++; | ||
588 | + continue; | ||
589 | + } else | ||
590 | + fds_found++; | ||
591 | + LIBTIRPC_DEBUG(3, ("rpc_broadcast_exp: response for %s\n", | ||
592 | + fdlist[i].nconf->nc_netid)); | ||
593 | + try_again: | ||
594 | + inlen = recvfrom(fdlist[i].fd, inbuf, fdlist[i].dsize, | ||
595 | + 0, (struct sockaddr *)(void *)&fdlist[i].raddr, | ||
596 | + &fdlist[i].asize); | ||
597 | + if (inlen < 0) { | ||
598 | + if (errno == EINTR) | ||
599 | + goto try_again; | ||
600 | + warnx("clnt_bcast: Cannot receive reply to " | ||
601 | + "broadcast"); | ||
602 | + stat = RPC_CANTRECV; | ||
603 | + continue; | ||
604 | + } | ||
605 | + if (inlen < sizeof (u_int32_t)) | ||
606 | + continue; /* Drop that and go ahead */ | ||
607 | + /* | ||
608 | + * see if reply transaction id matches sent id. | ||
609 | + * If so, decode the results. If return id is xid + 1 | ||
610 | + * it was a PORTMAP reply | ||
611 | + */ | ||
612 | + if (*((u_int32_t *)(void *)(inbuf)) == | ||
613 | + *((u_int32_t *)(void *)(outbuf))) { | ||
614 | + pmap_reply_flag = 0; | ||
615 | + msg.acpted_rply.ar_verf = _null_auth; | ||
616 | + msg.acpted_rply.ar_results.where = | ||
617 | + (caddr_t)(void *)&bres; | ||
618 | + msg.acpted_rply.ar_results.proc = | ||
619 | + (xdrproc_t)xdr_rpcb_rmtcallres; | ||
620 | +#ifdef PORTMAP | ||
621 | + } else if (pmap_flag && | ||
622 | + *((u_int32_t *)(void *)(inbuf)) == | ||
623 | + *((u_int32_t *)(void *)(outbuf_pmap))) { | ||
624 | + pmap_reply_flag = 1; | ||
625 | + msg.acpted_rply.ar_verf = _null_auth; | ||
626 | + msg.acpted_rply.ar_results.where = | ||
627 | + (caddr_t)(void *)&bres_pmap; | ||
628 | + msg.acpted_rply.ar_results.proc = | ||
629 | + (xdrproc_t)xdr_rmtcallres; | ||
630 | +#endif /* PORTMAP */ | ||
631 | + } else | ||
632 | + continue; | ||
633 | + xdrmem_create(xdrs, inbuf, (u_int)inlen, XDR_DECODE); | ||
634 | + if (xdr_replymsg(xdrs, &msg)) { | ||
635 | + if ((msg.rm_reply.rp_stat == MSG_ACCEPTED) && | ||
636 | + (msg.acpted_rply.ar_stat == SUCCESS)) { | ||
637 | + struct netbuf *np; | ||
638 | +#ifdef PORTMAP | ||
639 | + struct netbuf taddr; | ||
640 | + struct sockaddr_in sin; | ||
641 | + | ||
642 | + if (pmap_flag && pmap_reply_flag) { | ||
643 | + memcpy(&sin, &fdlist[i].raddr, sizeof(sin)); | ||
644 | + sin.sin_port = htons((u_short)port); | ||
645 | + memcpy(&fdlist[i].raddr, &sin, sizeof(sin)); | ||
646 | + taddr.len = taddr.maxlen = | ||
647 | + sizeof(fdlist[i].raddr); | ||
648 | + taddr.buf = &fdlist[i].raddr; | ||
649 | + done = (*eachresult)(resultsp, | ||
650 | + &taddr, fdlist[i].nconf); | ||
651 | + } else { | ||
652 | +#endif /* PORTMAP */ | ||
653 | + LIBTIRPC_DEBUG(3, ("rpc_broadcast_exp: uaddr %s\n", uaddrp)); | ||
654 | + np = uaddr2taddr( | ||
655 | + fdlist[i].nconf, uaddrp); | ||
656 | + /* Some misguided rpcbind implemenations | ||
657 | + * seem to return an IPv6 uaddr in IPv4 | ||
658 | + * responses. */ | ||
659 | + if (np == NULL) | ||
660 | + np = __ipv6v4_fixup( | ||
661 | + &fdlist[i].raddr, | ||
662 | + uaddrp); | ||
663 | + if (np != NULL) { | ||
664 | + done = (*eachresult)(resultsp, | ||
665 | + np, fdlist[i].nconf); | ||
666 | + free(np); | ||
667 | + } | ||
668 | +#ifdef PORTMAP | ||
669 | + } | ||
670 | +#endif /* PORTMAP */ | ||
671 | + } | ||
672 | + /* otherwise, we just ignore the errors ... */ | ||
673 | + } | ||
674 | + /* else some kind of deserialization problem ... */ | ||
675 | + | ||
676 | + xdrs->x_op = XDR_FREE; | ||
677 | + msg.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void; | ||
678 | + (void) xdr_replymsg(xdrs, &msg); | ||
679 | + (void) (*xresults)(xdrs, resultsp); | ||
680 | + XDR_DESTROY(xdrs); | ||
681 | + if (done) { | ||
682 | + stat = RPC_SUCCESS; | ||
683 | + goto done_broad; | ||
684 | + } else { | ||
685 | + goto recv_again; | ||
686 | + } | ||
687 | + } /* The recv for loop */ | ||
688 | + } /* The giant for loop */ | ||
689 | + | ||
690 | +done_broad: | ||
691 | + if (inbuf) | ||
692 | + (void) free(inbuf); | ||
693 | + if (outbuf) | ||
694 | + (void) free(outbuf); | ||
695 | +#ifdef PORTMAP | ||
696 | + if (outbuf_pmap) | ||
697 | + (void) free(outbuf_pmap); | ||
698 | +#endif /* PORTMAP */ | ||
699 | + for (i = 0; i < fdlistno; i++) { | ||
700 | + (void)close(fdlist[i].fd); | ||
701 | + __rpc_freebroadifs(&fdlist[i].nal); | ||
702 | + } | ||
703 | + AUTH_DESTROY(sys_auth); | ||
704 | + (void) __rpc_endconf(handle); | ||
705 | + | ||
706 | + return (stat); | ||
707 | +} | ||
708 | + | ||
709 | + | ||
710 | +enum clnt_stat | ||
711 | +rpc_broadcast(prog, vers, proc, xargs, argsp, xresults, resultsp, | ||
712 | + eachresult, nettype) | ||
713 | + rpcprog_t prog; /* program number */ | ||
714 | + rpcvers_t vers; /* version number */ | ||
715 | + rpcproc_t proc; /* procedure number */ | ||
716 | + xdrproc_t xargs; /* xdr routine for args */ | ||
717 | + caddr_t argsp; /* pointer to args */ | ||
718 | + xdrproc_t xresults; /* xdr routine for results */ | ||
719 | + caddr_t resultsp; /* pointer to results */ | ||
720 | + resultproc_t eachresult; /* call with each result obtained */ | ||
721 | + const char *nettype; /* transport type */ | ||
722 | +{ | ||
723 | + enum clnt_stat dummy; | ||
724 | + | ||
725 | + dummy = rpc_broadcast_exp(prog, vers, proc, xargs, argsp, | ||
726 | + xresults, resultsp, eachresult, | ||
727 | + INITTIME, WAITTIME, nettype); | ||
728 | + return (dummy); | ||
729 | +} | ||
730 | diff -Naur libtirpc-1.0.1-orig/tirpc/queue.h libtirpc-1.0.1/tirpc/queue.h | ||
731 | --- libtirpc-1.0.1-orig/tirpc/queue.h 1970-01-01 02:00:00.000000000 +0200 | ||
732 | +++ libtirpc-1.0.1/tirpc/queue.h 2015-12-21 17:02:44.427853905 +0200 | ||
733 | @@ -0,0 +1,846 @@ | ||
734 | +/* $NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $ */ | ||
735 | + | ||
736 | +/* | ||
737 | + * Copyright (c) 1991, 1993 | ||
738 | + * The Regents of the University of California. All rights reserved. | ||
739 | + * | ||
740 | + * Redistribution and use in source and binary forms, with or without | ||
741 | + * modification, are permitted provided that the following conditions | ||
742 | + * are met: | ||
743 | + * 1. Redistributions of source code must retain the above copyright | ||
744 | + * notice, this list of conditions and the following disclaimer. | ||
745 | + * 2. Redistributions in binary form must reproduce the above copyright | ||
746 | + * notice, this list of conditions and the following disclaimer in the | ||
747 | + * documentation and/or other materials provided with the distribution. | ||
748 | + * 3. Neither the name of the University nor the names of its contributors | ||
749 | + * may be used to endorse or promote products derived from this software | ||
750 | + * without specific prior written permission. | ||
751 | + * | ||
752 | + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | ||
753 | + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
754 | + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
755 | + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | ||
756 | + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
757 | + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
758 | + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
759 | + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
760 | + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
761 | + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
762 | + * SUCH DAMAGE. | ||
763 | + * | ||
764 | + * @(#)queue.h 8.5 (Berkeley) 8/20/94 | ||
765 | + */ | ||
766 | + | ||
767 | +#ifndef _SYS_QUEUE_H_ | ||
768 | +#define _SYS_QUEUE_H_ | ||
769 | + | ||
770 | +/* | ||
771 | + * This file defines five types of data structures: singly-linked lists, | ||
772 | + * lists, simple queues, tail queues, and circular queues. | ||
773 | + * | ||
774 | + * A singly-linked list is headed by a single forward pointer. The | ||
775 | + * elements are singly linked for minimum space and pointer manipulation | ||
776 | + * overhead at the expense of O(n) removal for arbitrary elements. New | ||
777 | + * elements can be added to the list after an existing element or at the | ||
778 | + * head of the list. Elements being removed from the head of the list | ||
779 | + * should use the explicit macro for this purpose for optimum | ||
780 | + * efficiency. A singly-linked list may only be traversed in the forward | ||
781 | + * direction. Singly-linked lists are ideal for applications with large | ||
782 | + * datasets and few or no removals or for implementing a LIFO queue. | ||
783 | + * | ||
784 | + * A list is headed by a single forward pointer (or an array of forward | ||
785 | + * pointers for a hash table header). The elements are doubly linked | ||
786 | + * so that an arbitrary element can be removed without a need to | ||
787 | + * traverse the list. New elements can be added to the list before | ||
788 | + * or after an existing element or at the head of the list. A list | ||
789 | + * may only be traversed in the forward direction. | ||
790 | + * | ||
791 | + * A simple queue is headed by a pair of pointers, one the head of the | ||
792 | + * list and the other to the tail of the list. The elements are singly | ||
793 | + * linked to save space, so elements can only be removed from the | ||
794 | + * head of the list. New elements can be added to the list after | ||
795 | + * an existing element, at the head of the list, or at the end of the | ||
796 | + * list. A simple queue may only be traversed in the forward direction. | ||
797 | + * | ||
798 | + * A tail queue is headed by a pair of pointers, one to the head of the | ||
799 | + * list and the other to the tail of the list. The elements are doubly | ||
800 | + * linked so that an arbitrary element can be removed without a need to | ||
801 | + * traverse the list. New elements can be added to the list before or | ||
802 | + * after an existing element, at the head of the list, or at the end of | ||
803 | + * the list. A tail queue may be traversed in either direction. | ||
804 | + * | ||
805 | + * A circle queue is headed by a pair of pointers, one to the head of the | ||
806 | + * list and the other to the tail of the list. The elements are doubly | ||
807 | + * linked so that an arbitrary element can be removed without a need to | ||
808 | + * traverse the list. New elements can be added to the list before or after | ||
809 | + * an existing element, at the head of the list, or at the end of the list. | ||
810 | + * A circle queue may be traversed in either direction, but has a more | ||
811 | + * complex end of list detection. | ||
812 | + * | ||
813 | + * For details on the use of these macros, see the queue(3) manual page. | ||
814 | + */ | ||
815 | + | ||
816 | +/* | ||
817 | + * Include the definition of NULL only on NetBSD because sys/null.h | ||
818 | + * is not available elsewhere. This conditional makes the header | ||
819 | + * portable and it can simply be dropped verbatim into any system. | ||
820 | + * The caveat is that on other systems some other header | ||
821 | + * must provide NULL before the macros can be used. | ||
822 | + */ | ||
823 | +#ifdef __NetBSD__ | ||
824 | +#include <sys/null.h> | ||
825 | +#endif | ||
826 | + | ||
827 | +#if defined(QUEUEDEBUG) | ||
828 | +# if defined(_KERNEL) | ||
829 | +# define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__) | ||
830 | +# else | ||
831 | +# include <err.h> | ||
832 | +# define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__) | ||
833 | +# endif | ||
834 | +#endif | ||
835 | + | ||
836 | +/* | ||
837 | + * Singly-linked List definitions. | ||
838 | + */ | ||
839 | +#define SLIST_HEAD(name, type) \ | ||
840 | +struct name { \ | ||
841 | + struct type *slh_first; /* first element */ \ | ||
842 | +} | ||
843 | + | ||
844 | +#define SLIST_HEAD_INITIALIZER(head) \ | ||
845 | + { NULL } | ||
846 | + | ||
847 | +#define SLIST_ENTRY(type) \ | ||
848 | +struct { \ | ||
849 | + struct type *sle_next; /* next element */ \ | ||
850 | +} | ||
851 | + | ||
852 | +/* | ||
853 | + * Singly-linked List access methods. | ||
854 | + */ | ||
855 | +#define SLIST_FIRST(head) ((head)->slh_first) | ||
856 | +#define SLIST_END(head) NULL | ||
857 | +#define SLIST_EMPTY(head) ((head)->slh_first == NULL) | ||
858 | +#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) | ||
859 | + | ||
860 | +#define SLIST_FOREACH(var, head, field) \ | ||
861 | + for((var) = (head)->slh_first; \ | ||
862 | + (var) != SLIST_END(head); \ | ||
863 | + (var) = (var)->field.sle_next) | ||
864 | + | ||
865 | +#define SLIST_FOREACH_SAFE(var, head, field, tvar) \ | ||
866 | + for ((var) = SLIST_FIRST((head)); \ | ||
867 | + (var) != SLIST_END(head) && \ | ||
868 | + ((tvar) = SLIST_NEXT((var), field), 1); \ | ||
869 | + (var) = (tvar)) | ||
870 | + | ||
871 | +/* | ||
872 | + * Singly-linked List functions. | ||
873 | + */ | ||
874 | +#define SLIST_INIT(head) do { \ | ||
875 | + (head)->slh_first = SLIST_END(head); \ | ||
876 | +} while (/*CONSTCOND*/0) | ||
877 | + | ||
878 | +#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ | ||
879 | + (elm)->field.sle_next = (slistelm)->field.sle_next; \ | ||
880 | + (slistelm)->field.sle_next = (elm); \ | ||
881 | +} while (/*CONSTCOND*/0) | ||
882 | + | ||
883 | +#define SLIST_INSERT_HEAD(head, elm, field) do { \ | ||
884 | + (elm)->field.sle_next = (head)->slh_first; \ | ||
885 | + (head)->slh_first = (elm); \ | ||
886 | +} while (/*CONSTCOND*/0) | ||
887 | + | ||
888 | +#define SLIST_REMOVE_AFTER(slistelm, field) do { \ | ||
889 | + (slistelm)->field.sle_next = \ | ||
890 | + SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \ | ||
891 | +} while (/*CONSTCOND*/0) | ||
892 | + | ||
893 | +#define SLIST_REMOVE_HEAD(head, field) do { \ | ||
894 | + (head)->slh_first = (head)->slh_first->field.sle_next; \ | ||
895 | +} while (/*CONSTCOND*/0) | ||
896 | + | ||
897 | +#define SLIST_REMOVE(head, elm, type, field) do { \ | ||
898 | + if ((head)->slh_first == (elm)) { \ | ||
899 | + SLIST_REMOVE_HEAD((head), field); \ | ||
900 | + } \ | ||
901 | + else { \ | ||
902 | + struct type *curelm = (head)->slh_first; \ | ||
903 | + while(curelm->field.sle_next != (elm)) \ | ||
904 | + curelm = curelm->field.sle_next; \ | ||
905 | + curelm->field.sle_next = \ | ||
906 | + curelm->field.sle_next->field.sle_next; \ | ||
907 | + } \ | ||
908 | +} while (/*CONSTCOND*/0) | ||
909 | + | ||
910 | + | ||
911 | +/* | ||
912 | + * List definitions. | ||
913 | + */ | ||
914 | +#define LIST_HEAD(name, type) \ | ||
915 | +struct name { \ | ||
916 | + struct type *lh_first; /* first element */ \ | ||
917 | +} | ||
918 | + | ||
919 | +#define LIST_HEAD_INITIALIZER(head) \ | ||
920 | + { NULL } | ||
921 | + | ||
922 | +#define LIST_ENTRY(type) \ | ||
923 | +struct { \ | ||
924 | + struct type *le_next; /* next element */ \ | ||
925 | + struct type **le_prev; /* address of previous next element */ \ | ||
926 | +} | ||
927 | + | ||
928 | +/* | ||
929 | + * List access methods. | ||
930 | + */ | ||
931 | +#define LIST_FIRST(head) ((head)->lh_first) | ||
932 | +#define LIST_END(head) NULL | ||
933 | +#define LIST_EMPTY(head) ((head)->lh_first == LIST_END(head)) | ||
934 | +#define LIST_NEXT(elm, field) ((elm)->field.le_next) | ||
935 | + | ||
936 | +#define LIST_FOREACH(var, head, field) \ | ||
937 | + for ((var) = ((head)->lh_first); \ | ||
938 | + (var) != LIST_END(head); \ | ||
939 | + (var) = ((var)->field.le_next)) | ||
940 | + | ||
941 | +#define LIST_FOREACH_SAFE(var, head, field, tvar) \ | ||
942 | + for ((var) = LIST_FIRST((head)); \ | ||
943 | + (var) != LIST_END(head) && \ | ||
944 | + ((tvar) = LIST_NEXT((var), field), 1); \ | ||
945 | + (var) = (tvar)) | ||
946 | + | ||
947 | +#define LIST_MOVE(head1, head2) do { \ | ||
948 | + LIST_INIT((head2)); \ | ||
949 | + if (!LIST_EMPTY((head1))) { \ | ||
950 | + (head2)->lh_first = (head1)->lh_first; \ | ||
951 | + LIST_INIT((head1)); \ | ||
952 | + } \ | ||
953 | +} while (/*CONSTCOND*/0) | ||
954 | + | ||
955 | +/* | ||
956 | + * List functions. | ||
957 | + */ | ||
958 | +#if defined(QUEUEDEBUG) | ||
959 | +#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \ | ||
960 | + if ((head)->lh_first && \ | ||
961 | + (head)->lh_first->field.le_prev != &(head)->lh_first) \ | ||
962 | + QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head), \ | ||
963 | + __FILE__, __LINE__); | ||
964 | +#define QUEUEDEBUG_LIST_OP(elm, field) \ | ||
965 | + if ((elm)->field.le_next && \ | ||
966 | + (elm)->field.le_next->field.le_prev != \ | ||
967 | + &(elm)->field.le_next) \ | ||
968 | + QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm), \ | ||
969 | + __FILE__, __LINE__); \ | ||
970 | + if (*(elm)->field.le_prev != (elm)) \ | ||
971 | + QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm), \ | ||
972 | + __FILE__, __LINE__); | ||
973 | +#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \ | ||
974 | + (elm)->field.le_next = (void *)1L; \ | ||
975 | + (elm)->field.le_prev = (void *)1L; | ||
976 | +#else | ||
977 | +#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) | ||
978 | +#define QUEUEDEBUG_LIST_OP(elm, field) | ||
979 | +#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) | ||
980 | +#endif | ||
981 | + | ||
982 | +#define LIST_INIT(head) do { \ | ||
983 | + (head)->lh_first = LIST_END(head); \ | ||
984 | +} while (/*CONSTCOND*/0) | ||
985 | + | ||
986 | +#define LIST_INSERT_AFTER(listelm, elm, field) do { \ | ||
987 | + QUEUEDEBUG_LIST_OP((listelm), field) \ | ||
988 | + if (((elm)->field.le_next = (listelm)->field.le_next) != \ | ||
989 | + LIST_END(head)) \ | ||
990 | + (listelm)->field.le_next->field.le_prev = \ | ||
991 | + &(elm)->field.le_next; \ | ||
992 | + (listelm)->field.le_next = (elm); \ | ||
993 | + (elm)->field.le_prev = &(listelm)->field.le_next; \ | ||
994 | +} while (/*CONSTCOND*/0) | ||
995 | + | ||
996 | +#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ | ||
997 | + QUEUEDEBUG_LIST_OP((listelm), field) \ | ||
998 | + (elm)->field.le_prev = (listelm)->field.le_prev; \ | ||
999 | + (elm)->field.le_next = (listelm); \ | ||
1000 | + *(listelm)->field.le_prev = (elm); \ | ||
1001 | + (listelm)->field.le_prev = &(elm)->field.le_next; \ | ||
1002 | +} while (/*CONSTCOND*/0) | ||
1003 | + | ||
1004 | +#define LIST_INSERT_HEAD(head, elm, field) do { \ | ||
1005 | + QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \ | ||
1006 | + if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\ | ||
1007 | + (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ | ||
1008 | + (head)->lh_first = (elm); \ | ||
1009 | + (elm)->field.le_prev = &(head)->lh_first; \ | ||
1010 | +} while (/*CONSTCOND*/0) | ||
1011 | + | ||
1012 | +#define LIST_REMOVE(elm, field) do { \ | ||
1013 | + QUEUEDEBUG_LIST_OP((elm), field) \ | ||
1014 | + if ((elm)->field.le_next != NULL) \ | ||
1015 | + (elm)->field.le_next->field.le_prev = \ | ||
1016 | + (elm)->field.le_prev; \ | ||
1017 | + *(elm)->field.le_prev = (elm)->field.le_next; \ | ||
1018 | + QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \ | ||
1019 | +} while (/*CONSTCOND*/0) | ||
1020 | + | ||
1021 | +#define LIST_REPLACE(elm, elm2, field) do { \ | ||
1022 | + if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \ | ||
1023 | + (elm2)->field.le_next->field.le_prev = \ | ||
1024 | + &(elm2)->field.le_next; \ | ||
1025 | + (elm2)->field.le_prev = (elm)->field.le_prev; \ | ||
1026 | + *(elm2)->field.le_prev = (elm2); \ | ||
1027 | + QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \ | ||
1028 | +} while (/*CONSTCOND*/0) | ||
1029 | + | ||
1030 | +/* | ||
1031 | + * Simple queue definitions. | ||
1032 | + */ | ||
1033 | +#define SIMPLEQ_HEAD(name, type) \ | ||
1034 | +struct name { \ | ||
1035 | + struct type *sqh_first; /* first element */ \ | ||
1036 | + struct type **sqh_last; /* addr of last next element */ \ | ||
1037 | +} | ||
1038 | + | ||
1039 | +#define SIMPLEQ_HEAD_INITIALIZER(head) \ | ||
1040 | + { NULL, &(head).sqh_first } | ||
1041 | + | ||
1042 | +#define SIMPLEQ_ENTRY(type) \ | ||
1043 | +struct { \ | ||
1044 | + struct type *sqe_next; /* next element */ \ | ||
1045 | +} | ||
1046 | + | ||
1047 | +/* | ||
1048 | + * Simple queue access methods. | ||
1049 | + */ | ||
1050 | +#define SIMPLEQ_FIRST(head) ((head)->sqh_first) | ||
1051 | +#define SIMPLEQ_END(head) NULL | ||
1052 | +#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == SIMPLEQ_END(head)) | ||
1053 | +#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) | ||
1054 | + | ||
1055 | +#define SIMPLEQ_FOREACH(var, head, field) \ | ||
1056 | + for ((var) = ((head)->sqh_first); \ | ||
1057 | + (var) != SIMPLEQ_END(head); \ | ||
1058 | + (var) = ((var)->field.sqe_next)) | ||
1059 | + | ||
1060 | +#define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \ | ||
1061 | + for ((var) = ((head)->sqh_first); \ | ||
1062 | + (var) != SIMPLEQ_END(head) && \ | ||
1063 | + ((next = ((var)->field.sqe_next)), 1); \ | ||
1064 | + (var) = (next)) | ||
1065 | + | ||
1066 | +/* | ||
1067 | + * Simple queue functions. | ||
1068 | + */ | ||
1069 | +#define SIMPLEQ_INIT(head) do { \ | ||
1070 | + (head)->sqh_first = NULL; \ | ||
1071 | + (head)->sqh_last = &(head)->sqh_first; \ | ||
1072 | +} while (/*CONSTCOND*/0) | ||
1073 | + | ||
1074 | +#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ | ||
1075 | + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ | ||
1076 | + (head)->sqh_last = &(elm)->field.sqe_next; \ | ||
1077 | + (head)->sqh_first = (elm); \ | ||
1078 | +} while (/*CONSTCOND*/0) | ||
1079 | + | ||
1080 | +#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ | ||
1081 | + (elm)->field.sqe_next = NULL; \ | ||
1082 | + *(head)->sqh_last = (elm); \ | ||
1083 | + (head)->sqh_last = &(elm)->field.sqe_next; \ | ||
1084 | +} while (/*CONSTCOND*/0) | ||
1085 | + | ||
1086 | +#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ | ||
1087 | + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ | ||
1088 | + (head)->sqh_last = &(elm)->field.sqe_next; \ | ||
1089 | + (listelm)->field.sqe_next = (elm); \ | ||
1090 | +} while (/*CONSTCOND*/0) | ||
1091 | + | ||
1092 | +#define SIMPLEQ_REMOVE_HEAD(head, field) do { \ | ||
1093 | + if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ | ||
1094 | + (head)->sqh_last = &(head)->sqh_first; \ | ||
1095 | +} while (/*CONSTCOND*/0) | ||
1096 | + | ||
1097 | +#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \ | ||
1098 | + if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \ | ||
1099 | + == NULL) \ | ||
1100 | + (head)->sqh_last = &(elm)->field.sqe_next; \ | ||
1101 | +} while (/*CONSTCOND*/0) | ||
1102 | + | ||
1103 | +#define SIMPLEQ_REMOVE(head, elm, type, field) do { \ | ||
1104 | + if ((head)->sqh_first == (elm)) { \ | ||
1105 | + SIMPLEQ_REMOVE_HEAD((head), field); \ | ||
1106 | + } else { \ | ||
1107 | + struct type *curelm = (head)->sqh_first; \ | ||
1108 | + while (curelm->field.sqe_next != (elm)) \ | ||
1109 | + curelm = curelm->field.sqe_next; \ | ||
1110 | + if ((curelm->field.sqe_next = \ | ||
1111 | + curelm->field.sqe_next->field.sqe_next) == NULL) \ | ||
1112 | + (head)->sqh_last = &(curelm)->field.sqe_next; \ | ||
1113 | + } \ | ||
1114 | +} while (/*CONSTCOND*/0) | ||
1115 | + | ||
1116 | +#define SIMPLEQ_CONCAT(head1, head2) do { \ | ||
1117 | + if (!SIMPLEQ_EMPTY((head2))) { \ | ||
1118 | + *(head1)->sqh_last = (head2)->sqh_first; \ | ||
1119 | + (head1)->sqh_last = (head2)->sqh_last; \ | ||
1120 | + SIMPLEQ_INIT((head2)); \ | ||
1121 | + } \ | ||
1122 | +} while (/*CONSTCOND*/0) | ||
1123 | + | ||
1124 | +#define SIMPLEQ_LAST(head, type, field) \ | ||
1125 | + (SIMPLEQ_EMPTY((head)) ? \ | ||
1126 | + NULL : \ | ||
1127 | + ((struct type *)(void *) \ | ||
1128 | + ((char *)((head)->sqh_last) - offsetof(struct type, field)))) | ||
1129 | + | ||
1130 | +/* | ||
1131 | + * Tail queue definitions. | ||
1132 | + */ | ||
1133 | +#define _TAILQ_HEAD(name, type, qual) \ | ||
1134 | +struct name { \ | ||
1135 | + qual type *tqh_first; /* first element */ \ | ||
1136 | + qual type *qual *tqh_last; /* addr of last next element */ \ | ||
1137 | +} | ||
1138 | +#define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,) | ||
1139 | + | ||
1140 | +#define TAILQ_HEAD_INITIALIZER(head) \ | ||
1141 | + { TAILQ_END(head), &(head).tqh_first } | ||
1142 | + | ||
1143 | +#define _TAILQ_ENTRY(type, qual) \ | ||
1144 | +struct { \ | ||
1145 | + qual type *tqe_next; /* next element */ \ | ||
1146 | + qual type *qual *tqe_prev; /* address of previous next element */\ | ||
1147 | +} | ||
1148 | +#define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,) | ||
1149 | + | ||
1150 | +/* | ||
1151 | + * Tail queue access methods. | ||
1152 | + */ | ||
1153 | +#define TAILQ_FIRST(head) ((head)->tqh_first) | ||
1154 | +#define TAILQ_END(head) (NULL) | ||
1155 | +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) | ||
1156 | +#define TAILQ_LAST(head, headname) \ | ||
1157 | + (*(((struct headname *)((head)->tqh_last))->tqh_last)) | ||
1158 | +#define TAILQ_PREV(elm, headname, field) \ | ||
1159 | + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) | ||
1160 | +#define TAILQ_EMPTY(head) (TAILQ_FIRST(head) == TAILQ_END(head)) | ||
1161 | + | ||
1162 | + | ||
1163 | +#define TAILQ_FOREACH(var, head, field) \ | ||
1164 | + for ((var) = ((head)->tqh_first); \ | ||
1165 | + (var) != TAILQ_END(head); \ | ||
1166 | + (var) = ((var)->field.tqe_next)) | ||
1167 | + | ||
1168 | +#define TAILQ_FOREACH_SAFE(var, head, field, next) \ | ||
1169 | + for ((var) = ((head)->tqh_first); \ | ||
1170 | + (var) != TAILQ_END(head) && \ | ||
1171 | + ((next) = TAILQ_NEXT(var, field), 1); (var) = (next)) | ||
1172 | + | ||
1173 | +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ | ||
1174 | + for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\ | ||
1175 | + (var) != TAILQ_END(head); \ | ||
1176 | + (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) | ||
1177 | + | ||
1178 | +#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \ | ||
1179 | + for ((var) = TAILQ_LAST((head), headname); \ | ||
1180 | + (var) != TAILQ_END(head) && \ | ||
1181 | + ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev)) | ||
1182 | + | ||
1183 | +/* | ||
1184 | + * Tail queue functions. | ||
1185 | + */ | ||
1186 | +#if defined(QUEUEDEBUG) | ||
1187 | +#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \ | ||
1188 | + if ((head)->tqh_first && \ | ||
1189 | + (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \ | ||
1190 | + QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head), \ | ||
1191 | + __FILE__, __LINE__); | ||
1192 | +#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \ | ||
1193 | + if (*(head)->tqh_last != NULL) \ | ||
1194 | + QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head), \ | ||
1195 | + __FILE__, __LINE__); | ||
1196 | +#define QUEUEDEBUG_TAILQ_OP(elm, field) \ | ||
1197 | + if ((elm)->field.tqe_next && \ | ||
1198 | + (elm)->field.tqe_next->field.tqe_prev != \ | ||
1199 | + &(elm)->field.tqe_next) \ | ||
1200 | + QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm), \ | ||
1201 | + __FILE__, __LINE__); \ | ||
1202 | + if (*(elm)->field.tqe_prev != (elm)) \ | ||
1203 | + QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm), \ | ||
1204 | + __FILE__, __LINE__); | ||
1205 | +#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \ | ||
1206 | + if ((elm)->field.tqe_next == NULL && \ | ||
1207 | + (head)->tqh_last != &(elm)->field.tqe_next) \ | ||
1208 | + QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\ | ||
1209 | + (head), (elm), __FILE__, __LINE__); | ||
1210 | +#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \ | ||
1211 | + (elm)->field.tqe_next = (void *)1L; \ | ||
1212 | + (elm)->field.tqe_prev = (void *)1L; | ||
1213 | +#else | ||
1214 | +#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) | ||
1215 | +#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) | ||
1216 | +#define QUEUEDEBUG_TAILQ_OP(elm, field) | ||
1217 | +#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) | ||
1218 | +#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) | ||
1219 | +#endif | ||
1220 | + | ||
1221 | +#define TAILQ_INIT(head) do { \ | ||
1222 | + (head)->tqh_first = TAILQ_END(head); \ | ||
1223 | + (head)->tqh_last = &(head)->tqh_first; \ | ||
1224 | +} while (/*CONSTCOND*/0) | ||
1225 | + | ||
1226 | +#define TAILQ_INSERT_HEAD(head, elm, field) do { \ | ||
1227 | + QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \ | ||
1228 | + if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\ | ||
1229 | + (head)->tqh_first->field.tqe_prev = \ | ||
1230 | + &(elm)->field.tqe_next; \ | ||
1231 | + else \ | ||
1232 | + (head)->tqh_last = &(elm)->field.tqe_next; \ | ||
1233 | + (head)->tqh_first = (elm); \ | ||
1234 | + (elm)->field.tqe_prev = &(head)->tqh_first; \ | ||
1235 | +} while (/*CONSTCOND*/0) | ||
1236 | + | ||
1237 | +#define TAILQ_INSERT_TAIL(head, elm, field) do { \ | ||
1238 | + QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \ | ||
1239 | + (elm)->field.tqe_next = TAILQ_END(head); \ | ||
1240 | + (elm)->field.tqe_prev = (head)->tqh_last; \ | ||
1241 | + *(head)->tqh_last = (elm); \ | ||
1242 | + (head)->tqh_last = &(elm)->field.tqe_next; \ | ||
1243 | +} while (/*CONSTCOND*/0) | ||
1244 | + | ||
1245 | +#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ | ||
1246 | + QUEUEDEBUG_TAILQ_OP((listelm), field) \ | ||
1247 | + if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != \ | ||
1248 | + TAILQ_END(head)) \ | ||
1249 | + (elm)->field.tqe_next->field.tqe_prev = \ | ||
1250 | + &(elm)->field.tqe_next; \ | ||
1251 | + else \ | ||
1252 | + (head)->tqh_last = &(elm)->field.tqe_next; \ | ||
1253 | + (listelm)->field.tqe_next = (elm); \ | ||
1254 | + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ | ||
1255 | +} while (/*CONSTCOND*/0) | ||
1256 | + | ||
1257 | +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ | ||
1258 | + QUEUEDEBUG_TAILQ_OP((listelm), field) \ | ||
1259 | + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ | ||
1260 | + (elm)->field.tqe_next = (listelm); \ | ||
1261 | + *(listelm)->field.tqe_prev = (elm); \ | ||
1262 | + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ | ||
1263 | +} while (/*CONSTCOND*/0) | ||
1264 | + | ||
1265 | +#define TAILQ_REMOVE(head, elm, field) do { \ | ||
1266 | + QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \ | ||
1267 | + QUEUEDEBUG_TAILQ_OP((elm), field) \ | ||
1268 | + if (((elm)->field.tqe_next) != TAILQ_END(head)) \ | ||
1269 | + (elm)->field.tqe_next->field.tqe_prev = \ | ||
1270 | + (elm)->field.tqe_prev; \ | ||
1271 | + else \ | ||
1272 | + (head)->tqh_last = (elm)->field.tqe_prev; \ | ||
1273 | + *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ | ||
1274 | + QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \ | ||
1275 | +} while (/*CONSTCOND*/0) | ||
1276 | + | ||
1277 | +#define TAILQ_REPLACE(head, elm, elm2, field) do { \ | ||
1278 | + if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != \ | ||
1279 | + TAILQ_END(head)) \ | ||
1280 | + (elm2)->field.tqe_next->field.tqe_prev = \ | ||
1281 | + &(elm2)->field.tqe_next; \ | ||
1282 | + else \ | ||
1283 | + (head)->tqh_last = &(elm2)->field.tqe_next; \ | ||
1284 | + (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \ | ||
1285 | + *(elm2)->field.tqe_prev = (elm2); \ | ||
1286 | + QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \ | ||
1287 | +} while (/*CONSTCOND*/0) | ||
1288 | + | ||
1289 | +#define TAILQ_CONCAT(head1, head2, field) do { \ | ||
1290 | + if (!TAILQ_EMPTY(head2)) { \ | ||
1291 | + *(head1)->tqh_last = (head2)->tqh_first; \ | ||
1292 | + (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ | ||
1293 | + (head1)->tqh_last = (head2)->tqh_last; \ | ||
1294 | + TAILQ_INIT((head2)); \ | ||
1295 | + } \ | ||
1296 | +} while (/*CONSTCOND*/0) | ||
1297 | + | ||
1298 | +/* | ||
1299 | + * Singly-linked Tail queue declarations. | ||
1300 | + */ | ||
1301 | +#define STAILQ_HEAD(name, type) \ | ||
1302 | +struct name { \ | ||
1303 | + struct type *stqh_first; /* first element */ \ | ||
1304 | + struct type **stqh_last; /* addr of last next element */ \ | ||
1305 | +} | ||
1306 | + | ||
1307 | +#define STAILQ_HEAD_INITIALIZER(head) \ | ||
1308 | + { NULL, &(head).stqh_first } | ||
1309 | + | ||
1310 | +#define STAILQ_ENTRY(type) \ | ||
1311 | +struct { \ | ||
1312 | + struct type *stqe_next; /* next element */ \ | ||
1313 | +} | ||
1314 | + | ||
1315 | +/* | ||
1316 | + * Singly-linked Tail queue access methods. | ||
1317 | + */ | ||
1318 | +#define STAILQ_FIRST(head) ((head)->stqh_first) | ||
1319 | +#define STAILQ_END(head) NULL | ||
1320 | +#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) | ||
1321 | +#define STAILQ_EMPTY(head) (STAILQ_FIRST(head) == STAILQ_END(head)) | ||
1322 | + | ||
1323 | +/* | ||
1324 | + * Singly-linked Tail queue functions. | ||
1325 | + */ | ||
1326 | +#define STAILQ_INIT(head) do { \ | ||
1327 | + (head)->stqh_first = NULL; \ | ||
1328 | + (head)->stqh_last = &(head)->stqh_first; \ | ||
1329 | +} while (/*CONSTCOND*/0) | ||
1330 | + | ||
1331 | +#define STAILQ_INSERT_HEAD(head, elm, field) do { \ | ||
1332 | + if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \ | ||
1333 | + (head)->stqh_last = &(elm)->field.stqe_next; \ | ||
1334 | + (head)->stqh_first = (elm); \ | ||
1335 | +} while (/*CONSTCOND*/0) | ||
1336 | + | ||
1337 | +#define STAILQ_INSERT_TAIL(head, elm, field) do { \ | ||
1338 | + (elm)->field.stqe_next = NULL; \ | ||
1339 | + *(head)->stqh_last = (elm); \ | ||
1340 | + (head)->stqh_last = &(elm)->field.stqe_next; \ | ||
1341 | +} while (/*CONSTCOND*/0) | ||
1342 | + | ||
1343 | +#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ | ||
1344 | + if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\ | ||
1345 | + (head)->stqh_last = &(elm)->field.stqe_next; \ | ||
1346 | + (listelm)->field.stqe_next = (elm); \ | ||
1347 | +} while (/*CONSTCOND*/0) | ||
1348 | + | ||
1349 | +#define STAILQ_REMOVE_HEAD(head, field) do { \ | ||
1350 | + if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \ | ||
1351 | + (head)->stqh_last = &(head)->stqh_first; \ | ||
1352 | +} while (/*CONSTCOND*/0) | ||
1353 | + | ||
1354 | +#define STAILQ_REMOVE(head, elm, type, field) do { \ | ||
1355 | + if ((head)->stqh_first == (elm)) { \ | ||
1356 | + STAILQ_REMOVE_HEAD((head), field); \ | ||
1357 | + } else { \ | ||
1358 | + struct type *curelm = (head)->stqh_first; \ | ||
1359 | + while (curelm->field.stqe_next != (elm)) \ | ||
1360 | + curelm = curelm->field.stqe_next; \ | ||
1361 | + if ((curelm->field.stqe_next = \ | ||
1362 | + curelm->field.stqe_next->field.stqe_next) == NULL) \ | ||
1363 | + (head)->stqh_last = &(curelm)->field.stqe_next; \ | ||
1364 | + } \ | ||
1365 | +} while (/*CONSTCOND*/0) | ||
1366 | + | ||
1367 | +#define STAILQ_FOREACH(var, head, field) \ | ||
1368 | + for ((var) = ((head)->stqh_first); \ | ||
1369 | + (var); \ | ||
1370 | + (var) = ((var)->field.stqe_next)) | ||
1371 | + | ||
1372 | +#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ | ||
1373 | + for ((var) = STAILQ_FIRST((head)); \ | ||
1374 | + (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ | ||
1375 | + (var) = (tvar)) | ||
1376 | + | ||
1377 | +#define STAILQ_CONCAT(head1, head2) do { \ | ||
1378 | + if (!STAILQ_EMPTY((head2))) { \ | ||
1379 | + *(head1)->stqh_last = (head2)->stqh_first; \ | ||
1380 | + (head1)->stqh_last = (head2)->stqh_last; \ | ||
1381 | + STAILQ_INIT((head2)); \ | ||
1382 | + } \ | ||
1383 | +} while (/*CONSTCOND*/0) | ||
1384 | + | ||
1385 | +#define STAILQ_LAST(head, type, field) \ | ||
1386 | + (STAILQ_EMPTY((head)) ? \ | ||
1387 | + NULL : \ | ||
1388 | + ((struct type *)(void *) \ | ||
1389 | + ((char *)((head)->stqh_last) - offsetof(struct type, field)))) | ||
1390 | + | ||
1391 | + | ||
1392 | +#ifndef _KERNEL | ||
1393 | +/* | ||
1394 | + * Circular queue definitions. Do not use. We still keep the macros | ||
1395 | + * for compatibility but because of pointer aliasing issues their use | ||
1396 | + * is discouraged! | ||
1397 | + */ | ||
1398 | + | ||
1399 | +/* | ||
1400 | + * __launder_type(): We use this ugly hack to work around the compiler | ||
1401 | + * noticing that two types may not alias each other and elide tests in code. | ||
1402 | + * We hit this in the CIRCLEQ macros when comparing 'struct name *' and | ||
1403 | + * 'struct type *' (see CIRCLEQ_HEAD()). Modern compilers (such as GCC | ||
1404 | + * 4.8) declare these comparisons as always false, causing the code to | ||
1405 | + * not run as designed. | ||
1406 | + * | ||
1407 | + * This hack is only to be used for comparisons and thus can be fully const. | ||
1408 | + * Do not use for assignment. | ||
1409 | + * | ||
1410 | + * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix | ||
1411 | + * this by changing the head/tail sentinel values, but see the note above | ||
1412 | + * this one. | ||
1413 | + */ | ||
1414 | +static __inline const void * __launder_type(const void *); | ||
1415 | +static __inline const void * | ||
1416 | +__launder_type(const void *__x) | ||
1417 | +{ | ||
1418 | + __asm __volatile("" : "+r" (__x)); | ||
1419 | + return __x; | ||
1420 | +} | ||
1421 | + | ||
1422 | +#if defined(QUEUEDEBUG) | ||
1423 | +#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \ | ||
1424 | + if ((head)->cqh_first != CIRCLEQ_ENDC(head) && \ | ||
1425 | + (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head)) \ | ||
1426 | + QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head), \ | ||
1427 | + __FILE__, __LINE__); \ | ||
1428 | + if ((head)->cqh_last != CIRCLEQ_ENDC(head) && \ | ||
1429 | + (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head)) \ | ||
1430 | + QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head), \ | ||
1431 | + __FILE__, __LINE__); | ||
1432 | +#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \ | ||
1433 | + if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) { \ | ||
1434 | + if ((head)->cqh_last != (elm)) \ | ||
1435 | + QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d", \ | ||
1436 | + (elm), __FILE__, __LINE__); \ | ||
1437 | + } else { \ | ||
1438 | + if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \ | ||
1439 | + QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d", \ | ||
1440 | + (elm), __FILE__, __LINE__); \ | ||
1441 | + } \ | ||
1442 | + if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) { \ | ||
1443 | + if ((head)->cqh_first != (elm)) \ | ||
1444 | + QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d", \ | ||
1445 | + (elm), __FILE__, __LINE__); \ | ||
1446 | + } else { \ | ||
1447 | + if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \ | ||
1448 | + QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d", \ | ||
1449 | + (elm), __FILE__, __LINE__); \ | ||
1450 | + } | ||
1451 | +#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \ | ||
1452 | + (elm)->field.cqe_next = (void *)1L; \ | ||
1453 | + (elm)->field.cqe_prev = (void *)1L; | ||
1454 | +#else | ||
1455 | +#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) | ||
1456 | +#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) | ||
1457 | +#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) | ||
1458 | +#endif | ||
1459 | + | ||
1460 | +#define CIRCLEQ_HEAD(name, type) \ | ||
1461 | +struct name { \ | ||
1462 | + struct type *cqh_first; /* first element */ \ | ||
1463 | + struct type *cqh_last; /* last element */ \ | ||
1464 | +} | ||
1465 | + | ||
1466 | +#define CIRCLEQ_HEAD_INITIALIZER(head) \ | ||
1467 | + { CIRCLEQ_END(&head), CIRCLEQ_END(&head) } | ||
1468 | + | ||
1469 | +#define CIRCLEQ_ENTRY(type) \ | ||
1470 | +struct { \ | ||
1471 | + struct type *cqe_next; /* next element */ \ | ||
1472 | + struct type *cqe_prev; /* previous element */ \ | ||
1473 | +} | ||
1474 | + | ||
1475 | +/* | ||
1476 | + * Circular queue functions. | ||
1477 | + */ | ||
1478 | +#define CIRCLEQ_INIT(head) do { \ | ||
1479 | + (head)->cqh_first = CIRCLEQ_END(head); \ | ||
1480 | + (head)->cqh_last = CIRCLEQ_END(head); \ | ||
1481 | +} while (/*CONSTCOND*/0) | ||
1482 | + | ||
1483 | +#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ | ||
1484 | + QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ | ||
1485 | + QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \ | ||
1486 | + (elm)->field.cqe_next = (listelm)->field.cqe_next; \ | ||
1487 | + (elm)->field.cqe_prev = (listelm); \ | ||
1488 | + if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head)) \ | ||
1489 | + (head)->cqh_last = (elm); \ | ||
1490 | + else \ | ||
1491 | + (listelm)->field.cqe_next->field.cqe_prev = (elm); \ | ||
1492 | + (listelm)->field.cqe_next = (elm); \ | ||
1493 | +} while (/*CONSTCOND*/0) | ||
1494 | + | ||
1495 | +#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ | ||
1496 | + QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ | ||
1497 | + QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \ | ||
1498 | + (elm)->field.cqe_next = (listelm); \ | ||
1499 | + (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ | ||
1500 | + if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \ | ||
1501 | + (head)->cqh_first = (elm); \ | ||
1502 | + else \ | ||
1503 | + (listelm)->field.cqe_prev->field.cqe_next = (elm); \ | ||
1504 | + (listelm)->field.cqe_prev = (elm); \ | ||
1505 | +} while (/*CONSTCOND*/0) | ||
1506 | + | ||
1507 | +#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ | ||
1508 | + QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ | ||
1509 | + (elm)->field.cqe_next = (head)->cqh_first; \ | ||
1510 | + (elm)->field.cqe_prev = CIRCLEQ_END(head); \ | ||
1511 | + if ((head)->cqh_last == CIRCLEQ_ENDC(head)) \ | ||
1512 | + (head)->cqh_last = (elm); \ | ||
1513 | + else \ | ||
1514 | + (head)->cqh_first->field.cqe_prev = (elm); \ | ||
1515 | + (head)->cqh_first = (elm); \ | ||
1516 | +} while (/*CONSTCOND*/0) | ||
1517 | + | ||
1518 | +#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ | ||
1519 | + QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ | ||
1520 | + (elm)->field.cqe_next = CIRCLEQ_END(head); \ | ||
1521 | + (elm)->field.cqe_prev = (head)->cqh_last; \ | ||
1522 | + if ((head)->cqh_first == CIRCLEQ_ENDC(head)) \ | ||
1523 | + (head)->cqh_first = (elm); \ | ||
1524 | + else \ | ||
1525 | + (head)->cqh_last->field.cqe_next = (elm); \ | ||
1526 | + (head)->cqh_last = (elm); \ | ||
1527 | +} while (/*CONSTCOND*/0) | ||
1528 | + | ||
1529 | +#define CIRCLEQ_REMOVE(head, elm, field) do { \ | ||
1530 | + QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ | ||
1531 | + QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \ | ||
1532 | + if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \ | ||
1533 | + (head)->cqh_last = (elm)->field.cqe_prev; \ | ||
1534 | + else \ | ||
1535 | + (elm)->field.cqe_next->field.cqe_prev = \ | ||
1536 | + (elm)->field.cqe_prev; \ | ||
1537 | + if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \ | ||
1538 | + (head)->cqh_first = (elm)->field.cqe_next; \ | ||
1539 | + else \ | ||
1540 | + (elm)->field.cqe_prev->field.cqe_next = \ | ||
1541 | + (elm)->field.cqe_next; \ | ||
1542 | + QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \ | ||
1543 | +} while (/*CONSTCOND*/0) | ||
1544 | + | ||
1545 | +#define CIRCLEQ_FOREACH(var, head, field) \ | ||
1546 | + for ((var) = ((head)->cqh_first); \ | ||
1547 | + (var) != CIRCLEQ_ENDC(head); \ | ||
1548 | + (var) = ((var)->field.cqe_next)) | ||
1549 | + | ||
1550 | +#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ | ||
1551 | + for ((var) = ((head)->cqh_last); \ | ||
1552 | + (var) != CIRCLEQ_ENDC(head); \ | ||
1553 | + (var) = ((var)->field.cqe_prev)) | ||
1554 | + | ||
1555 | +/* | ||
1556 | + * Circular queue access methods. | ||
1557 | + */ | ||
1558 | +#define CIRCLEQ_FIRST(head) ((head)->cqh_first) | ||
1559 | +#define CIRCLEQ_LAST(head) ((head)->cqh_last) | ||
1560 | +/* For comparisons */ | ||
1561 | +#define CIRCLEQ_ENDC(head) (__launder_type(head)) | ||
1562 | +/* For assignments */ | ||
1563 | +#define CIRCLEQ_END(head) ((void *)(head)) | ||
1564 | +#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) | ||
1565 | +#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) | ||
1566 | +#define CIRCLEQ_EMPTY(head) \ | ||
1567 | + (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head)) | ||
1568 | + | ||
1569 | +#define CIRCLEQ_LOOP_NEXT(head, elm, field) \ | ||
1570 | + (((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \ | ||
1571 | + ? ((head)->cqh_first) \ | ||
1572 | + : (elm->field.cqe_next)) | ||
1573 | +#define CIRCLEQ_LOOP_PREV(head, elm, field) \ | ||
1574 | + (((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \ | ||
1575 | + ? ((head)->cqh_last) \ | ||
1576 | + : (elm->field.cqe_prev)) | ||
1577 | +#endif /* !_KERNEL */ | ||
1578 | + | ||
1579 | +#endif /* !_SYS_QUEUE_H_ */ | ||
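For reference, here is a minimal sketch (not part of the patch) of how the doubly-linked tail queue macros from this borrowed queue.h are typically used. It assumes the header is reachable as <sys/queue.h>; the same TAILQ macros also exist in glibc's sys/queue.h, so the sketch builds on glibc systems as well. The CIRCLEQ macros above are kept only for compatibility: their list ends compare against CIRCLEQ_ENDC(head) (via __launder_type) rather than NULL, which is why the header discourages new uses of them.

/* tailq_demo.c -- illustrative only; the struct and variable names are made up. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	int value;
	TAILQ_ENTRY(node) link;	/* embeds the tqe_next/tqe_prev pointers */
};

TAILQ_HEAD(nodelist, node);	/* declares "struct nodelist" with tqh_first/tqh_last */

int main(void)
{
	struct nodelist head;
	struct node *n;

	TAILQ_INIT(&head);

	for (int i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		if (n == NULL)
			abort();
		n->value = i;
		TAILQ_INSERT_TAIL(&head, n, link);	/* O(1) append via tqh_last */
	}

	TAILQ_FOREACH(n, &head, link)
		printf("%d\n", n->value);		/* prints 0 1 2 */

	while ((n = TAILQ_FIRST(&head)) != NULL) {	/* drain and free the list */
		TAILQ_REMOVE(&head, n, link);
		free(n);
	}
	return 0;
}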
diff --git a/meta/recipes-extended/libtirpc/libtirpc/remove-des-functionality.patch b/meta/recipes-extended/libtirpc/libtirpc/remove-des-functionality.patch new file mode 100644 index 0000000000..512e93497d --- /dev/null +++ b/meta/recipes-extended/libtirpc/libtirpc/remove-des-functionality.patch | |||
@@ -0,0 +1,144 @@ | |||
1 | uclibc and musl do not provide DES functionality. Let's disable it. | ||
2 | |||
3 | Upstream-Status: Inappropriate [uclibc and musl specific] | ||
4 | |||
5 | Signed-off-by: Khem Raj <raj.khem@gmail.com> | ||
6 | Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> | ||
7 | Signed-off-by: Maxin B. John <maxin.john@intel.com> | ||
8 | --- | ||
9 | diff -Naur libtirpc-1.0.1-orig/src/Makefile.am libtirpc-1.0.1/src/Makefile.am | ||
10 | --- libtirpc-1.0.1-orig/src/Makefile.am 2015-10-30 17:15:14.000000000 +0200 | ||
11 | +++ libtirpc-1.0.1/src/Makefile.am 2015-12-21 15:56:17.094702429 +0200 | ||
12 | @@ -22,9 +22,8 @@ | ||
13 | pmap_prot.c pmap_prot2.c pmap_rmt.c rpc_prot.c rpc_commondata.c \ | ||
14 | rpc_callmsg.c rpc_generic.c rpc_soc.c rpcb_clnt.c rpcb_prot.c \ | ||
15 | rpcb_st_xdr.c svc.c svc_auth.c svc_dg.c svc_auth_unix.c svc_auth_none.c \ | ||
16 | - svc_auth_des.c \ | ||
17 | svc_generic.c svc_raw.c svc_run.c svc_simple.c svc_vc.c getpeereid.c \ | ||
18 | - auth_time.c auth_des.c authdes_prot.c debug.c | ||
19 | + debug.c | ||
20 | |||
21 | ## XDR | ||
22 | libtirpc_la_SOURCES += xdr.c xdr_rec.c xdr_array.c xdr_float.c xdr_mem.c xdr_reference.c xdr_stdio.c xdr_sizeof.c | ||
23 | @@ -41,8 +40,8 @@ | ||
24 | libtirpc_la_CFLAGS = -DHAVE_RPCSEC_GSS $(GSSAPI_CFLAGS) | ||
25 | endif | ||
26 | |||
27 | -libtirpc_la_SOURCES += key_call.c key_prot_xdr.c getpublickey.c | ||
28 | -libtirpc_la_SOURCES += netname.c netnamer.c rpcdname.c rtime.c | ||
29 | +#libtirpc_la_SOURCES += key_call.c key_prot_xdr.c getpublickey.c | ||
30 | +#libtirpc_la_SOURCES += netname.c netnamer.c rpcdname.c rtime.c | ||
31 | |||
32 | CLEANFILES = cscope.* *~ | ||
33 | DISTCLEANFILES = Makefile.in | ||
34 | diff -Naur libtirpc-1.0.1-orig/src/rpc_soc.c libtirpc-1.0.1/src/rpc_soc.c | ||
35 | --- libtirpc-1.0.1-orig/src/rpc_soc.c 2015-10-30 17:15:14.000000000 +0200 | ||
36 | +++ libtirpc-1.0.1/src/rpc_soc.c 2015-12-21 15:56:17.095702416 +0200 | ||
37 | @@ -61,7 +61,6 @@ | ||
38 | #include <string.h> | ||
39 | #include <unistd.h> | ||
40 | #include <fcntl.h> | ||
41 | -#include <rpcsvc/nis.h> | ||
42 | |||
43 | #include "rpc_com.h" | ||
44 | |||
45 | @@ -522,86 +521,6 @@ | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | - * Create the client des authentication object. Obsoleted by | ||
50 | - * authdes_seccreate(). | ||
51 | - */ | ||
52 | -AUTH * | ||
53 | -authdes_create(servername, window, syncaddr, ckey) | ||
54 | - char *servername; /* network name of server */ | ||
55 | - u_int window; /* time to live */ | ||
56 | - struct sockaddr *syncaddr; /* optional hostaddr to sync with */ | ||
57 | - des_block *ckey; /* optional conversation key to use */ | ||
58 | -{ | ||
59 | - AUTH *nauth; | ||
60 | - char hostname[NI_MAXHOST]; | ||
61 | - | ||
62 | - if (syncaddr) { | ||
63 | - /* | ||
64 | - * Change addr to hostname, because that is the way | ||
65 | - * new interface takes it. | ||
66 | - */ | ||
67 | - switch (syncaddr->sa_family) { | ||
68 | - case AF_INET: | ||
69 | - if (getnameinfo(syncaddr, sizeof(struct sockaddr_in), hostname, | ||
70 | - sizeof hostname, NULL, 0, 0) != 0) | ||
71 | - goto fallback; | ||
72 | - break; | ||
73 | - case AF_INET6: | ||
74 | - if (getnameinfo(syncaddr, sizeof(struct sockaddr_in6), hostname, | ||
75 | - sizeof hostname, NULL, 0, 0) != 0) | ||
76 | - goto fallback; | ||
77 | - break; | ||
78 | - default: | ||
79 | - goto fallback; | ||
80 | - } | ||
81 | - nauth = authdes_seccreate(servername, window, hostname, ckey); | ||
82 | - return (nauth); | ||
83 | - } | ||
84 | -fallback: | ||
85 | - return authdes_seccreate(servername, window, NULL, ckey); | ||
86 | -} | ||
87 | - | ||
88 | -/* | ||
89 | - * Create the client des authentication object. Obsoleted by | ||
90 | - * authdes_pk_seccreate(). | ||
91 | - */ | ||
92 | -extern AUTH *authdes_pk_seccreate(const char *, netobj *, u_int, const char *, | ||
93 | - const des_block *, nis_server *); | ||
94 | - | ||
95 | -AUTH * | ||
96 | -authdes_pk_create(servername, pkey, window, syncaddr, ckey) | ||
97 | - char *servername; /* network name of server */ | ||
98 | - netobj *pkey; /* public key */ | ||
99 | - u_int window; /* time to live */ | ||
100 | - struct sockaddr *syncaddr; /* optional hostaddr to sync with */ | ||
101 | - des_block *ckey; /* optional conversation key to use */ | ||
102 | -{ | ||
103 | - AUTH *nauth; | ||
104 | - char hostname[NI_MAXHOST]; | ||
105 | - | ||
106 | - if (syncaddr) { | ||
107 | - /* | ||
108 | - * Change addr to hostname, because that is the way | ||
109 | - * new interface takes it. | ||
110 | - */ | ||
111 | - switch (syncaddr->sa_family) { | ||
112 | - case AF_INET: | ||
113 | - if (getnameinfo(syncaddr, sizeof(struct sockaddr_in), hostname, | ||
114 | - sizeof hostname, NULL, 0, 0) != 0) | ||
115 | - goto fallback; | ||
116 | - break; | ||
117 | - default: | ||
118 | - goto fallback; | ||
119 | - } | ||
120 | - nauth = authdes_pk_seccreate(servername, pkey, window, hostname, ckey, NULL); | ||
121 | - return (nauth); | ||
122 | - } | ||
123 | -fallback: | ||
124 | - return authdes_pk_seccreate(servername, pkey, window, NULL, ckey, NULL); | ||
125 | -} | ||
126 | - | ||
127 | - | ||
128 | -/* | ||
129 | * Create a client handle for a unix connection. Obsoleted by clnt_vc_create() | ||
130 | */ | ||
131 | CLIENT * | ||
132 | diff -Naur libtirpc-1.0.1-orig/src/svc_auth.c libtirpc-1.0.1/src/svc_auth.c | ||
133 | --- libtirpc-1.0.1-orig/src/svc_auth.c 2015-10-30 17:15:14.000000000 +0200 | ||
134 | +++ libtirpc-1.0.1/src/svc_auth.c 2015-12-21 15:56:17.095702416 +0200 | ||
135 | @@ -114,9 +114,6 @@ | ||
136 | case AUTH_SHORT: | ||
137 | dummy = _svcauth_short(rqst, msg); | ||
138 | return (dummy); | ||
139 | - case AUTH_DES: | ||
140 | - dummy = _svcauth_des(rqst, msg); | ||
141 | - return (dummy); | ||
142 | #ifdef HAVE_RPCSEC_GSS | ||
143 | case RPCSEC_GSS: | ||
144 | dummy = _svcauth_gss(rqst, msg, no_dispatch); | ||
diff --git a/meta/recipes-extended/libtirpc/libtirpc/remove-des-uclibc.patch b/meta/recipes-extended/libtirpc/libtirpc/remove-des-uclibc.patch deleted file mode 100644 index 553b1ffb82..0000000000 --- a/meta/recipes-extended/libtirpc/libtirpc/remove-des-uclibc.patch +++ /dev/null | |||
@@ -1,38 +0,0 @@ | |||
1 | uclibc does not provide DES functionality, unlike eglibc, so let's disable ssl support. | ||
2 | |||
3 | Upstream-Status: Inappropriate [uclibc specific] | ||
4 | Signed-off-by: Khem Raj <raj.khem@gmail.com> | ||
5 | |||
6 | Index: libtirpc-0.2.5/src/rpc_soc.c | ||
7 | =================================================================== | ||
8 | --- libtirpc-0.2.5.orig/src/rpc_soc.c | ||
9 | +++ libtirpc-0.2.5/src/rpc_soc.c | ||
10 | @@ -520,6 +520,7 @@ clnt_broadcast(prog, vers, proc, xargs, | ||
11 | (resultproc_t) rpc_wrap_bcast, "udp"); | ||
12 | } | ||
13 | |||
14 | +#if 0 | ||
15 | /* | ||
16 | * Create the client des authentication object. Obsoleted by | ||
17 | * authdes_seccreate(). | ||
18 | @@ -551,6 +552,7 @@ fallback: | ||
19 | dummy = authdes_seccreate(servername, window, NULL, ckey); | ||
20 | return (dummy); | ||
21 | } | ||
22 | +#endif | ||
23 | |||
24 | /* | ||
25 | * Create a client handle for a unix connection. Obsoleted by clnt_vc_create() | ||
26 | Index: libtirpc-0.2.5/src/Makefile.am | ||
27 | =================================================================== | ||
28 | --- libtirpc-0.2.5.orig/src/Makefile.am | ||
29 | +++ libtirpc-0.2.5/src/Makefile.am | ||
30 | @@ -51,7 +51,7 @@ libtirpc_la_SOURCES = auth_none.c auth_u | ||
31 | rpc_callmsg.c rpc_generic.c rpc_soc.c rpcb_clnt.c rpcb_prot.c \ | ||
32 | rpcb_st_xdr.c svc.c svc_auth.c svc_dg.c svc_auth_unix.c svc_auth_none.c \ | ||
33 | svc_generic.c svc_raw.c svc_run.c svc_simple.c svc_vc.c getpeereid.c \ | ||
34 | - auth_time.c auth_des.c authdes_prot.c debug.c | ||
35 | + auth_time.c debug.c | ||
36 | |||
37 | ## XDR | ||
38 | libtirpc_la_SOURCES += xdr.c xdr_rec.c xdr_array.c xdr_float.c xdr_mem.c xdr_reference.c xdr_stdio.c | ||
diff --git a/meta/recipes-extended/libtirpc/libtirpc_0.2.5.bb b/meta/recipes-extended/libtirpc/libtirpc_1.0.1.bb index 330b829911..302dc50625 100644 --- a/meta/recipes-extended/libtirpc/libtirpc_0.2.5.bb +++ b/meta/recipes-extended/libtirpc/libtirpc_1.0.1.bb | |||
@@ -13,14 +13,19 @@ PROVIDES = "virtual/librpc" | |||
13 | SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2;name=libtirpc \ | 13 | SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2;name=libtirpc \ |
14 | ${GENTOO_MIRROR}/${BPN}-glibc-nfs.tar.xz;name=glibc-nfs \ | 14 | ${GENTOO_MIRROR}/${BPN}-glibc-nfs.tar.xz;name=glibc-nfs \ |
15 | file://libtirpc-0.2.1-fortify.patch \ | 15 | file://libtirpc-0.2.1-fortify.patch \ |
16 | file://0001-Add-missing-rwlock_unlocks-in-xprt_register.patch \ | ||
16 | " | 17 | " |
17 | 18 | ||
18 | SRC_URI_append_libc-uclibc = " file://remove-des-uclibc.patch \ | 19 | SRC_URI_append_libc-uclibc = " file://remove-des-functionality.patch \ |
19 | file://va_list.patch \ | 20 | file://va_list.patch \ |
20 | " | 21 | " |
21 | 22 | ||
22 | SRC_URI[libtirpc.md5sum] = "8cd41a5ef5a9b50d0fb6abb98af15368" | 23 | SRC_URI_append_libc-musl = " file://remove-des-functionality.patch \ |
23 | SRC_URI[libtirpc.sha256sum] = "62f9de7c2c8686c568757730e1fef66502a0e00d6cacf33546d0267984e002db" | 24 | file://Use-netbsd-queue.h.patch \ |
25 | " | ||
26 | |||
27 | SRC_URI[libtirpc.md5sum] = "36ce1c0ff80863bb0839d54aa0b94014" | ||
28 | SRC_URI[libtirpc.sha256sum] = "5156974f31be7ccbc8ab1de37c4739af6d9d42c87b1d5caf4835dda75fcbb89e" | ||
24 | SRC_URI[glibc-nfs.md5sum] = "5ae500b9d0b6b72cb875bc04944b9445" | 29 | SRC_URI[glibc-nfs.md5sum] = "5ae500b9d0b6b72cb875bc04944b9445" |
25 | SRC_URI[glibc-nfs.sha256sum] = "2677cfedf626f3f5a8f6e507aed5bb8f79a7453b589d684dbbc086e755170d83" | 30 | SRC_URI[glibc-nfs.sha256sum] = "2677cfedf626f3f5a8f6e507aed5bb8f79a7453b589d684dbbc086e755170d83" |
26 | 31 | ||