author     Khem Raj <raj.khem@gmail.com>                          2018-04-28 00:33:41 -0700
committer  Richard Purdie <richard.purdie@linuxfoundation.org>    2018-05-09 10:47:50 +0100
commit     6ef11123ef775676c4708a1b2ca162426b9791c3 (patch)
tree       e9849f91315de77882802141244bbfef41721bce /meta/recipes-extended
parent     00a5c6c010f3167b38fc33d450729f893d64806e (diff)
download   poky-6ef11123ef775676c4708a1b2ca162426b9791c3.tar.gz
libtirpc: Upgrade to 1.0.4-rc1
Drop backported patches.

Redo musl support patch such that it can be applied universally.

(From OE-Core rev: 94c23613724073f8def71bc9e76d7fd7a9f318ad)

Signed-off-by: Khem Raj <raj.khem@gmail.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'meta/recipes-extended')
-rw-r--r--  meta/recipes-extended/libtirpc/libtirpc/0001-include-stdint.h-for-uintptr_t.patch   |  29
-rw-r--r--  meta/recipes-extended/libtirpc/libtirpc/0001-replace-__bzero-with-memset-API.patch  |  64
-rw-r--r--  meta/recipes-extended/libtirpc/libtirpc/Use-netbsd-queue.h.patch                    | 878
-rw-r--r--  meta/recipes-extended/libtirpc/libtirpc/export_key_secretkey_is_set.patch           |  31
-rw-r--r--  meta/recipes-extended/libtirpc/libtirpc/libtirpc-1.0.4-rc1.patch                    | 103
-rw-r--r--  meta/recipes-extended/libtirpc/libtirpc/musl.patch                                  |  18
-rw-r--r--  meta/recipes-extended/libtirpc/libtirpc_1.0.3.bb (renamed from meta/recipes-extended/libtirpc/libtirpc_1.0.2.bb) | 13
7 files changed, 125 insertions(+), 1011 deletions(-)
diff --git a/meta/recipes-extended/libtirpc/libtirpc/0001-include-stdint.h-for-uintptr_t.patch b/meta/recipes-extended/libtirpc/libtirpc/0001-include-stdint.h-for-uintptr_t.patch
deleted file mode 100644
index 7e3e2f86ad..0000000000
--- a/meta/recipes-extended/libtirpc/libtirpc/0001-include-stdint.h-for-uintptr_t.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-Upstream-Status: Backport
-Signed-off-by: Ross Burton <ross.burton@intel.com>
-
-From acb9a37977cf0a9630eac74af9adebf35e38e719 Mon Sep 17 00:00:00 2001
-From: Thorsten Kukuk <kukuk@thkukuk.de>
-Date: Tue, 14 Nov 2017 10:39:08 -0500
-Subject: [PATCH] Include stdint.h from xdr_sizeof.c to avoid missing
- declaration errors.
-
-Signed-off-by: Thorsten Kukuk <kukuk@suse.de>
-Signed-off-by: Steve Dickson <steved@redhat.com>
----
- src/xdr_sizeof.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/src/xdr_sizeof.c b/src/xdr_sizeof.c
-index d23fbd1..79d6707 100644
---- a/src/xdr_sizeof.c
-+++ b/src/xdr_sizeof.c
-@@ -39,6 +39,7 @@
- #include <rpc/xdr.h>
- #include <sys/types.h>
- #include <stdlib.h>
-+#include <stdint.h>
- #include "un-namespace.h"
-
- /* ARGSUSED */
---
-1.8.3.1
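
The backport above exists because xdr_sizeof.c uses uintptr_t, a C99 type that is only guaranteed to be declared once <stdint.h> is included; on libcs that do not pull it in transitively, the build fails with a missing-declaration error. A minimal standalone sketch of the pattern, with hypothetical names rather than libtirpc code:

    #include <stdint.h>   /* declares uintptr_t and uintmax_t (C99) */
    #include <stdio.h>

    int main(void)
    {
        int x = 0;
        /* Without <stdint.h>, uintptr_t is an undeclared identifier. */
        uintptr_t addr = (uintptr_t)&x;
        printf("pointer as integer: %ju\n", (uintmax_t)addr);
        return 0;
    }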
diff --git a/meta/recipes-extended/libtirpc/libtirpc/0001-replace-__bzero-with-memset-API.patch b/meta/recipes-extended/libtirpc/libtirpc/0001-replace-__bzero-with-memset-API.patch
deleted file mode 100644
index 7ae19cb319..0000000000
--- a/meta/recipes-extended/libtirpc/libtirpc/0001-replace-__bzero-with-memset-API.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-Upstream-Status: Backport
-Signed-off-by: Ross Burton <ross.burton@intel.com>
-
-From 5356b63005e9d8169e0399cb76f26fbd29a78dee Mon Sep 17 00:00:00 2001
-From: Joshua Kinard <kumba@gentoo.org>
-Date: Wed, 23 Aug 2017 14:31:36 -0400
-Subject: [PATCH] Replace bzero() calls with equivalent memset() calls
-
-As annotated in the bzero(3) man page, bzero() was marked as LEGACY in
-POSIX.1-2001 and removed in POSIX.1-2008, and should be replaced with
-memset() calls to write zeros to a memory region. The attached patch
-replaces two bzero() calls and one __bzero() call in libtirpc with
-equivalent memset() calls. The latter replacement fixes a compile error
-under uclibc-ng, which lacks a definition for __bzero()
-
-Signed-off-by: Joshua Kinard <kumba@gentoo.org>
-Signed-off-by: Steve Dickson <steved@redhat.com>
----
- src/auth_time.c | 2 +-
- src/des_impl.c | 2 +-
- src/svc_auth_des.c | 2 +-
- 3 files changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/src/auth_time.c b/src/auth_time.c
-index 7f83ab4..69400bc 100644
---- a/src/auth_time.c
-+++ b/src/auth_time.c
-@@ -317,7 +317,7 @@ __rpc_get_time_offset(td, srv, thost, uaddr, netid)
- sprintf(ipuaddr, "%d.%d.%d.%d.0.111", a1, a2, a3, a4);
- useua = &ipuaddr[0];
-
-- bzero((char *)&sin, sizeof(sin));
-+ memset(&sin, 0, sizeof(sin));
- if (uaddr_to_sockaddr(useua, &sin)) {
- msg("unable to translate uaddr to sockaddr.");
- if (needfree)
-diff --git a/src/des_impl.c b/src/des_impl.c
-index 9dbccaf..15bec2a 100644
---- a/src/des_impl.c
-+++ b/src/des_impl.c
-@@ -588,7 +588,7 @@ _des_crypt (char *buf, unsigned len, struct desparams *desp)
- }
- tin0 = tin1 = tout0 = tout1 = xor0 = xor1 = 0;
- tbuf[0] = tbuf[1] = 0;
-- __bzero (schedule, sizeof (schedule));
-+ memset (schedule, 0, sizeof (schedule));
-
- return (1);
- }
-diff --git a/src/svc_auth_des.c b/src/svc_auth_des.c
-index 2e90146..19a7c60 100644
---- a/src/svc_auth_des.c
-+++ b/src/svc_auth_des.c
-@@ -356,7 +356,7 @@ cache_init()
-
- authdes_cache = (struct cache_entry *)
- mem_alloc(sizeof(struct cache_entry) * AUTHDES_CACHESZ);
-- bzero((char *)authdes_cache,
-+ memset(authdes_cache, 0,
- sizeof(struct cache_entry) * AUTHDES_CACHESZ);
-
- authdes_lru = (short *)mem_alloc(sizeof(short) * AUTHDES_CACHESZ);
---
-1.8.3.1
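
The description above reduces to a mechanical rule: the legacy bzero(p, n), removed in POSIX.1-2008, maps one-for-one onto memset(p, 0, n), which is why each hunk is a single-line swap. A minimal sketch of the substitution, using hypothetical names rather than libtirpc code:

    #include <string.h>   /* memset(); also provides size_t */

    struct cache_entry { int key; char data[64]; };

    static void zero_entries(struct cache_entry *tbl, size_t n)
    {
        /* Portable replacement for the removed bzero(tbl, sizeof(*tbl) * n). */
        memset(tbl, 0, sizeof(*tbl) * n);
    }

    int main(void)
    {
        struct cache_entry table[4];
        zero_entries(table, 4);
        return 0;
    }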
diff --git a/meta/recipes-extended/libtirpc/libtirpc/Use-netbsd-queue.h.patch b/meta/recipes-extended/libtirpc/libtirpc/Use-netbsd-queue.h.patch
deleted file mode 100644
index f93223feb4..0000000000
--- a/meta/recipes-extended/libtirpc/libtirpc/Use-netbsd-queue.h.patch
+++ /dev/null
@@ -1,878 +0,0 @@
1musl does not provide sys/queue.h implementation. Borrow queue.h from
2the NetBSD project
3http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/sys/queue.h?rev=1.68
4
5Upstream-Status: Inappropriate [musl specific]
6
7Signed-off-by: Jörg Krause <joerg.krause@embedded.rocks>
8Signed-off-by: Maxin B. John <maxin.john@intel.com>
9---
10diff -Naur libtirpc-1.0.1-orig/src/clnt_bcast.c libtirpc-1.0.1/src/clnt_bcast.c
11--- libtirpc-1.0.1-orig/src/clnt_bcast.c 2015-10-30 17:15:14.000000000 +0200
12+++ libtirpc-1.0.1/src/clnt_bcast.c 2015-12-21 17:03:52.066008311 +0200
13@@ -40,7 +40,6 @@
14 */
15 #include <sys/socket.h>
16 #include <sys/types.h>
17-#include <sys/queue.h>
18
19 #include <net/if.h>
20 #include <netinet/in.h>
21@@ -62,6 +61,7 @@
22 #include <err.h>
23 #include <string.h>
24
25+#include "queue.h"
26 #include "rpc_com.h"
27 #include "debug.h"
28
29diff -Naur libtirpc-1.0.1-orig/tirpc/queue.h libtirpc-1.0.1/tirpc/queue.h
30--- libtirpc-1.0.1-orig/tirpc/queue.h 1970-01-01 02:00:00.000000000 +0200
31+++ libtirpc-1.0.1/tirpc/queue.h 2015-12-21 17:02:44.427853905 +0200
32@@ -0,0 +1,846 @@
33+/* $NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $ */
34+
35+/*
36+ * Copyright (c) 1991, 1993
37+ * The Regents of the University of California. All rights reserved.
38+ *
39+ * Redistribution and use in source and binary forms, with or without
40+ * modification, are permitted provided that the following conditions
41+ * are met:
42+ * 1. Redistributions of source code must retain the above copyright
43+ * notice, this list of conditions and the following disclaimer.
44+ * 2. Redistributions in binary form must reproduce the above copyright
45+ * notice, this list of conditions and the following disclaimer in the
46+ * documentation and/or other materials provided with the distribution.
47+ * 3. Neither the name of the University nor the names of its contributors
48+ * may be used to endorse or promote products derived from this software
49+ * without specific prior written permission.
50+ *
51+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61+ * SUCH DAMAGE.
62+ *
63+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
64+ */
65+
66+#ifndef _SYS_QUEUE_H_
67+#define _SYS_QUEUE_H_
68+
69+/*
70+ * This file defines five types of data structures: singly-linked lists,
71+ * lists, simple queues, tail queues, and circular queues.
72+ *
73+ * A singly-linked list is headed by a single forward pointer. The
74+ * elements are singly linked for minimum space and pointer manipulation
75+ * overhead at the expense of O(n) removal for arbitrary elements. New
76+ * elements can be added to the list after an existing element or at the
77+ * head of the list. Elements being removed from the head of the list
78+ * should use the explicit macro for this purpose for optimum
79+ * efficiency. A singly-linked list may only be traversed in the forward
80+ * direction. Singly-linked lists are ideal for applications with large
81+ * datasets and few or no removals or for implementing a LIFO queue.
82+ *
83+ * A list is headed by a single forward pointer (or an array of forward
84+ * pointers for a hash table header). The elements are doubly linked
85+ * so that an arbitrary element can be removed without a need to
86+ * traverse the list. New elements can be added to the list before
87+ * or after an existing element or at the head of the list. A list
88+ * may only be traversed in the forward direction.
89+ *
90+ * A simple queue is headed by a pair of pointers, one the head of the
91+ * list and the other to the tail of the list. The elements are singly
92+ * linked to save space, so elements can only be removed from the
93+ * head of the list. New elements can be added to the list after
94+ * an existing element, at the head of the list, or at the end of the
95+ * list. A simple queue may only be traversed in the forward direction.
96+ *
97+ * A tail queue is headed by a pair of pointers, one to the head of the
98+ * list and the other to the tail of the list. The elements are doubly
99+ * linked so that an arbitrary element can be removed without a need to
100+ * traverse the list. New elements can be added to the list before or
101+ * after an existing element, at the head of the list, or at the end of
102+ * the list. A tail queue may be traversed in either direction.
103+ *
104+ * A circle queue is headed by a pair of pointers, one to the head of the
105+ * list and the other to the tail of the list. The elements are doubly
106+ * linked so that an arbitrary element can be removed without a need to
107+ * traverse the list. New elements can be added to the list before or after
108+ * an existing element, at the head of the list, or at the end of the list.
109+ * A circle queue may be traversed in either direction, but has a more
110+ * complex end of list detection.
111+ *
112+ * For details on the use of these macros, see the queue(3) manual page.
113+ */
114+
115+/*
116+ * Include the definition of NULL only on NetBSD because sys/null.h
117+ * is not available elsewhere. This conditional makes the header
118+ * portable and it can simply be dropped verbatim into any system.
119+ * The caveat is that on other systems some other header
120+ * must provide NULL before the macros can be used.
121+ */
122+#ifdef __NetBSD__
123+#include <sys/null.h>
124+#endif
125+
126+#if defined(QUEUEDEBUG)
127+# if defined(_KERNEL)
128+# define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__)
129+# else
130+# include <err.h>
131+# define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__)
132+# endif
133+#endif
134+
135+/*
136+ * Singly-linked List definitions.
137+ */
138+#define SLIST_HEAD(name, type) \
139+struct name { \
140+ struct type *slh_first; /* first element */ \
141+}
142+
143+#define SLIST_HEAD_INITIALIZER(head) \
144+ { NULL }
145+
146+#define SLIST_ENTRY(type) \
147+struct { \
148+ struct type *sle_next; /* next element */ \
149+}
150+
151+/*
152+ * Singly-linked List access methods.
153+ */
154+#define SLIST_FIRST(head) ((head)->slh_first)
155+#define SLIST_END(head) NULL
156+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
157+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
158+
159+#define SLIST_FOREACH(var, head, field) \
160+ for((var) = (head)->slh_first; \
161+ (var) != SLIST_END(head); \
162+ (var) = (var)->field.sle_next)
163+
164+#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
165+ for ((var) = SLIST_FIRST((head)); \
166+ (var) != SLIST_END(head) && \
167+ ((tvar) = SLIST_NEXT((var), field), 1); \
168+ (var) = (tvar))
169+
170+/*
171+ * Singly-linked List functions.
172+ */
173+#define SLIST_INIT(head) do { \
174+ (head)->slh_first = SLIST_END(head); \
175+} while (/*CONSTCOND*/0)
176+
177+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
178+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
179+ (slistelm)->field.sle_next = (elm); \
180+} while (/*CONSTCOND*/0)
181+
182+#define SLIST_INSERT_HEAD(head, elm, field) do { \
183+ (elm)->field.sle_next = (head)->slh_first; \
184+ (head)->slh_first = (elm); \
185+} while (/*CONSTCOND*/0)
186+
187+#define SLIST_REMOVE_AFTER(slistelm, field) do { \
188+ (slistelm)->field.sle_next = \
189+ SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
190+} while (/*CONSTCOND*/0)
191+
192+#define SLIST_REMOVE_HEAD(head, field) do { \
193+ (head)->slh_first = (head)->slh_first->field.sle_next; \
194+} while (/*CONSTCOND*/0)
195+
196+#define SLIST_REMOVE(head, elm, type, field) do { \
197+ if ((head)->slh_first == (elm)) { \
198+ SLIST_REMOVE_HEAD((head), field); \
199+ } \
200+ else { \
201+ struct type *curelm = (head)->slh_first; \
202+ while(curelm->field.sle_next != (elm)) \
203+ curelm = curelm->field.sle_next; \
204+ curelm->field.sle_next = \
205+ curelm->field.sle_next->field.sle_next; \
206+ } \
207+} while (/*CONSTCOND*/0)
208+
209+
210+/*
211+ * List definitions.
212+ */
213+#define LIST_HEAD(name, type) \
214+struct name { \
215+ struct type *lh_first; /* first element */ \
216+}
217+
218+#define LIST_HEAD_INITIALIZER(head) \
219+ { NULL }
220+
221+#define LIST_ENTRY(type) \
222+struct { \
223+ struct type *le_next; /* next element */ \
224+ struct type **le_prev; /* address of previous next element */ \
225+}
226+
227+/*
228+ * List access methods.
229+ */
230+#define LIST_FIRST(head) ((head)->lh_first)
231+#define LIST_END(head) NULL
232+#define LIST_EMPTY(head) ((head)->lh_first == LIST_END(head))
233+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
234+
235+#define LIST_FOREACH(var, head, field) \
236+ for ((var) = ((head)->lh_first); \
237+ (var) != LIST_END(head); \
238+ (var) = ((var)->field.le_next))
239+
240+#define LIST_FOREACH_SAFE(var, head, field, tvar) \
241+ for ((var) = LIST_FIRST((head)); \
242+ (var) != LIST_END(head) && \
243+ ((tvar) = LIST_NEXT((var), field), 1); \
244+ (var) = (tvar))
245+
246+#define LIST_MOVE(head1, head2) do { \
247+ LIST_INIT((head2)); \
248+ if (!LIST_EMPTY((head1))) { \
249+ (head2)->lh_first = (head1)->lh_first; \
250+ LIST_INIT((head1)); \
251+ } \
252+} while (/*CONSTCOND*/0)
253+
254+/*
255+ * List functions.
256+ */
257+#if defined(QUEUEDEBUG)
258+#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
259+ if ((head)->lh_first && \
260+ (head)->lh_first->field.le_prev != &(head)->lh_first) \
261+ QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head), \
262+ __FILE__, __LINE__);
263+#define QUEUEDEBUG_LIST_OP(elm, field) \
264+ if ((elm)->field.le_next && \
265+ (elm)->field.le_next->field.le_prev != \
266+ &(elm)->field.le_next) \
267+ QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm), \
268+ __FILE__, __LINE__); \
269+ if (*(elm)->field.le_prev != (elm)) \
270+ QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm), \
271+ __FILE__, __LINE__);
272+#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
273+ (elm)->field.le_next = (void *)1L; \
274+ (elm)->field.le_prev = (void *)1L;
275+#else
276+#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
277+#define QUEUEDEBUG_LIST_OP(elm, field)
278+#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
279+#endif
280+
281+#define LIST_INIT(head) do { \
282+ (head)->lh_first = LIST_END(head); \
283+} while (/*CONSTCOND*/0)
284+
285+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
286+ QUEUEDEBUG_LIST_OP((listelm), field) \
287+ if (((elm)->field.le_next = (listelm)->field.le_next) != \
288+ LIST_END(head)) \
289+ (listelm)->field.le_next->field.le_prev = \
290+ &(elm)->field.le_next; \
291+ (listelm)->field.le_next = (elm); \
292+ (elm)->field.le_prev = &(listelm)->field.le_next; \
293+} while (/*CONSTCOND*/0)
294+
295+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
296+ QUEUEDEBUG_LIST_OP((listelm), field) \
297+ (elm)->field.le_prev = (listelm)->field.le_prev; \
298+ (elm)->field.le_next = (listelm); \
299+ *(listelm)->field.le_prev = (elm); \
300+ (listelm)->field.le_prev = &(elm)->field.le_next; \
301+} while (/*CONSTCOND*/0)
302+
303+#define LIST_INSERT_HEAD(head, elm, field) do { \
304+ QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
305+ if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
306+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
307+ (head)->lh_first = (elm); \
308+ (elm)->field.le_prev = &(head)->lh_first; \
309+} while (/*CONSTCOND*/0)
310+
311+#define LIST_REMOVE(elm, field) do { \
312+ QUEUEDEBUG_LIST_OP((elm), field) \
313+ if ((elm)->field.le_next != NULL) \
314+ (elm)->field.le_next->field.le_prev = \
315+ (elm)->field.le_prev; \
316+ *(elm)->field.le_prev = (elm)->field.le_next; \
317+ QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
318+} while (/*CONSTCOND*/0)
319+
320+#define LIST_REPLACE(elm, elm2, field) do { \
321+ if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
322+ (elm2)->field.le_next->field.le_prev = \
323+ &(elm2)->field.le_next; \
324+ (elm2)->field.le_prev = (elm)->field.le_prev; \
325+ *(elm2)->field.le_prev = (elm2); \
326+ QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
327+} while (/*CONSTCOND*/0)
328+
329+/*
330+ * Simple queue definitions.
331+ */
332+#define SIMPLEQ_HEAD(name, type) \
333+struct name { \
334+ struct type *sqh_first; /* first element */ \
335+ struct type **sqh_last; /* addr of last next element */ \
336+}
337+
338+#define SIMPLEQ_HEAD_INITIALIZER(head) \
339+ { NULL, &(head).sqh_first }
340+
341+#define SIMPLEQ_ENTRY(type) \
342+struct { \
343+ struct type *sqe_next; /* next element */ \
344+}
345+
346+/*
347+ * Simple queue access methods.
348+ */
349+#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
350+#define SIMPLEQ_END(head) NULL
351+#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == SIMPLEQ_END(head))
352+#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
353+
354+#define SIMPLEQ_FOREACH(var, head, field) \
355+ for ((var) = ((head)->sqh_first); \
356+ (var) != SIMPLEQ_END(head); \
357+ (var) = ((var)->field.sqe_next))
358+
359+#define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \
360+ for ((var) = ((head)->sqh_first); \
361+ (var) != SIMPLEQ_END(head) && \
362+ ((next = ((var)->field.sqe_next)), 1); \
363+ (var) = (next))
364+
365+/*
366+ * Simple queue functions.
367+ */
368+#define SIMPLEQ_INIT(head) do { \
369+ (head)->sqh_first = NULL; \
370+ (head)->sqh_last = &(head)->sqh_first; \
371+} while (/*CONSTCOND*/0)
372+
373+#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
374+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
375+ (head)->sqh_last = &(elm)->field.sqe_next; \
376+ (head)->sqh_first = (elm); \
377+} while (/*CONSTCOND*/0)
378+
379+#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
380+ (elm)->field.sqe_next = NULL; \
381+ *(head)->sqh_last = (elm); \
382+ (head)->sqh_last = &(elm)->field.sqe_next; \
383+} while (/*CONSTCOND*/0)
384+
385+#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
386+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
387+ (head)->sqh_last = &(elm)->field.sqe_next; \
388+ (listelm)->field.sqe_next = (elm); \
389+} while (/*CONSTCOND*/0)
390+
391+#define SIMPLEQ_REMOVE_HEAD(head, field) do { \
392+ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
393+ (head)->sqh_last = &(head)->sqh_first; \
394+} while (/*CONSTCOND*/0)
395+
396+#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
397+ if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
398+ == NULL) \
399+ (head)->sqh_last = &(elm)->field.sqe_next; \
400+} while (/*CONSTCOND*/0)
401+
402+#define SIMPLEQ_REMOVE(head, elm, type, field) do { \
403+ if ((head)->sqh_first == (elm)) { \
404+ SIMPLEQ_REMOVE_HEAD((head), field); \
405+ } else { \
406+ struct type *curelm = (head)->sqh_first; \
407+ while (curelm->field.sqe_next != (elm)) \
408+ curelm = curelm->field.sqe_next; \
409+ if ((curelm->field.sqe_next = \
410+ curelm->field.sqe_next->field.sqe_next) == NULL) \
411+ (head)->sqh_last = &(curelm)->field.sqe_next; \
412+ } \
413+} while (/*CONSTCOND*/0)
414+
415+#define SIMPLEQ_CONCAT(head1, head2) do { \
416+ if (!SIMPLEQ_EMPTY((head2))) { \
417+ *(head1)->sqh_last = (head2)->sqh_first; \
418+ (head1)->sqh_last = (head2)->sqh_last; \
419+ SIMPLEQ_INIT((head2)); \
420+ } \
421+} while (/*CONSTCOND*/0)
422+
423+#define SIMPLEQ_LAST(head, type, field) \
424+ (SIMPLEQ_EMPTY((head)) ? \
425+ NULL : \
426+ ((struct type *)(void *) \
427+ ((char *)((head)->sqh_last) - offsetof(struct type, field))))
428+
429+/*
430+ * Tail queue definitions.
431+ */
432+#define _TAILQ_HEAD(name, type, qual) \
433+struct name { \
434+ qual type *tqh_first; /* first element */ \
435+ qual type *qual *tqh_last; /* addr of last next element */ \
436+}
437+#define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,)
438+
439+#define TAILQ_HEAD_INITIALIZER(head) \
440+ { TAILQ_END(head), &(head).tqh_first }
441+
442+#define _TAILQ_ENTRY(type, qual) \
443+struct { \
444+ qual type *tqe_next; /* next element */ \
445+ qual type *qual *tqe_prev; /* address of previous next element */\
446+}
447+#define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,)
448+
449+/*
450+ * Tail queue access methods.
451+ */
452+#define TAILQ_FIRST(head) ((head)->tqh_first)
453+#define TAILQ_END(head) (NULL)
454+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
455+#define TAILQ_LAST(head, headname) \
456+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
457+#define TAILQ_PREV(elm, headname, field) \
458+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
459+#define TAILQ_EMPTY(head) (TAILQ_FIRST(head) == TAILQ_END(head))
460+
461+
462+#define TAILQ_FOREACH(var, head, field) \
463+ for ((var) = ((head)->tqh_first); \
464+ (var) != TAILQ_END(head); \
465+ (var) = ((var)->field.tqe_next))
466+
467+#define TAILQ_FOREACH_SAFE(var, head, field, next) \
468+ for ((var) = ((head)->tqh_first); \
469+ (var) != TAILQ_END(head) && \
470+ ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
471+
472+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
473+ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
474+ (var) != TAILQ_END(head); \
475+ (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
476+
477+#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
478+ for ((var) = TAILQ_LAST((head), headname); \
479+ (var) != TAILQ_END(head) && \
480+ ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
481+
482+/*
483+ * Tail queue functions.
484+ */
485+#if defined(QUEUEDEBUG)
486+#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
487+ if ((head)->tqh_first && \
488+ (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
489+ QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head), \
490+ __FILE__, __LINE__);
491+#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
492+ if (*(head)->tqh_last != NULL) \
493+ QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head), \
494+ __FILE__, __LINE__);
495+#define QUEUEDEBUG_TAILQ_OP(elm, field) \
496+ if ((elm)->field.tqe_next && \
497+ (elm)->field.tqe_next->field.tqe_prev != \
498+ &(elm)->field.tqe_next) \
499+ QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm), \
500+ __FILE__, __LINE__); \
501+ if (*(elm)->field.tqe_prev != (elm)) \
502+ QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm), \
503+ __FILE__, __LINE__);
504+#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \
505+ if ((elm)->field.tqe_next == NULL && \
506+ (head)->tqh_last != &(elm)->field.tqe_next) \
507+ QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\
508+ (head), (elm), __FILE__, __LINE__);
509+#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \
510+ (elm)->field.tqe_next = (void *)1L; \
511+ (elm)->field.tqe_prev = (void *)1L;
512+#else
513+#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
514+#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
515+#define QUEUEDEBUG_TAILQ_OP(elm, field)
516+#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
517+#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
518+#endif
519+
520+#define TAILQ_INIT(head) do { \
521+ (head)->tqh_first = TAILQ_END(head); \
522+ (head)->tqh_last = &(head)->tqh_first; \
523+} while (/*CONSTCOND*/0)
524+
525+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
526+ QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \
527+ if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\
528+ (head)->tqh_first->field.tqe_prev = \
529+ &(elm)->field.tqe_next; \
530+ else \
531+ (head)->tqh_last = &(elm)->field.tqe_next; \
532+ (head)->tqh_first = (elm); \
533+ (elm)->field.tqe_prev = &(head)->tqh_first; \
534+} while (/*CONSTCOND*/0)
535+
536+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
537+ QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \
538+ (elm)->field.tqe_next = TAILQ_END(head); \
539+ (elm)->field.tqe_prev = (head)->tqh_last; \
540+ *(head)->tqh_last = (elm); \
541+ (head)->tqh_last = &(elm)->field.tqe_next; \
542+} while (/*CONSTCOND*/0)
543+
544+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
545+ QUEUEDEBUG_TAILQ_OP((listelm), field) \
546+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != \
547+ TAILQ_END(head)) \
548+ (elm)->field.tqe_next->field.tqe_prev = \
549+ &(elm)->field.tqe_next; \
550+ else \
551+ (head)->tqh_last = &(elm)->field.tqe_next; \
552+ (listelm)->field.tqe_next = (elm); \
553+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
554+} while (/*CONSTCOND*/0)
555+
556+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
557+ QUEUEDEBUG_TAILQ_OP((listelm), field) \
558+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
559+ (elm)->field.tqe_next = (listelm); \
560+ *(listelm)->field.tqe_prev = (elm); \
561+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
562+} while (/*CONSTCOND*/0)
563+
564+#define TAILQ_REMOVE(head, elm, field) do { \
565+ QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \
566+ QUEUEDEBUG_TAILQ_OP((elm), field) \
567+ if (((elm)->field.tqe_next) != TAILQ_END(head)) \
568+ (elm)->field.tqe_next->field.tqe_prev = \
569+ (elm)->field.tqe_prev; \
570+ else \
571+ (head)->tqh_last = (elm)->field.tqe_prev; \
572+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
573+ QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
574+} while (/*CONSTCOND*/0)
575+
576+#define TAILQ_REPLACE(head, elm, elm2, field) do { \
577+ if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != \
578+ TAILQ_END(head)) \
579+ (elm2)->field.tqe_next->field.tqe_prev = \
580+ &(elm2)->field.tqe_next; \
581+ else \
582+ (head)->tqh_last = &(elm2)->field.tqe_next; \
583+ (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
584+ *(elm2)->field.tqe_prev = (elm2); \
585+ QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
586+} while (/*CONSTCOND*/0)
587+
588+#define TAILQ_CONCAT(head1, head2, field) do { \
589+ if (!TAILQ_EMPTY(head2)) { \
590+ *(head1)->tqh_last = (head2)->tqh_first; \
591+ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
592+ (head1)->tqh_last = (head2)->tqh_last; \
593+ TAILQ_INIT((head2)); \
594+ } \
595+} while (/*CONSTCOND*/0)
596+
597+/*
598+ * Singly-linked Tail queue declarations.
599+ */
600+#define STAILQ_HEAD(name, type) \
601+struct name { \
602+ struct type *stqh_first; /* first element */ \
603+ struct type **stqh_last; /* addr of last next element */ \
604+}
605+
606+#define STAILQ_HEAD_INITIALIZER(head) \
607+ { NULL, &(head).stqh_first }
608+
609+#define STAILQ_ENTRY(type) \
610+struct { \
611+ struct type *stqe_next; /* next element */ \
612+}
613+
614+/*
615+ * Singly-linked Tail queue access methods.
616+ */
617+#define STAILQ_FIRST(head) ((head)->stqh_first)
618+#define STAILQ_END(head) NULL
619+#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
620+#define STAILQ_EMPTY(head) (STAILQ_FIRST(head) == STAILQ_END(head))
621+
622+/*
623+ * Singly-linked Tail queue functions.
624+ */
625+#define STAILQ_INIT(head) do { \
626+ (head)->stqh_first = NULL; \
627+ (head)->stqh_last = &(head)->stqh_first; \
628+} while (/*CONSTCOND*/0)
629+
630+#define STAILQ_INSERT_HEAD(head, elm, field) do { \
631+ if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
632+ (head)->stqh_last = &(elm)->field.stqe_next; \
633+ (head)->stqh_first = (elm); \
634+} while (/*CONSTCOND*/0)
635+
636+#define STAILQ_INSERT_TAIL(head, elm, field) do { \
637+ (elm)->field.stqe_next = NULL; \
638+ *(head)->stqh_last = (elm); \
639+ (head)->stqh_last = &(elm)->field.stqe_next; \
640+} while (/*CONSTCOND*/0)
641+
642+#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
643+ if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
644+ (head)->stqh_last = &(elm)->field.stqe_next; \
645+ (listelm)->field.stqe_next = (elm); \
646+} while (/*CONSTCOND*/0)
647+
648+#define STAILQ_REMOVE_HEAD(head, field) do { \
649+ if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
650+ (head)->stqh_last = &(head)->stqh_first; \
651+} while (/*CONSTCOND*/0)
652+
653+#define STAILQ_REMOVE(head, elm, type, field) do { \
654+ if ((head)->stqh_first == (elm)) { \
655+ STAILQ_REMOVE_HEAD((head), field); \
656+ } else { \
657+ struct type *curelm = (head)->stqh_first; \
658+ while (curelm->field.stqe_next != (elm)) \
659+ curelm = curelm->field.stqe_next; \
660+ if ((curelm->field.stqe_next = \
661+ curelm->field.stqe_next->field.stqe_next) == NULL) \
662+ (head)->stqh_last = &(curelm)->field.stqe_next; \
663+ } \
664+} while (/*CONSTCOND*/0)
665+
666+#define STAILQ_FOREACH(var, head, field) \
667+ for ((var) = ((head)->stqh_first); \
668+ (var); \
669+ (var) = ((var)->field.stqe_next))
670+
671+#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
672+ for ((var) = STAILQ_FIRST((head)); \
673+ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
674+ (var) = (tvar))
675+
676+#define STAILQ_CONCAT(head1, head2) do { \
677+ if (!STAILQ_EMPTY((head2))) { \
678+ *(head1)->stqh_last = (head2)->stqh_first; \
679+ (head1)->stqh_last = (head2)->stqh_last; \
680+ STAILQ_INIT((head2)); \
681+ } \
682+} while (/*CONSTCOND*/0)
683+
684+#define STAILQ_LAST(head, type, field) \
685+ (STAILQ_EMPTY((head)) ? \
686+ NULL : \
687+ ((struct type *)(void *) \
688+ ((char *)((head)->stqh_last) - offsetof(struct type, field))))
689+
690+
691+#ifndef _KERNEL
692+/*
693+ * Circular queue definitions. Do not use. We still keep the macros
694+ * for compatibility but because of pointer aliasing issues their use
695+ * is discouraged!
696+ */
697+
698+/*
699+ * __launder_type(): We use this ugly hack to work around the the compiler
700+ * noticing that two types may not alias each other and elide tests in code.
701+ * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
702+ * 'struct type *' (see CIRCLEQ_HEAD()). Modern compilers (such as GCC
703+ * 4.8) declare these comparisons as always false, causing the code to
704+ * not run as designed.
705+ *
706+ * This hack is only to be used for comparisons and thus can be fully const.
707+ * Do not use for assignment.
708+ *
709+ * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
710+ * this by changing the head/tail sentinal values, but see the note above
711+ * this one.
712+ */
713+static __inline const void * __launder_type(const void *);
714+static __inline const void *
715+__launder_type(const void *__x)
716+{
717+ __asm __volatile("" : "+r" (__x));
718+ return __x;
719+}
720+
721+#if defined(QUEUEDEBUG)
722+#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
723+ if ((head)->cqh_first != CIRCLEQ_ENDC(head) && \
724+ (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head)) \
725+ QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head), \
726+ __FILE__, __LINE__); \
727+ if ((head)->cqh_last != CIRCLEQ_ENDC(head) && \
728+ (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head)) \
729+ QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head), \
730+ __FILE__, __LINE__);
731+#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
732+ if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) { \
733+ if ((head)->cqh_last != (elm)) \
734+ QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d", \
735+ (elm), __FILE__, __LINE__); \
736+ } else { \
737+ if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
738+ QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d", \
739+ (elm), __FILE__, __LINE__); \
740+ } \
741+ if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) { \
742+ if ((head)->cqh_first != (elm)) \
743+ QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d", \
744+ (elm), __FILE__, __LINE__); \
745+ } else { \
746+ if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
747+ QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d", \
748+ (elm), __FILE__, __LINE__); \
749+ }
750+#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
751+ (elm)->field.cqe_next = (void *)1L; \
752+ (elm)->field.cqe_prev = (void *)1L;
753+#else
754+#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
755+#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
756+#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
757+#endif
758+
759+#define CIRCLEQ_HEAD(name, type) \
760+struct name { \
761+ struct type *cqh_first; /* first element */ \
762+ struct type *cqh_last; /* last element */ \
763+}
764+
765+#define CIRCLEQ_HEAD_INITIALIZER(head) \
766+ { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
767+
768+#define CIRCLEQ_ENTRY(type) \
769+struct { \
770+ struct type *cqe_next; /* next element */ \
771+ struct type *cqe_prev; /* previous element */ \
772+}
773+
774+/*
775+ * Circular queue functions.
776+ */
777+#define CIRCLEQ_INIT(head) do { \
778+ (head)->cqh_first = CIRCLEQ_END(head); \
779+ (head)->cqh_last = CIRCLEQ_END(head); \
780+} while (/*CONSTCOND*/0)
781+
782+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
783+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
784+ QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
785+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
786+ (elm)->field.cqe_prev = (listelm); \
787+ if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
788+ (head)->cqh_last = (elm); \
789+ else \
790+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
791+ (listelm)->field.cqe_next = (elm); \
792+} while (/*CONSTCOND*/0)
793+
794+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
795+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
796+ QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
797+ (elm)->field.cqe_next = (listelm); \
798+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
799+ if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
800+ (head)->cqh_first = (elm); \
801+ else \
802+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
803+ (listelm)->field.cqe_prev = (elm); \
804+} while (/*CONSTCOND*/0)
805+
806+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
807+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
808+ (elm)->field.cqe_next = (head)->cqh_first; \
809+ (elm)->field.cqe_prev = CIRCLEQ_END(head); \
810+ if ((head)->cqh_last == CIRCLEQ_ENDC(head)) \
811+ (head)->cqh_last = (elm); \
812+ else \
813+ (head)->cqh_first->field.cqe_prev = (elm); \
814+ (head)->cqh_first = (elm); \
815+} while (/*CONSTCOND*/0)
816+
817+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
818+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
819+ (elm)->field.cqe_next = CIRCLEQ_END(head); \
820+ (elm)->field.cqe_prev = (head)->cqh_last; \
821+ if ((head)->cqh_first == CIRCLEQ_ENDC(head)) \
822+ (head)->cqh_first = (elm); \
823+ else \
824+ (head)->cqh_last->field.cqe_next = (elm); \
825+ (head)->cqh_last = (elm); \
826+} while (/*CONSTCOND*/0)
827+
828+#define CIRCLEQ_REMOVE(head, elm, field) do { \
829+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
830+ QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
831+ if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
832+ (head)->cqh_last = (elm)->field.cqe_prev; \
833+ else \
834+ (elm)->field.cqe_next->field.cqe_prev = \
835+ (elm)->field.cqe_prev; \
836+ if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
837+ (head)->cqh_first = (elm)->field.cqe_next; \
838+ else \
839+ (elm)->field.cqe_prev->field.cqe_next = \
840+ (elm)->field.cqe_next; \
841+ QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \
842+} while (/*CONSTCOND*/0)
843+
844+#define CIRCLEQ_FOREACH(var, head, field) \
845+ for ((var) = ((head)->cqh_first); \
846+ (var) != CIRCLEQ_ENDC(head); \
847+ (var) = ((var)->field.cqe_next))
848+
849+#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
850+ for ((var) = ((head)->cqh_last); \
851+ (var) != CIRCLEQ_ENDC(head); \
852+ (var) = ((var)->field.cqe_prev))
853+
854+/*
855+ * Circular queue access methods.
856+ */
857+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
858+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
859+/* For comparisons */
860+#define CIRCLEQ_ENDC(head) (__launder_type(head))
861+/* For assignments */
862+#define CIRCLEQ_END(head) ((void *)(head))
863+#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
864+#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
865+#define CIRCLEQ_EMPTY(head) \
866+ (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))
867+
868+#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
869+ (((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
870+ ? ((head)->cqh_first) \
871+ : (elm->field.cqe_next))
872+#define CIRCLEQ_LOOP_PREV(head, elm, field) \
873+ (((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
874+ ? ((head)->cqh_last) \
875+ : (elm->field.cqe_prev))
876+#endif /* !_KERNEL */
877+
878+#endif /* !_SYS_QUEUE_H_ */
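
Since the dropped patch carries the whole NetBSD header verbatim, it is easy to lose sight of how little a caller needs: the macros embed the link pointers in the element type and the list head wherever the caller declares it, so no allocation or library code is involved. A minimal usage sketch against the SLIST macros defined above (hypothetical element type, not code from libtirpc):

    #include <stdio.h>
    #include <stdlib.h>
    #include "queue.h"   /* the borrowed NetBSD sys/queue.h shown above */

    struct node {
        int value;
        SLIST_ENTRY(node) link;   /* embedded next pointer */
    };

    SLIST_HEAD(node_list, node);  /* declares struct node_list */

    int main(void)
    {
        struct node_list head;
        struct node *n, *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

        if (a == NULL || b == NULL)
            return 1;
        a->value = 1;
        b->value = 2;

        SLIST_INIT(&head);
        SLIST_INSERT_HEAD(&head, a, link);
        SLIST_INSERT_HEAD(&head, b, link);   /* list order is now 2, 1 */

        SLIST_FOREACH(n, &head, link)
            printf("%d\n", n->value);

        free(a);
        free(b);
        return 0;
    }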
diff --git a/meta/recipes-extended/libtirpc/libtirpc/export_key_secretkey_is_set.patch b/meta/recipes-extended/libtirpc/libtirpc/export_key_secretkey_is_set.patch
deleted file mode 100644
index d7f4968669..0000000000
--- a/meta/recipes-extended/libtirpc/libtirpc/export_key_secretkey_is_set.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-Upstream-Status: Backport
-Signed-off-by: Ross Burton <ross.burton@intel.com>
-
-From e51d67549b516b2dac6c71d92c8499f6e67125ad Mon Sep 17 00:00:00 2001
-From: Thorsten Kukuk <kukuk@thkukuk.de>
-Date: Tue, 14 Nov 2017 10:43:53 -0500
-Subject: [PATCH] Fix typo in src/libtirpc.map
-
-Which prevents that key_secretkey_is_set will be exported.
-
-Signed-off-by: Thorsten Kukuk <kukuk@suse.de>
-Signed-off-by: Steve Dickson <steved@redhat.com>
----
- src/libtirpc.map | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/src/libtirpc.map b/src/libtirpc.map
-index f385de5..21d6065 100644
---- a/src/libtirpc.map
-+++ b/src/libtirpc.map
-@@ -298,7 +298,7 @@ TIRPC_0.3.2 {
- key_gendes;
- key_get_conv;
- key_setsecret;
-- key_secret_is_set;
-+ key_secretkey_is_set;
- key_setnet;
- netname2host;
- netname2user;
---
-1.8.3.1
diff --git a/meta/recipes-extended/libtirpc/libtirpc/libtirpc-1.0.4-rc1.patch b/meta/recipes-extended/libtirpc/libtirpc/libtirpc-1.0.4-rc1.patch
new file mode 100644
index 0000000000..6d40d3cce8
--- /dev/null
+++ b/meta/recipes-extended/libtirpc/libtirpc/libtirpc-1.0.4-rc1.patch
@@ -0,0 +1,103 @@
+Patch from Fedora https://src.fedoraproject.org/rpms/libtirpc/raw/master/f/libtirpc-1.0.4-rc1.patch
+
+Upstream-Status: Backport
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+
+diff --git a/src/clnt_generic.c b/src/clnt_generic.c
+index e5a314f..3f3dabf 100644
+--- a/src/clnt_generic.c
++++ b/src/clnt_generic.c
+@@ -47,7 +47,6 @@
+
+ extern bool_t __rpc_is_local_host(const char *);
+ int __rpc_raise_fd(int);
+-extern int __binddynport(int fd);
+
+ #ifndef NETIDLEN
+ #define NETIDLEN 32
+@@ -341,8 +340,7 @@ clnt_tli_create(int fd, const struct netconfig *nconf,
+ servtype = nconf->nc_semantics;
+ if (!__rpc_fd2sockinfo(fd, &si))
+ goto err;
+- if (__binddynport(fd) == -1)
+- goto err;
++ bindresvport(fd, NULL);
+ } else {
+ if (!__rpc_fd2sockinfo(fd, &si))
+ goto err;
+diff --git a/src/rpc_soc.c b/src/rpc_soc.c
+index af6c482..5a6eeb7 100644
+--- a/src/rpc_soc.c
++++ b/src/rpc_soc.c
+@@ -67,8 +67,6 @@
+
+ extern mutex_t rpcsoc_lock;
+
+-extern int __binddynport(int fd);
+-
+ static CLIENT *clnt_com_create(struct sockaddr_in *, rpcprog_t, rpcvers_t,
+ int *, u_int, u_int, char *, int);
+ static SVCXPRT *svc_com_create(int, u_int, u_int, char *);
+@@ -147,8 +145,7 @@ clnt_com_create(raddr, prog, vers, sockp, sendsz, recvsz, tp, flags)
+ bindaddr.maxlen = bindaddr.len = sizeof (struct sockaddr_in);
+ bindaddr.buf = raddr;
+
+- if (__binddynport(fd) == -1)
+- goto err;
++ bindresvport(fd, NULL);
+ cl = clnt_tli_create(fd, nconf, &bindaddr, prog, vers,
+ sendsz, recvsz);
+ if (cl) {
+diff --git a/src/rpcb_clnt.c b/src/rpcb_clnt.c
+index a94fc73..4b44364 100644
+--- a/src/rpcb_clnt.c
++++ b/src/rpcb_clnt.c
+@@ -752,7 +752,7 @@ __try_protocol_version_2(program, version, nconf, host, tp)
+
+ client = getpmaphandle(nconf, host, &parms.r_addr);
+ if (client == NULL)
+- return (NULL);
++ goto error;
+
+ /*
+ * Set retry timeout.
+@@ -771,11 +771,11 @@ __try_protocol_version_2(program, version, nconf, host, tp)
+ if (clnt_st != RPC_SUCCESS) {
+ rpc_createerr.cf_stat = RPC_PMAPFAILURE;
+ clnt_geterr(client, &rpc_createerr.cf_error);
+- return (NULL);
++ goto error;
+ } else if (port == 0) {
+ pmapaddress = NULL;
+ rpc_createerr.cf_stat = RPC_PROGNOTREGISTERED;
+- return (NULL);
++ goto error;
+ }
+ port = htons(port);
+ CLNT_CONTROL(client, CLGET_SVC_ADDR, (char *)&remote);
+@@ -789,14 +789,24 @@ __try_protocol_version_2(program, version, nconf, host, tp)
+ free(pmapaddress);
+ pmapaddress = NULL;
+ }
+- return (NULL);
++ goto error;
+ }
+ memcpy(pmapaddress->buf, remote.buf, remote.len);
+ memcpy(&((char *)pmapaddress->buf)[sizeof (short)],
+ (char *)(void *)&port, sizeof (short));
+ pmapaddress->len = pmapaddress->maxlen = remote.len;
+
++ CLNT_DESTROY(client);
+ return pmapaddress;
++
++error:
++ if (client) {
++ CLNT_DESTROY(client);
++ client = NULL;
++
++ }
++ return (NULL);
++
+ }
+ #endif
+
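
For context on the first two hunks: bindresvport() binds a socket to an anonymous reserved (privileged) port, and passing a NULL address lets the library pick the port, which is how the patched clnt_tli_create() and clnt_com_create() now behave; the rpcb_clnt.c hunks separately plug a CLIENT handle leak by funnelling every failure through one error: label that calls CLNT_DESTROY(). A standalone sketch of the bindresvport() call, assuming the usual prototype int bindresvport(int, struct sockaddr_in *) from <rpc/rpc.h> and linking against libtirpc:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <rpc/rpc.h>      /* bindresvport() */

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0) {
            perror("socket");
            return 1;
        }
        /* NULL: let the library choose any free reserved port (< 1024).
         * Typically needs root or CAP_NET_BIND_SERVICE to succeed;
         * the patched call sites ignore failure as well. */
        if (bindresvport(fd, NULL) < 0)
            perror("bindresvport");
        close(fd);
        return 0;
    }

On hosts whose C library no longer ships SunRPC, a sketch like this builds with the flags reported by pkg-config --cflags --libs libtirpc.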
diff --git a/meta/recipes-extended/libtirpc/libtirpc/musl.patch b/meta/recipes-extended/libtirpc/libtirpc/musl.patch
new file mode 100644
index 0000000000..0c3ce603ea
--- /dev/null
+++ b/meta/recipes-extended/libtirpc/libtirpc/musl.patch
@@ -0,0 +1,18 @@
+Consider musl provided built-in defines
+
+Helps compile libtirpc with musl
+
+Upstream-Status: Pending
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+
+--- ./tirpc/rpc/types.h.orig 2018-03-17 10:23:10.022055255 +0100
++++ ./tirpc/rpc/types.h 2018-03-17 10:23:30.877751656 +0100
+@@ -66,7 +66,7 @@
+ #define mem_free(ptr, bsize) free(ptr)
+
+
+-#if defined __APPLE_CC__ || defined __FreeBSD__
++#if defined __APPLE_CC__ || defined __FreeBSD__ || !defined(__GLIBC__)
+ # define __u_char_defined
+ # define __daddr_t_defined
+ #endif
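
The one-line change above works because musl never defines __GLIBC__, so !defined(__GLIBC__) is the closest available compile-time test for it; any non-glibc libc then takes the same branch as macOS and FreeBSD, and types.h treats u_char and daddr_t as already provided by the libc. A hedged sketch of that detection idiom (the macro name here is made up for illustration):

    #include <stdio.h>   /* pulls in the libc's feature macros */

    /* musl intentionally exposes no version macro of its own, so code that
     * must distinguish it usually tests "Linux but not glibc", as the
     * patched guard does with !defined(__GLIBC__). */
    #if defined(__linux__) && !defined(__GLIBC__)
    # define PROBABLY_MUSL 1
    #else
    # define PROBABLY_MUSL 0
    #endif

    int main(void)
    {
        printf("probably building against musl: %d\n", PROBABLY_MUSL);
        return 0;
    }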
diff --git a/meta/recipes-extended/libtirpc/libtirpc_1.0.2.bb b/meta/recipes-extended/libtirpc/libtirpc_1.0.3.bb
index 99212600e0..17bc038d67 100644
--- a/meta/recipes-extended/libtirpc/libtirpc_1.0.2.bb
+++ b/meta/recipes-extended/libtirpc/libtirpc_1.0.3.bb
@@ -10,17 +10,12 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=f835cce8852481e4b2bbbdd23b5e47f3 \
 PROVIDES = "virtual/librpc"
 
 SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2 \
-           file://export_key_secretkey_is_set.patch \
-           file://0001-replace-__bzero-with-memset-API.patch \
-           file://0001-include-stdint.h-for-uintptr_t.patch \
+           file://libtirpc-1.0.4-rc1.patch \
+           file://musl.patch \
            "
 
-SRC_URI_append_libc-musl = " \
-        file://Use-netbsd-queue.h.patch \
-        "
-
-SRC_URI[md5sum] = "d5a37f1dccec484f9cabe2b97e54e9a6"
-SRC_URI[sha256sum] = "723c5ce92706cbb601a8db09110df1b4b69391643158f20ff587e20e7c5f90f5"
+SRC_URI[md5sum] = "f8403a10695348854e71d525c4db5931"
+SRC_URI[sha256sum] = "86c3a78fc1bddefa96111dd233124c703b22a78884203c55c3e06b3be6a0fd5e"
 
 inherit autotools pkgconfig
 