path: root/meta/recipes-core/glibc/glibc/0037-Avoid-deadlock-between-pthread_create-and-ctors.patch
Diffstat (limited to 'meta/recipes-core/glibc/glibc/0037-Avoid-deadlock-between-pthread_create-and-ctors.patch')
-rw-r--r--  meta/recipes-core/glibc/glibc/0037-Avoid-deadlock-between-pthread_create-and-ctors.patch  276
1 file changed, 276 insertions, 0 deletions
diff --git a/meta/recipes-core/glibc/glibc/0037-Avoid-deadlock-between-pthread_create-and-ctors.patch b/meta/recipes-core/glibc/glibc/0037-Avoid-deadlock-between-pthread_create-and-ctors.patch
new file mode 100644
index 0000000000..7a10131bad
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0037-Avoid-deadlock-between-pthread_create-and-ctors.patch
@@ -0,0 +1,276 @@
From 83b5323261bb72313bffcf37476c1b8f0847c736 Mon Sep 17 00:00:00 2001
From: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date: Wed, 15 Sep 2021 15:16:19 +0100
Subject: [PATCH] elf: Avoid deadlock between pthread_create and ctors [BZ
 #28357]

The fix for bug 19329 caused a regression such that pthread_create can
deadlock when concurrent ctors from dlopen are waiting for it to finish.
Use a new GL(dl_load_tls_lock) in pthread_create that is not taken
around ctors in dlopen.

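The deadlock needs two threads racing: one inside dlopen running a
constructor while GL(dl_load_lock) is held, the other inside
pthread_create waiting for the same lock to set up the new thread's
TLS.  Below is a minimal, hedged sketch of such a reproducer (it is
not the upstream test, which this backport drops); the module name
mod.so, its constructor and the shared flag are illustrative
assumptions, and the program is assumed to export `created` to the
module (e.g. linked with -Wl,--export-dynamic).  Whether it actually
hangs is timing dependent; it only illustrates the lock ordering.

  /* mod.so's constructor (not shown) is assumed to spin until the
     program sets `created`, which it can only do after its own
     pthread_create call returns.  */
  #include <dlfcn.h>
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>
  #include <stdlib.h>

  atomic_int created;   /* read by mod.so's constructor */

  static void *noop (void *arg) { return arg; }

  static void *spawner (void *arg)
  {
    pthread_t t;
    /* Before this fix: blocks on GL(dl_load_lock), already held by the
       dlopen below while it runs constructors.  */
    if (pthread_create (&t, NULL, noop, NULL) != 0)
      abort ();
    pthread_join (t, NULL);
    atomic_store (&created, 1);   /* lets mod.so's constructor return */
    return NULL;
  }

  int main (void)
  {
    pthread_t t2;
    if (pthread_create (&t2, NULL, spawner, NULL) != 0)
      abort ();
    /* Runs mod.so's constructor with GL(dl_load_lock) held; the
       constructor waits for `created`, which spawner cannot set
       because its pthread_create is stuck on the same lock.  */
    if (dlopen ("mod.so", RTLD_NOW) == NULL)
      fprintf (stderr, "dlopen: %s\n", dlerror ());
    pthread_join (t2, NULL);
    return 0;
  }
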
The new lock is also used in __tls_get_addr instead of GL(dl_load_lock).

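User code reaches __tls_get_addr through ordinary dynamic TLS
accesses; the locking is entirely inside the loader.  A hedged sketch
follows (libmod.so, counter and get_counter are illustrative, not part
of this patch): a TLS variable defined in a dlopen'd module is dynamic
TLS, so the first access from each thread typically goes through
__tls_get_addr, which now synchronizes with dlopen/dlclose via
GL(dl_load_tls_lock) rather than GL(dl_load_lock).

  /* libmod.c, built as libmod.so.  */
  __thread int counter;
  int *get_counter (void) { return &counter; }

  /* main.c  */
  #include <dlfcn.h>
  #include <stdio.h>

  int main (void)
  {
    void *h = dlopen ("./libmod.so", RTLD_NOW);
    if (h == NULL)
      { fprintf (stderr, "%s\n", dlerror ()); return 1; }
    int *(*get_counter) (void) = (int *(*) (void)) dlsym (h, "get_counter");
    *get_counter () = 42;     /* may enter __tls_get_addr internally */
    printf ("%d\n", *get_counter ());
    dlclose (h);
    return 0;
  }
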
The new lock is held in _dl_open_worker and _dl_close_worker around
most of the logic before/after the init/fini routines. When init/fini
routines are running then TLS is in a consistent, usable state.
In _dl_open_worker the new lock requires catching and reraising dlopen
failures that happen in the critical section.

The new lock is reinitialized in a fork child, to keep the existing
behaviour and it is kept recursive in case malloc interposition or TLS
access from signal handlers can retake it. It is not obvious if this
is necessary or helps, but avoids changing the preexisting behaviour.

The new lock may be more appropriate for dl_iterate_phdr too than
GL(dl_load_write_lock), since TLS state of an incompletely loaded
module may be accessed. If the new lock can replace the old one,
that can be a separate change.

Fixes bug 28357.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
---
 elf/dl-close.c             |  6 ++
 elf/dl-open.c              | 35 ++++++++-
 elf/dl-support.c           |  7 ++
 elf/dl-tls.c               | 16 ++---
 elf/rtld.c                 |  1 +
 sysdeps/nptl/fork.c        |  3 +
 sysdeps/generic/ldsodefs.h |  9 ++-
 7 files changed, 67 insertions(+), 10 deletions(-)
---
Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=patch;h=024a7640ab9ecea80e527f4e4d7f7a1868e952c5]
Comment: This patch is refreshed for glibc 2.31. In upstream glibc 2.34 several source files were reorganized, so this patch was updated to match the code present in glibc 2.31. The test case was removed.
Signed-off-by: Akash Hadke <akash.hadke@kpit.com>
Signed-off-by: Akash Hadke <hadkeakash4@gmail.com>
---
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 93ff5c96e9..cfe0f1c0c9 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -551,6 +551,9 @@
 size_t tls_free_end;
 tls_free_start = tls_free_end = NO_TLS_OFFSET;
 
+ /* Protects global and module specific TLS state. */
+ __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
+
 /* We modify the list of loaded objects. */
 __rtld_lock_lock_recursive (GL(dl_load_write_lock));
 
@@ -786,6 +789,9 @@
 GL(dl_tls_static_used) = tls_free_start;
 }
 
+ /* TLS is cleaned up for the unloaded modules. */
+ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
+
 #ifdef SHARED
 /* Auditing checkpoint: we have deleted all objects. */
 if (__glibc_unlikely (do_audit))
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 5295e931b0..6ea5dd2457 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -57,6 +57,9 @@
 (non-negative). */
 unsigned int original_global_scope_pending_adds;
 
+ /* Set to true if the end of dl_open_worker_begin was reached. */
+ bool worker_continue;
+
 /* Original parameters to the program and the current environment. */
 int argc;
 char **argv;
@@ -473,7 +476,7 @@
 }
 
 static void
-dl_open_worker (void *a)
+dl_open_worker_begin (void *a)
 {
 struct dl_open_args *args = a;
 const char *file = args->file;
@@ -747,6 +750,36 @@
 if (mode & RTLD_GLOBAL)
 add_to_global_resize (new);
 
+ args->worker_continue = true;
+}
+
+static void
+dl_open_worker (void *a)
+{
+ struct dl_open_args *args = a;
+
+ args->worker_continue = false;
+
+ {
+ /* Protects global and module specific TLS state. */
+ __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
+
+ struct dl_exception ex;
+ int err = _dl_catch_exception (&ex, dl_open_worker_begin, args);
+
+ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
+
+ if (__glibc_unlikely (ex.errstring != NULL))
+ /* Reraise the error. */
+ _dl_signal_exception (err, &ex, NULL);
+ }
+
+ if (!args->worker_continue)
+ return;
+
+ int mode = args->mode;
+ struct link_map *new = args->map;
+
 /* Run the initializer functions of new objects. Temporarily
 disable the exception handler, so that lazy binding failures are
 fatal. */
diff --git a/elf/dl-support.c b/elf/dl-support.c
index 02e2ed72f5..d99c1f1d62 100644
--- a/elf/dl-support.c
+++ b/elf/dl-support.c
@@ -219,6 +219,13 @@
 list of loaded objects while an object is added to or removed from
 that list. */
 __rtld_lock_define_initialized_recursive (, _dl_load_write_lock)
+/* This lock protects global and module specific TLS related data.
+ E.g. it is held in dlopen and dlclose when GL(dl_tls_generation),
+ GL(dl_tls_max_dtv_idx) or GL(dl_tls_dtv_slotinfo_list) are
+ accessed and when TLS related relocations are processed for a
+ module. It was introduced to keep pthread_create accessing TLS
+ state that is being set up. */
+__rtld_lock_define_initialized_recursive (, _dl_load_tls_lock)
 
 
 #ifdef HAVE_AUX_VECTOR
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index d554ae4497..9260d2d696 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -443,7 +443,7 @@
 size_t maxgen = 0;
 
 /* Protects global dynamic TLS related state. */
- __rtld_lock_lock_recursive (GL(dl_load_lock));
+ __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
 
 /* Check if the current dtv is big enough. */
 if (dtv[-1].counter < GL(dl_tls_max_dtv_idx))
@@ -517,7 +517,7 @@
 listp = listp->next;
 assert (listp != NULL);
 }
- __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
 
 /* The DTV version is up-to-date now. */
 dtv[0].counter = maxgen;
@@ -656,7 +656,7 @@
 
 Here the dtv needs to be updated to new_gen generation count.
 
- This code may be called during TLS access when GL(dl_load_lock)
+ This code may be called during TLS access when GL(dl_load_tls_lock)
 is not held. In that case the user code has to synchronize with
 dlopen and dlclose calls of relevant modules. A module m is
 relevant if the generation of m <= new_gen and dlclose of m is
@@ -778,11 +778,11 @@
 if (__glibc_unlikely (the_map->l_tls_offset
 != FORCED_DYNAMIC_TLS_OFFSET))
 {
- __rtld_lock_lock_recursive (GL(dl_load_lock));
+ __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
 if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
 {
 the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
- __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
 }
 else if (__glibc_likely (the_map->l_tls_offset
 != FORCED_DYNAMIC_TLS_OFFSET))
@@ -794,7 +794,7 @@
 #else
 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
 #endif
- __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
 
 dtv[GET_ADDR_MODULE].pointer.to_free = NULL;
 dtv[GET_ADDR_MODULE].pointer.val = p;
@@ -802,7 +802,7 @@
 return (char *) p + GET_ADDR_OFFSET;
 }
 else
- __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
 }
 struct dtv_pointer result = allocate_and_init (the_map);
 dtv[GET_ADDR_MODULE].pointer = result;
@@ -873,7 +873,7 @@
 return NULL;
 
 dtv_t *dtv = THREAD_DTV ();
- /* This may be called without holding the GL(dl_load_lock). Reading
+ /* This may be called without holding the GL(dl_load_tls_lock). Reading
 arbitrary gen value is fine since this is best effort code. */
 size_t gen = atomic_load_relaxed (&GL(dl_tls_generation));
 if (__glibc_unlikely (dtv[0].counter != gen))
diff --git a/elf/rtld.c b/elf/rtld.c
index 8d2bba3d43..9642eb9c92 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -283,6 +283,7 @@
 #ifdef _LIBC_REENTRANT
 ._dl_load_lock = _RTLD_LOCK_RECURSIVE_INITIALIZER,
 ._dl_load_write_lock = _RTLD_LOCK_RECURSIVE_INITIALIZER,
+ ._dl_load_tls_lock = _RTLD_LOCK_RECURSIVE_INITIALIZER,
 #endif
 ._dl_nns = 1,
 ._dl_ns =
diff --git a/sysdeps/nptl/fork.c b/sysdeps/nptl/fork.c
index c471f7b15f..021691b9b7 100644
--- a/sysdeps/nptl/fork.c
+++ b/sysdeps/nptl/fork.c
@@ -125,6 +125,9 @@
 /* Reset the lock the dynamic loader uses to protect its data. */
 __rtld_lock_initialize (GL(dl_load_lock));
 
+ /* Reset the lock protecting dynamic TLS related data. */
+ __rtld_lock_initialize (GL(dl_load_tls_lock));
+
 /* Run the handlers registered for the child. */
 __run_fork_handlers (atfork_run_child, multiple_threads);
 }
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index d49529da0d..9ec1511bb0 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -369,6 +369,13 @@
 list of loaded objects while an object is added to or removed
 from that list. */
 __rtld_lock_define_recursive (EXTERN, _dl_load_write_lock)
+ /* This lock protects global and module specific TLS related data.
+ E.g. it is held in dlopen and dlclose when GL(dl_tls_generation),
+ GL(dl_tls_max_dtv_idx) or GL(dl_tls_dtv_slotinfo_list) are
+ accessed and when TLS related relocations are processed for a
+ module. It was introduced to keep pthread_create accessing TLS
+ state that is being set up. */
+ __rtld_lock_define_recursive (EXTERN, _dl_load_tls_lock)
 
 /* Incremented whenever something may have been added to dl_loaded. */
 EXTERN unsigned long long _dl_load_adds;
@@ -1153,7 +1160,7 @@
 
 /* Add module to slot information data. If DO_ADD is false, only the
 required memory is allocated. Must be called with GL
- (dl_load_lock) acquired. If the function has already been called
+ (dl_load_tls_lock) acquired. If the function has already been called
 for the link map L with !do_add, then this function will not raise
 an exception, otherwise it is possible that it encounters a memory
 allocation failure. */
--
2.27.0