Diffstat (limited to 'meta/recipes-core/glibc/glibc/0032-elf-Use-relaxed-atomics-for-racy-accesses-BZ-19329.patch')
-rw-r--r--  meta/recipes-core/glibc/glibc/0032-elf-Use-relaxed-atomics-for-racy-accesses-BZ-19329.patch  206
1 file changed, 206 insertions(+), 0 deletions(-)
diff --git a/meta/recipes-core/glibc/glibc/0032-elf-Use-relaxed-atomics-for-racy-accesses-BZ-19329.patch b/meta/recipes-core/glibc/glibc/0032-elf-Use-relaxed-atomics-for-racy-accesses-BZ-19329.patch
new file mode 100644
index 0000000000..eb8ef3161c
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/0032-elf-Use-relaxed-atomics-for-racy-accesses-BZ-19329.patch
@@ -0,0 +1,206 @@
From f4f8f4d4e0f92488431b268c8cd9555730b9afe9 Mon Sep 17 00:00:00 2001
From: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date: Wed, 30 Dec 2020 19:19:37 +0000
Subject: [PATCH] elf: Use relaxed atomics for racy accesses [BZ #19329]

This is a follow up patch to the fix for bug 19329. This adds relaxed
MO atomics to accesses that were previously data races but are now
race conditions, and where relaxed MO is sufficient.

The race conditions all follow the pattern that the write is behind the
dlopen lock, but a read can happen concurrently (e.g. during tls access)
without holding the lock. For slotinfo entries the read value only
matters if it reads from a synchronized write in dlopen or dlclose,
otherwise the related dtv entry is not valid to access so it is fine
to leave it in an inconsistent state. The same applies for
GL(dl_tls_max_dtv_idx) and GL(dl_tls_generation), but there the
algorithm relies on the fact that the read of the last synchronized
write is an increasing value.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
---
 elf/dl-close.c          | 20 +++++++++++++-------
 elf/dl-open.c           |  5 ++++-
 elf/dl-tls.c            | 31 +++++++++++++++++++++++--------
 sysdeps/x86_64/dl-tls.c |  3 ++-
 4 files changed, 42 insertions(+), 17 deletions(-)
---
Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=patch;h=f4f8f4d4e0f92488431b268c8cd9555730b9afe9]
Comment: Hunks from elf/dl-open.c and elf/dl-tls.c are refreshed due to offset change.
Signed-off-by: Akash Hadke <akash.hadke@kpit.com>
Signed-off-by: Akash Hadke <hadkeakash4@gmail.com>
---
diff --git a/elf/dl-close.c b/elf/dl-close.c
index c51becd06b..3720e47dd1 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -79,9 +79,10 @@ remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
 {
 assert (old_map->l_tls_modid == idx);

- /* Mark the entry as unused. */
- listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
- listp->slotinfo[idx - disp].map = NULL;
+ /* Mark the entry as unused. These can be read concurrently. */
+ atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
+ GL(dl_tls_generation) + 1);
+ atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
 }

 /* If this is not the last currently used entry no need to look
@@ -96,8 +97,8 @@ remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,

 if (listp->slotinfo[idx - disp].map != NULL)
 {
- /* Found a new last used index. */
- GL(dl_tls_max_dtv_idx) = idx;
+ /* Found a new last used index. This can be read concurrently. */
+ atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
 return true;
 }
 }
@@ -571,7 +572,9 @@ _dl_close_worker (struct link_map *map, bool force)
 GL(dl_tls_dtv_slotinfo_list), 0,
 imap->l_init_called))
 /* All dynamically loaded modules with TLS are unloaded. */
- GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);
+ /* Can be read concurrently. */
+ atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
+ GL(dl_tls_static_nelem));

 if (imap->l_tls_offset != NO_TLS_OFFSET
 && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
@@ -769,8 +772,11 @@ _dl_close_worker (struct link_map *map, bool force)
 /* If we removed any object which uses TLS bump the generation counter. */
 if (any_tls)
 {
- if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
+ size_t newgen = GL(dl_tls_generation) + 1;
+ if (__glibc_unlikely (newgen == 0))
 _dl_fatal_printf ("TLS generation counter wrapped! Please report as described in "REPORT_BUGS_TO".\n");
+ /* Can be read concurrently. */
+ atomic_store_relaxed (&GL(dl_tls_generation), newgen);

 if (tls_free_end == GL(dl_tls_static_used))
 GL(dl_tls_static_used) = tls_free_start;
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 09f0df7d38..bb79ef00f1 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -387,9 +387,12 @@
 }
 }

- if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
+ size_t newgen = GL(dl_tls_generation) + 1;
+ if (__glibc_unlikely (newgen == 0))
 _dl_fatal_printf (N_("\
 TLS generation counter wrapped! Please report this."));
+ /* Can be read concurrently. */
+ atomic_store_relaxed (&GL(dl_tls_generation), newgen);

 /* We need a second pass for static tls data, because
 _dl_update_slotinfo must not be run while calls to
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index 94f3cdbae0..dc69cd984e 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -96,7 +96,9 @@
 /* No gaps, allocate a new entry. */
 nogaps:

- result = ++GL(dl_tls_max_dtv_idx);
+ result = GL(dl_tls_max_dtv_idx) + 1;
+ /* Can be read concurrently. */
+ atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), result);
 }

 return result;
@@ -279,10 +281,12 @@
 dtv_t *dtv;
 size_t dtv_length;

+ /* Relaxed MO, because the dtv size is later rechecked, not relied on. */
+ size_t max_modid = atomic_load_relaxed (&GL(dl_tls_max_dtv_idx));
 /* We allocate a few more elements in the dtv than are needed for the
 initial set of modules. This should avoid in most cases expansions
 of the dtv. */
- dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
+ dtv_length = max_modid + DTV_SURPLUS;
 dtv = calloc (dtv_length + 2, sizeof (dtv_t));
 if (dtv != NULL)
 {
@@ -687,7 +691,7 @@
 if (modid > max_modid)
 break;

- size_t gen = listp->slotinfo[cnt].gen;
+ size_t gen = atomic_load_relaxed (&listp->slotinfo[cnt].gen);

 if (gen > new_gen)
 /* Not relevant. */
@@ -699,7 +703,8 @@
 continue;

 /* If there is no map this means the entry is empty. */
- struct link_map *map = listp->slotinfo[cnt].map;
+ struct link_map *map
+ = atomic_load_relaxed (&listp->slotinfo[cnt].map);
 /* Check whether the current dtv array is large enough. */
 if (dtv[-1].counter < modid)
 {
@@ -843,7 +848,12 @@
 {
 dtv_t *dtv = THREAD_DTV ();

- if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
+ /* Update is needed if dtv[0].counter < the generation of the accessed
+ module. The global generation counter is used here as it is easier
+ to check. Synchronization for the relaxed MO access is guaranteed
+ by user code, see CONCURRENCY NOTES in _dl_update_slotinfo. */
+ size_t gen = atomic_load_relaxed (&GL(dl_tls_generation));
+ if (__glibc_unlikely (dtv[0].counter != gen))
 return update_get_addr (GET_ADDR_PARAM);

 void *p = dtv[GET_ADDR_MODULE].pointer.val;
@@ -866,7 +876,10 @@
 return NULL;

 dtv_t *dtv = THREAD_DTV ();
- if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
+ /* This may be called without holding the GL(dl_load_lock). Reading
+ arbitrary gen value is fine since this is best effort code. */
+ size_t gen = atomic_load_relaxed (&GL(dl_tls_generation));
+ if (__glibc_unlikely (dtv[0].counter != gen))
 {
 /* This thread's DTV is not completely current,
 but it might already cover this module. */
@@ -961,7 +974,9 @@
 /* Add the information into the slotinfo data structure. */
 if (do_add)
 {
- listp->slotinfo[idx].map = l;
- listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
+ /* Can be read concurrently. See _dl_update_slotinfo. */
+ atomic_store_relaxed (&listp->slotinfo[idx].map, l);
+ atomic_store_relaxed (&listp->slotinfo[idx].gen,
+ GL(dl_tls_generation) + 1);
 }
 }

diff --git a/sysdeps/x86_64/dl-tls.c b/sysdeps/x86_64/dl-tls.c
index 6595f6615b..24ef560b71 100644
--- a/sysdeps/x86_64/dl-tls.c
+++ b/sysdeps/x86_64/dl-tls.c
@@ -40,7 +40,8 @@ __tls_get_addr_slow (GET_ADDR_ARGS)
 {
 dtv_t *dtv = THREAD_DTV ();

- if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
+ size_t gen = atomic_load_relaxed (&GL(dl_tls_generation));
+ if (__glibc_unlikely (dtv[0].counter != gen))
 return update_get_addr (GET_ADDR_PARAM);

 return tls_get_addr_tail (GET_ADDR_PARAM, dtv, NULL);
--
2.27.0
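
For readers of the recipe, a standalone illustration of the pattern the commit message describes may help: the writer bumps a monotonically increasing generation counter only while holding the loader lock and publishes it with a relaxed store, while readers poll it with relaxed loads and rely on other synchronization for the data the counter guards. The sketch below is not part of the patch and is not glibc code; it approximates glibc's internal atomic_store_relaxed/atomic_load_relaxed macros with C11 <stdatomic.h>, and the names load_lock, generation, bump_generation and needs_update are illustrative stand-ins.

/* Standalone sketch (not glibc code).  load_lock stands in for
   GL(dl_load_lock) and generation for GL(dl_tls_generation).  */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t load_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_size_t generation;   /* only ever increases */

/* Writer side: updates happen under the lock, as in dlopen/dlclose.  */
static void
bump_generation (void)
{
  pthread_mutex_lock (&load_lock);
  size_t newgen = atomic_load_explicit (&generation, memory_order_relaxed) + 1;
  if (newgen == 0)
    fprintf (stderr, "generation counter wrapped\n");
  /* Publish with relaxed MO: concurrent readers only need to observe
     some previously published value, never a torn one.  */
  atomic_store_explicit (&generation, newgen, memory_order_relaxed);
  pthread_mutex_unlock (&load_lock);
}

/* Reader side: runs without the lock, like the __tls_get_addr fast path.
   A stale value at worst sends the caller to a slow path that re-checks
   under proper synchronization, so relaxed MO is sufficient.  */
static int
needs_update (size_t cached_gen)
{
  return cached_gen != atomic_load_explicit (&generation, memory_order_relaxed);
}

int
main (void)
{
  bump_generation ();
  printf ("update needed: %d\n", needs_update (0));
  return 0;
}

In the real code the callers already hold GL(dl_load_lock) at the store sites touched by this patch; the lock acquisition inside bump_generation is only there to keep the sketch self-contained.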