summaryrefslogtreecommitdiffstats
path: root/meta/packages/uclibc/uclibc-0.9.30.1/linuxthreads-changes.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta/packages/uclibc/uclibc-0.9.30.1/linuxthreads-changes.patch')
-rw-r--r--meta/packages/uclibc/uclibc-0.9.30.1/linuxthreads-changes.patch291
1 file changed, 291 insertions(+), 0 deletions(-)
diff --git a/meta/packages/uclibc/uclibc-0.9.30.1/linuxthreads-changes.patch b/meta/packages/uclibc/uclibc-0.9.30.1/linuxthreads-changes.patch
new file mode 100644
index 0000000000..f6f32cdd29
--- /dev/null
+++ b/meta/packages/uclibc/uclibc-0.9.30.1/linuxthreads-changes.patch
@@ -0,0 +1,291 @@
1Index: uClibc/libpthread/linuxthreads/descr.h
2===================================================================
3--- uClibc.orig/libpthread/linuxthreads/descr.h 2008-08-27 23:59:46.171809044 +0200
4+++ uClibc/libpthread/linuxthreads/descr.h 2008-08-28 00:00:35.435134759 +0200
5@@ -123,9 +123,9 @@
6 union dtv *dtvp;
7 pthread_descr self; /* Pointer to this structure */
8 int multiple_threads;
9-# ifdef NEED_DL_SYSINFO
10 uintptr_t sysinfo;
11-# endif
12+ uintptr_t stack_guard;
13+ uintptr_t pointer_guard;
14 } data;
15 void *__padding[16];
16 } p_header;
17@@ -193,6 +193,13 @@
18 size_t p_alloca_cutoff; /* Maximum size which should be allocated
19 using alloca() instead of malloc(). */
20 /* New elements must be added at the end. */
21+
22+ /* This member must be last. */
23+ char end_padding[];
24+
25+#define PTHREAD_STRUCT_END_PADDING \
26+ (sizeof (struct _pthread_descr_struct) \
27+ - offsetof (struct _pthread_descr_struct, end_padding))
28 } __attribute__ ((aligned(32))); /* We need to align the structure so that
29 doubles are aligned properly. This is 8
30 bytes on MIPS and 16 bytes on MIPS64.
31Index: uClibc/libpthread/linuxthreads/manager.c
32===================================================================
33--- uClibc.orig/libpthread/linuxthreads/manager.c 2008-08-27 23:59:54.185140485 +0200
34+++ uClibc/libpthread/linuxthreads/manager.c 2008-08-28 00:00:35.435134759 +0200
35@@ -679,6 +679,17 @@
36 new_thread->p_inheritsched = attr ? attr->__inheritsched : 0;
37 new_thread->p_alloca_cutoff = stksize / 4 > __MAX_ALLOCA_CUTOFF
38 ? __MAX_ALLOCA_CUTOFF : stksize / 4;
39+
40+ /* Copy the stack guard canary. */
41+#ifdef THREAD_COPY_STACK_GUARD
42+ THREAD_COPY_STACK_GUARD (new_thread);
43+#endif
44+
45+ /* Copy the pointer guard value. */
46+#ifdef THREAD_COPY_POINTER_GUARD
47+ THREAD_COPY_POINTER_GUARD (new_thread);
48+#endif
49+
50 /* Initialize the thread handle */
51 __pthread_init_lock(&__pthread_handles[sseg].h_lock);
52 __pthread_handles[sseg].h_descr = new_thread;
53@@ -742,15 +753,15 @@
54 pid = __clone2(pthread_start_thread_event,
55 (void **)new_thread_bottom,
56 (char *)stack_addr - new_thread_bottom,
57- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
58+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
59 __pthread_sig_cancel, new_thread);
60 #elif _STACK_GROWS_UP
61 pid = __clone(pthread_start_thread_event, (void *) new_thread_bottom,
62- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
63+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
64 __pthread_sig_cancel, new_thread);
65 #else
66 pid = __clone(pthread_start_thread_event, stack_addr,
67- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
68+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
69 __pthread_sig_cancel, new_thread);
70 #endif
71 saved_errno = errno;
72@@ -783,15 +794,15 @@
73 pid = __clone2(pthread_start_thread,
74 (void **)new_thread_bottom,
75 (char *)stack_addr - new_thread_bottom,
76- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
77+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
78 __pthread_sig_cancel, new_thread);
79 #elif _STACK_GROWS_UP
80 pid = __clone(pthread_start_thread, (void *) new_thread_bottom,
81- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
82+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
83 __pthread_sig_cancel, new_thread);
84 #else
85 pid = __clone(pthread_start_thread, stack_addr,
86- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
87+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
88 __pthread_sig_cancel, new_thread);
89 #endif /* !NEED_SEPARATE_REGISTER_STACK */
90 saved_errno = errno;
91@@ -892,10 +903,11 @@
92 #ifdef _STACK_GROWS_UP
93 # ifdef USE_TLS
94 size_t stacksize = guardaddr - th->p_stackaddr;
95+ guardaddr = th->p_stackaddr;
96 # else
97 size_t stacksize = guardaddr - (char *)th;
98-# endif
99 guardaddr = (char *)th;
100+# endif
101 #else
102 /* Guardaddr is always set, even if guardsize is 0. This allows
103 us to compute everything else. */
104Index: uClibc/libpthread/linuxthreads/pthread.c
105===================================================================
106--- uClibc.orig/libpthread/linuxthreads/pthread.c 2008-08-28 00:00:00.825141935 +0200
107+++ uClibc/libpthread/linuxthreads/pthread.c 2008-08-28 00:00:35.438472147 +0200
108@@ -698,6 +698,16 @@
109 mgr = &__pthread_manager_thread;
110 #endif
111
112+ /* Copy the stack guard canary. */
113+#ifdef THREAD_COPY_STACK_GUARD
114+ THREAD_COPY_STACK_GUARD (mgr);
115+#endif
116+
117+ /* Copy the pointer guard value. */
118+#ifdef THREAD_COPY_POINTER_GUARD
119+ THREAD_COPY_POINTER_GUARD (mgr);
120+#endif
121+
122 __pthread_manager_request = manager_pipe[1]; /* writing end */
123 __pthread_manager_reader = manager_pipe[0]; /* reading end */
124
125@@ -738,17 +748,17 @@
126 pid = __clone2(__pthread_manager_event,
127 (void **) __pthread_manager_thread_bos,
128 THREAD_MANAGER_STACK_SIZE,
129- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
130+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
131 mgr);
132 #elif _STACK_GROWS_UP
133 pid = __clone(__pthread_manager_event,
134 (void **) __pthread_manager_thread_bos,
135- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
136+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
137 mgr);
138 #else
139 pid = __clone(__pthread_manager_event,
140 (void **) __pthread_manager_thread_tos,
141- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
142+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
143 mgr);
144 #endif
145
146@@ -778,13 +788,13 @@
147 #ifdef NEED_SEPARATE_REGISTER_STACK
148 pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
149 THREAD_MANAGER_STACK_SIZE,
150- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
151+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
152 #elif _STACK_GROWS_UP
153 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
154- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
155+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
156 #else
157 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
158- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
159+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
160 #endif
161 }
162 if (__builtin_expect (pid, 0) == -1) {
163@@ -971,6 +981,10 @@
164 struct pthread_request request;
165 pthread_descr self = thread_self();
166
167+ /* Make sure we come back here after suspend(), in case we entered
168+ from a signal handler. */
169+ THREAD_SETMEM(self, p_signal_jmp, NULL);
170+
171 request.req_thread = self;
172 request.req_kind = REQ_PROCESS_EXIT;
173 request.req_args.exit.code = retcode;
174@@ -1198,13 +1212,13 @@
175
176 void __pthread_restart_old(pthread_descr th)
177 {
178- if (atomic_increment(&th->p_resume_count) == -1)
179+ if (pthread_atomic_increment(&th->p_resume_count) == -1)
180 kill(th->p_pid, __pthread_sig_restart);
181 }
182
183 void __pthread_suspend_old(pthread_descr self)
184 {
185- if (atomic_decrement(&self->p_resume_count) <= 0)
186+ if (pthread_atomic_decrement(&self->p_resume_count) <= 0)
187 __pthread_wait_for_restart_signal(self);
188 }
189
190@@ -1215,7 +1229,7 @@
191 int was_signalled = 0;
192 sigjmp_buf jmpbuf;
193
194- if (atomic_decrement(&self->p_resume_count) == 0) {
195+ if (pthread_atomic_decrement(&self->p_resume_count) == 0) {
196 /* Set up a longjmp handler for the restart signal, unblock
197 the signal and sleep. */
198
199@@ -1272,9 +1286,9 @@
200 being delivered. */
201
202 if (!was_signalled) {
203- if (atomic_increment(&self->p_resume_count) != -1) {
204+ if (pthread_atomic_increment(&self->p_resume_count) != -1) {
205 __pthread_wait_for_restart_signal(self);
206- atomic_decrement(&self->p_resume_count); /* should be zero now! */
207+ pthread_atomic_decrement(&self->p_resume_count); /* should be zero now! */
208 /* woke spontaneously and consumed restart signal */
209 return 1;
210 }
211Index: uClibc/libpthread/linuxthreads/specific.c
212===================================================================
213--- uClibc.orig/libpthread/linuxthreads/specific.c 2008-08-28 00:00:07.595139286 +0200
214+++ uClibc/libpthread/linuxthreads/specific.c 2008-08-28 00:00:35.438472147 +0200
215@@ -104,13 +104,14 @@
216 that if the key is reallocated later by pthread_key_create, its
217 associated values will be NULL in all threads.
218
219- If no threads have been created yet, clear it just in the
220- current thread. */
221+ If no threads have been created yet, or if we are exiting, clear
222+ it just in the current thread. */
223
224 struct pthread_key_delete_helper_args args;
225 args.idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;
226 args.idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;
227- if (__pthread_manager_request != -1)
228+ if (__pthread_manager_request != -1
229+ && !(__builtin_expect (__pthread_exit_requested, 0)))
230 {
231 struct pthread_request request;
232
233@@ -203,8 +204,9 @@
234 __pthread_lock(THREAD_GETMEM(self, p_lock), self);
235 for (i = 0; i < PTHREAD_KEY_1STLEVEL_SIZE; i++) {
236 if (THREAD_GETMEM_NC(self, p_specific[i]) != NULL) {
237- free(THREAD_GETMEM_NC(self, p_specific[i]));
238+ void *p = THREAD_GETMEM_NC(self, p_specific[i]);
239 THREAD_SETMEM_NC(self, p_specific[i], NULL);
240+ free(p);
241 }
242 }
243 __pthread_unlock(THREAD_GETMEM(self, p_lock));
244Index: uClibc/libpthread/linuxthreads/spinlock.c
245===================================================================
246--- uClibc.orig/libpthread/linuxthreads/spinlock.c 2008-08-28 00:00:17.805140454 +0200
247+++ uClibc/libpthread/linuxthreads/spinlock.c 2008-08-28 00:00:35.438472147 +0200
248@@ -637,8 +637,20 @@
249 #if defined HAS_COMPARE_AND_SWAP
250 wait_node_dequeue(pp_head, pp_max_prio, p_max_prio);
251 #endif
252+
253+ /* Release the spinlock before restarting. */
254+#if defined TEST_FOR_COMPARE_AND_SWAP
255+ if (!__pthread_has_cas)
256+#endif
257+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
258+ {
259+ __pthread_release(&lock->__spinlock);
260+ }
261+#endif
262+
263 restart(p_max_prio->thr);
264- break;
265+
266+ return;
267 }
268 }
269
270Index: uClibc/libpthread/linuxthreads/spinlock.h
271===================================================================
272--- uClibc.orig/libpthread/linuxthreads/spinlock.h 2008-08-28 00:00:24.768471655 +0200
273+++ uClibc/libpthread/linuxthreads/spinlock.h 2008-08-28 00:02:42.971786951 +0200
274@@ -172,7 +172,7 @@
275
276 /* Operations on pthread_atomic, which is defined in internals.h */
277
278-static __inline__ long atomic_increment(struct pthread_atomic *pa)
279+static __inline__ long pthread_atomic_increment(struct pthread_atomic *pa)
280 {
281 long oldval;
282
283@@ -184,7 +184,7 @@
284 }
285
286
287-static __inline__ long atomic_decrement(struct pthread_atomic *pa)
288+static __inline__ long pthread_atomic_decrement(struct pthread_atomic *pa)
289 {
290 long oldval;
291