path: root/meta/packages/qemu/qemu-0.9.1+svn/qemu-0.9.0-nptl.patch
author    Richard Purdie <rpurdie@linux.intel.com>  2009-01-05 15:56:31 +0000
committer Richard Purdie <rpurdie@linux.intel.com>  2009-01-05 15:56:31 +0000
commit    55be33fd92859684db70a605c33b2c99a2c1a0f3 (patch)
tree      45d9b9fa480d28571cb2780a1fe88b7b0f43e3d2 /meta/packages/qemu/qemu-0.9.1+svn/qemu-0.9.0-nptl.patch
parent    5273e39edfeef73d0866276b447160cd7b1e9ecc (diff)
download  poky-55be33fd92859684db70a605c33b2c99a2c1a0f3.tar.gz
qemu: Upgrade to latest svn removing a ton of merged patches (yay)
Diffstat (limited to 'meta/packages/qemu/qemu-0.9.1+svn/qemu-0.9.0-nptl.patch')
-rw-r--r--  meta/packages/qemu/qemu-0.9.1+svn/qemu-0.9.0-nptl.patch  1100
1 file changed, 0 insertions(+), 1100 deletions(-)
diff --git a/meta/packages/qemu/qemu-0.9.1+svn/qemu-0.9.0-nptl.patch b/meta/packages/qemu/qemu-0.9.1+svn/qemu-0.9.0-nptl.patch
deleted file mode 100644
index ac68ebf460..0000000000
--- a/meta/packages/qemu/qemu-0.9.1+svn/qemu-0.9.0-nptl.patch
+++ /dev/null
@@ -1,1100 +0,0 @@
1---
2 configure | 25 ++++++
3 exec-all.h | 165 ------------------------------------------
4 linux-user/arm/syscall.h | 4 -
5 linux-user/main.c | 94 +++++++++++++++++++++---
6 linux-user/qemu.h | 3
7 linux-user/syscall.c | 91 ++++++++++++++++++++++-
8 qemu_spinlock.h | 181 +++++++++++++++++++++++++++++++++++++++++++++++
9 target-arm/cpu.h | 10 ++
10 target-arm/op.c | 6 +
11 target-arm/translate.c | 9 ++
12 10 files changed, 405 insertions(+), 183 deletions(-)
13
14Index: trunk/configure
15===================================================================
16--- trunk.orig/configure 2008-04-24 20:16:52.000000000 +0100
17+++ trunk/configure 2008-04-24 20:16:53.000000000 +0100
18@@ -112,6 +112,7 @@
19 build_docs="no"
20 uname_release=""
21 curses="yes"
22+nptl="yes"
23
24 # OS specific
25 targetos=`uname -s`
26@@ -339,6 +340,8 @@
27 ;;
28 *) echo "ERROR: unknown option $opt"; show_help="yes"
29 ;;
30+ --disable-nptl) nptl="no"
31+ ;;
32 esac
33 done
34
35@@ -436,6 +439,7 @@
36 echo " --disable-linux-user disable all linux usermode emulation targets"
37 echo " --enable-darwin-user enable all darwin usermode emulation targets"
38 echo " --disable-darwin-user disable all darwin usermode emulation targets"
39+echo " --disable-nptl disable usermode NPTL guest support"
40 echo " --fmod-lib path to FMOD library"
41 echo " --fmod-inc path to FMOD includes"
42 echo " --enable-uname-release=R Return R for uname -r in usermode emulation"
43@@ -647,6 +651,23 @@
44 }
45 EOF
46
47+# check NPTL support
48+cat > $TMPC <<EOF
49+#include <sched.h>
50+void foo()
51+{
52+#ifndef CLONE_SETTLS
53+#error bork
54+#endif
55+}
56+EOF
57+
58+if $cc -c -o $TMPO $TMPC 2> /dev/null ; then
59+ :
60+else
61+ nptl="no"
62+fi
63+
64 ##########################################
65 # SDL probe
66
67@@ -845,6 +866,7 @@
68 echo "Documentation $build_docs"
69 [ ! -z "$uname_release" ] && \
70 echo "uname -r $uname_release"
71+echo "NPTL support $nptl"
72
73 if test $sdl_too_old = "yes"; then
74 echo "-> Your SDL version is too old - please upgrade to have SDL support"
75@@ -1228,6 +1250,9 @@
76 echo "#define TARGET_ARM 1" >> $config_h
77 echo "#define CONFIG_NO_DYNGEN_OP 1" >> $config_h
78 bflt="yes"
79+ if test "$nptl" = "yes" ; then
80+ echo "#define USE_NPTL 1" >> $config_h
81+ fi
82 ;;
83 cris)
84 echo "TARGET_ARCH=cris" >> $config_mak
85Index: trunk/exec-all.h
86===================================================================
87--- trunk.orig/exec-all.h 2008-04-24 20:16:41.000000000 +0100
88+++ trunk/exec-all.h 2008-04-24 20:16:53.000000000 +0100
89@@ -303,217 +303,7 @@
90 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
91 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
92
93-#if defined(__hppa__)
94-
95-typedef int spinlock_t[4];
96-
97-#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }
98-
99-static inline void resetlock (spinlock_t *p)
100-{
101- (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
102-}
103-
104-#else
105-
106-typedef int spinlock_t;
107-
108-#define SPIN_LOCK_UNLOCKED 0
109-
110-static inline void resetlock (spinlock_t *p)
111-{
112- *p = SPIN_LOCK_UNLOCKED;
113-}
114-
115-#endif
116-
117-#if defined(__powerpc__)
118-static inline int testandset (int *p)
119-{
120- int ret;
121- __asm__ __volatile__ (
122- "0: lwarx %0,0,%1\n"
123- " xor. %0,%3,%0\n"
124- " bne 1f\n"
125- " stwcx. %2,0,%1\n"
126- " bne- 0b\n"
127- "1: "
128- : "=&r" (ret)
129- : "r" (p), "r" (1), "r" (0)
130- : "cr0", "memory");
131- return ret;
132-}
133-#elif defined(__i386__)
134-static inline int testandset (int *p)
135-{
136- long int readval = 0;
137-
138- __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
139- : "+m" (*p), "+a" (readval)
140- : "r" (1)
141- : "cc");
142- return readval;
143-}
144-#elif defined(__x86_64__)
145-static inline int testandset (int *p)
146-{
147- long int readval = 0;
148-
149- __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
150- : "+m" (*p), "+a" (readval)
151- : "r" (1)
152- : "cc");
153- return readval;
154-}
155-#elif defined(__s390__)
156-static inline int testandset (int *p)
157-{
158- int ret;
159-
160- __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
161- " jl 0b"
162- : "=&d" (ret)
163- : "r" (1), "a" (p), "0" (*p)
164- : "cc", "memory" );
165- return ret;
166-}
167-#elif defined(__alpha__)
168-static inline int testandset (int *p)
169-{
170- int ret;
171- unsigned long one;
172-
173- __asm__ __volatile__ ("0: mov 1,%2\n"
174- " ldl_l %0,%1\n"
175- " stl_c %2,%1\n"
176- " beq %2,1f\n"
177- ".subsection 2\n"
178- "1: br 0b\n"
179- ".previous"
180- : "=r" (ret), "=m" (*p), "=r" (one)
181- : "m" (*p));
182- return ret;
183-}
184-#elif defined(__sparc__)
185-static inline int testandset (int *p)
186-{
187- int ret;
188-
189- __asm__ __volatile__("ldstub [%1], %0"
190- : "=r" (ret)
191- : "r" (p)
192- : "memory");
193-
194- return (ret ? 1 : 0);
195-}
196-#elif defined(__arm__)
197-static inline int testandset (int *spinlock)
198-{
199- register unsigned int ret;
200- __asm__ __volatile__("swp %0, %1, [%2]"
201- : "=r"(ret)
202- : "0"(1), "r"(spinlock));
203-
204- return ret;
205-}
206-#elif defined(__mc68000)
207-static inline int testandset (int *p)
208-{
209- char ret;
210- __asm__ __volatile__("tas %1; sne %0"
211- : "=r" (ret)
212- : "m" (p)
213- : "cc","memory");
214- return ret;
215-}
216-#elif defined(__hppa__)
217-
218-/* Because malloc only guarantees 8-byte alignment for malloc'd data,
219- and GCC only guarantees 8-byte alignment for stack locals, we can't
220- be assured of 16-byte alignment for atomic lock data even if we
221- specify "__attribute ((aligned(16)))" in the type declaration. So,
222- we use a struct containing an array of four ints for the atomic lock
223- type and dynamically select the 16-byte aligned int from the array
224- for the semaphore. */
225-#define __PA_LDCW_ALIGNMENT 16
226-static inline void *ldcw_align (void *p) {
227- unsigned long a = (unsigned long)p;
228- a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
229- return (void *)a;
230-}
231-
232-static inline int testandset (spinlock_t *p)
233-{
234- unsigned int ret;
235- p = ldcw_align(p);
236- __asm__ __volatile__("ldcw 0(%1),%0"
237- : "=r" (ret)
238- : "r" (p)
239- : "memory" );
240- return !ret;
241-}
242-
243-#elif defined(__ia64)
244-
245-#include <ia64intrin.h>
246-
247-static inline int testandset (int *p)
248-{
249- return __sync_lock_test_and_set (p, 1);
250-}
251-#elif defined(__mips__)
252-static inline int testandset (int *p)
253-{
254- int ret;
255-
256- __asm__ __volatile__ (
257- " .set push \n"
258- " .set noat \n"
259- " .set mips2 \n"
260- "1: li $1, 1 \n"
261- " ll %0, %1 \n"
262- " sc $1, %1 \n"
263- " beqz $1, 1b \n"
264- " .set pop "
265- : "=r" (ret), "+R" (*p)
266- :
267- : "memory");
268-
269- return ret;
270-}
271-#else
272-#error unimplemented CPU support
273-#endif
274-
275-#if defined(CONFIG_USER_ONLY)
276-static inline void spin_lock(spinlock_t *lock)
277-{
278- while (testandset(lock));
279-}
280-
281-static inline void spin_unlock(spinlock_t *lock)
282-{
283- resetlock(lock);
284-}
285-
286-static inline int spin_trylock(spinlock_t *lock)
287-{
288- return !testandset(lock);
289-}
290-#else
291-static inline void spin_lock(spinlock_t *lock)
292-{
293-}
294-
295-static inline void spin_unlock(spinlock_t *lock)
296-{
297-}
298-
299-static inline int spin_trylock(spinlock_t *lock)
300-{
301- return 1;
302-}
303-#endif
304+#include "qemu_spinlock.h"
305
306 extern spinlock_t tb_lock;
307
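The hppa comment in the block removed above explains why the lock word must be picked out of a four-int array at run time: malloc and stack locals only guarantee 8-byte alignment, while ldcw needs 16. A quick standalone check of the rounding ldcw_align() performs (a sketch, using uintptr_t in place of the patch's unsigned long):

#include <assert.h>
#include <stdint.h>

#define __PA_LDCW_ALIGNMENT 16

/* Round p up to the next 16-byte boundary, as ldcw_align() does. */
static void *ldcw_align(void *p)
{
    uintptr_t a = (uintptr_t)p;
    a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(uintptr_t)(__PA_LDCW_ALIGNMENT - 1);
    return (void *)a;
}

int main(void)
{
    char buf[32];
    uintptr_t a = (uintptr_t)ldcw_align(buf + 1);
    /* The result is 16-byte aligned and never below the input pointer. */
    assert(a % 16 == 0 && a >= (uintptr_t)(buf + 1));
    return 0;
}
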
308Index: trunk/linux-user/arm/syscall.h
309===================================================================
310--- trunk.orig/linux-user/arm/syscall.h 2008-04-24 20:16:41.000000000 +0100
311+++ trunk/linux-user/arm/syscall.h 2008-04-24 20:16:53.000000000 +0100
312@@ -28,7 +28,9 @@
313 #define ARM_SYSCALL_BASE 0x900000
314 #define ARM_THUMB_SYSCALL 0
315
316-#define ARM_NR_cacheflush (ARM_SYSCALL_BASE + 0xf0000 + 2)
317+#define ARM_NR_BASE 0xf0000
318+#define ARM_NR_cacheflush (ARM_NR_BASE + 2)
319+#define ARM_NR_set_tls (ARM_NR_BASE + 5)
320
321 #define ARM_NR_semihosting 0x123456
322 #define ARM_NR_thumb_semihosting 0xAB
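With ARM_NR_BASE factored out, the private ARM syscalls line up with the kernel's numbering: an OABI guest encodes the number in the swi instruction itself, so cacheflush and set_tls arrive as 0x9f0002 and 0x9f0005, the values cpu_loop() sees before subtracting ARM_SYSCALL_BASE. A worked check of the arithmetic (not part of the patch):

#include <stdio.h>

#define ARM_SYSCALL_BASE  0x900000
#define ARM_NR_BASE       0xf0000
#define ARM_NR_cacheflush (ARM_NR_BASE + 2)
#define ARM_NR_set_tls    (ARM_NR_BASE + 5)

int main(void)
{
    /* Full swi numbers as issued by an OABI guest. */
    printf("swi %#x -> cacheflush\n", ARM_SYSCALL_BASE + ARM_NR_cacheflush);
    printf("swi %#x -> set_tls\n",    ARM_SYSCALL_BASE + ARM_NR_set_tls);
    return 0;
}
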
323Index: trunk/linux-user/main.c
324===================================================================
325--- trunk.orig/linux-user/main.c 2008-04-24 20:16:47.000000000 +0100
326+++ trunk/linux-user/main.c 2008-04-24 20:17:38.000000000 +0100
327@@ -365,6 +365,50 @@
328 }
329 }
330
331+/* Handle a jump to the kernel code page. */
332+static int
333+do_kernel_trap(CPUARMState *env)
334+{
335+ uint32_t addr;
336+ uint32_t *ptr;
337+ uint32_t cpsr;
338+
339+ switch (env->regs[15]) {
340+ case 0xffff0fc0: /* __kernel_cmpxchg */
341+ /* XXX: This only works between threads, not between processes.
342+ Use native atomic operations. */
343+ /* ??? This probably breaks horribly if the access segfaults. */
344+ cpu_lock();
345+ ptr = (uint32_t *)env->regs[2];
346+ cpsr = cpsr_read(env);
347+ if (*ptr == env->regs[0]) {
348+ *ptr = env->regs[1];
349+ env->regs[0] = 0;
350+ cpsr |= CPSR_C;
351+ } else {
352+ env->regs[0] = -1;
353+ cpsr &= ~CPSR_C;
354+ }
355+ cpsr_write(env, cpsr, CPSR_C);
356+ cpu_unlock();
357+ break;
358+ case 0xffff0fe0: /* __kernel_get_tls */
359+ env->regs[0] = env->cp15.c13_tls2;
360+ break;
361+ default:
362+ return 1;
363+ }
364+ /* Jump back to the caller. */
365+ addr = env->regs[14];
366+ if (addr & 1) {
367+ env->thumb = 1;
368+ addr &= ~1;
369+ }
370+ env->regs[15] = addr;
371+
372+ return 0;
373+}
374+
375 void cpu_loop(CPUARMState *env)
376 {
377 int trapnr;
378@@ -475,10 +519,8 @@
379 }
380 }
381
382- if (n == ARM_NR_cacheflush) {
383- arm_cache_flush(env->regs[0], env->regs[1]);
384- } else if (n == ARM_NR_semihosting
385- || n == ARM_NR_thumb_semihosting) {
386+ if (n == ARM_NR_semihosting
387+ || n == ARM_NR_thumb_semihosting) {
388 env->regs[0] = do_arm_semihosting (env);
389 } else if (n == 0 || n >= ARM_SYSCALL_BASE
390 || (env->thumb && n == ARM_THUMB_SYSCALL)) {
391@@ -489,14 +531,34 @@
392 n -= ARM_SYSCALL_BASE;
393 env->eabi = 0;
394 }
395- env->regs[0] = do_syscall(env,
396- n,
397- env->regs[0],
398- env->regs[1],
399- env->regs[2],
400- env->regs[3],
401- env->regs[4],
402- env->regs[5]);
403+ if ( n > ARM_NR_BASE) {
404+ switch (n)
405+ {
406+ case ARM_NR_cacheflush:
407+ arm_cache_flush(env->regs[0], env->regs[1]);
408+ break;
409+#ifdef USE_NPTL
410+ case ARM_NR_set_tls:
411+ cpu_set_tls(env, env->regs[0]);
412+ env->regs[0] = 0;
413+ break;
414+#endif
415+ default:
416+ printf ("Error: Bad syscall: %x\n", n);
417+ goto error;
418+ }
419+ }
420+ else
421+ {
422+ env->regs[0] = do_syscall(env,
423+ n,
424+ env->regs[0],
425+ env->regs[1],
426+ env->regs[2],
427+ env->regs[3],
428+ env->regs[4],
429+ env->regs[5]);
430+ }
431 } else {
432 goto error;
433 }
434@@ -535,6 +597,10 @@
435 }
436 }
437 break;
438+ case EXCP_KERNEL_TRAP:
439+ if (do_kernel_trap(env))
440+ goto error;
441+ break;
442 default:
443 error:
444 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
445@@ -1994,6 +2060,11 @@
446 int drop_ld_preload = 0, environ_count = 0;
447 char **target_environ, **wrk, **dst;
448
449+ char *assume_kernel = getenv("QEMU_ASSUME_KERNEL");
450+
451+ if (assume_kernel)
452+ setenv("LD_ASSUME_KERNEL", assume_kernel, 1);
453+
454 if (argc <= 1)
455 usage();
456
457@@ -2403,6 +2474,10 @@
458 ts->heap_base = info->brk;
459 /* This will be filled in on the first SYS_HEAPINFO call. */
460 ts->heap_limit = 0;
461+ /* Register the magic kernel code page. The cpu will generate a
462+ special exception when it tries to execute code here. We can't
463+ put real code here because it may be in use by the host kernel. */
464+ page_set_flags(0xffff0000, 0xffff0fff, 0);
465 #endif
466
467 if (gdbstub_port) {
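For context, do_kernel_trap() and the page_set_flags() call above emulate the kernel's "kuser helpers": an NPTL guest libc calls fixed entry points in the vector page instead of issuing syscalls. A guest-side sketch of the cmpxchg helper's documented ABI (ARM Linux only; a hypothetical demo that matches the register usage the emulation reads):

/* __kernel_cmpxchg lives at 0xffff0fc0 in every ARM Linux process;
   arguments arrive in r0 (oldval), r1 (newval), r2 (ptr). */
typedef int (*kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg ((kernel_cmpxchg_t)0xffff0fc0)

/* Returns 0 (and sets the C flag) when *ptr still held oldval and has
   been replaced by newval -- the protocol do_kernel_trap() implements. */
static int guest_compare_and_swap(int *ptr, int oldval, int newval)
{
    return __kernel_cmpxchg(oldval, newval, ptr);
}
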
468Index: trunk/linux-user/qemu.h
469===================================================================
470--- trunk.orig/linux-user/qemu.h 2008-04-24 20:16:41.000000000 +0100
471+++ trunk/linux-user/qemu.h 2008-04-24 20:16:53.000000000 +0100
472@@ -107,6 +107,9 @@
473 uint32_t heap_base;
474 uint32_t heap_limit;
475 #endif
476+#ifdef USE_NPTL
477+ uint32_t *child_tidptr;
478+#endif
479 int used; /* non zero if used */
480 struct image_info *info;
481 uint8_t stack[0];
482Index: trunk/linux-user/syscall.c
483===================================================================
484--- trunk.orig/linux-user/syscall.c 2008-04-24 20:16:50.000000000 +0100
485+++ trunk/linux-user/syscall.c 2008-04-24 20:19:52.000000000 +0100
486@@ -61,6 +61,7 @@
487 #define tchars host_tchars /* same as target */
488 #define ltchars host_ltchars /* same as target */
489
490+#include <linux/futex.h>
491 #include <linux/termios.h>
492 #include <linux/unistd.h>
493 #include <linux/utsname.h>
494@@ -71,9 +72,18 @@
495 #include <linux/kd.h>
496
497 #include "qemu.h"
498+#include "qemu_spinlock.h"
499
500 //#define DEBUG
501
502+#ifdef USE_NPTL
503+#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
504+ CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
505+#else
506+/* XXX: Hardcode the above values. */
507+#define CLONE_NPTL_FLAGS2 0
508+#endif
509+
510 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
511 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
512 /* 16 bit uid wrappers emulation */
513@@ -2695,16 +2705,25 @@
514 return 0;
515 }
516 #endif
517-
518 #endif /* defined(TARGET_I386) */
519
520 /* this stack is the equivalent of the kernel stack associated with a
521 thread/process */
522 #define NEW_STACK_SIZE 8192
523
524+#ifdef USE_NPTL
525+static spinlock_t nptl_lock = SPIN_LOCK_UNLOCKED;
526+#endif
527+
528 static int clone_func(void *arg)
529 {
530 CPUState *env = arg;
531+#ifdef HAVE_NPTL
532+ /* Wait until the parent has finished initializing the tls state. */
533+ while (!spin_trylock(&nptl_lock))
534+ usleep(1);
535+ spin_unlock(&nptl_lock);
536+#endif
537 cpu_loop(env);
538 /* never exits */
539 return 0;
540@@ -2712,15 +2731,27 @@
541
542 /* do_fork() Must return host values and target errnos (unlike most
543 do_*() functions). */
544-int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp)
545+int do_fork(CPUState *env, unsigned int flags, unsigned long newsp,
546+ uint32_t *parent_tidptr, void *newtls,
547+ uint32_t *child_tidptr)
548 {
549 int ret;
550 TaskState *ts;
551 uint8_t *new_stack;
552 CPUState *new_env;
553+#if defined(TARGET_I386)
554+ uint64_t *new_gdt_table;
555+#endif
556+#ifdef USE_NPTL
557+ unsigned int nptl_flags;
558
559+ if (flags & CLONE_PARENT_SETTID)
560+ *parent_tidptr = gettid();
561+#endif
562 if (flags & CLONE_VM) {
563 ts = malloc(sizeof(TaskState) + NEW_STACK_SIZE);
564+ if (!ts)
565+ return -ENOMEM;
566 memset(ts, 0, sizeof(TaskState));
567 new_stack = ts->stack;
568 ts->used = 1;
569@@ -2732,6 +2763,29 @@
570 #if defined(TARGET_I386)
571 if (!newsp)
572 newsp = env->regs[R_ESP];
573+ new_gdt_table = malloc(9 * 8);
574+ if (!new_gdt_table) {
575+ free(new_env);
576+ return -ENOMEM;
577+ }
578+ /* Copy main GDT table from parent, but clear TLS entries */
579+ memcpy(new_gdt_table, g2h(env->gdt.base), 6 * 8);
580+ memset(&new_gdt_table[6], 0, 3 * 8);
581+ new_env->gdt.base = h2g(new_gdt_table);
582+ if (flags & 0x00080000 /* CLONE_SETTLS */) {
583+ ret = do_set_thread_area(new_env, new_env->regs[R_ESI]);
584+ if (ret) {
585+ free(new_gdt_table);
586+ free(new_env);
587+ return ret;
588+ }
589+ }
590+ cpu_x86_load_seg(env, R_CS, new_env->regs[R_CS]);
591+ cpu_x86_load_seg(env, R_DS, new_env->regs[R_DS]);
592+ cpu_x86_load_seg(env, R_ES, new_env->regs[R_ES]);
593+ cpu_x86_load_seg(env, R_SS, new_env->regs[R_SS]);
594+ cpu_x86_load_seg(env, R_FS, new_env->regs[R_FS]);
595+ cpu_x86_load_seg(env, R_GS, new_env->regs[R_GS]);
596 new_env->regs[R_ESP] = newsp;
597 new_env->regs[R_EAX] = 0;
598 #elif defined(TARGET_ARM)
599@@ -2784,16 +2838,67 @@
600 #error unsupported target CPU
601 #endif
602 new_env->opaque = ts;
603+#ifdef USE_NPTL
604+ nptl_flags = flags;
605+ flags &= ~CLONE_NPTL_FLAGS2;
606+
607+ if (nptl_flags & CLONE_CHILD_CLEARTID) {
608+ ts->child_tidptr = child_tidptr;
609+ }
610+
611+ if (nptl_flags & CLONE_SETTLS)
612+ cpu_set_tls (new_env, newtls);
613+
614+ /* Grab the global cpu lock so that the thread setup appears
615+ atomic. */
616+ if (nptl_flags & CLONE_CHILD_SETTID)
617+ spin_lock(&nptl_lock);
618+
619+#else
620+ if (flags & CLONE_NPTL_FLAGS2)
621+ return -EINVAL;
622+#endif
623+
624+ if (CLONE_VFORK & flags)
625+ flags ^= CLONE_VM;
626 #ifdef __ia64__
627 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
628 #else
629 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
630 #endif
631+#ifdef USE_NPTL
632+ if (ret != -1) {
633+ if (nptl_flags & CLONE_CHILD_SETTID)
634+ *child_tidptr = ret;
635+ }
636+
637+ /* Allow the child to continue. */
638+ if (nptl_flags & CLONE_CHILD_SETTID)
639+ spin_unlock(&nptl_lock);
640+#endif
641 } else {
642 /* if no CLONE_VM, we consider it is a fork */
643- if ((flags & ~CSIGNAL) != 0)
644+ if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
645 return -EINVAL;
646 ret = fork();
647+#ifdef USE_NPTL
648+ /* There is a race condition here. The parent process could
649+ theoretically read the TID in the child process before the child
650+ tid is set. This would require using either ptrace
651+ (not implemented) or having *_tidptr point at a shared memory
652+ mapping. We can't repeat the spinlock hack used above because
653+ the child process gets its own copy of the lock. */
654+ if (ret == 0) {
655+ /* Child Process. */
656+ if (flags & CLONE_CHILD_SETTID)
657+ *child_tidptr = gettid();
658+ ts = (TaskState *)env->opaque;
659+ if (flags & CLONE_CHILD_CLEARTID)
660+ ts->child_tidptr = child_tidptr;
661+ if (flags & CLONE_SETTLS)
662+ cpu_set_tls (env, newtls);
663+ }
664+#endif
665 }
666 return ret;
667 }
668@@ -3052,6 +3157,68 @@
669 unlock_user_struct(target_ts, target_addr, 1);
670 }
671
672+static long do_futex(target_ulong uaddr, int op, uint32_t val,
673+ target_ulong utime, target_ulong uaddr2,
674+ uint32_t val3)
675+{
676+ struct timespec host_utime;
677+ unsigned long val2 = utime;
678+
679+ if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
680+ target_to_host_timespec(&host_utime, utime);
681+ val2 = (unsigned long)&host_utime;
682+ }
683+
684+#ifdef BSWAP_NEEDED
685+ switch(op) {
686+ case FUTEX_CMP_REQUEUE:
687+ val3 = tswap32(val3);
688+ case FUTEX_REQUEUE:
689+ val2 = tswap32(val2);
690+ case FUTEX_WAIT:
691+ case FUTEX_WAKE:
692+ val = tswap32(val);
693+ case FUTEX_LOCK_PI: /* This one's icky, but comes out OK */
694+ case FUTEX_UNLOCK_PI:
695+ break;
696+ default:
697+ gemu_log("qemu: Unsupported futex op %d\n", op);
698+ return -ENOSYS;
699+ }
700+#if 0 /* No, it's worse than this */
701+ if (op == FUTEX_WAKE_OP) {
702+ /* Need to munge the secondary operation (val3) */
703+ val3 = tswap32(val3);
704+ int op2 = (val3 >> 28) & 7;
705+ int cmp = (val3 >> 24) & 15;
706+ int oparg = (val3 << 8) >> 20;
707+ int cmparg = (val3 << 20) >> 20;
708+ int shift = val3 & (FUTEX_OP_OPARG_SHIFT << 28);
709+
710+ if (shift)
711+ oparg = (oparg & 7) + 24 - (oparg & 24);
712+ else oparg =
713+ if (op2 == FUTEX_OP_ADD) {
714+ gemu_log("qemu: Unsupported wrong-endian FUTEX_OP_ADD\n");
715+ return -ENOSYS;
716+ }
717+ if (cmparg == FUTEX_OP_CMP_LT || cmparg == FUTEX_OP_CMP_GE ||
718+ cmparg == FUTEX_OP_CMP_LE || cmparg == FUTEX_OP_CMP_GT) {
719+ gemu_log("qemu: Unsupported wrong-endian futex cmparg %d\n", cmparg);
720+ return -ENOSYS;
721+ }
722+ val3 = shift | (op2<<28) | (cmp<<24) | (oparg<<12) | cmparg;
723+ }
724+#endif
725+#endif
726+ return syscall(__NR_futex, g2h(uaddr), op, val, val2, g2h(uaddr2), val3);
727+}
728+
729+int do_set_tid_address(target_ulong tidptr)
730+{
731+ return syscall(__NR_set_tid_address, g2h(tidptr));
732+}
733+
734 /* do_syscall() should always have a single exit point at the end so
735 that actions, such as logging of syscall results, can be performed.
736 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
737@@ -3076,7 +3243,7 @@
738 _mcleanup();
739 #endif
740 gdb_exit(cpu_env, arg1);
741- /* XXX: should free thread stack and CPU env */
742+ /* XXX: should free thread stack, GDT and CPU env */
743 _exit(arg1);
744 ret = 0; /* avoid warning */
745 break;
746@@ -3118,7 +3285,7 @@
747 ret = do_brk(arg1);
748 break;
749 case TARGET_NR_fork:
750- ret = get_errno(do_fork(cpu_env, SIGCHLD, 0));
751+ ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, NULL, NULL, NULL));
752 break;
753 #ifdef TARGET_NR_waitpid
754 case TARGET_NR_waitpid:
755@@ -4482,7 +4649,8 @@
756 ret = get_errno(fsync(arg1));
757 break;
758 case TARGET_NR_clone:
759- ret = get_errno(do_fork(cpu_env, arg1, arg2));
760+ ret = get_errno(do_fork(cpu_env, arg1, arg2, (uint32_t *)arg3,
761+ (void *)arg4, (uint32_t *)arg5));
762 break;
763 #ifdef __NR_exit_group
764 /* new thread calls */
765@@ -4943,7 +5111,8 @@
766 #endif
767 #ifdef TARGET_NR_vfork
768 case TARGET_NR_vfork:
769- ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0));
770+ ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
771+ NULL, NULL, NULL));
772 break;
773 #endif
774 #ifdef TARGET_NR_ugetrlimit
775@@ -5521,6 +5690,9 @@
776 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
777 ret = do_set_thread_area(cpu_env, arg1);
778 break;
779+#elif TARGET_i386
780+ ret = get_errno(do_set_thread_area(cpu_env, arg1));
781+ break;
782 #else
783 goto unimplemented_nowarn;
784 #endif
785@@ -5538,6 +5710,12 @@
786 goto unimplemented_nowarn;
787 #endif
788
789+#ifdef TARGET_NR_futex
790+ case TARGET_NR_futex:
791+ ret = get_errno(do_futex(arg1, arg2, arg3, arg4, arg5, arg6));
792+ break;
793+#endif
794+
795 #ifdef TARGET_NR_clock_gettime
796 case TARGET_NR_clock_gettime:
797 {
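do_futex() mostly forwards to the raw futex(2) syscall after mapping guest addresses with g2h(), converting the timeout, and byte-swapping operands on cross-endian targets. A minimal host-side demo of the WAIT/WAKE pair it passes through (a hypothetical demo, Linux-only):

#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static int futex_word = 1;

static long sys_futex(int *uaddr, int op, int val)
{
    return syscall(__NR_futex, uaddr, op, val, NULL, NULL, 0);
}

int main(void)
{
    /* FUTEX_WAIT returns immediately with EAGAIN because the word is 1,
       not the expected 0 -- the same compare-and-block contract the
       guest's pthread implementation relies on. */
    long r = sys_futex(&futex_word, FUTEX_WAIT, 0);
    printf("FUTEX_WAIT -> %ld (expected -1/EAGAIN)\n", r);
    /* Wake up to one waiter; returns how many threads were woken (0 here). */
    r = sys_futex(&futex_word, FUTEX_WAKE, 1);
    printf("FUTEX_WAKE -> %ld\n", r);
    return 0;
}
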
798Index: trunk/qemu_spinlock.h
799===================================================================
800--- /dev/null 1970-01-01 00:00:00.000000000 +0000
801+++ trunk/qemu_spinlock.h 2008-04-24 20:16:53.000000000 +0100
802@@ -0,0 +1,250 @@
803+/*
804+ * Atomic operation helper include
805+ *
806+ * Copyright (c) 2005 Fabrice Bellard
807+ *
808+ * This library is free software; you can redistribute it and/or
809+ * modify it under the terms of the GNU Lesser General Public
810+ * License as published by the Free Software Foundation; either
811+ * version 2 of the License, or (at your option) any later version.
812+ *
813+ * This library is distributed in the hope that it will be useful,
814+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
815+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
816+ * Lesser General Public License for more details.
817+ *
818+ * You should have received a copy of the GNU Lesser General Public
819+ * License along with this library; if not, write to the Free Software
820+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
821+ */
822+#ifndef QEMU_SPINLOCK_H
823+#define QEMU_SPINLOCK_H
824+
825+#ifdef __powerpc__
826+static inline int testandset (int *p)
827+{
828+ int ret;
829+ __asm__ __volatile__ (
830+ "0: lwarx %0,0,%1\n"
831+ " xor. %0,%3,%0\n"
832+ " bne 1f\n"
833+ " stwcx. %2,0,%1\n"
834+ " bne- 0b\n"
835+ "1: "
836+ : "=&r" (ret)
837+ : "r" (p), "r" (1), "r" (0)
838+ : "cr0", "memory");
839+ return ret;
840+}
841+#endif
842+
843+#ifdef __i386__
844+static inline int testandset (int *p)
845+{
846+ long int readval = 0;
847+
848+ __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
849+ : "+m" (*p), "+a" (readval)
850+ : "r" (1)
851+ : "cc");
852+ return readval;
853+}
854+#endif
855+
856+#ifdef __x86_64__
857+static inline int testandset (int *p)
858+{
859+ long int readval = 0;
860+
861+ __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
862+ : "+m" (*p), "+a" (readval)
863+ : "r" (1)
864+ : "cc");
865+ return readval;
866+}
867+#endif
868+
869+#ifdef __s390__
870+static inline int testandset (int *p)
871+{
872+ int ret;
873+
874+ __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
875+ " jl 0b"
876+ : "=&d" (ret)
877+ : "r" (1), "a" (p), "0" (*p)
878+ : "cc", "memory" );
879+ return ret;
880+}
881+#endif
882+
883+#ifdef __alpha__
884+static inline int testandset (int *p)
885+{
886+ int ret;
887+ unsigned long one;
888+
889+ __asm__ __volatile__ ("0: mov 1,%2\n"
890+ " ldl_l %0,%1\n"
891+ " stl_c %2,%1\n"
892+ " beq %2,1f\n"
893+ ".subsection 2\n"
894+ "1: br 0b\n"
895+ ".previous"
896+ : "=r" (ret), "=m" (*p), "=r" (one)
897+ : "m" (*p));
898+ return ret;
899+}
900+#endif
901+
902+#ifdef __sparc__
903+static inline int testandset (int *p)
904+{
905+ int ret;
906+
907+ __asm__ __volatile__("ldstub [%1], %0"
908+ : "=r" (ret)
909+ : "r" (p)
910+ : "memory");
911+
912+ return (ret ? 1 : 0);
913+}
914+#endif
915+
916+#ifdef __arm__
917+static inline int testandset (int *spinlock)
918+{
919+ register unsigned int ret;
920+ __asm__ __volatile__("swp %0, %1, [%2]"
921+ : "=r"(ret)
922+ : "0"(1), "r"(spinlock));
923+
924+ return ret;
925+}
926+#endif
927+
928+#ifdef __mc68000
929+static inline int testandset (int *p)
930+{
931+ char ret;
932+ __asm__ __volatile__("tas %1; sne %0"
933+ : "=r" (ret)
934+ : "m" (p)
935+ : "cc","memory");
936+ return ret;
937+}
938+#endif
939+
940+#ifdef __hppa__
941+/* Because malloc only guarantees 8-byte alignment for malloc'd data,
942+ and GCC only guarantees 8-byte alignment for stack locals, we can't
943+ be assured of 16-byte alignment for atomic lock data even if we
944+ specify "__attribute ((aligned(16)))" in the type declaration. So,
945+ we use a struct containing an array of four ints for the atomic lock
946+ type and dynamically select the 16-byte aligned int from the array
947+ for the semaphore. */
948+#define __PA_LDCW_ALIGNMENT 16
949+static inline void *ldcw_align (void *p) {
950+ unsigned long a = (unsigned long)p;
951+ a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
952+ return (void *)a;
953+}
954+
955+static inline int testandset (spinlock_t *p)
956+{
957+ unsigned int ret;
958+ p = ldcw_align(p);
959+ __asm__ __volatile__("ldcw 0(%1),%0"
960+ : "=r" (ret)
961+ : "r" (p)
962+ : "memory" );
963+ return !ret;
964+}
965+#endif
966+
967+#ifdef __ia64
968+#include <ia64intrin.h>
969+
970+static inline int testandset (int *p)
971+{
972+ return __sync_lock_test_and_set (p, 1);
973+}
974+#endif
975+
976+#ifdef __mips__
977+static inline int testandset (int *p)
978+{
979+ int ret;
980+
981+ __asm__ __volatile__ (
982+ " .set push \n"
983+ " .set noat \n"
984+ " .set mips2 \n"
985+ "1: li $1, 1 \n"
986+ " ll %0, %1 \n"
987+ " sc $1, %1 \n"
988+ " beqz $1, 1b \n"
989+ " .set pop "
990+ : "=r" (ret), "+R" (*p)
991+ :
992+ : "memory");
993+
994+ return ret;
995+}
996+#endif
997+
998+#if defined(__hppa__)
999+
1000+typedef int spinlock_t[4];
1001+
1002+#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }
1003+
1004+static inline void resetlock (spinlock_t *p)
1005+{
1006+ (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
1007+}
1008+
1009+#else
1010+
1011+typedef int spinlock_t;
1012+
1013+#define SPIN_LOCK_UNLOCKED 0
1014+
1015+static inline void resetlock (spinlock_t *p)
1016+{
1017+ *p = SPIN_LOCK_UNLOCKED;
1018+}
1019+
1020+#endif
1021+
1022+#if defined(CONFIG_USER_ONLY)
1023+static inline void spin_lock(spinlock_t *lock)
1024+{
1025+ while (testandset(lock));
1026+}
1027+
1028+static inline void spin_unlock(spinlock_t *lock)
1029+{
1030+ resetlock(lock);
1031+}
1032+
1033+static inline int spin_trylock(spinlock_t *lock)
1034+{
1035+ return !testandset(lock);
1036+}
1037+#else
1038+static inline void spin_lock(spinlock_t *lock)
1039+{
1040+}
1041+
1042+static inline void spin_unlock(spinlock_t *lock)
1043+{
1044+}
1045+
1046+static inline int spin_trylock(spinlock_t *lock)
1047+{
1048+ return 1;
1049+}
1050+#endif
1051+
1052+#endif
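Taken together, these primitives give do_fork() its startup handshake: the parent takes nptl_lock before clone(), the child spins in clone_func() until setup is done, and the parent unlocks after publishing the TID. A runnable sketch of that handshake with a POSIX thread standing in for clone() (a hypothetical demo, not patch code; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

typedef int spinlock_t;
#define SPIN_LOCK_UNLOCKED 0

static int  testandset(int *p)          { return __sync_lock_test_and_set(p, 1); }
static void spin_lock(spinlock_t *l)    { while (testandset(l)); }
static void spin_unlock(spinlock_t *l)  { __sync_lock_release(l); }
static int  spin_trylock(spinlock_t *l) { return !testandset(l); }

static spinlock_t nptl_lock = SPIN_LOCK_UNLOCKED;
static int child_tid;                   /* stands in for *child_tidptr */

static void *child(void *arg)
{
    /* Same wait loop as clone_func(): spin until the parent unlocks. */
    while (!spin_trylock(&nptl_lock))
        usleep(1);
    spin_unlock(&nptl_lock);
    printf("child sees tid %d\n", child_tid);
    return NULL;
}

int main(void)
{
    pthread_t t;
    spin_lock(&nptl_lock);              /* parent: make setup appear atomic */
    pthread_create(&t, NULL, child, NULL);
    child_tid = 42;                     /* fill in CLONE_CHILD_SETTID data */
    spin_unlock(&nptl_lock);            /* allow the child to continue */
    pthread_join(t, NULL);
    return 0;
}
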
1053Index: trunk/target-arm/cpu.h
1054===================================================================
1055--- trunk.orig/target-arm/cpu.h 2008-04-24 20:16:41.000000000 +0100
1056+++ trunk/target-arm/cpu.h 2008-04-24 20:16:53.000000000 +0100
1057@@ -38,6 +38,7 @@
1058 #define EXCP_FIQ 6
1059 #define EXCP_BKPT 7
1060 #define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
1061+#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
1062
1063 #define ARMV7M_EXCP_RESET 1
1064 #define ARMV7M_EXCP_NMI 2
1065@@ -218,6 +219,15 @@
1066 void cpu_lock(void);
1067 void cpu_unlock(void);
1068
1069+void cpu_lock(void);
1070+void cpu_unlock(void);
1071+#if defined(USE_NPTL)
1072+static inline void cpu_set_tls(CPUARMState *env, void *newtls)
1073+{
1074+ env->cp15.c13_tls2 = (uint32_t)(long)newtls;
1075+}
1076+#endif
1077+
1078 #define CPSR_M (0x1f)
1079 #define CPSR_T (1 << 5)
1080 #define CPSR_F (1 << 6)
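cpu_set_tls() is the storage half of the TLS round trip: ARM_NR_set_tls writes the pointer into cp15.c13_tls2, and __kernel_get_tls hands it back to the guest in r0. A condensed check with stand-in types (hypothetical; the real CPUARMState carries far more state):

#include <assert.h>
#include <stdint.h>

typedef struct {
    struct { uint32_t c13_tls2; } cp15;
    uint32_t regs[16];
} CPUARMState;

static inline void cpu_set_tls(CPUARMState *env, void *newtls)
{
    env->cp15.c13_tls2 = (uint32_t)(long)newtls;   /* same cast as the patch */
}

int main(void)
{
    CPUARMState env = {0};
    int tls_block;
    cpu_set_tls(&env, &tls_block);          /* ARM_NR_set_tls path */
    env.regs[0] = env.cp15.c13_tls2;        /* __kernel_get_tls path */
    assert(env.regs[0] == (uint32_t)(long)&tls_block);
    return 0;
}
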
1081Index: trunk/target-arm/translate.c
1082===================================================================
1083--- trunk.orig/target-arm/translate.c 2008-04-24 20:16:41.000000000 +0100
1084+++ trunk/target-arm/translate.c 2008-04-24 20:16:53.000000000 +0100
1085@@ -8606,7 +8606,14 @@
1086 gen_exception(EXCP_EXCEPTION_EXIT);
1087 }
1088 #endif
1089-
1090+#ifdef CONFIG_USER_ONLY
1091+ /* Intercept jump to the magic kernel page. */
1092+ if (dc->pc > 0xffff0000) {
1093+ gen_exception(EXCP_KERNEL_TRAP);
1094+ dc->is_jmp = DISAS_UPDATE;
1095+ break;
1096+ }
1097+#endif
1098 if (env->nb_breakpoints > 0) {
1099 for(j = 0; j < env->nb_breakpoints; j++) {
1100 if (env->breakpoints[j] == dc->pc) {
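
The guard above converts any fetch from the vector page (0xffff0000-0xffff0fff) into EXCP_KERNEL_TRAP rather than translating it as guest code; cpu_loop() then routes the exception to do_kernel_trap(). A quick check that the predicate covers the two helpers the patch emulates (a sketch, not patch code):

#include <assert.h>
#include <stdint.h>

/* Same test as the translate.c hunk: is pc inside the magic kernel page? */
static int in_kernel_page(uint32_t pc)
{
    return pc > 0xffff0000;
}

int main(void)
{
    assert(in_kernel_page(0xffff0fc0));    /* __kernel_cmpxchg */
    assert(in_kernel_page(0xffff0fe0));    /* __kernel_get_tls */
    assert(!in_kernel_page(0x00008000));   /* ordinary text address */
    return 0;
}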