Diffstat (limited to 'meta/packages/qemu/qemu-0.9.0+cvs20070613/qemu-0.9.0-nptl.patch')
-rw-r--r--  meta/packages/qemu/qemu-0.9.0+cvs20070613/qemu-0.9.0-nptl.patch  |  892
1 files changed, 892 insertions, 0 deletions
diff --git a/meta/packages/qemu/qemu-0.9.0+cvs20070613/qemu-0.9.0-nptl.patch b/meta/packages/qemu/qemu-0.9.0+cvs20070613/qemu-0.9.0-nptl.patch
new file mode 100644
index 0000000000..fc7b0cfa4b
--- /dev/null
+++ b/meta/packages/qemu/qemu-0.9.0+cvs20070613/qemu-0.9.0-nptl.patch
@@ -0,0 +1,892 @@
1These are Paul Brook's patches to QEMU-0.8.2 to enable the running of single
2ARM binaries under QEMU's user-emulation mode. Without them, QEMU-0.8.1
3immediately dies saying:
4 Error: f0005
5 qemu: uncaught target signal 6 (Aborted) - exiting
6while qemu-0.8.2 dies saying:
7 qemu: Unsupported syscall: 983045
8 cannot set up thread-local storage: unknown error
9
10This file is a rediffing of the patches visible at
11https://nowt.dyndns.org/patch.qemu_nptl on 27 Sept 2006,
12which the "patch" utility fails to apply automatically.
13See also http://lists.gnu.org/archive/html/qemu-devel/2006-09/msg00194.html
14
15 Martin Guy, 27 Sept 2006
16
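A note on the errors above: they are two views of the same missing feature.
983045 is the decimal form of 0xf0005, the ARM-private set_tls syscall that an
NPTL libc issues when it installs its thread pointer, and which the hunks below
teach QEMU to handle. A minimal standalone C sketch (not part of the patch
itself; the variable names simply mirror the ARM_NR_* constants introduced
below) makes the arithmetic explicit:

    /* ARM private syscalls sit 0xf0000 above the syscall base;
       set_tls is number 5 in that range (see ARM_NR_set_tls below). */
    #include <stdio.h>

    int main(void)
    {
        unsigned int arm_nr_base    = 0xf0000;           /* ARM_NR_BASE */
        unsigned int arm_nr_set_tls = arm_nr_base + 5;   /* ARM_NR_set_tls */

        printf("set_tls = 0x%x = %u\n", arm_nr_set_tls, arm_nr_set_tls);
        /* prints: set_tls = 0xf0005 = 983045 */
        return 0;
    }
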
17Index: qemu/configure
18===================================================================
19--- qemu.orig/configure 2007-06-29 10:47:39.000000000 +0000
20+++ qemu/configure 2007-06-29 10:47:58.000000000 +0000
21@@ -101,6 +101,7 @@
22 darwin_user="no"
23 build_docs="no"
24 uname_release=""
25+nptl="yes"
26
27 # OS specific
28 targetos=`uname -s`
29@@ -281,6 +282,8 @@
30 *) echo "undefined SPARC architecture. Exiting";exit 1;;
31 esac
32 ;;
33+ --disable-nptl) nptl="no"
34+ ;;
35 esac
36 done
37
38@@ -355,6 +358,7 @@
39 echo " --disable-linux-user disable all linux usermode emulation targets"
40 echo " --enable-darwin-user enable all darwin usermode emulation targets"
41 echo " --disable-darwin-user disable all darwin usermode emulation targets"
42+echo " --disable-nptl disable usermode NPTL guest support"
43 echo " --fmod-lib path to FMOD library"
44 echo " --fmod-inc path to FMOD includes"
45 echo " --enable-uname-release=R Return R for uname -r in usermode emulation"
46@@ -524,6 +528,23 @@
47 }
48 EOF
49
50+# check NPTL support
51+cat > $TMPC <<EOF
52+#include <sched.h>
53+void foo()
54+{
55+#ifndef CLONE_SETTLS
56+#error bork
57+#endif
58+}
59+EOF
60+
61+if $cc -c -o $TMPO $TMPC 2> /dev/null ; then
62+ :
63+else
64+ nptl="no"
65+fi
66+
67 ##########################################
68 # SDL probe
69
70@@ -678,6 +699,7 @@
71 echo "Documentation $build_docs"
72 [ ! -z "$uname_release" ] && \
73 echo "uname -r $uname_release"
74+echo "NPTL support $nptl"
75
76 if test $sdl_too_old = "yes"; then
77 echo "-> Your SDL version is too old - please upgrade to have SDL support"
78@@ -1057,6 +1079,14 @@
79 echo "SDL_CFLAGS=`$sdl_config --cflags`" >> $config_mak
80 fi
81 fi
82+else
83+ if test "$nptl" = "yes" ; then
84+ case "$target_cpu" in
85+ arm | armeb)
86+ echo "#define USE_NPTL 1" >> $config_h
87+ ;;
88+ esac
89+ fi
90 fi
91
92 if test "$cocoa" = "yes" ; then
93Index: qemu/exec-all.h
94===================================================================
95--- qemu.orig/exec-all.h 2007-06-29 10:47:39.000000000 +0000
96+++ qemu/exec-all.h 2007-06-29 10:47:58.000000000 +0000
97@@ -360,170 +360,7 @@
98 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
99 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
100
101-#if defined(__powerpc__)
102-static inline int testandset (int *p)
103-{
104- int ret;
105- __asm__ __volatile__ (
106- "0: lwarx %0,0,%1\n"
107- " xor. %0,%3,%0\n"
108- " bne 1f\n"
109- " stwcx. %2,0,%1\n"
110- " bne- 0b\n"
111- "1: "
112- : "=&r" (ret)
113- : "r" (p), "r" (1), "r" (0)
114- : "cr0", "memory");
115- return ret;
116-}
117-#elif defined(__i386__)
118-static inline int testandset (int *p)
119-{
120- long int readval = 0;
121-
122- __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
123- : "+m" (*p), "+a" (readval)
124- : "r" (1)
125- : "cc");
126- return readval;
127-}
128-#elif defined(__x86_64__)
129-static inline int testandset (int *p)
130-{
131- long int readval = 0;
132-
133- __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
134- : "+m" (*p), "+a" (readval)
135- : "r" (1)
136- : "cc");
137- return readval;
138-}
139-#elif defined(__s390__)
140-static inline int testandset (int *p)
141-{
142- int ret;
143-
144- __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
145- " jl 0b"
146- : "=&d" (ret)
147- : "r" (1), "a" (p), "0" (*p)
148- : "cc", "memory" );
149- return ret;
150-}
151-#elif defined(__alpha__)
152-static inline int testandset (int *p)
153-{
154- int ret;
155- unsigned long one;
156-
157- __asm__ __volatile__ ("0: mov 1,%2\n"
158- " ldl_l %0,%1\n"
159- " stl_c %2,%1\n"
160- " beq %2,1f\n"
161- ".subsection 2\n"
162- "1: br 0b\n"
163- ".previous"
164- : "=r" (ret), "=m" (*p), "=r" (one)
165- : "m" (*p));
166- return ret;
167-}
168-#elif defined(__sparc__)
169-static inline int testandset (int *p)
170-{
171- int ret;
172-
173- __asm__ __volatile__("ldstub [%1], %0"
174- : "=r" (ret)
175- : "r" (p)
176- : "memory");
177-
178- return (ret ? 1 : 0);
179-}
180-#elif defined(__arm__)
181-static inline int testandset (int *spinlock)
182-{
183- register unsigned int ret;
184- __asm__ __volatile__("swp %0, %1, [%2]"
185- : "=r"(ret)
186- : "0"(1), "r"(spinlock));
187-
188- return ret;
189-}
190-#elif defined(__mc68000)
191-static inline int testandset (int *p)
192-{
193- char ret;
194- __asm__ __volatile__("tas %1; sne %0"
195- : "=r" (ret)
196- : "m" (p)
197- : "cc","memory");
198- return ret;
199-}
200-#elif defined(__ia64)
201-
202-#include <ia64intrin.h>
203-
204-static inline int testandset (int *p)
205-{
206- return __sync_lock_test_and_set (p, 1);
207-}
208-#elif defined(__mips__)
209-static inline int testandset (int *p)
210-{
211- int ret;
212-
213- __asm__ __volatile__ (
214- " .set push \n"
215- " .set noat \n"
216- " .set mips2 \n"
217- "1: li $1, 1 \n"
218- " ll %0, %1 \n"
219- " sc $1, %1 \n"
220- " beqz $1, 1b \n"
221- " .set pop "
222- : "=r" (ret), "+R" (*p)
223- :
224- : "memory");
225-
226- return ret;
227-}
228-#else
229-#error unimplemented CPU support
230-#endif
231-
232-typedef int spinlock_t;
233-
234-#define SPIN_LOCK_UNLOCKED 0
235-
236-#if defined(CONFIG_USER_ONLY)
237-static inline void spin_lock(spinlock_t *lock)
238-{
239- while (testandset(lock));
240-}
241-
242-static inline void spin_unlock(spinlock_t *lock)
243-{
244- *lock = 0;
245-}
246-
247-static inline int spin_trylock(spinlock_t *lock)
248-{
249- return !testandset(lock);
250-}
251-#else
252-static inline void spin_lock(spinlock_t *lock)
253-{
254-}
255-
256-static inline void spin_unlock(spinlock_t *lock)
257-{
258-}
259-
260-static inline int spin_trylock(spinlock_t *lock)
261-{
262- return 1;
263-}
264-#endif
265+#include "qemu_spinlock.h"
266
267 extern spinlock_t tb_lock;
268
269Index: qemu/linux-user/arm/syscall.h
270===================================================================
271--- qemu.orig/linux-user/arm/syscall.h 2007-06-29 10:47:39.000000000 +0000
272+++ qemu/linux-user/arm/syscall.h 2007-06-29 10:47:58.000000000 +0000
273@@ -28,7 +28,9 @@
274 #define ARM_SYSCALL_BASE 0x900000
275 #define ARM_THUMB_SYSCALL 0
276
277-#define ARM_NR_cacheflush (ARM_SYSCALL_BASE + 0xf0000 + 2)
278+#define ARM_NR_BASE 0xf0000
279+#define ARM_NR_cacheflush (ARM_NR_BASE + 2)
280+#define ARM_NR_set_tls (ARM_NR_BASE + 5)
281
282 #define ARM_NR_semihosting 0x123456
283 #define ARM_NR_thumb_semihosting 0xAB
284Index: qemu/linux-user/main.c
285===================================================================
286--- qemu.orig/linux-user/main.c 2007-06-29 10:47:39.000000000 +0000
287+++ qemu/linux-user/main.c 2007-06-29 10:53:47.000000000 +0000
288@@ -325,6 +325,50 @@
289 }
290 }
291
292+/* Handle a jump to the kernel code page. */
293+static int
294+do_kernel_trap(CPUARMState *env)
295+{
296+ uint32_t addr;
297+ uint32_t *ptr;
298+ uint32_t cpsr;
299+
300+ switch (env->regs[15]) {
301+ case 0xffff0fc0: /* __kernel_cmpxchg */
302+ /* XXX: This only works between threads, not between processes.
303+ Use native atomic operations. */
304+ /* ??? This probably breaks horribly if the access segfaults. */
305+ cpu_lock();
306+ ptr = (uint32_t *)env->regs[2];
307+ cpsr = cpsr_read(env);
308+ if (*ptr == env->regs[0]) {
309+ *ptr = env->regs[1];
310+ env->regs[0] = 0;
311+ cpsr |= CPSR_C;
312+ } else {
313+ env->regs[0] = -1;
314+ cpsr &= ~CPSR_C;
315+ }
316+ cpsr_write(env, cpsr, CPSR_C);
317+ cpu_unlock();
318+ break;
319+ case 0xffff0fe0: /* __kernel_get_tls */
320+ env->regs[0] = env->cp15.c13_tls;
321+ break;
322+ default:
323+ return 1;
324+ }
325+ /* Jump back to the caller. */
326+ addr = env->regs[14];
327+ if (addr & 1) {
328+ env->thumb = 1;
329+ addr &= ~1;
330+ }
331+ env->regs[15] = addr;
332+
333+ return 0;
334+}
335+
336 void cpu_loop(CPUARMState *env)
337 {
338 int trapnr;
339@@ -381,10 +425,8 @@
340 }
341 }
342
343- if (n == ARM_NR_cacheflush) {
344- arm_cache_flush(env->regs[0], env->regs[1]);
345- } else if (n == ARM_NR_semihosting
346- || n == ARM_NR_thumb_semihosting) {
347+ if (n == ARM_NR_semihosting
348+ || n == ARM_NR_thumb_semihosting) {
349 env->regs[0] = do_arm_semihosting (env);
350 } else if (n == 0 || n >= ARM_SYSCALL_BASE
351 || (env->thumb && n == ARM_THUMB_SYSCALL)) {
352@@ -395,14 +437,34 @@
353 n -= ARM_SYSCALL_BASE;
354 env->eabi = 0;
355 }
356- env->regs[0] = do_syscall(env,
357- n,
358- env->regs[0],
359- env->regs[1],
360- env->regs[2],
361- env->regs[3],
362- env->regs[4],
363- env->regs[5]);
364+ if ( n > ARM_NR_BASE) {
365+ switch (n)
366+ {
367+ case ARM_NR_cacheflush:
368+ arm_cache_flush(env->regs[0], env->regs[1]);
369+ break;
370+#ifdef USE_NPTL
371+ case ARM_NR_set_tls:
372+ cpu_set_tls(env, env->regs[0]);
373+ env->regs[0] = 0;
374+ break;
375+#endif
376+ default:
377+ printf ("Error: Bad syscall: %x\n", n);
378+ goto error;
379+ }
380+ }
381+ else
382+ {
383+ env->regs[0] = do_syscall(env,
384+ n,
385+ env->regs[0],
386+ env->regs[1],
387+ env->regs[2],
388+ env->regs[3],
389+ env->regs[4],
390+ env->regs[5]);
391+ }
392 } else {
393 goto error;
394 }
395@@ -441,6 +503,10 @@
396 }
397 }
398 break;
399+ case EXCP_KERNEL_TRAP:
400+ if (do_kernel_trap(env))
401+ goto error;
402+ break;
403 default:
404 error:
405 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
406@@ -2047,6 +2113,10 @@
407 ts->heap_base = info->brk;
408 /* This will be filled in on the first SYS_HEAPINFO call. */
409 ts->heap_limit = 0;
410+ /* Register the magic kernel code page. The cpu will generate a
411+ special exception when it tries to execute code here. We can't
412+ put real code here because it may be in use by the host kernel. */
413+ page_set_flags(0xffff0000, 0xffff0fff, 0);
414 #endif
415
416 if (gdbstub_port) {
417Index: qemu/linux-user/qemu.h
418===================================================================
419--- qemu.orig/linux-user/qemu.h 2007-06-29 10:47:39.000000000 +0000
420+++ qemu/linux-user/qemu.h 2007-06-29 10:47:58.000000000 +0000
421@@ -80,6 +80,9 @@
422 uint32_t heap_base;
423 uint32_t heap_limit;
424 #endif
425+#ifdef USE_NPTL
426+ uint32_t *child_tidptr;
427+#endif
428 int used; /* non zero if used */
429 struct image_info *info;
430 uint8_t stack[0];
431Index: qemu/linux-user/syscall.c
432===================================================================
433--- qemu.orig/linux-user/syscall.c 2007-06-29 10:47:39.000000000 +0000
434+++ qemu/linux-user/syscall.c 2007-06-29 10:53:47.000000000 +0000
435@@ -70,9 +70,18 @@
436 #include <linux/kd.h>
437
438 #include "qemu.h"
439+#include "qemu_spinlock.h"
440
441 //#define DEBUG
442
443+#ifdef USE_NPTL
444+#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
445+ CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
446+#else
447+/* XXX: Hardcode the above values. */
448+#define CLONE_NPTL_FLAGS2 0
449+#endif
450+
451 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
452 || defined(TARGET_M68K) || defined(TARGET_SH4)
453 /* 16 bit uid wrappers emulation */
454@@ -2119,20 +2128,38 @@
455 thread/process */
456 #define NEW_STACK_SIZE 8192
457
458+#ifdef USE_NPTL
459+static spinlock_t nptl_lock = SPIN_LOCK_UNLOCKED;
460+#endif
461+
462 static int clone_func(void *arg)
463 {
464 CPUState *env = arg;
465+#ifdef USE_NPTL
466+ /* Wait until the parent has finished initializing the tls state. */
467+ while (!spin_trylock(&nptl_lock))
468+ usleep(1);
469+ spin_unlock(&nptl_lock);
470+#endif
471 cpu_loop(env);
472 /* never exits */
473 return 0;
474 }
475
476-int do_fork(CPUState *env, unsigned int flags, unsigned long newsp)
477+int do_fork(CPUState *env, unsigned int flags, unsigned long newsp,
478+ uint32_t *parent_tidptr, void *newtls,
479+ uint32_t *child_tidptr)
480 {
481 int ret;
482 TaskState *ts;
483 uint8_t *new_stack;
484 CPUState *new_env;
485+#ifdef USE_NPTL
486+ unsigned int nptl_flags;
487+
488+ if (flags & CLONE_PARENT_SETTID)
489+ *parent_tidptr = gettid();
490+#endif
491
492 if (flags & CLONE_VM) {
493 ts = malloc(sizeof(TaskState) + NEW_STACK_SIZE);
494@@ -2199,16 +2226,67 @@
495 #error unsupported target CPU
496 #endif
497 new_env->opaque = ts;
498+#ifdef USE_NPTL
499+ nptl_flags = flags;
500+ flags &= ~CLONE_NPTL_FLAGS2;
501+
502+ if (nptl_flags & CLONE_CHILD_CLEARTID) {
503+ ts->child_tidptr = child_tidptr;
504+ }
505+
506+ if (nptl_flags & CLONE_SETTLS)
507+ cpu_set_tls (new_env, newtls);
508+
509+ /* Grab the global cpu lock so that the thread setup appears
510+ atomic. */
511+ if (nptl_flags & CLONE_CHILD_SETTID)
512+ spin_lock(&nptl_lock);
513+
514+#else
515+ if (flags & CLONE_NPTL_FLAGS2)
516+ return -EINVAL;
517+#endif
518+
519+ if (CLONE_VFORK & flags)
520+ flags ^= CLONE_VM;
521 #ifdef __ia64__
522 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
523 #else
524 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
525 #endif
526+#ifdef USE_NPTL
527+ if (ret != -1) {
528+ if (nptl_flags & CLONE_CHILD_SETTID)
529+ *child_tidptr = ret;
530+ }
531+
532+ /* Allow the child to continue. */
533+ if (nptl_flags & CLONE_CHILD_SETTID)
534+ spin_unlock(&nptl_lock);
535+#endif
536 } else {
537 /* if no CLONE_VM, we consider it is a fork */
538- if ((flags & ~CSIGNAL) != 0)
539+ if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
540 return -EINVAL;
541 ret = fork();
542+#ifdef USE_NPTL
543+ /* There is a race condition here. The parent process could
544+ theoretically read the TID in the child process before the child
545+ tid is set. This would require using either ptrace
546+ (not implemented) or having *_tidptr to point at a shared memory
547+ mapping. We can't repeat the spinlock hack used above because
548+ the child process gets its own copy of the lock. */
549+ if (ret == 0) {
550+ /* Child Process. */
551+ if (flags & CLONE_CHILD_SETTID)
552+ *child_tidptr = gettid();
553+ ts = (TaskState *)env->opaque;
554+ if (flags & CLONE_CHILD_CLEARTID)
555+ ts->child_tidptr = child_tidptr;
556+ if (flags & CLONE_SETTLS)
557+ cpu_set_tls (env, newtls);
558+ }
559+#endif
560 }
561 return ret;
562 }
563@@ -2485,7 +2563,7 @@
564 ret = do_brk(arg1);
565 break;
566 case TARGET_NR_fork:
567- ret = get_errno(do_fork(cpu_env, SIGCHLD, 0));
568+ ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, NULL, NULL, NULL));
569 break;
570 #ifdef TARGET_NR_waitpid
571 case TARGET_NR_waitpid:
572@@ -3649,7 +3727,8 @@
573 ret = get_errno(fsync(arg1));
574 break;
575 case TARGET_NR_clone:
576- ret = get_errno(do_fork(cpu_env, arg1, arg2));
577+ ret = get_errno(do_fork(cpu_env, arg1, arg2, (uint32_t *)arg3,
578+ (void *)arg4, (uint32_t *)arg5));
579 break;
580 #ifdef __NR_exit_group
581 /* new thread calls */
582@@ -4037,7 +4116,8 @@
583 #endif
584 #ifdef TARGET_NR_vfork
585 case TARGET_NR_vfork:
586- ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0));
587+ ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
588+ NULL, NULL, NULL));
589 break;
590 #endif
591 #ifdef TARGET_NR_ugetrlimit
592@@ -4619,4 +4699,3 @@
593 #endif
594 return ret;
595 }
596-
597Index: qemu/qemu_spinlock.h
598===================================================================
599--- /dev/null 1970-01-01 00:00:00.000000000 +0000
600+++ qemu/qemu_spinlock.h 2007-06-29 10:47:58.000000000 +0000
601@@ -0,0 +1,181 @@
602+/*
603+ * Atomic operation helper include
604+ *
605+ * Copyright (c) 2005 Fabrice Bellard
606+ *
607+ * This library is free software; you can redistribute it and/or
608+ * modify it under the terms of the GNU Lesser General Public
609+ * License as published by the Free Software Foundation; either
610+ * version 2 of the License, or (at your option) any later version.
611+ *
612+ * This library is distributed in the hope that it will be useful,
613+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
614+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
615+ * Lesser General Public License for more details.
616+ *
617+ * You should have received a copy of the GNU Lesser General Public
618+ * License along with this library; if not, write to the Free Software
619+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
620+ */
621+#ifndef QEMU_SPINLOCK_H
622+#define QEMU_SPINLOCK_H
623+
624+#ifdef __powerpc__
625+static inline int testandset (int *p)
626+{
627+ int ret;
628+ __asm__ __volatile__ (
629+ "0: lwarx %0,0,%1\n"
630+ " xor. %0,%3,%0\n"
631+ " bne 1f\n"
632+ " stwcx. %2,0,%1\n"
633+ " bne- 0b\n"
634+ "1: "
635+ : "=&r" (ret)
636+ : "r" (p), "r" (1), "r" (0)
637+ : "cr0", "memory");
638+ return ret;
639+}
640+#endif
641+
642+#ifdef __i386__
643+static inline int testandset (int *p)
644+{
645+ long int readval = 0;
646+
647+ __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
648+ : "+m" (*p), "+a" (readval)
649+ : "r" (1)
650+ : "cc");
651+ return readval;
652+}
653+#endif
654+
655+#ifdef __x86_64__
656+static inline int testandset (int *p)
657+{
658+ long int readval = 0;
659+
660+ __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
661+ : "+m" (*p), "+a" (readval)
662+ : "r" (1)
663+ : "cc");
664+ return readval;
665+}
666+#endif
667+
668+#ifdef __s390__
669+static inline int testandset (int *p)
670+{
671+ int ret;
672+
673+ __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
674+ " jl 0b"
675+ : "=&d" (ret)
676+ : "r" (1), "a" (p), "0" (*p)
677+ : "cc", "memory" );
678+ return ret;
679+}
680+#endif
681+
682+#ifdef __alpha__
683+static inline int testandset (int *p)
684+{
685+ int ret;
686+ unsigned long one;
687+
688+ __asm__ __volatile__ ("0: mov 1,%2\n"
689+ " ldl_l %0,%1\n"
690+ " stl_c %2,%1\n"
691+ " beq %2,1f\n"
692+ ".subsection 2\n"
693+ "1: br 0b\n"
694+ ".previous"
695+ : "=r" (ret), "=m" (*p), "=r" (one)
696+ : "m" (*p));
697+ return ret;
698+}
699+#endif
700+
701+#ifdef __sparc__
702+static inline int testandset (int *p)
703+{
704+ int ret;
705+
706+ __asm__ __volatile__("ldstub [%1], %0"
707+ : "=r" (ret)
708+ : "r" (p)
709+ : "memory");
710+
711+ return (ret ? 1 : 0);
712+}
713+#endif
714+
715+#ifdef __arm__
716+static inline int testandset (int *spinlock)
717+{
718+ register unsigned int ret;
719+ __asm__ __volatile__("swp %0, %1, [%2]"
720+ : "=r"(ret)
721+ : "0"(1), "r"(spinlock));
722+
723+ return ret;
724+}
725+#endif
726+
727+#ifdef __mc68000
728+static inline int testandset (int *p)
729+{
730+ char ret;
731+ __asm__ __volatile__("tas %1; sne %0"
732+ : "=r" (ret)
733+ : "m" (p)
734+ : "cc","memory");
735+ return ret;
736+}
737+#endif
738+
739+#ifdef __ia64
740+#include <ia64intrin.h>
741+
742+static inline int testandset (int *p)
743+{
744+ return __sync_lock_test_and_set (p, 1);
745+}
746+#endif
747+
748+typedef int spinlock_t;
749+
750+#define SPIN_LOCK_UNLOCKED 0
751+
752+#if defined(CONFIG_USER_ONLY)
753+static inline void spin_lock(spinlock_t *lock)
754+{
755+ while (testandset(lock));
756+}
757+
758+static inline void spin_unlock(spinlock_t *lock)
759+{
760+ *lock = 0;
761+}
762+
763+static inline int spin_trylock(spinlock_t *lock)
764+{
765+ return !testandset(lock);
766+}
767+#else
768+static inline void spin_lock(spinlock_t *lock)
769+{
770+}
771+
772+static inline void spin_unlock(spinlock_t *lock)
773+{
774+}
775+
776+static inline int spin_trylock(spinlock_t *lock)
777+{
778+ return 1;
779+}
780+#endif
781+
782+#endif
783Index: qemu/target-arm/cpu.h
784===================================================================
785--- qemu.orig/target-arm/cpu.h 2007-06-29 10:47:39.000000000 +0000
786+++ qemu/target-arm/cpu.h 2007-06-29 10:47:58.000000000 +0000
787@@ -37,6 +37,7 @@
788 #define EXCP_IRQ 5
789 #define EXCP_FIQ 6
790 #define EXCP_BKPT 7
791+#define EXCP_KERNEL_TRAP 8 /* Jumped to kernel code page. */
792
793 typedef void ARMWriteCPFunc(void *opaque, int cp_info,
794 int srcreg, int operand, uint32_t value);
795@@ -97,6 +98,7 @@
796 uint32_t c9_data;
797 uint32_t c13_fcse; /* FCSE PID. */
798 uint32_t c13_context; /* Context ID. */
799+ uint32_t c13_tls; /* Context ID. */
800 uint32_t c15_cpar; /* XScale Coprocessor Access Register */
801 } cp15;
802
803@@ -169,6 +171,15 @@
804 int cpu_arm_signal_handler(int host_signum, void *pinfo,
805 void *puc);
806
807+void cpu_lock(void);
808+void cpu_unlock(void);
809+#if defined(USE_NPTL)
810+static inline void cpu_set_tls(CPUARMState *env, void *newtls)
811+{
812+ env->cp15.c13_tls = (uint32_t)(long)newtls;
813+}
814+#endif
815+
816 #define CPSR_M (0x1f)
817 #define CPSR_T (1 << 5)
818 #define CPSR_F (1 << 6)
819@@ -180,7 +191,11 @@
820 #define CPSR_J (1 << 24)
821 #define CPSR_IT_0_1 (3 << 25)
822 #define CPSR_Q (1 << 27)
823-#define CPSR_NZCV (0xf << 28)
824+#define CPSR_V (1 << 28)
825+#define CPSR_C (1 << 29)
826+#define CPSR_Z (1 << 30)
827+#define CPSR_N (1 << 31)
828+#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
829
830 #define CACHED_CPSR_BITS (CPSR_T | CPSR_Q | CPSR_NZCV)
831 /* Return the current CPSR value. */
832Index: qemu/target-arm/exec.h
833===================================================================
834--- qemu.orig/target-arm/exec.h 2007-06-29 10:47:39.000000000 +0000
835+++ qemu/target-arm/exec.h 2007-06-29 10:47:58.000000000 +0000
836@@ -68,8 +68,6 @@
837
838 /* In op_helper.c */
839
840-void cpu_lock(void);
841-void cpu_unlock(void);
842 void helper_set_cp(CPUState *, uint32_t, uint32_t);
843 uint32_t helper_get_cp(CPUState *, uint32_t);
844 void helper_set_cp15(CPUState *, uint32_t, uint32_t);
845Index: qemu/target-arm/op.c
846===================================================================
847--- qemu.orig/target-arm/op.c 2007-06-29 10:47:39.000000000 +0000
848+++ qemu/target-arm/op.c 2007-06-29 10:47:58.000000000 +0000
849@@ -891,6 +891,12 @@
850 cpu_loop_exit();
851 }
852
853+void OPPROTO op_kernel_trap(void)
854+{
855+ env->exception_index = EXCP_KERNEL_TRAP;
856+ cpu_loop_exit();
857+}
858+
859 /* VFP support. We follow the convention used for VFP instrunctions:
860 Single precition routines have a "s" suffix, double precision a
861 "d" suffix. */
862Index: qemu/target-arm/op_mem.h
863===================================================================
864--- qemu.orig/target-arm/op_mem.h 2007-06-29 10:47:39.000000000 +0000
865+++ qemu/target-arm/op_mem.h 2007-06-29 10:47:58.000000000 +0000
866@@ -1,5 +1,6 @@
867 /* ARM memory operations. */
868
869+void helper_ld(uint32_t);
870 /* Load from address T1 into T0. */
871 #define MEM_LD_OP(name) \
872 void OPPROTO glue(op_ld##name,MEMSUFFIX)(void) \
873Index: qemu/target-arm/translate.c
874===================================================================
875--- qemu.orig/target-arm/translate.c 2007-06-29 10:47:39.000000000 +0000
876+++ qemu/target-arm/translate.c 2007-06-29 10:47:58.000000000 +0000
877@@ -3548,6 +3548,15 @@
878 nb_gen_labels = 0;
879 lj = -1;
880 do {
881+#ifdef CONFIG_USER_ONLY
882+ /* Intercept jump to the magic kernel page. */
883+ if (dc->pc > 0xffff0000) {
884+ gen_op_kernel_trap();
885+ dc->is_jmp = DISAS_UPDATE;
886+ break;
887+ }
888+#endif
889+
890 if (env->nb_breakpoints > 0) {
891 for(j = 0; j < env->nb_breakpoints; j++) {
892 if (env->breakpoints[j] == dc->pc) {
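
The page_set_flags(0xffff0000, 0xffff0fff, 0) registration above, together with
do_kernel_trap(), emulates the ARM kernel's user-helper page: fixed entry points
that guest userspace calls directly instead of issuing syscalls. A hedged sketch
of how guest code reaches the two helpers handled here (the addresses are taken
from the patch; the wrapper names are illustrative only):

    /* Call the kuser helpers the way ARM userspace does: by jumping to
       fixed addresses in the vector page.  Under this patch those jumps
       raise EXCP_KERNEL_TRAP and are serviced by do_kernel_trap(). */
    typedef int (*kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
    typedef unsigned int (*kernel_get_tls_t)(void);

    #define __kernel_cmpxchg ((kernel_cmpxchg_t)0xffff0fc0)
    #define __kernel_get_tls ((kernel_get_tls_t)0xffff0fe0)

    /* Atomically replace *ptr with desired if it still equals expected;
       returns 0 on success, non-zero otherwise. */
    static int guest_cmpxchg(int *ptr, int expected, int desired)
    {
        return __kernel_cmpxchg(expected, desired, ptr);
    }

    static unsigned int guest_read_tls(void)
    {
        return __kernel_get_tls();
    }

No real instructions ever execute at 0xffff0fc0/0xffff0fe0: the translate.c hunk
intercepts the jump into the magic page, raises EXCP_KERNEL_TRAP, and cpu_loop()
completes the operation in do_kernel_trap().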