summaryrefslogtreecommitdiffstats
path: root/meta/packages/qemu/files/94-oh-arm-nptl.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta/packages/qemu/files/94-oh-arm-nptl.patch')
-rw-r--r--meta/packages/qemu/files/94-oh-arm-nptl.patch900
1 files changed, 900 insertions, 0 deletions
diff --git a/meta/packages/qemu/files/94-oh-arm-nptl.patch b/meta/packages/qemu/files/94-oh-arm-nptl.patch
new file mode 100644
index 0000000000..37d7171983
--- /dev/null
+++ b/meta/packages/qemu/files/94-oh-arm-nptl.patch
@@ -0,0 +1,900 @@
1---
2 configure | 29 ++++++
3 exec-all.h | 165 --------------------------------------
4 linux-user/arm/syscall.h | 4
5 linux-user/main.c | 94 ++++++++++++++++++---
6 linux-user/qemu.h | 3
7 linux-user/syscall.c | 90 ++++++++++++++++++--
8 qemu_spinlock.h | 204 +++++++++++++++++++++++++++++++++++++++++++++++
9 target-arm/cpu.h | 19 ++++
10 target-arm/exec.h | 2
11 target-arm/op.c | 6 +
12 target-arm/translate.c | 10 ++
13 11 files changed, 437 insertions(+), 189 deletions(-)
14
15Index: qemu/configure
16===================================================================
17--- qemu.orig/configure 2007-06-13 11:51:56.000000000 +0100
18+++ qemu/configure 2007-06-13 11:51:57.000000000 +0100
19@@ -101,6 +101,7 @@ linux_user="no"
20 darwin_user="no"
21 build_docs="no"
22 uname_release=""
23+nptl="yes"
24
25 # OS specific
26 targetos=`uname -s`
27@@ -287,6 +288,8 @@ for opt do
28 *) echo "undefined SPARC architecture. Exiting";exit 1;;
29 esac
30 ;;
31+ --disable-nptl) nptl="no"
32+ ;;
33 esac
34 done
35
36@@ -530,6 +533,23 @@ int main(void) {
37 }
38 EOF
39
40+# check NPTL support
41+cat > $TMPC <<EOF
42+#include <sched.h>
43+void foo()
44+{
45+#ifndef CLONE_SETTLS
46+#error bork
47+#endif
48+}
49+EOF
50+
51+if $cc -c -o $TMPO $TMPC 2> /dev/null ; then
52+ :
53+else
54+ nptl="no"
55+fi
56+
57 ##########################################
58 # SDL probe
59
60@@ -681,6 +701,7 @@ if test -n "$sparc_cpu"; then
61 echo "Target Sparc Arch $sparc_cpu"
62 fi
63 echo "kqemu support $kqemu"
64+echo "NPTL support $nptl"
65 echo "Documentation $build_docs"
66 [ ! -z "$uname_release" ] && \
67 echo "uname -r $uname_release"
68@@ -1063,6 +1084,14 @@ if test "$target_user_only" = "no"; then
69 echo "SDL_CFLAGS=`$sdl_config --cflags`" >> $config_mak
70 fi
71 fi
72+else
73+ if test "$nptl" = "yes" ; then
74+ case "$target_cpu" in
75+ arm | armeb)
76+ echo "#define USE_NPTL 1" >> $config_h
77+ ;;
78+ esac
79+ fi
80 fi
81
82 if test "$cocoa" = "yes" ; then
83Index: qemu/exec-all.h
84===================================================================
85--- qemu.orig/exec-all.h 2007-06-13 11:48:22.000000000 +0100
86+++ qemu/exec-all.h 2007-06-13 11:51:57.000000000 +0100
87@@ -360,170 +360,7 @@ extern CPUWriteMemoryFunc *io_mem_write[
88 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
89 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
90
91-#if defined(__powerpc__)
92-static inline int testandset (int *p)
93-{
94- int ret;
95- __asm__ __volatile__ (
96- "0: lwarx %0,0,%1\n"
97- " xor. %0,%3,%0\n"
98- " bne 1f\n"
99- " stwcx. %2,0,%1\n"
100- " bne- 0b\n"
101- "1: "
102- : "=&r" (ret)
103- : "r" (p), "r" (1), "r" (0)
104- : "cr0", "memory");
105- return ret;
106-}
107-#elif defined(__i386__)
108-static inline int testandset (int *p)
109-{
110- long int readval = 0;
111-
112- __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
113- : "+m" (*p), "+a" (readval)
114- : "r" (1)
115- : "cc");
116- return readval;
117-}
118-#elif defined(__x86_64__)
119-static inline int testandset (int *p)
120-{
121- long int readval = 0;
122-
123- __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
124- : "+m" (*p), "+a" (readval)
125- : "r" (1)
126- : "cc");
127- return readval;
128-}
129-#elif defined(__s390__)
130-static inline int testandset (int *p)
131-{
132- int ret;
133-
134- __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
135- " jl 0b"
136- : "=&d" (ret)
137- : "r" (1), "a" (p), "0" (*p)
138- : "cc", "memory" );
139- return ret;
140-}
141-#elif defined(__alpha__)
142-static inline int testandset (int *p)
143-{
144- int ret;
145- unsigned long one;
146-
147- __asm__ __volatile__ ("0: mov 1,%2\n"
148- " ldl_l %0,%1\n"
149- " stl_c %2,%1\n"
150- " beq %2,1f\n"
151- ".subsection 2\n"
152- "1: br 0b\n"
153- ".previous"
154- : "=r" (ret), "=m" (*p), "=r" (one)
155- : "m" (*p));
156- return ret;
157-}
158-#elif defined(__sparc__)
159-static inline int testandset (int *p)
160-{
161- int ret;
162-
163- __asm__ __volatile__("ldstub [%1], %0"
164- : "=r" (ret)
165- : "r" (p)
166- : "memory");
167-
168- return (ret ? 1 : 0);
169-}
170-#elif defined(__arm__)
171-static inline int testandset (int *spinlock)
172-{
173- register unsigned int ret;
174- __asm__ __volatile__("swp %0, %1, [%2]"
175- : "=r"(ret)
176- : "0"(1), "r"(spinlock));
177-
178- return ret;
179-}
180-#elif defined(__mc68000)
181-static inline int testandset (int *p)
182-{
183- char ret;
184- __asm__ __volatile__("tas %1; sne %0"
185- : "=r" (ret)
186- : "m" (p)
187- : "cc","memory");
188- return ret;
189-}
190-#elif defined(__ia64)
191-
192-#include <ia64intrin.h>
193-
194-static inline int testandset (int *p)
195-{
196- return __sync_lock_test_and_set (p, 1);
197-}
198-#elif defined(__mips__)
199-static inline int testandset (int *p)
200-{
201- int ret;
202-
203- __asm__ __volatile__ (
204- " .set push \n"
205- " .set noat \n"
206- " .set mips2 \n"
207- "1: li $1, 1 \n"
208- " ll %0, %1 \n"
209- " sc $1, %1 \n"
210- " beqz $1, 1b \n"
211- " .set pop "
212- : "=r" (ret), "+R" (*p)
213- :
214- : "memory");
215-
216- return ret;
217-}
218-#else
219-#error unimplemented CPU support
220-#endif
221-
222-typedef int spinlock_t;
223-
224-#define SPIN_LOCK_UNLOCKED 0
225-
226-#if defined(CONFIG_USER_ONLY)
227-static inline void spin_lock(spinlock_t *lock)
228-{
229- while (testandset(lock));
230-}
231-
232-static inline void spin_unlock(spinlock_t *lock)
233-{
234- *lock = 0;
235-}
236-
237-static inline int spin_trylock(spinlock_t *lock)
238-{
239- return !testandset(lock);
240-}
241-#else
242-static inline void spin_lock(spinlock_t *lock)
243-{
244-}
245-
246-static inline void spin_unlock(spinlock_t *lock)
247-{
248-}
249-
250-static inline int spin_trylock(spinlock_t *lock)
251-{
252- return 1;
253-}
254-#endif
255+#include "qemu_spinlock.h"
256
257 extern spinlock_t tb_lock;
258
259Index: qemu/linux-user/arm/syscall.h
260===================================================================
261--- qemu.orig/linux-user/arm/syscall.h 2007-06-13 11:48:22.000000000 +0100
262+++ qemu/linux-user/arm/syscall.h 2007-06-13 11:51:57.000000000 +0100
263@@ -28,7 +28,9 @@ struct target_pt_regs {
264 #define ARM_SYSCALL_BASE 0x900000
265 #define ARM_THUMB_SYSCALL 0
266
267-#define ARM_NR_cacheflush (ARM_SYSCALL_BASE + 0xf0000 + 2)
268+#define ARM_NR_BASE 0xf0000
269+#define ARM_NR_cacheflush (ARM_NR_BASE + 2)
270+#define ARM_NR_set_tls (ARM_NR_BASE + 5)
271
272 #define ARM_NR_semihosting 0x123456
273 #define ARM_NR_thumb_semihosting 0xAB
274Index: qemu/linux-user/main.c
275===================================================================
276--- qemu.orig/linux-user/main.c 2007-06-13 11:51:55.000000000 +0100
277+++ qemu/linux-user/main.c 2007-06-13 11:51:57.000000000 +0100
278@@ -325,6 +325,50 @@ static void arm_cache_flush(target_ulong
279 }
280 }
281
282+/* Handle a jump to the kernel code page. */
283+static int
284+do_kernel_trap(CPUARMState *env)
285+{
286+ uint32_t addr;
287+ uint32_t *ptr;
288+ uint32_t cpsr;
289+
290+ switch (env->regs[15]) {
291+ case 0xffff0fc0: /* __kernel_cmpxchg */
292+ /* XXX: This only works between threads, not between processes.
293+ Use native atomic operations. */
294+ /* ??? This probably breaks horribly if the access segfaults. */
295+ cpu_lock();
296+ ptr = (uint32_t *)env->regs[2];
297+ cpsr = cpsr_read(env);
298+ if (*ptr == env->regs[0]) {
299+ *ptr = env->regs[1];
300+ env->regs[0] = 0;
301+ cpsr |= CPSR_C;
302+ } else {
303+ env->regs[0] = -1;
304+ cpsr &= ~CPSR_C;
305+ }
306+ cpsr_write(env, cpsr, CPSR_C);
307+ cpu_unlock();
308+ break;
309+ case 0xffff0fe0: /* __kernel_get_tls */
310+ env->regs[0] = env->cp15.c13_tls;
311+ break;
312+ default:
313+ return 1;
314+ }
315+ /* Jump back to the caller. */
316+ addr = env->regs[14];
317+ if (addr & 1) {
318+ env->thumb = 1;
319+ addr &= ~1;
320+ }
321+ env->regs[15] = addr;
322+
323+ return 0;
324+}
325+
326 void cpu_loop(CPUARMState *env)
327 {
328 int trapnr;
329@@ -430,10 +474,8 @@ void cpu_loop(CPUARMState *env)
330 }
331 }
332
333- if (n == ARM_NR_cacheflush) {
334- arm_cache_flush(env->regs[0], env->regs[1]);
335- } else if (n == ARM_NR_semihosting
336- || n == ARM_NR_thumb_semihosting) {
337+ if (n == ARM_NR_semihosting
338+ || n == ARM_NR_thumb_semihosting) {
339 env->regs[0] = do_arm_semihosting (env);
340 } else if (n == 0 || n >= ARM_SYSCALL_BASE
341 || (env->thumb && n == ARM_THUMB_SYSCALL)) {
342@@ -444,14 +486,34 @@ void cpu_loop(CPUARMState *env)
343 n -= ARM_SYSCALL_BASE;
344 env->eabi = 0;
345 }
346- env->regs[0] = do_syscall(env,
347- n,
348- env->regs[0],
349- env->regs[1],
350- env->regs[2],
351- env->regs[3],
352- env->regs[4],
353- env->regs[5]);
354+ if ( n > ARM_NR_BASE) {
355+ switch (n)
356+ {
357+ case ARM_NR_cacheflush:
358+ arm_cache_flush(env->regs[0], env->regs[1]);
359+ break;
360+#ifdef USE_NPTL
361+ case ARM_NR_set_tls:
362+ cpu_set_tls(env, env->regs[0]);
363+ env->regs[0] = 0;
364+ break;
365+#endif
366+ default:
367+ printf ("Error: Bad syscall: %x\n", n);
368+ goto error;
369+ }
370+ }
371+ else
372+ {
373+ env->regs[0] = do_syscall(env,
374+ n,
375+ env->regs[0],
376+ env->regs[1],
377+ env->regs[2],
378+ env->regs[3],
379+ env->regs[4],
380+ env->regs[5]);
381+ }
382 } else {
383 goto error;
384 }
385@@ -490,6 +552,10 @@ void cpu_loop(CPUARMState *env)
386 }
387 }
388 break;
389+ case EXCP_KERNEL_TRAP:
390+ if (do_kernel_trap(env))
391+ goto error;
392+ break;
393 default:
394 error:
395 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
396@@ -2096,6 +2162,10 @@ int main(int argc, char **argv)
397 ts->heap_base = info->brk;
398 /* This will be filled in on the first SYS_HEAPINFO call. */
399 ts->heap_limit = 0;
400+ /* Register the magic kernel code page. The cpu will generate a
401+ special exception when it tries to execute code here. We can't
402+ put real code here because it may be in use by the host kernel. */
403+ page_set_flags(0xffff0000, 0xffff0fff, 0);
404 #endif
405
406 if (gdbstub_port) {
407Index: qemu/linux-user/qemu.h
408===================================================================
409--- qemu.orig/linux-user/qemu.h 2007-06-13 11:48:22.000000000 +0100
410+++ qemu/linux-user/qemu.h 2007-06-13 11:51:57.000000000 +0100
411@@ -81,6 +81,9 @@ typedef struct TaskState {
412 uint32_t heap_limit;
413 #endif
414 int used; /* non zero if used */
415+#ifdef USE_NPTL
416+ uint32_t *child_tidptr;
417+#endif
418 struct image_info *info;
419 uint8_t stack[0];
420 } __attribute__((aligned(16))) TaskState;
421Index: qemu/linux-user/syscall.c
422===================================================================
423--- qemu.orig/linux-user/syscall.c 2007-06-13 11:51:55.000000000 +0100
424+++ qemu/linux-user/syscall.c 2007-06-13 11:51:57.000000000 +0100
425@@ -71,9 +71,18 @@
426 #include <linux/kd.h>
427
428 #include "qemu.h"
429+#include "qemu_spinlock.h"
430
431 //#define DEBUG
432
433+#ifdef USE_NPTL
434+#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
435+ CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
436+#else
437+/* XXX: Hardcode the above values. */
438+#define CLONE_NPTL_FLAGS2 0
439+#endif
440+
441 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
442 || defined(TARGET_M68K) || defined(TARGET_SH4)
443 /* 16 bit uid wrappers emulation */
444@@ -2121,20 +2130,38 @@ int do_modify_ldt(CPUX86State *env, int
445 thread/process */
446 #define NEW_STACK_SIZE 8192
447
448+#ifdef USE_NPTL
449+static spinlock_t nptl_lock = SPIN_LOCK_UNLOCKED;
450+#endif
451+
452 static int clone_func(void *arg)
453 {
454 CPUState *env = arg;
 455+#ifdef USE_NPTL
 456+ /* Wait until the parent has finished initializing the tls state. */
457+ while (!spin_trylock(&nptl_lock))
458+ usleep(1);
459+ spin_unlock(&nptl_lock);
460+#endif
461 cpu_loop(env);
462 /* never exits */
463 return 0;
464 }
465
466-int do_fork(CPUState *env, unsigned int flags, unsigned long newsp)
467+int do_fork(CPUState *env, unsigned int flags, unsigned long newsp,
468+ uint32_t *parent_tidptr, void *newtls,
469+ uint32_t *child_tidptr)
470 {
471 int ret;
472 TaskState *ts;
473 uint8_t *new_stack;
474 CPUState *new_env;
475+#ifdef USE_NPTL
476+ unsigned int nptl_flags;
477+
478+ if (flags & CLONE_PARENT_SETTID)
479+ *parent_tidptr = gettid();
480+#endif
481
482 if (flags & CLONE_VM) {
483 ts = malloc(sizeof(TaskState) + NEW_STACK_SIZE);
484@@ -2197,16 +2224,60 @@ int do_fork(CPUState *env, unsigned int
485 #error unsupported target CPU
486 #endif
487 new_env->opaque = ts;
488+#ifdef USE_NPTL
489+ nptl_flags = flags;
490+ flags &= ~CLONE_NPTL_FLAGS2;
491+ if (nptl_flags & CLONE_CHILD_CLEARTID) {
492+ ts->child_tidptr = child_tidptr;
493+ }
494+ if (nptl_flags & CLONE_SETTLS)
495+ cpu_set_tls (new_env, newtls);
496+ /* Grab the global cpu lock so that the thread setup appears
497+ atomic. */
498+ if (nptl_flags & CLONE_CHILD_SETTID)
499+ spin_lock(&nptl_lock);
500+#else
501+ if (flags & CLONE_NPTL_FLAGS2)
502+ return -EINVAL;
503+#endif
504+
505 #ifdef __ia64__
506 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
507 #else
508 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
509 #endif
510+#ifdef USE_NPTL
511+ if (ret != -1) {
512+ if (nptl_flags & CLONE_CHILD_SETTID)
513+ *child_tidptr = ret;
514+ }
515+ /* Allow the child to continue. */
516+ if (nptl_flags & CLONE_CHILD_SETTID)
517+ spin_unlock(&nptl_lock);
518+#endif
519 } else {
520- /* if no CLONE_VM, we consider it is a fork */
521- if ((flags & ~CSIGNAL) != 0)
522- return -EINVAL;
523- ret = fork();
524+ /* if no CLONE_VM, we consider it is a fork */
525+ if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
526+ return -EINVAL;
527+ ret = fork();
528+#ifdef USE_NPTL
529+ /* There is a race condition here. The parent process could
530+ theoretically read the TID in the child process before the child
531+ tid is set. This would require using either ptrace
532+ (not implemented) or having *_tidptr to point at a shared memory
533+ mapping. We can't repeat the spinlock hack used above because
534+ the child process gets its own copy of the lock. */
535+ if (ret == 0) {
536+ /* Child Process. */
537+ if (flags & CLONE_CHILD_SETTID)
538+ *child_tidptr = gettid();
539+ ts = (TaskState *)env->opaque;
540+ if (flags & CLONE_CHILD_CLEARTID)
541+ ts->child_tidptr = child_tidptr;
542+ if (flags & CLONE_SETTLS)
543+ cpu_set_tls (env, newtls);
544+ }
545+#endif
546 }
547 return ret;
548 }
549@@ -2483,7 +2554,7 @@ long do_syscall(void *cpu_env, int num,
550 ret = do_brk(arg1);
551 break;
552 case TARGET_NR_fork:
553- ret = get_errno(do_fork(cpu_env, SIGCHLD, 0));
554+ ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, NULL, NULL, NULL));
555 break;
556 #ifdef TARGET_NR_waitpid
557 case TARGET_NR_waitpid:
558@@ -3648,7 +3719,8 @@ long do_syscall(void *cpu_env, int num,
559 ret = get_errno(fsync(arg1));
560 break;
561 case TARGET_NR_clone:
562- ret = get_errno(do_fork(cpu_env, arg1, arg2));
563+ ret = get_errno(do_fork(cpu_env, arg1, arg2, (uint32_t *)arg3,
564+ (void *)arg4, (uint32_t *)arg5));
565 break;
566 #ifdef __NR_exit_group
567 /* new thread calls */
568@@ -4062,7 +4134,8 @@ long do_syscall(void *cpu_env, int num,
569 #endif
570 #ifdef TARGET_NR_vfork
571 case TARGET_NR_vfork:
572- ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0));
573+ ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
574+ NULL, NULL, NULL));
575 break;
576 #endif
577 #ifdef TARGET_NR_ugetrlimit
578@@ -4660,4 +4733,3 @@ long do_syscall(void *cpu_env, int num,
579 #endif
580 return ret;
581 }
582-
583Index: qemu/target-arm/cpu.h
584===================================================================
585--- qemu.orig/target-arm/cpu.h 2007-06-13 11:48:22.000000000 +0100
586+++ qemu/target-arm/cpu.h 2007-06-13 11:51:57.000000000 +0100
587@@ -37,6 +37,9 @@
588 #define EXCP_IRQ 5
589 #define EXCP_FIQ 6
590 #define EXCP_BKPT 7
591+#define EXCP_KERNEL_TRAP 8 /* Jumped to kernel code page. */
592+
593+
594
595 typedef void ARMWriteCPFunc(void *opaque, int cp_info,
596 int srcreg, int operand, uint32_t value);
597@@ -97,6 +100,7 @@ typedef struct CPUARMState {
598 uint32_t c9_data;
599 uint32_t c13_fcse; /* FCSE PID. */
600 uint32_t c13_context; /* Context ID. */
 601+ uint32_t c13_tls; /* Thread-local storage pointer (set by cpu_set_tls, read by __kernel_get_tls). */
602 uint32_t c15_cpar; /* XScale Coprocessor Access Register */
603 } cp15;
604
605@@ -169,6 +173,15 @@ void switch_mode(CPUARMState *, int);
606 int cpu_arm_signal_handler(int host_signum, void *pinfo,
607 void *puc);
608
609+void cpu_lock(void);
610+void cpu_unlock(void);
611+#if defined(USE_NPTL)
612+static inline void cpu_set_tls(CPUARMState *env, void *newtls)
613+{
614+ env->cp15.c13_tls = (uint32_t)newtls;
615+}
616+#endif
617+
618 #define CPSR_M (0x1f)
619 #define CPSR_T (1 << 5)
620 #define CPSR_F (1 << 6)
621@@ -180,7 +193,11 @@ int cpu_arm_signal_handler(int host_sign
622 #define CPSR_J (1 << 24)
623 #define CPSR_IT_0_1 (3 << 25)
624 #define CPSR_Q (1 << 27)
625-#define CPSR_NZCV (0xf << 28)
626+#define CPSR_V (1 << 28)
627+#define CPSR_C (1 << 29)
628+#define CPSR_Z (1 << 30)
629+#define CPSR_N (1 << 31)
630+#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
631
632 #define CACHED_CPSR_BITS (CPSR_T | CPSR_Q | CPSR_NZCV)
633 /* Return the current CPSR value. */
634Index: qemu/target-arm/exec.h
635===================================================================
636--- qemu.orig/target-arm/exec.h 2007-06-13 11:48:22.000000000 +0100
637+++ qemu/target-arm/exec.h 2007-06-13 11:51:57.000000000 +0100
638@@ -68,8 +68,6 @@ static inline int cpu_halted(CPUState *e
639
640 /* In op_helper.c */
641
642-void cpu_lock(void);
643-void cpu_unlock(void);
644 void helper_set_cp(CPUState *, uint32_t, uint32_t);
645 uint32_t helper_get_cp(CPUState *, uint32_t);
646 void helper_set_cp15(CPUState *, uint32_t, uint32_t);
647Index: qemu/target-arm/op.c
648===================================================================
649--- qemu.orig/target-arm/op.c 2007-06-13 11:48:22.000000000 +0100
650+++ qemu/target-arm/op.c 2007-06-13 11:51:57.000000000 +0100
651@@ -891,6 +891,12 @@ void OPPROTO op_bkpt(void)
652 cpu_loop_exit();
653 }
654
655+void OPPROTO op_kernel_trap(void)
656+{
657+ env->exception_index = EXCP_KERNEL_TRAP;
658+ cpu_loop_exit();
659+}
660+
661 /* VFP support. We follow the convention used for VFP instrunctions:
662 Single precition routines have a "s" suffix, double precision a
663 "d" suffix. */
664Index: qemu/target-arm/translate.c
665===================================================================
666--- qemu.orig/target-arm/translate.c 2007-06-13 11:48:22.000000000 +0100
667+++ qemu/target-arm/translate.c 2007-06-13 11:51:57.000000000 +0100
668@@ -3513,6 +3513,7 @@ undef:
669 s->is_jmp = DISAS_JUMP;
670 }
671
672+
673 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
674 basic block 'tb'. If search_pc is TRUE, also generate PC
675 information for each intermediate instruction. */
676@@ -3548,6 +3549,15 @@ static inline int gen_intermediate_code_
677 nb_gen_labels = 0;
678 lj = -1;
679 do {
680+#ifdef CONFIG_USER_ONLY
681+ /* Intercept jump to the magic kernel page. */
682+ if (dc->pc > 0xffff0000) {
683+ gen_op_kernel_trap();
684+ dc->is_jmp = DISAS_UPDATE;
685+ break;
686+ }
687+#endif
688+
689 if (env->nb_breakpoints > 0) {
690 for(j = 0; j < env->nb_breakpoints; j++) {
691 if (env->breakpoints[j] == dc->pc) {
692Index: qemu/qemu_spinlock.h
693===================================================================
694--- /dev/null 1970-01-01 00:00:00.000000000 +0000
695+++ qemu/qemu_spinlock.h 2007-06-13 11:51:57.000000000 +0100
696@@ -0,0 +1,204 @@
697+/*
698+ * internal execution defines for qemu
699+ *
700+ * Copyright (c) 2003 Fabrice Bellard
701+ *
702+ * This library is free software; you can redistribute it and/or
703+ * modify it under the terms of the GNU Lesser General Public
704+ * License as published by the Free Software Foundation; either
705+ * version 2 of the License, or (at your option) any later version.
706+ *
707+ * This library is distributed in the hope that it will be useful,
708+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
709+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
710+ * Lesser General Public License for more details.
711+ *
712+ * You should have received a copy of the GNU Lesser General Public
713+ * License along with this library; if not, write to the Free Software
714+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
715+ */
716+
717+#ifndef _QEMU_SPINLOCK_H
718+#define _QEMU_SPINLOCK_H
719+
720+#ifdef __powerpc__
721+static inline int testandset (int *p)
722+{
723+ int ret;
724+ __asm__ __volatile__ (
725+ "0: lwarx %0,0,%1\n"
726+ " xor. %0,%3,%0\n"
727+ " bne 1f\n"
728+ " stwcx. %2,0,%1\n"
729+ " bne- 0b\n"
730+ "1: "
731+ : "=&r" (ret)
732+ : "r" (p), "r" (1), "r" (0)
733+ : "cr0", "memory");
734+ return ret;
735+}
736+#endif
737+
738+#ifdef __i386__
739+static inline int testandset (int *p)
740+{
741+ long int readval = 0;
742+
743+ __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
744+ : "+m" (*p), "+a" (readval)
745+ : "r" (1)
746+ : "cc");
747+ return readval;
748+}
749+#endif
750+
751+#ifdef __x86_64__
752+static inline int testandset (int *p)
753+{
754+ long int readval = 0;
755+
756+ __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
757+ : "+m" (*p), "+a" (readval)
758+ : "r" (1)
759+ : "cc");
760+ return readval;
761+}
762+#endif
763+
764+#ifdef __s390__
765+static inline int testandset (int *p)
766+{
767+ int ret;
768+
769+ __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
770+ " jl 0b"
771+ : "=&d" (ret)
772+ : "r" (1), "a" (p), "0" (*p)
773+ : "cc", "memory" );
774+ return ret;
775+}
776+#endif
777+
778+#ifdef __alpha__
779+static inline int testandset (int *p)
780+{
781+ int ret;
782+ unsigned long one;
783+
784+ __asm__ __volatile__ ("0: mov 1,%2\n"
785+ " ldl_l %0,%1\n"
786+ " stl_c %2,%1\n"
787+ " beq %2,1f\n"
788+ ".subsection 2\n"
789+ "1: br 0b\n"
790+ ".previous"
791+ : "=r" (ret), "=m" (*p), "=r" (one)
792+ : "m" (*p));
793+ return ret;
794+}
795+#endif
796+
797+#ifdef __sparc__
798+static inline int testandset (int *p)
799+{
800+ int ret;
801+
802+ __asm__ __volatile__("ldstub [%1], %0"
803+ : "=r" (ret)
804+ : "r" (p)
805+ : "memory");
806+
807+ return (ret ? 1 : 0);
808+}
809+#endif
810+
811+#ifdef __arm__
812+static inline int testandset (int *spinlock)
813+{
814+ register unsigned int ret;
815+ __asm__ __volatile__("swp %0, %1, [%2]"
816+ : "=r"(ret)
817+ : "0"(1), "r"(spinlock));
818+
819+ return ret;
820+}
821+#endif
822+
823+#ifdef __mc68000
824+static inline int testandset (int *p)
825+{
826+ char ret;
827+ __asm__ __volatile__("tas %1; sne %0"
828+ : "=r" (ret)
829+ : "m" (p)
830+ : "cc","memory");
831+ return ret;
832+}
833+#endif
834+
835+#ifdef __ia64
836+#include <ia64intrin.h>
837+
838+static inline int testandset (int *p)
839+{
840+ return __sync_lock_test_and_set (p, 1);
841+}
842+#endif
843+
844+#ifdef __mips__
845+static inline int testandset (int *p)
846+{
847+ int ret;
848+
849+ __asm__ __volatile__ (
850+ " .set push \n"
851+ " .set noat \n"
852+ " .set mips2 \n"
853+ "1: li $1, 1 \n"
854+ " ll %0, %1 \n"
855+ " sc $1, %1 \n"
856+ " beqz $1, 1b \n"
857+ " .set pop "
858+ : "=r" (ret), "+R" (*p)
859+ :
860+ : "memory");
861+
862+ return ret;
863+}
864+#endif
865+
866+typedef int spinlock_t;
867+
868+#define SPIN_LOCK_UNLOCKED 0
869+
870+#if defined(CONFIG_USER_ONLY)
871+static inline void spin_lock(spinlock_t *lock)
872+{
873+ while (testandset(lock));
874+}
875+
876+static inline void spin_unlock(spinlock_t *lock)
877+{
878+ *lock = 0;
879+}
880+
881+static inline int spin_trylock(spinlock_t *lock)
882+{
883+ return !testandset(lock);
884+}
885+#else
886+static inline void spin_lock(spinlock_t *lock)
887+{
888+}
889+
890+static inline void spin_unlock(spinlock_t *lock)
891+{
892+}
893+
894+static inline int spin_trylock(spinlock_t *lock)
895+{
896+ return 1;
897+}
898+#endif
899+
900+#endif /* ! _QEMU_SPINLOCK_H */