Diffstat (limited to 'meta/packages/qemu/qemu-0.9.1/qemu-0.9.0-nptl.patch')
-rw-r--r--  meta/packages/qemu/qemu-0.9.1/qemu-0.9.0-nptl.patch  |  929
1 file changed, 0 insertions(+), 929 deletions(-)
diff --git a/meta/packages/qemu/qemu-0.9.1/qemu-0.9.0-nptl.patch b/meta/packages/qemu/qemu-0.9.1/qemu-0.9.0-nptl.patch
deleted file mode 100644
index 10e3cc04a5..0000000000
--- a/meta/packages/qemu/qemu-0.9.1/qemu-0.9.0-nptl.patch
+++ /dev/null
@@ -1,929 +0,0 @@
These are Paul Brook's patches to QEMU-0.8.2 to enable the running of single
ARM binaries under QEMU's user-emulation mode. Without them, QEMU-0.8.1
immediately dies saying:
   Error: f0005
   qemu: uncaught target signal 6 (Aborted) - exiting
while qemu-0.8.2 dies saying:
   qemu: Unsupported syscall: 983045
   cannot set up thread-local storage: unknown error

This file is a rediffing of the patches visible at
https://nowt.dyndns.org/patch.qemu_nptl on 27 Sept 2006
which "patch" fails to apply automatically.
See also http://lists.gnu.org/archive/html/qemu-devel/2006-09/msg00194.html

    Martin Guy, 27 Sept 2006

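As a quick cross-check (an editorial sketch, not part of the original preamble):
both error messages point at the same ARM-private syscall, since 983045 is
0xf0005, i.e. ARM_NR_BASE + 5, the set_tls call that the patch defines in
linux-user/arm/syscall.h and handles in cpu_loop():

    #include <stdio.h>

    /* Values as defined by the patch in linux-user/arm/syscall.h. */
    #define ARM_NR_BASE    0xf0000
    #define ARM_NR_set_tls (ARM_NR_BASE + 5)

    int main(void)
    {
        /* Prints "983045 == 0xf0005", the "Unsupported syscall" number above. */
        printf("%d == 0x%x\n", ARM_NR_set_tls, ARM_NR_set_tls);
        return 0;
    }
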
---
 configure                |   25 ++++++
 exec-all.h               |  165 ------------------------------------------
 linux-user/arm/syscall.h |    4 -
 linux-user/main.c        |   94 +++++++++++++++++++++---
 linux-user/qemu.h        |    3 
 linux-user/syscall.c     |   91 ++++++++++++++++++++++-
 qemu_spinlock.h          |  181 +++++++++++++++++++++++++++++++++++++++++++++
 target-arm/cpu.h         |   10 ++
 target-arm/op.c          |    6 +
 target-arm/translate.c   |    9 ++
 10 files changed, 405 insertions(+), 183 deletions(-)

30--- qemu.orig/configure
31+++ qemu/configure
32@@ -103,10 +103,11 @@ check_gcc="yes"
33 softmmu="yes"
34 linux_user="no"
35 darwin_user="no"
36 build_docs="no"
37 uname_release=""
38+nptl="yes"
39
40 # OS specific
41 targetos=`uname -s`
42 case $targetos in
43 CYGWIN*)
44@@ -322,10 +323,12 @@ for opt do
45 ;;
46 --disable-werror) werror="no"
47 ;;
48 *) echo "ERROR: unknown option $opt"; show_help="yes"
49 ;;
50+ --disable-nptl) nptl="no"
51+ ;;
52 esac
53 done
54
55 if [ "$bsd" = "yes" -o "$darwin" = "yes" -o "$mingw32" = "yes" ] ; then
56 AIOLIBS=
57@@ -417,10 +420,11 @@ echo " --enable-system enable
58 echo " --disable-system disable all system emulation targets"
59 echo " --enable-linux-user enable all linux usermode emulation targets"
60 echo " --disable-linux-user disable all linux usermode emulation targets"
61 echo " --enable-darwin-user enable all darwin usermode emulation targets"
62 echo " --disable-darwin-user disable all darwin usermode emulation targets"
63+echo " --disable-nptl disable usermode NPTL guest support"
64 echo " --fmod-lib path to FMOD library"
65 echo " --fmod-inc path to FMOD includes"
66 echo " --enable-uname-release=R Return R for uname -r in usermode emulation"
67 echo " --sparc_cpu=V Build qemu for Sparc architecture v7, v8, v8plus, v8plusa, v9"
68 echo ""
69@@ -583,10 +587,27 @@ fi
70 cat > $TMPC <<EOF
71 int main(void) {
72 }
73 EOF
74
75+# check NPTL support
76+cat > $TMPC <<EOF
77+#include <sched.h>
78+void foo()
79+{
80+#ifndef CLONE_SETTLS
81+#error bork
82+#endif
83+}
84+EOF
85+
86+if $cc -c -o $TMPO $TMPC 2> /dev/null ; then
87+ :
88+else
89+ nptl="no"
90+fi
91+
92 ##########################################
93 # SDL probe
94
95 sdl_too_old=no
96
97@@ -747,10 +768,11 @@ if test -n "$sparc_cpu"; then
98 fi
99 echo "kqemu support $kqemu"
100 echo "Documentation $build_docs"
101 [ ! -z "$uname_release" ] && \
102 echo "uname -r $uname_release"
103+echo "NPTL support $nptl"
104
105 if test $sdl_too_old = "yes"; then
106 echo "-> Your SDL version is too old - please upgrade to have SDL support"
107 fi
108 if [ -s /tmp/qemu-$$-sdl-config.log ]; then
109@@ -1063,10 +1085,13 @@ if test "$target_cpu" = "i386" ; then
110 fi
111 elif test "$target_cpu" = "arm" -o "$target_cpu" = "armeb" ; then
112 echo "TARGET_ARCH=arm" >> $config_mak
113 echo "#define TARGET_ARCH \"arm\"" >> $config_h
114 echo "#define TARGET_ARM 1" >> $config_h
115+ if test "$nptl" = "yes" ; then
116+ echo "#define USE_NPTL 1" >> $config_h
117+ fi
118 bflt="yes"
119 elif test "$target_cpu" = "sparc" ; then
120 echo "TARGET_ARCH=sparc" >> $config_mak
121 echo "#define TARGET_ARCH \"sparc\"" >> $config_h
122 echo "#define TARGET_SPARC 1" >> $config_h
123--- qemu.orig/exec-all.h
124+++ qemu/exec-all.h
125@@ -338,174 +338,11 @@ dummy_label ## n: ;\
126
127 extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
128 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
129 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
130
131-#if defined(__powerpc__)
132-static inline int testandset (int *p)
133-{
134- int ret;
135- __asm__ __volatile__ (
136- "0: lwarx %0,0,%1\n"
137- " xor. %0,%3,%0\n"
138- " bne 1f\n"
139- " stwcx. %2,0,%1\n"
140- " bne- 0b\n"
141- "1: "
142- : "=&r" (ret)
143- : "r" (p), "r" (1), "r" (0)
144- : "cr0", "memory");
145- return ret;
146-}
147-#elif defined(__i386__)
148-static inline int testandset (int *p)
149-{
150- long int readval = 0;
151-
152- __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
153- : "+m" (*p), "+a" (readval)
154- : "r" (1)
155- : "cc");
156- return readval;
157-}
158-#elif defined(__x86_64__)
159-static inline int testandset (int *p)
160-{
161- long int readval = 0;
162-
163- __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
164- : "+m" (*p), "+a" (readval)
165- : "r" (1)
166- : "cc");
167- return readval;
168-}
169-#elif defined(__s390__)
170-static inline int testandset (int *p)
171-{
172- int ret;
173-
174- __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
175- " jl 0b"
176- : "=&d" (ret)
177- : "r" (1), "a" (p), "0" (*p)
178- : "cc", "memory" );
179- return ret;
180-}
181-#elif defined(__alpha__)
182-static inline int testandset (int *p)
183-{
184- int ret;
185- unsigned long one;
186-
187- __asm__ __volatile__ ("0: mov 1,%2\n"
188- " ldl_l %0,%1\n"
189- " stl_c %2,%1\n"
190- " beq %2,1f\n"
191- ".subsection 2\n"
192- "1: br 0b\n"
193- ".previous"
194- : "=r" (ret), "=m" (*p), "=r" (one)
195- : "m" (*p));
196- return ret;
197-}
198-#elif defined(__sparc__)
199-static inline int testandset (int *p)
200-{
201- int ret;
202-
203- __asm__ __volatile__("ldstub [%1], %0"
204- : "=r" (ret)
205- : "r" (p)
206- : "memory");
207-
208- return (ret ? 1 : 0);
209-}
210-#elif defined(__arm__)
211-static inline int testandset (int *spinlock)
212-{
213- register unsigned int ret;
214- __asm__ __volatile__("swp %0, %1, [%2]"
215- : "=r"(ret)
216- : "0"(1), "r"(spinlock));
217-
218- return ret;
219-}
220-#elif defined(__mc68000)
221-static inline int testandset (int *p)
222-{
223- char ret;
224- __asm__ __volatile__("tas %1; sne %0"
225- : "=r" (ret)
226- : "m" (p)
227- : "cc","memory");
228- return ret;
229-}
230-#elif defined(__ia64)
231-
232-#include <ia64intrin.h>
233-
234-static inline int testandset (int *p)
235-{
236- return __sync_lock_test_and_set (p, 1);
237-}
238-#elif defined(__mips__)
239-static inline int testandset (int *p)
240-{
241- int ret;
242-
243- __asm__ __volatile__ (
244- " .set push \n"
245- " .set noat \n"
246- " .set mips2 \n"
247- "1: li $1, 1 \n"
248- " ll %0, %1 \n"
249- " sc $1, %1 \n"
250- " beqz $1, 1b \n"
251- " .set pop "
252- : "=r" (ret), "+R" (*p)
253- :
254- : "memory");
255-
256- return ret;
257-}
258-#else
259-#error unimplemented CPU support
260-#endif
261-
262-typedef int spinlock_t;
263-
264-#define SPIN_LOCK_UNLOCKED 0
265-
266-#if defined(CONFIG_USER_ONLY)
267-static inline void spin_lock(spinlock_t *lock)
268-{
269- while (testandset(lock));
270-}
271-
272-static inline void spin_unlock(spinlock_t *lock)
273-{
274- *lock = 0;
275-}
276-
277-static inline int spin_trylock(spinlock_t *lock)
278-{
279- return !testandset(lock);
280-}
281-#else
282-static inline void spin_lock(spinlock_t *lock)
283-{
284-}
285-
286-static inline void spin_unlock(spinlock_t *lock)
287-{
288-}
289-
290-static inline int spin_trylock(spinlock_t *lock)
291-{
292- return 1;
293-}
294-#endif
295+#include "qemu_spinlock.h"
296
297 extern spinlock_t tb_lock;
298
299 extern int tb_invalidated_flag;
300
301--- qemu.orig/linux-user/arm/syscall.h
302+++ qemu/linux-user/arm/syscall.h
303@@ -26,11 +26,13 @@ struct target_pt_regs {
304 #define ARM_ORIG_r0 uregs[17]
305
306 #define ARM_SYSCALL_BASE 0x900000
307 #define ARM_THUMB_SYSCALL 0
308
309-#define ARM_NR_cacheflush (ARM_SYSCALL_BASE + 0xf0000 + 2)
310+#define ARM_NR_BASE 0xf0000
311+#define ARM_NR_cacheflush (ARM_NR_BASE + 2)
312+#define ARM_NR_set_tls (ARM_NR_BASE + 5)
313
314 #define ARM_NR_semihosting 0x123456
315 #define ARM_NR_thumb_semihosting 0xAB
316
317 #if defined(TARGET_WORDS_BIGENDIAN)
318--- qemu.orig/linux-user/main.c
319+++ qemu/linux-user/main.c
320@@ -361,10 +361,54 @@ static void arm_cache_flush(abi_ulong st
321 break;
322 addr = last1 + 1;
323 }
324 }
325
326+/* Handle a jump to the kernel code page. */
327+static int
328+do_kernel_trap(CPUARMState *env)
329+{
330+ uint32_t addr;
331+ uint32_t *ptr;
332+ uint32_t cpsr;
333+
334+ switch (env->regs[15]) {
335+ case 0xffff0fc0: /* __kernel_cmpxchg */
336+ /* XXX: This only works between threads, not between processes.
337+ Use native atomic operations. */
338+ /* ??? This probably breaks horribly if the access segfaults. */
339+ cpu_lock();
340+ ptr = (uint32_t *)env->regs[2];
341+ cpsr = cpsr_read(env);
342+ if (*ptr == env->regs[0]) {
343+ *ptr = env->regs[1];
344+ env->regs[0] = 0;
345+ cpsr |= CPSR_C;
346+ } else {
347+ env->regs[0] = -1;
348+ cpsr &= ~CPSR_C;
349+ }
350+ cpsr_write(env, cpsr, CPSR_C);
351+ cpu_unlock();
352+ break;
353+ case 0xffff0fe0: /* __kernel_get_tls */
354+ env->regs[0] = env->cp15.c13_tls;
355+ break;
356+ default:
357+ return 1;
358+ }
359+ /* Jump back to the caller. */
360+ addr = env->regs[14];
361+ if (addr & 1) {
362+ env->thumb = 1;
363+ addr &= ~1;
364+ }
365+ env->regs[15] = addr;
366+
367+ return 0;
368+}
369+
370 void cpu_loop(CPUARMState *env)
371 {
372 int trapnr;
373 unsigned int n, insn;
374 target_siginfo_t info;
375@@ -471,32 +515,50 @@ void cpu_loop(CPUARMState *env)
376 get_user_u32(insn, env->regs[15] - 4);
377 n = insn & 0xffffff;
378 }
379 }
380
381- if (n == ARM_NR_cacheflush) {
382- arm_cache_flush(env->regs[0], env->regs[1]);
383- } else if (n == ARM_NR_semihosting
384- || n == ARM_NR_thumb_semihosting) {
385+ if (n == ARM_NR_semihosting
386+ || n == ARM_NR_thumb_semihosting) {
387 env->regs[0] = do_arm_semihosting (env);
388 } else if (n == 0 || n >= ARM_SYSCALL_BASE
389 || (env->thumb && n == ARM_THUMB_SYSCALL)) {
390 /* linux syscall */
391 if (env->thumb || n == 0) {
392 n = env->regs[7];
393 } else {
394 n -= ARM_SYSCALL_BASE;
395 env->eabi = 0;
396 }
397- env->regs[0] = do_syscall(env,
398- n,
399- env->regs[0],
400- env->regs[1],
401- env->regs[2],
402- env->regs[3],
403- env->regs[4],
404- env->regs[5]);
405+ if ( n > ARM_NR_BASE) {
406+ switch (n)
407+ {
408+ case ARM_NR_cacheflush:
409+ arm_cache_flush(env->regs[0], env->regs[1]);
410+ break;
411+#ifdef USE_NPTL
412+ case ARM_NR_set_tls:
413+ cpu_set_tls(env, env->regs[0]);
414+ env->regs[0] = 0;
415+ break;
416+#endif
417+ default:
418+ printf ("Error: Bad syscall: %x\n", n);
419+ goto error;
420+ }
421+ }
422+ else
423+ {
424+ env->regs[0] = do_syscall(env,
425+ n,
426+ env->regs[0],
427+ env->regs[1],
428+ env->regs[2],
429+ env->regs[3],
430+ env->regs[4],
431+ env->regs[5]);
432+ }
433 } else {
434 goto error;
435 }
436 }
437 break;
438@@ -531,10 +593,14 @@ void cpu_loop(CPUARMState *env)
439 info.si_code = TARGET_TRAP_BRKPT;
440 queue_signal(info.si_signo, &info);
441 }
442 }
443 break;
444+ case EXCP_KERNEL_TRAP:
445+ if (do_kernel_trap(env))
446+ goto error;
447+ break;
448 default:
449 error:
450 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
451 trapnr);
452 cpu_dump_state(env, stderr, fprintf, 0);
453@@ -2378,10 +2444,14 @@ int main(int argc, char **argv)
454 #if defined(TARGET_ARM) || defined(TARGET_M68K)
455 ts->stack_base = info->start_stack;
456 ts->heap_base = info->brk;
457 /* This will be filled in on the first SYS_HEAPINFO call. */
458 ts->heap_limit = 0;
459+ /* Register the magic kernel code page. The cpu will generate a
460+ special exception when it tries to execute code here. We can't
461+ put real code here because it may be in use by the host kernel. */
462+ page_set_flags(0xffff0000, 0xffff0fff, 0);
463 #endif
464
465 if (gdbstub_port) {
466 gdbserver_start (gdbstub_port);
467 gdb_handlesig(env, 0);
468--- qemu.orig/linux-user/qemu.h
469+++ qemu/linux-user/qemu.h
470@@ -100,10 +100,13 @@ typedef struct TaskState {
471 /* Extra fields for semihosted binaries. */
472 uint32_t stack_base;
473 uint32_t heap_base;
474 uint32_t heap_limit;
475 #endif
476+#ifdef USE_NPTL
477+ uint32_t *child_tidptr;
478+#endif
479 int used; /* non zero if used */
480 struct image_info *info;
481 uint8_t stack[0];
482 } __attribute__((aligned(16))) TaskState;
483
484--- qemu.orig/linux-user/syscall.c
485+++ qemu/linux-user/syscall.c
486@@ -69,13 +69,22 @@
487 #include <linux/soundcard.h>
488 #include <linux/dirent.h>
489 #include <linux/kd.h>
490
491 #include "qemu.h"
492+#include "qemu_spinlock.h"
493
494 //#define DEBUG
495
496+#ifdef USE_NPTL
497+#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
498+ CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
499+#else
500+/* XXX: Hardcode the above values. */
501+#define CLONE_NPTL_FLAGS2 0
502+#endif
503+
504 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
505 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
506 /* 16 bit uid wrappers emulation */
507 #define USE_UID16
508 #endif
509@@ -2690,27 +2699,46 @@ abi_long do_arch_prctl(CPUX86State *env,
510
511 /* this stack is the equivalent of the kernel stack associated with a
512 thread/process */
513 #define NEW_STACK_SIZE 8192
514
515+#ifdef USE_NPTL
516+static spinlock_t nptl_lock = SPIN_LOCK_UNLOCKED;
517+#endif
518+
519 static int clone_func(void *arg)
520 {
521 CPUState *env = arg;
522+#ifdef HAVE_NPTL
523+ /* Wait until the parent has finshed initializing the tls state. */
524+ while (!spin_trylock(&nptl_lock))
525+ usleep(1);
526+ spin_unlock(&nptl_lock);
527+#endif
528 cpu_loop(env);
529 /* never exits */
530 return 0;
531 }
532
533 /* do_fork() Must return host values and target errnos (unlike most
534 do_*() functions). */
535-int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp)
536+int do_fork(CPUState *env, unsigned int flags, unsigned long newsp,
537+ uint32_t *parent_tidptr, void *newtls,
538+ uint32_t *child_tidptr)
539 {
540 int ret;
541 TaskState *ts;
542 uint8_t *new_stack;
543 CPUState *new_env;
544
545+#ifdef USE_NPTL
546+ unsigned int nptl_flags;
547+
548+ if (flags & CLONE_PARENT_SETTID)
549+ *parent_tidptr = gettid();
550+#endif
551+
552 if (flags & CLONE_VM) {
553 ts = malloc(sizeof(TaskState) + NEW_STACK_SIZE);
554 memset(ts, 0, sizeof(TaskState));
555 new_stack = ts->stack;
556 ts->used = 1;
557@@ -2772,20 +2800,71 @@ int do_fork(CPUState *env, unsigned int
558 new_env->regs[14] = newsp;
559 #else
560 #error unsupported target CPU
561 #endif
562 new_env->opaque = ts;
563+#ifdef USE_NPTL
564+ nptl_flags = flags;
565+ flags &= ~CLONE_NPTL_FLAGS2;
566+
567+ if (nptl_flags & CLONE_CHILD_CLEARTID) {
568+ ts->child_tidptr = child_tidptr;
569+ }
570+
571+ if (nptl_flags & CLONE_SETTLS)
572+ cpu_set_tls (new_env, newtls);
573+
574+ /* Grab the global cpu lock so that the thread setup appears
575+ atomic. */
576+ if (nptl_flags & CLONE_CHILD_SETTID)
577+ spin_lock(&nptl_lock);
578+
579+#else
580+ if (flags & CLONE_NPTL_FLAGS2)
581+ return -EINVAL;
582+#endif
583+
584+ if (CLONE_VFORK & flags)
585+ flags ^= CLONE_VM;
586 #ifdef __ia64__
587 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
588 #else
589 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
590 #endif
591+#ifdef USE_NPTL
592+ if (ret != -1) {
593+ if (nptl_flags & CLONE_CHILD_SETTID)
594+ *child_tidptr = ret;
595+ }
596+
597+ /* Allow the child to continue. */
598+ if (nptl_flags & CLONE_CHILD_SETTID)
599+ spin_unlock(&nptl_lock);
600+#endif
601 } else {
602 /* if no CLONE_VM, we consider it is a fork */
603- if ((flags & ~CSIGNAL) != 0)
604+ if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
605 return -EINVAL;
606 ret = fork();
607+#ifdef USE_NPTL
608+ /* There is a race condition here. The parent process could
609+ theoretically read the TID in the child process before the child
610+ tid is set. This would require using either ptrace
611+ (not implemented) or having *_tidptr to point at a shared memory
612+ mapping. We can't repeat the spinlock hack used above because
613+ the child process gets its own copy of the lock. */
614+ if (ret == 0) {
615+ /* Child Process. */
616+ if (flags & CLONE_CHILD_SETTID)
617+ *child_tidptr = gettid();
618+ ts = (TaskState *)env->opaque;
619+ if (flags & CLONE_CHILD_CLEARTID)
620+ ts->child_tidptr = child_tidptr;
621+ if (flags & CLONE_SETTLS)
622+ cpu_set_tls (env, newtls);
623+ }
624+#endif
625 }
626 return ret;
627 }
628
629 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
630@@ -3106,11 +3185,11 @@ abi_long do_syscall(void *cpu_env, int n
631 break;
632 case TARGET_NR_brk:
633 ret = do_brk(arg1);
634 break;
635 case TARGET_NR_fork:
636- ret = get_errno(do_fork(cpu_env, SIGCHLD, 0));
637+ ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, NULL, NULL, NULL));
638 break;
639 #ifdef TARGET_NR_waitpid
640 case TARGET_NR_waitpid:
641 {
642 int status;
643@@ -4463,11 +4542,12 @@ abi_long do_syscall(void *cpu_env, int n
644 #endif
645 case TARGET_NR_fsync:
646 ret = get_errno(fsync(arg1));
647 break;
648 case TARGET_NR_clone:
649- ret = get_errno(do_fork(cpu_env, arg1, arg2));
650+ ret = get_errno(do_fork(cpu_env, arg1, arg2, (uint32_t *)arg3,
651+ (void *)arg4, (uint32_t *)arg5));
652 break;
653 #ifdef __NR_exit_group
654 /* new thread calls */
655 case TARGET_NR_exit_group:
656 gdb_exit(cpu_env, arg1);
657@@ -4908,11 +4988,12 @@ abi_long do_syscall(void *cpu_env, int n
658 case TARGET_NR_putpmsg:
659 goto unimplemented;
660 #endif
661 #ifdef TARGET_NR_vfork
662 case TARGET_NR_vfork:
663- ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0));
664+ ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
665+ NULL, NULL, NULL));
666 break;
667 #endif
668 #ifdef TARGET_NR_ugetrlimit
669 case TARGET_NR_ugetrlimit:
670 {
671--- /dev/null
672+++ qemu/qemu_spinlock.h
673@@ -0,0 +1,181 @@
674+/*
675+ * Atomic operation helper include
676+ *
677+ * Copyright (c) 2005 Fabrice Bellard
678+ *
679+ * This library is free software; you can redistribute it and/or
680+ * modify it under the terms of the GNU Lesser General Public
681+ * License as published by the Free Software Foundation; either
682+ * version 2 of the License, or (at your option) any later version.
683+ *
684+ * This library is distributed in the hope that it will be useful,
685+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
686+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
687+ * Lesser General Public License for more details.
688+ *
689+ * You should have received a copy of the GNU Lesser General Public
690+ * License along with this library; if not, write to the Free Software
691+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
692+ */
693+#ifndef QEMU_SPINLOCK_H
694+#define QEMU_SPINLOCK_H
695+
696+#ifdef __powerpc__
697+static inline int testandset (int *p)
698+{
699+ int ret;
700+ __asm__ __volatile__ (
701+ "0: lwarx %0,0,%1\n"
702+ " xor. %0,%3,%0\n"
703+ " bne 1f\n"
704+ " stwcx. %2,0,%1\n"
705+ " bne- 0b\n"
706+ "1: "
707+ : "=&r" (ret)
708+ : "r" (p), "r" (1), "r" (0)
709+ : "cr0", "memory");
710+ return ret;
711+}
712+#endif
713+
714+#ifdef __i386__
715+static inline int testandset (int *p)
716+{
717+ long int readval = 0;
718+
719+ __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
720+ : "+m" (*p), "+a" (readval)
721+ : "r" (1)
722+ : "cc");
723+ return readval;
724+}
725+#endif
726+
727+#ifdef __x86_64__
728+static inline int testandset (int *p)
729+{
730+ long int readval = 0;
731+
732+ __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
733+ : "+m" (*p), "+a" (readval)
734+ : "r" (1)
735+ : "cc");
736+ return readval;
737+}
738+#endif
739+
740+#ifdef __s390__
741+static inline int testandset (int *p)
742+{
743+ int ret;
744+
745+ __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
746+ " jl 0b"
747+ : "=&d" (ret)
748+ : "r" (1), "a" (p), "0" (*p)
749+ : "cc", "memory" );
750+ return ret;
751+}
752+#endif
753+
754+#ifdef __alpha__
755+static inline int testandset (int *p)
756+{
757+ int ret;
758+ unsigned long one;
759+
760+ __asm__ __volatile__ ("0: mov 1,%2\n"
761+ " ldl_l %0,%1\n"
762+ " stl_c %2,%1\n"
763+ " beq %2,1f\n"
764+ ".subsection 2\n"
765+ "1: br 0b\n"
766+ ".previous"
767+ : "=r" (ret), "=m" (*p), "=r" (one)
768+ : "m" (*p));
769+ return ret;
770+}
771+#endif
772+
773+#ifdef __sparc__
774+static inline int testandset (int *p)
775+{
776+ int ret;
777+
778+ __asm__ __volatile__("ldstub [%1], %0"
779+ : "=r" (ret)
780+ : "r" (p)
781+ : "memory");
782+
783+ return (ret ? 1 : 0);
784+}
785+#endif
786+
787+#ifdef __arm__
788+static inline int testandset (int *spinlock)
789+{
790+ register unsigned int ret;
791+ __asm__ __volatile__("swp %0, %1, [%2]"
792+ : "=r"(ret)
793+ : "0"(1), "r"(spinlock));
794+
795+ return ret;
796+}
797+#endif
798+
799+#ifdef __mc68000
800+static inline int testandset (int *p)
801+{
802+ char ret;
803+ __asm__ __volatile__("tas %1; sne %0"
804+ : "=r" (ret)
805+ : "m" (p)
806+ : "cc","memory");
807+ return ret;
808+}
809+#endif
810+
811+#ifdef __ia64
812+#include <ia64intrin.h>
813+
814+static inline int testandset (int *p)
815+{
816+ return __sync_lock_test_and_set (p, 1);
817+}
818+#endif
819+
820+typedef int spinlock_t;
821+
822+#define SPIN_LOCK_UNLOCKED 0
823+
824+#if defined(CONFIG_USER_ONLY)
825+static inline void spin_lock(spinlock_t *lock)
826+{
827+ while (testandset(lock));
828+}
829+
830+static inline void spin_unlock(spinlock_t *lock)
831+{
832+ *lock = 0;
833+}
834+
835+static inline int spin_trylock(spinlock_t *lock)
836+{
837+ return !testandset(lock);
838+}
839+#else
840+static inline void spin_lock(spinlock_t *lock)
841+{
842+}
843+
844+static inline void spin_unlock(spinlock_t *lock)
845+{
846+}
847+
848+static inline int spin_trylock(spinlock_t *lock)
849+{
850+ return 1;
851+}
852+#endif
853+
854+#endif
855--- qemu.orig/target-arm/cpu.h
856+++ qemu/target-arm/cpu.h
857@@ -36,10 +36,11 @@
858 #define EXCP_DATA_ABORT 4
859 #define EXCP_IRQ 5
860 #define EXCP_FIQ 6
861 #define EXCP_BKPT 7
862 #define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
863+#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
864
865 #define ARMV7M_EXCP_RESET 1
866 #define ARMV7M_EXCP_NMI 2
867 #define ARMV7M_EXCP_HARD 3
868 #define ARMV7M_EXCP_MEM 4
869@@ -220,10 +221,19 @@ int cpu_arm_signal_handler(int host_sign
870 void *puc);
871
872 void cpu_lock(void);
873 void cpu_unlock(void);
874
875+void cpu_lock(void);
876+void cpu_unlock(void);
877+#if defined(USE_NPTL)
878+static inline void cpu_set_tls(CPUARMState *env, void *newtls)
879+{
880+ env->cp15.c13_tls2 = (uint32_t)(long)newtls;
881+}
882+#endif
883+
884 #define CPSR_M (0x1f)
885 #define CPSR_T (1 << 5)
886 #define CPSR_F (1 << 6)
887 #define CPSR_I (1 << 7)
888 #define CPSR_A (1 << 8)
889--- qemu.orig/target-arm/op.c
890+++ qemu/target-arm/op.c
891@@ -1007,10 +1007,16 @@ void OPPROTO op_exception_exit(void)
892 {
893 env->exception_index = EXCP_EXCEPTION_EXIT;
894 cpu_loop_exit();
895 }
896
897+void OPPROTO op_kernel_trap(void)
898+{
899+ env->exception_index = EXCP_KERNEL_TRAP;
900+ cpu_loop_exit();
901+}
902+
903 /* VFP support. We follow the convention used for VFP instrunctions:
904 Single precition routines have a "s" suffix, double precision a
905 "d" suffix. */
906
907 #define VFP_OP(name, p) void OPPROTO op_vfp_##name##p(void)
908--- qemu.orig/target-arm/translate.c
909+++ qemu/target-arm/translate.c
910@@ -7518,11 +7518,18 @@ static inline int gen_intermediate_code_
911 /* We always get here via a jump, so know we are not in a
912 conditional execution block. */
913 gen_op_exception_exit();
914 }
915 #endif
916-
917+#ifdef CONFIG_USER_ONLY
918+ /* Intercept jump to the magic kernel page. */
919+ if (dc->pc > 0xffff0000) {
920+ gen_op_kernel_trap();
921+ dc->is_jmp = DISAS_UPDATE;
922+ break;
923+ }
924+#endif
925 if (env->nb_breakpoints > 0) {
926 for(j = 0; j < env->nb_breakpoints; j++) {
927 if (env->breakpoints[j] == dc->pc) {
928 gen_set_condexec(dc);
929 gen_op_movl_T0_im((long)dc->pc);
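
The do_kernel_trap() hunk in linux-user/main.c emulates two Linux ARM kernel
helpers (__kernel_cmpxchg at 0xffff0fc0 and __kernel_get_tls at 0xffff0fe0)
that NPTL code expects to find mapped in the high vector page. As a rough
illustration of why jumps to that page must be trapped - a sketch assuming the
standard helper calling convention, not code taken from this patch - guest
code reaches the helpers by calling fixed addresses:

    #include <stdio.h>

    /* Sketch only: meaningful when run as an ARM Linux (or qemu-arm) guest,
     * where these addresses hold the kernel-provided helper routines.
     * The addresses match the do_kernel_trap() cases above. */
    typedef int   (*kuser_cmpxchg_fn)(int oldval, int newval, volatile int *ptr);
    typedef void *(*kuser_get_tls_fn)(void);

    #define kuser_cmpxchg ((kuser_cmpxchg_fn)0xffff0fc0)
    #define kuser_get_tls  ((kuser_get_tls_fn)0xffff0fe0)

    /* Atomically add delta to *ptr; the helper returns 0 when it stored the
     * new value, exactly as the emulation above sets r0 = 0 on success. */
    static int atomic_add(volatile int *ptr, int delta)
    {
        int old;
        do {
            old = *ptr;
        } while (kuser_cmpxchg(old, old + delta, ptr) != 0);
        return old + delta;
    }

    int main(void)
    {
        volatile int counter = 0;
        atomic_add(&counter, 5);
        printf("counter = %d, tls = %p\n", counter, kuser_get_tls());
        return 0;
    }

The page_set_flags(0xffff0000, 0xffff0fff, 0) call and the EXCP_KERNEL_TRAP
plumbing above exist so that such jumps are routed into do_kernel_trap()
rather than into whatever the host happens to have at those addresses.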