author     Tudor Florea <tudor.florea@enea.com>   2015-11-16 22:21:57 +0100
committer  Adrian Dudau <adrian.dudau@enea.com>   2015-11-19 13:28:36 +0100
commit     3380e86a98999da9a3febc1b3d93494882162216 (patch)
tree       dd1489067a463dd46c3f8131714f9c37179a6436
parent     a130fba56f34391c7e921b2e2fd2ba174002e6a5 (diff)
download   meta-enea-bsp-arm-3380e86a98999da9a3febc1b3d93494882162216.tar.gz
python: add support for aarch64 for ctypes module
Signed-off-by: Tudor Florea <tudor.florea@enea.com>
Signed-off-by: Adrian Dudau <adrian.dudau@enea.com>
-rw-r--r--  recipes-devtools/python/python/ctypes-libffi-aarch64.patch    22
-rw-r--r--  recipes-devtools/python/python/libffi-aarch64.patch          1608
-rw-r--r--  recipes-devtools/python/python_2.7.3.bbappend                    6
3 files changed, 1636 insertions(+), 0 deletions(-)
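
For context on what this change enables: every ctypes call is dispatched through the libffi copy bundled with Python, so until that libffi knows the architecture, no foreign call works on aarch64. A minimal smoke test of the kind this commit makes possible (a hedged sketch; the library lookup via ctypes.util is an illustrative choice, not part of the commit):

# Minimal ctypes check; each call below goes through the bundled libffi,
# so on aarch64 it only works once this backport is applied.
from ctypes import CDLL, c_double
from ctypes.util import find_library

libm = CDLL(find_library("m"))
libm.pow.restype = c_double
libm.pow.argtypes = [c_double, c_double]
print(libm.pow(2.0, 10.0))  # 1024.0

On aarch64 both arguments and the result of pow() travel in vector registers (d0/d1), which is the path the new ffi.c guards with AARCH64_FFI_WITH_V.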
diff --git a/recipes-devtools/python/python/ctypes-libffi-aarch64.patch b/recipes-devtools/python/python/ctypes-libffi-aarch64.patch
new file mode 100644
index 0000000..7349c7b
--- /dev/null
+++ b/recipes-devtools/python/python/ctypes-libffi-aarch64.patch
@@ -0,0 +1,22 @@
Add missing fficonfig.py bits for aarch64

# HG changeset patch
# User Andreas Schwab <schwab@suse.de>
# Date 1367276434 -7200
# Node ID 05e8999a3901b4853e60d6701510e9b3dd54a7f3
# Parent 84cef4f1999ad9e362694cdac2f65f0981e3d5d0

Upstream-Status: Backport
Signed-off-by: Tudor Florea <tudor.florea@enea.com>

diff -r 84cef4f1999a -r 05e8999a3901 Modules/_ctypes/libffi/fficonfig.py.in
--- a/Modules/_ctypes/libffi/fficonfig.py.in Mon Apr 29 16:09:39 2013 -0400
+++ b/Modules/_ctypes/libffi/fficonfig.py.in Tue Apr 30 01:00:34 2013 +0200
@@ -28,6 +28,7 @@
 'PA': ['src/pa/linux.S', 'src/pa/ffi.c'],
 'PA_LINUX': ['src/pa/linux.S', 'src/pa/ffi.c'],
 'PA_HPUX': ['src/pa/hpux32.S', 'src/pa/ffi.c'],
+ 'AARCH64' : ['src/aarch64/ffi.c', 'src/aarch64/sysv.S'],
 }

 ffi_sources += ffi_platforms['@TARGET@']
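
The hunk above only touches the fficonfig.py.in template; at build time configure substitutes @TARGET@ with the platform chosen in configure.ac, and Python's setup.py consumes the resulting source list. A sketch of that selection logic, with the mapping trimmed to the entries shown in the hunk (the driver code around the dict is assumed, not part of this patch):

# Sketch of the generated fficonfig.py logic: configure replaces
# @TARGET@ with the real platform name, then the platform-specific
# libffi sources are appended to the build.
ffi_sources = []  # common libffi sources would already be listed here (assumed)

ffi_platforms = {
    'PA':       ['src/pa/linux.S', 'src/pa/ffi.c'],
    'PA_LINUX': ['src/pa/linux.S', 'src/pa/ffi.c'],
    'PA_HPUX':  ['src/pa/hpux32.S', 'src/pa/ffi.c'],
    'AARCH64':  ['src/aarch64/ffi.c', 'src/aarch64/sysv.S'],  # entry added above
}

target = 'AARCH64'  # configure writes the literal value where @TARGET@ stood
ffi_sources += ffi_platforms[target]
print(ffi_sources)  # ['src/aarch64/ffi.c', 'src/aarch64/sysv.S']

Without the new entry, ffi_platforms['AARCH64'] raises KeyError during the build, which is the failure this one-line fix addresses.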
diff --git a/recipes-devtools/python/python/libffi-aarch64.patch b/recipes-devtools/python/python/libffi-aarch64.patch
new file mode 100644
index 0000000..5581922
--- /dev/null
+++ b/recipes-devtools/python/python/libffi-aarch64.patch
@@ -0,0 +1,1608 @@
Add support for aarch64 for ctypes module

Python has its own copy of libffi, used for the ctypes module.
libffi 3.0.10, contained in the original source of Python-2.7.3, does
not support the aarch64 architecture.
This patch backports aarch64 support from libffi 3.1.

Upstream-Status: Backport
Signed-off-by: Tudor Florea <tudor.florea@enea.com>

diff -ruN Python-2.7.3.orig/Modules/_ctypes/libffi/configure.ac Python-2.7.3/Modules/_ctypes/libffi/configure.ac
--- Python-2.7.3.orig/Modules/_ctypes/libffi/configure.ac 2015-02-27 23:15:16.118393178 +0100
+++ Python-2.7.3/Modules/_ctypes/libffi/configure.ac 2015-02-27 23:51:03.351556903 +0100
@@ -44,6 +44,10 @@

 TARGETDIR="unknown"
 case "$host" in
+ aarch64*-*-*)
+ TARGET=AARCH64; TARGETDIR=aarch64
+ ;;
+
 alpha*-*-*)
 TARGET=ALPHA; TARGETDIR=alpha;
 # Support 128-bit long double, changeable via command-line switch.
@@ -195,6 +199,7 @@
 AM_CONDITIONAL(POWERPC_AIX, test x$TARGET = xPOWERPC_AIX)
 AM_CONDITIONAL(POWERPC_DARWIN, test x$TARGET = xPOWERPC_DARWIN)
 AM_CONDITIONAL(POWERPC_FREEBSD, test x$TARGET = xPOWERPC_FREEBSD)
+AM_CONDITIONAL(AARCH64, test x$TARGET = xAARCH64)
 AM_CONDITIONAL(ARM, test x$TARGET = xARM)
 AM_CONDITIONAL(AVR32, test x$TARGET = xAVR32)
 AM_CONDITIONAL(LIBFFI_CRIS, test x$TARGET = xLIBFFI_CRIS)
diff -ruN Python-2.7.3.orig/Modules/_ctypes/libffi/src/aarch64/ffi.c Python-2.7.3/Modules/_ctypes/libffi/src/aarch64/ffi.c
--- Python-2.7.3.orig/Modules/_ctypes/libffi/src/aarch64/ffi.c 1970-01-01 01:00:00.000000000 +0100
+++ Python-2.7.3/Modules/_ctypes/libffi/src/aarch64/ffi.c 2014-04-25 19:45:13.000000000 +0200
@@ -0,0 +1,1168 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include <stdio.h>
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+
+/* Stack alignment requirement in bytes */
+#if defined (__APPLE__)
+#define AARCH64_STACK_ALIGN 1
+#else
+#define AARCH64_STACK_ALIGN 16
+#endif
+
+#define N_X_ARG_REG 8
+#define N_V_ARG_REG 8
+
+#define AARCH64_FFI_WITH_V (1 << AARCH64_FFI_WITH_V_BIT)
+
+union _d
+{
+ UINT64 d;
+ UINT32 s[2];
+};
+
+struct call_context
+{
+ UINT64 x [AARCH64_N_XREG];
+ struct
+ {
+ union _d d[2];
+ } v [AARCH64_N_VREG];
+};
+
+#if defined (__clang__) && defined (__APPLE__)
+extern void
+sys_icache_invalidate (void *start, size_t len);
+#endif
+
+static inline void
+ffi_clear_cache (void *start, void *end)
+{
+#if defined (__clang__) && defined (__APPLE__)
+ sys_icache_invalidate (start, (char *)end - (char *)start);
+#elif defined (__GNUC__)
+ __builtin___clear_cache (start, end);
+#else
+#error "Missing builtin to flush instruction cache"
+#endif
+}
+
+static void *
+get_x_addr (struct call_context *context, unsigned n)
+{
+ return &context->x[n];
+}
+
+static void *
+get_s_addr (struct call_context *context, unsigned n)
+{
+#if defined __AARCH64EB__
+ return &context->v[n].d[1].s[1];
+#else
+ return &context->v[n].d[0].s[0];
+#endif
+}
+
+static void *
+get_d_addr (struct call_context *context, unsigned n)
+{
+#if defined __AARCH64EB__
+ return &context->v[n].d[1];
+#else
+ return &context->v[n].d[0];
+#endif
+}
+
+static void *
+get_v_addr (struct call_context *context, unsigned n)
+{
+ return &context->v[n];
+}
+
+/* Return the memory location at which a basic type would reside
+ were it to have been stored in register n. */
+
+static void *
+get_basic_type_addr (unsigned short type, struct call_context *context,
+ unsigned n)
+{
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ return get_s_addr (context, n);
+ case FFI_TYPE_DOUBLE:
+ return get_d_addr (context, n);
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+ return get_v_addr (context, n);
+#endif
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ return get_x_addr (context, n);
+ case FFI_TYPE_VOID:
+ return NULL;
+ default:
+ FFI_ASSERT (0);
+ return NULL;
+ }
+}
+
+/* Return the alignment width for each of the basic types. */
+
+static size_t
+get_basic_type_alignment (unsigned short type)
+{
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ return sizeof (UINT64);
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+ return sizeof (long double);
+#endif
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+#if defined (__APPLE__)
+ return sizeof (UINT8);
+#endif
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+#if defined (__APPLE__)
+ return sizeof (UINT16);
+#endif
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+#if defined (__APPLE__)
+ return sizeof (UINT32);
+#endif
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ return sizeof (UINT64);
+
+ default:
+ FFI_ASSERT (0);
+ return 0;
+ }
+}
+
+/* Return the size in bytes for each of the basic types. */
+
+static size_t
+get_basic_type_size (unsigned short type)
+{
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ return sizeof (UINT32);
+ case FFI_TYPE_DOUBLE:
+ return sizeof (UINT64);
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+ return sizeof (long double);
+#endif
+ case FFI_TYPE_UINT8:
+ return sizeof (UINT8);
+ case FFI_TYPE_SINT8:
+ return sizeof (SINT8);
+ case FFI_TYPE_UINT16:
+ return sizeof (UINT16);
+ case FFI_TYPE_SINT16:
+ return sizeof (SINT16);
+ case FFI_TYPE_UINT32:
+ return sizeof (UINT32);
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ return sizeof (SINT32);
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ return sizeof (UINT64);
+ case FFI_TYPE_SINT64:
+ return sizeof (SINT64);
+
+ default:
+ FFI_ASSERT (0);
+ return 0;
+ }
+}
+
+extern void
+ffi_call_SYSV (unsigned (*)(struct call_context *context, unsigned char *,
+ extended_cif *),
+ struct call_context *context,
+ extended_cif *,
+ size_t,
+ void (*fn)(void));
+
+extern void
+ffi_closure_SYSV (ffi_closure *);
+
+/* Test for an FFI floating point representation. */
+
+static unsigned
+is_floating_type (unsigned short type)
+{
+ return (type == FFI_TYPE_FLOAT || type == FFI_TYPE_DOUBLE
+ || type == FFI_TYPE_LONGDOUBLE);
+}
+
+/* Test for a homogeneous structure. */
+
+static unsigned short
+get_homogeneous_type (ffi_type *ty)
+{
+ if (ty->type == FFI_TYPE_STRUCT && ty->elements)
+ {
+ unsigned i;
+ unsigned short candidate_type
+ = get_homogeneous_type (ty->elements[0]);
+ for (i =1; ty->elements[i]; i++)
+ {
+ unsigned short iteration_type = 0;
+ /* If we have a nested struct, we must find its homogeneous type.
+ If that fits with our candidate type, we are still
+ homogeneous. */
+ if (ty->elements[i]->type == FFI_TYPE_STRUCT
+ && ty->elements[i]->elements)
+ {
+ iteration_type = get_homogeneous_type (ty->elements[i]);
+ }
+ else
+ {
+ iteration_type = ty->elements[i]->type;
+ }
+
+ /* If we are not homogeneous, return FFI_TYPE_STRUCT. */
+ if (candidate_type != iteration_type)
+ return FFI_TYPE_STRUCT;
+ }
+ return candidate_type;
+ }
+
+ /* Base case, we have no more levels of nesting, so we
+ are a basic type, and so, trivially homogeneous in that type. */
+ return ty->type;
+}
+
+/* Determine the number of elements within a STRUCT.
+
+ Note, we must handle nested structs.
+
+ If ty is not a STRUCT this function will return 0. */
+
+static unsigned
+element_count (ffi_type *ty)
+{
+ if (ty->type == FFI_TYPE_STRUCT && ty->elements)
+ {
+ unsigned n;
+ unsigned elems = 0;
+ for (n = 0; ty->elements[n]; n++)
+ {
+ if (ty->elements[n]->type == FFI_TYPE_STRUCT
+ && ty->elements[n]->elements)
+ elems += element_count (ty->elements[n]);
+ else
+ elems++;
+ }
+ return elems;
+ }
+ return 0;
+}
+
+/* Test for a homogeneous floating point aggregate.
+
+ A homogeneous floating point aggregate is a homogeneous aggregate of
+ a half- single- or double- precision floating point type with one
+ to four elements. Note that this includes nested structs of the
+ basic type. */
+
+static int
+is_hfa (ffi_type *ty)
+{
+ if (ty->type == FFI_TYPE_STRUCT
+ && ty->elements[0]
+ && is_floating_type (get_homogeneous_type (ty)))
+ {
+ unsigned n = element_count (ty);
+ return n >= 1 && n <= 4;
+ }
+ return 0;
+}
+
+/* Test if an ffi_type is a candidate for passing in a register.
+
+ This test does not check that sufficient registers of the
+ appropriate class are actually available, merely that IFF
+ sufficient registers are available then the argument will be passed
+ in register(s).
+
+ Note that an ffi_type that is deemed to be a register candidate
+ will always be returned in registers.
+
+ Returns 1 if a register candidate else 0. */
+
+static int
+is_register_candidate (ffi_type *ty)
+{
+ switch (ty->type)
+ {
+ case FFI_TYPE_VOID:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+#endif
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT64:
+ return 1;
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (ty))
+ {
+ return 1;
+ }
+ else if (ty->size > 16)
+ {
+ /* Too large. Will be replaced with a pointer to memory. The
+ pointer MAY be passed in a register, but the value will
+ not. This test specifically fails since the argument will
+ never be passed by value in registers. */
+ return 0;
+ }
+ else
+ {
+ /* Might be passed in registers depending on the number of
+ registers required. */
+ return (ty->size + 7) / 8 < N_X_ARG_REG;
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+
+ return 0;
+}
+
+/* Test if an ffi_type argument or result is a candidate for a vector
+ register. */
+
+static int
+is_v_register_candidate (ffi_type *ty)
+{
+ return is_floating_type (ty->type)
+ || (ty->type == FFI_TYPE_STRUCT && is_hfa (ty));
+}
+
+/* Representation of the procedure call argument marshalling
+ state.
+
+ The terse state variable names match the names used in the AARCH64
+ PCS. */
+
+struct arg_state
+{
+ unsigned ngrn; /* Next general-purpose register number. */
+ unsigned nsrn; /* Next vector register number. */
+ size_t nsaa; /* Next stack offset. */
+
+#if defined (__APPLE__)
+ unsigned allocating_variadic;
+#endif
+};
+
+/* Initialize a procedure call argument marshalling state. */
+static void
+arg_init (struct arg_state *state, size_t call_frame_size)
+{
+ state->ngrn = 0;
+ state->nsrn = 0;
+ state->nsaa = 0;
+
+#if defined (__APPLE__)
+ state->allocating_variadic = 0;
+#endif
+}
+
+/* Return the number of available consecutive core argument
+ registers. */
+
+static unsigned
+available_x (struct arg_state *state)
+{
+ return N_X_ARG_REG - state->ngrn;
+}
+
+/* Return the number of available consecutive vector argument
+ registers. */
+
+static unsigned
+available_v (struct arg_state *state)
+{
+ return N_V_ARG_REG - state->nsrn;
+}
+
+static void *
+allocate_to_x (struct call_context *context, struct arg_state *state)
+{
+ FFI_ASSERT (state->ngrn < N_X_ARG_REG);
+ return get_x_addr (context, (state->ngrn)++);
+}
+
+static void *
+allocate_to_s (struct call_context *context, struct arg_state *state)
+{
+ FFI_ASSERT (state->nsrn < N_V_ARG_REG);
+ return get_s_addr (context, (state->nsrn)++);
+}
+
+static void *
+allocate_to_d (struct call_context *context, struct arg_state *state)
+{
+ FFI_ASSERT (state->nsrn < N_V_ARG_REG);
+ return get_d_addr (context, (state->nsrn)++);
+}
+
+static void *
+allocate_to_v (struct call_context *context, struct arg_state *state)
+{
+ FFI_ASSERT (state->nsrn < N_V_ARG_REG);
+ return get_v_addr (context, (state->nsrn)++);
+}
+
+/* Allocate an aligned slot on the stack and return a pointer to it. */
+static void *
+allocate_to_stack (struct arg_state *state, void *stack, size_t alignment,
+ size_t size)
+{
+ void *allocation;
+
+ /* Round up the NSAA to the larger of 8 or the natural
+ alignment of the argument's type. */
+ state->nsaa = ALIGN (state->nsaa, alignment);
+ state->nsaa = ALIGN (state->nsaa, alignment);
+#if defined (__APPLE__)
+ if (state->allocating_variadic)
+ state->nsaa = ALIGN (state->nsaa, 8);
+#else
+ state->nsaa = ALIGN (state->nsaa, 8);
+#endif
+
+ allocation = stack + state->nsaa;
+
+ state->nsaa += size;
+ return allocation;
+}
+
+static void
+copy_basic_type (void *dest, void *source, unsigned short type)
+{
+ /* This is necessary to ensure that basic types are copied
+ sign extended to 64-bits as libffi expects. */
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ *(float *) dest = *(float *) source;
+ break;
+ case FFI_TYPE_DOUBLE:
+ *(double *) dest = *(double *) source;
+ break;
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+ *(long double *) dest = *(long double *) source;
+ break;
+#endif
+ case FFI_TYPE_UINT8:
+ *(ffi_arg *) dest = *(UINT8 *) source;
+ break;
+ case FFI_TYPE_SINT8:
+ *(ffi_sarg *) dest = *(SINT8 *) source;
+ break;
+ case FFI_TYPE_UINT16:
+ *(ffi_arg *) dest = *(UINT16 *) source;
+ break;
+ case FFI_TYPE_SINT16:
+ *(ffi_sarg *) dest = *(SINT16 *) source;
+ break;
+ case FFI_TYPE_UINT32:
+ *(ffi_arg *) dest = *(UINT32 *) source;
+ break;
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ *(ffi_sarg *) dest = *(SINT32 *) source;
+ break;
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ *(ffi_arg *) dest = *(UINT64 *) source;
+ break;
+ case FFI_TYPE_SINT64:
+ *(ffi_sarg *) dest = *(SINT64 *) source;
+ break;
+ case FFI_TYPE_VOID:
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ }
+}
+
+static void
+copy_hfa_to_reg_or_stack (void *memory,
+ ffi_type *ty,
+ struct call_context *context,
+ unsigned char *stack,
+ struct arg_state *state)
+{
+ unsigned elems = element_count (ty);
+ if (available_v (state) < elems)
+ {
+ /* There are insufficient V registers. Further V register allocations
+ are prevented, the NSAA is adjusted (by allocate_to_stack ())
+ and the argument is copied to memory at the adjusted NSAA. */
+ state->nsrn = N_V_ARG_REG;
+ memcpy (allocate_to_stack (state, stack, ty->alignment, ty->size),
+ memory,
+ ty->size);
+ }
+ else
+ {
+ int i;
+ unsigned short type = get_homogeneous_type (ty);
+ for (i = 0; i < elems; i++)
+ {
+ void *reg = allocate_to_v (context, state);
+ copy_basic_type (reg, memory, type);
+ memory += get_basic_type_size (type);
+ }
+ }
+}
+
+/* Either allocate an appropriate register for the argument type, or if
+ none are available, allocate a stack slot and return a pointer
+ to the allocated space. */
+
+static void *
+allocate_to_register_or_stack (struct call_context *context,
+ unsigned char *stack,
+ struct arg_state *state,
+ unsigned short type)
+{
+ size_t alignment = get_basic_type_alignment (type);
+ size_t size = alignment;
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ /* This is the only case for which the allocated stack size
+ should not match the alignment of the type. */
+ size = sizeof (UINT32);
+ /* Fall through. */
+ case FFI_TYPE_DOUBLE:
+ if (state->nsrn < N_V_ARG_REG)
+ return allocate_to_d (context, state);
+ state->nsrn = N_V_ARG_REG;
+ break;
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+ if (state->nsrn < N_V_ARG_REG)
+ return allocate_to_v (context, state);
+ state->nsrn = N_V_ARG_REG;
+ break;
+#endif
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ if (state->ngrn < N_X_ARG_REG)
+ return allocate_to_x (context, state);
+ state->ngrn = N_X_ARG_REG;
+ break;
+ default:
+ FFI_ASSERT (0);
+ }
+
+ return allocate_to_stack (state, stack, alignment, size);
+}
+
+/* Copy a value to an appropriate register, or if none are
+ available, to the stack. */
+
+static void
+copy_to_register_or_stack (struct call_context *context,
+ unsigned char *stack,
+ struct arg_state *state,
+ void *value,
+ unsigned short type)
+{
+ copy_basic_type (
+ allocate_to_register_or_stack (context, stack, state, type),
+ value,
+ type);
+}
+
+/* Marshall the arguments from FFI representation to procedure call
+ context and stack. */
+
+static unsigned
+aarch64_prep_args (struct call_context *context, unsigned char *stack,
+ extended_cif *ecif)
+{
+ int i;
+ struct arg_state state;
+
+ arg_init (&state, ALIGN(ecif->cif->bytes, 16));
+
+ for (i = 0; i < ecif->cif->nargs; i++)
+ {
+ ffi_type *ty = ecif->cif->arg_types[i];
+ switch (ty->type)
+ {
+ case FFI_TYPE_VOID:
+ FFI_ASSERT (0);
+ break;
+
+ /* If the argument is a basic type the argument is allocated to an
+ appropriate register, or if none are available, to the stack. */
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+#endif
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ copy_to_register_or_stack (context, stack, &state,
+ ecif->avalue[i], ty->type);
+ break;
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (ty))
+ {
+ copy_hfa_to_reg_or_stack (ecif->avalue[i], ty, context,
+ stack, &state);
+ }
+ else if (ty->size > 16)
+ {
+ /* If the argument is a composite type that is larger than 16
+ bytes, then the argument has been copied to memory, and
+ the argument is replaced by a pointer to the copy. */
+
+ copy_to_register_or_stack (context, stack, &state,
+ &(ecif->avalue[i]), FFI_TYPE_POINTER);
+ }
+ else if (available_x (&state) >= (ty->size + 7) / 8)
+ {
+ /* If the argument is a composite type and the size in
+ double-words is not more than the number of available
+ X registers, then the argument is copied into consecutive
+ X registers. */
+ int j;
+ for (j = 0; j < (ty->size + 7) / 8; j++)
+ {
+ memcpy (allocate_to_x (context, &state),
+ &(((UINT64 *) ecif->avalue[i])[j]),
+ sizeof (UINT64));
+ }
+ }
+ else
+ {
+ /* Otherwise, there are insufficient X registers. Further X
+ register allocations are prevented, the NSAA is adjusted
+ (by allocate_to_stack ()) and the argument is copied to
+ memory at the adjusted NSAA. */
+ state.ngrn = N_X_ARG_REG;
+
+ memcpy (allocate_to_stack (&state, stack, ty->alignment,
+ ty->size), ecif->avalue + i, ty->size);
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+
+#if defined (__APPLE__)
+ if (i + 1 == ecif->cif->aarch64_nfixedargs)
+ {
+ state.ngrn = N_X_ARG_REG;
+ state.nsrn = N_V_ARG_REG;
+
+ state.allocating_variadic = 1;
+ }
+#endif
+ }
+
+ return ecif->cif->aarch64_flags;
+}
+
+ffi_status
+ffi_prep_cif_machdep (ffi_cif *cif)
+{
+ /* Round the stack up to a multiple of the stack alignment requirement. */
+ cif->bytes =
+ (cif->bytes + (AARCH64_STACK_ALIGN - 1)) & ~ (AARCH64_STACK_ALIGN - 1);
+
+ /* Initialize our flags. We are interested if this CIF will touch a
+ vector register, if so we will enable context save and load to
+ those registers, otherwise not. This is intended to be friendly
+ to lazy float context switching in the kernel. */
+ cif->aarch64_flags = 0;
+
+ if (is_v_register_candidate (cif->rtype))
+ {
+ cif->aarch64_flags |= AARCH64_FFI_WITH_V;
+ }
+ else
+ {
+ int i;
+ for (i = 0; i < cif->nargs; i++)
+ if (is_v_register_candidate (cif->arg_types[i]))
+ {
+ cif->aarch64_flags |= AARCH64_FFI_WITH_V;
+ break;
+ }
+ }
+
+ return FFI_OK;
+}
+
+#if defined (__APPLE__)
+
+/* Perform Apple-specific cif processing for variadic calls */
+ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs)
+{
+ cif->aarch64_nfixedargs = nfixedargs;
+
+ return ffi_prep_cif_machdep(cif);
+}
+
+#endif
+
+/* Call a function with the provided arguments and capture the return
+ value. */
+void
+ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
+{
+ extended_cif ecif;
+
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+ ecif.rvalue = rvalue;
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ {
+ struct call_context context;
+ size_t stack_bytes;
+
+ /* Figure out the total amount of stack space we need, the
+ above call frame space needs to be 16 bytes aligned to
+ ensure correct alignment of the first object inserted in
+ that space hence the ALIGN applied to cif->bytes.*/
+ stack_bytes = ALIGN(cif->bytes, 16);
+
+ memset (&context, 0, sizeof (context));
+ if (is_register_candidate (cif->rtype))
+ {
+ ffi_call_SYSV (aarch64_prep_args, &context, &ecif, stack_bytes, fn);
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_VOID:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+#endif
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT64:
+ {
+ void *addr = get_basic_type_addr (cif->rtype->type,
+ &context, 0);
+ copy_basic_type (rvalue, addr, cif->rtype->type);
+ break;
+ }
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (cif->rtype))
+ {
+ int j;
+ unsigned short type = get_homogeneous_type (cif->rtype);
+ unsigned elems = element_count (cif->rtype);
+ for (j = 0; j < elems; j++)
+ {
+ void *reg = get_basic_type_addr (type, &context, j);
+ copy_basic_type (rvalue, reg, type);
+ rvalue += get_basic_type_size (type);
+ }
+ }
+ else if ((cif->rtype->size + 7) / 8 < N_X_ARG_REG)
+ {
+ size_t size = ALIGN (cif->rtype->size, sizeof (UINT64));
+ memcpy (rvalue, get_x_addr (&context, 0), size);
+ }
+ else
+ {
+ FFI_ASSERT (0);
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+ else
+ {
+ memcpy (get_x_addr (&context, 8), &rvalue, sizeof (UINT64));
+ ffi_call_SYSV (aarch64_prep_args, &context, &ecif,
+ stack_bytes, fn);
+ }
+ break;
+ }
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+}
+
+static unsigned char trampoline [] =
+{ 0x70, 0x00, 0x00, 0x58, /* ldr x16, 1f */
+ 0x91, 0x00, 0x00, 0x10, /* adr x17, 2f */
+ 0x00, 0x02, 0x1f, 0xd6 /* br x16 */
+};
+
+/* Build a trampoline. */
+
+#define FFI_INIT_TRAMPOLINE(TRAMP,FUN,CTX,FLAGS) \
+ ({unsigned char *__tramp = (unsigned char*)(TRAMP); \
+ UINT64 __fun = (UINT64)(FUN); \
+ UINT64 __ctx = (UINT64)(CTX); \
+ UINT64 __flags = (UINT64)(FLAGS); \
+ memcpy (__tramp, trampoline, sizeof (trampoline)); \
+ memcpy (__tramp + 12, &__fun, sizeof (__fun)); \
+ memcpy (__tramp + 20, &__ctx, sizeof (__ctx)); \
+ memcpy (__tramp + 28, &__flags, sizeof (__flags)); \
+ ffi_clear_cache(__tramp, __tramp + FFI_TRAMPOLINE_SIZE); \
+ })
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ void *codeloc)
+{
+ if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
+
+ FFI_INIT_TRAMPOLINE (&closure->tramp[0], &ffi_closure_SYSV, codeloc,
+ cif->aarch64_flags);
+
+ closure->cif = cif;
+ closure->user_data = user_data;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+/* Primary handler to setup and invoke a function within a closure.
+
+ A closure when invoked enters via the assembler wrapper
+ ffi_closure_SYSV(). The wrapper allocates a call context on the
+ stack, saves the interesting registers (from the perspective of
+ the calling convention) into the context then passes control to
+ ffi_closure_SYSV_inner() passing the saved context and a pointer to
+ the stack at the point ffi_closure_SYSV() was invoked.
+
+ On the return path the assembler wrapper will reload call context
+ registers.
+
+ ffi_closure_SYSV_inner() marshalls the call context into ffi value
+ descriptors, invokes the wrapped function, then marshalls the return
+ value back into the call context. */
+
+void FFI_HIDDEN
+ffi_closure_SYSV_inner (ffi_closure *closure, struct call_context *context,
+ void *stack)
+{
+ ffi_cif *cif = closure->cif;
+ void **avalue = (void**) alloca (cif->nargs * sizeof (void*));
+ void *rvalue = NULL;
+ int i;
+ struct arg_state state;
+
+ arg_init (&state, ALIGN(cif->bytes, 16));
+
+ for (i = 0; i < cif->nargs; i++)
+ {
+ ffi_type *ty = cif->arg_types[i];
+
+ switch (ty->type)
+ {
+ case FFI_TYPE_VOID:
+ FFI_ASSERT (0);
+ break;
+
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+ avalue[i] = allocate_to_register_or_stack (context, stack,
+ &state, ty->type);
+ break;
+#endif
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (ty))
+ {
+ unsigned n = element_count (ty);
+ if (available_v (&state) < n)
+ {
+ state.nsrn = N_V_ARG_REG;
+ avalue[i] = allocate_to_stack (&state, stack, ty->alignment,
+ ty->size);
+ }
+ else
+ {
+ switch (get_homogeneous_type (ty))
+ {
+ case FFI_TYPE_FLOAT:
+ {
+ /* Eeek! We need a pointer to the structure,
+ however the homogeneous float elements are
+ being passed in individual S registers,
+ therefore the structure is not represented as
+ a contiguous sequence of bytes in our saved
+ register context. We need to fake up a copy
+ of the structure laid out in memory
+ correctly. The fake can be tossed once the
+ closure function has returned hence alloca()
+ is sufficient. */
+ int j;
+ UINT32 *p = avalue[i] = alloca (ty->size);
+ for (j = 0; j < element_count (ty); j++)
+ memcpy (&p[j],
+ allocate_to_s (context, &state),
+ sizeof (*p));
+ break;
+ }
+
+ case FFI_TYPE_DOUBLE:
+ {
+ /* Eeek! We need a pointer to the structure,
+ however the homogeneous float elements are
+ being passed in individual S registers,
+ therefore the structure is not represented as
+ a contiguous sequence of bytes in our saved
+ register context. We need to fake up a copy
+ of the structure laid out in memory
+ correctly. The fake can be tossed once the
+ closure function has returned hence alloca()
+ is sufficient. */
+ int j;
+ UINT64 *p = avalue[i] = alloca (ty->size);
+ for (j = 0; j < element_count (ty); j++)
+ memcpy (&p[j],
+ allocate_to_d (context, &state),
+ sizeof (*p));
+ break;
+ }
+
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+ memcpy (&avalue[i],
+ allocate_to_v (context, &state),
+ sizeof (*avalue));
+ break;
+#endif
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+ }
+ else if (ty->size > 16)
+ {
+ /* Replace Composite type of size greater than 16 with a
+ pointer. */
+ memcpy (&avalue[i],
+ allocate_to_register_or_stack (context, stack,
+ &state, FFI_TYPE_POINTER),
+ sizeof (avalue[i]));
+ }
+ else if (available_x (&state) >= (ty->size + 7) / 8)
+ {
+ avalue[i] = get_x_addr (context, state.ngrn);
+ state.ngrn += (ty->size + 7) / 8;
+ }
+ else
+ {
+ state.ngrn = N_X_ARG_REG;
+
+ avalue[i] = allocate_to_stack (&state, stack, ty->alignment,
+ ty->size);
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+
+ /* Figure out where the return value will be passed, either in
+ registers or in a memory block allocated by the caller and passed
+ in x8. */
+
+ if (is_register_candidate (cif->rtype))
+ {
+ /* Register candidates are *always* returned in registers. */
+
+ /* Allocate a scratchpad for the return value, we will let the
+ callee scrible the result into the scratch pad then move the
+ contents into the appropriate return value location for the
+ call convention. */
+ rvalue = alloca (cif->rtype->size);
+ (closure->fun) (cif, rvalue, avalue, closure->user_data);
+
+ /* Copy the return value into the call context so that it is returned
+ as expected to our caller. */
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_VOID:
+ break;
+
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+#endif
+ {
+ void *addr = get_basic_type_addr (cif->rtype->type, context, 0);
+ copy_basic_type (addr, rvalue, cif->rtype->type);
+ break;
+ }
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (cif->rtype))
+ {
+ int j;
+ unsigned short type = get_homogeneous_type (cif->rtype);
+ unsigned elems = element_count (cif->rtype);
+ for (j = 0; j < elems; j++)
+ {
+ void *reg = get_basic_type_addr (type, context, j);
+ copy_basic_type (reg, rvalue, type);
+ rvalue += get_basic_type_size (type);
+ }
+ }
+ else if ((cif->rtype->size + 7) / 8 < N_X_ARG_REG)
+ {
+ size_t size = ALIGN (cif->rtype->size, sizeof (UINT64)) ;
+ memcpy (get_x_addr (context, 0), rvalue, size);
+ }
+ else
+ {
+ FFI_ASSERT (0);
+ }
+ break;
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+ else
+ {
+ memcpy (&rvalue, get_x_addr (context, 8), sizeof (UINT64));
+ (closure->fun) (cif, rvalue, avalue, closure->user_data);
+ }
+}
+
diff -ruN Python-2.7.3.orig/Modules/_ctypes/libffi/src/aarch64/ffitarget.h Python-2.7.3/Modules/_ctypes/libffi/src/aarch64/ffitarget.h
--- Python-2.7.3.orig/Modules/_ctypes/libffi/src/aarch64/ffitarget.h 1970-01-01 01:00:00.000000000 +0100
+++ Python-2.7.3/Modules/_ctypes/libffi/src/aarch64/ffitarget.h 2014-04-25 19:45:13.000000000 +0200
@@ -0,0 +1,63 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef LIBFFI_ASM
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi
+ {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
+ } ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_TRAMPOLINE_SIZE 36
+#define FFI_NATIVE_RAW_API 0
+
+/* ---- Internal ---- */
+
+#if defined (__APPLE__)
+#define FFI_TARGET_SPECIFIC_VARIADIC
+#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_flags; unsigned aarch64_nfixedargs
+#else
+#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_flags
+#endif
+
+#define AARCH64_FFI_WITH_V_BIT 0
+
+#define AARCH64_N_XREG 32
+#define AARCH64_N_VREG 32
+#define AARCH64_CALL_CONTEXT_SIZE (AARCH64_N_XREG * 8 + AARCH64_N_VREG * 16)
+
+#endif
diff -ruN Python-2.7.3.orig/Modules/_ctypes/libffi/src/aarch64/sysv.S Python-2.7.3/Modules/_ctypes/libffi/src/aarch64/sysv.S
--- Python-2.7.3.orig/Modules/_ctypes/libffi/src/aarch64/sysv.S 1970-01-01 01:00:00.000000000 +0100
+++ Python-2.7.3/Modules/_ctypes/libffi/src/aarch64/sysv.S 2014-04-25 19:45:13.000000000 +0200
@@ -0,0 +1,333 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+#ifdef HAVE_MACHINE_ASM_H
+#include <machine/asm.h>
+#else
+#ifdef __USER_LABEL_PREFIX__
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+#else
+#define CNAME(x) x
+#endif
+#endif
+
+#define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
+#define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off
+#define cfi_restore(reg) .cfi_restore reg
+#define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg
+
+ .text
+ .globl CNAME(ffi_call_SYSV)
+#ifdef __ELF__
+ .type CNAME(ffi_call_SYSV), #function
+#endif
+#ifdef __APPLE__
+ .align 2
+#endif
+
+/* ffi_call_SYSV()
+
+ Create a stack frame, setup an argument context, call the callee
+ and extract the result.
+
+ The maximum required argument stack size is provided,
+ ffi_call_SYSV() allocates that stack space then calls the
+ prepare_fn to populate register context and stack. The
+ argument passing registers are loaded from the register
+ context and the callee called, on return the register passing
+ register are saved back to the context. Our caller will
+ extract the return value from the final state of the saved
+ register context.
+
+ Prototype:
+
+ extern unsigned
+ ffi_call_SYSV (void (*)(struct call_context *context, unsigned char *,
+ extended_cif *),
+ struct call_context *context,
+ extended_cif *,
+ size_t required_stack_size,
+ void (*fn)(void));
+
+ Therefore on entry we have:
+
+ x0 prepare_fn
+ x1 &context
+ x2 &ecif
+ x3 bytes
+ x4 fn
+
+ This function uses the following stack frame layout:
+
+ ==
+ saved x30(lr)
+ x29(fp)-> saved x29(fp)
+ saved x24
+ saved x23
+ saved x22
+ sp' -> saved x21
+ ...
+ sp -> (constructed callee stack arguments)
+ ==
+
+ Voila! */
+
+#define ffi_call_SYSV_FS (8 * 4)
+
+ .cfi_startproc
+CNAME(ffi_call_SYSV):
+ stp x29, x30, [sp, #-16]!
+ cfi_adjust_cfa_offset (16)
+ cfi_rel_offset (x29, 0)
+ cfi_rel_offset (x30, 8)
+
+ mov x29, sp
+ cfi_def_cfa_register (x29)
+ sub sp, sp, #ffi_call_SYSV_FS
+
+ stp x21, x22, [sp, #0]
+ cfi_rel_offset (x21, 0 - ffi_call_SYSV_FS)
+ cfi_rel_offset (x22, 8 - ffi_call_SYSV_FS)
+
+ stp x23, x24, [sp, #16]
+ cfi_rel_offset (x23, 16 - ffi_call_SYSV_FS)
+ cfi_rel_offset (x24, 24 - ffi_call_SYSV_FS)
+
+ mov x21, x1
+ mov x22, x2
+ mov x24, x4
+
+ /* Allocate the stack space for the actual arguments, many
+ arguments will be passed in registers, but we assume
+ worst case and allocate sufficient stack for ALL of
+ the arguments. */
+ sub sp, sp, x3
+
+ /* unsigned (*prepare_fn) (struct call_context *context,
+ unsigned char *stack, extended_cif *ecif);
+ */
+ mov x23, x0
+ mov x0, x1
+ mov x1, sp
+ /* x2 already in place */
+ blr x23
+
+ /* Preserve the flags returned. */
+ mov x23, x0
+
+ /* Figure out if we should touch the vector registers. */
+ tbz x23, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Load the vector argument passing registers. */
+ ldp q0, q1, [x21, #8*32 + 0]
+ ldp q2, q3, [x21, #8*32 + 32]
+ ldp q4, q5, [x21, #8*32 + 64]
+ ldp q6, q7, [x21, #8*32 + 96]
+1:
+ /* Load the core argument passing registers. */
+ ldp x0, x1, [x21, #0]
+ ldp x2, x3, [x21, #16]
+ ldp x4, x5, [x21, #32]
+ ldp x6, x7, [x21, #48]
+
+ /* Don't forget x8 which may be holding the address of a return buffer.
+ */
+ ldr x8, [x21, #8*8]
+
+ blr x24
+
+ /* Save the core argument passing registers. */
+ stp x0, x1, [x21, #0]
+ stp x2, x3, [x21, #16]
+ stp x4, x5, [x21, #32]
+ stp x6, x7, [x21, #48]
+
+ /* Note nothing useful ever comes back in x8! */
+
+ /* Figure out if we should touch the vector registers. */
+ tbz x23, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Save the vector argument passing registers. */
+ stp q0, q1, [x21, #8*32 + 0]
+ stp q2, q3, [x21, #8*32 + 32]
+ stp q4, q5, [x21, #8*32 + 64]
+ stp q6, q7, [x21, #8*32 + 96]
+1:
+ /* All done, unwind our stack frame. */
+ ldp x21, x22, [x29, # - ffi_call_SYSV_FS]
+ cfi_restore (x21)
+ cfi_restore (x22)
+
+ ldp x23, x24, [x29, # - ffi_call_SYSV_FS + 16]
+ cfi_restore (x23)
+ cfi_restore (x24)
+
+ mov sp, x29
+ cfi_def_cfa_register (sp)
+
+ ldp x29, x30, [sp], #16
+ cfi_adjust_cfa_offset (-16)
+ cfi_restore (x29)
+ cfi_restore (x30)
+
+ ret
+
+ .cfi_endproc
+#ifdef __ELF__
+ .size CNAME(ffi_call_SYSV), .-CNAME(ffi_call_SYSV)
+#endif
+
+#define ffi_closure_SYSV_FS (8 * 2 + AARCH64_CALL_CONTEXT_SIZE)
+
+/* ffi_closure_SYSV
+
+ Closure invocation glue. This is the low level code invoked directly by
+ the closure trampoline to setup and call a closure.
+
+ On entry x17 points to a struct trampoline_data, x16 has been clobbered
+ all other registers are preserved.
+
+ We allocate a call context and save the argument passing registers,
+ then invoked the generic C ffi_closure_SYSV_inner() function to do all
+ the real work, on return we load the result passing registers back from
+ the call context.
+
+ On entry
+
+ extern void
+ ffi_closure_SYSV (struct trampoline_data *);
+
+ struct trampoline_data
+ {
+ UINT64 *ffi_closure;
+ UINT64 flags;
+ };
+
+ This function uses the following stack frame layout:
+
+ ==
+ saved x30(lr)
+ x29(fp)-> saved x29(fp)
+ saved x22
+ saved x21
+ ...
+ sp -> call_context
+ ==
+
+ Voila! */
+
+ .text
+ .globl CNAME(ffi_closure_SYSV)
+#ifdef __APPLE__
+ .align 2
+#endif
+ .cfi_startproc
+CNAME(ffi_closure_SYSV):
+ stp x29, x30, [sp, #-16]!
+ cfi_adjust_cfa_offset (16)
+ cfi_rel_offset (x29, 0)
+ cfi_rel_offset (x30, 8)
+
+ mov x29, sp
+ cfi_def_cfa_register (x29)
+
+ sub sp, sp, #ffi_closure_SYSV_FS
+
+ stp x21, x22, [x29, #-16]
+ cfi_rel_offset (x21, -16)
+ cfi_rel_offset (x22, -8)
+
+ /* Load x21 with &call_context. */
+ mov x21, sp
+ /* Preserve our struct trampoline_data * */
+ mov x22, x17
+
+ /* Save the rest of the argument passing registers. */
+ stp x0, x1, [x21, #0]
+ stp x2, x3, [x21, #16]
+ stp x4, x5, [x21, #32]
+ stp x6, x7, [x21, #48]
+ /* Don't forget we may have been given a result scratch pad address.
+ */
+ str x8, [x21, #64]
+
+ /* Figure out if we should touch the vector registers. */
+ ldr x0, [x22, #8]
+ tbz x0, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Save the argument passing vector registers. */
+ stp q0, q1, [x21, #8*32 + 0]
+ stp q2, q3, [x21, #8*32 + 32]
+ stp q4, q5, [x21, #8*32 + 64]
+ stp q6, q7, [x21, #8*32 + 96]
+1:
+ /* Load &ffi_closure.. */
+ ldr x0, [x22, #0]
+ mov x1, x21
+ /* Compute the location of the stack at the point that the
+ trampoline was called. */
+ add x2, x29, #16
+
+ bl CNAME(ffi_closure_SYSV_inner)
+
+ /* Figure out if we should touch the vector registers. */
+ ldr x0, [x22, #8]
+ tbz x0, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Load the result passing vector registers. */
+ ldp q0, q1, [x21, #8*32 + 0]
+ ldp q2, q3, [x21, #8*32 + 32]
+ ldp q4, q5, [x21, #8*32 + 64]
+ ldp q6, q7, [x21, #8*32 + 96]
+1:
+ /* Load the result passing core registers. */
+ ldp x0, x1, [x21, #0]
+ ldp x2, x3, [x21, #16]
+ ldp x4, x5, [x21, #32]
+ ldp x6, x7, [x21, #48]
+ /* Note nothing useful is returned in x8. */
+
+ /* We are done, unwind our frame. */
+ ldp x21, x22, [x29, #-16]
+ cfi_restore (x21)
+ cfi_restore (x22)
+
+ mov sp, x29
+ cfi_def_cfa_register (sp)
+
+ ldp x29, x30, [sp], #16
+ cfi_adjust_cfa_offset (-16)
+ cfi_restore (x29)
+ cfi_restore (x30)
+
+ ret
+ .cfi_endproc
+#ifdef __ELF__
+ .size CNAME(ffi_closure_SYSV), .-CNAME(ffi_closure_SYSV)
+#endif
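
As a hedged sanity check of the small-aggregate paths in ffi.c above: div() from libc returns a two-int struct by value, so the result is a register candidate, comes back in x0, and is copied out by the FFI_TYPE_STRUCT branch of ffi_call(). A minimal sketch (library lookup via ctypes.util is an illustrative choice):

# Small-struct return: div_t (two ints, 8 bytes) is a register candidate,
# so the callee returns it in x0 and ffi_call()'s struct branch memcpy()s
# it into the ctypes result.
from ctypes import CDLL, Structure, c_int
from ctypes.util import find_library

class div_t(Structure):
    _fields_ = [("quot", c_int), ("rem", c_int)]

libc = CDLL(find_library("c"))
libc.div.argtypes = [c_int, c_int]
libc.div.restype = div_t

r = libc.div(7, 3)
print(r.quot, r.rem)  # 2 1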
diff --git a/recipes-devtools/python/python_2.7.3.bbappend b/recipes-devtools/python/python_2.7.3.bbappend
new file mode 100644
index 0000000..b276813
--- /dev/null
+++ b/recipes-devtools/python/python_2.7.3.bbappend
@@ -0,0 +1,6 @@
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"

SRC_URI += "\
 file://ctypes-libffi-aarch64.patch \
 file://libffi-aarch64.patch \
"