Red Hat Bugzilla – Attachment 832759 Details for Bug 1038243: xulrunner does not build for aarch64
[patch] fix for AArch64 architecture
xulrunner.diff (text/plain), 81.46 KB, created by Marcin Juszkiewicz on 2013-12-04 17:03:05 UTC
Description: fix for AArch64 architecture
Filename: xulrunner.diff
MIME Type: text/plain
Creator: Marcin Juszkiewicz
Created: 2013-12-04 17:03:05 UTC
Size: 81.46 KB
Flags: patch | obsolete
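The attached diff touches architecture-detection logic across the tree (configure.in, chromium_types.h, build_config.h, the WebRTC typedefs, jemalloc, pratom.h) and adds complete AArch64 ports of the bundled libffi and of xptcall. As a minimal sketch of the recurring pattern, the standalone C program below mirrors the kind of #elif defined(__aarch64__) branch the patch adds to several headers; it is illustration only and not part of the patch, and the macro names are merely modelled on the chromium_types.h hunk.

#include <stdio.h>

/* Sketch of the architecture-detection branches the patch introduces so that
   AArch64 builds take a defined path instead of falling through to the
   trailing #error / #warning. */
#if defined(__x86_64__)
# define ARCH_CPU_X86_64 1
# define ARCH_CPU_64_BITS 1
#elif defined(__aarch64__)      /* the branch added by the patch */
# define ARCH_CPU_AARCH64 1
# define ARCH_CPU_64_BITS 1
#else
# error "Please add support for your architecture."
#endif

int main(void)
{
#if defined(ARCH_CPU_AARCH64)
    puts("configured for AArch64 (64-bit)");
#else
    puts("configured for a different 64-bit architecture");
#endif
    return 0;
}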
>diff --git a/xulrunner-aarch64.patch b/xulrunner-aarch64.patch >new file mode 100644 >index 0000000..3d82158 >--- /dev/null >+++ b/xulrunner-aarch64.patch >@@ -0,0 +1,2412 @@ >+--- >+ configure.in | 4 >+ gfx/ycbcr/chromium_types.h | 4 >+ ipc/chromium/src/build/build_config.h | 3 >+ ipc/chromium/src/third_party/libevent/epoll_sub.c | 13 >+ js/src/assembler/wtf/Platform.h | 4 >+ js/src/configure.in | 5 >+ js/src/ctypes/libffi/Makefile.am | 4 >+ js/src/ctypes/libffi/Makefile.in | 34 >+ js/src/ctypes/libffi/configure | 18 >+ js/src/ctypes/libffi/configure.ac | 5 >+ js/src/ctypes/libffi/src/aarch64/ffi.c | 1076 ++++++++++ >+ js/src/ctypes/libffi/src/aarch64/ffitarget.h | 59 >+ js/src/ctypes/libffi/src/aarch64/sysv.S | 307 ++ >+ media/webrtc/trunk/webrtc/typedefs.h | 5 >+ memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in | 3 >+ memory/mozjemalloc/jemalloc.c | 2 >+ mfbt/tests/TestPoisonArea.cpp | 3 >+ nsprpub/pr/include/pratom.h | 2 >+ toolkit/crashreporter/google-breakpad/src/common/linux/dump_symbols.cc | 1 >+ xpcom/reflect/xptcall/src/md/unix/Makefile.in | 14 >+ xpcom/reflect/xptcall/src/md/unix/xptcinvoke_aarch64.cpp | 138 + >+ xpcom/reflect/xptcall/src/md/unix/xptcinvoke_asm_aarch64.s | 67 >+ xpcom/reflect/xptcall/src/md/unix/xptcstubs_aarch64.cpp | 210 + >+ xpcom/reflect/xptcall/src/md/unix/xptcstubs_asm_aarch64.s | 39 >+ 24 files changed, 2013 insertions(+), 7 deletions(-) >+ >+Index: mozilla-release/configure.in >+=================================================================== >+--- mozilla-release.orig/configure.in >++++ mozilla-release/configure.in >+@@ -1127,6 +1127,10 @@ arm*) >+ mips|mipsel) >+ CPU_ARCH="mips" >+ ;; >++ >++aarch64*) >++ CPU_ARCH=aarch64 >++ ;; >+ esac >+ >+ if test -z "$OS_TARGET"; then >+Index: mozilla-release/gfx/ycbcr/chromium_types.h >+=================================================================== >+--- mozilla-release.orig/gfx/ycbcr/chromium_types.h >++++ mozilla-release/gfx/ycbcr/chromium_types.h >+@@ -58,6 +58,10 @@ typedef uint32_t uint32; >+ #define ARCH_CPU_S390_FAMILY 1 >+ #define ARCH_CPU_S390 1 >+ #define ARCH_CPU_32_BITS 1 >++#elif defined(__aarch64__) >++#define ARCH_CPU_AARCH64_FAMILY 1 >++#define ARCH_CPU_AARCH64 1 >++#define ARCH_CPU_64_BITS 1 >+ #else >+ #warning Please add support for your architecture in chromium_types.h >+ #endif >+Index: mozilla-release/ipc/chromium/src/build/build_config.h >+=================================================================== >+--- mozilla-release.orig/ipc/chromium/src/build/build_config.h >++++ mozilla-release/ipc/chromium/src/build/build_config.h >+@@ -102,6 +102,9 @@ >+ #elif defined(__alpha__) >+ #define ARCH_CPU_ALPHA 1 >+ #define ARCH_CPU_64_BITS 1 >++#elif defined(__aarch64__) >++#define ARCH_CPU_AARCH64 1 >++#define ARCH_CPU_64_BITS 1 >+ #else >+ #error Please add support for your architecture in build/build_config.h >+ #endif >+Index: mozilla-release/ipc/chromium/src/third_party/libevent/epoll_sub.c >+=================================================================== >+--- mozilla-release.orig/ipc/chromium/src/third_party/libevent/epoll_sub.c >++++ mozilla-release/ipc/chromium/src/third_party/libevent/epoll_sub.c >+@@ -31,11 +31,20 @@ >+ #include <sys/syscall.h> >+ #include <sys/epoll.h> >+ #include <unistd.h> >++#include <errno.h> >+ >+ int >+ epoll_create(int size) >+ { >++#if !defined(__NR_epoll_create) && defined(__NR_epoll_create1) >++ if (size <= 0) { >++ errno = EINVAL; >++ return -1; >++ } >++ return (syscall(__NR_epoll_create1, 0)); >++#else >+ return 
(syscall(__NR_epoll_create, size)); >++#endif >+ } >+ >+ int >+@@ -48,5 +57,9 @@ epoll_ctl(int epfd, int op, int fd, stru >+ int >+ epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout) >+ { >++#if !defined(__NR_epoll_wait) && defined(__NR_epoll_pwait) >++ return (syscall(__NR_epoll_pwait, epfd, events, maxevents, timeout, NULL, 0)); >++#else >+ return (syscall(__NR_epoll_wait, epfd, events, maxevents, timeout)); >++#endif >+ } >+Index: mozilla-release/js/src/assembler/wtf/Platform.h >+=================================================================== >+--- mozilla-release.orig/js/src/assembler/wtf/Platform.h >++++ mozilla-release/js/src/assembler/wtf/Platform.h >+@@ -210,6 +210,10 @@ >+ #define WTF_CPU_BIG_ENDIAN 1 >+ #endif >+ >++#if defined(__aarch64__) >++#define WTF_CPU_AARCH64 1 >++#endif >++ >+ /* WTF_CPU_X86 - i386 / x86 32-bit */ >+ #if defined(__i386__) \ >+ || defined(i386) \ >+Index: mozilla-release/js/src/configure.in >+=================================================================== >+--- mozilla-release.orig/js/src/configure.in >++++ mozilla-release/js/src/configure.in >+@@ -943,6 +943,11 @@ arm*) >+ mips|mipsel) >+ CPU_ARCH="mips" >+ ;; >++ >++aarch64*) >++ CPU_ARCH=aarch64 >++ ;; >++ >+ esac >+ >+ if test -z "$OS_TARGET"; then >+Index: mozilla-release/js/src/ctypes/libffi/Makefile.am >+=================================================================== >+--- mozilla-release.orig/js/src/ctypes/libffi/Makefile.am >++++ mozilla-release/js/src/ctypes/libffi/Makefile.am >+@@ -5,6 +5,7 @@ AUTOMAKE_OPTIONS = foreign subdir-object >+ SUBDIRS = include testsuite man >+ >+ EXTRA_DIST = LICENSE ChangeLog.v1 ChangeLog.libgcj configure.host \ >++ src/aarch64/ffi.c src/aarch64/ffitarget.h src/aarch64/sysv.S \ >+ src/alpha/ffi.c src/alpha/osf.S src/alpha/ffitarget.h \ >+ src/arm/ffi.c src/arm/sysv.S src/arm/ffitarget.h \ >+ src/avr32/ffi.c src/avr32/sysv.S src/avr32/ffitarget.h \ >+@@ -140,6 +141,9 @@ endif >+ if POWERPC_FREEBSD >+ nodist_libffi_la_SOURCES += src/powerpc/ffi.c src/powerpc/sysv.S src/powerpc/ppc_closure.S >+ endif >++if AARCH64 >++nodist_libffi_la_SOURCES += src/aarch64/sysv.S src/aarch64/ffi.c >++endif >+ if ARM >+ nodist_libffi_la_SOURCES += src/arm/sysv.S src/arm/ffi.c >+ endif >+Index: mozilla-release/js/src/ctypes/libffi/Makefile.in >+=================================================================== >+--- mozilla-release.orig/js/src/ctypes/libffi/Makefile.in >++++ mozilla-release/js/src/ctypes/libffi/Makefile.in >+@@ -67,6 +67,7 @@ target_triplet = @target@ >+ @FFI_DEBUG_TRUE@am__append_27 = -DFFI_DEBUG >+ # Build opt. >+ @FFI_DEBUG_FALSE@am__append_28 = -O2 >++@AARCH64_TRUE@am__append_29 = src/aarch64/sysv.S src/aarch64/ffi.c >+ subdir = . 
>+ DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \ >+ $(srcdir)/Makefile.in $(srcdir)/doc/stamp-vti \ >+@@ -152,6 +153,7 @@ am_libffi_la_OBJECTS = src/debug.lo src/ >+ @SH64_TRUE@am__objects_24 = src/sh64/sysv.lo src/sh64/ffi.lo >+ @PA_LINUX_TRUE@am__objects_25 = src/pa/linux.lo src/pa/ffi.lo >+ @PA_HPUX_TRUE@am__objects_26 = src/pa/hpux32.lo src/pa/ffi.lo >++@AARCH64_TRUE@am__objects_50 = src/aarch64/sysv.lo src/aarch64/ffi.lo >+ nodist_libffi_la_OBJECTS = $(am__objects_1) $(am__objects_2) \ >+ $(am__objects_3) $(am__objects_4) $(am__objects_5) \ >+ $(am__objects_6) $(am__objects_7) $(am__objects_8) \ >+@@ -160,7 +162,8 @@ nodist_libffi_la_OBJECTS = $(am__objects >+ $(am__objects_15) $(am__objects_16) $(am__objects_17) \ >+ $(am__objects_18) $(am__objects_19) $(am__objects_20) \ >+ $(am__objects_21) $(am__objects_22) $(am__objects_23) \ >+- $(am__objects_24) $(am__objects_25) $(am__objects_26) >++ $(am__objects_24) $(am__objects_25) $(am__objects_26) \ >++ $(am__objects_50) >+ libffi_la_OBJECTS = $(am_libffi_la_OBJECTS) \ >+ $(nodist_libffi_la_OBJECTS) >+ libffi_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ >+@@ -178,7 +181,7 @@ am__objects_28 = $(am__objects_1) $(am__ >+ $(am__objects_16) $(am__objects_17) $(am__objects_18) \ >+ $(am__objects_19) $(am__objects_20) $(am__objects_21) \ >+ $(am__objects_22) $(am__objects_23) $(am__objects_24) \ >+- $(am__objects_25) $(am__objects_26) >++ $(am__objects_25) $(am__objects_26) $(am__objects_50) >+ nodist_libffi_convenience_la_OBJECTS = $(am__objects_28) >+ libffi_convenience_la_OBJECTS = $(am_libffi_convenience_la_OBJECTS) \ >+ $(nodist_libffi_convenience_la_OBJECTS) >+@@ -398,6 +401,7 @@ top_srcdir = @top_srcdir@ >+ AUTOMAKE_OPTIONS = foreign subdir-objects >+ SUBDIRS = include testsuite man >+ EXTRA_DIST = LICENSE ChangeLog.v1 ChangeLog.libgcj configure.host \ >++ src/aarch64/ffi.c src/aarch64/ffitarget.h src/aarch64/sysv.S \ >+ src/alpha/ffi.c src/alpha/osf.S src/alpha/ffitarget.h \ >+ src/arm/ffi.c src/arm/sysv.S src/arm/ffitarget.h \ >+ src/avr32/ffi.c src/avr32/sysv.S src/avr32/ffitarget.h \ >+@@ -484,7 +488,8 @@ nodist_libffi_la_SOURCES = $(am__append_ >+ $(am__append_15) $(am__append_16) $(am__append_17) \ >+ $(am__append_18) $(am__append_19) $(am__append_20) \ >+ $(am__append_21) $(am__append_22) $(am__append_23) \ >+- $(am__append_24) $(am__append_25) $(am__append_26) >++ $(am__append_24) $(am__append_25) $(am__append_26) \ >++ $(am__append_29) >+ libffi_convenience_la_SOURCES = $(libffi_la_SOURCES) >+ nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES) >+ AM_CFLAGS = -Wall -g -fexceptions $(am__append_27) $(am__append_28) >+@@ -713,6 +718,16 @@ src/powerpc/darwin.lo: src/powerpc/$(am_ >+ src/powerpc/$(DEPDIR)/$(am__dirstamp) >+ src/powerpc/darwin_closure.lo: src/powerpc/$(am__dirstamp) \ >+ src/powerpc/$(DEPDIR)/$(am__dirstamp) >++src/aarch64/$(am__dirstamp): >++ @$(MKDIR_P) src/aarch64 >++ @: > src/aarch64/$(am__dirstamp) >++src/aarch64/$(DEPDIR)/$(am__dirstamp): >++ @$(MKDIR_P) src/aarch64/$(DEPDIR) >++ @: > src/aarch64/$(DEPDIR)/$(am__dirstamp) >++src/aarch64/sysv.lo: src/aarch64/$(am__dirstamp) \ >++ src/aarch64/$(DEPDIR)/$(am__dirstamp) >++src/aarch64/ffi.lo: src/aarch64/$(am__dirstamp) \ >++ src/aarch64/$(DEPDIR)/$(am__dirstamp) >+ src/arm/$(am__dirstamp): >+ @$(MKDIR_P) src/arm >+ @: > src/arm/$(am__dirstamp) >+@@ -816,6 +831,10 @@ mostlyclean-compile: >+ -rm -f src/alpha/ffi.lo >+ -rm -f src/alpha/osf.$(OBJEXT) >+ -rm -f src/alpha/osf.lo >++ -rm -f src/aarch64/ffi.$(OBJEXT) >++ -rm -f 
src/aarch64/ffi.lo >++ -rm -f src/aarch64/sysv.$(OBJEXT) >++ -rm -f src/aarch64/sysv.lo >+ -rm -f src/arm/ffi.$(OBJEXT) >+ -rm -f src/arm/ffi.lo >+ -rm -f src/arm/sysv.$(OBJEXT) >+@@ -938,6 +957,8 @@ distclean-compile: >+ @AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/prep_cif.Plo@am__quote@ >+ @AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/raw_api.Plo@am__quote@ >+ @AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/types.Plo@am__quote@ >++@AMDEP_TRUE@@am__include@ @am__quote@src/aarch64/$(DEPDIR)/ffi.Plo@am__quote@ >++@AMDEP_TRUE@@am__include@ @am__quote@src/aarch64/$(DEPDIR)/sysv.Plo@am__quote@ >+ @AMDEP_TRUE@@am__include@ @am__quote@src/alpha/$(DEPDIR)/ffi.Plo@am__quote@ >+ @AMDEP_TRUE@@am__include@ @am__quote@src/alpha/$(DEPDIR)/osf.Plo@am__quote@ >+ @AMDEP_TRUE@@am__include@ @am__quote@src/arm/$(DEPDIR)/ffi.Plo@am__quote@ >+@@ -1045,6 +1066,7 @@ mostlyclean-libtool: >+ clean-libtool: >+ -rm -rf .libs _libs >+ -rm -rf src/.libs src/_libs >++ -rm -rf src/aarch64/.libs src/aarch64/_libs >+ -rm -rf src/alpha/.libs src/alpha/_libs >+ -rm -rf src/arm/.libs src/arm/_libs >+ -rm -rf src/avr32/.libs src/avr32/_libs >+@@ -1596,6 +1618,8 @@ distclean-generic: >+ -rm -f doc/$(am__dirstamp) >+ -rm -f src/$(DEPDIR)/$(am__dirstamp) >+ -rm -f src/$(am__dirstamp) >++ -rm -f src/aarch64/$(DEPDIR)/$(am__dirstamp) >++ -rm -f src/aarch64/$(am__dirstamp) >+ -rm -f src/alpha/$(DEPDIR)/$(am__dirstamp) >+ -rm -f src/alpha/$(am__dirstamp) >+ -rm -f src/arm/$(DEPDIR)/$(am__dirstamp) >+@@ -1641,7 +1665,7 @@ clean-am: clean-aminfo clean-generic cle >+ >+ distclean: distclean-recursive >+ -rm -f $(am__CONFIG_DISTCLEAN_FILES) >+- -rm -rf src/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/x86/$(DEPDIR) >++ -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/x86/$(DEPDIR) >+ -rm -f Makefile >+ distclean-am: clean-am distclean-compile distclean-generic \ >+ distclean-hdr distclean-libtool distclean-tags >+@@ -1761,7 +1785,7 @@ installcheck-am: >+ maintainer-clean: maintainer-clean-recursive >+ -rm -f $(am__CONFIG_DISTCLEAN_FILES) >+ -rm -rf $(top_srcdir)/autom4te.cache >+- -rm -rf src/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/x86/$(DEPDIR) >++ -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/x86/$(DEPDIR) >+ -rm -f Makefile >+ maintainer-clean-am: distclean-am maintainer-clean-aminfo \ >+ maintainer-clean-generic maintainer-clean-vti >+Index: 
mozilla-release/js/src/ctypes/libffi/configure >+=================================================================== >+--- mozilla-release.orig/js/src/ctypes/libffi/configure >++++ mozilla-release/js/src/ctypes/libffi/configure >+@@ -777,6 +777,8 @@ AVR32_FALSE >+ AVR32_TRUE >+ ARM_FALSE >+ ARM_TRUE >++AARCH64_FALSE >++AARCH64_TRUE >+ POWERPC_FREEBSD_FALSE >+ POWERPC_FREEBSD_TRUE >+ POWERPC_DARWIN_FALSE >+@@ -11183,6 +11185,10 @@ fi >+ >+ TARGETDIR="unknown" >+ case "$host" in >++ aarch64*-*-*) >++ TARGET=AARCH64; TARGETDIR=aarch64 >++ ;; >++ >+ alpha*-*-*) >+ TARGET=ALPHA; TARGETDIR=alpha; >+ # Support 128-bit long double, changeable via command-line switch. >+@@ -11448,6 +11454,14 @@ else >+ POWERPC_FREEBSD_FALSE= >+ fi >+ >++ if test x$TARGET = xAARCH64; then >++ AARCH64_TRUE= >++ AARCH64_FALSE='#' >++else >++ AARCH64_TRUE='#' >++ AARCH64_FALSE= >++fi >++ >+ if test x$TARGET = xARM; then >+ ARM_TRUE= >+ ARM_FALSE='#' >+@@ -12805,6 +12819,10 @@ if test -z "${POWERPC_FREEBSD_TRUE}" && >+ as_fn_error "conditional \"POWERPC_FREEBSD\" was never defined. >+ Usually this means the macro was only invoked conditionally." "$LINENO" 5 >+ fi >++if test -z "${AARCH64_TRUE}" && test -z "${AARCH64_FALSE}"; then >++ as_fn_error $? "conditional \"AARCH64\" was never defined. >++Usually this means the macro was only invoked conditionally." "$LINENO" 5 >++fi >+ if test -z "${ARM_TRUE}" && test -z "${ARM_FALSE}"; then >+ as_fn_error "conditional \"ARM\" was never defined. >+ Usually this means the macro was only invoked conditionally." "$LINENO" 5 >+Index: mozilla-release/js/src/ctypes/libffi/configure.ac >+=================================================================== >+--- mozilla-release.orig/js/src/ctypes/libffi/configure.ac >++++ mozilla-release/js/src/ctypes/libffi/configure.ac >+@@ -45,6 +45,10 @@ AM_CONDITIONAL(TESTSUBDIR, test -d $srcd >+ >+ TARGETDIR="unknown" >+ case "$host" in >++ aarch64*-*-*) >++ TARGET=AARCH64; TARGETDIR=aarch64 >++ ;; >++ >+ alpha*-*-*) >+ TARGET=ALPHA; TARGETDIR=alpha; >+ # Support 128-bit long double, changeable via command-line switch. >+@@ -198,6 +202,7 @@ AM_CONDITIONAL(POWERPC, test x$TARGET = >+ AM_CONDITIONAL(POWERPC_AIX, test x$TARGET = xPOWERPC_AIX) >+ AM_CONDITIONAL(POWERPC_DARWIN, test x$TARGET = xPOWERPC_DARWIN) >+ AM_CONDITIONAL(POWERPC_FREEBSD, test x$TARGET = xPOWERPC_FREEBSD) >++AM_CONDITIONAL(AARCH64, test x$TARGET = xAARCH64) >+ AM_CONDITIONAL(ARM, test x$TARGET = xARM) >+ AM_CONDITIONAL(AVR32, test x$TARGET = xAVR32) >+ AM_CONDITIONAL(LIBFFI_CRIS, test x$TARGET = xLIBFFI_CRIS) >+Index: mozilla-release/js/src/ctypes/libffi/src/aarch64/ffi.c >+=================================================================== >+--- /dev/null >++++ mozilla-release/js/src/ctypes/libffi/src/aarch64/ffi.c >+@@ -0,0 +1,1076 @@ >++/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. >++ >++Permission is hereby granted, free of charge, to any person obtaining >++a copy of this software and associated documentation files (the >++``Software''), to deal in the Software without restriction, including >++without limitation the rights to use, copy, modify, merge, publish, >++distribute, sublicense, and/or sell copies of the Software, and to >++permit persons to whom the Software is furnished to do so, subject to >++the following conditions: >++ >++The above copyright notice and this permission notice shall be >++included in all copies or substantial portions of the Software. 
>++ >++THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, >++EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF >++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. >++IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY >++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, >++TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE >++SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ >++ >++#include <stdio.h> >++ >++#include <ffi.h> >++#include <ffi_common.h> >++ >++#include <stdlib.h> >++ >++/* Stack alignment requirement in bytes */ >++#define AARCH64_STACK_ALIGN 16 >++ >++#define N_X_ARG_REG 8 >++#define N_V_ARG_REG 8 >++ >++#define AARCH64_FFI_WITH_V (1 << AARCH64_FFI_WITH_V_BIT) >++ >++union _d >++{ >++ UINT64 d; >++ UINT32 s[2]; >++}; >++ >++struct call_context >++{ >++ UINT64 x [AARCH64_N_XREG]; >++ struct >++ { >++ union _d d[2]; >++ } v [AARCH64_N_VREG]; >++}; >++ >++static void * >++get_x_addr (struct call_context *context, unsigned n) >++{ >++ return &context->x[n]; >++} >++ >++static void * >++get_s_addr (struct call_context *context, unsigned n) >++{ >++#if defined __AARCH64EB__ >++ return &context->v[n].d[1].s[1]; >++#else >++ return &context->v[n].d[0].s[0]; >++#endif >++} >++ >++static void * >++get_d_addr (struct call_context *context, unsigned n) >++{ >++#if defined __AARCH64EB__ >++ return &context->v[n].d[1]; >++#else >++ return &context->v[n].d[0]; >++#endif >++} >++ >++static void * >++get_v_addr (struct call_context *context, unsigned n) >++{ >++ return &context->v[n]; >++} >++ >++/* Return the memory location at which a basic type would reside >++ were it to have been stored in register n. */ >++ >++static void * >++get_basic_type_addr (unsigned short type, struct call_context *context, >++ unsigned n) >++{ >++ switch (type) >++ { >++ case FFI_TYPE_FLOAT: >++ return get_s_addr (context, n); >++ case FFI_TYPE_DOUBLE: >++ return get_d_addr (context, n); >++ case FFI_TYPE_LONGDOUBLE: >++ return get_v_addr (context, n); >++ case FFI_TYPE_UINT8: >++ case FFI_TYPE_SINT8: >++ case FFI_TYPE_UINT16: >++ case FFI_TYPE_SINT16: >++ case FFI_TYPE_UINT32: >++ case FFI_TYPE_SINT32: >++ case FFI_TYPE_INT: >++ case FFI_TYPE_POINTER: >++ case FFI_TYPE_UINT64: >++ case FFI_TYPE_SINT64: >++ return get_x_addr (context, n); >++ default: >++ FFI_ASSERT (0); >++ return NULL; >++ } >++} >++ >++/* Return the alignment width for each of the basic types. */ >++ >++static size_t >++get_basic_type_alignment (unsigned short type) >++{ >++ switch (type) >++ { >++ case FFI_TYPE_FLOAT: >++ case FFI_TYPE_DOUBLE: >++ return sizeof (UINT64); >++ case FFI_TYPE_LONGDOUBLE: >++ return sizeof (long double); >++ case FFI_TYPE_UINT8: >++ case FFI_TYPE_SINT8: >++ case FFI_TYPE_UINT16: >++ case FFI_TYPE_SINT16: >++ case FFI_TYPE_UINT32: >++ case FFI_TYPE_INT: >++ case FFI_TYPE_SINT32: >++ case FFI_TYPE_POINTER: >++ case FFI_TYPE_UINT64: >++ case FFI_TYPE_SINT64: >++ return sizeof (UINT64); >++ >++ default: >++ FFI_ASSERT (0); >++ return 0; >++ } >++} >++ >++/* Return the size in bytes for each of the basic types. 
*/ >++ >++static size_t >++get_basic_type_size (unsigned short type) >++{ >++ switch (type) >++ { >++ case FFI_TYPE_FLOAT: >++ return sizeof (UINT32); >++ case FFI_TYPE_DOUBLE: >++ return sizeof (UINT64); >++ case FFI_TYPE_LONGDOUBLE: >++ return sizeof (long double); >++ case FFI_TYPE_UINT8: >++ return sizeof (UINT8); >++ case FFI_TYPE_SINT8: >++ return sizeof (SINT8); >++ case FFI_TYPE_UINT16: >++ return sizeof (UINT16); >++ case FFI_TYPE_SINT16: >++ return sizeof (SINT16); >++ case FFI_TYPE_UINT32: >++ return sizeof (UINT32); >++ case FFI_TYPE_INT: >++ case FFI_TYPE_SINT32: >++ return sizeof (SINT32); >++ case FFI_TYPE_POINTER: >++ case FFI_TYPE_UINT64: >++ return sizeof (UINT64); >++ case FFI_TYPE_SINT64: >++ return sizeof (SINT64); >++ >++ default: >++ FFI_ASSERT (0); >++ return 0; >++ } >++} >++ >++extern void >++ffi_call_SYSV (unsigned (*)(struct call_context *context, unsigned char *, >++ extended_cif *), >++ struct call_context *context, >++ extended_cif *, >++ unsigned, >++ void (*fn)(void)); >++ >++extern void >++ffi_closure_SYSV (ffi_closure *); >++ >++/* Test for an FFI floating point representation. */ >++ >++static unsigned >++is_floating_type (unsigned short type) >++{ >++ return (type == FFI_TYPE_FLOAT || type == FFI_TYPE_DOUBLE >++ || type == FFI_TYPE_LONGDOUBLE); >++} >++ >++/* Test for a homogeneous structure. */ >++ >++static unsigned short >++get_homogeneous_type (ffi_type *ty) >++{ >++ if (ty->type == FFI_TYPE_STRUCT && ty->elements) >++ { >++ unsigned i; >++ unsigned short candidate_type >++ = get_homogeneous_type (ty->elements[0]); >++ for (i =1; ty->elements[i]; i++) >++ { >++ unsigned short iteration_type = 0; >++ /* If we have a nested struct, we must find its homogeneous type. >++ If that fits with our candidate type, we are still >++ homogeneous. */ >++ if (ty->elements[i]->type == FFI_TYPE_STRUCT >++ && ty->elements[i]->elements) >++ { >++ iteration_type = get_homogeneous_type (ty->elements[i]); >++ } >++ else >++ { >++ iteration_type = ty->elements[i]->type; >++ } >++ >++ /* If we are not homogeneous, return FFI_TYPE_STRUCT. */ >++ if (candidate_type != iteration_type) >++ return FFI_TYPE_STRUCT; >++ } >++ return candidate_type; >++ } >++ >++ /* Base case, we have no more levels of nesting, so we >++ are a basic type, and so, trivially homogeneous in that type. */ >++ return ty->type; >++} >++ >++/* Determine the number of elements within a STRUCT. >++ >++ Note, we must handle nested structs. >++ >++ If ty is not a STRUCT this function will return 0. */ >++ >++static unsigned >++element_count (ffi_type *ty) >++{ >++ if (ty->type == FFI_TYPE_STRUCT && ty->elements) >++ { >++ unsigned n; >++ unsigned elems = 0; >++ for (n = 0; ty->elements[n]; n++) >++ { >++ if (ty->elements[n]->type == FFI_TYPE_STRUCT >++ && ty->elements[n]->elements) >++ elems += element_count (ty->elements[n]); >++ else >++ elems++; >++ } >++ return elems; >++ } >++ return 0; >++} >++ >++/* Test for a homogeneous floating point aggregate. >++ >++ A homogeneous floating point aggregate is a homogeneous aggregate of >++ a half- single- or double- precision floating point type with one >++ to four elements. Note that this includes nested structs of the >++ basic type. */ >++ >++static int >++is_hfa (ffi_type *ty) >++{ >++ if (ty->type == FFI_TYPE_STRUCT >++ && ty->elements[0] >++ && is_floating_type (get_homogeneous_type (ty))) >++ { >++ unsigned n = element_count (ty); >++ return n >= 1 && n <= 4; >++ } >++ return 0; >++} >++ >++/* Test if an ffi_type is a candidate for passing in a register. 
>++ >++ This test does not check that sufficient registers of the >++ appropriate class are actually available, merely that IFF >++ sufficient registers are available then the argument will be passed >++ in register(s). >++ >++ Note that an ffi_type that is deemed to be a register candidate >++ will always be returned in registers. >++ >++ Returns 1 if a register candidate else 0. */ >++ >++static int >++is_register_candidate (ffi_type *ty) >++{ >++ switch (ty->type) >++ { >++ case FFI_TYPE_VOID: >++ case FFI_TYPE_FLOAT: >++ case FFI_TYPE_DOUBLE: >++ case FFI_TYPE_LONGDOUBLE: >++ case FFI_TYPE_UINT8: >++ case FFI_TYPE_UINT16: >++ case FFI_TYPE_UINT32: >++ case FFI_TYPE_UINT64: >++ case FFI_TYPE_POINTER: >++ case FFI_TYPE_SINT8: >++ case FFI_TYPE_SINT16: >++ case FFI_TYPE_SINT32: >++ case FFI_TYPE_INT: >++ case FFI_TYPE_SINT64: >++ return 1; >++ >++ case FFI_TYPE_STRUCT: >++ if (is_hfa (ty)) >++ { >++ return 1; >++ } >++ else if (ty->size > 16) >++ { >++ /* Too large. Will be replaced with a pointer to memory. The >++ pointer MAY be passed in a register, but the value will >++ not. This test specifically fails since the argument will >++ never be passed by value in registers. */ >++ return 0; >++ } >++ else >++ { >++ /* Might be passed in registers depending on the number of >++ registers required. */ >++ return (ty->size + 7) / 8 < N_X_ARG_REG; >++ } >++ break; >++ >++ default: >++ FFI_ASSERT (0); >++ break; >++ } >++ >++ return 0; >++} >++ >++/* Test if an ffi_type argument or result is a candidate for a vector >++ register. */ >++ >++static int >++is_v_register_candidate (ffi_type *ty) >++{ >++ return is_floating_type (ty->type) >++ || (ty->type == FFI_TYPE_STRUCT && is_hfa (ty)); >++} >++ >++/* Representation of the procedure call argument marshalling >++ state. >++ >++ The terse state variable names match the names used in the AARCH64 >++ PCS. */ >++ >++struct arg_state >++{ >++ unsigned ngrn; /* Next general-purpose register number. */ >++ unsigned nsrn; /* Next vector register number. */ >++ unsigned nsaa; /* Next stack offset. */ >++}; >++ >++/* Initialize a procedure call argument marshalling state. */ >++static void >++arg_init (struct arg_state *state, unsigned call_frame_size) >++{ >++ state->ngrn = 0; >++ state->nsrn = 0; >++ state->nsaa = 0; >++} >++ >++/* Return the number of available consecutive core argument >++ registers. */ >++ >++static unsigned >++available_x (struct arg_state *state) >++{ >++ return N_X_ARG_REG - state->ngrn; >++} >++ >++/* Return the number of available consecutive vector argument >++ registers. 
*/ >++ >++static unsigned >++available_v (struct arg_state *state) >++{ >++ return N_V_ARG_REG - state->nsrn; >++} >++ >++static void * >++allocate_to_x (struct call_context *context, struct arg_state *state) >++{ >++ FFI_ASSERT (state->ngrn < N_X_ARG_REG) >++ return get_x_addr (context, (state->ngrn)++); >++} >++ >++static void * >++allocate_to_s (struct call_context *context, struct arg_state *state) >++{ >++ FFI_ASSERT (state->nsrn < N_V_ARG_REG) >++ return get_s_addr (context, (state->nsrn)++); >++} >++ >++static void * >++allocate_to_d (struct call_context *context, struct arg_state *state) >++{ >++ FFI_ASSERT (state->nsrn < N_V_ARG_REG) >++ return get_d_addr (context, (state->nsrn)++); >++} >++ >++static void * >++allocate_to_v (struct call_context *context, struct arg_state *state) >++{ >++ FFI_ASSERT (state->nsrn < N_V_ARG_REG) >++ return get_v_addr (context, (state->nsrn)++); >++} >++ >++/* Allocate an aligned slot on the stack and return a pointer to it. */ >++static void * >++allocate_to_stack (struct arg_state *state, void *stack, unsigned alignment, >++ unsigned size) >++{ >++ void *allocation; >++ >++ /* Round up the NSAA to the larger of 8 or the natural >++ alignment of the argument's type. */ >++ state->nsaa = ALIGN (state->nsaa, alignment); >++ state->nsaa = ALIGN (state->nsaa, alignment); >++ state->nsaa = ALIGN (state->nsaa, 8); >++ >++ allocation = stack + state->nsaa; >++ >++ state->nsaa += size; >++ return allocation; >++} >++ >++static void >++copy_basic_type (void *dest, void *source, unsigned short type) >++{ >++ /* This is neccessary to ensure that basic types are copied >++ sign extended to 64-bits as libffi expects. */ >++ switch (type) >++ { >++ case FFI_TYPE_FLOAT: >++ *(float *) dest = *(float *) source; >++ break; >++ case FFI_TYPE_DOUBLE: >++ *(double *) dest = *(double *) source; >++ break; >++ case FFI_TYPE_LONGDOUBLE: >++ *(long double *) dest = *(long double *) source; >++ break; >++ case FFI_TYPE_UINT8: >++ *(ffi_arg *) dest = *(UINT8 *) source; >++ break; >++ case FFI_TYPE_SINT8: >++ *(ffi_sarg *) dest = *(SINT8 *) source; >++ break; >++ case FFI_TYPE_UINT16: >++ *(ffi_arg *) dest = *(UINT16 *) source; >++ break; >++ case FFI_TYPE_SINT16: >++ *(ffi_sarg *) dest = *(SINT16 *) source; >++ break; >++ case FFI_TYPE_UINT32: >++ *(ffi_arg *) dest = *(UINT32 *) source; >++ break; >++ case FFI_TYPE_INT: >++ case FFI_TYPE_SINT32: >++ *(ffi_sarg *) dest = *(SINT32 *) source; >++ break; >++ case FFI_TYPE_POINTER: >++ case FFI_TYPE_UINT64: >++ *(ffi_arg *) dest = *(UINT64 *) source; >++ break; >++ case FFI_TYPE_SINT64: >++ *(ffi_sarg *) dest = *(SINT64 *) source; >++ break; >++ >++ default: >++ FFI_ASSERT (0); >++ } >++} >++ >++static void >++copy_hfa_to_reg_or_stack (void *memory, >++ ffi_type *ty, >++ struct call_context *context, >++ unsigned char *stack, >++ struct arg_state *state) >++{ >++ unsigned elems = element_count (ty); >++ if (available_v (state) < elems) >++ { >++ /* There are insufficient V registers. Further V register allocations >++ are prevented, the NSAA is adjusted (by allocate_to_stack ()) >++ and the argument is copied to memory at the adjusted NSAA. 
*/ >++ state->nsrn = N_V_ARG_REG; >++ memcpy (allocate_to_stack (state, stack, ty->alignment, ty->size), >++ memory, >++ ty->size); >++ } >++ else >++ { >++ int i; >++ unsigned short type = get_homogeneous_type (ty); >++ unsigned elems = element_count (ty); >++ for (i = 0; i < elems; i++) >++ { >++ void *reg = allocate_to_v (context, state); >++ copy_basic_type (reg, memory, type); >++ memory += get_basic_type_size (type); >++ } >++ } >++} >++ >++/* Either allocate an appropriate register for the argument type, or if >++ none are available, allocate a stack slot and return a pointer >++ to the allocated space. */ >++ >++static void * >++allocate_to_register_or_stack (struct call_context *context, >++ unsigned char *stack, >++ struct arg_state *state, >++ unsigned short type) >++{ >++ size_t alignment = get_basic_type_alignment (type); >++ size_t size = alignment; >++ switch (type) >++ { >++ case FFI_TYPE_FLOAT: >++ /* This is the only case for which the allocated stack size >++ should not match the alignment of the type. */ >++ size = sizeof (UINT32); >++ /* Fall through. */ >++ case FFI_TYPE_DOUBLE: >++ if (state->nsrn < N_V_ARG_REG) >++ return allocate_to_d (context, state); >++ state->nsrn = N_V_ARG_REG; >++ break; >++ case FFI_TYPE_LONGDOUBLE: >++ if (state->nsrn < N_V_ARG_REG) >++ return allocate_to_v (context, state); >++ state->nsrn = N_V_ARG_REG; >++ break; >++ case FFI_TYPE_UINT8: >++ case FFI_TYPE_SINT8: >++ case FFI_TYPE_UINT16: >++ case FFI_TYPE_SINT16: >++ case FFI_TYPE_UINT32: >++ case FFI_TYPE_SINT32: >++ case FFI_TYPE_INT: >++ case FFI_TYPE_POINTER: >++ case FFI_TYPE_UINT64: >++ case FFI_TYPE_SINT64: >++ if (state->ngrn < N_X_ARG_REG) >++ return allocate_to_x (context, state); >++ state->ngrn = N_X_ARG_REG; >++ break; >++ default: >++ FFI_ASSERT (0); >++ } >++ >++ return allocate_to_stack (state, stack, alignment, size); >++} >++ >++/* Copy a value to an appropriate register, or if none are >++ available, to the stack. */ >++ >++static void >++copy_to_register_or_stack (struct call_context *context, >++ unsigned char *stack, >++ struct arg_state *state, >++ void *value, >++ unsigned short type) >++{ >++ copy_basic_type ( >++ allocate_to_register_or_stack (context, stack, state, type), >++ value, >++ type); >++} >++ >++/* Marshall the arguments from FFI representation to procedure call >++ context and stack. */ >++ >++static unsigned >++aarch64_prep_args (struct call_context *context, unsigned char *stack, >++ extended_cif *ecif) >++{ >++ int i; >++ struct arg_state state; >++ >++ arg_init (&state, ALIGN(ecif->cif->bytes, 16)); >++ >++ for (i = 0; i < ecif->cif->nargs; i++) >++ { >++ ffi_type *ty = ecif->cif->arg_types[i]; >++ switch (ty->type) >++ { >++ case FFI_TYPE_VOID: >++ FFI_ASSERT (0); >++ break; >++ >++ /* If the argument is a basic type the argument is allocated to an >++ appropriate register, or if none are available, to the stack. 
*/ >++ case FFI_TYPE_FLOAT: >++ case FFI_TYPE_DOUBLE: >++ case FFI_TYPE_LONGDOUBLE: >++ case FFI_TYPE_UINT8: >++ case FFI_TYPE_SINT8: >++ case FFI_TYPE_UINT16: >++ case FFI_TYPE_SINT16: >++ case FFI_TYPE_UINT32: >++ case FFI_TYPE_INT: >++ case FFI_TYPE_SINT32: >++ case FFI_TYPE_POINTER: >++ case FFI_TYPE_UINT64: >++ case FFI_TYPE_SINT64: >++ copy_to_register_or_stack (context, stack, &state, >++ ecif->avalue[i], ty->type); >++ break; >++ >++ case FFI_TYPE_STRUCT: >++ if (is_hfa (ty)) >++ { >++ copy_hfa_to_reg_or_stack (ecif->avalue[i], ty, context, >++ stack, &state); >++ } >++ else if (ty->size > 16) >++ { >++ /* If the argument is a composite type that is larger than 16 >++ bytes, then the argument has been copied to memory, and >++ the argument is replaced by a pointer to the copy. */ >++ >++ copy_to_register_or_stack (context, stack, &state, >++ &(ecif->avalue[i]), FFI_TYPE_POINTER); >++ } >++ else if (available_x (&state) >= (ty->size + 7) / 8) >++ { >++ /* If the argument is a composite type and the size in >++ double-words is not more than the number of available >++ X registers, then the argument is copied into consecutive >++ X registers. */ >++ int j; >++ for (j = 0; j < (ty->size + 7) / 8; j++) >++ { >++ memcpy (allocate_to_x (context, &state), >++ &(((UINT64 *) ecif->avalue[i])[j]), >++ sizeof (UINT64)); >++ } >++ } >++ else >++ { >++ /* Otherwise, there are insufficient X registers. Further X >++ register allocations are prevented, the NSAA is adjusted >++ (by allocate_to_stack ()) and the argument is copied to >++ memory at the adjusted NSAA. */ >++ state.ngrn = N_X_ARG_REG; >++ >++ memcpy (allocate_to_stack (&state, stack, ty->alignment, >++ ty->size), ecif->avalue + i, ty->size); >++ } >++ break; >++ >++ default: >++ FFI_ASSERT (0); >++ break; >++ } >++ } >++ >++ return ecif->cif->aarch64_flags; >++} >++ >++ffi_status >++ffi_prep_cif_machdep (ffi_cif *cif) >++{ >++ /* Round the stack up to a multiple of the stack alignment requirement. */ >++ cif->bytes = >++ (cif->bytes + (AARCH64_STACK_ALIGN - 1)) & ~ (AARCH64_STACK_ALIGN - 1); >++ >++ /* Initialize our flags. We are interested if this CIF will touch a >++ vector register, if so we will enable context save and load to >++ those registers, otherwise not. This is intended to be friendly >++ to lazy float context switching in the kernel. */ >++ cif->aarch64_flags = 0; >++ >++ if (is_v_register_candidate (cif->rtype)) >++ { >++ cif->aarch64_flags |= AARCH64_FFI_WITH_V; >++ } >++ else >++ { >++ int i; >++ for (i = 0; i < cif->nargs; i++) >++ if (is_v_register_candidate (cif->arg_types[i])) >++ { >++ cif->aarch64_flags |= AARCH64_FFI_WITH_V; >++ break; >++ } >++ } >++ >++ return FFI_OK; >++} >++ >++/* Call a function with the provided arguments and capture the return >++ value. 
*/ >++void >++ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) >++{ >++ extended_cif ecif; >++ >++ ecif.cif = cif; >++ ecif.avalue = avalue; >++ ecif.rvalue = rvalue; >++ >++ switch (cif->abi) >++ { >++ case FFI_SYSV: >++ { >++ struct call_context context; >++ unsigned stack_bytes; >++ >++ /* Figure out the total amount of stack space we need, the >++ above call frame space needs to be 16 bytes aligned to >++ ensure correct alignment of the first object inserted in >++ that space hence the ALIGN applied to cif->bytes.*/ >++ stack_bytes = ALIGN(cif->bytes, 16); >++ >++ memset (&context, 0, sizeof (context)); >++ if (is_register_candidate (cif->rtype)) >++ { >++ ffi_call_SYSV (aarch64_prep_args, &context, &ecif, stack_bytes, fn); >++ switch (cif->rtype->type) >++ { >++ case FFI_TYPE_VOID: >++ case FFI_TYPE_FLOAT: >++ case FFI_TYPE_DOUBLE: >++ case FFI_TYPE_LONGDOUBLE: >++ case FFI_TYPE_UINT8: >++ case FFI_TYPE_SINT8: >++ case FFI_TYPE_UINT16: >++ case FFI_TYPE_SINT16: >++ case FFI_TYPE_UINT32: >++ case FFI_TYPE_SINT32: >++ case FFI_TYPE_POINTER: >++ case FFI_TYPE_UINT64: >++ case FFI_TYPE_INT: >++ case FFI_TYPE_SINT64: >++ { >++ void *addr = get_basic_type_addr (cif->rtype->type, >++ &context, 0); >++ copy_basic_type (rvalue, addr, cif->rtype->type); >++ break; >++ } >++ >++ case FFI_TYPE_STRUCT: >++ if (is_hfa (cif->rtype)) >++ { >++ int j; >++ unsigned short type = get_homogeneous_type (cif->rtype); >++ unsigned elems = element_count (cif->rtype); >++ for (j = 0; j < elems; j++) >++ { >++ void *reg = get_basic_type_addr (type, &context, j); >++ copy_basic_type (rvalue, reg, type); >++ rvalue += get_basic_type_size (type); >++ } >++ } >++ else if ((cif->rtype->size + 7) / 8 < N_X_ARG_REG) >++ { >++ unsigned size = ALIGN (cif->rtype->size, sizeof (UINT64)); >++ memcpy (rvalue, get_x_addr (&context, 0), size); >++ } >++ else >++ { >++ FFI_ASSERT (0); >++ } >++ break; >++ >++ default: >++ FFI_ASSERT (0); >++ break; >++ } >++ } >++ else >++ { >++ memcpy (get_x_addr (&context, 8), &rvalue, sizeof (UINT64)); >++ ffi_call_SYSV (aarch64_prep_args, &context, &ecif, >++ stack_bytes, fn); >++ } >++ break; >++ } >++ >++ default: >++ FFI_ASSERT (0); >++ break; >++ } >++} >++ >++static unsigned char trampoline [] = >++{ 0x70, 0x00, 0x00, 0x58, /* ldr x16, 1f */ >++ 0x91, 0x00, 0x00, 0x10, /* adr x17, 2f */ >++ 0x00, 0x02, 0x1f, 0xd6 /* br x16 */ >++}; >++ >++/* Build a trampoline. */ >++ >++#define FFI_INIT_TRAMPOLINE(TRAMP,FUN,CTX,FLAGS) \ >++ ({unsigned char *__tramp = (unsigned char*)(TRAMP); \ >++ UINT64 __fun = (UINT64)(FUN); \ >++ UINT64 __ctx = (UINT64)(CTX); \ >++ UINT64 __flags = (UINT64)(FLAGS); \ >++ memcpy (__tramp, trampoline, sizeof (trampoline)); \ >++ memcpy (__tramp + 12, &__fun, sizeof (__fun)); \ >++ memcpy (__tramp + 20, &__ctx, sizeof (__ctx)); \ >++ memcpy (__tramp + 28, &__flags, sizeof (__flags)); \ >++ __clear_cache(__tramp, __tramp + FFI_TRAMPOLINE_SIZE); \ >++ }) >++ >++ffi_status >++ffi_prep_closure_loc (ffi_closure* closure, >++ ffi_cif* cif, >++ void (*fun)(ffi_cif*,void*,void**,void*), >++ void *user_data, >++ void *codeloc) >++{ >++ if (cif->abi != FFI_SYSV) >++ return FFI_BAD_ABI; >++ >++ FFI_INIT_TRAMPOLINE (&closure->tramp[0], &ffi_closure_SYSV, codeloc, >++ cif->aarch64_flags); >++ >++ closure->cif = cif; >++ closure->user_data = user_data; >++ closure->fun = fun; >++ >++ return FFI_OK; >++} >++ >++/* Primary handler to setup and invoke a function within a closure. 
>++ >++ A closure when invoked enters via the assembler wrapper >++ ffi_closure_SYSV(). The wrapper allocates a call context on the >++ stack, saves the interesting registers (from the perspective of >++ the calling convention) into the context then passes control to >++ ffi_closure_SYSV_inner() passing the saved context and a pointer to >++ the stack at the point ffi_closure_SYSV() was invoked. >++ >++ On the return path the assembler wrapper will reload call context >++ regsiters. >++ >++ ffi_closure_SYSV_inner() marshalls the call context into ffi value >++ desriptors, invokes the wrapped function, then marshalls the return >++ value back into the call context. */ >++ >++void >++ffi_closure_SYSV_inner (ffi_closure *closure, struct call_context *context, >++ void *stack) >++{ >++ ffi_cif *cif = closure->cif; >++ void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); >++ void *rvalue = NULL; >++ int i; >++ struct arg_state state; >++ >++ arg_init (&state, ALIGN(cif->bytes, 16)); >++ >++ for (i = 0; i < cif->nargs; i++) >++ { >++ ffi_type *ty = cif->arg_types[i]; >++ >++ switch (ty->type) >++ { >++ case FFI_TYPE_VOID: >++ FFI_ASSERT (0); >++ break; >++ >++ case FFI_TYPE_UINT8: >++ case FFI_TYPE_SINT8: >++ case FFI_TYPE_UINT16: >++ case FFI_TYPE_SINT16: >++ case FFI_TYPE_UINT32: >++ case FFI_TYPE_SINT32: >++ case FFI_TYPE_INT: >++ case FFI_TYPE_POINTER: >++ case FFI_TYPE_UINT64: >++ case FFI_TYPE_SINT64: >++ case FFI_TYPE_FLOAT: >++ case FFI_TYPE_DOUBLE: >++ case FFI_TYPE_LONGDOUBLE: >++ avalue[i] = allocate_to_register_or_stack (context, stack, >++ &state, ty->type); >++ break; >++ >++ case FFI_TYPE_STRUCT: >++ if (is_hfa (ty)) >++ { >++ unsigned n = element_count (ty); >++ if (available_v (&state) < n) >++ { >++ state.nsrn = N_V_ARG_REG; >++ avalue[i] = allocate_to_stack (&state, stack, ty->alignment, >++ ty->size); >++ } >++ else >++ { >++ switch (get_homogeneous_type (ty)) >++ { >++ case FFI_TYPE_FLOAT: >++ { >++ /* Eeek! We need a pointer to the structure, >++ however the homogeneous float elements are >++ being passed in individual S registers, >++ therefore the structure is not represented as >++ a contiguous sequence of bytes in our saved >++ register context. We need to fake up a copy >++ of the structure layed out in memory >++ correctly. The fake can be tossed once the >++ closure function has returned hence alloca() >++ is sufficient. */ >++ int j; >++ UINT32 *p = avalue[i] = alloca (ty->size); >++ for (j = 0; j < element_count (ty); j++) >++ memcpy (&p[j], >++ allocate_to_s (context, &state), >++ sizeof (*p)); >++ break; >++ } >++ >++ case FFI_TYPE_DOUBLE: >++ { >++ /* Eeek! We need a pointer to the structure, >++ however the homogeneous float elements are >++ being passed in individual S registers, >++ therefore the structure is not represented as >++ a contiguous sequence of bytes in our saved >++ register context. We need to fake up a copy >++ of the structure layed out in memory >++ correctly. The fake can be tossed once the >++ closure function has returned hence alloca() >++ is sufficient. 
*/ >++ int j; >++ UINT64 *p = avalue[i] = alloca (ty->size); >++ for (j = 0; j < element_count (ty); j++) >++ memcpy (&p[j], >++ allocate_to_d (context, &state), >++ sizeof (*p)); >++ break; >++ } >++ >++ case FFI_TYPE_LONGDOUBLE: >++ memcpy (&avalue[i], >++ allocate_to_v (context, &state), >++ sizeof (*avalue)); >++ break; >++ >++ default: >++ FFI_ASSERT (0); >++ break; >++ } >++ } >++ } >++ else if (ty->size > 16) >++ { >++ /* Replace Composite type of size greater than 16 with a >++ pointer. */ >++ memcpy (&avalue[i], >++ allocate_to_register_or_stack (context, stack, >++ &state, FFI_TYPE_POINTER), >++ sizeof (avalue[i])); >++ } >++ else if (available_x (&state) >= (ty->size + 7) / 8) >++ { >++ avalue[i] = get_x_addr (context, state.ngrn); >++ state.ngrn += (ty->size + 7) / 8; >++ } >++ else >++ { >++ state.ngrn = N_X_ARG_REG; >++ >++ avalue[i] = allocate_to_stack (&state, stack, ty->alignment, >++ ty->size); >++ } >++ break; >++ >++ default: >++ FFI_ASSERT (0); >++ break; >++ } >++ } >++ >++ /* Figure out where the return value will be passed, either in >++ registers or in a memory block allocated by the caller and passed >++ in x8. */ >++ >++ if (is_register_candidate (cif->rtype)) >++ { >++ /* Register candidates are *always* returned in registers. */ >++ >++ /* Allocate a scratchpad for the return value, we will let the >++ callee scrible the result into the scratch pad then move the >++ contents into the appropriate return value location for the >++ call convention. */ >++ rvalue = alloca (cif->rtype->size); >++ (closure->fun) (cif, rvalue, avalue, closure->user_data); >++ >++ /* Copy the return value into the call context so that it is returned >++ as expected to our caller. */ >++ switch (cif->rtype->type) >++ { >++ case FFI_TYPE_VOID: >++ break; >++ >++ case FFI_TYPE_UINT8: >++ case FFI_TYPE_UINT16: >++ case FFI_TYPE_UINT32: >++ case FFI_TYPE_POINTER: >++ case FFI_TYPE_UINT64: >++ case FFI_TYPE_SINT8: >++ case FFI_TYPE_SINT16: >++ case FFI_TYPE_INT: >++ case FFI_TYPE_SINT32: >++ case FFI_TYPE_SINT64: >++ case FFI_TYPE_FLOAT: >++ case FFI_TYPE_DOUBLE: >++ case FFI_TYPE_LONGDOUBLE: >++ { >++ void *addr = get_basic_type_addr (cif->rtype->type, context, 0); >++ copy_basic_type (addr, rvalue, cif->rtype->type); >++ break; >++ } >++ case FFI_TYPE_STRUCT: >++ if (is_hfa (cif->rtype)) >++ { >++ int i; >++ unsigned short type = get_homogeneous_type (cif->rtype); >++ unsigned elems = element_count (cif->rtype); >++ for (i = 0; i < elems; i++) >++ { >++ void *reg = get_basic_type_addr (type, context, i); >++ copy_basic_type (reg, rvalue, type); >++ rvalue += get_basic_type_size (type); >++ } >++ } >++ else if ((cif->rtype->size + 7) / 8 < N_X_ARG_REG) >++ { >++ unsigned size = ALIGN (cif->rtype->size, sizeof (UINT64)) ; >++ memcpy (get_x_addr (context, 0), rvalue, size); >++ } >++ else >++ { >++ FFI_ASSERT (0); >++ } >++ break; >++ default: >++ FFI_ASSERT (0); >++ break; >++ } >++ } >++ else >++ { >++ memcpy (&rvalue, get_x_addr (context, 8), sizeof (UINT64)); >++ (closure->fun) (cif, rvalue, avalue, closure->user_data); >++ } >++} >++ >+Index: mozilla-release/js/src/ctypes/libffi/src/aarch64/ffitarget.h >+=================================================================== >+--- /dev/null >++++ mozilla-release/js/src/ctypes/libffi/src/aarch64/ffitarget.h >+@@ -0,0 +1,59 @@ >++/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. 
>++ >++Permission is hereby granted, free of charge, to any person obtaining >++a copy of this software and associated documentation files (the >++``Software''), to deal in the Software without restriction, including >++without limitation the rights to use, copy, modify, merge, publish, >++distribute, sublicense, and/or sell copies of the Software, and to >++permit persons to whom the Software is furnished to do so, subject to >++the following conditions: >++ >++The above copyright notice and this permission notice shall be >++included in all copies or substantial portions of the Software. >++ >++THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, >++EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF >++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. >++IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY >++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, >++TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE >++SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ >++ >++#ifndef LIBFFI_TARGET_H >++#define LIBFFI_TARGET_H >++ >++#ifndef LIBFFI_H >++#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." >++#endif >++ >++#ifndef LIBFFI_ASM >++typedef unsigned long ffi_arg; >++typedef signed long ffi_sarg; >++ >++typedef enum ffi_abi >++ { >++ FFI_FIRST_ABI = 0, >++ FFI_SYSV, >++ FFI_LAST_ABI, >++ FFI_DEFAULT_ABI = FFI_SYSV >++ } ffi_abi; >++#endif >++ >++/* ---- Definitions for closures ----------------------------------------- */ >++ >++#define FFI_CLOSURES 1 >++#define FFI_TRAMPOLINE_SIZE 36 >++#define FFI_NATIVE_RAW_API 0 >++ >++/* ---- Internal ---- */ >++ >++ >++#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_flags >++ >++#define AARCH64_FFI_WITH_V_BIT 0 >++ >++#define AARCH64_N_XREG 32 >++#define AARCH64_N_VREG 32 >++#define AARCH64_CALL_CONTEXT_SIZE (AARCH64_N_XREG * 8 + AARCH64_N_VREG * 16) >++ >++#endif >+Index: mozilla-release/js/src/ctypes/libffi/src/aarch64/sysv.S >+=================================================================== >+--- /dev/null >++++ mozilla-release/js/src/ctypes/libffi/src/aarch64/sysv.S >+@@ -0,0 +1,307 @@ >++/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. >++ >++Permission is hereby granted, free of charge, to any person obtaining >++a copy of this software and associated documentation files (the >++``Software''), to deal in the Software without restriction, including >++without limitation the rights to use, copy, modify, merge, publish, >++distribute, sublicense, and/or sell copies of the Software, and to >++permit persons to whom the Software is furnished to do so, subject to >++the following conditions: >++ >++The above copyright notice and this permission notice shall be >++included in all copies or substantial portions of the Software. >++ >++THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, >++EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF >++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. >++IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY >++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, >++TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE >++SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ >++ >++#define LIBFFI_ASM >++#include <fficonfig.h> >++#include <ffi.h> >++ >++#define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off >++#define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off >++#define cfi_restore(reg) .cfi_restore reg >++#define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg >++ >++ .text >++ .globl ffi_call_SYSV >++ .type ffi_call_SYSV, #function >++ >++/* ffi_call_SYSV() >++ >++ Create a stack frame, setup an argument context, call the callee >++ and extract the result. >++ >++ The maximum required argument stack size is provided, >++ ffi_call_SYSV() allocates that stack space then calls the >++ prepare_fn to populate register context and stack. The >++ argument passing registers are loaded from the register >++ context and the callee called, on return the register passing >++ register are saved back to the context. Our caller will >++ extract the return value from the final state of the saved >++ register context. >++ >++ Prototype: >++ >++ extern unsigned >++ ffi_call_SYSV (void (*)(struct call_context *context, unsigned char *, >++ extended_cif *), >++ struct call_context *context, >++ extended_cif *, >++ unsigned required_stack_size, >++ void (*fn)(void)); >++ >++ Therefore on entry we have: >++ >++ x0 prepare_fn >++ x1 &context >++ x2 &ecif >++ x3 bytes >++ x4 fn >++ >++ This function uses the following stack frame layout: >++ >++ == >++ saved x30(lr) >++ x29(fp)-> saved x29(fp) >++ saved x24 >++ saved x23 >++ saved x22 >++ sp' -> saved x21 >++ ... >++ sp -> (constructed callee stack arguments) >++ == >++ >++ Voila! */ >++ >++#define ffi_call_SYSV_FS (8 * 4) >++ >++ .cfi_startproc >++ffi_call_SYSV: >++ stp x29, x30, [sp, #-16]! >++ cfi_adjust_cfa_offset (16) >++ cfi_rel_offset (x29, 0) >++ cfi_rel_offset (x30, 8) >++ >++ mov x29, sp >++ cfi_def_cfa_register (x29) >++ sub sp, sp, #ffi_call_SYSV_FS >++ >++ stp x21, x22, [sp, 0] >++ cfi_rel_offset (x21, 0 - ffi_call_SYSV_FS) >++ cfi_rel_offset (x22, 8 - ffi_call_SYSV_FS) >++ >++ stp x23, x24, [sp, 16] >++ cfi_rel_offset (x23, 16 - ffi_call_SYSV_FS) >++ cfi_rel_offset (x24, 24 - ffi_call_SYSV_FS) >++ >++ mov x21, x1 >++ mov x22, x2 >++ mov x24, x4 >++ >++ /* Allocate the stack space for the actual arguments, many >++ arguments will be passed in registers, but we assume >++ worst case and allocate sufficient stack for ALL of >++ the arguments. */ >++ sub sp, sp, x3 >++ >++ /* unsigned (*prepare_fn) (struct call_context *context, >++ unsigned char *stack, extended_cif *ecif); >++ */ >++ mov x23, x0 >++ mov x0, x1 >++ mov x1, sp >++ /* x2 already in place */ >++ blr x23 >++ >++ /* Preserve the flags returned. */ >++ mov x23, x0 >++ >++ /* Figure out if we should touch the vector registers. */ >++ tbz x23, #AARCH64_FFI_WITH_V_BIT, 1f >++ >++ /* Load the vector argument passing registers. */ >++ ldp q0, q1, [x21, #8*32 + 0] >++ ldp q2, q3, [x21, #8*32 + 32] >++ ldp q4, q5, [x21, #8*32 + 64] >++ ldp q6, q7, [x21, #8*32 + 96] >++1: >++ /* Load the core argument passing registers. */ >++ ldp x0, x1, [x21, #0] >++ ldp x2, x3, [x21, #16] >++ ldp x4, x5, [x21, #32] >++ ldp x6, x7, [x21, #48] >++ >++ /* Don't forget x8 which may be holding the address of a return buffer. >++ */ >++ ldr x8, [x21, #8*8] >++ >++ blr x24 >++ >++ /* Save the core argument passing registers. */ >++ stp x0, x1, [x21, #0] >++ stp x2, x3, [x21, #16] >++ stp x4, x5, [x21, #32] >++ stp x6, x7, [x21, #48] >++ >++ /* Note nothing useful ever comes back in x8! */ >++ >++ /* Figure out if we should touch the vector registers. 
*/ >++ tbz x23, #AARCH64_FFI_WITH_V_BIT, 1f >++ >++ /* Save the vector argument passing registers. */ >++ stp q0, q1, [x21, #8*32 + 0] >++ stp q2, q3, [x21, #8*32 + 32] >++ stp q4, q5, [x21, #8*32 + 64] >++ stp q6, q7, [x21, #8*32 + 96] >++1: >++ /* All done, unwind our stack frame. */ >++ ldp x21, x22, [x29, # - ffi_call_SYSV_FS] >++ cfi_restore (x21) >++ cfi_restore (x22) >++ >++ ldp x23, x24, [x29, # - ffi_call_SYSV_FS + 16] >++ cfi_restore (x23) >++ cfi_restore (x24) >++ >++ mov sp, x29 >++ cfi_def_cfa_register (sp) >++ >++ ldp x29, x30, [sp], #16 >++ cfi_adjust_cfa_offset (-16) >++ cfi_restore (x29) >++ cfi_restore (x30) >++ >++ ret >++ >++ .cfi_endproc >++ .size ffi_call_SYSV, .-ffi_call_SYSV >++ >++#define ffi_closure_SYSV_FS (8 * 2 + AARCH64_CALL_CONTEXT_SIZE) >++ >++/* ffi_closure_SYSV >++ >++ Closure invocation glue. This is the low level code invoked directly by >++ the closure trampoline to setup and call a closure. >++ >++ On entry x17 points to a struct trampoline_data, x16 has been clobbered >++ all other registers are preserved. >++ >++ We allocate a call context and save the argument passing registers, >++ then invoked the generic C ffi_closure_SYSV_inner() function to do all >++ the real work, on return we load the result passing registers back from >++ the call context. >++ >++ On entry >++ >++ extern void >++ ffi_closure_SYSV (struct trampoline_data *); >++ >++ struct trampoline_data >++ { >++ UINT64 *ffi_closure; >++ UINT64 flags; >++ }; >++ >++ This function uses the following stack frame layout: >++ >++ == >++ saved x30(lr) >++ x29(fp)-> saved x29(fp) >++ saved x22 >++ saved x21 >++ ... >++ sp -> call_context >++ == >++ >++ Voila! */ >++ >++ .text >++ .globl ffi_closure_SYSV >++ .cfi_startproc >++ffi_closure_SYSV: >++ stp x29, x30, [sp, #-16]! >++ cfi_adjust_cfa_offset (16) >++ cfi_rel_offset (x29, 0) >++ cfi_rel_offset (x30, 8) >++ >++ mov x29, sp >++ >++ sub sp, sp, #ffi_closure_SYSV_FS >++ cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) >++ >++ stp x21, x22, [x29, #-16] >++ cfi_rel_offset (x21, 0) >++ cfi_rel_offset (x22, 8) >++ >++ /* Load x21 with &call_context. */ >++ mov x21, sp >++ /* Preserve our struct trampoline_data * */ >++ mov x22, x17 >++ >++ /* Save the rest of the argument passing registers. */ >++ stp x0, x1, [x21, #0] >++ stp x2, x3, [x21, #16] >++ stp x4, x5, [x21, #32] >++ stp x6, x7, [x21, #48] >++ /* Don't forget we may have been given a result scratch pad address. >++ */ >++ str x8, [x21, #64] >++ >++ /* Figure out if we should touch the vector registers. */ >++ ldr x0, [x22, #8] >++ tbz x0, #AARCH64_FFI_WITH_V_BIT, 1f >++ >++ /* Save the argument passing vector registers. */ >++ stp q0, q1, [x21, #8*32 + 0] >++ stp q2, q3, [x21, #8*32 + 32] >++ stp q4, q5, [x21, #8*32 + 64] >++ stp q6, q7, [x21, #8*32 + 96] >++1: >++ /* Load &ffi_closure.. */ >++ ldr x0, [x22, #0] >++ mov x1, x21 >++ /* Compute the location of the stack at the point that the >++ trampoline was called. */ >++ add x2, x29, #16 >++ >++ bl ffi_closure_SYSV_inner >++ >++ /* Figure out if we should touch the vector registers. */ >++ ldr x0, [x22, #8] >++ tbz x0, #AARCH64_FFI_WITH_V_BIT, 1f >++ >++ /* Load the result passing vector registers. */ >++ ldp q0, q1, [x21, #8*32 + 0] >++ ldp q2, q3, [x21, #8*32 + 32] >++ ldp q4, q5, [x21, #8*32 + 64] >++ ldp q6, q7, [x21, #8*32 + 96] >++1: >++ /* Load the result passing core registers. */ >++ ldp x0, x1, [x21, #0] >++ ldp x2, x3, [x21, #16] >++ ldp x4, x5, [x21, #32] >++ ldp x6, x7, [x21, #48] >++ /* Note nothing usefull is returned in x8. 
*/ >++ >++ /* We are done, unwind our frame. */ >++ ldp x21, x22, [x29, #-16] >++ cfi_restore (x21) >++ cfi_restore (x22) >++ >++ mov sp, x29 >++ cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS) >++ >++ ldp x29, x30, [sp], #16 >++ cfi_adjust_cfa_offset (-16) >++ cfi_restore (x29) >++ cfi_restore (x30) >++ >++ ret >++ .cfi_endproc >++ .size ffi_closure_SYSV, .-ffi_closure_SYSV >+Index: mozilla-release/media/webrtc/trunk/webrtc/typedefs.h >+=================================================================== >+--- mozilla-release.orig/media/webrtc/trunk/webrtc/typedefs.h >++++ mozilla-release/media/webrtc/trunk/webrtc/typedefs.h >+@@ -103,6 +103,11 @@ >+ #define WEBRTC_ARCH_32_BITS 1 >+ #define WEBRTC_ARCH_BIG_ENDIAN >+ #define WEBRTC_BIG_ENDIAN >++#elif defined(__aarch64__) >++#define WEBRTC_ARCH_AARCH64 1 >++#define WEBRTC_ARCH_64_BITS 1 >++#define WEBRTC_ARCH_LITTLE_ENDIAN >++#define WEBRTC_LITTLE_ENDIAN >+ #elif defined(__alpha__) >+ #define WEBRTC_ARCH_ALPHA 1 >+ #define WEBRTC_ARCH_64_BITS 1 >+Index: mozilla-release/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in >+=================================================================== >+--- mozilla-release.orig/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in >++++ mozilla-release/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in >+@@ -288,6 +288,9 @@ static const bool config_ivsalloc = >+ # ifdef __tile__ >+ # define LG_QUANTUM 4 >+ # endif >++# ifdef __aarch64__ >++# define LG_QUANTUM 4 >++# endif >+ # ifndef LG_QUANTUM >+ # error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS" >+ # endif >+Index: mozilla-release/memory/mozjemalloc/jemalloc.c >+=================================================================== >+--- mozilla-release.orig/memory/mozjemalloc/jemalloc.c >++++ mozilla-release/memory/mozjemalloc/jemalloc.c >+@@ -1118,7 +1118,7 @@ static unsigned ncpus; >+ #if (defined(SOLARIS) || defined(__FreeBSD__)) && \ >+ (defined(__sparc) || defined(__sparcv9) || defined(__ia64)) >+ #define pagesize_2pow ((size_t) 13) >+-#elif defined(__powerpc64__) >++#elif defined(__powerpc64__) || defined(__aarch64__) >+ #define pagesize_2pow ((size_t) 16) >+ #else >+ #define pagesize_2pow ((size_t) 12) >+Index: mozilla-release/mfbt/tests/TestPoisonArea.cpp >+=================================================================== >+--- mozilla-release.orig/mfbt/tests/TestPoisonArea.cpp >++++ mozilla-release/mfbt/tests/TestPoisonArea.cpp >+@@ -170,6 +170,9 @@ typedef unsigned int uint32_t; >+ #elif defined __s390__ >+ #define RETURN_INSTR 0x07fe0000 /* br %r14 */ >+ >++#elif defined __aarch64__ >++#define RETURN_INSTR 0xd65f03c0 /* ret */ >++ >+ #elif defined __ia64 >+ struct ia64_instr { uint32_t i[4]; }; >+ static const ia64_instr _return_instr = >+Index: mozilla-release/nsprpub/pr/include/pratom.h >+=================================================================== >+--- mozilla-release.orig/nsprpub/pr/include/pratom.h >++++ mozilla-release/nsprpub/pr/include/pratom.h >+@@ -110,7 +110,7 @@ long __cdecl _InterlockedExchangeAdd(lon >+ (defined(__powerpc__) && !defined(__powerpc64__)) || \ >+ (defined(__arm__) && \ >+ defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)) || \ >+- defined(__alpha)))) >++ defined(__alpha)) || defined(__aarch64__))) >+ >+ /* >+ * Because the GCC manual warns that some processors may support >+Index: mozilla-release/toolkit/crashreporter/google-breakpad/src/common/linux/dump_symbols.cc >+=================================================================== >+--- 
mozilla-release.orig/toolkit/crashreporter/google-breakpad/src/common/linux/dump_symbols.cc >++++ mozilla-release/toolkit/crashreporter/google-breakpad/src/common/linux/dump_symbols.cc >+@@ -805,6 +805,7 @@ const char* ElfArchitecture(const typena >+ case EM_SPARC: return "sparc"; >+ case EM_SPARCV9: return "sparcv9"; >+ case EM_X86_64: return "x86_64"; >++ case EM_AARCH64: return "aarch64"; >+ default: return NULL; >+ } >+ } >+Index: mozilla-release/xpcom/reflect/xptcall/src/md/unix/Makefile.in >+=================================================================== >+--- mozilla-release.orig/xpcom/reflect/xptcall/src/md/unix/Makefile.in >++++ mozilla-release/xpcom/reflect/xptcall/src/md/unix/Makefile.in >+@@ -144,6 +144,20 @@ endif >+ endif >+ >+ ###################################################################### >++# AARCH64 >++###################################################################### >++# >++# Linux/AArch64 >++# >++ifeq ($(OS_ARCH),Linux) >++ifneq (,$(filter aarch64,$(OS_TEST))) >++CPPSRCS := xptcinvoke_aarch64.cpp xptcstubs_aarch64.cpp >++ASFILES := xptcinvoke_asm_aarch64.s xptcstubs_asm_aarch64.s >++CXXFLAGS += -O2 >++endif >++endif >++ >++###################################################################### >+ # PowerPC >+ ###################################################################### >+ # >+Index: mozilla-release/xpcom/reflect/xptcall/src/md/unix/xptcinvoke_aarch64.cpp >+=================================================================== >+--- /dev/null >++++ mozilla-release/xpcom/reflect/xptcall/src/md/unix/xptcinvoke_aarch64.cpp >+@@ -0,0 +1,138 @@ >++/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ >++/* This Source Code Form is subject to the terms of the Mozilla Public >++ * License, v. 2.0. If a copy of the MPL was not distributed with this >++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ >++ >++/* Platform specific code to invoke XPCOM methods on native objects */ >++ >++#include "xptcprivate.h" >++ >++#if !defined(__aarch64__) >++#error "This code is for Linux AArch64 only." >++#endif >++ >++ >++/* "Procedure Call Standard for the ARM 64-bit Architecture" document, sections >++ * "5.4 Parameter Passing" and "6.1.2 Procedure Calling" contain all the >++ * needed information. >++ * >++ * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0042d/IHI0042D_aapcs.pdf >++ */ >++ >++#ifndef __AARCH64EL__ >++#error "Only little endian compatibility was tested" >++#endif >++ >++/* >++ * Allocation of integer function arguments initially to registers r1-r7 >++ * and then to stack. Handling of 'that' argument which goes to register r0 >++ * is handled separately and does not belong here. >++ * >++ * 'ireg_args' - pointer to the current position in the buffer, >++ * corresponding to the register arguments >++ * 'stack_args' - pointer to the current position in the buffer, >++ * corresponding to the arguments on stack >++ * 'end' - pointer to the end of the registers argument >++ * buffer. 
>++ */ >++static inline void alloc_word(uint64_t* &ireg_args, >++ uint64_t* &stack_args, >++ uint64_t* end, >++ uint64_t data) >++{ >++ if (ireg_args < end) { >++ *ireg_args = data; >++ ireg_args++; >++ } else { >++ *stack_args = data; >++ stack_args++; >++ } >++} >++ >++static inline void alloc_double(double* &freg_args, >++ uint64_t* &stack_args, >++ double* end, >++ double data) >++{ >++ if (freg_args < end) { >++ *freg_args = data; >++ freg_args++; >++ } else { >++ *(double *)stack_args = data; >++ stack_args++; >++ } >++} >++ >++static inline void alloc_float(double* &freg_args, >++ uint64_t* &stack_args, >++ double* end, >++ float data) >++{ >++ if (freg_args < end) { >++ *(float *)freg_args = data; >++ freg_args++; >++ } else { >++ *(float *)stack_args = data; >++ stack_args++; >++ } >++} >++ >++ >++extern "C" void >++invoke_copy_to_stack(uint64_t* stk, uint64_t *end, >++ uint32_t paramCount, nsXPTCVariant* s) >++{ >++ uint64_t *ireg_args = stk; >++ uint64_t *ireg_end = ireg_args + 8; >++ double *freg_args = (double *)ireg_end; >++ double *freg_end = freg_args + 8; >++ uint64_t *stack_args = (uint64_t *)freg_end; >++ >++ // leave room for 'that' argument in x0 >++ ++ireg_args; >++ >++ for (uint32_t i = 0; i < paramCount; i++, s++) { >++ if (s->IsPtrData()) { >++ alloc_word(ireg_args, stack_args, ireg_end, (uint64_t)s->ptr); >++ continue; >++ } >++ // According to the ABI, integral types that are smaller than 8 bytes >++ // are to be passed in 8-byte registers or 8-byte stack slots. >++ switch (s->type) >++ { >++ case nsXPTType::T_FLOAT: >++ alloc_float(freg_args, stack_args, freg_end, s->val.f); >++ break; >++ case nsXPTType::T_DOUBLE: >++ alloc_double(freg_args, stack_args, freg_end, s->val.d); >++ break; >++ case nsXPTType::T_I8: alloc_word(ireg_args, stk, end, s->val.i8); break; >++ case nsXPTType::T_I16: alloc_word(ireg_args, stk, end, s->val.i16); break; >++ case nsXPTType::T_I32: alloc_word(ireg_args, stk, end, s->val.i32); break; >++ case nsXPTType::T_I64: alloc_word(ireg_args, stk, end, s->val.i64); break; >++ case nsXPTType::T_U8: alloc_word(ireg_args, stk, end, s->val.u8); break; >++ case nsXPTType::T_U16: alloc_word(ireg_args, stk, end, s->val.u16); break; >++ case nsXPTType::T_U32: alloc_word(ireg_args, stk, end, s->val.u32); break; >++ case nsXPTType::T_U64: alloc_word(ireg_args, stk, end, s->val.u64); break; >++ case nsXPTType::T_BOOL: alloc_word(ireg_args, stk, end, s->val.b); break; >++ case nsXPTType::T_CHAR: alloc_word(ireg_args, stk, end, s->val.c); break; >++ case nsXPTType::T_WCHAR: alloc_word(ireg_args, stk, end, s->val.wc); break; >++ default: >++ // all the others are plain pointer types >++ alloc_word(ireg_args, stack_args, ireg_end, >++ reinterpret_cast<uint64_t>(s->val.p)); >++ break; >++ } >++ } >++} >++ >++extern "C" nsresult _NS_InvokeByIndex(nsISupports* that, uint32_t methodIndex, >++ uint32_t paramCount, >++ nsXPTCVariant* params); >++ >++EXPORT_XPCOM_API(nsresult) >++NS_InvokeByIndex(nsISupports* that, uint32_t methodIndex, >++ uint32_t paramCount, nsXPTCVariant* params) >++{ >++ return _NS_InvokeByIndex(that, methodIndex, paramCount, params); >++} >+Index: mozilla-release/xpcom/reflect/xptcall/src/md/unix/xptcinvoke_asm_aarch64.s >+=================================================================== >+--- /dev/null >++++ mozilla-release/xpcom/reflect/xptcall/src/md/unix/xptcinvoke_asm_aarch64.s >+@@ -0,0 +1,67 @@ >++/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ >++/* This Source Code Form is subject to the terms of 
the Mozilla Public >++ * License, v. 2.0. If a copy of the MPL was not distributed with this >++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ >++ >++ .section ".text" >++ .globl _NS_InvokeByIndex >++ .type _NS_InvokeByIndex,@function >++ >++/* >++ * NS_InvokeByIndex(nsISupports* that, uint32_t methodIndex, >++ * uint32_t paramCount, nsXPTCVariant* params) >++ */ >++ >++_NS_InvokeByIndex: >++ # set up frame >++ stp x29, x30, [sp,#-32]! >++ mov x29, sp >++ stp x19, x20, [sp,#16] >++ >++ # save methodIndex across function calls >++ mov w20, w1 >++ >++ # end of stack area passed to invoke_copy_to_stack >++ mov x1, sp >++ >++ # assume 8 bytes of stack for each argument with 16-byte alignment >++ add w19, w2, #1 >++ and w19, w19, #0xfffffffe >++ sub sp, sp, w19, uxth #3 >++ >++ # temporary place to store args passed in r0-r7,v0-v7 >++ sub sp, sp, #128 >++ >++ # save 'that' on stack >++ str x0, [sp] >++ >++ # start of stack area passed to invoke_copy_to_stack >++ mov x0, sp >++ bl invoke_copy_to_stack >++ >++ # load arguments passed in r0-r7 >++ ldp x6, x7, [sp, #48] >++ ldp x4, x5, [sp, #32] >++ ldp x2, x3, [sp, #16] >++ ldp x0, x1, [sp],#64 >++ >++ # load arguments passed in v0-v7 >++ ldp d6, d7, [sp, #48] >++ ldp d4, d5, [sp, #32] >++ ldp d2, d3, [sp, #16] >++ ldp d0, d1, [sp],#64 >++ >++ # call the method >++ ldr x16, [x0] >++ add x16, x16, w20, uxth #3 >++ ldr x16, [x16] >++ blr x16 >++ >++ add sp, sp, w19, uxth #3 >++ ldp x19, x20, [sp,#16] >++ ldp x29, x30, [sp],#32 >++ ret >++ >++ .size _NS_InvokeByIndex, . - _NS_InvokeByIndex >++ >++ >+Index: mozilla-release/xpcom/reflect/xptcall/src/md/unix/xptcstubs_aarch64.cpp >+=================================================================== >+--- /dev/null >++++ mozilla-release/xpcom/reflect/xptcall/src/md/unix/xptcstubs_aarch64.cpp >+@@ -0,0 +1,210 @@ >++/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ >++/* This Source Code Form is subject to the terms of the Mozilla Public >++ * License, v. 2.0. If a copy of the MPL was not distributed with this >++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ >++ >++#include "xptcprivate.h" >++#include "xptiprivate.h" >++ >++/* >++ * This is for AArch64 ABI >++ * >++ * When we're called, the "gp" registers are stored in gprData and >++ * the "fp" registers are stored in fprData. Each array has 8 regs >++ * but first reg in gprData is a placeholder for 'self'. 
>++ */ >++extern "C" nsresult >++PrepareAndDispatch(nsXPTCStubBase* self, uint32_t methodIndex, uint64_t* args, >++ uint64_t *gprData, double *fprData) >++{ >++#define PARAM_BUFFER_COUNT 16 >++#define PARAM_GPR_COUNT 8 >++#define PARAM_FPR_COUNT 8 >++ >++ nsXPTCMiniVariant paramBuffer[PARAM_BUFFER_COUNT]; >++ nsXPTCMiniVariant* dispatchParams = NULL; >++ const nsXPTMethodInfo* info; >++ uint8_t paramCount; >++ uint8_t i; >++ nsresult result = NS_ERROR_FAILURE; >++ >++ NS_ASSERTION(self,"no self"); >++ >++ self->mEntry->GetMethodInfo(uint16_t(methodIndex), &info); >++ NS_ASSERTION(info,"no method info"); >++ >++ paramCount = info->GetParamCount(); >++ >++ // setup variant array pointer >++ if(paramCount > PARAM_BUFFER_COUNT) >++ dispatchParams = new nsXPTCMiniVariant[paramCount]; >++ else >++ dispatchParams = paramBuffer; >++ NS_ASSERTION(dispatchParams,"no place for params"); >++ >++ uint64_t* ap = args; >++ uint32_t next_gpr = 1; // skip first arg which is 'self' >++ uint32_t next_fpr = 0; >++ for(i = 0; i < paramCount; i++) >++ { >++ const nsXPTParamInfo& param = info->GetParam(i); >++ const nsXPTType& type = param.GetType(); >++ nsXPTCMiniVariant* dp = &dispatchParams[i]; >++ >++ if(param.IsOut() || !type.IsArithmetic()) >++ { >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.p = (void*)gprData[next_gpr++]; >++ else >++ dp->val.p = (void*)*ap++; >++ continue; >++ } >++ // else >++ switch(type) >++ { >++ case nsXPTType::T_I8: >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.i8 = (int8_t)gprData[next_gpr++]; >++ else >++ dp->val.i8 = (int8_t)*ap++; >++ break; >++ >++ case nsXPTType::T_I16: >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.i16 = (int16_t)gprData[next_gpr++]; >++ else >++ dp->val.i16 = (int16_t)*ap++; >++ break; >++ >++ case nsXPTType::T_I32: >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.i32 = (int32_t)gprData[next_gpr++]; >++ else >++ dp->val.i32 = (int32_t)*ap++; >++ break; >++ >++ case nsXPTType::T_I64: >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.i64 = (int64_t)gprData[next_gpr++]; >++ else >++ dp->val.i64 = (int64_t)*ap++; >++ break; >++ >++ case nsXPTType::T_U8: >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.u8 = (uint8_t)gprData[next_gpr++]; >++ else >++ dp->val.u8 = (uint8_t)*ap++; >++ break; >++ >++ case nsXPTType::T_U16: >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.u16 = (uint16_t)gprData[next_gpr++]; >++ else >++ dp->val.u16 = (uint16_t)*ap++; >++ break; >++ >++ case nsXPTType::T_U32: >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.u32 = (uint32_t)gprData[next_gpr++]; >++ else >++ dp->val.u32 = (uint32_t)*ap++; >++ break; >++ >++ case nsXPTType::T_U64: >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.u64 = (uint64_t)gprData[next_gpr++]; >++ else >++ dp->val.u64 = (uint64_t)*ap++; >++ break; >++ >++ case nsXPTType::T_FLOAT: >++ if (next_fpr < PARAM_FPR_COUNT) >++ dp->val.f = *(float *)(fprData + next_fpr++); >++ else >++ { >++ dp->val.f = *(float *)ap; >++ ap++; >++ } >++ break; >++ >++ case nsXPTType::T_DOUBLE: >++ if (next_fpr < PARAM_FPR_COUNT) >++ dp->val.d = fprData[next_fpr++]; >++ else >++ { >++ dp->val.d = *(double*)ap; >++ ap++; >++ } >++ break; >++ >++ case nsXPTType::T_BOOL: >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.b = (bool)gprData[next_gpr++]; >++ else >++ dp->val.b = (bool)*ap++; >++ break; >++ >++ case nsXPTType::T_CHAR: >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.c = (char)gprData[next_gpr++]; >++ else >++ dp->val.c = (char)*ap++; >++ break; >++ >++ case nsXPTType::T_WCHAR: >++ if (next_gpr < PARAM_GPR_COUNT) >++ dp->val.wc = 
(wchar_t)gprData[next_gpr++]; >++ else >++ dp->val.wc = (wchar_t)*ap++; >++ break; >++ >++ default: >++ NS_ASSERTION(0, "bad type"); >++ break; >++ } >++ } >++ >++ result = self->mOuter->CallMethod((uint16_t)methodIndex, info, dispatchParams); >++ >++ if(dispatchParams != paramBuffer) >++ delete [] dispatchParams; >++ >++ return result; >++} >++ >++// Load w17 with the constant 'n' and branch to SharedStub(). >++# define STUB_ENTRY(n) \ >++__asm__ ( \ >++ ".section \".text\" \n\t" \ >++ ".align 2\n\t" \ >++ ".if "#n" < 10 \n\t" \ >++ ".globl _ZN14nsXPTCStubBase5Stub"#n"Ev \n\t" \ >++ ".hidden _ZN14nsXPTCStubBase5Stub"#n"Ev \n\t" \ >++ ".type _ZN14nsXPTCStubBase5Stub"#n"Ev,@function \n\n" \ >++"_ZN14nsXPTCStubBase5Stub"#n"Ev: \n\t" \ >++ ".elseif "#n" < 100 \n\t" \ >++ ".globl _ZN14nsXPTCStubBase6Stub"#n"Ev \n\t" \ >++ ".hidden _ZN14nsXPTCStubBase6Stub"#n"Ev \n\t" \ >++ ".type _ZN14nsXPTCStubBase6Stub"#n"Ev,@function \n\n" \ >++"_ZN14nsXPTCStubBase6Stub"#n"Ev: \n\t" \ >++ ".elseif "#n" < 1000 \n\t" \ >++ ".globl _ZN14nsXPTCStubBase7Stub"#n"Ev \n\t" \ >++ ".hidden _ZN14nsXPTCStubBase7Stub"#n"Ev \n\t" \ >++ ".type _ZN14nsXPTCStubBase7Stub"#n"Ev,@function \n\n" \ >++"_ZN14nsXPTCStubBase7Stub"#n"Ev: \n\t" \ >++ ".else \n\t" \ >++ ".err \"stub number "#n" >= 1000 not yet supported\"\n" \ >++ ".endif \n\t" \ >++ "mov w17,#"#n" \n\t" \ >++ "b SharedStub \n" \ >++); >++ >++#define SENTINEL_ENTRY(n) \ >++nsresult nsXPTCStubBase::Sentinel##n() \ >++{ \ >++ NS_ASSERTION(0,"nsXPTCStubBase::Sentinel called"); \ >++ return NS_ERROR_NOT_IMPLEMENTED; \ >++} >++ >++#include "xptcstubsdef.inc" >+Index: mozilla-release/xpcom/reflect/xptcall/src/md/unix/xptcstubs_asm_aarch64.s >+=================================================================== >+--- /dev/null >++++ mozilla-release/xpcom/reflect/xptcall/src/md/unix/xptcstubs_asm_aarch64.s >+@@ -0,0 +1,39 @@ >++# This Source Code Form is subject to the terms of the Mozilla Public >++# License, v. 2.0. If a copy of the MPL was not distributed with this >++# file, You can obtain one at http://mozilla.org/MPL/2.0/. >++ >++ .set NGPREGS,8 >++ .set NFPREGS,8 >++ >++ .section ".text" >++ .globl SharedStub >++ .hidden SharedStub >++ .type SharedStub,@function >++SharedStub: >++ stp x29, x30, [sp,#-16]! >++ mov x29, sp >++ >++ sub sp, sp, #8*(NGPREGS+NFPREGS) >++ stp x0, x1, [sp, #64+(0*8)] >++ stp x2, x3, [sp, #64+(2*8)] >++ stp x4, x5, [sp, #64+(4*8)] >++ stp x6, x7, [sp, #64+(6*8)] >++ stp d0, d1, [sp, #(0*8)] >++ stp d2, d3, [sp, #(2*8)] >++ stp d4, d5, [sp, #(4*8)] >++ stp d6, d7, [sp, #(6*8)] >++ >++ # methodIndex passed from stub >++ mov w1, w17 >++ >++ add x2, sp, #16+(8*(NGPREGS+NFPREGS)) >++ add x3, sp, #8*NFPREGS >++ add x4, sp, #0 >++ >++ bl PrepareAndDispatch >++ >++ add sp, sp, #8*(NGPREGS+NFPREGS) >++ ldp x29, x30, [sp],#16 >++ ret >++ >++ .size SharedStub, . 
- SharedStub >+Index: mozilla-release/mfbt/double-conversion/utils.h >+=================================================================== >+--- mozilla-release.orig/mfbt/double-conversion/utils.h >++++ mozilla-release/mfbt/double-conversion/utils.h >+@@ -60,7 +60,7 @@ >+ defined(__sparc__) || defined(__sparc) || defined(__s390__) || \ >+ defined(__SH4__) || defined(__alpha__) || \ >+ defined(_MIPS_ARCH_MIPS32R2) || \ >+- defined(_AARCH64EL_) >++ defined(__aarch64__) >+ #define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1 >+ #elif defined(_M_IX86) || defined(__i386__) || defined(__i386) >+ #if defined(_WIN32) >diff --git a/xulrunner.spec b/xulrunner.spec >index c665024..cdda279 100644 >--- a/xulrunner.spec >+++ b/xulrunner.spec >@@ -104,6 +104,9 @@ Patch200: mozilla-193-pkgconfig.patch > # Unable to install addons from https pages > Patch204: rhbz-966424.patch > >+# AArch64 port >+Patch400: xulrunner-aarch64.patch >+ > # Upstream patches > Patch300: mozilla-837563.patch > Patch301: mozilla-938730.patch >@@ -249,6 +252,8 @@ cd %{tarballdir} > %patch300 -p1 -b .837563 > %patch301 -p1 -b .938730 > >+%patch400 -p1 -b .aarch64 >+ > %{__rm} -f .mozconfig > %{__cp} %{SOURCE10} .mozconfig > >@@ -320,6 +325,14 @@ echo "ac_add_options --disable-tracejit" >> .mozconfig > echo "ac_add_options --disable-webrtc" >> .mozconfig > %endif > >+# update gnu-config files for AArch64 >+cp /usr/lib/rpm/redhat/config.{guess,sub} build/autoconf/ >+cp /usr/lib/rpm/redhat/config.{guess,sub} js/src/build/autoconf/ >+cp /usr/lib/rpm/redhat/config.{guess,sub} js/src/ctypes/libffi/ >+cp /usr/lib/rpm/redhat/config.{guess,sub} memory/jemalloc/src/ >+cp /usr/lib/rpm/redhat/config.{guess,sub} nsprpub/build/autoconf/ >+cp /usr/lib/rpm/redhat/config.{guess,sub} toolkit/crashreporter/google-breakpad/src/third_party/glog/ >+ > #--------------------------------------------------------------------- > > %build >@@ -364,7 +377,7 @@ export LIBDIR='%{_libdir}' > MOZ_SMP_FLAGS=-j1 > # On x86 architectures, Mozilla can build up to 4 jobs at once in parallel, > # however builds tend to fail on other arches when building in parallel. >-%ifarch %{ix86} x86_64 ppc ppc64 >+%ifarch %{ix86} x86_64 ppc ppc64 aarch64 > [ -z "$RPM_BUILD_NCPUS" ] && \ > RPM_BUILD_NCPUS="`/usr/bin/getconf _NPROCESSORS_ONLN`" > [ "$RPM_BUILD_NCPUS" -ge 2 ] && MOZ_SMP_FLAGS=-j2
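
The xptcinvoke_aarch64.cpp part of the patch above marshals XPCOM call arguments into a scratch buffer laid out as eight 8-byte general-purpose slots (later loaded into x0-x7 by _NS_InvokeByIndex), eight 8-byte floating-point slots (loaded into d0-d7), and overflow slots left on the stack, following AAPCS64. The short standalone C++ sketch below only illustrates that slot-allocation idea; the names ArgArea, push_gp and push_fp are invented for the example and do not appear in the patch.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // AAPCS64-style slot allocation, as described in the patch comments:
    // the first eight integer/pointer arguments go to 8-byte GP slots (x0-x7),
    // the first eight FP arguments go to 8-byte FP slots (d0-d7),
    // anything beyond that spills to 8-byte stack slots.
    constexpr unsigned kGPSlots = 8;
    constexpr unsigned kFPSlots = 8;

    struct ArgArea {                       // hypothetical helper, not in the patch
        uint64_t gp[kGPSlots] = {};
        double   fp[kFPSlots] = {};
        uint64_t stack[16]    = {};        // overflow area, sized for this demo
        unsigned next_gp = 0, next_fp = 0, next_stack = 0;

        void push_gp(uint64_t v) {         // integral or pointer argument
            if (next_gp < kGPSlots) gp[next_gp++] = v;
            else                    stack[next_stack++] = v;
        }
        void push_fp(double v) {           // floating-point argument
            if (next_fp < kFPSlots) fp[next_fp++] = v;
            else                    std::memcpy(&stack[next_stack++], &v, sizeof v);
        }
    };

    int main() {
        ArgArea a;
        a.push_gp(0xdeadbeef);             // slot 0 would hold the 'that' pointer (x0)
        for (uint64_t i = 1; i <= 9; ++i)  // ten GP arguments in total:
            a.push_gp(i);                  // the last two spill to the stack
        a.push_fp(3.5);                    // first FP argument lands in d0
        std::printf("gp=%u fp=%u stack=%u\n", a.next_gp, a.next_fp, a.next_stack);
        return 0;                          // prints: gp=8 fp=1 stack=2
    }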