path: root/extra/gnu-efi-libs
author    root <root@rshg054.dnsready.net>  2012-10-13 00:57:48 -0700
committer root <root@rshg054.dnsready.net>  2012-10-13 00:57:48 -0700
commit    21c295a86a10a3cd59802b2cf3ab7c24ebf0c608 (patch)
tree      ef49797b1f76586e9f8281e301a4923d90f29aa7 /extra/gnu-efi-libs
parent    3cfedd8a4ed1e5464ef328d91b1a224aedc07960 (diff)

Sat Oct 13 00:57:48 PDT 2012
Diffstat (limited to 'extra/gnu-efi-libs')
 -rw-r--r--  extra/gnu-efi-libs/PKGBUILD                             |  22
 -rw-r--r--  extra/gnu-efi-libs/gnu-efi-libs-x86_64-call-fix.patch   | 327
 2 files changed, 340 insertions, 9 deletions
diff --git a/extra/gnu-efi-libs/PKGBUILD b/extra/gnu-efi-libs/PKGBUILD
index f71cd69b5..2799ac0d9 100644
--- a/extra/gnu-efi-libs/PKGBUILD
+++ b/extra/gnu-efi-libs/PKGBUILD
@@ -1,11 +1,11 @@
-# $Id: PKGBUILD 167140 2012-09-26 06:30:46Z tpowa $
+# $Id: PKGBUILD 168460 2012-10-12 06:35:15Z tpowa $
# Maintainer: Tobias Powalowski <tpowa@archlinux.org>
# Maintainer : Keshav P R <(the.ridikulus.rat) (aatt) (gemmaeiil) (ddoott) (ccoomm)>
# Contributor: Alessio 'mOLOk' Bolognino <themolok@gmail.com>
_pkgver="3.0"
pkgname="gnu-efi-libs"
-pkgver="${_pkgver}q"
+pkgver="${_pkgver}r"
pkgrel="1"
pkgdesc="Library for building x86_64 and i386 UEFI Applications using GNU toolchain"
url="http://sourceforge.net/projects/gnu-efi/"
@@ -19,8 +19,7 @@ conflicts=('gnu-efi')
provides=('gnu-efi')
replaces=('gnu-efi')
options=('!strip' '!makeflags')
-source=("http://download.sourceforge.net/gnu-efi/gnu-efi_${pkgver}.orig.tar.gz")
-sha1sums=('99d29be5fd17dfca762cf0e265477862c21d63bb')
+source=("http://download.sourceforge.net/gnu-efi/gnu-efi_${pkgver}.orig.tar.gz" gnu-efi-libs-x86_64-call-fix.patch)
_build_gnu-efi-libs-x86_64() {
cp -r "${srcdir}/gnu-efi-${_pkgver}" "${srcdir}/gnu-efi-${_pkgver}-x86_64"
@@ -36,12 +35,15 @@ _build_gnu-efi-libs-i386() {
ARCH="ia32" make -C apps all
}
-build() {
+build() {
+ cd ${srcdir}/gnu-efi-${_pkgver}
+ # fix http://sourceforge.net/tracker/?func=detail&aid=3576537&group_id=163609&atid=828423
+ patch -Np1 -i ../gnu-efi-libs-x86_64-call-fix.patch
## Fix Makefiles to enable compile for both UEFI arch
- sed 's|INSTALLROOT=/usr/local|INSTALLROOT ?= /usr/lib|g' -i "${srcdir}/gnu-efi-${_pkgver}/Make.defaults"
- sed 's|LIBDIR=lib|LIBDIR ?= lib|g' -i "${srcdir}/gnu-efi-${_pkgver}/Make.defaults"
- sed 's|ARCH :=|ARCH ?=|g' -i "${srcdir}/gnu-efi-${_pkgver}/Make.defaults"
- sed 's|-fno-strict-aliasing|-fno-strict-aliasing -fno-stack-protector|g' -i "${srcdir}/gnu-efi-${_pkgver}/Make.defaults"
+ sed 's|INSTALLROOT=/usr/local|INSTALLROOT ?= /usr/lib|g' -i Make.defaults
+ sed 's|LIBDIR=lib|LIBDIR ?= lib|g' -i Make.defaults
+ sed 's|ARCH :=|ARCH ?=|g' -i Make.defaults
+ sed 's|-fno-strict-aliasing|-fno-strict-aliasing -fno-stack-protector|g' -i Make.defaults
if [[ "${CARCH}" == "x86_64" ]]; then
_build_gnu-efi-libs-x86_64
fi
@@ -73,3 +75,5 @@ package() {
_package_gnu-efi-libs-i386
fi
}
+md5sums=('c5081850414bec34d3e254e8e970811b'
+ '4b5428c51af1981d3eb158d924d6bf85')
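
The build() changes above apply the x86_64 call fix before compiling and point the sed fixes at Make.defaults relative to the source tree. The 'ARCH :=' -> 'ARCH ?=' change is what lets the _build_gnu-efi-libs-x86_64 and _build_gnu-efi-libs-i386 helpers pick the target with an ARCH= make override. A rough shell sketch of the same flow outside makepkg follows; tarball, patch and directory names are taken from the PKGBUILD above, everything else (the -ia32 copy name, running from the download directory) is illustrative only.

    # Sketch only: reproduces build() by hand, assuming the source tarball
    # and the patch sit in the current directory.
    tar xf gnu-efi_3.0r.orig.tar.gz            # unpacks to gnu-efi-3.0
    cd gnu-efi-3.0

    # fix for http://sourceforge.net/tracker/?func=detail&aid=3576537&group_id=163609&atid=828423
    patch -Np1 -i ../gnu-efi-libs-x86_64-call-fix.patch

    # same Make.defaults tweaks the PKGBUILD performs with sed
    sed -e 's|INSTALLROOT=/usr/local|INSTALLROOT ?= /usr/lib|g' \
        -e 's|LIBDIR=lib|LIBDIR ?= lib|g' \
        -e 's|ARCH :=|ARCH ?=|g' \
        -e 's|-fno-strict-aliasing|-fno-strict-aliasing -fno-stack-protector|g' \
        -i Make.defaults
    cd ..

    # with ARCH ?= in place, one copy of the tree can be built per UEFI arch
    cp -r gnu-efi-3.0 gnu-efi-3.0-x86_64
    ( cd gnu-efi-3.0-x86_64 && ARCH=x86_64 make && ARCH=x86_64 make -C apps all )
    cp -r gnu-efi-3.0 gnu-efi-3.0-ia32
    ( cd gnu-efi-3.0-ia32 && ARCH=ia32 make && ARCH=ia32 make -C apps all )
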
diff --git a/extra/gnu-efi-libs/gnu-efi-libs-x86_64-call-fix.patch b/extra/gnu-efi-libs/gnu-efi-libs-x86_64-call-fix.patch
new file mode 100644
index 000000000..8bda35c6b
--- /dev/null
+++ b/extra/gnu-efi-libs/gnu-efi-libs-x86_64-call-fix.patch
@@ -0,0 +1,327 @@
+diff -ur gnu-efi-3.0/lib/x86_64/efi_stub.S gnu-efi-3.0-mod/lib/x86_64/efi_stub.S
+--- gnu-efi-3.0/lib/x86_64/efi_stub.S 2012-04-30 17:35:44.000000000 +0200
++++ gnu-efi-3.0-mod/lib/x86_64/efi_stub.S 2012-10-12 00:39:09.300289985 +0200
+@@ -4,6 +4,7 @@
+ * Copyright (C) 2007 Intel Corp
+ * Bibo Mao <bibo.mao@intel.com>
+ * Huang Ying <ying.huang@intel.com>
++ * Copyright (C) 2012 Felipe Contreras <felipe.contreras@gmail.com>
+ */
+
+ /*
+@@ -14,152 +15,94 @@
+ *
+ * Basically here are the conversion rules:
+ * a) our function pointer is in %rdi
+- * b) ELF gives us 8-byte aligned %rsp, so we need to pad out to 16-byte
+- * alignment.
+- * c) inside each call thunker, we can only adjust the stack by
+- * multiples of 16 bytes. "offset" below refers to however much
+- * we allocate inside a thunker.
+- * d) rsi through r8 (elf) aka rcx through r9 (ms) require stack space
++ * b) rsi through r8 (elf) aka rcx through r9 (ms) require stack space
+ * on the MS side even though it's not getting used at all.
+- * e) arguments are as follows: (elf -> ms)
++ * c) 8(%rsp) is always aligned to 16 in ELF, so %rsp is shifted 8 bytes extra
++ * d) arguments are as follows: (elf -> ms)
+ * 1) rdi -> rcx (32 saved)
+ * 2) rsi -> rdx (32 saved)
+- * 3) rdx -> r8 ( 32 saved)
++ * 3) rdx -> r8 (32 saved)
+ * 4) rcx -> r9 (32 saved)
+- * 5) r8 -> 32(%rsp) (48 saved)
++ * 5) r8 -> 32(%rsp) (32 saved)
+ * 6) r9 -> 40(%rsp) (48 saved)
+- * 7) pad+offset+0(%rsp) -> 48(%rsp) (64 saved)
+- * 8) pad+offset+8(%rsp) -> 56(%rsp) (64 saved)
+- * 9) pad+offset+16(%rsp) -> 64(%rsp) (80 saved)
+- * 10) pad+offset+24(%rsp) -> 72(%rsp) (80 saved)
+- * 11) pad+offset+32(%rsp) -> 80(%rsp) (96 saved)
+- * 12) pad+offset+40(%rsp) -> 88(%rsp) (96 saved)
+- * f) because the first argument we recieve in a thunker is actually the
++ * 7) 8(%rsp) -> 48(%rsp) (48 saved)
++ * 8) 16(%rsp) -> 56(%rsp) (64 saved)
++ * 9) 24(%rsp) -> 64(%rsp) (64 saved)
++ * 10) 32(%rsp) -> 72(%rsp) (80 saved)
++ * e) because the first argument we recieve in a thunker is actually the
+ * function to be called, arguments are offset as such:
+ * 0) rdi -> caller
+ * 1) rsi -> rcx (32 saved)
+ * 2) rdx -> rdx (32 saved)
+ * 3) rcx -> r8 (32 saved)
+ * 4) r8 -> r9 (32 saved)
+- * 5) r9 -> 32(%rsp) (48 saved)
+- * 6) pad+offset+0(%rsp) -> 40(%rsp) (48 saved)
+- * 7) pad+offset+8(%rsp) -> 48(%rsp) (64 saved)
+- * 8) pad+offset+16(%rsp) -> 56(%rsp) (64 saved)
+- * 9) pad+offset+24(%rsp) -> 64(%rsp) (80 saved)
+- * 10) pad+offset+32(%rsp) -> 72(%rsp) (80 saved)
+- * 11) pad+offset+40(%rsp) -> 80(%rsp) (96 saved)
+- * 12) pad+offset+48(%rsp) -> 88(%rsp) (96 saved)
+- * e) arguments need to be moved in opposite order to avoid clobbering
+- * f) pad_stack leaves the amount of padding it added in %r11 for functions
+- * to use
+- * g) efi -> elf calls don't need to pad the stack, because the 16-byte
+- * alignment is also always 8-byte aligned.
++ * 5) r9 -> 32(%rsp) (32 saved)
++ * 6) 8(%rsp) -> 40(%rsp) (48 saved)
++ * 7) 16(%rsp) -> 48(%rsp) (48 saved)
++ * 8) 24(%rsp) -> 56(%rsp) (64 saved)
++ * 9) 32(%rsp) -> 64(%rsp) (64 saved)
++ * 10) 40(%rsp) -> 72(%rsp) (80 saved)
++ * f) arguments need to be moved in opposite order to avoid clobbering
+ */
+
+ #define ENTRY(name) \
+ .globl name; \
+ name:
+
+-#define out(val) \
+- push %rax ; \
+- mov val, %rax ; \
+- out %al, $128 ; \
+- pop %rax
+-
+-#define pad_stack \
+- subq $8, %rsp ; /* must be a multiple of 16 - sizeof(%rip) */ \
+- /* stash some handy integers */ \
+- mov $0x8, %rax ; \
+- mov $0x10, %r10 ; \
+- /* see if we need padding */ \
+- and %rsp, %rax ; \
+- /* store the pad amount in %r11 */ \
+- cmovnz %rax, %r11 ; \
+- cmovz %r10, %r11 ; \
+- /* insert the padding */ \
+- subq %r11, %rsp ; \
+- /* add the $8 we saved above in %r11 */ \
+- addq $8, %r11 ; \
+- /* store the pad amount */ \
+- mov %r11, (%rsp) ; \
+- /* compensate for %rip being stored on the stack by call */ \
+- addq $8, %r11
+-
+-#define unpad_stack \
+- /* fetch the pad amount we saved (%r11 has been clobbered) */ \
+- mov (%rsp), %r11 ; \
+- /* remove the padding */ \
+- addq %r11, %rsp
+-
+ ENTRY(efi_call0)
+- pad_stack
+- subq $32, %rsp
++ subq $40, %rsp
+ call *%rdi
+- addq $32, %rsp
+- unpad_stack
++ addq $40, %rsp
+ ret
+
+ ENTRY(efi_call1)
+- pad_stack
+- subq $32, %rsp
++ subq $40, %rsp
+ mov %rsi, %rcx
+ call *%rdi
+- addq $32, %rsp
+- unpad_stack
++ addq $40, %rsp
+ ret
+
+ ENTRY(efi_call2)
+- pad_stack
+- subq $32, %rsp
++ subq $40, %rsp
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+- addq $32, %rsp
+- unpad_stack
++ addq $40, %rsp
+ ret
+
+ ENTRY(efi_call3)
+- pad_stack
+- subq $32, %rsp
++ subq $40, %rsp
+ mov %rcx, %r8
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+- addq $32, %rsp
+- unpad_stack
++ addq $40, %rsp
+ ret
+
+ ENTRY(efi_call4)
+- pad_stack
+- subq $32, %rsp
++ subq $40, %rsp
+ mov %r8, %r9
+ mov %rcx, %r8
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+- addq $32, %rsp
+- unpad_stack
++ addq $40, %rsp
+ ret
+
+ ENTRY(efi_call5)
+- pad_stack
+- subq $48, %rsp
++ subq $40, %rsp
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+ mov %rcx, %r8
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+- addq $48, %rsp
+- unpad_stack
++ addq $40, %rsp
+ ret
+
+ ENTRY(efi_call6)
+- pad_stack
+- subq $48, %rsp
+- addq $48, %r11
+- addq %rsp, %r11
+- mov (%r11), %rax
++ subq $56, %rsp
++ mov 56+8(%rsp), %rax
+ mov %rax, 40(%rsp)
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+@@ -167,20 +110,14 @@
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+- addq $48, %rsp
+- unpad_stack
++ addq $56, %rsp
+ ret
+
+ ENTRY(efi_call7)
+- pad_stack
+- subq $64, %rsp
+- addq $64, %r11
+- addq $8, %r11
+- addq %rsp, %r11
+- mov (%r11), %rax
++ subq $56, %rsp
++ mov 56+16(%rsp), %rax
+ mov %rax, 48(%rsp)
+- subq $8, %r11
+- mov (%r11), %rax
++ mov 56+8(%rsp), %rax
+ mov %rax, 40(%rsp)
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+@@ -188,23 +125,16 @@
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+- addq $64, %rsp
+- unpad_stack
++ addq $56, %rsp
+ ret
+
+ ENTRY(efi_call8)
+- pad_stack
+- subq $64, %rsp
+- addq $64, %r11
+- addq $16, %r11
+- addq %rsp, %r11
+- mov (%r11), %rax
++ subq $72, %rsp
++ mov 72+24(%rsp), %rax
+ mov %rax, 56(%rsp)
+- subq $8, %r11
+- mov (%r11), %rax
++ mov 72+16(%rsp), %rax
+ mov %rax, 48(%rsp)
+- subq $8, %r11
+- mov (%r11), %rax
++ mov 72+8(%rsp), %rax
+ mov %rax, 40(%rsp)
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+@@ -212,26 +142,18 @@
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+- addq $64, %rsp
+- unpad_stack
++ addq $72, %rsp
+ ret
+
+ ENTRY(efi_call9)
+- pad_stack
+- subq $80, %rsp
+- addq $80, %r11
+- addq $24, %r11
+- addq %rsp, %r11
+- mov (%r11), %rax
++ subq $72, %rsp
++ mov 72+32(%rsp), %rax
+ mov %rax, 64(%rsp)
+- subq $8, %r11
+- mov (%r11), %rax
++ mov 72+24(%rsp), %rax
+ mov %rax, 56(%rsp)
+- subq $8, %r11
+- mov (%r11), %rax
++ mov 72+16(%rsp), %rax
+ mov %rax, 48(%rsp)
+- subq $8, %r11
+- mov (%r11), %rax
++ mov 72+8(%rsp), %rax
+ mov %rax, 40(%rsp)
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+@@ -239,29 +161,20 @@
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+- addq $80, %rsp
+- unpad_stack
++ addq $72, %rsp
+ ret
+
+ ENTRY(efi_call10)
+- pad_stack
+- subq $80, %rsp
+- addq $80, %r11
+- addq $32, %r11
+- addq %rsp, %r11
+- mov (%r11), %rax
++ subq $88, %rsp
++ mov 88+40(%rsp), %rax
+ mov %rax, 72(%rsp)
+- subq $8, %r11
+- mov (%r11), %rax
++ mov 88+32(%rsp), %rax
+ mov %rax, 64(%rsp)
+- subq $8, %r11
+- mov (%r11), %rax
++ mov 88+24(%rsp), %rax
+ mov %rax, 56(%rsp)
+- subq $8, %r11
+- mov (%r11), %rax
++ mov 88+16(%rsp), %rax
+ mov %rax, 48(%rsp)
+- subq $8, %r11
+- mov (%r11), %rax
++ mov 88+8(%rsp), %rax
+ mov %rax, 40(%rsp)
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+@@ -269,6 +182,5 @@
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+- addq $80, %rsp
+- unpad_stack
++ addq $88, %rsp
+ ret
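
The conversion rules at the top of the patched efi_stub.S translate into the fixed frame sizes used by the rewritten thunks: since the ELF ABI guarantees %rsp == 8 (mod 16) on entry, subtracting 40, 56, 72 or 88 bytes leaves %rsp 16-byte aligned at the call *%rdi while still covering the 32 bytes of MS-ABI shadow space plus any stack-passed arguments, which is why the old pad_stack/unpad_stack machinery can be dropped. One way to sanity-check the rebuilt stub is to disassemble the static library; a hedged sketch, assuming the gnu-efi-3.0-x86_64 tree built by the PKGBUILD and gnu-efi's usual lib/libefi.a output path:

    # Sketch only: confirm the patched efi_call5 prologue in the built library.
    # The path assumes the x86_64 copy of the tree created during build().
    objdump -d gnu-efi-3.0-x86_64/lib/libefi.a | sed -n '/<efi_call5>:/,/ret/p'

    # Expected: the thunk now opens with "sub $0x28,%rsp" (40 bytes) and closes
    # with "add $0x28,%rsp" followed by ret, with no pad_stack sequence left.
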