scsi-libsas-fix-false-positive-device-attached-conditions.patch
efi-add-new-variable-attributes.patch
efi-validate-uefi-boot-variables.patch
+x86-efi-fix-pointer-math-issue-in-handle_ramdisks.patch
+tools-include-add-byteshift-headers-for-endian-access.patch
+x86-mkpiggy-don-t-open-code-put_unaligned_le32.patch
+x86-boot-restrict-cflags-for-hostprogs.patch
+x86-efi-fix-endian-issues-and-unaligned-accesses.patch
+x86-boot-correct-cflags-for-hostprogs.patch
+x86-efi-add-dedicated-efi-stub-entry-point.patch
--- /dev/null
+From a07f7672d7cf0ff0d6e548a9feb6e0bd016d9c6c Mon Sep 17 00:00:00 2001
+From: Matt Fleming <matt.fleming@intel.com>
+Date: Tue, 28 Feb 2012 13:37:20 +0000
+Subject: tools/include: Add byteshift headers for endian access
+
+From: Matt Fleming <matt.fleming@intel.com>
+
+commit a07f7672d7cf0ff0d6e548a9feb6e0bd016d9c6c upstream.
+
+There are various hostprogs in the kernel that are rolling their own
+implementations of {get,put}_unaligned_le*(). Copy the byteshift
+headers from include/linux/unaligned so that they can all use a single
+implementation.
+
+This requires changing some of the data types to the userspace
+exported ones (u32 -> __u32, etc).
+
+Signed-off-by: Matt Fleming <matt.fleming@intel.com>
+Link: http://lkml.kernel.org/r/1330436245-24875-2-git-send-email-matt@console-pimps.org
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/include/tools/be_byteshift.h | 70 +++++++++++++++++++++++++++++++++++++
+ tools/include/tools/le_byteshift.h | 70 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 140 insertions(+)
+
+--- /dev/null
++++ b/tools/include/tools/be_byteshift.h
+@@ -0,0 +1,70 @@
++#ifndef _TOOLS_BE_BYTESHIFT_H
++#define _TOOLS_BE_BYTESHIFT_H
++
++#include <linux/types.h>
++
++static inline __u16 __get_unaligned_be16(const __u8 *p)
++{
++ return p[0] << 8 | p[1];
++}
++
++static inline __u32 __get_unaligned_be32(const __u8 *p)
++{
++ return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
++}
++
++static inline __u64 __get_unaligned_be64(const __u8 *p)
++{
++ return (__u64)__get_unaligned_be32(p) << 32 |
++ __get_unaligned_be32(p + 4);
++}
++
++static inline void __put_unaligned_be16(__u16 val, __u8 *p)
++{
++ *p++ = val >> 8;
++ *p++ = val;
++}
++
++static inline void __put_unaligned_be32(__u32 val, __u8 *p)
++{
++ __put_unaligned_be16(val >> 16, p);
++ __put_unaligned_be16(val, p + 2);
++}
++
++static inline void __put_unaligned_be64(__u64 val, __u8 *p)
++{
++ __put_unaligned_be32(val >> 32, p);
++ __put_unaligned_be32(val, p + 4);
++}
++
++static inline __u16 get_unaligned_be16(const void *p)
++{
++ return __get_unaligned_be16((const __u8 *)p);
++}
++
++static inline __u32 get_unaligned_be32(const void *p)
++{
++ return __get_unaligned_be32((const __u8 *)p);
++}
++
++static inline __u64 get_unaligned_be64(const void *p)
++{
++ return __get_unaligned_be64((const __u8 *)p);
++}
++
++static inline void put_unaligned_be16(__u16 val, void *p)
++{
++ __put_unaligned_be16(val, p);
++}
++
++static inline void put_unaligned_be32(__u32 val, void *p)
++{
++ __put_unaligned_be32(val, p);
++}
++
++static inline void put_unaligned_be64(__u64 val, void *p)
++{
++ __put_unaligned_be64(val, p);
++}
++
++#endif /* _TOOLS_BE_BYTESHIFT_H */
+--- /dev/null
++++ b/tools/include/tools/le_byteshift.h
+@@ -0,0 +1,70 @@
++#ifndef _TOOLS_LE_BYTESHIFT_H
++#define _TOOLS_LE_BYTESHIFT_H
++
++#include <linux/types.h>
++
++static inline __u16 __get_unaligned_le16(const __u8 *p)
++{
++ return p[0] | p[1] << 8;
++}
++
++static inline __u32 __get_unaligned_le32(const __u8 *p)
++{
++ return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
++}
++
++static inline __u64 __get_unaligned_le64(const __u8 *p)
++{
++ return (__u64)__get_unaligned_le32(p + 4) << 32 |
++ __get_unaligned_le32(p);
++}
++
++static inline void __put_unaligned_le16(__u16 val, __u8 *p)
++{
++ *p++ = val;
++ *p++ = val >> 8;
++}
++
++static inline void __put_unaligned_le32(__u32 val, __u8 *p)
++{
++ __put_unaligned_le16(val >> 16, p + 2);
++ __put_unaligned_le16(val, p);
++}
++
++static inline void __put_unaligned_le64(__u64 val, __u8 *p)
++{
++ __put_unaligned_le32(val >> 32, p + 4);
++ __put_unaligned_le32(val, p);
++}
++
++static inline __u16 get_unaligned_le16(const void *p)
++{
++ return __get_unaligned_le16((const __u8 *)p);
++}
++
++static inline __u32 get_unaligned_le32(const void *p)
++{
++ return __get_unaligned_le32((const __u8 *)p);
++}
++
++static inline __u64 get_unaligned_le64(const void *p)
++{
++ return __get_unaligned_le64((const __u8 *)p);
++}
++
++static inline void put_unaligned_le16(__u16 val, void *p)
++{
++ __put_unaligned_le16(val, p);
++}
++
++static inline void put_unaligned_le32(__u32 val, void *p)
++{
++ __put_unaligned_le32(val, p);
++}
++
++static inline void put_unaligned_le64(__u64 val, void *p)
++{
++ __put_unaligned_le64(val, p);
++}
++
++#endif /* _TOOLS_LE_BYTESHIFT_H */
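
For reference, a hostprog consuming the helpers above looks roughly like
this. The value and buffer layout are made up for illustration, and the
build is assumed to have -I$(srctree)/tools/include on the host compiler
command line with <linux/types.h> reachable, which is what the Makefile
patches later in this series arrange.

#include <stdio.h>
#include <tools/le_byteshift.h>

int main(void)
{
	unsigned char buf[8] = { 0 };

	/* Store a 32-bit value as little-endian bytes at an odd offset;
	 * the byte-at-a-time helpers are endian-safe even on a
	 * big-endian build host and never perform an unaligned store. */
	put_unaligned_le32(0x1badb002, &buf[1]);

	/* Read it back the same way. */
	printf("0x%08x\n", (unsigned int)get_unaligned_le32(&buf[1]));

	return 0;
}
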
--- /dev/null
+From 446e1c86d51d0823e003a43a2b85c430efce2733 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@zytor.com>
+Date: Thu, 22 Mar 2012 11:08:18 -0700
+Subject: x86, boot: Correct CFLAGS for hostprogs
+
+From: "H. Peter Anvin" <hpa@zytor.com>
+
+commit 446e1c86d51d0823e003a43a2b85c430efce2733 upstream.
+
+This is a partial revert of commit:
+ d40f833 "Restrict CFLAGS for hostprogs"
+
+The endian-manipulation macros in tools/include need <linux/types.h>,
+but the hostprogs in arch/x86/boot need several headers from the
+kernel build tree, which means we have to add the kernel headers to
+the include path. This picks up <linux/types.h> from the kernel tree,
+which gives a warning.
+
+Since this use of <linux/types.h> is intentional, add
+-D__EXPORTED_HEADERS__ to the command line to silence the warning.
+
+A better way to fix this would be to always install the exported
+kernel headers into $(objtree)/usr/include as a standard part of the
+kernel build, but that is a lot more involved.
+
+Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
+Acked-by: Matt Fleming <matt.fleming@intel.com>
+Link: http://lkml.kernel.org/r/1330436245-24875-5-git-send-email-matt@console-pimps.org
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/boot/Makefile | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -37,9 +37,9 @@ setup-y += video-bios.o
+ targets += $(setup-y)
+ hostprogs-y := mkcpustr tools/build
+
+-HOSTCFLAGS_mkcpustr.o := -I$(srctree)/arch/$(SRCARCH)/include
+-HOST_EXTRACFLAGS += -I$(objtree)/include -I$(srctree)/tools/include \
+- -include $(srctree)/include/linux/kconfig.h
++HOST_EXTRACFLAGS += -I$(srctree)/tools/include $(LINUXINCLUDE) \
++ -D__EXPORTED_HEADERS__
++
+ $(obj)/cpu.o: $(obj)/cpustr.h
+
+ quiet_cmd_cpustr = CPUSTR $@
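
The warning mentioned in the changelog above is produced by a user-space
guard in the kernel's exported headers. The exact header and wording are
not reproduced here; the mechanism is roughly the following sketch, which
is why defining __EXPORTED_HEADERS__ on the host compiler command line
silences it.

/* Sketch only -- not the literal upstream text. */
#ifndef __KERNEL__
# ifndef __EXPORTED_HEADERS__
#  warning "kernel header included from user space without being exported"
# endif
#endif
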
--- /dev/null
+From d40f833630a1299fd377408dc8d8fac370d621b0 Mon Sep 17 00:00:00 2001
+From: Matt Fleming <matt.fleming@intel.com>
+Date: Tue, 28 Feb 2012 13:37:23 +0000
+Subject: x86, boot: Restrict CFLAGS for hostprogs
+
+From: Matt Fleming <matt.fleming@intel.com>
+
+commit d40f833630a1299fd377408dc8d8fac370d621b0 upstream.
+
+Currently tools/build has access to all the kernel headers in
+$(srctree). This is unnecessary and could potentially allow
+tools/build to erroneously include kernel headers when it should only
+be including userspace-exported headers.
+
+Unfortunately, mkcpustr still needs access to some of the asm kernel
+headers, so explicitly special case that hostprog.
+
+Cc: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Matt Fleming <matt.fleming@intel.com>
+Link: http://lkml.kernel.org/r/1330436245-24875-5-git-send-email-matt@console-pimps.org
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/boot/Makefile | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -37,8 +37,9 @@ setup-y += video-bios.o
+ targets += $(setup-y)
+ hostprogs-y := mkcpustr tools/build
+
+-HOST_EXTRACFLAGS += $(LINUXINCLUDE)
+-
++HOSTCFLAGS_mkcpustr.o := -I$(srctree)/arch/$(SRCARCH)/include
++HOST_EXTRACFLAGS += -I$(objtree)/include -I$(srctree)/tools/include \
++ -include $(srctree)/include/linux/kconfig.h
+ $(obj)/cpu.o: $(obj)/cpustr.h
+
+ quiet_cmd_cpustr = CPUSTR $@
--- /dev/null
+From b1994304fc399f5d3a5368c81111d713490c4799 Mon Sep 17 00:00:00 2001
+From: Matt Fleming <matt.fleming@intel.com>
+Date: Sun, 15 Apr 2012 16:06:04 +0100
+Subject: x86, efi: Add dedicated EFI stub entry point
+
+From: Matt Fleming <matt.fleming@intel.com>
+
+commit b1994304fc399f5d3a5368c81111d713490c4799 upstream.
+
+The method used to work out whether we were booted by EFI firmware or
+via a boot loader is broken. Because efi_main() is always executed
+when booting from a boot loader we will dereference invalid pointers
+either on the stack (CONFIG_X86_32) or contained in %rdx
+(CONFIG_X86_64) when searching for an EFI System Table signature.
+
+Instead of dereferencing these invalid system table pointers, add a
+new entry point that is only used when booting from EFI firmware, when
+we know the pointer arguments will be valid. With this change legacy
+boot loaders will no longer execute efi_main(), but will instead skip
+EFI stub initialisation completely.
+
+[ hpa: Marking this for urgent/stable since it is a regression when
+ the option is enabled; without the option the patch has no effect ]
+
+Signed-off-by: Matt Fleming <matt.fleming@intel.com>
+Link: http://lkml.kernel.org/r/1334584744.26997.14.camel@mfleming-mobl1.ger.corp.intel.com
+Reported-by: Jordan Justen <jordan.l.justen@intel.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/boot/compressed/head_32.S | 14 +++++++++++---
+ arch/x86/boot/compressed/head_64.S | 22 ++++++++++++++++------
+ arch/x86/boot/tools/build.c | 15 +++++++++++----
+ 3 files changed, 38 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -33,6 +33,9 @@
+ __HEAD
+ ENTRY(startup_32)
+ #ifdef CONFIG_EFI_STUB
++ jmp preferred_addr
++
++ .balign 0x10
+ /*
+ * We don't need the return address, so set up the stack so
+ * efi_main() can find its arugments.
+@@ -41,12 +44,17 @@ ENTRY(startup_32)
+
+ call efi_main
+ cmpl $0, %eax
+- je preferred_addr
+ movl %eax, %esi
+- call 1f
++ jne 2f
+ 1:
++ /* EFI init failed, so hang. */
++ hlt
++ jmp 1b
++2:
++ call 3f
++3:
+ popl %eax
+- subl $1b, %eax
++ subl $3b, %eax
+ subl BP_pref_address(%esi), %eax
+ add BP_code32_start(%esi), %eax
+ leal preferred_addr(%eax), %eax
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -200,18 +200,28 @@ ENTRY(startup_64)
+ * entire text+data+bss and hopefully all of memory.
+ */
+ #ifdef CONFIG_EFI_STUB
+- pushq %rsi
++ /*
++ * The entry point for the PE/COFF executable is 0x210, so only
++ * legacy boot loaders will execute this jmp.
++ */
++ jmp preferred_addr
++
++ .org 0x210
+ mov %rcx, %rdi
+ mov %rdx, %rsi
+ call efi_main
+- popq %rsi
+- cmpq $0,%rax
+- je preferred_addr
+ movq %rax,%rsi
+- call 1f
++ cmpq $0,%rax
++ jne 2f
+ 1:
++ /* EFI init failed, so hang. */
++ hlt
++ jmp 1b
++2:
++ call 3f
++3:
+ popq %rax
+- subq $1b, %rax
++ subq $3b, %rax
+ subq BP_pref_address(%rsi), %rax
+ add BP_code32_start(%esi), %eax
+ leaq preferred_addr(%rax), %rax
+--- a/arch/x86/boot/tools/build.c
++++ b/arch/x86/boot/tools/build.c
+@@ -207,8 +207,13 @@ int main(int argc, char ** argv)
+ put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
+
+ #ifdef CONFIG_X86_32
+- /* Address of entry point */
+- put_unaligned_le32(i, &buf[pe_header + 0x28]);
++ /*
++ * Address of entry point.
++ *
++ * The EFI stub entry point is +16 bytes from the start of
++ * the .text section.
++ */
++ put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
+
+ /* .text size */
+ put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
+@@ -219,9 +224,11 @@ int main(int argc, char ** argv)
+ /*
+ * Address of entry point. startup_32 is at the beginning and
+ * the 64-bit entry point (startup_64) is always 512 bytes
+- * after.
++ * after. The EFI stub entry point is 16 bytes after that, as
++ * the first instruction allows legacy loaders to jump over
++ * the EFI stub initialisation
+ */
+- put_unaligned_le32(i + 512, &buf[pe_header + 0x28]);
++ put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
+
+ /* .text size */
+ put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
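
The offsets patched by build.c above decode as follows: buf[0x3c] in the
MZ stub holds the file offset of the PE header, and AddressOfEntryPoint
sits 0x28 bytes into that header (4-byte "PE\0\0" signature, 20-byte COFF
file header, then 16 bytes into the optional header). Below is a sketch of
the same arithmetic; the helper name is hypothetical and setup_size stands
in for build.c's 'i'.

#include <tools/le_byteshift.h>

/* Illustration only: set the PE AddressOfEntryPoint in a flat bzImage
 * buffer to the dedicated EFI stub entry, mirroring build.c above. */
void set_efi_stub_entry(unsigned char *image, unsigned int setup_size,
			int is_x86_64)
{
	unsigned int pe_header = get_unaligned_le32(&image[0x3c]);
	unsigned int entry;

	if (is_x86_64)
		/* startup_64 is 512 bytes past startup_32; the stub
		 * entry is the .org 0x210 slot, 16 bytes after that. */
		entry = setup_size + 512 + 16;
	else
		/* 32-bit: the stub entry is the .balign 0x10 slot,
		 * 16 bytes past startup_32. */
		entry = setup_size + 16;

	put_unaligned_le32(entry, &image[pe_header + 0x28]);
}
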
--- /dev/null
+From 92f42c50f227ad228f815a8f4eec872524dae3a5 Mon Sep 17 00:00:00 2001
+From: Matt Fleming <matt.fleming@intel.com>
+Date: Tue, 28 Feb 2012 13:37:24 +0000
+Subject: x86, efi: Fix endian issues and unaligned accesses
+
+From: Matt Fleming <matt.fleming@intel.com>
+
+commit 92f42c50f227ad228f815a8f4eec872524dae3a5 upstream.
+
+We may need to convert the endianness of the data we read from/write
+to 'buf', so let's use {get,put}_unaligned_le32() to do that. Failure
+to do so can result in accessing invalid memory, leading to a
+segfault. Stephen Rothwell noticed this bug while cross-building an
+x86_64 allmodconfig kernel on PowerPC.
+
+We need to read from and write to 'buf' a byte at a time otherwise
+it's possible we'll perform an unaligned access, which can lead to bus
+errors when cross-building an x86 kernel on RISC architectures.
+
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Nick Bowler <nbowler@elliptictech.com>
+Tested-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Signed-off-by: Matt Fleming <matt.fleming@intel.com>
+Link: http://lkml.kernel.org/r/1330436245-24875-6-git-send-email-matt@console-pimps.org
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/boot/tools/build.c | 31 +++++++++++++++----------------
+ 1 file changed, 15 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/boot/tools/build.c
++++ b/arch/x86/boot/tools/build.c
+@@ -34,6 +34,7 @@
+ #include <fcntl.h>
+ #include <sys/mman.h>
+ #include <asm/boot.h>
++#include <tools/le_byteshift.h>
+
+ typedef unsigned char u8;
+ typedef unsigned short u16;
+@@ -41,6 +42,7 @@ typedef unsigned long u32;
+
+ #define DEFAULT_MAJOR_ROOT 0
+ #define DEFAULT_MINOR_ROOT 0
++#define DEFAULT_ROOT_DEV (DEFAULT_MAJOR_ROOT << 8 | DEFAULT_MINOR_ROOT)
+
+ /* Minimal number of setup sectors */
+ #define SETUP_SECT_MIN 5
+@@ -159,7 +161,7 @@ int main(int argc, char ** argv)
+ die("read-error on `setup'");
+ if (c < 1024)
+ die("The setup must be at least 1024 bytes");
+- if (buf[510] != 0x55 || buf[511] != 0xaa)
++ if (get_unaligned_le16(&buf[510]) != 0xAA55)
+ die("Boot block hasn't got boot flag (0xAA55)");
+ fclose(file);
+
+@@ -171,8 +173,7 @@ int main(int argc, char ** argv)
+ memset(buf+c, 0, i-c);
+
+ /* Set the default root device */
+- buf[508] = DEFAULT_MINOR_ROOT;
+- buf[509] = DEFAULT_MAJOR_ROOT;
++ put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
+
+ fprintf(stderr, "Setup is %d bytes (padded to %d bytes).\n", c, i);
+
+@@ -192,44 +193,42 @@ int main(int argc, char ** argv)
+
+ /* Patch the setup code with the appropriate size parameters */
+ buf[0x1f1] = setup_sectors-1;
+- buf[0x1f4] = sys_size;
+- buf[0x1f5] = sys_size >> 8;
+- buf[0x1f6] = sys_size >> 16;
+- buf[0x1f7] = sys_size >> 24;
++ put_unaligned_le32(sys_size, &buf[0x1f4]);
+
+ #ifdef CONFIG_EFI_STUB
+ file_sz = sz + i + ((sys_size * 16) - sz);
+
+- pe_header = *(unsigned int *)&buf[0x3c];
++ pe_header = get_unaligned_le32(&buf[0x3c]);
+
+ /* Size of code */
+- *(unsigned int *)&buf[pe_header + 0x1c] = file_sz;
++ put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]);
+
+ /* Size of image */
+- *(unsigned int *)&buf[pe_header + 0x50] = file_sz;
++ put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
+
+ #ifdef CONFIG_X86_32
+ /* Address of entry point */
+- *(unsigned int *)&buf[pe_header + 0x28] = i;
++ put_unaligned_le32(i, &buf[pe_header + 0x28]);
+
+ /* .text size */
+- *(unsigned int *)&buf[pe_header + 0xb0] = file_sz;
++ put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
+
+ /* .text size of initialised data */
+- *(unsigned int *)&buf[pe_header + 0xb8] = file_sz;
++ put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]);
+ #else
+ /*
+ * Address of entry point. startup_32 is at the beginning and
+ * the 64-bit entry point (startup_64) is always 512 bytes
+ * after.
+ */
+- *(unsigned int *)&buf[pe_header + 0x28] = i + 512;
++ put_unaligned_le32(i + 512, &buf[pe_header + 0x28]);
+
+ /* .text size */
+- *(unsigned int *)&buf[pe_header + 0xc0] = file_sz;
++ put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
+
+ /* .text size of initialised data */
+- *(unsigned int *)&buf[pe_header + 0xc8] = file_sz;
++ put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]);
++
+ #endif /* CONFIG_X86_32 */
+ #endif /* CONFIG_EFI_STUB */
+
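
The bug class fixed above is worth spelling out: the old code type-punned
an arbitrary byte offset to unsigned int and performed a native-endian
store. A before/after sketch (hypothetical helper, same idea as the hunks
above):

#include <tools/le_byteshift.h>

void patch_size_field(unsigned char *buf, unsigned int off, unsigned int file_sz)
{
	/* Old style: native-endian and possibly unaligned.  On a
	 * big-endian build host the four bytes land in reversed order,
	 * and on strict-alignment hosts the cast store can fault. */
	/* *(unsigned int *)&buf[off] = file_sz; */

	/* New style: explicit little-endian byte order, written one
	 * byte at a time, safe on any build host. */
	put_unaligned_le32(file_sz, &buf[off]);
}
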
--- /dev/null
+From c7b738351ba92f48b943ac59aff6b5b0f17f37c9 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Mon, 5 Mar 2012 21:06:14 +0300
+Subject: x86, efi: Fix pointer math issue in handle_ramdisks()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit c7b738351ba92f48b943ac59aff6b5b0f17f37c9 upstream.
+
+"filename" is a efi_char16_t string so this check for reaching the end
+of the array doesn't work. We need to cast the pointer to (u8 *) before
+doing the math.
+
+This patch changes the "filename" to "filename_16" to avoid confusion in
+the future.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Link: http://lkml.kernel.org/r/20120305180614.GA26880@elgon.mountain
+Acked-by: Matt Fleming <matt.fleming@intel.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/boot/compressed/eboot.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/boot/compressed/eboot.c
++++ b/arch/x86/boot/compressed/eboot.c
+@@ -539,7 +539,7 @@ static efi_status_t handle_ramdisks(efi_
+ struct initrd *initrd;
+ efi_file_handle_t *h;
+ efi_file_info_t *info;
+- efi_char16_t filename[256];
++ efi_char16_t filename_16[256];
+ unsigned long info_sz;
+ efi_guid_t info_guid = EFI_FILE_INFO_ID;
+ efi_char16_t *p;
+@@ -552,14 +552,14 @@ static efi_status_t handle_ramdisks(efi_
+ str += 7;
+
+ initrd = &initrds[i];
+- p = filename;
++ p = filename_16;
+
+ /* Skip any leading slashes */
+ while (*str == '/' || *str == '\\')
+ str++;
+
+ while (*str && *str != ' ' && *str != '\n') {
+- if (p >= filename + sizeof(filename))
++ if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
+ break;
+
+ *p++ = *str++;
+@@ -583,7 +583,7 @@ static efi_status_t handle_ramdisks(efi_
+ goto free_initrds;
+ }
+
+- status = efi_call_phys5(fh->open, fh, &h, filename,
++ status = efi_call_phys5(fh->open, fh, &h, filename_16,
+ EFI_FILE_MODE_READ, (u64)0);
+ if (status != EFI_SUCCESS)
+ goto close_handles;
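
The check fixed above trips over C pointer scaling: sizeof() yields bytes,
but adding that value to an efi_char16_t pointer advances in 2-byte
elements, so the old bound sat twice as far out as intended. A standalone
sketch (the typedef is a stand-in for the real EFI one):

#include <stdio.h>

typedef unsigned short efi_char16_t;	/* stand-in for the EFI typedef */

int main(void)
{
	efi_char16_t filename_16[256];
	efi_char16_t *p = filename_16 + 10;	/* ten characters in */

	/* Pointer arithmetic on efi_char16_t * works in 2-byte elements,
	 * but sizeof() yields bytes, so the old bound
	 * "filename + sizeof(filename)" lay 512 elements (1024 bytes)
	 * past the start of a 512-byte array. */
	printf("p is %td elements / %td bytes into the buffer\n",
	       p - filename_16,
	       (char *)p - (char *)filename_16);

	/* The fix compares byte addresses on both sides:
	 *   (u8 *)p >= (u8 *)filename_16 + sizeof(filename_16)
	 * so the bound is the true end of the buffer. */
	return 0;
}
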
--- /dev/null
+From 12871c568305a0b20f116315479a18cd46882e9b Mon Sep 17 00:00:00 2001
+From: Matt Fleming <matt.fleming@intel.com>
+Date: Tue, 28 Feb 2012 13:37:22 +0000
+Subject: x86, mkpiggy: Don't open code put_unaligned_le32()
+
+From: Matt Fleming <matt.fleming@intel.com>
+
+commit 12871c568305a0b20f116315479a18cd46882e9b upstream.
+
+Use the new headers in tools/include instead of rolling our own
+put_unaligned_le32() implementation.
+
+Cc: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Matt Fleming <matt.fleming@intel.com>
+Link: http://lkml.kernel.org/r/1330436245-24875-4-git-send-email-matt@console-pimps.org
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/boot/compressed/Makefile | 1 +
+ arch/x86/boot/compressed/mkpiggy.c | 11 ++---------
+ 2 files changed, 3 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -22,6 +22,7 @@ LDFLAGS := -m elf_$(UTS_MACHINE)
+ LDFLAGS_vmlinux := -T
+
+ hostprogs-y := mkpiggy
++HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+
+ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
+ $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \
+--- a/arch/x86/boot/compressed/mkpiggy.c
++++ b/arch/x86/boot/compressed/mkpiggy.c
+@@ -29,14 +29,7 @@
+ #include <stdio.h>
+ #include <string.h>
+ #include <inttypes.h>
+-
+-static uint32_t getle32(const void *p)
+-{
+- const uint8_t *cp = p;
+-
+- return (uint32_t)cp[0] + ((uint32_t)cp[1] << 8) +
+- ((uint32_t)cp[2] << 16) + ((uint32_t)cp[3] << 24);
+-}
++#include <tools/le_byteshift.h>
+
+ int main(int argc, char *argv[])
+ {
+@@ -69,7 +62,7 @@ int main(int argc, char *argv[])
+ }
+
+ ilen = ftell(f);
+- olen = getle32(&olen);
++ olen = get_unaligned_le32(&olen);
+ fclose(f);
+
+ /*