jfs-fix-regression-preventing-coalescing-of-extents.patch
spi-takes-size-of-a-pointer-to-determine-the-size-of-the-pointed-to-type.patch
serial-bfin_5xx-add-missing-spin_lock-init.patch
+x86-memtest-remove-64-bit-division.patch
+x86-fix-uv-bau-activation-descriptor-init.patch
+x86-uv-fix-macros-for-multiple-coherency-domains.patch
+x86-enable-gart-iommu-only-after-setting-up-protection-methods.patch
+x86-move-rdtsc_barrier-into-the-tsc-vread-method.patch
+x86-fix-uv-bau-sending-buffer-initialization.patch
+x86-add-quirk-for-reboot-stalls-on-a-dell-optiplex-360.patch
--- /dev/null
+From 4a4aca641bc4598e77b866804f47c651ec4a764d Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Fri, 5 Jun 2009 12:02:38 +0200
+Subject: x86: Add quirk for reboot stalls on a Dell Optiplex 360
+
+From: Jean Delvare <jdelvare@suse.de>
+
+commit 4a4aca641bc4598e77b866804f47c651ec4a764d upstream.
+
+The Dell Optiplex 360 hangs on reboot, just like the Optiplex 330, so
+the same quirk is needed.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Cc: Steve Conklin <steve.conklin@canonical.com>
+Cc: Leann Ogasawara <leann.ogasawara@canonical.com>
+LKML-Reference: <200906051202.38311.jdelvare@suse.de>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/reboot.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -192,6 +192,15 @@ static struct dmi_system_id __initdata r
+ DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
+ },
+ },
++ { /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */
++ .callback = set_bios_reboot,
++ .ident = "Dell OptiPlex 360",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"),
++ DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
++ },
++ },
+ { /* Handle problems with rebooting on Dell 2400's */
+ .callback = set_bios_reboot,
+ .ident = "Dell PowerEdge 2400",
--- /dev/null
+From fe2245c905631a3a353504fc04388ce3dfaf9d9e Mon Sep 17 00:00:00 2001
+From: Mark Langsdorf <mark.langsdorf@amd.com>
+Date: Sun, 5 Jul 2009 15:50:52 -0500
+Subject: x86: enable GART-IOMMU only after setting up protection methods
+
+From: Mark Langsdorf <mark.langsdorf@amd.com>
+
+commit fe2245c905631a3a353504fc04388ce3dfaf9d9e upstream.
+
+The current code to set up the GART as an IOMMU enables GART
+translations before it removes the aperture from the kernel memory
+map, sets the GART PTEs to UC, sets up the guard and scratch
+pages, or does a wbinvd(). This leaves the possibility of cache
+aliasing open and can cause system crashes.
+
+Re-order the code so as to enable the GART translations only
+after all safeguards are in place and the tlb has been flushed.
+
+AMD has tested this patch on both Istanbul systems and 1st
+generation Opteron systems with AGP enabled and seen no adverse
+effects. Istanbul systems with HT Assist enabled sometimes
+see MCE errors due to cache artifacts with the unmodified
+code.
+
+Signed-off-by: Mark Langsdorf <mark.langsdorf@amd.com>
+Cc: Joerg Roedel <joerg.roedel@amd.com>
+Cc: akpm@linux-foundation.org
+Cc: jbarnes@virtuousgeek.org
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+---
+ arch/x86/kernel/pci-gart_64.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/pci-gart_64.c
++++ b/arch/x86/kernel/pci-gart_64.c
+@@ -688,8 +688,6 @@ static __init int init_k8_gatt(struct ag
+
+ agp_gatt_table = gatt;
+
+- enable_gart_translations();
+-
+ error = sysdev_class_register(&gart_sysdev_class);
+ if (!error)
+ error = sysdev_register(&device_gart);
+@@ -845,6 +843,14 @@ void __init gart_iommu_init(void)
+ * the pages as Not-Present:
+ */
+ wbinvd();
++
++ /*
++ * Now all caches are flushed and we can safely enable
++ * GART hardware. Doing it early leaves the possibility
++ * of stale cache entries that can lead to GART PTE
++ * errors.
++ */
++ enable_gart_translations();
+
+ /*
+ * Try to workaround a bug (thanks to BenH):
--- /dev/null
+From 0e2595cdfd7df9f1128f7185152601ae5417483b Mon Sep 17 00:00:00 2001
+From: Cliff Wickman <cpw@sgi.com>
+Date: Wed, 20 May 2009 08:10:57 -0500
+Subject: x86: Fix UV BAU activation descriptor init
+
+From: Cliff Wickman <cpw@sgi.com>
+
+commit 0e2595cdfd7df9f1128f7185152601ae5417483b upstream.
+
+The UV tlb shootdown code has a serious initialization error.
+
+An array of structures [32*8] is initialized as if it were [32].
+The array is indexed by (cpu number on the blade)*8, so the short
+initialization works for up to 4 cpus on a blade.
+But above that, we provide an invalid opcode to the hub's
+broadcast assist unit.
+
+This patch changes the allocation of the array to use its symbolic
+dimensions for better clarity, and initializes all 32*8 entries.
+
+Shortened 'UV_ACTIVATION_DESCRIPTOR_SIZE' to 'UV_ADP_SIZE' per Ingo's
+recommendation.
+
+Tested on the UV simulator.
+
+Signed-off-by: Cliff Wickman <cpw@sgi.com>
+LKML-Reference: <E1M6lZR-0007kV-Aq@eag09.americas.sgi.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/uv/uv_bau.h | 2 +-
+ arch/x86/kernel/tlb_uv.c | 15 +++++++++++++--
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/uv/uv_bau.h
++++ b/arch/x86/include/asm/uv/uv_bau.h
+@@ -37,7 +37,7 @@
+ #define UV_CPUS_PER_ACT_STATUS 32
+ #define UV_ACT_STATUS_MASK 0x3
+ #define UV_ACT_STATUS_SIZE 2
+-#define UV_ACTIVATION_DESCRIPTOR_SIZE 32
++#define UV_ADP_SIZE 32
+ #define UV_DISTRIBUTION_SIZE 256
+ #define UV_SW_ACK_NPENDING 8
+ #define UV_NET_ENDPOINT_INTD 0x38
+--- a/arch/x86/kernel/tlb_uv.c
++++ b/arch/x86/kernel/tlb_uv.c
+@@ -715,7 +715,12 @@ uv_activation_descriptor_init(int node,
+ struct bau_desc *adp;
+ struct bau_desc *ad2;
+
+- adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
++ /*
++ * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
++ * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per blade
++ */
++ adp = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
++ UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
+ BUG_ON(!adp);
+
+ pa = uv_gpa(adp); /* need the real nasid*/
+@@ -729,7 +734,13 @@ uv_activation_descriptor_init(int node,
+ (n << UV_DESC_BASE_PNODE_SHIFT | m));
+ }
+
+- for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
++ /*
++ * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
++ * cpu even though we only use the first one; one descriptor can
++ * describe a broadcast to 256 nodes.
++ */
++ for (i = 0, ad2 = adp; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
++ i++, ad2++) {
+ memset(ad2, 0, sizeof(struct bau_desc));
+ ad2->header.sw_ack_flag = 1;
+ /*
--- /dev/null
+From 9c26f52b900f7207135bafc8789e1a4f5d43e096 Mon Sep 17 00:00:00 2001
+From: Cliff Wickman <cpw@sgi.com>
+Date: Wed, 24 Jun 2009 09:41:59 -0500
+Subject: x86: Fix uv bau sending buffer initialization
+
+From: Cliff Wickman <cpw@sgi.com>
+
+commit 9c26f52b900f7207135bafc8789e1a4f5d43e096 upstream.
+
+The initialization of the UV Broadcast Assist Unit's sending
+buffers was making an invalid assumption about the
+initialization of an MMR that defines its address.
+
+The BIOS will not be providing that MMR. So
+uv_activation_descriptor_init() should unconditionally set it.
+
+Tested on UV simulator.
+
+Signed-off-by: Cliff Wickman <cpw@sgi.com>
+LKML-Reference: <E1MJTfj-0005i1-W8@eag09.americas.sgi.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/tlb_uv.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kernel/tlb_uv.c
++++ b/arch/x86/kernel/tlb_uv.c
+@@ -711,7 +711,6 @@ uv_activation_descriptor_init(int node,
+ unsigned long pa;
+ unsigned long m;
+ unsigned long n;
+- unsigned long mmr_image;
+ struct bau_desc *adp;
+ struct bau_desc *ad2;
+
+@@ -727,12 +726,8 @@ uv_activation_descriptor_init(int node,
+ n = pa >> uv_nshift;
+ m = pa & uv_mmask;
+
+- mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
+- if (mmr_image) {
+- uv_write_global_mmr64(pnode, (unsigned long)
+- UVH_LB_BAU_SB_DESCRIPTOR_BASE,
+- (n << UV_DESC_BASE_PNODE_SHIFT | m));
+- }
++ uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
++ (n << UV_DESC_BASE_PNODE_SHIFT | m));
+
+ /*
+ * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
--- /dev/null
+From c9690998ef48ffefeccb91c70a7739eebdea57f9 Mon Sep 17 00:00:00 2001
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+Date: Mon, 8 Jun 2009 19:09:39 +0200
+Subject: x86: memtest: remove 64-bit division
+
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+
+commit c9690998ef48ffefeccb91c70a7739eebdea57f9 upstream.
+
+Using gcc 3.3.5 a "make allmodconfig" + "CONFIG_KVM=n"
+triggers a build error:
+
+ arch/x86/mm/built-in.o(.init.text+0x43f7): In function `__change_page_attr':
+ arch/x86/mm/pageattr.c:114: undefined reference to `__udivdi3'
+ make: *** [.tmp_vmlinux1] Error 1
+
+The culprit turned out to be a division in arch/x86/mm/memtest.c.
+For more info see this thread:
+
+ http://marc.info/?l=linux-kernel&m=124416232620683
+
+The patch entirely removes the division that caused the build
+error.
+
+[ Impact: build fix with certain GCC versions ]
+
+Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
+Cc: Yinghai Lu <yinghai@kernel.org>
+Cc: xiyou.wangcong@gmail.com
+Cc: Andrew Morton <akpm@linux-foundation.org>
+LKML-Reference: <20090608170939.GB12431@alberich.amd.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/memtest.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/mm/memtest.c
++++ b/arch/x86/mm/memtest.c
+@@ -40,23 +40,23 @@ static void __init reserve_bad_mem(u64 p
+
+ static void __init memtest(u64 pattern, u64 start_phys, u64 size)
+ {
+- u64 i, count;
+- u64 *start;
++ u64 *p;
++ void *start, *end;
+ u64 start_bad, last_bad;
+ u64 start_phys_aligned;
+ size_t incr;
+
+ incr = sizeof(pattern);
+ start_phys_aligned = ALIGN(start_phys, incr);
+- count = (size - (start_phys_aligned - start_phys))/incr;
+ start = __va(start_phys_aligned);
++ end = start + size - (start_phys_aligned - start_phys);
+ start_bad = 0;
+ last_bad = 0;
+
+- for (i = 0; i < count; i++)
+- start[i] = pattern;
+- for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
+- if (*start == pattern)
++ for (p = start; p < end; p++)
++ *p = pattern;
++ for (p = start; p < end; p++, start_phys_aligned += incr) {
++ if (*p == pattern)
+ continue;
+ if (start_phys_aligned == last_bad + incr) {
+ last_bad += incr;
--- /dev/null
+From 7d96fd41cadc55f4e00231c8c71b8e25c779f122 Mon Sep 17 00:00:00 2001
+From: Petr Tesarik <ptesarik@suse.cz>
+Date: Mon, 25 May 2009 11:02:02 +0200
+Subject: x86: move rdtsc_barrier() into the TSC vread method
+
+From: Petr Tesarik <ptesarik@suse.cz>
+
+commit 7d96fd41cadc55f4e00231c8c71b8e25c779f122 upstream.
+
+The *fence instructions were moved to vsyscall_64.c by commit
+cb9e35dce94a1b9c59d46224e8a94377d673e204. But this breaks the
+vDSO, because vread methods are also called from there.
+
+Besides, the synchronization might be unnecessary for other
+time sources than TSC.
+
+[ Impact: fix potential time warp in VDSO ]
+
+Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
+LKML-Reference: <9d0ea9ea0f866bdc1f4d76831221ae117f11ea67.1243241859.git.ptesarik@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/tsc.c | 11 ++++++++++-
+ arch/x86/kernel/vsyscall_64.c | 8 --------
+ 2 files changed, 10 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -710,7 +710,16 @@ static cycle_t read_tsc(struct clocksour
+ #ifdef CONFIG_X86_64
+ static cycle_t __vsyscall_fn vread_tsc(void)
+ {
+- cycle_t ret = (cycle_t)vget_cycles();
++ cycle_t ret;
++
++ /*
++ * Surround the RDTSC by barriers, to make sure it's not
++ * speculated to outside the seqlock critical section and
++ * does not cause time warps:
++ */
++ rdtsc_barrier();
++ ret = (cycle_t)vget_cycles();
++ rdtsc_barrier();
+
+ return ret >= __vsyscall_gtod_data.clock.cycle_last ?
+ ret : __vsyscall_gtod_data.clock.cycle_last;
+--- a/arch/x86/kernel/vsyscall_64.c
++++ b/arch/x86/kernel/vsyscall_64.c
+@@ -132,15 +132,7 @@ static __always_inline void do_vgettimeo
+ return;
+ }
+
+- /*
+- * Surround the RDTSC by barriers, to make sure it's not
+- * speculated to outside the seqlock critical section and
+- * does not cause time warps:
+- */
+- rdtsc_barrier();
+ now = vread();
+- rdtsc_barrier();
+-
+ base = __vsyscall_gtod_data.clock.cycle_last;
+ mask = __vsyscall_gtod_data.clock.mask;
+ mult = __vsyscall_gtod_data.clock.mult;
--- /dev/null
+From c4ed3f04ba9defe22aa729d1646f970f791c03d7 Mon Sep 17 00:00:00 2001
+From: Jack Steiner <steiner@sgi.com>
+Date: Mon, 8 Jun 2009 10:44:05 -0500
+Subject: x86, UV: Fix macros for multiple coherency domains
+
+From: Jack Steiner <steiner@sgi.com>
+
+commit c4ed3f04ba9defe22aa729d1646f970f791c03d7 upstream.
+
+Fix bug in the SGI UV macros that support systems with multiple
+coherency domains. The macros used for referencing global MMR
+(chipset registers) are failing to correctly "or" the NASID
+(node identifier) bits that reside above M+N. These high bits
+are supplied automatically by the chipset for memory accesses
+coming from the processor socket.
+
+However, the bits must be present for references to the special
+global MMR space used to map chipset registers. (See uv_hub.h
+for more details ...)
+
+The bug results in references to invalid/incorrect nodes.
+
+Signed-off-by: Jack Steiner <steiner@sgi.com>
+LKML-Reference: <20090608154405.GA16395@sgi.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/uv/uv_hub.h | 6 ++++--
+ arch/x86/kernel/apic/x2apic_uv_x.c | 15 +++++++++------
+ 2 files changed, 13 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/uv/uv_hub.h
++++ b/arch/x86/include/asm/uv/uv_hub.h
+@@ -133,6 +133,7 @@ struct uv_scir_s {
+ struct uv_hub_info_s {
+ unsigned long global_mmr_base;
+ unsigned long gpa_mask;
++ unsigned int gnode_extra;
+ unsigned long gnode_upper;
+ unsigned long lowmem_remap_top;
+ unsigned long lowmem_remap_base;
+@@ -159,7 +160,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __
+ * p - PNODE (local part of nsids, right shifted 1)
+ */
+ #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
+-#define UV_PNODE_TO_NASID(p) (((p) << 1) | uv_hub_info->gnode_upper)
++#define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
++#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1)
+
+ #define UV_LOCAL_MMR_BASE 0xf4000000UL
+ #define UV_GLOBAL_MMR32_BASE 0xf8000000UL
+@@ -173,7 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __
+ #define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
+
+ #define UV_GLOBAL_MMR64_PNODE_BITS(p) \
+- ((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)
++ ((unsigned long)(UV_PNODE_TO_GNODE(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+
+ #define UV_APIC_PNODE_SHIFT 6
+
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -562,7 +562,7 @@ void __init uv_system_init(void)
+ union uvh_node_id_u node_id;
+ unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
+ int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
+- int max_pnode = 0;
++ int gnode_extra, max_pnode = 0;
+ unsigned long mmr_base, present, paddr;
+ unsigned short pnode_mask;
+
+@@ -574,6 +574,13 @@ void __init uv_system_init(void)
+ mmr_base =
+ uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
+ ~UV_MMR_ENABLE;
++ pnode_mask = (1 << n_val) - 1;
++ node_id.v = uv_read_local_mmr(UVH_NODE_ID);
++ gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
++ gnode_upper = ((unsigned long)gnode_extra << m_val);
++ printk(KERN_DEBUG "UV: N %d, M %d, gnode_upper 0x%lx, gnode_extra 0x%x\n",
++ n_val, m_val, gnode_upper, gnode_extra);
++
+ printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
+
+ for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
+@@ -607,11 +614,6 @@ void __init uv_system_init(void)
+ }
+ }
+
+- pnode_mask = (1 << n_val) - 1;
+- node_id.v = uv_read_local_mmr(UVH_NODE_ID);
+- gnode_upper = (((unsigned long)node_id.s.node_id) &
+- ~((1 << n_val) - 1)) << m_val;
+-
+ uv_bios_init();
+ uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
+ &sn_coherency_id, &sn_region_size);
+@@ -634,6 +636,7 @@ void __init uv_system_init(void)
+ uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
+ uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+ uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
++ uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
+ uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
+ uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
+ uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;