--- /dev/null
+From 48a719c238bcbb72d6da79de9c5b3b93ab472107 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 22 Jan 2010 16:01:04 +0100
+Subject: intel-agp: Switch to wbinvd_on_all_cpus
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 48a719c238bcbb72d6da79de9c5b3b93ab472107 upstream.
+
+Simplify if-statement while at it.
+
+[ hpa: we need to #include <asm/smp.h> ]
+
+Cc: Dave Jones <davej@redhat.com>
+Cc: David Airlie <airlied@linux.ie>
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <1264172467-25155-3-git-send-email-bp@amd64.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/agp/intel-agp.c | 15 ++++-----------
+ 1 file changed, 4 insertions(+), 11 deletions(-)
+
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -8,6 +8,7 @@
+ #include <linux/kernel.h>
+ #include <linux/pagemap.h>
+ #include <linux/agp_backend.h>
++#include <asm/smp.h>
+ #include "agp.h"
+
+ /*
+@@ -815,12 +816,6 @@ static void intel_i830_setup_flush(void)
+ intel_i830_fini_flush();
+ }
+
+-static void
+-do_wbinvd(void *null)
+-{
+- wbinvd();
+-}
+-
+ /* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+@@ -837,12 +832,10 @@ static void intel_i830_chipset_flush(str
+
+ memset(pg, 0, 1024);
+
+- if (cpu_has_clflush) {
++ if (cpu_has_clflush)
+ clflush_cache_range(pg, 1024);
+- } else {
+- if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
+- printk(KERN_ERR "Timed out waiting for cache flush.\n");
+- }
++ else if (wbinvd_on_all_cpus() != 0)
++ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ }
+
+ /* The intel i830 automatically initializes the agp aperture during POST.
x86-amd-iommu-use-helper-function-to-destroy-domain.patch
x86-amd-iommu-enable-iommu-before-attaching-devices.patch
revert-x86-disable-iommus-on-kernel-crash.patch
+x86-lib-add-wbinvd-smp-helpers.patch
+x86-cacheinfo-fix-disabling-of-l3-cache-indices.patch
+intel-agp-switch-to-wbinvd_on_all_cpus.patch
+x86-cacheinfo-add-cache-index-disable-sysfs-attrs-only-to-l3-caches.patch
+x86-cacheinfo-calculate-l3-indices.patch
+x86-cacheinfo-remove-numa-dependency-fix-for-amd-fam10h-rev-d1.patch
+x86-cacheinfo-enable-l3-cid-only-on-amd.patch
+vgaarb-fix-target-default-passing.patch
--- /dev/null
+From eugeneteo@kernel.sg Mon Apr 19 10:45:25 2010
+From: Eugene Teo <eugeneteo@kernel.sg>
+Date: Tue, 23 Mar 2010 10:52:13 +0800
+Subject: vgaarb: fix "target=default" passing
+To: Greg KH <greg@kroah.com>
+Cc: Kyle McMartin <kyle@redhat.com>, gregkh@suse.de, stable@kernel.org, Eugene Teo <eugene@redhat.com>
+Message-ID: <20100323025213.GA5863@infradead.org>
+Content-Disposition: inline
+
+
+Commit 77c1ff3982c6b36961725dd19e872a1c07df7f3b fixed the userspace
+pointer dereference, but introduced another bug pointed out by Eugene Teo
+in RH bug #564264: instead of comparing the current position in the
+string, we compared the beginning of the string to "default".
+
+Signed-off-by: Eugene Teo <eugeneteo@kernel.sg>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
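+
+[ note: a minimal user-space sketch of the bug, not part of the patch;
+  the buffer contents are made up. "target " is 7 characters, which is
+  why the kernel code advances past it before matching. ]
+
+	#include <stdio.h>
+	#include <string.h>
+
+	int main(void)
+	{
+		const char *buf = "target default";	/* whole userspace write */
+		const char *curr_pos = buf + 7;		/* parser is past "target " */
+
+		/* buggy check: still looks at "target ", never matches */
+		printf("buf:      %d\n", !strncmp(buf, "default", 7));
+		/* fixed check: looks at "default", matches */
+		printf("curr_pos: %d\n", !strncmp(curr_pos, "default", 7));
+		return 0;
+	}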
+ drivers/gpu/vga/vgaarb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/vga/vgaarb.c
++++ b/drivers/gpu/vga/vgaarb.c
+@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file
+ remaining -= 7;
+ pr_devel("client 0x%p called 'target'\n", priv);
+ /* if target is default */
+- if (!strncmp(buf, "default", 7))
++ if (!strncmp(curr_pos, "default", 7))
+ pdev = pci_dev_get(vga_default_device());
+ else {
+ if (!vga_pci_str_to_vars(curr_pos, remaining,
--- /dev/null
+From 897de50e08937663912c86fb12ad7f708af2386c Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 22 Jan 2010 16:01:06 +0100
+Subject: x86, cacheinfo: Add cache index disable sysfs attrs only to L3 caches
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 897de50e08937663912c86fb12ad7f708af2386c upstream.
+
+The cache_disable_[01] attribute in
+
+/sys/devices/system/cpu/cpu?/cache/index[0-3]/
+
+is enabled on all cache levels although only the L3 cache supports it.
+Add it only to the cache level that actually supports the feature.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <1264172467-25155-5-git-send-email-bp@amd64.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
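+
+[ note: the patch factors the attribute list shared by both kobject
+  attribute arrays into the DEFAULT_SYSFS_CACHE_ATTRS macro. A standalone
+  sketch of that pattern, with plain strings instead of struct attribute
+  pointers: ]
+
+	#include <stdio.h>
+
+	#define COMMON_ATTRS "type", "level", "size"
+
+	static const char *default_attrs[] = { COMMON_ATTRS, NULL };
+	static const char *default_l3_attrs[] = { COMMON_ATTRS,
+						  "cache_disable_0",
+						  "cache_disable_1",
+						  NULL };
+
+	int main(void)
+	{
+		const char **a;
+
+		for (a = default_attrs; *a; a++)
+			printf("l1/l2: %s\n", *a);
+		for (a = default_l3_attrs; *a; a++)
+			printf("l3:    %s\n", *a);
+		return 0;
+	}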
+ arch/x86/kernel/cpu/intel_cacheinfo.c | 35 +++++++++++++++++++++++++---------
+ 1 file changed, 26 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -829,16 +829,24 @@ static struct _cache_attr cache_disable_
+ static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+ show_cache_disable_1, store_cache_disable_1);
+
++#define DEFAULT_SYSFS_CACHE_ATTRS \
++ &type.attr, \
++ &level.attr, \
++ &coherency_line_size.attr, \
++ &physical_line_partition.attr, \
++ &ways_of_associativity.attr, \
++ &number_of_sets.attr, \
++ &size.attr, \
++ &shared_cpu_map.attr, \
++ &shared_cpu_list.attr
++
+ static struct attribute *default_attrs[] = {
+- &type.attr,
+- &level.attr,
+- &coherency_line_size.attr,
+- &physical_line_partition.attr,
+- &ways_of_associativity.attr,
+- &number_of_sets.attr,
+- &size.attr,
+- &shared_cpu_map.attr,
+- &shared_cpu_list.attr,
++ DEFAULT_SYSFS_CACHE_ATTRS,
++ NULL
++};
++
++static struct attribute *default_l3_attrs[] = {
++ DEFAULT_SYSFS_CACHE_ATTRS,
+ &cache_disable_0.attr,
+ &cache_disable_1.attr,
+ NULL
+@@ -931,6 +939,7 @@ static int __cpuinit cache_add_dev(struc
+ unsigned int cpu = sys_dev->id;
+ unsigned long i, j;
+ struct _index_kobject *this_object;
++ struct _cpuid4_info *this_leaf;
+ int retval;
+
+ retval = cpuid4_cache_sysfs_init(cpu);
+@@ -949,6 +958,14 @@ static int __cpuinit cache_add_dev(struc
+ this_object = INDEX_KOBJECT_PTR(cpu, i);
+ this_object->cpu = cpu;
+ this_object->index = i;
++
++ this_leaf = CPUID4_INFO_IDX(cpu, i);
++
++ if (this_leaf->can_disable)
++ ktype_cache.default_attrs = default_l3_attrs;
++ else
++ ktype_cache.default_attrs = default_attrs;
++
+ retval = kobject_init_and_add(&(this_object->kobj),
+ &ktype_cache,
+ per_cpu(cache_kobject, cpu),
--- /dev/null
+From 048a8774ca43488d78605031f11cc206d7a2682a Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 22 Jan 2010 16:01:07 +0100
+Subject: x86, cacheinfo: Calculate L3 indices
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 048a8774ca43488d78605031f11cc206d7a2682a upstream.
+
+We need to know the valid range of L3 indices when disabling them via
+sysfs. Compute it when the core is brought online and add boundary checks
+to the sysfs .store attribute.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <1264172467-25155-6-git-send-email-bp@amd64.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
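+
+[ note: a standalone worked example of amd_calc_l3_indices() below, with
+  a made-up value for northbridge register 0x1C4; not kernel code. Each
+  enabled subcache contributes to a count, and the largest count sets the
+  highest valid index. ]
+
+	#include <stdio.h>
+
+	#define BIT(n) (1U << (n))
+	#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+	int main(void)
+	{
+		unsigned int val = 0;	/* all subcaches enabled */
+		unsigned int sc0 = !(val & BIT(0));
+		unsigned int sc1 = !(val & BIT(4));
+		unsigned int sc2 = !(val & BIT(8)) + !(val & BIT(9));
+		unsigned int sc3 = !(val & BIT(12)) + !(val & BIT(13));
+		unsigned int l3_indices =
+			(MAX(MAX(MAX(sc0, sc1), sc2), sc3) << 10) - 1;
+
+		/* prints 2047, i.e. indices 0-2047 are valid */
+		printf("l3_indices = %u\n", l3_indices);
+		return 0;
+	}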
+ arch/x86/kernel/cpu/intel_cacheinfo.c | 35 ++++++++++++++++++++++++++++++----
+ 1 file changed, 31 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -151,7 +151,8 @@ struct _cpuid4_info {
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned long size;
+- unsigned long can_disable;
++ bool can_disable;
++ unsigned int l3_indices;
+ DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+ };
+
+@@ -161,7 +162,8 @@ struct _cpuid4_info_regs {
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned long size;
+- unsigned long can_disable;
++ bool can_disable;
++ unsigned int l3_indices;
+ };
+
+ unsigned short num_cache_leaves;
+@@ -291,6 +293,29 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_
+ (ebx->split.ways_of_associativity + 1) - 1;
+ }
+
++static unsigned int __cpuinit amd_calc_l3_indices(void)
++{
++ /*
++ * We're called over smp_call_function_single() and therefore
++ * are on the correct cpu.
++ */
++ int cpu = smp_processor_id();
++ int node = cpu_to_node(cpu);
++ struct pci_dev *dev = node_to_k8_nb_misc(node);
++ unsigned int sc0, sc1, sc2, sc3;
++ u32 val;
++
++ pci_read_config_dword(dev, 0x1C4, &val);
++
++ /* calculate subcache sizes */
++ sc0 = !(val & BIT(0));
++ sc1 = !(val & BIT(4));
++ sc2 = !(val & BIT(8)) + !(val & BIT(9));
++ sc3 = !(val & BIT(12)) + !(val & BIT(13));
++
++ return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
++}
++
+ static void __cpuinit
+ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+ {
+@@ -306,7 +331,8 @@ amd_check_l3_disable(int index, struct _
+ (boot_cpu_data.x86_mask < 0x1)))
+ return;
+
+- this_leaf->can_disable = 1;
++ this_leaf->can_disable = true;
++ this_leaf->l3_indices = amd_calc_l3_indices();
+ }
+
+ static int
+@@ -780,7 +806,8 @@ static ssize_t store_cache_disable(struc
+ return -EINVAL;
+
+ /* do not allow writes outside of allowed bits */
+- if (val & ~(SUBCACHE_MASK | SUBCACHE_INDEX))
++ if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
++ ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
+ return -EINVAL;
+
+ val |= BIT(30);
--- /dev/null
+From cb19060abfdecac0d1eb2d2f0e7d6b7a3f8bc4f4 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Thu, 18 Feb 2010 19:37:14 +0100
+Subject: x86, cacheinfo: Enable L3 CID only on AMD
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit cb19060abfdecac0d1eb2d2f0e7d6b7a3f8bc4f4 upstream.
+
+Final stage linking can fail with
+
+ arch/x86/built-in.o: In function `store_cache_disable':
+ intel_cacheinfo.c:(.text+0xc509): undefined reference to `amd_get_nb_id'
+ arch/x86/built-in.o: In function `show_cache_disable':
+ intel_cacheinfo.c:(.text+0xc7d3): undefined reference to `amd_get_nb_id'
+
+when CONFIG_CPU_SUP_AMD is not enabled because the amd_get_nb_id
+helper is defined in AMD-specific code but also used in generic code
+(intel_cacheinfo.c). Reorganize the L3 cache index disable code under
+CONFIG_CPU_SUP_AMD since it is AMD-only anyway.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <20100218184210.GF20473@aftab>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
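+
+[ note: the reorganization uses the usual kernel pattern of compiling the
+  real code only under the config option and providing an empty stub
+  otherwise, so generic callers need no #ifdefs. A standalone sketch of
+  the pattern (names made up): ]
+
+	#include <stdio.h>
+
+	/* #define CONFIG_CPU_SUP_AMD */
+
+	#ifdef CONFIG_CPU_SUP_AMD
+	static void vendor_check(int index)
+	{
+		printf("vendor-specific work for index %d\n", index);
+	}
+	#else
+	/* empty stub keeps generic code linking without vendor support */
+	static void vendor_check(int index) { (void)index; }
+	#endif
+
+	int main(void)
+	{
+		vendor_check(3);
+		return 0;
+	}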
+ arch/x86/kernel/cpu/intel_cacheinfo.c | 186 +++++++++++++++++-----------------
+ 1 file changed, 98 insertions(+), 88 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -293,6 +293,13 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_
+ (ebx->split.ways_of_associativity + 1) - 1;
+ }
+
++struct _cache_attr {
++ struct attribute attr;
++ ssize_t (*show)(struct _cpuid4_info *, char *);
++ ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
++};
++
++#ifdef CONFIG_CPU_SUP_AMD
+ static unsigned int __cpuinit amd_calc_l3_indices(void)
+ {
+ /*
+@@ -303,7 +310,7 @@ static unsigned int __cpuinit amd_calc_l
+ int node = cpu_to_node(cpu);
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+ unsigned int sc0, sc1, sc2, sc3;
+- u32 val;
++ u32 val = 0;
+
+ pci_read_config_dword(dev, 0x1C4, &val);
+
+@@ -335,6 +342,94 @@ amd_check_l3_disable(int index, struct _
+ this_leaf->l3_indices = amd_calc_l3_indices();
+ }
+
++static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
++ unsigned int index)
++{
++ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
++ int node = amd_get_nb_id(cpu);
++ struct pci_dev *dev = node_to_k8_nb_misc(node);
++ unsigned int reg = 0;
++
++ if (!this_leaf->can_disable)
++ return -EINVAL;
++
++ if (!dev)
++ return -EINVAL;
++
++ pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
++ return sprintf(buf, "0x%08x\n", reg);
++}
++
++#define SHOW_CACHE_DISABLE(index) \
++static ssize_t \
++show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
++{ \
++ return show_cache_disable(this_leaf, buf, index); \
++}
++SHOW_CACHE_DISABLE(0)
++SHOW_CACHE_DISABLE(1)
++
++static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
++ const char *buf, size_t count, unsigned int index)
++{
++ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
++ int node = amd_get_nb_id(cpu);
++ struct pci_dev *dev = node_to_k8_nb_misc(node);
++ unsigned long val = 0;
++
++#define SUBCACHE_MASK (3UL << 20)
++#define SUBCACHE_INDEX 0xfff
++
++ if (!this_leaf->can_disable)
++ return -EINVAL;
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ if (!dev)
++ return -EINVAL;
++
++ if (strict_strtoul(buf, 10, &val) < 0)
++ return -EINVAL;
++
++ /* do not allow writes outside of allowed bits */
++ if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
++ ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
++ return -EINVAL;
++
++ val |= BIT(30);
++ pci_write_config_dword(dev, 0x1BC + index * 4, val);
++ /*
++ * We need to WBINVD on a core on the node containing the L3 cache which
++ * indices we disable therefore a simple wbinvd() is not sufficient.
++ */
++ wbinvd_on_cpu(cpu);
++ pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
++ return count;
++}
++
++#define STORE_CACHE_DISABLE(index) \
++static ssize_t \
++store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
++ const char *buf, size_t count) \
++{ \
++ return store_cache_disable(this_leaf, buf, count, index); \
++}
++STORE_CACHE_DISABLE(0)
++STORE_CACHE_DISABLE(1)
++
++static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
++ show_cache_disable_0, store_cache_disable_0);
++static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
++ show_cache_disable_1, store_cache_disable_1);
++
++#else /* CONFIG_CPU_SUP_AMD */
++static void __cpuinit
++amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
++{
++};
++#endif /* CONFIG_CPU_SUP_AMD */
++
+ static int
+ __cpuinit cpuid4_cache_lookup_regs(int index,
+ struct _cpuid4_info_regs *this_leaf)
+@@ -755,88 +850,6 @@ static ssize_t show_type(struct _cpuid4_
+ #define to_object(k) container_of(k, struct _index_kobject, kobj)
+ #define to_attr(a) container_of(a, struct _cache_attr, attr)
+
+-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+- unsigned int index)
+-{
+- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+- int node = amd_get_nb_id(cpu);
+- struct pci_dev *dev = node_to_k8_nb_misc(node);
+- unsigned int reg = 0;
+-
+- if (!this_leaf->can_disable)
+- return -EINVAL;
+-
+- if (!dev)
+- return -EINVAL;
+-
+- pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+- return sprintf(buf, "0x%08x\n", reg);
+-}
+-
+-#define SHOW_CACHE_DISABLE(index) \
+-static ssize_t \
+-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+-{ \
+- return show_cache_disable(this_leaf, buf, index); \
+-}
+-SHOW_CACHE_DISABLE(0)
+-SHOW_CACHE_DISABLE(1)
+-
+-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+- const char *buf, size_t count, unsigned int index)
+-{
+- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+- int node = amd_get_nb_id(cpu);
+- struct pci_dev *dev = node_to_k8_nb_misc(node);
+- unsigned long val = 0;
+-
+-#define SUBCACHE_MASK (3UL << 20)
+-#define SUBCACHE_INDEX 0xfff
+-
+- if (!this_leaf->can_disable)
+- return -EINVAL;
+-
+- if (!capable(CAP_SYS_ADMIN))
+- return -EPERM;
+-
+- if (!dev)
+- return -EINVAL;
+-
+- if (strict_strtoul(buf, 10, &val) < 0)
+- return -EINVAL;
+-
+- /* do not allow writes outside of allowed bits */
+- if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
+- ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
+- return -EINVAL;
+-
+- val |= BIT(30);
+- pci_write_config_dword(dev, 0x1BC + index * 4, val);
+- /*
+- * We need to WBINVD on a core on the node containing the L3 cache which
+- * indices we disable therefore a simple wbinvd() is not sufficient.
+- */
+- wbinvd_on_cpu(cpu);
+- pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+- return count;
+-}
+-
+-#define STORE_CACHE_DISABLE(index) \
+-static ssize_t \
+-store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
+- const char *buf, size_t count) \
+-{ \
+- return store_cache_disable(this_leaf, buf, count, index); \
+-}
+-STORE_CACHE_DISABLE(0)
+-STORE_CACHE_DISABLE(1)
+-
+-struct _cache_attr {
+- struct attribute attr;
+- ssize_t (*show)(struct _cpuid4_info *, char *);
+- ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+-};
+-
+ #define define_one_ro(_name) \
+ static struct _cache_attr _name = \
+ __ATTR(_name, 0444, show_##_name, NULL)
+@@ -851,11 +864,6 @@ define_one_ro(size);
+ define_one_ro(shared_cpu_map);
+ define_one_ro(shared_cpu_list);
+
+-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
+- show_cache_disable_0, store_cache_disable_0);
+-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+- show_cache_disable_1, store_cache_disable_1);
+-
+ #define DEFAULT_SYSFS_CACHE_ATTRS \
+ &type.attr, \
+ &level.attr, \
+@@ -874,8 +882,10 @@ static struct attribute *default_attrs[]
+
+ static struct attribute *default_l3_attrs[] = {
+ DEFAULT_SYSFS_CACHE_ATTRS,
++#ifdef CONFIG_CPU_SUP_AMD
+ &cache_disable_0.attr,
+ &cache_disable_1.attr,
++#endif
+ NULL
+ };
+
--- /dev/null
+From dcf39daf3d6d97f8741e82f0b9fb7554704ed2d1 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 22 Jan 2010 16:01:05 +0100
+Subject: x86, cacheinfo: Fix disabling of L3 cache indices
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit dcf39daf3d6d97f8741e82f0b9fb7554704ed2d1 upstream.
+
+* Correct the masks used for writing the cache index disable indices.
+* Do not turn off L3 scrubber - it is not necessary.
+* Make sure wbinvd is executed on the same node where the L3 is.
+* Check for out-of-bounds values written to the registers.
+* Make show_cache_disable hex values unambiguous.
+* Check for Erratum #388.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <1264172467-25155-4-git-send-email-bp@amd64.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
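+
+[ note: a standalone illustration of the new bounds check, not kernel
+  code. Only bits 21:20 (SUBCACHE_MASK) and bits 11:0 (SUBCACHE_INDEX)
+  may be set by the writer; anything else is rejected with -EINVAL. ]
+
+	#include <stdio.h>
+
+	#define SUBCACHE_MASK  (3UL << 20)	/* bits 21:20 */
+	#define SUBCACHE_INDEX 0xfffUL		/* bits 11:0 */
+
+	static int valid(unsigned long val)
+	{
+		return !(val & ~(SUBCACHE_MASK | SUBCACHE_INDEX));
+	}
+
+	int main(void)
+	{
+		printf("%d\n", valid((1UL << 20) | 0x800));	/* 1: allowed bits */
+		printf("%d\n", valid(1UL << 25));		/* 0: stray bit set */
+		return 0;
+	}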
+ arch/x86/kernel/cpu/intel_cacheinfo.c | 34 +++++++++++++++++++++-------------
+ 1 file changed, 21 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -18,6 +18,7 @@
+ #include <asm/processor.h>
+ #include <linux/smp.h>
+ #include <asm/k8.h>
++#include <asm/smp.h>
+
+ #define LVL_1_INST 1
+ #define LVL_1_DATA 2
+@@ -299,8 +300,10 @@ amd_check_l3_disable(int index, struct _
+ if (boot_cpu_data.x86 == 0x11)
+ return;
+
+- /* see erratum #382 */
+- if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
++ /* see errata #382 and #388 */
++ if ((boot_cpu_data.x86 == 0x10) &&
++ ((boot_cpu_data.x86_model < 0x9) ||
++ (boot_cpu_data.x86_mask < 0x1)))
+ return;
+
+ this_leaf->can_disable = 1;
+@@ -741,12 +744,12 @@ static ssize_t show_cache_disable(struct
+ return -EINVAL;
+
+ pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+- return sprintf(buf, "%x\n", reg);
++ return sprintf(buf, "0x%08x\n", reg);
+ }
+
+ #define SHOW_CACHE_DISABLE(index) \
+ static ssize_t \
+-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
++show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+ { \
+ return show_cache_disable(this_leaf, buf, index); \
+ }
+@@ -760,7 +763,9 @@ static ssize_t store_cache_disable(struc
+ int node = cpu_to_node(cpu);
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+ unsigned long val = 0;
+- unsigned int scrubber = 0;
++
++#define SUBCACHE_MASK (3UL << 20)
++#define SUBCACHE_INDEX 0xfff
+
+ if (!this_leaf->can_disable)
+ return -EINVAL;
+@@ -774,21 +779,24 @@ static ssize_t store_cache_disable(struc
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+- val |= 0xc0000000;
+-
+- pci_read_config_dword(dev, 0x58, &scrubber);
+- scrubber &= ~0x1f000000;
+- pci_write_config_dword(dev, 0x58, scrubber);
++ /* do not allow writes outside of allowed bits */
++ if (val & ~(SUBCACHE_MASK | SUBCACHE_INDEX))
++ return -EINVAL;
+
+- pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
+- wbinvd();
++ val |= BIT(30);
+ pci_write_config_dword(dev, 0x1BC + index * 4, val);
++ /*
++ * We need to WBINVD on a core on the node containing the L3 cache which
++ * indices we disable therefore a simple wbinvd() is not sufficient.
++ */
++ wbinvd_on_cpu(cpu);
++ pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+ return count;
+ }
+
+ #define STORE_CACHE_DISABLE(index) \
+ static ssize_t \
+-store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
++store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
+ const char *buf, size_t count) \
+ { \
+ return store_cache_disable(this_leaf, buf, count, index); \
--- /dev/null
+From f619b3d8427eb57f0134dab75b0d217325c72411 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Thu, 4 Feb 2010 12:09:07 +0100
+Subject: x86, cacheinfo: Remove NUMA dependency, fix for AMD Fam10h rev D1
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit f619b3d8427eb57f0134dab75b0d217325c72411 upstream.
+
+The show/store_cache_disable routines depend unnecessarily on NUMA's
+cpu_to_node, and the disabling of cache indices broke when !CONFIG_NUMA
+(without NUMA, cpu_to_node() always returns 0, so the wrong northbridge
+can be selected). Remove that dependency by using a helper which is
+always correct.
+
+While at it, enable L3 Cache Index disable on rev D1 Istanbuls which
+sport the feature too.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <20100218184339.GG20473@aftab>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/intel_cacheinfo.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -327,7 +327,7 @@ amd_check_l3_disable(int index, struct _
+
+ /* see errata #382 and #388 */
+ if ((boot_cpu_data.x86 == 0x10) &&
+- ((boot_cpu_data.x86_model < 0x9) ||
++ ((boot_cpu_data.x86_model < 0x8) ||
+ (boot_cpu_data.x86_mask < 0x1)))
+ return;
+
+@@ -759,7 +759,7 @@ static ssize_t show_cache_disable(struct
+ unsigned int index)
+ {
+ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+- int node = cpu_to_node(cpu);
++ int node = amd_get_nb_id(cpu);
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+ unsigned int reg = 0;
+
+@@ -786,7 +786,7 @@ static ssize_t store_cache_disable(struc
+ const char *buf, size_t count, unsigned int index)
+ {
+ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+- int node = cpu_to_node(cpu);
++ int node = amd_get_nb_id(cpu);
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+ unsigned long val = 0;
+
--- /dev/null
+From a7b480e7f30b3813353ec009f10f2ac7a6669f3b Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 22 Jan 2010 16:01:03 +0100
+Subject: x86, lib: Add wbinvd smp helpers
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit a7b480e7f30b3813353ec009f10f2ac7a6669f3b upstream.
+
+Add wbinvd_on_cpu and wbinvd_on_all_cpus helpers for executing wbinvd on
+a particular CPU or on all CPUs, respectively.
+
+[ hpa: renamed lib/smp.c to lib/cache-smp.c ]
+[ hpa: wbinvd_on_all_cpus() returns int, but wbinvd() returns
+ void. Thus, the former cannot be a macro for the latter,
+ replace with an inline function. ]
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <1264172467-25155-2-git-send-email-bp@amd64.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
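+
+[ note: a short usage sketch, not part of the patch; the function name
+  flush_example() is made up. It mirrors the intel-agp call site converted
+  elsewhere in this series; on !SMP kernels both helpers fall back to a
+  plain wbinvd(). ]
+
+	#include <linux/kernel.h>
+	#include <asm/smp.h>	/* wbinvd_on_cpu(), wbinvd_on_all_cpus() */
+
+	static void flush_example(int cpu)
+	{
+		/* flush caches on every CPU before handing memory to a
+		 * device that does not snoop them; returns 0 on success */
+		if (wbinvd_on_all_cpus() != 0)
+			printk(KERN_ERR "Timed out waiting for cache flush.\n");
+
+		/* or flush only a core on the node next to the L3 cache
+		 * whose indices are being disabled */
+		wbinvd_on_cpu(cpu);
+	}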
+ arch/x86/include/asm/smp.h | 9 +++++++++
+ arch/x86/lib/Makefile | 2 +-
+ arch/x86/lib/cache-smp.c | 19 +++++++++++++++++++
+ 3 files changed, 29 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -135,6 +135,8 @@ int native_cpu_disable(void);
+ void native_cpu_die(unsigned int cpu);
+ void native_play_dead(void);
+ void play_dead_common(void);
++void wbinvd_on_cpu(int cpu);
++int wbinvd_on_all_cpus(void);
+
+ void native_send_call_func_ipi(const struct cpumask *mask);
+ void native_send_call_func_single_ipi(int cpu);
+@@ -147,6 +149,13 @@ static inline int num_booting_cpus(void)
+ {
+ return cpumask_weight(cpu_callout_mask);
+ }
++#else /* !CONFIG_SMP */
++#define wbinvd_on_cpu(cpu) wbinvd()
++static inline int wbinvd_on_all_cpus(void)
++{
++ wbinvd();
++ return 0;
++}
+ #endif /* CONFIG_SMP */
+
+ extern unsigned disabled_cpus __cpuinitdata;
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -2,7 +2,7 @@
+ # Makefile for x86 specific library files.
+ #
+
+-obj-$(CONFIG_SMP) += msr-smp.o
++obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
+
+ lib-y := delay.o
+ lib-y += thunk_$(BITS).o
+--- /dev/null
++++ b/arch/x86/lib/cache-smp.c
+@@ -0,0 +1,19 @@
++#include <linux/smp.h>
++#include <linux/module.h>
++
++static void __wbinvd(void *dummy)
++{
++ wbinvd();
++}
++
++void wbinvd_on_cpu(int cpu)
++{
++ smp_call_function_single(cpu, __wbinvd, NULL, 1);
++}
++EXPORT_SYMBOL(wbinvd_on_cpu);
++
++int wbinvd_on_all_cpus(void)
++{
++ return on_each_cpu(__wbinvd, NULL, 1);
++}
++EXPORT_SYMBOL(wbinvd_on_all_cpus);