.33 patches
author    Greg Kroah-Hartman <gregkh@suse.de>    Mon, 19 Apr 2010 17:49:29 +0000 (10:49 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>    Mon, 19 Apr 2010 17:49:29 +0000 (10:49 -0700)
queue-2.6.33/dm-ioctl-introduce-flag-indicating-uevent-was-generated.patch [new file with mode: 0644]
queue-2.6.33/intel-agp-switch-to-wbinvd_on_all_cpus.patch [new file with mode: 0644]
queue-2.6.33/series
queue-2.6.33/x86-cacheinfo-add-cache-index-disable-sysfs-attrs-only-to-l3-caches.patch [new file with mode: 0644]
queue-2.6.33/x86-cacheinfo-calculate-l3-indices.patch [new file with mode: 0644]
queue-2.6.33/x86-cacheinfo-enable-l3-cid-only-on-amd.patch [new file with mode: 0644]
queue-2.6.33/x86-cacheinfo-fix-disabling-of-l3-cache-indices.patch [new file with mode: 0644]
queue-2.6.33/x86-cacheinfo-remove-numa-dependency-fix-for-amd-fam10h-rev-d1.patch [new file with mode: 0644]
queue-2.6.33/x86-lib-add-wbinvd-smp-helpers.patch [new file with mode: 0644]

diff --git a/queue-2.6.33/dm-ioctl-introduce-flag-indicating-uevent-was-generated.patch b/queue-2.6.33/dm-ioctl-introduce-flag-indicating-uevent-was-generated.patch
new file mode 100644 (file)
index 0000000..2249aba
--- /dev/null
@@ -0,0 +1,151 @@
+From 3abf85b5b5851b5f28d3d8a920ebb844edd08352 Mon Sep 17 00:00:00 2001
+From: Peter Rajnoha <prajnoha@redhat.com>
+Date: Sat, 6 Mar 2010 02:32:31 +0000
+Subject: dm ioctl: introduce flag indicating uevent was generated
+
+From: Peter Rajnoha <prajnoha@redhat.com>
+
+commit 3abf85b5b5851b5f28d3d8a920ebb844edd08352 upstream.
+
+Set a new DM_UEVENT_GENERATED_FLAG when returning from ioctls to
+indicate that a uevent was actually generated.  This tells the userspace
+caller that it may need to wait for the event to be processed.
+
+Signed-off-by: Peter Rajnoha <prajnoha@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/dm-ioctl.c    |   19 ++++++++++++-------
+ drivers/md/dm.c          |    7 ++++---
+ drivers/md/dm.h          |    4 ++--
+ include/linux/dm-ioctl.h |    9 +++++++--
+ 4 files changed, 25 insertions(+), 14 deletions(-)
+
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -285,7 +285,8 @@ retry:
+       up_write(&_hash_lock);
+ }
+-static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
++static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
++                        const char *new)
+ {
+       char *new_name, *old_name;
+       struct hash_cell *hc;
+@@ -344,7 +345,8 @@ static int dm_hash_rename(uint32_t cooki
+               dm_table_put(table);
+       }
+-      dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);
++      if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie))
++              *flags |= DM_UEVENT_GENERATED_FLAG;
+       dm_put(hc->md);
+       up_write(&_hash_lock);
+@@ -736,10 +738,10 @@ static int dev_remove(struct dm_ioctl *p
+       __hash_remove(hc);
+       up_write(&_hash_lock);
+-      dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);
++      if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
++              param->flags |= DM_UEVENT_GENERATED_FLAG;
+       dm_put(md);
+-      param->data_size = 0;
+       return 0;
+ }
+@@ -773,7 +775,9 @@ static int dev_rename(struct dm_ioctl *p
+               return r;
+       param->data_size = 0;
+-      return dm_hash_rename(param->event_nr, param->name, new_name);
++
++      return dm_hash_rename(param->event_nr, &param->flags, param->name,
++                            new_name);
+ }
+ static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
+@@ -899,8 +903,8 @@ static int do_resume(struct dm_ioctl *pa
+       if (dm_suspended_md(md)) {
+               r = dm_resume(md);
+-              if (!r)
+-                      dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
++              if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
++                      param->flags |= DM_UEVENT_GENERATED_FLAG;
+       }
+       if (old_map)
+@@ -1477,6 +1481,7 @@ static int validate_params(uint cmd, str
+ {
+       /* Always clear this flag */
+       param->flags &= ~DM_BUFFER_FULL_FLAG;
++      param->flags &= ~DM_UEVENT_GENERATED_FLAG;
+       /* Ignores parameters */
+       if (cmd == DM_REMOVE_ALL_CMD ||
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2618,18 +2618,19 @@ out:
+ /*-----------------------------------------------------------------
+  * Event notification.
+  *---------------------------------------------------------------*/
+-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
++int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+                      unsigned cookie)
+ {
+       char udev_cookie[DM_COOKIE_LENGTH];
+       char *envp[] = { udev_cookie, NULL };
+       if (!cookie)
+-              kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
++              return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+       else {
+               snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
+                        DM_COOKIE_ENV_VAR_NAME, cookie);
+-              kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
++              return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
++                                        action, envp);
+       }
+ }
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -125,8 +125,8 @@ void dm_stripe_exit(void);
+ int dm_open_count(struct mapped_device *md);
+ int dm_lock_for_deletion(struct mapped_device *md);
+-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+-                     unsigned cookie);
++int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
++                    unsigned cookie);
+ int dm_io_init(void);
+ void dm_io_exit(void);
+--- a/include/linux/dm-ioctl.h
++++ b/include/linux/dm-ioctl.h
+@@ -266,9 +266,9 @@ enum {
+ #define DM_DEV_SET_GEOMETRY   _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
+ #define DM_VERSION_MAJOR      4
+-#define DM_VERSION_MINOR      16
++#define DM_VERSION_MINOR      17
+ #define DM_VERSION_PATCHLEVEL 0
+-#define DM_VERSION_EXTRA      "-ioctl (2009-11-05)"
++#define DM_VERSION_EXTRA      "-ioctl (2010-03-05)"
+ /* Status bits */
+ #define DM_READONLY_FLAG      (1 << 0) /* In/Out */
+@@ -316,4 +316,9 @@ enum {
+  */
+ #define DM_QUERY_INACTIVE_TABLE_FLAG  (1 << 12) /* In */
++/*
++ * If set, a uevent was generated for which the caller may need to wait.
++ */
++#define DM_UEVENT_GENERATED_FLAG      (1 << 13) /* Out */
++
+ #endif                                /* _LINUX_DM_IOCTL_H */
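
Before the next patch, a note on how the new out-flag is meant to be consumed. The sketch below is a minimal raw-ioctl caller, not libdevmapper code: the device name "testdev" is a placeholder, error handling is abbreviated, it needs root, and DM_UEVENT_GENERATED_FLAG is defined locally in case the installed header predates this patch.

/*
 * Minimal sketch of a userspace caller checking the new flag.
 * Assumptions: a mapped device named "testdev" exists and we run
 * as root; the flag value mirrors the patch above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

#ifndef DM_UEVENT_GENERATED_FLAG
#define DM_UEVENT_GENERATED_FLAG (1 << 13)	/* Out, per the patch above */
#endif

int main(void)
{
	struct dm_ioctl dmi;
	int fd = open("/dev/mapper/control", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&dmi, 0, sizeof(dmi));
	dmi.version[0] = DM_VERSION_MAJOR;
	dmi.version[1] = DM_VERSION_MINOR;
	dmi.version[2] = DM_VERSION_PATCHLEVEL;
	dmi.data_size  = sizeof(dmi);
	strncpy(dmi.name, "testdev", sizeof(dmi.name) - 1);

	if (ioctl(fd, DM_DEV_REMOVE, &dmi) == 0) {
		if (dmi.flags & DM_UEVENT_GENERATED_FLAG)
			printf("uevent generated; wait for udev to settle\n");
		else
			printf("no uevent; nothing to wait for\n");
	}

	close(fd);
	return 0;
}
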
diff --git a/queue-2.6.33/intel-agp-switch-to-wbinvd_on_all_cpus.patch b/queue-2.6.33/intel-agp-switch-to-wbinvd_on_all_cpus.patch
new file mode 100644 (file)
index 0000000..3ed4ab7
--- /dev/null
@@ -0,0 +1,63 @@
+From 48a719c238bcbb72d6da79de9c5b3b93ab472107 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 22 Jan 2010 16:01:04 +0100
+Subject: intel-agp: Switch to wbinvd_on_all_cpus
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 48a719c238bcbb72d6da79de9c5b3b93ab472107 upstream.
+
+Simplify if-statement while at it.
+
+[ hpa: we need to #include <asm/smp.h> ]
+
+Cc: Dave Jones <davej@redhat.com>
+Cc: David Airlie <airlied@linux.ie>
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <1264172467-25155-3-git-send-email-bp@amd64.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/agp/intel-agp.c |   15 ++++-----------
+ 1 file changed, 4 insertions(+), 11 deletions(-)
+
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -8,6 +8,7 @@
+ #include <linux/kernel.h>
+ #include <linux/pagemap.h>
+ #include <linux/agp_backend.h>
++#include <asm/smp.h>
+ #include "agp.h"
+ /*
+@@ -815,12 +816,6 @@ static void intel_i830_setup_flush(void)
+               intel_i830_fini_flush();
+ }
+-static void
+-do_wbinvd(void *null)
+-{
+-      wbinvd();
+-}
+-
+ /* The chipset_flush interface needs to get data that has already been
+  * flushed out of the CPU all the way out to main memory, because the GPU
+  * doesn't snoop those buffers.
+@@ -837,12 +832,10 @@ static void intel_i830_chipset_flush(str
+       memset(pg, 0, 1024);
+-      if (cpu_has_clflush) {
++      if (cpu_has_clflush)
+               clflush_cache_range(pg, 1024);
+-      } else {
+-              if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
+-                      printk(KERN_ERR "Timed out waiting for cache flush.\n");
+-      }
++      else if (wbinvd_on_all_cpus() != 0)
++              printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ }
+ /* The intel i830 automatically initializes the agp aperture during POST.
diff --git a/queue-2.6.33/series b/queue-2.6.33/series
index 78853252fc0827af79ca6ce7bddcfa7a04b7c35c..04e7d5cba8828ea69de7c7024a688d02fff6172a 100644 (file)
--- a/queue-2.6.33/series
+++ b/queue-2.6.33/series
@@ -74,3 +74,11 @@ x86-amd-iommu-pt-mode-fix-for-domain_destroy.patch
 x86-amd-iommu-use-helper-function-to-destroy-domain.patch
 x86-amd-iommu-enable-iommu-before-attaching-devices.patch
 revert-x86-disable-iommus-on-kernel-crash.patch
+x86-lib-add-wbinvd-smp-helpers.patch
+x86-cacheinfo-fix-disabling-of-l3-cache-indices.patch
+intel-agp-switch-to-wbinvd_on_all_cpus.patch
+x86-cacheinfo-add-cache-index-disable-sysfs-attrs-only-to-l3-caches.patch
+x86-cacheinfo-calculate-l3-indices.patch
+x86-cacheinfo-remove-numa-dependency-fix-for-amd-fam10h-rev-d1.patch
+x86-cacheinfo-enable-l3-cid-only-on-amd.patch
+dm-ioctl-introduce-flag-indicating-uevent-was-generated.patch
diff --git a/queue-2.6.33/x86-cacheinfo-add-cache-index-disable-sysfs-attrs-only-to-l3-caches.patch b/queue-2.6.33/x86-cacheinfo-add-cache-index-disable-sysfs-attrs-only-to-l3-caches.patch
new file mode 100644 (file)
index 0000000..44456ba
--- /dev/null
@@ -0,0 +1,84 @@
+From 897de50e08937663912c86fb12ad7f708af2386c Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 22 Jan 2010 16:01:06 +0100
+Subject: x86, cacheinfo: Add cache index disable sysfs attrs only to L3 caches
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 897de50e08937663912c86fb12ad7f708af2386c upstream.
+
+The cache_disable_[01] attribute in
+
+/sys/devices/system/cpu/cpu?/cache/index[0-3]/
+
+is enabled on all cache levels although only L3 supports it. Add it only
+to the cache level that actually supports it.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <1264172467-25155-5-git-send-email-bp@amd64.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/intel_cacheinfo.c |   35 +++++++++++++++++++++++++---------
+ 1 file changed, 26 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -814,16 +814,24 @@ static struct _cache_attr cache_disable_
+ static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+               show_cache_disable_1, store_cache_disable_1);
++#define DEFAULT_SYSFS_CACHE_ATTRS     \
++      &type.attr,                     \
++      &level.attr,                    \
++      &coherency_line_size.attr,      \
++      &physical_line_partition.attr,  \
++      &ways_of_associativity.attr,    \
++      &number_of_sets.attr,           \
++      &size.attr,                     \
++      &shared_cpu_map.attr,           \
++      &shared_cpu_list.attr
++
+ static struct attribute *default_attrs[] = {
+-      &type.attr,
+-      &level.attr,
+-      &coherency_line_size.attr,
+-      &physical_line_partition.attr,
+-      &ways_of_associativity.attr,
+-      &number_of_sets.attr,
+-      &size.attr,
+-      &shared_cpu_map.attr,
+-      &shared_cpu_list.attr,
++      DEFAULT_SYSFS_CACHE_ATTRS,
++      NULL
++};
++
++static struct attribute *default_l3_attrs[] = {
++      DEFAULT_SYSFS_CACHE_ATTRS,
+       &cache_disable_0.attr,
+       &cache_disable_1.attr,
+       NULL
+@@ -916,6 +924,7 @@ static int __cpuinit cache_add_dev(struc
+       unsigned int cpu = sys_dev->id;
+       unsigned long i, j;
+       struct _index_kobject *this_object;
++      struct _cpuid4_info   *this_leaf;
+       int retval;
+       retval = cpuid4_cache_sysfs_init(cpu);
+@@ -934,6 +943,14 @@ static int __cpuinit cache_add_dev(struc
+               this_object = INDEX_KOBJECT_PTR(cpu, i);
+               this_object->cpu = cpu;
+               this_object->index = i;
++
++              this_leaf = CPUID4_INFO_IDX(cpu, i);
++
++              if (this_leaf->can_disable)
++                      ktype_cache.default_attrs = default_l3_attrs;
++              else
++                      ktype_cache.default_attrs = default_attrs;
++
+               retval = kobject_init_and_add(&(this_object->kobj),
+                                             &ktype_cache,
+                                             per_cpu(ici_cache_kobject, cpu),
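
Taken together with the sysfs path quoted in the changelog, the user-visible effect of this patch is easy to probe. A small sketch follows; cpu0/index3 and the example index 12 are placeholders, and the attribute only exists on AMD hardware whose L3 supports cache index disable.

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		/* After this patch the file exists only under the L3
		 * (index3) directory; on other cache levels, or on
		 * hardware without the feature, the open simply fails. */
		perror(path);
		return 1;
	}

	if (fgets(buf, sizeof(buf), f))
		printf("cache_disable_0 = %s", buf);	/* e.g. 0x00000000 */
	fclose(f);

	/* Disabling cache index 12 would be a root-only write:
	 *   echo 12 > /sys/.../index3/cache_disable_0 */
	return 0;
}
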
diff --git a/queue-2.6.33/x86-cacheinfo-calculate-l3-indices.patch b/queue-2.6.33/x86-cacheinfo-calculate-l3-indices.patch
new file mode 100644 (file)
index 0000000..928f596
--- /dev/null
@@ -0,0 +1,94 @@
+From 048a8774ca43488d78605031f11cc206d7a2682a Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 22 Jan 2010 16:01:07 +0100
+Subject: x86, cacheinfo: Calculate L3 indices
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 048a8774ca43488d78605031f11cc206d7a2682a upstream.
+
+We need to know the valid L3 indices interval when disabling them over
+/sysfs. Do that when the core is brought online and add boundary checks
+to the sysfs .store attribute.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <1264172467-25155-6-git-send-email-bp@amd64.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/intel_cacheinfo.c |   35 ++++++++++++++++++++++++++++++----
+ 1 file changed, 31 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -151,7 +151,8 @@ struct _cpuid4_info {
+       union _cpuid4_leaf_ebx ebx;
+       union _cpuid4_leaf_ecx ecx;
+       unsigned long size;
+-      unsigned long can_disable;
++      bool can_disable;
++      unsigned int l3_indices;
+       DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+ };
+@@ -161,7 +162,8 @@ struct _cpuid4_info_regs {
+       union _cpuid4_leaf_ebx ebx;
+       union _cpuid4_leaf_ecx ecx;
+       unsigned long size;
+-      unsigned long can_disable;
++      bool can_disable;
++      unsigned int l3_indices;
+ };
+ unsigned short                        num_cache_leaves;
+@@ -291,6 +293,29 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_
+               (ebx->split.ways_of_associativity + 1) - 1;
+ }
++static unsigned int __cpuinit amd_calc_l3_indices(void)
++{
++      /*
++       * We're called over smp_call_function_single() and therefore
++       * are on the correct cpu.
++       */
++      int cpu = smp_processor_id();
++      int node = cpu_to_node(cpu);
++      struct pci_dev *dev = node_to_k8_nb_misc(node);
++      unsigned int sc0, sc1, sc2, sc3;
++      u32 val;
++
++      pci_read_config_dword(dev, 0x1C4, &val);
++
++      /* calculate subcache sizes */
++      sc0 = !(val & BIT(0));
++      sc1 = !(val & BIT(4));
++      sc2 = !(val & BIT(8))  + !(val & BIT(9));
++      sc3 = !(val & BIT(12)) + !(val & BIT(13));
++
++      return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
++}
++
+ static void __cpuinit
+ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+ {
+@@ -306,7 +331,8 @@ amd_check_l3_disable(int index, struct _
+            (boot_cpu_data.x86_mask  < 0x1)))
+               return;
+-      this_leaf->can_disable = 1;
++      this_leaf->can_disable = true;
++      this_leaf->l3_indices  = amd_calc_l3_indices();
+ }
+ static int
+@@ -765,7 +791,8 @@ static ssize_t store_cache_disable(struc
+               return -EINVAL;
+       /* do not allow writes outside of allowed bits */
+-      if (val & ~(SUBCACHE_MASK | SUBCACHE_INDEX))
++      if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
++          ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
+               return -EINVAL;
+       val |= BIT(30);
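
The subcache arithmetic in amd_calc_l3_indices() can be checked in isolation. Below is a standalone userspace re-derivation; the register value is hypothetical, standing in for dword 0x1C4 of the node's northbridge misc PCI device.

#include <stdio.h>

#define BIT(n) (1U << (n))

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* mirrors the hunk above: a clear bit means the subcache is present */
static unsigned int calc_l3_indices(unsigned int val)
{
	unsigned int sc0 = !(val & BIT(0));
	unsigned int sc1 = !(val & BIT(4));
	unsigned int sc2 = !(val & BIT(8))  + !(val & BIT(9));
	unsigned int sc3 = !(val & BIT(12)) + !(val & BIT(13));

	/* largest subcache count, in units of 1024 indices, minus one */
	return (max_u(max_u(max_u(sc0, sc1), sc2), sc3) << 10) - 1;
}

int main(void)
{
	/* 0x0 = all subcaches enabled: sc2 = sc3 = 2, so 2048 - 1 */
	printf("l3_indices = %u\n", calc_l3_indices(0x0));	/* 2047 */
	return 0;
}
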
diff --git a/queue-2.6.33/x86-cacheinfo-enable-l3-cid-only-on-amd.patch b/queue-2.6.33/x86-cacheinfo-enable-l3-cid-only-on-amd.patch
new file mode 100644 (file)
index 0000000..b67e0ef
--- /dev/null
@@ -0,0 +1,262 @@
+From cb19060abfdecac0d1eb2d2f0e7d6b7a3f8bc4f4 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Thu, 18 Feb 2010 19:37:14 +0100
+Subject: x86, cacheinfo: Enable L3 CID only on AMD
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit cb19060abfdecac0d1eb2d2f0e7d6b7a3f8bc4f4 upstream.
+
+Final stage linking can fail with
+
+ arch/x86/built-in.o: In function `store_cache_disable':
+ intel_cacheinfo.c:(.text+0xc509): undefined reference to `amd_get_nb_id'
+ arch/x86/built-in.o: In function `show_cache_disable':
+ intel_cacheinfo.c:(.text+0xc7d3): undefined reference to `amd_get_nb_id'
+
+when CONFIG_CPU_SUP_AMD is not enabled because the amd_get_nb_id
+helper is defined in AMD-specific code but also used in generic code
+(intel_cacheinfo.c). Reorganize the L3 cache index disable code under
+CONFIG_CPU_SUP_AMD since it is AMD-only anyway.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <20100218184210.GF20473@aftab>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/intel_cacheinfo.c |  186 +++++++++++++++++-----------------
+ 1 file changed, 98 insertions(+), 88 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -293,6 +293,13 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_
+               (ebx->split.ways_of_associativity + 1) - 1;
+ }
++struct _cache_attr {
++      struct attribute attr;
++      ssize_t (*show)(struct _cpuid4_info *, char *);
++      ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
++};
++
++#ifdef CONFIG_CPU_SUP_AMD
+ static unsigned int __cpuinit amd_calc_l3_indices(void)
+ {
+       /*
+@@ -303,7 +310,7 @@ static unsigned int __cpuinit amd_calc_l
+       int node = cpu_to_node(cpu);
+       struct pci_dev *dev = node_to_k8_nb_misc(node);
+       unsigned int sc0, sc1, sc2, sc3;
+-      u32 val;
++      u32 val = 0;
+       pci_read_config_dword(dev, 0x1C4, &val);
+@@ -335,6 +342,94 @@ amd_check_l3_disable(int index, struct _
+       this_leaf->l3_indices  = amd_calc_l3_indices();
+ }
++static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
++                                unsigned int index)
++{
++      int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
++      int node = amd_get_nb_id(cpu);
++      struct pci_dev *dev = node_to_k8_nb_misc(node);
++      unsigned int reg = 0;
++
++      if (!this_leaf->can_disable)
++              return -EINVAL;
++
++      if (!dev)
++              return -EINVAL;
++
++      pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
++      return sprintf(buf, "0x%08x\n", reg);
++}
++
++#define SHOW_CACHE_DISABLE(index)                                     \
++static ssize_t                                                                \
++show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
++{                                                                     \
++      return show_cache_disable(this_leaf, buf, index);               \
++}
++SHOW_CACHE_DISABLE(0)
++SHOW_CACHE_DISABLE(1)
++
++static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
++      const char *buf, size_t count, unsigned int index)
++{
++      int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
++      int node = amd_get_nb_id(cpu);
++      struct pci_dev *dev = node_to_k8_nb_misc(node);
++      unsigned long val = 0;
++
++#define SUBCACHE_MASK (3UL << 20)
++#define SUBCACHE_INDEX        0xfff
++
++      if (!this_leaf->can_disable)
++              return -EINVAL;
++
++      if (!capable(CAP_SYS_ADMIN))
++              return -EPERM;
++
++      if (!dev)
++              return -EINVAL;
++
++      if (strict_strtoul(buf, 10, &val) < 0)
++              return -EINVAL;
++
++      /* do not allow writes outside of allowed bits */
++      if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
++          ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
++              return -EINVAL;
++
++      val |= BIT(30);
++      pci_write_config_dword(dev, 0x1BC + index * 4, val);
++      /*
++       * We need to WBINVD on a core on the node containing the L3 cache which
++       * indices we disable therefore a simple wbinvd() is not sufficient.
++       */
++      wbinvd_on_cpu(cpu);
++      pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
++      return count;
++}
++
++#define STORE_CACHE_DISABLE(index)                                    \
++static ssize_t                                                                \
++store_cache_disable_##index(struct _cpuid4_info *this_leaf,           \
++                          const char *buf, size_t count)              \
++{                                                                     \
++      return store_cache_disable(this_leaf, buf, count, index);       \
++}
++STORE_CACHE_DISABLE(0)
++STORE_CACHE_DISABLE(1)
++
++static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
++              show_cache_disable_0, store_cache_disable_0);
++static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
++              show_cache_disable_1, store_cache_disable_1);
++
++#else /* CONFIG_CPU_SUP_AMD */
++static void __cpuinit
++amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
++{
++};
++#endif /* CONFIG_CPU_SUP_AMD */
++
+ static int
+ __cpuinit cpuid4_cache_lookup_regs(int index,
+                                  struct _cpuid4_info_regs *this_leaf)
+@@ -740,88 +835,6 @@ static ssize_t show_type(struct _cpuid4_
+ #define to_object(k)  container_of(k, struct _index_kobject, kobj)
+ #define to_attr(a)    container_of(a, struct _cache_attr, attr)
+-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+-                                unsigned int index)
+-{
+-      int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+-      int node = amd_get_nb_id(cpu);
+-      struct pci_dev *dev = node_to_k8_nb_misc(node);
+-      unsigned int reg = 0;
+-
+-      if (!this_leaf->can_disable)
+-              return -EINVAL;
+-
+-      if (!dev)
+-              return -EINVAL;
+-
+-      pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+-      return sprintf(buf, "0x%08x\n", reg);
+-}
+-
+-#define SHOW_CACHE_DISABLE(index)                                     \
+-static ssize_t                                                                \
+-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+-{                                                                     \
+-      return show_cache_disable(this_leaf, buf, index);               \
+-}
+-SHOW_CACHE_DISABLE(0)
+-SHOW_CACHE_DISABLE(1)
+-
+-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+-      const char *buf, size_t count, unsigned int index)
+-{
+-      int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+-      int node = amd_get_nb_id(cpu);
+-      struct pci_dev *dev = node_to_k8_nb_misc(node);
+-      unsigned long val = 0;
+-
+-#define SUBCACHE_MASK (3UL << 20)
+-#define SUBCACHE_INDEX        0xfff
+-
+-      if (!this_leaf->can_disable)
+-              return -EINVAL;
+-
+-      if (!capable(CAP_SYS_ADMIN))
+-              return -EPERM;
+-
+-      if (!dev)
+-              return -EINVAL;
+-
+-      if (strict_strtoul(buf, 10, &val) < 0)
+-              return -EINVAL;
+-
+-      /* do not allow writes outside of allowed bits */
+-      if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
+-          ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
+-              return -EINVAL;
+-
+-      val |= BIT(30);
+-      pci_write_config_dword(dev, 0x1BC + index * 4, val);
+-      /*
+-       * We need to WBINVD on a core on the node containing the L3 cache which
+-       * indices we disable therefore a simple wbinvd() is not sufficient.
+-       */
+-      wbinvd_on_cpu(cpu);
+-      pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+-      return count;
+-}
+-
+-#define STORE_CACHE_DISABLE(index)                                    \
+-static ssize_t                                                                \
+-store_cache_disable_##index(struct _cpuid4_info *this_leaf,           \
+-                          const char *buf, size_t count)              \
+-{                                                                     \
+-      return store_cache_disable(this_leaf, buf, count, index);       \
+-}
+-STORE_CACHE_DISABLE(0)
+-STORE_CACHE_DISABLE(1)
+-
+-struct _cache_attr {
+-      struct attribute attr;
+-      ssize_t (*show)(struct _cpuid4_info *, char *);
+-      ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+-};
+-
+ #define define_one_ro(_name) \
+ static struct _cache_attr _name = \
+       __ATTR(_name, 0444, show_##_name, NULL)
+@@ -836,11 +849,6 @@ define_one_ro(size);
+ define_one_ro(shared_cpu_map);
+ define_one_ro(shared_cpu_list);
+-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
+-              show_cache_disable_0, store_cache_disable_0);
+-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+-              show_cache_disable_1, store_cache_disable_1);
+-
+ #define DEFAULT_SYSFS_CACHE_ATTRS     \
+       &type.attr,                     \
+       &level.attr,                    \
+@@ -859,8 +867,10 @@ static struct attribute *default_attrs[]
+ static struct attribute *default_l3_attrs[] = {
+       DEFAULT_SYSFS_CACHE_ATTRS,
++#ifdef CONFIG_CPU_SUP_AMD
+       &cache_disable_0.attr,
+       &cache_disable_1.attr,
++#endif
+       NULL
+ };
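
The structural trick in this patch is the empty stub in the #else branch: generic code keeps calling amd_check_l3_disable() unconditionally, and the link succeeds whether or not CONFIG_CPU_SUP_AMD is set. A toy, self-contained version of the pattern (generic names, not the kernel's):

/*
 * Build with -DCONFIG_CPU_SUP_AMD to get the real helper, without it
 * to get the no-op stub; callers compile and link in both cases.
 */
#include <stdio.h>

#ifdef CONFIG_CPU_SUP_AMD
static void amd_check_l3_disable(int index)
{
	printf("AMD path: checking L3 index %d\n", index);
}
#else
static void amd_check_l3_disable(int index)
{
	(void)index;		/* no-op on non-AMD builds */
}
#endif

int main(void)
{
	amd_check_l3_disable(3);	/* links with or without AMD support */
	return 0;
}
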
diff --git a/queue-2.6.33/x86-cacheinfo-fix-disabling-of-l3-cache-indices.patch b/queue-2.6.33/x86-cacheinfo-fix-disabling-of-l3-cache-indices.patch
new file mode 100644 (file)
index 0000000..170ae9e
--- /dev/null
@@ -0,0 +1,107 @@
+From dcf39daf3d6d97f8741e82f0b9fb7554704ed2d1 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 22 Jan 2010 16:01:05 +0100
+Subject: x86, cacheinfo: Fix disabling of L3 cache indices
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit dcf39daf3d6d97f8741e82f0b9fb7554704ed2d1 upstream.
+
+* Correct the masks used for writing the cache index disable indices.
+* Do not turn off L3 scrubber - it is not necessary.
+* Make sure wbinvd is executed on the same node where the L3 is.
+* Check for out-of-bounds values written to the registers.
+* Make show_cache_disable hex values unambiguous
+* Check for Erratum #388
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <1264172467-25155-4-git-send-email-bp@amd64.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/intel_cacheinfo.c |   34 +++++++++++++++++++++-------------
+ 1 file changed, 21 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -18,6 +18,7 @@
+ #include <asm/processor.h>
+ #include <linux/smp.h>
+ #include <asm/k8.h>
++#include <asm/smp.h>
+ #define LVL_1_INST    1
+ #define LVL_1_DATA    2
+@@ -299,8 +300,10 @@ amd_check_l3_disable(int index, struct _
+       if (boot_cpu_data.x86 == 0x11)
+               return;
+-      /* see erratum #382 */
+-      if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
++      /* see errata #382 and #388 */
++      if ((boot_cpu_data.x86 == 0x10) &&
++          ((boot_cpu_data.x86_model < 0x9) ||
++           (boot_cpu_data.x86_mask  < 0x1)))
+               return;
+       this_leaf->can_disable = 1;
+@@ -726,12 +729,12 @@ static ssize_t show_cache_disable(struct
+               return -EINVAL;
+       pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+-      return sprintf(buf, "%x\n", reg);
++      return sprintf(buf, "0x%08x\n", reg);
+ }
+ #define SHOW_CACHE_DISABLE(index)                                     \
+ static ssize_t                                                                \
+-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)         \
++show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+ {                                                                     \
+       return show_cache_disable(this_leaf, buf, index);               \
+ }
+@@ -745,7 +748,9 @@ static ssize_t store_cache_disable(struc
+       int node = cpu_to_node(cpu);
+       struct pci_dev *dev = node_to_k8_nb_misc(node);
+       unsigned long val = 0;
+-      unsigned int scrubber = 0;
++
++#define SUBCACHE_MASK (3UL << 20)
++#define SUBCACHE_INDEX        0xfff
+       if (!this_leaf->can_disable)
+               return -EINVAL;
+@@ -759,21 +764,24 @@ static ssize_t store_cache_disable(struc
+       if (strict_strtoul(buf, 10, &val) < 0)
+               return -EINVAL;
+-      val |= 0xc0000000;
+-
+-      pci_read_config_dword(dev, 0x58, &scrubber);
+-      scrubber &= ~0x1f000000;
+-      pci_write_config_dword(dev, 0x58, scrubber);
++      /* do not allow writes outside of allowed bits */
++      if (val & ~(SUBCACHE_MASK | SUBCACHE_INDEX))
++              return -EINVAL;
+-      pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
+-      wbinvd();
++      val |= BIT(30);
+       pci_write_config_dword(dev, 0x1BC + index * 4, val);
++      /*
++       * We need to WBINVD on a core on the node containing the L3 cache which
++       * indices we disable therefore a simple wbinvd() is not sufficient.
++       */
++      wbinvd_on_cpu(cpu);
++      pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+       return count;
+ }
+ #define STORE_CACHE_DISABLE(index)                                    \
+ static ssize_t                                                                \
+-store_cache_disable_##index(struct _cpuid4_info *this_leaf,           \
++store_cache_disable_##index(struct _cpuid4_info *this_leaf,           \
+                           const char *buf, size_t count)              \
+ {                                                                     \
+       return store_cache_disable(this_leaf, buf, count, index);       \
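
The two SUBCACHE_* constants define exactly which bits a user may set: bits 20-21 select the subcache, bits 0-11 the index, and bits 30/31 (the actual disable controls) stay kernel-owned. A userspace sketch of the bounds check, including the l3_indices bound that the Calculate-L3-indices patch adds on top; the l3_indices value 2047 is illustrative.

#include <stdio.h>

#define SUBCACHE_MASK	(3UL << 20)
#define SUBCACHE_INDEX	0xfff

static int valid(unsigned long val, unsigned long l3_indices)
{
	if (val & ~(SUBCACHE_MASK | SUBCACHE_INDEX))
		return 0;	/* stray bits outside the allowed fields */
	if ((val & SUBCACHE_INDEX) > l3_indices)
		return 0;	/* index beyond what this L3 implements */
	return 1;
}

int main(void)
{
	unsigned long l3_indices = 2047;

	printf("%d\n", valid(12, l3_indices));		   /* 1: plain index */
	printf("%d\n", valid(1UL << 20 | 12, l3_indices)); /* 1: subcache + index */
	printf("%d\n", valid(1UL << 30, l3_indices));	   /* 0: bit 30 is kernel-owned */
	printf("%d\n", valid(4095, l3_indices));	   /* 0: index > l3_indices */
	return 0;
}
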
diff --git a/queue-2.6.33/x86-cacheinfo-remove-numa-dependency-fix-for-amd-fam10h-rev-d1.patch b/queue-2.6.33/x86-cacheinfo-remove-numa-dependency-fix-for-amd-fam10h-rev-d1.patch
new file mode 100644 (file)
index 0000000..5928d02
--- /dev/null
@@ -0,0 +1,54 @@
+From f619b3d8427eb57f0134dab75b0d217325c72411 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Thu, 4 Feb 2010 12:09:07 +0100
+Subject: x86, cacheinfo: Remove NUMA dependency, fix for AMD Fam10h rev D1
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit f619b3d8427eb57f0134dab75b0d217325c72411 upstream.
+
+The show/store_cache_disable routines depend unnecessarily on NUMA's
+cpu_to_node and the disabling of cache indices broke when !CONFIG_NUMA.
+Remove that dependency by using a helper which is always correct.
+
+While at it, enable L3 Cache Index disable on rev D1 Istanbuls which
+sport the feature too.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <20100218184339.GG20473@aftab>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/intel_cacheinfo.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -327,7 +327,7 @@ amd_check_l3_disable(int index, struct _
+       /* see errata #382 and #388 */
+       if ((boot_cpu_data.x86 == 0x10) &&
+-          ((boot_cpu_data.x86_model < 0x9) ||
++          ((boot_cpu_data.x86_model < 0x8) ||
+            (boot_cpu_data.x86_mask  < 0x1)))
+               return;
+@@ -744,7 +744,7 @@ static ssize_t show_cache_disable(struct
+                                 unsigned int index)
+ {
+       int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+-      int node = cpu_to_node(cpu);
++      int node = amd_get_nb_id(cpu);
+       struct pci_dev *dev = node_to_k8_nb_misc(node);
+       unsigned int reg = 0;
+@@ -771,7 +771,7 @@ static ssize_t store_cache_disable(struc
+       const char *buf, size_t count, unsigned int index)
+ {
+       int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+-      int node = cpu_to_node(cpu);
++      int node = amd_get_nb_id(cpu);
+       struct pci_dev *dev = node_to_k8_nb_misc(node);
+       unsigned long val = 0;
diff --git a/queue-2.6.33/x86-lib-add-wbinvd-smp-helpers.patch b/queue-2.6.33/x86-lib-add-wbinvd-smp-helpers.patch
new file mode 100644 (file)
index 0000000..6d15dd9
--- /dev/null
@@ -0,0 +1,86 @@
+From a7b480e7f30b3813353ec009f10f2ac7a6669f3b Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 22 Jan 2010 16:01:03 +0100
+Subject: x86, lib: Add wbinvd smp helpers
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit a7b480e7f30b3813353ec009f10f2ac7a6669f3b upstream.
+
+Add wbinvd_on_cpu and wbinvd_on_all_cpus stubs for executing wbinvd on a
+particular CPU.
+
+[ hpa: renamed lib/smp.c to lib/cache-smp.c ]
+[ hpa: wbinvd_on_all_cpus() returns int, but wbinvd() returns
+  void.  Thus, the former cannot be a macro for the latter,
+  replace with an inline function. ]
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <1264172467-25155-2-git-send-email-bp@amd64.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/smp.h |    9 +++++++++
+ arch/x86/lib/Makefile      |    2 +-
+ arch/x86/lib/cache-smp.c   |   19 +++++++++++++++++++
+ 3 files changed, 29 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -135,6 +135,8 @@ int native_cpu_disable(void);
+ void native_cpu_die(unsigned int cpu);
+ void native_play_dead(void);
+ void play_dead_common(void);
++void wbinvd_on_cpu(int cpu);
++int wbinvd_on_all_cpus(void);
+ void native_send_call_func_ipi(const struct cpumask *mask);
+ void native_send_call_func_single_ipi(int cpu);
+@@ -147,6 +149,13 @@ static inline int num_booting_cpus(void)
+ {
+       return cpumask_weight(cpu_callout_mask);
+ }
++#else /* !CONFIG_SMP */
++#define wbinvd_on_cpu(cpu)     wbinvd()
++static inline int wbinvd_on_all_cpus(void)
++{
++      wbinvd();
++      return 0;
++}
+ #endif /* CONFIG_SMP */
+ extern unsigned disabled_cpus __cpuinitdata;
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -14,7 +14,7 @@ $(obj)/inat.o: $(obj)/inat-tables.c
+ clean-files := inat-tables.c
+-obj-$(CONFIG_SMP) += msr-smp.o
++obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
+ lib-y := delay.o
+ lib-y += thunk_$(BITS).o
+--- /dev/null
++++ b/arch/x86/lib/cache-smp.c
+@@ -0,0 +1,19 @@
++#include <linux/smp.h>
++#include <linux/module.h>
++
++static void __wbinvd(void *dummy)
++{
++      wbinvd();
++}
++
++void wbinvd_on_cpu(int cpu)
++{
++      smp_call_function_single(cpu, __wbinvd, NULL, 1);
++}
++EXPORT_SYMBOL(wbinvd_on_cpu);
++
++int wbinvd_on_all_cpus(void)
++{
++      return on_each_cpu(__wbinvd, NULL, 1);
++}
++EXPORT_SYMBOL(wbinvd_on_all_cpus);
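
hpa's second note above deserves a concrete illustration: since wbinvd() returns void, the UP fallback cannot be a plain #define when callers, such as intel_i830_chipset_flush() earlier in this series, test the return value. A self-contained mock follows; wbinvd() is stubbed out here because the real one is a privileged instruction.

#include <stdio.h>

static void wbinvd(void)		/* mock of the real instruction */
{
	puts("wbinvd executed");
}

/* the UP fallback from the patch, same shape: wrap the void call
 * and supply the 0 meaning "no CPU timed out" */
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}

int main(void)
{
	if (wbinvd_on_all_cpus() != 0)	/* compiles: there is a return value */
		fprintf(stderr, "Timed out waiting for cache flush.\n");
	return 0;
}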