git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.0-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 14 Jun 2012 22:30:22 +0000 (15:30 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 14 Jun 2012 22:30:22 +0000 (15:30 -0700)
added patches:
e1000-save-skb-counts-in-tx-to-avoid-cache-misses.patch
fuse-fix-stat-call-on-32-bit-platforms.patch
iwlwifi-don-t-mess-up-the-scd-when-removing-a-key.patch
sched-fix-the-relax_domain_level-boot-parameter.patch
x86-mce-amd-make-apic-lvt-thresholding-interrupt-optional.patch

queue-3.0/e1000-save-skb-counts-in-tx-to-avoid-cache-misses.patch [new file with mode: 0644]
queue-3.0/fuse-fix-stat-call-on-32-bit-platforms.patch [new file with mode: 0644]
queue-3.0/iwlwifi-don-t-mess-up-the-scd-when-removing-a-key.patch [new file with mode: 0644]
queue-3.0/sched-fix-the-relax_domain_level-boot-parameter.patch [new file with mode: 0644]
queue-3.0/series
queue-3.0/x86-mce-amd-make-apic-lvt-thresholding-interrupt-optional.patch [new file with mode: 0644]

diff --git a/queue-3.0/e1000-save-skb-counts-in-tx-to-avoid-cache-misses.patch b/queue-3.0/e1000-save-skb-counts-in-tx-to-avoid-cache-misses.patch
new file mode 100644 (file)
index 0000000..bae8313
--- /dev/null
@@ -0,0 +1,92 @@
+From 31c15a2f24ebdab14333d9bf5df49757842ae2ec Mon Sep 17 00:00:00 2001
+From: Dean Nelson <dnelson@redhat.com>
+Date: Thu, 25 Aug 2011 14:39:24 +0000
+Subject: e1000: save skb counts in TX to avoid cache misses
+
+From: Dean Nelson <dnelson@redhat.com>
+
+commit 31c15a2f24ebdab14333d9bf5df49757842ae2ec upstream.
+
+Virtual Machines with emulated e1000 network adapter running on Parallels'
+server were seeing kernel panics due to the e1000 driver dereferencing an
+unexpected NULL pointer retrieved from buffer_info->skb.
+
+The problem has been addressed for the e1000e driver, but not for the e1000.
+Since the two drivers share similar code in the affected area, a port of the
+following e1000e driver commit solves the issue for the e1000 driver:
+
+commit 9ed318d546a29d7a591dbe648fd1a2efe3be1180
+Author: Tom Herbert <therbert@google.com>
+Date:   Wed May 5 14:02:27 2010 +0000
+
+    e1000e: save skb counts in TX to avoid cache misses
+
+    In e1000_tx_map, precompute number of segements and bytecounts which
+    are derived from fields in skb; these are stored in buffer_info.  When
+    cleaning tx in e1000_clean_tx_irq use the values in the associated
+    buffer_info for statistics counting, this eliminates cache misses
+    on skb fields.
+
+Signed-off-by: Dean Nelson <dnelson@redhat.com>
+Acked-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Roman Kagan <rkagan@parallels.com>
+
+---
+ drivers/net/e1000/e1000.h      |    2 ++
+ drivers/net/e1000/e1000_main.c |   18 +++++++++---------
+ 2 files changed, 11 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/e1000/e1000.h
++++ b/drivers/net/e1000/e1000.h
+@@ -150,6 +150,8 @@ struct e1000_buffer {
+       unsigned long time_stamp;
+       u16 length;
+       u16 next_to_watch;
++      unsigned int segs;
++      unsigned int bytecount;
+       u16 mapped_as_page;
+ };
+--- a/drivers/net/e1000/e1000_main.c
++++ b/drivers/net/e1000/e1000_main.c
+@@ -2798,7 +2798,7 @@ static int e1000_tx_map(struct e1000_ada
+       struct e1000_buffer *buffer_info;
+       unsigned int len = skb_headlen(skb);
+       unsigned int offset = 0, size, count = 0, i;
+-      unsigned int f;
++      unsigned int f, bytecount, segs;
+       i = tx_ring->next_to_use;
+@@ -2899,7 +2899,13 @@ static int e1000_tx_map(struct e1000_ada
+               }
+       }
++      segs = skb_shinfo(skb)->gso_segs ?: 1;
++      /* multiply data chunks by size of headers */
++      bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
++
+       tx_ring->buffer_info[i].skb = skb;
++      tx_ring->buffer_info[i].segs = segs;
++      tx_ring->buffer_info[i].bytecount = bytecount;
+       tx_ring->buffer_info[first].next_to_watch = i;
+       return count;
+@@ -3573,14 +3579,8 @@ static bool e1000_clean_tx_irq(struct e1
+                       cleaned = (i == eop);
+                       if (cleaned) {
+-                              struct sk_buff *skb = buffer_info->skb;
+-                              unsigned int segs, bytecount;
+-                              segs = skb_shinfo(skb)->gso_segs ?: 1;
+-                              /* multiply data chunks by size of headers */
+-                              bytecount = ((segs - 1) * skb_headlen(skb)) +
+-                                          skb->len;
+-                              total_tx_packets += segs;
+-                              total_tx_bytes += bytecount;
++                              total_tx_packets += buffer_info->segs;
++                              total_tx_bytes += buffer_info->bytecount;
+                       }
+                       e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+                       tx_desc->upper.data = 0;
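
As a side note, the precomputation this backport moves into e1000_tx_map() boils down to two lines of arithmetic on skb fields. The following minimal user-space sketch makes that arithmetic concrete; struct fake_skb and its fields are stand-ins for the real struct sk_buff accessors (skb_shinfo(skb)->gso_segs, skb_headlen(), skb->len), not driver code.

#include <stdio.h>

/* Stand-in for the skb fields the driver reads; not the real struct sk_buff. */
struct fake_skb {
	unsigned int gso_segs;   /* number of GSO segments, 0 if not GSO */
	unsigned int headlen;    /* skb_headlen(skb): bytes in the linear area */
	unsigned int len;        /* skb->len: total length of the skb */
};

int main(void)
{
	struct fake_skb skb = { .gso_segs = 4, .headlen = 66, .len = 6066 };

	/* Same computation as the patch: a non-GSO skb counts as one segment,
	 * and the headers are accounted once per extra segment (the patch's
	 * comment: "multiply data chunks by size of headers"). */
	unsigned int segs = skb.gso_segs ? skb.gso_segs : 1;
	unsigned int bytecount = (segs - 1) * skb.headlen + skb.len;

	printf("segs=%u bytecount=%u\n", segs, bytecount);
	return 0;
}

Because both values are derived purely from skb fields that are hot at transmit time, storing them in buffer_info lets e1000_clean_tx_irq() update the statistics without touching the skb again, which is the cache-miss saving the commit message describes.
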
diff --git a/queue-3.0/fuse-fix-stat-call-on-32-bit-platforms.patch b/queue-3.0/fuse-fix-stat-call-on-32-bit-platforms.patch
new file mode 100644 (file)
index 0000000..2f84845
--- /dev/null
@@ -0,0 +1,97 @@
+From 45c72cd73c788dd18c8113d4a404d6b4a01decf1 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <piastry@etersoft.ru>
+Date: Thu, 10 May 2012 19:49:38 +0400
+Subject: fuse: fix stat call on 32 bit platforms
+
+From: Pavel Shilovsky <piastry@etersoft.ru>
+
+commit 45c72cd73c788dd18c8113d4a404d6b4a01decf1 upstream.
+
+Now we store attr->ino at inode->i_ino, return attr->ino at the
+first time and then return inode->i_ino if the attribute timeout
+isn't expired. That's wrong on 32 bit platforms because attr->ino
+is 64 bit and inode->i_ino is 32 bit in this case.
+
+Fix this by saving 64 bit ino in fuse_inode structure and returning
+it every time we call getattr. Also squash attr->ino into inode->i_ino
+explicitly.
+
+Signed-off-by: Pavel Shilovsky <piastry@etersoft.ru>
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/dir.c    |    1 +
+ fs/fuse/fuse_i.h |    3 +++
+ fs/fuse/inode.c  |   17 ++++++++++++++++-
+ 3 files changed, 20 insertions(+), 1 deletion(-)
+
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -858,6 +858,7 @@ int fuse_update_attributes(struct inode
+               if (stat) {
+                       generic_fillattr(inode, stat);
+                       stat->mode = fi->orig_i_mode;
++                      stat->ino = fi->orig_ino;
+               }
+       }
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -82,6 +82,9 @@ struct fuse_inode {
+           preserve the original mode */
+       mode_t orig_i_mode;
++      /** 64 bit inode number */
++      u64 orig_ino;
++
+       /** Version of last attribute change */
+       u64 attr_version;
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(st
+       fi->nlookup = 0;
+       fi->attr_version = 0;
+       fi->writectr = 0;
++      fi->orig_ino = 0;
+       INIT_LIST_HEAD(&fi->write_files);
+       INIT_LIST_HEAD(&fi->queued_writes);
+       INIT_LIST_HEAD(&fi->writepages);
+@@ -140,6 +141,18 @@ static int fuse_remount_fs(struct super_
+       return 0;
+ }
++/*
++ * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
++ * so that it will fit.
++ */
++static ino_t fuse_squash_ino(u64 ino64)
++{
++      ino_t ino = (ino_t) ino64;
++      if (sizeof(ino_t) < sizeof(u64))
++              ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
++      return ino;
++}
++
+ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
+                                  u64 attr_valid)
+ {
+@@ -149,7 +162,7 @@ void fuse_change_attributes_common(struc
+       fi->attr_version = ++fc->attr_version;
+       fi->i_time = attr_valid;
+-      inode->i_ino     = attr->ino;
++      inode->i_ino     = fuse_squash_ino(attr->ino);
+       inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
+       inode->i_nlink   = attr->nlink;
+       inode->i_uid     = attr->uid;
+@@ -175,6 +188,8 @@ void fuse_change_attributes_common(struc
+       fi->orig_i_mode = inode->i_mode;
+       if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
+               inode->i_mode &= ~S_ISVTX;
++
++      fi->orig_ino = attr->ino;
+ }
+ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
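
The fuse_squash_ino() helper added to fs/fuse/inode.c folds the upper half of the 64-bit inode number into the lower half when ino_t is only 32 bits wide. A minimal user-space sketch of the same fold, with uint32_t standing in for a 32-bit ino_t purely for demonstration:

#include <stdint.h>
#include <stdio.h>

/* Assume a 32-bit ino_t, as on the 32-bit platforms the patch targets. */
typedef uint32_t ino32_t;

static ino32_t squash_ino(uint64_t ino64)
{
	ino32_t ino = (ino32_t)ino64;

	/* XOR the upper 32 bits into the lower 32 bits so information from
	 * the whole 64-bit value survives the truncation. */
	if (sizeof(ino32_t) < sizeof(uint64_t))
		ino ^= ino64 >> ((sizeof(uint64_t) - sizeof(ino32_t)) * 8);
	return ino;
}

int main(void)
{
	uint64_t ino64 = 0x123456789abcdef0ULL;

	printf("64-bit ino 0x%llx -> squashed 0x%x\n",
	       (unsigned long long)ino64, squash_ino(ino64));
	return 0;
}

On 64-bit platforms the size check is false and the value passes through unchanged, while the full 64-bit number is kept in fi->orig_ino so getattr can always report it exactly.
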
diff --git a/queue-3.0/iwlwifi-don-t-mess-up-the-scd-when-removing-a-key.patch b/queue-3.0/iwlwifi-don-t-mess-up-the-scd-when-removing-a-key.patch
new file mode 100644 (file)
index 0000000..6424dd2
--- /dev/null
@@ -0,0 +1,45 @@
+From d6ee27eb13beab94056e0de52d81220058ca2297 Mon Sep 17 00:00:00 2001
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Date: Wed, 6 Jun 2012 09:13:36 +0200
+Subject: iwlwifi: don't mess up the SCD when removing a key
+
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+
+commit d6ee27eb13beab94056e0de52d81220058ca2297 upstream.
+
+When we remove a key, we put a key index which was supposed
+to tell the fw that we are actually removing the key. But
+instead the fw took that index as a valid index and messed
+up the SRAM of the device.
+
+This memory corruption on the device mangled the data of
+the SCD. The impact on the user is that SCD queue 2 got
+stuck after having removed keys.
+The message that was printed in the log is:
+
+Queue 2 stuck for 10000ms
+
+This doesn't seem to fix the higher queues that get stuck
+from time to time.
+
+Reviewed-by: Meenakshi Venkataraman <meenakshi.venkataraman@intel.com>
+Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/iwlwifi/iwl-agn-sta.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+@@ -477,7 +477,7 @@ int iwl_remove_dynamic_key(struct iwl_pr
+                                       sizeof(struct iwl_keyinfo));
+       priv->stations[sta_id].sta.key.key_flags =
+                       STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+-      priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
++      priv->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
diff --git a/queue-3.0/sched-fix-the-relax_domain_level-boot-parameter.patch b/queue-3.0/sched-fix-the-relax_domain_level-boot-parameter.patch
new file mode 100644 (file)
index 0000000..96d1bc5
--- /dev/null
@@ -0,0 +1,62 @@
+From a841f8cef4bb124f0f5563314d0beaf2e1249d72 Mon Sep 17 00:00:00 2001
+From: Dimitri Sivanich <sivanich@sgi.com>
+Date: Tue, 5 Jun 2012 13:44:36 -0500
+Subject: sched: Fix the relax_domain_level boot parameter
+
+From: Dimitri Sivanich <sivanich@sgi.com>
+
+commit a841f8cef4bb124f0f5563314d0beaf2e1249d72 upstream.
+
+It does not get processed because sched_domain_level_max is 0 at the
+time that setup_relax_domain_level() is run.
+
+Simply accept the value as it is, as we don't know the value of
+sched_domain_level_max until sched domain construction is completed.
+
+Fix sched_relax_domain_level in cpuset.  The build_sched_domain() routine calls
+the set_domain_attribute() routine prior to setting the sd->level, however,
+the set_domain_attribute() routine relies on the sd->level to decide whether
+idle load balancing will be off/on.
+
+Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/20120605184436.GA15668@sgi.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched.c |    9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -7220,11 +7220,8 @@ int sched_domain_level_max;
+ static int __init setup_relax_domain_level(char *str)
+ {
+-      unsigned long val;
+-
+-      val = simple_strtoul(str, NULL, 0);
+-      if (val < sched_domain_level_max)
+-              default_relax_domain_level = val;
++      if (kstrtoint(str, 0, &default_relax_domain_level))
++              pr_warn("Unable to set relax_domain_level\n");
+       return 1;
+ }
+@@ -7417,7 +7414,6 @@ struct sched_domain *build_sched_domain(
+       if (!sd)
+               return child;
+-      set_domain_attribute(sd, attr);
+       cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
+       if (child) {
+               sd->level = child->level + 1;
+@@ -7425,6 +7421,7 @@ struct sched_domain *build_sched_domain(
+               child->parent = sd;
+       }
+       sd->child = child;
++      set_domain_attribute(sd, attr);
+       return sd;
+ }
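
The reason the boot parameter was silently ignored is visible in the removed check: sched_domain_level_max is still 0 when the __setup() handler runs, so val < sched_domain_level_max can never hold and the value is dropped. A minimal user-space sketch of the old versus new parsing behaviour, with strtol standing in for the kernel's simple_strtoul()/kstrtoint() helpers and -1 standing in for the "not set" default:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *boot_arg = "2";          /* relax_domain_level=2 */
	int sched_domain_level_max = 0;      /* still 0 when __setup handlers run */
	int default_relax_domain_level = -1; /* stand-in for the "not set" default */

	/* Old behaviour: the value is compared against a maximum that is not
	 * known yet, so the assignment never happens. */
	long val = strtol(boot_arg, NULL, 0);
	if (val < sched_domain_level_max)
		default_relax_domain_level = (int)val;
	printf("old parser: default_relax_domain_level = %d\n",
	       default_relax_domain_level);  /* stays -1, parameter ignored */

	/* New behaviour: accept the value as-is; comparing it against the real
	 * level is deferred to set_domain_attribute(), which the patch moves
	 * to run after sd->level has been set. */
	default_relax_domain_level = (int)strtol(boot_arg, NULL, 0);
	printf("new parser: default_relax_domain_level = %d\n",
	       default_relax_domain_level);  /* now 2 */
	return 0;
}
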
diff --git a/queue-3.0/series b/queue-3.0/series
index 51cf7a9ec50daa5acb82b8fb4afb3c0d05a927a6..c2bf561c44c6afdba55f6ca03a4be4d04eb54af4 100644 (file)
@@ -10,3 +10,8 @@ can-c_can-fix-an-interrupt-thrash-issue-with-c_can-driver.patch
 can-c_can-fix-race-condition-in-c_can_open.patch
 hwmon-fam15h_power-increase-output-resolution.patch
 acpi_video-fix-leaking-pci-references.patch
+sched-fix-the-relax_domain_level-boot-parameter.patch
+iwlwifi-don-t-mess-up-the-scd-when-removing-a-key.patch
+x86-mce-amd-make-apic-lvt-thresholding-interrupt-optional.patch
+fuse-fix-stat-call-on-32-bit-platforms.patch
+e1000-save-skb-counts-in-tx-to-avoid-cache-misses.patch
diff --git a/queue-3.0/x86-mce-amd-make-apic-lvt-thresholding-interrupt-optional.patch b/queue-3.0/x86-mce-amd-make-apic-lvt-thresholding-interrupt-optional.patch
new file mode 100644 (file)
index 0000000..d018558
--- /dev/null
@@ -0,0 +1,141 @@
+From f227d4306cf30e1d5b6f231e8ef9006c34f3d186 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Mon, 16 Apr 2012 18:01:53 +0200
+Subject: x86, MCE, AMD: Make APIC LVT thresholding interrupt optional
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit f227d4306cf30e1d5b6f231e8ef9006c34f3d186 upstream.
+
+Currently, the APIC LVT interrupt for error thresholding is implicitly
+enabled. However, there are models in the F15h range which do not enable
+it. Make the code machinery which sets up the APIC interrupt support
+an optional setting and add an ->interrupt_capable member to the bank
+representation mirroring that capability and enable the interrupt offset
+programming only if it is true.
+
+Simplify code and fixup comment style while at it.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Robert Richter <robert.richter@amd.com>
+
+
+---
+ arch/x86/kernel/cpu/mcheck/mce_amd.c |   55 ++++++++++++++++++++++++++++-------
+ 1 file changed, 44 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -52,6 +52,7 @@ struct threshold_block {
+       unsigned int            cpu;
+       u32                     address;
+       u16                     interrupt_enable;
++      bool                    interrupt_capable;
+       u16                     threshold_limit;
+       struct kobject          kobj;
+       struct list_head        miscj;
+@@ -86,6 +87,21 @@ struct thresh_restart {
+       u16                     old_limit;
+ };
++static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
++{
++      /*
++       * bank 4 supports APIC LVT interrupts implicitly since forever.
++       */
++      if (bank == 4)
++              return true;
++
++      /*
++       * IntP: interrupt present; if this bit is set, the thresholding
++       * bank can generate APIC LVT interrupts
++       */
++      return msr_high_bits & BIT(28);
++}
++
+ static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
+ {
+       int msr = (hi & MASK_LVTOFF_HI) >> 20;
+@@ -107,8 +123,10 @@ static int lvt_off_valid(struct threshol
+       return 1;
+ };
+-/* must be called with correct cpu affinity */
+-/* Called via smp_call_function_single() */
++/*
++ * Called via smp_call_function_single(), must be called with correct
++ * cpu affinity.
++ */
+ static void threshold_restart_bank(void *_tr)
+ {
+       struct thresh_restart *tr = _tr;
+@@ -131,6 +149,12 @@ static void threshold_restart_bank(void
+                   (new_count & THRESHOLD_MAX);
+       }
++      /* clear IntType */
++      hi &= ~MASK_INT_TYPE_HI;
++
++      if (!tr->b->interrupt_capable)
++              goto done;
++
+       if (tr->set_lvt_off) {
+               if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
+                       /* set new lvt offset */
+@@ -139,9 +163,10 @@ static void threshold_restart_bank(void
+               }
+       }
+-      tr->b->interrupt_enable ?
+-          (hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
+-          (hi &= ~MASK_INT_TYPE_HI);
++      if (tr->b->interrupt_enable)
++              hi |= INT_TYPE_APIC;
++
++ done:
+       hi |= MASK_COUNT_EN_HI;
+       wrmsr(tr->b->address, lo, hi);
+@@ -206,14 +231,18 @@ void mce_amd_feature_init(struct cpuinfo
+                       if (shared_bank[bank] && c->cpu_core_id)
+                               break;
+ #endif
+-                      offset = setup_APIC_mce(offset,
+-                                              (high & MASK_LVTOFF_HI) >> 20);
+                       memset(&b, 0, sizeof(b));
+-                      b.cpu           = cpu;
+-                      b.bank          = bank;
+-                      b.block         = block;
+-                      b.address       = address;
++                      b.cpu                   = cpu;
++                      b.bank                  = bank;
++                      b.block                 = block;
++                      b.address               = address;
++                      b.interrupt_capable     = lvt_interrupt_supported(bank, high);
++
++                      if (b.interrupt_capable) {
++                              int new = (high & MASK_LVTOFF_HI) >> 20;
++                              offset  = setup_APIC_mce(offset, new);
++                      }
+                       mce_threshold_block_init(&b, offset);
+                       mce_threshold_vector = amd_threshold_interrupt;
+@@ -313,6 +342,9 @@ store_interrupt_enable(struct threshold_
+       struct thresh_restart tr;
+       unsigned long new;
++      if (!b->interrupt_capable)
++              return -EINVAL;
++
+       if (strict_strtoul(buf, 0, &new) < 0)
+               return -EINVAL;
+@@ -471,6 +503,7 @@ static __cpuinit int allocate_threshold_
+       b->cpu                  = cpu;
+       b->address              = address;
+       b->interrupt_enable     = 0;
++      b->interrupt_capable    = lvt_interrupt_supported(bank, high);
+       b->threshold_limit      = THRESHOLD_MAX;
+       INIT_LIST_HEAD(&b->miscj);
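
The new lvt_interrupt_supported() helper keys off bit 28 (IntP) in the high half of the bank's threshold MSR, with bank 4 treated as always capable. A small user-space sketch of the same decision; the MSR high words below are made-up test inputs, not values read from real hardware:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Mirrors the helper added by the patch: bank 4 has always supported the
 * APIC LVT thresholding interrupt; other banks advertise support via the
 * IntP bit (bit 28) in the high half of the threshold MSR. */
static bool lvt_interrupt_supported(unsigned int bank, uint32_t msr_high_bits)
{
	if (bank == 4)
		return true;
	return msr_high_bits & BIT(28);
}

int main(void)
{
	/* Made-up MSR high words, just to exercise both branches. */
	printf("bank 4, IntP clear -> %d\n", lvt_interrupt_supported(4, 0));
	printf("bank 5, IntP set   -> %d\n", lvt_interrupt_supported(5, BIT(28)));
	printf("bank 5, IntP clear -> %d\n", lvt_interrupt_supported(5, 0));
	return 0;
}

Storing the result in ->interrupt_capable is what lets threshold_restart_bank() and store_interrupt_enable() skip the LVT offset programming entirely on banks that cannot generate the interrupt, instead of enabling it implicitly as before.
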