]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
more .31 patches
authorGreg Kroah-Hartman <gregkh@suse.de>
Tue, 15 Sep 2009 23:44:46 +0000 (16:44 -0700)
committerGreg Kroah-Hartman <gregkh@suse.de>
Tue, 15 Sep 2009 23:44:46 +0000 (16:44 -0700)
13 files changed:
queue-2.6.31/agp-intel-remove-restore-in-resume.patch [new file with mode: 0644]
queue-2.6.31/binfmt_elf-fix-pt_interp-bss-handling.patch [new file with mode: 0644]
queue-2.6.31/block-don-t-assume-device-has-a-request-list-backing-in-nr_requests-store.patch [new file with mode: 0644]
queue-2.6.31/fix-undefined-reference-to-user_shm_unlock.patch [new file with mode: 0644]
queue-2.6.31/md-fix-strchr-undefined.patch [new file with mode: 0644]
queue-2.6.31/perf_counter-fix-buffer-overflow-in-perf_copy_attr.patch [new file with mode: 0644]
queue-2.6.31/perf_counter-start-counting-time-enabled-when-group-leader-gets-enabled.patch [new file with mode: 0644]
queue-2.6.31/powerpc-fix-bug-where-perf_counters-breaks-oprofile.patch [new file with mode: 0644]
queue-2.6.31/powerpc-perf_counters-reduce-stack-usage-of-power_check_constraints.patch [new file with mode: 0644]
queue-2.6.31/powerpc-ps3-workaround-for-flash-memory-i-o-error.patch [new file with mode: 0644]
queue-2.6.31/series
queue-2.6.31/tpm-fixup-boot-probe-timeout-for-tpm_tis-driver.patch [new file with mode: 0644]
queue-2.6.31/x86-amd-iommu-fix-broken-check-in-amd_iommu_flush_all_devices.patch [new file with mode: 0644]

diff --git a/queue-2.6.31/agp-intel-remove-restore-in-resume.patch b/queue-2.6.31/agp-intel-remove-restore-in-resume.patch
new file mode 100644 (file)
index 0000000..fd5cba8
--- /dev/null
@@ -0,0 +1,43 @@
+From 121264827656f5f06328b17983c796af17dc5949 Mon Sep 17 00:00:00 2001
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+Date: Mon, 14 Sep 2009 10:47:06 +0800
+Subject: agp/intel: remove restore in resume
+
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+
+commit 121264827656f5f06328b17983c796af17dc5949 upstream.
+
+As early pci resume has already restored config for host
+bridge and graphics device, don't need to restore it again,
+This removes an original order hack for graphics device restore.
+
+This fixed the resume hang issue found by Alan Stern on 845G,
+caused by extra config restore on graphics device.
+
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Dave Airlie <airlied@linux.ie>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/agp/intel-agp.c |    9 ---------
+ 1 file changed, 9 deletions(-)
+
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -2313,15 +2313,6 @@ static int agp_intel_resume(struct pci_d
+       struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+       int ret_val;
+-      pci_restore_state(pdev);
+-
+-      /* We should restore our graphics device's config space,
+-       * as host bridge (00:00) resumes before graphics device (02:00),
+-       * then our access to its pci space can work right.
+-       */
+-      if (intel_private.pcidev)
+-              pci_restore_state(intel_private.pcidev);
+-
+       if (bridge->driver == &intel_generic_driver)
+               intel_configure();
+       else if (bridge->driver == &intel_850_driver)
diff --git a/queue-2.6.31/binfmt_elf-fix-pt_interp-bss-handling.patch b/queue-2.6.31/binfmt_elf-fix-pt_interp-bss-handling.patch
new file mode 100644 (file)
index 0000000..d23bf58
--- /dev/null
@@ -0,0 +1,87 @@
+From 9f0ab4a3f0fdb1ff404d150618ace2fa069bb2e1 Mon Sep 17 00:00:00 2001
+From: Roland McGrath <roland@redhat.com>
+Date: Tue, 8 Sep 2009 19:49:40 -0700
+Subject: binfmt_elf: fix PT_INTERP bss handling
+
+From: Roland McGrath <roland@redhat.com>
+
+commit 9f0ab4a3f0fdb1ff404d150618ace2fa069bb2e1 upstream.
+
+In fs/binfmt_elf.c, load_elf_interp() calls padzero() for .bss even if
+the PT_LOAD has no PROT_WRITE and no .bss.  This generates EFAULT.
+
+Here is a small test case.  (Yes, there are other, useful PT_INTERP
+which have only .text and no .data/.bss.)
+
+       ----- ptinterp.S
+       _start: .globl _start
+                nop
+                int3
+       -----
+       $ gcc -m32 -nostartfiles -nostdlib -o ptinterp ptinterp.S
+       $ gcc -m32 -Wl,--dynamic-linker=ptinterp -o hello hello.c
+       $ ./hello
+       Segmentation fault  # during execve() itself
+
+       After applying the patch:
+       $ ./hello
+       Trace trap  # user-mode execution after execve() finishes
+
+If the ELF headers are actually self-inconsistent, then dying is fine.
+But having no PROT_WRITE segment is perfectly normal and correct if
+there is no segment with p_memsz > p_filesz (i.e. bss).  John Reiser
+suggested checking for PROT_WRITE in the bss logic.  I think it makes
+most sense to simply apply the bss logic only when there is bss.
+
+This patch looks less trivial than it is due to some reindentation.
+It just moves the "if (last_bss > elf_bss) {" test up to include the
+partial-page bss logic as well as the more-pages bss logic.
+
+Reported-by: John Reiser <jreiser@bitwagon.com>
+Signed-off-by: Roland McGrath <roland@redhat.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/binfmt_elf.c |   28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -501,22 +501,22 @@ static unsigned long load_elf_interp(str
+               }
+       }
+-      /*
+-       * Now fill out the bss section.  First pad the last page up
+-       * to the page boundary, and then perform a mmap to make sure
+-       * that there are zero-mapped pages up to and including the 
+-       * last bss page.
+-       */
+-      if (padzero(elf_bss)) {
+-              error = -EFAULT;
+-              goto out_close;
+-      }
++      if (last_bss > elf_bss) {
++              /*
++               * Now fill out the bss section.  First pad the last page up
++               * to the page boundary, and then perform a mmap to make sure
++               * that there are zero-mapped pages up to and including the
++               * last bss page.
++               */
++              if (padzero(elf_bss)) {
++                      error = -EFAULT;
++                      goto out_close;
++              }
+-      /* What we have mapped so far */
+-      elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
++              /* What we have mapped so far */
++              elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
+-      /* Map the last of the bss segment */
+-      if (last_bss > elf_bss) {
++              /* Map the last of the bss segment */
+               down_write(&current->mm->mmap_sem);
+               error = do_brk(elf_bss, last_bss - elf_bss);
+               up_write(&current->mm->mmap_sem);
diff --git a/queue-2.6.31/block-don-t-assume-device-has-a-request-list-backing-in-nr_requests-store.patch b/queue-2.6.31/block-don-t-assume-device-has-a-request-list-backing-in-nr_requests-store.patch
new file mode 100644 (file)
index 0000000..549c8dd
--- /dev/null
@@ -0,0 +1,42 @@
+From b8a9ae779f2c7049071034661e09cb7e1e82250c Mon Sep 17 00:00:00 2001
+From: Jens Axboe <jens.axboe@oracle.com>
+Date: Fri, 11 Sep 2009 22:44:29 +0200
+Subject: block: don't assume device has a request list backing in nr_requests store
+
+From: Jens Axboe <jens.axboe@oracle.com>
+
+commit b8a9ae779f2c7049071034661e09cb7e1e82250c upstream.
+
+Stacked devices do not. For now, just error out with -EINVAL. Later
+we could make the limit apply on stacked devices too, for throttling
+reasons.
+
+This fixes
+
+5a54cd13353bb3b88887604e2c980aa01e314309
+
+and should go into 2.6.31 stable as well.
+
+Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/blk-sysfs.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -40,7 +40,12 @@ queue_requests_store(struct request_queu
+ {
+       struct request_list *rl = &q->rq;
+       unsigned long nr;
+-      int ret = queue_var_store(&nr, page, count);
++      int ret;
++
++      if (!q->request_fn)
++              return -EINVAL;
++
++      ret = queue_var_store(&nr, page, count);
+       if (nr < BLKDEV_MIN_RQ)
+               nr = BLKDEV_MIN_RQ;
diff --git a/queue-2.6.31/fix-undefined-reference-to-user_shm_unlock.patch b/queue-2.6.31/fix-undefined-reference-to-user_shm_unlock.patch
new file mode 100644 (file)
index 0000000..7428de7
--- /dev/null
@@ -0,0 +1,37 @@
+From 2195d2818c37bdf263865f1e9effccdd9fc5f9d4 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+Date: Sat, 12 Sep 2009 12:21:27 +0100
+Subject: fix undefined reference to user_shm_unlock
+
+From: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+
+commit 2195d2818c37bdf263865f1e9effccdd9fc5f9d4 upstream.
+
+My 353d5c30c666580347515da609dd74a2b8e9b828 "mm: fix hugetlb bug due to
+user_shm_unlock call" broke the CONFIG_SYSVIPC !CONFIG_MMU build of both
+2.6.31 and 2.6.30.6: "undefined reference to `user_shm_unlock'".
+
+gcc didn't understand my comment! so couldn't figure out to optimize
+away user_shm_unlock() from the error path in the hugetlb-less case, as
+it does elsewhere.  Help it to do so, in a language it understands.
+
+Reported-by: Mike Frysinger <vapier@gentoo.org>
+Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ ipc/shm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -410,7 +410,7 @@ static int newseg(struct ipc_namespace *
+       return error;
+ no_id:
+-      if (shp->mlock_user)    /* shmflg & SHM_HUGETLB case */
++      if (is_file_hugepages(file) && shp->mlock_user)
+               user_shm_unlock(size, shp->mlock_user);
+       fput(file);
+ no_file:
diff --git a/queue-2.6.31/md-fix-strchr-undefined.patch b/queue-2.6.31/md-fix-strchr-undefined.patch
new file mode 100644 (file)
index 0000000..fa87106
--- /dev/null
@@ -0,0 +1,40 @@
+From 0d03d59d9b31cd1e33b7e46a80b6fef66244b1f2 Mon Sep 17 00:00:00 2001
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+Date: Thu, 10 Sep 2009 23:13:28 +0200
+Subject: md: Fix "strchr" [drivers/md/dm-log-userspace.ko] undefined!
+
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+
+commit 0d03d59d9b31cd1e33b7e46a80b6fef66244b1f2 upstream.
+
+Commit b8313b6da7e2e7c7f47d93d8561969a3ff9ba0ea ("dm log: remove incorrect
+field from userspace table output") added a call to strstr() with a
+single-character "needle" string parameter.
+
+Unfortunately some versions of gcc replace such calls to strstr() by calls
+to strchr() behind our back.  This causes linking errors if strchr() is
+defined as an inline function in <asm/string.h> (e.g. on m68k):
+
+| WARNING: "strchr" [drivers/md/dm-log-userspace.ko] undefined!
+
+Avoid this by explicitly calling strchr() instead.
+
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/dm-log-userspace-base.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-log-userspace-base.c
++++ b/drivers/md/dm-log-userspace-base.c
+@@ -582,7 +582,7 @@ static int userspace_status(struct dm_di
+               break;
+       case STATUSTYPE_TABLE:
+               sz = 0;
+-              table_args = strstr(lc->usr_argv_str, " ");
++              table_args = strchr(lc->usr_argv_str, ' ');
+               BUG_ON(!table_args); /* There will always be a ' ' */
+               table_args++;
diff --git a/queue-2.6.31/perf_counter-fix-buffer-overflow-in-perf_copy_attr.patch b/queue-2.6.31/perf_counter-fix-buffer-overflow-in-perf_copy_attr.patch
new file mode 100644 (file)
index 0000000..edd332f
--- /dev/null
@@ -0,0 +1,37 @@
+From b3e62e35058fc744ac794611f4e79bcd1c5a4b83 Mon Sep 17 00:00:00 2001
+From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Date: Tue, 15 Sep 2009 14:44:36 +0800
+Subject: perf_counter: Fix buffer overflow in perf_copy_attr()
+
+From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+
+commit b3e62e35058fc744ac794611f4e79bcd1c5a4b83 upstream.
+
+If we pass a big size data over perf_counter_open() syscall,
+the kernel will copy this data to a small buffer, it will
+cause kernel crash.
+
+This bug makes the kernel unsafe and non-root local user can
+trigger it.
+
+Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Acked-by: Paul Mackerras <paulus@samba.org>
+LKML-Reference: <4AAF37D4.5010706@cn.fujitsu.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/perf_counter.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/perf_counter.c
++++ b/kernel/perf_counter.c
+@@ -4171,6 +4171,7 @@ static int perf_copy_attr(struct perf_co
+                       if (val)
+                               goto err_size;
+               }
++              size = sizeof(*attr);
+       }
+       ret = copy_from_user(attr, uattr, size);
diff --git a/queue-2.6.31/perf_counter-start-counting-time-enabled-when-group-leader-gets-enabled.patch b/queue-2.6.31/perf_counter-start-counting-time-enabled-when-group-leader-gets-enabled.patch
new file mode 100644 (file)
index 0000000..7ffc908
--- /dev/null
@@ -0,0 +1,136 @@
+From fa289beca9de9119c7760bd984f3640da21bc94c Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@samba.org>
+Date: Tue, 25 Aug 2009 15:17:20 +1000
+Subject: perf_counter: Start counting time enabled when group leader gets enabled
+
+From: Paul Mackerras <paulus@samba.org>
+
+commit fa289beca9de9119c7760bd984f3640da21bc94c upstream.
+
+Currently, if a group is created where the group leader is
+initially disabled but a non-leader member is initially
+enabled, and then the leader is subsequently enabled some time
+later, the time_enabled for the non-leader member will reflect
+the whole time since it was created, not just the time since
+the leader was enabled.
+
+This is incorrect, because all of the members are effectively
+disabled while the leader is disabled, since none of the
+members can go on the PMU if the leader can't.
+
+Thus we have to update the ->tstamp_enabled for all the enabled
+group members when a group leader is enabled, so that the
+time_enabled computation only counts the time since the leader
+was enabled.
+
+Similarly, when disabling a group leader we have to update the
+time_enabled and time_running for all of the group members.
+
+Also, in update_counter_times, we have to treat a counter whose
+group leader is disabled as being disabled.
+
+Reported-by: Stephane Eranian <eranian@googlemail.com>
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+LKML-Reference: <19091.29664.342227.445006@drongo.ozlabs.ibm.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/perf_counter.c |   43 ++++++++++++++++++++++++++++++-------------
+ 1 file changed, 30 insertions(+), 13 deletions(-)
+
+--- a/kernel/perf_counter.c
++++ b/kernel/perf_counter.c
+@@ -469,7 +469,8 @@ static void update_counter_times(struct 
+       struct perf_counter_context *ctx = counter->ctx;
+       u64 run_end;
+-      if (counter->state < PERF_COUNTER_STATE_INACTIVE)
++      if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
++          counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
+               return;
+       counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
+@@ -518,7 +519,7 @@ static void __perf_counter_disable(void 
+        */
+       if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
+               update_context_time(ctx);
+-              update_counter_times(counter);
++              update_group_times(counter);
+               if (counter == counter->group_leader)
+                       group_sched_out(counter, cpuctx, ctx);
+               else
+@@ -573,7 +574,7 @@ static void perf_counter_disable(struct 
+        * in, so we can change the state safely.
+        */
+       if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
+-              update_counter_times(counter);
++              update_group_times(counter);
+               counter->state = PERF_COUNTER_STATE_OFF;
+       }
+@@ -851,6 +852,27 @@ retry:
+ }
+ /*
++ * Put a counter into inactive state and update time fields.
++ * Enabling the leader of a group effectively enables all
++ * the group members that aren't explicitly disabled, so we
++ * have to update their ->tstamp_enabled also.
++ * Note: this works for group members as well as group leaders
++ * since the non-leader members' sibling_lists will be empty.
++ */
++static void __perf_counter_mark_enabled(struct perf_counter *counter,
++                                      struct perf_counter_context *ctx)
++{
++      struct perf_counter *sub;
++
++      counter->state = PERF_COUNTER_STATE_INACTIVE;
++      counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
++      list_for_each_entry(sub, &counter->sibling_list, list_entry)
++              if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
++                      sub->tstamp_enabled =
++                              ctx->time - sub->total_time_enabled;
++}
++
++/*
+  * Cross CPU call to enable a performance counter
+  */
+ static void __perf_counter_enable(void *info)
+@@ -877,8 +899,7 @@ static void __perf_counter_enable(void *
+       if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+               goto unlock;
+-      counter->state = PERF_COUNTER_STATE_INACTIVE;
+-      counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
++      __perf_counter_mark_enabled(counter, ctx);
+       /*
+        * If the counter is in a group and isn't the group leader,
+@@ -971,11 +992,9 @@ static void perf_counter_enable(struct p
+        * Since we have the lock this context can't be scheduled
+        * in, so we can change the state safely.
+        */
+-      if (counter->state == PERF_COUNTER_STATE_OFF) {
+-              counter->state = PERF_COUNTER_STATE_INACTIVE;
+-              counter->tstamp_enabled =
+-                      ctx->time - counter->total_time_enabled;
+-      }
++      if (counter->state == PERF_COUNTER_STATE_OFF)
++              __perf_counter_mark_enabled(counter, ctx);
++
+  out:
+       spin_unlock_irq(&ctx->lock);
+ }
+@@ -1479,9 +1498,7 @@ static void perf_counter_enable_on_exec(
+               counter->attr.enable_on_exec = 0;
+               if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+                       continue;
+-              counter->state = PERF_COUNTER_STATE_INACTIVE;
+-              counter->tstamp_enabled =
+-                      ctx->time - counter->total_time_enabled;
++              __perf_counter_mark_enabled(counter, ctx);
+               enabled = 1;
+       }
diff --git a/queue-2.6.31/powerpc-fix-bug-where-perf_counters-breaks-oprofile.patch b/queue-2.6.31/powerpc-fix-bug-where-perf_counters-breaks-oprofile.patch
new file mode 100644 (file)
index 0000000..958f9ad
--- /dev/null
@@ -0,0 +1,158 @@
+From a6dbf93a2ad853585409e715eb96dca9177e3c39 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@samba.org>
+Date: Wed, 9 Sep 2009 01:26:03 +0000
+Subject: powerpc: Fix bug where perf_counters breaks oprofile
+
+From: Paul Mackerras <paulus@samba.org>
+
+commit a6dbf93a2ad853585409e715eb96dca9177e3c39 upstream.
+
+Currently there is a bug where if you use oprofile on a pSeries
+machine, then use perf_counters, then use oprofile again, oprofile
+will not work correctly; it will lose the PMU configuration the next
+time the hypervisor does a partition context switch, and thereafter
+won't count anything.
+
+Maynard Johnson identified the sequence causing the problem:
+- oprofile setup calls ppc_enable_pmcs(), which calls
+  pseries_lpar_enable_pmcs, which tells the hypervisor that we want
+  to use the PMU, and sets the "PMU in use" flag in the lppaca.
+  This flag tells the hypervisor whether it needs to save and restore
+  the PMU config.
+- The perf_counter code sets and clears the "PMU in use" flag directly
+  as it context-switches the PMU between tasks, and leaves it clear
+  when it finishes.
+- oprofile setup, called for a new oprofile run, calls ppc_enable_pmcs,
+  which does nothing because it has already been called.  In particular
+  it doesn't set the "PMU in use" flag.
+
+This fixes the problem by arranging for ppc_enable_pmcs to always set
+the "PMU in use" flag.  It makes the perf_counter code call
+ppc_enable_pmcs also rather than calling the lower-level function
+directly, and removes the setting of the "PMU in use" flag from
+pseries_lpar_enable_pmcs, since that is now done in its caller.
+
+This also removes the declaration of pasemi_enable_pmcs because it
+isn't defined anywhere.
+
+Reported-by: Maynard Johnson <mpjohn@us.ibm.com>
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/include/asm/pmc.h         |   16 ++++++++++++++--
+ arch/powerpc/kernel/perf_counter.c     |   13 +++----------
+ arch/powerpc/kernel/sysfs.c            |    3 +++
+ arch/powerpc/platforms/pseries/setup.c |    4 ----
+ 4 files changed, 20 insertions(+), 16 deletions(-)
+
+--- a/arch/powerpc/include/asm/pmc.h
++++ b/arch/powerpc/include/asm/pmc.h
+@@ -27,10 +27,22 @@ extern perf_irq_t perf_irq;
+ int reserve_pmc_hardware(perf_irq_t new_perf_irq);
+ void release_pmc_hardware(void);
++void ppc_enable_pmcs(void);
+ #ifdef CONFIG_PPC64
+-void power4_enable_pmcs(void);
+-void pasemi_enable_pmcs(void);
++#include <asm/lppaca.h>
++
++static inline void ppc_set_pmu_inuse(int inuse)
++{
++      get_lppaca()->pmcregs_in_use = inuse;
++}
++
++extern void power4_enable_pmcs(void);
++
++#else /* CONFIG_PPC64 */
++
++static inline void ppc_set_pmu_inuse(int inuse) { }
++
+ #endif
+ #endif /* __KERNEL__ */
+--- a/arch/powerpc/kernel/perf_counter.c
++++ b/arch/powerpc/kernel/perf_counter.c
+@@ -65,7 +65,6 @@ static inline unsigned long perf_ip_adju
+ {
+       return 0;
+ }
+-static inline void perf_set_pmu_inuse(int inuse) { }
+ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
+ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+ {
+@@ -96,11 +95,6 @@ static inline unsigned long perf_ip_adju
+       return 0;
+ }
+-static inline void perf_set_pmu_inuse(int inuse)
+-{
+-      get_lppaca()->pmcregs_in_use = inuse;
+-}
+-
+ /*
+  * The user wants a data address recorded.
+  * If we're not doing instruction sampling, give them the SDAR
+@@ -535,8 +529,7 @@ void hw_perf_disable(void)
+                * Check if we ever enabled the PMU on this cpu.
+                */
+               if (!cpuhw->pmcs_enabled) {
+-                      if (ppc_md.enable_pmcs)
+-                              ppc_md.enable_pmcs();
++                      ppc_enable_pmcs();
+                       cpuhw->pmcs_enabled = 1;
+               }
+@@ -598,7 +591,7 @@ void hw_perf_enable(void)
+               mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+               mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+               if (cpuhw->n_counters == 0)
+-                      perf_set_pmu_inuse(0);
++                      ppc_set_pmu_inuse(0);
+               goto out_enable;
+       }
+@@ -631,7 +624,7 @@ void hw_perf_enable(void)
+        * bit set and set the hardware counters to their initial values.
+        * Then unfreeze the counters.
+        */
+-      perf_set_pmu_inuse(1);
++      ppc_set_pmu_inuse(1);
+       mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+       mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+       mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -17,6 +17,7 @@
+ #include <asm/prom.h>
+ #include <asm/machdep.h>
+ #include <asm/smp.h>
++#include <asm/pmc.h>
+ #include "cacheinfo.h"
+@@ -123,6 +124,8 @@ static DEFINE_PER_CPU(char, pmcs_enabled
+ void ppc_enable_pmcs(void)
+ {
++      ppc_set_pmu_inuse(1);
++
+       /* Only need to enable them once */
+       if (__get_cpu_var(pmcs_enabled))
+               return;
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -223,10 +223,6 @@ static void pseries_lpar_enable_pmcs(voi
+       set = 1UL << 63;
+       reset = 0;
+       plpar_hcall_norets(H_PERFMON, set, reset);
+-
+-      /* instruct hypervisor to maintain PMCs */
+-      if (firmware_has_feature(FW_FEATURE_SPLPAR))
+-              get_lppaca()->pmcregs_in_use = 1;
+ }
+ static void __init pseries_discover_pic(void)
diff --git a/queue-2.6.31/powerpc-perf_counters-reduce-stack-usage-of-power_check_constraints.patch b/queue-2.6.31/powerpc-perf_counters-reduce-stack-usage-of-power_check_constraints.patch
new file mode 100644 (file)
index 0000000..20bf0a8
--- /dev/null
@@ -0,0 +1,180 @@
+From e51ee31e8af22948dcc3b115978469b09c96c3fd Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@samba.org>
+Date: Wed, 9 Sep 2009 20:28:49 +0000
+Subject: powerpc/perf_counters: Reduce stack usage of power_check_constraints
+
+From: Paul Mackerras <paulus@samba.org>
+
+commit e51ee31e8af22948dcc3b115978469b09c96c3fd upstream.
+
+Michael Ellerman reported stack-frame size warnings being produced
+for power_check_constraints(), which uses an 8*8 array of u64 and
+two 8*8 arrays of unsigned long, which are currently allocated on the
+stack, along with some other smaller variables.  These arrays come
+to 1.5kB on 64-bit or 1kB on 32-bit, which is a bit too much for the
+stack.
+
+This fixes the problem by putting these arrays in the existing
+per-cpu cpu_hw_counters struct.  This is OK because two of the call
+sites have interrupts disabled already; for the third call site we
+use get_cpu_var, which disables preemption, so we know we won't
+get a context switch while we're in power_check_constraints().
+Note that power_check_constraints() can be called during context
+switch but is not called from interrupts.
+
+Reported-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/kernel/perf_counter.c |   55 +++++++++++++++++++++----------------
+ 1 file changed, 32 insertions(+), 23 deletions(-)
+
+--- a/arch/powerpc/kernel/perf_counter.c
++++ b/arch/powerpc/kernel/perf_counter.c
+@@ -32,6 +32,9 @@ struct cpu_hw_counters {
+       unsigned long mmcr[3];
+       struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
+       u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
++      u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
++      unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
++      unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ };
+ DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
+@@ -245,13 +248,11 @@ static void write_pmc(int idx, unsigned 
+  * and see if any combination of alternative codes is feasible.
+  * The feasible set is returned in event[].
+  */
+-static int power_check_constraints(u64 event[], unsigned int cflags[],
++static int power_check_constraints(struct cpu_hw_counters *cpuhw,
++                                 u64 event[], unsigned int cflags[],
+                                  int n_ev)
+ {
+       unsigned long mask, value, nv;
+-      u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+-      unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+-      unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+       unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
+       int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
+       int i, j;
+@@ -266,21 +267,23 @@ static int power_check_constraints(u64 e
+               if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
+                   && !ppmu->limited_pmc_event(event[i])) {
+                       ppmu->get_alternatives(event[i], cflags[i],
+-                                             alternatives[i]);
+-                      event[i] = alternatives[i][0];
++                                             cpuhw->alternatives[i]);
++                      event[i] = cpuhw->alternatives[i][0];
+               }
+-              if (ppmu->get_constraint(event[i], &amasks[i][0],
+-                                       &avalues[i][0]))
++              if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0],
++                                       &cpuhw->avalues[i][0]))
+                       return -1;
+       }
+       value = mask = 0;
+       for (i = 0; i < n_ev; ++i) {
+-              nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
++              nv = (value | cpuhw->avalues[i][0]) +
++                      (value & cpuhw->avalues[i][0] & addf);
+               if ((((nv + tadd) ^ value) & mask) != 0 ||
+-                  (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
++                  (((nv + tadd) ^ cpuhw->avalues[i][0]) &
++                   cpuhw->amasks[i][0]) != 0)
+                       break;
+               value = nv;
+-              mask |= amasks[i][0];
++              mask |= cpuhw->amasks[i][0];
+       }
+       if (i == n_ev)
+               return 0;       /* all OK */
+@@ -291,10 +294,11 @@ static int power_check_constraints(u64 e
+       for (i = 0; i < n_ev; ++i) {
+               choice[i] = 0;
+               n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
+-                                                alternatives[i]);
++                                                cpuhw->alternatives[i]);
+               for (j = 1; j < n_alt[i]; ++j)
+-                      ppmu->get_constraint(alternatives[i][j],
+-                                           &amasks[i][j], &avalues[i][j]);
++                      ppmu->get_constraint(cpuhw->alternatives[i][j],
++                                           &cpuhw->amasks[i][j],
++                                           &cpuhw->avalues[i][j]);
+       }
+       /* enumerate all possibilities and see if any will work */
+@@ -313,11 +317,11 @@ static int power_check_constraints(u64 e
+                * where k > j, will satisfy the constraints.
+                */
+               while (++j < n_alt[i]) {
+-                      nv = (value | avalues[i][j]) +
+-                              (value & avalues[i][j] & addf);
++                      nv = (value | cpuhw->avalues[i][j]) +
++                              (value & cpuhw->avalues[i][j] & addf);
+                       if ((((nv + tadd) ^ value) & mask) == 0 &&
+-                          (((nv + tadd) ^ avalues[i][j])
+-                           & amasks[i][j]) == 0)
++                          (((nv + tadd) ^ cpuhw->avalues[i][j])
++                           & cpuhw->amasks[i][j]) == 0)
+                               break;
+               }
+               if (j >= n_alt[i]) {
+@@ -339,7 +343,7 @@ static int power_check_constraints(u64 e
+                       svalues[i] = value;
+                       smasks[i] = mask;
+                       value = nv;
+-                      mask |= amasks[i][j];
++                      mask |= cpuhw->amasks[i][j];
+                       ++i;
+                       j = -1;
+               }
+@@ -347,7 +351,7 @@ static int power_check_constraints(u64 e
+       /* OK, we have a feasible combination, tell the caller the solution */
+       for (i = 0; i < n_ev; ++i)
+-              event[i] = alternatives[i][choice[i]];
++              event[i] = cpuhw->alternatives[i][choice[i]];
+       return 0;
+ }
+@@ -752,7 +756,7 @@ int hw_perf_group_sched_in(struct perf_c
+               return -EAGAIN;
+       if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
+               return -EAGAIN;
+-      i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
++      i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
+       if (i < 0)
+               return -EAGAIN;
+       cpuhw->n_counters = n0 + n;
+@@ -807,7 +811,7 @@ static int power_pmu_enable(struct perf_
+       cpuhw->flags[n0] = counter->hw.counter_base;
+       if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
+               goto out;
+-      if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
++      if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
+               goto out;
+       counter->hw.config = cpuhw->events[n0];
+@@ -1012,6 +1016,7 @@ const struct pmu *hw_perf_counter_init(s
+       unsigned int cflags[MAX_HWCOUNTERS];
+       int n;
+       int err;
++      struct cpu_hw_counters *cpuhw;
+       if (!ppmu)
+               return ERR_PTR(-ENXIO);
+@@ -1090,7 +1095,11 @@ const struct pmu *hw_perf_counter_init(s
+       cflags[n] = flags;
+       if (check_excludes(ctrs, cflags, n, 1))
+               return ERR_PTR(-EINVAL);
+-      if (power_check_constraints(events, cflags, n + 1))
++
++      cpuhw = &get_cpu_var(cpu_hw_counters);
++      err = power_check_constraints(cpuhw, events, cflags, n + 1);
++      put_cpu_var(cpu_hw_counters);
++      if (err)
+               return ERR_PTR(-EINVAL);
+       counter->hw.config = events[n];
diff --git a/queue-2.6.31/powerpc-ps3-workaround-for-flash-memory-i-o-error.patch b/queue-2.6.31/powerpc-ps3-workaround-for-flash-memory-i-o-error.patch
new file mode 100644 (file)
index 0000000..029e317
--- /dev/null
@@ -0,0 +1,126 @@
+From bc00351edd5c1b84d48c3fdca740fedfce4ae6ce Mon Sep 17 00:00:00 2001
+From: Geoff Levand <geoffrey.levand@am.sony.com>
+Date: Wed, 9 Sep 2009 13:28:05 +0000
+Subject: powerpc/ps3: Workaround for flash memory I/O error
+
+From: Geoff Levand <geoffrey.levand@am.sony.com>
+
+commit bc00351edd5c1b84d48c3fdca740fedfce4ae6ce upstream.
+
+A workaround for flash memory I/O errors when the PS3 internal
+hard disk has not been formatted for OtherOS use.
+
+This error condition mainly affects 'Live CD' users who have not
+formatted the PS3's internal hard disk for OtherOS.
+
+Fixes errors similar to these when using the ps3-flash-util
+or ps3-boot-game-os programs:
+
+  ps3flash read failed 0x2050000
+  os_area_header_read: read error: os_area_header: Input/output error
+  main:627: os_area_read_hp error.
+  ERROR: can't change boot flag
+
+Signed-off-by: Geoff Levand <geoffrey.levand@am.sony.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ps3/ps3stor_lib.c |   65 +++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 62 insertions(+), 3 deletions(-)
+
+--- a/drivers/ps3/ps3stor_lib.c
++++ b/drivers/ps3/ps3stor_lib.c
+@@ -23,6 +23,65 @@
+ #include <asm/lv1call.h>
+ #include <asm/ps3stor.h>
++/*
++ * A workaround for flash memory I/O errors when the internal hard disk
++ * has not been formatted for OtherOS use.  Delay disk close until flash
++ * memory is closed.
++ */
++
++static struct ps3_flash_workaround {
++      int flash_open;
++      int disk_open;
++      struct ps3_system_bus_device *disk_sbd;
++} ps3_flash_workaround;
++
++static int ps3stor_open_hv_device(struct ps3_system_bus_device *sbd)
++{
++      int error = ps3_open_hv_device(sbd);
++
++      if (error)
++              return error;
++
++      if (sbd->match_id == PS3_MATCH_ID_STOR_FLASH)
++              ps3_flash_workaround.flash_open = 1;
++
++      if (sbd->match_id == PS3_MATCH_ID_STOR_DISK)
++              ps3_flash_workaround.disk_open = 1;
++
++      return 0;
++}
++
++static int ps3stor_close_hv_device(struct ps3_system_bus_device *sbd)
++{
++      int error;
++
++      if (sbd->match_id == PS3_MATCH_ID_STOR_DISK
++              && ps3_flash_workaround.disk_open
++              && ps3_flash_workaround.flash_open) {
++              ps3_flash_workaround.disk_sbd = sbd;
++              return 0;
++      }
++
++      error = ps3_close_hv_device(sbd);
++
++      if (error)
++              return error;
++
++      if (sbd->match_id == PS3_MATCH_ID_STOR_DISK)
++              ps3_flash_workaround.disk_open = 0;
++
++      if (sbd->match_id == PS3_MATCH_ID_STOR_FLASH) {
++              ps3_flash_workaround.flash_open = 0;
++
++              if (ps3_flash_workaround.disk_sbd) {
++                      ps3_close_hv_device(ps3_flash_workaround.disk_sbd);
++                      ps3_flash_workaround.disk_open = 0;
++                      ps3_flash_workaround.disk_sbd = NULL;
++              }
++      }
++
++      return 0;
++}
+ static int ps3stor_probe_access(struct ps3_storage_device *dev)
+ {
+@@ -90,7 +149,7 @@ int ps3stor_setup(struct ps3_storage_dev
+       int error, res, alignment;
+       enum ps3_dma_page_size page_size;
+-      error = ps3_open_hv_device(&dev->sbd);
++      error = ps3stor_open_hv_device(&dev->sbd);
+       if (error) {
+               dev_err(&dev->sbd.core,
+                       "%s:%u: ps3_open_hv_device failed %d\n", __func__,
+@@ -166,7 +225,7 @@ fail_free_irq:
+ fail_sb_event_receive_port_destroy:
+       ps3_sb_event_receive_port_destroy(&dev->sbd, dev->irq);
+ fail_close_device:
+-      ps3_close_hv_device(&dev->sbd);
++      ps3stor_close_hv_device(&dev->sbd);
+ fail:
+       return error;
+ }
+@@ -193,7 +252,7 @@ void ps3stor_teardown(struct ps3_storage
+                       "%s:%u: destroy event receive port failed %d\n",
+                       __func__, __LINE__, error);
+-      error = ps3_close_hv_device(&dev->sbd);
++      error = ps3stor_close_hv_device(&dev->sbd);
+       if (error)
+               dev_err(&dev->sbd.core,
+                       "%s:%u: ps3_close_hv_device failed %d\n", __func__,
index 177798e6c8d961b2acd4e233079d4e6701fdbb47..941e1864d61aacaded882a7b1e9141e39a7f04ed 100644 (file)
@@ -7,3 +7,15 @@ scsi-fix-oops-during-scsi-scanning.patch
 scsi-libsrp-fix-memory-leak-in-srp_ring_free.patch
 cfg80211-fix-looping-soft-lockup-in-find_ie.patch
 ath5k-write-pcu-registers-on-initial-reset.patch
+binfmt_elf-fix-pt_interp-bss-handling.patch
+tpm-fixup-boot-probe-timeout-for-tpm_tis-driver.patch
+md-fix-strchr-undefined.patch
+x86-amd-iommu-fix-broken-check-in-amd_iommu_flush_all_devices.patch
+fix-undefined-reference-to-user_shm_unlock.patch
+perf_counter-fix-buffer-overflow-in-perf_copy_attr.patch
+perf_counter-start-counting-time-enabled-when-group-leader-gets-enabled.patch
+powerpc-perf_counters-reduce-stack-usage-of-power_check_constraints.patch
+powerpc-fix-bug-where-perf_counters-breaks-oprofile.patch
+powerpc-ps3-workaround-for-flash-memory-i-o-error.patch
+block-don-t-assume-device-has-a-request-list-backing-in-nr_requests-store.patch
+agp-intel-remove-restore-in-resume.patch
diff --git a/queue-2.6.31/tpm-fixup-boot-probe-timeout-for-tpm_tis-driver.patch b/queue-2.6.31/tpm-fixup-boot-probe-timeout-for-tpm_tis-driver.patch
new file mode 100644 (file)
index 0000000..d699845
--- /dev/null
@@ -0,0 +1,55 @@
+From ec57935837a78f9661125b08a5d08b697568e040 Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+Date: Wed, 9 Sep 2009 17:22:18 -0600
+Subject: TPM: Fixup boot probe timeout for tpm_tis driver
+
+From: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+
+commit ec57935837a78f9661125b08a5d08b697568e040 upstream.
+
+When probing the device in tpm_tis_init the call request_locality
+uses timeout_a, which wasn't being initialized until after
+request_locality. This results in request_locality falsely timing
+out if the chip is still starting. Move the initialization to before
+request_locality.
+
+This probably only matters for embedded cases (ie mine), a BIOS likely
+gets the TPM into a state where this code path isn't necessary.
+
+Signed-off-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+Acked-by: Rajiv Andrade <srajiv@linux.vnet.ibm.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/tpm/tpm_tis.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -450,6 +450,12 @@ static int tpm_tis_init(struct device *d
+               goto out_err;
+       }
++      /* Default timeouts */
++      chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
++      chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
++      chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
++      chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
++
+       if (request_locality(chip, 0) != 0) {
+               rc = -ENODEV;
+               goto out_err;
+@@ -457,12 +463,6 @@ static int tpm_tis_init(struct device *d
+       vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
+-      /* Default timeouts */
+-      chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+-      chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+-      chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+-      chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+-
+       dev_info(dev,
+                "1.2 TPM (device-id 0x%X, rev-id %d)\n",
+                vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
diff --git a/queue-2.6.31/x86-amd-iommu-fix-broken-check-in-amd_iommu_flush_all_devices.patch b/queue-2.6.31/x86-amd-iommu-fix-broken-check-in-amd_iommu_flush_all_devices.patch
new file mode 100644 (file)
index 0000000..0cdbe9a
--- /dev/null
@@ -0,0 +1,31 @@
+From e0faf54ee82bf9c07f0307b4391caad4020bd659 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Thu, 3 Sep 2009 15:45:51 +0200
+Subject: x86/amd-iommu: fix broken check in amd_iommu_flush_all_devices
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+commit e0faf54ee82bf9c07f0307b4391caad4020bd659 upstream.
+
+The amd_iommu_pd_table is indexed by protection domain
+number and not by device id. So this check is broken and
+must be removed.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/amd_iommu.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/arch/x86/kernel/amd_iommu.c
++++ b/arch/x86/kernel/amd_iommu.c
+@@ -485,8 +485,6 @@ void amd_iommu_flush_all_devices(void)
+       int i;
+       for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+-              if (amd_iommu_pd_table[i] == NULL)
+-                      continue;
+               iommu = amd_iommu_rlookup_table[i];
+               if (!iommu)