git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
2.6.32 patches
authorGreg Kroah-Hartman <gregkh@suse.de>
Wed, 28 Jul 2010 00:32:48 +0000 (17:32 -0700)
committerGreg Kroah-Hartman <gregkh@suse.de>
Wed, 28 Jul 2010 00:32:48 +0000 (17:32 -0700)
13 files changed:
queue-2.6.32/amd64-agp-probe-unknown-agp-devices-the-right-way.patch [new file with mode: 0644]
queue-2.6.32/genirq-deal-with-desc-set_type-changing-desc-chip.patch [new file with mode: 0644]
queue-2.6.32/ipvs-add-missing-locking-during-connection-table-hashing-and-unhashing.patch [new file with mode: 0644]
queue-2.6.32/netfilter-ip6t_reject-fix-a-dst-leak-in-ipv6-reject.patch [new file with mode: 0644]
queue-2.6.32/perf-resurrect-flat-callchains.patch [new file with mode: 0644]
queue-2.6.32/sched-fix-over-scheduling-bug.patch [new file with mode: 0644]
queue-2.6.32/sched-prevent-compiler-from-optimising-the-sched_avg_update-loop.patch [new file with mode: 0644]
queue-2.6.32/scsi-aacraid-eliminate-use-after-free.patch [new file with mode: 0644]
queue-2.6.32/serial-cpm_uart-implement-the-cpm_uart_early_write-function-for-console-poll.patch [new file with mode: 0644]
queue-2.6.32/series
queue-2.6.32/x86-calgary-increase-max-phb-number.patch [new file with mode: 0644]
queue-2.6.32/x86-calgary-limit-the-max-phb-number-to-256.patch [new file with mode: 0644]
queue-2.6.32/x86-fix-vsyscall-on-gcc-4.5-with-os.patch [new file with mode: 0644]

diff --git a/queue-2.6.32/amd64-agp-probe-unknown-agp-devices-the-right-way.patch b/queue-2.6.32/amd64-agp-probe-unknown-agp-devices-the-right-way.patch
new file mode 100644 (file)
index 0000000..740e837
--- /dev/null
@@ -0,0 +1,90 @@
+From 6fd024893911dcb51b4a0aa71971db5ba38f7071 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Wed, 24 Mar 2010 03:36:31 +0000
+Subject: amd64-agp: Probe unknown AGP devices the right way
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+commit 6fd024893911dcb51b4a0aa71971db5ba38f7071 upstream.
+
+The current initialisation code probes 'unsupported' AGP devices
+simply by calling its own probe function.  It does not lock these
+devices or even check whether another driver is already bound to
+them.
+
+We must use the device core to manage this.  So if the specific
+device id table didn't match anything and agp_try_unsupported=1,
+switch the device id table and call driver_attach() again.
+
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/agp/amd64-agp.c |   27 +++++++++++++++------------
+ 1 file changed, 15 insertions(+), 12 deletions(-)
+
+--- a/drivers/char/agp/amd64-agp.c
++++ b/drivers/char/agp/amd64-agp.c
+@@ -499,6 +499,10 @@ static int __devinit agp_amd64_probe(str
+       u8 cap_ptr;
+       int err;
++      /* The Highlander principle */
++      if (agp_bridges_found)
++              return -ENODEV;
++
+       cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+       if (!cap_ptr)
+               return -ENODEV;
+@@ -562,6 +566,8 @@ static void __devexit agp_amd64_remove(s
+                          amd64_aperture_sizes[bridge->aperture_size_idx].size);
+       agp_remove_bridge(bridge);
+       agp_put_bridge(bridge);
++
++      agp_bridges_found--;
+ }
+ #ifdef CONFIG_PM
+@@ -709,6 +715,11 @@ static struct pci_device_id agp_amd64_pc
+ MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
++static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
++      { PCI_DEVICE_CLASS(0, 0) },
++      { }
++};
++
+ static struct pci_driver agp_amd64_pci_driver = {
+       .name           = "agpgart-amd64",
+       .id_table       = agp_amd64_pci_table,
+@@ -733,7 +744,6 @@ int __init agp_amd64_init(void)
+               return err;
+       if (agp_bridges_found == 0) {
+-              struct pci_dev *dev;
+               if (!agp_try_unsupported && !agp_try_unsupported_boot) {
+                       printk(KERN_INFO PFX "No supported AGP bridge found.\n");
+ #ifdef MODULE
+@@ -749,17 +759,10 @@ int __init agp_amd64_init(void)
+                       return -ENODEV;
+               /* Look for any AGP bridge */
+-              dev = NULL;
+-              err = -ENODEV;
+-              for_each_pci_dev(dev) {
+-                      if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
+-                              continue;
+-                      /* Only one bridge supported right now */
+-                      if (agp_amd64_probe(dev, NULL) == 0) {
+-                              err = 0;
+-                              break;
+-                      }
+-              }
++              agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
++              err = driver_attach(&agp_amd64_pci_driver.driver);
++              if (err == 0 && agp_bridges_found == 0)
++                      err = -ENODEV;
+       }
+       return err;
+ }
diff --git a/queue-2.6.32/genirq-deal-with-desc-set_type-changing-desc-chip.patch b/queue-2.6.32/genirq-deal-with-desc-set_type-changing-desc-chip.patch
new file mode 100644 (file)
index 0000000..8e139b9
--- /dev/null
@@ -0,0 +1,54 @@
+From 4673247562e39a17e09440fa1400819522ccd446 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 7 Jun 2010 17:53:51 +0200
+Subject: genirq: Deal with desc->set_type() changing desc->chip
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 4673247562e39a17e09440fa1400819522ccd446 upstream.
+
+The set_type() function can change the chip implementation when the
+trigger mode changes. That might result in using an non-initialized
+irq chip when called from __setup_irq() or when called via
+set_irq_type() on an already enabled irq.
+
+The set_irq_type() function should not be called on an enabled irq,
+but because we forgot to put a check into it, we have a bunch of users
+which grew the habit of doing that and it never blew up as the
+function is serialized via desc->lock against all users of desc->chip
+and they never hit the non-initialized irq chip issue.
+
+The easy fix for the __setup_irq() issue would be to move the
+irq_chip_set_defaults(desc->chip) call after the trigger setting to
+make sure that a chip change is covered.
+
+But as we have already users, which do the type setting after
+request_irq(), the safe fix for now is to call irq_chip_set_defaults()
+from __irq_set_trigger() when desc->set_type() changed the irq chip.
+
+It needs a deeper analysis whether we should refuse to change the chip
+on an already enabled irq, but that'd be a large scale change to fix
+all the existing users. So that's neither stable nor 2.6.35 material.
+
+Reported-by: Esben Haabendal <eha@doredevelopment.dk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: linuxppc-dev <linuxppc-dev@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/irq/manage.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -436,6 +436,9 @@ int __irq_set_trigger(struct irq_desc *d
+               /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
+               desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+               desc->status |= flags;
++
++              if (chip != desc->chip)
++                      irq_chip_set_defaults(desc->chip);
+       }
+       return ret;
diff --git a/queue-2.6.32/ipvs-add-missing-locking-during-connection-table-hashing-and-unhashing.patch b/queue-2.6.32/ipvs-add-missing-locking-during-connection-table-hashing-and-unhashing.patch
new file mode 100644 (file)
index 0000000..450ee30
--- /dev/null
@@ -0,0 +1,126 @@
+From aea9d711f3d68c656ad31ab578ecfb0bb5cd7f97 Mon Sep 17 00:00:00 2001
+From: Sven Wegener <sven.wegener@stealer.net>
+Date: Wed, 9 Jun 2010 16:10:57 +0200
+Subject: ipvs: Add missing locking during connection table hashing and unhashing
+
+From: Sven Wegener <sven.wegener@stealer.net>
+
+commit aea9d711f3d68c656ad31ab578ecfb0bb5cd7f97 upstream.
+
+The code that hashes and unhashes connections from the connection table
+is missing locking of the connection being modified, which opens up a
+race condition and results in memory corruption when this race condition
+is hit.
+
+Here is what happens in pretty verbose form:
+
+CPU 0                                  CPU 1
+------------                           ------------
+An active connection is terminated and
+we schedule ip_vs_conn_expire() on this
+CPU to expire this connection.
+
+                                       IRQ assignment is changed to this CPU,
+                                       but the expire timer stays scheduled on
+                                       the other CPU.
+
+                                       New connection from same ip:port comes
+                                       in right before the timer expires, we
+                                       find the inactive connection in our
+                                       connection table and get a reference to
+                                       it. We proper lock the connection in
+                                       tcp_state_transition() and read the
+                                       connection flags in set_tcp_state().
+
+ip_vs_conn_expire() gets called, we
+unhash the connection from our
+connection table and remove the hashed
+flag in ip_vs_conn_unhash(), without
+proper locking!
+
+                                       While still holding proper locks we
+                                       write the connection flags in
+                                       set_tcp_state() and this sets the hashed
+                                       flag again.
+
+ip_vs_conn_expire() fails to expire the
+connection, because the other CPU has
+incremented the reference count. We try
+to re-insert the connection into our
+connection table, but this fails in
+ip_vs_conn_hash(), because the hashed
+flag has been set by the other CPU. We
+re-schedule execution of
+ip_vs_conn_expire(). Now this connection
+has the hashed flag set, but isn't
+actually hashed in our connection table
+and has a dangling list_head.
+
+                                       We drop the reference we held on the
+                                       connection and schedule the expire timer
+                                       for timeouting the connection on this
+                                       CPU. Further packets won't be able to
+                                       find this connection in our connection
+                                       table.
+
+                                       ip_vs_conn_expire() gets called again,
+                                       we think it's already hashed, but the
+                                       list_head is dangling and while removing
+                                       the connection from our connection table
+                                       we write to the memory location where
+                                       this list_head points to.
+
+The result will probably be a kernel oops at some other point in time.
+
+This race condition is pretty subtle, but it can be triggered remotely.
+It needs the IRQ assignment change or another circumstance where packets
+coming from the same ip:port for the same service are being processed on
+different CPUs. And it involves hitting the exact time at which
+ip_vs_conn_expire() gets called. It can be avoided by making sure that
+all packets from one connection are always processed on the same CPU and
+can be made harder to exploit by changing the connection timeouts to
+some custom values.
+
+Signed-off-by: Sven Wegener <sven.wegener@stealer.net>
+Acked-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/netfilter/ipvs/ip_vs_conn.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -146,6 +146,7 @@ static inline int ip_vs_conn_hash(struct
+       hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
+       ct_write_lock(hash);
++      spin_lock(&cp->lock);
+       if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
+               list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
+@@ -158,6 +159,7 @@ static inline int ip_vs_conn_hash(struct
+               ret = 0;
+       }
++      spin_unlock(&cp->lock);
+       ct_write_unlock(hash);
+       return ret;
+@@ -177,6 +179,7 @@ static inline int ip_vs_conn_unhash(stru
+       hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
+       ct_write_lock(hash);
++      spin_lock(&cp->lock);
+       if (cp->flags & IP_VS_CONN_F_HASHED) {
+               list_del(&cp->c_list);
+@@ -186,6 +189,7 @@ static inline int ip_vs_conn_unhash(stru
+       } else
+               ret = 0;
++      spin_unlock(&cp->lock);
+       ct_write_unlock(hash);
+       return ret;
diff --git a/queue-2.6.32/netfilter-ip6t_reject-fix-a-dst-leak-in-ipv6-reject.patch b/queue-2.6.32/netfilter-ip6t_reject-fix-a-dst-leak-in-ipv6-reject.patch
new file mode 100644 (file)
index 0000000..ac80a52
--- /dev/null
@@ -0,0 +1,38 @@
+From 499031ac8a3df6738f6186ded9da853e8ea18253 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Fri, 2 Jul 2010 10:05:01 +0200
+Subject: netfilter: ip6t_REJECT: fix a dst leak in ipv6 REJECT
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+commit 499031ac8a3df6738f6186ded9da853e8ea18253 upstream.
+
+We should release dst if dst->error is set.
+
+Bug introduced in 2.6.14 by commit e104411b82f5c
+([XFRM]: Always release dst_entry on error in xfrm_lookup)
+
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv6/netfilter/ip6t_REJECT.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/netfilter/ip6t_REJECT.c
++++ b/net/ipv6/netfilter/ip6t_REJECT.c
+@@ -95,9 +95,11 @@ static void send_reset(struct net *net,
+       fl.fl_ip_dport = otcph.source;
+       security_skb_classify_flow(oldskb, &fl);
+       dst = ip6_route_output(net, NULL, &fl);
+-      if (dst == NULL)
++      if (dst == NULL || dst->error) {
++              dst_release(dst);
+               return;
+-      if (dst->error || xfrm_lookup(net, &dst, &fl, NULL, 0))
++      }
++      if (xfrm_lookup(net, &dst, &fl, NULL, 0))
+               return;
+       hh_len = (dst->dev->hard_header_len + 15)&~15;
diff --git a/queue-2.6.32/perf-resurrect-flat-callchains.patch b/queue-2.6.32/perf-resurrect-flat-callchains.patch
new file mode 100644 (file)
index 0000000..3b4a8cc
--- /dev/null
@@ -0,0 +1,45 @@
+From 97aa1052739c6a06cb6b0467dbf410613d20bc97 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <fweisbec@gmail.com>
+Date: Thu, 8 Jul 2010 06:06:17 +0200
+Subject: perf: Resurrect flat callchains
+
+From: Frederic Weisbecker <fweisbec@gmail.com>
+
+commit 97aa1052739c6a06cb6b0467dbf410613d20bc97 upstream.
+
+Initialize the callchain radix tree root correctly.
+
+When we walk through the parents, we must stop after the root, but
+since it wasn't well initialized, its parent pointer was random.
+
+Also the number of hits was random because uninitialized, hence it
+was part of the callchain while the root doesn't contain anything.
+
+This fixes segfaults and percentages followed by empty callchains
+while running:
+
+       perf report -g flat
+
+Reported-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ tools/perf/util/callchain.h |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/tools/perf/util/callchain.h
++++ b/tools/perf/util/callchain.h
+@@ -48,6 +48,9 @@ static inline void callchain_init(struct
+       INIT_LIST_HEAD(&node->brothers);
+       INIT_LIST_HEAD(&node->children);
+       INIT_LIST_HEAD(&node->val);
++
++      node->parent = NULL;
++      node->hit = 0;
+ }
+ static inline u64 cumul_hits(struct callchain_node *node)
diff --git a/queue-2.6.32/sched-fix-over-scheduling-bug.patch b/queue-2.6.32/sched-fix-over-scheduling-bug.patch
new file mode 100644 (file)
index 0000000..f831997
--- /dev/null
@@ -0,0 +1,64 @@
+From 3c93717cfa51316e4dbb471e7c0f9d243359d5f8 Mon Sep 17 00:00:00 2001
+From: Alex,Shi <alex.shi@intel.com>
+Date: Thu, 17 Jun 2010 14:08:13 +0800
+Subject: sched: Fix over-scheduling bug
+
+From: Alex,Shi <alex.shi@intel.com>
+
+commit 3c93717cfa51316e4dbb471e7c0f9d243359d5f8 upstream.
+
+Commit e70971591 ("sched: Optimize unused cgroup configuration") introduced
+an imbalanced scheduling bug.
+
+If we do not use CGROUP, function update_h_load won't update h_load. When the
+system has a large number of tasks far more than logical CPU number, the
+incorrect cfs_rq[cpu]->h_load value will cause load_balance() to pull too
+many tasks to the local CPU from the busiest CPU. So the busiest CPU keeps
+going in a round robin. That will hurt performance.
+
+The issue was found originally by a scientific calculation workload that
+developed by Yanmin. With that commit, the workload performance drops
+about 40%.
+
+ CPU  before    after
+
+ 00   : 2       : 7
+ 01   : 1       : 7
+ 02   : 11      : 6
+ 03   : 12      : 7
+ 04   : 6       : 6
+ 05   : 11      : 7
+ 06   : 10      : 6
+ 07   : 12      : 7
+ 08   : 11      : 6
+ 09   : 12      : 6
+ 10   : 1       : 6
+ 11   : 1       : 6
+ 12   : 6       : 6
+ 13   : 2       : 6
+ 14   : 2       : 6
+ 15   : 1       : 6
+
+Reviewed-by: Yanmin zhang <yanmin.zhang@intel.com>
+Signed-off-by: Alex Shi <alex.shi@intel.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+LKML-Reference: <1276754893.9452.5442.camel@debian>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sched.c |    3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -1717,9 +1717,6 @@ static void update_shares_locked(struct
+ static void update_h_load(long cpu)
+ {
+-      if (root_task_group_empty())
+-              return;
+-
+       walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
+ }
diff --git a/queue-2.6.32/sched-prevent-compiler-from-optimising-the-sched_avg_update-loop.patch b/queue-2.6.32/sched-prevent-compiler-from-optimising-the-sched_avg_update-loop.patch
new file mode 100644 (file)
index 0000000..a08b211
--- /dev/null
@@ -0,0 +1,49 @@
+From 0d98bb2656e9bd2dfda2d089db1fe1dbdab41504 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 24 May 2010 12:11:43 -0700
+Subject: sched: Prevent compiler from optimising the sched_avg_update() loop
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 0d98bb2656e9bd2dfda2d089db1fe1dbdab41504 upstream.
+
+GCC 4.4.1 on ARM has been observed to replace the while loop in
+sched_avg_update with a call to uldivmod, resulting in the
+following build failure at link-time:
+
+kernel/built-in.o: In function `sched_avg_update':
+ kernel/sched.c:1261: undefined reference to `__aeabi_uldivmod'
+ kernel/sched.c:1261: undefined reference to `__aeabi_uldivmod'
+make: *** [.tmp_vmlinux1] Error 1
+
+This patch introduces a fake data hazard to the loop body to
+prevent the compiler optimising the loop away.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Russell King <rmk@arm.linux.org.uk>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sched.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -1261,6 +1261,12 @@ static void sched_avg_update(struct rq *
+       s64 period = sched_avg_period();
+       while ((s64)(rq->clock - rq->age_stamp) > period) {
++              /*
++               * Inline assembly required to prevent the compiler
++               * optimising this loop into a divmod call.
++               * See __iter_div_u64_rem() for another example of this.
++               */
++              asm("" : "+rm" (rq->age_stamp));
+               rq->age_stamp += period;
+               rq->rt_avg /= 2;
+       }
diff --git a/queue-2.6.32/scsi-aacraid-eliminate-use-after-free.patch b/queue-2.6.32/scsi-aacraid-eliminate-use-after-free.patch
new file mode 100644 (file)
index 0000000..2d7acbb
--- /dev/null
@@ -0,0 +1,56 @@
+From 8a52da632ceb9d8b776494563df579e87b7b586b Mon Sep 17 00:00:00 2001
+From: Julia Lawall <julia@diku.dk>
+Date: Sat, 15 May 2010 11:46:12 +0200
+Subject: SCSI: aacraid: Eliminate use after free
+
+From: Julia Lawall <julia@diku.dk>
+
+commit 8a52da632ceb9d8b776494563df579e87b7b586b upstream.
+
+The debugging code using the freed structure is moved before the kfree.
+
+A simplified version of the semantic match that finds this problem is as
+follows: (http://coccinelle.lip6.fr/)
+
+// <smpl>
+@free@
+expression E;
+position p;
+@@
+kfree@p(E)
+
+@@
+expression free.E, subE<=free.E, E1;
+position free.p;
+@@
+
+  kfree@p(E)
+  ...
+(
+  subE = E1
+|
+* E
+)
+// </smpl>
+
+Signed-off-by: Julia Lawall <julia@diku.dk>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+
+---
+ drivers/scsi/aacraid/commctrl.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/aacraid/commctrl.c
++++ b/drivers/scsi/aacraid/commctrl.c
+@@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_d
+                               /* Does this really need to be GFP_DMA? */
+                               p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+                               if(!p) {
+-                                      kfree (usg);
+-                                      dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
++                                      dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+                                         usg->sg[i].count,i,usg->count));
++                                      kfree(usg);
+                                       rcode = -ENOMEM;
+                                       goto cleanup;
+                               }
diff --git a/queue-2.6.32/serial-cpm_uart-implement-the-cpm_uart_early_write-function-for-console-poll.patch b/queue-2.6.32/serial-cpm_uart-implement-the-cpm_uart_early_write-function-for-console-poll.patch
new file mode 100644 (file)
index 0000000..117fd3e
--- /dev/null
@@ -0,0 +1,196 @@
+From 8cd774ad30c22b9d89823f1f05d845f4cdaba9e8 Mon Sep 17 00:00:00 2001
+From: Dongdong Deng <dongdong.deng@windriver.com>
+Date: Thu, 17 Jun 2010 11:13:40 +0800
+Subject: serial: cpm_uart: implement the cpm_uart_early_write() function for console poll
+
+From: Dongdong Deng <dongdong.deng@windriver.com>
+
+commit 8cd774ad30c22b9d89823f1f05d845f4cdaba9e8 upstream.
+
+The cpm_uart_early_write() function which was used for console poll
+isn't implemented in the cpm uart driver.
+
+Implementing this function both fixes the build when CONFIG_CONSOLE_POLL
+is set and allows kgdboc to work via the cpm uart.
+
+Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>
+Reviewed-by: Bruce Ashfield <bruce.ashfield@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/serial/cpm_uart/cpm_uart_core.c |  143 +++++++++++++++++---------------
+ 1 file changed, 79 insertions(+), 64 deletions(-)
+
+--- a/drivers/serial/cpm_uart/cpm_uart_core.c
++++ b/drivers/serial/cpm_uart/cpm_uart_core.c
+@@ -930,6 +930,83 @@ static void cpm_uart_config_port(struct
+       }
+ }
++#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE)
++/*
++ * Write a string to the serial port
++ * Note that this is called with interrupts already disabled
++ */
++static void cpm_uart_early_write(struct uart_cpm_port *pinfo,
++              const char *string, u_int count)
++{
++      unsigned int i;
++      cbd_t __iomem *bdp, *bdbase;
++      unsigned char *cpm_outp_addr;
++
++      /* Get the address of the host memory buffer.
++       */
++      bdp = pinfo->tx_cur;
++      bdbase = pinfo->tx_bd_base;
++
++      /*
++       * Now, do each character.  This is not as bad as it looks
++       * since this is a holding FIFO and not a transmitting FIFO.
++       * We could add the complexity of filling the entire transmit
++       * buffer, but we would just wait longer between accesses......
++       */
++      for (i = 0; i < count; i++, string++) {
++              /* Wait for transmitter fifo to empty.
++               * Ready indicates output is ready, and xmt is doing
++               * that, not that it is ready for us to send.
++               */
++              while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
++                      ;
++
++              /* Send the character out.
++               * If the buffer address is in the CPM DPRAM, don't
++               * convert it.
++               */
++              cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
++                                      pinfo);
++              *cpm_outp_addr = *string;
++
++              out_be16(&bdp->cbd_datlen, 1);
++              setbits16(&bdp->cbd_sc, BD_SC_READY);
++
++              if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
++                      bdp = bdbase;
++              else
++                      bdp++;
++
++              /* if a LF, also do CR... */
++              if (*string == 10) {
++                      while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
++                              ;
++
++                      cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
++                                              pinfo);
++                      *cpm_outp_addr = 13;
++
++                      out_be16(&bdp->cbd_datlen, 1);
++                      setbits16(&bdp->cbd_sc, BD_SC_READY);
++
++                      if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
++                              bdp = bdbase;
++                      else
++                              bdp++;
++              }
++      }
++
++      /*
++       * Finally, Wait for transmitter & holding register to empty
++       *  and restore the IER
++       */
++      while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
++              ;
++
++      pinfo->tx_cur = bdp;
++}
++#endif
++
+ #ifdef CONFIG_CONSOLE_POLL
+ /* Serial polling routines for writing and reading from the uart while
+  * in an interrupt or debug context.
+@@ -999,7 +1076,7 @@ static void cpm_put_poll_char(struct uar
+       static char ch[2];
+       ch[0] = (char)c;
+-      cpm_uart_early_write(pinfo->port.line, ch, 1);
++      cpm_uart_early_write(pinfo, ch, 1);
+ }
+ #endif /* CONFIG_CONSOLE_POLL */
+@@ -1130,9 +1207,6 @@ static void cpm_uart_console_write(struc
+                                  u_int count)
+ {
+       struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
+-      unsigned int i;
+-      cbd_t __iomem *bdp, *bdbase;
+-      unsigned char *cp;
+       unsigned long flags;
+       int nolock = oops_in_progress;
+@@ -1142,66 +1216,7 @@ static void cpm_uart_console_write(struc
+               spin_lock_irqsave(&pinfo->port.lock, flags);
+       }
+-      /* Get the address of the host memory buffer.
+-       */
+-      bdp = pinfo->tx_cur;
+-      bdbase = pinfo->tx_bd_base;
+-
+-      /*
+-       * Now, do each character.  This is not as bad as it looks
+-       * since this is a holding FIFO and not a transmitting FIFO.
+-       * We could add the complexity of filling the entire transmit
+-       * buffer, but we would just wait longer between accesses......
+-       */
+-      for (i = 0; i < count; i++, s++) {
+-              /* Wait for transmitter fifo to empty.
+-               * Ready indicates output is ready, and xmt is doing
+-               * that, not that it is ready for us to send.
+-               */
+-              while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+-                      ;
+-
+-              /* Send the character out.
+-               * If the buffer address is in the CPM DPRAM, don't
+-               * convert it.
+-               */
+-              cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
+-              *cp = *s;
+-
+-              out_be16(&bdp->cbd_datlen, 1);
+-              setbits16(&bdp->cbd_sc, BD_SC_READY);
+-
+-              if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+-                      bdp = bdbase;
+-              else
+-                      bdp++;
+-
+-              /* if a LF, also do CR... */
+-              if (*s == 10) {
+-                      while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+-                              ;
+-
+-                      cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
+-                      *cp = 13;
+-
+-                      out_be16(&bdp->cbd_datlen, 1);
+-                      setbits16(&bdp->cbd_sc, BD_SC_READY);
+-
+-                      if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+-                              bdp = bdbase;
+-                      else
+-                              bdp++;
+-              }
+-      }
+-
+-      /*
+-       * Finally, Wait for transmitter & holding register to empty
+-       *  and restore the IER
+-       */
+-      while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+-              ;
+-
+-      pinfo->tx_cur = bdp;
++      cpm_uart_early_write(pinfo, s, count);
+       if (unlikely(nolock)) {
+               local_irq_restore(flags);
index 2a1ae79b1c33267d535c9e73a889cb20610d1ebd..66bc0386202a5b8d2b43619f0e4672386415ed70 100644 (file)
@@ -37,3 +37,15 @@ drm-radeon-r200-handle-more-hw-tex-coord-types.patch
 drm-radeon-kms-add-quirk-for-asus-hd-3600-board.patch
 drm-radeon-kms-fix-legacy-lvds-dpms-sequence.patch
 tpm_tis-fix-subsequent-suspend-failures.patch
+ipvs-add-missing-locking-during-connection-table-hashing-and-unhashing.patch
+netfilter-ip6t_reject-fix-a-dst-leak-in-ipv6-reject.patch
+scsi-aacraid-eliminate-use-after-free.patch
+amd64-agp-probe-unknown-agp-devices-the-right-way.patch
+perf-resurrect-flat-callchains.patch
+x86-fix-vsyscall-on-gcc-4.5-with-os.patch
+x86-calgary-increase-max-phb-number.patch
+x86-calgary-limit-the-max-phb-number-to-256.patch
+sched-prevent-compiler-from-optimising-the-sched_avg_update-loop.patch
+sched-fix-over-scheduling-bug.patch
+genirq-deal-with-desc-set_type-changing-desc-chip.patch
+serial-cpm_uart-implement-the-cpm_uart_early_write-function-for-console-poll.patch
diff --git a/queue-2.6.32/x86-calgary-increase-max-phb-number.patch b/queue-2.6.32/x86-calgary-increase-max-phb-number.patch
new file mode 100644 (file)
index 0000000..18b0d7c
--- /dev/null
@@ -0,0 +1,49 @@
+From 499a00e92dd9a75395081f595e681629eb1eebad Mon Sep 17 00:00:00 2001
+From: Darrick J. Wong <djwong@us.ibm.com>
+Date: Thu, 24 Jun 2010 14:26:47 -0700
+Subject: x86, Calgary: Increase max PHB number
+
+From: Darrick J. Wong <djwong@us.ibm.com>
+
+commit 499a00e92dd9a75395081f595e681629eb1eebad upstream.
+
+Newer systems (x3950M2) can have 48 PHBs per chassis and 8
+chassis, so bump the limits up and provide an explanation
+of the requirements for each class.
+
+Signed-off-by: Darrick J. Wong <djwong@us.ibm.com>
+Acked-by: Muli Ben-Yehuda <muli@il.ibm.com>
+Cc: Corinna Schultz <cschultz@linux.vnet.ibm.com>
+LKML-Reference: <20100624212647.GI15515@tux1.beaverton.ibm.com>
+[ v2: Fixed build bug, added back PHBS_PER_CALGARY == 4 ]
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/pci-calgary_64.c |   15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/pci-calgary_64.c
++++ b/arch/x86/kernel/pci-calgary_64.c
+@@ -102,11 +102,16 @@ int use_calgary __read_mostly = 0;
+ #define PMR_SOFTSTOPFAULT     0x40000000
+ #define PMR_HARDSTOP          0x20000000
+-#define MAX_NUM_OF_PHBS               8 /* how many PHBs in total? */
+-#define MAX_NUM_CHASSIS               8 /* max number of chassis */
+-/* MAX_PHB_BUS_NUM is the maximal possible dev->bus->number */
+-#define MAX_PHB_BUS_NUM               (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2)
+-#define PHBS_PER_CALGARY      4
++/*
++ * The maximum PHB bus number.
++ * x3950M2 (rare): 8 chassis, 48 PHBs per chassis = 384
++ * x3950M2: 4 chassis, 48 PHBs per chassis        = 192
++ * x3950 (PCIE): 8 chassis, 32 PHBs per chassis   = 256
++ * x3950 (PCIX): 8 chassis, 16 PHBs per chassis   = 128
++ */
++#define MAX_PHB_BUS_NUM               384
++
++#define PHBS_PER_CALGARY        4
+ /* register offsets in Calgary's internal register space */
+ static const unsigned long tar_offsets[] = {
diff --git a/queue-2.6.32/x86-calgary-limit-the-max-phb-number-to-256.patch b/queue-2.6.32/x86-calgary-limit-the-max-phb-number-to-256.patch
new file mode 100644 (file)
index 0000000..6d63dc3
--- /dev/null
@@ -0,0 +1,42 @@
+From d596043d71ff0d7b3d0bead19b1d68c55f003093 Mon Sep 17 00:00:00 2001
+From: Darrick J. Wong <djwong@us.ibm.com>
+Date: Wed, 30 Jun 2010 17:45:19 -0700
+Subject: x86, Calgary: Limit the max PHB number to 256
+
+From: Darrick J. Wong <djwong@us.ibm.com>
+
+commit d596043d71ff0d7b3d0bead19b1d68c55f003093 upstream.
+
+The x3950 family can have as many as 256 PCI buses in a single system, so
+change the limits to the maximum.  Since there can only be 256 PCI buses in one
+domain, we no longer need the BUG_ON check.
+
+Signed-off-by: Darrick J. Wong <djwong@us.ibm.com>
+LKML-Reference: <20100701004519.GQ15515@tux1.beaverton.ibm.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/pci-calgary_64.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/pci-calgary_64.c
++++ b/arch/x86/kernel/pci-calgary_64.c
+@@ -109,7 +109,7 @@ int use_calgary __read_mostly = 0;
+  * x3950 (PCIE): 8 chassis, 32 PHBs per chassis   = 256
+  * x3950 (PCIX): 8 chassis, 16 PHBs per chassis   = 128
+  */
+-#define MAX_PHB_BUS_NUM               384
++#define MAX_PHB_BUS_NUM               256
+ #define PHBS_PER_CALGARY        4
+@@ -1058,8 +1058,6 @@ static int __init calgary_init_one(struc
+       struct iommu_table *tbl;
+       int ret;
+-      BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
+-
+       bbar = busno_to_bbar(dev->bus->number);
+       ret = calgary_setup_tar(dev, bbar);
+       if (ret)
diff --git a/queue-2.6.32/x86-fix-vsyscall-on-gcc-4.5-with-os.patch b/queue-2.6.32/x86-fix-vsyscall-on-gcc-4.5-with-os.patch
new file mode 100644 (file)
index 0000000..fa9ca3c
--- /dev/null
@@ -0,0 +1,36 @@
+From 124482935fb7fb9303c8a8ab930149c6a93d9910 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <andi@firstfloor.org>
+Date: Fri, 18 Jun 2010 23:09:00 +0200
+Subject: x86: Fix vsyscall on gcc 4.5 with -Os
+
+From: Andi Kleen <andi@firstfloor.org>
+
+commit 124482935fb7fb9303c8a8ab930149c6a93d9910 upstream.
+
+This fixes the -Os breaks with gcc 4.5 bug.  rdtsc_barrier needs to be
+force inlined, otherwise user space will jump into kernel space and
+kill init.
+
+This also addresses http://gcc.gnu.org/bugzilla/show_bug.cgi?id=44129
+I believe.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+LKML-Reference: <20100618210859.GA10913@basil.fritz.box>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/system.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/system.h
++++ b/arch/x86/include/asm/system.h
+@@ -449,7 +449,7 @@ void stop_this_cpu(void *dummy);
+  *
+  * (Could use an alternative three way for this if there was one.)
+  */
+-static inline void rdtsc_barrier(void)
++static __always_inline void rdtsc_barrier(void)
+ {
+       alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+       alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);