--- /dev/null
+From 6fd024893911dcb51b4a0aa71971db5ba38f7071 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Wed, 24 Mar 2010 03:36:31 +0000
+Subject: amd64-agp: Probe unknown AGP devices the right way
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+commit 6fd024893911dcb51b4a0aa71971db5ba38f7071 upstream.
+
+The current initialisation code probes 'unsupported' AGP devices
+simply by calling its own probe function. It does not lock these
+devices or even check whether another driver is already bound to
+them.
+
+We must use the device core to manage this. So if the specific
+device id table didn't match anything and agp_try_unsupported=1,
+switch the device id table and call driver_attach() again.
+
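+As background (an illustrative sketch, not part of this patch): a
+catch-all PCI device table works because PCI_DEVICE_CLASS(class, mask)
+with a zero mask compares no class bits at all, so every device matches
+and the core then lets the driver's probe function decide:
+
+	/* hypothetical example of a "match anything" id table */
+	static DEFINE_PCI_DEVICE_TABLE(any_pci_device_table) = {
+		{ PCI_DEVICE_CLASS(0, 0) },	/* class 0, mask 0 */
+		{ }				/* terminating entry */
+	};
+
+agp_amd64_probe() still rejects devices without an AGP capability, so
+binding stays limited to plausible bridges.
+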
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/agp/amd64-agp.c | 27 +++++++++++++++------------
+ 1 file changed, 15 insertions(+), 12 deletions(-)
+
+--- a/drivers/char/agp/amd64-agp.c
++++ b/drivers/char/agp/amd64-agp.c
+@@ -499,6 +499,10 @@ static int __devinit agp_amd64_probe(str
+ u8 cap_ptr;
+ int err;
+
++ /* The Highlander principle */
++ if (agp_bridges_found)
++ return -ENODEV;
++
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+@@ -562,6 +566,8 @@ static void __devexit agp_amd64_remove(s
+ amd64_aperture_sizes[bridge->aperture_size_idx].size);
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
++
++ agp_bridges_found--;
+ }
+
+ #ifdef CONFIG_PM
+@@ -709,6 +715,11 @@ static struct pci_device_id agp_amd64_pc
+
+ MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
+
++static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
++ { PCI_DEVICE_CLASS(0, 0) },
++ { }
++};
++
+ static struct pci_driver agp_amd64_pci_driver = {
+ .name = "agpgart-amd64",
+ .id_table = agp_amd64_pci_table,
+@@ -734,7 +745,6 @@ int __init agp_amd64_init(void)
+ return err;
+
+ if (agp_bridges_found == 0) {
+- struct pci_dev *dev;
+ if (!agp_try_unsupported && !agp_try_unsupported_boot) {
+ printk(KERN_INFO PFX "No supported AGP bridge found.\n");
+ #ifdef MODULE
+@@ -750,17 +760,10 @@ int __init agp_amd64_init(void)
+ return -ENODEV;
+
+ /* Look for any AGP bridge */
+- dev = NULL;
+- err = -ENODEV;
+- for_each_pci_dev(dev) {
+- if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
+- continue;
+- /* Only one bridge supported right now */
+- if (agp_amd64_probe(dev, NULL) == 0) {
+- err = 0;
+- break;
+- }
+- }
++ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
++ err = driver_attach(&agp_amd64_pci_driver.driver);
++ if (err == 0 && agp_bridges_found == 0)
++ err = -ENODEV;
+ }
+ return err;
+ }
--- /dev/null
+From 41c310447fe06bcedc22b75752c18b60e0b9521b Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 2 Jul 2010 17:02:43 +0200
+Subject: amd64_edac: Fix syndrome calculation on K8
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 41c310447fe06bcedc22b75752c18b60e0b9521b upstream.
+
+When calculating the DCT channel from the syndrome we need to know the
+syndrome type (x4 vs x8). On F10h, this is read out from extended PCI
+cfg space register F3x180 while on K8 we only support x4 syndromes and
+don't have extended PCI config space anyway.
+
+Make the code that accesses F3x180 F10h-only and fall back to x4
+syndromes on everything else.
+
+Reported-by: Jeffrey Merkey <jeffmerkey@gmail.com>
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -1958,20 +1958,20 @@ static int get_channel_from_ecc_syndrome
+ u32 value = 0;
+ int err_sym = 0;
+
+- amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
++ if (boot_cpu_data.x86 == 0x10) {
+
+- /* F3x180[EccSymbolSize]=1, x8 symbols */
+- if (boot_cpu_data.x86 == 0x10 &&
+- boot_cpu_data.x86_model > 7 &&
+- value & BIT(25)) {
+- err_sym = decode_syndrome(syndrome, x8_vectors,
+- ARRAY_SIZE(x8_vectors), 8);
+- return map_err_sym_to_channel(err_sym, 8);
+- } else {
+- err_sym = decode_syndrome(syndrome, x4_vectors,
+- ARRAY_SIZE(x4_vectors), 4);
+- return map_err_sym_to_channel(err_sym, 4);
++ amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
++
++ /* F3x180[EccSymbolSize]=1 => x8 symbols */
++ if (boot_cpu_data.x86_model > 7 &&
++ value & BIT(25)) {
++ err_sym = decode_syndrome(syndrome, x8_vectors,
++ ARRAY_SIZE(x8_vectors), 8);
++ return map_err_sym_to_channel(err_sym, 8);
++ }
+ }
++ err_sym = decode_syndrome(syndrome, x4_vectors, ARRAY_SIZE(x4_vectors), 4);
++ return map_err_sym_to_channel(err_sym, 4);
+ }
+
+ /*
--- /dev/null
+From c10b61f0910466b4b99c266a7d76ac4390743fb5 Mon Sep 17 00:00:00 2001
+From: Jeff Moyer <jmoyer@redhat.com>
+Date: Thu, 17 Jun 2010 10:19:11 -0400
+Subject: cfq: Don't allow queue merges for queues that have no process references
+
+From: Jeff Moyer <jmoyer@redhat.com>
+
+commit c10b61f0910466b4b99c266a7d76ac4390743fb5 upstream.
+
+Hi,
+
+A user reported a kernel bug when running a particular program that did
+the following:
+
+created 32 threads
+- each thread took a mutex, grabbed a global offset, added a buffer size
+ to that offset, released the lock
+- read from the given offset in the file
+- created a new thread to do the same
+- exited
+
+The result is that cfq's close cooperator logic would trigger, as the
+threads were issuing I/O within the mean seek distance of one another.
+This workload managed to routinely trigger a use after free bug when
+walking the list of merge candidates for a particular cfqq
+(cfqq->new_cfqq). The logic used for merging queues looks like this:
+
+static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
+{
+ int process_refs, new_process_refs;
+ struct cfq_queue *__cfqq;
+
+ /* Avoid a circular list and skip interim queue merges */
+ while ((__cfqq = new_cfqq->new_cfqq)) {
+ if (__cfqq == cfqq)
+ return;
+ new_cfqq = __cfqq;
+ }
+
+ process_refs = cfqq_process_refs(cfqq);
+ /*
+ * If the process for the cfqq has gone away, there is no
+ * sense in merging the queues.
+ */
+ if (process_refs == 0)
+ return;
+
+ /*
+ * Merge in the direction of the lesser amount of work.
+ */
+ new_process_refs = cfqq_process_refs(new_cfqq);
+ if (new_process_refs >= process_refs) {
+ cfqq->new_cfqq = new_cfqq;
+ atomic_add(process_refs, &new_cfqq->ref);
+ } else {
+ new_cfqq->new_cfqq = cfqq;
+ atomic_add(new_process_refs, &cfqq->ref);
+ }
+}
+
+When a merge candidate is found, we add the process references for the
+queue with fewer references to the queue with more. The actual merging
+of queues happens when a new request is issued for a given cfqq. In the
+case of the test program, it only does a single pread call to read in
+1MB, so the actual merge never happens.
+
+Normally, this is fine, as when the queue exits, we simply drop the
+references we took on the other cfqqs in the merge chain:
+
+ /*
+ * If this queue was scheduled to merge with another queue, be
+ * sure to drop the reference taken on that queue (and others in
+ * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
+ */
+ __cfqq = cfqq->new_cfqq;
+ while (__cfqq) {
+ if (__cfqq == cfqq) {
+ WARN(1, "cfqq->new_cfqq loop detected\n");
+ break;
+ }
+ next = __cfqq->new_cfqq;
+ cfq_put_queue(__cfqq);
+ __cfqq = next;
+ }
+
+However, there is a hole in this logic. Consider the following (and
+keep in mind that each I/O keeps a reference to the cfqq):
+
+q1->new_cfqq = q2 // q2 now has 2 process references
+q3->new_cfqq = q2 // q2 now has 3 process references
+
+// the process associated with q2 exits
+// q2 now has 2 process references
+
+// queue 1 exits, drops its reference on q2
+// q2 now has 1 process reference
+
+// q3 exits, so has 0 process references, and hence drops its references
+// to q2, which leaves q2 also with 0 process references
+
+q4 comes along and wants to merge with q3
+
+q3->new_cfqq still points at q2! We follow that link and end up at an
+already freed cfqq.
+
+So, the fix is to not follow a merge chain if the top-most queue does
+not have a process reference, otherwise any queue in the chain could
+already have been freed. I also changed the logic to disallow merging with a
+queue that does not have any process references. Previously, we did
+this check for one of the merge candidates, but not the other. That
+doesn't really make sense.
+
+Without the attached patch, my system would BUG within a couple of
+seconds of running the reproducer program. With the patch applied, my
+system ran the program for over an hour without issues.
+
+This addresses the following bugzilla:
+ https://bugzilla.kernel.org/show_bug.cgi?id=16217
+
+Thanks a ton to Phil Carns for providing the bug report and an excellent
+reproducer.
+
+[ Note for stable: this applies to 2.6.32/33/34 ].
+
+Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
+Reported-by: Phil Carns <carns@mcs.anl.gov>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/cfq-iosched.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -1934,6 +1934,15 @@ static void cfq_setup_merge(struct cfq_q
+ int process_refs, new_process_refs;
+ struct cfq_queue *__cfqq;
+
++ /*
++ * If there are no process references on the new_cfqq, then it is
++ * unsafe to follow the ->new_cfqq chain as other cfqq's in the
++ * chain may have dropped their last reference (not just their
++ * last process reference).
++ */
++ if (!cfqq_process_refs(new_cfqq))
++ return;
++
+ /* Avoid a circular list and skip interim queue merges */
+ while ((__cfqq = new_cfqq->new_cfqq)) {
+ if (__cfqq == cfqq)
+@@ -1942,17 +1951,17 @@ static void cfq_setup_merge(struct cfq_q
+ }
+
+ process_refs = cfqq_process_refs(cfqq);
++ new_process_refs = cfqq_process_refs(new_cfqq);
+ /*
+ * If the process for the cfqq has gone away, there is no
+ * sense in merging the queues.
+ */
+- if (process_refs == 0)
++ if (process_refs == 0 || new_process_refs == 0)
+ return;
+
+ /*
+ * Merge in the direction of the lesser amount of work.
+ */
+- new_process_refs = cfqq_process_refs(new_cfqq);
+ if (new_process_refs >= process_refs) {
+ cfqq->new_cfqq = new_cfqq;
+ atomic_add(process_refs, &new_cfqq->ref);
--- /dev/null
+From 4673247562e39a17e09440fa1400819522ccd446 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 7 Jun 2010 17:53:51 +0200
+Subject: genirq: Deal with desc->set_type() changing desc->chip
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 4673247562e39a17e09440fa1400819522ccd446 upstream.
+
+The set_type() function can change the chip implementation when the
+trigger mode changes. That might result in using a non-initialized
+irq chip when called from __setup_irq() or when called via
+set_irq_type() on an already enabled irq.
+
+The set_irq_type() function should not be called on an enabled irq,
+but because we forgot to put a check into it, a bunch of users have
+grown the habit of doing that. It never blew up because the function
+is serialized via desc->lock against all users of desc->chip, so they
+never hit the non-initialized irq chip issue.
+
+The easy fix for the __setup_irq() issue would be to move the
+irq_chip_set_defaults(desc->chip) call after the trigger setting to
+make sure that a chip change is covered.
+
+But as we have already users, which do the type setting after
+request_irq(), the safe fix for now is to call irq_chip_set_defaults()
+from __irq_set_trigger() when desc->set_type() changed the irq chip.
+
+It needs a deeper analysis whether we should refuse to change the chip
+on an already enabled irq, but that'd be a large scale change to fix
+all the existing users. So that's neither stable nor 2.6.35 material.
+
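+For illustration, a hypothetical set_type() callback of the kind this
+change has to cope with (a sketch, not taken from any real driver)
+might switch the chip together with the trigger mode:
+
+	static int example_set_type(unsigned int irq, unsigned int flow_type)
+	{
+		struct irq_desc *desc = irq_to_desc(irq);
+
+		if (flow_type & IRQ_TYPE_EDGE_BOTH)
+			desc->chip = &example_edge_chip;	/* chip changed! */
+		else
+			desc->chip = &example_level_chip;
+		return 0;
+	}
+
+After such a callback runs, the chip that irq_chip_set_defaults() was
+originally called on is no longer the one installed, which is exactly
+the case the added check in __irq_set_trigger() covers.
+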
+Reported-by: Esben Haabendal <eha@doredevelopment.dk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: linuxppc-dev <linuxppc-dev@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/irq/manage.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -436,6 +436,9 @@ int __irq_set_trigger(struct irq_desc *d
+ /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
+ desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+ desc->status |= flags;
++
++ if (chip != desc->chip)
++ irq_chip_set_defaults(desc->chip);
+ }
+
+ return ret;
--- /dev/null
+From 9f888160bdcccf0565dd2774956b8d9456e610be Mon Sep 17 00:00:00 2001
+From: stephen hemminger <shemminger@vyatta.com>
+Date: Mon, 21 Jun 2010 11:00:13 +0000
+Subject: ipv6: fix NULL reference in proxy neighbor discovery
+
+From: stephen hemminger <shemminger@vyatta.com>
+
+commit 9f888160bdcccf0565dd2774956b8d9456e610be upstream.
+
+The addition of the TLLAO option created a kernel OOPS regression
+for the case where a neighbor advertisement is sent via the proxy
+path. When proxying, ipv6_get_ifaddr() returns NULL, causing the
+NULL dereference; fix this by reading force_tllao only in the branch
+where ifp is known to be non-NULL.
+
+The change that caused the bug was:
+commit f7734fdf61ec6bb848e0bafc1fb8bad2c124bb50
+Author: Octavian Purdila <opurdila@ixiacom.com>
+Date: Fri Oct 2 11:39:15 2009 +0000
+
+ make TLLAO option for NA packets configurable
+
+Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
+Acked-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv6/ndisc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -585,6 +585,7 @@ static void ndisc_send_na(struct net_dev
+ src_addr = solicited_addr;
+ if (ifp->flags & IFA_F_OPTIMISTIC)
+ override = 0;
++ inc_opt |= ifp->idev->cnf.force_tllao;
+ in6_ifa_put(ifp);
+ } else {
+ if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
+@@ -598,7 +599,6 @@ static void ndisc_send_na(struct net_dev
+ icmp6h.icmp6_solicited = solicited;
+ icmp6h.icmp6_override = override;
+
+- inc_opt |= ifp->idev->cnf.force_tllao;
+ __ndisc_send(dev, neigh, daddr, src_addr,
+ &icmp6h, solicited_addr,
+ inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
--- /dev/null
+From aea9d711f3d68c656ad31ab578ecfb0bb5cd7f97 Mon Sep 17 00:00:00 2001
+From: Sven Wegener <sven.wegener@stealer.net>
+Date: Wed, 9 Jun 2010 16:10:57 +0200
+Subject: ipvs: Add missing locking during connection table hashing and unhashing
+
+From: Sven Wegener <sven.wegener@stealer.net>
+
+commit aea9d711f3d68c656ad31ab578ecfb0bb5cd7f97 upstream.
+
+The code that hashes and unhashes connections from the connection table
+is missing locking of the connection being modified, which opens up a
+race condition that results in memory corruption when hit.
+
+Here is what happens in pretty verbose form:
+
+CPU 0 CPU 1
+------------ ------------
+An active connection is terminated and
+we schedule ip_vs_conn_expire() on this
+CPU to expire this connection.
+
+ IRQ assignment is changed to this CPU,
+ but the expire timer stays scheduled on
+ the other CPU.
+
+ New connection from same ip:port comes
+ in right before the timer expires, we
+ find the inactive connection in our
+ connection table and get a reference to
+                                        it. We properly lock the connection in
+ tcp_state_transition() and read the
+ connection flags in set_tcp_state().
+
+ip_vs_conn_expire() gets called, we
+unhash the connection from our
+connection table and remove the hashed
+flag in ip_vs_conn_unhash(), without
+proper locking!
+
+ While still holding proper locks we
+ write the connection flags in
+ set_tcp_state() and this sets the hashed
+ flag again.
+
+ip_vs_conn_expire() fails to expire the
+connection, because the other CPU has
+incremented the reference count. We try
+to re-insert the connection into our
+connection table, but this fails in
+ip_vs_conn_hash(), because the hashed
+flag has been set by the other CPU. We
+re-schedule execution of
+ip_vs_conn_expire(). Now this connection
+has the hashed flag set, but isn't
+actually hashed in our connection table
+and has a dangling list_head.
+
+ We drop the reference we held on the
+ connection and schedule the expire timer
+                                        for timing out the connection on this
+ CPU. Further packets won't be able to
+ find this connection in our connection
+ table.
+
+ ip_vs_conn_expire() gets called again,
+ we think it's already hashed, but the
+ list_head is dangling and while removing
+ the connection from our connection table
+ we write to the memory location where
+ this list_head points to.
+
+The result will probably be a kernel oops at some other point in time.
+
+This race condition is pretty subtle, but it can be triggered remotely.
+It needs the IRQ assignment change or another circumstance where packets
+coming from the same ip:port for the same service are being processed on
+different CPUs. And it involves hitting the exact time at which
+ip_vs_conn_expire() gets called. It can be avoided by making sure that
+all packets from one connection are always processed on the same CPU and
+can be made harder to exploit by changing the connection timeouts to
+some custom values.
+
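+In outline, the fix nests the per-connection lock inside the hash
+bucket lock in both ip_vs_conn_hash() and ip_vs_conn_unhash(), so that
+testing and updating IP_VS_CONN_F_HASHED is atomic with respect to the
+flag write in set_tcp_state(). Simplified locking order (a sketch of
+the pattern the patch below applies):
+
+	ct_write_lock(hash);		/* protects the hash bucket */
+	spin_lock(&cp->lock);		/* protects cp->flags */
+	/* test/update IP_VS_CONN_F_HASHED and the list linkage */
+	spin_unlock(&cp->lock);
+	ct_write_unlock(hash);
+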
+Signed-off-by: Sven Wegener <sven.wegener@stealer.net>
+Acked-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/netfilter/ipvs/ip_vs_conn.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -146,6 +146,7 @@ static inline int ip_vs_conn_hash(struct
+ hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
+
+ ct_write_lock(hash);
++ spin_lock(&cp->lock);
+
+ if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
+ list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
+@@ -158,6 +159,7 @@ static inline int ip_vs_conn_hash(struct
+ ret = 0;
+ }
+
++ spin_unlock(&cp->lock);
+ ct_write_unlock(hash);
+
+ return ret;
+@@ -177,6 +179,7 @@ static inline int ip_vs_conn_unhash(stru
+ hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
+
+ ct_write_lock(hash);
++ spin_lock(&cp->lock);
+
+ if (cp->flags & IP_VS_CONN_F_HASHED) {
+ list_del(&cp->c_list);
+@@ -186,6 +189,7 @@ static inline int ip_vs_conn_unhash(stru
+ } else
+ ret = 0;
+
++ spin_unlock(&cp->lock);
+ ct_write_unlock(hash);
+
+ return ret;
--- /dev/null
+From 0544a21db02c1d8883158fd6f323364f830a120a Mon Sep 17 00:00:00 2001
+From: Prasanna S. Panchamukhi <prasanna.panchamukhi@riverbed.com>
+Date: Thu, 24 Jun 2010 13:31:03 +1000
+Subject: md: raid10: Fix null pointer dereference in fix_read_error()
+
+From: Prasanna S. Panchamukhi <prasanna.panchamukhi@riverbed.com>
+
+commit 0544a21db02c1d8883158fd6f323364f830a120a upstream.
+
+Such a NULL pointer dereference can occur when the driver is fixing
+read errors/bad blocks and the disk is physically removed,
+causing a system crash. This patch checks that
+rcu_dereference() returns a valid rdev before accessing it in fix_read_error().
+
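+In outline, the pattern applied below (a simplified sketch; the real
+code does the actual I/O between the check and the unlock):
+
+	rcu_read_lock();
+	rdev = rcu_dereference(conf->mirrors[d].rdev);
+	if (rdev) {
+		/* use rdev only inside this NULL check */
+	}
+	rcu_read_unlock();
+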
+Signed-off-by: Prasanna S. Panchamukhi <prasanna.panchamukhi@riverbed.com>
+Signed-off-by: Rob Becker <rbecker@riverbed.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/raid10.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1486,14 +1486,14 @@ static void fix_read_error(conf_t *conf,
+ int sectors = r10_bio->sectors;
+ mdk_rdev_t*rdev;
+ int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
++ int d = r10_bio->devs[r10_bio->read_slot].devnum;
+
+ rcu_read_lock();
+- {
+- int d = r10_bio->devs[r10_bio->read_slot].devnum;
++ rdev = rcu_dereference(conf->mirrors[d].rdev);
++ if (rdev) { /* If rdev is not NULL */
+ char b[BDEVNAME_SIZE];
+ int cur_read_error_count = 0;
+
+- rdev = rcu_dereference(conf->mirrors[d].rdev);
+ bdevname(rdev->bdev, b);
+
+ if (test_bit(Faulty, &rdev->flags)) {
+@@ -1533,7 +1533,7 @@ static void fix_read_error(conf_t *conf,
+
+ rcu_read_lock();
+ do {
+- int d = r10_bio->devs[sl].devnum;
++ d = r10_bio->devs[sl].devnum;
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
+ if (rdev &&
+ test_bit(In_sync, &rdev->flags)) {
+@@ -1567,7 +1567,7 @@ static void fix_read_error(conf_t *conf,
+ rcu_read_lock();
+ while (sl != r10_bio->read_slot) {
+ char b[BDEVNAME_SIZE];
+- int d;
++
+ if (sl==0)
+ sl = conf->copies;
+ sl--;
+@@ -1603,7 +1603,7 @@ static void fix_read_error(conf_t *conf,
+ }
+ sl = start;
+ while (sl != r10_bio->read_slot) {
+- int d;
++
+ if (sl==0)
+ sl = conf->copies;
+ sl--;
--- /dev/null
+From 499031ac8a3df6738f6186ded9da853e8ea18253 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Fri, 2 Jul 2010 10:05:01 +0200
+Subject: netfilter: ip6t_REJECT: fix a dst leak in ipv6 REJECT
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+commit 499031ac8a3df6738f6186ded9da853e8ea18253 upstream.
+
+We should release dst if dst->error is set.
+
+Bug introduced in 2.6.14 by commit e104411b82f5c
+([XFRM]: Always release dst_entry on error in xfrm_lookup)
+
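+For context (a sketch of the rule the fix applies): even when
+ip6_route_output() reports failure through dst->error, the returned
+dst still holds a reference that the caller must drop:
+
+	dst = ip6_route_output(net, NULL, &fl);
+	if (dst == NULL || dst->error) {
+		dst_release(dst);	/* dst_release(NULL) is a no-op */
+		return;
+	}
+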
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv6/netfilter/ip6t_REJECT.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/netfilter/ip6t_REJECT.c
++++ b/net/ipv6/netfilter/ip6t_REJECT.c
+@@ -95,9 +95,11 @@ static void send_reset(struct net *net,
+ fl.fl_ip_dport = otcph.source;
+ security_skb_classify_flow(oldskb, &fl);
+ dst = ip6_route_output(net, NULL, &fl);
+- if (dst == NULL)
++ if (dst == NULL || dst->error) {
++ dst_release(dst);
+ return;
+- if (dst->error || xfrm_lookup(net, &dst, &fl, NULL, 0))
++ }
++ if (xfrm_lookup(net, &dst, &fl, NULL, 0))
+ return;
+
+ hh_len = (dst->dev->hard_header_len + 15)&~15;
--- /dev/null
+From 97aa1052739c6a06cb6b0467dbf410613d20bc97 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <fweisbec@gmail.com>
+Date: Thu, 8 Jul 2010 06:06:17 +0200
+Subject: perf: Resurrect flat callchains
+
+From: Frederic Weisbecker <fweisbec@gmail.com>
+
+commit 97aa1052739c6a06cb6b0467dbf410613d20bc97 upstream.
+
+Initialize the callchain radix tree root correctly.
+
+When we walk through the parents, we must stop after the root, but
+since it wasn't well initialized, its parent pointer was random.
+
+Also, the number of hits was random because it was uninitialized, so
+the root was counted as part of the callchain even though it doesn't
+contain anything.
+
+This fixes segfaults and percentages followed by empty callchains
+while running:
+
+ perf report -g flat
+
+Reported-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ tools/perf/util/callchain.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/tools/perf/util/callchain.h
++++ b/tools/perf/util/callchain.h
+@@ -48,6 +48,9 @@ static inline void callchain_init(struct
+ INIT_LIST_HEAD(&node->brothers);
+ INIT_LIST_HEAD(&node->children);
+ INIT_LIST_HEAD(&node->val);
++
++ node->parent = NULL;
++ node->hit = 0;
+ }
+
+ static inline u64 cumul_hits(struct callchain_node *node)
--- /dev/null
+From 3c93717cfa51316e4dbb471e7c0f9d243359d5f8 Mon Sep 17 00:00:00 2001
+From: Alex,Shi <alex.shi@intel.com>
+Date: Thu, 17 Jun 2010 14:08:13 +0800
+Subject: sched: Fix over-scheduling bug
+
+From: Alex,Shi <alex.shi@intel.com>
+
+commit 3c93717cfa51316e4dbb471e7c0f9d243359d5f8 upstream.
+
+Commit e70971591 ("sched: Optimize unused cgroup configuration") introduced
+an imbalanced scheduling bug.
+
+If we do not use CGROUP, update_h_load() won't update h_load. When the
+system has far more tasks than logical CPUs, the incorrect
+cfs_rq[cpu]->h_load value will cause load_balance() to pull too many
+tasks to the local CPU from the busiest CPU, so the role of busiest CPU
+keeps rotating round-robin. That hurts performance.
+
+The issue was originally found with a scientific calculation workload
+developed by Yanmin. With that commit, the workload's performance drops
+by about 40%.
+
+ CPU before after
+
+ 00 : 2 : 7
+ 01 : 1 : 7
+ 02 : 11 : 6
+ 03 : 12 : 7
+ 04 : 6 : 6
+ 05 : 11 : 7
+ 06 : 10 : 6
+ 07 : 12 : 7
+ 08 : 11 : 6
+ 09 : 12 : 6
+ 10 : 1 : 6
+ 11 : 1 : 6
+ 12 : 6 : 6
+ 13 : 2 : 6
+ 14 : 2 : 6
+ 15 : 1 : 6
+
+Reviewed-by: Yanmin zhang <yanmin.zhang@intel.com>
+Signed-off-by: Alex Shi <alex.shi@intel.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+LKML-Reference: <1276754893.9452.5442.camel@debian>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sched.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -1719,9 +1719,6 @@ static void update_shares_locked(struct
+
+ static void update_h_load(long cpu)
+ {
+- if (root_task_group_empty())
+- return;
+-
+ walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
+ }
+
--- /dev/null
+From 0d98bb2656e9bd2dfda2d089db1fe1dbdab41504 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 24 May 2010 12:11:43 -0700
+Subject: sched: Prevent compiler from optimising the sched_avg_update() loop
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 0d98bb2656e9bd2dfda2d089db1fe1dbdab41504 upstream.
+
+GCC 4.4.1 on ARM has been observed to replace the while loop in
+sched_avg_update with a call to uldivmod, resulting in the
+following build failure at link-time:
+
+kernel/built-in.o: In function `sched_avg_update':
+ kernel/sched.c:1261: undefined reference to `__aeabi_uldivmod'
+ kernel/sched.c:1261: undefined reference to `__aeabi_uldivmod'
+make: *** [.tmp_vmlinux1] Error 1
+
+This patch introduces a fake data hazard in the loop body to prevent
+the compiler from optimising the loop into a division.
+
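+In outline, the hazard is an empty inline asm with a read-write
+operand: GCC must assume the asm reads and rewrites the variable, so
+it cannot prove the loop is a pure repeated addition and
+strength-reduce it into a 64-bit division (which needs
+__aeabi_uldivmod on ARM). A minimal standalone sketch of the same idea:
+
+	/* illustrative sketch, not the kernel code */
+	static inline void compiler_opaque_u64(unsigned long long *val)
+	{
+		/* emits no instructions; "+rm" marks *val read-and-written */
+		asm("" : "+rm" (*val));
+	}
+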
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Russell King <rmk@arm.linux.org.uk>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sched.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -1259,6 +1259,12 @@ static void sched_avg_update(struct rq *
+ s64 period = sched_avg_period();
+
+ while ((s64)(rq->clock - rq->age_stamp) > period) {
++ /*
++ * Inline assembly required to prevent the compiler
++ * optimising this loop into a divmod call.
++ * See __iter_div_u64_rem() for another example of this.
++ */
++ asm("" : "+rm" (rq->age_stamp));
+ rq->age_stamp += period;
+ rq->rt_avg /= 2;
+ }
--- /dev/null
+From 8a52da632ceb9d8b776494563df579e87b7b586b Mon Sep 17 00:00:00 2001
+From: Julia Lawall <julia@diku.dk>
+Date: Sat, 15 May 2010 11:46:12 +0200
+Subject: SCSI: aacraid: Eliminate use after free
+
+From: Julia Lawall <julia@diku.dk>
+
+commit 8a52da632ceb9d8b776494563df579e87b7b586b upstream.
+
+The debugging code using the freed structure is moved before the kfree.
+
+A simplified version of the semantic match that finds this problem is as
+follows: (http://coccinelle.lip6.fr/)
+
+// <smpl>
+@free@
+expression E;
+position p;
+@@
+kfree@p(E)
+
+@@
+expression free.E, subE<=free.E, E1;
+position free.p;
+@@
+
+ kfree@p(E)
+ ...
+(
+ subE = E1
+|
+* E
+)
+// </smpl>
+
+Signed-off-by: Julia Lawall <julia@diku.dk>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+
+---
+ drivers/scsi/aacraid/commctrl.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/aacraid/commctrl.c
++++ b/drivers/scsi/aacraid/commctrl.c
+@@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_d
+ /* Does this really need to be GFP_DMA? */
+ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+ if(!p) {
+- kfree (usg);
+- dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
++ dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ usg->sg[i].count,i,usg->count));
++ kfree(usg);
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
--- /dev/null
+From 8cd774ad30c22b9d89823f1f05d845f4cdaba9e8 Mon Sep 17 00:00:00 2001
+From: Dongdong Deng <dongdong.deng@windriver.com>
+Date: Thu, 17 Jun 2010 11:13:40 +0800
+Subject: serial: cpm_uart: implement the cpm_uart_early_write() function for console poll
+
+From: Dongdong Deng <dongdong.deng@windriver.com>
+
+commit 8cd774ad30c22b9d89823f1f05d845f4cdaba9e8 upstream.
+
+The cpm_uart_early_write() function, which the console poll code
+calls, isn't implemented in the cpm uart driver.
+
+Implementing this function both fixes the build when CONFIG_CONSOLE_POLL
+is set and allows kgdboc to work via the cpm uart.
+
+Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>
+Reviewed-by: Bruce Ashfield <bruce.ashfield@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/serial/cpm_uart/cpm_uart_core.c | 143 +++++++++++++++++---------------
+ 1 file changed, 79 insertions(+), 64 deletions(-)
+
+--- a/drivers/serial/cpm_uart/cpm_uart_core.c
++++ b/drivers/serial/cpm_uart/cpm_uart_core.c
+@@ -930,6 +930,83 @@ static void cpm_uart_config_port(struct
+ }
+ }
+
++#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE)
++/*
++ * Write a string to the serial port
++ * Note that this is called with interrupts already disabled
++ */
++static void cpm_uart_early_write(struct uart_cpm_port *pinfo,
++ const char *string, u_int count)
++{
++ unsigned int i;
++ cbd_t __iomem *bdp, *bdbase;
++ unsigned char *cpm_outp_addr;
++
++ /* Get the address of the host memory buffer.
++ */
++ bdp = pinfo->tx_cur;
++ bdbase = pinfo->tx_bd_base;
++
++ /*
++ * Now, do each character. This is not as bad as it looks
++ * since this is a holding FIFO and not a transmitting FIFO.
++ * We could add the complexity of filling the entire transmit
++ * buffer, but we would just wait longer between accesses......
++ */
++ for (i = 0; i < count; i++, string++) {
++ /* Wait for transmitter fifo to empty.
++ * Ready indicates output is ready, and xmt is doing
++ * that, not that it is ready for us to send.
++ */
++ while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
++ ;
++
++ /* Send the character out.
++ * If the buffer address is in the CPM DPRAM, don't
++ * convert it.
++ */
++ cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
++ pinfo);
++ *cpm_outp_addr = *string;
++
++ out_be16(&bdp->cbd_datlen, 1);
++ setbits16(&bdp->cbd_sc, BD_SC_READY);
++
++ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
++ bdp = bdbase;
++ else
++ bdp++;
++
++ /* if a LF, also do CR... */
++ if (*string == 10) {
++ while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
++ ;
++
++ cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
++ pinfo);
++ *cpm_outp_addr = 13;
++
++ out_be16(&bdp->cbd_datlen, 1);
++ setbits16(&bdp->cbd_sc, BD_SC_READY);
++
++ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
++ bdp = bdbase;
++ else
++ bdp++;
++ }
++ }
++
++ /*
++ * Finally, Wait for transmitter & holding register to empty
++ * and restore the IER
++ */
++ while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
++ ;
++
++ pinfo->tx_cur = bdp;
++}
++#endif
++
+ #ifdef CONFIG_CONSOLE_POLL
+ /* Serial polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+@@ -999,7 +1076,7 @@ static void cpm_put_poll_char(struct uar
+ static char ch[2];
+
+ ch[0] = (char)c;
+- cpm_uart_early_write(pinfo->port.line, ch, 1);
++ cpm_uart_early_write(pinfo, ch, 1);
+ }
+ #endif /* CONFIG_CONSOLE_POLL */
+
+@@ -1130,9 +1207,6 @@ static void cpm_uart_console_write(struc
+ u_int count)
+ {
+ struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
+- unsigned int i;
+- cbd_t __iomem *bdp, *bdbase;
+- unsigned char *cp;
+ unsigned long flags;
+ int nolock = oops_in_progress;
+
+@@ -1142,66 +1216,7 @@ static void cpm_uart_console_write(struc
+ spin_lock_irqsave(&pinfo->port.lock, flags);
+ }
+
+- /* Get the address of the host memory buffer.
+- */
+- bdp = pinfo->tx_cur;
+- bdbase = pinfo->tx_bd_base;
+-
+- /*
+- * Now, do each character. This is not as bad as it looks
+- * since this is a holding FIFO and not a transmitting FIFO.
+- * We could add the complexity of filling the entire transmit
+- * buffer, but we would just wait longer between accesses......
+- */
+- for (i = 0; i < count; i++, s++) {
+- /* Wait for transmitter fifo to empty.
+- * Ready indicates output is ready, and xmt is doing
+- * that, not that it is ready for us to send.
+- */
+- while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+- ;
+-
+- /* Send the character out.
+- * If the buffer address is in the CPM DPRAM, don't
+- * convert it.
+- */
+- cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
+- *cp = *s;
+-
+- out_be16(&bdp->cbd_datlen, 1);
+- setbits16(&bdp->cbd_sc, BD_SC_READY);
+-
+- if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+- bdp = bdbase;
+- else
+- bdp++;
+-
+- /* if a LF, also do CR... */
+- if (*s == 10) {
+- while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+- ;
+-
+- cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
+- *cp = 13;
+-
+- out_be16(&bdp->cbd_datlen, 1);
+- setbits16(&bdp->cbd_sc, BD_SC_READY);
+-
+- if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+- bdp = bdbase;
+- else
+- bdp++;
+- }
+- }
+-
+- /*
+- * Finally, Wait for transmitter & holding register to empty
+- * and restore the IER
+- */
+- while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+- ;
+-
+- pinfo->tx_cur = bdp;
++ cpm_uart_early_write(pinfo, s, count);
+
+ if (unlikely(nolock)) {
+ local_irq_restore(flags);
drm-radeon-kms-fix-legacy-lvds-dpms-sequence.patch
drm-radeon-kms-fix-legacy-tv-out-pal-mode.patch
tpm_tis-fix-subsequent-suspend-failures.patch
+ipvs-add-missing-locking-during-connection-table-hashing-and-unhashing.patch
+ipv6-fix-null-reference-in-proxy-neighbor-discovery.patch
+netfilter-ip6t_reject-fix-a-dst-leak-in-ipv6-reject.patch
+scsi-aacraid-eliminate-use-after-free.patch
+md-raid10-fix-null-pointer-dereference-in-fix_read_error.patch
+amd64-agp-probe-unknown-agp-devices-the-right-way.patch
+amd64_edac-fix-syndrome-calculation-on-k8.patch
+perf-resurrect-flat-callchains.patch
+x86-send-a-sigtrap-for-user-icebp-traps.patch
+x86-fix-vsyscall-on-gcc-4.5-with-os.patch
+x86-calgary-increase-max-phb-number.patch
+x86-calgary-limit-the-max-phb-number-to-256.patch
+sched-prevent-compiler-from-optimising-the-sched_avg_update-loop.patch
+sched-fix-over-scheduling-bug.patch
+genirq-deal-with-desc-set_type-changing-desc-chip.patch
+cfq-don-t-allow-queue-merges-for-queues-that-have-no-process-references.patch
+serial-cpm_uart-implement-the-cpm_uart_early_write-function-for-console-poll.patch
--- /dev/null
+From 499a00e92dd9a75395081f595e681629eb1eebad Mon Sep 17 00:00:00 2001
+From: Darrick J. Wong <djwong@us.ibm.com>
+Date: Thu, 24 Jun 2010 14:26:47 -0700
+Subject: x86, Calgary: Increase max PHB number
+
+From: Darrick J. Wong <djwong@us.ibm.com>
+
+commit 499a00e92dd9a75395081f595e681629eb1eebad upstream.
+
+Newer systems (x3950M2) can have 48 PHBs per chassis and 8
+chassis, so bump the limits up and provide an explanation
+of the requirements for each class.
+
+Signed-off-by: Darrick J. Wong <djwong@us.ibm.com>
+Acked-by: Muli Ben-Yehuda <muli@il.ibm.com>
+Cc: Corinna Schultz <cschultz@linux.vnet.ibm.com>
+LKML-Reference: <20100624212647.GI15515@tux1.beaverton.ibm.com>
+[ v2: Fixed build bug, added back PHBS_PER_CALGARY == 4 ]
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/pci-calgary_64.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/pci-calgary_64.c
++++ b/arch/x86/kernel/pci-calgary_64.c
+@@ -103,11 +103,16 @@ int use_calgary __read_mostly = 0;
+ #define PMR_SOFTSTOPFAULT 0x40000000
+ #define PMR_HARDSTOP 0x20000000
+
+-#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
+-#define MAX_NUM_CHASSIS 8 /* max number of chassis */
+-/* MAX_PHB_BUS_NUM is the maximal possible dev->bus->number */
+-#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2)
+-#define PHBS_PER_CALGARY 4
++/*
++ * The maximum PHB bus number.
++ * x3950M2 (rare): 8 chassis, 48 PHBs per chassis = 384
++ * x3950M2: 4 chassis, 48 PHBs per chassis = 192
++ * x3950 (PCIE): 8 chassis, 32 PHBs per chassis = 256
++ * x3950 (PCIX): 8 chassis, 16 PHBs per chassis = 128
++ */
++#define MAX_PHB_BUS_NUM 384
++
++#define PHBS_PER_CALGARY 4
+
+ /* register offsets in Calgary's internal register space */
+ static const unsigned long tar_offsets[] = {
--- /dev/null
+From d596043d71ff0d7b3d0bead19b1d68c55f003093 Mon Sep 17 00:00:00 2001
+From: Darrick J. Wong <djwong@us.ibm.com>
+Date: Wed, 30 Jun 2010 17:45:19 -0700
+Subject: x86, Calgary: Limit the max PHB number to 256
+
+From: Darrick J. Wong <djwong@us.ibm.com>
+
+commit d596043d71ff0d7b3d0bead19b1d68c55f003093 upstream.
+
+The x3950 family can have as many as 256 PCI buses in a single system, so
+change the limits to the maximum. Since there can only be 256 PCI buses in one
+domain, we no longer need the BUG_ON check.
+
+Signed-off-by: Darrick J. Wong <djwong@us.ibm.com>
+LKML-Reference: <20100701004519.GQ15515@tux1.beaverton.ibm.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/pci-calgary_64.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/pci-calgary_64.c
++++ b/arch/x86/kernel/pci-calgary_64.c
+@@ -110,7 +110,7 @@ int use_calgary __read_mostly = 0;
+ * x3950 (PCIE): 8 chassis, 32 PHBs per chassis = 256
+ * x3950 (PCIX): 8 chassis, 16 PHBs per chassis = 128
+ */
+-#define MAX_PHB_BUS_NUM 384
++#define MAX_PHB_BUS_NUM 256
+
+ #define PHBS_PER_CALGARY 4
+
+@@ -1056,8 +1056,6 @@ static int __init calgary_init_one(struc
+ struct iommu_table *tbl;
+ int ret;
+
+- BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
+-
+ bbar = busno_to_bbar(dev->bus->number);
+ ret = calgary_setup_tar(dev, bbar);
+ if (ret)
--- /dev/null
+From 124482935fb7fb9303c8a8ab930149c6a93d9910 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <andi@firstfloor.org>
+Date: Fri, 18 Jun 2010 23:09:00 +0200
+Subject: x86: Fix vsyscall on gcc 4.5 with -Os
+
+From: Andi Kleen <andi@firstfloor.org>
+
+commit 124482935fb7fb9303c8a8ab930149c6a93d9910 upstream.
+
+This fixes the bug where gcc 4.5 with -Os breaks the vsyscall page:
+rdtsc_barrier() needs to be force-inlined, otherwise user space will
+jump into kernel space and kill init.
+
+This also addresses http://gcc.gnu.org/bugzilla/show_bug.cgi?id=44129
+I believe.
+
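+Why inlining matters here (a simplified sketch of the failure mode):
+the vsyscall page is kernel-provided text executed in user mode at a
+fixed address. If gcc emits rdtsc_barrier() out of line, the vsyscall
+code ends up containing
+
+	call rdtsc_barrier	/* target lies in normal kernel text */
+
+which user mode cannot execute, so the calling process is killed;
+since init uses the vsyscall gettimeofday() early, init dies.
+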
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+LKML-Reference: <20100618210859.GA10913@basil.fritz.box>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/system.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/system.h
++++ b/arch/x86/include/asm/system.h
+@@ -451,7 +451,7 @@ void stop_this_cpu(void *dummy);
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+-static inline void rdtsc_barrier(void)
++static __always_inline void rdtsc_barrier(void)
+ {
+ alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+ alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
--- /dev/null
+From a1e80fafc9f0742a1776a0490258cb64912411b0 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <fweisbec@gmail.com>
+Date: Wed, 30 Jun 2010 15:09:06 +0200
+Subject: x86: Send a SIGTRAP for user icebp traps
+
+From: Frederic Weisbecker <fweisbec@gmail.com>
+
+commit a1e80fafc9f0742a1776a0490258cb64912411b0 upstream.
+
+Before we had a generic breakpoint layer, x86 used to send a
+sigtrap for any debug event that happened in userspace,
+except if it was caused by lazy dr7 switches.
+
+Currently we only send such signal for single step or breakpoint
+events.
+
+However, there are three other kinds of debug exceptions:
+
+- debug register access detected: trigger an exception if the
+ next instruction touches the debug registers. We don't use
+ it.
+- task switch, but we don't use tss.
+- icebp/int01 trap. This instruction (0xf1) is undocumented and
+ generates an int 1 exception. Unlike single step through TF
+ flag, it doesn't set the single step origin of the exception
+ in dr6.
+
+icebp used to be reported to userspace with a trap signal, but this
+has been incidentally broken by the new breakpoint code. Re-enable
+it. Since this is the only debug event that doesn't set anything in
+dr6, this is all we have to check.
+
+This fixes a regression in Wine where World of Warcraft got broken,
+as it uses this for software protection checks. Other apps probably
+do too.
+
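+A minimal userspace reproducer (a sketch, not part of the patch; icebp
+is the single opcode byte 0xf1):
+
+	/* with this fix, the program dies with SIGTRAP again */
+	int main(void)
+	{
+		__asm__ volatile(".byte 0xf1");	/* icebp / int1 */
+		return 0;
+	}
+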
+Reported-and-tested-by: Alexandre Julliard <julliard@winehq.org>
+Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Prasad <prasad@linux.vnet.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/traps.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -529,11 +529,20 @@ asmlinkage __kprobes struct pt_regs *syn
+ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+ {
+ struct task_struct *tsk = current;
++ int user_icebp = 0;
+ unsigned long dr6;
+ int si_code;
+
+ get_debugreg(dr6, 6);
+
++ /*
++ * If dr6 has no reason to give us about the origin of this trap,
++ * then it's very likely the result of an icebp/int01 trap.
++ * User wants a sigtrap for that.
++ */
++ if (!dr6 && user_mode(regs))
++ user_icebp = 1;
++
+ /* Catch kmemcheck conditions first of all! */
+ if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
+ return;
+@@ -575,7 +584,7 @@ dotraplinkage void __kprobes do_debug(st
+ regs->flags &= ~X86_EFLAGS_TF;
+ }
+ si_code = get_si_code(tsk->thread.debugreg6);
+- if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS))
++ if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
+ send_sigtrap(tsk, regs, error_code, si_code);
+ preempt_conditional_cli(regs);
+