--- /dev/null
+From 2060c44576c79086ff24718878d7edaa7384a985 Mon Sep 17 00:00:00 2001
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+Date: Fri, 16 Apr 2010 15:36:40 -0400
+Subject: ACPI: EC: Limit burst to 64 bits
+
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+
+commit 2060c44576c79086ff24718878d7edaa7384a985 upstream.
+
+access_bit_width field is u8 in ACPICA, thus 256 value written to it
+becomes 0, causing divide by zero later.
+
+Proper fix would be to remove access_bit_width at all, just because
+we already have access_byte_width, which is access_bit_width / 8.
+Limit access width to 64 bit for now.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=15749
+fixes regression caused by the fix for:
+https://bugzilla.kernel.org/show_bug.cgi?id=14667
+
+Signed-off-by: Alexey Starikovskiy <astarikovskiy@suse.de>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/acpi/acpica/exprep.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/drivers/acpi/acpica/exprep.c
++++ b/drivers/acpi/acpica/exprep.c
+@@ -471,13 +471,18 @@ acpi_status acpi_ex_prep_field_value(str
+ /* allow full data read from EC address space */
+ if (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_EC) {
+- if (obj_desc->common_field.bit_length > 8)
+- obj_desc->common_field.access_bit_width =
+- ACPI_ROUND_UP(obj_desc->common_field.
+- bit_length, 8);
++ if (obj_desc->common_field.bit_length > 8) {
++ unsigned width =
++ ACPI_ROUND_BITS_UP_TO_BYTES(
++ obj_desc->common_field.bit_length);
++ // access_bit_width is u8, don't overflow it
++ if (width > 8)
++ width = 8;
+ obj_desc->common_field.access_byte_width =
+- ACPI_DIV_8(obj_desc->common_field.
+- access_bit_width);
++ width;
++ obj_desc->common_field.access_bit_width =
++ 8 * width;
++ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
--- /dev/null
+From e2595322a3a353a59cecd7f57e7aa421ecb02d12 Mon Sep 17 00:00:00 2001
+From: Daniel T Chen <crimsun@ubuntu.com>
+Date: Sat, 19 Dec 2009 18:19:02 -0500
+Subject: ALSA: hda: Set Front Mic to input vref 50% for Lenovo 3000 Y410
+
+From: Daniel T Chen <crimsun@ubuntu.com>
+
+commit e2595322a3a353a59cecd7f57e7aa421ecb02d12 upstream.
+
+BugLink: https://bugs.launchpad.net/bugs/479373
+
+The OR has verified with hda-verb that the internal microphone needs
+VREF50 set for audible capture.
+
+Signed-off-by: Daniel T Chen <crimsun@ubuntu.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ sound/pci/hda/patch_realtek.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10621,6 +10621,13 @@ static struct hda_verb alc262_lenovo_300
+ {}
+ };
+
++static struct hda_verb alc262_lenovo_3000_init_verbs[] = {
++ /* Front Mic pin: input vref at 50% */
++ {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50},
++ {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
++ {}
++};
++
+ static struct hda_input_mux alc262_fujitsu_capture_source = {
+ .num_items = 3,
+ .items = {
+@@ -11688,7 +11695,8 @@ static struct alc_config_preset alc262_p
+ [ALC262_LENOVO_3000] = {
+ .mixers = { alc262_lenovo_3000_mixer },
+ .init_verbs = { alc262_init_verbs, alc262_EAPD_verbs,
+- alc262_lenovo_3000_unsol_verbs },
++ alc262_lenovo_3000_unsol_verbs,
++ alc262_lenovo_3000_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc262_dac_nids),
+ .dac_nids = alc262_dac_nids,
+ .hp_nid = 0x03,
--- /dev/null
+From pingc@wacom.com Wed Apr 21 15:34:14 2010
+From: Ping Cheng <pingc@wacom.com>
+Date: Mon, 19 Apr 2010 11:10:50 -0700
+Subject: Input: wacom - switch mode upon system resume
+To: "Dmitry Torokhov" <dmitry.torokhov@gmail.com>, "Greg KH" <gregkh@suse.de>
+Cc: <Anton@anikin.name>, <stable@kernel.org>, <stable-commits@vger.kernel.org>
+Message-ID: <6753EB6004AFF34FAA275742C104F95201DF6FE3@wacom-nt10.wacom.com>
+
+From: Ping Cheng <pingc@wacom.com>
+
+commit 014f61504af276ba9d9544d8a7401d8f8526eb73 upstream.
+
+When Wacom devices wake up from a sleep, the switch mode command
+(wacom_query_tablet_data) is needed before wacom_open is called.
+wacom_query_tablet_data should not be executed inside wacom_open
+since wacom_open is called more than once during probe.
+
+Reported-and-tested-by: Anton Anikin <Anton@Anikin.name>
+Signed-off-by: Ping Cheng <pingc@wacom.com>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/input/tablet/wacom_sys.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/input/tablet/wacom_sys.c
++++ b/drivers/input/tablet/wacom_sys.c
+@@ -562,11 +562,15 @@ static int wacom_resume(struct usb_inter
+ int rv;
+
+ mutex_lock(&wacom->lock);
+- if (wacom->open) {
++
++ /* switch to wacom mode first */
++ wacom_query_tablet_data(intf);
++
++ if (wacom->open)
+ rv = usb_submit_urb(wacom->irq, GFP_NOIO);
+- wacom_query_tablet_data(intf);
+- } else
++ else
+ rv = 0;
++
+ mutex_unlock(&wacom->lock);
+
+ return rv;
--- /dev/null
+From mathieu.desnoyers@efficios.com Wed Apr 21 15:39:34 2010
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Date: Tue, 20 Apr 2010 10:33:50 -0400
+Subject: lockdep: fix incorrect percpu usage
+To: Greg KH <greg@kroah.com>
+Cc: Tejun Heo <tj@kernel.org>, Randy Dunlap <randy.dunlap@oracle.com>, Greg Kroah-Hartman <gregkh@suse.de>, Peter Zijlstra <peterz@infradead.org>, stable <stable@kernel.org>, Rusty Russell <rusty@rustcorp.com.au>, linux-kernel@vger.kernel.org, Steven Rostedt <rostedt@goodmis.org>, Eric Dumazet <dada1@cosmosbay.com>, Ingo Molnar <mingo@elte.hu>, Linus Torvalds <torvalds@linux-foundation.org>, Andrew Morton <akpm@linux-foundation.org>
+Message-ID: <20100420143350.GA14622@Krystal>
+Content-Disposition: inline
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+The mainline kernel as of 2.6.34-rc5 is not affected by this problem because
+commit 10fad5e46f6c7bdfb01b1a012380a38e3c6ab346 fixed it by refactoring.
+
+lockdep fix incorrect percpu usage
+
+Should use per_cpu_ptr() to obfuscate the per cpu pointers (RELOC_HIDE is needed
+for per cpu pointers).
+
+git blame points to commit:
+
+lockdep.c: commit 8e18257d29238311e82085152741f0c3aa18b74d
+
+But it's really just moving the code around. But it's enough to say that the
+problems appeared before Jul 19 01:48:54 2007, which brings us back to 2.6.23.
+
+It should be applied to stable 2.6.23.x to 2.6.33.x (or whichever of these
+stable branches are still maintained).
+
+(tested on 2.6.33.1 x86_64)
+
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+CC: Randy Dunlap <randy.dunlap@oracle.com>
+CC: Eric Dumazet <dada1@cosmosbay.com>
+CC: Rusty Russell <rusty@rustcorp.com.au>
+CC: Peter Zijlstra <a.p.zijlstra@chello.nl>
+CC: Tejun Heo <tj@kernel.org>
+CC: Ingo Molnar <mingo@elte.hu>
+CC: Andrew Morton <akpm@linux-foundation.org>
+CC: Linus Torvalds <torvalds@linux-foundation.org>
+CC: Greg Kroah-Hartman <gregkh@suse.de>
+CC: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/lockdep.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/lockdep.c
++++ b/kernel/lockdep.c
+@@ -591,9 +591,9 @@ static int static_obj(void *obj)
+ * percpu var?
+ */
+ for_each_possible_cpu(i) {
+- start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
+- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+- + per_cpu_offset(i);
++ start = (unsigned long) per_cpu_ptr(&__per_cpu_start, i);
++ end = (unsigned long) per_cpu_ptr(&__per_cpu_start
++ + PERCPU_ENOUGH_ROOM, i);
+
+ if ((addr >= start) && (addr < end))
+ return 1;
--- /dev/null
+From c0ce77b8323c1a0d4eeef97caf16c0ea971222a9 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes@sipsolutions.net>
+Date: Wed, 3 Feb 2010 10:22:31 +0100
+Subject: mac80211: fix deferred hardware scan requests
+
+From: Johannes Berg <johannes@sipsolutions.net>
+
+commit c0ce77b8323c1a0d4eeef97caf16c0ea971222a9 upstream.
+
+Reinette found the reason for the warnings that
+happened occasionally when a hw-offloaded scan
+finished; her description of the problem:
+
+ mac80211 will defer the handling of scan requests if it is
+ busy with management work at the time. The scan requests
+ are deferred and run after the work has completed. When
+ this occurs there are currently two problems.
+
+ * The scan request for hardware scan is not fully populated
+ with the band and channels to scan not initialized.
+
+ * When the scan is queued the state is not correctly updated
+ to reflect that a scan is in progress. The problem here is
+ that when the driver completes the scan and calls
+ ieee80211_scan_completed() a warning will be triggered
+ since mac80211 was not aware that a scan was in progress.
+
+The reason is that the queued scan work will start
+the hw scan right away when the hw_scan_req struct
+has already been allocated. However, in the first
+pass it will not have been filled, which happens
+at the same time as setting the bits. To fix this,
+simply move the allocation after the pending work
+test as well, so that the first iteration of the
+scan work will call __ieee80211_start_scan() even
+in the hardware scan case.
+
+Bug-identified-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Cc: Chase Douglas <chase.douglas@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/scan.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -408,6 +408,16 @@ static int __ieee80211_start_scan(struct
+ if (local->scan_req)
+ return -EBUSY;
+
++ if (req != local->int_scan_req &&
++ sdata->vif.type == NL80211_IFTYPE_STATION &&
++ !list_empty(&ifmgd->work_list)) {
++ /* actually wait for the work it's doing to finish/time out */
++ set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
++ local->scan_req = req;
++ local->scan_sdata = sdata;
++ return 0;
++ }
++
+ if (local->ops->hw_scan) {
+ u8 *ies;
+ int ielen;
+@@ -428,14 +438,6 @@ static int __ieee80211_start_scan(struct
+ local->scan_req = req;
+ local->scan_sdata = sdata;
+
+- if (req != local->int_scan_req &&
+- sdata->vif.type == NL80211_IFTYPE_STATION &&
+- !list_empty(&ifmgd->work_list)) {
+- /* actually wait for the work it's doing to finish/time out */
+- set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
+- return 0;
+- }
+-
+ if (local->ops->hw_scan)
+ __set_bit(SCAN_HW_SCANNING, &local->scanning);
+ else
--- /dev/null
+From neilb@suse.de Wed Apr 21 15:45:49 2010
+From: Neil Brown <neilb@suse.de>
+Date: Wed, 31 Mar 2010 12:07:16 +1100
+Subject: md: deal with merge_bvec_fn in component devices better.
+To: stable@kernel.org
+Message-ID: <20100331120716.14054609@notabene.brown>
+
+From: NeilBrown <neilb@suse.de>
+
+commit 627a2d3c29427637f4c5d31ccc7fcbd8d312cd71 upstream.
+
+If a component device has a merge_bvec_fn then as we never call it
+we must ensure we never need to. Currently this is done by setting
+max_sector to 1 PAGE, however this does not stop a bio being created
+with several sub-page iovecs that would violate the merge_bvec_fn.
+
+So instead set max_phys_segments to 1 and set the segment boundary to the
+same as a page boundary to ensure there is only ever one single-page
+segment of IO requested at a time.
+
+This can particularly be an issue when 'xen' is used as it is
+known to submit multiple small buffers in a single bio.
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/linear.c | 12 +++++++-----
+ drivers/md/multipath.c | 20 ++++++++++++--------
+ drivers/md/raid0.c | 13 +++++++------
+ drivers/md/raid10.c | 28 +++++++++++++++++-----------
+ 4 files changed, 43 insertions(+), 30 deletions(-)
+
+--- a/drivers/md/linear.c
++++ b/drivers/md/linear.c
+@@ -172,12 +172,14 @@ static linear_conf_t *linear_conf(mddev_
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ /* as we don't honour merge_bvec_fn, we must never risk
+- * violating it, so limit ->max_sector to one PAGE, as
+- * a one page request is never in violation.
++ * violating it, so limit max_phys_segments to 1 lying within
++ * a single page.
+ */
+- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+
+ conf->array_sectors += rdev->sectors;
+ cnt++;
+--- a/drivers/md/multipath.c
++++ b/drivers/md/multipath.c
+@@ -301,14 +301,16 @@ static int multipath_add_disk(mddev_t *m
+ rdev->data_offset << 9);
+
+ /* as we don't honour merge_bvec_fn, we must never risk
+- * violating it, so limit ->max_sector to one PAGE, as
+- * a one page request is never in violation.
++ * violating it, so limit ->max_phys_segments to one, lying
++ * within a single page.
+ * (Note: it is very unlikely that a device with
+ * merge_bvec_fn will be involved in multipath.)
+ */
+- if (q->merge_bvec_fn &&
+- queue_max_sectors(q) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
++ if (q->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+
+ conf->working_disks++;
+ mddev->degraded--;
+@@ -476,9 +478,11 @@ static int multipath_run (mddev_t *mddev
+ /* as we don't honour merge_bvec_fn, we must never risk
+ * violating it, not that we ever expect a device with
+ * a merge_bvec_fn to be involved in multipath */
+- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+
+ if (!test_bit(Faulty, &rdev->flags))
+ conf->working_disks++;
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -176,14 +176,15 @@ static int create_strip_zones(mddev_t *m
+ disk_stack_limits(mddev->gendisk, rdev1->bdev,
+ rdev1->data_offset << 9);
+ /* as we don't honour merge_bvec_fn, we must never risk
+- * violating it, so limit ->max_sector to one PAGE, as
+- * a one page request is never in violation.
++ * violating it, so limit ->max_phys_segments to 1, lying within
++ * a single page.
+ */
+
+- if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
+- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+-
++ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+ if (!smallest || (rdev1->sectors < smallest->sectors))
+ smallest = rdev1;
+ cnt++;
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1155,13 +1155,17 @@ static int raid10_add_disk(mddev_t *mdde
+
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+- /* as we don't honour merge_bvec_fn, we must never risk
+- * violating it, so limit ->max_sector to one PAGE, as
+- * a one page request is never in violation.
++ /* as we don't honour merge_bvec_fn, we must
++ * never risk violating it, so limit
++ * ->max_phys_segments to one lying with a single
++ * page, as a one page request is never in
++ * violation.
+ */
+- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+
+ p->head_position = 0;
+ rdev->raid_disk = mirror;
+@@ -2155,12 +2159,14 @@ static int run(mddev_t *mddev)
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ /* as we don't honour merge_bvec_fn, we must never risk
+- * violating it, so limit ->max_sector to one PAGE, as
+- * a one page request is never in violation.
++ * violating it, so limit max_phys_segments to 1 lying
++ * within a single page.
+ */
+- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
++ blk_queue_max_phys_segments(mddev->queue, 1);
++ blk_queue_segment_boundary(mddev->queue,
++ PAGE_CACHE_SIZE - 1);
++ }
+
+ disk->head_position = 0;
+ }
--- /dev/null
+From mathieu.desnoyers@efficios.com Wed Apr 21 15:42:43 2010
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Date: Tue, 20 Apr 2010 10:38:10 -0400
+Subject: module: fix __module_ref_addr()
+To: Greg KH <greg@kroah.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>, Randy Dunlap <randy.dunlap@oracle.com>, Greg Kroah-Hartman <gregkh@suse.de>, Peter Zijlstra <a.p.zijlstra@chello.nl>, stable <stable@kernel.org>, Rusty Russell <rusty@rustcorp.com.au>, linux-kernel@vger.kernel.org, Eric Dumazet <dada1@cosmosbay.com>, Tejun Heo <tj@kernel.org>, Ingo Molnar <mingo@elte.hu>, Linus Torvalds <torvalds@linux-foundation.org>, Andrew Morton <akpm@linux-foundation.org>
+Message-ID: <20100420143810.GC14622@Krystal>
+Content-Disposition: inline
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+The __module_ref_addr() problem disappears in 2.6.34-rc kernels because these
+percpu accesses were re-factored.
+
+__module_ref_addr() should use per_cpu_ptr() to obfuscate the pointer
+(RELOC_HIDE is needed for per cpu pointers).
+
+This non-standard per-cpu pointer use has been introduced by commit
+720eba31f47aeade8ec130ca7f4353223c49170f
+
+It causes a NULL pointer exception on some configurations when CONFIG_TRACING is
+enabled on 2.6.33. This patch fixes the problem (acknowledged by Randy who
+reported the bug).
+
+It did not appear to hurt previously because most of the accesses were done
+through local_inc, which probably obfuscated the access enough that no compiler
+optimizations were done. But with local_read() done when CONFIG_TRACING is
+active, this becomes a problem. Non-CONFIG_TRACING is probably affected as well
+(module.c contains local_set and local_read that use __module_ref_addr()), but I
+guess nobody noticed because we've been lucky enough that the compiler did not
+generate the inappropriate optimization pattern there.
+
+This patch should be queued for the 2.6.29.x through 2.6.33.x stable branches.
+(tested on 2.6.33.1 x86_64)
+
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Tested-by: Randy Dunlap <randy.dunlap@oracle.com>
+CC: Eric Dumazet <dada1@cosmosbay.com>
+CC: Rusty Russell <rusty@rustcorp.com.au>
+CC: Peter Zijlstra <a.p.zijlstra@chello.nl>
+CC: Tejun Heo <tj@kernel.org>
+CC: Ingo Molnar <mingo@elte.hu>
+CC: Andrew Morton <akpm@linux-foundation.org>
+CC: Linus Torvalds <torvalds@linux-foundation.org>
+CC: Greg Kroah-Hartman <gregkh@suse.de>
+CC: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/module.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -455,7 +455,7 @@ void symbol_put_addr(void *addr);
+ static inline local_t *__module_ref_addr(struct module *mod, int cpu)
+ {
+ #ifdef CONFIG_SMP
+- return (local_t *) (mod->refptr + per_cpu_offset(cpu));
++ return (local_t *) per_cpu_ptr(mod->refptr, cpu);
+ #else
+ return &mod->ref;
+ #endif
--- /dev/null
+From mathieu.desnoyers@efficios.com Wed Apr 21 15:38:07 2010
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Date: Tue, 20 Apr 2010 10:34:57 -0400
+Subject: modules: fix incorrect percpu usage
+To: Greg KH <greg@kroah.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>, Randy Dunlap <randy.dunlap@oracle.com>, Greg Kroah-Hartman <gregkh@suse.de>, Peter Zijlstra <a.p.zijlstra@chello.nl>, Rusty Russell <rusty@rustcorp.com.au>, linux-kernel@vger.kernel.org, Andrew Morton <akpm@linux-foundation.org>, Eric Dumazet <dada1@cosmosbay.com>, Tejun Heo <tj@kernel.org>, Ingo Molnar <mingo@elte.hu>, Linus Torvalds <torvalds@linux-foundation.org>, stable <stable@kernel.org>
+Message-ID: <20100420143457.GB14622@Krystal>
+Content-Disposition: inline
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+Mainline does not need this fix, as commit
+259354deaaf03d49a02dbb9975d6ec2a54675672 fixed the problem by refactoring.
+
+Should use per_cpu_ptr() to obfuscate the per cpu pointers (RELOC_HIDE is needed
+for per cpu pointers).
+
+Introduced by commit:
+
+module.c: commit 6b588c18f8dacfa6d7957c33c5ff832096e752d3
+
+This patch should be queued for the stable branch, for kernels 2.6.29.x to
+2.6.33.x. (tested on 2.6.33.1 x86_64)
+
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+CC: Randy Dunlap <randy.dunlap@oracle.com>
+CC: Eric Dumazet <dada1@cosmosbay.com>
+CC: Rusty Russell <rusty@rustcorp.com.au>
+CC: Peter Zijlstra <a.p.zijlstra@chello.nl>
+CC: Tejun Heo <tj@kernel.org>
+CC: Ingo Molnar <mingo@elte.hu>
+CC: Andrew Morton <akpm@linux-foundation.org>
+CC: Linus Torvalds <torvalds@linux-foundation.org>
+CC: Greg Kroah-Hartman <gregkh@suse.de>
+CC: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/module.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -555,7 +555,7 @@ static void percpu_modcopy(void *pcpudes
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+- memcpy(pcpudest + per_cpu_offset(cpu), from, size);
++ memcpy(per_cpu_ptr(pcpudest, cpu), from, size);
+ }
+
+ #else /* ... !CONFIG_SMP */
--- /dev/null
+From 301e99ce4a2f42a317129230fd42e6cd874c64b0 Mon Sep 17 00:00:00 2001
+From: Neil Brown <neilb@suse.de>
+Date: Sun, 28 Feb 2010 22:01:05 -0500
+Subject: nfsd: ensure sockets are closed on error
+
+From: Neil Brown <neilb@suse.de>
+
+commit 301e99ce4a2f42a317129230fd42e6cd874c64b0 upstream.
+
+One of the changes in commit d7979ae4a "svc: Move close processing to a
+single place" is:
+
+ err_delete:
+- svc_delete_socket(svsk);
++ set_bit(SK_CLOSE, &svsk->sk_flags);
+ return -EAGAIN;
+
+This is insufficient. The recvfrom methods must always call
+svc_xprt_received on completion so that the socket gets re-queued if
+there is any more work to do. This particular path did not make that
+call because it actually destroyed the svsk, making requeue pointless.
+When the svc_delete_socket was changed to just set a bit, we should have
+added a call to svc_xprt_received.
+
+This is the problem that b0401d7253 attempted to fix, incorrectly.
+
+Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sunrpc/svcsock.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -968,6 +968,7 @@ static int svc_tcp_recv_record(struct sv
+ return len;
+ err_delete:
+ set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
++ svc_xprt_received(&svsk->sk_xprt);
+ err_again:
+ return -EAGAIN;
+ }
--- /dev/null
+From dc83d6e27fa80babe31c80aa8568f125f72edf57 Mon Sep 17 00:00:00 2001
+From: J. Bruce Fields <bfields@citi.umich.edu>
+Date: Tue, 20 Oct 2009 18:51:34 -0400
+Subject: nfsd4: don't try to map gid's in generic rpc code
+
+From: J. Bruce Fields <bfields@citi.umich.edu>
+
+commit dc83d6e27fa80babe31c80aa8568f125f72edf57 upstream.
+
+For nfsd we provide users the option of mapping uid's to server-side
+supplementary group lists. That makes sense for nfsd, but not
+necessarily for other rpc users (such as the callback client).
+
+So move that lookup to svcauth_unix_set_client, which is a
+program-specific method.
+
+Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sunrpc/svcauth_unix.c | 53 ++++++++++++++++++++++++++--------------------
+ 1 file changed, 30 insertions(+), 23 deletions(-)
+
+--- a/net/sunrpc/svcauth_unix.c
++++ b/net/sunrpc/svcauth_unix.c
+@@ -655,23 +655,25 @@ static struct unix_gid *unix_gid_lookup(
+ return NULL;
+ }
+
+-static int unix_gid_find(uid_t uid, struct group_info **gip,
+- struct svc_rqst *rqstp)
++static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
+ {
+- struct unix_gid *ug = unix_gid_lookup(uid);
++ struct unix_gid *ug;
++ struct group_info *gi;
++ int ret;
++
++ ug = unix_gid_lookup(uid);
+ if (!ug)
+- return -EAGAIN;
+- switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) {
++ return ERR_PTR(-EAGAIN);
++ ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
++ switch (ret) {
+ case -ENOENT:
+- *gip = NULL;
+- return 0;
++ return ERR_PTR(-ENOENT);
+ case 0:
+- *gip = ug->gi;
+- get_group_info(*gip);
++ gi = get_group_info(ug->gi);
+ cache_put(&ug->h, &unix_gid_cache);
+- return 0;
++ return gi;
+ default:
+- return -EAGAIN;
++ return ERR_PTR(-EAGAIN);
+ }
+ }
+
+@@ -681,6 +683,8 @@ svcauth_unix_set_client(struct svc_rqst
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6, sin6_storage;
+ struct ip_map *ipm;
++ struct group_info *gi;
++ struct svc_cred *cred = &rqstp->rq_cred;
+
+ switch (rqstp->rq_addr.ss_family) {
+ case AF_INET:
+@@ -722,6 +726,17 @@ svcauth_unix_set_client(struct svc_rqst
+ ip_map_cached_put(rqstp, ipm);
+ break;
+ }
++
++ gi = unix_gid_find(cred->cr_uid, rqstp);
++ switch (PTR_ERR(gi)) {
++ case -EAGAIN:
++ return SVC_DROP;
++ case -ENOENT:
++ break;
++ default:
++ put_group_info(cred->cr_group_info);
++ cred->cr_group_info = gi;
++ }
+ return SVC_OK;
+ }
+
+@@ -818,19 +833,11 @@ svcauth_unix_accept(struct svc_rqst *rqs
+ slen = svc_getnl(argv); /* gids length */
+ if (slen > 16 || (len -= (slen + 2)*4) < 0)
+ goto badcred;
+- if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp)
+- == -EAGAIN)
++ cred->cr_group_info = groups_alloc(slen);
++ if (cred->cr_group_info == NULL)
+ return SVC_DROP;
+- if (cred->cr_group_info == NULL) {
+- cred->cr_group_info = groups_alloc(slen);
+- if (cred->cr_group_info == NULL)
+- return SVC_DROP;
+- for (i = 0; i < slen; i++)
+- GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
+- } else {
+- for (i = 0; i < slen ; i++)
+- svc_getnl(argv);
+- }
++ for (i = 0; i < slen; i++)
++ GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
+ if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
+ *authp = rpc_autherr_badverf;
+ return SVC_DENIED;
--- /dev/null
+From f5822754ea006563e1bf0a1f43faaad49c0d8bb2 Mon Sep 17 00:00:00 2001
+From: J. Bruce Fields <bfields@citi.umich.edu>
+Date: Sun, 28 Feb 2010 16:32:51 -0500
+Subject: Revert "sunrpc: fix peername failed on closed listener"
+
+From: J. Bruce Fields <bfields@citi.umich.edu>
+
+commit f5822754ea006563e1bf0a1f43faaad49c0d8bb2 upstream.
+
+This reverts commit b292cf9ce70d221c3f04ff62db5ab13d9a249ca8. The
+commit that it attempted to patch up,
+b0401d725334a94d57335790b8ac2404144748ee, was fundamentally wrong, and
+will also be reverted.
+
+Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sunrpc/svc_xprt.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -711,8 +711,7 @@ int svc_recv(struct svc_rqst *rqstp, lon
+ spin_unlock_bh(&pool->sp_lock);
+
+ len = 0;
+- if (test_bit(XPT_LISTENER, &xprt->xpt_flags) &&
+- !test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
++ if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+ struct svc_xprt *newxpt;
+ newxpt = xprt->xpt_ops->xpo_accept(xprt);
+ if (newxpt) {
--- /dev/null
+From 1b644b6e6f6160ae35ce4b52c2ca89ed3e356e18 Mon Sep 17 00:00:00 2001
+From: J. Bruce Fields <bfields@citi.umich.edu>
+Date: Sun, 28 Feb 2010 16:33:31 -0500
+Subject: Revert "sunrpc: move the close processing after do recvfrom method"
+
+From: J. Bruce Fields <bfields@citi.umich.edu>
+
+commit 1b644b6e6f6160ae35ce4b52c2ca89ed3e356e18 upstream.
+
+This reverts commit b0401d725334a94d57335790b8ac2404144748ee, which
+moved svc_delete_xprt() outside of XPT_BUSY, and allowed it to be called
+after svc_xprt_received(), removing its last reference and destroying it
+after it had already been queued for future processing.
+
+Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sunrpc/svc_xprt.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -711,7 +711,10 @@ int svc_recv(struct svc_rqst *rqstp, lon
+ spin_unlock_bh(&pool->sp_lock);
+
+ len = 0;
+- if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
++ if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
++ dprintk("svc_recv: found XPT_CLOSE\n");
++ svc_delete_xprt(xprt);
++ } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+ struct svc_xprt *newxpt;
+ newxpt = xprt->xpt_ops->xpo_accept(xprt);
+ if (newxpt) {
+@@ -737,7 +740,7 @@ int svc_recv(struct svc_rqst *rqstp, lon
+ svc_xprt_received(newxpt);
+ }
+ svc_xprt_received(xprt);
+- } else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
++ } else {
+ dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
+ rqstp, pool->sp_id, xprt,
+ atomic_read(&xprt->xpt_ref.refcount));
+@@ -750,11 +753,6 @@ int svc_recv(struct svc_rqst *rqstp, lon
+ dprintk("svc: got len=%d\n", len);
+ }
+
+- if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
+- dprintk("svc_recv: found XPT_CLOSE\n");
+- svc_delete_xprt(xprt);
+- }
+-
+ /* No data, incomplete (TCP) read, or accept() */
+ if (len == 0 || len == -EAGAIN) {
+ rqstp->rq_res.len = 0;
perf_events-x86-implement-intel-westmere-nehalem-ex-support.patch
acpi-ec-allow-multibyte-access-to-ec.patch
md-raid5-allow-for-more-than-2-31-chunks.patch
+acpi-ec-limit-burst-to-64-bits.patch
+input-wacom-switch-mode-upon-system-resume.patch
+modules-fix-incorrect-percpu-usage.patch
+lockdep-fix-incorrect-percpu-usage.patch
+module-fix-__module_ref_addr.patch
+md-deal-with-merge_bvec_fn-in-component-devices-better.patch
+nfsd4-don-t-try-to-map-gid-s-in-generic-rpc-code.patch
+revert-sunrpc-fix-peername-failed-on-closed-listener.patch
+revert-sunrpc-move-the-close-processing-after-do-recvfrom-method.patch
+nfsd-ensure-sockets-are-closed-on-error.patch
+alsa-hda-set-front-mic-to-input-vref-50-for-lenovo-3000-y410.patch
+mac80211-fix-deferred-hardware-scan-requests.patch