--- /dev/null
+From de3584bd62d87b4c250129fbc46ca52c80330add Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Mon, 21 Nov 2011 10:44:00 +0100
+Subject: cfg80211: fix regulatory NULL dereference
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit de3584bd62d87b4c250129fbc46ca52c80330add upstream.
+
+By the time userspace returns with a response to
+the regulatory domain request, the wiphy causing
+the request might have gone away. If this is so,
+reject the update but mark the request as having
+been processed anyway.
+
+Cc: Luis R. Rodriguez <lrodriguez@qca.qualcomm.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/wireless/reg.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -2026,6 +2026,10 @@ static int __set_regdom(const struct iee
+ }
+
+ request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
++ if (!request_wiphy) {
++ reg_set_request_processed();
++ return -ENODEV;
++ }
+
+ if (!last_request->intersect) {
+ int r;
--- /dev/null
+From 884a45d964dd395eda945842afff5e16bcaedf56 Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.cz>
+Date: Tue, 22 Nov 2011 07:44:47 -0800
+Subject: cgroup_freezer: fix freezing groups with stopped tasks
+
+From: Michal Hocko <mhocko@suse.cz>
+
+commit 884a45d964dd395eda945842afff5e16bcaedf56 upstream.
+
+2d3cbf8b (cgroup_freezer: update_freezer_state() does incorrect state
+transitions) removed is_task_frozen_enough and replaced it with a simple
+frozen call. This, however, breaks freezing for a group with stopped tasks
+because those cannot be frozen and so the group remains in CGROUP_FREEZING
+state (update_if_frozen doesn't count stopped tasks) and never reaches
+CGROUP_FROZEN.
+
+Let's add is_task_frozen_enough back and use it at the original locations
+(update_if_frozen and try_to_freeze_cgroup). Semantically we consider
+stopped tasks as frozen enough so we should consider both cases when
+testing frozen tasks.
+
+Testcase:
+mkdir /dev/freezer
+mount -t cgroup -o freezer none /dev/freezer
+mkdir /dev/freezer/foo
+sleep 1h &
+pid=$!
+kill -STOP $pid
+echo $pid > /dev/freezer/foo/tasks
+echo FROZEN > /dev/freezer/foo/freezer.state
+while true
+do
+ cat /dev/freezer/foo/freezer.state
+ [ "`cat /dev/freezer/foo/freezer.state`" = "FROZEN" ] && break
+ sleep 1
+done
+echo OK
+
+Signed-off-by: Michal Hocko <mhocko@suse.cz>
+Acked-by: Li Zefan <lizf@cn.fujitsu.com>
+Cc: Tomasz Buchert <tomasz.buchert@inria.fr>
+Cc: Paul Menage <paul@paulmenage.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Tejun Heo <htejun@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/cgroup_freezer.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/kernel/cgroup_freezer.c
++++ b/kernel/cgroup_freezer.c
+@@ -153,6 +153,13 @@ static void freezer_destroy(struct cgrou
+ kfree(cgroup_freezer(cgroup));
+ }
+
++/* task is frozen or will freeze immediately when next it gets woken */
++static bool is_task_frozen_enough(struct task_struct *task)
++{
++ return frozen(task) ||
++ (task_is_stopped_or_traced(task) && freezing(task));
++}
++
+ /*
+ * The call to cgroup_lock() in the freezer.state write method prevents
+ * a write to that file racing against an attach, and hence the
+@@ -231,7 +238,7 @@ static void update_if_frozen(struct cgro
+ cgroup_iter_start(cgroup, &it);
+ while ((task = cgroup_iter_next(cgroup, &it))) {
+ ntotal++;
+- if (frozen(task))
++ if (is_task_frozen_enough(task))
+ nfrozen++;
+ }
+
+@@ -284,7 +291,7 @@ static int try_to_freeze_cgroup(struct c
+ while ((task = cgroup_iter_next(cgroup, &it))) {
+ if (!freeze_task(task, true))
+ continue;
+- if (frozen(task))
++ if (is_task_frozen_enough(task))
+ continue;
+ if (!freezing(task) && !freezer_should_skip(task))
+ num_cant_freeze_now++;
--- /dev/null
+From d65670a78cdbfae94f20a9e05ec705871d7cdf2b Mon Sep 17 00:00:00 2001
+From: John Stultz <john.stultz@linaro.org>
+Date: Mon, 31 Oct 2011 17:06:35 -0400
+Subject: clocksource: Avoid selecting mult values that might overflow when adjusted
+
+From: John Stultz <john.stultz@linaro.org>
+
+commit d65670a78cdbfae94f20a9e05ec705871d7cdf2b upstream.
+
+For some frequencies, the clocks_calc_mult_shift() function will
+unfortunately select mult values very close to 0xffffffff. This
+has the potential to overflow when NTP adjusts the clock, adding
+to the mult value.
+
+This patch adds a clocksource.maxadj value, which provides
+an approximation of an 11% adjustment(NTP limits adjustments to
+500ppm and the tick adjustment is limited to 10%), which could
+be made to the clocksource.mult value. This is then used to both
+check that the current mult value won't overflow/underflow, as
+well as warning us if the timekeeping_adjust() code pushes over
+that 11% boundary.
+
+v2: Fix max_adjustment calculation, and improve WARN_ONCE
+messages.
+
+v3: Don't warn before maxadj has actually been set
+
+CC: Yong Zhang <yong.zhang0@gmail.com>
+CC: David Daney <ddaney.cavm@gmail.com>
+CC: Thomas Gleixner <tglx@linutronix.de>
+CC: Chen Jie <chenj@lemote.com>
+CC: zhangfx <zhangfx@lemote.com>
+Reported-by: Chen Jie <chenj@lemote.com>
+Reported-by: zhangfx <zhangfx@lemote.com>
+Tested-by: Yong Zhang <yong.zhang0@gmail.com>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/clocksource.h | 3 +-
+ kernel/time/clocksource.c | 58 ++++++++++++++++++++++++++++++++++++--------
+ kernel/time/timekeeping.c | 7 +++++
+ 3 files changed, 57 insertions(+), 11 deletions(-)
+
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -156,6 +156,7 @@ extern u64 timecounter_cyc2time(struct t
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
++ * @maxadj maximum adjustment value to mult (~11%)
+ * @flags: flags describing special properties
+ * @archdata: arch-specific data
+ * @suspend: suspend function for the clocksource, if necessary
+@@ -172,7 +173,7 @@ struct clocksource {
+ u32 mult;
+ u32 shift;
+ u64 max_idle_ns;
+-
++ u32 maxadj;
+ #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
+ struct arch_clocksource_data archdata;
+ #endif
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -494,6 +494,22 @@ void clocksource_touch_watchdog(void)
+ }
+
+ /**
++ * clocksource_max_adjustment- Returns max adjustment amount
++ * @cs: Pointer to clocksource
++ *
++ */
++static u32 clocksource_max_adjustment(struct clocksource *cs)
++{
++ u64 ret;
++ /*
++ * We won't try to correct for more then 11% adjustments (110,000 ppm),
++ */
++ ret = (u64)cs->mult * 11;
++ do_div(ret,100);
++ return (u32)ret;
++}
++
++/**
+ * clocksource_max_deferment - Returns max time the clocksource can be deferred
+ * @cs: Pointer to clocksource
+ *
+@@ -505,25 +521,28 @@ static u64 clocksource_max_deferment(str
+ /*
+ * Calculate the maximum number of cycles that we can pass to the
+ * cyc2ns function without overflowing a 64-bit signed result. The
+- * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
+- * is equivalent to the below.
+- * max_cycles < (2^63)/cs->mult
+- * max_cycles < 2^(log2((2^63)/cs->mult))
+- * max_cycles < 2^(log2(2^63) - log2(cs->mult))
+- * max_cycles < 2^(63 - log2(cs->mult))
+- * max_cycles < 1 << (63 - log2(cs->mult))
++ * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
++ * which is equivalent to the below.
++ * max_cycles < (2^63)/(cs->mult + cs->maxadj)
++ * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
++ * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
++ * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
++ * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
+ * Please note that we add 1 to the result of the log2 to account for
+ * any rounding errors, ensure the above inequality is satisfied and
+ * no overflow will occur.
+ */
+- max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
++ max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
+
+ /*
+ * The actual maximum number of cycles we can defer the clocksource is
+ * determined by the minimum of max_cycles and cs->mask.
++ * Note: Here we subtract the maxadj to make sure we don't sleep for
++ * too long if there's a large negative adjustment.
+ */
+ max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
+- max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
++ max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
++ cs->shift);
+
+ /*
+ * To ensure that the clocksource does not wrap whilst we are idle,
+@@ -642,7 +661,6 @@ static void clocksource_enqueue(struct c
+ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
+ {
+ u64 sec;
+-
+ /*
+ * Calc the maximum number of seconds which we can run before
+ * wrapping around. For clocksources which have a mask > 32bit
+@@ -663,6 +681,20 @@ void __clocksource_updatefreq_scale(stru
+
+ clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
+ NSEC_PER_SEC / scale, sec * scale);
++
++ /*
++ * for clocksources that have large mults, to avoid overflow.
++ * Since mult may be adjusted by ntp, add an safety extra margin
++ *
++ */
++ cs->maxadj = clocksource_max_adjustment(cs);
++ while ((cs->mult + cs->maxadj < cs->mult)
++ || (cs->mult - cs->maxadj > cs->mult)) {
++ cs->mult >>= 1;
++ cs->shift--;
++ cs->maxadj = clocksource_max_adjustment(cs);
++ }
++
+ cs->max_idle_ns = clocksource_max_deferment(cs);
+ }
+ EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
+@@ -703,6 +735,12 @@ EXPORT_SYMBOL_GPL(__clocksource_register
+ */
+ int clocksource_register(struct clocksource *cs)
+ {
++ /* calculate max adjustment for given mult/shift */
++ cs->maxadj = clocksource_max_adjustment(cs);
++ WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
++ "Clocksource %s might overflow on 11%% adjustment\n",
++ cs->name);
++
+ /* calculate max idle time permitted for this clocksource */
+ cs->max_idle_ns = clocksource_max_deferment(cs);
+
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -824,6 +824,13 @@ static void timekeeping_adjust(s64 offse
+ } else
+ return;
+
++ WARN_ONCE(timekeeper.clock->maxadj &&
++ (timekeeper.mult + adj > timekeeper.clock->mult +
++ timekeeper.clock->maxadj),
++ "Adjusting %s more then 11%% (%ld vs %ld)\n",
++ timekeeper.clock->name, (long)timekeeper.mult + adj,
++ (long)timekeeper.clock->mult +
++ timekeeper.clock->maxadj);
+ timekeeper.mult += adj;
+ timekeeper.xtime_interval += interval;
+ timekeeper.xtime_nsec -= offset;
--- /dev/null
+From 4c81f045c0bd2cbb78cc6446a4cd98038fe11a2e Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Thu, 24 Nov 2011 19:22:24 -0500
+Subject: ext4: fix racy use-after-free in ext4_end_io_dio()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 4c81f045c0bd2cbb78cc6446a4cd98038fe11a2e upstream.
+
+ext4_end_io_dio() queues io_end->work and then clears iocb->private;
+however, io_end->work calls aio_complete() which frees the iocb
+object. If that slab object gets reallocated, then ext4_end_io_dio()
+can end up clearing someone else's iocb->private, this use-after-free
+can cause a leak of a struct ext4_io_end_t structure.
+
+Detected and tested with slab poisoning.
+
+[ Note: Can also reproduce using 12 fio's against 12 file systems with the
+ following configuration file:
+
+ [global]
+ direct=1
+ ioengine=libaio
+ iodepth=1
+ bs=4k
+ ba=4k
+ size=128m
+
+ [create]
+ filename=${TESTDIR}
+ rw=write
+
+ -- tytso ]
+
+Google-Bug-Id: 5354697
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Reported-by: Kent Overstreet <koverstreet@google.com>
+Tested-by: Kent Overstreet <koverstreet@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2656,8 +2656,8 @@ out:
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+
+ /* queue the work to convert unwritten extents to written */
+- queue_work(wq, &io_end->work);
+ iocb->private = NULL;
++ queue_work(wq, &io_end->work);
+
+ /* XXX: probably should move into the real I/O completion handler */
+ inode_dio_done(inode);
--- /dev/null
+From 52553ddffad76ccf192d4dd9ce88d5818f57f62a Mon Sep 17 00:00:00 2001
+From: Edward Donovan <edward.donovan@numble.net>
+Date: Sun, 27 Nov 2011 23:07:34 -0500
+Subject: genirq: fix regression in irqfixup, irqpoll
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Edward Donovan <edward.donovan@numble.net>
+
+commit 52553ddffad76ccf192d4dd9ce88d5818f57f62a upstream.
+
+Commit fa27271bc8d2("genirq: Fixup poll handling") introduced a
+regression that broke irqfixup/irqpoll for some hardware configurations.
+
+Amidst reorganizing 'try_one_irq', that patch removed a test that
+checked for 'action->handler' returning IRQ_HANDLED, before acting on
+the interrupt. Restoring this test back returns the functionality lost
+since 2.6.39. In the current set of tests, after 'action' is set, it
+must precede '!action->next' to take effect.
+
+With this and my previous patch to irq/spurious.c, c75d720fca8a, all
+IRQ regressions that I have encountered are fixed.
+
+Signed-off-by: Edward Donovan <edward.donovan@numble.net>
+Reported-and-tested-by: Rogério Brito <rbrito@ime.usp.br>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/irq/spurious.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct i
+ */
+ action = desc->action;
+ if (!action || !(action->flags & IRQF_SHARED) ||
+- (action->flags & __IRQF_TIMER) || !action->next)
++ (action->flags & __IRQF_TIMER) ||
++ (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
++ !action->next)
+ goto out;
+
+ /* Already running on another processor */
--- /dev/null
+From 27c9cd7e601632b3794e1c3344d37b86917ffb43 Mon Sep 17 00:00:00 2001
+From: Jeff Ohlstein <johlstei@codeaurora.org>
+Date: Fri, 18 Nov 2011 15:47:10 -0800
+Subject: hrtimer: Fix extra wakeups from __remove_hrtimer()
+
+From: Jeff Ohlstein <johlstei@codeaurora.org>
+
+commit 27c9cd7e601632b3794e1c3344d37b86917ffb43 upstream.
+
+__remove_hrtimer() attempts to reprogram the clockevent device when
+the timer being removed is the next to expire. However,
+__remove_hrtimer() reprograms the clockevent *before* removing the
+timer from the timerqueue and thus when hrtimer_force_reprogram()
+finds the next timer to expire it finds the timer we're trying to
+remove.
+
+This is especially noticeable when the system switches to NOHz mode
+and the system tick is removed. The timer tick is removed from the
+system but the clockevent is programmed to wakeup in another HZ
+anyway.
+
+Silence the extra wakeup by removing the timer from the timerqueue
+before calling hrtimer_force_reprogram() so that we actually program
+the clockevent for the next timer to expire.
+
+This was broken by 998adc3 "hrtimers: Convert hrtimers to use
+timerlist infrastructure".
+
+Signed-off-by: Jeff Ohlstein <johlstei@codeaurora.org>
+Link: http://lkml.kernel.org/r/1321660030-8520-1-git-send-email-johlstei@codeaurora.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/hrtimer.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrti
+ struct hrtimer_clock_base *base,
+ unsigned long newstate, int reprogram)
+ {
++ struct timerqueue_node *next_timer;
+ if (!(timer->state & HRTIMER_STATE_ENQUEUED))
+ goto out;
+
+- if (&timer->node == timerqueue_getnext(&base->active)) {
++ next_timer = timerqueue_getnext(&base->active);
++ timerqueue_del(&base->active, &timer->node);
++ if (&timer->node == next_timer) {
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ /* Reprogram the clock event device. if enabled */
+ if (reprogram && hrtimer_hres_active()) {
+@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrti
+ }
+ #endif
+ }
+- timerqueue_del(&base->active, &timer->node);
+ if (!timerqueue_getnext(&base->active))
+ base->cpu_base->active_bases &= ~(1 << base->index);
+ out:
--- /dev/null
+From ea4039a34c4c206d015d34a49d0b00868e37db1d Mon Sep 17 00:00:00 2001
+From: Hillf Danton <dhillf@gmail.com>
+Date: Tue, 15 Nov 2011 14:36:12 -0800
+Subject: hugetlb: release pages in the error path of hugetlb_cow()
+
+From: Hillf Danton <dhillf@gmail.com>
+
+commit ea4039a34c4c206d015d34a49d0b00868e37db1d upstream.
+
+If we fail to prepare an anon_vma, the {new, old}_page should be released,
+or they will leak.
+
+Signed-off-by: Hillf Danton <dhillf@gmail.com>
+Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <jweiner@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Michal Hocko <mhocko@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/hugetlb.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2422,6 +2422,8 @@ retry_avoidcopy:
+ * anon_vma prepared.
+ */
+ if (unlikely(anon_vma_prepare(vma))) {
++ page_cache_release(new_page);
++ page_cache_release(old_page);
+ /* Caller expects lock to be held */
+ spin_lock(&mm->page_table_lock);
+ return VM_FAULT_OOM;
--- /dev/null
+From 24f50a9d165745fd0701c6e089d35f58a229ea69 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Thu, 24 Nov 2011 20:06:14 +0100
+Subject: mac80211: don't stop a single aggregation session twice
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 24f50a9d165745fd0701c6e089d35f58a229ea69 upstream.
+
+Nikolay noticed (by code review) that mac80211 can
+attempt to stop an aggregation session while it is
+already being stopped. So to fix it, check whether
+stop is already being done and bail out if so.
+
+Also move setting the STOPPING state into the lock
+so things are properly atomic.
+
+Reported-by: Nikolay Martynov <mar.kolya@gmail.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/agg-tx.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -162,6 +162,12 @@ int ___ieee80211_stop_tx_ba_session(stru
+ return -ENOENT;
+ }
+
++ /* if we're already stopping ignore any new requests to stop */
++ if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
++ spin_unlock_bh(&sta->lock);
++ return -EALREADY;
++ }
++
+ if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
+ /* not even started yet! */
+ ieee80211_assign_tid_tx(sta, tid, NULL);
+@@ -170,6 +176,8 @@ int ___ieee80211_stop_tx_ba_session(stru
+ return 0;
+ }
+
++ set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
++
+ spin_unlock_bh(&sta->lock);
+
+ #ifdef CONFIG_MAC80211_HT_DEBUG
+@@ -177,8 +185,6 @@ int ___ieee80211_stop_tx_ba_session(stru
+ sta->sta.addr, tid);
+ #endif /* CONFIG_MAC80211_HT_DEBUG */
+
+- set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
+-
+ del_timer_sync(&tid_tx->addba_resp_timer);
+
+ /*
--- /dev/null
+From 2a1e0fd175dcfd72096ba9291d31e3b1b5342e60 Mon Sep 17 00:00:00 2001
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Date: Sun, 27 Nov 2011 15:29:44 +0200
+Subject: mac80211: fix race between the AGG SM and the Tx data path
+
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+
+commit 2a1e0fd175dcfd72096ba9291d31e3b1b5342e60 upstream.
+
+When a packet is supposed to be sent as an a-MPDU, mac80211 sets
+IEEE80211_TX_CTL_AMPDU to let the driver know. On the other
+hand, mac80211 configures the driver for aggregation with the
+ampdu_action callback.
+There is race between these two mechanisms since the following
+scenario can occur when the BA agreement is torn down:
+
+Tx softIRQ drv configuration
+========== =================
+
+check OPERATIONAL bit
+Set the TX_CTL_AMPDU bit in the packet
+
+ clear OPERATIONAL bit
+ stop Tx AGG
+Pass Tx packet to the driver.
+
+In that case the driver would get a packet with TX_CTL_AMPDU set
+although it has already been notified that the BA session has been
+torn down.
+
+To fix this, we need to synchronize all the Qdisc activity after we
+cleared the OPERATIONAL bit. After that step, all the following
+packets will be buffered until the driver reports it is ready to get
+new packets for this RA / TID. This buffering allows not to run into
+another race that would send packets with TX_CTL_AMPDU unset while
+the driver hasn't been requested to tear down the BA session yet.
+
+This race occurs in practice and iwlwifi complains with a WARN_ON
+when it happens.
+
+Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Reviewed-by: Johannes Berg <johannes@sipsolutions.net>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/agg-tx.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -194,6 +194,20 @@ int ___ieee80211_stop_tx_ba_session(stru
+ */
+ clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
+
++ /*
++ * There might be a few packets being processed right now (on
++ * another CPU) that have already gotten past the aggregation
++ * check when it was still OPERATIONAL and consequently have
++ * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
++ * call into the driver at the same time or even before the
++ * TX paths calls into it, which could confuse the driver.
++ *
++ * Wait for all currently running TX paths to finish before
++ * telling the driver. New packets will not go through since
++ * the aggregation session is no longer OPERATIONAL.
++ */
++ synchronize_net();
++
+ tid_tx->stop_initiator = initiator;
+ tid_tx->tx_stop = tx;
+
--- /dev/null
+From e007b857e88097c96c45620bf3b04a4e309053d1 Mon Sep 17 00:00:00 2001
+From: Eliad Peller <eliad@wizery.com>
+Date: Thu, 24 Nov 2011 18:13:56 +0200
+Subject: nl80211: fix MAC address validation
+
+From: Eliad Peller <eliad@wizery.com>
+
+commit e007b857e88097c96c45620bf3b04a4e309053d1 upstream.
+
+MAC addresses have a fixed length. The current
+policy allows passing < ETH_ALEN bytes, which
+might result in reading beyond the buffer.
+
+Signed-off-by: Eliad Peller <eliad@wizery.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/wireless/nl80211.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -83,8 +83,8 @@ static const struct nla_policy nl80211_p
+ [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
+ [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
+
+- [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
+- [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
++ [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
++ [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
+
+ [NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
+ [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
--- /dev/null
+From 32d3a3922d617a5a685a5e2d24b20d0e88f192a9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michael=20B=C3=BCsch?= <m@bues.ch>
+Date: Wed, 16 Nov 2011 23:48:31 +0100
+Subject: p54spi: Add missing spin_lock_init
+
+From: =?UTF-8?q?Michael=20B=C3=BCsch?= <m@bues.ch>
+
+commit 32d3a3922d617a5a685a5e2d24b20d0e88f192a9 upstream.
+
+The tx_lock is not initialized properly. Add spin_lock_init().
+
+Signed-off-by: Michael Buesch <m@bues.ch>
+Acked-by: Christian Lamparter <chunkeey@googlemail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/p54/p54spi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/wireless/p54/p54spi.c
++++ b/drivers/net/wireless/p54/p54spi.c
+@@ -657,6 +657,7 @@ static int __devinit p54spi_probe(struct
+ init_completion(&priv->fw_comp);
+ INIT_LIST_HEAD(&priv->tx_pending);
+ mutex_init(&priv->mutex);
++ spin_lock_init(&priv->tx_lock);
+ SET_IEEE80211_DEV(hw, &spi->dev);
+ priv->common.open = p54spi_op_start;
+ priv->common.stop = p54spi_op_stop;
--- /dev/null
+From 2d1618170eb493d18f66f2ac03775409a6fb97c6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michael=20B=C3=BCsch?= <m@bues.ch>
+Date: Wed, 16 Nov 2011 23:55:46 +0100
+Subject: p54spi: Fix workqueue deadlock
+
+From: =?UTF-8?q?Michael=20B=C3=BCsch?= <m@bues.ch>
+
+commit 2d1618170eb493d18f66f2ac03775409a6fb97c6 upstream.
+
+priv->work must not be synced while priv->mutex is locked, because
+the mutex is taken in the work handler.
+Move cancel_work_sync down to after the device shutdown code.
+This is safe, because the work handler checks fw_state and bails out
+early in case of a race.
+
+Signed-off-by: Michael Buesch <m@bues.ch>
+Acked-by: Christian Lamparter <chunkeey@googlemail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/p54/p54spi.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/p54/p54spi.c
++++ b/drivers/net/wireless/p54/p54spi.c
+@@ -589,8 +589,6 @@ static void p54spi_op_stop(struct ieee80
+
+ WARN_ON(priv->fw_state != FW_STATE_READY);
+
+- cancel_work_sync(&priv->work);
+-
+ p54spi_power_off(priv);
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ INIT_LIST_HEAD(&priv->tx_pending);
+@@ -598,6 +596,8 @@ static void p54spi_op_stop(struct ieee80
+
+ priv->fw_state = FW_STATE_OFF;
+ mutex_unlock(&priv->mutex);
++
++ cancel_work_sync(&priv->work);
+ }
+
+ static int __devinit p54spi_probe(struct spi_device *spi)
--- /dev/null
+From d4d6373c1109b11c8118340be97ae31b8f94d66a Mon Sep 17 00:00:00 2001
+From: Axel Lin <axel.lin@gmail.com>
+Date: Mon, 28 Nov 2011 14:06:31 +0800
+Subject: regulator: aat2870: Fix the logic of checking if no id is matched in aat2870_get_regulator
+
+From: Axel Lin <axel.lin@gmail.com>
+
+commit d4d6373c1109b11c8118340be97ae31b8f94d66a upstream.
+
+In current implementation, the pointer ri is not NULL if no id is matched.
+Fix it by checking i == ARRAY_SIZE(aat2870_regulators) if no id is matched.
+
+Signed-off-by: Axel Lin <axel.lin@gmail.com>
+Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/regulator/aat2870-regulator.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/regulator/aat2870-regulator.c
++++ b/drivers/regulator/aat2870-regulator.c
+@@ -159,7 +159,7 @@ static struct aat2870_regulator *aat2870
+ break;
+ }
+
+- if (!ri)
++ if (i == ARRAY_SIZE(aat2870_regulators))
+ return NULL;
+
+ ri->enable_addr = AAT2870_LDO_EN;
--- /dev/null
+From ba305e31e88ea5c2f598ff9fbc5424711a429e30 Mon Sep 17 00:00:00 2001
+From: Tero Kristo <t-kristo@ti.com>
+Date: Mon, 28 Nov 2011 16:53:19 +0200
+Subject: regulator: twl: fix twl4030 support for smps regulators
+
+From: Tero Kristo <t-kristo@ti.com>
+
+commit ba305e31e88ea5c2f598ff9fbc5424711a429e30 upstream.
+
+SMPS regulator voltage control differs from the one of the LDO ones.
+Current TWL code was using LDO regulator ops for controlling the SMPS
+regulators, which fails. This was fixed fixed by adding separate
+regulator type which uses correct logic and calculations for the
+voltage levels.
+
+Signed-off-by: Tero Kristo <t-kristo@ti.com>
+Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/regulator/twl-regulator.c | 46 ++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 44 insertions(+), 2 deletions(-)
+
+--- a/drivers/regulator/twl-regulator.c
++++ b/drivers/regulator/twl-regulator.c
+@@ -71,6 +71,7 @@ struct twlreg_info {
+ #define VREG_TYPE 1
+ #define VREG_REMAP 2
+ #define VREG_DEDICATED 3 /* LDO control */
++#define VREG_VOLTAGE_SMPS_4030 9
+ /* TWL6030 register offsets */
+ #define VREG_TRANS 1
+ #define VREG_STATE 2
+@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_o
+ .get_status = twl4030reg_get_status,
+ };
+
++static int
++twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
++ unsigned *selector)
++{
++ struct twlreg_info *info = rdev_get_drvdata(rdev);
++ int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
++
++ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
++ vsel);
++ return 0;
++}
++
++static int twl4030smps_get_voltage(struct regulator_dev *rdev)
++{
++ struct twlreg_info *info = rdev_get_drvdata(rdev);
++ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
++ VREG_VOLTAGE_SMPS_4030);
++
++ return vsel * 12500 + 600000;
++}
++
++static struct regulator_ops twl4030smps_ops = {
++ .set_voltage = twl4030smps_set_voltage,
++ .get_voltage = twl4030smps_get_voltage,
++};
++
+ static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+ {
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops
+ }, \
+ }
+
++#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
++ { \
++ .base = offset, \
++ .id = num, \
++ .delay = turnon_delay, \
++ .remap = remap_conf, \
++ .desc = { \
++ .name = #label, \
++ .id = TWL4030_REG_##label, \
++ .ops = &twl4030smps_ops, \
++ .type = REGULATOR_VOLTAGE, \
++ .owner = THIS_MODULE, \
++ }, \
++ }
++
+ #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
+ .base = offset, \
+ .min_mV = min_mVolts, \
+@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
+ TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
+ TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
+- TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
+- TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
++ TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
++ TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
+ TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
+ TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
+ TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
--- /dev/null
+From 4ba7d9997869d25bd223dea7536fc1ce9fab3b3b Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Wed, 16 Nov 2011 11:09:17 +0100
+Subject: rt2800pci: handle spurious interrupts
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit 4ba7d9997869d25bd223dea7536fc1ce9fab3b3b upstream.
+
+Some devices may generate spurious interrupts, we have to handle them
+otherwise interrupt line will be disabled with below message and driver
+will not work:
+
+[ 2052.114334] irq 17: nobody cared (try booting with the "irqpoll" option)
+[ 2052.114339] Pid: 0, comm: swapper Tainted: P 2.6.35.6-48.fc14.x86_64 #1
+[ 2052.114341] Call Trace:
+[ 2052.114342] <IRQ> [<ffffffff810a6e2b>] __report_bad_irq.clone.1+0x3d/0x8b
+[ 2052.114349] [<ffffffff810a6f93>] note_interrupt+0x11a/0x17f
+[ 2052.114352] [<ffffffff810a7a73>] handle_fasteoi_irq+0xa8/0xce
+[ 2052.114355] [<ffffffff8100c2ea>] handle_irq+0x88/0x90
+[ 2052.114357] [<ffffffff8146f034>] do_IRQ+0x5c/0xb4
+[ 2052.114360] [<ffffffff81469593>] ret_from_intr+0x0/0x11
+[ 2052.114361] <EOI> [<ffffffff8102b7f9>] ? native_safe_halt+0xb/0xd
+[ 2052.114366] [<ffffffff81010f03>] ? need_resched+0x23/0x2d
+[ 2052.114367] [<ffffffff8101102a>] default_idle+0x34/0x4f
+[ 2052.114370] [<ffffffff81008325>] cpu_idle+0xaa/0xcc
+[ 2052.114373] [<ffffffff81461f2a>] start_secondary+0x24d/0x28e
+[ 2052.114374] handlers:
+[ 2052.114375] [<ffffffff81332944>] (usb_hcd_irq+0x0/0x7c)
+[ 2052.114378] [<ffffffffa00697da>] (rt2800pci_interrupt+0x0/0x18d [rt2800pci])
+[ 2052.114384] Disabling IRQ #17
+
+Resolve:
+https://bugzilla.redhat.com/show_bug.cgi?id=658451
+
+Reported-and-tested-by: Amir Hedayaty <hedayaty@gmail.com>
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Acked-by: Ivo van Doorn <IvDoorn@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/rt2x00/rt2800pci.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/rt2x00/rt2800pci.c
++++ b/drivers/net/wireless/rt2x00/rt2800pci.c
+@@ -888,8 +888,13 @@ static irqreturn_t rt2800pci_interrupt(i
+ rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+ rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
++ /*
++ * Some devices can generate interrupts with empty CSR register, we
++ * "handle" such irq's to prevent interrupt controller treat them as
++ * spurious interrupts and disable irq line.
++ */
+ if (!reg)
+- return IRQ_NONE;
++ return IRQ_HANDLED;
+
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ return IRQ_HANDLED;
--- /dev/null
+From 68fa64ef606bcee688fce46d07aa68f175070156 Mon Sep 17 00:00:00 2001
+From: Gertjan van Wingerde <gwingerde@gmail.com>
+Date: Wed, 16 Nov 2011 23:16:15 +0100
+Subject: rt2x00: Fix efuse EEPROM reading on PPC32.
+
+From: Gertjan van Wingerde <gwingerde@gmail.com>
+
+commit 68fa64ef606bcee688fce46d07aa68f175070156 upstream.
+
+Fix __le32 to __le16 conversion of the first word of an 8-word block
+of EEPROM read via the efuse method.
+
+Reported-and-tested-by: Ingvar Hagelund <ingvar@redpill-linpro.com>
+Signed-off-by: Gertjan van Wingerde <gwingerde@gmail.com>
+Acked-by: Helmut Schaa <helmut.schaa@googlemail.com>
+Acked-by: Ivo van Doorn <IvDoorn@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/rt2x00/rt2800lib.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -3699,7 +3699,7 @@ static void rt2800_efuse_read(struct rt2
+ /* Apparently the data is read from end to start */
+ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
+ /* The returned value is in CPU order, but eeprom is le */
+- rt2x00dev->eeprom[i] = cpu_to_le32(reg);
++ *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
+ *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
+ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
--- /dev/null
+From 23085d5796561625db4143a671f1de081f66ef08 Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Wed, 16 Nov 2011 13:58:42 +0100
+Subject: rt2x00: handle spurious pci interrupts
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit 23085d5796561625db4143a671f1de081f66ef08 upstream.
+
+We have documented case of very bad performance issue on rt2800pci
+device, because it generate spurious interrupt, what cause irq line
+is disabled: https://bugzilla.redhat.com/show_bug.cgi?id=658451
+
+We already address that problem in separate patch by returning
+IRQ_HANDLED from interrupt handler. We think similar fix is needed for
+other rt2x00 PCI devices, because users report performance problems on
+these devices too.
+
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Acked-by: Ivo van Doorn <IvDoorn@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/rt2x00/rt2400pci.c | 2 +-
+ drivers/net/wireless/rt2x00/rt2500pci.c | 2 +-
+ drivers/net/wireless/rt2x00/rt61pci.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/rt2x00/rt2400pci.c
++++ b/drivers/net/wireless/rt2x00/rt2400pci.c
+@@ -1387,7 +1387,7 @@ static irqreturn_t rt2400pci_interrupt(i
+ rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+ if (!reg)
+- return IRQ_NONE;
++ return IRQ_HANDLED;
+
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ return IRQ_HANDLED;
+--- a/drivers/net/wireless/rt2x00/rt2500pci.c
++++ b/drivers/net/wireless/rt2x00/rt2500pci.c
+@@ -1519,7 +1519,7 @@ static irqreturn_t rt2500pci_interrupt(i
+ rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+ if (!reg)
+- return IRQ_NONE;
++ return IRQ_HANDLED;
+
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ return IRQ_HANDLED;
+--- a/drivers/net/wireless/rt2x00/rt61pci.c
++++ b/drivers/net/wireless/rt2x00/rt61pci.c
+@@ -2345,7 +2345,7 @@ static irqreturn_t rt61pci_interrupt(int
+ rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+ if (!reg && !reg_mcu)
+- return IRQ_NONE;
++ return IRQ_HANDLED;
+
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ return IRQ_HANDLED;
--- /dev/null
+From e55b32c110b025ce07b40227f620e99700bf8741 Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Mon, 28 Nov 2011 10:33:40 +0100
+Subject: rtlwifi: fix lps_lock deadlock
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit e55b32c110b025ce07b40227f620e99700bf8741 upstream.
+
+rtl_lps_leave can be called from interrupt context, so we have to
+disable interrupts when taking lps_lock.
+
+Below is full lockdep info about deadlock:
+
+[ 93.815269] =================================
+[ 93.815390] [ INFO: inconsistent lock state ]
+[ 93.815472] 2.6.41.1-3.offch.fc15.x86_64.debug #1
+[ 93.815556] ---------------------------------
+[ 93.815635] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
+[ 93.815743] swapper/0 [HC0[0]:SC1[1]:HE1:SE0] takes:
+[ 93.815832] (&(&rtlpriv->locks.lps_lock)->rlock){+.?...}, at: [<ffffffffa025dad6>] rtl_lps_leave+0x26/0x103 [rtlwifi]
+[ 93.815947] {SOFTIRQ-ON-W} state was registered at:
+[ 93.815947] [<ffffffff8108e10d>] __lock_acquire+0x369/0xd0c
+[ 93.815947] [<ffffffff8108efb3>] lock_acquire+0xf3/0x13e
+[ 93.815947] [<ffffffff814e981d>] _raw_spin_lock+0x45/0x79
+[ 93.815947] [<ffffffffa025de34>] rtl_swlps_rf_awake+0x5a/0x76 [rtlwifi]
+[ 93.815947] [<ffffffffa025aec0>] rtl_op_config+0x12a/0x32a [rtlwifi]
+[ 93.815947] [<ffffffffa01d614b>] ieee80211_hw_config+0x124/0x129 [mac80211]
+[ 93.815947] [<ffffffffa01e0af3>] ieee80211_dynamic_ps_disable_work+0x32/0x47 [mac80211]
+[ 93.815947] [<ffffffff81075aa5>] process_one_work+0x205/0x3e7
+[ 93.815947] [<ffffffff81076753>] worker_thread+0xda/0x15d
+[ 93.815947] [<ffffffff8107a119>] kthread+0xa8/0xb0
+[ 93.815947] [<ffffffff814f3184>] kernel_thread_helper+0x4/0x10
+[ 93.815947] irq event stamp: 547822
+[ 93.815947] hardirqs last enabled at (547822): [<ffffffff814ea1a7>] _raw_spin_unlock_irqrestore+0x45/0x61
+[ 93.815947] hardirqs last disabled at (547821): [<ffffffff814e9987>] _raw_spin_lock_irqsave+0x22/0x8e
+[ 93.815947] softirqs last enabled at (547790): [<ffffffff810623ed>] _local_bh_enable+0x13/0x15
+[ 93.815947] softirqs last disabled at (547791): [<ffffffff814f327c>] call_softirq+0x1c/0x30
+[ 93.815947]
+[ 93.815947] other info that might help us debug this:
+[ 93.815947] Possible unsafe locking scenario:
+[ 93.815947]
+[ 93.815947] CPU0
+[ 93.815947] ----
+[ 93.815947] lock(&(&rtlpriv->locks.lps_lock)->rlock);
+[ 93.815947] <Interrupt>
+[ 93.815947] lock(&(&rtlpriv->locks.lps_lock)->rlock);
+[ 93.815947]
+[ 93.815947] *** DEADLOCK ***
+[ 93.815947]
+[ 93.815947] no locks held by swapper/0.
+[ 93.815947]
+[ 93.815947] stack backtrace:
+[ 93.815947] Pid: 0, comm: swapper Not tainted 2.6.41.1-3.offch.fc15.x86_64.debug #1
+[ 93.815947] Call Trace:
+[ 93.815947] <IRQ> [<ffffffff814dfd00>] print_usage_bug+0x1e7/0x1f8
+[ 93.815947] [<ffffffff8101a849>] ? save_stack_trace+0x2c/0x49
+[ 93.815947] [<ffffffff8108d55c>] ? print_irq_inversion_bug.part.18+0x1a0/0x1a0
+[ 93.815947] [<ffffffff8108dc8a>] mark_lock+0x106/0x220
+[ 93.815947] [<ffffffff8108e099>] __lock_acquire+0x2f5/0xd0c
+[ 93.815947] [<ffffffff810152af>] ? native_sched_clock+0x34/0x36
+[ 93.830125] [<ffffffff810152ba>] ? sched_clock+0x9/0xd
+[ 93.830125] [<ffffffff81080181>] ? sched_clock_local+0x12/0x75
+[ 93.830125] [<ffffffffa025dad6>] ? rtl_lps_leave+0x26/0x103 [rtlwifi]
+[ 93.830125] [<ffffffff8108efb3>] lock_acquire+0xf3/0x13e
+[ 93.830125] [<ffffffffa025dad6>] ? rtl_lps_leave+0x26/0x103 [rtlwifi]
+[ 93.830125] [<ffffffff814e981d>] _raw_spin_lock+0x45/0x79
+[ 93.830125] [<ffffffffa025dad6>] ? rtl_lps_leave+0x26/0x103 [rtlwifi]
+[ 93.830125] [<ffffffff81422467>] ? skb_dequeue+0x62/0x6d
+[ 93.830125] [<ffffffffa025dad6>] rtl_lps_leave+0x26/0x103 [rtlwifi]
+[ 93.830125] [<ffffffffa025f677>] _rtl_pci_ips_leave_tasklet+0xe/0x10 [rtlwifi]
+[ 93.830125] [<ffffffff8106281f>] tasklet_action+0x8d/0xee
+[ 93.830125] [<ffffffff810629ce>] __do_softirq+0x112/0x25a
+[ 93.830125] [<ffffffff814f327c>] call_softirq+0x1c/0x30
+[ 93.830125] [<ffffffff81010bf6>] do_softirq+0x4b/0xa1
+[ 93.830125] [<ffffffff81062d7d>] irq_exit+0x5d/0xcf
+[ 93.830125] [<ffffffff814f3b7e>] do_IRQ+0x8e/0xa5
+[ 93.830125] [<ffffffff814ea533>] common_interrupt+0x73/0x73
+[ 93.830125] <EOI> [<ffffffff8108b825>] ? trace_hardirqs_off+0xd/0xf
+[ 93.830125] [<ffffffff812bb6d5>] ? intel_idle+0xe5/0x10c
+[ 93.830125] [<ffffffff812bb6d1>] ? intel_idle+0xe1/0x10c
+[ 93.830125] [<ffffffff813f8d5e>] cpuidle_idle_call+0x11c/0x1fe
+[ 93.830125] [<ffffffff8100e2ef>] cpu_idle+0xab/0x101
+[ 93.830125] [<ffffffff814c6373>] rest_init+0xd7/0xde
+[ 93.830125] [<ffffffff814c629c>] ? csum_partial_copy_generic+0x16c/0x16c
+[ 93.830125] [<ffffffff81d4bbb0>] start_kernel+0x3dd/0x3ea
+[ 93.830125] [<ffffffff81d4b2c4>] x86_64_start_reservations+0xaf/0xb3
+[ 93.830125] [<ffffffff81d4b140>] ? early_idt_handlers+0x140/0x140
+[ 93.830125] [<ffffffff81d4b3ca>] x86_64_start_kernel+0x102/0x111
+
+Resolves:
+https://bugzilla.redhat.com/show_bug.cgi?id=755154
+
+Reported-by: vjain02@students.poly.edu
+Reported-and-tested-by: Oliver Paukstadt <pstadt@sourcentral.org>
+Acked-by: Larry Finger <Larry.Finger@lwfinger.net>
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/rtlwifi/ps.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/wireless/rtlwifi/ps.c
++++ b/drivers/net/wireless/rtlwifi/ps.c
+@@ -394,7 +394,7 @@ void rtl_lps_enter(struct ieee80211_hw *
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+
+- spin_lock(&rtlpriv->locks.lps_lock);
++ spin_lock_irq(&rtlpriv->locks.lps_lock);
+
+ /* Idle for a while if we connect to AP a while ago. */
+ if (mac->cnt_after_linked >= 2) {
+@@ -406,7 +406,7 @@ void rtl_lps_enter(struct ieee80211_hw *
+ }
+ }
+
+- spin_unlock(&rtlpriv->locks.lps_lock);
++ spin_unlock_irq(&rtlpriv->locks.lps_lock);
+ }
+
+ /*Leave the leisure power save mode.*/
+@@ -415,8 +415,9 @@ void rtl_lps_leave(struct ieee80211_hw *
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
++ unsigned long flags;
+
+- spin_lock(&rtlpriv->locks.lps_lock);
++ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
+
+ if (ppsc->fwctrl_lps) {
+ if (ppsc->dot11_psmode != EACTIVE) {
+@@ -437,7 +438,7 @@ void rtl_lps_leave(struct ieee80211_hw *
+ rtl_lps_set_psmode(hw, EACTIVE);
+ }
+ }
+- spin_unlock(&rtlpriv->locks.lps_lock);
++ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
+ }
+
+ /* For sw LPS*/
+@@ -538,9 +539,9 @@ void rtl_swlps_rf_awake(struct ieee80211
+ RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+ }
+
+- spin_lock(&rtlpriv->locks.lps_lock);
++ spin_lock_irq(&rtlpriv->locks.lps_lock);
+ rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
+- spin_unlock(&rtlpriv->locks.lps_lock);
++ spin_unlock_irq(&rtlpriv->locks.lps_lock);
+ }
+
+ void rtl_swlps_rfon_wq_callback(void *data)
+@@ -573,9 +574,9 @@ void rtl_swlps_rf_sleep(struct ieee80211
+ if (rtlpriv->link_info.busytraffic)
+ return;
+
+- spin_lock(&rtlpriv->locks.lps_lock);
++ spin_lock_irq(&rtlpriv->locks.lps_lock);
+ rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
+- spin_unlock(&rtlpriv->locks.lps_lock);
++ spin_unlock_irq(&rtlpriv->locks.lps_lock);
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
+ !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
--- /dev/null
+From 745718132c3c7cac98a622b610e239dcd5217f71 Mon Sep 17 00:00:00 2001
+From: Hannes Reinecke <hare@suse.de>
+Date: Wed, 9 Nov 2011 08:39:24 +0100
+Subject: SCSI: Silencing 'killing requests for dead queue'
+
+From: Hannes Reinecke <hare@suse.de>
+
+commit 745718132c3c7cac98a622b610e239dcd5217f71 upstream.
+
+When we tear down a device we try to flush all outstanding
+commands in scsi_free_queue(). However the check in
+scsi_request_fn() is imperfect as it only signals that
+we _might start_ aborting commands, not that we've actually
+aborted some.
+So move the printk inside the scsi_kill_request function,
+this will also give us a hint about which commands are aborted.
+
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: James Bottomley <JBottomley@Parallels.com>
+Cc: Christoph Biedl <linux-kernel.bfrz@manchmal.in-ulm.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/scsi_lib.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1408,6 +1408,8 @@ static void scsi_kill_request(struct req
+
+ blk_start_request(req);
+
++ scmd_printk(KERN_INFO, cmd, "killing request\n");
++
+ sdev = cmd->device;
+ starget = scsi_target(sdev);
+ shost = sdev->host;
+@@ -1489,7 +1491,6 @@ static void scsi_request_fn(struct reque
+ struct request *req;
+
+ if (!sdev) {
+- printk("scsi: killing requests for dead queue\n");
+ while ((req = blk_peek_request(q)) != NULL)
+ scsi_kill_request(req, q);
+ return;
asoc-fsl_ssi-properly-initialize-the-sysfs-attribute-object.patch
asoc-wm8753-skip-noop-reconfiguration-of-dai-mode.patch
asoc-ensure-wm8731-register-cache-is-synced-when-resuming-from-disabled.patch
+sunrpc-ensure-we-return-eagain-in-xs_nospace-if-congestion-is-cleared.patch
+ext4-fix-racy-use-after-free-in-ext4_end_io_dio.patch
+rtlwifi-fix-lps_lock-deadlock.patch
+genirq-fix-regression-in-irqfixup-irqpoll.patch
+regulator-aat2870-fix-the-logic-of-checking-if-no-id-is-matched-in-aat2870_get_regulator.patch
+regulator-twl-fix-twl4030-support-for-smps-regulators.patch
+cgroup_freezer-fix-freezing-groups-with-stopped-tasks.patch
+timekeeping-add-arch_offset-hook-to-ktime_get-functions.patch
+hrtimer-fix-extra-wakeups-from-__remove_hrtimer.patch
+clocksource-avoid-selecting-mult-values-that-might-overflow-when-adjusted.patch
+p54spi-add-missing-spin_lock_init.patch
+p54spi-fix-workqueue-deadlock.patch
+rt2800pci-handle-spurious-interrupts.patch
+rt2x00-handle-spurious-pci-interrupts.patch
+rt2x00-fix-efuse-eeprom-reading-on-ppc32.patch
+nl80211-fix-mac-address-validation.patch
+cfg80211-fix-regulatory-null-dereference.patch
+mac80211-don-t-stop-a-single-aggregation-session-twice.patch
+mac80211-fix-race-between-the-agg-sm-and-the-tx-data-path.patch
+xfs-don-t-serialise-direct-io-reads-on-page-cache-checks.patch
+xfs-avoid-direct-i-o-write-vs-buffered-i-o-race.patch
+xfs-return-eio-when-xfs_vn_getattr-failed.patch
+xfs-fix-buffer-flushing-during-unmount.patch
+xfs-fix-possible-memory-corruption-in-xfs_readlink.patch
+xfs-use-doalloc-flag-in-xfs_qm_dqattach_one.patch
+scsi-silencing-killing-requests-for-dead-queue.patch
+hugetlb-release-pages-in-the-error-path-of-hugetlb_cow.patch
--- /dev/null
+From 24ca9a847791fd53d9b217330b15f3c285827a18 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Tue, 22 Nov 2011 14:44:28 +0200
+Subject: SUNRPC: Ensure we return EAGAIN in xs_nospace if congestion is cleared
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 24ca9a847791fd53d9b217330b15f3c285827a18 upstream.
+
+By returning '0' instead of 'EAGAIN' when the tests in xs_nospace() fail
+to find evidence of socket congestion, we are making the RPC engine believe
+that the message was incorrectly sent and so it disconnects the socket
+instead of just retrying.
+
+The bug appears to have been introduced by commit
+5e3771ce2d6a69e10fcc870cdf226d121d868491 (SUNRPC: Ensure that xs_nospace
+return values are propagated).
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sunrpc/xprtsock.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *t
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+- int ret = 0;
++ int ret = -EAGAIN;
+
+ dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
+ task->tk_pid, req->rq_slen - req->rq_bytes_sent,
+@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *t
+ /* Don't race with disconnect */
+ if (xprt_connected(xprt)) {
+ if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
+- ret = -EAGAIN;
+ /*
+ * Notify TCP that we're limited by the application
+ * window size
--- /dev/null
+From d004e024058a0eaca097513ce62cbcf978913e0a Mon Sep 17 00:00:00 2001
+From: Hector Palacios <hector.palacios@digi.com>
+Date: Mon, 14 Nov 2011 11:15:25 +0100
+Subject: timekeeping: add arch_offset hook to ktime_get functions
+
+From: Hector Palacios <hector.palacios@digi.com>
+
+commit d004e024058a0eaca097513ce62cbcf978913e0a upstream.
+
+ktime_get and ktime_get_ts were calling timekeeping_get_ns()
+but later they were not calling arch_gettimeoffset() so architectures
+using this mechanism returned 0 ns when calling these functions.
+
+This happened for example when running Busybox's ping which calls
+syscall(__NR_clock_gettime, CLOCK_MONOTONIC, ts) which eventually
+calls ktime_get. As a result the returned ping travel time was zero.
+
+Signed-off-by: Hector Palacios <hector.palacios@digi.com>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/time/timekeeping.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
+ secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
+ nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
+ nsecs += timekeeping_get_ns();
++ /* If arch requires, add in gettimeoffset() */
++ nsecs += arch_gettimeoffset();
+
+ } while (read_seqretry(&xtime_lock, seq));
+ /*
+@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
+ *ts = xtime;
+ tomono = wall_to_monotonic;
+ nsecs = timekeeping_get_ns();
++ /* If arch requires, add in gettimeoffset() */
++ nsecs += arch_gettimeoffset();
+
+ } while (read_seqretry(&xtime_lock, seq));
+
--- /dev/null
+From bpm@sgi.com Fri Dec 2 15:08:37 2011
+From: Christoph Hellwig <hch@infradead.org>
+Date: Thu, 1 Dec 2011 17:27:40 -0600
+Subject: xfs: avoid direct I/O write vs buffered I/O race
+To: stable@vger.kernel.org, greg@kroah.com
+Cc: xfs@oss.sgi.com, bpm@sgi.com, Christoph Hellwig <hch@infradead.org>, Christoph Hellwig <hch@lst.de>, Alex Elder <aelder@sgi.com>
+Message-ID: <1322782064-1723-3-git-send-email-bpm@sgi.com>
+
+
+From: Christoph Hellwig <hch@infradead.org>
+
+commit c58cb165bd44de8aaee9755a144136ae743be116 upstream.
+
+Currently a buffered reader or writer can add pages to the pagecache
+while we are waiting for the iolock in xfs_file_dio_aio_write. Prevent
+this by re-checking mapping->nrpages after we got the iolock, and if
+nessecary upgrade the lock to exclusive mode. To simplify this a bit
+only take the ilock inside of xfs_file_aio_write_checks.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Alex Elder <aelder@sgi.com>
+Cc: Ben Myers <bpm@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ fs/xfs/xfs_file.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -677,6 +677,7 @@ xfs_file_aio_write_checks(
+ xfs_fsize_t new_size;
+ int error = 0;
+
++ xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
+ error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
+ if (error) {
+ xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
+@@ -768,14 +769,24 @@ xfs_file_dio_aio_write(
+ *iolock = XFS_IOLOCK_EXCL;
+ else
+ *iolock = XFS_IOLOCK_SHARED;
+- xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
++ xfs_rw_ilock(ip, *iolock);
+
+ ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
+ if (ret)
+ return ret;
+
++ /*
++ * Recheck if there are cached pages that need invalidate after we got
++ * the iolock to protect against other threads adding new pages while
++ * we were waiting for the iolock.
++ */
++ if (mapping->nrpages && *iolock == XFS_IOLOCK_SHARED) {
++ xfs_rw_iunlock(ip, *iolock);
++ *iolock = XFS_IOLOCK_EXCL;
++ xfs_rw_ilock(ip, *iolock);
++ }
++
+ if (mapping->nrpages) {
+- WARN_ON(*iolock != XFS_IOLOCK_EXCL);
+ ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
+ FI_REMAPF_LOCKED);
+ if (ret)
+@@ -820,7 +831,7 @@ xfs_file_buffered_aio_write(
+ size_t count = ocount;
+
+ *iolock = XFS_IOLOCK_EXCL;
+- xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
++ xfs_rw_ilock(ip, *iolock);
+
+ ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
+ if (ret)
--- /dev/null
+From bpm@sgi.com Fri Dec 2 15:07:50 2011
+From: Dave Chinner <dchinner@redhat.com>
+Date: Thu, 1 Dec 2011 17:27:39 -0600
+Subject: xfs: don't serialise direct IO reads on page cache checks
+To: stable@vger.kernel.org, greg@kroah.com
+Cc: xfs@oss.sgi.com, bpm@sgi.com, Dave Chinner <dchinner@redhat.com>, Alex Elder <aelder@sgi.com>
+Message-ID: <1322782064-1723-2-git-send-email-bpm@sgi.com>
+
+
+From: Dave Chinner <dchinner@redhat.com>
+
+commit 0c38a2512df272b14ef4238b476a2e4f70da1479 upstream.
+
+There is no need to grab the i_mutex of the IO lock in exclusive
+mode if we don't need to invalidate the page cache. Taking these
+locks on every direct IO effective serialises them as taking the IO
+lock in exclusive mode has to wait for all shared holders to drop
+the lock. That only happens when IO is complete, so effective it
+prevents dispatch of concurrent direct IO reads to the same inode.
+
+Fix this by taking the IO lock shared to check the page cache state,
+and only then drop it and take the IO lock exclusively if there is
+work to be done. Hence for the normal direct IO case, no exclusive
+locking will occur.
+
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Tested-by: Joern Engel <joern@logfs.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Alex Elder <aelder@sgi.com>
+Cc: Ben Myers <bpm@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ fs/xfs/xfs_file.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -317,7 +317,19 @@ xfs_file_aio_read(
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+- if (unlikely(ioflags & IO_ISDIRECT)) {
++ /*
++ * Locking is a bit tricky here. If we take an exclusive lock
++ * for direct IO, we effectively serialise all new concurrent
++ * read IO to this file and block it behind IO that is currently in
++ * progress because IO in progress holds the IO lock shared. We only
++ * need to hold the lock exclusive to blow away the page cache, so
++ * only take lock exclusively if the page cache needs invalidation.
++ * This allows the normal direct IO case of no page cache pages to
++ * proceeed concurrently without serialisation.
++ */
++ xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
++ if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
++ xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+ xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
+
+ if (inode->i_mapping->nrpages) {
+@@ -330,8 +342,7 @@ xfs_file_aio_read(
+ }
+ }
+ xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+- } else
+- xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
++ }
+
+ trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);
+
--- /dev/null
+From bpm@sgi.com Fri Dec 2 15:10:33 2011
+From: Christoph Hellwig <hch@infradead.org>
+Date: Thu, 1 Dec 2011 17:27:42 -0600
+Subject: xfs: fix buffer flushing during unmount
+To: stable@vger.kernel.org, greg@kroah.com
+Cc: xfs@oss.sgi.com, bpm@sgi.com, Christoph Hellwig <hch@infradead.org>, Christoph Hellwig <hch@lst.de>, Alex Elder <aelder@sgi.com>
+Message-ID: <1322782064-1723-5-git-send-email-bpm@sgi.com>
+
+
+From: Christoph Hellwig <hch@infradead.org>
+
+commit 87c7bec7fc3377b3873eb3a0f4b603981ea16ebb upstream.
+
+The code to flush buffers in the umount code is a bit iffy: we first
+flush all delwri buffers out, but then might be able to queue up a
+new one when logging the sb counts. On a normal shutdown that one
+would get flushed out when doing the synchronous superblock write in
+xfs_unmountfs_writesb, but we skip that one if the filesystem has
+been shut down.
+
+Fix this by moving the delwri list flushing until just before unmounting
+the log, and while we're at it also remove the superflous delwri list
+and buffer lru flusing for the rt and log device that can never have
+cached or delwri buffers.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reported-by: Amit Sahrawat <amit.sahrawat83@gmail.com>
+Tested-by: Amit Sahrawat <amit.sahrawat83@gmail.com>
+Signed-off-by: Alex Elder <aelder@sgi.com>
+Cc: Ben Myers <bpm@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ fs/xfs/xfs_buf.h | 1 -
+ fs/xfs/xfs_mount.c | 29 ++++++++++-------------------
+ 2 files changed, 10 insertions(+), 20 deletions(-)
+
+--- a/fs/xfs/xfs_buf.h
++++ b/fs/xfs/xfs_buf.h
+@@ -320,7 +320,6 @@ extern struct list_head *xfs_get_buftarg
+ #define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
+ #define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
+
+-#define xfs_binval(buftarg) xfs_flush_buftarg(buftarg, 1)
+ #define XFS_bflush(buftarg) xfs_flush_buftarg(buftarg, 1)
+
+ #endif /* __XFS_BUF_H__ */
+--- a/fs/xfs/xfs_mount.c
++++ b/fs/xfs/xfs_mount.c
+@@ -44,9 +44,6 @@
+ #include "xfs_trace.h"
+
+
+-STATIC void xfs_unmountfs_wait(xfs_mount_t *);
+-
+-
+ #ifdef HAVE_PERCPU_SB
+ STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
+ int);
+@@ -1496,11 +1493,6 @@ xfs_unmountfs(
+ */
+ xfs_log_force(mp, XFS_LOG_SYNC);
+
+- xfs_binval(mp->m_ddev_targp);
+- if (mp->m_rtdev_targp) {
+- xfs_binval(mp->m_rtdev_targp);
+- }
+-
+ /*
+ * Unreserve any blocks we have so that when we unmount we don't account
+ * the reserved free space as used. This is really only necessary for
+@@ -1526,7 +1518,16 @@ xfs_unmountfs(
+ xfs_warn(mp, "Unable to update superblock counters. "
+ "Freespace may not be correct on next mount.");
+ xfs_unmountfs_writesb(mp);
+- xfs_unmountfs_wait(mp); /* wait for async bufs */
++
++ /*
++ * Make sure all buffers have been flushed and completed before
++ * unmounting the log.
++ */
++ error = xfs_flush_buftarg(mp->m_ddev_targp, 1);
++ if (error)
++ xfs_warn(mp, "%d busy buffers during unmount.", error);
++ xfs_wait_buftarg(mp->m_ddev_targp);
++
+ xfs_log_unmount_write(mp);
+ xfs_log_unmount(mp);
+ xfs_uuid_unmount(mp);
+@@ -1537,16 +1538,6 @@ xfs_unmountfs(
+ xfs_free_perag(mp);
+ }
+
+-STATIC void
+-xfs_unmountfs_wait(xfs_mount_t *mp)
+-{
+- if (mp->m_logdev_targp != mp->m_ddev_targp)
+- xfs_wait_buftarg(mp->m_logdev_targp);
+- if (mp->m_rtdev_targp)
+- xfs_wait_buftarg(mp->m_rtdev_targp);
+- xfs_wait_buftarg(mp->m_ddev_targp);
+-}
+-
+ int
+ xfs_fs_writable(xfs_mount_t *mp)
+ {
--- /dev/null
+From bpm@sgi.com Fri Dec 2 15:11:04 2011
+From: Carlos Maiolino <cmaiolino@redhat.com>
+Date: Thu, 1 Dec 2011 17:27:43 -0600
+Subject: xfs: Fix possible memory corruption in xfs_readlink
+To: stable@vger.kernel.org, greg@kroah.com
+Cc: xfs@oss.sgi.com, bpm@sgi.com, Carlos Maiolino <cmaiolino@redhat.com>, Alex Elder <aelder@sgi.com>
+Message-ID: <1322782064-1723-6-git-send-email-bpm@sgi.com>
+
+
+From: Carlos Maiolino <cmaiolino@redhat.com>
+
+commit b52a360b2aa1c59ba9970fb0f52bbb093fcc7a24 upstream.
+
+Fixes a possible memory corruption when the link is larger than
+MAXPATHLEN and XFS_DEBUG is not enabled. This also remove the
+S_ISLNK assert, since the inode mode is checked previously in
+xfs_readlink_by_handle() and via VFS.
+
+Updated to address concerns raised by Ben Hutchings about the loose
+attention paid to 32- vs 64-bit values, and the lack of handling a
+potentially negative pathlen value:
+ - Changed type of "pathlen" to be xfs_fsize_t, to match that of
+ ip->i_d.di_size
+ - Added checking for a negative pathlen to the too-long pathlen
+ test, and generalized the message that gets reported in that case
+ to reflect the change
+As a result, if a negative pathlen were encountered, this function
+would return EFSCORRUPTED (and would fail an assertion for a debug
+build)--just as would a too-long pathlen.
+
+Signed-off-by: Alex Elder <aelder@sgi.com>
+Signed-off-by: Carlos Maiolino <cmaiolino@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Cc: Ben Myers <bpm@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ fs/xfs/xfs_vnodeops.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/fs/xfs/xfs_vnodeops.c
++++ b/fs/xfs/xfs_vnodeops.c
+@@ -113,7 +113,7 @@ xfs_readlink(
+ char *link)
+ {
+ xfs_mount_t *mp = ip->i_mount;
+- int pathlen;
++ xfs_fsize_t pathlen;
+ int error = 0;
+
+ trace_xfs_readlink(ip);
+@@ -123,13 +123,19 @@ xfs_readlink(
+
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+
+- ASSERT(S_ISLNK(ip->i_d.di_mode));
+- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
+-
+ pathlen = ip->i_d.di_size;
+ if (!pathlen)
+ goto out;
+
++ if (pathlen < 0 || pathlen > MAXPATHLEN) {
++ xfs_alert(mp, "%s: inode (%llu) bad symlink length (%lld)",
++ __func__, (unsigned long long) ip->i_ino,
++ (long long) pathlen);
++ ASSERT(0);
++ return XFS_ERROR(EFSCORRUPTED);
++ }
++
++
+ if (ip->i_df.if_flags & XFS_IFINLINE) {
+ memcpy(link, ip->i_df.if_u1.if_data, pathlen);
+ link[pathlen] = '\0';
--- /dev/null
+From bpm@sgi.com Fri Dec 2 15:10:17 2011
+From: Mitsuo Hayasaka <mitsuo.hayasaka.hu@hitachi.com>
+Date: Thu, 1 Dec 2011 17:27:41 -0600
+Subject: xfs: Return -EIO when xfs_vn_getattr() failed
+To: stable@vger.kernel.org, greg@kroah.com
+Cc: xfs@oss.sgi.com, bpm@sgi.com, Mitsuo Hayasaka <mitsuo.hayasaka.hu@hitachi.com>, Alex Elder <aelder@sgi.com>
+Message-ID: <1322782064-1723-4-git-send-email-bpm@sgi.com>
+
+
+From: Mitsuo Hayasaka <mitsuo.hayasaka.hu@hitachi.com>
+
+commit ed32201e65e15f3e6955cb84cbb544b08f81e5a5 upstream.
+
+An attribute of inode can be fetched via xfs_vn_getattr() in XFS.
+Currently it returns EIO, not negative value, when it failed. As a
+result, the system call returns not negative value even though an
+error occured. The stat(2), ls and mv commands cannot handle this
+error and do not work correctly.
+
+This patch fixes this bug, and returns -EIO, not EIO when an error
+is detected in xfs_vn_getattr().
+
+Signed-off-by: Mitsuo Hayasaka <mitsuo.hayasaka.hu@hitachi.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Alex Elder <aelder@sgi.com>
+Cc: Ben Myers <bpm@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ fs/xfs/xfs_iops.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -465,7 +465,7 @@ xfs_vn_getattr(
+ trace_xfs_getattr(ip);
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+- return XFS_ERROR(EIO);
++ return -XFS_ERROR(EIO);
+
+ stat->size = XFS_ISIZE(ip);
+ stat->dev = inode->i_sb->s_dev;
--- /dev/null
+From bpm@sgi.com Fri Dec 2 15:11:21 2011
+From: Mitsuo Hayasaka <mitsuo.hayasaka.hu@hitachi.com>
+Date: Thu, 1 Dec 2011 17:27:44 -0600
+Subject: xfs: use doalloc flag in xfs_qm_dqattach_one()
+To: stable@vger.kernel.org, greg@kroah.com
+Cc: xfs@oss.sgi.com, bpm@sgi.com, Mitsuo Hayasaka <mitsuo.hayasaka.hu@hitachi.com>, Alex Elder <aelder@sgi.com>, Christoph Hellwig <hch@infradead.org>
+Message-ID: <1322782064-1723-7-git-send-email-bpm@sgi.com>
+
+From: Mitsuo Hayasaka <mitsuo.hayasaka.hu@hitachi.com>
+
+commit db3e74b582915d66e10b0c73a62763418f54c340 upstream.
+
+The doalloc arg in xfs_qm_dqattach_one() is a flag that indicates
+whether a new area to handle quota information will be allocated
+if needed. Originally, it was passed to xfs_qm_dqget(), but has
+been removed by the following commit (probably by mistake):
+
+ commit 8e9b6e7fa4544ea8a0e030c8987b918509c8ff47
+ Author: Christoph Hellwig <hch@lst.de>
+ Date: Sun Feb 8 21:51:42 2009 +0100
+
+ xfs: remove the unused XFS_QMOPT_DQLOCK flag
+
+As the result, xfs_qm_dqget() called from xfs_qm_dqattach_one()
+never allocates the new area even if it is needed.
+
+This patch gives the doalloc arg to xfs_qm_dqget() in
+xfs_qm_dqattach_one() to fix this problem.
+
+Signed-off-by: Mitsuo Hayasaka <mitsuo.hayasaka.hu@hitachi.com>
+Cc: Alex Elder <aelder@sgi.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Ben Myers <bpm@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ fs/xfs/xfs_qm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -674,7 +674,8 @@ xfs_qm_dqattach_one(
+ * disk and we didn't ask it to allocate;
+ * ESRCH if quotas got turned off suddenly.
+ */
+- error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp);
++ error = xfs_qm_dqget(ip->i_mount, ip, id, type,
++ doalloc | XFS_QMOPT_DOWARN, &dqp);
+ if (error)
+ return error;
+