--- /dev/null
+From b63038d6f4ca5d1849ce01d9fc5bb9cb426dec73 Mon Sep 17 00:00:00 2001
+From: Ulf Hansson <ulf.hansson@stericsson.com>
+Date: Tue, 13 Dec 2011 16:51:04 +0100
+Subject: ARM: 7214/1: mmc: mmci: Fixup handling of MCI_STARTBITERR
+
+From: Ulf Hansson <ulf.hansson@stericsson.com>
+
+commit b63038d6f4ca5d1849ce01d9fc5bb9cb426dec73 upstream.
+
+The start bit error interrupt was previously enabled and then correctly
+cleared, but never acted upon. Now we also handle it correctly, by
+including MCI_STARTBITERR in the error masks checked by the data and
+irq handlers.
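+
+As a rough illustration (not the driver code), the fix amounts to adding
+the start bit error flag to the error mask that the status register is
+tested against; the bit values below are illustrative stand-ins for the
+MCI_* definitions in mmci.h:
+
+#include <stdio.h>
+
+#define MCI_DATACRCFAIL  (1u << 1)
+#define MCI_DATATIMEOUT  (1u << 3)
+#define MCI_TXUNDERRUN   (1u << 4)
+#define MCI_RXOVERRUN    (1u << 5)
+#define MCI_STARTBITERR  (1u << 9)
+
+#define MCI_DATA_ERR_MASK (MCI_DATACRCFAIL | MCI_DATATIMEOUT | \
+                           MCI_STARTBITERR | MCI_TXUNDERRUN | MCI_RXOVERRUN)
+
+int main(void)
+{
+	unsigned int status = MCI_STARTBITERR;	/* controller flags a start bit error */
+
+	/* Without MCI_STARTBITERR in the mask, this branch was never taken
+	 * and the interrupt, although enabled and cleared, went unhandled. */
+	if (status & MCI_DATA_ERR_MASK)
+		printf("data error, status %08x\n", status);
+	return 0;
+}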
+
+Tested-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Ulf Hansson <ulf.hansson@stericsson.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mmc/host/mmci.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -673,7 +673,8 @@ mmci_data_irq(struct mmci_host *host, st
+ unsigned int status)
+ {
+ /* First check for errors */
+- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
++ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
++ MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ u32 remain, success;
+
+ /* Terminate the DMA transfer */
+@@ -953,8 +954,9 @@ static irqreturn_t mmci_irq(int irq, voi
+ dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
+
+ data = host->data;
+- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
+- MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
++ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
++ MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
++ MCI_DATABLOCKEND) && data)
+ mmci_data_irq(host, data, status);
+
+ cmd = host->cmd;
--- /dev/null
+From 3b6e3c73851a9a4b0e6ed9d378206341dd65e8a5 Mon Sep 17 00:00:00 2001
+From: Ulf Hansson <ulf.hansson@stericsson.com>
+Date: Tue, 13 Dec 2011 16:58:43 +0100
+Subject: ARM: 7220/1: mmc: mmci: Fixup error handling for dma
+
+From: Ulf Hansson <ulf.hansson@stericsson.com>
+
+commit 3b6e3c73851a9a4b0e6ed9d378206341dd65e8a5 upstream.
+
+When a cmd irq is received during an ongoing data transfer
+with dma, the dma job was never terminated. This is now
+corrected.
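+
+A minimal model of the corrected ordering (hypothetical helpers standing
+in for dma_inprogress(), mmci_dma_data_error() and mmci_stop_data()):
+
+#include <stdbool.h>
+#include <stdio.h>
+
+static bool dma_in_progress = true;
+
+static void dma_data_error(void)	/* stands in for mmci_dma_data_error() */
+{
+	dma_in_progress = false;
+	printf("DMA job terminated\n");
+}
+
+static void stop_data(void)		/* stands in for mmci_stop_data() */
+{
+	printf("data path torn down\n");
+}
+
+/* On a cmd error with a data transfer still pending, the DMA job must be
+ * terminated before the data path is stopped; the missing first step is
+ * the bug fixed here. */
+static void cmd_error_path(void)
+{
+	if (dma_in_progress)
+		dma_data_error();
+	stop_data();
+}
+
+int main(void)
+{
+	cmd_error_path();
+	return 0;
+}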
+
+Tested-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Per Forlin <per.forlin@stericsson.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@stericsson.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mmc/host/mmci.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -753,8 +753,12 @@ mmci_cmd_irq(struct mmci_host *host, str
+ }
+
+ if (!cmd->data || cmd->error) {
+- if (host->data)
++ if (host->data) {
++ /* Terminate the DMA transfer */
++ if (dma_inprogress(host))
++ mmci_dma_data_error(host);
+ mmci_stop_data(host);
++ }
+ mmci_request_end(host, cmd->mrq);
+ } else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ mmci_start_data(host, cmd->data);
--- /dev/null
+From 5776ac2eb33164c77cdb4d2b48feee15616eaba3 Mon Sep 17 00:00:00 2001
+From: Jason Chen <jason.chen@linaro.org>
+Date: Mon, 19 Dec 2011 11:23:28 +0800
+Subject: ARM:imx:fix pwm period value
+
+From: Jason Chen <jason.chen@linaro.org>
+
+commit 5776ac2eb33164c77cdb4d2b48feee15616eaba3 upstream.
+
+According to the i.MX PWM reference manual, the real period value is the
+PERIOD value in the PWMPR register plus 2:
+
+PWMO (Hz) = PCLK(Hz) / (period + 2)
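+
+A worked example of the adjustment (the clock and period values are
+illustrative, not taken from a specific board):
+
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	uint64_t pclk_hz   = 66000000;	/* assumed peripheral clock */
+	uint64_t period_ns = 1000000;	/* requested 1 ms period */
+
+	/* cycles the caller actually asked for */
+	uint64_t period_cycles = pclk_hz * period_ns / 1000000000ULL;
+	/* value to program into PWMPR: the hardware adds the 2 back */
+	uint64_t pwmpr = period_cycles > 2 ? period_cycles - 2 : 0;
+
+	printf("period_cycles=%llu PWMPR=%llu PWMO=%llu Hz\n",
+	       (unsigned long long)period_cycles,
+	       (unsigned long long)pwmpr,
+	       (unsigned long long)(pclk_hz / (pwmpr + 2)));
+	return 0;
+}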
+
+Signed-off-by: Jason Chen <jason.chen@linaro.org>
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/plat-mxc/pwm.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/arm/plat-mxc/pwm.c
++++ b/arch/arm/plat-mxc/pwm.c
+@@ -77,6 +77,15 @@ int pwm_config(struct pwm_device *pwm, i
+ do_div(c, period_ns);
+ duty_cycles = c;
+
++ /*
++ * according to imx pwm RM, the real period value should be
++ * PERIOD value in PWMPR plus 2.
++ */
++ if (period_cycles > 2)
++ period_cycles -= 2;
++ else
++ period_cycles = 0;
++
+ writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
+ writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
+
--- /dev/null
+From 77e00f2ea94abee1ad13bdfde19cf7aa25992b0e Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 21 Dec 2011 11:58:17 -0500
+Subject: drm/radeon/kms: bail on BTC parts if MC ucode is missing
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 77e00f2ea94abee1ad13bdfde19cf7aa25992b0e upstream.
+
+We already do this for cayman; we need to also do it for
+BTC parts. The default memory and voltage setup is not
+adequate for advanced operation. Continuing will
+result in an unusable display.
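+
+A minimal sketch of the guard pattern (userspace stand-ins for
+rdev->mc_fw, the IGP flag and ASIC_IS_DCE5(); the names and values are
+hypothetical):
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+struct toy_radeon_device { const void *mc_fw; bool is_igp; bool is_dce5; };
+
+/* Refuse to finish init when the MC ucode that does the memory and
+ * voltage setup is absent on a discrete DCE5 (BTC/NI) part. */
+static int evergreen_init_check(struct toy_radeon_device *rdev)
+{
+	if (rdev->is_dce5 && !rdev->mc_fw && !rdev->is_igp) {
+		fprintf(stderr, "radeon: MC ucode required for NI+.\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int main(void)
+{
+	struct toy_radeon_device btc = { NULL, false, true };
+
+	printf("init: %d\n", evergreen_init_check(&btc));	/* -EINVAL */
+	return 0;
+}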
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: Jean Delvare <khali@linux-fr.org>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/evergreen.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -3258,6 +3258,18 @@ int evergreen_init(struct radeon_device
+ rdev->accel_working = false;
+ }
+ }
++
++ /* Don't start up if the MC ucode is missing on BTC parts.
++ * The default clocks and voltages before the MC ucode
++ * is loaded are not sufficient for advanced operations.
++ */
++ if (ASIC_IS_DCE5(rdev)) {
++ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
++ DRM_ERROR("radeon: MC ucode required for NI+.\n");
++ return -EINVAL;
++ }
++ }
++
+ return 0;
+ }
+
--- /dev/null
+From e6780f7243eddb133cc20ec37fa69317c218b709 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Sat, 31 Dec 2011 11:44:01 -0800
+Subject: futex: Fix uninterruptible loop due to gate_area
+
+From: Hugh Dickins <hughd@google.com>
+
+commit e6780f7243eddb133cc20ec37fa69317c218b709 upstream.
+
+It was found (by Sasha) that if you use a futex located in the gate
+area we get stuck in an uninterruptible infinite loop, much like the
+ZERO_PAGE issue.
+
+While looking at this problem, PeterZ realized you'll get into similar
+trouble when hitting any install_special_mapping() mapping. And are there
+still drivers setting up their own special mmaps without page->mapping,
+and without special VM or pte flags to make get_user_pages fail?
+
+In most cases, if page->mapping is NULL, we do not need to retry at all:
+Linus points out that even /proc/sys/vm/drop_caches poses no problem,
+because it ends up using remove_mapping(), which takes care not to
+interfere when the page reference count is raised.
+
+But there is still one case which does need a retry: if memory pressure
+called shmem_writepage in between get_user_pages_fast dropping page
+table lock and our acquiring page lock, then the page gets switched from
+filecache to swapcache (and ->mapping set to NULL) whatever the refcount.
+Fault it back in to get the page->mapping needed for key->shared.inode.
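+
+A toy model of the resulting retry logic (struct page and the helpers
+here are stand-ins, not the kernel's):
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+struct page { void *mapping; bool swapcache; };
+
+static int resolve_mapping(struct page *p)
+{
+	int attempts = 0;
+
+again:
+	attempts++;
+	if (!p->mapping) {
+		if (p->swapcache) {
+			/* shmem_writepage moved the page from filecache to
+			 * swapcache under us: pretend a re-fault restored
+			 * the mapping, then retry. */
+			p->mapping = (void *)1;
+			p->swapcache = false;
+			goto again;
+		}
+		/* gate area, ZERO_PAGE, special mapping, truncation:
+		 * fail instead of looping uninterruptibly. */
+		return -EFAULT;
+	}
+	printf("mapping found after %d attempt(s)\n", attempts);
+	return 0;
+}
+
+int main(void)
+{
+	struct page swizzled = { NULL, true };
+	struct page special  = { NULL, false };
+
+	resolve_mapping(&swizzled);	/* retries once, then succeeds */
+	printf("special mapping: %d\n", resolve_mapping(&special));
+	return 0;
+}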
+
+Reported-by: Sasha Levin <levinsasha928@gmail.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/futex.c | 28 ++++++++++++++++++++--------
+ 1 file changed, 20 insertions(+), 8 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -314,17 +314,29 @@ again:
+ #endif
+
+ lock_page(page_head);
++
++ /*
++ * If page_head->mapping is NULL, then it cannot be a PageAnon
++ * page; but it might be the ZERO_PAGE or in the gate area or
++ * in a special mapping (all cases which we are happy to fail);
++ * or it may have been a good file page when get_user_pages_fast
++ * found it, but truncated or holepunched or subjected to
++ * invalidate_complete_page2 before we got the page lock (also
++ * cases which we are happy to fail). And we hold a reference,
++ * so refcount care in invalidate_complete_page's remove_mapping
++ * prevents drop_caches from setting mapping to NULL beneath us.
++ *
++ * The case we do have to guard against is when memory pressure made
++ * shmem_writepage move it from filecache to swapcache beneath us:
++ * an unlikely race, but we do need to retry for page_head->mapping.
++ */
+ if (!page_head->mapping) {
++ int shmem_swizzled = PageSwapCache(page_head);
+ unlock_page(page_head);
+ put_page(page_head);
+- /*
+- * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
+- * trying to find one. RW mapping would have COW'd (and thus
+- * have a mapping) so this page is RO and won't ever change.
+- */
+- if ((page_head == ZERO_PAGE(address)))
+- return -EFAULT;
+- goto again;
++ if (shmem_swizzled)
++ goto again;
++ return -EFAULT;
+ }
+
+ /*
--- /dev/null
+From b0365c8d0cb6e79eb5f21418ae61ab511f31b575 Mon Sep 17 00:00:00 2001
+From: Hillf Danton <dhillf@gmail.com>
+Date: Wed, 28 Dec 2011 15:57:16 -0800
+Subject: mm: hugetlb: fix non-atomic enqueue of huge page
+
+From: Hillf Danton <dhillf@gmail.com>
+
+commit b0365c8d0cb6e79eb5f21418ae61ab511f31b575 upstream.
+
+A huge page must be enqueued under the protection of hugetlb_lock for the
+operation to be atomic and safe, so drop hugetlb_lock only after the
+surplus pages have been enqueued.
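+
+A compact userspace model of the fix (a pthread mutex standing in for
+hugetlb_lock; the page list is reduced to a counter):
+
+#include <pthread.h>
+#include <stdio.h>
+
+static pthread_mutex_t hugetlb_lock = PTHREAD_MUTEX_INITIALIZER;
+static int free_huge_pages;	/* must only change under hugetlb_lock */
+
+static void enqueue_huge_page(void)
+{
+	free_huge_pages++;
+}
+
+static void gather_surplus_pages(int surplus)
+{
+	pthread_mutex_lock(&hugetlb_lock);
+	/* Before the fix, the unlock sat here and the loop below ran
+	 * outside the lock, making the enqueue non-atomic. */
+	while (surplus-- > 0)
+		enqueue_huge_page();
+	pthread_mutex_unlock(&hugetlb_lock);
+}
+
+int main(void)
+{
+	gather_surplus_pages(3);
+	printf("free_huge_pages=%d\n", free_huge_pages);
+	return 0;
+}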
+
+Signed-off-by: Hillf Danton <dhillf@gmail.com>
+Reviewed-by: Michal Hocko <mhocko@suse.cz>
+Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/hugetlb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -901,7 +901,6 @@ retry:
+ h->resv_huge_pages += delta;
+ ret = 0;
+
+- spin_unlock(&hugetlb_lock);
+ /* Free the needed pages to the hugetlb pool */
+ list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+ if ((--needed) < 0)
+@@ -915,6 +914,7 @@ retry:
+ VM_BUG_ON(page_count(page));
+ enqueue_huge_page(h, page);
+ }
++ spin_unlock(&hugetlb_lock);
+
+ /* Free unnecessary surplus pages to the buddy allocator */
+ free:
--- /dev/null
+From e26a51148f3ebd859bca8bf2e0f212839b447f62 Mon Sep 17 00:00:00 2001
+From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Date: Wed, 28 Dec 2011 15:57:11 -0800
+Subject: mm/mempolicy.c: refix mbind_range() vma issue
+
+From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+
+commit e26a51148f3ebd859bca8bf2e0f212839b447f62 upstream.
+
+commit 8aacc9f550 ("mm/mempolicy.c: fix pgoff in mbind vma merge") is a
+slightly incorrect fix.
+
+Why? Consider the following case.
+
+1. map 4 pages of a file at offset 0
+
+ [0123]
+
+2. map 2 pages just after the first mapping of the same file but with
+ page offset 2
+
+ [0123][23]
+
+3. mbind() 2 pages from the first mapping at offset 2.
+   mbind_range() should treat the new vma as
+
+ [0123][23]
+ |23|
+ mbind vma
+
+ but it does
+
+ [0123][23]
+ |01|
+ mbind vma
+
+ Oops: it then performs a wrong vma merge and split ([01][0123] or similar).
+
+This patch fixes it.
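+
+The arithmetic of the fix in isolation (a toy vma; the values match the
+example above):
+
+#include <stdio.h>
+
+#define PAGE_SHIFT 12
+
+struct toy_vma { unsigned long vm_start, vm_end, vm_pgoff; };
+
+int main(void)
+{
+	/* first mapping: file pages 0-3 at 0x0000-0x4000 */
+	struct toy_vma v = { 0x0, 0x4000, 0 };
+	unsigned long vmstart = 0x2000;	/* mbind starts at page 2 of the vma */
+
+	/* The buggy code passed vma->vm_pgoff (0) when splitting at vmstart,
+	 * as if the new vma still mapped file pages 0-1; offsetting by the
+	 * pages skipped inside the vma yields the correct file offset. */
+	unsigned long pgoff = v.vm_pgoff + ((vmstart - v.vm_start) >> PAGE_SHIFT);
+
+	printf("pgoff for split at 0x%lx: %lu (buggy code used %lu)\n",
+	       vmstart, pgoff, v.vm_pgoff);
+	return 0;
+}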
+
+[testcase]
+ test result - before the patch
+
+ case4: 126: test failed. expect '2,4', actual '2,2,2'
+ case5: passed
+ case6: passed
+ case7: passed
+ case8: passed
+ case_n: 246: test failed. expect '4,2', actual '1,4'
+
+ ------------[ cut here ]------------
+ kernel BUG at mm/filemap.c:135!
+ invalid opcode: 0000 [#4] SMP DEBUG_PAGEALLOC
+
+ (snip long BUG_ON messages)
+
+ test result - after the patch
+
+ case4: passed
+ case5: passed
+ case6: passed
+ case7: passed
+ case8: passed
+ case_n: passed
+
+ source: mbind_vma_test.c
+============================================================
+ #include <numaif.h>
+ #include <numa.h>
+ #include <sys/mman.h>
+ #include <stdio.h>
+ #include <unistd.h>
+ #include <stdlib.h>
+ #include <string.h>
+
+static unsigned long pagesize;
+void* mmap_addr;
+struct bitmask *nmask;
+char buf[1024];
+FILE *file;
+char retbuf[10240] = "";
+int mapped_fd;
+
+char *rubysrc = "ruby -e '\
+ pid = %d; \
+ vstart = 0x%llx; \
+ vend = 0x%llx; \
+ s = `pmap -q #{pid}`; \
+ rary = []; \
+ s.each_line {|line|; \
+ ary=line.split(\" \"); \
+ addr = ary[0].to_i(16); \
+ if(vstart <= addr && addr < vend) then \
+ rary.push(ary[1].to_i()/4); \
+ end; \
+ }; \
+ print rary.join(\",\"); \
+'";
+
+void init(void)
+{
+ void* addr;
+ char buf[128];
+
+ nmask = numa_allocate_nodemask();
+ numa_bitmask_setbit(nmask, 0);
+
+ pagesize = getpagesize();
+
+ sprintf(buf, "%s", "mbind_vma_XXXXXX");
+ mapped_fd = mkstemp(buf);
+ if (mapped_fd == -1)
+ perror("mkstemp "), exit(1);
+ unlink(buf);
+
+ if (lseek(mapped_fd, pagesize*8, SEEK_SET) < 0)
+ perror("lseek "), exit(1);
+ if (write(mapped_fd, "\0", 1) < 0)
+ perror("write "), exit(1);
+
+ addr = mmap(NULL, pagesize*8, PROT_NONE,
+ MAP_SHARED, mapped_fd, 0);
+ if (addr == MAP_FAILED)
+ perror("mmap "), exit(1);
+
+ if (mprotect(addr+pagesize, pagesize*6, PROT_READ|PROT_WRITE) < 0)
+ perror("mprotect "), exit(1);
+
+ mmap_addr = addr + pagesize;
+
+ /* populate the pages */
+ memset(mmap_addr, 0, pagesize*6);
+}
+
+void fin(void)
+{
+ void* addr = mmap_addr - pagesize;
+ munmap(addr, pagesize*8);
+
+ memset(buf, 0, sizeof(buf));
+ memset(retbuf, 0, sizeof(retbuf));
+}
+
+void mem_bind(int index, int len)
+{
+ int err;
+
+ err = mbind(mmap_addr+pagesize*index, pagesize*len,
+ MPOL_BIND, nmask->maskp, nmask->size, 0);
+ if (err)
+ perror("mbind "), exit(err);
+}
+
+void mem_interleave(int index, int len)
+{
+ int err;
+
+ err = mbind(mmap_addr+pagesize*index, pagesize*len,
+ MPOL_INTERLEAVE, nmask->maskp, nmask->size, 0);
+ if (err)
+ perror("mbind "), exit(err);
+}
+
+void mem_unbind(int index, int len)
+{
+ int err;
+
+ err = mbind(mmap_addr+pagesize*index, pagesize*len,
+ MPOL_DEFAULT, NULL, 0, 0);
+ if (err)
+ perror("mbind "), exit(err);
+}
+
+void Assert(char *expected, char *value, char *name, int line)
+{
+ if (strcmp(expected, value) == 0) {
+ fprintf(stderr, "%s: passed\n", name);
+ return;
+ }
+ else {
+ fprintf(stderr, "%s: %d: test failed. expect '%s', actual '%s'\n",
+ name, line,
+ expected, value);
+// exit(1);
+ }
+}
+
+/*
+ AAAA
+ PPPPPPNNNNNN
+ might become
+ PPNNNNNNNNNN
+ case 4 below
+*/
+void case4(void)
+{
+ init();
+ sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
+
+ mem_bind(0, 4);
+ mem_unbind(2, 2);
+
+ file = popen(buf, "r");
+ fread(retbuf, sizeof(retbuf), 1, file);
+ Assert("2,4", retbuf, "case4", __LINE__);
+
+ fin();
+}
+
+/*
+ AAAA
+ PPPPPPNNNNNN
+ might become
+ PPPPPPPPPPNN
+ case 5 below
+*/
+void case5(void)
+{
+ init();
+ sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
+
+ mem_bind(0, 2);
+ mem_bind(2, 2);
+
+ file = popen(buf, "r");
+ fread(retbuf, sizeof(retbuf), 1, file);
+ Assert("4,2", retbuf, "case5", __LINE__);
+
+ fin();
+}
+
+/*
+ AAAA
+ PPPPNNNNXXXX
+ might become
+ PPPPPPPPPPPP 6
+*/
+void case6(void)
+{
+ init();
+ sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
+
+ mem_bind(0, 2);
+ mem_bind(4, 2);
+ mem_bind(2, 2);
+
+ file = popen(buf, "r");
+ fread(retbuf, sizeof(retbuf), 1, file);
+ Assert("6", retbuf, "case6", __LINE__);
+
+ fin();
+}
+
+/*
+ AAAA
+PPPPNNNNXXXX
+might become
+PPPPPPPPXXXX 7
+*/
+void case7(void)
+{
+ init();
+ sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
+
+ mem_bind(0, 2);
+ mem_interleave(4, 2);
+ mem_bind(2, 2);
+
+ file = popen(buf, "r");
+ fread(retbuf, sizeof(retbuf), 1, file);
+ Assert("4,2", retbuf, "case7", __LINE__);
+
+ fin();
+}
+
+/*
+ AAAA
+PPPPNNNNXXXX
+might become
+PPPPNNNNNNNN 8
+*/
+void case8(void)
+{
+ init();
+ sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
+
+ mem_bind(0, 2);
+ mem_interleave(4, 2);
+ mem_interleave(2, 2);
+
+ file = popen(buf, "r");
+ fread(retbuf, sizeof(retbuf), 1, file);
+ Assert("2,4", retbuf, "case8", __LINE__);
+
+ fin();
+}
+
+void case_n(void)
+{
+ init();
+ sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
+
+ /* make redundant mappings [0][1234][34][7] */
+ mmap(mmap_addr + pagesize*4, pagesize*2, PROT_READ|PROT_WRITE,
+ MAP_FIXED|MAP_SHARED, mapped_fd, pagesize*3);
+
+ /* Expect to do nothing. */
+ mem_unbind(2, 2);
+
+ file = popen(buf, "r");
+ fread(retbuf, sizeof(retbuf), 1, file);
+ Assert("4,2", retbuf, "case_n", __LINE__);
+
+ fin();
+}
+
+int main(int argc, char** argv)
+{
+ case4();
+ case5();
+ case6();
+ case7();
+ case8();
+ case_n();
+
+ return 0;
+}
+=============================================================
+
+Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Minchan Kim <minchan.kim@gmail.com>
+Cc: Caspar Zhang <caspar@casparzhang.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+Cc: Mel Gorman <mel@csn.ul.ie>
+Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/mempolicy.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -636,6 +636,7 @@ static int mbind_range(struct mm_struct
+ struct vm_area_struct *prev;
+ struct vm_area_struct *vma;
+ int err = 0;
++ pgoff_t pgoff;
+ unsigned long vmstart;
+ unsigned long vmend;
+
+@@ -643,13 +644,21 @@ static int mbind_range(struct mm_struct
+ if (!vma || vma->vm_start > start)
+ return -EFAULT;
+
++ if (start > vma->vm_start)
++ prev = vma;
++
+ for (; vma && vma->vm_start < end; prev = vma, vma = next) {
+ next = vma->vm_next;
+ vmstart = max(start, vma->vm_start);
+ vmend = min(end, vma->vm_end);
+
++ if (mpol_equal(vma_policy(vma), new_pol))
++ continue;
++
++ pgoff = vma->vm_pgoff +
++ ((vmstart - vma->vm_start) >> PAGE_SHIFT);
+ prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
+- vma->anon_vma, vma->vm_file, vma->vm_pgoff,
++ vma->anon_vma, vma->vm_file, pgoff,
+ new_pol);
+ if (prev) {
+ vma = prev;
--- /dev/null
+From 55205c916e179e09773d98d290334d319f45ac6b Mon Sep 17 00:00:00 2001
+From: Vladimir Zapolskiy <vladimir.zapolskiy@nokia.com>
+Date: Thu, 22 Dec 2011 16:15:40 +0100
+Subject: oprofile, arm/sh: Fix oprofile_arch_exit() linkage issue
+
+From: Vladimir Zapolskiy <vladimir.zapolskiy@nokia.com>
+
+commit 55205c916e179e09773d98d290334d319f45ac6b upstream.
+
+This change fixes a linking problem, which happens if oprofile
+is selected to be compiled as built-in:
+
+ `oprofile_arch_exit' referenced in section `.init.text' of
+ arch/arm/oprofile/built-in.o: defined in discarded section
+ `.exit.text' of arch/arm/oprofile/built-in.o
+
+The problem appeared after commit 87121ca504, which
+introduced oprofile_arch_exit() calls from an __init function. Note
+that the aforementioned commit has been backported to stable
+branches, and the problem is known to be reproducible at least
+with the 3.0.13 and 3.1.5 kernels.
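+
+A condensed sketch of the failure mode (a fragment, not the driver
+source): for a built-in object, code in .exit.text is discarded at link
+time while .init.text survives until boot completes, so an error path in
+an __init function must not call an __exit function.
+
+#include <linux/errno.h>
+#include <linux/init.h>
+
+void __exit oprofile_arch_exit(void);	/* lands in discarded .exit.text */
+
+static int __init oprofile_init(void)
+{
+	int err = -ENODEV;		/* assume arch init failed */
+
+	if (err)
+		oprofile_arch_exit();	/* link error when built-in */
+	return err;
+}
+
+Dropping the __exit annotation, as done below, keeps the function in
+.text, where the __init caller can always reach it.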
+
+Signed-off-by: Vladimir Zapolskiy <vladimir.zapolskiy@nokia.com>
+Signed-off-by: Robert Richter <robert.richter@amd.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: oprofile-list <oprofile-list@lists.sourceforge.net>
+Link: http://lkml.kernel.org/r/20111222151540.GB16765@erda.amd.com
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/oprofile/common.c | 2 +-
+ arch/sh/oprofile/common.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/oprofile/common.c
++++ b/arch/arm/oprofile/common.c
+@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct opr
+ return oprofile_perf_init(ops);
+ }
+
+-void __exit oprofile_arch_exit(void)
++void oprofile_arch_exit(void)
+ {
+ oprofile_perf_exit();
+ }
+--- a/arch/sh/oprofile/common.c
++++ b/arch/sh/oprofile/common.c
+@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct opr
+ return oprofile_perf_init(ops);
+ }
+
+-void __exit oprofile_arch_exit(void)
++void oprofile_arch_exit(void)
+ {
+ oprofile_perf_exit();
+ kfree(sh_pmu_op_name);
+@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct opr
+ ops->backtrace = sh_backtrace;
+ return -ENODEV;
+ }
+-void __exit oprofile_arch_exit(void) {}
++void oprofile_arch_exit(void) {}
+ #endif /* CONFIG_HW_PERF_EVENTS */
iwlwifi-do-not-set-the-sequence-control-bit-is-not-needed.patch
iwlwifi-allow-to-switch-to-ht40-if-not-associated.patch
memcg-keep-root-group-unchanged-if-creation-fails.patch
+vfs-fix-race-between-cpu-hotplug-and-lglocks.patch
+arm-imx-fix-pwm-period-value.patch
+arm-7214-1-mmc-mmci-fixup-handling-of-mci_startbiterr.patch
+arm-7220-1-mmc-mmci-fixup-error-handling-for-dma.patch
+oprofile-arm-sh-fix-oprofile_arch_exit-linkage-issue.patch
+futex-fix-uninterruptible-loop-due-to-gate_area.patch
+watchdog-hpwdt-changes-to-handle-nx-secure-bit-in-32bit-path.patch
+drm-radeon-kms-bail-on-btc-parts-if-mc-ucode-is-missing.patch
+mm-hugetlb-fix-non-atomic-enqueue-of-huge-page.patch
+mm-mempolicy.c-refix-mbind_range-vma-issue.patch
--- /dev/null
+From e30e2fdfe56288576ee9e04dbb06b4bd5f282203 Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Thu, 22 Dec 2011 02:45:29 +0530
+Subject: VFS: Fix race between CPU hotplug and lglocks
+
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+
+commit e30e2fdfe56288576ee9e04dbb06b4bd5f282203 upstream.
+
+Currently, the *_global_[un]lock_online() routines are not at all synchronized
+with CPU hotplug. Soft lockups detected as a consequence of this race were
+reported earlier at https://lkml.org/lkml/2011/8/24/185. (Thanks to Cong Meng
+for finding out that the root cause of this issue is the race condition
+between br_write_[un]lock() and CPU hotplug, which results in the lock states
+getting messed up.)
+
+Fixing this race by just adding {get,put}_online_cpus() at appropriate places
+in *_global_[un]lock_online() is not a good option, because, then suddenly
+br_write_[un]lock() would become blocking, whereas they have been kept as
+non-blocking all this time, and we would want to keep them that way.
+
+So, overall, we want to ensure 3 things:
+1. br_write_lock() and br_write_unlock() must remain as non-blocking.
+2. The corresponding lock and unlock of the per-cpu spinlocks must not happen
+ for different sets of CPUs.
+3. Either prevent any new CPU online operation in between this lock-unlock, or
+ ensure that the newly onlined CPU does not proceed with its corresponding
+ per-cpu spinlock unlocked.
+
+To achieve all this:
+(a) We introduce a new spinlock that is taken by the *_global_lock_online()
+ routine and released by the *_global_unlock_online() routine.
+(b) We register a callback for CPU hotplug notifications, and this callback
+ takes the same spinlock as above.
+(c) We maintain a bitmap which is close to the cpu_online_mask, and once it is
+ initialized in the lock_init() code, all future updates to it are done in
+ the callback, under the above spinlock.
+(d) The above bitmap is used (instead of cpu_online_mask) while locking and
+ unlocking the per-cpu locks.
+
+The callback takes the spinlock upon the CPU_UP_PREPARE event. So, if the
+br_write_lock-unlock sequence is in progress, the callback keeps spinning,
+thus preventing the CPU online operation till the lock-unlock sequence is
+complete. This takes care of requirement (3).
+
+The bitmap that we maintain remains unmodified throughout the lock-unlock
+sequence, since all updates to it are managed by the callback, which takes
+the same spinlock as the one taken by the lock code and released only by the
+unlock routine. Combining this with (d) above, satisfies requirement (2).
+
+Overall, since we use a spinlock (mentioned in (a)) to prevent CPU hotplug
+operations from racing with br_write_lock-unlock, requirement (1) is also
+taken care of.
+
+By the way, it is to be noted that a CPU offline operation can actually run
+in parallel with our lock-unlock sequence, because our callback doesn't react
+to notifications earlier than CPU_DEAD (in order to maintain our bitmap
+properly). And this means, since we use our own bitmap (which is stale, on
+purpose) during the lock-unlock sequence, we could end up unlocking the
+per-cpu lock of an offline CPU (because we had locked it earlier, when the
+CPU was online), in order to satisfy requirement (2). But this is harmless,
+though it looks a bit awkward.
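+
+A macro-free userspace model of the scheme described in (a)-(d) above
+(pthread locks stand in for the kernel spinlocks, and CPUs are reduced
+to a fixed array):
+
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+#define NCPU 4
+
+static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER; /* (a), (b) */
+static bool lg_cpus[NCPU];			/* (c): stale-on-purpose bitmap */
+static pthread_spinlock_t percpu_lock[NCPU];
+
+static void global_lock_online(void)
+{
+	pthread_mutex_lock(&cpu_lock);		/* holds off CPU_UP_PREPARE */
+	for (int i = 0; i < NCPU; i++)
+		if (lg_cpus[i])			/* (d): use our bitmap */
+			pthread_spin_lock(&percpu_lock[i]);
+}
+
+static void global_unlock_online(void)
+{
+	for (int i = 0; i < NCPU; i++)
+		if (lg_cpus[i])			/* same set as at lock time */
+			pthread_spin_unlock(&percpu_lock[i]);
+	pthread_mutex_unlock(&cpu_lock);
+}
+
+int main(void)
+{
+	for (int i = 0; i < NCPU; i++) {
+		pthread_spin_init(&percpu_lock[i], 0);
+		lg_cpus[i] = (i < 2);		/* pretend CPUs 0 and 1 online */
+	}
+	global_lock_online();
+	puts("per-cpu locks held for a consistent CPU set");
+	global_unlock_online();
+	return 0;
+}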
+
+Debugged-by: Cong Meng <mc@linux.vnet.ibm.com>
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/lglock.h | 36 ++++++++++++++++++++++++++++++++----
+ 1 file changed, 32 insertions(+), 4 deletions(-)
+
+--- a/include/linux/lglock.h
++++ b/include/linux/lglock.h
+@@ -22,6 +22,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/lockdep.h>
+ #include <linux/percpu.h>
++#include <linux/cpu.h>
+
+ /* can make br locks by using local lock for read side, global lock for write */
+ #define br_lock_init(name) name##_lock_init()
+@@ -72,9 +73,31 @@
+
+ #define DEFINE_LGLOCK(name) \
+ \
++ DEFINE_SPINLOCK(name##_cpu_lock); \
++ cpumask_t name##_cpus __read_mostly; \
+ DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
+ DEFINE_LGLOCK_LOCKDEP(name); \
+ \
++ static int \
++ name##_lg_cpu_callback(struct notifier_block *nb, \
++ unsigned long action, void *hcpu) \
++ { \
++ switch (action & ~CPU_TASKS_FROZEN) { \
++ case CPU_UP_PREPARE: \
++ spin_lock(&name##_cpu_lock); \
++ cpu_set((unsigned long)hcpu, name##_cpus); \
++ spin_unlock(&name##_cpu_lock); \
++ break; \
++ case CPU_UP_CANCELED: case CPU_DEAD: \
++ spin_lock(&name##_cpu_lock); \
++ cpu_clear((unsigned long)hcpu, name##_cpus); \
++ spin_unlock(&name##_cpu_lock); \
++ } \
++ return NOTIFY_OK; \
++ } \
++ static struct notifier_block name##_lg_cpu_notifier = { \
++ .notifier_call = name##_lg_cpu_callback, \
++ }; \
+ void name##_lock_init(void) { \
+ int i; \
+ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+@@ -83,6 +106,11 @@
+ lock = &per_cpu(name##_lock, i); \
+ *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
+ } \
++ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
++ get_online_cpus(); \
++ for_each_online_cpu(i) \
++ cpu_set(i, name##_cpus); \
++ put_online_cpus(); \
+ } \
+ EXPORT_SYMBOL(name##_lock_init); \
+ \
+@@ -124,9 +152,9 @@
+ \
+ void name##_global_lock_online(void) { \
+ int i; \
+- preempt_disable(); \
++ spin_lock(&name##_cpu_lock); \
+ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+- for_each_online_cpu(i) { \
++ for_each_cpu(i, &name##_cpus) { \
+ arch_spinlock_t *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ arch_spin_lock(lock); \
+@@ -137,12 +165,12 @@
+ void name##_global_unlock_online(void) { \
+ int i; \
+ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+- for_each_online_cpu(i) { \
++ for_each_cpu(i, &name##_cpus) { \
+ arch_spinlock_t *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ arch_spin_unlock(lock); \
+ } \
+- preempt_enable(); \
++ spin_unlock(&name##_cpu_lock); \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock_online); \
+ \
--- /dev/null
+From e67d668e147c3b4fec638c9e0ace04319f5ceccd Mon Sep 17 00:00:00 2001
+From: "Mingarelli, Thomas" <Thomas.Mingarelli@hp.com>
+Date: Mon, 7 Nov 2011 10:59:00 +0100
+Subject: watchdog: hpwdt: Changes to handle NX secure bit in 32bit path
+
+From: "Mingarelli, Thomas" <Thomas.Mingarelli@hp.com>
+
+commit e67d668e147c3b4fec638c9e0ace04319f5ceccd upstream.
+
+This patch makes use of the set_memory_x() kernel API in order to make
+the BIOS calls needed to source NMIs executable.
+
+This is needed for SLES11 SP2 and the latest upstream kernel, as the NX
+Execute Disable protection appears to have expanded to cover these regions.
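+
+A userspace analogue of what set_memory_x() does for the BIOS pages
+(mprotect() plays the same role here; the bytes encode "mov eax, 42; ret"
+for x86, and the 4096-byte page size is assumed):
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+
+static const unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };
+
+int main(void)
+{
+	void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+	if (page == MAP_FAILED)
+		return 1;
+	memcpy(page, code, sizeof(code));
+
+	/* Without this step, calling into the page faults under NX,
+	 * exactly as the BIOS calls did before the patch. */
+	if (mprotect(page, 4096, PROT_READ | PROT_EXEC) < 0)
+		return 1;
+
+	int (*fn)(void) = (int (*)(void))page;
+	printf("returned %d\n", fn());
+	return 0;
+}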
+
+Signed-off-by: Thomas Mingarelli <thomas.mingarelli@hp.com>
+Signed-off-by: Wim Van Sebroeck <wim@iguana.be>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/watchdog/hpwdt.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -230,6 +230,7 @@ static int __devinit cru_detect(unsigned
+
+ cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
+
++ set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
+ asminline_call(&cmn_regs, bios32_entrypoint);
+
+ if (cmn_regs.u1.ral != 0) {
+@@ -247,8 +248,10 @@ static int __devinit cru_detect(unsigned
+ if ((physical_bios_base + physical_bios_offset)) {
+ cru_rom_addr =
+ ioremap(cru_physical_address, cru_length);
+- if (cru_rom_addr)
++ if (cru_rom_addr) {
++ set_memory_x((unsigned long)cru_rom_addr, cru_length);
+ retval = 0;
++ }
+ }
+
+ printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n",