--- /dev/null
+From 0e7f7bcc3fc87489cda5aa6aff8ce40eed912279 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Tue, 7 May 2013 16:57:06 +0100
+Subject: arm64: Ignore the 'write' ESR flag on cache maintenance faults
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 0e7f7bcc3fc87489cda5aa6aff8ce40eed912279 upstream.
+
+ESR.WnR bit is always set on data cache maintenance faults even though
+the page is not required to have write permission. If a translation
+fault (page not yet mapped) happens for read-only user address range,
+Linux incorrectly assumes a permission fault. This patch adds the check
+of the ESR.CM bit during the page fault handling to ignore the 'write'
+flag.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Reported-by: Tim Northover <Tim.Northover@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/fault.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -148,6 +148,7 @@ void do_bad_area(unsigned long addr, uns
+ #define VM_FAULT_BADACCESS 0x020000
+
+ #define ESR_WRITE (1 << 6)
++#define ESR_CM (1 << 8)
+ #define ESR_LNX_EXEC (1 << 24)
+
+ /*
+@@ -206,7 +207,7 @@ static int __kprobes do_page_fault(unsig
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ int fault, sig, code;
+- int write = esr & ESR_WRITE;
++ bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (write ? FAULT_FLAG_WRITE : 0);
+
--- /dev/null
+From e5072664f8237cf53b0bd68a51aa1a7bc69061c5 Mon Sep 17 00:00:00 2001
+From: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
+Date: Tue, 9 Apr 2013 15:01:21 +0200
+Subject: blkcg: fix "scheduling while atomic" in blk_queue_bypass_start
+
+From: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
+
+commit e5072664f8237cf53b0bd68a51aa1a7bc69061c5 upstream.
+
+Since 749fefe677 in v3.7 ("block: lift the initial queue bypass mode
+on blk_register_queue() instead of blk_init_allocated_queue()"),
+the following warning appears when multipath is used with CONFIG_PREEMPT=y.
+
+This patch moves blk_queue_bypass_start() before radix_tree_preload()
+to avoid the sleeping call while preemption is disabled.
+
+ BUG: scheduling while atomic: multipath/2460/0x00000002
+ 1 lock held by multipath/2460:
+ #0: (&md->type_lock){......}, at: [<ffffffffa019fb05>] dm_lock_md_type+0x17/0x19 [dm_mod]
+ Modules linked in: ...
+ Pid: 2460, comm: multipath Tainted: G W 3.7.0-rc2 #1
+ Call Trace:
+ [<ffffffff810723ae>] __schedule_bug+0x6a/0x78
+ [<ffffffff81428ba2>] __schedule+0xb4/0x5e0
+ [<ffffffff814291e6>] schedule+0x64/0x66
+ [<ffffffff8142773a>] schedule_timeout+0x39/0xf8
+ [<ffffffff8108ad5f>] ? put_lock_stats+0xe/0x29
+ [<ffffffff8108ae30>] ? lock_release_holdtime+0xb6/0xbb
+ [<ffffffff814289e3>] wait_for_common+0x9d/0xee
+ [<ffffffff8107526c>] ? try_to_wake_up+0x206/0x206
+ [<ffffffff810c0eb8>] ? kfree_call_rcu+0x1c/0x1c
+ [<ffffffff81428aec>] wait_for_completion+0x1d/0x1f
+ [<ffffffff810611f9>] wait_rcu_gp+0x5d/0x7a
+ [<ffffffff81061216>] ? wait_rcu_gp+0x7a/0x7a
+ [<ffffffff8106fb18>] ? complete+0x21/0x53
+ [<ffffffff810c0556>] synchronize_rcu+0x1e/0x20
+ [<ffffffff811dd903>] blk_queue_bypass_start+0x5d/0x62
+ [<ffffffff811ee109>] blkcg_activate_policy+0x73/0x270
+ [<ffffffff81130521>] ? kmem_cache_alloc_node_trace+0xc7/0x108
+ [<ffffffff811f04b3>] cfq_init_queue+0x80/0x28e
+ [<ffffffffa01a1600>] ? dm_blk_ioctl+0xa7/0xa7 [dm_mod]
+ [<ffffffff811d8c41>] elevator_init+0xe1/0x115
+ [<ffffffff811e229f>] ? blk_queue_make_request+0x54/0x59
+ [<ffffffff811dd743>] blk_init_allocated_queue+0x8c/0x9e
+ [<ffffffffa019ffcd>] dm_setup_md_queue+0x36/0xaa [dm_mod]
+ [<ffffffffa01a60e6>] table_load+0x1bd/0x2c8 [dm_mod]
+ [<ffffffffa01a7026>] ctl_ioctl+0x1d6/0x236 [dm_mod]
+ [<ffffffffa01a5f29>] ? table_clear+0xaa/0xaa [dm_mod]
+ [<ffffffffa01a7099>] dm_ctl_ioctl+0x13/0x17 [dm_mod]
+ [<ffffffff811479fc>] do_vfs_ioctl+0x3fb/0x441
+ [<ffffffff811b643c>] ? file_has_perm+0x8a/0x99
+ [<ffffffff81147aa0>] sys_ioctl+0x5e/0x82
+ [<ffffffff812010be>] ? trace_hardirqs_on_thunk+0x3a/0x3f
+ [<ffffffff814310d9>] system_call_fastpath+0x16/0x1b
+
+Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
+Acked-by: Vivek Goyal <vgoyal@redhat.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Cc: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-cgroup.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -972,10 +972,10 @@ int blkcg_activate_policy(struct request
+ if (!new_blkg)
+ return -ENOMEM;
+
+- preloaded = !radix_tree_preload(GFP_KERNEL);
+-
+ blk_queue_bypass_start(q);
+
++ preloaded = !radix_tree_preload(GFP_KERNEL);
++
+ /*
+ * Make sure the root blkg exists and count the existing blkgs. As
+ * @q is bypassing at this point, blkg_lookup_create() can't be
--- /dev/null
+From 871dd9286e25330c8a581e5dacfa8b1dfe1dd641 Mon Sep 17 00:00:00 2001
+From: James Bottomley <JBottomley@Parallels.com>
+Date: Wed, 24 Apr 2013 08:52:50 -0600
+Subject: block: fix max discard sectors limit
+
+From: James Bottomley <JBottomley@Parallels.com>
+
+commit 871dd9286e25330c8a581e5dacfa8b1dfe1dd641 upstream.
+
+linux-v3.8-rc1 and later support for plug for blkdev_issue_discard with
+commit 0cfbcafcae8b7364b5fa96c2b26ccde7a3a296a9
+(block: add plug for blkdev_issue_discard)
+
+For example,
+1) DISCARD rq-1 with size 4GB
+2) DISCARD rq-2 with size 1GB
+
+If these 2 discard requests get merged, final request size will be 5GB.
+
+In this case, request's __data_len field may overflow as it can store
+max 4GB(unsigned int).
+
+This issue was observed while doing mkfs.f2fs on 5GB SD card:
+https://lkml.org/lkml/2013/4/1/292
+
+Info: sector size = 512
+Info: total sectors = 11370496 (in 512bytes)
+Info: zone aligned segment0 blkaddr: 512
+[ 257.789764] blk_update_request: bio idx 0 >= vcnt 0
+
+mkfs process gets stuck in D state and I see the following in the dmesg:
+
+[ 257.789733] __end_that: dev mmcblk0: type=1, flags=122c8081
+[ 257.789764] sector 4194304, nr/cnr 2981888/4294959104
+[ 257.789764] bio df3840c0, biotail df3848c0, buffer (null), len
+1526726656
+[ 257.789764] blk_update_request: bio idx 0 >= vcnt 0
+[ 257.794921] request botched: dev mmcblk0: type=1, flags=122c8081
+[ 257.794921] sector 4194304, nr/cnr 2981888/4294959104
+[ 257.794921] bio df3840c0, biotail df3848c0, buffer (null), len
+1526726656
+
+This patch fixes this issue.
+
+Reported-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: James Bottomley <JBottomley@Parallels.com>
+Signed-off-by: Namjae Jeon <namjae.jeon@samsung.com>
+Tested-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/blkdev.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -838,7 +838,7 @@ static inline unsigned int blk_queue_get
+ unsigned int cmd_flags)
+ {
+ if (unlikely(cmd_flags & REQ_DISCARD))
+- return q->limits.max_discard_sectors;
++ return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+
+ if (unlikely(cmd_flags & REQ_WRITE_SAME))
+ return q->limits.max_write_same_sectors;
--- /dev/null
+From f3b2bbdc8a87a080ccd23d27fca4b87d61340dd4 Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Thu, 2 May 2013 02:45:02 -0400
+Subject: drm/cirrus: deal with bo reserve fail in dirty update path
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit f3b2bbdc8a87a080ccd23d27fca4b87d61340dd4 upstream.
+
+Port over the mgag200 fix to cirrus as it suffers the same issue.
+
+ On F19 testing, it was noticed we get a lot of errors in dmesg
+ about being unable to reserve the buffer when plymouth starts,
+ this is due to the buffer being in the process of migrating,
+ so it makes sense we can't reserve it.
+
+ In order to deal with it, this adds delayed updates for the dirty
+ updates, when the bo is unreservable, in the normal console case
+ this shouldn't ever happen, it's just when plymouth or X is
+ pushing the console bo to system memory.
+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/cirrus/cirrus_drv.h | 2 +
+ drivers/gpu/drm/cirrus/cirrus_fbdev.c | 38 +++++++++++++++++++++++++++++++++-
+ drivers/gpu/drm/cirrus/cirrus_ttm.c | 2 -
+ 3 files changed, 40 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
++++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
+@@ -154,6 +154,8 @@ struct cirrus_fbdev {
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
++ int x1, y1, x2, y2; /* dirty rect */
++ spinlock_t dirty_lock;
+ };
+
+ struct cirrus_bo {
+--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
++++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+@@ -27,16 +27,51 @@ static void cirrus_dirty_update(struct c
+ int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
++ bool store_for_later = false;
++ int x2, y2;
++ unsigned long flags;
+
+ obj = afbdev->gfb.obj;
+ bo = gem_to_cirrus_bo(obj);
+
++ /*
++ * try and reserve the BO, if we fail with busy
++ * then the BO is being moved and we should
++ * store up the damage until later.
++ */
+ ret = cirrus_bo_reserve(bo, true);
+ if (ret) {
+- DRM_ERROR("failed to reserve fb bo\n");
++ if (ret != -EBUSY)
++ return;
++ store_for_later = true;
++ }
++
++ x2 = x + width - 1;
++ y2 = y + height - 1;
++ spin_lock_irqsave(&afbdev->dirty_lock, flags);
++
++ if (afbdev->y1 < y)
++ y = afbdev->y1;
++ if (afbdev->y2 > y2)
++ y2 = afbdev->y2;
++ if (afbdev->x1 < x)
++ x = afbdev->x1;
++ if (afbdev->x2 > x2)
++ x2 = afbdev->x2;
++
++ if (store_for_later) {
++ afbdev->x1 = x;
++ afbdev->x2 = x2;
++ afbdev->y1 = y;
++ afbdev->y2 = y2;
++ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+ return;
+ }
+
++ afbdev->x1 = afbdev->y1 = INT_MAX;
++ afbdev->x2 = afbdev->y2 = 0;
++ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
++
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+@@ -268,6 +303,7 @@ int cirrus_fbdev_init(struct cirrus_devi
+
+ cdev->mode_info.gfbdev = gfbdev;
+ gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
++ spin_lock_init(&gfbdev->dirty_lock);
+
+ ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
+ cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
+--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
++++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
+@@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+- if (ret != -ERESTARTSYS)
++ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
--- /dev/null
+From 5b0c275926b8149c555da874bb4ec258ea3292aa Mon Sep 17 00:00:00 2001
+From: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
+Date: Mon, 1 Apr 2013 20:13:39 +0000
+Subject: RDMA/cxgb4: Fix SQ allocation when on-chip SQ is disabled
+
+From: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
+
+commit 5b0c275926b8149c555da874bb4ec258ea3292aa upstream.
+
+Commit c079c28714e4 ("RDMA/cxgb4: Fix error handling in create_qp()")
+broke SQ allocation. Instead of falling back to host allocation when
+on-chip allocation fails, it tries to allocate both. And when it
+does, and we try to free the address from the genpool using the host
+address, we hit a BUG and the system crashes as below.
+
+We create a new function that has the previous behavior and properly
+propagate the error, as intended.
+
+ kernel BUG at /usr/src/packages/BUILD/kernel-ppc64-3.0.68/linux-3.0/lib/genalloc.c:340!
+ Oops: Exception in kernel mode, sig: 5 [#1]
+ SMP NR_CPUS=1024 NUMA pSeries
+ Modules linked in: rdma_ucm rdma_cm ib_addr ib_cm iw_cm ib_sa ib_mad ib_uverbs iw_cxgb4 ib_core ip6t_LOG xt_tcpudp xt_pkttype ipt_LOG xt_limit ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 ip6table_raw xt_NOTRACK ipt_REJECT xt_state iptable_raw iptable_filter ip6table_mangle nf_conntrack_netbios_ns nf_conntrack_broadcast nf_conntrack_ipv4 nf_conntrack nf_defrag_ipv4 ip_tables ip6table_filter ip6_tables x_tables fuse loop dm_mod ipv6 ipv6_lib sr_mod cdrom ibmveth(X) cxgb4 sg ext3 jbd mbcache sd_mod crc_t10dif scsi_dh_emc scsi_dh_hp_sw scsi_dh_alua scsi_dh_rdac scsi_dh ibmvscsic(X) scsi_transport_srp scsi_tgt scsi_mod
+ Supported: Yes
+ NIP: c00000000037d41c LR: d000000003913824 CTR: c00000000037d3b0
+ REGS: c0000001f350ae50 TRAP: 0700 Tainted: G X (3.0.68-0.9-ppc64)
+ MSR: 8000000000029032 <EE,ME,CE,IR,DR> CR: 24042482 XER: 00000001
+ TASK = c0000001f6f2a840[3616] 'rping' THREAD: c0000001f3508000 CPU: 0
+ GPR00: c0000001f6e875c8 c0000001f350b0d0 c000000000fc9690 c0000001f6e875c0
+ GPR04: 00000000000c0000 0000000000010000 0000000000000000 c0000000009d482a
+ GPR08: 000000006a170000 0000000000100000 c0000001f350b140 c0000001f6e875c8
+ GPR12: d000000003915dd0 c000000003f40000 000000003e3ecfa8 c0000001f350bea0
+ GPR16: c0000001f350bcd0 00000000003c0000 0000000000040100 c0000001f6e74a80
+ GPR20: d00000000399a898 c0000001f6e74ac8 c0000001fad91600 c0000001f6e74ab0
+ GPR24: c0000001f7d23f80 0000000000000000 0000000000000002 000000006a170000
+ GPR28: 000000000000000c c0000001f584c8d0 d000000003925180 c0000001f6e875c8
+ NIP [c00000000037d41c] .gen_pool_free+0x6c/0xf8
+ LR [d000000003913824] .c4iw_ocqp_pool_free+0x8c/0xd8 [iw_cxgb4]
+ Call Trace:
+ [c0000001f350b0d0] [c0000001f350b180] 0xc0000001f350b180 (unreliable)
+ [c0000001f350b170] [d000000003913824] .c4iw_ocqp_pool_free+0x8c/0xd8 [iw_cxgb4]
+ [c0000001f350b210] [d00000000390fd70] .dealloc_sq+0x90/0xb0 [iw_cxgb4]
+ [c0000001f350b280] [d00000000390fe08] .destroy_qp+0x78/0xf8 [iw_cxgb4]
+ [c0000001f350b310] [d000000003912738] .c4iw_destroy_qp+0x208/0x2d0 [iw_cxgb4]
+ [c0000001f350b460] [d000000003861874] .ib_destroy_qp+0x5c/0x130 [ib_core]
+ [c0000001f350b510] [d0000000039911bc] .ib_uverbs_cleanup_ucontext+0x174/0x4f8 [ib_uverbs]
+ [c0000001f350b5f0] [d000000003991568] .ib_uverbs_close+0x28/0x70 [ib_uverbs]
+ [c0000001f350b670] [c0000000001e7b2c] .__fput+0xdc/0x278
+ [c0000001f350b720] [c0000000001a9590] .remove_vma+0x68/0xd8
+ [c0000001f350b7b0] [c0000000001a9720] .exit_mmap+0x120/0x160
+ [c0000001f350b8d0] [c0000000000af330] .mmput+0x80/0x160
+ [c0000001f350b960] [c0000000000b5d0c] .exit_mm+0x1ac/0x1e8
+ [c0000001f350ba10] [c0000000000b8154] .do_exit+0x1b4/0x4b8
+ [c0000001f350bad0] [c0000000000b84b0] .do_group_exit+0x58/0xf8
+ [c0000001f350bb60] [c0000000000ce9f4] .get_signal_to_deliver+0x2f4/0x5d0
+ [c0000001f350bc60] [c000000000017ee4] .do_signal_pending+0x6c/0x3e0
+ [c0000001f350bdb0] [c0000000000182cc] .do_signal+0x74/0x78
+ [c0000001f350be30] [c000000000009e74] do_work+0x24/0x28
+
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
+Cc: Emil Goode <emilgoode@gmail.com>
+Acked-by: Steve Wise <swise@opengridcomputing.com>
+Signed-off-by: Roland Dreier <roland@purestorage.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/cxgb4/qp.c | 25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -100,6 +100,16 @@ static int alloc_host_sq(struct c4iw_rde
+ return 0;
+ }
+
++static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
++{
++ int ret = -ENOSYS;
++ if (user)
++ ret = alloc_oc_sq(rdev, sq);
++ if (ret)
++ ret = alloc_host_sq(rdev, sq);
++ return ret;
++}
++
+ static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ struct c4iw_dev_ucontext *uctx)
+ {
+@@ -168,18 +178,9 @@ static int create_qp(struct c4iw_rdev *r
+ goto free_sw_rq;
+ }
+
+- if (user) {
+- ret = alloc_oc_sq(rdev, &wq->sq);
+- if (ret)
+- goto free_hwaddr;
+-
+- ret = alloc_host_sq(rdev, &wq->sq);
+- if (ret)
+- goto free_sq;
+- } else
+- ret = alloc_host_sq(rdev, &wq->sq);
+- if (ret)
+- goto free_hwaddr;
++ ret = alloc_sq(rdev, &wq->sq, user);
++ if (ret)
++ goto free_hwaddr;
+ memset(wq->sq.queue, 0, wq->sq.memsize);
+ dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
+
ext4-add-check-for-inodes_count-overflow-in-new-resize-ioctl.patch
modsign-do-not-send-garbage-to-stderr-when-enabling-modules-signature.patch
r8169-fix-8168evl-frame-padding.patch
+rdma-cxgb4-fix-sq-allocation-when-on-chip-sq-is-disabled.patch
+arm64-ignore-the-write-esr-flag-on-cache-maintenance-faults.patch
+blkcg-fix-scheduling-while-atomic-in-blk_queue_bypass_start.patch
+block-fix-max-discard-sectors-limit.patch
+drm-cirrus-deal-with-bo-reserve-fail-in-dirty-update-path.patch