--- /dev/null
+From 39e567ae36fe03c2b446e1b83ee3d39bea08f90b Mon Sep 17 00:00:00 2001
+From: Peng Tao <bergwolf@gmail.com>
+Date: Thu, 12 Jan 2012 23:18:41 +0800
+Subject: pnfsblock: acquire im_lock in _preload_range
+
+From: Peng Tao <bergwolf@gmail.com>
+
+commit 39e567ae36fe03c2b446e1b83ee3d39bea08f90b upstream.
+
+When calling _add_entry, we should take the im_lock to protect
+against other modifiers.
+
+Signed-off-by: Peng Tao <peng_tao@emc.com>
+Signed-off-by: Benny Halevy <bhalevy@tonian.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/blocklayout/extents.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/fs/nfs/blocklayout/extents.c
++++ b/fs/nfs/blocklayout/extents.c
+@@ -139,11 +139,13 @@ static int _set_range(struct my_tree *tr
+ }
+
+ /* Ensure that future operations on given range of tree will not malloc */
+-static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
++static int _preload_range(struct pnfs_inval_markings *marks,
++ u64 offset, u64 length)
+ {
+ u64 start, end, s;
+ int count, i, used = 0, status = -ENOMEM;
+ struct pnfs_inval_tracking **storage;
++ struct my_tree *tree = &marks->im_tree;
+
+ dprintk("%s(%llu, %llu) enter\n", __func__, offset, length);
+ start = normalize(offset, tree->mtt_step_size);
+@@ -161,12 +163,11 @@ static int _preload_range(struct my_tree
+ goto out_cleanup;
+ }
+
+- /* Now need lock - HOW??? */
+-
++ spin_lock(&marks->im_lock);
+ for (s = start; s < end; s += tree->mtt_step_size)
+ used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]);
++ spin_unlock(&marks->im_lock);
+
+- /* Unlock - HOW??? */
+ status = 0;
+
+ out_cleanup:
+@@ -286,7 +287,7 @@ int bl_mark_sectors_init(struct pnfs_inv
+
+ start = normalize(offset, marks->im_block_size);
+ end = normalize_up(offset + length, marks->im_block_size);
+- if (_preload_range(&marks->im_tree, start, end - start))
++ if (_preload_range(marks, start, end - start))
+ goto outerr;
+
+ spin_lock(&marks->im_lock);
--- /dev/null
+From 93a3844ee0f843b05a1df4b52e1a19ff26b98d24 Mon Sep 17 00:00:00 2001
+From: Peng Tao <bergwolf@gmail.com>
+Date: Thu, 12 Jan 2012 23:18:47 +0800
+Subject: pnfsblock: don't spinlock when freeing block_dev
+
+From: Peng Tao <bergwolf@gmail.com>
+
+commit 93a3844ee0f843b05a1df4b52e1a19ff26b98d24 upstream.
+
+bl_free_block_dev() may sleep. We can not call it with spinlock held.
+Besides, there is no need to take bm_lock as we are last user freeing bm_devlist.
+
+Signed-off-by: Peng Tao <peng_tao@emc.com>
+Signed-off-by: Benny Halevy <bhalevy@tonian.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/blocklayout/blocklayout.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -779,16 +779,13 @@ bl_cleanup_layoutcommit(struct nfs4_layo
+ static void free_blk_mountid(struct block_mount_id *mid)
+ {
+ if (mid) {
+- struct pnfs_block_dev *dev;
+- spin_lock(&mid->bm_lock);
+- while (!list_empty(&mid->bm_devlist)) {
+- dev = list_first_entry(&mid->bm_devlist,
+- struct pnfs_block_dev,
+- bm_node);
++ struct pnfs_block_dev *dev, *tmp;
++
++ /* No need to take bm_lock as we are last user freeing bm_devlist */
++ list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
+ list_del(&dev->bm_node);
+ bl_free_block_dev(dev);
+ }
+- spin_unlock(&mid->bm_lock);
+ kfree(mid);
+ }
+ }
--- /dev/null
+From 74a6eeb44ca6174d9cc93b9b8b4d58211c57bc80 Mon Sep 17 00:00:00 2001
+From: Peng Tao <bergwolf@gmail.com>
+Date: Thu, 12 Jan 2012 23:18:48 +0800
+Subject: pnfsblock: limit bio page count
+
+From: Peng Tao <bergwolf@gmail.com>
+
+commit 74a6eeb44ca6174d9cc93b9b8b4d58211c57bc80 upstream.
+
+One bio can have at most BIO_MAX_PAGES pages. We should limit it because otherwise
+bio_alloc will fail when there are many pages in one read/write_pagelist.
+
+Signed-off-by: Peng Tao <peng_tao@emc.com>
+Signed-off-by: Benny Halevy <bhalevy@tonian.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/blocklayout/blocklayout.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -146,14 +146,19 @@ static struct bio *bl_alloc_init_bio(int
+ {
+ struct bio *bio;
+
++ npg = min(npg, BIO_MAX_PAGES);
+ bio = bio_alloc(GFP_NOIO, npg);
+- if (!bio)
+- return NULL;
++ if (!bio && (current->flags & PF_MEMALLOC)) {
++ while (!bio && (npg /= 2))
++ bio = bio_alloc(GFP_NOIO, npg);
++ }
+
+- bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+- bio->bi_bdev = be->be_mdev;
+- bio->bi_end_io = end_io;
+- bio->bi_private = par;
++ if (bio) {
++ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
++ bio->bi_bdev = be->be_mdev;
++ bio->bi_end_io = end_io;
++ bio->bi_private = par;
++ }
+ return bio;
+ }
+
uvcvideo-fix-integer-overflow-in-uvc_ioctl_ctrl_map.patch
dcache-use-a-dispose-list-in-select_parent.patch
fix-shrink_dcache_parent-livelock.patch
+pnfsblock-acquire-im_lock-in-_preload_range.patch
+pnfsblock-don-t-spinlock-when-freeing-block_dev.patch
+pnfsblock-limit-bio-page-count.patch