git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.0 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Wed, 19 Oct 2011 23:03:22 +0000 (16:03 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Wed, 19 Oct 2011 23:03:22 +0000 (16:03 -0700)
15 files changed:
queue-3.0/cifs-fix-err_ptr-dereference-in-cifs_get_root.patch [new file with mode: 0644]
queue-3.0/drm-ttm-ensure-ttm-for-new-node-is-bound-before-calling-move_notify.patch [new file with mode: 0644]
queue-3.0/drm-ttm-unbind-ttm-before-destroying-node-in-accel-move-cleanup.patch [new file with mode: 0644]
queue-3.0/firewire-sbp2-fix-panic-after-rmmod-with-slow-targets.patch [new file with mode: 0644]
queue-3.0/fuse-fix-memory-leak.patch [new file with mode: 0644]
queue-3.0/hfsplus-ensure-bio-requests-are-not-smaller-than-the-hardware-sectors.patch [new file with mode: 0644]
queue-3.0/hid-magicmouse-ignore-ivalid-report-id-while-switching-modes-v2.patch [new file with mode: 0644]
queue-3.0/platform-fix-samsung-laptop-dmi-identification-for-n150-n210-220-n230.patch [new file with mode: 0644]
queue-3.0/series
queue-3.0/uvcvideo-fix-crash-when-linking-entities.patch [new file with mode: 0644]
queue-3.0/xfs-do-not-update-xa_last_pushed_lsn-for-locked-items.patch [new file with mode: 0644]
queue-3.0/xfs-force-the-log-if-we-encounter-pinned-buffers-in-.iop_pushbuf.patch [new file with mode: 0644]
queue-3.0/xfs-revert-to-using-a-kthread-for-ail-pushing.patch [new file with mode: 0644]
queue-3.0/xfs-start-periodic-workers-later.patch [new file with mode: 0644]
queue-3.0/xfs-use-a-cursor-for-bulk-ail-insertion.patch [new file with mode: 0644]

diff --git a/queue-3.0/cifs-fix-err_ptr-dereference-in-cifs_get_root.patch b/queue-3.0/cifs-fix-err_ptr-dereference-in-cifs_get_root.patch
new file mode 100644 (file)
index 0000000..cac5405
--- /dev/null
@@ -0,0 +1,47 @@
+From 5b980b01212199833ee8023770fa4cbf1b85e9f4 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <piastryyy@gmail.com>
+Date: Sun, 21 Aug 2011 19:30:15 +0400
+Subject: CIFS: Fix ERR_PTR dereference in cifs_get_root
+
+From: Pavel Shilovsky <piastryyy@gmail.com>
+
+commit 5b980b01212199833ee8023770fa4cbf1b85e9f4 upstream.
+
+lookup_one_len() can return an ERR_PTR(), and the old code dereferenced
+dentry->d_inode on its result before checking it; move the negative-dentry
+check to the beginning of the loop, where the while (!IS_ERR(dentry))
+condition has already been evaluated.
+
+Signed-off-by: Pavel Shilovsky <piastryyy@gmail.com>
+Reviewed-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Cc: Josh Boyer <jwboyer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/cifsfs.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -566,6 +566,12 @@ cifs_get_root(struct smb_vol *vol, struc
+               struct inode *dir = dentry->d_inode;
+               struct dentry *child;
++              if (!dir) {
++                      dput(dentry);
++                      dentry = ERR_PTR(-ENOENT);
++                      break;
++              }
++
+               /* skip separators */
+               while (*s == sep)
+                       s++;
+@@ -581,10 +587,6 @@ cifs_get_root(struct smb_vol *vol, struc
+               mutex_unlock(&dir->i_mutex);
+               dput(dentry);
+               dentry = child;
+-              if (!dentry->d_inode) {
+-                      dput(dentry);
+-                      dentry = ERR_PTR(-ENOENT);
+-              }
+       } while (!IS_ERR(dentry));
+       _FreeXid(xid);
+       kfree(full_path);
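
For illustration, the fixed lookup loop reduces to the shape below. This is a
simplified sketch of the logic in the hunk above, not the literal kernel code;
the path-component parsing is elided. The point is that the negative-dentry
check now runs at the top of the iteration, where the loop condition has
already ruled out an ERR_PTR dentry, rather than right after lookup_one_len()
may have returned one:

    do {
            struct inode *dir = dentry->d_inode;
            struct dentry *child;

            if (!dir) {                 /* negative dentry: stop before using it */
                    dput(dentry);
                    dentry = ERR_PTR(-ENOENT);
                    break;
            }

            /* ... skip separators, find the next component (s, len) ... */

            mutex_lock(&dir->i_mutex);
            child = lookup_one_len(s, dentry, len);  /* may return ERR_PTR() */
            mutex_unlock(&dir->i_mutex);
            dput(dentry);
            dentry = child;             /* only tested, never dereferenced, here */
    } while (!IS_ERR(dentry));
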
diff --git a/queue-3.0/drm-ttm-ensure-ttm-for-new-node-is-bound-before-calling-move_notify.patch b/queue-3.0/drm-ttm-ensure-ttm-for-new-node-is-bound-before-calling-move_notify.patch
new file mode 100644 (file)
index 0000000..0890e56
--- /dev/null
@@ -0,0 +1,42 @@
+From 8d3bb23609d4ae22803a15d232289fc09a7b61c4 Mon Sep 17 00:00:00 2001
+From: Ben Skeggs <bskeggs@redhat.com>
+Date: Mon, 22 Aug 2011 03:15:05 +0000
+Subject: drm/ttm: ensure ttm for new node is bound before calling move_notify()
+
+From: Ben Skeggs <bskeggs@redhat.com>
+
+commit 8d3bb23609d4ae22803a15d232289fc09a7b61c4 upstream.
+
+This was true for new TTM_PL_SYSTEM and new TTM_PL_TT cases, but wasn't
+the case on TTM_PL_SYSTEM<->TTM_PL_TT moves, which causes trouble on some
+paths as nouveau's move_notify() hook requires that the dma addresses be
+valid at this point.
+
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Cc: Josh Boyer <jwboyer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/ttm/ttm_bo.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -392,10 +392,12 @@ static int ttm_bo_handle_move_mem(struct
+        * Create and bind a ttm if required.
+        */
+-      if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
+-              ret = ttm_bo_add_ttm(bo, false);
+-              if (ret)
+-                      goto out_err;
++      if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
++              if (bo->ttm == NULL) {
++                      ret = ttm_bo_add_ttm(bo, false);
++                      if (ret)
++                              goto out_err;
++              }
+               ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
+               if (ret)
diff --git a/queue-3.0/drm-ttm-unbind-ttm-before-destroying-node-in-accel-move-cleanup.patch b/queue-3.0/drm-ttm-unbind-ttm-before-destroying-node-in-accel-move-cleanup.patch
new file mode 100644 (file)
index 0000000..a4ea4ac
--- /dev/null
@@ -0,0 +1,39 @@
+From eac2095398668f989a3dd8d00be1b87850d78c01 Mon Sep 17 00:00:00 2001
+From: Ben Skeggs <bskeggs@redhat.com>
+Date: Mon, 22 Aug 2011 03:15:04 +0000
+Subject: drm/ttm: unbind ttm before destroying node in accel move cleanup
+
+From: Ben Skeggs <bskeggs@redhat.com>
+
+commit eac2095398668f989a3dd8d00be1b87850d78c01 upstream.
+
+Nouveau makes the assumption that if a TTM is bound there will be a mm_node
+around for it and the backwards ordering here resulted in a use-after-free
+on some eviction paths.
+
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Cc: Josh Boyer <jwboyer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/ttm/ttm_bo_util.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
+@@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm
+               if (ret)
+                       return ret;
+-              ttm_bo_free_old_node(bo);
+               if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+                   (bo->ttm != NULL)) {
+                       ttm_tt_unbind(bo->ttm);
+                       ttm_tt_destroy(bo->ttm);
+                       bo->ttm = NULL;
+               }
++              ttm_bo_free_old_node(bo);
+       } else {
+               /**
+                * This should help pipeline ordinary buffer moves.
diff --git a/queue-3.0/firewire-sbp2-fix-panic-after-rmmod-with-slow-targets.patch b/queue-3.0/firewire-sbp2-fix-panic-after-rmmod-with-slow-targets.patch
new file mode 100644 (file)
index 0000000..612fd8c
--- /dev/null
@@ -0,0 +1,43 @@
+From 0278ccd9d53e07c4e699432b2fed9de6c56f506c Mon Sep 17 00:00:00 2001
+From: Chris Boot <bootc@bootc.net>
+Date: Mon, 22 Aug 2011 21:38:38 +0100
+Subject: firewire: sbp2: fix panic after rmmod with slow targets
+
+From: Chris Boot <bootc@bootc.net>
+
+commit 0278ccd9d53e07c4e699432b2fed9de6c56f506c upstream.
+
+If firewire-sbp2 starts a login to a target that doesn't complete ORBs
+in a timely manner (and has to retry the login), and the module is
+removed before the operation times out, you end up with a null-pointer
+dereference and a kernel panic.
+
+[SR:  This happens because sbp2_target_get/put() do not maintain
+module references.  scsi_device_get/put() do, but on occasions like the
+one Chris describes, nobody holds a reference to an SBP-2 sdev.]
+
+This patch cancels pending work for each unit in sbp2_remove(), which
+hopefully means there are no extra references around that prevent us
+from unloading. This fixes my crash.
+
+Signed-off-by: Chris Boot <bootc@bootc.net>
+Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/firewire/sbp2.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -1198,6 +1198,10 @@ static int sbp2_remove(struct device *de
+ {
+       struct fw_unit *unit = fw_unit(dev);
+       struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
++      struct sbp2_logical_unit *lu;
++
++      list_for_each_entry(lu, &tgt->lu_list, link)
++              cancel_delayed_work_sync(&lu->work);
+       sbp2_target_put(tgt);
+       return 0;
diff --git a/queue-3.0/fuse-fix-memory-leak.patch b/queue-3.0/fuse-fix-memory-leak.patch
new file mode 100644 (file)
index 0000000..0c6d301
--- /dev/null
@@ -0,0 +1,72 @@
+From 5dfcc87fd79dfb96ed155b524337dbd0da4f5993 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@suse.cz>
+Date: Mon, 12 Sep 2011 09:38:03 +0200
+Subject: fuse: fix memory leak
+
+From: Miklos Szeredi <mszeredi@suse.cz>
+
+commit 5dfcc87fd79dfb96ed155b524337dbd0da4f5993 upstream.
+
+kmemleak is reporting that 32 bytes are being leaked by FUSE:
+
+  unreferenced object 0xe373b270 (size 32):
+  comm "fusermount", pid 1207, jiffies 4294707026 (age 2675.187s)
+  hex dump (first 32 bytes):
+    01 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00  ................
+    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+  backtrace:
+    [<b05517d7>] kmemleak_alloc+0x27/0x50
+    [<b0196435>] kmem_cache_alloc+0xc5/0x180
+    [<b02455be>] fuse_alloc_forget+0x1e/0x20
+    [<b0245670>] fuse_alloc_inode+0xb0/0xd0
+    [<b01b1a8c>] alloc_inode+0x1c/0x80
+    [<b01b290f>] iget5_locked+0x8f/0x1a0
+    [<b0246022>] fuse_iget+0x72/0x1a0
+    [<b02461da>] fuse_get_root_inode+0x8a/0x90
+    [<b02465cf>] fuse_fill_super+0x3ef/0x590
+    [<b019e56f>] mount_nodev+0x3f/0x90
+    [<b0244e95>] fuse_mount+0x15/0x20
+    [<b019d1bc>] mount_fs+0x1c/0xc0
+    [<b01b5811>] vfs_kern_mount+0x41/0x90
+    [<b01b5af9>] do_kern_mount+0x39/0xd0
+    [<b01b7585>] do_mount+0x2e5/0x660
+    [<b01b7966>] sys_mount+0x66/0xa0
+
+This leak report is consistent and happens once per boot on
+3.1.0-rc5-dirty.
+
+This happens if a FORGET request is queued after the fuse device was
+released.
+
+Reported-by: Sitsofe Wheeler <sitsofe@yahoo.com>
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Tested-by: Sitsofe Wheeler <sitsofe@yahoo.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Josh Boyer <jwboyer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/fuse/dev.c |   12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -258,10 +258,14 @@ void fuse_queue_forget(struct fuse_conn
+       forget->forget_one.nlookup = nlookup;
+       spin_lock(&fc->lock);
+-      fc->forget_list_tail->next = forget;
+-      fc->forget_list_tail = forget;
+-      wake_up(&fc->waitq);
+-      kill_fasync(&fc->fasync, SIGIO, POLL_IN);
++      if (fc->connected) {
++              fc->forget_list_tail->next = forget;
++              fc->forget_list_tail = forget;
++              wake_up(&fc->waitq);
++              kill_fasync(&fc->fasync, SIGIO, POLL_IN);
++      } else {
++              kfree(forget);
++      }
+       spin_unlock(&fc->lock);
+ }
diff --git a/queue-3.0/hfsplus-ensure-bio-requests-are-not-smaller-than-the-hardware-sectors.patch b/queue-3.0/hfsplus-ensure-bio-requests-are-not-smaller-than-the-hardware-sectors.patch
new file mode 100644 (file)
index 0000000..18e0e74
--- /dev/null
@@ -0,0 +1,310 @@
+From 6596528e391ad978a6a120142cba97a1d7324cb6 Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Mon, 18 Jul 2011 08:06:23 -0700
+Subject: hfsplus: ensure bio requests are not smaller than the hardware sectors
+
+From: Seth Forshee <seth.forshee@canonical.com>
+
+commit 6596528e391ad978a6a120142cba97a1d7324cb6 upstream.
+
+Currently all bio requests are 512 bytes, which may fail for media
+whose physical sector size is larger than this. Ensure these
+requests are not smaller than the block device logical block size.
+
+BugLink: http://bugs.launchpad.net/bugs/734883
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: Josh Boyer <jwboyer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/hfsplus/hfsplus_fs.h |   16 ++++++++-
+ fs/hfsplus/part_tbl.c   |   32 ++++++++++--------
+ fs/hfsplus/super.c      |   12 +++---
+ fs/hfsplus/wrapper.c    |   83 ++++++++++++++++++++++++++++++++++++------------
+ 4 files changed, 101 insertions(+), 42 deletions(-)
+
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -13,6 +13,7 @@
+ #include <linux/fs.h>
+ #include <linux/mutex.h>
+ #include <linux/buffer_head.h>
++#include <linux/blkdev.h>
+ #include "hfsplus_raw.h"
+ #define DBG_BNODE_REFS        0x00000001
+@@ -110,7 +111,9 @@ struct hfsplus_vh;
+ struct hfs_btree;
+ struct hfsplus_sb_info {
++      void *s_vhdr_buf;
+       struct hfsplus_vh *s_vhdr;
++      void *s_backup_vhdr_buf;
+       struct hfsplus_vh *s_backup_vhdr;
+       struct hfs_btree *ext_tree;
+       struct hfs_btree *cat_tree;
+@@ -258,6 +261,15 @@ struct hfsplus_readdir_data {
+       struct hfsplus_cat_key key;
+ };
++/*
++ * Find minimum acceptible I/O size for an hfsplus sb.
++ */
++static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
++{
++      return max_t(unsigned short, bdev_logical_block_size(sb->s_bdev),
++                   HFSPLUS_SECTOR_SIZE);
++}
++
+ #define hfs_btree_open hfsplus_btree_open
+ #define hfs_btree_close hfsplus_btree_close
+ #define hfs_btree_write hfsplus_btree_write
+@@ -436,8 +448,8 @@ int hfsplus_compare_dentry(const struct
+ /* wrapper.c */
+ int hfsplus_read_wrapper(struct super_block *);
+ int hfs_part_find(struct super_block *, sector_t *, sector_t *);
+-int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
+-              void *data, int rw);
++int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
++              void *buf, void **data, int rw);
+ /* time macros */
+ #define __hfsp_mt2ut(t)               (be32_to_cpu(t) - 2082844800U)
+--- a/fs/hfsplus/part_tbl.c
++++ b/fs/hfsplus/part_tbl.c
+@@ -88,11 +88,12 @@ static int hfs_parse_old_pmap(struct sup
+       return -ENOENT;
+ }
+-static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
+-              sector_t *part_start, sector_t *part_size)
++static int hfs_parse_new_pmap(struct super_block *sb, void *buf,
++              struct new_pmap *pm, sector_t *part_start, sector_t *part_size)
+ {
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+       int size = be32_to_cpu(pm->pmMapBlkCnt);
++      int buf_size = hfsplus_min_io_size(sb);
+       int res;
+       int i = 0;
+@@ -107,11 +108,14 @@ static int hfs_parse_new_pmap(struct sup
+               if (++i >= size)
+                       return -ENOENT;
+-              res = hfsplus_submit_bio(sb->s_bdev,
+-                                       *part_start + HFS_PMAP_BLK + i,
+-                                       pm, READ);
+-              if (res)
+-                      return res;
++              pm = (struct new_pmap *)((u8 *)pm + HFSPLUS_SECTOR_SIZE);
++              if ((u8 *)pm - (u8 *)buf >= buf_size) {
++                      res = hfsplus_submit_bio(sb,
++                                               *part_start + HFS_PMAP_BLK + i,
++                                               buf, (void **)&pm, READ);
++                      if (res)
++                              return res;
++              }
+       } while (pm->pmSig == cpu_to_be16(HFS_NEW_PMAP_MAGIC));
+       return -ENOENT;
+@@ -124,15 +128,15 @@ static int hfs_parse_new_pmap(struct sup
+ int hfs_part_find(struct super_block *sb,
+               sector_t *part_start, sector_t *part_size)
+ {
+-      void *data;
++      void *buf, *data;
+       int res;
+-      data = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
+-      if (!data)
++      buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
++      if (!buf)
+               return -ENOMEM;
+-      res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK,
+-                               data, READ);
++      res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK,
++                               buf, &data, READ);
+       if (res)
+               goto out;
+@@ -141,13 +145,13 @@ int hfs_part_find(struct super_block *sb
+               res = hfs_parse_old_pmap(sb, data, part_start, part_size);
+               break;
+       case HFS_NEW_PMAP_MAGIC:
+-              res = hfs_parse_new_pmap(sb, data, part_start, part_size);
++              res = hfs_parse_new_pmap(sb, buf, data, part_start, part_size);
+               break;
+       default:
+               res = -ENOENT;
+               break;
+       }
+ out:
+-      kfree(data);
++      kfree(buf);
+       return res;
+ }
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -197,17 +197,17 @@ int hfsplus_sync_fs(struct super_block *
+               write_backup = 1;
+       }
+-      error2 = hfsplus_submit_bio(sb->s_bdev,
++      error2 = hfsplus_submit_bio(sb,
+                                  sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
+-                                 sbi->s_vhdr, WRITE_SYNC);
++                                 sbi->s_vhdr_buf, NULL, WRITE_SYNC);
+       if (!error)
+               error = error2;
+       if (!write_backup)
+               goto out;
+-      error2 = hfsplus_submit_bio(sb->s_bdev,
++      error2 = hfsplus_submit_bio(sb,
+                                 sbi->part_start + sbi->sect_count - 2,
+-                                sbi->s_backup_vhdr, WRITE_SYNC);
++                                sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC);
+       if (!error)
+               error2 = error;
+ out:
+@@ -251,8 +251,8 @@ static void hfsplus_put_super(struct sup
+       hfs_btree_close(sbi->ext_tree);
+       iput(sbi->alloc_file);
+       iput(sbi->hidden_dir);
+-      kfree(sbi->s_vhdr);
+-      kfree(sbi->s_backup_vhdr);
++      kfree(sbi->s_vhdr_buf);
++      kfree(sbi->s_backup_vhdr_buf);
+       unload_nls(sbi->nls);
+       kfree(sb->s_fs_info);
+       sb->s_fs_info = NULL;
+--- a/fs/hfsplus/wrapper.c
++++ b/fs/hfsplus/wrapper.c
+@@ -31,25 +31,67 @@ static void hfsplus_end_io_sync(struct b
+       complete(bio->bi_private);
+ }
+-int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
+-              void *data, int rw)
++/*
++ * hfsplus_submit_bio - Perform block I/O
++ * @sb: super block of volume for I/O
++ * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
++ * @buf: buffer for I/O
++ * @data: output pointer for location of requested data
++ * @rw: direction of I/O
++ *
++ * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
++ * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
++ * @data will return a pointer to the start of the requested sector,
++ * which may not be the same location as @buf.
++ *
++ * If @sector is not aligned to the bdev logical block size it will
++ * be rounded down. For writes this means that @buf should contain data
++ * that starts at the rounded-down address. As long as the data was
++ * read using hfsplus_submit_bio() and the same buffer is used things
++ * will work correctly.
++ */
++int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
++              void *buf, void **data, int rw)
+ {
+       DECLARE_COMPLETION_ONSTACK(wait);
+       struct bio *bio;
+       int ret = 0;
++      unsigned int io_size;
++      loff_t start;
++      int offset;
++
++      /*
++       * Align sector to hardware sector size and find offset. We
++       * assume that io_size is a power of two, which _should_
++       * be true.
++       */
++      io_size = hfsplus_min_io_size(sb);
++      start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
++      offset = start & (io_size - 1);
++      sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
+       bio = bio_alloc(GFP_NOIO, 1);
+       bio->bi_sector = sector;
+-      bio->bi_bdev = bdev;
++      bio->bi_bdev = sb->s_bdev;
+       bio->bi_end_io = hfsplus_end_io_sync;
+       bio->bi_private = &wait;
+-      /*
+-       * We always submit one sector at a time, so bio_add_page must not fail.
+-       */
+-      if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE,
+-                       offset_in_page(data)) != HFSPLUS_SECTOR_SIZE)
+-              BUG();
++      if (!(rw & WRITE) && data)
++              *data = (u8 *)buf + offset;
++
++      while (io_size > 0) {
++              unsigned int page_offset = offset_in_page(buf);
++              unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
++                                       io_size);
++
++              ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
++              if (ret != len) {
++                      ret = -EIO;
++                      goto out;
++              }
++              io_size -= len;
++              buf = (u8 *)buf + len;
++      }
+       submit_bio(rw, bio);
+       wait_for_completion(&wait);
+@@ -57,8 +99,9 @@ int hfsplus_submit_bio(struct block_devi
+       if (!bio_flagged(bio, BIO_UPTODATE))
+               ret = -EIO;
++out:
+       bio_put(bio);
+-      return ret;
++      return ret < 0 ? ret : 0;
+ }
+ static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
+@@ -147,17 +190,17 @@ int hfsplus_read_wrapper(struct super_bl
+       }
+       error = -ENOMEM;
+-      sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
+-      if (!sbi->s_vhdr)
++      sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
++      if (!sbi->s_vhdr_buf)
+               goto out;
+-      sbi->s_backup_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
+-      if (!sbi->s_backup_vhdr)
++      sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
++      if (!sbi->s_backup_vhdr_buf)
+               goto out_free_vhdr;
+ reread:
+-      error = hfsplus_submit_bio(sb->s_bdev,
+-                                 part_start + HFSPLUS_VOLHEAD_SECTOR,
+-                                 sbi->s_vhdr, READ);
++      error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
++                                 sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
++                                 READ);
+       if (error)
+               goto out_free_backup_vhdr;
+@@ -186,9 +229,9 @@ reread:
+               goto reread;
+       }
+-      error = hfsplus_submit_bio(sb->s_bdev,
+-                                 part_start + part_size - 2,
+-                                 sbi->s_backup_vhdr, READ);
++      error = hfsplus_submit_bio(sb, part_start + part_size - 2,
++                                 sbi->s_backup_vhdr_buf,
++                                 (void **)&sbi->s_backup_vhdr, READ);
+       if (error)
+               goto out_free_backup_vhdr;
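
To illustrate the revised API: a caller now passes an opaque I/O buffer and
receives a pointer to the requested sector through @data, because the position
is rounded down to the logical block size and the sector may therefore not
start at the beginning of the buffer. This is also why the superblock now
keeps both s_vhdr_buf (what is allocated and freed) and s_vhdr (what is read).
A minimal caller sketch, with error handling trimmed; the numbers in the
comment assume a 4096-byte logical block size:

    struct hfsplus_vh *vhdr;
    void *buf, *data;
    int res;

    /* The buffer must hold one full minimum-I/O unit, not just 512 bytes. */
    buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
    if (!buf)
            return -ENOMEM;

    /*
     * E.g. reading 512-byte sector 10 with a 4096-byte logical block size:
     * the bio is issued for sector 8 (byte 4096), and data comes back as
     * buf + 1024, pointing at the requested sector inside the buffer.
     */
    res = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
                             buf, &data, READ);
    if (!res)
            vhdr = (struct hfsplus_vh *)data;   /* points into buf */

    /* ... use vhdr, then free the allocation, never the derived pointer ... */
    kfree(buf);
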
diff --git a/queue-3.0/hid-magicmouse-ignore-ivalid-report-id-while-switching-modes-v2.patch b/queue-3.0/hid-magicmouse-ignore-ivalid-report-id-while-switching-modes-v2.patch
new file mode 100644 (file)
index 0000000..cf2f0f0
--- /dev/null
@@ -0,0 +1,63 @@
+From 35d851df23b093ee027f827fed2213ae5e88fc7a Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Thu, 25 Aug 2011 14:21:37 +0200
+Subject: HID: magicmouse: ignore 'ivalid report id' while switching modes, v2
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit 35d851df23b093ee027f827fed2213ae5e88fc7a upstream.
+
+This is basically a more generic respin of 23746a6 ("HID: magicmouse: ignore
+'ivalid report id' while switching modes") which got reverted later by
+c3a492.
+
+It turns out that on some configurations this is actually still the case,
+and we are not able to detect it at runtime.
+
+The device responds with 'invalid report id' when the feature report switching it
+into multitouch mode is sent to it.
+
+This was silently ignored before 0825411ade ("HID: bt: Wait for ACK
+on Sent Reports"), but since that commit, it propagates -EIO from the _raw
+callback.
+
+So let the driver ignore -EIO as a response to the 0xd7,0x01 report, as that's
+how the device reacts in normal mode.
+
+Sad, but following reality.
+
+This fixes https://bugzilla.kernel.org/show_bug.cgi?id=35022
+
+Reported-by: Chase Douglas <chase.douglas@canonical.com>
+Reported-by: Jaikumar Ganesh <jaikumarg@android.com>
+Tested-by: Chase Douglas <chase.douglas@canonical.com>
+Tested-by: Jaikumar Ganesh <jaikumarg@android.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Cc: Josh Boyer <jwboyer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/hid/hid-magicmouse.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -501,9 +501,17 @@ static int magicmouse_probe(struct hid_d
+       }
+       report->size = 6;
++      /*
++       * Some devices respond with 'invalid report id' when the feature
++       * report switching it into multitouch mode is sent to it.
++       *
++       * This results in -EIO from the _raw low-level transport callback,
++       * but there seems to be no other way of switching the mode.
++       * Thus the super-ugly hacky success check below.
++       */
+       ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
+                       HID_FEATURE_REPORT);
+-      if (ret != sizeof(feature)) {
++      if (ret != -EIO && ret != sizeof(feature)) {
+               hid_err(hdev, "unable to request touch data (%d)\n", ret);
+               goto err_stop_hw;
+       }
diff --git a/queue-3.0/platform-fix-samsung-laptop-dmi-identification-for-n150-n210-220-n230.patch b/queue-3.0/platform-fix-samsung-laptop-dmi-identification-for-n150-n210-220-n230.patch
new file mode 100644 (file)
index 0000000..bd79de9
--- /dev/null
@@ -0,0 +1,45 @@
+From 78a7539b881eb557494a7c810625c0307b27296c Mon Sep 17 00:00:00 2001
+From: Thomas Courbon <thcourbon@gmail.com>
+Date: Wed, 20 Jul 2011 22:57:44 +0200
+Subject: Platform: fix samsung-laptop DMI identification for N150/N210/220/N230
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Courbon <thcourbon@gmail.com>
+
+commit 78a7539b881eb557494a7c810625c0307b27296c upstream.
+
+Some Samsung laptops of the N150/N2{10,20,30} series are badly detected by the
+samsung-laptop platform driver, see bug #36082.
+It appears that the N230 identifies itself as N150/N210/N220/N230 whereas the
+others identify themselves as N150/N210/220.
+This patch attempts to fix #36082, allowing correct identification of all the
+said netbook models.
+
+Reported-by: Daniel Eklöf <daniel@ekloef.se>
+Signed-off-by: Thomas Courbon <thcourbon@gmail.com>
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+Cc: Josh Boyer <jwboyer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/platform/x86/samsung-laptop.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -601,6 +601,16 @@ static struct dmi_system_id __initdata s
+               .callback = dmi_check_cb,
+       },
+       {
++              .ident = "N150/N210/N220",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR,
++                                      "SAMSUNG ELECTRONICS CO., LTD."),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
++                      DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
++              },
++              .callback = dmi_check_cb,
++      },
++      {
+               .ident = "N150/N210/N220/N230",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR,
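
For context, tables like this are consumed by dmi_check_system(), which runs
an entry's .callback when every DMI_MATCH() substring in .matches is found in
the corresponding firmware-provided DMI string; since the comparison is a
substring match, the shorter "N150/N210/N220" entry added here also covers
longer product names that contain it. A sketch of the consuming side
(hypothetical function names; the driver's actual init code and callback
differ in detail, and the table name is truncated in the hunk header above):

    static int example_dmi_cb(const struct dmi_system_id *id)
    {
            pr_info("detected supported model '%s'\n", id->ident);
            return 0;       /* keep scanning the rest of the table */
    }

    static int __init example_init(void)
    {
            /* No entry matched: not a recognised Samsung netbook. */
            if (!dmi_check_system(samsung_dmi_table))
                    return -ENODEV;
            /* ... proceed with platform setup ... */
            return 0;
    }
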
diff --git a/queue-3.0/series b/queue-3.0/series
index 6da7a52ea01ac7d7f0192528bdecf1eaebc76054..ec2775ad52c9eedf56258a2fb03bdb5e34f79754 100644 (file)
--- a/queue-3.0/series
@@ -5,3 +5,17 @@ hwmon-w83627ehf-properly-report-thermal-diode-sensors.patch
 avoid-using-variable-length-arrays-in-kernel-sys.c.patch
 drm-radeon-kms-atom-fix-handling-of-fb-scratch-indices.patch
 cputimer-cure-lock-inversion.patch
+fuse-fix-memory-leak.patch
+platform-fix-samsung-laptop-dmi-identification-for-n150-n210-220-n230.patch
+hid-magicmouse-ignore-ivalid-report-id-while-switching-modes-v2.patch
+uvcvideo-fix-crash-when-linking-entities.patch
+hfsplus-ensure-bio-requests-are-not-smaller-than-the-hardware-sectors.patch
+drm-ttm-ensure-ttm-for-new-node-is-bound-before-calling-move_notify.patch
+drm-ttm-unbind-ttm-before-destroying-node-in-accel-move-cleanup.patch
+cifs-fix-err_ptr-dereference-in-cifs_get_root.patch
+xfs-start-periodic-workers-later.patch
+xfs-use-a-cursor-for-bulk-ail-insertion.patch
+xfs-do-not-update-xa_last_pushed_lsn-for-locked-items.patch
+xfs-force-the-log-if-we-encounter-pinned-buffers-in-.iop_pushbuf.patch
+xfs-revert-to-using-a-kthread-for-ail-pushing.patch
+firewire-sbp2-fix-panic-after-rmmod-with-slow-targets.patch
diff --git a/queue-3.0/uvcvideo-fix-crash-when-linking-entities.patch b/queue-3.0/uvcvideo-fix-crash-when-linking-entities.patch
new file mode 100644 (file)
index 0000000..5a096a2
--- /dev/null
@@ -0,0 +1,34 @@
+From 4d9b2ebd335d83044b9e6656d0e604e8e1300334 Mon Sep 17 00:00:00 2001
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Date: Tue, 6 Sep 2011 19:16:18 -0300
+Subject: [media] uvcvideo: Fix crash when linking entities
+
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+commit 4d9b2ebd335d83044b9e6656d0e604e8e1300334 upstream.
+
+The uvc_mc_register_entity() function wrongfully selects the
+media_entity associated with a UVC entity when creating links. This
+results in access to uninitialized media_entity structures and can hit a
+BUG_ON statement in media_entity_create_link(). Fix it.
+
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+Cc: Josh Boyer <jwboyer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/media/video/uvc/uvc_entity.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/media/video/uvc/uvc_entity.c
++++ b/drivers/media/video/uvc/uvc_entity.c
+@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct
+               if (remote == NULL)
+                       return -EINVAL;
+-              source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
++              source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
+                      ? (remote->vdev ? &remote->vdev->entity : NULL)
+                      : &remote->subdev.entity;
+               if (source == NULL)
diff --git a/queue-3.0/xfs-do-not-update-xa_last_pushed_lsn-for-locked-items.patch b/queue-3.0/xfs-do-not-update-xa_last_pushed_lsn-for-locked-items.patch
new file mode 100644 (file)
index 0000000..e632fc9
--- /dev/null
@@ -0,0 +1,37 @@
+From hch@infradead.org  Wed Oct 19 15:57:22 2011
+From: Christoph Hellwig <hch@infradead.org>
+Date: Tue, 18 Oct 2011 10:23:17 -0400
+Subject: xfs: do not update xa_last_pushed_lsn for locked items
+To: stable@vger.kernel.org
+Cc: greg@kroah.com
+Message-ID: <20111018142351.875480698@bombadil.infradead.org>
+
+From: Christoph Hellwig <hch@infradead.org>
+
+commit bc6e588a8971aa74c02e42db4d6e0248679f3738 upstream
+
+If an item was locked we should not update xa_last_pushed_lsn and thus skip
+it when restarting the AIL scan as we need to be able to lock and write it
+out as soon as possible.  Otherwise heavy lock contention might starve AIL
+pushing too easily, especially given the larger backoff once we moved
+xa_last_pushed_lsn all the way to the target lsn.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reported-by: Stefan Priebe <s.priebe@profihost.ag>
+Tested-by: Stefan Priebe <s.priebe@profihost.ag>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/xfs/xfs_trans_ail.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/fs/xfs/xfs_trans_ail.c
++++ b/fs/xfs/xfs_trans_ail.c
+@@ -491,7 +491,6 @@ xfs_ail_worker(
+               case XFS_ITEM_LOCKED:
+                       XFS_STATS_INC(xs_push_ail_locked);
+-                      ailp->xa_last_pushed_lsn = lsn;
+                       stuck++;
+                       break;
diff --git a/queue-3.0/xfs-force-the-log-if-we-encounter-pinned-buffers-in-.iop_pushbuf.patch b/queue-3.0/xfs-force-the-log-if-we-encounter-pinned-buffers-in-.iop_pushbuf.patch
new file mode 100644 (file)
index 0000000..aac6f5a
--- /dev/null
@@ -0,0 +1,162 @@
+From hch@infradead.org  Wed Oct 19 15:57:40 2011
+From: Christoph Hellwig <hch@infradead.org>
+Date: Tue, 18 Oct 2011 10:23:18 -0400
+Subject: xfs: force the log if we encounter pinned buffers in .iop_pushbuf
+To: stable@vger.kernel.org
+Cc: greg@kroah.com
+Message-ID: <20111018142352.085201579@bombadil.infradead.org>
+
+From: Christoph Hellwig <hch@infradead.org>
+
+commit 17b38471c3c07a49f0bbc2ecc2e92050c164e226 upstream
+
+We need to check for pinned buffers even in .iop_pushbuf given that inode
+items flush into the same buffers that may be pinned directly due to operations
+on the unlinked inode list operating directly on buffers.  To do this add a
+return value to .iop_pushbuf that tells the AIL push about this and use
+the existing log force mechanisms to unpin it.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reported-by: Stefan Priebe <s.priebe@profihost.ag>
+Tested-by: Stefan Priebe <s.priebe@profihost.ag>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/xfs/quota/xfs_dquot_item.c |   10 +++++++---
+ fs/xfs/xfs_buf_item.c         |    3 ++-
+ fs/xfs/xfs_inode_item.c       |   10 +++++++---
+ fs/xfs/xfs_trans.h            |    2 +-
+ fs/xfs/xfs_trans_ail.c        |    9 +++++++--
+ 5 files changed, 24 insertions(+), 10 deletions(-)
+
+--- a/fs/xfs/quota/xfs_dquot_item.c
++++ b/fs/xfs/quota/xfs_dquot_item.c
+@@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait(
+  * search the buffer cache can be a time consuming thing, and AIL lock is a
+  * spinlock.
+  */
+-STATIC void
++STATIC bool
+ xfs_qm_dquot_logitem_pushbuf(
+       struct xfs_log_item     *lip)
+ {
+       struct xfs_dq_logitem   *qlip = DQUOT_ITEM(lip);
+       struct xfs_dquot        *dqp = qlip->qli_dquot;
+       struct xfs_buf          *bp;
++      bool                    ret = true;
+       ASSERT(XFS_DQ_IS_LOCKED(dqp));
+@@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf(
+       if (completion_done(&dqp->q_flush) ||
+           !(lip->li_flags & XFS_LI_IN_AIL)) {
+               xfs_dqunlock(dqp);
+-              return;
++              return true;
+       }
+       bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
+                       dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
+       xfs_dqunlock(dqp);
+       if (!bp)
+-              return;
++              return true;
+       if (XFS_BUF_ISDELAYWRITE(bp))
+               xfs_buf_delwri_promote(bp);
++      if (XFS_BUF_ISPINNED(bp))
++              ret = false;
+       xfs_buf_relse(bp);
++      return ret;
+ }
+ /*
+--- a/fs/xfs/xfs_buf_item.c
++++ b/fs/xfs/xfs_buf_item.c
+@@ -632,7 +632,7 @@ xfs_buf_item_push(
+  * the xfsbufd to get this buffer written. We have to unlock the buffer
+  * to allow the xfsbufd to write it, too.
+  */
+-STATIC void
++STATIC bool
+ xfs_buf_item_pushbuf(
+       struct xfs_log_item     *lip)
+ {
+@@ -646,6 +646,7 @@ xfs_buf_item_pushbuf(
+       xfs_buf_delwri_promote(bp);
+       xfs_buf_relse(bp);
++      return true;
+ }
+ STATIC void
+--- a/fs/xfs/xfs_inode_item.c
++++ b/fs/xfs/xfs_inode_item.c
+@@ -713,13 +713,14 @@ xfs_inode_item_committed(
+  * marked delayed write. If that's the case, we'll promote it and that will
+  * allow the caller to write the buffer by triggering the xfsbufd to run.
+  */
+-STATIC void
++STATIC bool
+ xfs_inode_item_pushbuf(
+       struct xfs_log_item     *lip)
+ {
+       struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+       struct xfs_inode        *ip = iip->ili_inode;
+       struct xfs_buf          *bp;
++      bool                    ret = true;
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
+@@ -730,7 +731,7 @@ xfs_inode_item_pushbuf(
+       if (completion_done(&ip->i_flush) ||
+           !(lip->li_flags & XFS_LI_IN_AIL)) {
+               xfs_iunlock(ip, XFS_ILOCK_SHARED);
+-              return;
++              return true;
+       }
+       bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
+@@ -738,10 +739,13 @@ xfs_inode_item_pushbuf(
+       xfs_iunlock(ip, XFS_ILOCK_SHARED);
+       if (!bp)
+-              return;
++              return true;
+       if (XFS_BUF_ISDELAYWRITE(bp))
+               xfs_buf_delwri_promote(bp);
++      if (XFS_BUF_ISPINNED(bp))
++              ret = false;
+       xfs_buf_relse(bp);
++      return ret;
+ }
+ /*
+--- a/fs/xfs/xfs_trans.h
++++ b/fs/xfs/xfs_trans.h
+@@ -350,7 +350,7 @@ typedef struct xfs_item_ops {
+       void (*iop_unlock)(xfs_log_item_t *);
+       xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
+       void (*iop_push)(xfs_log_item_t *);
+-      void (*iop_pushbuf)(xfs_log_item_t *);
++      bool (*iop_pushbuf)(xfs_log_item_t *);
+       void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
+ } xfs_item_ops_t;
+--- a/fs/xfs/xfs_trans_ail.c
++++ b/fs/xfs/xfs_trans_ail.c
+@@ -478,8 +478,13 @@ xfs_ail_worker(
+               case XFS_ITEM_PUSHBUF:
+                       XFS_STATS_INC(xs_push_ail_pushbuf);
+-                      IOP_PUSHBUF(lip);
+-                      ailp->xa_last_pushed_lsn = lsn;
++
++                      if (!IOP_PUSHBUF(lip)) {
++                              stuck++;
++                              flush_log = 1;
++                      } else {
++                              ailp->xa_last_pushed_lsn = lsn;
++                      }
+                       push_xfsbufd = 1;
+                       break;
diff --git a/queue-3.0/xfs-revert-to-using-a-kthread-for-ail-pushing.patch b/queue-3.0/xfs-revert-to-using-a-kthread-for-ail-pushing.patch
new file mode 100644 (file)
index 0000000..bce0e37
--- /dev/null
@@ -0,0 +1,235 @@
+From hch@infradead.org  Wed Oct 19 15:57:58 2011
+From: Christoph Hellwig <hch@infradead.org>
+Date: Tue, 18 Oct 2011 10:23:19 -0400
+Subject: xfs: revert to using a kthread for AIL pushing
+To: stable@vger.kernel.org
+Cc: greg@kroah.com
+Message-ID: <20111018142352.317272450@bombadil.infradead.org>
+
+From: Christoph Hellwig <hch@infradead.org>
+
+commit 0030807c66f058230bcb20d2573bcaf28852e804 upstream
+
+Currently we have a few issues with the way the workqueue code is used to
+implement AIL pushing:
+
+ - it accidentally uses the same workqueue as the syncer action, and thus
+   can be prevented from running if there are enough sync actions active
+   in the system.
+ - it doesn't use the HIGHPRI flag to queue at the head of the queue of
+   work items
+
+At this point I'm not confident enough in getting all the workqueue flags and
+tweaks right to provide a perfectly reliable execution context for AIL
+pushing, which is the most important piece in XFS to make forward progress
+when the log fills.
+
+Revert back to use a kthread per filesystem which fixes all the above issues
+at the cost of having a task struct and stack around for each mounted
+filesystem.  In addition this also gives us much better ways to diagnose
+any issues involving hung AIL pushing and removes a small amount of code.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reported-by: Stefan Priebe <s.priebe@profihost.ag>
+Tested-by: Stefan Priebe <s.priebe@profihost.ag>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/xfs/linux-2.6/xfs_linux.h |    2 +
+ fs/xfs/linux-2.6/xfs_super.c |   13 -------
+ fs/xfs/xfs_trans_ail.c       |   73 ++++++++++++++++++++++++-------------------
+ fs/xfs/xfs_trans_priv.h      |    8 ----
+ 4 files changed, 45 insertions(+), 51 deletions(-)
+
+--- a/fs/xfs/linux-2.6/xfs_linux.h
++++ b/fs/xfs/linux-2.6/xfs_linux.h
+@@ -70,6 +70,8 @@
+ #include <linux/ctype.h>
+ #include <linux/writeback.h>
+ #include <linux/capability.h>
++#include <linux/kthread.h>
++#include <linux/freezer.h>
+ #include <linux/list_sort.h>
+ #include <asm/page.h>
+--- a/fs/xfs/linux-2.6/xfs_super.c
++++ b/fs/xfs/linux-2.6/xfs_super.c
+@@ -1660,24 +1660,13 @@ xfs_init_workqueues(void)
+        */
+       xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
+       if (!xfs_syncd_wq)
+-              goto out;
+-
+-      xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
+-      if (!xfs_ail_wq)
+-              goto out_destroy_syncd;
+-
++              return -ENOMEM;
+       return 0;
+-
+-out_destroy_syncd:
+-      destroy_workqueue(xfs_syncd_wq);
+-out:
+-      return -ENOMEM;
+ }
+ STATIC void
+ xfs_destroy_workqueues(void)
+ {
+-      destroy_workqueue(xfs_ail_wq);
+       destroy_workqueue(xfs_syncd_wq);
+ }
+--- a/fs/xfs/xfs_trans_ail.c
++++ b/fs/xfs/xfs_trans_ail.c
+@@ -28,8 +28,6 @@
+ #include "xfs_trans_priv.h"
+ #include "xfs_error.h"
+-struct workqueue_struct       *xfs_ail_wq;    /* AIL workqueue */
+-
+ #ifdef DEBUG
+ /*
+  * Check that the list is sorted as it should be.
+@@ -406,16 +404,10 @@ xfs_ail_delete(
+       xfs_trans_ail_cursor_clear(ailp, lip);
+ }
+-/*
+- * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
+- * to run at a later time if there is more work to do to complete the push.
+- */
+-STATIC void
+-xfs_ail_worker(
+-      struct work_struct      *work)
++static long
++xfsaild_push(
++      struct xfs_ail          *ailp)
+ {
+-      struct xfs_ail          *ailp = container_of(to_delayed_work(work),
+-                                      struct xfs_ail, xa_work);
+       xfs_mount_t             *mp = ailp->xa_mount;
+       struct xfs_ail_cursor   *cur = &ailp->xa_cursors;
+       xfs_log_item_t          *lip;
+@@ -556,20 +548,6 @@ out_done:
+               /* We're past our target or empty, so idle */
+               ailp->xa_last_pushed_lsn = 0;
+-              /*
+-               * We clear the XFS_AIL_PUSHING_BIT first before checking
+-               * whether the target has changed. If the target has changed,
+-               * this pushes the requeue race directly onto the result of the
+-               * atomic test/set bit, so we are guaranteed that either the
+-               * the pusher that changed the target or ourselves will requeue
+-               * the work (but not both).
+-               */
+-              clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
+-              smp_rmb();
+-              if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
+-                  test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+-                      return;
+-
+               tout = 50;
+       } else if (XFS_LSN_CMP(lsn, target) >= 0) {
+               /*
+@@ -592,9 +570,30 @@ out_done:
+               tout = 20;
+       }
+-      /* There is more to do, requeue us.  */
+-      queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
+-                                      msecs_to_jiffies(tout));
++      return tout;
++}
++
++static int
++xfsaild(
++      void            *data)
++{
++      struct xfs_ail  *ailp = data;
++      long            tout = 0;       /* milliseconds */
++
++      while (!kthread_should_stop()) {
++              if (tout && tout <= 20)
++                      __set_current_state(TASK_KILLABLE);
++              else
++                      __set_current_state(TASK_INTERRUPTIBLE);
++              schedule_timeout(tout ?
++                               msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
++
++              try_to_freeze();
++
++              tout = xfsaild_push(ailp);
++      }
++
++      return 0;
+ }
+ /*
+@@ -629,8 +628,9 @@ xfs_ail_push(
+        */
+       smp_wmb();
+       xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
+-      if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+-              queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
++      smp_wmb();
++
++      wake_up_process(ailp->xa_task);
+ }
+ /*
+@@ -865,9 +865,18 @@ xfs_trans_ail_init(
+       ailp->xa_mount = mp;
+       INIT_LIST_HEAD(&ailp->xa_ail);
+       spin_lock_init(&ailp->xa_lock);
+-      INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
++
++      ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
++                      ailp->xa_mount->m_fsname);
++      if (IS_ERR(ailp->xa_task))
++              goto out_free_ailp;
++
+       mp->m_ail = ailp;
+       return 0;
++
++out_free_ailp:
++      kmem_free(ailp);
++      return ENOMEM;
+ }
+ void
+@@ -876,6 +885,6 @@ xfs_trans_ail_destroy(
+ {
+       struct xfs_ail  *ailp = mp->m_ail;
+-      cancel_delayed_work_sync(&ailp->xa_work);
++      kthread_stop(ailp->xa_task);
+       kmem_free(ailp);
+ }
+--- a/fs/xfs/xfs_trans_priv.h
++++ b/fs/xfs/xfs_trans_priv.h
+@@ -64,23 +64,17 @@ struct xfs_ail_cursor {
+  */
+ struct xfs_ail {
+       struct xfs_mount        *xa_mount;
++      struct task_struct      *xa_task;
+       struct list_head        xa_ail;
+       xfs_lsn_t               xa_target;
+       struct xfs_ail_cursor   xa_cursors;
+       spinlock_t              xa_lock;
+-      struct delayed_work     xa_work;
+       xfs_lsn_t               xa_last_pushed_lsn;
+-      unsigned long           xa_flags;
+ };
+-#define XFS_AIL_PUSHING_BIT   0
+-
+ /*
+  * From xfs_trans_ail.c
+  */
+-
+-extern struct workqueue_struct        *xfs_ail_wq;    /* AIL workqueue */
+-
+ void  xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
+                               struct xfs_ail_cursor *cur,
+                               struct xfs_log_item **log_items, int nr_items,
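
Condensed, what the patch restores is the stock kthread lifecycle, spread
here across mount, push and unmount. A control-flow sketch of the pieces in
the hunks above (the timeout policy and the TASK_KILLABLE detail are
simplified away):

    /* mount: one dedicated pusher task per filesystem */
    ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
                    ailp->xa_mount->m_fsname);

    /* the task: sleep until woken (or timed out), then push the AIL */
    static int xfsaild(void *data)
    {
            struct xfs_ail  *ailp = data;
            long            tout = 0;       /* milliseconds */

            while (!kthread_should_stop()) {
                    schedule_timeout_interruptible(tout ?
                            msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
                    try_to_freeze();
                    tout = xfsaild_push(ailp);
            }
            return 0;
    }

    /* xfs_ail_push(): a new target simply wakes the task */
    wake_up_process(ailp->xa_task);

    /* unmount: wakes the task and waits for it to exit */
    kthread_stop(ailp->xa_task);
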
diff --git a/queue-3.0/xfs-start-periodic-workers-later.patch b/queue-3.0/xfs-start-periodic-workers-later.patch
new file mode 100644 (file)
index 0000000..2c20bdd
--- /dev/null
@@ -0,0 +1,102 @@
+From hch@infradead.org  Wed Oct 19 15:55:06 2011
+From: Christoph Hellwig <hch@infradead.org>
+Date: Tue, 18 Oct 2011 10:23:15 -0400
+Subject: xfs: start periodic workers later
+To: stable@vger.kernel.org
+Cc: greg@kroah.com
+Message-ID: <20111018142351.491359923@bombadil.infradead.org>
+
+From: Christoph Hellwig <hch@infradead.org>
+
+commit 2bcf6e970f5a88fa05dced5eeb0326e13d93c4a1 upstream
+
+Start the periodic sync workers only after we have finished xfs_mountfs
+and thus fully set up the filesystem structures.  Without this we can
+call into xfs_qm_sync before the quotainfo structure is set up if the
+mount takes unusually long, and probably hit other incomplete states
+as well.
+
+Also clean up the xfs_fs_fill_super error path by using consistent
+label names, and removing an impossible to reach case.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reported-by: Arkadiusz Miskiewicz <arekm@maven.pl>
+Reviewed-by: Alex Elder <aelder@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/xfs/linux-2.6/xfs_super.c |   35 ++++++++++++++---------------------
+ 1 file changed, 14 insertions(+), 21 deletions(-)
+
+--- a/fs/xfs/linux-2.6/xfs_super.c
++++ b/fs/xfs/linux-2.6/xfs_super.c
+@@ -1412,37 +1412,35 @@ xfs_fs_fill_super(
+       sb->s_time_gran = 1;
+       set_posix_acl_flag(sb);
+-      error = xfs_syncd_init(mp);
+-      if (error)
+-              goto out_filestream_unmount;
+-
+       xfs_inode_shrinker_register(mp);
+       error = xfs_mountfs(mp);
+       if (error)
+-              goto out_syncd_stop;
++              goto out_filestream_unmount;
++
++      error = xfs_syncd_init(mp);
++      if (error)
++              goto out_unmount;
+       root = igrab(VFS_I(mp->m_rootip));
+       if (!root) {
+               error = ENOENT;
+-              goto fail_unmount;
++              goto out_syncd_stop;
+       }
+       if (is_bad_inode(root)) {
+               error = EINVAL;
+-              goto fail_vnrele;
++              goto out_syncd_stop;
+       }
+       sb->s_root = d_alloc_root(root);
+       if (!sb->s_root) {
+               error = ENOMEM;
+-              goto fail_vnrele;
++              goto out_iput;
+       }
+       return 0;
+- out_syncd_stop:
+-      xfs_inode_shrinker_unregister(mp);
+-      xfs_syncd_stop(mp);
+  out_filestream_unmount:
++      xfs_inode_shrinker_unregister(mp);
+       xfs_filestream_unmount(mp);
+  out_free_sb:
+       xfs_freesb(mp);
+@@ -1456,17 +1454,12 @@ xfs_fs_fill_super(
+  out:
+       return -error;
+- fail_vnrele:
+-      if (sb->s_root) {
+-              dput(sb->s_root);
+-              sb->s_root = NULL;
+-      } else {
+-              iput(root);
+-      }
+-
+- fail_unmount:
+-      xfs_inode_shrinker_unregister(mp);
++ out_iput:
++      iput(root);
++ out_syncd_stop:
+       xfs_syncd_stop(mp);
++ out_unmount:
++      xfs_inode_shrinker_unregister(mp);
+       /*
+        * Blow away any referenced inode in the filestreams cache.
diff --git a/queue-3.0/xfs-use-a-cursor-for-bulk-ail-insertion.patch b/queue-3.0/xfs-use-a-cursor-for-bulk-ail-insertion.patch
new file mode 100644 (file)
index 0000000..3714d39
--- /dev/null
@@ -0,0 +1,308 @@
+From hch@infradead.org  Wed Oct 19 15:57:06 2011
+From: Christoph Hellwig <hch@infradead.org>
+Date: Tue, 18 Oct 2011 10:23:16 -0400
+Subject: xfs: use a cursor for bulk AIL insertion
+To: stable@vger.kernel.org
+Cc: greg@kroah.com, Dave Chinner <dchinner@redhat.com>, Alex Elder <aelder@sgi.com>
+Message-ID: <20111018142351.665759311@bombadil.infradead.org>
+
+
+From: Dave Chinner <dchinner@redhat.com>
+
+commit 1d8c95a363bf8cd4d4182dd19c01693b635311c2 upstream
+
+
+xfs: use a cursor for bulk AIL insertion
+
+Delayed logging can insert tens of thousands of log items into the
+AIL at the same LSN. When the committing of log commit records
+occurs, we can get insertions occurring at an LSN that is not at the
+end of the AIL. If there are thousands of items in the AIL on the
+tail LSN, each insertion has to walk the AIL to find the correct
+place to insert the new item into the AIL. This can consume large
+amounts of CPU time and block other operations from occurring while
+the traversals are in progress.
+
+To avoid this repeated walk, use an AIL cursor to record
+where we should be inserting the new items into the AIL without
+having to repeat the walk. The cursor infrastructure already
+provides this functionality for push walks, so this is a simple extension
+of existing code. While this will not avoid the initial walk, it
+will avoid repeating it tens of thousands of times during a single
+checkpoint commit.
+
+This version includes logic improvements from Christoph Hellwig.
+
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Alex Elder <aelder@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/xfs/xfs_trans.c      |   27 ++++++++++-
+ fs/xfs/xfs_trans_ail.c  |  109 ++++++++++++++++++++++++++++++++++++++----------
+ fs/xfs/xfs_trans_priv.h |   10 +++-
+ 3 files changed, 118 insertions(+), 28 deletions(-)
+
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -1426,6 +1426,7 @@ xfs_trans_committed(
+ static inline void
+ xfs_log_item_batch_insert(
+       struct xfs_ail          *ailp,
++      struct xfs_ail_cursor   *cur,
+       struct xfs_log_item     **log_items,
+       int                     nr_items,
+       xfs_lsn_t               commit_lsn)
+@@ -1434,7 +1435,7 @@ xfs_log_item_batch_insert(
+       spin_lock(&ailp->xa_lock);
+       /* xfs_trans_ail_update_bulk drops ailp->xa_lock */
+-      xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
++      xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
+       for (i = 0; i < nr_items; i++)
+               IOP_UNPIN(log_items[i], 0);
+@@ -1452,6 +1453,13 @@ xfs_log_item_batch_insert(
+  * as an iclog write error even though we haven't started any IO yet. Hence in
+  * this case all we need to do is IOP_COMMITTED processing, followed by an
+  * IOP_UNPIN(aborted) call.
++ *
++ * The AIL cursor is used to optimise the insert process. If commit_lsn is not
++ * at the end of the AIL, the insert cursor avoids the need to walk
++ * the AIL to find the insertion point on every xfs_log_item_batch_insert()
++ * call. This saves a lot of needless list walking and is a net win, even
++ * though it slightly increases the amount of AIL lock traffic to set it up
++ * and tear it down.
+  */
+ void
+ xfs_trans_committed_bulk(
+@@ -1463,8 +1471,13 @@ xfs_trans_committed_bulk(
+ #define LOG_ITEM_BATCH_SIZE   32
+       struct xfs_log_item     *log_items[LOG_ITEM_BATCH_SIZE];
+       struct xfs_log_vec      *lv;
++      struct xfs_ail_cursor   cur;
+       int                     i = 0;
++      spin_lock(&ailp->xa_lock);
++      xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
++      spin_unlock(&ailp->xa_lock);
++
+       /* unpin all the log items */
+       for (lv = log_vector; lv; lv = lv->lv_next ) {
+               struct xfs_log_item     *lip = lv->lv_item;
+@@ -1493,7 +1506,9 @@ xfs_trans_committed_bulk(
+                       /*
+                        * Not a bulk update option due to unusual item_lsn.
+                        * Push into AIL immediately, rechecking the lsn once
+-                       * we have the ail lock. Then unpin the item.
++                       * we have the ail lock. Then unpin the item. This does
++                       * not affect the AIL cursor the bulk insert path is
++                       * using.
+                        */
+                       spin_lock(&ailp->xa_lock);
+                       if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
+@@ -1507,7 +1522,7 @@ xfs_trans_committed_bulk(
+               /* Item is a candidate for bulk AIL insert.  */
+               log_items[i++] = lv->lv_item;
+               if (i >= LOG_ITEM_BATCH_SIZE) {
+-                      xfs_log_item_batch_insert(ailp, log_items,
++                      xfs_log_item_batch_insert(ailp, &cur, log_items,
+                                       LOG_ITEM_BATCH_SIZE, commit_lsn);
+                       i = 0;
+               }
+@@ -1515,7 +1530,11 @@ xfs_trans_committed_bulk(
+       /* make sure we insert the remainder! */
+       if (i)
+-              xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
++              xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
++
++      spin_lock(&ailp->xa_lock);
++      xfs_trans_ail_cursor_done(ailp, &cur);
++      spin_unlock(&ailp->xa_lock);
+ }
+ /*
+--- a/fs/xfs/xfs_trans_ail.c
++++ b/fs/xfs/xfs_trans_ail.c
+@@ -272,9 +272,9 @@ xfs_trans_ail_cursor_clear(
+ }
+ /*
+- * Return the item in the AIL with the current lsn.
+- * Return the current tree generation number for use
+- * in calls to xfs_trans_next_ail().
++ * Initialise the cursor to the first item in the AIL with the given @lsn.
++ * This searches the list from lowest LSN to highest. Pass a @lsn of zero
++ * to initialise the cursor to the first item in the AIL.
+  */
+ xfs_log_item_t *
+ xfs_trans_ail_cursor_first(
+@@ -300,31 +300,97 @@ out:
+ }
+ /*
+- * splice the log item list into the AIL at the given LSN.
++ * Initialise the cursor to the last item in the AIL with the given @lsn.
++ * This searches the list from highest LSN to lowest. If there is no item with
++ * the value of @lsn, then it sets the cursor to the last item with an LSN lower
++ * than @lsn.
++ */
++static struct xfs_log_item *
++__xfs_trans_ail_cursor_last(
++      struct xfs_ail          *ailp,
++      xfs_lsn_t               lsn)
++{
++      xfs_log_item_t          *lip;
++
++      list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
++              if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
++                      return lip;
++      }
++      return NULL;
++}
++
++/*
++ * Initialise the cursor to the last item in the AIL with the given @lsn.
++ * This searches the list from highest LSN to lowest.
++ */
++struct xfs_log_item *
++xfs_trans_ail_cursor_last(
++      struct xfs_ail          *ailp,
++      struct xfs_ail_cursor   *cur,
++      xfs_lsn_t               lsn)
++{
++      xfs_trans_ail_cursor_init(ailp, cur);
++      cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
++      return cur->item;
++}
++
++/*
++ * splice the log item list into the AIL at the given LSN. We splice to the
++ * tail of the given LSN to maintain insert order for push traversals. The
++ * cursor is optional, allowing repeated updates to the same LSN to avoid
++ * repeated traversals.
+  */
+ static void
+ xfs_ail_splice(
+-      struct xfs_ail  *ailp,
+-      struct list_head *list,
+-      xfs_lsn_t       lsn)
++      struct xfs_ail          *ailp,
++      struct xfs_ail_cursor   *cur,
++      struct list_head        *list,
++      xfs_lsn_t               lsn)
+ {
+-      xfs_log_item_t  *next_lip;
++      struct xfs_log_item     *lip = cur ? cur->item : NULL;
++      struct xfs_log_item     *next_lip;
+-      /* If the list is empty, just insert the item.  */
+-      if (list_empty(&ailp->xa_ail)) {
+-              list_splice(list, &ailp->xa_ail);
+-              return;
+-      }
++      /*
++       * Get a new cursor if we don't have a placeholder or the existing one
++       * has been invalidated.
++       */
++      if (!lip || (__psint_t)lip & 1) {
++              lip = __xfs_trans_ail_cursor_last(ailp, lsn);
+-      list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+-              if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
+-                      break;
++              if (!lip) {
++                      /* The list is empty, so just splice and return.  */
++                      if (cur)
++                              cur->item = NULL;
++                      list_splice(list, &ailp->xa_ail);
++                      return;
++              }
+       }
+-      ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
+-             XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
+-
+-      list_splice_init(list, &next_lip->li_ail);
++      /*
++       * Our cursor points to the item we want to insert _after_, so we have
++       * to update the cursor to point to the end of the list we are splicing
++       * in so that it points to the correct location for the next splice.
++       * i.e. before the splice
++       *
++       *  lsn -> lsn -> lsn + x -> lsn + x ...
++       *          ^
++       *          | cursor points here
++       *
++       * After the splice we have:
++       *
++       *  lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
++       *          ^                            ^
++       *          | cursor points here         | needs to move here
++       *
++       * So we set the cursor to the last item in the list to be spliced
++       * before we execute the splice, resulting in the cursor pointing to
++       * the correct item after the splice occurs.
++       */
++      if (cur) {
++              next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
++              cur->item = next_lip;
++      }
++      list_splice(list, &lip->li_ail);
+ }
+ /*
+@@ -645,6 +711,7 @@ xfs_trans_unlocked_item(
+ void
+ xfs_trans_ail_update_bulk(
+       struct xfs_ail          *ailp,
++      struct xfs_ail_cursor   *cur,
+       struct xfs_log_item     **log_items,
+       int                     nr_items,
+       xfs_lsn_t               lsn) __releases(ailp->xa_lock)
+@@ -674,7 +741,7 @@ xfs_trans_ail_update_bulk(
+               list_add(&lip->li_ail, &tmp);
+       }
+-      xfs_ail_splice(ailp, &tmp, lsn);
++      xfs_ail_splice(ailp, cur, &tmp, lsn);
+       if (!mlip_changed) {
+               spin_unlock(&ailp->xa_lock);
+--- a/fs/xfs/xfs_trans_priv.h
++++ b/fs/xfs/xfs_trans_priv.h
+@@ -82,6 +82,7 @@ struct xfs_ail {
+ extern struct workqueue_struct        *xfs_ail_wq;    /* AIL workqueue */
+ void  xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
++                              struct xfs_ail_cursor *cur,
+                               struct xfs_log_item **log_items, int nr_items,
+                               xfs_lsn_t lsn) __releases(ailp->xa_lock);
+ static inline void
+@@ -90,7 +91,7 @@ xfs_trans_ail_update(
+       struct xfs_log_item     *lip,
+       xfs_lsn_t               lsn) __releases(ailp->xa_lock)
+ {
+-      xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
++      xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
+ }
+ void  xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
+@@ -111,10 +112,13 @@ xfs_lsn_t                xfs_ail_min_lsn(struct xfs_ai
+ void                  xfs_trans_unlocked_item(struct xfs_ail *,
+                                       xfs_log_item_t *);
+-struct xfs_log_item   *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
++struct xfs_log_item * xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
+                                       struct xfs_ail_cursor *cur,
+                                       xfs_lsn_t lsn);
+-struct xfs_log_item   *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
++struct xfs_log_item * xfs_trans_ail_cursor_last(struct xfs_ail *ailp,
++                                      struct xfs_ail_cursor *cur,
++                                      xfs_lsn_t lsn);
++struct xfs_log_item * xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
+                                       struct xfs_ail_cursor *cur);
+ void                  xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
+                                       struct xfs_ail_cursor *cur);
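
Reduced to its skeleton, the commit-side usage bracketing introduced above
looks like this (a sketch; the per-item unpinning and the 32-item batching in
xfs_trans_committed_bulk() are elided):

    struct xfs_ail_cursor   cur;

    /* One walk up front to find the insertion point for commit_lsn. */
    spin_lock(&ailp->xa_lock);
    xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
    spin_unlock(&ailp->xa_lock);

    /* For each batch of log items: splice at the cursor. */
    xfs_log_item_batch_insert(ailp, &cur, log_items, nr_items, commit_lsn);
            /* -> xfs_trans_ail_update_bulk() -> xfs_ail_splice(ailp, &cur, ...) */

    /* Release the cursor once everything is inserted. */
    spin_lock(&ailp->xa_lock);
    xfs_trans_ail_cursor_done(ailp, &cur);
    spin_unlock(&ailp->xa_lock);

Because xfs_ail_splice() leaves cur->item pointing at the last item it
spliced in, later batches at the same commit_lsn start from there instead of
re-walking thousands of AIL entries.
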