Patch-mainline: submitted 04aug2009
From: NeilBrown <neilb@suse.de>
Date: Tue, 4 Aug 2009 15:06:39 +1000
Subject: [PATCH 09/12] nfsd/idmap: drop special request deferal in favour of improved default.
The idmap code manages request deferral by waiting for a reply from
userspace rather than putting the NFS request on a queue to be retried
from the start.
Now that the common deferral code does this there is no need for the
special code in idmap.
Signed-off-by: NeilBrown <neilb@suse.de>
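---
For review convenience, here is roughly what the lookup path reduces to
after this patch (a sketch assembled from the hunk below; indentation is
approximate and the comments are explanatory additions, not part of the
change itself):

static int
idmap_lookup(struct svc_rqst *rqstp,
		struct ent *(*lookup_fn)(struct ent *), struct ent *key,
		struct cache_detail *detail, struct ent **item)
{
	int ret;

	*item = lookup_fn(key);
	if (!*item)
		return -ENOMEM;
 retry:
	/* The generic sunrpc cache deferral now does the waiting for the
	 * userspace reply; it may hand back -ETIMEDOUT if it gives up. */
	ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);

	if (ret == -ETIMEDOUT) {
		/* The cache entry may have been replaced while we waited;
		 * if a fresh lookup finds a different item, retry with it,
		 * otherwise drop the reference and return the timeout. */
		struct ent *prev_item = *item;
		*item = lookup_fn(key);
		if (*item != prev_item)
			goto retry;
		cache_put(&(*item)->h, detail);
	}
	return ret;
}
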
 fs/nfsd/nfs4idmap.c | 105 +++++-----------------------------------------------
 1 file changed, 11 insertions(+), 94 deletions(-)

--- linux-2.6.27-SLE11_BRANCH.orig/fs/nfsd/nfs4idmap.c
+++ linux-2.6.27-SLE11_BRANCH/fs/nfsd/nfs4idmap.c
@@ -485,109 +485,26 @@ nfsd_idmap_shutdown(void)
 	cache_unregister(&nametoid_cache);
 }
 
-/*
- * Deferred request handling
- */
-
-struct idmap_defer_req {
-	struct cache_req	req;
-	struct cache_deferred_req deferred_req;
-	wait_queue_head_t	waitq;
-	atomic_t		count;
-};
-
-static void
-put_mdr(struct idmap_defer_req *mdr)
-{
-	if (atomic_dec_and_test(&mdr->count))
-		kfree(mdr);
-}
-
-static void
-get_mdr(struct idmap_defer_req *mdr)
-{
-	atomic_inc(&mdr->count);
-}
-
-static void
-idmap_revisit(struct cache_deferred_req *dreq, int toomany)
-{
-	struct idmap_defer_req *mdr =
-		container_of(dreq, struct idmap_defer_req, deferred_req);
-
-	wake_up(&mdr->waitq);
-	put_mdr(mdr);
-}
-
-static struct cache_deferred_req *
-idmap_defer(struct cache_req *req)
-{
-	struct idmap_defer_req *mdr =
-		container_of(req, struct idmap_defer_req, req);
-
-	mdr->deferred_req.revisit = idmap_revisit;
-	get_mdr(mdr);
-	return (&mdr->deferred_req);
-}
-
-static inline int
-do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key,
-		struct cache_detail *detail, struct ent **item,
-		struct idmap_defer_req *mdr)
-{
-	*item = lookup_fn(key);
-	if (!*item)
-		return -ENOMEM;
-	return cache_check(detail, &(*item)->h, &mdr->req);
-}
-
-static inline int
-do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *),
-			struct ent *key, struct cache_detail *detail,
-			struct ent **item)
-{
-	int ret = -ENOMEM;
-
-	*item = lookup_fn(key);
-	if (!*item)
-		goto out_err;
-	ret = -ETIMEDOUT;
-	if (!test_bit(CACHE_VALID, &(*item)->h.flags)
-			|| (*item)->h.expiry_time < get_seconds()
-			|| detail->flush_time > (*item)->h.last_refresh)
-		goto out_put;
-	ret = -ENOENT;
-	if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
-		goto out_put;
-	return 0;
-out_put:
-	cache_put(&(*item)->h, detail);
-out_err:
-	*item = NULL;
-	return ret;
-}
-
 static int
 idmap_lookup(struct svc_rqst *rqstp,
 		struct ent *(*lookup_fn)(struct ent *), struct ent *key,
 		struct cache_detail *detail, struct ent **item)
 {
-	struct idmap_defer_req *mdr;
 	int ret;
 
-	mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
-	if (!mdr)
+	*item = lookup_fn(key);
+	if (!*item)
 		return -ENOMEM;
-	atomic_set(&mdr->count, 1);
-	init_waitqueue_head(&mdr->waitq);
-	mdr->req.defer = idmap_defer;
-	ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr);
-	if (ret == -EAGAIN) {
-		wait_event_interruptible_timeout(mdr->waitq,
-			test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
-		ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item);
+ retry:
+	ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);
+
+	if (ret == -ETIMEDOUT) {
+		struct ent *prev_item = *item;
+		*item = lookup_fn(key);
+		if (*item != prev_item)
+			goto retry;
+		cache_put(&(*item)->h, detail);
 	}
-	put_mdr(mdr);
 	return ret;
 }