src/patches/suse-2.6.27.31/patches.fixes/nfsd-07-nfsd-idmap-drop-special-request-deferal-in-favour-of.patch

Patch-mainline: submitted 04aug2009
References: bnc#498708
From: NeilBrown <neilb@suse.de>
Date: Tue, 4 Aug 2009 15:06:39 +1000
Subject: [PATCH 09/12] nfsd/idmap: drop special request deferral in favour of improved default.

The idmap code manages request deferral by waiting for a reply from
userspace rather than putting the NFS request on a queue to be retried
from the start.
Now that the common deferral code does this, there is no need for the
special code in idmap.

Signed-off-by: NeilBrown <neilb@suse.de>

---
 fs/nfsd/nfs4idmap.c |  105 +++++-----------------------------------------------
 1 file changed, 11 insertions(+), 94 deletions(-)

--- linux-2.6.27-SLE11_BRANCH.orig/fs/nfsd/nfs4idmap.c
+++ linux-2.6.27-SLE11_BRANCH/fs/nfsd/nfs4idmap.c
@@ -485,109 +485,26 @@ nfsd_idmap_shutdown(void)
 	cache_unregister(&nametoid_cache);
 }
 
-/*
- * Deferred request handling
- */
-
-struct idmap_defer_req {
-	struct cache_req	req;
-	struct cache_deferred_req deferred_req;
-	wait_queue_head_t	waitq;
-	atomic_t		count;
-};
-
-static inline void
-put_mdr(struct idmap_defer_req *mdr)
-{
-	if (atomic_dec_and_test(&mdr->count))
-		kfree(mdr);
-}
-
-static inline void
-get_mdr(struct idmap_defer_req *mdr)
-{
-	atomic_inc(&mdr->count);
-}
-
-static void
-idmap_revisit(struct cache_deferred_req *dreq, int toomany)
-{
-	struct idmap_defer_req *mdr =
-		container_of(dreq, struct idmap_defer_req, deferred_req);
-
-	wake_up(&mdr->waitq);
-	put_mdr(mdr);
-}
-
-static struct cache_deferred_req *
-idmap_defer(struct cache_req *req)
-{
-	struct idmap_defer_req *mdr =
-		container_of(req, struct idmap_defer_req, req);
-
-	mdr->deferred_req.revisit = idmap_revisit;
-	get_mdr(mdr);
-	return (&mdr->deferred_req);
-}
-
-static inline int
-do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key,
-		struct cache_detail *detail, struct ent **item,
-		struct idmap_defer_req *mdr)
-{
-	*item = lookup_fn(key);
-	if (!*item)
-		return -ENOMEM;
-	return cache_check(detail, &(*item)->h, &mdr->req);
-}
-
-static inline int
-do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *),
-			struct ent *key, struct cache_detail *detail,
-			struct ent **item)
-{
-	int ret = -ENOMEM;
-
-	*item = lookup_fn(key);
-	if (!*item)
-		goto out_err;
-	ret = -ETIMEDOUT;
-	if (!test_bit(CACHE_VALID, &(*item)->h.flags)
-			|| (*item)->h.expiry_time < get_seconds()
-			|| detail->flush_time > (*item)->h.last_refresh)
-		goto out_put;
-	ret = -ENOENT;
-	if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
-		goto out_put;
-	return 0;
-out_put:
-	cache_put(&(*item)->h, detail);
-out_err:
-	*item = NULL;
-	return ret;
-}
-
 static int
 idmap_lookup(struct svc_rqst *rqstp,
 		struct ent *(*lookup_fn)(struct ent *), struct ent *key,
 		struct cache_detail *detail, struct ent **item)
 {
-	struct idmap_defer_req *mdr;
 	int ret;
 
-	mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
-	if (!mdr)
+	*item = lookup_fn(key);
+	if (!*item)
 		return -ENOMEM;
-	atomic_set(&mdr->count, 1);
-	init_waitqueue_head(&mdr->waitq);
-	mdr->req.defer = idmap_defer;
-	ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr);
-	if (ret == -EAGAIN) {
-		wait_event_interruptible_timeout(mdr->waitq,
-			test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
-		ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item);
+ retry:
+	ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);
+
+	if (ret == -ETIMEDOUT) {
+		struct ent *prev_item = *item;
+		*item = lookup_fn(key);
+		if (*item != prev_item)
+			goto retry;
+		cache_put(&(*item)->h, detail);
 	}
-	put_mdr(mdr);
 	return ret;
 }
 
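
For readers not familiar with the sunrpc cache, the control flow that the new
idmap_lookup() above adopts can be illustrated with a small, self-contained
user-space sketch. The code below is an editorial illustration only and is not
part of the patch: toy_lookup(), toy_check() and toy_put() are hypothetical
stand-ins for the kernel's lookup_fn(), cache_check() and cache_put(), and the
simulated cache is far simpler than the real one. It shows the two outcomes of
the -ETIMEDOUT path: if re-running the lookup finds the same stale entry, the
caller gives up; if a fresh entry has appeared (e.g. because the userspace
reply updated the cache), the check is retried against it.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_entry {
	int valid;	/* set once the "userspace reply" has filled it in */
};

static struct toy_entry *current_entry;
static int reply_pending;

/* Stand-in for lookup_fn(): return the current cache entry, creating an
 * invalid one on first use. */
static struct toy_entry *toy_lookup(void)
{
	if (!current_entry)
		current_entry = calloc(1, sizeof(*current_entry));
	return current_entry;
}

/* Stand-in for cache_put(); this sketch does not track references and
 * never frees entries. */
static void toy_put(struct toy_entry *e)
{
	(void)e;
}

/* Stand-in for cache_check(): succeed on a valid entry, otherwise pretend
 * that waiting for the upcall reply timed out.  If a reply is "pending",
 * it replaces the cache entry with a fresh, valid one, roughly as a real
 * upcall reply would update the cache. */
static int toy_check(struct toy_entry *e)
{
	if (e->valid)
		return 0;
	if (reply_pending) {
		struct toy_entry *fresh = calloc(1, sizeof(*fresh));

		if (fresh) {
			fresh->valid = 1;
			current_entry = fresh;
		}
		reply_pending = 0;
	}
	return -ETIMEDOUT;
}

/* Same control flow as the new idmap_lookup() above. */
static int toy_idmap_lookup(struct toy_entry **item)
{
	int ret;

	*item = toy_lookup();
	if (!*item)
		return -ENOMEM;
retry:
	ret = toy_check(*item);
	if (ret == -ETIMEDOUT) {
		struct toy_entry *prev_item = *item;

		*item = toy_lookup();
		if (*item != prev_item)
			goto retry;	/* a fresh entry appeared: check it */
		toy_put(*item);		/* same stale entry: give up */
	}
	return ret;
}

int main(void)
{
	struct toy_entry *item;

	/* No reply ever arrives: the lookup gives up with -ETIMEDOUT. */
	printf("no reply:   %d\n", toy_idmap_lookup(&item));

	/* A reply arrives while we "wait": the re-lookup finds the fresh
	 * entry and the retry succeeds. */
	reply_pending = 1;
	printf("with reply: %d\n", toy_idmap_lookup(&item));
	return 0;
}

Built with any C compiler, the first call reports -ETIMEDOUT (typically -110
on Linux) and the second reports 0, mirroring the give-up and retry branches
of the new code.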