]> git.ipfire.org Git - thirdparty/kernel/linux.git/blame - fs/ceph/caps.c
ceph: new cap message flags indicate if there is pending capsnap
[thirdparty/kernel/linux.git] / fs / ceph / caps.c
CommitLineData
3d14c5d2 1#include <linux/ceph/ceph_debug.h>
a8599bd8
SW
2
3#include <linux/fs.h>
4#include <linux/kernel.h>
174cd4b1 5#include <linux/sched/signal.h>
5a0e3ad6 6#include <linux/slab.h>
a8599bd8
SW
7#include <linux/vmalloc.h>
8#include <linux/wait.h>
f1a3d572 9#include <linux/writeback.h>
a8599bd8
SW
10
11#include "super.h"
3d14c5d2 12#include "mds_client.h"
99ccbd22 13#include "cache.h"
3d14c5d2
YS
14#include <linux/ceph/decode.h>
15#include <linux/ceph/messenger.h>
a8599bd8
SW
16
17/*
18 * Capability management
19 *
20 * The Ceph metadata servers control client access to inode metadata
21 * and file data by issuing capabilities, granting clients permission
22 * to read and/or write both inode field and file data to OSDs
23 * (storage nodes). Each capability consists of a set of bits
24 * indicating which operations are allowed.
25 *
26 * If the client holds a *_SHARED cap, the client has a coherent value
27 * that can be safely read from the cached inode.
28 *
29 * In the case of a *_EXCL (exclusive) or FILE_WR capabilities, the
30 * client is allowed to change inode attributes (e.g., file size,
31 * mtime), note its dirty state in the ceph_cap, and asynchronously
32 * flush that metadata change to the MDS.
33 *
34 * In the event of a conflicting operation (perhaps by another
35 * client), the MDS will revoke the conflicting client capabilities.
36 *
37 * In order for a client to cache an inode, it must hold a capability
38 * with at least one MDS server. When inodes are released, release
39 * notifications are batched and periodically sent en masse to the MDS
40 * cluster to release server state.
41 */
42
0e294387 43static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc);
7bc00fdd
YZ
44static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
45 struct ceph_mds_session *session,
46 struct ceph_inode_info *ci,
47 u64 oldest_flush_tid);
a8599bd8
SW
48
49/*
50 * Generate readable cap strings for debugging output.
51 */
52#define MAX_CAP_STR 20
53static char cap_str[MAX_CAP_STR][40];
54static DEFINE_SPINLOCK(cap_str_lock);
55static int last_cap_str;
56
57static char *gcap_string(char *s, int c)
58{
59 if (c & CEPH_CAP_GSHARED)
60 *s++ = 's';
61 if (c & CEPH_CAP_GEXCL)
62 *s++ = 'x';
63 if (c & CEPH_CAP_GCACHE)
64 *s++ = 'c';
65 if (c & CEPH_CAP_GRD)
66 *s++ = 'r';
67 if (c & CEPH_CAP_GWR)
68 *s++ = 'w';
69 if (c & CEPH_CAP_GBUFFER)
70 *s++ = 'b';
71 if (c & CEPH_CAP_GLAZYIO)
72 *s++ = 'l';
73 return s;
74}
75
76const char *ceph_cap_string(int caps)
77{
78 int i;
79 char *s;
80 int c;
81
82 spin_lock(&cap_str_lock);
83 i = last_cap_str++;
84 if (last_cap_str == MAX_CAP_STR)
85 last_cap_str = 0;
86 spin_unlock(&cap_str_lock);
87
88 s = cap_str[i];
89
90 if (caps & CEPH_CAP_PIN)
91 *s++ = 'p';
92
93 c = (caps >> CEPH_CAP_SAUTH) & 3;
94 if (c) {
95 *s++ = 'A';
96 s = gcap_string(s, c);
97 }
98
99 c = (caps >> CEPH_CAP_SLINK) & 3;
100 if (c) {
101 *s++ = 'L';
102 s = gcap_string(s, c);
103 }
104
105 c = (caps >> CEPH_CAP_SXATTR) & 3;
106 if (c) {
107 *s++ = 'X';
108 s = gcap_string(s, c);
109 }
110
111 c = caps >> CEPH_CAP_SFILE;
112 if (c) {
113 *s++ = 'F';
114 s = gcap_string(s, c);
115 }
116
117 if (s == cap_str[i])
118 *s++ = '-';
119 *s = 0;
120 return cap_str[i];
121}
122
/*
 * Initialize the per-mdsc preallocated cap pool (list + protecting lock).
 */
void ceph_caps_init(struct ceph_mds_client *mdsc)
{
	INIT_LIST_HEAD(&mdsc->caps_list);
	spin_lock_init(&mdsc->caps_list_lock);
}
128
37151668 129void ceph_caps_finalize(struct ceph_mds_client *mdsc)
a8599bd8
SW
130{
131 struct ceph_cap *cap;
132
37151668
YS
133 spin_lock(&mdsc->caps_list_lock);
134 while (!list_empty(&mdsc->caps_list)) {
135 cap = list_first_entry(&mdsc->caps_list,
136 struct ceph_cap, caps_item);
a8599bd8
SW
137 list_del(&cap->caps_item);
138 kmem_cache_free(ceph_cap_cachep, cap);
139 }
37151668
YS
140 mdsc->caps_total_count = 0;
141 mdsc->caps_avail_count = 0;
142 mdsc->caps_use_count = 0;
143 mdsc->caps_reserve_count = 0;
144 mdsc->caps_min_count = 0;
145 spin_unlock(&mdsc->caps_list_lock);
85ccce43
SW
146}
147
/*
 * Adjust the minimum number of caps to keep preallocated (@delta may be
 * negative).  The floor must never go below zero.
 */
void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_min_count += delta;
	BUG_ON(mdsc->caps_min_count < 0);
	spin_unlock(&mdsc->caps_list_lock);
}
155
/*
 * Reserve @need caps for @ctx: take as many as possible from the
 * preallocated pool, allocate the remainder, and account them as
 * "reserved".  Invariant maintained throughout:
 *   caps_total_count == caps_use_count + caps_reserve_count +
 *                       caps_avail_count
 *
 * NOTE(review): on allocation failure this only warns and still sets
 * ctx->count = need, so the reservation may be over-stated relative to
 * what was actually obtained — confirm callers tolerate this.
 */
void ceph_reserve_caps(struct ceph_mds_client *mdsc,
		       struct ceph_cap_reservation *ctx, int need)
{
	int i;
	struct ceph_cap *cap;
	int have;
	int alloc = 0;
	LIST_HEAD(newcaps);

	dout("reserve caps ctx=%p need=%d\n", ctx, need);

	/* first reserve any caps that are already allocated */
	spin_lock(&mdsc->caps_list_lock);
	if (mdsc->caps_avail_count >= need)
		have = need;
	else
		have = mdsc->caps_avail_count;
	mdsc->caps_avail_count -= have;
	mdsc->caps_reserve_count += have;
	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	/* allocate the shortfall outside the lock */
	for (i = have; i < need; i++) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (!cap)
			break;
		list_add(&cap->caps_item, &newcaps);
		alloc++;
	}
	/* we didn't manage to reserve as much as we needed */
	if (have + alloc != need)
		pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
			ctx, need, have + alloc);

	/* fold the new allocations into the pool accounting */
	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_total_count += alloc;
	mdsc->caps_reserve_count += alloc;
	list_splice(&newcaps, &mdsc->caps_list);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	ctx->count = need;
	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
	     ctx, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
}
207
/*
 * Return any unused reserved caps from @ctx to the available pool.
 * The caps themselves stay on caps_list; only the accounting moves
 * from "reserved" back to "available".  Always returns 0.
 */
int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
			struct ceph_cap_reservation *ctx)
{
	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
	if (ctx->count) {
		spin_lock(&mdsc->caps_list_lock);
		BUG_ON(mdsc->caps_reserve_count < ctx->count);
		mdsc->caps_reserve_count -= ctx->count;
		mdsc->caps_avail_count += ctx->count;
		ctx->count = 0;
		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
		     mdsc->caps_total_count, mdsc->caps_use_count,
		     mdsc->caps_reserve_count, mdsc->caps_avail_count);
		BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
						 mdsc->caps_reserve_count +
						 mdsc->caps_avail_count);
		spin_unlock(&mdsc->caps_list_lock);
	}
	return 0;
}
228
/*
 * Hand out one cap structure.  With a reservation @ctx, take it from the
 * preallocated pool (moving accounting reserve -> use).  Without a ctx,
 * allocate directly; may return NULL on allocation failure in that case.
 */
struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
			      struct ceph_cap_reservation *ctx)
{
	struct ceph_cap *cap = NULL;

	/* temporary, until we do something about cap import/export */
	if (!ctx) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (cap) {
			spin_lock(&mdsc->caps_list_lock);
			mdsc->caps_use_count++;
			mdsc->caps_total_count++;
			spin_unlock(&mdsc->caps_list_lock);
		}
		return cap;
	}

	spin_lock(&mdsc->caps_list_lock);
	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
	     ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	/* a reservation guarantees the pool can satisfy us */
	BUG_ON(!ctx->count);
	BUG_ON(ctx->count > mdsc->caps_reserve_count);
	BUG_ON(list_empty(&mdsc->caps_list));

	ctx->count--;
	mdsc->caps_reserve_count--;
	mdsc->caps_use_count++;

	cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
	list_del(&cap->caps_item);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
	return cap;
}
266
/*
 * Release a cap structure: either free it outright or return it to the
 * preallocated pool, keeping at least caps_reserve_count + caps_min_count
 * caps cached to avoid free/alloc churn.
 */
void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
	spin_lock(&mdsc->caps_list_lock);
	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
	     cap, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	mdsc->caps_use_count--;
	/*
	 * Keep some preallocated caps around (ceph_min_count), to
	 * avoid lots of free/alloc churn.
	 */
	if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
				      mdsc->caps_min_count) {
		mdsc->caps_total_count--;
		kmem_cache_free(ceph_cap_cachep, cap);
	} else {
		mdsc->caps_avail_count++;
		list_add(&cap->caps_item, &mdsc->caps_list);
	}

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
}
291
/*
 * Report the current cap-pool counters.  Any output pointer may be NULL
 * if the caller is not interested in that value.
 *
 * NOTE(review): counters are read without caps_list_lock, so the set of
 * values may be mutually inconsistent — presumably acceptable for the
 * debugfs-style reporting this serves; confirm against callers.
 */
void ceph_reservation_status(struct ceph_fs_client *fsc,
			     int *total, int *avail, int *used, int *reserved,
			     int *min)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	if (total)
		*total = mdsc->caps_total_count;
	if (avail)
		*avail = mdsc->caps_avail_count;
	if (used)
		*used = mdsc->caps_use_count;
	if (reserved)
		*reserved = mdsc->caps_reserve_count;
	if (min)
		*min = mdsc->caps_min_count;
}
309
310/*
311 * Find ceph_cap for given mds, if any.
312 *
be655596 313 * Called with i_ceph_lock held.
a8599bd8
SW
314 */
315static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
316{
317 struct ceph_cap *cap;
318 struct rb_node *n = ci->i_caps.rb_node;
319
320 while (n) {
321 cap = rb_entry(n, struct ceph_cap, ci_node);
322 if (mds < cap->mds)
323 n = n->rb_left;
324 else if (mds > cap->mds)
325 n = n->rb_right;
326 else
327 return cap;
328 }
329 return NULL;
330}
331
/*
 * Locked wrapper around __get_cap_for_mds().
 *
 * NOTE(review): the returned cap pointer is used after i_ceph_lock is
 * dropped — callers presumably hold a reference that keeps it alive;
 * verify at call sites.
 */
struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;

	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	spin_unlock(&ci->i_ceph_lock);
	return cap;
}
341
/*
 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	int mds = -1;
	struct rb_node *p;

	/* prefer mds with WR|BUFFER|EXCL caps */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		/* remember the last mds seen; keep it if none is preferred */
		mds = cap->mds;
		if (cap->issued & (CEPH_CAP_FILE_WR |
				   CEPH_CAP_FILE_BUFFER |
				   CEPH_CAP_FILE_EXCL))
			break;
	}
	return mds;
}
362
363int ceph_get_cap_mds(struct inode *inode)
364{
be655596 365 struct ceph_inode_info *ci = ceph_inode(inode);
a8599bd8 366 int mds;
be655596 367 spin_lock(&ci->i_ceph_lock);
ca81f3f6 368 mds = __ceph_get_cap_mds(ceph_inode(inode));
be655596 369 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
370 return mds;
371}
372
/*
 * Insert @new into the inode's cap tree, keyed by mds id.  An existing
 * cap for the same mds is a caller bug (BUG()).
 *
 * Called under i_ceph_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	/* standard rbtree descent to find the insertion point */
	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}
397
/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 * Min/max delays come from the mount options (caps_wanted_delay_*).
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_options *ma = mdsc->fsc->mount_options;

	ci->i_hold_caps_min = round_jiffies(jiffies +
					    ma->caps_wanted_delay_min * HZ);
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    ma->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}
414
/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
				struct ceph_inode_info *ci)
{
	__cap_set_timeouts(mdsc, ci);
	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	/* don't requeue while the client is shutting down */
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			/* already queued with I_FLUSH: keep its position */
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}
441
/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
	spin_lock(&mdsc->cap_delay_lock);
	ci->i_ceph_flags |= CEPH_I_FLUSH;
	/* unlink first if already queued, then insert at the head */
	if (!list_empty(&ci->i_cap_delay_list))
		list_del_init(&ci->i_cap_delay_list);
	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}
458
/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
	/* fast path: not queued at all */
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}
474
/*
 * Common issue checks for add_cap, handle_cap_grant: bump generation
 * counters when caps are newly issued so stale cached state is noticed.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
	 */
	if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
	    (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
		ci->i_rdcache_gen++;
	}

	/*
	 * if we are newly issued FILE_SHARED, mark dir not complete; we
	 * don't know what happened to this directory while we didn't
	 * have the cap.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) &&
	    (had & CEPH_CAP_FILE_SHARED) == 0) {
		ci->i_shared_gen++;
		if (S_ISDIR(ci->vfs_inode.i_mode)) {
			dout(" marking %p NOT complete\n", &ci->vfs_inode);
			__ceph_dir_clear_complete(ci);
		}
	}
}
506
/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
void ceph_add_cap(struct inode *inode,
		  struct ceph_mds_session *session, u64 cap_id,
		  int fmode, unsigned issued, unsigned wanted,
		  unsigned seq, unsigned mseq, u64 realmino, int flags,
		  struct ceph_cap **new_cap)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	/*
	 * If we are opening the file, include file mode wanted bits
	 * in wanted.
	 */
	if (fmode >= 0)
		wanted |= ceph_caps_for_mode(fmode);

	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		/* consume the preallocated cap the caller supplied */
		cap = *new_cap;
		*new_cap = NULL;

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;
		cap->mseq = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		spin_unlock(&session->s_cap_lock);
	} else {
		/*
		 * auth mds of the inode changed. we received the cap export
		 * message, but still haven't received the cap import message.
		 * handle_cap_export() updated the new auth MDS' cap.
		 *
		 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing
		 * a message that was send before the cap import message. So
		 * don't remove caps.
		 */
		if (ceph_seq_cmp(seq, cap->seq) <= 0) {
			WARN_ON(cap != ci->i_auth_cap);
			WARN_ON(cap->cap_id != cap_id);
			seq = cap->seq;
			mseq = cap->mseq;
			issued |= cap->issued;
			flags |= CEPH_CAP_FLAG_AUTH;
		}
	}

	if (!ci->i_snap_realm) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
							       realmino);
		if (realm) {
			spin_lock(&realm->inodes_with_caps_lock);
			ci->i_snap_realm = realm;
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			spin_unlock(&realm->inodes_with_caps_lock);
		} else {
			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
			       realmino);
			WARN_ON(!realm);
		}
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	if (flags & CEPH_CAP_FLAG_AUTH) {
		/* only take over auth if this cap's mseq is newer */
		if (ci->i_auth_cap == NULL ||
		    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
			ci->i_auth_cap = cap;
			cap->mds_wanted = wanted;
		}
	} else {
		WARN_ON(ci->i_auth_cap == cap);
	}

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	/* newer migrate_seq replaces wanted; otherwise accumulate */
	if (ceph_seq_cmp(mseq, cap->mseq) > 0)
		cap->mds_wanted = wanted;
	else
		cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = session->s_cap_gen;

	if (fmode >= 0)
		__ceph_get_fmode(ci, fmode);
}
641
/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
	unsigned long ttl;
	u32 gen;

	/* snapshot gen/ttl together under the session lock */
	spin_lock(&cap->session->s_gen_ttl_lock);
	gen = cap->session->s_cap_gen;
	ttl = cap->session->s_cap_ttl;
	spin_unlock(&cap->session->s_gen_ttl_lock);

	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
		dout("__cap_is_valid %p cap %p issued %s "
		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
		return 0;
	}

	return 1;
}
666
/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 *
 * If @implemented is non-NULL, also accumulate the implemented bits there.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	/*
	 * exclude caps issued by non-auth MDS, but are been revoking
	 * by the auth MDS. The non-auth MDS should be revoking/exporting
	 * these caps, but the message is delayed.
	 */
	if (ci->i_auth_cap) {
		cap = ci->i_auth_cap;
		have &= ~cap->implemented | cap->issued;
	}
	return have;
}
701
702/*
703 * Get cap bits issued by caps other than @ocap
704 */
705int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
706{
707 int have = ci->i_snap_caps;
708 struct ceph_cap *cap;
709 struct rb_node *p;
710
711 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
712 cap = rb_entry(p, struct ceph_cap, ci_node);
713 if (cap == ocap)
714 continue;
715 if (!__cap_is_valid(cap))
716 continue;
717 have |= cap->issued;
718 }
719 return have;
720}
721
722/*
723 * Move a cap to the end of the LRU (oldest caps at list head, newest
724 * at list tail).
725 */
726static void __touch_cap(struct ceph_cap *cap)
727{
728 struct ceph_mds_session *s = cap->session;
729
a8599bd8 730 spin_lock(&s->s_cap_lock);
7c1332b8 731 if (s->s_cap_iterator == NULL) {
5dacf091
SW
732 dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
733 s->s_mds);
734 list_move_tail(&cap->session_caps, &s->s_caps);
735 } else {
736 dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
737 &cap->ci->vfs_inode, cap, s->s_mds);
738 }
a8599bd8
SW
739 spin_unlock(&s->s_cap_lock);
740}
741
/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * front of their respective LRUs.  (This is the preferred way for
 * callers to check for caps they want.)
 *
 * Returns 1 if the mask is fully covered by snap caps, one cap, or a
 * combination of valid caps; 0 otherwise.
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	/* snap caps alone may already satisfy the mask */
	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask %p snap issued %s"
		     " (mask %s)\n", &ci->vfs_inode,
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		/* a single cap may satisfy the whole mask... */
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask %p cap %p issued %s"
			     " (mask %s)\n", &ci->vfs_inode, cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask %p combo issued %s"
			     " (mask %s)\n", &ci->vfs_inode,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}
802
803/*
804 * Return true if mask caps are currently being revoked by an MDS.
805 */
6ee6b953
YZ
806int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
807 struct ceph_cap *ocap, int mask)
a8599bd8 808{
a8599bd8
SW
809 struct ceph_cap *cap;
810 struct rb_node *p;
a8599bd8 811
a8599bd8
SW
812 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
813 cap = rb_entry(p, struct ceph_cap, ci_node);
9563f88c 814 if (cap != ocap &&
6ee6b953
YZ
815 (cap->implemented & ~cap->issued & mask))
816 return 1;
a8599bd8 817 }
6ee6b953
YZ
818 return 0;
819}
820
/*
 * Locked wrapper: is any cap on this inode revoking bits in @mask?
 */
int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
	struct inode *inode = &ci->vfs_inode;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_caps_revoking_other(ci, NULL, mask);
	spin_unlock(&ci->i_ceph_lock);
	dout("ceph_caps_revoking %p %s = %d\n", inode,
	     ceph_cap_string(mask), ret);
	return ret;
}
833
/*
 * Return the set of caps the inode is actively using, derived from the
 * various per-inode reference counters.
 */
int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;
	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	/* cached pages count as CACHE use, except the readdir cache of dirs */
	if (ci->i_rdcache_ref ||
	    (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */
	     ci->vfs_inode.i_data.nrpages))
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wb_ref || ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	return used;
}
851
/*
 * wanted, by virtue of open file modes: collapse the per-mode open
 * counts into a mode bitmask and translate it to cap bits.
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
	int i, bits = 0;
	for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
		if (ci->i_nr_by_mode[i])
			bits |= 1 << i;
	}
	if (bits == 0)
		return 0;
	/* NOTE(review): bits >> 1 drops the PIN-mode bit — presumably
	 * intentional since pin alone wants no file caps; confirm against
	 * ceph_caps_for_mode(). */
	return ceph_caps_for_mode(bits >> 1);
}
866
/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 *
 * If @check, skip caps that fail __cap_is_valid().  Non-auth caps
 * contribute their wanted bits minus any file-write caps.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int mds_wanted = 0;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (check && !__cap_is_valid(cap))
			continue;
		if (cap == ci->i_auth_cap)
			mds_wanted |= cap->mds_wanted;
		else
			mds_wanted |= (cap->mds_wanted & ~CEPH_CAP_ANY_FILE_WR);
	}
	return mds_wanted;
}
887
/*
 * Does the inode hold any caps at all (non-empty cap tree)?
 *
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps);
}
895
/*
 * Locked wrapper around __ceph_is_any_caps().
 */
int ceph_is_any_caps(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_is_any_caps(ci);
	spin_unlock(&ci->i_ceph_lock);

	return ret;
}
907
/*
 * Detach the inode from its snap realm and drop the realm reference.
 * Bumps i_snap_realm_counter so stale cached lookups can be detected.
 */
static void drop_inode_snap_realm(struct ceph_inode_info *ci)
{
	struct ceph_snap_realm *realm = ci->i_snap_realm;
	spin_lock(&realm->inodes_with_caps_lock);
	list_del_init(&ci->i_snap_realm_item);
	ci->i_snap_realm_counter++;
	ci->i_snap_realm = NULL;
	spin_unlock(&realm->inodes_with_caps_lock);
	ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
			    realm);
}
919
/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	int removed = 0;

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		cap->session = NULL;
		removed = 1;
	}
	/* protect backpointer with s_cap_lock: see iterate_session_caps */
	cap->ci = NULL;

	/*
	 * s_cap_reconnect is protected by s_cap_lock. no one changes
	 * s_cap_gen while session is in the reconnect state.
	 */
	if (queue_release &&
	    (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
		cap->queue_release = 1;
		if (removed) {
			/* park the cap on the session's release list */
			list_add_tail(&cap->session_caps,
				      &session->s_cap_releases);
			session->s_num_cap_releases++;
			removed = 0;
		}
	} else {
		cap->queue_release = 0;
	}
	cap->cap_ino = ci->i_vino.ino;

	spin_unlock(&session->s_cap_lock);

	/* remove from inode list */
	rb_erase(&cap->ci_node, &ci->i_caps);
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	/* only free if it was fully unlinked and not queued for release */
	if (removed)
		ceph_put_cap(mdsc, cap);

	/* when reconnect denied, we remove session caps forcibly,
	 * i_wr_ref can be non-zero. If there are ongoing write,
	 * keep i_snap_realm.
	 */
	if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
		drop_inode_snap_realm(ci);

	if (!__ceph_is_any_real_caps(ci))
		__cap_delay_cancel(mdsc, ci);
}
989
/*
 * Argument bundle for send_cap_msg(): everything needed to build a
 * single CEPH_MSG_CLIENT_CAPS message.
 */
struct cap_msg_args {
	struct ceph_mds_session	*session;
	u64			ino, cid, follows;
	u64			flush_tid, oldest_flush_tid, size, max_size;
	u64			xattr_version;
	struct ceph_buffer	*xattr_buf;	/* optional xattr blob (msg middle) */
	struct timespec		atime, mtime, ctime;
	int			op, caps, wanted, dirty;
	u32			seq, issue_seq, mseq, time_warp_seq;
	u32			flags;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
	bool			inline_data;
};
1005
/*
 * Build and send a cap message to the given MDS.
 *
 * Encodes a v10 CEPH_MSG_CLIENT_CAPS message: the fixed ceph_mds_caps
 * struct, the xattr blob as msg->middle (if any), then the versioned
 * trailer fields in order.  The trailer encoding order below must match
 * the extra_len computation and the MDS-side decoder.
 *
 * Caller should be holding s_mutex.
 * Returns 0 or -ENOMEM if the message could not be allocated.
 */
static int send_cap_msg(struct cap_msg_args *arg)
{
	struct ceph_mds_caps *fc;
	struct ceph_msg *msg;
	void *p;
	size_t extra_len;
	struct timespec zerotime = {0};
	struct ceph_osd_client *osdc = &arg->session->s_mdsc->fsc->client->osdc;

	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
	     " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(arg->op),
	     arg->cid, arg->ino, ceph_cap_string(arg->caps),
	     ceph_cap_string(arg->wanted), ceph_cap_string(arg->dirty),
	     arg->seq, arg->issue_seq, arg->flush_tid, arg->oldest_flush_tid,
	     arg->mseq, arg->follows, arg->size, arg->max_size,
	     arg->xattr_version,
	     arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);

	/* flock buffer size + inline version + inline data size +
	 * osd_epoch_barrier + oldest_flush_tid + caller uid/gid +
	 * pool namespace + btime/change_attr + flags */
	extra_len = 4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4;
	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
			   GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	msg->hdr.version = cpu_to_le16(10);
	msg->hdr.tid = cpu_to_le64(arg->flush_tid);

	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(arg->cid);
	fc->op = cpu_to_le32(arg->op);
	fc->seq = cpu_to_le32(arg->seq);
	fc->issue_seq = cpu_to_le32(arg->issue_seq);
	fc->migrate_seq = cpu_to_le32(arg->mseq);
	fc->caps = cpu_to_le32(arg->caps);
	fc->wanted = cpu_to_le32(arg->wanted);
	fc->dirty = cpu_to_le32(arg->dirty);
	fc->ino = cpu_to_le64(arg->ino);
	fc->snap_follows = cpu_to_le64(arg->follows);

	fc->size = cpu_to_le64(arg->size);
	fc->max_size = cpu_to_le64(arg->max_size);
	ceph_encode_timespec(&fc->mtime, &arg->mtime);
	ceph_encode_timespec(&fc->atime, &arg->atime);
	ceph_encode_timespec(&fc->ctime, &arg->ctime);
	fc->time_warp_seq = cpu_to_le32(arg->time_warp_seq);

	fc->uid = cpu_to_le32(from_kuid(&init_user_ns, arg->uid));
	fc->gid = cpu_to_le32(from_kgid(&init_user_ns, arg->gid));
	fc->mode = cpu_to_le32(arg->mode);

	fc->xattr_version = cpu_to_le64(arg->xattr_version);
	if (arg->xattr_buf) {
		msg->middle = ceph_buffer_get(arg->xattr_buf);
		fc->xattr_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
	}

	p = fc + 1;
	/* flock buffer size (version 2) */
	ceph_encode_32(&p, 0);
	/* inline version (version 4) */
	ceph_encode_64(&p, arg->inline_data ? 0 : CEPH_INLINE_NONE);
	/* inline data size */
	ceph_encode_32(&p, 0);
	/*
	 * osd_epoch_barrier (version 5)
	 * The epoch_barrier is protected by osdc->lock, so READ_ONCE here in
	 * case it was recently changed
	 */
	ceph_encode_32(&p, READ_ONCE(osdc->epoch_barrier));
	/* oldest_flush_tid (version 6) */
	ceph_encode_64(&p, arg->oldest_flush_tid);

	/*
	 * caller_uid/caller_gid (version 7)
	 *
	 * Currently, we don't properly track which caller dirtied the caps
	 * last, and force a flush of them when there is a conflict. For now,
	 * just set this to 0:0, to emulate how the MDS has worked up to now.
	 */
	ceph_encode_32(&p, 0);
	ceph_encode_32(&p, 0);

	/* pool namespace (version 8) (mds always ignores this) */
	ceph_encode_32(&p, 0);

	/*
	 * btime and change_attr (version 9)
	 *
	 * We just zero these out for now, as the MDS ignores them unless
	 * the requisite feature flags are set (which we don't do yet).
	 */
	ceph_encode_timespec(p, &zerotime);
	p += sizeof(struct ceph_timespec);
	ceph_encode_64(&p, 0);

	/* Advisory flags (version 10) */
	ceph_encode_32(&p, arg->flags);

	ceph_con_send(&arg->session->s_con, msg);
	return 0;
}
1118
1119/*
a6369741 1120 * Queue cap releases when an inode is dropped from our cache. Since
be655596 1121 * inode is about to be destroyed, there is no need for i_ceph_lock.
a8599bd8
SW
1122 */
1123void ceph_queue_caps_release(struct inode *inode)
1124{
1125 struct ceph_inode_info *ci = ceph_inode(inode);
1126 struct rb_node *p;
1127
a8599bd8
SW
1128 p = rb_first(&ci->i_caps);
1129 while (p) {
1130 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
a8599bd8 1131 p = rb_next(p);
a096b09a 1132 __ceph_remove_cap(cap, true);
a8599bd8 1133 }
a8599bd8
SW
1134}
1135
/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_ceph_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make half-hearted attempt not to invalidate page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_ceph_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
		      int op, bool sync, int used, int want, int retain,
		      int flushing, u64 flush_tid, u64 oldest_flush_tid)
	__releases(cap->ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->vfs_inode;
	struct cap_msg_args arg;
	int held, revoking, dropping;
	int wake = 0;
	int delayed = 0;
	int ret;

	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;		/* never retain what the MDS is revoking */
	dropping = cap->issued & ~retain;

	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
	     inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	arg.session = cap->session;

	/* don't release wanted unless we've waited a bit. */
	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
	    time_before(jiffies, ci->i_hold_caps_min)) {
		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->issued & retain),
		     ceph_cap_string(cap->mds_wanted),
		     ceph_cap_string(want));
		want |= cap->mds_wanted;
		retain |= cap->issued;
		delayed = 1;
	}
	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
	if (want & ~cap->mds_wanted) {
		/* user space may open/close single file frequently.
		 * This avoids dropping mds_wanted immediately after
		 * requesting new mds_wanted.
		 */
		__cap_set_timeouts(mdsc, ci);
	}

	cap->issued &= retain;  /* drop bits we don't want */
	if (cap->implemented & ~cap->issued) {
		/*
		 * Wake up any waiters on wanted -> needed transition.
		 * This is due to the weird transition from buffered
		 * to sync IO... we need to flush dirty pages _before_
		 * allowing sync writes to avoid reordering.
		 */
		wake = 1;
	}
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	/* fill in the message arguments while we still hold i_ceph_lock */
	arg.ino = ceph_vino(inode).ino;
	arg.cid = cap->cap_id;
	arg.follows = flushing ? ci->i_head_snapc->seq : 0;
	arg.flush_tid = flush_tid;
	arg.oldest_flush_tid = oldest_flush_tid;

	arg.size = inode->i_size;
	ci->i_reported_size = arg.size;
	arg.max_size = ci->i_wanted_max_size;
	ci->i_requested_max_size = arg.max_size;

	if (flushing & CEPH_CAP_XATTR_EXCL) {
		__ceph_build_xattrs_blob(ci);
		arg.xattr_version = ci->i_xattrs.version;
		arg.xattr_buf = ci->i_xattrs.blob;
	} else {
		arg.xattr_buf = NULL;
	}

	arg.mtime = inode->i_mtime;
	arg.atime = inode->i_atime;
	arg.ctime = inode->i_ctime;

	arg.op = op;
	arg.caps = cap->implemented;
	arg.wanted = want;
	arg.dirty = flushing;

	arg.seq = cap->seq;
	arg.issue_seq = cap->issue_seq;
	arg.mseq = cap->mseq;
	arg.time_warp_seq = ci->i_time_warp_seq;

	arg.uid = inode->i_uid;
	arg.gid = inode->i_gid;
	arg.mode = inode->i_mode;

	arg.inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
	/* tell the MDS whether a capsnap flush is still pending */
	if (list_empty(&ci->i_cap_snaps))
		arg.flags = CEPH_CLIENT_CAPS_NO_CAPSNAP;
	else
		arg.flags = CEPH_CLIENT_CAPS_PENDING_CAPSNAP;
	if (sync)
		arg.flags |= CEPH_CLIENT_CAPS_SYNC;

	spin_unlock(&ci->i_ceph_lock);

	ret = send_cap_msg(&arg);
	if (ret < 0) {
		dout("error sending cap msg, must requeue %p\n", inode);
		delayed = 1;
	}

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	return delayed;
}
1271
0e294387
YZ
1272static inline int __send_flush_snap(struct inode *inode,
1273 struct ceph_mds_session *session,
1274 struct ceph_cap_snap *capsnap,
1275 u32 mseq, u64 oldest_flush_tid)
1276{
0ff8bfb3
JL
1277 struct cap_msg_args arg;
1278
1279 arg.session = session;
1280 arg.ino = ceph_vino(inode).ino;
1281 arg.cid = 0;
1282 arg.follows = capsnap->follows;
1283 arg.flush_tid = capsnap->cap_flush.tid;
1284 arg.oldest_flush_tid = oldest_flush_tid;
1285
1286 arg.size = capsnap->size;
1287 arg.max_size = 0;
1288 arg.xattr_version = capsnap->xattr_version;
1289 arg.xattr_buf = capsnap->xattr_blob;
1290
1291 arg.atime = capsnap->atime;
1292 arg.mtime = capsnap->mtime;
1293 arg.ctime = capsnap->ctime;
1294
1295 arg.op = CEPH_CAP_OP_FLUSHSNAP;
1296 arg.caps = capsnap->issued;
1297 arg.wanted = 0;
1298 arg.dirty = capsnap->dirty;
1299
1300 arg.seq = 0;
1301 arg.issue_seq = 0;
1302 arg.mseq = mseq;
1303 arg.time_warp_seq = capsnap->time_warp_seq;
1304
1305 arg.uid = capsnap->uid;
1306 arg.gid = capsnap->gid;
1307 arg.mode = capsnap->mode;
1308
1309 arg.inline_data = capsnap->inline_data;
1e4ef0c6 1310 arg.flags = 0;
0ff8bfb3
JL
1311
1312 return send_cap_msg(&arg);
0e294387
YZ
1313}
1314
/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * First pass assigns flush tids to every flushable capsnap and links
 * them onto the global and per-inode flush lists; second pass sends a
 * FLUSHSNAP message per capsnap, dropping and retaking i_ceph_lock
 * around each send (hence the list re-scan by tid each iteration).
 *
 * Called under i_ceph_lock.  Takes s_mutex as needed.
 */
static void __ceph_flush_snaps(struct ceph_inode_info *ci,
			       struct ceph_mds_session *session)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_cap_snap *capsnap;
	u64 oldest_flush_tid = 0;
	u64 first_tid = 1, last_tid = 0;

	dout("__flush_snaps %p session %p\n", inode, session);

	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/*
		 * we need to wait for sync writes to complete and for dirty
		 * pages to be written out.
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			break;

		/* should be removed by ceph_try_drop_cap_snap() */
		BUG_ON(!capsnap->need_flush);

		/* only flush each capsnap once */
		if (capsnap->cap_flush.tid > 0) {
			dout(" already flushed %p, skipping\n", capsnap);
			continue;
		}

		spin_lock(&mdsc->cap_dirty_lock);
		capsnap->cap_flush.tid = ++mdsc->last_cap_flush_tid;
		list_add_tail(&capsnap->cap_flush.g_list,
			      &mdsc->cap_flush_list);
		if (oldest_flush_tid == 0)
			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
		if (list_empty(&ci->i_flushing_item)) {
			list_add_tail(&ci->i_flushing_item,
				      &session->s_cap_flushing);
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		list_add_tail(&capsnap->cap_flush.i_list,
			      &ci->i_cap_flush_list);

		if (first_tid == 1)
			first_tid = capsnap->cap_flush.tid;
		last_tid = capsnap->cap_flush.tid;
	}

	ci->i_ceph_flags &= ~CEPH_I_FLUSH_SNAPS;

	while (first_tid <= last_tid) {
		struct ceph_cap *cap = ci->i_auth_cap;
		struct ceph_cap_flush *cf;
		int ret;

		/* only the auth cap's session may flush snaps */
		if (!(cap && cap->session == session)) {
			dout("__flush_snaps %p auth cap %p not mds%d, "
			     "stop\n", inode, cap, session->s_mds);
			break;
		}

		/* re-find the next unflushed entry; the list may have
		 * changed while i_ceph_lock was dropped below */
		ret = -ENOENT;
		list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
			if (cf->tid >= first_tid) {
				ret = 0;
				break;
			}
		}
		if (ret < 0)
			break;

		first_tid = cf->tid + 1;

		capsnap = container_of(cf, struct ceph_cap_snap, cap_flush);
		/* hold a ref across the unlocked send */
		refcount_inc(&capsnap->nref);
		spin_unlock(&ci->i_ceph_lock);

		dout("__flush_snaps %p capsnap %p tid %llu %s\n",
		     inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty));

		ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
					oldest_flush_tid);
		if (ret < 0) {
			pr_err("__flush_snaps: error sending cap flushsnap, "
			       "ino (%llx.%llx) tid %llu follows %llu\n",
			       ceph_vinop(inode), cf->tid, capsnap->follows);
		}

		ceph_put_cap_snap(capsnap);
		spin_lock(&ci->i_ceph_lock);
	}
}
a8599bd8 1418
ed9b430c
YZ
1419void ceph_flush_snaps(struct ceph_inode_info *ci,
1420 struct ceph_mds_session **psession)
1421{
1422 struct inode *inode = &ci->vfs_inode;
1423 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
e4d2b16a 1424 struct ceph_mds_session *session = NULL;
ed9b430c 1425 int mds;
e4d2b16a 1426
ed9b430c 1427 dout("ceph_flush_snaps %p\n", inode);
e4d2b16a
YZ
1428 if (psession)
1429 session = *psession;
ed9b430c
YZ
1430retry:
1431 spin_lock(&ci->i_ceph_lock);
1432 if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
1433 dout(" no capsnap needs flush, doing nothing\n");
1434 goto out;
1435 }
1436 if (!ci->i_auth_cap) {
1437 dout(" no auth cap (migrating?), doing nothing\n");
1438 goto out;
1439 }
a8599bd8 1440
ed9b430c
YZ
1441 mds = ci->i_auth_cap->session->s_mds;
1442 if (session && session->s_mds != mds) {
1443 dout(" oops, wrong session %p mutex\n", session);
a8599bd8
SW
1444 mutex_unlock(&session->s_mutex);
1445 ceph_put_mds_session(session);
ed9b430c
YZ
1446 session = NULL;
1447 }
1448 if (!session) {
1449 spin_unlock(&ci->i_ceph_lock);
1450 mutex_lock(&mdsc->mutex);
1451 session = __ceph_lookup_mds_session(mdsc, mds);
1452 mutex_unlock(&mdsc->mutex);
1453 if (session) {
1454 dout(" inverting session/ino locks on %p\n", session);
1455 mutex_lock(&session->s_mutex);
1456 }
1457 goto retry;
a8599bd8 1458 }
a8599bd8 1459
ed9b430c
YZ
1460 __ceph_flush_snaps(ci, session);
1461out:
be655596 1462 spin_unlock(&ci->i_ceph_lock);
ed9b430c
YZ
1463
1464 if (psession) {
1465 *psession = session;
1466 } else {
1467 mutex_unlock(&session->s_mutex);
1468 ceph_put_mds_session(session);
1469 }
1470 /* we flushed them all; remove this inode from the queue */
1471 spin_lock(&mdsc->snap_flush_lock);
1472 list_del_init(&ci->i_snap_flush_item);
1473 spin_unlock(&mdsc->snap_flush_lock);
a8599bd8
SW
1474}
1475
/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
 *
 * On the 0 -> dirty transition this consumes the caller's preallocated
 * cap_flush (*pcf becomes NULL), pins i_head_snapc, links the inode on
 * mdsc->cap_dirty, and takes an inode reference if no flush is already
 * in progress.
 *
 * NOTE(review): called with i_ceph_lock held, presumably — all state
 * touched here (i_dirty_caps, i_prealloc_cap_flush) is i_ceph_lock
 * protected elsewhere in this file; confirm at call sites.
 */
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
			   struct ceph_cap_flush **pcf)
{
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	struct inode *inode = &ci->vfs_inode;
	int was = ci->i_dirty_caps;
	int dirty = 0;

	if (!ci->i_auth_cap) {
		pr_warn("__mark_dirty_caps %p %llx mask %s, "
			"but no auth cap (session was closed?)\n",
			inode, ceph_ino(inode), ceph_cap_string(mask));
		return 0;
	}

	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
	     ceph_cap_string(mask), ceph_cap_string(was),
	     ceph_cap_string(was | mask));
	ci->i_dirty_caps |= mask;
	if (was == 0) {
		/* take ownership of the caller's preallocated cap_flush */
		WARN_ON_ONCE(ci->i_prealloc_cap_flush);
		swap(ci->i_prealloc_cap_flush, *pcf);

		if (!ci->i_head_snapc) {
			WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem));
			ci->i_head_snapc = ceph_get_snap_context(
				ci->i_snap_realm->cached_context);
		}
		dout(" inode %p now dirty snapc %p auth cap %p\n",
		     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
		BUG_ON(!list_empty(&ci->i_dirty_item));
		spin_lock(&mdsc->cap_dirty_lock);
		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (ci->i_flushing_caps == 0) {
			/* pin the inode until the flush completes */
			ihold(inode);
			dirty |= I_DIRTY_SYNC;
		}
	} else {
		WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
	}
	BUG_ON(list_empty(&ci->i_dirty_item));
	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
	    (mask & CEPH_CAP_FILE_BUFFER))
		dirty |= I_DIRTY_DATASYNC;
	__cap_delay_requeue(mdsc, ci);
	return dirty;
}
1530
/*
 * Allocate a ceph_cap_flush from the dedicated slab cache.
 * Returns NULL on allocation failure; release with ceph_free_cap_flush().
 */
struct ceph_cap_flush *ceph_alloc_cap_flush(void)
{
	return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
}
1535
1536void ceph_free_cap_flush(struct ceph_cap_flush *cf)
1537{
1538 if (cf)
1539 kmem_cache_free(ceph_cap_flush_cachep, cf);
1540}
1541
a2971c8c
YZ
1542static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
1543{
e4500b5e 1544 if (!list_empty(&mdsc->cap_flush_list)) {
a2971c8c 1545 struct ceph_cap_flush *cf =
e4500b5e
YZ
1546 list_first_entry(&mdsc->cap_flush_list,
1547 struct ceph_cap_flush, g_list);
a2971c8c
YZ
1548 return cf->tid;
1549 }
1550 return 0;
1551}
1552
/*
 * Remove cap_flush from the mdsc's or inode's flushing cap list.
 * Return true if caller needs to wake up flush waiters.
 *
 * If this entry wanted a wakeup but older flushes are still pending,
 * the wake obligation is handed to the previous (older) entry instead,
 * so waiters are only woken once everything up to their tid is done.
 * Exactly one of mdsc/ci selects which list to operate on.
 *
 * NOTE(review): list membership appears to be protected by
 * mdsc->cap_dirty_lock (see list_add_tail call sites in this file);
 * confirm callers hold it here.
 */
static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci,
			       struct ceph_cap_flush *cf)
{
	struct ceph_cap_flush *prev;
	bool wake = cf->wake;
	if (mdsc) {
		/* are there older pending cap flushes? */
		if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
			/* pass the wake duty back to the older entry */
			prev = list_prev_entry(cf, g_list);
			prev->wake = true;
			wake = false;
		}
		list_del(&cf->g_list);
	} else if (ci) {
		if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
			prev = list_prev_entry(cf, i_list);
			prev->wake = true;
			wake = false;
		}
		list_del(&cf->i_list);
	} else {
		BUG_ON(1);
	}
	return wake;
}
1583
/*
 * Add dirty inode to the flushing list.  Assigned a seq number so we
 * can wait for caps to flush without starving.
 *
 * Moves all of i_dirty_caps into i_flushing_caps, consumes the
 * preallocated cap_flush to record the flush (its tid is returned via
 * *flush_tid), and moves the inode from the dirty list to the
 * session's flushing list.  Returns the cap bits being flushed.
 *
 * Called under i_ceph_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session, bool wake,
				u64 *flush_tid, u64 *oldest_flush_tid)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *cf = NULL;
	int flushing;

	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));
	BUG_ON(!ci->i_prealloc_cap_flush);

	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	/* take ownership of the preallocated cap_flush */
	swap(cf, ci->i_prealloc_cap_flush);
	cf->caps = flushing;
	cf->wake = wake;

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	cf->tid = ++mdsc->last_cap_flush_tid;
	list_add_tail(&cf->g_list, &mdsc->cap_flush_list);
	*oldest_flush_tid = __get_oldest_flush_tid(mdsc);

	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	list_add_tail(&cf->i_list, &ci->i_cap_flush_list);

	*flush_tid = cf->tid;
	return flushing;
}
1634
/*
 * try to invalidate mapping pages without blocking.
 *
 * Temporarily drops i_ceph_lock around the invalidation, then uses
 * i_rdcache_gen to detect whether new cached reads raced in while the
 * lock was dropped.  Returns 0 on full success, -1 if pages remain or
 * a racing read repopulated the cache.
 */
static int try_nonblocking_invalidate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 invalidating_gen = ci->i_rdcache_gen;

	spin_unlock(&ci->i_ceph_lock);
	invalidate_mapping_pages(&inode->i_data, 0, -1);
	spin_lock(&ci->i_ceph_lock);

	if (inode->i_data.nrpages == 0 &&
	    invalidating_gen == ci->i_rdcache_gen) {
		/* success. */
		dout("try_nonblocking_invalidate %p success\n", inode);
		/* save any racing async invalidate some trouble */
		ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
		return 0;
	}
	dout("try_nonblocking_invalidate %p failed\n", inode);
	return -1;
}
1658
efb0ca76
YZ
1659bool __ceph_should_report_size(struct ceph_inode_info *ci)
1660{
1661 loff_t size = ci->vfs_inode.i_size;
1662 /* mds will adjust max size according to the reported size */
1663 if (ci->i_flushing_caps & CEPH_CAP_FILE_WR)
1664 return false;
1665 if (size >= ci->i_max_size)
1666 return true;
1667 /* half of previous max_size increment has been used */
1668 if (ci->i_max_size > ci->i_reported_size &&
1669 (size << 1) >= ci->i_max_size + ci->i_reported_size)
1670 return true;
1671 return false;
1672}
1673
/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 * CHECK_CAPS_AUTHONLY - we should only check the auth cap
 * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 *
 * Lock ordering is session->s_mutex, then snap_rwsem, then
 * i_ceph_lock; whenever a trylock on an outer lock fails we drop
 * i_ceph_lock, take the lock blocking, and jump to "retry" to rescan
 * from scratch ("mds" remembers how far the previous scan got).
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
		     struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	u64 flush_tid, oldest_flush_tid;
	int file_wanted, used, cap_used;
	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;   /* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	int delayed = 0, sent = 0, num;
	bool is_delayed = flags & CHECK_CAPS_NODELAY;
	bool queue_invalidate = false;
	bool force_requeue = false;
	bool tried_invalidate = false;

	/* if we are unmounting, flush any unused caps immediately. */
	if (mdsc->stopping)
		is_delayed = 1;

	spin_lock(&ci->i_ceph_lock);

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;

	goto retry_locked;
retry:
	spin_lock(&ci->i_ceph_lock);
retry_locked:
	file_wanted = __ceph_caps_file_wanted(ci);
	used = __ceph_caps_used(ci);
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	want = file_wanted;
	retain = file_wanted | used | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (file_wanted) {
			retain |= CEPH_CAP_ANY;       /* be greedy */
		} else if (S_ISDIR(inode->i_mode) &&
			   (issued & CEPH_CAP_FILE_SHARED) &&
			   __ceph_dir_is_complete(ci)) {
			/*
			 * If a directory is complete, we want to keep
			 * the exclusive cap. So that MDS does not end up
			 * revoking the shared cap on every create/unlink
			 * operation.
			 */
			want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
			retain |= want;
		} else {

			retain |= CEPH_CAP_ANY_SHARED;
			/*
			 * keep RD only if we didn't have the file open RW,
			 * because then the mds would revoke it anyway to
			 * journal max_size=0.
			 */
			if (ci->i_max_size == 0)
				retain |= CEPH_CAP_ANY_RD;
		}
	}

	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
	     " issued %s revoking %s retain %s %s%s%s\n", inode,
	     ceph_cap_string(file_wanted),
	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(issued), ceph_cap_string(revoking),
	     ceph_cap_string(retain),
	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

	/*
	 * If we no longer need to hold onto old our caps, and we may
	 * have cached pages, but don't want them, then try to invalidate.
	 * If we fail, it's because pages are locked.... try again later.
	 */
	if ((!is_delayed || mdsc->stopping) &&
	    !S_ISDIR(inode->i_mode) &&		/* ignore readdir cache */
	    !(ci->i_wb_ref || ci->i_wrbuffer_ref) &&   /* no dirty pages... */
	    inode->i_data.nrpages &&		/* have cached pages */
	    (revoking & (CEPH_CAP_FILE_CACHE|
			 CEPH_CAP_FILE_LAZYIO)) && /* or revoking cache */
	    !tried_invalidate) {
		dout("check_caps trying to invalidate on %p\n", inode);
		if (try_nonblocking_invalidate(inode) < 0) {
			if (revoking & (CEPH_CAP_FILE_CACHE|
					CEPH_CAP_FILE_LAZYIO)) {
				dout("check_caps queuing invalidate\n");
				queue_invalidate = true;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			} else {
				dout("check_caps failed to invalidate pages\n");
				/* we failed to invalidate pages.  check these
				   caps again later. */
				force_requeue = true;
				__cap_set_timeouts(mdsc, ci);
			}
		}
		/* try_nonblocking_invalidate dropped i_ceph_lock; rescan */
		tried_invalidate = true;
		goto retry_locked;
	}

	num = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		num++;

		/* avoid looping forever */
		if (mds >= cap->mds ||
		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
			continue;

		/* NOTE: no side-effects allowed, until we take s_mutex */

		/* caps covered by the auth cap don't count as "used" here */
		cap_used = used;
		if (ci->i_auth_cap && cap != ci->i_auth_cap)
			cap_used &= ~ci->i_auth_cap->issued;

		revoking = cap->implemented & ~cap->issued;
		dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
		     cap->mds, cap, ceph_cap_string(cap_used),
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->implemented),
		     ceph_cap_string(revoking));

		if (cap == ci->i_auth_cap &&
		    (cap->issued & CEPH_CAP_FILE_WR)) {
			/* request larger max_size from MDS? */
			if (ci->i_wanted_max_size > ci->i_max_size &&
			    ci->i_wanted_max_size > ci->i_requested_max_size) {
				dout("requesting new max_size\n");
				goto ack;
			}

			/* approaching file_max? */
			if (__ceph_should_report_size(ci)) {
				dout("i_size approaching max_size\n");
				goto ack;
			}
		}
		/* flush anything dirty? */
		if (cap == ci->i_auth_cap) {
			if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) {
				dout("flushing dirty caps\n");
				goto ack;
			}
			if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) {
				dout("flushing snap caps\n");
				goto ack;
			}
		}

		/* completed revocation? going down and there are no caps? */
		if (revoking && (revoking & cap_used) == 0) {
			dout("completed revocation of %s\n",
			     ceph_cap_string(cap->implemented & ~cap->issued));
			goto ack;
		}

		/* want more caps from mds? */
		if (want & ~(cap->mds_wanted | cap->issued))
			goto ack;

		/* things we might delay */
		if ((cap->issued & ~retain) == 0 &&
		    cap->mds_wanted == want)
			continue;     /* nope, all good */

		if (is_delayed)
			goto ack;

		/* delay? */
		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max)) {
			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(cap->issued & retain),
			     ceph_cap_string(cap->mds_wanted),
			     ceph_cap_string(want));
			delayed++;
			continue;
		}

ack:
		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
			dout(" skipping %p I_NOFLUSH set\n", inode);
			continue;
		}

		if (session && session != cap->session) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			session = NULL;
		}
		if (!session) {
			session = cap->session;
			if (mutex_trylock(&session->s_mutex) == 0) {
				dout("inverting session/ino locks on %p\n",
				     session);
				spin_unlock(&ci->i_ceph_lock);
				if (took_snap_rwsem) {
					up_read(&mdsc->snap_rwsem);
					took_snap_rwsem = 0;
				}
				mutex_lock(&session->s_mutex);
				goto retry;
			}
		}

		/* kick flushing and flush snaps before sending normal
		 * cap message */
		if (cap == ci->i_auth_cap &&
		    (ci->i_ceph_flags &
		     (CEPH_I_KICK_FLUSH | CEPH_I_FLUSH_SNAPS))) {
			if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
				spin_lock(&mdsc->cap_dirty_lock);
				oldest_flush_tid = __get_oldest_flush_tid(mdsc);
				spin_unlock(&mdsc->cap_dirty_lock);
				__kick_flushing_caps(mdsc, session, ci,
						     oldest_flush_tid);
				ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
			}
			if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)
				__ceph_flush_snaps(ci, session);

			goto retry_locked;
		}

		/* take snap_rwsem after session mutex */
		if (!took_snap_rwsem) {
			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
				dout("inverting snap/in locks on %p\n",
				     inode);
				spin_unlock(&ci->i_ceph_lock);
				down_read(&mdsc->snap_rwsem);
				took_snap_rwsem = 1;
				goto retry;
			}
			took_snap_rwsem = 1;
		}

		if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
			flushing = __mark_caps_flushing(inode, session, false,
							&flush_tid,
							&oldest_flush_tid);
		} else {
			flushing = 0;
			flush_tid = 0;
			spin_lock(&mdsc->cap_dirty_lock);
			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
			spin_unlock(&mdsc->cap_dirty_lock);
		}

		mds = cap->mds;  /* remember mds, so we don't repeat */
		sent++;

		/* __send_cap drops i_ceph_lock */
		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, false,
				      cap_used, want, retain, flushing,
				      flush_tid, oldest_flush_tid);
		goto retry; /* retake i_ceph_lock and restart our cap scan. */
	}

	/*
	 * Reschedule delayed caps release if we delayed anything,
	 * otherwise cancel.
	 */
	if (delayed && is_delayed)
		force_requeue = true;   /* __send_cap delayed release; requeue */
	if (!delayed && !is_delayed)
		__cap_delay_cancel(mdsc, ci);
	else if (!is_delayed || force_requeue)
		__cap_delay_requeue(mdsc, ci);

	spin_unlock(&ci->i_ceph_lock);

	if (queue_invalidate)
		ceph_queue_invalidate(inode);

	if (session)
		mutex_unlock(&session->s_mutex);
	if (took_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
}
1976
a8599bd8
SW
1977/*
1978 * Try to flush dirty caps back to the auth mds.
1979 */
553adfd9 1980static int try_flush_caps(struct inode *inode, u64 *ptid)
a8599bd8 1981{
3d14c5d2 1982 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
a8599bd8 1983 struct ceph_inode_info *ci = ceph_inode(inode);
4fe59789 1984 struct ceph_mds_session *session = NULL;
89b52fe1 1985 int flushing = 0;
a2971c8c 1986 u64 flush_tid = 0, oldest_flush_tid = 0;
a8599bd8
SW
1987
1988retry:
be655596 1989 spin_lock(&ci->i_ceph_lock);
e9964c10
SW
1990 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1991 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1992 goto out;
1993 }
a8599bd8
SW
1994 if (ci->i_dirty_caps && ci->i_auth_cap) {
1995 struct ceph_cap *cap = ci->i_auth_cap;
1996 int used = __ceph_caps_used(ci);
1997 int want = __ceph_caps_wanted(ci);
1998 int delayed;
1999
4fe59789 2000 if (!session || session != cap->session) {
be655596 2001 spin_unlock(&ci->i_ceph_lock);
4fe59789
YZ
2002 if (session)
2003 mutex_unlock(&session->s_mutex);
a8599bd8
SW
2004 session = cap->session;
2005 mutex_lock(&session->s_mutex);
2006 goto retry;
2007 }
a8599bd8
SW
2008 if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
2009 goto out;
2010
c8799fc4
YZ
2011 flushing = __mark_caps_flushing(inode, session, true,
2012 &flush_tid, &oldest_flush_tid);
a8599bd8 2013
be655596 2014 /* __send_cap drops i_ceph_lock */
1e4ef0c6
JL
2015 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, true,
2016 used, want, (cap->issued | cap->implemented),
2017 flushing, flush_tid, oldest_flush_tid);
a8599bd8 2018
553adfd9
YZ
2019 if (delayed) {
2020 spin_lock(&ci->i_ceph_lock);
89b52fe1 2021 __cap_delay_requeue(mdsc, ci);
553adfd9
YZ
2022 spin_unlock(&ci->i_ceph_lock);
2023 }
2024 } else {
e4500b5e 2025 if (!list_empty(&ci->i_cap_flush_list)) {
553adfd9 2026 struct ceph_cap_flush *cf =
e4500b5e 2027 list_last_entry(&ci->i_cap_flush_list,
c8799fc4
YZ
2028 struct ceph_cap_flush, i_list);
2029 cf->wake = true;
553adfd9
YZ
2030 flush_tid = cf->tid;
2031 }
2032 flushing = ci->i_flushing_caps;
2033 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2034 }
2035out:
4fe59789 2036 if (session)
a8599bd8 2037 mutex_unlock(&session->s_mutex);
553adfd9
YZ
2038
2039 *ptid = flush_tid;
a8599bd8
SW
2040 return flushing;
2041}
2042
2043/*
2044 * Return true if we've flushed caps through the given flush_tid.
2045 */
553adfd9 2046static int caps_are_flushed(struct inode *inode, u64 flush_tid)
a8599bd8
SW
2047{
2048 struct ceph_inode_info *ci = ceph_inode(inode);
553adfd9 2049 int ret = 1;
a8599bd8 2050
be655596 2051 spin_lock(&ci->i_ceph_lock);
e4500b5e
YZ
2052 if (!list_empty(&ci->i_cap_flush_list)) {
2053 struct ceph_cap_flush * cf =
2054 list_first_entry(&ci->i_cap_flush_list,
2055 struct ceph_cap_flush, i_list);
553adfd9 2056 if (cf->tid <= flush_tid)
a8599bd8 2057 ret = 0;
89b52fe1 2058 }
be655596 2059 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2060 return ret;
2061}
2062
/*
 * wait for any unsafe requests to complete.
 *
 * Waits (with timeout) for the newest unsafe directory operation and
 * the newest unsafe inode operation on this inode to become "safe"
 * (committed by the MDS).  Returns 0 on success, -EIO if either wait
 * timed out.
 */
static int unsafe_request_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req1 = NULL, *req2 = NULL;
	int ret, err = 0;

	/* grab refs to the newest entries under i_unsafe_lock so they
	 * can't be freed while we wait */
	spin_lock(&ci->i_unsafe_lock);
	if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) {
		req1 = list_last_entry(&ci->i_unsafe_dirops,
				       struct ceph_mds_request,
				       r_unsafe_dir_item);
		ceph_mdsc_get_request(req1);
	}
	if (!list_empty(&ci->i_unsafe_iops)) {
		req2 = list_last_entry(&ci->i_unsafe_iops,
				       struct ceph_mds_request,
				       r_unsafe_target_item);
		ceph_mdsc_get_request(req2);
	}
	spin_unlock(&ci->i_unsafe_lock);

	dout("unsafe_request_wait %p wait on tid %llu %llu\n",
	     inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
	if (req1) {
		ret = !wait_for_completion_timeout(&req1->r_safe_completion,
					ceph_timeout_jiffies(req1->r_timeout));
		if (ret)
			err = -EIO;
		ceph_mdsc_put_request(req1);
	}
	if (req2) {
		ret = !wait_for_completion_timeout(&req2->r_safe_completion,
					ceph_timeout_jiffies(req2->r_timeout));
		if (ret)
			err = -EIO;
		ceph_mdsc_put_request(req2);
	}
	return err;
}
2105
/*
 * fsync(2)/fdatasync(2) handler: write back dirty pages in [start, end],
 * then (for a full fsync) flush dirty caps and wait for unsafe MDS
 * requests and for the cap flush to be acked.
 */
int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 flush_tid;
	int ret;
	int dirty;

	dout("fsync %p%s\n", inode, datasync ? " datasync" : "");

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret < 0)
		goto out;

	/* datasync is satisfied by the data writeback above */
	if (datasync)
		goto out;

	inode_lock(inode);

	dirty = try_flush_caps(inode, &flush_tid);
	dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));

	ret = unsafe_request_wait(inode);

	/*
	 * only wait on non-file metadata writeback (the mds
	 * can recover size and mtime, so we don't need to
	 * wait for that)
	 */
	if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
		ret = wait_event_interruptible(ci->i_cap_wq,
					caps_are_flushed(inode, flush_tid));
	}
	inode_unlock(inode);
out:
	dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
	return ret;
}
2144
/*
 * Flush any dirty caps back to the mds. If we aren't asked to wait,
 * queue inode for flush but don't do so immediately, because we can
 * get by with fewer MDS messages if we wait for data writeback to
 * complete first.
 */
int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 flush_tid;
	int err = 0;
	int dirty;
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	dout("write_inode %p wait=%d\n", inode, wait);
	if (wait) {
		/* synchronous: flush now and wait for the MDS ack */
		dirty = try_flush_caps(inode, &flush_tid);
		if (dirty)
			err = wait_event_interruptible(ci->i_cap_wq,
				       caps_are_flushed(inode, flush_tid));
	} else {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(inode->i_sb)->mdsc;

		/* async: just move the inode to the front of the cap
		 * delay queue so it gets flushed soon */
		spin_lock(&ci->i_ceph_lock);
		if (__ceph_caps_dirty(ci))
			__cap_delay_requeue_front(mdsc, ci);
		spin_unlock(&ci->i_ceph_lock);
	}
	return err;
}
2176
/*
 * Re-send all pending cap flushes (and snap flushes) for @ci to the
 * given session, e.g. after an MDS restart or session reconnect.
 *
 * Caller holds ci->i_ceph_lock; it is dropped and reacquired for each
 * message sent (hence the __releases/__acquires annotations).
 */
static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session,
				 struct ceph_inode_info *ci,
				 u64 oldest_flush_tid)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	struct ceph_cap_flush *cf;
	int ret;
	u64 first_tid = 0;

	list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
		/* skip entries already handled before the last time we
		 * dropped the lock; the list may have been re-walked */
		if (cf->tid < first_tid)
			continue;

		/* the auth cap may change while the lock is dropped */
		cap = ci->i_auth_cap;
		if (!(cap && cap->session == session)) {
			pr_err("%p auth cap %p not mds%d ???\n",
			       inode, cap, session->s_mds);
			break;
		}

		first_tid = cf->tid + 1;

		if (cf->caps) {
			/* regular cap flush */
			dout("kick_flushing_caps %p cap %p tid %llu %s\n",
			     inode, cap, cf->tid, ceph_cap_string(cf->caps));
			ci->i_ceph_flags |= CEPH_I_NODELAY;
			/* __send_cap drops i_ceph_lock */
			ret = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
					  false, __ceph_caps_used(ci),
					  __ceph_caps_wanted(ci),
					  cap->issued | cap->implemented,
					  cf->caps, cf->tid, oldest_flush_tid);
			if (ret) {
				pr_err("kick_flushing_caps: error sending "
					"cap flush, ino (%llx.%llx) "
					"tid %llu flushing %s\n",
					ceph_vinop(inode), cf->tid,
					ceph_cap_string(cf->caps));
			}
		} else {
			/* cf->caps == 0 marks a snap flush; the cap_flush is
			 * embedded in a ceph_cap_snap */
			struct ceph_cap_snap *capsnap =
					container_of(cf, struct ceph_cap_snap,
						    cap_flush);
			dout("kick_flushing_caps %p capsnap %p tid %llu %s\n",
			     inode, capsnap, cf->tid,
			     ceph_cap_string(capsnap->dirty));

			/* hold a ref across the unlocked send */
			refcount_inc(&capsnap->nref);
			spin_unlock(&ci->i_ceph_lock);

			ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
						oldest_flush_tid);
			if (ret < 0) {
				pr_err("kick_flushing_caps: error sending "
					"cap flushsnap, ino (%llx.%llx) "
					"tid %llu follows %llu\n",
					ceph_vinop(inode), cf->tid,
					capsnap->follows);
			}

			ceph_put_cap_snap(capsnap);
		}

		/* both branches dropped the lock; retake before the next
		 * list iteration */
		spin_lock(&ci->i_ceph_lock);
	}
}
2246
/*
 * Called early in session reconnect: for each inode with flushes
 * pending on this session, either re-send the flush now (when the
 * flushing caps were revoked) or mark it CEPH_I_KICK_FLUSH so that
 * ceph_kick_flushing_caps() re-sends it later.
 */
void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	u64 oldest_flush_tid;

	dout("early_kick_flushing_caps mds%d\n", session->s_mds);

	spin_lock(&mdsc->cap_dirty_lock);
	oldest_flush_tid = __get_oldest_flush_tid(mdsc);
	spin_unlock(&mdsc->cap_dirty_lock);

	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
		spin_lock(&ci->i_ceph_lock);
		cap = ci->i_auth_cap;
		if (!(cap && cap->session == session)) {
			pr_err("%p auth cap %p not mds%d ???\n",
				&ci->vfs_inode, cap, session->s_mds);
			spin_unlock(&ci->i_ceph_lock);
			continue;
		}

		/*
		 * if flushing caps were revoked, we re-send the cap flush
		 * in client reconnect stage. This guarantees MDS processes
		 * the cap flush message before issuing the flushing caps to
		 * other client.
		 */
		if ((cap->issued & ci->i_flushing_caps) !=
		    ci->i_flushing_caps) {
			ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
			__kick_flushing_caps(mdsc, session, ci,
					     oldest_flush_tid);
		} else {
			/* defer to ceph_kick_flushing_caps() */
			ci->i_ceph_flags |= CEPH_I_KICK_FLUSH;
		}

		spin_unlock(&ci->i_ceph_lock);
	}
}
2289
/*
 * Re-send pending cap flushes for every inode on this session that was
 * marked CEPH_I_KICK_FLUSH (by ceph_early_kick_flushing_caps() or by
 * ceph_check_caps()).
 */
void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	u64 oldest_flush_tid;

	dout("kick_flushing_caps mds%d\n", session->s_mds);

	spin_lock(&mdsc->cap_dirty_lock);
	oldest_flush_tid = __get_oldest_flush_tid(mdsc);
	spin_unlock(&mdsc->cap_dirty_lock);

	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
		spin_lock(&ci->i_ceph_lock);
		cap = ci->i_auth_cap;
		if (!(cap && cap->session == session)) {
			pr_err("%p auth cap %p not mds%d ???\n",
				&ci->vfs_inode, cap, session->s_mds);
			spin_unlock(&ci->i_ceph_lock);
			continue;
		}
		if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
			ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
			__kick_flushing_caps(mdsc, session, ci,
					     oldest_flush_tid);
		}
		spin_unlock(&ci->i_ceph_lock);
	}
}
2320
088b3f5e
SW
2321static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
2322 struct ceph_mds_session *session,
2323 struct inode *inode)
0e294387 2324 __releases(ci->i_ceph_lock)
088b3f5e
SW
2325{
2326 struct ceph_inode_info *ci = ceph_inode(inode);
2327 struct ceph_cap *cap;
088b3f5e 2328
088b3f5e 2329 cap = ci->i_auth_cap;
8310b089
YZ
2330 dout("kick_flushing_inode_caps %p flushing %s\n", inode,
2331 ceph_cap_string(ci->i_flushing_caps));
005c4697 2332
0e294387
YZ
2333 if (!list_empty(&ci->i_cap_flush_list)) {
2334 u64 oldest_flush_tid;
005c4697
YZ
2335 spin_lock(&mdsc->cap_dirty_lock);
2336 list_move_tail(&ci->i_flushing_item,
2337 &cap->session->s_cap_flushing);
0e294387 2338 oldest_flush_tid = __get_oldest_flush_tid(mdsc);
005c4697
YZ
2339 spin_unlock(&mdsc->cap_dirty_lock);
2340
13c2b57d 2341 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
0e294387 2342 __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid);
553adfd9 2343 spin_unlock(&ci->i_ceph_lock);
088b3f5e 2344 } else {
be655596 2345 spin_unlock(&ci->i_ceph_lock);
088b3f5e
SW
2346 }
2347}
2348
/*
 * Take references to capabilities we hold, so that we don't release
 * them to the MDS prematurely.
 *
 * Protected by i_ceph_lock.
 *
 * @snap_rwsem_locked: true if the caller holds mdsc->snap_rwsem; only
 * required when taking the first FILE_WR ref with no i_head_snapc yet,
 * since that attaches the current snap context to the inode.
 */
static void __take_cap_refs(struct ceph_inode_info *ci, int got,
			    bool snap_rwsem_locked)
{
	if (got & CEPH_CAP_PIN)
		ci->i_pin_ref++;
	if (got & CEPH_CAP_FILE_RD)
		ci->i_rd_ref++;
	if (got & CEPH_CAP_FILE_CACHE)
		ci->i_rdcache_ref++;
	if (got & CEPH_CAP_FILE_WR) {
		if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
			/* first writer pins the current snap context */
			BUG_ON(!snap_rwsem_locked);
			ci->i_head_snapc = ceph_get_snap_context(
				ci->i_snap_realm->cached_context);
		}
		ci->i_wr_ref++;
	}
	if (got & CEPH_CAP_FILE_BUFFER) {
		/* first buffered-write ref also pins the inode */
		if (ci->i_wb_ref == 0)
			ihold(&ci->vfs_inode);
		ci->i_wb_ref++;
		dout("__take_cap_refs %p wb %d -> %d (?)\n",
		     &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
	}
}
2381
/*
 * Try to grab cap references. Specify those refs we @want, and the
 * minimal set we @need. Also include the larger offset we are writing
 * to (when applicable), and check against max_size here as well.
 * Note that caller is responsible for ensuring max_size increases are
 * requested from the MDS.
 *
 * Returns 0 if the caps are not available yet (caller should wait and
 * retry), or 1 when done — either successfully (*got set, refs taken)
 * or with *err set to a negative errno.
 */
static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
			    loff_t endoff, bool nonblock, int *got, int *err)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	int ret = 0;
	int have, implemented;
	int file_wanted;
	bool snap_rwsem_locked = false;

	dout("get_cap_refs %p need %s want %s\n", inode,
	     ceph_cap_string(need), ceph_cap_string(want));

again:
	spin_lock(&ci->i_ceph_lock);

	/* make sure file is actually open */
	file_wanted = __ceph_caps_file_wanted(ci);
	if ((file_wanted & need) != need) {
		dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
		     ceph_cap_string(need), ceph_cap_string(file_wanted));
		*err = -EBADF;
		ret = 1;
		goto out_unlock;
	}

	/* finish pending truncate */
	while (ci->i_truncate_pending) {
		spin_unlock(&ci->i_ceph_lock);
		/* drop snap_rwsem before the (possibly blocking) truncate */
		if (snap_rwsem_locked) {
			up_read(&mdsc->snap_rwsem);
			snap_rwsem_locked = false;
		}
		__ceph_do_pending_vmtruncate(inode);
		spin_lock(&ci->i_ceph_lock);
	}

	have = __ceph_caps_issued(ci, &implemented);

	if (have & need & CEPH_CAP_FILE_WR) {
		/* writes past max_size must wait for a max_size grant */
		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
			     inode, endoff, ci->i_max_size);
			if (endoff > ci->i_requested_max_size) {
				*err = -EAGAIN;
				ret = 1;
			}
			goto out_unlock;
		}
		/*
		 * If a sync write is in progress, we must wait, so that we
		 * can get a final snapshot value for size+mtime.
		 */
		if (__ceph_have_pending_cap_snap(ci)) {
			dout("get_cap_refs %p cap_snap_pending\n", inode);
			goto out_unlock;
		}
	}

	if ((have & need) == need) {
		/*
		 * Look at (implemented & ~have & not) so that we keep waiting
		 * on transition from wanted -> needed caps. This is needed
		 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
		 * going before a prior buffered writeback happens.
		 */
		int not = want & ~(have & need);
		int revoking = implemented & ~have;
		dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
		     inode, ceph_cap_string(have), ceph_cap_string(not),
		     ceph_cap_string(revoking));
		if ((revoking & not) == 0) {
			/* first FILE_WR ref needs snap_rwsem (see
			 * __take_cap_refs) */
			if (!snap_rwsem_locked &&
			    !ci->i_head_snapc &&
			    (need & CEPH_CAP_FILE_WR)) {
				if (!down_read_trylock(&mdsc->snap_rwsem)) {
					/*
					 * we can not call down_read() when
					 * task isn't in TASK_RUNNING state
					 */
					if (nonblock) {
						*err = -EAGAIN;
						ret = 1;
						goto out_unlock;
					}

					spin_unlock(&ci->i_ceph_lock);
					down_read(&mdsc->snap_rwsem);
					snap_rwsem_locked = true;
					goto again;
				}
				snap_rwsem_locked = true;
			}
			*got = need | (have & want);
			if ((need & CEPH_CAP_FILE_RD) &&
			    !(*got & CEPH_CAP_FILE_CACHE))
				ceph_disable_fscache_readpage(ci);
			__take_cap_refs(ci, *got, true);
			ret = 1;
		}
	} else {
		int session_readonly = false;
		if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) {
			struct ceph_mds_session *s = ci->i_auth_cap->session;
			spin_lock(&s->s_cap_lock);
			session_readonly = s->s_readonly;
			spin_unlock(&s->s_cap_lock);
		}
		if (session_readonly) {
			dout("get_cap_refs %p needed %s but mds%d readonly\n",
			     inode, ceph_cap_string(need), ci->i_auth_cap->mds);
			*err = -EROFS;
			ret = 1;
			goto out_unlock;
		}

		if (ci->i_ceph_flags & CEPH_I_CAP_DROPPED) {
			int mds_wanted;
			if (READ_ONCE(mdsc->fsc->mount_state) ==
			    CEPH_MOUNT_SHUTDOWN) {
				dout("get_cap_refs %p forced umount\n", inode);
				*err = -EIO;
				ret = 1;
				goto out_unlock;
			}
			mds_wanted = __ceph_caps_mds_wanted(ci, false);
			if (need & ~(mds_wanted & need)) {
				dout("get_cap_refs %p caps were dropped"
				     " (session killed?)\n", inode);
				*err = -ESTALE;
				ret = 1;
				goto out_unlock;
			}
			if (!(file_wanted & ~mds_wanted))
				ci->i_ceph_flags &= ~CEPH_I_CAP_DROPPED;
		}

		dout("get_cap_refs %p have %s needed %s\n", inode,
		     ceph_cap_string(have), ceph_cap_string(need));
	}
out_unlock:
	spin_unlock(&ci->i_ceph_lock);
	if (snap_rwsem_locked)
		up_read(&mdsc->snap_rwsem);

	dout("get_cap_refs %p ret %d got %s\n", inode,
	     ret, ceph_cap_string(*got));
	return ret;
}
2538
2539/*
2540 * Check the offset we are writing up to against our current
2541 * max_size. If necessary, tell the MDS we want to write to
2542 * a larger offset.
2543 */
2544static void check_max_size(struct inode *inode, loff_t endoff)
2545{
2546 struct ceph_inode_info *ci = ceph_inode(inode);
2547 int check = 0;
2548
2549 /* do we need to explicitly request a larger max_size? */
be655596 2550 spin_lock(&ci->i_ceph_lock);
3871cbb9 2551 if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
a8599bd8
SW
2552 dout("write %p at large endoff %llu, req max_size\n",
2553 inode, endoff);
2554 ci->i_wanted_max_size = endoff;
a8599bd8 2555 }
3871cbb9
YZ
2556 /* duplicate ceph_check_caps()'s logic */
2557 if (ci->i_auth_cap &&
2558 (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) &&
2559 ci->i_wanted_max_size > ci->i_max_size &&
2560 ci->i_wanted_max_size > ci->i_requested_max_size)
2561 check = 1;
be655596 2562 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2563 if (check)
2564 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2565}
2566
2b1ac852
YZ
2567int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
2568{
2569 int ret, err = 0;
2570
2571 BUG_ON(need & ~CEPH_CAP_FILE_RD);
2572 BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
2573 ret = ceph_pool_perm_check(ci, need);
2574 if (ret < 0)
2575 return ret;
2576
2577 ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
2578 if (ret) {
2579 if (err == -EAGAIN) {
2580 ret = 0;
2581 } else if (err < 0) {
2582 ret = err;
2583 }
2584 }
2585 return ret;
2586}
2587
/*
 * Wait for caps, and take cap references. If we can't get a WR cap
 * due to a small max_size, make sure we check_max_size (and possibly
 * ask the mds) so we don't get hung up indefinitely.
 *
 * On success returns 0 with *got holding the caps taken; if inline
 * data had to be pinned, *pinned_page is set (caller must release it).
 */
int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
		  loff_t endoff, int *got, struct page **pinned_page)
{
	int _got, ret, err = 0;

	ret = ceph_pool_perm_check(ci, need);
	if (ret < 0)
		return ret;

	while (true) {
		if (endoff > 0)
			check_max_size(&ci->vfs_inode, endoff);

		err = 0;
		_got = 0;
		/* first try without blocking */
		ret = try_get_cap_refs(ci, need, want, endoff,
				       false, &_got, &err);
		if (ret) {
			if (err == -EAGAIN)
				continue;
			if (err < 0)
				ret = err;
		} else {
			/* caps not available yet: sleep on i_cap_wq and
			 * retry (nonblock=true since we're not in
			 * TASK_RUNNING while queued) */
			DEFINE_WAIT_FUNC(wait, woken_wake_function);
			add_wait_queue(&ci->i_cap_wq, &wait);

			while (!try_get_cap_refs(ci, need, want, endoff,
						 true, &_got, &err)) {
				if (signal_pending(current)) {
					ret = -ERESTARTSYS;
					break;
				}
				wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
			}

			remove_wait_queue(&ci->i_cap_wq, &wait);

			if (err == -EAGAIN)
				continue;
			if (err < 0)
				ret = err;
		}
		if (ret < 0) {
			if (err == -ESTALE) {
				/* session was killed, try renew caps */
				ret = ceph_renew_caps(&ci->vfs_inode);
				if (ret == 0)
					continue;
			}
			return ret;
		}

		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
		    i_size_read(&ci->vfs_inode) > 0) {
			struct page *page =
				find_get_page(ci->vfs_inode.i_mapping, 0);
			if (page) {
				if (PageUptodate(page)) {
					*pinned_page = page;
					break;
				}
				put_page(page);
			}
			/*
			 * drop cap refs first because getattr while
			 * holding caps refs can cause deadlock.
			 */
			ceph_put_cap_refs(ci, _got);
			_got = 0;

			/*
			 * getattr request will bring inline data into
			 * page cache
			 */
			ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
						CEPH_STAT_CAP_INLINE_DATA,
						true);
			if (ret < 0)
				return ret;
			continue;
		}
		break;
	}

	if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
		ceph_fscache_revalidate_cookie(ci);

	*got = _got;
	return 0;
}
2684
/*
 * Take cap refs. Caller must already know we hold at least one ref
 * on the caps in question or we don't know this is safe.
 */
void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
{
	spin_lock(&ci->i_ceph_lock);
	/* snap_rwsem_locked=false: an existing ref implies i_head_snapc
	 * is already set up if FILE_WR is involved */
	__take_cap_refs(ci, caps, false);
	spin_unlock(&ci->i_ceph_lock);
}
2695
86056090
YZ
2696
2697/*
2698 * drop cap_snap that is not associated with any snapshot.
2699 * we don't need to send FLUSHSNAP message for it.
2700 */
70220ac8
YZ
2701static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
2702 struct ceph_cap_snap *capsnap)
86056090
YZ
2703{
2704 if (!capsnap->need_flush &&
2705 !capsnap->writing && !capsnap->dirty_pages) {
86056090
YZ
2706 dout("dropping cap_snap %p follows %llu\n",
2707 capsnap, capsnap->follows);
0e294387 2708 BUG_ON(capsnap->cap_flush.tid > 0);
86056090 2709 ceph_put_snap_context(capsnap->context);
70220ac8
YZ
2710 if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps))
2711 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
2712
86056090 2713 list_del(&capsnap->ci_item);
86056090
YZ
2714 ceph_put_cap_snap(capsnap);
2715 return 1;
2716 }
2717 return 0;
2718}
2719
/*
 * Release cap refs.
 *
 * If we released the last ref on any given cap, call ceph_check_caps
 * to release (or schedule a release).
 *
 * If we are releasing a WR cap (from a sync write), finalize any affected
 * cap_snap, and wake up any waiters.
 */
void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0, put = 0, flushsnaps = 0, wake = 0;

	spin_lock(&ci->i_ceph_lock);
	if (had & CEPH_CAP_PIN)
		--ci->i_pin_ref;
	if (had & CEPH_CAP_FILE_RD)
		if (--ci->i_rd_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_CACHE)
		if (--ci->i_rdcache_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_BUFFER) {
		if (--ci->i_wb_ref == 0) {
			/* last buffered-write ref: drop the inode pin taken
			 * in __take_cap_refs (iput below, after unlock) */
			last++;
			put++;
		}
		dout("put_cap_refs %p wb %d -> %d (?)\n",
		     inode, ci->i_wb_ref+1, ci->i_wb_ref);
	}
	if (had & CEPH_CAP_FILE_WR)
		if (--ci->i_wr_ref == 0) {
			last++;
			if (__ceph_have_pending_cap_snap(ci)) {
				/* last writer: finalize the newest cap_snap */
				struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
				capsnap->writing = 0;
				if (ceph_try_drop_cap_snap(ci, capsnap))
					put++;
				else if (__ceph_finish_cap_snap(ci, capsnap))
					flushsnaps = 1;
				wake = 1;
			}
			if (ci->i_wrbuffer_ref_head == 0 &&
			    ci->i_dirty_caps == 0 &&
			    ci->i_flushing_caps == 0) {
				BUG_ON(!ci->i_head_snapc);
				ceph_put_snap_context(ci->i_head_snapc);
				ci->i_head_snapc = NULL;
			}
			/* see comment in __ceph_remove_cap() */
			if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
				drop_inode_snap_realm(ci);
		}
	spin_unlock(&ci->i_ceph_lock);

	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
	     last ? " last" : "", put ? " put" : "");

	if (last && !flushsnaps)
		ceph_check_caps(ci, 0, NULL);
	else if (flushsnaps)
		ceph_flush_snaps(ci, NULL);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
	while (put-- > 0)
		iput(inode);
}
2791
/*
 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
 * context. Adjust per-snap dirty page accounting as appropriate.
 * Once all dirty data for a cap_snap is flushed, flush snapped file
 * metadata back to the MDS. If we dropped the last ref, call
 * ceph_check_caps.
 */
void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
				struct ceph_snap_context *snapc)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap_snap *capsnap = NULL;
	int put = 0;
	bool last = false;
	bool found = false;
	bool flush_snaps = false;
	bool complete_capsnap = false;

	spin_lock(&ci->i_ceph_lock);
	ci->i_wrbuffer_ref -= nr;
	if (ci->i_wrbuffer_ref == 0) {
		last = true;
		put++;
	}

	if (ci->i_head_snapc == snapc) {
		/* refs belong to the live (head) snap context */
		ci->i_wrbuffer_ref_head -= nr;
		if (ci->i_wrbuffer_ref_head == 0 &&
		    ci->i_wr_ref == 0 &&
		    ci->i_dirty_caps == 0 &&
		    ci->i_flushing_caps == 0) {
			BUG_ON(!ci->i_head_snapc);
			ceph_put_snap_context(ci->i_head_snapc);
			ci->i_head_snapc = NULL;
		}
		dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
		     inode,
		     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
		     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
		     last ? " LAST" : "");
	} else {
		/* refs belong to some older cap_snap: find it */
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				found = true;
				break;
			}
		}
		BUG_ON(!found);
		capsnap->dirty_pages -= nr;
		if (capsnap->dirty_pages == 0) {
			complete_capsnap = true;
			if (!capsnap->writing) {
				if (ceph_try_drop_cap_snap(ci, capsnap)) {
					put++;
				} else {
					ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
					flush_snaps = true;
				}
			}
		}
		dout("put_wrbuffer_cap_refs on %p cap_snap %p "
		     " snap %lld %d/%d -> %d/%d %s%s\n",
		     inode, capsnap, capsnap->context->seq,
		     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
		     ci->i_wrbuffer_ref, capsnap->dirty_pages,
		     last ? " (wrbuffer last)" : "",
		     complete_capsnap ? " (complete capsnap)" : "");
	}

	spin_unlock(&ci->i_ceph_lock);

	if (last) {
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
	} else if (flush_snaps) {
		ceph_flush_snaps(ci, NULL);
	}
	if (complete_capsnap)
		wake_up_all(&ci->i_cap_wq);
	while (put-- > 0)
		iput(inode);
}
2873
/*
 * Invalidate unlinked inode's aliases, so we can drop the inode ASAP.
 */
static void invalidate_aliases(struct inode *inode)
{
	struct dentry *dn, *prev = NULL;

	dout("invalidate_aliases inode %p\n", inode);
	d_prune_aliases(inode);
	/*
	 * For non-directory inode, d_find_alias() only returns
	 * hashed dentry. After calling d_invalidate(), the
	 * dentry becomes unhashed.
	 *
	 * For directory inode, d_find_alias() can return
	 * unhashed dentry. But directory inode should have
	 * one alias at most.
	 */
	while ((dn = d_find_alias(inode))) {
		/* got the same dentry again: invalidation made no
		 * progress, stop to avoid looping forever */
		if (dn == prev) {
			dput(dn);
			break;
		}
		d_invalidate(dn);
		/* keep a ref on the previous dentry until the next
		 * iteration so the loop-detection compare stays valid */
		if (prev)
			dput(prev);
		prev = dn;
	}
	if (prev)
		dput(prev);
}
2905
a8599bd8
SW
2906/*
2907 * Handle a cap GRANT message from the MDS. (Note that a GRANT may
2908 * actually be a revocation if it specifies a smaller cap set.)
2909 *
be655596 2910 * caller holds s_mutex and i_ceph_lock, we drop both.
a8599bd8 2911 */
2cd698be
YZ
2912static void handle_cap_grant(struct ceph_mds_client *mdsc,
2913 struct inode *inode, struct ceph_mds_caps *grant,
779fe0fb
YZ
2914 struct ceph_string **pns, u64 inline_version,
2915 void *inline_data, u32 inline_len,
2cd698be 2916 struct ceph_buffer *xattr_buf,
15637c8b 2917 struct ceph_mds_session *session,
779fe0fb 2918 struct ceph_cap *cap, int issued)
2cd698be 2919 __releases(ci->i_ceph_lock)
982d6011 2920 __releases(mdsc->snap_rwsem)
a8599bd8
SW
2921{
2922 struct ceph_inode_info *ci = ceph_inode(inode);
2923 int mds = session->s_mds;
2f56f56a 2924 int seq = le32_to_cpu(grant->seq);
a8599bd8 2925 int newcaps = le32_to_cpu(grant->caps);
2cd698be 2926 int used, wanted, dirty;
a8599bd8
SW
2927 u64 size = le64_to_cpu(grant->size);
2928 u64 max_size = le64_to_cpu(grant->max_size);
2929 struct timespec mtime, atime, ctime;
15637c8b 2930 int check_caps = 0;
ab6c2c3e
FF
2931 bool wake = false;
2932 bool writeback = false;
2933 bool queue_trunc = false;
2934 bool queue_invalidate = false;
ab6c2c3e 2935 bool deleted_inode = false;
31c542a1 2936 bool fill_inline = false;
a8599bd8 2937
2f56f56a
SW
2938 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2939 inode, cap, mds, seq, ceph_cap_string(newcaps));
a8599bd8
SW
2940 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2941 inode->i_size);
2942
11df2dfb
YZ
2943
2944 /*
2945 * auth mds of the inode changed. we received the cap export message,
2946 * but still haven't received the cap import message. handle_cap_export
2947 * updated the new auth MDS' cap.
2948 *
2949 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message
2950 * that was sent before the cap import message. So don't remove caps.
2951 */
2952 if (ceph_seq_cmp(seq, cap->seq) <= 0) {
2953 WARN_ON(cap != ci->i_auth_cap);
2954 WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id));
2955 seq = cap->seq;
2956 newcaps |= cap->issued;
2957 }
2958
a8599bd8
SW
2959 /*
2960 * If CACHE is being revoked, and we have no dirty buffers,
2961 * try to invalidate (once). (If there are dirty buffers, we
2962 * will invalidate _after_ writeback.)
2963 */
fdd4e158
YZ
2964 if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */
2965 ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
3b454c49 2966 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
9abd4db7 2967 !(ci->i_wrbuffer_ref || ci->i_wb_ref)) {
e9075743 2968 if (try_nonblocking_invalidate(inode)) {
a8599bd8
SW
2969 /* there were locked pages.. invalidate later
2970 in a separate thread. */
2971 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
ab6c2c3e 2972 queue_invalidate = true;
a8599bd8
SW
2973 ci->i_rdcache_revoking = ci->i_rdcache_gen;
2974 }
a8599bd8 2975 }
a8599bd8
SW
2976 }
2977
2978 /* side effects now are allowed */
685f9a5d 2979 cap->cap_gen = session->s_cap_gen;
11df2dfb 2980 cap->seq = seq;
a8599bd8
SW
2981
2982 __check_cap_issue(ci, cap, newcaps);
2983
f98a128a
YZ
2984 if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
2985 (issued & CEPH_CAP_AUTH_EXCL) == 0) {
a8599bd8 2986 inode->i_mode = le32_to_cpu(grant->mode);
05cb11c1
EB
2987 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
2988 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
a8599bd8 2989 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
bd2bae6a
EB
2990 from_kuid(&init_user_ns, inode->i_uid),
2991 from_kgid(&init_user_ns, inode->i_gid));
a8599bd8
SW
2992 }
2993
f98a128a
YZ
2994 if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
2995 (issued & CEPH_CAP_LINK_EXCL) == 0) {
bfe86848 2996 set_nlink(inode, le32_to_cpu(grant->nlink));
ca20c991
YZ
2997 if (inode->i_nlink == 0 &&
2998 (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
ab6c2c3e 2999 deleted_inode = true;
ca20c991 3000 }
a8599bd8
SW
3001
3002 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
3003 int len = le32_to_cpu(grant->xattr_len);
3004 u64 version = le64_to_cpu(grant->xattr_version);
3005
3006 if (version > ci->i_xattrs.version) {
3007 dout(" got new xattrs v%llu on %p len %d\n",
3008 version, inode, len);
3009 if (ci->i_xattrs.blob)
3010 ceph_buffer_put(ci->i_xattrs.blob);
3011 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
3012 ci->i_xattrs.version = version;
7221fe4c 3013 ceph_forget_all_cached_acls(inode);
a8599bd8
SW
3014 }
3015 }
3016
f98a128a
YZ
3017 if (newcaps & CEPH_CAP_ANY_RD) {
3018 /* ctime/mtime/atime? */
3019 ceph_decode_timespec(&mtime, &grant->mtime);
3020 ceph_decode_timespec(&atime, &grant->atime);
3021 ceph_decode_timespec(&ctime, &grant->ctime);
3022 ceph_fill_file_time(inode, issued,
3023 le32_to_cpu(grant->time_warp_seq),
3024 &ctime, &mtime, &atime);
3025 }
3026
3027 if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
3028 /* file layout may have changed */
7627151e 3029 s64 old_pool = ci->i_layout.pool_id;
779fe0fb
YZ
3030 struct ceph_string *old_ns;
3031
7627151e 3032 ceph_file_layout_from_legacy(&ci->i_layout, &grant->layout);
779fe0fb
YZ
3033 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
3034 lockdep_is_held(&ci->i_ceph_lock));
3035 rcu_assign_pointer(ci->i_layout.pool_ns, *pns);
3036
3037 if (ci->i_layout.pool_id != old_pool || *pns != old_ns)
7627151e 3038 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
5ea5c5e0 3039
779fe0fb
YZ
3040 *pns = old_ns;
3041
f98a128a
YZ
3042 /* size/truncate_seq? */
3043 queue_trunc = ceph_fill_file_size(inode, issued,
3044 le32_to_cpu(grant->truncate_seq),
3045 le64_to_cpu(grant->truncate_size),
3046 size);
84eea8c7
YZ
3047 }
3048
3049 if (ci->i_auth_cap == cap && (newcaps & CEPH_CAP_ANY_FILE_WR)) {
3050 if (max_size != ci->i_max_size) {
f98a128a
YZ
3051 dout("max_size %lld -> %llu\n",
3052 ci->i_max_size, max_size);
3053 ci->i_max_size = max_size;
3054 if (max_size >= ci->i_wanted_max_size) {
3055 ci->i_wanted_max_size = 0; /* reset */
3056 ci->i_requested_max_size = 0;
3057 }
ab6c2c3e 3058 wake = true;
84eea8c7
YZ
3059 } else if (ci->i_wanted_max_size > ci->i_max_size &&
3060 ci->i_wanted_max_size > ci->i_requested_max_size) {
3061 /* CEPH_CAP_OP_IMPORT */
3062 wake = true;
a8599bd8 3063 }
a8599bd8
SW
3064 }
3065
3066 /* check cap bits */
3067 wanted = __ceph_caps_wanted(ci);
3068 used = __ceph_caps_used(ci);
3069 dirty = __ceph_caps_dirty(ci);
3070 dout(" my wanted = %s, used = %s, dirty %s\n",
3071 ceph_cap_string(wanted),
3072 ceph_cap_string(used),
3073 ceph_cap_string(dirty));
3074 if (wanted != le32_to_cpu(grant->wanted)) {
3075 dout("mds wanted %s -> %s\n",
3076 ceph_cap_string(le32_to_cpu(grant->wanted)),
3077 ceph_cap_string(wanted));
390306c3
YZ
3078 /* imported cap may not have correct mds_wanted */
3079 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
3080 check_caps = 1;
a8599bd8
SW
3081 }
3082
a8599bd8
SW
3083 /* revocation, grant, or no-op? */
3084 if (cap->issued & ~newcaps) {
3b454c49
SW
3085 int revoking = cap->issued & ~newcaps;
3086
3087 dout("revocation: %s -> %s (revoking %s)\n",
3088 ceph_cap_string(cap->issued),
3089 ceph_cap_string(newcaps),
3090 ceph_cap_string(revoking));
0eb6cd49 3091 if (revoking & used & CEPH_CAP_FILE_BUFFER)
ab6c2c3e 3092 writeback = true; /* initiate writeback; will delay ack */
3b454c49
SW
3093 else if (revoking == CEPH_CAP_FILE_CACHE &&
3094 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
3095 queue_invalidate)
3096 ; /* do nothing yet, invalidation will be queued */
3097 else if (cap == ci->i_auth_cap)
3098 check_caps = 1; /* check auth cap only */
3099 else
3100 check_caps = 2; /* check all caps */
a8599bd8 3101 cap->issued = newcaps;
978097c9 3102 cap->implemented |= newcaps;
a8599bd8
SW
3103 } else if (cap->issued == newcaps) {
3104 dout("caps unchanged: %s -> %s\n",
3105 ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
3106 } else {
3107 dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
3108 ceph_cap_string(newcaps));
6ee6b953
YZ
3109 /* non-auth MDS is revoking the newly grant caps ? */
3110 if (cap == ci->i_auth_cap &&
3111 __ceph_caps_revoking_other(ci, cap, newcaps))
3112 check_caps = 2;
3113
a8599bd8
SW
3114 cap->issued = newcaps;
3115 cap->implemented |= newcaps; /* add bits only, to
3116 * avoid stepping on a
3117 * pending revocation */
ab6c2c3e 3118 wake = true;
a8599bd8 3119 }
978097c9 3120 BUG_ON(cap->issued & ~cap->implemented);
a8599bd8 3121
31c542a1
YZ
3122 if (inline_version > 0 && inline_version >= ci->i_inline_version) {
3123 ci->i_inline_version = inline_version;
3124 if (ci->i_inline_version != CEPH_INLINE_NONE &&
3125 (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)))
3126 fill_inline = true;
3127 }
3128
2cd698be 3129 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
2cd698be 3130 if (newcaps & ~issued)
ab6c2c3e 3131 wake = true;
0e294387
YZ
3132 kick_flushing_inode_caps(mdsc, session, inode);
3133 up_read(&mdsc->snap_rwsem);
3134 } else {
3135 spin_unlock(&ci->i_ceph_lock);
2cd698be
YZ
3136 }
3137
31c542a1
YZ
3138 if (fill_inline)
3139 ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
3140
14649758 3141 if (queue_trunc)
c6bcda6f 3142 ceph_queue_vmtruncate(inode);
c6bcda6f 3143
3c6f6b79 3144 if (writeback)
a8599bd8
SW
3145 /*
3146 * queue inode for writeback: we can't actually call
3147 * filemap_write_and_wait, etc. from message handler
3148 * context.
3149 */
3c6f6b79
SW
3150 ceph_queue_writeback(inode);
3151 if (queue_invalidate)
3152 ceph_queue_invalidate(inode);
ca20c991
YZ
3153 if (deleted_inode)
3154 invalidate_aliases(inode);
a8599bd8 3155 if (wake)
03066f23 3156 wake_up_all(&ci->i_cap_wq);
15637c8b
SW
3157
3158 if (check_caps == 1)
3159 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
3160 session);
3161 else if (check_caps == 2)
3162 ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
3163 else
3164 mutex_unlock(&session->s_mutex);
a8599bd8
SW
3165}
3166
3167/*
3168 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
3169 * MDS has been safely committed.
3170 */
6df058c0 3171static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
a8599bd8
SW
3172 struct ceph_mds_caps *m,
3173 struct ceph_mds_session *session,
3174 struct ceph_cap *cap)
be655596 3175 __releases(ci->i_ceph_lock)
a8599bd8
SW
3176{
3177 struct ceph_inode_info *ci = ceph_inode(inode);
3d14c5d2 3178 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
e4500b5e 3179 struct ceph_cap_flush *cf, *tmp_cf;
553adfd9 3180 LIST_HEAD(to_remove);
a8599bd8
SW
3181 unsigned seq = le32_to_cpu(m->seq);
3182 int dirty = le32_to_cpu(m->dirty);
3183 int cleaned = 0;
c8799fc4
YZ
3184 bool drop = false;
3185 bool wake_ci = 0;
3186 bool wake_mdsc = 0;
a8599bd8 3187
e4500b5e 3188 list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) {
553adfd9
YZ
3189 if (cf->tid == flush_tid)
3190 cleaned = cf->caps;
0e294387
YZ
3191 if (cf->caps == 0) /* capsnap */
3192 continue;
553adfd9 3193 if (cf->tid <= flush_tid) {
c8799fc4
YZ
3194 if (__finish_cap_flush(NULL, ci, cf))
3195 wake_ci = true;
e4500b5e 3196 list_add_tail(&cf->i_list, &to_remove);
553adfd9
YZ
3197 } else {
3198 cleaned &= ~cf->caps;
3199 if (!cleaned)
3200 break;
3201 }
3202 }
a8599bd8
SW
3203
3204 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
3205 " flushing %s -> %s\n",
3206 inode, session->s_mds, seq, ceph_cap_string(dirty),
3207 ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
3208 ceph_cap_string(ci->i_flushing_caps & ~cleaned));
3209
8310b089 3210 if (list_empty(&to_remove) && !cleaned)
a8599bd8
SW
3211 goto out;
3212
a8599bd8 3213 ci->i_flushing_caps &= ~cleaned;
a8599bd8
SW
3214
3215 spin_lock(&mdsc->cap_dirty_lock);
8310b089 3216
c8799fc4
YZ
3217 list_for_each_entry(cf, &to_remove, i_list) {
3218 if (__finish_cap_flush(mdsc, NULL, cf))
3219 wake_mdsc = true;
8310b089
YZ
3220 }
3221
a8599bd8 3222 if (ci->i_flushing_caps == 0) {
0e294387
YZ
3223 if (list_empty(&ci->i_cap_flush_list)) {
3224 list_del_init(&ci->i_flushing_item);
3225 if (!list_empty(&session->s_cap_flushing)) {
3226 dout(" mds%d still flushing cap on %p\n",
3227 session->s_mds,
3228 &list_first_entry(&session->s_cap_flushing,
3229 struct ceph_inode_info,
3230 i_flushing_item)->vfs_inode);
3231 }
3232 }
a8599bd8 3233 mdsc->num_cap_flushing--;
a8599bd8 3234 dout(" inode %p now !flushing\n", inode);
afcdaea3
SW
3235
3236 if (ci->i_dirty_caps == 0) {
3237 dout(" inode %p now clean\n", inode);
3238 BUG_ON(!list_empty(&ci->i_dirty_item));
c8799fc4 3239 drop = true;
5dda377c
YZ
3240 if (ci->i_wr_ref == 0 &&
3241 ci->i_wrbuffer_ref_head == 0) {
7d8cb26d
SW
3242 BUG_ON(!ci->i_head_snapc);
3243 ceph_put_snap_context(ci->i_head_snapc);
3244 ci->i_head_snapc = NULL;
3245 }
76e3b390
SW
3246 } else {
3247 BUG_ON(list_empty(&ci->i_dirty_item));
afcdaea3 3248 }
a8599bd8
SW
3249 }
3250 spin_unlock(&mdsc->cap_dirty_lock);
a8599bd8
SW
3251
3252out:
be655596 3253 spin_unlock(&ci->i_ceph_lock);
553adfd9
YZ
3254
3255 while (!list_empty(&to_remove)) {
3256 cf = list_first_entry(&to_remove,
e4500b5e
YZ
3257 struct ceph_cap_flush, i_list);
3258 list_del(&cf->i_list);
f66fd9f0 3259 ceph_free_cap_flush(cf);
553adfd9 3260 }
c8799fc4
YZ
3261
3262 if (wake_ci)
3263 wake_up_all(&ci->i_cap_wq);
3264 if (wake_mdsc)
3265 wake_up_all(&mdsc->cap_flushing_wq);
afcdaea3 3266 if (drop)
a8599bd8
SW
3267 iput(inode);
3268}
3269
3270/*
3271 * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can
3272 * throw away our cap_snap.
3273 *
3274 * Caller hold s_mutex.
3275 */
6df058c0 3276static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
a8599bd8
SW
3277 struct ceph_mds_caps *m,
3278 struct ceph_mds_session *session)
3279{
3280 struct ceph_inode_info *ci = ceph_inode(inode);
affbc19a 3281 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
a8599bd8 3282 u64 follows = le64_to_cpu(m->snap_follows);
a8599bd8 3283 struct ceph_cap_snap *capsnap;
c8799fc4
YZ
3284 bool flushed = false;
3285 bool wake_ci = false;
3286 bool wake_mdsc = false;
a8599bd8
SW
3287
3288 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
3289 inode, ci, session->s_mds, follows);
3290
be655596 3291 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
3292 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
3293 if (capsnap->follows == follows) {
0e294387 3294 if (capsnap->cap_flush.tid != flush_tid) {
a8599bd8
SW
3295 dout(" cap_snap %p follows %lld tid %lld !="
3296 " %lld\n", capsnap, follows,
0e294387 3297 flush_tid, capsnap->cap_flush.tid);
a8599bd8
SW
3298 break;
3299 }
c8799fc4 3300 flushed = true;
a8599bd8
SW
3301 break;
3302 } else {
3303 dout(" skipping cap_snap %p follows %lld\n",
3304 capsnap, capsnap->follows);
3305 }
3306 }
0e294387 3307 if (flushed) {
0e294387
YZ
3308 WARN_ON(capsnap->dirty_pages || capsnap->writing);
3309 dout(" removing %p cap_snap %p follows %lld\n",
3310 inode, capsnap, follows);
3311 list_del(&capsnap->ci_item);
c8799fc4
YZ
3312 if (__finish_cap_flush(NULL, ci, &capsnap->cap_flush))
3313 wake_ci = true;
0e294387
YZ
3314
3315 spin_lock(&mdsc->cap_dirty_lock);
3316
3317 if (list_empty(&ci->i_cap_flush_list))
3318 list_del_init(&ci->i_flushing_item);
3319
c8799fc4
YZ
3320 if (__finish_cap_flush(mdsc, NULL, &capsnap->cap_flush))
3321 wake_mdsc = true;
0e294387
YZ
3322
3323 spin_unlock(&mdsc->cap_dirty_lock);
0e294387 3324 }
be655596 3325 spin_unlock(&ci->i_ceph_lock);
0e294387
YZ
3326 if (flushed) {
3327 ceph_put_snap_context(capsnap->context);
3328 ceph_put_cap_snap(capsnap);
c8799fc4
YZ
3329 if (wake_ci)
3330 wake_up_all(&ci->i_cap_wq);
3331 if (wake_mdsc)
3332 wake_up_all(&mdsc->cap_flushing_wq);
a8599bd8 3333 iput(inode);
0e294387 3334 }
a8599bd8
SW
3335}
3336
3337/*
3338 * Handle TRUNC from MDS, indicating file truncation.
3339 *
3340 * caller hold s_mutex.
3341 */
3342static void handle_cap_trunc(struct inode *inode,
3343 struct ceph_mds_caps *trunc,
3344 struct ceph_mds_session *session)
be655596 3345 __releases(ci->i_ceph_lock)
a8599bd8
SW
3346{
3347 struct ceph_inode_info *ci = ceph_inode(inode);
3348 int mds = session->s_mds;
3349 int seq = le32_to_cpu(trunc->seq);
3350 u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
3351 u64 truncate_size = le64_to_cpu(trunc->truncate_size);
3352 u64 size = le64_to_cpu(trunc->size);
3353 int implemented = 0;
3354 int dirty = __ceph_caps_dirty(ci);
3355 int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
3356 int queue_trunc = 0;
3357
3358 issued |= implemented | dirty;
3359
3360 dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
3361 inode, mds, seq, truncate_size, truncate_seq);
3362 queue_trunc = ceph_fill_file_size(inode, issued,
3363 truncate_seq, truncate_size, size);
be655596 3364 spin_unlock(&ci->i_ceph_lock);
a8599bd8 3365
14649758 3366 if (queue_trunc)
3c6f6b79 3367 ceph_queue_vmtruncate(inode);
a8599bd8
SW
3368}
3369
3370/*
3371 * Handle EXPORT from MDS. Cap is being migrated _from_ this mds to a
3372 * different one. If we are the most recent migration we've seen (as
3373 * indicated by mseq), make note of the migrating cap bits for the
3374 * duration (until we see the corresponding IMPORT).
3375 *
3376 * caller holds s_mutex
3377 */
3378static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
11df2dfb
YZ
3379 struct ceph_mds_cap_peer *ph,
3380 struct ceph_mds_session *session)
a8599bd8 3381{
db354052 3382 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
11df2dfb 3383 struct ceph_mds_session *tsession = NULL;
d9df2783 3384 struct ceph_cap *cap, *tcap, *new_cap = NULL;
a8599bd8 3385 struct ceph_inode_info *ci = ceph_inode(inode);
11df2dfb 3386 u64 t_cap_id;
a8599bd8 3387 unsigned mseq = le32_to_cpu(ex->migrate_seq);
11df2dfb
YZ
3388 unsigned t_seq, t_mseq;
3389 int target, issued;
3390 int mds = session->s_mds;
a8599bd8 3391
11df2dfb
YZ
3392 if (ph) {
3393 t_cap_id = le64_to_cpu(ph->cap_id);
3394 t_seq = le32_to_cpu(ph->seq);
3395 t_mseq = le32_to_cpu(ph->mseq);
3396 target = le32_to_cpu(ph->mds);
3397 } else {
3398 t_cap_id = t_seq = t_mseq = 0;
3399 target = -1;
3400 }
a8599bd8 3401
11df2dfb
YZ
3402 dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
3403 inode, ci, mds, mseq, target);
3404retry:
be655596 3405 spin_lock(&ci->i_ceph_lock);
11df2dfb 3406 cap = __get_cap_for_mds(ci, mds);
ca665e02 3407 if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
11df2dfb 3408 goto out_unlock;
a8599bd8 3409
11df2dfb
YZ
3410 if (target < 0) {
3411 __ceph_remove_cap(cap, false);
77310320
YZ
3412 if (!ci->i_auth_cap)
3413 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
11df2dfb 3414 goto out_unlock;
a8599bd8
SW
3415 }
3416
11df2dfb
YZ
3417 /*
3418 * now we know we haven't received the cap import message yet
3419 * because the exported cap still exist.
3420 */
db354052 3421
11df2dfb
YZ
3422 issued = cap->issued;
3423 WARN_ON(issued != cap->implemented);
3424
3425 tcap = __get_cap_for_mds(ci, target);
3426 if (tcap) {
3427 /* already have caps from the target */
3428 if (tcap->cap_id != t_cap_id ||
3429 ceph_seq_cmp(tcap->seq, t_seq) < 0) {
3430 dout(" updating import cap %p mds%d\n", tcap, target);
3431 tcap->cap_id = t_cap_id;
3432 tcap->seq = t_seq - 1;
3433 tcap->issue_seq = t_seq - 1;
3434 tcap->mseq = t_mseq;
3435 tcap->issued |= issued;
3436 tcap->implemented |= issued;
3437 if (cap == ci->i_auth_cap)
3438 ci->i_auth_cap = tcap;
00f06cba 3439
0e294387
YZ
3440 if (!list_empty(&ci->i_cap_flush_list) &&
3441 ci->i_auth_cap == tcap) {
11df2dfb
YZ
3442 spin_lock(&mdsc->cap_dirty_lock);
3443 list_move_tail(&ci->i_flushing_item,
3444 &tcap->session->s_cap_flushing);
3445 spin_unlock(&mdsc->cap_dirty_lock);
db354052 3446 }
a8599bd8 3447 }
a096b09a 3448 __ceph_remove_cap(cap, false);
11df2dfb 3449 goto out_unlock;
d9df2783 3450 } else if (tsession) {
11df2dfb 3451 /* add placeholder for the export tagert */
d9df2783 3452 int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
00f06cba 3453 tcap = new_cap;
11df2dfb 3454 ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0,
d9df2783
YZ
3455 t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
3456
00f06cba
YZ
3457 if (!list_empty(&ci->i_cap_flush_list) &&
3458 ci->i_auth_cap == tcap) {
3459 spin_lock(&mdsc->cap_dirty_lock);
3460 list_move_tail(&ci->i_flushing_item,
3461 &tcap->session->s_cap_flushing);
3462 spin_unlock(&mdsc->cap_dirty_lock);
3463 }
3464
d9df2783
YZ
3465 __ceph_remove_cap(cap, false);
3466 goto out_unlock;
a8599bd8
SW
3467 }
3468
be655596 3469 spin_unlock(&ci->i_ceph_lock);
11df2dfb
YZ
3470 mutex_unlock(&session->s_mutex);
3471
3472 /* open target session */
3473 tsession = ceph_mdsc_open_export_target_session(mdsc, target);
3474 if (!IS_ERR(tsession)) {
3475 if (mds > target) {
3476 mutex_lock(&session->s_mutex);
3477 mutex_lock_nested(&tsession->s_mutex,
3478 SINGLE_DEPTH_NESTING);
3479 } else {
3480 mutex_lock(&tsession->s_mutex);
3481 mutex_lock_nested(&session->s_mutex,
3482 SINGLE_DEPTH_NESTING);
3483 }
d9df2783 3484 new_cap = ceph_get_cap(mdsc, NULL);
11df2dfb
YZ
3485 } else {
3486 WARN_ON(1);
3487 tsession = NULL;
3488 target = -1;
3489 }
3490 goto retry;
3491
3492out_unlock:
3493 spin_unlock(&ci->i_ceph_lock);
3494 mutex_unlock(&session->s_mutex);
3495 if (tsession) {
3496 mutex_unlock(&tsession->s_mutex);
3497 ceph_put_mds_session(tsession);
3498 }
d9df2783
YZ
3499 if (new_cap)
3500 ceph_put_cap(mdsc, new_cap);
a8599bd8
SW
3501}
3502
3503/*
2cd698be 3504 * Handle cap IMPORT.
a8599bd8 3505 *
2cd698be 3506 * caller holds s_mutex. acquires i_ceph_lock
a8599bd8
SW
3507 */
3508static void handle_cap_import(struct ceph_mds_client *mdsc,
3509 struct inode *inode, struct ceph_mds_caps *im,
4ee6a914 3510 struct ceph_mds_cap_peer *ph,
a8599bd8 3511 struct ceph_mds_session *session,
2cd698be
YZ
3512 struct ceph_cap **target_cap, int *old_issued)
3513 __acquires(ci->i_ceph_lock)
a8599bd8
SW
3514{
3515 struct ceph_inode_info *ci = ceph_inode(inode);
2cd698be 3516 struct ceph_cap *cap, *ocap, *new_cap = NULL;
a8599bd8 3517 int mds = session->s_mds;
2cd698be
YZ
3518 int issued;
3519 unsigned caps = le32_to_cpu(im->caps);
a8599bd8
SW
3520 unsigned wanted = le32_to_cpu(im->wanted);
3521 unsigned seq = le32_to_cpu(im->seq);
3522 unsigned mseq = le32_to_cpu(im->migrate_seq);
3523 u64 realmino = le64_to_cpu(im->realm);
3524 u64 cap_id = le64_to_cpu(im->cap_id);
4ee6a914
YZ
3525 u64 p_cap_id;
3526 int peer;
a8599bd8 3527
4ee6a914
YZ
3528 if (ph) {
3529 p_cap_id = le64_to_cpu(ph->cap_id);
3530 peer = le32_to_cpu(ph->mds);
3531 } else {
3532 p_cap_id = 0;
3533 peer = -1;
3534 }
db354052 3535
4ee6a914
YZ
3536 dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
3537 inode, ci, mds, mseq, peer);
3538
d9df2783 3539retry:
4ee6a914 3540 spin_lock(&ci->i_ceph_lock);
d9df2783
YZ
3541 cap = __get_cap_for_mds(ci, mds);
3542 if (!cap) {
3543 if (!new_cap) {
3544 spin_unlock(&ci->i_ceph_lock);
3545 new_cap = ceph_get_cap(mdsc, NULL);
3546 goto retry;
3547 }
2cd698be
YZ
3548 cap = new_cap;
3549 } else {
3550 if (new_cap) {
3551 ceph_put_cap(mdsc, new_cap);
3552 new_cap = NULL;
3553 }
d9df2783
YZ
3554 }
3555
2cd698be
YZ
3556 __ceph_caps_issued(ci, &issued);
3557 issued |= __ceph_caps_dirty(ci);
3558
3559 ceph_add_cap(inode, session, cap_id, -1, caps, wanted, seq, mseq,
d9df2783
YZ
3560 realmino, CEPH_CAP_FLAG_AUTH, &new_cap);
3561
2cd698be
YZ
3562 ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
3563 if (ocap && ocap->cap_id == p_cap_id) {
4ee6a914 3564 dout(" remove export cap %p mds%d flags %d\n",
2cd698be 3565 ocap, peer, ph->flags);
4ee6a914 3566 if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
2cd698be
YZ
3567 (ocap->seq != le32_to_cpu(ph->seq) ||
3568 ocap->mseq != le32_to_cpu(ph->mseq))) {
4ee6a914
YZ
3569 pr_err("handle_cap_import: mismatched seq/mseq: "
3570 "ino (%llx.%llx) mds%d seq %d mseq %d "
3571 "importer mds%d has peer seq %d mseq %d\n",
2cd698be
YZ
3572 ceph_vinop(inode), peer, ocap->seq,
3573 ocap->mseq, mds, le32_to_cpu(ph->seq),
4ee6a914 3574 le32_to_cpu(ph->mseq));
db354052 3575 }
2cd698be 3576 __ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
a8599bd8
SW
3577 }
3578
4ee6a914 3579 /* make sure we re-request max_size, if necessary */
4ee6a914 3580 ci->i_requested_max_size = 0;
d9df2783 3581
2cd698be
YZ
3582 *old_issued = issued;
3583 *target_cap = cap;
a8599bd8
SW
3584}
3585
3586/*
3587 * Handle a caps message from the MDS.
3588 *
3589 * Identify the appropriate session, inode, and call the right handler
3590 * based on the cap op.
3591 */
3592void ceph_handle_caps(struct ceph_mds_session *session,
3593 struct ceph_msg *msg)
3594{
3595 struct ceph_mds_client *mdsc = session->s_mdsc;
3d14c5d2 3596 struct super_block *sb = mdsc->fsc->sb;
a8599bd8 3597 struct inode *inode;
be655596 3598 struct ceph_inode_info *ci;
a8599bd8
SW
3599 struct ceph_cap *cap;
3600 struct ceph_mds_caps *h;
4ee6a914 3601 struct ceph_mds_cap_peer *peer = NULL;
779fe0fb
YZ
3602 struct ceph_snap_realm *realm = NULL;
3603 struct ceph_string *pool_ns = NULL;
2600d2dd 3604 int mds = session->s_mds;
2cd698be 3605 int op, issued;
3d7ded4d 3606 u32 seq, mseq;
a8599bd8 3607 struct ceph_vino vino;
6df058c0 3608 u64 tid;
fb01d1f8
YZ
3609 u64 inline_version = 0;
3610 void *inline_data = NULL;
3611 u32 inline_len = 0;
70edb55b 3612 void *snaptrace;
ce1fbc8d 3613 size_t snaptrace_len;
fb01d1f8 3614 void *p, *end;
a8599bd8
SW
3615
3616 dout("handle_caps from mds%d\n", mds);
3617
3618 /* decode */
4ee6a914 3619 end = msg->front.iov_base + msg->front.iov_len;
6df058c0 3620 tid = le64_to_cpu(msg->hdr.tid);
a8599bd8
SW
3621 if (msg->front.iov_len < sizeof(*h))
3622 goto bad;
3623 h = msg->front.iov_base;
3624 op = le32_to_cpu(h->op);
3625 vino.ino = le64_to_cpu(h->ino);
3626 vino.snap = CEPH_NOSNAP;
a8599bd8 3627 seq = le32_to_cpu(h->seq);
3d7ded4d 3628 mseq = le32_to_cpu(h->migrate_seq);
a8599bd8 3629
ce1fbc8d
SW
3630 snaptrace = h + 1;
3631 snaptrace_len = le32_to_cpu(h->snap_trace_len);
fb01d1f8 3632 p = snaptrace + snaptrace_len;
ce1fbc8d
SW
3633
3634 if (le16_to_cpu(msg->hdr.version) >= 2) {
fb01d1f8 3635 u32 flock_len;
ce1fbc8d 3636 ceph_decode_32_safe(&p, end, flock_len, bad);
4ee6a914
YZ
3637 if (p + flock_len > end)
3638 goto bad;
fb01d1f8 3639 p += flock_len;
ce1fbc8d
SW
3640 }
3641
4ee6a914
YZ
3642 if (le16_to_cpu(msg->hdr.version) >= 3) {
3643 if (op == CEPH_CAP_OP_IMPORT) {
4ee6a914
YZ
3644 if (p + sizeof(*peer) > end)
3645 goto bad;
3646 peer = p;
fb01d1f8 3647 p += sizeof(*peer);
11df2dfb
YZ
3648 } else if (op == CEPH_CAP_OP_EXPORT) {
3649 /* recorded in unused fields */
3650 peer = (void *)&h->size;
4ee6a914
YZ
3651 }
3652 }
3653
fb01d1f8
YZ
3654 if (le16_to_cpu(msg->hdr.version) >= 4) {
3655 ceph_decode_64_safe(&p, end, inline_version, bad);
3656 ceph_decode_32_safe(&p, end, inline_len, bad);
3657 if (p + inline_len > end)
3658 goto bad;
3659 inline_data = p;
3660 p += inline_len;
3661 }
3662
92475f05
JL
3663 if (le16_to_cpu(msg->hdr.version) >= 5) {
3664 struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
3665 u32 epoch_barrier;
3666
3667 ceph_decode_32_safe(&p, end, epoch_barrier, bad);
3668 ceph_osdc_update_epoch_barrier(osdc, epoch_barrier);
3669 }
3670
5ea5c5e0
YZ
3671 if (le16_to_cpu(msg->hdr.version) >= 8) {
3672 u64 flush_tid;
3673 u32 caller_uid, caller_gid;
779fe0fb 3674 u32 pool_ns_len;
92475f05 3675
5ea5c5e0
YZ
3676 /* version >= 6 */
3677 ceph_decode_64_safe(&p, end, flush_tid, bad);
3678 /* version >= 7 */
3679 ceph_decode_32_safe(&p, end, caller_uid, bad);
3680 ceph_decode_32_safe(&p, end, caller_gid, bad);
3681 /* version >= 8 */
3682 ceph_decode_32_safe(&p, end, pool_ns_len, bad);
779fe0fb
YZ
3683 if (pool_ns_len > 0) {
3684 ceph_decode_need(&p, end, pool_ns_len, bad);
3685 pool_ns = ceph_find_or_create_string(p, pool_ns_len);
3686 p += pool_ns_len;
3687 }
5ea5c5e0
YZ
3688 }
3689
6cd3bcad
YZ
3690 /* lookup ino */
3691 inode = ceph_find_inode(sb, vino);
3692 ci = ceph_inode(inode);
3693 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
3694 vino.snap, inode);
3695
a8599bd8
SW
3696 mutex_lock(&session->s_mutex);
3697 session->s_seq++;
3698 dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
3699 (unsigned)seq);
3700
a8599bd8
SW
3701 if (!inode) {
3702 dout(" i don't have ino %llx\n", vino.ino);
3d7ded4d 3703
a096b09a 3704 if (op == CEPH_CAP_OP_IMPORT) {
745a8e3b
YZ
3705 cap = ceph_get_cap(mdsc, NULL);
3706 cap->cap_ino = vino.ino;
3707 cap->queue_release = 1;
779fe0fb 3708 cap->cap_id = le64_to_cpu(h->cap_id);
745a8e3b
YZ
3709 cap->mseq = mseq;
3710 cap->seq = seq;
dc24de82 3711 cap->issue_seq = seq;
a096b09a 3712 spin_lock(&session->s_cap_lock);
745a8e3b
YZ
3713 list_add_tail(&cap->session_caps,
3714 &session->s_cap_releases);
3715 session->s_num_cap_releases++;
a096b09a
YZ
3716 spin_unlock(&session->s_cap_lock);
3717 }
21b559de 3718 goto flush_cap_releases;
a8599bd8
SW
3719 }
3720
3721 /* these will work even if we don't have a cap yet */
3722 switch (op) {
3723 case CEPH_CAP_OP_FLUSHSNAP_ACK:
6df058c0 3724 handle_cap_flushsnap_ack(inode, tid, h, session);
a8599bd8
SW
3725 goto done;
3726
3727 case CEPH_CAP_OP_EXPORT:
11df2dfb
YZ
3728 handle_cap_export(inode, h, peer, session);
3729 goto done_unlocked;
a8599bd8
SW
3730
3731 case CEPH_CAP_OP_IMPORT:
982d6011
YZ
3732 realm = NULL;
3733 if (snaptrace_len) {
3734 down_write(&mdsc->snap_rwsem);
3735 ceph_update_snap_trace(mdsc, snaptrace,
3736 snaptrace + snaptrace_len,
3737 false, &realm);
3738 downgrade_write(&mdsc->snap_rwsem);
3739 } else {
3740 down_read(&mdsc->snap_rwsem);
3741 }
4ee6a914 3742 handle_cap_import(mdsc, inode, h, peer, session,
2cd698be 3743 &cap, &issued);
779fe0fb 3744 handle_cap_grant(mdsc, inode, h, &pool_ns,
fb01d1f8 3745 inline_version, inline_data, inline_len,
779fe0fb 3746 msg->middle, session, cap, issued);
982d6011
YZ
3747 if (realm)
3748 ceph_put_snap_realm(mdsc, realm);
2cd698be 3749 goto done_unlocked;
a8599bd8
SW
3750 }
3751
3752 /* the rest require a cap */
be655596 3753 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
3754 cap = __get_cap_for_mds(ceph_inode(inode), mds);
3755 if (!cap) {
9dbd412f 3756 dout(" no cap on %p ino %llx.%llx from mds%d\n",
a8599bd8 3757 inode, ceph_ino(inode), ceph_snap(inode), mds);
be655596 3758 spin_unlock(&ci->i_ceph_lock);
21b559de 3759 goto flush_cap_releases;
a8599bd8
SW
3760 }
3761
be655596 3762 /* note that each of these drops i_ceph_lock for us */
a8599bd8
SW
3763 switch (op) {
3764 case CEPH_CAP_OP_REVOKE:
3765 case CEPH_CAP_OP_GRANT:
2cd698be
YZ
3766 __ceph_caps_issued(ci, &issued);
3767 issued |= __ceph_caps_dirty(ci);
779fe0fb 3768 handle_cap_grant(mdsc, inode, h, &pool_ns,
fb01d1f8 3769 inline_version, inline_data, inline_len,
779fe0fb 3770 msg->middle, session, cap, issued);
15637c8b 3771 goto done_unlocked;
a8599bd8
SW
3772
3773 case CEPH_CAP_OP_FLUSH_ACK:
6df058c0 3774 handle_cap_flush_ack(inode, tid, h, session, cap);
a8599bd8
SW
3775 break;
3776
3777 case CEPH_CAP_OP_TRUNC:
3778 handle_cap_trunc(inode, h, session);
3779 break;
3780
3781 default:
be655596 3782 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
3783 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
3784 ceph_cap_op_name(op));
3785 }
3786
21b559de
GF
3787 goto done;
3788
3789flush_cap_releases:
3790 /*
745a8e3b 3791 * send any cap release message to try to move things
21b559de
GF
3792 * along for the mds (who clearly thinks we still have this
3793 * cap).
3794 */
21b559de
GF
3795 ceph_send_cap_releases(mdsc, session);
3796
a8599bd8 3797done:
15637c8b
SW
3798 mutex_unlock(&session->s_mutex);
3799done_unlocked:
e96a650a 3800 iput(inode);
779fe0fb 3801 ceph_put_string(pool_ns);
a8599bd8
SW
3802 return;
3803
3804bad:
3805 pr_err("ceph_handle_caps: corrupt message\n");
9ec7cab1 3806 ceph_msg_dump(msg);
a8599bd8
SW
3807 return;
3808}
3809
3810/*
3811 * Delayed work handler to process end of delayed cap release LRU list.
3812 */
void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	int flags = CHECK_CAPS_NODELAY;

	dout("check_delayed_caps\n");
	while (1) {
		spin_lock(&mdsc->cap_delay_lock);
		if (list_empty(&mdsc->cap_delay_list))
			break;	/* leaves loop with cap_delay_lock held */
		ci = list_first_entry(&mdsc->cap_delay_list,
				      struct ceph_inode_info,
				      i_cap_delay_list);
		/*
		 * Stop at the first entry that is neither flagged for an
		 * immediate flush (CEPH_I_FLUSH) nor past its hold timeout.
		 * The list is an LRU (see comment above), so presumably the
		 * remaining entries are not yet due either.
		 */
		if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max))
			break;	/* leaves loop with cap_delay_lock held */
		list_del_init(&ci->i_cap_delay_list);

		/*
		 * Pin the inode before dropping the lock so it cannot go
		 * away under us; igrab() returns NULL if the inode is
		 * already being torn down, in which case we skip it.
		 */
		inode = igrab(&ci->vfs_inode);
		spin_unlock(&mdsc->cap_delay_lock);

		if (inode) {
			dout("check_delayed_caps on %p\n", inode);
			ceph_check_caps(ci, flags, NULL);
			iput(inode);
		}
	}
	/* pairs with the spin_lock() taken just before a terminating break */
	spin_unlock(&mdsc->cap_delay_lock);
}
3843
afcdaea3
SW
3844/*
3845 * Flush all dirty caps to the mds
3846 */
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	struct inode *inode;

	dout("flush_dirty_caps\n");
	spin_lock(&mdsc->cap_dirty_lock);
	while (!list_empty(&mdsc->cap_dirty)) {
		ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
				      i_dirty_item);
		/*
		 * Take an inode reference before dropping cap_dirty_lock so
		 * the inode cannot be evicted while we flush it.
		 */
		inode = &ci->vfs_inode;
		ihold(inode);
		dout("flush_dirty_caps %p\n", inode);
		spin_unlock(&mdsc->cap_dirty_lock);
		/*
		 * Flush outside the spinlock; ceph_check_caps() presumably
		 * may sleep (it is never called under a spinlock here).
		 * CHECK_CAPS_FLUSH forces the dirty state out to the MDS.
		 */
		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
		iput(inode);
		spin_lock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	dout("flush_dirty_caps done\n");
}
3868
774a6a11
YZ
3869void __ceph_get_fmode(struct ceph_inode_info *ci, int fmode)
3870{
3871 int i;
3872 int bits = (fmode << 1) | 1;
3873 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
3874 if (bits & (1 << i))
3875 ci->i_nr_by_mode[i]++;
3876 }
3877}
3878
a8599bd8
SW
3879/*
3880 * Drop open file reference. If we were the last open file,
3881 * we may need to release capabilities to the MDS (or schedule
3882 * their delayed release).
3883 */
3884void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
3885{
774a6a11
YZ
3886 int i, last = 0;
3887 int bits = (fmode << 1) | 1;
be655596 3888 spin_lock(&ci->i_ceph_lock);
774a6a11
YZ
3889 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
3890 if (bits & (1 << i)) {
3891 BUG_ON(ci->i_nr_by_mode[i] == 0);
3892 if (--ci->i_nr_by_mode[i] == 0)
3893 last++;
3894 }
3895 }
3896 dout("put_fmode %p fmode %d {%d,%d,%d,%d}\n",
3897 &ci->vfs_inode, fmode,
3898 ci->i_nr_by_mode[0], ci->i_nr_by_mode[1],
3899 ci->i_nr_by_mode[2], ci->i_nr_by_mode[3]);
be655596 3900 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
3901
3902 if (last && ci->i_vino.snap == CEPH_NOSNAP)
3903 ceph_check_caps(ci, 0, NULL);
3904}
3905
3906/*
3907 * Helpers for embedding cap and dentry lease releases into mds
3908 * requests.
3909 *
3910 * @force is used by dentry_release (below) to force inclusion of a
3911 * record for the directory inode, even when there aren't any caps to
3912 * drop.
3913 */
3914int ceph_encode_inode_release(void **p, struct inode *inode,
3915 int mds, int drop, int unless, int force)
3916{
3917 struct ceph_inode_info *ci = ceph_inode(inode);
3918 struct ceph_cap *cap;
3919 struct ceph_mds_request_release *rel = *p;
ec97f88b 3920 int used, dirty;
a8599bd8 3921 int ret = 0;
a8599bd8 3922
be655596 3923 spin_lock(&ci->i_ceph_lock);
916623da 3924 used = __ceph_caps_used(ci);
ec97f88b 3925 dirty = __ceph_caps_dirty(ci);
916623da 3926
ec97f88b
SW
3927 dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
3928 inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
916623da
SW
3929 ceph_cap_string(unless));
3930
ec97f88b
SW
3931 /* only drop unused, clean caps */
3932 drop &= ~(used | dirty);
916623da 3933
a8599bd8
SW
3934 cap = __get_cap_for_mds(ci, mds);
3935 if (cap && __cap_is_valid(cap)) {
3936 if (force ||
3937 ((cap->issued & drop) &&
3938 (cap->issued & unless) == 0)) {
3939 if ((cap->issued & drop) &&
3940 (cap->issued & unless) == 0) {
bb137f84
YZ
3941 int wanted = __ceph_caps_wanted(ci);
3942 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
3943 wanted |= cap->mds_wanted;
3944 dout("encode_inode_release %p cap %p "
3945 "%s -> %s, wanted %s -> %s\n", inode, cap,
a8599bd8 3946 ceph_cap_string(cap->issued),
bb137f84
YZ
3947 ceph_cap_string(cap->issued & ~drop),
3948 ceph_cap_string(cap->mds_wanted),
3949 ceph_cap_string(wanted));
3950
a8599bd8
SW
3951 cap->issued &= ~drop;
3952 cap->implemented &= ~drop;
bb137f84 3953 cap->mds_wanted = wanted;
a8599bd8
SW
3954 } else {
3955 dout("encode_inode_release %p cap %p %s"
3956 " (force)\n", inode, cap,
3957 ceph_cap_string(cap->issued));
3958 }
3959
3960 rel->ino = cpu_to_le64(ceph_ino(inode));
3961 rel->cap_id = cpu_to_le64(cap->cap_id);
3962 rel->seq = cpu_to_le32(cap->seq);
08a0f24e 3963 rel->issue_seq = cpu_to_le32(cap->issue_seq);
a8599bd8 3964 rel->mseq = cpu_to_le32(cap->mseq);
fd7b95cd 3965 rel->caps = cpu_to_le32(cap->implemented);
a8599bd8
SW
3966 rel->wanted = cpu_to_le32(cap->mds_wanted);
3967 rel->dname_len = 0;
3968 rel->dname_seq = 0;
3969 *p += sizeof(*rel);
3970 ret = 1;
3971 } else {
3972 dout("encode_inode_release %p cap %p %s\n",
3973 inode, cap, ceph_cap_string(cap->issued));
3974 }
3975 }
be655596 3976 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
3977 return ret;
3978}
3979
3980int ceph_encode_dentry_release(void **p, struct dentry *dentry,
ca6c8ae0 3981 struct inode *dir,
a8599bd8
SW
3982 int mds, int drop, int unless)
3983{
ca6c8ae0 3984 struct dentry *parent = NULL;
a8599bd8
SW
3985 struct ceph_mds_request_release *rel = *p;
3986 struct ceph_dentry_info *di = ceph_dentry(dentry);
3987 int force = 0;
3988 int ret;
3989
3990 /*
3991 * force an record for the directory caps if we have a dentry lease.
be655596 3992 * this is racy (can't take i_ceph_lock and d_lock together), but it
a8599bd8
SW
3993 * doesn't have to be perfect; the mds will revoke anything we don't
3994 * release.
3995 */
3996 spin_lock(&dentry->d_lock);
3997 if (di->lease_session && di->lease_session->s_mds == mds)
3998 force = 1;
ca6c8ae0
JL
3999 if (!dir) {
4000 parent = dget(dentry->d_parent);
4001 dir = d_inode(parent);
4002 }
a8599bd8
SW
4003 spin_unlock(&dentry->d_lock);
4004
ca6c8ae0 4005 ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
adf0d687 4006 dput(parent);
a8599bd8
SW
4007
4008 spin_lock(&dentry->d_lock);
4009 if (ret && di->lease_session && di->lease_session->s_mds == mds) {
4010 dout("encode_dentry_release %p mds%d seq %d\n",
4011 dentry, mds, (int)di->lease_seq);
4012 rel->dname_len = cpu_to_le32(dentry->d_name.len);
4013 memcpy(*p, dentry->d_name.name, dentry->d_name.len);
4014 *p += dentry->d_name.len;
4015 rel->dname_seq = cpu_to_le32(di->lease_seq);
1dadcce3 4016 __ceph_mdsc_drop_dentry_lease(dentry);
a8599bd8
SW
4017 }
4018 spin_unlock(&dentry->d_lock);
4019 return ret;
4020}