/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
                                const struct cl_io_slice *slice);

/**
 * Returns true if \a io is a normal io, false for sendfile()/splice_{read,write}().
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
        struct vvp_io *vio = vvp_env_io(env);

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        return vio->cui_io_subtype == IO_NORMAL;
}

/**
 * For swapping layout: the file's layout may have changed, so to avoid
 * populating pages into the wrong stripe we have to verify that the layout
 * is still valid. This works because layout-swapping processes must hold
 * the group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
                               struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ccc_io *cio = ccc_env_io(env);
        bool rc = true;

        switch (io->ci_type) {
        case CIT_READ:
        case CIT_WRITE:
                /* no lock is needed here to check lli_layout_gen: we hold
                 * the extent lock, and the GROUP lock must be held to swap
                 * the layout */
                if (lli->lli_layout_gen != cio->cui_layout_gen) {
                        io->ci_need_restart = 1;
                        /* this will return a short read/write to the
                         * application */
                        io->ci_continue = 0;
                        rc = false;
                }
                /* fall through */
        case CIT_FAULT:
                /* fault is okay because we've already had a page. */
        default:
                break;
        }

        return rc;
}

/*****************************************************************************
 *
 * io operations.
 *
 */

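/**
 * Snapshot the inode's mtime before the fault is served so that
 * vvp_io_fault_start() can warn if an executable changes while the page
 * fault lock is being waited for.
 */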
static int vvp_io_fault_iter_init(const struct lu_env *env,
                                  const struct cl_io_slice *ios)
{
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct inode *inode = ccc_object_inode(ios->cis_obj);

        LASSERT(inode ==
                cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
        vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
        return 0;
}

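/**
 * Generic io finalization: when layout verification was requested and not
 * suppressed, refresh the layout and schedule a restart of the whole io if
 * its generation changed underneath us.
 */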
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct ccc_io *cio = cl2ccc_io(env, ios);

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, "ignore/verify layout %d/%d, layout version %d.\n",
               io->ci_ignore_layout, io->ci_verify_layout, cio->cui_layout_gen);

        if (!io->ci_ignore_layout && io->ci_verify_layout) {
                __u32 gen = 0;

                /* check layout version */
                ll_layout_refresh(ccc_object_inode(obj), &gen);
                io->ci_need_restart = cio->cui_layout_gen != gen;
                if (io->ci_need_restart)
                        CDEBUG(D_VFSTRACE, "layout changed from %d to %d.\n",
                               cio->cui_layout_gen, gen);
        }
}

static void vvp_io_fault_fini(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct cl_page *page = io->u.ci_fault.ft_page;

        CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));

        if (page != NULL) {
                lu_ref_del(&page->cp_reference, "fault", io);
                cl_page_put(env, page);
                io->u.ci_fault.ft_page = NULL;
        }
        vvp_io_fini(env, ios);
}

enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
{
        /*
         * we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas
         */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return CLM_WRITE;
        return CLM_READ;
}

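/**
 * Enqueue DLM extent locks covering any part of the user buffer that is
 * itself mmapped from a Lustre file, so that a page fault on the buffer in
 * the middle of this read/write cannot deadlock against the extent lock
 * the io already holds.
 */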
static int vvp_mmap_locks(const struct lu_env *env,
                          struct ccc_io *vio, struct cl_io *io)
{
        struct ccc_thread_info *cti = ccc_env_info(env);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct cl_lock_descr *descr = &cti->cti_descr;
        ldlm_policy_data_t policy;
        unsigned long addr;
        unsigned long seg;
        ssize_t count;
        int result;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        if (!cl_is_normalio(env, io))
                return 0;

        if (vio->cui_iov == NULL) /* nfs or loop back device write */
                return 0;

        /* No MM (e.g. NFS)? No vmas too. */
        if (mm == NULL)
                return 0;

        for (seg = 0; seg < vio->cui_nrsegs; seg++) {
                const struct iovec *iv = &vio->cui_iov[seg];

                addr = (unsigned long)iv->iov_base;
                count = iv->iov_len;
                if (count == 0)
                        continue;

                count += addr & (~CFS_PAGE_MASK);
                addr &= CFS_PAGE_MASK;

                down_read(&mm->mmap_sem);
                while ((vma = our_vma(mm, addr, count)) != NULL) {
                        struct inode *inode = vma->vm_file->f_dentry->d_inode;
                        int flags = CEF_MUST;

                        if (ll_file_nolock(vma->vm_file)) {
                                /*
                                 * For no lock case, a lockless lock will be
                                 * generated.
                                 */
                                flags = CEF_NEVER;
                        }

                        /*
                         * XXX: Required lock mode can be weakened: CIT_WRITE
                         * io only ever reads user level buffer, and CIT_READ
                         * only writes on it.
                         */
                        policy_from_vma(&policy, vma, addr, count);
                        descr->cld_mode = vvp_mode_from_vma(vma);
                        descr->cld_obj = ll_i2info(inode)->lli_clob;
                        descr->cld_start = cl_index(descr->cld_obj,
                                                    policy.l_extent.start);
                        descr->cld_end = cl_index(descr->cld_obj,
                                                  policy.l_extent.end);
                        descr->cld_enq_flags = flags;
                        result = cl_io_lock_alloc_add(env, io, descr);

                        CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
                               descr->cld_mode, descr->cld_start,
                               descr->cld_end);

                        if (result < 0) {
                                /* don't leak mmap_sem on the error path */
                                up_read(&mm->mmap_sem);
                                return result;
                        }

                        if (vma->vm_end - addr >= count)
                                break;

                        count -= vma->vm_end - addr;
                        addr = vma->vm_end;
                }
                up_read(&mm->mmap_sem);
        }
        return 0;
}

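/**
 * Take the extent lock for a read or write: refresh the iovec bookkeeping,
 * lock any mmapped regions of the user buffer first, then enqueue a single
 * [start, end] lock, non-blocking if the io requested it.
 */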
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
                          enum cl_lock_mode mode, loff_t start, loff_t end)
{
        struct ccc_io *cio = ccc_env_io(env);
        int result;
        int ast_flags = 0;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        ccc_io_update_iov(env, cio, io);

        if (io->u.ci_rw.crw_nonblock)
                ast_flags |= CEF_NONBLOCK;
        result = vvp_mmap_locks(env, cio, io);
        if (result == 0)
                result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
        return result;
}

static int vvp_io_read_lock(const struct lu_env *env,
                            const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
        int result;

        /* XXX: Layer violation, we shouldn't see lsm at llite level. */
        if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
                result = vvp_io_rw_lock(env, io, CLM_READ,
                                        io->u.ci_rd.rd.crw_pos,
                                        io->u.ci_rd.rd.crw_pos +
                                        io->u.ci_rd.rd.crw_count - 1);
        else
                result = 0;
        return result;
}

static int vvp_io_fault_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct vvp_io *vio = cl2vvp_io(env, ios);
        /*
         * XXX LDLM_FL_CBPENDING
         */
        return ccc_io_one_lock_index
                (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
                 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
}

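/**
 * An append may land anywhere, so it must lock the whole file [0, EOF];
 * a plain write locks only the extent being written.
 */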
static int vvp_io_write_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        loff_t start;
        loff_t end;

        if (io->u.ci_wr.wr_append) {
                start = 0;
                end = OBD_OBJECT_EOF;
        } else {
                start = io->u.ci_wr.wr.crw_pos;
                end = start + io->u.ci_wr.wr.crw_count - 1;
        }
        return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}

static int vvp_io_setattr_iter_init(const struct lu_env *env,
                                    const struct cl_io_slice *ios)
{
        return 0;
}

/**
 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
 *
 * Handles "lockless io" mode when extent locking is done by server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct ccc_io *cio = ccc_env_io(env);
        struct cl_io *io = ios->cis_io;
        __u64 new_size;
        __u32 enqflags = 0;

        if (cl_io_is_trunc(io)) {
                new_size = io->u.ci_setattr.sa_attr.lvb_size;
                if (new_size == 0)
                        enqflags = CEF_DISCARD_DATA;
        } else {
                if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
                     io->u.ci_setattr.sa_attr.lvb_ctime) ||
                    (io->u.ci_setattr.sa_attr.lvb_atime >=
                     io->u.ci_setattr.sa_attr.lvb_ctime))
                        return 0;
                new_size = 0;
        }
        cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
        return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
                               new_size, OBD_OBJECT_EOF);
}

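/**
 * Update the in-memory inode size and shrink the page cache accordingly,
 * all under ll_inode_size_lock(); fails without side effects if the new
 * size is not permitted by inode_newsize_ok().
 */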
static int vvp_do_vmtruncate(struct inode *inode, size_t size)
{
        int result;

        /*
         * Only ll_inode_size_lock is taken at this level.
         */
        ll_inode_size_lock(inode);
        result = inode_newsize_ok(inode, size);
        if (result < 0) {
                ll_inode_size_unlock(inode);
                return result;
        }
        truncate_setsize(inode, size);
        ll_inode_size_unlock(inode);
        return result;
}

static int vvp_io_setattr_trunc(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                struct inode *inode, loff_t size)
{
        inode_dio_wait(inode);
        return 0;
}

static int vvp_io_setattr_time(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct cl_attr *attr = ccc_env_thread_attr(env);
        int result;
        unsigned valid = CAT_CTIME;

        cl_object_attr_lock(obj);
        attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
        if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
                attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
                valid |= CAT_ATIME;
        }
        if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
                attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
                valid |= CAT_MTIME;
        }
        result = cl_object_attr_set(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

        return result;
}

static int vvp_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct inode *inode = ccc_object_inode(io->ci_obj);
        int result = 0;

        mutex_lock(&inode->i_mutex);
        if (cl_io_is_trunc(io))
                result = vvp_io_setattr_trunc(env, ios, inode,
                                        io->u.ci_setattr.sa_attr.lvb_size);
        if (result == 0)
                result = vvp_io_setattr_time(env, ios);
        return result;
}

static void vvp_io_setattr_end(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct inode *inode = ccc_object_inode(io->ci_obj);

        if (cl_io_is_trunc(io)) {
                /* Truncate in memory pages - they must be clean pages
                 * because osc has already notified to destroy osc_extents. */
                vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
                inode_dio_write_done(inode);
        }
        mutex_unlock(&inode->i_mutex);
}

static void vvp_io_setattr_fini(const struct lu_env *env,
                                const struct cl_io_slice *ios)
{
        vvp_io_fini(env, ios);
}

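/*
 * Thin wrappers feeding the iocb and iovec cached in ccc_io to the generic
 * AIO paths; Lustre drives them with its own locking and readahead already
 * in place.
 */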
static ssize_t lustre_generic_file_read(struct file *file,
                                        struct ccc_io *vio, loff_t *ppos)
{
        return generic_file_aio_read(vio->cui_iocb, vio->cui_iov,
                                     vio->cui_nrsegs, *ppos);
}

static ssize_t lustre_generic_file_write(struct file *file,
                                         struct ccc_io *vio, loff_t *ppos)
{
        return generic_file_aio_write(vio->cui_iocb, vio->cui_iov,
                                      vio->cui_nrsegs, *ppos);
}

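/**
 * Start a read: verify the layout is still usable, clamp against the
 * KMS-protected file size, replace the kernel's readahead with Lustre's
 * per-syscall window, then hand off to the generic read or splice path.
 */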
static int vvp_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct ccc_io *cio = cl2ccc_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        struct ll_ra_read *bead = &vio->cui_bead;
        struct file *file = cio->cui_fd->fd_file;

        int result;
        loff_t pos = io->u.ci_rd.rd.crw_pos;
        long cnt = io->u.ci_rd.rd.crw_count;
        long tot = cio->cui_tot_count;
        int exceed = 0;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

        if (!can_populate_pages(env, io, inode))
                return 0;

        result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
        if (result != 0)
                return result;
        else if (exceed != 0)
                goto out;

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
                         "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
                         inode->i_ino, cnt, pos, i_size_read(inode));

        /* turn off the kernel's read-ahead */
        cio->cui_fd->fd_file->f_ra.ra_pages = 0;

        /* initialize read-ahead window once per syscall */
        if (!vio->cui_ra_window_set) {
                vio->cui_ra_window_set = 1;
                bead->lrr_start = cl_index(obj, pos);
                /*
                 * XXX: explicit PAGE_CACHE_SIZE
                 */
                bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
                ll_ra_read_in(file, bead);
        }

        /* BUG: 5972 */
        file_accessed(file);
        switch (vio->cui_io_subtype) {
        case IO_NORMAL:
                result = lustre_generic_file_read(file, cio, &pos);
                break;
        case IO_SPLICE:
                result = generic_file_splice_read(file, &pos,
                                                  vio->u.splice.cui_pipe, cnt,
                                                  vio->u.splice.cui_flags);
                /* LU-1109: do splice read stripe by stripe, otherwise it may
                 * make nfsd stuck if this read occupies all internal pipe
                 * buffers. */
                io->ci_continue = 0;
                break;
        default:
                CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
                LBUG();
        }

out:
        if (result >= 0) {
                if (result < cnt)
                        io->ci_continue = 0;
                io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
                                  cio->cui_fd, pos, result, READ);
                result = 0;
        }
        return result;
}

static void vvp_io_read_fini(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct ccc_io *cio = cl2ccc_io(env, ios);

        if (vio->cui_ra_window_set)
                ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

        vvp_io_fini(env, ios);
}

static int vvp_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct ccc_io *cio = cl2ccc_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        struct file *file = cio->cui_fd->fd_file;
        ssize_t result = 0;
        loff_t pos = io->u.ci_wr.wr.crw_pos;
        size_t cnt = io->u.ci_wr.wr.crw_count;

        if (!can_populate_pages(env, io, inode))
                return 0;

        if (cl_io_is_append(io)) {
                /*
                 * PARALLEL IO This has to be changed for parallel IO doing
                 * out-of-order writes.
                 */
                pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
                cio->cui_iocb->ki_pos = pos;
        }

        CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

        if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
                result = 0;
        else
                result = lustre_generic_file_write(file, cio, &pos);

        if (result > 0) {
                if (result < cnt)
                        io->ci_continue = 0;
                io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
                                  cio->cui_fd, pos, result, WRITE);
                result = 0;
        }
        return result;
}

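/**
 * Run the kernel's filemap_fault() for the faulting address. On success the
 * vmpage is returned locked in cfio->ft_vmpage; otherwise the VM_FAULT_*
 * flags are translated into an errno.
 */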
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
        struct vm_fault *vmf = cfio->fault.ft_vmf;

        cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);

        if (vmf->page) {
                LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
                               vmf->virtual_address);
                if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
                        lock_page(vmf->page);
                        /* we locked the page ourselves, so record that with
                         * "|=": "&=" would wipe every other fault flag */
                        cfio->fault.ft_flags |= VM_FAULT_LOCKED;
                }

                cfio->ft_vmpage = vmf->page;
                return 0;
        }

        if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
                CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
                return -EFAULT;
        }

        if (cfio->fault.ft_flags & VM_FAULT_OOM) {
                CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
                return -ENOMEM;
        }

        if (cfio->fault.ft_flags & VM_FAULT_RETRY)
                return -EAGAIN;

        CERROR("unknown error in page fault %d!\n", cfio->fault.ft_flags);
        return -EINVAL;
}

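/**
 * Handle CIT_FAULT: obtain a locked vmpage (from vvp_io_kernel_fault(), or
 * from the caller for mkwrite), detect fault/truncate races, attach a
 * cl_page, and for mkwrite insert the page into the cache so it is covered
 * by a lock before the client dirties it.
 */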
static int vvp_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        struct cl_fault_io *fio = &io->u.ci_fault;
        struct vvp_fault_io *cfio = &vio->u.fault;
        loff_t offset;
        int result = 0;
        struct page *vmpage = NULL;
        struct cl_page *page;
        loff_t size;
        pgoff_t last; /* last page in a file data region */

        if (fio->ft_executable &&
            LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
                CWARN("binary "DFID
                      " changed while waiting for the page fault lock\n",
                      PFID(lu_object_fid(&obj->co_lu)));

        /* offset of the last byte on the page */
        offset = cl_offset(obj, fio->ft_index + 1) - 1;
        LASSERT(cl_index(obj, offset) == fio->ft_index);
        result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
        if (result != 0)
                return result;

        /* must return locked page */
        if (fio->ft_mkwrite) {
                LASSERT(cfio->ft_vmpage != NULL);
                lock_page(cfio->ft_vmpage);
        } else {
                result = vvp_io_kernel_fault(cfio);
                if (result != 0)
                        return result;
        }

        vmpage = cfio->ft_vmpage;
        LASSERT(PageLocked(vmpage));

        if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
                ll_invalidate_page(vmpage);

        size = i_size_read(inode);
        /* Even though we have already held a cl_lock upon this page, it
         * still can be truncated locally. */
        if (unlikely((vmpage->mapping != inode->i_mapping) ||
                     (page_offset(vmpage) > size))) {
                CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

                /* return +1 to stop cl_io_loop() and ll_fault() will catch
                 * and retry. */
                GOTO(out, result = +1);
        }

        if (fio->ft_mkwrite) {
                pgoff_t last_index;
                /*
                 * Capture the size while holding the lli_trunc_sem from
                 * above: we want to make sure that we complete the mkwrite
                 * action while holding this lock. We need to make sure that
                 * we are not past the end of the file.
                 */
                last_index = cl_index(obj, size - 1);
                if (last_index < fio->ft_index) {
                        CDEBUG(D_PAGE,
                               "llite: mkwrite and truncate race happened: "
                               "%p: 0x%lx 0x%lx\n",
                               vmpage->mapping, fio->ft_index, last_index);
                        /*
                         * We need to return if we are past the end of the
                         * file. This will propagate up the call stack to
                         * ll_page_mkwrite where we will return
                         * VM_FAULT_NOPAGE. Any non-negative value returned
                         * here will be silently converted to 0. If the
                         * vmpage->mapping is null the error code would be
                         * converted back to ENODATA in ll_page_mkwrite0.
                         * Thus we return -ENODATA to handle both cases.
                         */
                        GOTO(out, result = -ENODATA);
                }
        }

        page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                GOTO(out, result = PTR_ERR(page));

        /* if page is going to be written, we should add this page into cache
         * earlier. */
        if (fio->ft_mkwrite) {
                wait_on_page_writeback(vmpage);
                if (set_page_dirty(vmpage)) {
                        struct ccc_page *cp;

                        /* vvp_page_assume() calls wait_on_page_writeback(). */
                        cl_page_assume(env, io, page);

                        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
                        vvp_write_pending(cl2ccc(obj), cp);

                        /* Do not set Dirty bit here so that in case IO is
                         * started before the page is really made dirty, we
                         * still have chance to detect it. */
                        result = cl_page_cache_add(env, io, page, CRT_WRITE);
                        LASSERT(cl_page_is_owned(page, io));

                        vmpage = NULL;
                        if (result < 0) {
                                cl_page_unmap(env, io, page);
                                cl_page_discard(env, io, page);
                                cl_page_disown(env, io, page);

                                cl_page_put(env, page);

                                /* we're in big trouble, what can we do now? */
                                if (result == -EDQUOT)
                                        result = -ENOSPC;
                                GOTO(out, result);
                        } else
                                cl_page_disown(env, io, page);
                }
        }

        last = cl_index(obj, size - 1);
        /*
         * The ft_index is only used in the case of a mkwrite action. We
         * need to check that our assertions are correct, since we should
         * have caught this above.
         */
        LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
        if (fio->ft_index == last)
                /*
                 * Last page is mapped partially.
                 */
                fio->ft_nob = size - cl_offset(obj, fio->ft_index);
        else
                fio->ft_nob = cl_page_size(obj);

        lu_ref_add(&page->cp_reference, "fault", io);
        fio->ft_page = page;

out:
        /* return unlocked vmpage to avoid deadlocking */
        if (vmpage != NULL)
                unlock_page(vmpage);
        cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
        return result;
}

static int vvp_io_fsync_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        /* we should mark the TOWRITE bit on each dirty page in the radix
         * tree to verify that pages have been written out, but this is
         * difficult to do without races. */
        return 0;
}

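/**
 * Read a single page: update the readahead state machine, check that the
 * page is protected by a lock, and queue it (plus any readahead pages)
 * for transfer.
 */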
static int vvp_io_read_page(const struct lu_env *env,
                            const struct cl_io_slice *ios,
                            const struct cl_page_slice *slice)
{
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = slice->cpl_obj;
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *page = slice->cpl_page;
        struct inode *inode = ccc_object_inode(obj);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
        struct ll_readahead_state *ras = &fd->fd_ras;
        struct page *vmpage = cp->cpg_page;
        struct cl_2queue *queue = &io->ci_queue;
        int rc;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
        LASSERT(slice->cpl_obj == obj);

        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ras_update(sbi, inode, ras, page->cp_index,
                           cp->cpg_defer_uptodate);

        /* Sanity check whether the page is protected by a lock. */
        rc = cl_page_is_under_lock(env, io, page);
        if (rc != -EBUSY) {
                CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
                               rc == -ENODATA ? "without a lock" :
                               "match failed", rc);
                if (rc != -ENODATA)
                        return rc;
        }

        if (cp->cpg_defer_uptodate) {
                cp->cpg_ra_used = 1;
                cl_page_export(env, page, 1);
        }
        /*
         * Add page into the queue even when it is marked uptodate above.
         * this will unlock it automatically as part of cl_page_list_disown().
         */
        cl_2queue_add(queue, page);
        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ll_readahead(env, io, ras,
                             vmpage->mapping, &queue->c2_qin, fd->fd_flags);

        return 0;
}

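/**
 * Submit a single page for synchronous io as \a crt and wait for it to
 * complete; read pages are disowned afterwards, while write pages stay
 * locked even on error.
 */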
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
                            struct cl_page *page, struct ccc_page *cp,
                            enum cl_req_type crt)
{
        struct cl_2queue *queue;
        int result;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        queue = &io->ci_queue;
        cl_2queue_init_page(queue, page);

        result = cl_io_submit_sync(env, io, crt, queue, 0);
        LASSERT(cl_page_is_owned(page, io));

        if (crt == CRT_READ)
                /*
                 * in CRT_WRITE case page is left locked even in case of
                 * error.
                 */
                cl_page_list_disown(env, io, &queue->c2_qin);
        cl_2queue_fini(env, queue);

        return result;
}

/**
 * Prepare partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
                                  struct cl_object *obj, struct cl_page *pg,
                                  struct ccc_page *cp,
                                  unsigned from, unsigned to)
{
        struct cl_attr *attr = ccc_env_thread_attr(env);
        loff_t offset = cl_offset(obj, pg->cp_index);
        int result;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result == 0) {
                /*
                 * If we are writing to a new page, no need to read old data.
                 * The extent locking will have updated the KMS, and for our
                 * purposes here we can treat it like i_size.
                 */
                if (attr->cat_kms <= offset) {
                        char *kaddr = kmap_atomic(cp->cpg_page);

                        memset(kaddr, 0, cl_page_size(obj));
                        kunmap_atomic(kaddr);
                } else if (cp->cpg_defer_uptodate)
                        cp->cpg_ra_used = 1;
                else
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
                /*
                 * In older implementations, obdo_refresh_inode is called here
                 * to update the inode because the write might modify the
                 * object info at OST. However, this has been proven useless,
                 * since LVB functions will be called when user space program
                 * tries to retrieve inode attribute. Also, see bug 15909 for
                 * details. -jay
                 */
                if (result == 0)
                        cl_page_export(env, pg, 1);
        }
        return result;
}

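/**
 * Called before data is copied into a page: a full-page overwrite needs no
 * preparation, while a partial write may first have to read in the rest of
 * the page.
 */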
static int vvp_io_prepare_write(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                const struct cl_page_slice *slice,
                                unsigned from, unsigned to)
{
        struct cl_object *obj = slice->cpl_obj;
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *pg = slice->cpl_page;
        struct page *vmpage = cp->cpg_page;

        int result;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

        result = 0;

        CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
        if (!PageUptodate(vmpage)) {
                /*
                 * We're completely overwriting an existing page, so _don't_
                 * set it up to date until commit_write
                 */
                if (from == 0 && to == PAGE_CACHE_SIZE) {
                        CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
                        POISON_PAGE(page, 0x11);
                } else
                        result = vvp_io_prepare_partial(env, ios->cis_io, obj,
                                                        pg, cp, from, to);
        } else
                CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
        return result;
}

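/**
 * Complete a write to a page: add the page to the write cache (starting
 * write-back early, see the comment below), fall back to a sync write when
 * the grant is exhausted, mark the inode data-modified, and update the
 * file size if the write extended it.
 */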
static int vvp_io_commit_write(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               const struct cl_page_slice *slice,
                               unsigned from, unsigned to)
{
        struct cl_object *obj = slice->cpl_obj;
        struct cl_io *io = ios->cis_io;
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *pg = slice->cpl_page;
        struct inode *inode = ccc_object_inode(obj);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ll_inode_info *lli = ll_i2info(inode);
        struct page *vmpage = cp->cpg_page;

        int result;
        int tallyop;
        loff_t size;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(vmpage->mapping->host == inode);

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
        CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

        /*
         * queue a write for some time in the future the first time we
         * dirty the page.
         *
         * This is different from what other file systems do: they usually
         * just mark page (and some of its buffers) dirty and rely on
         * balance_dirty_pages() to start a write-back. Lustre wants write-back
         * to be started earlier for the following reasons:
         *
         * (1) with a large number of clients we need to limit the amount
         * of cached data on the clients a lot;
         *
         * (2) large compute jobs generally want compute-only then io-only
         * and the IO should complete as quickly as possible;
         *
         * (3) IO is batched up to the RPC size and is async until the
         * client max cache is hit
         * (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
         *
         */
        if (!PageDirty(vmpage)) {
                tallyop = LPROC_LL_DIRTY_MISSES;
                result = cl_page_cache_add(env, io, pg, CRT_WRITE);
                if (result == 0) {
                        /* page was added into cache successfully. */
                        set_page_dirty(vmpage);
                        vvp_write_pending(cl2ccc(obj), cp);
                } else if (result == -EDQUOT) {
                        pgoff_t last_index = i_size_read(inode) >>
                                             PAGE_CACHE_SHIFT;
                        bool need_clip = true;

                        /*
                         * Client ran out of disk space grant. Possible
                         * strategies are:
                         *
                         * (a) do a sync write, renewing grant;
                         *
                         * (b) stop writing on this stripe, switch to the
                         * next one.
                         *
                         * (b) is a part of "parallel io" design that is the
                         * ultimate goal. (a) is what "old" client did, and
                         * what the new code continues to do for the time
                         * being.
                         */
                        if (last_index > pg->cp_index) {
                                to = PAGE_CACHE_SIZE;
                                need_clip = false;
                        } else if (last_index == pg->cp_index) {
                                int size_to = i_size_read(inode) &
                                              ~CFS_PAGE_MASK;

                                if (to < size_to)
                                        to = size_to;
                        }
                        if (need_clip)
                                cl_page_clip(env, pg, 0, to);
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
                        if (result)
                                CERROR("Write page %lu of inode %p failed %d\n",
                                       pg->cp_index, inode, result);
                }
        } else {
                tallyop = LPROC_LL_DIRTY_HITS;
                result = 0;
        }
        ll_stats_ops_tally(sbi, tallyop, 1);

        /* Inode should be marked DIRTY even if no new page was marked DIRTY
         * because the page could have been left unflushed between two
         * modifications. It is important that the file be marked DIRTY as
         * soon as the I/O is done: indeed, when the cache is flushed, the
         * file could already be closed, and it would then be too late to
         * warn the MDT. It is acceptable that the file is marked DIRTY even
         * if the I/O is dropped for some reason before being flushed to the
         * OST.
         */
        if (result == 0) {
                spin_lock(&lli->lli_lock);
                lli->lli_flags |= LLIF_DATA_MODIFIED;
                spin_unlock(&lli->lli_lock);
        }

        size = cl_offset(obj, pg->cp_index) + to;

        ll_inode_size_lock(inode);
        if (result == 0) {
                if (size > i_size_read(inode)) {
                        cl_isize_write_nolock(inode, size);
                        CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
                               PFID(lu_object_fid(&obj->co_lu)),
                               (unsigned long)size);
                }
                cl_page_export(env, pg, 1);
        } else {
                if (size > i_size_read(inode))
                        cl_page_discard(env, io, pg);
        }
        ll_inode_size_unlock(inode);
        return result;
}

static const struct cl_io_operations vvp_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini      = vvp_io_read_fini,
                        .cio_lock      = vvp_io_read_lock,
                        .cio_start     = vvp_io_read_start,
                        .cio_advance   = ccc_io_advance
                },
                [CIT_WRITE] = {
                        .cio_fini      = vvp_io_fini,
                        .cio_lock      = vvp_io_write_lock,
                        .cio_start     = vvp_io_write_start,
                        .cio_advance   = ccc_io_advance
                },
                [CIT_SETATTR] = {
                        .cio_fini      = vvp_io_setattr_fini,
                        .cio_iter_init = vvp_io_setattr_iter_init,
                        .cio_lock      = vvp_io_setattr_lock,
                        .cio_start     = vvp_io_setattr_start,
                        .cio_end       = vvp_io_setattr_end
                },
                [CIT_FAULT] = {
                        .cio_fini      = vvp_io_fault_fini,
                        .cio_iter_init = vvp_io_fault_iter_init,
                        .cio_lock      = vvp_io_fault_lock,
                        .cio_start     = vvp_io_fault_start,
                        .cio_end       = ccc_io_end
                },
                [CIT_FSYNC] = {
                        .cio_start     = vvp_io_fsync_start,
                        .cio_fini      = vvp_io_fini
                },
                [CIT_MISC] = {
                        .cio_fini      = vvp_io_fini
                }
        },
        .cio_read_page     = vvp_io_read_page,
        .cio_prepare_write = vvp_io_prepare_write,
        .cio_commit_write  = vvp_io_commit_write
};

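/**
 * Per-io initialization: attach the vvp io slice, record the total byte
 * count and job id for read/write, and refresh the layout lock so that a
 * layout change during the io can be detected.
 */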
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_io *io)
{
        struct vvp_io *vio = vvp_env_io(env);
        struct ccc_io *cio = ccc_env_io(env);
        struct inode *inode = ccc_object_inode(obj);
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CL_IO_SLICE_CLEAN(cio, cui_cl);
        cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
        vio->cui_ra_window_set = 0;
        result = 0;
        if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
                size_t count;
                struct ll_inode_info *lli = ll_i2info(inode);

                count = io->u.ci_rw.crw_count;
                /* "If nbyte is 0, read() will return 0 and have no other
                 * results." -- Single Unix Spec */
                if (count == 0)
                        result = 1;
                else {
                        cio->cui_tot_count = count;
                        cio->cui_tot_nrsegs = 0;
                }
                /* for read/write, we store the jobid in the inode, and
                 * it'll be fetched by osc when building RPC.
                 *
                 * it's not accurate if the file is shared by different
                 * jobs.
                 */
                lustre_get_jobid(lli->lli_jobid);
        } else if (io->ci_type == CIT_SETATTR) {
                if (!cl_io_is_trunc(io))
                        io->ci_lockreq = CILR_MANDATORY;
        }

        /* ignore layout change for generic CIT_MISC but not for glimpse.
         * io context for glimpse must set ci_verify_layout to true,
         * see cl_glimpse_size0() for details. */
        if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
                io->ci_ignore_layout = 1;

        /* Enqueue layout lock and get layout version. We need to do this
         * even for operations requiring to open file, such as read and write,
         * because it might not grant layout lock in IT_OPEN. */
        if (result == 0 && !io->ci_ignore_layout) {
                result = ll_layout_refresh(inode, &cio->cui_layout_gen);
                if (result == -ENOENT)
                        /* If the inode on MDS has been removed, but the
                         * objects on OSTs haven't been destroyed (async
                         * unlink), layout fetch will return -ENOENT; we'd
                         * ignore this error and continue with dirty flush.
                         * LU-3230. */
                        result = 0;
                if (result < 0)
                        CERROR("%s: refresh file layout " DFID " error %d.\n",
                               ll_get_fsname(inode->i_sb, NULL, 0),
                               PFID(lu_object_fid(&obj->co_lu)), result);
        }

        return result;
}

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        /* Calling cl2ccc_io() just for its assertions. */
        cl2ccc_io(env, slice);
        return vvp_env_io(env);
}