// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
8 #include <linux/slab.h>
9 #include <linux/mempool.h>
10 #include <linux/delay.h>
/* Deferred freeing of a request; runs from the request's cleanup_work. */
static void netfs_free_request(struct work_struct *work);
16 * Allocate an I/O request and initialise it.
18 struct netfs_io_request
*netfs_alloc_request(struct address_space
*mapping
,
20 loff_t start
, size_t len
,
21 enum netfs_io_origin origin
)
23 static atomic_t debug_ids
;
24 struct inode
*inode
= file
? file_inode(file
) : mapping
->host
;
25 struct netfs_inode
*ctx
= netfs_inode(inode
);
26 struct netfs_io_request
*rreq
;
27 mempool_t
*mempool
= ctx
->ops
->request_pool
?: &netfs_request_pool
;
28 struct kmem_cache
*cache
= mempool
->pool_data
;
32 rreq
= mempool_alloc(mempool
, GFP_KERNEL
);
38 memset(rreq
, 0, kmem_cache_size(cache
));
39 INIT_WORK(&rreq
->cleanup_work
, netfs_free_request
);
42 rreq
->origin
= origin
;
43 rreq
->netfs_ops
= ctx
->ops
;
44 rreq
->mapping
= mapping
;
46 rreq
->i_size
= i_size_read(inode
);
47 rreq
->debug_id
= atomic_inc_return(&debug_ids
);
48 rreq
->wsize
= INT_MAX
;
49 rreq
->io_streams
[0].sreq_max_len
= ULONG_MAX
;
50 rreq
->io_streams
[0].sreq_max_segs
= 0;
51 spin_lock_init(&rreq
->lock
);
52 INIT_LIST_HEAD(&rreq
->io_streams
[0].subrequests
);
53 INIT_LIST_HEAD(&rreq
->io_streams
[1].subrequests
);
54 init_waitqueue_head(&rreq
->waitq
);
55 refcount_set(&rreq
->ref
, 2);
57 if (origin
== NETFS_READAHEAD
||
58 origin
== NETFS_READPAGE
||
59 origin
== NETFS_READ_GAPS
||
60 origin
== NETFS_READ_SINGLE
||
61 origin
== NETFS_READ_FOR_WRITE
||
62 origin
== NETFS_UNBUFFERED_READ
||
63 origin
== NETFS_DIO_READ
) {
64 INIT_WORK(&rreq
->work
, netfs_read_collection_worker
);
65 rreq
->io_streams
[0].avail
= true;
67 INIT_WORK(&rreq
->work
, netfs_write_collection_worker
);
70 __set_bit(NETFS_RREQ_IN_PROGRESS
, &rreq
->flags
);
71 if (rreq
->netfs_ops
->init_request
) {
72 ret
= rreq
->netfs_ops
->init_request(rreq
, file
);
74 mempool_free(rreq
, rreq
->netfs_ops
->request_pool
?: &netfs_request_pool
);
79 atomic_inc(&ctx
->io_count
);
80 trace_netfs_rreq_ref(rreq
->debug_id
, refcount_read(&rreq
->ref
), netfs_rreq_trace_new
);
81 netfs_proc_add_rreq(rreq
);
82 netfs_stat(&netfs_n_rh_rreq
);
86 void netfs_get_request(struct netfs_io_request
*rreq
, enum netfs_rreq_ref_trace what
)
90 __refcount_inc(&rreq
->ref
, &r
);
91 trace_netfs_rreq_ref(rreq
->debug_id
, r
+ 1, what
);
94 void netfs_clear_subrequests(struct netfs_io_request
*rreq
)
96 struct netfs_io_subrequest
*subreq
;
97 struct netfs_io_stream
*stream
;
100 for (s
= 0; s
< ARRAY_SIZE(rreq
->io_streams
); s
++) {
101 stream
= &rreq
->io_streams
[s
];
102 while (!list_empty(&stream
->subrequests
)) {
103 subreq
= list_first_entry(&stream
->subrequests
,
104 struct netfs_io_subrequest
, rreq_link
);
105 list_del(&subreq
->rreq_link
);
106 netfs_put_subrequest(subreq
, netfs_sreq_trace_put_clear
);
111 static void netfs_free_request_rcu(struct rcu_head
*rcu
)
113 struct netfs_io_request
*rreq
= container_of(rcu
, struct netfs_io_request
, rcu
);
115 mempool_free(rreq
, rreq
->netfs_ops
->request_pool
?: &netfs_request_pool
);
116 netfs_stat_d(&netfs_n_rh_rreq
);
119 static void netfs_free_request(struct work_struct
*work
)
121 struct netfs_io_request
*rreq
=
122 container_of(work
, struct netfs_io_request
, cleanup_work
);
123 struct netfs_inode
*ictx
= netfs_inode(rreq
->inode
);
126 trace_netfs_rreq(rreq
, netfs_rreq_trace_free
);
128 /* Cancel/flush the result collection worker. That does not carry a
129 * ref of its own, so we must wait for it somewhere.
131 cancel_work_sync(&rreq
->work
);
133 netfs_proc_del_rreq(rreq
);
134 netfs_clear_subrequests(rreq
);
135 if (rreq
->netfs_ops
->free_request
)
136 rreq
->netfs_ops
->free_request(rreq
);
137 if (rreq
->cache_resources
.ops
)
138 rreq
->cache_resources
.ops
->end_operation(&rreq
->cache_resources
);
139 if (rreq
->direct_bv
) {
140 for (i
= 0; i
< rreq
->direct_bv_count
; i
++) {
141 if (rreq
->direct_bv
[i
].bv_page
) {
142 if (rreq
->direct_bv_unpin
)
143 unpin_user_page(rreq
->direct_bv
[i
].bv_page
);
146 kvfree(rreq
->direct_bv
);
148 rolling_buffer_clear(&rreq
->buffer
);
150 if (atomic_dec_and_test(&ictx
->io_count
))
151 wake_up_var(&ictx
->io_count
);
152 call_rcu(&rreq
->rcu
, netfs_free_request_rcu
);
155 void netfs_put_request(struct netfs_io_request
*rreq
, enum netfs_rreq_ref_trace what
)
157 unsigned int debug_id
;
162 debug_id
= rreq
->debug_id
;
163 dead
= __refcount_dec_and_test(&rreq
->ref
, &r
);
164 trace_netfs_rreq_ref(debug_id
, r
- 1, what
);
166 WARN_ON(!queue_work(system_unbound_wq
, &rreq
->cleanup_work
));
171 * Allocate and partially initialise an I/O request structure.
173 struct netfs_io_subrequest
*netfs_alloc_subrequest(struct netfs_io_request
*rreq
)
175 struct netfs_io_subrequest
*subreq
;
176 mempool_t
*mempool
= rreq
->netfs_ops
->subrequest_pool
?: &netfs_subrequest_pool
;
177 struct kmem_cache
*cache
= mempool
->pool_data
;
180 subreq
= mempool_alloc(rreq
->netfs_ops
->subrequest_pool
?: &netfs_subrequest_pool
,
187 memset(subreq
, 0, kmem_cache_size(cache
));
188 INIT_WORK(&subreq
->work
, NULL
);
189 INIT_LIST_HEAD(&subreq
->rreq_link
);
190 refcount_set(&subreq
->ref
, 2);
192 subreq
->debug_index
= atomic_inc_return(&rreq
->subreq_counter
);
193 netfs_get_request(rreq
, netfs_rreq_trace_get_subreq
);
194 netfs_stat(&netfs_n_rh_sreq
);
198 void netfs_get_subrequest(struct netfs_io_subrequest
*subreq
,
199 enum netfs_sreq_ref_trace what
)
203 __refcount_inc(&subreq
->ref
, &r
);
204 trace_netfs_sreq_ref(subreq
->rreq
->debug_id
, subreq
->debug_index
, r
+ 1,
208 static void netfs_free_subrequest(struct netfs_io_subrequest
*subreq
)
210 struct netfs_io_request
*rreq
= subreq
->rreq
;
212 trace_netfs_sreq(subreq
, netfs_sreq_trace_free
);
213 if (rreq
->netfs_ops
->free_subrequest
)
214 rreq
->netfs_ops
->free_subrequest(subreq
);
215 mempool_free(subreq
, rreq
->netfs_ops
->subrequest_pool
?: &netfs_subrequest_pool
);
216 netfs_stat_d(&netfs_n_rh_sreq
);
217 netfs_put_request(rreq
, netfs_rreq_trace_put_subreq
);
220 void netfs_put_subrequest(struct netfs_io_subrequest
*subreq
,
221 enum netfs_sreq_ref_trace what
)
223 unsigned int debug_index
= subreq
->debug_index
;
224 unsigned int debug_id
= subreq
->rreq
->debug_id
;
228 dead
= __refcount_dec_and_test(&subreq
->ref
, &r
);
229 trace_netfs_sreq_ref(debug_id
, debug_index
, r
- 1, what
);
231 netfs_free_subrequest(subreq
);