]>
Commit | Line | Data |
---|---|---|
3a4a38e6 DH |
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Object lifetime handling and tracing. | |
3 | * | |
4 | * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved. | |
5 | * Written by David Howells (dhowells@redhat.com) | |
6 | */ | |
7 | ||
8 | #include <linux/slab.h> | |
d9f85a04 DH |
9 | #include <linux/mempool.h> |
10 | #include <linux/delay.h> | |
3a4a38e6 DH |
11 | #include "internal.h" |
12 | ||
20d72b00 DH |
13 | static void netfs_free_request(struct work_struct *work); |
14 | ||
3a4a38e6 DH |
15 | /* |
16 | * Allocate an I/O request and initialise it. | |
17 | */ | |
663dfb65 DH |
18 | struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, |
19 | struct file *file, | |
663dfb65 DH |
20 | loff_t start, size_t len, |
21 | enum netfs_io_origin origin) | |
3a4a38e6 DH |
22 | { |
23 | static atomic_t debug_ids; | |
bc899ee1 | 24 | struct inode *inode = file ? file_inode(file) : mapping->host; |
874c8ca1 | 25 | struct netfs_inode *ctx = netfs_inode(inode); |
3a4a38e6 | 26 | struct netfs_io_request *rreq; |
d9f85a04 DH |
27 | mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool; |
28 | struct kmem_cache *cache = mempool->pool_data; | |
2de16041 | 29 | int ret; |
3a4a38e6 | 30 | |
d9f85a04 DH |
31 | for (;;) { |
32 | rreq = mempool_alloc(mempool, GFP_KERNEL); | |
33 | if (rreq) | |
34 | break; | |
35 | msleep(10); | |
36 | } | |
2de16041 | 37 | |
d9f85a04 | 38 | memset(rreq, 0, kmem_cache_size(cache)); |
20d72b00 | 39 | INIT_WORK(&rreq->cleanup_work, netfs_free_request); |
2de16041 DH |
40 | rreq->start = start; |
41 | rreq->len = len; | |
42 | rreq->origin = origin; | |
bc899ee1 | 43 | rreq->netfs_ops = ctx->ops; |
2de16041 | 44 | rreq->mapping = mapping; |
bc899ee1 DH |
45 | rreq->inode = inode; |
46 | rreq->i_size = i_size_read(inode); | |
2de16041 | 47 | rreq->debug_id = atomic_inc_return(&debug_ids); |
288ace2f | 48 | rreq->wsize = INT_MAX; |
ee4cdf7b DH |
49 | rreq->io_streams[0].sreq_max_len = ULONG_MAX; |
50 | rreq->io_streams[0].sreq_max_segs = 0; | |
288ace2f DH |
51 | spin_lock_init(&rreq->lock); |
52 | INIT_LIST_HEAD(&rreq->io_streams[0].subrequests); | |
53 | INIT_LIST_HEAD(&rreq->io_streams[1].subrequests); | |
e2d46f2e | 54 | init_waitqueue_head(&rreq->waitq); |
20d72b00 | 55 | refcount_set(&rreq->ref, 2); |
16af134c | 56 | |
24c90a79 DH |
57 | if (origin == NETFS_READAHEAD || |
58 | origin == NETFS_READPAGE || | |
ee4cdf7b | 59 | origin == NETFS_READ_GAPS || |
49866ce7 | 60 | origin == NETFS_READ_SINGLE || |
24c90a79 | 61 | origin == NETFS_READ_FOR_WRITE || |
db26d62d | 62 | origin == NETFS_UNBUFFERED_READ || |
e2d46f2e DH |
63 | origin == NETFS_DIO_READ) { |
64 | INIT_WORK(&rreq->work, netfs_read_collection_worker); | |
65 | rreq->io_streams[0].avail = true; | |
66 | } else { | |
24c90a79 | 67 | INIT_WORK(&rreq->work, netfs_write_collection_worker); |
e2d46f2e | 68 | } |
24c90a79 | 69 | |
2de16041 DH |
70 | __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); |
71 | if (rreq->netfs_ops->init_request) { | |
72 | ret = rreq->netfs_ops->init_request(rreq, file); | |
73 | if (ret < 0) { | |
d9f85a04 | 74 | mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool); |
2de16041 DH |
75 | return ERR_PTR(ret); |
76 | } | |
3a4a38e6 DH |
77 | } |
78 | ||
f89ea63f | 79 | atomic_inc(&ctx->io_count); |
20d72b00 | 80 | trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), netfs_rreq_trace_new); |
87b57a04 | 81 | netfs_proc_add_rreq(rreq); |
2de16041 | 82 | netfs_stat(&netfs_n_rh_rreq); |
3a4a38e6 DH |
83 | return rreq; |
84 | } | |
85 | ||
de74023b | 86 | void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what) |
3a4a38e6 | 87 | { |
de74023b DH |
88 | int r; |
89 | ||
90 | __refcount_inc(&rreq->ref, &r); | |
91 | trace_netfs_rreq_ref(rreq->debug_id, r + 1, what); | |
3a4a38e6 DH |
92 | } |
93 | ||
20d72b00 | 94 | void netfs_clear_subrequests(struct netfs_io_request *rreq) |
3a4a38e6 DH |
95 | { |
96 | struct netfs_io_subrequest *subreq; | |
288ace2f DH |
97 | struct netfs_io_stream *stream; |
98 | int s; | |
3a4a38e6 | 99 | |
288ace2f DH |
100 | for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) { |
101 | stream = &rreq->io_streams[s]; | |
102 | while (!list_empty(&stream->subrequests)) { | |
103 | subreq = list_first_entry(&stream->subrequests, | |
104 | struct netfs_io_subrequest, rreq_link); | |
105 | list_del(&subreq->rreq_link); | |
20d72b00 | 106 | netfs_put_subrequest(subreq, netfs_sreq_trace_put_clear); |
288ace2f DH |
107 | } |
108 | } | |
3a4a38e6 DH |
109 | } |
110 | ||
d9f85a04 DH |
111 | static void netfs_free_request_rcu(struct rcu_head *rcu) |
112 | { | |
113 | struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu); | |
114 | ||
115 | mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool); | |
116 | netfs_stat_d(&netfs_n_rh_rreq); | |
117 | } | |
118 | ||
/*
 * Tear down a request after the last ref has been dropped.  Runs from the
 * cleanup_work item queued by netfs_put_request().  The teardown order below
 * matters: the collector is flushed first, and the structure itself is only
 * returned to the pool after an RCU grace period.
 */
static void netfs_free_request(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, cleanup_work);
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	unsigned int i;

	trace_netfs_rreq(rreq, netfs_rreq_trace_free);

	/* Cancel/flush the result collection worker.  That does not carry a
	 * ref of its own, so we must wait for it somewhere.
	 */
	cancel_work_sync(&rreq->work);

	netfs_proc_del_rreq(rreq);
	netfs_clear_subrequests(rreq);
	/* Let the filesystem and the cache release their per-request state. */
	if (rreq->netfs_ops->free_request)
		rreq->netfs_ops->free_request(rreq);
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	/* Unpin any user pages captured for direct I/O and free the bvec
	 * array itself.
	 */
	if (rreq->direct_bv) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			if (rreq->direct_bv[i].bv_page) {
				if (rreq->direct_bv_unpin)
					unpin_user_page(rreq->direct_bv[i].bv_page);
			}
		}
		kvfree(rreq->direct_bv);
	}
	rolling_buffer_clear(&rreq->buffer);

	/* Wake anyone waiting for all I/O on this inode to drain. */
	if (atomic_dec_and_test(&ictx->io_count))
		wake_up_var(&ictx->io_count);
	/* Defer the final free to RCU so that lockless inspectors of the
	 * request see valid memory until their read sections end.
	 */
	call_rcu(&rreq->rcu, netfs_free_request_rcu);
}
154 | ||
20d72b00 | 155 | void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what) |
3a4a38e6 | 156 | { |
6ba22d8d | 157 | unsigned int debug_id; |
de74023b DH |
158 | bool dead; |
159 | int r; | |
160 | ||
6ba22d8d DH |
161 | if (rreq) { |
162 | debug_id = rreq->debug_id; | |
163 | dead = __refcount_dec_and_test(&rreq->ref, &r); | |
164 | trace_netfs_rreq_ref(debug_id, r - 1, what); | |
20d72b00 DH |
165 | if (dead) |
166 | WARN_ON(!queue_work(system_unbound_wq, &rreq->cleanup_work)); | |
3a4a38e6 DH |
167 | } |
168 | } | |
169 | ||
170 | /* | |
171 | * Allocate and partially initialise an I/O request structure. | |
172 | */ | |
173 | struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq) | |
174 | { | |
175 | struct netfs_io_subrequest *subreq; | |
d9f85a04 DH |
176 | mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool; |
177 | struct kmem_cache *cache = mempool->pool_data; | |
178 | ||
179 | for (;;) { | |
180 | subreq = mempool_alloc(rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool, | |
181 | GFP_KERNEL); | |
182 | if (subreq) | |
183 | break; | |
184 | msleep(10); | |
3a4a38e6 DH |
185 | } |
186 | ||
d9f85a04 | 187 | memset(subreq, 0, kmem_cache_size(cache)); |
e2d46f2e | 188 | INIT_WORK(&subreq->work, NULL); |
d9f85a04 DH |
189 | INIT_LIST_HEAD(&subreq->rreq_link); |
190 | refcount_set(&subreq->ref, 2); | |
191 | subreq->rreq = rreq; | |
192 | subreq->debug_index = atomic_inc_return(&rreq->subreq_counter); | |
193 | netfs_get_request(rreq, netfs_rreq_trace_get_subreq); | |
194 | netfs_stat(&netfs_n_rh_sreq); | |
3a4a38e6 DH |
195 | return subreq; |
196 | } | |
197 | ||
6cd3d6fd DH |
198 | void netfs_get_subrequest(struct netfs_io_subrequest *subreq, |
199 | enum netfs_sreq_ref_trace what) | |
3a4a38e6 | 200 | { |
6cd3d6fd DH |
201 | int r; |
202 | ||
203 | __refcount_inc(&subreq->ref, &r); | |
204 | trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1, | |
205 | what); | |
3a4a38e6 DH |
206 | } |
207 | ||
/*
 * Release a subrequest after its last ref is gone.  The parent request ref
 * that the subrequest holds is dropped last, as rreq->netfs_ops is needed to
 * pick the pool to free into.
 */
static void netfs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
	if (rreq->netfs_ops->free_subrequest)
		rreq->netfs_ops->free_subrequest(subreq);
	/* Return the subrequest to whichever pool it was allocated from. */
	mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
	netfs_stat_d(&netfs_n_rh_sreq);
	netfs_put_request(rreq, netfs_rreq_trace_put_subreq);
}
219 | ||
/*
 * Drop a ref on a subrequest, freeing it if that was the last one.
 */
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	/* Capture the debug IDs before dropping the ref: if this turns out to
	 * be the final put, subreq is freed before the tracepoint fires.
	 */
	unsigned int debug_index = subreq->debug_index;
	unsigned int debug_id = subreq->rreq->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&subreq->ref, &r);
	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
	if (dead)
		netfs_free_subrequest(subreq);
}