]>
Commit | Line | Data |
---|---|---|
3a4a38e6 DH |
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Object lifetime handling and tracing. | |
3 | * | |
4 | * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved. | |
5 | * Written by David Howells (dhowells@redhat.com) | |
6 | */ | |
7 | ||
8 | #include <linux/slab.h> | |
9 | #include "internal.h" | |
10 | ||
/*
 * Allocate an I/O request and initialise it.
 *
 * Returns the new request with a single reference held (owned by the caller),
 * or an ERR_PTR on allocation failure or if the filesystem's ->init_request()
 * hook rejects the request.
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin)
{
	static atomic_t debug_ids;
	struct inode *inode = file ? file_inode(file) : mapping->host;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct netfs_io_request *rreq;
	/* Direct/unbuffered origins bypass the pagecache, so they are never
	 * candidates for writing through to the local cache. */
	bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE ||
			      origin == NETFS_DIO_READ ||
			      origin == NETFS_DIO_WRITE);
	bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx);
	int ret;

	/* The filesystem may declare a larger, wrapping request struct via
	 * ->io_request_size; fall back to the plain netfs struct otherwise. */
	rreq = kzalloc(ctx->ops->io_request_size ?: sizeof(struct netfs_io_request),
		       GFP_KERNEL);
	if (!rreq)
		return ERR_PTR(-ENOMEM);

	rreq->start = start;
	rreq->len = len;
	rreq->origin = origin;
	rreq->netfs_ops = ctx->ops;
	rreq->mapping = mapping;
	rreq->inode = inode;
	rreq->i_size = i_size_read(inode);
	rreq->debug_id = atomic_inc_return(&debug_ids);
	INIT_LIST_HEAD(&rreq->subrequests);
	refcount_set(&rreq->ref, 1);

	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	if (cached)
		__set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
	if (file && file->f_flags & O_NONBLOCK)
		__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
	/* Give the filesystem a chance to attach its own state; on failure the
	 * request has not yet been traced or added to the proc list, so a
	 * plain kfree() is sufficient cleanup. */
	if (rreq->netfs_ops->init_request) {
		ret = rreq->netfs_ops->init_request(rreq, file);
		if (ret < 0) {
			kfree(rreq);
			return ERR_PTR(ret);
		}
	}

	trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
	netfs_proc_add_rreq(rreq);
	netfs_stat(&netfs_n_rh_rreq);
	return rreq;
}
63 | ||
de74023b | 64 | void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what) |
3a4a38e6 | 65 | { |
de74023b DH |
66 | int r; |
67 | ||
68 | __refcount_inc(&rreq->ref, &r); | |
69 | trace_netfs_rreq_ref(rreq->debug_id, r + 1, what); | |
3a4a38e6 DH |
70 | } |
71 | ||
72 | void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async) | |
73 | { | |
74 | struct netfs_io_subrequest *subreq; | |
75 | ||
76 | while (!list_empty(&rreq->subrequests)) { | |
77 | subreq = list_first_entry(&rreq->subrequests, | |
78 | struct netfs_io_subrequest, rreq_link); | |
79 | list_del(&subreq->rreq_link); | |
6cd3d6fd DH |
80 | netfs_put_subrequest(subreq, was_async, |
81 | netfs_sreq_trace_put_clear); | |
3a4a38e6 DH |
82 | } |
83 | } | |
84 | ||
85 | static void netfs_free_request(struct work_struct *work) | |
86 | { | |
87 | struct netfs_io_request *rreq = | |
88 | container_of(work, struct netfs_io_request, work); | |
21d706d5 | 89 | unsigned int i; |
bc899ee1 | 90 | |
3a4a38e6 | 91 | trace_netfs_rreq(rreq, netfs_rreq_trace_free); |
87b57a04 | 92 | netfs_proc_del_rreq(rreq); |
40a81101 DH |
93 | netfs_clear_subrequests(rreq, false); |
94 | if (rreq->netfs_ops->free_request) | |
95 | rreq->netfs_ops->free_request(rreq); | |
3a4a38e6 DH |
96 | if (rreq->cache_resources.ops) |
97 | rreq->cache_resources.ops->end_operation(&rreq->cache_resources); | |
21d706d5 DH |
98 | if (rreq->direct_bv) { |
99 | for (i = 0; i < rreq->direct_bv_count; i++) { | |
100 | if (rreq->direct_bv[i].bv_page) { | |
101 | if (rreq->direct_bv_unpin) | |
102 | unpin_user_page(rreq->direct_bv[i].bv_page); | |
103 | } | |
104 | } | |
105 | kvfree(rreq->direct_bv); | |
106 | } | |
87b57a04 | 107 | kfree_rcu(rreq, rcu); |
3a4a38e6 DH |
108 | netfs_stat_d(&netfs_n_rh_rreq); |
109 | } | |
110 | ||
/*
 * Drop a reference on a request, tracing the drop, and free the request when
 * the count reaches zero.  If @was_async, the free is punted to a workqueue
 * because the caller may be in a context that cannot perform the teardown
 * directly.  NULL @rreq is a no-op.
 */
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (rreq) {
		/* Snapshot the id before dropping the ref: once the ref is
		 * gone, another context could be freeing the struct, so the
		 * tracepoint must not dereference rreq. */
		debug_id = rreq->debug_id;
		dead = __refcount_dec_and_test(&rreq->ref, &r);
		trace_netfs_rreq_ref(debug_id, r - 1, what);
		if (dead) {
			if (was_async) {
				/* Reuse the embedded work item to defer the
				 * teardown to process context. */
				rreq->work.func = netfs_free_request;
				if (!queue_work(system_unbound_wq, &rreq->work))
					BUG();
			} else {
				netfs_free_request(&rreq->work);
			}
		}
	}
}
133 | ||
/*
 * Allocate and partially initialise an I/O subrequest structure.
 *
 * Returns the new subrequest (holding a ref on @rreq) or NULL on allocation
 * failure.
 */
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
	/* As with requests, the filesystem may declare a larger wrapping
	 * struct via ->io_subrequest_size. */
	struct netfs_io_subrequest *subreq;

	subreq = kzalloc(rreq->netfs_ops->io_subrequest_size ?:
			 sizeof(struct netfs_io_subrequest),
			 GFP_KERNEL);
	if (subreq) {
		INIT_WORK(&subreq->work, NULL);
		INIT_LIST_HEAD(&subreq->rreq_link);
		/* Ref count starts at 2 — presumably one for the caller and
		 * one held across the I/O; NOTE(review): confirm against the
		 * completion path. */
		refcount_set(&subreq->ref, 2);
		subreq->rreq = rreq;
		/* The subrequest pins its parent request. */
		netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
		netfs_stat(&netfs_n_rh_sreq);
	}

	return subreq;
}
155 | ||
6cd3d6fd DH |
156 | void netfs_get_subrequest(struct netfs_io_subrequest *subreq, |
157 | enum netfs_sreq_ref_trace what) | |
3a4a38e6 | 158 | { |
6cd3d6fd DH |
159 | int r; |
160 | ||
161 | __refcount_inc(&subreq->ref, &r); | |
162 | trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1, | |
163 | what); | |
3a4a38e6 DH |
164 | } |
165 | ||
6cd3d6fd DH |
166 | static void netfs_free_subrequest(struct netfs_io_subrequest *subreq, |
167 | bool was_async) | |
3a4a38e6 DH |
168 | { |
169 | struct netfs_io_request *rreq = subreq->rreq; | |
170 | ||
171 | trace_netfs_sreq(subreq, netfs_sreq_trace_free); | |
5f5ce7ba DH |
172 | if (rreq->netfs_ops->free_subrequest) |
173 | rreq->netfs_ops->free_subrequest(subreq); | |
3a4a38e6 DH |
174 | kfree(subreq); |
175 | netfs_stat_d(&netfs_n_rh_sreq); | |
de74023b | 176 | netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq); |
3a4a38e6 DH |
177 | } |
178 | ||
/*
 * Drop a reference on a subrequest, tracing the drop, and free it (which also
 * drops its ref on the parent request) when the count reaches zero.
 */
void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
			  enum netfs_sreq_ref_trace what)
{
	/* Snapshot the trace ids before dropping the ref: once the ref is
	 * gone the subreq may be freed by another context, so the tracepoint
	 * must not dereference it. */
	unsigned int debug_index = subreq->debug_index;
	unsigned int debug_id = subreq->rreq->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&subreq->ref, &r);
	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
	if (dead)
		netfs_free_subrequest(subreq, was_async);
}