]>
Commit | Line | Data |
---|---|---|
ca4b2a01 MB |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/vmalloc.h> | |
3 | #include "null_blk.h" | |
4 | ||
766c3297 CK |
5 | #define CREATE_TRACE_POINTS |
6 | #include "null_blk_trace.h" | |
7 | ||
ca4b2a01 MB |
8 | /* zone_size in MBs to sectors. */ |
9 | #define ZONE_SIZE_SHIFT 11 | |
10 | ||
11 | static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect) | |
12 | { | |
13 | return sect >> ilog2(dev->zone_size_sects); | |
14 | } | |
15 | ||
d205bde7 | 16 | int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) |
ca4b2a01 MB |
17 | { |
18 | sector_t dev_size = (sector_t)dev->size * 1024 * 1024; | |
19 | sector_t sector = 0; | |
20 | unsigned int i; | |
21 | ||
22 | if (!is_power_of_2(dev->zone_size)) { | |
9c7eddf1 | 23 | pr_err("zone_size must be power-of-two\n"); |
ca4b2a01 MB |
24 | return -EINVAL; |
25 | } | |
e2748325 CK |
26 | if (dev->zone_size > dev->size) { |
27 | pr_err("Zone size larger than device capacity\n"); | |
28 | return -EINVAL; | |
29 | } | |
ca4b2a01 MB |
30 | |
31 | dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT; | |
32 | dev->nr_zones = dev_size >> | |
33 | (SECTOR_SHIFT + ilog2(dev->zone_size_sects)); | |
34 | dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone), | |
35 | GFP_KERNEL | __GFP_ZERO); | |
36 | if (!dev->zones) | |
37 | return -ENOMEM; | |
38 | ||
ea2c18e1 MS |
39 | if (dev->zone_nr_conv >= dev->nr_zones) { |
40 | dev->zone_nr_conv = dev->nr_zones - 1; | |
9c7eddf1 | 41 | pr_info("changed the number of conventional zones to %u", |
ea2c18e1 MS |
42 | dev->zone_nr_conv); |
43 | } | |
44 | ||
45 | for (i = 0; i < dev->zone_nr_conv; i++) { | |
46 | struct blk_zone *zone = &dev->zones[i]; | |
47 | ||
48 | zone->start = sector; | |
49 | zone->len = dev->zone_size_sects; | |
50 | zone->wp = zone->start + zone->len; | |
51 | zone->type = BLK_ZONE_TYPE_CONVENTIONAL; | |
52 | zone->cond = BLK_ZONE_COND_NOT_WP; | |
53 | ||
54 | sector += dev->zone_size_sects; | |
55 | } | |
56 | ||
57 | for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) { | |
ca4b2a01 MB |
58 | struct blk_zone *zone = &dev->zones[i]; |
59 | ||
60 | zone->start = zone->wp = sector; | |
61 | zone->len = dev->zone_size_sects; | |
62 | zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ; | |
63 | zone->cond = BLK_ZONE_COND_EMPTY; | |
64 | ||
65 | sector += dev->zone_size_sects; | |
66 | } | |
67 | ||
d205bde7 DLM |
68 | q->limits.zoned = BLK_ZONED_HM; |
69 | blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); | |
70 | blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE); | |
71 | ||
72 | return 0; | |
73 | } | |
74 | ||
75 | int null_register_zoned_dev(struct nullb *nullb) | |
76 | { | |
77 | struct request_queue *q = nullb->q; | |
78 | ||
79 | if (queue_is_mq(q)) | |
80 | return blk_revalidate_disk_zones(nullb->disk); | |
81 | ||
82 | blk_queue_chunk_sectors(q, nullb->dev->zone_size_sects); | |
83 | q->nr_zones = blkdev_nr_zones(nullb->disk); | |
84 | ||
ca4b2a01 MB |
85 | return 0; |
86 | } | |
87 | ||
d205bde7 | 88 | void null_free_zoned_dev(struct nullb_device *dev) |
ca4b2a01 MB |
89 | { |
90 | kvfree(dev->zones); | |
91 | } | |
92 | ||
7fc8fb51 | 93 | int null_report_zones(struct gendisk *disk, sector_t sector, |
d4100351 | 94 | unsigned int nr_zones, report_zones_cb cb, void *data) |
ca4b2a01 | 95 | { |
e76239a3 CH |
96 | struct nullb *nullb = disk->private_data; |
97 | struct nullb_device *dev = nullb->dev; | |
d4100351 CH |
98 | unsigned int first_zone, i; |
99 | struct blk_zone zone; | |
100 | int error; | |
ca4b2a01 | 101 | |
d4100351 CH |
102 | first_zone = null_zone_no(dev, sector); |
103 | if (first_zone >= dev->nr_zones) | |
104 | return 0; | |
ca4b2a01 | 105 | |
d4100351 | 106 | nr_zones = min(nr_zones, dev->nr_zones - first_zone); |
766c3297 CK |
107 | trace_nullb_report_zones(nullb, nr_zones); |
108 | ||
d4100351 CH |
109 | for (i = 0; i < nr_zones; i++) { |
110 | /* | |
111 | * Stacked DM target drivers will remap the zone information by | |
112 | * modifying the zone information passed to the report callback. | |
113 | * So use a local copy to avoid corruption of the device zone | |
114 | * array. | |
115 | */ | |
116 | memcpy(&zone, &dev->zones[first_zone + i], | |
117 | sizeof(struct blk_zone)); | |
118 | error = cb(&zone, i, data); | |
119 | if (error) | |
120 | return error; | |
121 | } | |
ca4b2a01 | 122 | |
d4100351 | 123 | return nr_zones; |
ca4b2a01 MB |
124 | } |
125 | ||
dd85b492 AJ |
126 | size_t null_zone_valid_read_len(struct nullb *nullb, |
127 | sector_t sector, unsigned int len) | |
128 | { | |
129 | struct nullb_device *dev = nullb->dev; | |
130 | struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)]; | |
131 | unsigned int nr_sectors = len >> SECTOR_SHIFT; | |
132 | ||
133 | /* Read must be below the write pointer position */ | |
134 | if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL || | |
135 | sector + nr_sectors <= zone->wp) | |
136 | return len; | |
137 | ||
138 | if (sector > zone->wp) | |
139 | return 0; | |
140 | ||
141 | return (zone->wp - sector) << SECTOR_SHIFT; | |
142 | } | |
143 | ||
/*
 * Emulate a write to a zoned device. Conventional zones behave like a
 * regular device; sequential zones enforce write-pointer ordering and
 * drive the zone condition state machine.
 */
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	/* Conventional zones accept writes anywhere, unconditionally. */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* Cannot write to a full zone */
		return BLK_STS_IOERR;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		/* Writes must be at the write pointer position */
		if (sector != zone->wp)
			return BLK_STS_IOERR;

		/* An explicitly opened zone stays explicitly open. */
		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		/* Only advance the write pointer if the write succeeded. */
		ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
		if (ret != BLK_STS_OK)
			return ret;

		zone->wp += nr_sectors;
		if (zone->wp == zone->start + zone->len)
			zone->cond = BLK_ZONE_COND_FULL;
		return BLK_STS_OK;
	default:
		/* Invalid zone condition */
		return BLK_STS_IOERR;
	}
}
185 | ||
da644b2c AJ |
186 | static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op, |
187 | sector_t sector) | |
ca4b2a01 MB |
188 | { |
189 | struct nullb_device *dev = cmd->nq->dev; | |
766c3297 CK |
190 | unsigned int zone_no = null_zone_no(dev, sector); |
191 | struct blk_zone *zone = &dev->zones[zone_no]; | |
a61dbfb1 CK |
192 | size_t i; |
193 | ||
da644b2c | 194 | switch (op) { |
a61dbfb1 CK |
195 | case REQ_OP_ZONE_RESET_ALL: |
196 | for (i = 0; i < dev->nr_zones; i++) { | |
197 | if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL) | |
198 | continue; | |
199 | zone[i].cond = BLK_ZONE_COND_EMPTY; | |
200 | zone[i].wp = zone[i].start; | |
201 | } | |
202 | break; | |
203 | case REQ_OP_ZONE_RESET: | |
fceb5d1b CK |
204 | if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) |
205 | return BLK_STS_IOERR; | |
ca4b2a01 | 206 | |
a61dbfb1 CK |
207 | zone->cond = BLK_ZONE_COND_EMPTY; |
208 | zone->wp = zone->start; | |
209 | break; | |
da644b2c AJ |
210 | case REQ_OP_ZONE_OPEN: |
211 | if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) | |
212 | return BLK_STS_IOERR; | |
213 | if (zone->cond == BLK_ZONE_COND_FULL) | |
214 | return BLK_STS_IOERR; | |
215 | ||
216 | zone->cond = BLK_ZONE_COND_EXP_OPEN; | |
217 | break; | |
218 | case REQ_OP_ZONE_CLOSE: | |
219 | if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) | |
220 | return BLK_STS_IOERR; | |
221 | if (zone->cond == BLK_ZONE_COND_FULL) | |
222 | return BLK_STS_IOERR; | |
223 | ||
c7d776f8 DLM |
224 | if (zone->wp == zone->start) |
225 | zone->cond = BLK_ZONE_COND_EMPTY; | |
226 | else | |
227 | zone->cond = BLK_ZONE_COND_CLOSED; | |
da644b2c AJ |
228 | break; |
229 | case REQ_OP_ZONE_FINISH: | |
230 | if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) | |
231 | return BLK_STS_IOERR; | |
232 | ||
233 | zone->cond = BLK_ZONE_COND_FULL; | |
234 | zone->wp = zone->start + zone->len; | |
235 | break; | |
a61dbfb1 | 236 | default: |
79a85e21 | 237 | return BLK_STS_NOTSUPP; |
ea2c18e1 | 238 | } |
766c3297 CK |
239 | |
240 | trace_nullb_zone_op(cmd, zone_no, zone->cond); | |
fceb5d1b CK |
241 | return BLK_STS_OK; |
242 | } | |
243 | ||
9dd44c7e DLM |
244 | blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op, |
245 | sector_t sector, sector_t nr_sectors) | |
fceb5d1b CK |
246 | { |
247 | switch (op) { | |
248 | case REQ_OP_WRITE: | |
249 | return null_zone_write(cmd, sector, nr_sectors); | |
250 | case REQ_OP_ZONE_RESET: | |
251 | case REQ_OP_ZONE_RESET_ALL: | |
da644b2c AJ |
252 | case REQ_OP_ZONE_OPEN: |
253 | case REQ_OP_ZONE_CLOSE: | |
254 | case REQ_OP_ZONE_FINISH: | |
255 | return null_zone_mgmt(cmd, op, sector); | |
fceb5d1b | 256 | default: |
9dd44c7e | 257 | return null_process_cmd(cmd, op, sector, nr_sectors); |
fceb5d1b | 258 | } |
ca4b2a01 | 259 | } |