/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Matias Bjorling <matias@cnexlabs.com>
 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Physical Block-device target for Open-channel SSDs.
 *
 */

#ifndef PBLK_H_
#define PBLK_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/uuid.h>

#include <linux/lightnvm.h>

/* Run GC only if less than 1/X blocks are free */
#define GC_LIMIT_INVERSE 5
#define GC_TIME_MSECS 1000

#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)

#define PBLK_NR_CLOSE_JOBS (4)

#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)

/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)

#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)

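/*
 * Worked example (illustrative): with PBLK_SECTOR = 512 and
 * PBLK_EXPOSED_PAGE_SIZE = 4096, NR_PHY_IN_LOG is 4096 / 512 = 8, i.e. eight
 * 512-byte device sectors make up one exposed 4 KB page. A bio's 512-byte
 * bi_sector is therefore turned into a pblk lba by dividing by NR_PHY_IN_LOG
 * (see pblk_get_lba() further down in this header):
 *
 *      sector_t lba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
 */
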
/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)

#define PBLK_DEFAULT_OP (11)

enum {
        PBLK_READ = READ,
        PBLK_WRITE = WRITE,             /* Write from write buffer */
        PBLK_WRITE_INT,                 /* Internal write - no write buffer */
        PBLK_READ_RECOV,                /* Recovery read - errors allowed */
        PBLK_ERASE,
};

enum {
        /* IO Types */
        PBLK_IOTYPE_USER = 1 << 0,
        PBLK_IOTYPE_GC = 1 << 1,

        /* Write buffer flags */
        PBLK_FLUSH_ENTRY = 1 << 2,
        PBLK_WRITTEN_DATA = 1 << 3,
        PBLK_SUBMITTED_ENTRY = 1 << 4,
        PBLK_WRITABLE_ENTRY = 1 << 5,
};

enum {
        PBLK_BLK_ST_OPEN = 0x1,
        PBLK_BLK_ST_CLOSED = 0x2,
};

enum {
        PBLK_CHUNK_RESET_START,
        PBLK_CHUNK_RESET_DONE,
        PBLK_CHUNK_RESET_FAILED,
};

struct pblk_sec_meta {
        u64 reserved;
        __le64 lba;
};

/* The number of GC lists and the rate-limiter states go together. This way the
 * rate-limiter can dictate how much GC is needed based on resource utilization.
 */
#define PBLK_GC_NR_LISTS 4

enum {
        PBLK_RL_OFF = 0,
        PBLK_RL_WERR = 1,
        PBLK_RL_HIGH = 2,
        PBLK_RL_MID = 3,
        PBLK_RL_LOW = 4
};

#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)

/* Write buffer completion context */
struct pblk_c_ctx {
        struct list_head list;          /* Head for out-of-order completion */

        unsigned long *lun_bitmap;      /* LUNs used on current request */
        unsigned int sentry;
        unsigned int nr_valid;
        unsigned int nr_padded;
};

/* Read context */
struct pblk_g_ctx {
        void *private;
        unsigned long start_time;
        u64 lba;
};

/* Partial read context */
struct pblk_pr_ctx {
        struct bio *orig_bio;
        DECLARE_BITMAP(bitmap, NVM_MAX_VLBA);
        unsigned int orig_nr_secs;
        unsigned int bio_init_idx;
        void *ppa_ptr;
        dma_addr_t dma_ppa_list;
        u64 lba_list_mem[NVM_MAX_VLBA];
        u64 lba_list_media[NVM_MAX_VLBA];
};

/* Pad context */
struct pblk_pad_rq {
        struct pblk *pblk;
        struct completion wait;
        struct kref ref;
};

/* Recovery context */
struct pblk_rec_ctx {
        struct pblk *pblk;
        struct nvm_rq *rqd;
        struct work_struct ws_rec;
};

/* Write context */
struct pblk_w_ctx {
        struct bio_list bios;           /* Original bios - used for completion
                                         * in the REQ_FUA, REQ_FLUSH case
                                         */
        u64 lba;                        /* Logical addr. associated with entry */
        struct ppa_addr ppa;            /* Physical addr. associated with entry */
        int flags;                      /* Write context flags */
};

struct pblk_rb_entry {
        struct ppa_addr cacheline;      /* Cacheline for this entry */
        void *data;                     /* Pointer to data on this entry */
        struct pblk_w_ctx w_ctx;        /* Context for this entry */
        struct list_head index;         /* List head to enable indexes */
};

#define EMPTY_ENTRY (~0U)

struct pblk_rb_pages {
        struct page *pages;
        int order;
        struct list_head list;
};

struct pblk_rb {
        struct pblk_rb_entry *entries;  /* Ring buffer entries */
        unsigned int mem;               /* Write offset - points to the next
                                         * writable entry in memory
                                         */
        unsigned int subm;              /* Read offset - points to the last
                                         * entry that has been submitted to the
                                         * media to be persisted
                                         */
        unsigned int sync;              /* Synced - backpointer that signals
                                         * the last submitted entry that has
                                         * been successfully persisted to media
                                         */
        unsigned int flush_point;       /* Flush point - last entry that must
                                         * be flushed to the media. Used with
                                         * REQ_FLUSH and REQ_FUA
                                         */
        unsigned int l2p_update;        /* l2p update point - next entry for
                                         * which the l2p mapping will be updated
                                         * to contain a device ppa address
                                         * (instead of a cacheline)
                                         */
        unsigned int nr_entries;        /* Number of entries in the write
                                         * buffer - must be a power of two
                                         */
        unsigned int seg_size;          /* Size of the data segments being
                                         * stored on each entry. Typically this
                                         * will be 4KB
                                         */

        unsigned int back_thres;        /* Threshold that shall be maintained
                                         * by the backpointer in order to
                                         * respect geo->mw_cunits on a
                                         * per-chunk basis
                                         */

        struct list_head pages;         /* List of data pages */

        spinlock_t w_lock;              /* Write lock */
        spinlock_t s_lock;              /* Sync lock */

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_t inflight_flush_point;  /* Not served REQ_FLUSH | REQ_FUA */
#endif
};
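
/*
 * For illustration, a minimal sketch of how the ring pointers above relate.
 * An entry advances through mem (copied into the buffer), subm (submitted to
 * the device), sync (persisted on media) and l2p_update (L2P entry switched
 * from a cacheline to a device ppa). Since nr_entries is a power of two, the
 * distance between two pointers wraps with a mask; pblk's ring buffer code
 * relies on the kernel's CIRC_CNT()/CIRC_SPACE() helpers for the real
 * accounting, but the idea amounts to:
 *
 *      entries_in_use = (rb->mem - rb->sync) & (rb->nr_entries - 1);
 */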

#define PBLK_RECOVERY_SECTORS 16

struct pblk_lun {
        struct ppa_addr bppa;
        struct semaphore wr_sem;
};

struct pblk_gc_rq {
        struct pblk_line *line;
        void *data;
        u64 paddr_list[NVM_MAX_VLBA];
        u64 lba_list[NVM_MAX_VLBA];
        int nr_secs;
        int secs_to_gc;
        struct list_head list;
};

struct pblk_gc {
        /* These states are not protected by a lock since (i) they are in the
         * fast path, and (ii) they are not critical.
         */
        int gc_active;
        int gc_enabled;
        int gc_forced;

        struct task_struct *gc_ts;
        struct task_struct *gc_writer_ts;
        struct task_struct *gc_reader_ts;

        struct workqueue_struct *gc_line_reader_wq;
        struct workqueue_struct *gc_reader_wq;

        struct timer_list gc_timer;

        struct semaphore gc_sem;
        atomic_t read_inflight_gc;      /* Number of lines with inflight GC reads */
        atomic_t pipeline_gc;           /* Number of lines in the GC pipeline -
                                         * started reads to finished writes
                                         */
        int w_entries;

        struct list_head w_list;
        struct list_head r_list;

        spinlock_t lock;
        spinlock_t w_lock;
        spinlock_t r_lock;
};

struct pblk_rl {
        unsigned int high;      /* Upper threshold for rate limiter (free run -
                                 * user I/O rate limiter)
                                 */
        unsigned int high_pw;   /* High rounded up as a power of 2 */

#define PBLK_USER_HIGH_THRS 8   /* Begin write limit at 12.5% available blks */
#define PBLK_USER_LOW_THRS 10   /* Aggressive GC at 10% available blocks */

        int rb_windows_pw;      /* Number of rate windows in the write buffer
                                 * given as a power-of-2. This guarantees that
                                 * when user I/O is being rate limited, there
                                 * will be enough space reserved for the GC to
                                 * place its payload. A window is of
                                 * pblk->max_write_pgs size, which in NVMe is
                                 * 64, i.e., 256 KB.
                                 */
        int rb_budget;          /* Total number of entries available for I/O */
        int rb_user_max;        /* Max buffer entries available for user I/O */
        int rb_gc_max;          /* Max buffer entries available for GC I/O */
        int rb_gc_rsv;          /* Reserved buffer entries for GC I/O */
        int rb_state;           /* Rate-limiter current state */
        int rb_max_io;          /* Maximum size for an I/O given the config */

        atomic_t rb_user_cnt;   /* User I/O buffer counter */
        atomic_t rb_gc_cnt;     /* GC I/O buffer counter */
        atomic_t rb_space;      /* Space limit in case of reaching capacity */

        int rsv_blocks;         /* Reserved blocks for GC */

        int rb_user_active;
        int rb_gc_active;

        atomic_t werr_lines;    /* Number of write error lines that need GC */

        struct timer_list u_timer;

        unsigned long total_blocks;

        atomic_t free_blocks;           /* Total number of free blocks (+ OP) */
        atomic_t free_user_blocks;      /* Number of user free blocks (no OP) */
};

#define PBLK_LINE_EMPTY (~0U)

enum {
        /* Line Types */
        PBLK_LINETYPE_FREE = 0,
        PBLK_LINETYPE_LOG = 1,
        PBLK_LINETYPE_DATA = 2,

        /* Line state */
        PBLK_LINESTATE_NEW = 9,
        PBLK_LINESTATE_FREE = 10,
        PBLK_LINESTATE_OPEN = 11,
        PBLK_LINESTATE_CLOSED = 12,
        PBLK_LINESTATE_GC = 13,
        PBLK_LINESTATE_BAD = 14,
        PBLK_LINESTATE_CORRUPT = 15,

        /* GC group */
        PBLK_LINEGC_NONE = 20,
        PBLK_LINEGC_EMPTY = 21,
        PBLK_LINEGC_LOW = 22,
        PBLK_LINEGC_MID = 23,
        PBLK_LINEGC_HIGH = 24,
        PBLK_LINEGC_FULL = 25,
        PBLK_LINEGC_WERR = 26
};

#define PBLK_MAGIC 0x70626c6b /* "pblk" */

/* emeta/smeta persistent storage format versions:
 * Changes in the major version require an offline migration.
 * Changes in the minor version are handled automatically during
 * recovery.
 */

#define SMETA_VERSION_MAJOR (0)
#define SMETA_VERSION_MINOR (1)

#define EMETA_VERSION_MAJOR (0)
#define EMETA_VERSION_MINOR (2)

struct line_header {
        __le32 crc;
        __le32 identifier;      /* pblk identifier */
        __u8 uuid[16];          /* instance uuid */
        __le16 type;            /* line type */
        __u8 version_major;     /* version major */
        __u8 version_minor;     /* version minor */
        __le32 id;              /* line id for current line */
};

struct line_smeta {
        struct line_header header;

        __le32 crc;             /* Full structure including struct crc */
        /* Previous line metadata */
        __le32 prev_id;         /* Line id for previous line */

        /* Current line metadata */
        __le64 seq_nr;          /* Sequence number for current line */

        /* Active writers */
        __le32 window_wr_lun;   /* Number of parallel LUNs to write */

        __le32 rsvd[2];

        __le64 lun_bitmap[];
};


/*
 * Metadata layout in media:
 *      First sector:
 *              1. struct line_emeta
 *              2. bad block bitmap (u64 * window_wr_lun)
 *              3. write amplification counters
 *      Mid sectors (start at lbas_sector):
 *              4. nr_lbas (u64) forming the lba list
 *      Last sectors (start at vsc_sector):
 *              5. u32 valid sector count (vsc) for all lines (~0U: free line)
 */
struct line_emeta {
        struct line_header header;

        __le32 crc;             /* Full structure including struct crc */

        /* Previous line metadata */
        __le32 prev_id;         /* Line id for prev line */

        /* Current line metadata */
        __le64 seq_nr;          /* Sequence number for current line */

        /* Active writers */
        __le32 window_wr_lun;   /* Number of parallel LUNs to write */

        /* Bookkeeping for recovery */
        __le32 next_id;         /* Line id for next line */
        __le64 nr_lbas;         /* Number of lbas mapped in line */
        __le64 nr_valid_lbas;   /* Number of valid lbas mapped in line */
        __le64 bb_bitmap[];     /* Updated bad block bitmap for line */
};


/* Write amplification counters stored on media */
struct wa_counters {
        __le64 user;            /* Number of user written sectors */
        __le64 gc;              /* Number of sectors written by GC */
        __le64 pad;             /* Number of padded sectors */
};

struct pblk_emeta {
        struct line_emeta *buf;         /* emeta buffer in media format */
        int mem;                        /* Write offset - points to the next
                                         * writable entry in memory
                                         */
        atomic_t sync;                  /* Synced - backpointer that signals the
                                         * last entry that has been successfully
                                         * persisted to media
                                         */
        unsigned int nr_entries;        /* Number of emeta entries */
};

struct pblk_smeta {
        struct line_smeta *buf;         /* smeta buffer in persistent format */
};

struct pblk_w_err_gc {
        int has_write_err;
        __le64 *lba_list;
};

struct pblk_line {
        struct pblk *pblk;
        unsigned int id;                /* Line number corresponds to the
                                         * block line
                                         */
        unsigned int seq_nr;            /* Unique line sequence number */

        int state;                      /* PBLK_LINESTATE_X */
        int type;                       /* PBLK_LINETYPE_X */
        int gc_group;                   /* PBLK_LINEGC_X */
        struct list_head list;          /* Free, GC lists */

        unsigned long *lun_bitmap;      /* Bitmap for LUNs mapped in line */

        struct nvm_chk_meta *chks;      /* Chunks forming line */

        struct pblk_smeta *smeta;       /* Start metadata */
        struct pblk_emeta *emeta;       /* End metadata */

        int meta_line;                  /* Metadata line id */
        int meta_distance;              /* Distance between data and metadata */

        u64 emeta_ssec;                 /* Sector where emeta starts */

        unsigned int sec_in_line;       /* Number of usable secs in line */

        atomic_t blk_in_line;           /* Number of good blocks in line */
        unsigned long *blk_bitmap;      /* Bitmap for valid/invalid blocks */
        unsigned long *erase_bitmap;    /* Bitmap for erased blocks */

        unsigned long *map_bitmap;      /* Bitmap for mapped sectors in line */
        unsigned long *invalid_bitmap;  /* Bitmap for invalid sectors in line */

        atomic_t left_eblks;            /* Blocks left for erasing */
        atomic_t left_seblks;           /* Blocks left for sync erasing */

        int left_msecs;                 /* Sectors left for mapping */
        unsigned int cur_sec;           /* Sector map pointer */
        unsigned int nr_valid_lbas;     /* Number of valid lbas in line */

        __le32 *vsc;                    /* Valid sector count in line */

        struct kref ref;                /* Write buffer L2P references */
        atomic_t sec_to_update;         /* Outstanding L2P updates to ppa */

        struct pblk_w_err_gc *w_err_gc; /* Write error gc recovery metadata */

        spinlock_t lock;                /* Necessary for invalid_bitmap only */
};

#define PBLK_DATA_LINES 4

enum {
        PBLK_KMALLOC_META = 1,
        PBLK_VMALLOC_META = 2,
};

enum {
        PBLK_EMETA_TYPE_HEADER = 1,     /* struct line_emeta first sector */
        PBLK_EMETA_TYPE_LLBA = 2,       /* lba list - type: __le64 */
        PBLK_EMETA_TYPE_VSC = 3,        /* vsc list - type: __le32 */
};

struct pblk_line_mgmt {
        int nr_lines;                   /* Total number of full lines */
        int nr_free_lines;              /* Number of full lines in free list */

        /* Free lists - use free_lock */
        struct list_head free_list;     /* Full lines ready to use */
        struct list_head corrupt_list;  /* Full lines corrupted */
        struct list_head bad_list;      /* Full lines bad */

        /* GC lists - use gc_lock */
        struct list_head *gc_lists[PBLK_GC_NR_LISTS];
        struct list_head gc_high_list;  /* Full lines ready to GC, high isc */
        struct list_head gc_mid_list;   /* Full lines ready to GC, mid isc */
        struct list_head gc_low_list;   /* Full lines ready to GC, low isc */

        struct list_head gc_werr_list;  /* Write err recovery list */

        struct list_head gc_full_list;  /* Full lines ready to GC, no valid */
        struct list_head gc_empty_list; /* Full lines close, all valid */

        struct pblk_line *log_line;     /* Current FTL log line */
        struct pblk_line *data_line;    /* Current data line */
        struct pblk_line *log_next;     /* Next FTL log line */
        struct pblk_line *data_next;    /* Next data line */

        struct list_head emeta_list;    /* Lines queued to schedule emeta */

        __le32 *vsc_list;               /* Valid sector counts for all lines */

        /* Metadata allocation type: VMALLOC | KMALLOC */
        int emeta_alloc_type;

        /* Pre-allocated metadata for data lines */
        struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
        struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
        unsigned long meta_bitmap;

        /* Cache and mempool for map/invalid bitmaps */
        struct kmem_cache *bitmap_cache;
        mempool_t *bitmap_pool;

        /* Helpers for fast bitmap calculations */
        unsigned long *bb_template;
        unsigned long *bb_aux;

        unsigned long d_seq_nr;         /* Data line unique sequence number */
        unsigned long l_seq_nr;         /* Log line unique sequence number */

        spinlock_t free_lock;
        spinlock_t close_lock;
        spinlock_t gc_lock;
};

struct pblk_line_meta {
        unsigned int smeta_len;         /* Total length for smeta */
        unsigned int smeta_sec;         /* Sectors needed for smeta */

        unsigned int emeta_len[4];      /* Lengths for emeta:
                                         *  [0]: Total
                                         *  [1]: struct line_emeta +
                                         *       bb_bitmap + struct wa_counters
                                         *  [2]: L2P portion
                                         *  [3]: vsc
                                         */
        unsigned int emeta_sec[4];      /* Sectors needed for emeta. Same
                                         * layout as emeta_len
                                         */

        unsigned int emeta_bb;          /* Boundary for bb that affects emeta */

        unsigned int vsc_list_len;      /* Length for vsc list */
        unsigned int sec_bitmap_len;    /* Length for sector bitmap in line */
        unsigned int blk_bitmap_len;    /* Length for block bitmap in line */
        unsigned int lun_bitmap_len;    /* Length for lun bitmap in line */

        unsigned int blk_per_line;      /* Number of blocks in a full line */
        unsigned int sec_per_line;      /* Number of sectors in a line */
        unsigned int dsec_per_line;     /* Number of data sectors in a line */
        unsigned int min_blk_line;      /* Min. number of good blocks in line */

        unsigned int mid_thrs;          /* Threshold for GC mid list */
        unsigned int high_thrs;         /* Threshold for GC high list */

        unsigned int meta_distance;     /* Distance between data and metadata */
};

enum {
        PBLK_STATE_RUNNING = 0,
        PBLK_STATE_STOPPING = 1,
        PBLK_STATE_RECOVERING = 2,
        PBLK_STATE_STOPPED = 3,
};

/* Internal format to support non-power-of-2 device formats */
struct pblk_addrf {
        /* gen to dev */
        int sec_stripe;
        int ch_stripe;
        int lun_stripe;

        /* dev to gen */
        int sec_lun_stripe;
        int sec_ws_stripe;
};

struct pblk {
        struct nvm_tgt_dev *dev;
        struct gendisk *disk;

        struct kobject kobj;

        struct pblk_lun *luns;

        struct pblk_line *lines;        /* Line array */
        struct pblk_line_mgmt l_mg;     /* Line management */
        struct pblk_line_meta lm;       /* Line metadata */

        struct nvm_addrf addrf;         /* Aligned address format */
        struct pblk_addrf uaddrf;       /* Unaligned address format */
        int addrf_len;

        struct pblk_rb rwb;

        int state;                      /* pblk line state */

        int min_write_pgs;      /* Minimum amount of pages required by controller */
        int min_write_pgs_data; /* Minimum amount of payload pages */
        int max_write_pgs;      /* Maximum amount of pages supported by controller */
        int oob_meta_size;      /* Size of OOB sector metadata */

        sector_t capacity;      /* Device capacity when bad blocks are subtracted */

        int op;                 /* Percentage of device used for over-provisioning */
        int op_blks;            /* Number of blocks used for over-provisioning */

        /* pblk provisioning values. Used by rate limiter */
        struct pblk_rl rl;

        int sec_per_write;

        guid_t instance_uuid;

        /* Persistent write amplification counters, 4kb sector I/Os */
        atomic64_t user_wa;             /* Sectors written by user */
        atomic64_t gc_wa;               /* Sectors written by GC */
        atomic64_t pad_wa;              /* Padded sectors written */

        /* Reset values for delta write amplification measurements */
        u64 user_rst_wa;
        u64 gc_rst_wa;
        u64 pad_rst_wa;

        /* Counters used for calculating padding distribution */
        atomic64_t *pad_dist;           /* Padding distribution buckets */
        u64 nr_flush_rst;               /* Flushes reset value for pad dist. */
        atomic64_t nr_flush;            /* Number of flush/fua I/O */

#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Non-persistent debug counters, 4kb sector I/Os */
        atomic_long_t inflight_writes;  /* Inflight writes (user and gc) */
        atomic_long_t padded_writes;    /* Sectors padded due to flush/fua */
        atomic_long_t padded_wb;        /* Sectors padded in write buffer */
        atomic_long_t req_writes;       /* Sectors stored on write buffer */
        atomic_long_t sub_writes;       /* Sectors submitted from buffer */
        atomic_long_t sync_writes;      /* Sectors synced to media */
        atomic_long_t inflight_reads;   /* Inflight sector read requests */
        atomic_long_t cache_reads;      /* Read requests that hit the cache */
        atomic_long_t sync_reads;       /* Completed sector read requests */
        atomic_long_t recov_writes;     /* Sectors submitted from recovery */
        atomic_long_t recov_gc_writes;  /* Sectors submitted from write GC */
        atomic_long_t recov_gc_reads;   /* Sectors submitted from read GC */
#endif

        spinlock_t lock;

        atomic_long_t read_failed;
        atomic_long_t read_empty;
        atomic_long_t read_high_ecc;
        atomic_long_t read_failed_gc;
        atomic_long_t write_failed;
        atomic_long_t erase_failed;

        atomic_t inflight_io;           /* General inflight I/O counter */

        struct task_struct *writer_ts;

        /* Simple translation map of logical addresses to physical addresses.
         * The logical addresses are known by the host system, while the
         * physical addresses are used when writing to the disk block device.
         */
        unsigned char *trans_map;
        spinlock_t trans_lock;

        struct list_head compl_list;

        spinlock_t resubmit_lock;       /* Resubmit list lock */
        struct list_head resubmit_list; /* Resubmit list for failed writes */

        mempool_t page_bio_pool;
        mempool_t gen_ws_pool;
        mempool_t rec_pool;
        mempool_t r_rq_pool;
        mempool_t w_rq_pool;
        mempool_t e_rq_pool;

        struct workqueue_struct *close_wq;
        struct workqueue_struct *bb_wq;
        struct workqueue_struct *r_end_wq;

        struct timer_list wtimer;

        struct pblk_gc gc;
};

struct pblk_line_ws {
        struct pblk *pblk;
        struct pblk_line *line;
        void *priv;
        struct work_struct ws;
};

#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))

#define pblk_err(pblk, fmt, ...) \
        pr_err("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
#define pblk_info(pblk, fmt, ...) \
        pr_info("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
#define pblk_warn(pblk, fmt, ...) \
        pr_warn("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
#define pblk_debug(pblk, fmt, ...) \
        pr_debug("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)

/*
 * pblk ring buffer operations
 */
int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
                 unsigned int seg_sz);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
                           unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
                         unsigned int *pos);
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
                              struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
                            struct pblk_w_ctx w_ctx, struct pblk_line *line,
                            u64 paddr, unsigned int pos);
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
void pblk_rb_flush(struct pblk_rb *rb);

void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
                                 unsigned int pos, unsigned int nr_entries,
                                 unsigned int count);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
                        struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
                              unsigned int nr_entries);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);

unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);

int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
void pblk_rb_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);

/*
 * pblk core
 */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
                        struct pblk_c_ctx *c_ctx);
void pblk_discard(struct pblk *pblk, struct bio *bio);
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk);
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
                                        struct nvm_chk_meta *lp,
                                        struct ppa_addr ppa);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa);
void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
void __pblk_pipeline_stop(struct pblk *pblk);
void __pblk_pipeline_flush(struct pblk *pblk);
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
                     void (*work)(struct work_struct *), gfp_t gfp_mask,
                     struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line);
int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
void pblk_line_put_wq(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush, bool skip_meta);
void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
                  unsigned long *lun_bitmap);
void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa);
void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages);
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                           u64 paddr);
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
                           struct ppa_addr ppa);
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
                         struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                       struct pblk_line *gc_line, u64 paddr);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
                          u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
                         sector_t blba, int nr_secs);
void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);

/*
 * pblk user I/O write path
 */
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
                        unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);

/*
 * pblk map
 */
int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
                      unsigned int sentry, unsigned long *lun_bitmap,
                      unsigned int valid_secs, struct ppa_addr *erase_ppa);
int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
                unsigned long *lun_bitmap, unsigned int valid_secs,
                unsigned int off);

/*
 * pblk write thread
 */
int pblk_write_ts(void *data);
void pblk_write_timer_fn(struct timer_list *t);
void pblk_write_should_kick(struct pblk *pblk);
void pblk_write_kick(struct pblk *pblk);

/*
 * pblk read path
 */
extern struct bio_set pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);

/*
 * pblk recovery
 */
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);

/*
 * pblk gc
 */
#define PBLK_GC_MAX_READERS 8   /* Max number of outstanding GC reader jobs */
#define PBLK_GC_RQ_QD 128       /* Queue depth for inflight GC requests */
#define PBLK_GC_L_QD 4          /* Queue depth for inflight GC lines */

int pblk_gc_init(struct pblk *pblk);
void pblk_gc_exit(struct pblk *pblk, bool graceful);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_should_kick(struct pblk *pblk);
void pblk_gc_free_full_lines(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
                              int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);

/*
 * pblk rate limiter
 */
void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold);
void pblk_rl_free(struct pblk_rl *rl);
void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
                            bool used);
int pblk_rl_is_limit(struct pblk_rl *rl);

void pblk_rl_werr_line_in(struct pblk_rl *rl);
void pblk_rl_werr_line_out(struct pblk_rl *rl);

/*
 * pblk sysfs
 */
int pblk_sysfs_init(struct gendisk *tdisk);
void pblk_sysfs_exit(struct gendisk *tdisk);

static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
{
        if (type == PBLK_KMALLOC_META)
                return kmalloc(size, flags);
        return vmalloc(size);
}

static inline void pblk_mfree(void *ptr, int type)
{
        if (type == PBLK_KMALLOC_META)
                kfree(ptr);
        else
                vfree(ptr);
}
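
/*
 * Usage sketch (illustrative): metadata buffers are allocated and freed with
 * the same type - typically l_mg.emeta_alloc_type - so that a vmalloc'ed
 * buffer is never handed to kfree():
 *
 *      buf = pblk_malloc(pblk->lm.emeta_len[0], l_mg->emeta_alloc_type,
 *                        GFP_KERNEL);
 *      ...
 *      pblk_mfree(buf, l_mg->emeta_alloc_type);
 */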

static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
{
        return c_ctx - sizeof(struct nvm_rq);
}

static inline void *emeta_to_bb(struct line_emeta *emeta)
{
        return emeta->bb_bitmap;
}

static inline void *emeta_to_wa(struct pblk_line_meta *lm,
                                struct line_emeta *emeta)
{
        return emeta->bb_bitmap + lm->blk_bitmap_len;
}

static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
{
        return ((void *)emeta + pblk->lm.emeta_len[1]);
}

static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
{
        return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
}
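
/*
 * For illustration, a sketch of how the accessors above follow the emeta
 * media layout described next to struct line_emeta: the bad block bitmap and
 * write amplification counters trail the header in the first sector, the lba
 * list starts emeta_len[1] bytes into the buffer, and the vsc list follows
 * emeta_len[2] bytes after that. A recovery-style walk of the lba list could
 * look like this (emeta_buf, lba_list and i are hypothetical locals):
 *
 *      __le64 *lba_list = emeta_to_lbas(pblk, emeta_buf);
 *      u64 lba = le64_to_cpu(lba_list[i]);     (for i < nr_lbas)
 */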

static inline int pblk_line_vsc(struct pblk_line *line)
{
        return le32_to_cpu(*line->vsc);
}

static inline int pblk_ppa_to_line_id(struct ppa_addr p)
{
        return p.a.blk;
}

static inline struct pblk_line *pblk_ppa_to_line(struct pblk *pblk,
                                                 struct ppa_addr p)
{
        return &pblk->lines[pblk_ppa_to_line_id(p)];
}

static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
        return p.a.lun * geo->num_ch + p.a.ch;
}

static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
                                              u64 line_id)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;

                ppa.ppa = 0;
                ppa.g.blk = line_id;
                ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
                ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset;
                ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset;
                ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
                ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset;
        } else {
                struct pblk_addrf *uaddrf = &pblk->uaddrf;
                int secs, chnls, luns;

                ppa.ppa = 0;

                ppa.m.chk = line_id;

                paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs);
                ppa.m.sec = secs;

                paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls);
                ppa.m.grp = chnls;

                paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns);
                ppa.m.pu = luns;

                ppa.m.sec += uaddrf->sec_stripe * paddr;
        }

        return ppa;
}

static inline struct nvm_chk_meta *pblk_dev_ppa_to_chunk(struct pblk *pblk,
                                                         struct ppa_addr p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line *line = pblk_ppa_to_line(pblk, p);
        int pos = pblk_ppa_to_pos(geo, p);

        return &line->chks[pos];
}

static inline u64 pblk_dev_ppa_to_chunk_addr(struct pblk *pblk,
                                             struct ppa_addr p)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        return dev_to_chunk_addr(dev->parent, &pblk->addrf, p);
}

static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
                                            struct ppa_addr p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        u64 paddr;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;

                paddr = (u64)p.g.ch << ppaf->ch_offset;
                paddr |= (u64)p.g.lun << ppaf->lun_offset;
                paddr |= (u64)p.g.pg << ppaf->pg_offset;
                paddr |= (u64)p.g.pl << ppaf->pln_offset;
                paddr |= (u64)p.g.sec << ppaf->sec_offset;
        } else {
                struct pblk_addrf *uaddrf = &pblk->uaddrf;
                u64 secs = p.m.sec;
                int sec_stripe;

                paddr = (u64)p.m.grp * uaddrf->sec_stripe;
                paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe;

                secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe);
                paddr += secs * uaddrf->sec_ws_stripe;
                paddr += sec_stripe;
        }

        return paddr;
}

static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        return nvm_ppa32_to_ppa64(dev->parent, &pblk->addrf, ppa32);
}

static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        return nvm_ppa64_to_ppa32(dev->parent, &pblk->addrf, ppa64);
}

static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
                                                 sector_t lba)
{
        struct ppa_addr ppa;

        if (pblk->addrf_len < 32) {
                u32 *map = (u32 *)pblk->trans_map;

                ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
        } else {
                struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;

                ppa = map[lba];
        }

        return ppa;
}

static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
                                      struct ppa_addr ppa)
{
        if (pblk->addrf_len < 32) {
                u32 *map = (u32 *)pblk->trans_map;

                map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
        } else {
                u64 *map = (u64 *)pblk->trans_map;

                map[lba] = ppa.ppa;
        }
}

static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
{
        return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
{
        ppa_addr->ppa = ADDR_EMPTY;
}

static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
        return (lppa.ppa == rppa.ppa);
}

static inline int pblk_addr_in_cache(struct ppa_addr ppa)
{
        return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
}

static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
{
        return ppa.c.line;
}

static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
{
        struct ppa_addr p;

        p.c.line = addr;
        p.c.is_cached = 1;

        return p;
}
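
/*
 * For illustration, a minimal read-side lookup sketch built from the helpers
 * above (serialization via trans_lock omitted). An L2P entry is either empty,
 * a cacheline in the write buffer, or a device ppa:
 *
 *      struct ppa_addr ppa = pblk_trans_map_get(pblk, lba);
 *
 *      if (pblk_ppa_empty(ppa))
 *              (lba was never written or has been trimmed)
 *      else if (pblk_addr_in_cache(ppa))
 *              (copy the data from write buffer entry
 *               pblk_addr_to_cacheline(ppa))
 *      else
 *              (read from the device at ppa)
 */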

static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
                                            struct line_header *header)
{
        u32 crc = ~(u32)0;

        crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
                                sizeof(struct line_header) - sizeof(crc));

        return crc;
}

static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
                                      struct line_smeta *smeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        u32 crc = ~(u32)0;

        crc = crc32_le(crc, (unsigned char *)smeta +
                                sizeof(struct line_header) + sizeof(crc),
                                lm->smeta_len -
                                sizeof(struct line_header) - sizeof(crc));

        return crc;
}

static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
                                      struct line_emeta *emeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        u32 crc = ~(u32)0;

        crc = crc32_le(crc, (unsigned char *)emeta +
                                sizeof(struct line_header) + sizeof(crc),
                                lm->emeta_len[0] -
                                sizeof(struct line_header) - sizeof(crc));

        return crc;
}
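
/*
 * Usage sketch (illustrative): the CRC fields stored on media are
 * little-endian, so a typical recovery-time check recomputes the value and
 * compares it against the on-media field, e.g. for the line header:
 *
 *      if (pblk_calc_meta_header_crc(pblk, header) != le32_to_cpu(header->crc))
 *              (treat the metadata as corrupt)
 */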

static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
        return !(nr_secs % pblk->min_write_pgs);
}

#ifdef CONFIG_NVM_PBLK_DEBUG
static inline void print_ppa(struct pblk *pblk, struct ppa_addr *p,
                             char *msg, int error)
{
        struct nvm_geo *geo = &pblk->dev->geo;

        if (p->c.is_cached) {
                pblk_err(pblk, "ppa: (%s: %x) cache line: %llu\n",
                                msg, error, (u64)p->c.line);
        } else if (geo->version == NVM_OCSSD_SPEC_12) {
                pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
                        msg, error,
                        p->g.ch, p->g.lun, p->g.blk,
                        p->g.pg, p->g.pl, p->g.sec);
        } else {
                pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
                        msg, error,
                        p->m.grp, p->m.pu, p->m.chk, p->m.sec);
        }
}

static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
                                         int error)
{
        int bit = -1;

        if (rqd->nr_ppas == 1) {
                print_ppa(pblk, &rqd->ppa_addr, "rqd", error);
                return;
        }

        while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
                                                bit + 1)) < rqd->nr_ppas) {
                print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error);
        }

        pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}

static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
                                           struct ppa_addr *ppas, int nr_ppas)
{
        struct nvm_geo *geo = &tgt_dev->geo;
        struct ppa_addr *ppa;
        int i;

        for (i = 0; i < nr_ppas; i++) {
                ppa = &ppas[i];

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        if (!ppa->c.is_cached &&
                                        ppa->g.ch < geo->num_ch &&
                                        ppa->g.lun < geo->num_lun &&
                                        ppa->g.pl < geo->num_pln &&
                                        ppa->g.blk < geo->num_chk &&
                                        ppa->g.pg < geo->num_pg &&
                                        ppa->g.sec < geo->ws_min)
                                continue;
                } else {
                        if (!ppa->c.is_cached &&
                                        ppa->m.grp < geo->num_ch &&
                                        ppa->m.pu < geo->num_lun &&
                                        ppa->m.chk < geo->num_chk &&
                                        ppa->m.sec < geo->clba)
                                continue;
                }

                print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i);

                return 1;
        }
        return 0;
}

static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
                WARN_ON(1);
                return -EINVAL;
        }

        if (rqd->opcode == NVM_OP_PWRITE) {
                struct pblk_line *line;
                int i;

                for (i = 0; i < rqd->nr_ppas; i++) {
                        line = pblk_ppa_to_line(pblk, ppa_list[i]);

                        spin_lock(&line->lock);
                        if (line->state != PBLK_LINESTATE_OPEN) {
                                pblk_err(pblk, "bad ppa: line:%d,state:%d\n",
                                                        line->id, line->state);
                                WARN_ON(1);
                                spin_unlock(&line->lock);
                                return -EINVAL;
                        }
                        spin_unlock(&line->lock);
                }
        }

        return 0;
}
#endif

static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
        struct pblk_line_meta *lm = &pblk->lm;

        if (paddr > lm->sec_per_line)
                return 1;

        return 0;
}

static inline unsigned int pblk_get_bi_idx(struct bio *bio)
{
        return bio->bi_iter.bi_idx;
}

static inline sector_t pblk_get_lba(struct bio *bio)
{
        return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int pblk_get_secs(struct bio *bio)
{
        return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}

static inline char *pblk_disk_name(struct pblk *pblk)
{
        struct gendisk *disk = pblk->disk;

        return disk->disk_name;
}

static inline unsigned int pblk_get_min_chks(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        /* In a worst-case scenario every line will have OP invalid sectors.
         * We will then need a minimum of 1/OP lines to free up a single line.
         */

        return DIV_ROUND_UP(100, pblk->op) * lm->blk_per_line;
}
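
/*
 * Worked example (illustrative): with the default over-provisioning of
 * PBLK_DEFAULT_OP = 11 (percent), DIV_ROUND_UP(100, 11) = 10, so the minimum
 * is ten lines' worth of chunks, i.e. 10 * blk_per_line.
 */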

static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
                                                  void *meta, int index)
{
        return meta +
               max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
               * index;
}

static inline int pblk_dma_meta_size(struct pblk *pblk)
{
        return max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
               * NVM_MAX_VLBA;
}

static inline int pblk_is_oob_meta_supported(struct pblk *pblk)
{
        return pblk->oob_meta_size >= sizeof(struct pblk_sec_meta);
}
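
/*
 * For illustration, a sketch of reading per-sector OOB metadata with
 * pblk_get_meta(). Entries are spaced by the larger of the pblk metadata size
 * and the controller's OOB area size, so the same arithmetic also works for
 * oversized OOB areas (meta_list and i are hypothetical locals for the
 * request's DMA metadata buffer and sector index):
 *
 *      struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
 *      u64 lba = le64_to_cpu(meta->lba);
 */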
#endif /* PBLK_H_ */