1/*
2 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
3 * Copyright (C) 2016 CNEX Labs
4 * Initial release: Matias Bjorling <matias@cnexlabs.com>
5 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * Implementation of a Physical Block-device target for Open-channel SSDs.
17 *
18 */
19
20#ifndef PBLK_H_
21#define PBLK_H_
22
23#include <linux/blkdev.h>
24#include <linux/blk-mq.h>
25#include <linux/bio.h>
26#include <linux/module.h>
27#include <linux/kthread.h>
28#include <linux/vmalloc.h>
29#include <linux/crc32.h>
30#include <linux/uuid.h>
31
32#include <linux/lightnvm.h>
33
34/* Run only GC if less than 1/X blocks are free */
35#define GC_LIMIT_INVERSE 5
36#define GC_TIME_MSECS 1000
37
38#define PBLK_SECTOR (512)
39#define PBLK_EXPOSED_PAGE_SIZE (4096)
40#define PBLK_MAX_REQ_ADDRS (64)
41#define PBLK_MAX_REQ_ADDRS_PW (6)
42
43#define PBLK_NR_CLOSE_JOBS (4)
44
45#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
46
47#define PBLK_COMMAND_TIMEOUT_MS 30000
48
49/* Max 512 LUNs per device */
50#define PBLK_MAX_LUNS_BITMAP (4)
51
52#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
53
54#define pblk_for_each_lun(pblk, rlun, i) \
55 for ((i) = 0, rlun = &(pblk)->luns[0]; \
56 (i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
57
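/*
 * Example (illustrative sketch only): typical use of the iterator above,
 * here taking every per-LUN write semaphore in turn. rlun and i are
 * ordinary locals supplied by the caller:
 *
 *	struct pblk_lun *rlun;
 *	int i;
 *
 *	pblk_for_each_lun(pblk, rlun, i)
 *		down(&rlun->wr_sem);
 */
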
58#define ERASE 2 /* READ = 0, WRITE = 1 */
59
60/* Static pool sizes */
61#define PBLK_GEN_WS_POOL_SIZE (2)
62
63enum {
64 /* IO Types */
65 PBLK_IOTYPE_USER = 1 << 0,
66 PBLK_IOTYPE_GC = 1 << 1,
67
68 /* Write buffer flags */
69 PBLK_FLUSH_ENTRY = 1 << 2,
70 PBLK_WRITTEN_DATA = 1 << 3,
71 PBLK_SUBMITTED_ENTRY = 1 << 4,
72 PBLK_WRITABLE_ENTRY = 1 << 5,
73};
74
75enum {
76 PBLK_BLK_ST_OPEN = 0x1,
77 PBLK_BLK_ST_CLOSED = 0x2,
78};
79
80struct pblk_sec_meta {
81 u64 reserved;
82 __le64 lba;
83};
84
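/*
 * Example (sketch): the out-of-band area of every exposed 4KB sector stores
 * the lba it maps, which the read path and recovery can compare against the
 * L2P table. Assuming meta_list points to one struct pblk_sec_meta per
 * sector of a request, the stored lba can be read back like this:
 */
static inline u64 pblk_example_oob_lba(void *meta_list, int sec)
{
	struct pblk_sec_meta *meta = meta_list;

	return le64_to_cpu(meta[sec].lba);
}
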
85/* The number of GC lists and the rate-limiter states go together. This way the
86 * rate-limiter can dictate how much GC is needed based on resource utilization.
87 */
88#define PBLK_GC_NR_LISTS 3
89
90enum {
91 PBLK_RL_HIGH = 1,
92 PBLK_RL_MID = 2,
93 PBLK_RL_LOW = 3,
94};
95
96#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
97
98/* write buffer completion context */
99struct pblk_c_ctx {
100 struct list_head list; /* Head for out-of-order completion */
101
102 unsigned long *lun_bitmap; /* Luns used on current request */
103 unsigned int sentry;
104 unsigned int nr_valid;
105 unsigned int nr_padded;
106};
107
108/* generic context */
109struct pblk_g_ctx {
110 void *private;
111};
112
113/* Pad context */
114struct pblk_pad_rq {
115 struct pblk *pblk;
116 struct completion wait;
117 struct kref ref;
118};
119
120/* Recovery context */
121struct pblk_rec_ctx {
122 struct pblk *pblk;
123 struct nvm_rq *rqd;
124 struct list_head failed;
125 struct work_struct ws_rec;
126};
127
128/* Write context */
129struct pblk_w_ctx {
130 struct bio_list bios; /* Original bios - used for completion
131 * in REQ_FUA, REQ_FLUSH case
132 */
 133 u64 lba; /* Logical addr. associated with entry */
 134 struct ppa_addr ppa; /* Physical addr. associated with entry */
135 int flags; /* Write context flags */
136};
137
138struct pblk_rb_entry {
139 struct ppa_addr cacheline; /* Cacheline for this entry */
140 void *data; /* Pointer to data on this entry */
141 struct pblk_w_ctx w_ctx; /* Context for this entry */
142 struct list_head index; /* List head to enable indexes */
143};
144
145#define EMPTY_ENTRY (~0U)
146
147struct pblk_rb_pages {
148 struct page *pages;
149 int order;
150 struct list_head list;
151};
152
153struct pblk_rb {
154 struct pblk_rb_entry *entries; /* Ring buffer entries */
155 unsigned int mem; /* Write offset - points to next
156 * writable entry in memory
157 */
158 unsigned int subm; /* Read offset - points to last entry
159 * that has been submitted to the media
160 * to be persisted
161 */
162 unsigned int sync; /* Synced - backpointer that signals
163 * the last submitted entry that has
164 * been successfully persisted to media
165 */
166 unsigned int sync_point; /* Sync point - last entry that must be
167 * flushed to the media. Used with
168 * REQ_FLUSH and REQ_FUA
169 */
170 unsigned int l2p_update; /* l2p update point - next entry for
171 * which l2p mapping will be updated to
172 * contain a device ppa address (instead
 173 * of a cacheline)
174 */
175 unsigned int nr_entries; /* Number of entries in write buffer -
176 * must be a power of two
177 */
178 unsigned int seg_size; /* Size of the data segments being
179 * stored on each entry. Typically this
180 * will be 4KB
181 */
182
183 struct list_head pages; /* List of data pages */
184
185 spinlock_t w_lock; /* Write lock */
186 spinlock_t s_lock; /* Sync lock */
187
188#ifdef CONFIG_NVM_DEBUG
189 atomic_t inflight_sync_point; /* Not served REQ_FLUSH | REQ_FUA */
190#endif
191};
192
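/*
 * Example (sketch): the pointers above only ever advance, so buffer
 * occupancy is the wrap-around distance between them. A hypothetical helper
 * giving an unlocked snapshot of the entries not yet persisted:
 */
static inline unsigned int pblk_rb_example_unsynced(struct pblk_rb *rb)
{
	/* nr_entries is a power of two, so masking handles the wrap */
	return (rb->mem - rb->sync) & (rb->nr_entries - 1);
}
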
193#define PBLK_RECOVERY_SECTORS 16
194
195struct pblk_lun {
196 struct ppa_addr bppa;
197
198 u8 *bb_list; /* Bad block list for LUN. Only used on
199 * bring up. Bad blocks are managed
200 * within lines on run-time.
201 */
202
203 struct semaphore wr_sem;
204};
205
206struct pblk_gc_rq {
207 struct pblk_line *line;
208 void *data;
 209 u64 lba_list[PBLK_MAX_REQ_ADDRS];
210 int nr_secs;
211 int secs_to_gc;
212 struct list_head list;
213};
214
215struct pblk_gc {
216 /* These states are not protected by a lock since (i) they are in the
217 * fast path, and (ii) they are not critical.
218 */
219 int gc_active;
220 int gc_enabled;
221 int gc_forced;
222
223 struct task_struct *gc_ts;
224 struct task_struct *gc_writer_ts;
225 struct task_struct *gc_reader_ts;
226
227 struct workqueue_struct *gc_line_reader_wq;
 228 struct workqueue_struct *gc_reader_wq;
 229
230 struct timer_list gc_timer;
231
232 struct semaphore gc_sem;
233 atomic_t inflight_gc;
 234 int w_entries;
 235
 236 struct list_head w_list;
 237 struct list_head r_list;
238
239 spinlock_t lock;
240 spinlock_t w_lock;
 241 spinlock_t r_lock;
242};
243
244struct pblk_rl {
245 unsigned int high; /* Upper threshold for rate limiter (free run -
 246 * user I/O rate limiter)
247 */
248 unsigned int low; /* Lower threshold for rate limiter (user I/O
249 * rate limiter - stall)
250 */
251 unsigned int high_pw; /* High rounded up as a power of 2 */
252
253#define PBLK_USER_HIGH_THRS 8 /* Begin write limit at 12% available blks */
254#define PBLK_USER_LOW_THRS 10 /* Aggressive GC at 10% available blocks */
255
256 int rb_windows_pw; /* Number of rate windows in the write buffer
257 * given as a power-of-2. This guarantees that
 258 * when user I/O is being rate limited, enough
 259 * space will be reserved for the GC to
260 * place its payload. A window is of
261 * pblk->max_write_pgs size, which in NVMe is
262 * 64, i.e., 256kb.
263 */
264 int rb_budget; /* Total number of entries available for I/O */
265 int rb_user_max; /* Max buffer entries available for user I/O */
266 int rb_gc_max; /* Max buffer entries available for GC I/O */
267 int rb_gc_rsv; /* Reserved buffer entries for GC I/O */
268 int rb_state; /* Rate-limiter current state */
 269 int rb_max_io; /* Maximum size for an I/O given the config */
270
271 atomic_t rb_user_cnt; /* User I/O buffer counter */
 272 atomic_t rb_gc_cnt; /* GC I/O buffer counter */
 273 atomic_t rb_space; /* Space limit in case of reaching capacity */
 274
275 int rsv_blocks; /* Reserved blocks for GC */
276
 277 int rb_user_active;
278 int rb_gc_active;
279
280 struct timer_list u_timer;
281
282 unsigned long long nr_secs;
283 unsigned long total_blocks;
284 atomic_t free_blocks;
285};
286
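/*
 * Example (sketch): one plausible way the high/low water marks above map to
 * the PBLK_RL_* states. The authoritative policy lives in the rate-limiter
 * implementation (pblk-rl.c); this only illustrates the intent.
 */
static inline int pblk_rl_example_state(struct pblk_rl *rl)
{
	unsigned int free_blocks = atomic_read(&rl->free_blocks);

	if (free_blocks >= rl->high)
		return PBLK_RL_HIGH;	/* user I/O runs freely */
	if (free_blocks >= rl->low)
		return PBLK_RL_MID;	/* user I/O throttled, GC running */
	return PBLK_RL_LOW;		/* GC takes priority */
}
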
287#define PBLK_LINE_EMPTY (~0U)
288
289enum {
290 /* Line Types */
291 PBLK_LINETYPE_FREE = 0,
292 PBLK_LINETYPE_LOG = 1,
293 PBLK_LINETYPE_DATA = 2,
294
295 /* Line state */
296 PBLK_LINESTATE_FREE = 10,
297 PBLK_LINESTATE_OPEN = 11,
298 PBLK_LINESTATE_CLOSED = 12,
299 PBLK_LINESTATE_GC = 13,
300 PBLK_LINESTATE_BAD = 14,
301 PBLK_LINESTATE_CORRUPT = 15,
302
303 /* GC group */
304 PBLK_LINEGC_NONE = 20,
305 PBLK_LINEGC_EMPTY = 21,
306 PBLK_LINEGC_LOW = 22,
307 PBLK_LINEGC_MID = 23,
308 PBLK_LINEGC_HIGH = 24,
309 PBLK_LINEGC_FULL = 25,
310};
311
312#define PBLK_MAGIC 0x70626c6b /*pblk*/
313#define SMETA_VERSION cpu_to_le16(1)
314
315struct line_header {
316 __le32 crc;
317 __le32 identifier; /* pblk identifier */
318 __u8 uuid[16]; /* instance uuid */
319 __le16 type; /* line type */
320 __le16 version; /* type version */
321 __le32 id; /* line id for current line */
322};
323
324struct line_smeta {
325 struct line_header header;
326
327 __le32 crc; /* Full structure including struct crc */
328 /* Previous line metadata */
329 __le32 prev_id; /* Line id for previous line */
330
331 /* Current line metadata */
332 __le64 seq_nr; /* Sequence number for current line */
333
334 /* Active writers */
335 __le32 window_wr_lun; /* Number of parallel LUNs to write */
336
337 __le32 rsvd[2];
338
339 __le64 lun_bitmap[];
340};
341
342/*
343 * Metadata layout in media:
344 * First sector:
345 * 1. struct line_emeta
346 * 2. bad block bitmap (u64 * window_wr_lun)
347 * Mid sectors (start at lbas_sector):
348 * 3. nr_lbas (u64) forming lba list
349 * Last sectors (start at vsc_sector):
350 * 4. u32 valid sector count (vsc) for all lines (~0U: free line)
351 */
352struct line_emeta {
353 struct line_header header;
354
355 __le32 crc; /* Full structure including struct crc */
356
357 /* Previous line metadata */
358 __le32 prev_id; /* Line id for prev line */
359
360 /* Current line metadata */
361 __le64 seq_nr; /* Sequence number for current line */
362
363 /* Active writers */
364 __le32 window_wr_lun; /* Number of parallel LUNs to write */
365
366 /* Bookkeeping for recovery */
367 __le32 next_id; /* Line id for next line */
368 __le64 nr_lbas; /* Number of lbas mapped in line */
369 __le64 nr_valid_lbas; /* Number of valid lbas mapped in line */
370 __le64 bb_bitmap[]; /* Updated bad block bitmap for line */
371};
372
373struct pblk_emeta {
374 struct line_emeta *buf; /* emeta buffer in media format */
375 int mem; /* Write offset - points to next
376 * writable entry in memory
377 */
378 atomic_t sync; /* Synced - backpointer that signals the
379 * last entry that has been successfully
380 * persisted to media
381 */
382 unsigned int nr_entries; /* Number of emeta entries */
383};
384
385struct pblk_smeta {
386 struct line_smeta *buf; /* smeta buffer in persistent format */
387};
388
389struct pblk_line {
390 struct pblk *pblk;
391 unsigned int id; /* Line number corresponds to the
392 * block line
393 */
394 unsigned int seq_nr; /* Unique line sequence number */
395
396 int state; /* PBLK_LINESTATE_X */
397 int type; /* PBLK_LINETYPE_X */
398 int gc_group; /* PBLK_LINEGC_X */
399 struct list_head list; /* Free, GC lists */
400
401 unsigned long *lun_bitmap; /* Bitmap for LUNs mapped in line */
402
403 struct pblk_smeta *smeta; /* Start metadata */
 404 struct pblk_emeta *emeta; /* End metadata */
405
 406 int meta_line; /* Metadata line id */
407 int meta_distance; /* Distance between data and metadata */
408
409 u64 smeta_ssec; /* Sector where smeta starts */
410 u64 emeta_ssec; /* Sector where emeta starts */
411
412 unsigned int sec_in_line; /* Number of usable secs in line */
413
 414 atomic_t blk_in_line; /* Number of good blocks in line */
415 unsigned long *blk_bitmap; /* Bitmap for valid/invalid blocks */
416 unsigned long *erase_bitmap; /* Bitmap for erased blocks */
417
418 unsigned long *map_bitmap; /* Bitmap for mapped sectors in line */
419 unsigned long *invalid_bitmap; /* Bitmap for invalid sectors in line */
420
 421 atomic_t left_eblks; /* Blocks left for erasing */
422 atomic_t left_seblks; /* Blocks left for sync erasing */
423
424 int left_msecs; /* Sectors left for mapping */
 425 unsigned int cur_sec; /* Sector map pointer */
426 unsigned int nr_valid_lbas; /* Number of valid lbas in line */
427
428 __le32 *vsc; /* Valid sector count in line */
a4bd217b
JG
429
430 struct kref ref; /* Write buffer L2P references */
431
432 spinlock_t lock; /* Necessary for invalid_bitmap only */
433};
434
435#define PBLK_DATA_LINES 4
 436
437enum {
438 PBLK_KMALLOC_META = 1,
439 PBLK_VMALLOC_META = 2,
440};
441
442enum {
443 PBLK_EMETA_TYPE_HEADER = 1, /* struct line_emeta first sector */
444 PBLK_EMETA_TYPE_LLBA = 2, /* lba list - type: __le64 */
445 PBLK_EMETA_TYPE_VSC = 3, /* vsc list - type: __le32 */
446};
447
448struct pblk_line_mgmt {
449 int nr_lines; /* Total number of full lines */
450 int nr_free_lines; /* Number of full lines in free list */
451
452 /* Free lists - use free_lock */
453 struct list_head free_list; /* Full lines ready to use */
454 struct list_head corrupt_list; /* Full lines corrupted */
455 struct list_head bad_list; /* Full lines bad */
456
457 /* GC lists - use gc_lock */
 458 struct list_head *gc_lists[PBLK_GC_NR_LISTS];
459 struct list_head gc_high_list; /* Full lines ready to GC, high isc */
460 struct list_head gc_mid_list; /* Full lines ready to GC, mid isc */
461 struct list_head gc_low_list; /* Full lines ready to GC, low isc */
462
463 struct list_head gc_full_list; /* Full lines ready to GC, no valid */
 464 struct list_head gc_empty_list; /* Full lines closed, all valid */
465
466 struct pblk_line *log_line; /* Current FTL log line */
467 struct pblk_line *data_line; /* Current data line */
468 struct pblk_line *log_next; /* Next FTL log line */
469 struct pblk_line *data_next; /* Next data line */
470
471 struct list_head emeta_list; /* Lines queued to schedule emeta */
472
473 __le32 *vsc_list; /* Valid sector counts for all lines */
474
 475 /* Metadata allocation type: VMALLOC | KMALLOC */
476 int emeta_alloc_type;
477
478 /* Pre-allocated metadata for data lines */
479 struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
480 struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
481 unsigned long meta_bitmap;
482
483 /* Helpers for fast bitmap calculations */
484 unsigned long *bb_template;
485 unsigned long *bb_aux;
486
487 unsigned long d_seq_nr; /* Data line unique sequence number */
488 unsigned long l_seq_nr; /* Log line unique sequence number */
489
490 spinlock_t free_lock;
 491 spinlock_t close_lock;
492 spinlock_t gc_lock;
493};
494
495struct pblk_line_meta {
496 unsigned int smeta_len; /* Total length for smeta */
497 unsigned int smeta_sec; /* Sectors needed for smeta */
498
499 unsigned int emeta_len[4]; /* Lengths for emeta:
500 * [0]: Total length
501 * [1]: struct line_emeta length
502 * [2]: L2P portion length
503 * [3]: vsc list length
504 */
505 unsigned int emeta_sec[4]; /* Sectors needed for emeta. Same layout
506 * as emeta_len
507 */
508
 509 unsigned int emeta_bb; /* Boundary for bb that affects emeta */
510
511 unsigned int vsc_list_len; /* Length for vsc list */
512 unsigned int sec_bitmap_len; /* Length for sector bitmap in line */
513 unsigned int blk_bitmap_len; /* Length for block bitmap in line */
514 unsigned int lun_bitmap_len; /* Length for lun bitmap in line */
515
516 unsigned int blk_per_line; /* Number of blocks in a full line */
517 unsigned int sec_per_line; /* Number of sectors in a line */
 518 unsigned int dsec_per_line; /* Number of data sectors in a line */
519 unsigned int min_blk_line; /* Min. number of good blocks in line */
520
521 unsigned int mid_thrs; /* Threshold for GC mid list */
522 unsigned int high_thrs; /* Threshold for GC high list */
523
524 unsigned int meta_distance; /* Distance between data and metadata */
525};
526
527struct pblk_addr_format {
528 u64 ch_mask;
529 u64 lun_mask;
530 u64 pln_mask;
531 u64 blk_mask;
532 u64 pg_mask;
533 u64 sec_mask;
534 u8 ch_offset;
535 u8 lun_offset;
536 u8 pln_offset;
537 u8 blk_offset;
538 u8 pg_offset;
539 u8 sec_offset;
540};
541
542enum {
543 PBLK_STATE_RUNNING = 0,
544 PBLK_STATE_STOPPING = 1,
545 PBLK_STATE_RECOVERING = 2,
546 PBLK_STATE_STOPPED = 3,
547};
548
549struct pblk {
550 struct nvm_tgt_dev *dev;
551 struct gendisk *disk;
552
553 struct kobject kobj;
554
555 struct pblk_lun *luns;
556
557 struct pblk_line *lines; /* Line array */
558 struct pblk_line_mgmt l_mg; /* Line management */
559 struct pblk_line_meta lm; /* Line metadata */
560
561 int ppaf_bitsize;
562 struct pblk_addr_format ppaf;
563
564 struct pblk_rb rwb;
565
566 int state; /* pblk line state */
567
 568 int min_write_pgs; /* Minimum number of pages required by controller */
 569 int max_write_pgs; /* Maximum number of pages supported by controller */
570 int pgs_in_buffer; /* Number of pages that need to be held in buffer to
571 * guarantee successful reads.
572 */
573
574 sector_t capacity; /* Device capacity when bad blocks are subtracted */
575 int over_pct; /* Percentage of device used for over-provisioning */
576
577 /* pblk provisioning values. Used by rate limiter */
578 struct pblk_rl rl;
579
 580 int sec_per_write;
581
582 unsigned char instance_uuid[16];
583#ifdef CONFIG_NVM_DEBUG
584 /* All debug counters apply to 4kb sector I/Os */
585 atomic_long_t inflight_writes; /* Inflight writes (user and gc) */
586 atomic_long_t padded_writes; /* Sectors padded due to flush/fua */
587 atomic_long_t padded_wb; /* Sectors padded in write buffer */
588 atomic_long_t nr_flush; /* Number of flush/fua I/O */
589 atomic_long_t req_writes; /* Sectors stored on write buffer */
590 atomic_long_t sub_writes; /* Sectors submitted from buffer */
591 atomic_long_t sync_writes; /* Sectors synced to media */
 592 atomic_long_t inflight_reads; /* Inflight sector read requests */
 593 atomic_long_t cache_reads; /* Read requests that hit the cache */
594 atomic_long_t sync_reads; /* Completed sector read requests */
595 atomic_long_t recov_writes; /* Sectors submitted from recovery */
596 atomic_long_t recov_gc_writes; /* Sectors submitted from write GC */
597 atomic_long_t recov_gc_reads; /* Sectors submitted from read GC */
598#endif
599
600 spinlock_t lock;
601
602 atomic_long_t read_failed;
603 atomic_long_t read_empty;
604 atomic_long_t read_high_ecc;
605 atomic_long_t read_failed_gc;
606 atomic_long_t write_failed;
607 atomic_long_t erase_failed;
608
609 atomic_t inflight_io; /* General inflight I/O counter */
610
611 struct task_struct *writer_ts;
612
613 /* Simple translation map of logical addresses to physical addresses.
 614 * The logical addresses are known by the host system, while the physical
615 * addresses are used when writing to the disk block device.
616 */
617 unsigned char *trans_map;
618 spinlock_t trans_lock;
619
620 struct list_head compl_list;
621
 622 mempool_t *page_bio_pool;
 623 mempool_t *gen_ws_pool;
 624 mempool_t *rec_pool;
 625 mempool_t *r_rq_pool;
 626 mempool_t *w_rq_pool;
 627 mempool_t *e_rq_pool;
 628
629 struct workqueue_struct *close_wq;
630 struct workqueue_struct *bb_wq;
631
632 struct timer_list wtimer;
633
634 struct pblk_gc gc;
635};
636
637struct pblk_line_ws {
638 struct pblk *pblk;
639 struct pblk_line *line;
640 void *priv;
641 struct work_struct ws;
642};
643
644#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
645#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
646
647/*
648 * pblk ring buffer operations
649 */
650int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
651 unsigned int power_size, unsigned int power_seg_sz);
652unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
653void *pblk_rb_entries_ref(struct pblk_rb *rb);
654int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
655 unsigned int nr_entries, unsigned int *pos);
656int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
657 unsigned int *pos);
658void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
659 struct pblk_w_ctx w_ctx, unsigned int pos);
660void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
661 struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
662 unsigned int pos);
663struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
664void pblk_rb_flush(struct pblk_rb *rb);
665
666void pblk_rb_sync_l2p(struct pblk_rb *rb);
667unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
668 struct bio *bio, unsigned int pos,
669 unsigned int nr_entries, unsigned int count);
a4bd217b
JG
670unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
671 struct list_head *list,
672 unsigned int max);
673int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
 674 struct ppa_addr ppa, int bio_iter, bool advanced_bio);
675unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
676
677unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
678unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
679struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
680 struct ppa_addr *ppa);
681void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
682unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);
683
684unsigned int pblk_rb_read_count(struct pblk_rb *rb);
685unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
686unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
687
688int pblk_rb_tear_down_check(struct pblk_rb *rb);
689int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
690void pblk_rb_data_free(struct pblk_rb *rb);
691ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
692
693/*
694 * pblk core
695 */
696struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw);
697void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
698int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
699 struct pblk_c_ctx *c_ctx);
700void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw);
701void pblk_wait_for_meta(struct pblk *pblk);
702struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba);
703void pblk_discard(struct pblk *pblk, struct bio *bio);
704void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
705void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
706int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
707int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
708struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
709 unsigned int nr_secs, unsigned int len,
 710 int alloc_type, gfp_t gfp_mask);
711struct pblk_line *pblk_line_get(struct pblk *pblk);
712struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
713void pblk_line_replace_data(struct pblk *pblk);
714int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
715void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
716struct pblk_line *pblk_line_get_data(struct pblk *pblk);
717struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
718int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
719int pblk_line_is_full(struct pblk_line *line);
720void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
721void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
722void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
723void pblk_line_close_meta_sync(struct pblk *pblk);
724void pblk_line_close_ws(struct work_struct *work);
725void pblk_pipeline_stop(struct pblk *pblk);
726void pblk_line_mark_bb(struct work_struct *work);
727void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
728 void (*work)(struct work_struct *), gfp_t gfp_mask,
729 struct workqueue_struct *wq);
730u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
731int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
732int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
733 void *emeta_buf);
734int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
735void pblk_line_put(struct kref *ref);
736struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
737u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
738void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
739u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
740u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
741int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
742 unsigned long secs_to_flush);
743void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
744void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
745 unsigned long *lun_bitmap);
746void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
747void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
748 unsigned long *lun_bitmap);
749void pblk_end_bio_sync(struct bio *bio);
750void pblk_end_io_sync(struct nvm_rq *rqd);
751int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
752 int nr_pages);
753void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
754 int nr_pages);
755void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
756void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
757 u64 paddr);
758void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
759void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
760 struct ppa_addr ppa);
761void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
762 struct ppa_addr ppa, struct ppa_addr entry_line);
763int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
764 struct pblk_line *gc_line);
765void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
766 u64 *lba_list, int nr_secs);
767void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
768 sector_t blba, int nr_secs);
769
770/*
771 * pblk user I/O write path
772 */
773int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
774 unsigned long flags);
775int pblk_write_gc_to_cache(struct pblk *pblk, void *data, u64 *lba_list,
776 unsigned int nr_entries, unsigned int nr_rec_entries,
777 struct pblk_line *gc_line, unsigned long flags);
778
779/*
780 * pblk map
781 */
782void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
783 unsigned int sentry, unsigned long *lun_bitmap,
784 unsigned int valid_secs, struct ppa_addr *erase_ppa);
785void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
786 unsigned long *lun_bitmap, unsigned int valid_secs,
787 unsigned int off);
788
789/*
790 * pblk write thread
791 */
792int pblk_write_ts(void *data);
793void pblk_write_timer_fn(unsigned long data);
794void pblk_write_should_kick(struct pblk *pblk);
795
796/*
797 * pblk read path
798 */
799extern struct bio_set *pblk_bio_set;
800int pblk_submit_read(struct pblk *pblk, struct bio *bio);
801int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
802 unsigned int nr_secs, unsigned int *secs_to_gc,
803 struct pblk_line *line);
804/*
805 * pblk recovery
806 */
807void pblk_submit_rec(struct work_struct *work);
808struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
809int pblk_recov_pad(struct pblk *pblk);
810__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
811int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
812 struct pblk_rec_ctx *recovery, u64 *comp_bits,
813 unsigned int comp);
814
815/*
816 * pblk gc
817 */
818#define PBLK_GC_MAX_READERS 8 /* Max number of outstanding GC reader jobs */
819#define PBLK_GC_RQ_QD 128 /* Queue depth for inflight GC requests */
820#define PBLK_GC_L_QD 4 /* Queue depth for inflight GC lines */
821#define PBLK_GC_RSV_LINE 1 /* Reserved lines for GC */
822
823int pblk_gc_init(struct pblk *pblk);
824void pblk_gc_exit(struct pblk *pblk);
825void pblk_gc_should_start(struct pblk *pblk);
826void pblk_gc_should_stop(struct pblk *pblk);
827void pblk_gc_kick(struct pblk *pblk);
828void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
829 int *gc_active);
830int pblk_gc_sysfs_force(struct pblk *pblk, int force);
831
832/*
833 * pblk rate limiter
834 */
835void pblk_rl_init(struct pblk_rl *rl, int budget);
836void pblk_rl_free(struct pblk_rl *rl);
837int pblk_rl_high_thrs(struct pblk_rl *rl);
838int pblk_rl_low_thrs(struct pblk_rl *rl);
839unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
840int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
841void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
842void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
843int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
844void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
845void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
846int pblk_rl_sysfs_rate_show(struct pblk_rl *rl);
847int pblk_rl_max_io(struct pblk_rl *rl);
848void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
849void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
850void pblk_rl_set_space_limit(struct pblk_rl *rl, int entries_left);
851int pblk_rl_is_limit(struct pblk_rl *rl);
852
853/*
854 * pblk sysfs
855 */
856int pblk_sysfs_init(struct gendisk *tdisk);
857void pblk_sysfs_exit(struct gendisk *tdisk);
858
859static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
860{
861 if (type == PBLK_KMALLOC_META)
862 return kmalloc(size, flags);
863 return vmalloc(size);
864}
865
866static inline void pblk_mfree(void *ptr, int type)
867{
868 if (type == PBLK_KMALLOC_META)
869 kfree(ptr);
870 else
871 vfree(ptr);
872}
873
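/*
 * Example (sketch): pblk_malloc() and pblk_mfree() must be paired with the
 * same allocation type, typically the one recorded at init time for line
 * metadata (kmalloc for small buffers, vmalloc for large ones):
 *
 *	void *buf = pblk_malloc(size, l_mg->emeta_alloc_type, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	pblk_mfree(buf, l_mg->emeta_alloc_type);
 */
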
874static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
875{
876 return c_ctx - sizeof(struct nvm_rq);
877}
878
879static inline void *emeta_to_bb(struct line_emeta *emeta)
880{
881 return emeta->bb_bitmap;
882}
883
884static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
885{
886 return ((void *)emeta + pblk->lm.emeta_len[1]);
887}
888
889static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
890{
 891 return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
892}
893
894static inline int pblk_line_vsc(struct pblk_line *line)
895{
896 int vsc;
897
898 spin_lock(&line->lock);
899 vsc = le32_to_cpu(*line->vsc);
900 spin_unlock(&line->lock);
901
902 return vsc;
903}
904
905#define NVM_MEM_PAGE_WRITE (8)
906
907static inline int pblk_pad_distance(struct pblk *pblk)
908{
909 struct nvm_tgt_dev *dev = pblk->dev;
910 struct nvm_geo *geo = &dev->geo;
911
912 return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
913}
914
915static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
916{
917 return p.g.blk;
918}
919
920static inline int pblk_tgt_ppa_to_line(struct ppa_addr p)
921{
922 return p.g.blk;
923}
924
925static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
926{
927 return p.g.lun * geo->nr_chnls + p.g.ch;
928}
929
930/* A block within a line corresponds to the lun */
931static inline int pblk_dev_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
932{
933 return p.g.lun * geo->nr_chnls + p.g.ch;
934}
935
936static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
937{
938 struct ppa_addr ppa64;
939
940 ppa64.ppa = 0;
941
942 if (ppa32 == -1) {
943 ppa64.ppa = ADDR_EMPTY;
944 } else if (ppa32 & (1U << 31)) {
945 ppa64.c.line = ppa32 & ((~0U) >> 1);
946 ppa64.c.is_cached = 1;
947 } else {
948 ppa64.g.blk = (ppa32 & pblk->ppaf.blk_mask) >>
949 pblk->ppaf.blk_offset;
950 ppa64.g.pg = (ppa32 & pblk->ppaf.pg_mask) >>
951 pblk->ppaf.pg_offset;
952 ppa64.g.lun = (ppa32 & pblk->ppaf.lun_mask) >>
953 pblk->ppaf.lun_offset;
954 ppa64.g.ch = (ppa32 & pblk->ppaf.ch_mask) >>
955 pblk->ppaf.ch_offset;
956 ppa64.g.pl = (ppa32 & pblk->ppaf.pln_mask) >>
957 pblk->ppaf.pln_offset;
958 ppa64.g.sec = (ppa32 & pblk->ppaf.sec_mask) >>
959 pblk->ppaf.sec_offset;
960 }
961
962 return ppa64;
963}
964
965static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
966 sector_t lba)
967{
968 struct ppa_addr ppa;
969
970 if (pblk->ppaf_bitsize < 32) {
971 u32 *map = (u32 *)pblk->trans_map;
972
973 ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
974 } else {
975 struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
976
977 ppa = map[lba];
978 }
979
980 return ppa;
981}
982
983static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
984{
985 u32 ppa32 = 0;
986
987 if (ppa64.ppa == ADDR_EMPTY) {
988 ppa32 = ~0U;
989 } else if (ppa64.c.is_cached) {
990 ppa32 |= ppa64.c.line;
991 ppa32 |= 1U << 31;
992 } else {
993 ppa32 |= ppa64.g.blk << pblk->ppaf.blk_offset;
994 ppa32 |= ppa64.g.pg << pblk->ppaf.pg_offset;
995 ppa32 |= ppa64.g.lun << pblk->ppaf.lun_offset;
996 ppa32 |= ppa64.g.ch << pblk->ppaf.ch_offset;
997 ppa32 |= ppa64.g.pl << pblk->ppaf.pln_offset;
998 ppa32 |= ppa64.g.sec << pblk->ppaf.sec_offset;
999 }
1000
1001 return ppa32;
1002}
1003
1004static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
1005 struct ppa_addr ppa)
1006{
1007 if (pblk->ppaf_bitsize < 32) {
1008 u32 *map = (u32 *)pblk->trans_map;
1009
1010 map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
1011 } else {
1012 u64 *map = (u64 *)pblk->trans_map;
1013
1014 map[lba] = ppa.ppa;
1015 }
1016}
1017
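/*
 * Example (sketch): the translation map itself is not synchronized, so
 * callers are expected to hold pblk->trans_lock around lookups and updates.
 * A hypothetical helper showing the locking pattern:
 */
static inline struct ppa_addr pblk_example_l2p_get(struct pblk *pblk,
						   sector_t lba)
{
	struct ppa_addr ppa;

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	return ppa;
}
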
1018static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
1019 struct ppa_addr p)
1020{
1021 u64 paddr;
1022
1023 paddr = 0;
1024 paddr |= (u64)p.g.pg << pblk->ppaf.pg_offset;
1025 paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
1026 paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
1027 paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
1028 paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
1029
1030 return paddr;
1031}
1032
1033static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
1034{
1035 return (ppa_addr.ppa == ADDR_EMPTY);
1036}
1037
1038static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
1039{
1040 ppa_addr->ppa = ADDR_EMPTY;
1041}
1042
1043static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
1044{
1045 if (lppa.ppa == rppa.ppa)
1046 return true;
1047
1048 return false;
1049}
1050
1051static inline int pblk_addr_in_cache(struct ppa_addr ppa)
1052{
1053 return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
1054}
1055
1056static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
1057{
1058 return ppa.c.line;
1059}
1060
1061static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
1062{
1063 struct ppa_addr p;
1064
1065 p.c.line = addr;
1066 p.c.is_cached = 1;
1067
1068 return p;
1069}
1070
1071static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
1072 u64 line_id)
1073{
1074 struct ppa_addr ppa;
1075
1076 ppa.ppa = 0;
1077 ppa.g.blk = line_id;
1078 ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
1079 ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
1080 ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
1081 ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
1082 ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
1083
1084 return ppa;
1085}
1086
1087static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
1088 u64 line_id)
1089{
1090 struct ppa_addr ppa;
1091
1092 ppa = addr_to_gen_ppa(pblk, paddr, line_id);
1093
1094 return ppa;
1095}
1096
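/*
 * Example (sketch): a line-local address (paddr) and a device ppa are two
 * views of the same sector, so for an in-range paddr the conversions above
 * and pblk_dev_ppa_to_line_addr() are expected to round-trip:
 */
static inline bool pblk_example_paddr_roundtrip(struct pblk *pblk, u64 paddr,
						u64 line_id)
{
	struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);

	return pblk_dev_ppa_to_line_addr(pblk, ppa) == paddr &&
	       pblk_dev_ppa_to_line(ppa) == line_id;
}
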
1097static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
 1098 struct line_header *header)
1099{
1100 u32 crc = ~(u32)0;
1101
 1102 crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
1103 sizeof(struct line_header) - sizeof(crc));
1104
1105 return crc;
1106}
1107
1108static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
1109 struct line_smeta *smeta)
1110{
1111 struct pblk_line_meta *lm = &pblk->lm;
1112 u32 crc = ~(u32)0;
1113
1114 crc = crc32_le(crc, (unsigned char *)smeta +
1115 sizeof(struct line_header) + sizeof(crc),
1116 lm->smeta_len -
1117 sizeof(struct line_header) - sizeof(crc));
1118
1119 return crc;
1120}
1121
1122static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
1123 struct line_emeta *emeta)
1124{
1125 struct pblk_line_meta *lm = &pblk->lm;
1126 u32 crc = ~(u32)0;
1127
1128 crc = crc32_le(crc, (unsigned char *)emeta +
1129 sizeof(struct line_header) + sizeof(crc),
 1130 lm->emeta_len[0] -
1131 sizeof(struct line_header) - sizeof(crc));
1132
1133 return crc;
1134}
1135
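/*
 * Example (sketch): verifying a header read back from media by recomputing
 * the CRC over the same region it was generated from and comparing it with
 * the stored little-endian value:
 */
static inline bool pblk_example_header_crc_ok(struct pblk *pblk,
					      struct line_header *header)
{
	return le32_to_cpu(header->crc) ==
				pblk_calc_meta_header_crc(pblk, header);
}
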
1136static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
1137{
1138 struct nvm_tgt_dev *dev = pblk->dev;
1139 struct nvm_geo *geo = &dev->geo;
1140 int flags;
1141
1142 flags = geo->plane_mode >> 1;
1143
1144 if (type == WRITE)
1145 flags |= NVM_IO_SCRAMBLE_ENABLE;
1146
1147 return flags;
1148}
1149
1150enum {
1151 PBLK_READ_RANDOM = 0,
1152 PBLK_READ_SEQUENTIAL = 1,
1153};
1154
1155static inline int pblk_set_read_mode(struct pblk *pblk, int type)
1156{
1157 struct nvm_tgt_dev *dev = pblk->dev;
1158 struct nvm_geo *geo = &dev->geo;
1159 int flags;
1160
1161 flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
1162 if (type == PBLK_READ_SEQUENTIAL)
1163 flags |= geo->plane_mode >> 1;
1164
1165 return flags;
1166}
1167
1168static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
1169{
 1170 return !(nr_secs % pblk->min_write_pgs);
1171}
1172
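/*
 * Example (sketch): if min_write_pgs were 8, a 24-sector request would be
 * considered aligned by pblk_io_aligned() (24 % 8 == 0) and could be issued
 * with the sequential read flags, while a 20-sector request would not.
 */
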
1173#ifdef CONFIG_NVM_DEBUG
1174static inline void print_ppa(struct ppa_addr *p, char *msg, int error)
1175{
1176 if (p->c.is_cached) {
1177 pr_err("ppa: (%s: %x) cache line: %llu\n",
1178 msg, error, (u64)p->c.line);
1179 } else {
1180 pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
1181 msg, error,
1182 p->g.ch, p->g.lun, p->g.blk,
1183 p->g.pg, p->g.pl, p->g.sec);
1184 }
1185}
1186
1187static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
1188 int error)
1189{
1190 int bit = -1;
1191
1192 if (rqd->nr_ppas == 1) {
1193 print_ppa(&rqd->ppa_addr, "rqd", error);
1194 return;
1195 }
1196
1197 while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
1198 bit + 1)) < rqd->nr_ppas) {
1199 print_ppa(&rqd->ppa_list[bit], "rqd", error);
1200 }
1201
1202 pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
1203}
1204#endif
1205
1206static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
1207 struct ppa_addr *ppas, int nr_ppas)
1208{
1209 struct nvm_geo *geo = &tgt_dev->geo;
1210 struct ppa_addr *ppa;
1211 int i;
1212
1213 for (i = 0; i < nr_ppas; i++) {
1214 ppa = &ppas[i];
1215
1216 if (!ppa->c.is_cached &&
1217 ppa->g.ch < geo->nr_chnls &&
1218 ppa->g.lun < geo->luns_per_chnl &&
1219 ppa->g.pl < geo->nr_planes &&
1220 ppa->g.blk < geo->blks_per_lun &&
1221 ppa->g.pg < geo->pgs_per_blk &&
1222 ppa->g.sec < geo->sec_per_pg)
1223 continue;
1224
1225#ifdef CONFIG_NVM_DEBUG
1226 print_ppa(ppa, "boundary", i);
1227#endif
1228 return 1;
1229 }
1230 return 0;
1231}
1232
1233static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
1234{
1235 struct pblk_line_meta *lm = &pblk->lm;
1236
1237 if (paddr > lm->sec_per_line)
1238 return 1;
1239
1240 return 0;
1241}
1242
1243static inline unsigned int pblk_get_bi_idx(struct bio *bio)
1244{
1245 return bio->bi_iter.bi_idx;
1246}
1247
1248static inline sector_t pblk_get_lba(struct bio *bio)
1249{
1250 return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
1251}
1252
1253static inline unsigned int pblk_get_secs(struct bio *bio)
1254{
1255 return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
1256}
1257
1258static inline sector_t pblk_get_sector(sector_t lba)
1259{
1260 return lba * NR_PHY_IN_LOG;
1261}
1262
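/*
 * Example (sketch): bios address 512 byte sectors while pblk exposes 4KB
 * sectors, so NR_PHY_IN_LOG is 4096 / 512 = 8. A bio starting at sector 80
 * with bi_size of 16384 bytes therefore maps to lba 10 and spans 4 pblk
 * sectors, and pblk_get_sector(10) maps back to sector 80.
 */
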
1263static inline void pblk_setup_uuid(struct pblk *pblk)
1264{
1265 uuid_le uuid;
1266
1267 uuid_le_gen(&uuid);
1268 memcpy(pblk->instance_uuid, uuid.b, 16);
1269}
1270#endif /* PBLK_H_ */