/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Matias Bjorling <matias@cnexlabs.com>
 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Physical Block-device target for Open-channel SSDs.
 *
 */

#ifndef PBLK_H_
#define PBLK_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/uuid.h>

#include <linux/lightnvm.h>

/* Run only GC if less than 1/X blocks are free */
#define GC_LIMIT_INVERSE 5
#define GC_TIME_MSECS 1000

#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)
#define PBLK_MAX_REQ_ADDRS (64)
#define PBLK_MAX_REQ_ADDRS_PW (6)

#define PBLK_NR_CLOSE_JOBS (4)

#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)

#define PBLK_COMMAND_TIMEOUT_MS 30000

/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)

#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)

/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)

enum {
	PBLK_READ = READ,
	PBLK_WRITE = WRITE,	/* Write from write buffer */
	PBLK_WRITE_INT,		/* Internal write - no write buffer */
	PBLK_ERASE,
};

enum {
	/* IO Types */
	PBLK_IOTYPE_USER = 1 << 0,
	PBLK_IOTYPE_GC = 1 << 1,

	/* Write buffer flags */
	PBLK_FLUSH_ENTRY = 1 << 2,
	PBLK_WRITTEN_DATA = 1 << 3,
	PBLK_SUBMITTED_ENTRY = 1 << 4,
	PBLK_WRITABLE_ENTRY = 1 << 5,
};

enum {
	PBLK_BLK_ST_OPEN = 0x1,
	PBLK_BLK_ST_CLOSED = 0x2,
};

struct pblk_sec_meta {
	u64 reserved;
	__le64 lba;
};

/* The number of GC lists and the rate-limiter states go together. This way the
 * rate-limiter can dictate how much GC is needed based on resource utilization.
 */
#define PBLK_GC_NR_LISTS 3

enum {
	PBLK_RL_HIGH = 1,
	PBLK_RL_MID = 2,
	PBLK_RL_LOW = 3,
};
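
/*
 * Illustrative mapping (an assumption drawn from the comment above, not a
 * statement of the implementation): with PBLK_GC_NR_LISTS == 3, each
 * rate-limiter state is expected to pair with one GC list further down in
 * struct pblk_line_mgmt, e.g. PBLK_RL_HIGH -> gc_high_list,
 * PBLK_RL_MID -> gc_mid_list and PBLK_RL_LOW -> gc_low_list, so the current
 * rate-limiter state can drive how aggressively lines are picked for
 * garbage collection.
 */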

#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
#define pblk_dma_ppa_size (sizeof(u64) * PBLK_MAX_REQ_ADDRS)

/* write buffer completion context */
struct pblk_c_ctx {
	struct list_head list;		/* Head for out-of-order completion */

	unsigned long *lun_bitmap;	/* LUNs used on current request */
	unsigned int sentry;
	unsigned int nr_valid;
	unsigned int nr_padded;
};

/* read context */
struct pblk_g_ctx {
	void *private;
	u64 lba;
};

/* Pad context */
struct pblk_pad_rq {
	struct pblk *pblk;
	struct completion wait;
	struct kref ref;
};

/* Recovery context */
struct pblk_rec_ctx {
	struct pblk *pblk;
	struct nvm_rq *rqd;
	struct list_head failed;
	struct work_struct ws_rec;
};

/* Write context */
struct pblk_w_ctx {
	struct bio_list bios;		/* Original bios - used for completion
					 * in REQ_FUA, REQ_FLUSH case
					 */
	u64 lba;			/* Logical addr. associated with entry */
	struct ppa_addr ppa;		/* Physical addr. associated with entry */
	int flags;			/* Write context flags */
};

struct pblk_rb_entry {
	struct ppa_addr cacheline;	/* Cacheline for this entry */
	void *data;			/* Pointer to data on this entry */
	struct pblk_w_ctx w_ctx;	/* Context for this entry */
	struct list_head index;		/* List head to enable indexes */
};

#define EMPTY_ENTRY (~0U)

struct pblk_rb_pages {
	struct page *pages;
	int order;
	struct list_head list;
};

struct pblk_rb {
	struct pblk_rb_entry *entries;	/* Ring buffer entries */
	unsigned int mem;		/* Write offset - points to next
					 * writable entry in memory
					 */
	unsigned int subm;		/* Read offset - points to last entry
					 * that has been submitted to the media
					 * to be persisted
					 */
	unsigned int sync;		/* Synced - backpointer that signals
					 * the last submitted entry that has
					 * been successfully persisted to media
					 */
	unsigned int sync_point;	/* Sync point - last entry that must be
					 * flushed to the media. Used with
					 * REQ_FLUSH and REQ_FUA
					 */
	unsigned int l2p_update;	/* l2p update point - next entry for
					 * which l2p mapping will be updated to
					 * contain a device ppa address (instead
					 * of a cacheline)
					 */
	unsigned int nr_entries;	/* Number of entries in write buffer -
					 * must be a power of two
					 */
	unsigned int seg_size;		/* Size of the data segments being
					 * stored on each entry. Typically this
					 * will be 4KB
					 */

	struct list_head pages;		/* List of data pages */

	spinlock_t w_lock;		/* Write lock */
	spinlock_t s_lock;		/* Sync lock */

#ifdef CONFIG_NVM_DEBUG
	atomic_t inflight_sync_point;	/* Unserved REQ_FLUSH | REQ_FUA requests */
#endif
};
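
/*
 * Reading the offsets above as a sketch (informal, derived from the field
 * comments rather than the code): entries travel through the buffer so that
 *
 *	l2p_update <= sync <= subm <= mem	(modulo nr_entries)
 *
 * i.e. data is written at 'mem', submitted to the media at 'subm',
 * acknowledged as persisted at 'sync', and finally has its L2P entry
 * switched from a cacheline to a device ppa at 'l2p_update'.
 */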

#define PBLK_RECOVERY_SECTORS 16

struct pblk_lun {
	struct ppa_addr bppa;

	u8 *bb_list;			/* Bad block list for LUN. Only used on
					 * bring up. Bad blocks are managed
					 * within lines at run-time.
					 */

	struct semaphore wr_sem;
};

struct pblk_gc_rq {
	struct pblk_line *line;
	void *data;
	u64 paddr_list[PBLK_MAX_REQ_ADDRS];
	u64 lba_list[PBLK_MAX_REQ_ADDRS];
	int nr_secs;
	int secs_to_gc;
	struct list_head list;
};

struct pblk_gc {
	/* These states are not protected by a lock since (i) they are in the
	 * fast path, and (ii) they are not critical.
	 */
	int gc_active;
	int gc_enabled;
	int gc_forced;

	struct task_struct *gc_ts;
	struct task_struct *gc_writer_ts;
	struct task_struct *gc_reader_ts;

	struct workqueue_struct *gc_line_reader_wq;
	struct workqueue_struct *gc_reader_wq;

	struct timer_list gc_timer;

	struct semaphore gc_sem;
	atomic_t read_inflight_gc;	/* Number of lines with inflight GC reads */
	atomic_t pipeline_gc;		/* Number of lines in the GC pipeline -
					 * started reads to finished writes
					 */
	int w_entries;

	struct list_head w_list;
	struct list_head r_list;

	spinlock_t lock;
	spinlock_t w_lock;
	spinlock_t r_lock;
};

struct pblk_rl {
	unsigned int high;	/* Upper threshold for rate limiter (free run -
				 * user I/O rate limiter)
				 */
	unsigned int low;	/* Lower threshold for rate limiter (user I/O
				 * rate limiter - stall)
				 */
	unsigned int high_pw;	/* High rounded up as a power of 2 */

#define PBLK_USER_HIGH_THRS 8	/* Begin write limit at 12% available blks */
#define PBLK_USER_LOW_THRS 10	/* Aggressive GC at 10% available blocks */

	int rb_windows_pw;	/* Number of rate windows in the write buffer
				 * given as a power-of-2. This guarantees that
				 * when user I/O is being rate limited, there
				 * will be enough space reserved for the GC to
				 * place its payload. A window is of
				 * pblk->max_write_pgs size, which in NVMe is
				 * 64, i.e., 256KB.
				 */
	int rb_budget;		/* Total number of entries available for I/O */
	int rb_user_max;	/* Max buffer entries available for user I/O */
	int rb_gc_max;		/* Max buffer entries available for GC I/O */
	int rb_gc_rsv;		/* Reserved buffer entries for GC I/O */
	int rb_state;		/* Rate-limiter current state */
	int rb_max_io;		/* Maximum size for an I/O given the config */

	atomic_t rb_user_cnt;	/* User I/O buffer counter */
	atomic_t rb_gc_cnt;	/* GC I/O buffer counter */
	atomic_t rb_space;	/* Space limit in case of reaching capacity */

	int rsv_blocks;		/* Reserved blocks for GC */

	int rb_user_active;
	int rb_gc_active;

	struct timer_list u_timer;

	unsigned long long nr_secs;
	unsigned long total_blocks;
	atomic_t free_blocks;
};

#define PBLK_LINE_EMPTY (~0U)

enum {
	/* Line Types */
	PBLK_LINETYPE_FREE = 0,
	PBLK_LINETYPE_LOG = 1,
	PBLK_LINETYPE_DATA = 2,

	/* Line state */
	PBLK_LINESTATE_FREE = 10,
	PBLK_LINESTATE_OPEN = 11,
	PBLK_LINESTATE_CLOSED = 12,
	PBLK_LINESTATE_GC = 13,
	PBLK_LINESTATE_BAD = 14,
	PBLK_LINESTATE_CORRUPT = 15,

	/* GC group */
	PBLK_LINEGC_NONE = 20,
	PBLK_LINEGC_EMPTY = 21,
	PBLK_LINEGC_LOW = 22,
	PBLK_LINEGC_MID = 23,
	PBLK_LINEGC_HIGH = 24,
	PBLK_LINEGC_FULL = 25,
};

#define PBLK_MAGIC 0x70626c6b /* pblk */
#define SMETA_VERSION cpu_to_le16(1)

struct line_header {
	__le32 crc;
	__le32 identifier;	/* pblk identifier */
	__u8 uuid[16];		/* instance uuid */
	__le16 type;		/* line type */
	__le16 version;		/* type version */
	__le32 id;		/* line id for current line */
};

struct line_smeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */
	/* Previous line metadata */
	__le32 prev_id;		/* Line id for previous line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	__le32 rsvd[2];

	__le64 lun_bitmap[];
};

/*
 * Metadata layout in media:
 * First sector:
 *	1. struct line_emeta
 *	2. bad block bitmap (u64 * window_wr_lun)
 * Mid sectors (start at lbas_sector):
 *	3. nr_lbas (u64) forming lba list
 * Last sectors (start at vsc_sector):
 *	4. u32 valid sector count (vsc) for all lines (~0U: free line)
 */
struct line_emeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */

	/* Previous line metadata */
	__le32 prev_id;		/* Line id for prev line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	/* Bookkeeping for recovery */
	__le32 next_id;		/* Line id for next line */
	__le64 nr_lbas;		/* Number of lbas mapped in line */
	__le64 nr_valid_lbas;	/* Number of valid lbas mapped in line */
	__le64 bb_bitmap[];	/* Updated bad block bitmap for line */
};

struct pblk_emeta {
	struct line_emeta *buf;		/* emeta buffer in media format */
	int mem;			/* Write offset - points to next
					 * writable entry in memory
					 */
	atomic_t sync;			/* Synced - backpointer that signals the
					 * last entry that has been successfully
					 * persisted to media
					 */
	unsigned int nr_entries;	/* Number of emeta entries */
};

struct pblk_smeta {
	struct line_smeta *buf;		/* smeta buffer in persistent format */
};

struct pblk_line {
	struct pblk *pblk;
	unsigned int id;		/* Line number corresponds to the
					 * block line
					 */
	unsigned int seq_nr;		/* Unique line sequence number */

	int state;			/* PBLK_LINESTATE_X */
	int type;			/* PBLK_LINETYPE_X */
	int gc_group;			/* PBLK_LINEGC_X */
	struct list_head list;		/* Free, GC lists */

	unsigned long *lun_bitmap;	/* Bitmap for LUNs mapped in line */

	struct pblk_smeta *smeta;	/* Start metadata */
	struct pblk_emeta *emeta;	/* End metadata */

	int meta_line;			/* Metadata line id */
	int meta_distance;		/* Distance between data and metadata */

	u64 smeta_ssec;			/* Sector where smeta starts */
	u64 emeta_ssec;			/* Sector where emeta starts */

	unsigned int sec_in_line;	/* Number of usable secs in line */

	atomic_t blk_in_line;		/* Number of good blocks in line */
	unsigned long *blk_bitmap;	/* Bitmap for valid/invalid blocks */
	unsigned long *erase_bitmap;	/* Bitmap for erased blocks */

	unsigned long *map_bitmap;	/* Bitmap for mapped sectors in line */
	unsigned long *invalid_bitmap;	/* Bitmap for invalid sectors in line */

	atomic_t left_eblks;		/* Blocks left for erasing */
	atomic_t left_seblks;		/* Blocks left for sync erasing */

	int left_msecs;			/* Sectors left for mapping */
	unsigned int cur_sec;		/* Sector map pointer */
	unsigned int nr_valid_lbas;	/* Number of valid lbas in line */

	__le32 *vsc;			/* Valid sector count in line */

	struct kref ref;		/* Write buffer L2P references */

	spinlock_t lock;		/* Necessary for invalid_bitmap only */
};

#define PBLK_DATA_LINES 4

enum {
	PBLK_KMALLOC_META = 1,
	PBLK_VMALLOC_META = 2,
};

enum {
	PBLK_EMETA_TYPE_HEADER = 1,	/* struct line_emeta first sector */
	PBLK_EMETA_TYPE_LLBA = 2,	/* lba list - type: __le64 */
	PBLK_EMETA_TYPE_VSC = 3,	/* vsc list - type: __le32 */
};

struct pblk_line_mgmt {
	int nr_lines;			/* Total number of full lines */
	int nr_free_lines;		/* Number of full lines in free list */

	/* Free lists - use free_lock */
	struct list_head free_list;	/* Full lines ready to use */
	struct list_head corrupt_list;	/* Full lines corrupted */
	struct list_head bad_list;	/* Full lines bad */

	/* GC lists - use gc_lock */
	struct list_head *gc_lists[PBLK_GC_NR_LISTS];
	struct list_head gc_high_list;	/* Full lines ready to GC, high isc */
	struct list_head gc_mid_list;	/* Full lines ready to GC, mid isc */
	struct list_head gc_low_list;	/* Full lines ready to GC, low isc */

	struct list_head gc_full_list;	/* Full lines ready to GC, no valid */
	struct list_head gc_empty_list;	/* Full lines closed, all valid */

	struct pblk_line *log_line;	/* Current FTL log line */
	struct pblk_line *data_line;	/* Current data line */
	struct pblk_line *log_next;	/* Next FTL log line */
	struct pblk_line *data_next;	/* Next data line */

	struct list_head emeta_list;	/* Lines queued to schedule emeta */

	__le32 *vsc_list;		/* Valid sector counts for all lines */

	/* Metadata allocation type: VMALLOC | KMALLOC */
	int emeta_alloc_type;

	/* Pre-allocated metadata for data lines */
	struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
	struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
	unsigned long meta_bitmap;

	/* Helpers for fast bitmap calculations */
	unsigned long *bb_template;
	unsigned long *bb_aux;

	unsigned long d_seq_nr;		/* Data line unique sequence number */
	unsigned long l_seq_nr;		/* Log line unique sequence number */

	spinlock_t free_lock;
	spinlock_t close_lock;
	spinlock_t gc_lock;
};

struct pblk_line_meta {
	unsigned int smeta_len;		/* Total length for smeta */
	unsigned int smeta_sec;		/* Sectors needed for smeta */

	unsigned int emeta_len[4];	/* Lengths for emeta:
					 *  [0]: Total length
					 *  [1]: struct line_emeta length
					 *  [2]: L2P portion length
					 *  [3]: vsc list length
					 */
	unsigned int emeta_sec[4];	/* Sectors needed for emeta. Same layout
					 * as emeta_len
					 */

	unsigned int emeta_bb;		/* Boundary for bb that affects emeta */

	unsigned int vsc_list_len;	/* Length for vsc list */
	unsigned int sec_bitmap_len;	/* Length for sector bitmap in line */
	unsigned int blk_bitmap_len;	/* Length for block bitmap in line */
	unsigned int lun_bitmap_len;	/* Length for lun bitmap in line */

	unsigned int blk_per_line;	/* Number of blocks in a full line */
	unsigned int sec_per_line;	/* Number of sectors in a line */
	unsigned int dsec_per_line;	/* Number of data sectors in a line */
	unsigned int min_blk_line;	/* Min. number of good blocks in line */

	unsigned int mid_thrs;		/* Threshold for GC mid list */
	unsigned int high_thrs;		/* Threshold for GC high list */

	unsigned int meta_distance;	/* Distance between data and metadata */
};

struct pblk_addr_format {
	u64 ch_mask;
	u64 lun_mask;
	u64 pln_mask;
	u64 blk_mask;
	u64 pg_mask;
	u64 sec_mask;
	u8 ch_offset;
	u8 lun_offset;
	u8 pln_offset;
	u8 blk_offset;
	u8 pg_offset;
	u8 sec_offset;
};

enum {
	PBLK_STATE_RUNNING = 0,
	PBLK_STATE_STOPPING = 1,
	PBLK_STATE_RECOVERING = 2,
	PBLK_STATE_STOPPED = 3,
};

struct pblk {
	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	struct kobject kobj;

	struct pblk_lun *luns;

	struct pblk_line *lines;	/* Line array */
	struct pblk_line_mgmt l_mg;	/* Line management */
	struct pblk_line_meta lm;	/* Line metadata */

	int ppaf_bitsize;
	struct pblk_addr_format ppaf;

	struct pblk_rb rwb;

	int state;			/* pblk line state */

	int min_write_pgs;	/* Minimum number of pages required by controller */
	int max_write_pgs;	/* Maximum number of pages supported by controller */
	int pgs_in_buffer;	/* Number of pages that need to be held in buffer to
				 * guarantee successful reads.
				 */

	sector_t capacity;	/* Device capacity when bad blocks are subtracted */
	int over_pct;		/* Percentage of device used for over-provisioning */

	/* pblk provisioning values. Used by rate limiter */
	struct pblk_rl rl;

	int sec_per_write;

	unsigned char instance_uuid[16];
#ifdef CONFIG_NVM_DEBUG
	/* All debug counters apply to 4kb sector I/Os */
	atomic_long_t inflight_writes;	/* Inflight writes (user and gc) */
	atomic_long_t padded_writes;	/* Sectors padded due to flush/fua */
	atomic_long_t padded_wb;	/* Sectors padded in write buffer */
	atomic_long_t nr_flush;		/* Number of flush/fua I/O */
	atomic_long_t req_writes;	/* Sectors stored on write buffer */
	atomic_long_t sub_writes;	/* Sectors submitted from buffer */
	atomic_long_t sync_writes;	/* Sectors synced to media */
	atomic_long_t inflight_reads;	/* Inflight sector read requests */
	atomic_long_t cache_reads;	/* Read requests that hit the cache */
	atomic_long_t sync_reads;	/* Completed sector read requests */
	atomic_long_t recov_writes;	/* Sectors submitted from recovery */
	atomic_long_t recov_gc_writes;	/* Sectors submitted from write GC */
	atomic_long_t recov_gc_reads;	/* Sectors submitted from read GC */
#endif

	spinlock_t lock;

	atomic_long_t read_failed;
	atomic_long_t read_empty;
	atomic_long_t read_high_ecc;
	atomic_long_t read_failed_gc;
	atomic_long_t write_failed;
	atomic_long_t erase_failed;

	atomic_t inflight_io;		/* General inflight I/O counter */

	struct task_struct *writer_ts;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the physical
	 * addresses are used when writing to the disk block device.
	 */
	unsigned char *trans_map;
	spinlock_t trans_lock;

	struct list_head compl_list;

	mempool_t *page_bio_pool;
	mempool_t *gen_ws_pool;
	mempool_t *rec_pool;
	mempool_t *r_rq_pool;
	mempool_t *w_rq_pool;
	mempool_t *e_rq_pool;

	struct workqueue_struct *close_wq;
	struct workqueue_struct *bb_wq;
	struct workqueue_struct *r_end_wq;

	struct timer_list wtimer;

	struct pblk_gc gc;
};

struct pblk_line_ws {
	struct pblk *pblk;
	struct pblk_line *line;
	void *priv;
	struct work_struct ws;
};

#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))

/*
 * pblk ring buffer operations
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
		 unsigned int power_size, unsigned int power_seg_sz);
unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
void *pblk_rb_entries_ref(struct pblk_rb *rb);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
			   unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
			 unsigned int *pos);
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
			      struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
			    struct pblk_w_ctx w_ctx, struct pblk_line *line,
			    u64 paddr, unsigned int pos);
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
void pblk_rb_flush(struct pblk_rb *rb);

void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
				 unsigned int pos, unsigned int nr_entries,
				 unsigned int count);
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
				      struct list_head *list,
				      unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
			struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
					      struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);

unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);

int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
void pblk_rb_data_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);

/*
 * pblk core
 */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx);
void pblk_discard(struct pblk *pblk, struct bio *bio);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
void pblk_line_put_wq(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush);
void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap);
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap);
void pblk_end_io_sync(struct nvm_rq *rqd);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages);
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr);
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
			   struct ppa_addr ppa);
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line, u64 paddr);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs);

/*
 * pblk user I/O write path
 */
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
			unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);

/*
 * pblk map
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa);
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off);

/*
 * pblk write thread
 */
int pblk_write_ts(void *data);
void pblk_write_timer_fn(struct timer_list *t);
void pblk_write_should_kick(struct pblk *pblk);

/*
 * pblk read path
 */
extern struct bio_set *pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
 * pblk recovery
 */
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp);

/*
 * pblk gc
 */
#define PBLK_GC_MAX_READERS 8	/* Max number of outstanding GC reader jobs */
#define PBLK_GC_RQ_QD 128	/* Queue depth for inflight GC requests */
#define PBLK_GC_L_QD 4		/* Queue depth for inflight GC lines */
#define PBLK_GC_RSV_LINE 1	/* Reserved lines for GC */

int pblk_gc_init(struct pblk *pblk);
void pblk_gc_exit(struct pblk *pblk);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_should_kick(struct pblk *pblk);
void pblk_gc_free_full_lines(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);

/*
 * pblk rate limiter
 */
void pblk_rl_init(struct pblk_rl *rl, int budget);
void pblk_rl_free(struct pblk_rl *rl);
void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
int pblk_rl_is_limit(struct pblk_rl *rl);

/*
 * pblk sysfs
 */
int pblk_sysfs_init(struct gendisk *tdisk);
void pblk_sysfs_exit(struct gendisk *tdisk);

static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
{
	if (type == PBLK_KMALLOC_META)
		return kmalloc(size, flags);
	return vmalloc(size);
}

static inline void pblk_mfree(void *ptr, int type)
{
	if (type == PBLK_KMALLOC_META)
		kfree(ptr);
	else
		vfree(ptr);
}
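
/*
 * Illustrative usage (a sketch based on the types in this header, not code
 * copied from the .c files): metadata buffers are allocated and freed with
 * the allocation type recorded in the line manager, e.g.
 *
 *	struct pblk_line_meta *lm = &pblk->lm;
 *	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 *	void *emeta_buf;
 *
 *	emeta_buf = pblk_malloc(lm->emeta_len[0], l_mg->emeta_alloc_type,
 *				GFP_KERNEL);
 *	if (!emeta_buf)
 *		return -ENOMEM;
 *	...
 *	pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
 */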

static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
{
	return c_ctx - sizeof(struct nvm_rq);
}

static inline void *emeta_to_bb(struct line_emeta *emeta)
{
	return emeta->bb_bitmap;
}

static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
{
	return ((void *)emeta + pblk->lm.emeta_len[1]);
}

static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
{
	return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
}

static inline int pblk_line_vsc(struct pblk_line *line)
{
	return le32_to_cpu(*line->vsc);
}

#define NVM_MEM_PAGE_WRITE (8)

static inline int pblk_pad_distance(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	return NVM_MEM_PAGE_WRITE * geo->all_luns * geo->sec_per_pl;
}

static inline int pblk_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;
}

static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;
}

static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
					      u64 line_id)
{
	struct ppa_addr ppa;

	ppa.ppa = 0;
	ppa.g.blk = line_id;
	ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
	ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
	ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
	ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
	ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;

	return ppa;
}

static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
					    struct ppa_addr p)
{
	u64 paddr;

	paddr = (u64)p.g.pg << pblk->ppaf.pg_offset;
	paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
	paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
	paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
	paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;

	return paddr;
}
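
/*
 * Note (an informal reading of the two helpers above): for non-cached
 * addresses, addr_to_gen_ppa() and pblk_dev_ppa_to_line_addr() act as
 * inverses modulo the line id, i.e.
 *
 *	u64 paddr = pblk_dev_ppa_to_line_addr(pblk, p);
 *	int line_id = pblk_ppa_to_line(p);
 *	struct ppa_addr q = addr_to_gen_ppa(pblk, paddr, line_id);
 *
 * is expected to yield q.ppa == p.ppa.
 */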

static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
	struct ppa_addr ppa64;

	ppa64.ppa = 0;

	if (ppa32 == -1) {
		ppa64.ppa = ADDR_EMPTY;
	} else if (ppa32 & (1U << 31)) {
		ppa64.c.line = ppa32 & ((~0U) >> 1);
		ppa64.c.is_cached = 1;
	} else {
		ppa64.g.blk = (ppa32 & pblk->ppaf.blk_mask) >>
							pblk->ppaf.blk_offset;
		ppa64.g.pg = (ppa32 & pblk->ppaf.pg_mask) >>
							pblk->ppaf.pg_offset;
		ppa64.g.lun = (ppa32 & pblk->ppaf.lun_mask) >>
							pblk->ppaf.lun_offset;
		ppa64.g.ch = (ppa32 & pblk->ppaf.ch_mask) >>
							pblk->ppaf.ch_offset;
		ppa64.g.pl = (ppa32 & pblk->ppaf.pln_mask) >>
							pblk->ppaf.pln_offset;
		ppa64.g.sec = (ppa32 & pblk->ppaf.sec_mask) >>
							pblk->ppaf.sec_offset;
	}

	return ppa64;
}

static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
	u32 ppa32 = 0;

	if (ppa64.ppa == ADDR_EMPTY) {
		ppa32 = ~0U;
	} else if (ppa64.c.is_cached) {
		ppa32 |= ppa64.c.line;
		ppa32 |= 1U << 31;
	} else {
		ppa32 |= ppa64.g.blk << pblk->ppaf.blk_offset;
		ppa32 |= ppa64.g.pg << pblk->ppaf.pg_offset;
		ppa32 |= ppa64.g.lun << pblk->ppaf.lun_offset;
		ppa32 |= ppa64.g.ch << pblk->ppaf.ch_offset;
		ppa32 |= ppa64.g.pl << pblk->ppaf.pln_offset;
		ppa32 |= ppa64.g.sec << pblk->ppaf.sec_offset;
	}

	return ppa32;
}
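
/*
 * In the compact 32-bit map format used above: ~0U marks an empty entry,
 * values with bit 31 set carry a write-buffer cache line in the low 31
 * bits, and all other values pack the device address fields using the
 * ppaf offsets.
 */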

static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
						 sector_t lba)
{
	struct ppa_addr ppa;

	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
	} else {
		struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;

		ppa = map[lba];
	}

	return ppa;
}

static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
				      struct ppa_addr ppa)
{
	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
	} else {
		u64 *map = (u64 *)pblk->trans_map;

		map[lba] = ppa.ppa;
	}
}

static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
	if (lppa.ppa == rppa.ppa)
		return true;

	return false;
}

static inline int pblk_addr_in_cache(struct ppa_addr ppa)
{
	return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
}

static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
{
	return ppa.c.line;
}

static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
{
	struct ppa_addr p;

	p.c.line = addr;
	p.c.is_cached = 1;

	return p;
}

static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
					    struct line_header *header)
{
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
				      struct line_smeta *smeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)smeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->smeta_len -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
				      struct line_emeta *emeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)emeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->emeta_len[0] -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int flags;

	flags = geo->plane_mode >> 1;

	if (type == PBLK_WRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

enum {
	PBLK_READ_RANDOM = 0,
	PBLK_READ_SEQUENTIAL = 1,
};

static inline int pblk_set_read_mode(struct pblk *pblk, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int flags;

	flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
	if (type == PBLK_READ_SEQUENTIAL)
		flags |= geo->plane_mode >> 1;

	return flags;
}

static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
	return !(nr_secs % pblk->min_write_pgs);
}

#ifdef CONFIG_NVM_DEBUG
static inline void print_ppa(struct ppa_addr *p, char *msg, int error)
{
	if (p->c.is_cached) {
		pr_err("ppa: (%s: %x) cache line: %llu\n",
				msg, error, (u64)p->c.line);
	} else {
		pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
			msg, error,
			p->g.ch, p->g.lun, p->g.blk,
			p->g.pg, p->g.pl, p->g.sec);
	}
}

static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
					 int error)
{
	int bit = -1;

	if (rqd->nr_ppas == 1) {
		print_ppa(&rqd->ppa_addr, "rqd", error);
		return;
	}

	while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
						bit + 1)) < rqd->nr_ppas) {
		print_ppa(&rqd->ppa_list[bit], "rqd", error);
	}

	pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}

static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
					   struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct ppa_addr *ppa;
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa = &ppas[i];

		if (!ppa->c.is_cached &&
				ppa->g.ch < geo->nr_chnls &&
				ppa->g.lun < geo->nr_luns &&
				ppa->g.pl < geo->nr_planes &&
				ppa->g.blk < geo->nr_chks &&
				ppa->g.pg < geo->ws_per_chk &&
				ppa->g.sec < geo->sec_per_pg)
			continue;

		print_ppa(ppa, "boundary", i);

		return 1;
	}
	return 0;
}

static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa_list;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (rqd->opcode == NVM_OP_PWRITE) {
		struct pblk_line *line;
		struct ppa_addr ppa;
		int i;

		for (i = 0; i < rqd->nr_ppas; i++) {
			ppa = ppa_list[i];
			line = &pblk->lines[pblk_ppa_to_line(ppa)];

			spin_lock(&line->lock);
			if (line->state != PBLK_LINESTATE_OPEN) {
				pr_err("pblk: bad ppa: line:%d,state:%d\n",
							line->id, line->state);
				WARN_ON(1);
				spin_unlock(&line->lock);
				return -EINVAL;
			}
			spin_unlock(&line->lock);
		}
	}

	return 0;
}
#endif

static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
	struct pblk_line_meta *lm = &pblk->lm;

	if (paddr > lm->sec_per_line)
		return 1;

	return 0;
}

static inline unsigned int pblk_get_bi_idx(struct bio *bio)
{
	return bio->bi_iter.bi_idx;
}

static inline sector_t pblk_get_lba(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int pblk_get_secs(struct bio *bio)
{
	return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}
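
/*
 * Worked example for the two helpers above: with PBLK_SECTOR (512) and
 * PBLK_EXPOSED_PAGE_SIZE (4096), NR_PHY_IN_LOG is 8, so a bio starting at
 * 512-byte sector 80 with a bi_size of 16384 bytes maps to pblk lba 10 and
 * covers 4 exposed 4KB pages.
 */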

static inline void pblk_setup_uuid(struct pblk *pblk)
{
	uuid_le uuid;

	uuid_le_gen(&uuid);
	memcpy(pblk->instance_uuid, uuid.b, 16);
}
#endif /* PBLK_H_ */