a4bd217b
JG
1/*
2 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
3 * Copyright (C) 2016 CNEX Labs
4 * Initial release: Matias Bjorling <matias@cnexlabs.com>
5 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * Implementation of a Physical Block-device target for Open-channel SSDs.
17 *
18 */
19
20#ifndef PBLK_H_
21#define PBLK_H_
22
23#include <linux/blkdev.h>
24#include <linux/blk-mq.h>
25#include <linux/bio.h>
26#include <linux/module.h>
27#include <linux/kthread.h>
28#include <linux/vmalloc.h>
29#include <linux/crc32.h>
30#include <linux/uuid.h>
31
32#include <linux/lightnvm.h>
33
34/* Run only GC if less than 1/X blocks are free */
35#define GC_LIMIT_INVERSE 5
36#define GC_TIME_MSECS 1000
37
38#define PBLK_SECTOR (512)
39#define PBLK_EXPOSED_PAGE_SIZE (4096)
40#define PBLK_MAX_REQ_ADDRS (64)
41#define PBLK_MAX_REQ_ADDRS_PW (6)
42
ef576494
JG
43#define PBLK_NR_CLOSE_JOBS (4)
44
a4bd217b
JG
45#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
46
47#define PBLK_COMMAND_TIMEOUT_MS 30000
48
49/* Max 512 LUNs per device */
50#define PBLK_MAX_LUNS_BITMAP (4)
51
52#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
53
0d880398 54/* Static pool sizes */
b84ae4a8
JG
55#define PBLK_GEN_WS_POOL_SIZE (2)
56
e5392739
JG
57#define PBLK_DEFAULT_OP (11)
58
e2cddf20
JG
59enum {
60 PBLK_READ = READ,
61	PBLK_WRITE = WRITE,	/* Write from write buffer */
62 PBLK_WRITE_INT, /* Internal write - no write buffer */
8f554597 63 PBLK_READ_RECOV, /* Recovery read - errors allowed */
e2cddf20
JG
64 PBLK_ERASE,
65};
66
a4bd217b
JG
67enum {
68 /* IO Types */
69 PBLK_IOTYPE_USER = 1 << 0,
70 PBLK_IOTYPE_GC = 1 << 1,
71
72 /* Write buffer flags */
73 PBLK_FLUSH_ENTRY = 1 << 2,
74 PBLK_WRITTEN_DATA = 1 << 3,
75 PBLK_SUBMITTED_ENTRY = 1 << 4,
76 PBLK_WRITABLE_ENTRY = 1 << 5,
77};
78
79enum {
80 PBLK_BLK_ST_OPEN = 0x1,
81 PBLK_BLK_ST_CLOSED = 0x2,
82};
83
b20ba1bc
JG
84struct pblk_sec_meta {
85 u64 reserved;
86 __le64 lba;
87};
88
a4bd217b
JG
89/* The number of GC lists and the rate-limiter states go together. This way the
90 * rate-limiter can dictate how much GC is needed based on resource utilization.
91 */
b20ba1bc 92#define PBLK_GC_NR_LISTS 3
a4bd217b
JG
93
94enum {
95 PBLK_RL_HIGH = 1,
96 PBLK_RL_MID = 2,
97 PBLK_RL_LOW = 3,
98};
99
a4bd217b 100#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
a4809fee 101#define pblk_dma_ppa_size (sizeof(u64) * PBLK_MAX_REQ_ADDRS)
a4bd217b 102
084ec9ba 103/* write buffer completion context */
a4bd217b
JG
104struct pblk_c_ctx {
105 struct list_head list; /* Head for out-of-order completion */
106
107 unsigned long *lun_bitmap; /* Luns used on current request */
108 unsigned int sentry;
109 unsigned int nr_valid;
110 unsigned int nr_padded;
111};
112
a4809fee 113/* read context */
084ec9ba
JG
114struct pblk_g_ctx {
115 void *private;
998ba629 116 unsigned long start_time;
a4809fee 117 u64 lba;
a4bd217b
JG
118};
119
ee8d5c1a
JG
120/* Pad context */
121struct pblk_pad_rq {
122 struct pblk *pblk;
123 struct completion wait;
124 struct kref ref;
125};
126
a4bd217b
JG
127/* Recovery context */
128struct pblk_rec_ctx {
129 struct pblk *pblk;
130 struct nvm_rq *rqd;
131 struct list_head failed;
132 struct work_struct ws_rec;
133};
134
135/* Write context */
136struct pblk_w_ctx {
137 struct bio_list bios; /* Original bios - used for completion
138 * in REQ_FUA, REQ_FLUSH case
139 */
ef697902 140	u64 lba;			/* Logical addr. associated with entry */
a4bd217b
JG
141	struct ppa_addr ppa;		/* Physical addr. associated with entry */
142 int flags; /* Write context flags */
143};
144
145struct pblk_rb_entry {
146 struct ppa_addr cacheline; /* Cacheline for this entry */
147 void *data; /* Pointer to data on this entry */
148 struct pblk_w_ctx w_ctx; /* Context for this entry */
149 struct list_head index; /* List head to enable indexes */
150};
151
152#define EMPTY_ENTRY (~0U)
153
154struct pblk_rb_pages {
155 struct page *pages;
156 int order;
157 struct list_head list;
158};
159
160struct pblk_rb {
161 struct pblk_rb_entry *entries; /* Ring buffer entries */
162 unsigned int mem; /* Write offset - points to next
163 * writable entry in memory
164 */
165 unsigned int subm; /* Read offset - points to last entry
166 * that has been submitted to the media
167 * to be persisted
168 */
169 unsigned int sync; /* Synced - backpointer that signals
170 * the last submitted entry that has
171 * been successfully persisted to media
172 */
8154d296 173	unsigned int flush_point;	/* Flush point - last entry that must be
a4bd217b
JG
174 * flushed to the media. Used with
175 * REQ_FLUSH and REQ_FUA
176 */
177 unsigned int l2p_update; /* l2p update point - next entry for
178 * which l2p mapping will be updated to
179 * contain a device ppa address (instead
180					 * of a cacheline)
181 */
182 unsigned int nr_entries; /* Number of entries in write buffer -
183 * must be a power of two
184 */
185 unsigned int seg_size; /* Size of the data segments being
186 * stored on each entry. Typically this
187 * will be 4KB
188 */
189
190 struct list_head pages; /* List of data pages */
191
192 spinlock_t w_lock; /* Write lock */
193 spinlock_t s_lock; /* Sync lock */
194
195#ifdef CONFIG_NVM_DEBUG
8154d296 196 atomic_t inflight_flush_point; /* Not served REQ_FLUSH | REQ_FUA */
a4bd217b
JG
197#endif
198};
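/*
 * Illustrative sketch (not part of the upstream header): the ring buffer
 * pointers advance in the order mem -> subm -> sync, so the number of
 * entries still held in memory follows from mem and sync. The helper name
 * is hypothetical; nr_entries is a power of two, so the difference can be
 * masked instead of taken modulo.
 */
static inline unsigned int pblk_rb_example_in_flight(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int sync = READ_ONCE(rb->sync);

	return (mem - sync) & (rb->nr_entries - 1);
}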
199
200#define PBLK_RECOVERY_SECTORS 16
201
202struct pblk_lun {
203 struct ppa_addr bppa;
a4bd217b
JG
204 struct semaphore wr_sem;
205};
206
207struct pblk_gc_rq {
208 struct pblk_line *line;
209 void *data;
d340121e 210 u64 paddr_list[PBLK_MAX_REQ_ADDRS];
b20ba1bc 211 u64 lba_list[PBLK_MAX_REQ_ADDRS];
a4bd217b
JG
212 int nr_secs;
213 int secs_to_gc;
214 struct list_head list;
215};
216
217struct pblk_gc {
b20ba1bc
JG
218 /* These states are not protected by a lock since (i) they are in the
219 * fast path, and (ii) they are not critical.
220 */
a4bd217b
JG
221 int gc_active;
222 int gc_enabled;
223 int gc_forced;
a4bd217b
JG
224
225 struct task_struct *gc_ts;
226 struct task_struct *gc_writer_ts;
b20ba1bc
JG
227 struct task_struct *gc_reader_ts;
228
229 struct workqueue_struct *gc_line_reader_wq;
a4bd217b 230 struct workqueue_struct *gc_reader_wq;
b20ba1bc 231
a4bd217b
JG
232 struct timer_list gc_timer;
233
b20ba1bc 234 struct semaphore gc_sem;
d6b992f7
HH
235 atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
236 atomic_t pipeline_gc; /* Number of lines in the GC pipeline -
237 * started reads to finished writes
238 */
a4bd217b 239 int w_entries;
b20ba1bc 240
a4bd217b 241 struct list_head w_list;
b20ba1bc 242 struct list_head r_list;
a4bd217b
JG
243
244 spinlock_t lock;
245 spinlock_t w_lock;
b20ba1bc 246 spinlock_t r_lock;
a4bd217b
JG
247};
248
249struct pblk_rl {
250 unsigned int high; /* Upper threshold for rate limiter (free run -
251					 * user I/O rate limiter)
252 */
a4bd217b
JG
253 unsigned int high_pw; /* High rounded up as a power of 2 */
254
b20ba1bc
JG
255#define PBLK_USER_HIGH_THRS 8 /* Begin write limit at 12% available blks */
256#define PBLK_USER_LOW_THRS 10 /* Aggressive GC at 10% available blocks */
a4bd217b
JG
257
258 int rb_windows_pw; /* Number of rate windows in the write buffer
259 * given as a power-of-2. This guarantees that
260				 * when user I/O is being rate limited, enough
261				 * space will be reserved for the GC to
262 * place its payload. A window is of
263 * pblk->max_write_pgs size, which in NVMe is
264 * 64, i.e., 256kb.
265 */
266 int rb_budget; /* Total number of entries available for I/O */
267 int rb_user_max; /* Max buffer entries available for user I/O */
a4bd217b
JG
268 int rb_gc_max; /* Max buffer entries available for GC I/O */
269 int rb_gc_rsv; /* Reserved buffer entries for GC I/O */
270 int rb_state; /* Rate-limiter current state */
da67e68f 271	int rb_max_io;		/* Maximum size for an I/O given the config */
588726d3
JG
272
273 atomic_t rb_user_cnt; /* User I/O buffer counter */
a4bd217b 274 atomic_t rb_gc_cnt; /* GC I/O buffer counter */
588726d3 275 atomic_t rb_space; /* Space limit in case of reaching capacity */
a4bd217b 276
b20ba1bc
JG
277 int rsv_blocks; /* Reserved blocks for GC */
278
a4bd217b 279 int rb_user_active;
b20ba1bc
JG
280 int rb_gc_active;
281
a4bd217b
JG
282 struct timer_list u_timer;
283
284 unsigned long long nr_secs;
285 unsigned long total_blocks;
a7689938
JG
286
287 atomic_t free_blocks; /* Total number of free blocks (+ OP) */
288 atomic_t free_user_blocks; /* Number of user free blocks (no OP) */
a4bd217b
JG
289};
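/*
 * Illustrative sketch (not part of the upstream header): one way the write
 * buffer budget could be split between user and GC I/O when the rate
 * limiter enters its high state. The helper name and the 50/50 split are
 * hypothetical; the actual policy lives in pblk-rl.c.
 */
static inline void pblk_rl_example_high_split(struct pblk_rl *rl)
{
	/* reserve half of the entries for GC, hand the rest to user I/O */
	rl->rb_gc_max = rl->rb_budget / 2;
	rl->rb_user_max = rl->rb_budget - rl->rb_gc_max;
	rl->rb_state = PBLK_RL_HIGH;
}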
290
a4bd217b
JG
291#define PBLK_LINE_EMPTY (~0U)
292
293enum {
294 /* Line Types */
295 PBLK_LINETYPE_FREE = 0,
296 PBLK_LINETYPE_LOG = 1,
297 PBLK_LINETYPE_DATA = 2,
298
299 /* Line state */
300 PBLK_LINESTATE_FREE = 10,
301 PBLK_LINESTATE_OPEN = 11,
302 PBLK_LINESTATE_CLOSED = 12,
303 PBLK_LINESTATE_GC = 13,
304 PBLK_LINESTATE_BAD = 14,
305 PBLK_LINESTATE_CORRUPT = 15,
306
307 /* GC group */
308 PBLK_LINEGC_NONE = 20,
309 PBLK_LINEGC_EMPTY = 21,
310 PBLK_LINEGC_LOW = 22,
311 PBLK_LINEGC_MID = 23,
312 PBLK_LINEGC_HIGH = 24,
313 PBLK_LINEGC_FULL = 25,
314};
315
316#define PBLK_MAGIC 0x70626c6b /*pblk*/
d0ab0b1a
HH
317
318/* emeta/smeta persistent storage format versions:
319 * Changes in major version requires offline migration.
320 * Changes in minor version are handled automatically during
321 * recovery.
322 */
323
324#define SMETA_VERSION_MAJOR (0)
325#define SMETA_VERSION_MINOR (1)
326
327#define EMETA_VERSION_MAJOR (0)
76758390 328#define EMETA_VERSION_MINOR (2)
a4bd217b
JG
329
330struct line_header {
331 __le32 crc;
332 __le32 identifier; /* pblk identifier */
333 __u8 uuid[16]; /* instance uuid */
334 __le16 type; /* line type */
d0ab0b1a
HH
335 __u8 version_major; /* version major */
336 __u8 version_minor; /* version minor */
a4bd217b
JG
337 __le32 id; /* line id for current line */
338};
339
340struct line_smeta {
341 struct line_header header;
342
343 __le32 crc; /* Full structure including struct crc */
344 /* Previous line metadata */
345 __le32 prev_id; /* Line id for previous line */
346
347 /* Current line metadata */
348 __le64 seq_nr; /* Sequence number for current line */
349
350 /* Active writers */
351 __le32 window_wr_lun; /* Number of parallel LUNs to write */
352
353 __le32 rsvd[2];
dd2a4343
JG
354
355 __le64 lun_bitmap[];
a4bd217b
JG
356};
357
76758390 358
a4bd217b 359/*
dd2a4343
JG
360 * Metadata layout in media:
361 * First sector:
362 * 1. struct line_emeta
363 * 2. bad block bitmap (u64 * window_wr_lun)
76758390 364 * 3. write amplification counters
dd2a4343
JG
365 * Mid sectors (start at lbas_sector):
366 *  4. nr_lbas (u64) forming lba list
367 * Last sectors (start at vsc_sector):
368 *  5. u32 valid sector count (vsc) for all lines (~0U: free line)
a4bd217b
JG
369 */
370struct line_emeta {
371 struct line_header header;
372
373 __le32 crc; /* Full structure including struct crc */
374
375 /* Previous line metadata */
376 __le32 prev_id; /* Line id for prev line */
377
378 /* Current line metadata */
379 __le64 seq_nr; /* Sequence number for current line */
380
381 /* Active writers */
382 __le32 window_wr_lun; /* Number of parallel LUNs to write */
383
384 /* Bookkeeping for recovery */
385 __le32 next_id; /* Line id for next line */
386 __le64 nr_lbas; /* Number of lbas mapped in line */
387 __le64 nr_valid_lbas; /* Number of valid lbas mapped in line */
76758390
HH
388 __le64 bb_bitmap[]; /* Updated bad block bitmap for line */
389};
390
391
392/* Write amplification counters stored on media */
393struct wa_counters {
394 __le64 user; /* Number of user written sectors */
395	__le64 gc;		/* Number of sectors written by GC */
396 __le64 pad; /* Number of padded sectors */
dd2a4343
JG
397};
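/*
 * Illustrative sketch (not part of the upstream header): write amplification
 * derived from the counters above, WA = (user + gc + pad) / user, scaled by
 * 100 to stay in integer arithmetic. The helper name is hypothetical and
 * div64_u64() assumes <linux/math64.h> is available.
 */
static inline u64 pblk_example_wa_x100(u64 user, u64 gc, u64 pad)
{
	if (!user)
		return 0;

	return div64_u64((user + gc + pad) * 100, user);
}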
398
399struct pblk_emeta {
400 struct line_emeta *buf; /* emeta buffer in media format */
401 int mem; /* Write offset - points to next
402 * writable entry in memory
403 */
404 atomic_t sync; /* Synced - backpointer that signals the
405 * last entry that has been successfully
406 * persisted to media
407 */
408 unsigned int nr_entries; /* Number of emeta entries */
409};
410
411struct pblk_smeta {
412 struct line_smeta *buf; /* smeta buffer in persistent format */
a4bd217b
JG
413};
414
415struct pblk_line {
416 struct pblk *pblk;
417 unsigned int id; /* Line number corresponds to the
418 * block line
419 */
420 unsigned int seq_nr; /* Unique line sequence number */
421
422 int state; /* PBLK_LINESTATE_X */
423 int type; /* PBLK_LINETYPE_X */
424 int gc_group; /* PBLK_LINEGC_X */
425 struct list_head list; /* Free, GC lists */
426
427 unsigned long *lun_bitmap; /* Bitmap for LUNs mapped in line */
428
dd2a4343
JG
429 struct pblk_smeta *smeta; /* Start metadata */
430	struct pblk_emeta *emeta;	/* End metadata */
431
a4bd217b 432 int meta_line; /* Metadata line id */
dd2a4343
JG
433 int meta_distance; /* Distance between data and metadata */
434
a4bd217b
JG
435 u64 smeta_ssec; /* Sector where smeta starts */
436 u64 emeta_ssec; /* Sector where emeta starts */
437
438 unsigned int sec_in_line; /* Number of usable secs in line */
439
a44f53fa 440 atomic_t blk_in_line; /* Number of good blocks in line */
a4bd217b
JG
441 unsigned long *blk_bitmap; /* Bitmap for valid/invalid blocks */
442 unsigned long *erase_bitmap; /* Bitmap for erased blocks */
443
444 unsigned long *map_bitmap; /* Bitmap for mapped sectors in line */
445 unsigned long *invalid_bitmap; /* Bitmap for invalid sectors in line */
446
a44f53fa 447 atomic_t left_eblks; /* Blocks left for erasing */
a4bd217b
JG
448 atomic_t left_seblks; /* Blocks left for sync erasing */
449
450 int left_msecs; /* Sectors left for mapping */
a4bd217b 451 unsigned int cur_sec; /* Sector map pointer */
dd2a4343
JG
452 unsigned int nr_valid_lbas; /* Number of valid lbas in line */
453
454 __le32 *vsc; /* Valid sector count in line */
a4bd217b
JG
455
456 struct kref ref; /* Write buffer L2P references */
457
458 spinlock_t lock; /* Necessary for invalid_bitmap only */
459};
460
a44f53fa 461#define PBLK_DATA_LINES 4
a4bd217b 462
dd2a4343 463enum {
a4bd217b
JG
464 PBLK_KMALLOC_META = 1,
465 PBLK_VMALLOC_META = 2,
466};
467
dd2a4343
JG
468enum {
469 PBLK_EMETA_TYPE_HEADER = 1, /* struct line_emeta first sector */
470 PBLK_EMETA_TYPE_LLBA = 2, /* lba list - type: __le64 */
471 PBLK_EMETA_TYPE_VSC = 3, /* vsc list - type: __le32 */
a4bd217b
JG
472};
473
474struct pblk_line_mgmt {
475 int nr_lines; /* Total number of full lines */
476 int nr_free_lines; /* Number of full lines in free list */
477
478 /* Free lists - use free_lock */
479 struct list_head free_list; /* Full lines ready to use */
480 struct list_head corrupt_list; /* Full lines corrupted */
481 struct list_head bad_list; /* Full lines bad */
482
483 /* GC lists - use gc_lock */
b20ba1bc 484 struct list_head *gc_lists[PBLK_GC_NR_LISTS];
a4bd217b
JG
485 struct list_head gc_high_list; /* Full lines ready to GC, high isc */
486 struct list_head gc_mid_list; /* Full lines ready to GC, mid isc */
487 struct list_head gc_low_list; /* Full lines ready to GC, low isc */
488
489 struct list_head gc_full_list; /* Full lines ready to GC, no valid */
490 struct list_head gc_empty_list; /* Full lines close, all valid */
491
492 struct pblk_line *log_line; /* Current FTL log line */
493 struct pblk_line *data_line; /* Current data line */
494 struct pblk_line *log_next; /* Next FTL log line */
495 struct pblk_line *data_next; /* Next data line */
496
dd2a4343
JG
497 struct list_head emeta_list; /* Lines queued to schedule emeta */
498
499 __le32 *vsc_list; /* Valid sector counts for all lines */
500
a4bd217b 501 /* Metadata allocation type: VMALLOC | KMALLOC */
a4bd217b
JG
502 int emeta_alloc_type;
503
504 /* Pre-allocated metadata for data lines */
dd2a4343
JG
505 struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
506 struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
a4bd217b
JG
507 unsigned long meta_bitmap;
508
509 /* Helpers for fast bitmap calculations */
510 unsigned long *bb_template;
511 unsigned long *bb_aux;
512
513 unsigned long d_seq_nr; /* Data line unique sequence number */
514 unsigned long l_seq_nr; /* Log line unique sequence number */
515
516 spinlock_t free_lock;
dd2a4343 517 spinlock_t close_lock;
a4bd217b
JG
518 spinlock_t gc_lock;
519};
520
521struct pblk_line_meta {
522 unsigned int smeta_len; /* Total length for smeta */
dd2a4343
JG
523 unsigned int smeta_sec; /* Sectors needed for smeta */
524
525 unsigned int emeta_len[4]; /* Lengths for emeta:
76758390
HH
526 * [0]: Total
527 * [1]: struct line_emeta +
528 * bb_bitmap + struct wa_counters
529 * [2]: L2P portion
530 * [3]: vsc
dd2a4343
JG
531 */
532 unsigned int emeta_sec[4]; /* Sectors needed for emeta. Same layout
533 * as emeta_len
534 */
535
a4bd217b 536 unsigned int emeta_bb; /* Boundary for bb that affects emeta */
dd2a4343
JG
537
538 unsigned int vsc_list_len; /* Length for vsc list */
a4bd217b
JG
539 unsigned int sec_bitmap_len; /* Length for sector bitmap in line */
540 unsigned int blk_bitmap_len; /* Length for block bitmap in line */
541 unsigned int lun_bitmap_len; /* Length for lun bitmap in line */
542
543 unsigned int blk_per_line; /* Number of blocks in a full line */
544 unsigned int sec_per_line; /* Number of sectors in a line */
dd2a4343 545 unsigned int dsec_per_line; /* Number of data sectors in a line */
a4bd217b
JG
546 unsigned int min_blk_line; /* Min. number of good blocks in line */
547
548 unsigned int mid_thrs; /* Threshold for GC mid list */
549 unsigned int high_thrs; /* Threshold for GC high list */
dd2a4343
JG
550
551 unsigned int meta_distance; /* Distance between data and metadata */
a4bd217b
JG
552};
553
588726d3
JG
554enum {
555 PBLK_STATE_RUNNING = 0,
556 PBLK_STATE_STOPPING = 1,
557 PBLK_STATE_RECOVERING = 2,
558 PBLK_STATE_STOPPED = 3,
559};
560
a4bd217b
JG
561struct pblk {
562 struct nvm_tgt_dev *dev;
563 struct gendisk *disk;
564
565 struct kobject kobj;
566
567 struct pblk_lun *luns;
568
569 struct pblk_line *lines; /* Line array */
570 struct pblk_line_mgmt l_mg; /* Line management */
571 struct pblk_line_meta lm; /* Line metadata */
572
e46f4e48 573 struct nvm_addrf ppaf;
a4bd217b 574 int ppaf_bitsize;
a4bd217b
JG
575
576 struct pblk_rb rwb;
577
588726d3
JG
578 int state; /* pblk line state */
579
a4bd217b
JG
580	int min_write_pgs; /* Minimum number of pages required by the controller */
581	int max_write_pgs; /* Maximum number of pages supported by the controller */
582 int pgs_in_buffer; /* Number of pages that need to be held in buffer to
583 * guarantee successful reads.
584 */
585
586 sector_t capacity; /* Device capacity when bad blocks are subtracted */
a7689938
JG
587
588 int op; /* Percentage of device used for over-provisioning */
589 int op_blks; /* Number of blocks used for over-provisioning */
a4bd217b
JG
590
591 /* pblk provisioning values. Used by rate limiter */
592 struct pblk_rl rl;
593
c2e9f5d4 594 int sec_per_write;
a4bd217b
JG
595
596 unsigned char instance_uuid[16];
76758390
HH
597
598 /* Persistent write amplification counters, 4kb sector I/Os */
599 atomic64_t user_wa; /* Sectors written by user */
600 atomic64_t gc_wa; /* Sectors written by GC */
601 atomic64_t pad_wa; /* Padded sectors written */
602
603 /* Reset values for delta write amplification measurements */
604 u64 user_rst_wa;
605 u64 gc_rst_wa;
606 u64 pad_rst_wa;
607
5d149bfa
HH
608 /* Counters used for calculating padding distribution */
609 atomic64_t *pad_dist; /* Padding distribution buckets */
610	u64 nr_flush_rst;		/* Flushes reset value for pad dist. */
611 atomic64_t nr_flush; /* Number of flush/fua I/O */
612
a4bd217b 613#ifdef CONFIG_NVM_DEBUG
76758390 614 /* Non-persistent debug counters, 4kb sector I/Os */
a4bd217b
JG
615 atomic_long_t inflight_writes; /* Inflight writes (user and gc) */
616 atomic_long_t padded_writes; /* Sectors padded due to flush/fua */
617 atomic_long_t padded_wb; /* Sectors padded in write buffer */
a4bd217b
JG
618 atomic_long_t req_writes; /* Sectors stored on write buffer */
619 atomic_long_t sub_writes; /* Sectors submitted from buffer */
620 atomic_long_t sync_writes; /* Sectors synced to media */
a4bd217b 621 atomic_long_t inflight_reads; /* Inflight sector read requests */
db7ada33 622 atomic_long_t cache_reads; /* Read requests that hit the cache */
a4bd217b
JG
623 atomic_long_t sync_reads; /* Completed sector read requests */
624 atomic_long_t recov_writes; /* Sectors submitted from recovery */
625 atomic_long_t recov_gc_writes; /* Sectors submitted from write GC */
626 atomic_long_t recov_gc_reads; /* Sectors submitted from read GC */
627#endif
628
629 spinlock_t lock;
630
631 atomic_long_t read_failed;
632 atomic_long_t read_empty;
633 atomic_long_t read_high_ecc;
634 atomic_long_t read_failed_gc;
635 atomic_long_t write_failed;
636 atomic_long_t erase_failed;
637
588726d3
JG
638 atomic_t inflight_io; /* General inflight I/O counter */
639
a4bd217b
JG
640 struct task_struct *writer_ts;
641
642 /* Simple translation map of logical addresses to physical addresses.
643	 * The logical addresses are known by the host system, while the physical
644 * addresses are used when writing to the disk block device.
645 */
646 unsigned char *trans_map;
647 spinlock_t trans_lock;
648
649 struct list_head compl_list;
650
bd432417 651 mempool_t *page_bio_pool;
b84ae4a8 652 mempool_t *gen_ws_pool;
a4bd217b 653 mempool_t *rec_pool;
0d880398 654 mempool_t *r_rq_pool;
a4bd217b 655 mempool_t *w_rq_pool;
0d880398 656 mempool_t *e_rq_pool;
a4bd217b 657
ef576494
JG
658 struct workqueue_struct *close_wq;
659 struct workqueue_struct *bb_wq;
7bd4d370 660 struct workqueue_struct *r_end_wq;
ef576494 661
a4bd217b
JG
662 struct timer_list wtimer;
663
664 struct pblk_gc gc;
665};
666
667struct pblk_line_ws {
668 struct pblk *pblk;
669 struct pblk_line *line;
670 void *priv;
671 struct work_struct ws;
672};
673
084ec9ba 674#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
a4bd217b
JG
675#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
676
677/*
678 * pblk ring buffer operations
679 */
680int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
681 unsigned int power_size, unsigned int power_seg_sz);
682unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
683void *pblk_rb_entries_ref(struct pblk_rb *rb);
684int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
685 unsigned int nr_entries, unsigned int *pos);
686int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
687 unsigned int *pos);
688void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
689 struct pblk_w_ctx w_ctx, unsigned int pos);
690void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
d340121e
JG
691 struct pblk_w_ctx w_ctx, struct pblk_line *line,
692 u64 paddr, unsigned int pos);
a4bd217b 693struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
588726d3 694void pblk_rb_flush(struct pblk_rb *rb);
a4bd217b
JG
695
696void pblk_rb_sync_l2p(struct pblk_rb *rb);
d624f371 697unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
875d94f3
JG
698 unsigned int pos, unsigned int nr_entries,
699 unsigned int count);
a4bd217b
JG
700unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
701 struct list_head *list,
702 unsigned int max);
703int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
75cb8e93 704 struct ppa_addr ppa, int bio_iter, bool advanced_bio);
a4bd217b
JG
705unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
706
707unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
708unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
709struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
710 struct ppa_addr *ppa);
711void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
8154d296 712unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
a4bd217b
JG
713
714unsigned int pblk_rb_read_count(struct pblk_rb *rb);
ee8d5c1a 715unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
a4bd217b
JG
716unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
717
718int pblk_rb_tear_down_check(struct pblk_rb *rb);
719int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
720void pblk_rb_data_free(struct pblk_rb *rb);
721ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
722
723/*
724 * pblk core
725 */
67bf26a3
JG
726struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
727void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
c2e9f5d4 728void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
a4bd217b
JG
729int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
730 struct pblk_c_ctx *c_ctx);
a4bd217b
JG
731void pblk_discard(struct pblk *pblk, struct bio *bio);
732void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
733void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
734int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
1a94b2d4 735int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
dd2a4343 736int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
a4bd217b
JG
737struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
738 unsigned int nr_secs, unsigned int len,
de54e703 739 int alloc_type, gfp_t gfp_mask);
a4bd217b
JG
740struct pblk_line *pblk_line_get(struct pblk *pblk);
741struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
21d22871 742struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
a4bd217b
JG
743int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
744void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
745struct pblk_line *pblk_line_get_data(struct pblk *pblk);
d624f371 746struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
a4bd217b
JG
747int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
748int pblk_line_is_full(struct pblk_line *line);
749void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
dd2a4343 750void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
a4bd217b 751void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
dd2a4343 752void pblk_line_close_ws(struct work_struct *work);
588726d3 753void pblk_pipeline_stop(struct pblk *pblk);
b84ae4a8
JG
754void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
755 void (*work)(struct work_struct *), gfp_t gfp_mask,
756 struct workqueue_struct *wq);
a4bd217b
JG
757u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
758int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
dd2a4343
JG
759int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
760 void *emeta_buf);
a4bd217b
JG
761int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
762void pblk_line_put(struct kref *ref);
7bd4d370 763void pblk_line_put_wq(struct kref *ref);
a4bd217b 764struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
dd2a4343
JG
765u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
766void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
a4bd217b 767u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
dd2a4343 768u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
a4bd217b
JG
769int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
770 unsigned long secs_to_flush);
3eaa11e2 771void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
a4bd217b
JG
772void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
773 unsigned long *lun_bitmap);
3eaa11e2 774void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
a4bd217b
JG
775void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
776 unsigned long *lun_bitmap);
a4bd217b
JG
777void pblk_end_io_sync(struct nvm_rq *rqd);
778int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
779 int nr_pages);
a4bd217b
JG
780void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
781 int nr_pages);
782void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
0880a9aa
JG
783void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
784 u64 paddr);
a4bd217b
JG
785void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
786void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
787 struct ppa_addr ppa);
788void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
789 struct ppa_addr ppa, struct ppa_addr entry_line);
790int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
d340121e 791 struct pblk_line *gc_line, u64 paddr);
a4bd217b
JG
792void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
793 u64 *lba_list, int nr_secs);
794void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
795 sector_t blba, int nr_secs);
796
797/*
798 * pblk user I/O write path
799 */
800int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
801 unsigned long flags);
d340121e 802int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
a4bd217b
JG
803
804/*
805 * pblk map
806 */
807void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
808 unsigned int sentry, unsigned long *lun_bitmap,
809 unsigned int valid_secs, struct ppa_addr *erase_ppa);
810void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
811 unsigned long *lun_bitmap, unsigned int valid_secs,
812 unsigned int off);
813
814/*
815 * pblk write thread
816 */
817int pblk_write_ts(void *data);
87c1d2d3 818void pblk_write_timer_fn(struct timer_list *t);
a4bd217b
JG
819void pblk_write_should_kick(struct pblk *pblk);
820
821/*
822 * pblk read path
823 */
b25d5237 824extern struct bio_set *pblk_bio_set;
a4bd217b 825int pblk_submit_read(struct pblk *pblk, struct bio *bio);
d340121e 826int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
a4bd217b
JG
827/*
828 * pblk recovery
829 */
830void pblk_submit_rec(struct work_struct *work);
831struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
588726d3 832int pblk_recov_pad(struct pblk *pblk);
06bc072b 833int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
a4bd217b
JG
834int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
835 struct pblk_rec_ctx *recovery, u64 *comp_bits,
836 unsigned int comp);
837
838/*
839 * pblk gc
840 */
b20ba1bc 841#define PBLK_GC_MAX_READERS 8 /* Max number of outstanding GC reader jobs */
3627896a 842#define PBLK_GC_RQ_QD 128 /* Queue depth for inflight GC requests */
b20ba1bc
JG
843#define PBLK_GC_L_QD 4 /* Queue depth for inflight GC lines */
844#define PBLK_GC_RSV_LINE 1 /* Reserved lines for GC */
a4bd217b
JG
845
846int pblk_gc_init(struct pblk *pblk);
847void pblk_gc_exit(struct pblk *pblk);
848void pblk_gc_should_start(struct pblk *pblk);
849void pblk_gc_should_stop(struct pblk *pblk);
03661b5f 850void pblk_gc_should_kick(struct pblk *pblk);
37ce33d5 851void pblk_gc_free_full_lines(struct pblk *pblk);
a4bd217b
JG
852void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
853 int *gc_active);
b20ba1bc 854int pblk_gc_sysfs_force(struct pblk *pblk, int force);
a4bd217b
JG
855
856/*
857 * pblk rate limiter
858 */
859void pblk_rl_init(struct pblk_rl *rl, int budget);
860void pblk_rl_free(struct pblk_rl *rl);
03661b5f 861void pblk_rl_update_rates(struct pblk_rl *rl);
b20ba1bc 862int pblk_rl_high_thrs(struct pblk_rl *rl);
a4bd217b 863unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
a7689938 864unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
a4bd217b 865int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
588726d3 866void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
a4bd217b
JG
867void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
868int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
869void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
870void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
da67e68f 871int pblk_rl_max_io(struct pblk_rl *rl);
a4bd217b 872void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
a7689938
JG
873void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
874 bool used);
588726d3 875int pblk_rl_is_limit(struct pblk_rl *rl);
a4bd217b
JG
876
877/*
878 * pblk sysfs
879 */
880int pblk_sysfs_init(struct gendisk *tdisk);
881void pblk_sysfs_exit(struct gendisk *tdisk);
882
883static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
884{
885 if (type == PBLK_KMALLOC_META)
886 return kmalloc(size, flags);
887 return vmalloc(size);
888}
889
890static inline void pblk_mfree(void *ptr, int type)
891{
892 if (type == PBLK_KMALLOC_META)
893 kfree(ptr);
894 else
895 vfree(ptr);
896}
897
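/*
 * Illustrative sketch (not part of the upstream header): pblk_malloc() and
 * pblk_mfree() must be paired with the same allocation type, which is why
 * the line management structure records emeta_alloc_type. The helper name
 * is hypothetical.
 */
static inline void *pblk_example_alloc_emeta_buf(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	/* falls back to vmalloc when the emeta buffer is too big for kmalloc */
	return pblk_malloc(pblk->lm.emeta_len[0], l_mg->emeta_alloc_type,
			   GFP_KERNEL);
}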
898static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
899{
900 return c_ctx - sizeof(struct nvm_rq);
901}
902
dd2a4343
JG
903static inline void *emeta_to_bb(struct line_emeta *emeta)
904{
905 return emeta->bb_bitmap;
906}
907
76758390
HH
908static inline void *emeta_to_wa(struct pblk_line_meta *lm,
909 struct line_emeta *emeta)
910{
911 return emeta->bb_bitmap + lm->blk_bitmap_len;
912}
913
dd2a4343
JG
914static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
915{
916 return ((void *)emeta + pblk->lm.emeta_len[1]);
917}
918
919static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
a4bd217b 920{
dd2a4343 921 return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
a4bd217b
JG
922}
923
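/*
 * Illustrative sketch (not part of the upstream header): how the helpers
 * above walk an emeta buffer filled by pblk_line_read_emeta(). The helper
 * name is hypothetical.
 */
static inline u64 pblk_example_emeta_lba(struct pblk *pblk,
					 struct line_emeta *emeta, int i)
{
	__le64 *lba_list = emeta_to_lbas(pblk, emeta);

	/* the lba list holds nr_lbas little-endian entries, one per mapped sector */
	return le64_to_cpu(lba_list[i]);
}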
b20ba1bc
JG
924static inline int pblk_line_vsc(struct pblk_line *line)
925{
d340121e 926 return le32_to_cpu(*line->vsc);
b20ba1bc
JG
927}
928
a4bd217b
JG
929static inline int pblk_pad_distance(struct pblk *pblk)
930{
931 struct nvm_tgt_dev *dev = pblk->dev;
932 struct nvm_geo *geo = &dev->geo;
933
e46f4e48 934 return geo->mw_cunits * geo->all_luns * geo->ws_opt;
a4bd217b
JG
935}
936
b1bcfda1 937static inline int pblk_ppa_to_line(struct ppa_addr p)
a4bd217b
JG
938{
939 return p.g.blk;
940}
941
b1bcfda1 942static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
a4bd217b 943{
a40afad9 944 return p.g.lun * geo->num_ch + p.g.ch;
a4bd217b
JG
945}
946
b1bcfda1
JG
947static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
948 u64 line_id)
a4bd217b 949{
e46f4e48 950 struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf;
b1bcfda1
JG
951 struct ppa_addr ppa;
952
953 ppa.ppa = 0;
954 ppa.g.blk = line_id;
e46f4e48
JG
955 ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
956 ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset;
957 ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset;
958 ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
a40afad9 959 ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset;
b1bcfda1
JG
960
961 return ppa;
a4bd217b
JG
962}
963
b1bcfda1
JG
964static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
965 struct ppa_addr p)
a4bd217b 966{
e46f4e48 967 struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf;
b1bcfda1
JG
968 u64 paddr;
969
e46f4e48
JG
970 paddr = (u64)p.g.ch << ppaf->ch_offset;
971 paddr |= (u64)p.g.lun << ppaf->lun_offset;
972 paddr |= (u64)p.g.pg << ppaf->pg_offset;
973 paddr |= (u64)p.g.pl << ppaf->pln_offset;
a40afad9 974 paddr |= (u64)p.g.sec << ppaf->sec_offset;
b1bcfda1
JG
975
976 return paddr;
a4bd217b
JG
977}
978
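/*
 * Illustrative sketch (not part of the upstream header): within a single
 * line, addr_to_gen_ppa() and pblk_dev_ppa_to_line_addr() are inverses,
 * which recovery relies on when rebuilding the L2P table. The helper name
 * is hypothetical.
 */
static inline bool pblk_example_paddr_roundtrip(struct pblk *pblk, u64 paddr,
						u64 line_id)
{
	struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);

	return pblk_dev_ppa_to_line_addr(pblk, ppa) == paddr;
}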
979static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
980{
981 struct ppa_addr ppa64;
982
983 ppa64.ppa = 0;
984
985 if (ppa32 == -1) {
986 ppa64.ppa = ADDR_EMPTY;
987 } else if (ppa32 & (1U << 31)) {
988 ppa64.c.line = ppa32 & ((~0U) >> 1);
989 ppa64.c.is_cached = 1;
990 } else {
e46f4e48
JG
991 struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf;
992
993 ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> ppaf->ch_offset;
994 ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> ppaf->lun_offset;
995 ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> ppaf->blk_offset;
996 ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> ppaf->pg_offset;
997 ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> ppaf->pln_offset;
a40afad9 998 ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> ppaf->sec_offset;
a4bd217b
JG
999 }
1000
1001 return ppa64;
1002}
1003
a4bd217b
JG
1004static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
1005{
1006 u32 ppa32 = 0;
1007
1008 if (ppa64.ppa == ADDR_EMPTY) {
1009 ppa32 = ~0U;
1010 } else if (ppa64.c.is_cached) {
1011 ppa32 |= ppa64.c.line;
1012 ppa32 |= 1U << 31;
1013 } else {
e46f4e48
JG
1014 struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf;
1015
1016 ppa32 |= ppa64.g.ch << ppaf->ch_offset;
1017 ppa32 |= ppa64.g.lun << ppaf->lun_offset;
1018 ppa32 |= ppa64.g.blk << ppaf->blk_offset;
1019 ppa32 |= ppa64.g.pg << ppaf->pg_offset;
1020 ppa32 |= ppa64.g.pl << ppaf->pln_offset;
a40afad9 1021 ppa32 |= ppa64.g.sec << ppaf->sec_offset;
a4bd217b
JG
1022 }
1023
1024 return ppa32;
1025}
1026
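/*
 * Illustrative sketch (not part of the upstream header): when the device
 * address format fits in 32 bits, pblk stores the compact form in the L2P
 * table and the two converters above round-trip losslessly. The helper name
 * is hypothetical.
 */
static inline bool pblk_example_ppa32_roundtrip(struct pblk *pblk, u32 ppa32)
{
	struct ppa_addr ppa64 = pblk_ppa32_to_ppa64(pblk, ppa32);

	return pblk_ppa64_to_ppa32(pblk, ppa64) == ppa32;
}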
b1bcfda1
JG
1027static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
1028 sector_t lba)
a4bd217b 1029{
b1bcfda1
JG
1030 struct ppa_addr ppa;
1031
a4bd217b
JG
1032 if (pblk->ppaf_bitsize < 32) {
1033 u32 *map = (u32 *)pblk->trans_map;
1034
b1bcfda1 1035 ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
a4bd217b 1036 } else {
b1bcfda1 1037 struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
a4bd217b 1038
b1bcfda1 1039 ppa = map[lba];
a4bd217b 1040 }
b1bcfda1
JG
1041
1042 return ppa;
a4bd217b
JG
1043}
1044
b1bcfda1
JG
1045static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
1046 struct ppa_addr ppa)
a4bd217b 1047{
b1bcfda1
JG
1048 if (pblk->ppaf_bitsize < 32) {
1049 u32 *map = (u32 *)pblk->trans_map;
a4bd217b 1050
b1bcfda1
JG
1051 map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
1052 } else {
1053 u64 *map = (u64 *)pblk->trans_map;
a4bd217b 1054
b1bcfda1
JG
1055 map[lba] = ppa.ppa;
1056 }
a4bd217b
JG
1057}
1058
1059static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
1060{
1061 return (ppa_addr.ppa == ADDR_EMPTY);
1062}
1063
1064static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
1065{
1066 ppa_addr->ppa = ADDR_EMPTY;
1067}
1068
07698466
JG
1069static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
1070{
8b7bc849 1071 return (lppa.ppa == rppa.ppa);
07698466
JG
1072}
1073
a4bd217b
JG
1074static inline int pblk_addr_in_cache(struct ppa_addr ppa)
1075{
1076 return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
1077}
1078
1079static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
1080{
1081 return ppa.c.line;
1082}
1083
1084static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
1085{
1086 struct ppa_addr p;
1087
1088 p.c.line = addr;
1089 p.c.is_cached = 1;
1090
1091 return p;
1092}
1093
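/*
 * Illustrative sketch (not part of the upstream header): a cache-aware L2P
 * lookup built from the helpers above. An entry that is still in the write
 * buffer resolves to a cacheline instead of a device ppa. The real lookup
 * additionally holds pblk->trans_lock; the helper name is hypothetical.
 */
static inline bool pblk_example_lookup_is_cached(struct pblk *pblk,
						 sector_t lba,
						 struct ppa_addr *ppa)
{
	*ppa = pblk_trans_map_get(pblk, lba);

	return !pblk_ppa_empty(*ppa) && pblk_addr_in_cache(*ppa);
}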
a4bd217b 1094static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
dd2a4343 1095 struct line_header *header)
a4bd217b
JG
1096{
1097 u32 crc = ~(u32)0;
1098
dd2a4343 1099 crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
a4bd217b
JG
1100 sizeof(struct line_header) - sizeof(crc));
1101
1102 return crc;
1103}
1104
1105static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
1106 struct line_smeta *smeta)
1107{
1108 struct pblk_line_meta *lm = &pblk->lm;
1109 u32 crc = ~(u32)0;
1110
1111 crc = crc32_le(crc, (unsigned char *)smeta +
1112 sizeof(struct line_header) + sizeof(crc),
1113 lm->smeta_len -
1114 sizeof(struct line_header) - sizeof(crc));
1115
1116 return crc;
1117}
1118
1119static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
1120 struct line_emeta *emeta)
1121{
1122 struct pblk_line_meta *lm = &pblk->lm;
1123 u32 crc = ~(u32)0;
1124
1125 crc = crc32_le(crc, (unsigned char *)emeta +
1126 sizeof(struct line_header) + sizeof(crc),
dd2a4343 1127 lm->emeta_len[0] -
a4bd217b
JG
1128 sizeof(struct line_header) - sizeof(crc));
1129
1130 return crc;
1131}
1132
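/*
 * Illustrative sketch (not part of the upstream header): recovery only
 * trusts an emeta buffer whose stored CRC matches the one recomputed over
 * the buffer, as done by pblk_recov_check_emeta(). The helper name is
 * hypothetical.
 */
static inline bool pblk_example_emeta_crc_ok(struct pblk *pblk,
					     struct line_emeta *emeta)
{
	return le32_to_cpu(emeta->crc) == pblk_calc_emeta_crc(pblk, emeta);
}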
1133static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
1134{
1135 struct nvm_tgt_dev *dev = pblk->dev;
1136 struct nvm_geo *geo = &dev->geo;
1137 int flags;
1138
a40afad9 1139 flags = geo->pln_mode >> 1;
a4bd217b 1140
e2cddf20 1141 if (type == PBLK_WRITE)
a4bd217b
JG
1142 flags |= NVM_IO_SCRAMBLE_ENABLE;
1143
1144 return flags;
1145}
1146
f9c10152
JG
1147enum {
1148 PBLK_READ_RANDOM = 0,
1149 PBLK_READ_SEQUENTIAL = 1,
1150};
1151
1152static inline int pblk_set_read_mode(struct pblk *pblk, int type)
1153{
1154 struct nvm_tgt_dev *dev = pblk->dev;
1155 struct nvm_geo *geo = &dev->geo;
1156 int flags;
1157
1158 flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
1159 if (type == PBLK_READ_SEQUENTIAL)
a40afad9 1160 flags |= geo->pln_mode >> 1;
f9c10152
JG
1161
1162 return flags;
1163}
1164
1165static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
a4bd217b 1166{
f9c10152 1167 return !(nr_secs % pblk->min_write_pgs);
a4bd217b
JG
1168}
1169
1170#ifdef CONFIG_NVM_DEBUG
1171static inline void print_ppa(struct ppa_addr *p, char *msg, int error)
1172{
1173 if (p->c.is_cached) {
1174 pr_err("ppa: (%s: %x) cache line: %llu\n",
1175 msg, error, (u64)p->c.line);
1176 } else {
1177 pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
1178 msg, error,
1179 p->g.ch, p->g.lun, p->g.blk,
1180 p->g.pg, p->g.pl, p->g.sec);
1181 }
1182}
1183
1184static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
1185 int error)
1186{
1187 int bit = -1;
1188
1189 if (rqd->nr_ppas == 1) {
1190 print_ppa(&rqd->ppa_addr, "rqd", error);
1191 return;
1192 }
1193
1194 while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
1195 bit + 1)) < rqd->nr_ppas) {
1196 print_ppa(&rqd->ppa_list[bit], "rqd", error);
1197 }
1198
1199 pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
1200}
a4bd217b
JG
1201
1202static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
1203 struct ppa_addr *ppas, int nr_ppas)
1204{
1205 struct nvm_geo *geo = &tgt_dev->geo;
1206 struct ppa_addr *ppa;
1207 int i;
1208
1209 for (i = 0; i < nr_ppas; i++) {
1210 ppa = &ppas[i];
1211
1212 if (!ppa->c.is_cached &&
a40afad9
JG
1213 ppa->g.ch < geo->num_ch &&
1214 ppa->g.lun < geo->num_lun &&
e46f4e48 1215 ppa->g.pl < geo->num_pln &&
a40afad9 1216 ppa->g.blk < geo->num_chk &&
e46f4e48
JG
1217 ppa->g.pg < geo->num_pg &&
1218 ppa->g.sec < geo->ws_min)
a4bd217b
JG
1219 continue;
1220
a4bd217b 1221 print_ppa(ppa, "boundary", i);
1a94b2d4 1222
a4bd217b
JG
1223 return 1;
1224 }
1225 return 0;
1226}
1227
1a94b2d4
JG
1228static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
1229{
1230 struct nvm_tgt_dev *dev = pblk->dev;
1231 struct ppa_addr *ppa_list;
1232
1233 ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
1234
1235 if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
1236 WARN_ON(1);
1237 return -EINVAL;
1238 }
1239
1240 if (rqd->opcode == NVM_OP_PWRITE) {
1241 struct pblk_line *line;
1242 struct ppa_addr ppa;
1243 int i;
1244
1245 for (i = 0; i < rqd->nr_ppas; i++) {
1246 ppa = ppa_list[i];
b1bcfda1 1247 line = &pblk->lines[pblk_ppa_to_line(ppa)];
1a94b2d4
JG
1248
1249 spin_lock(&line->lock);
1250 if (line->state != PBLK_LINESTATE_OPEN) {
1251 pr_err("pblk: bad ppa: line:%d,state:%d\n",
1252 line->id, line->state);
1253 WARN_ON(1);
1254 spin_unlock(&line->lock);
1255 return -EINVAL;
1256 }
1257 spin_unlock(&line->lock);
1258 }
1259 }
1260
1261 return 0;
1262}
1263#endif
1264
a4bd217b
JG
1265static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
1266{
1267 struct pblk_line_meta *lm = &pblk->lm;
1268
1269 if (paddr > lm->sec_per_line)
1270 return 1;
1271
1272 return 0;
1273}
1274
1275static inline unsigned int pblk_get_bi_idx(struct bio *bio)
1276{
1277 return bio->bi_iter.bi_idx;
1278}
1279
1280static inline sector_t pblk_get_lba(struct bio *bio)
1281{
1282 return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
1283}
1284
1285static inline unsigned int pblk_get_secs(struct bio *bio)
1286{
1287 return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
1288}
1289
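/*
 * Illustrative worked example (not part of the upstream header): a 16 KB bio
 * starting at 512-byte sector 80 yields lba 80 / NR_PHY_IN_LOG = 80 / 8 = 10
 * and spans 16384 / PBLK_EXPOSED_PAGE_SIZE = 4 exposed 4 KB sectors,
 * matching the two helpers above.
 */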
a4bd217b
JG
1290static inline void pblk_setup_uuid(struct pblk *pblk)
1291{
1292 uuid_le uuid;
1293
1294 uuid_le_gen(&uuid);
1295 memcpy(pblk->instance_uuid, uuid.b, 16);
1296}
1297#endif /* PBLK_H_ */