// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
#include <linux/cleanup.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_HOST_LOCK 0
#define DEF_LBPWS10 0
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999
/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
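/* For example, on a build where BITS_PER_LONG is 64, SDEBUG_CANQUEUE
 * evaluates to 3 * 64 = 192 queued commands per submit queue. */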
/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

static struct kmem_cache *queued_cmd_cache;

#define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
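/* Note: the two macros above stash and retrieve the per-command
 * sdebug_queued_cmd pointer in scsi_cmnd->host_scribble, the field the SCSI
 * mid-level reserves for low-level-driver private use. */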
/* Zone types (zbcr05 table 25) */
	/* ZBC_ZTYPE_SOBR = 0x4, */

/* enumeration names taken from table 26, zbcr05 */
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};
struct sdebug_err_inject {
	struct list_head list;

	/*
	 * For ERR_FAIL_QUEUE_CMD
	 */

	unsigned char host_byte;
	unsigned char driver_byte;
	unsigned char status_byte;
	unsigned char sense_key;
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */

	/* For ZBC devices */
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;

	struct dentry *debugfs_entry;
	struct spinlock list_lock;
	struct list_head inject_err_list;
struct sdebug_target_info {
	bool reset_fail;
	struct dentry *debugfs_entry;
};
struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct list_head dev_info_list;
/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep;	/* protection info */
	void *map_storep;	/* provisioning map */
};

#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
struct sdebug_defer {
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
	bool aborted;		/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer sd_dp;
	struct scsi_cmnd *scmd;
};

struct sdebug_scsi_cmd {
static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;	/* bumped when mq_poll returns > 0 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;	/* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
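/* Reading opcode_info_arr below as an example: the INQUIRY entry is a leaf
 * (num_attached == 0) whose len_mask starts {6, 0xe3, 0xff, 0xff, 0xff, 0xc7,
 * 0, ...}: a 6 byte cdb where each mask byte shows which bits of that cdb
 * byte may be set, and mask bytes past the cdb length are left at 0. */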
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	    0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	    0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	    SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
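/* Example of the two-level dispatch above: a READ(10) cdb (opcode 0x28)
 * indexes opcode_ind_arr[0x28] == SDEB_I_READ; opcode_info_arr[SDEB_I_READ]
 * holds the READ(16) entry whose read_iarr overflow array supplies the
 * READ(10), READ(6) and READ(12) variants. */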
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
	{0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
	     0, 0} },	/* GET STREAM STATUS */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },	/* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* REPORT ZONES */
};
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,	/* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,	/* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO,	/* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr,	/* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },	/* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr,	/* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{0, 0x1e, 0, 0, NULL, NULL,	/* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL,	/* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL,	/* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr,	/* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr,	/* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS;	/* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE;	/* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;

static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;
/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;	/* iouring iopoll interface.*/

static atomic_long_t writes_by_group_number[64];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
static const struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

static struct dentry *sdebug_debugfs_root;
static void sdebug_err_free(struct rcu_head *head)
{
	struct sdebug_err_inject *inject =
		container_of(head, typeof(*inject), rcu);

	kfree(inject);
}

static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}
static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2)
		return -EINVAL;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			return count;
		}
	}

	spin_unlock(&devip->list_lock);
	return -EINVAL;
}
static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				   err->cmd);
			break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				   err->cnt, err->cmd, err->queuecmd_ret);
			break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				   err->type, err->cnt, err->cmd,
				   err->host_byte, err->driver_byte,
				   err->status_byte, err->sense_key,
				   err->asc, err->asq);
			break;
		}
	}

	return 0;
}
static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}

static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int inject_type;
	struct sdebug_err_inject *inject;
	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;

	buf = kzalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}

	if (buf[0] == '-')
		return sdebug_err_remove(sdev, buf, count);

	if (sscanf(buf, "%d", &inject_type) != 1) {
		kfree(buf);
		return -EINVAL;
	}

	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
	if (!inject) {
		kfree(buf);
		return -ENOMEM;
	}

	switch (inject_type) {
	case ERR_TMOUT_CMD:
	case ERR_ABORT_CMD_FAILED:
	case ERR_LUN_RESET_FAILED:
		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
			   &inject->cmd) != 3)
			goto out_error;
		break;

	case ERR_FAIL_QUEUE_CMD:
		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
			   &inject->cmd, &inject->queuecmd_ret) != 4)
			goto out_error;
		break;

	case ERR_FAIL_CMD:
		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
			   &inject->type, &inject->cnt, &inject->cmd,
			   &inject->host_byte, &inject->driver_byte,
			   &inject->status_byte, &inject->sense_key,
			   &inject->asc, &inject->asq) != 9)
			goto out_error;
		break;

	default:
		goto out_error;
	}

	sdebug_err_add(sdev, inject);
	kfree(buf);

	return count;

out_error:
	kfree(buf);
	kfree(inject);
	return -EINVAL;
}

static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.write	= sdebug_error_write,
	.release = single_release,
};
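/*
 * Assuming the per-device "error" debugfs file is wired to the fops above
 * elsewhere in this driver, a write of "0 -1 0x12" would queue an
 * ERR_TMOUT_CMD injection (type 0, count -1, INQUIRY opcode 0x12), and a
 * write beginning with '-' removes a previously added injection.
 */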
static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{
	struct scsi_target *starget = (struct scsi_target *)m->private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');

	return 0;
}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}

static ssize_t sdebug_target_reset_fail_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	int ret;
	struct scsi_target *starget =
		(struct scsi_target *)file->f_inode->i_private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
	return ret < 0 ? ret : count;
}

static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
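/*
 * The "fail_reset" debugfs file created in sdebug_target_alloc() below
 * accepts any kstrtobool value ("Y"/"1" or "N"/"0"); the resulting
 * targetip->reset_fail flag is presumably consulted by the target reset
 * handler (not shown here) to make target resets report failure.
 */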
static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}

static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}

static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	starget->hostdata = NULL;
	async_schedule(sdebug_tartget_cleanup_async, targetip);
}
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		lsip = xa_load(per_store_ap, 0);	/* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
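/*
 * Note that do_div() above reduces the LBA modulo sdebug_store_sectors, so a
 * store smaller than the advertised capacity (e.g. when virtual_gb is larger
 * than dev_size_mb) simply wraps around within the backing ram.
 */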
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] |= 0x7 & in_bit;
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			break;
		}
		clear_bit(k, devip->uas_bm);
		sdev_printk(KERN_INFO, scp->device,
			    "%s reports: Unit attention: %s\n",
			    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;

	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			memcpy(arr + num, lu_name, 16);
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	put_unaligned_be16(port_group_id, arr + num);
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	memcpy(arr + num, "naa.32222220", 12);
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	memset(arr + num, 0, 4);

	return num;
}
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22, 0x22, 0x22, 0x0, 0xbb, 0x0,
    0x22, 0x22, 0x22, 0x0, 0xbb, 0x1,
    0x22, 0x22, 0x22, 0x0, 0xbb, 0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	/* Maximum Unmap LBA Count */
	put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

	/* Maximum Unmap Block Descriptor Count */
	put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
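/*
 * For reference: the offsets written above are relative to byte 4 of the
 * full Block Limits VPD page, since resp_inquiry() hands this function
 * &arr[4]. For example, the OPTIMAL TRANSFER LENGTH GRANULARITY placed at
 * arr + 2 here lands at byte 6 of the page as seen by the initiator.
 */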
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* less than 1.8" */
/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;	/* threshold exponent */
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	if (devip->zcap < devip->zsize) {
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	}
#define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */

enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };

/* Block limits extension VPD page (SBC-4) */
static int inquiry_vpd_b7(unsigned char *arrb4)
{
	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
	return SDEBUG_BLE_LEN_AFTER_B4;
}
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *cmd = scp->cmnd;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = devip->zoned;
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);

	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				(devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
				arr[n++] = 0xb7;  /* Block limits extension */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;	/* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;	/* no protection stuff */
			/*
			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
			 */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	/* protocol specific lu */
			arr[10] = 0x82;	/* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else if (cmd[2] == 0xb7) { /* block limits extension page */
			arr[3] = inquiry_vpd_b7(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			return check_condition_result;
		}
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
/* See resp_iec_m_pg() for how this data is manipulated */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,

static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);
	u32 alloc_len = cmd[4];
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		arr[2] = LOGICAL_UNIT_NOT_READY;
		arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
		arr[2] = NOT_READY;		/* NO_SENSE in sense_key */
		arr[7] = 0xa;			/* 18 byte sense buffer */
		arr[12] = LOGICAL_UNIT_NOT_READY;
		arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		/* Information exceptions control mode page: TEST=1, MRIE=6 */
		arr[1] = 0x0;			/* NO_SENSE in sense_key */
		arr[2] = THRESHOLD_EXCEEDED;
		arr[3] = 0xff;			/* Failure prediction(false) */
		arr[2] = 0x0;			/* NO_SENSE in sense_key */
		arr[7] = 0xa;			/* 18 byte sense buffer */
		arr[12] = THRESHOLD_EXCEEDED;
		arr[13] = 0xff;			/* Failure prediction(false) */
	} else {	/* nothing to report */
		memset(arr, 0, len);
		memset(arr, 0, len);
	}
	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
}
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
static sector_t get_sdebug_capacity(void)
{
	static const unsigned int gibibyte = 1073741824;

	if (sdebug_virtual_gb > 0)
		return (sector_t)sdebug_virtual_gb *
			(gibibyte / sdebug_sector_size);

	return sdebug_store_sectors;
}
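/*
 * For example, with sdebug_virtual_gb=4 and sdebug_sector_size=512 the
 * function above reports 4 * (1073741824 / 512) = 8388608 sectors,
 * regardless of how many sectors the backing store actually holds.
 */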
#define SDEBUG_READCAP_ARR_SZ 8
static int resp_readcap(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0);
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
#define SDEBUG_READCAP16_ARR_SZ 32
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	/*
	 * Since the scsi_debug READ CAPACITY implementation always reports the
	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
	 */
	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}
	return fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Status code */
	arr[n++] = 0;	/* Vendor unique */
	arr[n++] = 0x1;	/* One port per group */
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Reserved */
	put_unaligned_be16(port_a, arr + n);
	arr[n++] = 3;	/* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Status code */
	arr[n++] = 0;	/* Vendor unique */
	arr[n++] = 0x1;	/* One port per group */
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Reserved */
	put_unaligned_be16(port_b, arr + n);

	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
				if (rctd)
					put_unaligned_be16(0xa,
			}
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
					return check_condition_result;
				}
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			}
			if (3 == supp) {
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
			}
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd)
			put_unaligned_be16(0xa, arr + offset);
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	return errsts;
}
static int resp_rsup_tmfs(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	repd = !!(cmd[2] & 0x80);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
	arr[1] = 0x1;		/* ITNRS */

	len = (len < alloc_len) ? len : alloc_len;
	return fill_from_dev_buffer(scp, arr, len);
}
/* <<Following mode page info copied from ST318451LW>> */

static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}

static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}

static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{	/* Format device page for mode_sense */
	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0x40, 0, 0, 0};

	memcpy(p, format_pg, sizeof(format_pg));
	put_unaligned_be16(sdebug_sectors_per, p + 10);
	put_unaligned_be16(sdebug_sector_size, p + 12);
	if (sdebug_removable)
		p[20] |= 0x20; /* should agree with INQUIRY */
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(format_pg) - 2);
	return sizeof(format_pg);
}
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,

static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{	/* Caching page for mode_sense */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};

	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,

static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Control mode page for mode_sense */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,

	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}
/* IO Advice Hints Grouping mode page */
static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
{
	/* IO Advice Hints Grouping mode page */
	struct grouping_m_pg {
		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
	};
	static const struct grouping_m_pg gr_m_pg = {
		.page_code = 0xa | 0x40,
		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
	};

	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
	if (1 == pcontrol) {
		/* There are no changeable values so clear from byte 4 on. */
		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
	}
	return sizeof(gr_m_pg);
}
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,

	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,

	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
/* PAGE_SIZE is more than necessary but provides room for future expansion. */
#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE

static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	u32 alloc_len, offset, len;
	int target = scp->device->id;
	unsigned char *arr __free(kfree);
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc;

	arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = devip->zoned;
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
	/*
	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
	 *        len += resp_*_pg(ap + len, pcontrol, target);
	 */
	case 0x1:	/* Read-Write error recovery page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_err_recov_pg(ap, pcontrol, target);
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_disconnect_pg(ap, pcontrol, target);
		break;
	case 0x3:	/* Format device page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_format_pg(ap, pcontrol, target);
		break;
	case 0x8:	/* Caching page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
		}
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		len = resp_grouping_m_pg(ap, pcontrol, target);
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		len += resp_grouping_m_pg(ap + len, pcontrol, target);
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if (subpcode > 0x2 && subpcode < 0xff)
			goto bad_subpcode;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_iec_m_pg(ap, pcontrol, target);
		break;
	case 0x3f:	/* Read all Mode pages */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_err_recov_pg(ap, pcontrol, target);
		len += resp_disconnect_pg(ap + len, pcontrol, target);
		if (is_disk) {
			len += resp_format_pg(ap + len, pcontrol, target);
			len += resp_caching_pg(ap + len, pcontrol, target);
		} else if (is_zbc) {
			len += resp_caching_pg(ap + len, pcontrol, target);
		}
		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
		if (0xff == subpcode)
			len += resp_grouping_m_pg(ap + len, pcontrol, target);
		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if (0xff == subpcode) {
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		}
		len += resp_iec_m_pg(ap + len, pcontrol, target);
		break;

	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));

bad_pcode:
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
	return check_condition_result;

bad_subpcode:
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
	return check_condition_result;
}
#define SDEBUG_MAX_MSELECT_SZ 512

static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;
	sp = cmd[1] & 0x1;
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = bd_len + (mselect6 ? 4 : 8);
	if (md_len > 2 || off >= res) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
static int resp_temp_l_pg(unsigned char *arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}

static int resp_ie_l_pg(unsigned char *arr)
{
	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,

	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
		arr[4] = THRESHOLD_EXCEEDED;
	}
	return sizeof(ie_l_pg);
}

static int resp_env_rep_l_spg(unsigned char *arr)
{
	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
					 0x1, 0x0, 0x23, 0x8,
					 0x0, 55, 72, 35, 55, 45, 0, 0,

	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
	return sizeof(env_rep_l_spg);
}
#define SDEBUG_MAX_LSENSE_SZ 512

static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode;
	u32 alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);

	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xff;	/* all 0xd subpages */
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[n++] = 0xff;	/* all 0x2f subpages */
			break;
		case 0xd:	/* Temperature subpages */
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xff;	/* these subpages */
			break;
		case 0x2f:	/* Informational exceptions subpages */
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[n++] = 0xff;	/* these subpages */
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (subpcode > 0) {
		if (pcode == 0xd && subpcode == 1)
			arr[3] = resp_env_rep_l_spg(arr + 4);
		else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
	return devip->nr_zones != 0;
}

static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * If the zone capacity is less than the zone size, adjust for gap
	 */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	if (lba >= zsp->z_start + zsp->z_size)
		zsp++;
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
	return zsp;
}
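/*
 * Illustrative example of the gap-zone arithmetic above: with one
 * conventional zone (nr_conv_zones == 1) and zcap < zsize, logical zone
 * number 3 maps to zstate[2 * 3 - 1] = zstate[5], because each logical
 * zone beyond the conventional ones occupies a data/gap pair of entries.
 */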
static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_CNV;
}

static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_GAP;
}

static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
{
	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
}
static void zbc_close_zone(struct sdebug_dev_info *devip,
			   struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
		return;

	if (zc == ZC2_IMPLICIT_OPEN)
		devip->nr_imp_open--;
	else
		devip->nr_exp_open--;

	if (zsp->z_wp == zsp->z_start) {
		zsp->z_cond = ZC1_EMPTY;
	} else {
		zsp->z_cond = ZC4_CLOSED;
	}
}

static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp = &devip->zstate[0];

	for (i = 0; i < devip->nr_zones; i++, zsp++) {
		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
			zbc_close_zone(devip, zsp);
		}
	}
}
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
				     struct sdeb_zone_state *zsp)
{
	switch (zsp->z_cond) {
	case ZC2_IMPLICIT_OPEN:
		devip->nr_imp_open--;
		break;
	case ZC3_EXPLICIT_OPEN:
		devip->nr_exp_open--;
		break;
	default:
		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
			  zsp->z_start, zsp->z_cond);
	}

	zsp->z_cond = ZC5_FULL;
}
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
	}

	if (lba != zsp->z_wp)
		zsp->z_non_seq_resource = true;
	} else if (end > zsp->z_wp) {
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
	zend = zsp->z_start + zsp->z_size;
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	/* For host-managed, reads cannot cross zone types boundaries */
	if (zsp->z_type != zsp_end->z_type) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
		return check_condition_result;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}
/*
 * Note: if BUG_ON() fires it usually indicates a problem with the parser
 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
 * that access any of the "stores" in struct sdeb_store_info should call this
 * function with bug_if_fake_rw set to true.
 */
static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
						bool bug_if_fake_rw)
{
	if (sdebug_fake_rw) {
		BUG_ON(bug_if_fake_rw);	/* See note above */
		return NULL;
	}
	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
}
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write,
			    u8 group_number)
{
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;

	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
		atomic_long_inc(&writes_by_group_number[group_number]);

	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			     fsp + (block * sdebug_sector_size),
			     (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
				      fsp, rest * sdebug_sector_size,
				      sg_skip + ((num - rest) * sdebug_sector_size),
				      do_write);
	}

	return ret;
}
3429 static int do_dout_fetch(struct scsi_cmnd
*scp
, u32 num
, u8
*doutp
)
3431 struct scsi_data_buffer
*sdb
= &scp
->sdb
;
3435 if (scp
->sc_data_direction
!= DMA_TO_DEVICE
)
3437 return sg_copy_buffer(sdb
->table
.sgl
, sdb
->table
.nents
, doutp
,
3438 num
* sdebug_sector_size
, 0, true);
/* If sip->storep+lba compares equal to arr(num), then copy top half of
 * arr into sip->storep+lba and return true. If comparison fails then
 * return false. */
static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
			      const u8 *arr, bool compare_only)
{
	u64 block, rest = 0;
	u32 store_blks = sdebug_store_sectors;
	u32 lb_size = sdebug_sector_size;
	u8 *fsp = sip->storep;

	block = do_div(lba, store_blks);
	if (block + num > store_blks)
		rest = block + num - store_blks;

	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (rest)
		res = memcmp(fsp, arr + ((num - rest) * lb_size),
	arr += num * lb_size;
	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (rest)
		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
	return true;
}
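/*
 * Buffer layout assumed by comp_write_worker(): arr holds 2 * num logical
 * blocks; the first num blocks are the verify data compared against the
 * store, and (after 'arr += num * lb_size' above) the second num blocks are
 * the data written back when the comparison succeeds.
 */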
static __be16 dif_compute_csum(const void *buf, int len)
{
	__be16 csum;

	if (sdebug_guard)
		csum = (__force __be16)ip_compute_csum(buf, len);
	else
		csum = cpu_to_be16(crc_t10dif(buf, len));

	return csum;
}
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
		       (unsigned long)sector,
		       be16_to_cpu(sdt->guard_tag),
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
		       (unsigned long)sector);
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
		       (unsigned long)sector);
	}
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
	}
	sg_miter_stop(&miter);
}
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
		}
	}

	dif_copy_prot(scp, start_sec, sectors, true);
static void sdeb_read_lock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__acquire(&sip->macc_lck);
		else
			__acquire(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			read_lock(&sip->macc_lck);
		else
			read_lock(&sdeb_fake_rw_lck);
	}
}

static void sdeb_read_unlock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__release(&sip->macc_lck);
		else
			__release(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			read_unlock(&sip->macc_lck);
		else
			read_unlock(&sdeb_fake_rw_lck);
	}
}

static void sdeb_write_lock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__acquire(&sip->macc_lck);
		else
			__acquire(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			write_lock(&sip->macc_lck);
		else
			write_lock(&sdeb_fake_rw_lck);
	}
}

static void sdeb_write_unlock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__release(&sip->macc_lck);
		else
			__release(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			write_unlock(&sip->macc_lck);
		else
			write_unlock(&sdeb_fake_rw_lck);
	}
}
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case READ_16:
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		break;
	case READ_10:
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		break;
	case READ_6:
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
			(u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		break;
	case READ_12:
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		break;
	case XDWRITEREAD_10:
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
	}
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		atomic_set(&sdeb_inject_pending, 0);
	}
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	sdeb_read_lock(sip);

	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false, 0);
	sdeb_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
3806 static int prot_verify_write(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
3807 unsigned int sectors
, u32 ei_lba
)
3810 struct t10_pi_tuple
*sdt
;
3812 sector_t sector
= start_sec
;
3815 struct sg_mapping_iter diter
;
3816 struct sg_mapping_iter piter
;
3818 BUG_ON(scsi_sg_count(SCpnt
) == 0);
3819 BUG_ON(scsi_prot_sg_count(SCpnt
) == 0);
3821 sg_miter_start(&piter
, scsi_prot_sglist(SCpnt
),
3822 scsi_prot_sg_count(SCpnt
),
3823 SG_MITER_ATOMIC
| SG_MITER_FROM_SG
);
3824 sg_miter_start(&diter
, scsi_sglist(SCpnt
), scsi_sg_count(SCpnt
),
3825 SG_MITER_ATOMIC
| SG_MITER_FROM_SG
);
3827 /* For each protection page */
3828 while (sg_miter_next(&piter
)) {
3830 if (WARN_ON(!sg_miter_next(&diter
))) {
3835 for (ppage_offset
= 0; ppage_offset
< piter
.length
;
3836 ppage_offset
+= sizeof(struct t10_pi_tuple
)) {
3837 /* If we're at the end of the current
3838 * data page advance to the next one
3840 if (dpage_offset
>= diter
.length
) {
3841 if (WARN_ON(!sg_miter_next(&diter
))) {
3848 sdt
= piter
.addr
+ ppage_offset
;
3849 daddr
= diter
.addr
+ dpage_offset
;
3851 if (SCpnt
->cmnd
[1] >> 5 != 3) { /* WRPROTECT */
3852 ret
= dif_verify(sdt
, daddr
, sector
, ei_lba
);
3859 dpage_offset
+= sdebug_sector_size
;
3861 diter
.consumed
= dpage_offset
;
3862 sg_miter_stop(&diter
);
3864 sg_miter_stop(&piter
);
3866 dif_copy_prot(SCpnt
, start_sec
, sectors
, false);
3873 sg_miter_stop(&diter
);
3874 sg_miter_stop(&piter
);
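/*
 * Logical block provisioning (LBP) map helpers below. Each bit in the
 * sip->map_storep bitmap covers one unmap granularity's worth of logical
 * blocks, optionally shifted by the unmap alignment. Rough illustrative
 * example (actual values depend on the module parameters): with a
 * granularity of 8 blocks and an alignment of 0, LBA 17 maps to bit
 * index 2, which covers LBAs 16..23.
 */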
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}

static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}

static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
			      unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, sip->map_storep);

	if (mapped)
		next = find_next_zero_bit(sip->map_storep, map_size, index);
	else
		next = find_next_bit(sip->map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}

static void map_region(struct sdeb_store_info *sip, sector_t lba,
		       unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (index < map_size)
			set_bit(index, sip->map_storep);

		lba = map_index_to_lba(index + 1);
	}
}

static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3960 static int resp_write_dt0(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
3968 struct sdeb_store_info
*sip
= devip2sip(devip
, true);
3969 u8
*cmd
= scp
->cmnd
;
3974 lba
= get_unaligned_be64(cmd
+ 2);
3975 num
= get_unaligned_be32(cmd
+ 10);
3976 group
= cmd
[14] & 0x3f;
3981 lba
= get_unaligned_be32(cmd
+ 2);
3982 group
= cmd
[6] & 0x3f;
3983 num
= get_unaligned_be16(cmd
+ 7);
3988 lba
= (u32
)cmd
[3] | (u32
)cmd
[2] << 8 |
3989 (u32
)(cmd
[1] & 0x1f) << 16;
3990 num
= (0 == cmd
[4]) ? 256 : cmd
[4];
3995 lba
= get_unaligned_be32(cmd
+ 2);
3996 num
= get_unaligned_be32(cmd
+ 6);
3997 group
= cmd
[6] & 0x3f;
4000 case 0x53: /* XDWRITEREAD(10) */
4002 lba
= get_unaligned_be32(cmd
+ 2);
4003 group
= cmd
[6] & 0x1f;
4004 num
= get_unaligned_be16(cmd
+ 7);
4007 default: /* assume WRITE(32) */
4008 group
= cmd
[6] & 0x3f;
4009 lba
= get_unaligned_be64(cmd
+ 12);
4010 ei_lba
= get_unaligned_be32(cmd
+ 20);
4011 num
= get_unaligned_be32(cmd
+ 28);
4015 if (unlikely(have_dif_prot
&& check_prot
)) {
4016 if (sdebug_dif
== T10_PI_TYPE2_PROTECTION
&&
4018 mk_sense_invalid_opcode(scp
);
4019 return check_condition_result
;
4021 if ((sdebug_dif
== T10_PI_TYPE1_PROTECTION
||
4022 sdebug_dif
== T10_PI_TYPE3_PROTECTION
) &&
4023 (cmd
[1] & 0xe0) == 0)
4024 sdev_printk(KERN_ERR
, scp
->device
, "Unprotected WR "
4028 sdeb_write_lock(sip
);
4029 ret
= check_device_access_params(scp
, lba
, num
, true);
4031 sdeb_write_unlock(sip
);
4036 if (unlikely(sdebug_dix
&& scsi_prot_sg_count(scp
))) {
4037 switch (prot_verify_write(scp
, lba
, num
, ei_lba
)) {
4038 case 1: /* Guard tag error */
4039 if (scp
->prot_flags
& SCSI_PROT_GUARD_CHECK
) {
4040 sdeb_write_unlock(sip
);
4041 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, 1);
4042 return illegal_condition_result
;
4043 } else if (scp
->cmnd
[1] >> 5 != 3) { /* WRPROTECT != 3 */
4044 sdeb_write_unlock(sip
);
4045 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, 1);
4046 return check_condition_result
;
4049 case 3: /* Reference tag error */
4050 if (scp
->prot_flags
& SCSI_PROT_REF_CHECK
) {
4051 sdeb_write_unlock(sip
);
4052 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, 3);
4053 return illegal_condition_result
;
4054 } else if (scp
->cmnd
[1] >> 5 != 3) { /* WRPROTECT != 3 */
4055 sdeb_write_unlock(sip
);
4056 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, 3);
4057 return check_condition_result
;
4063 ret
= do_device_access(sip
, scp
, 0, lba
, num
, true, group
);
4064 if (unlikely(scsi_debug_lbp()))
4065 map_region(sip
, lba
, num
);
4066 /* If ZBC zone then bump its write pointer */
4067 if (sdebug_dev_is_zoned(devip
))
4068 zbc_inc_wp(devip
, lba
, num
);
4069 sdeb_write_unlock(sip
);
4070 if (unlikely(-1 == ret
))
4071 return DID_ERROR
<< 16;
4072 else if (unlikely(sdebug_verbose
&&
4073 (ret
< (num
* sdebug_sector_size
))))
4074 sdev_printk(KERN_INFO
, scp
->device
,
4075 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4076 my_name
, num
* sdebug_sector_size
, ret
);
4078 if (unlikely((sdebug_opts
& SDEBUG_OPT_RECOV_DIF_DIX
) &&
4079 atomic_read(&sdeb_inject_pending
))) {
4080 if (sdebug_opts
& SDEBUG_OPT_RECOVERED_ERR
) {
4081 mk_sense_buffer(scp
, RECOVERED_ERROR
, THRESHOLD_EXCEEDED
, 0);
4082 atomic_set(&sdeb_inject_pending
, 0);
4083 return check_condition_result
;
4084 } else if (sdebug_opts
& SDEBUG_OPT_DIF_ERR
) {
4085 /* Logical block guard check failed */
4086 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, 1);
4087 atomic_set(&sdeb_inject_pending
, 0);
4088 return illegal_condition_result
;
4089 } else if (sdebug_opts
& SDEBUG_OPT_DIX_ERR
) {
4090 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, 1);
4091 atomic_set(&sdeb_inject_pending
, 0);
4092 return illegal_condition_result
;
/*
 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
 */
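/*
 * Sketch of the data-out layout this handler expects (illustrative, not
 * normative): the first lbdof logical blocks of the data-out buffer hold
 * a parameter list header followed by 32-byte LBA range descriptors; the
 * data to be written for each descriptor follows, packed in descriptor
 * order starting at byte offset lbdof * block size.
 */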
4102 static int resp_write_scat(struct scsi_cmnd
*scp
,
4103 struct sdebug_dev_info
*devip
)
4105 u8
*cmd
= scp
->cmnd
;
4108 struct sdeb_store_info
*sip
= devip2sip(devip
, true);
4110 u16 lbdof
, num_lrd
, k
;
4111 u32 num
, num_by
, bt_len
, lbdof_blen
, sg_off
, cum_lb
;
4112 u32 lb_size
= sdebug_sector_size
;
4118 static const u32 lrd_size
= 32; /* + parameter list header size */
4120 if (cmd
[0] == VARIABLE_LENGTH_CMD
) {
4122 group
= cmd
[6] & 0x3f;
4123 wrprotect
= (cmd
[10] >> 5) & 0x7;
4124 lbdof
= get_unaligned_be16(cmd
+ 12);
4125 num_lrd
= get_unaligned_be16(cmd
+ 16);
4126 bt_len
= get_unaligned_be32(cmd
+ 28);
4127 } else { /* that leaves WRITE SCATTERED(16) */
4129 wrprotect
= (cmd
[2] >> 5) & 0x7;
4130 lbdof
= get_unaligned_be16(cmd
+ 4);
4131 num_lrd
= get_unaligned_be16(cmd
+ 8);
4132 bt_len
= get_unaligned_be32(cmd
+ 10);
4133 group
= cmd
[14] & 0x3f;
4134 if (unlikely(have_dif_prot
)) {
4135 if (sdebug_dif
== T10_PI_TYPE2_PROTECTION
&&
4137 mk_sense_invalid_opcode(scp
);
4138 return illegal_condition_result
;
4140 if ((sdebug_dif
== T10_PI_TYPE1_PROTECTION
||
4141 sdebug_dif
== T10_PI_TYPE3_PROTECTION
) &&
4143 sdev_printk(KERN_ERR
, scp
->device
,
4144 "Unprotected WR to DIF device\n");
4147 if ((num_lrd
== 0) || (bt_len
== 0))
4148 return 0; /* T10 says these do-nothings are not errors */
4151 sdev_printk(KERN_INFO
, scp
->device
,
4152 "%s: %s: LB Data Offset field bad\n",
4154 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
4155 return illegal_condition_result
;
4157 lbdof_blen
= lbdof
* lb_size
;
4158 if ((lrd_size
+ (num_lrd
* lrd_size
)) > lbdof_blen
) {
4160 sdev_printk(KERN_INFO
, scp
->device
,
4161 "%s: %s: LBA range descriptors don't fit\n",
4163 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
4164 return illegal_condition_result
;
4166 lrdp
= kzalloc(lbdof_blen
, GFP_ATOMIC
| __GFP_NOWARN
);
4168 return SCSI_MLQUEUE_HOST_BUSY
;
4170 sdev_printk(KERN_INFO
, scp
->device
,
4171 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4172 my_name
, __func__
, lbdof_blen
);
4173 res
= fetch_to_dev_buffer(scp
, lrdp
, lbdof_blen
);
4175 ret
= DID_ERROR
<< 16;
4179 sdeb_write_lock(sip
);
4180 sg_off
= lbdof_blen
;
4181 /* Spec says Buffer xfer Length field in number of LBs in dout */
4183 for (k
= 0, up
= lrdp
+ lrd_size
; k
< num_lrd
; ++k
, up
+= lrd_size
) {
4184 lba
= get_unaligned_be64(up
+ 0);
4185 num
= get_unaligned_be32(up
+ 8);
4187 sdev_printk(KERN_INFO
, scp
->device
,
4188 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
4189 my_name
, __func__
, k
, lba
, num
, sg_off
);
4192 ret
= check_device_access_params(scp
, lba
, num
, true);
4194 goto err_out_unlock
;
4195 num_by
= num
* lb_size
;
4196 ei_lba
= is_16
? 0 : get_unaligned_be32(up
+ 12);
4198 if ((cum_lb
+ num
) > bt_len
) {
4200 sdev_printk(KERN_INFO
, scp
->device
,
4201 "%s: %s: sum of blocks > data provided\n",
4203 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, WRITE_ERROR_ASC
,
4205 ret
= illegal_condition_result
;
4206 goto err_out_unlock
;
4210 if (unlikely(sdebug_dix
&& scsi_prot_sg_count(scp
))) {
4211 int prot_ret
= prot_verify_write(scp
, lba
, num
,
4215 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10,
4217 ret
= illegal_condition_result
;
4218 goto err_out_unlock
;
4222 ret
= do_device_access(sip
, scp
, sg_off
, lba
, num
, true, group
);
4223 /* If ZBC zone then bump its write pointer */
4224 if (sdebug_dev_is_zoned(devip
))
4225 zbc_inc_wp(devip
, lba
, num
);
4226 if (unlikely(scsi_debug_lbp()))
4227 map_region(sip
, lba
, num
);
4228 if (unlikely(-1 == ret
)) {
4229 ret
= DID_ERROR
<< 16;
4230 goto err_out_unlock
;
4231 } else if (unlikely(sdebug_verbose
&& (ret
< num_by
)))
4232 sdev_printk(KERN_INFO
, scp
->device
,
4233 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4234 my_name
, num_by
, ret
);
4236 if (unlikely((sdebug_opts
& SDEBUG_OPT_RECOV_DIF_DIX
) &&
4237 atomic_read(&sdeb_inject_pending
))) {
4238 if (sdebug_opts
& SDEBUG_OPT_RECOVERED_ERR
) {
4239 mk_sense_buffer(scp
, RECOVERED_ERROR
, THRESHOLD_EXCEEDED
, 0);
4240 atomic_set(&sdeb_inject_pending
, 0);
4241 ret
= check_condition_result
;
4242 goto err_out_unlock
;
4243 } else if (sdebug_opts
& SDEBUG_OPT_DIF_ERR
) {
4244 /* Logical block guard check failed */
4245 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, 1);
4246 atomic_set(&sdeb_inject_pending
, 0);
4247 ret
= illegal_condition_result
;
4248 goto err_out_unlock
;
4249 } else if (sdebug_opts
& SDEBUG_OPT_DIX_ERR
) {
4250 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, 1);
4251 atomic_set(&sdeb_inject_pending
, 0);
4252 ret
= illegal_condition_result
;
4253 goto err_out_unlock
;
4261 sdeb_write_unlock(sip
);
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	u8 *fs1p;
	u8 *fsp;

	sdeb_write_lock(sip);

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		sdeb_write_unlock(sip);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
out:
	sdeb_write_unlock(sip);

	return 0;
}
static int resp_write_same_10(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u32 lba;
	u16 num;
	u32 ei_lba = 0;
	bool unmap = false;

	if (cmd[1] & 0x8) {
		if (sdebug_lbpws10 == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	lba = get_unaligned_be32(cmd + 2);
	num = get_unaligned_be16(cmd + 7);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
}

static int resp_write_same_16(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba = 0;
	bool unmap = false;
	bool ndob = false;

	if (cmd[1] & 0x8) {	/* UNMAP */
		if (sdebug_lbpws == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
		ndob = true;
	lba = get_unaligned_be64(cmd + 2);
	num = get_unaligned_be32(cmd + 10);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
}
4382 /* Note the mode field is in the same position as the (lower) service action
4383 * field. For the Report supported operation codes command, SPC-4 suggests
4384 * each mode of this command should be reported separately; for future. */
4385 static int resp_write_buffer(struct scsi_cmnd
*scp
,
4386 struct sdebug_dev_info
*devip
)
4388 u8
*cmd
= scp
->cmnd
;
4389 struct scsi_device
*sdp
= scp
->device
;
4390 struct sdebug_dev_info
*dp
;
4393 mode
= cmd
[1] & 0x1f;
4395 case 0x4: /* download microcode (MC) and activate (ACT) */
4396 /* set UAs on this device only */
4397 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
4398 set_bit(SDEBUG_UA_MICROCODE_CHANGED
, devip
->uas_bm
);
4400 case 0x5: /* download MC, save and ACT */
4401 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
, devip
->uas_bm
);
4403 case 0x6: /* download MC with offsets and ACT */
4404 /* set UAs on most devices (LUs) in this target */
4405 list_for_each_entry(dp
,
4406 &devip
->sdbg_host
->dev_info_list
,
4408 if (dp
->target
== sdp
->id
) {
4409 set_bit(SDEBUG_UA_BUS_RESET
, dp
->uas_bm
);
4411 set_bit(SDEBUG_UA_MICROCODE_CHANGED
,
4415 case 0x7: /* download MC with offsets, save, and ACT */
4416 /* set UA on all devices (LUs) in this target */
4417 list_for_each_entry(dp
,
4418 &devip
->sdbg_host
->dev_info_list
,
4420 if (dp
->target
== sdp
->id
)
4421 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
,
4425 /* do nothing for this command for other mode values */
4431 static int resp_comp_write(struct scsi_cmnd
*scp
,
4432 struct sdebug_dev_info
*devip
)
4434 u8
*cmd
= scp
->cmnd
;
4436 struct sdeb_store_info
*sip
= devip2sip(devip
, true);
4439 u32 lb_size
= sdebug_sector_size
;
4444 lba
= get_unaligned_be64(cmd
+ 2);
4445 num
= cmd
[13]; /* 1 to a maximum of 255 logical blocks */
4447 return 0; /* degenerate case, not an error */
4448 if (sdebug_dif
== T10_PI_TYPE2_PROTECTION
&&
4450 mk_sense_invalid_opcode(scp
);
4451 return check_condition_result
;
4453 if ((sdebug_dif
== T10_PI_TYPE1_PROTECTION
||
4454 sdebug_dif
== T10_PI_TYPE3_PROTECTION
) &&
4455 (cmd
[1] & 0xe0) == 0)
4456 sdev_printk(KERN_ERR
, scp
->device
, "Unprotected WR "
4458 ret
= check_device_access_params(scp
, lba
, num
, false);
4462 arr
= kcalloc(lb_size
, dnum
, GFP_ATOMIC
);
4464 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
4466 return check_condition_result
;
4469 sdeb_write_lock(sip
);
4471 ret
= do_dout_fetch(scp
, dnum
, arr
);
4473 retval
= DID_ERROR
<< 16;
4475 } else if (sdebug_verbose
&& (ret
< (dnum
* lb_size
)))
4476 sdev_printk(KERN_INFO
, scp
->device
, "%s: compare_write: cdb "
4477 "indicated=%u, IO sent=%d bytes\n", my_name
,
4478 dnum
* lb_size
, ret
);
4479 if (!comp_write_worker(sip
, lba
, num
, arr
, false)) {
4480 mk_sense_buffer(scp
, MISCOMPARE
, MISCOMPARE_VERIFY_ASC
, 0);
4481 retval
= check_condition_result
;
4484 if (scsi_debug_lbp())
4485 map_region(sip
, lba
, num
);
4487 sdeb_write_unlock(sip
);
4492 struct unmap_block_desc
{
4498 static int resp_unmap(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
4501 struct unmap_block_desc
*desc
;
4502 struct sdeb_store_info
*sip
= devip2sip(devip
, true);
4503 unsigned int i
, payload_len
, descriptors
;
4506 if (!scsi_debug_lbp())
4507 return 0; /* fib and say its done */
4508 payload_len
= get_unaligned_be16(scp
->cmnd
+ 7);
4509 BUG_ON(scsi_bufflen(scp
) != payload_len
);
4511 descriptors
= (payload_len
- 8) / 16;
4512 if (descriptors
> sdebug_unmap_max_desc
) {
4513 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 7, -1);
4514 return check_condition_result
;
4517 buf
= kzalloc(scsi_bufflen(scp
), GFP_ATOMIC
);
4519 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
4521 return check_condition_result
;
4524 scsi_sg_copy_to_buffer(scp
, buf
, scsi_bufflen(scp
));
4526 BUG_ON(get_unaligned_be16(&buf
[0]) != payload_len
- 2);
4527 BUG_ON(get_unaligned_be16(&buf
[2]) != descriptors
* 16);
4529 desc
= (void *)&buf
[8];
4531 sdeb_write_lock(sip
);
4533 for (i
= 0 ; i
< descriptors
; i
++) {
4534 unsigned long long lba
= get_unaligned_be64(&desc
[i
].lba
);
4535 unsigned int num
= get_unaligned_be32(&desc
[i
].blocks
);
4537 ret
= check_device_access_params(scp
, lba
, num
, true);
4541 unmap_region(sip
, lba
, num
);
4547 sdeb_write_unlock(sip
);
4553 #define SDEBUG_GET_LBA_STATUS_LEN 32
4555 static int resp_get_lba_status(struct scsi_cmnd
*scp
,
4556 struct sdebug_dev_info
*devip
)
4558 u8
*cmd
= scp
->cmnd
;
4560 u32 alloc_len
, mapped
, num
;
4562 u8 arr
[SDEBUG_GET_LBA_STATUS_LEN
];
4564 lba
= get_unaligned_be64(cmd
+ 2);
4565 alloc_len
= get_unaligned_be32(cmd
+ 10);
4570 ret
= check_device_access_params(scp
, lba
, 1, false);
4574 if (scsi_debug_lbp()) {
4575 struct sdeb_store_info
*sip
= devip2sip(devip
, true);
4577 mapped
= map_state(sip
, lba
, &num
);
4580 /* following just in case virtual_gb changed */
4581 sdebug_capacity
= get_sdebug_capacity();
4582 if (sdebug_capacity
- lba
<= 0xffffffff)
4583 num
= sdebug_capacity
- lba
;
4588 memset(arr
, 0, SDEBUG_GET_LBA_STATUS_LEN
);
4589 put_unaligned_be32(20, arr
); /* Parameter Data Length */
4590 put_unaligned_be64(lba
, arr
+ 8); /* LBA */
4591 put_unaligned_be32(num
, arr
+ 16); /* Number of blocks */
4592 arr
[20] = !mapped
; /* prov_stat=0: mapped; 1: dealloc */
4594 return fill_from_dev_buffer(scp
, arr
, SDEBUG_GET_LBA_STATUS_LEN
);
4597 static int resp_get_stream_status(struct scsi_cmnd
*scp
,
4598 struct sdebug_dev_info
*devip
)
4600 u16 starting_stream_id
, stream_id
;
4601 const u8
*cmd
= scp
->cmnd
;
4602 u32 alloc_len
, offset
;
4604 struct scsi_stream_status_header
*h
= (void *)arr
;
4606 starting_stream_id
= get_unaligned_be16(cmd
+ 4);
4607 alloc_len
= get_unaligned_be32(cmd
+ 10);
4609 if (alloc_len
< 8) {
4610 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 10, -1);
4611 return check_condition_result
;
4614 if (starting_stream_id
>= MAXIMUM_NUMBER_OF_STREAMS
) {
4615 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 4, -1);
4616 return check_condition_result
;
4620 * The GET STREAM STATUS command only reports status information
4621 * about open streams. Treat the non-permanent stream as open.
4623 put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS
,
4624 &h
->number_of_open_streams
);
4626 for (offset
= 8, stream_id
= starting_stream_id
;
4627 offset
+ 8 <= min_t(u32
, alloc_len
, sizeof(arr
)) &&
4628 stream_id
< MAXIMUM_NUMBER_OF_STREAMS
;
4629 offset
+= 8, stream_id
++) {
4630 struct scsi_stream_status
*stream_status
= (void *)arr
+ offset
;
4632 stream_status
->perm
= stream_id
< PERMANENT_STREAM_COUNT
;
4633 put_unaligned_be16(stream_id
,
4634 &stream_status
->stream_identifier
);
4635 stream_status
->rel_lifetime
= stream_id
+ 1;
4637 put_unaligned_be32(offset
- 8, &h
->len
); /* PARAMETER DATA LENGTH */
4639 return fill_from_dev_buffer(scp
, arr
, min(offset
, alloc_len
));
static int resp_sync_cache(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u32 num_blocks;
	u8 *cmd = scp->cmnd;

	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		num_blocks = get_unaligned_be16(cmd + 7);
	} else {				/* SYNCHRONIZE_CACHE(16) */
		lba = get_unaligned_be64(cmd + 2);
		num_blocks = get_unaligned_be32(cmd + 10);
	}
	if (lba + num_blocks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!write_since_sync || (cmd[1] & 0x2))
		res = SDEG_RES_IMMED_MASK;
	else	/* delay if write_since_sync and IMMED clear */
		write_since_sync = false;
	return res;
}
/*
 * Assuming the LBA+num_blocks is not out-of-range, this function will return
 * CONDITION MET if the specified blocks will/have fitted in the cache, and
 * a GOOD status otherwise. Model a disk with a big cache and yield
 * CONDITION MET. Actually tries to bring range in main memory into the
 * cache associated with the CPU(s).
 */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	sdeb_read_lock(sip);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);
	sdeb_read_unlock(sip);
fini:
	if (cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
4717 #define RL_BUCKET_ELEMS 8
4719 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4720 * (W-LUN), the normal Linux scanning logic does not associate it with a
4721 * device (e.g. /dev/sg7). The following magic will make that association:
4722 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4723 * where <n> is a host number. If there are multiple targets in a host then
4724 * the above will associate a W-LUN to each target. To only get a W-LUN
4725 * for target 2, then use "echo '- 2 49409' > scan" .
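 * (For reference: 49409 is 0xc101, i.e. the REPORT LUNS well known LUN.)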
4727 static int resp_report_luns(struct scsi_cmnd
*scp
,
4728 struct sdebug_dev_info
*devip
)
4730 unsigned char *cmd
= scp
->cmnd
;
4731 unsigned int alloc_len
;
4732 unsigned char select_report
;
4734 struct scsi_lun
*lun_p
;
4735 u8 arr
[RL_BUCKET_ELEMS
* sizeof(struct scsi_lun
)];
4736 unsigned int lun_cnt
; /* normal LUN count (max: 256) */
4737 unsigned int wlun_cnt
; /* report luns W-LUN count */
4738 unsigned int tlun_cnt
; /* total LUN count */
4739 unsigned int rlen
; /* response length (in bytes) */
4741 unsigned int off_rsp
= 0;
4742 const int sz_lun
= sizeof(struct scsi_lun
);
4744 clear_luns_changed_on_target(devip
);
4746 select_report
= cmd
[2];
4747 alloc_len
= get_unaligned_be32(cmd
+ 6);
4749 if (alloc_len
< 4) {
4750 pr_err("alloc len too small %d\n", alloc_len
);
4751 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 6, -1);
4752 return check_condition_result
;
4755 switch (select_report
) {
4756 case 0: /* all LUNs apart from W-LUNs */
4757 lun_cnt
= sdebug_max_luns
;
4760 case 1: /* only W-LUNs */
4764 case 2: /* all LUNs */
4765 lun_cnt
= sdebug_max_luns
;
4768 case 0x10: /* only administrative LUs */
4769 case 0x11: /* see SPC-5 */
4770 case 0x12: /* only subsiduary LUs owned by referenced LU */
4772 pr_debug("select report invalid %d\n", select_report
);
4773 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, -1);
4774 return check_condition_result
;
4777 if (sdebug_no_lun_0
&& (lun_cnt
> 0))
4780 tlun_cnt
= lun_cnt
+ wlun_cnt
;
4781 rlen
= tlun_cnt
* sz_lun
; /* excluding 8 byte header */
4782 scsi_set_resid(scp
, scsi_bufflen(scp
));
4783 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4784 select_report
, lun_cnt
, wlun_cnt
, sdebug_no_lun_0
);
4786 /* loops rely on sizeof response header same as sizeof lun (both 8) */
4787 lun
= sdebug_no_lun_0
? 1 : 0;
4788 for (k
= 0, j
= 0, res
= 0; true; ++k
, j
= 0) {
4789 memset(arr
, 0, sizeof(arr
));
4790 lun_p
= (struct scsi_lun
*)&arr
[0];
4792 put_unaligned_be32(rlen
, &arr
[0]);
4796 for ( ; j
< RL_BUCKET_ELEMS
; ++j
, ++lun_p
) {
4797 if ((k
* RL_BUCKET_ELEMS
) + j
> lun_cnt
)
4799 int_to_scsilun(lun
++, lun_p
);
4800 if (lun
> 1 && sdebug_lun_am
== SAM_LUN_AM_FLAT
)
4801 lun_p
->scsi_lun
[0] |= 0x40;
4803 if (j
< RL_BUCKET_ELEMS
)
4806 res
= p_fill_from_dev_buffer(scp
, arr
, n
, off_rsp
);
4812 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS
, lun_p
);
4816 res
= p_fill_from_dev_buffer(scp
, arr
, j
* sz_lun
, off_rsp
);
4820 static int resp_verify(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
4822 bool is_bytchk3
= false;
4825 u32 vnum
, a_num
, off
;
4826 const u32 lb_size
= sdebug_sector_size
;
4829 u8
*cmd
= scp
->cmnd
;
4830 struct sdeb_store_info
*sip
= devip2sip(devip
, true);
4832 bytchk
= (cmd
[1] >> 1) & 0x3;
4834 return 0; /* always claim internal verify okay */
4835 } else if (bytchk
== 2) {
4836 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 2);
4837 return check_condition_result
;
4838 } else if (bytchk
== 3) {
4839 is_bytchk3
= true; /* 1 block sent, compared repeatedly */
4843 lba
= get_unaligned_be64(cmd
+ 2);
4844 vnum
= get_unaligned_be32(cmd
+ 10);
4846 case VERIFY
: /* is VERIFY(10) */
4847 lba
= get_unaligned_be32(cmd
+ 2);
4848 vnum
= get_unaligned_be16(cmd
+ 7);
4851 mk_sense_invalid_opcode(scp
);
4852 return check_condition_result
;
4855 return 0; /* not an error */
4856 a_num
= is_bytchk3
? 1 : vnum
;
4857 /* Treat following check like one for read (i.e. no write) access */
4858 ret
= check_device_access_params(scp
, lba
, a_num
, false);
4862 arr
= kcalloc(lb_size
, vnum
, GFP_ATOMIC
| __GFP_NOWARN
);
4864 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
4866 return check_condition_result
;
4868 /* Not changing store, so only need read access */
4869 sdeb_read_lock(sip
);
4871 ret
= do_dout_fetch(scp
, a_num
, arr
);
4873 ret
= DID_ERROR
<< 16;
4875 } else if (sdebug_verbose
&& (ret
< (a_num
* lb_size
))) {
4876 sdev_printk(KERN_INFO
, scp
->device
,
4877 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4878 my_name
, __func__
, a_num
* lb_size
, ret
);
4881 for (j
= 1, off
= lb_size
; j
< vnum
; ++j
, off
+= lb_size
)
4882 memcpy(arr
+ off
, arr
, lb_size
);
4885 if (!comp_write_worker(sip
, lba
, vnum
, arr
, true)) {
4886 mk_sense_buffer(scp
, MISCOMPARE
, MISCOMPARE_VERIFY_ASC
, 0);
4887 ret
= check_condition_result
;
4891 sdeb_read_unlock(sip
);
4896 #define RZONES_DESC_HD 64
4898 /* Report zones depending on start LBA and reporting options */
4899 static int resp_report_zones(struct scsi_cmnd
*scp
,
4900 struct sdebug_dev_info
*devip
)
4902 unsigned int rep_max_zones
, nrz
= 0;
4904 u32 alloc_len
, rep_opts
, rep_len
;
4907 u8
*arr
= NULL
, *desc
;
4908 u8
*cmd
= scp
->cmnd
;
4909 struct sdeb_zone_state
*zsp
= NULL
;
4910 struct sdeb_store_info
*sip
= devip2sip(devip
, false);
4912 if (!sdebug_dev_is_zoned(devip
)) {
4913 mk_sense_invalid_opcode(scp
);
4914 return check_condition_result
;
4916 zs_lba
= get_unaligned_be64(cmd
+ 2);
4917 alloc_len
= get_unaligned_be32(cmd
+ 10);
4919 return 0; /* not an error */
4920 rep_opts
= cmd
[14] & 0x3f;
4921 partial
= cmd
[14] & 0x80;
4923 if (zs_lba
>= sdebug_capacity
) {
4924 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
4925 return check_condition_result
;
4928 rep_max_zones
= (alloc_len
- 64) >> ilog2(RZONES_DESC_HD
);
4930 arr
= kzalloc(alloc_len
, GFP_ATOMIC
| __GFP_NOWARN
);
4932 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
4934 return check_condition_result
;
4937 sdeb_read_lock(sip
);
4940 for (lba
= zs_lba
; lba
< sdebug_capacity
;
4941 lba
= zsp
->z_start
+ zsp
->z_size
) {
4942 if (WARN_ONCE(zbc_zone(devip
, lba
) == zsp
, "lba = %llu\n", lba
))
4944 zsp
= zbc_zone(devip
, lba
);
4951 if (zsp
->z_cond
!= ZC1_EMPTY
)
4955 /* Implicit open zones */
4956 if (zsp
->z_cond
!= ZC2_IMPLICIT_OPEN
)
4960 /* Explicit open zones */
4961 if (zsp
->z_cond
!= ZC3_EXPLICIT_OPEN
)
4966 if (zsp
->z_cond
!= ZC4_CLOSED
)
4971 if (zsp
->z_cond
!= ZC5_FULL
)
4978 * Read-only, offline, reset WP recommended are
4979 * not emulated: no zones to report;
4983 /* non-seq-resource set */
4984 if (!zsp
->z_non_seq_resource
)
4988 /* All zones except gap zones. */
4989 if (zbc_zone_is_gap(zsp
))
4993 /* Not write pointer (conventional) zones */
4994 if (zbc_zone_is_seq(zsp
))
4998 mk_sense_buffer(scp
, ILLEGAL_REQUEST
,
4999 INVALID_FIELD_IN_CDB
, 0);
5000 ret
= check_condition_result
;
5004 if (nrz
< rep_max_zones
) {
5005 /* Fill zone descriptor */
5006 desc
[0] = zsp
->z_type
;
5007 desc
[1] = zsp
->z_cond
<< 4;
5008 if (zsp
->z_non_seq_resource
)
5010 put_unaligned_be64((u64
)zsp
->z_size
, desc
+ 8);
5011 put_unaligned_be64((u64
)zsp
->z_start
, desc
+ 16);
5012 put_unaligned_be64((u64
)zsp
->z_wp
, desc
+ 24);
5016 if (partial
&& nrz
>= rep_max_zones
)
5023 /* Zone list length. */
5024 put_unaligned_be32(nrz
* RZONES_DESC_HD
, arr
+ 0);
5026 put_unaligned_be64(sdebug_capacity
- 1, arr
+ 8);
5027 /* Zone starting LBA granularity. */
5028 if (devip
->zcap
< devip
->zsize
)
5029 put_unaligned_be64(devip
->zsize
, arr
+ 16);
5031 rep_len
= (unsigned long)desc
- (unsigned long)arr
;
5032 ret
= fill_from_dev_buffer(scp
, arr
, min_t(u32
, alloc_len
, rep_len
));
5035 sdeb_read_unlock(sip
);
/* Logic transplanted from tcmu-runner, file_zbc.c */
static void zbc_open_all(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp = &devip->zstate[0];
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++, zsp++) {
		if (zsp->z_cond == ZC4_CLOSED)
			zbc_open_zone(devip, &devip->zstate[i], true);
	}
}
5052 static int resp_open_zone(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
5056 enum sdebug_z_cond zc
;
5057 u8
*cmd
= scp
->cmnd
;
5058 struct sdeb_zone_state
*zsp
;
5059 bool all
= cmd
[14] & 0x01;
5060 struct sdeb_store_info
*sip
= devip2sip(devip
, false);
5062 if (!sdebug_dev_is_zoned(devip
)) {
5063 mk_sense_invalid_opcode(scp
);
5064 return check_condition_result
;
5067 sdeb_write_lock(sip
);
5070 /* Check if all closed zones can be open */
5071 if (devip
->max_open
&&
5072 devip
->nr_exp_open
+ devip
->nr_closed
> devip
->max_open
) {
5073 mk_sense_buffer(scp
, DATA_PROTECT
, INSUFF_RES_ASC
,
5075 res
= check_condition_result
;
5078 /* Open all closed zones */
5079 zbc_open_all(devip
);
5083 /* Open the specified zone */
5084 z_id
= get_unaligned_be64(cmd
+ 2);
5085 if (z_id
>= sdebug_capacity
) {
5086 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
5087 res
= check_condition_result
;
5091 zsp
= zbc_zone(devip
, z_id
);
5092 if (z_id
!= zsp
->z_start
) {
5093 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
5094 res
= check_condition_result
;
5097 if (zbc_zone_is_conv(zsp
)) {
5098 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
5099 res
= check_condition_result
;
5104 if (zc
== ZC3_EXPLICIT_OPEN
|| zc
== ZC5_FULL
)
5107 if (devip
->max_open
&& devip
->nr_exp_open
>= devip
->max_open
) {
5108 mk_sense_buffer(scp
, DATA_PROTECT
, INSUFF_RES_ASC
,
5110 res
= check_condition_result
;
5114 zbc_open_zone(devip
, zsp
, true);
5116 sdeb_write_unlock(sip
);
static void zbc_close_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_close_zone(devip, &devip->zstate[i]);
}
5128 static int resp_close_zone(struct scsi_cmnd
*scp
,
5129 struct sdebug_dev_info
*devip
)
5133 u8
*cmd
= scp
->cmnd
;
5134 struct sdeb_zone_state
*zsp
;
5135 bool all
= cmd
[14] & 0x01;
5136 struct sdeb_store_info
*sip
= devip2sip(devip
, false);
5138 if (!sdebug_dev_is_zoned(devip
)) {
5139 mk_sense_invalid_opcode(scp
);
5140 return check_condition_result
;
5143 sdeb_write_lock(sip
);
5146 zbc_close_all(devip
);
5150 /* Close specified zone */
5151 z_id
= get_unaligned_be64(cmd
+ 2);
5152 if (z_id
>= sdebug_capacity
) {
5153 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
5154 res
= check_condition_result
;
5158 zsp
= zbc_zone(devip
, z_id
);
5159 if (z_id
!= zsp
->z_start
) {
5160 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
5161 res
= check_condition_result
;
5164 if (zbc_zone_is_conv(zsp
)) {
5165 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
5166 res
= check_condition_result
;
5170 zbc_close_zone(devip
, zsp
);
5172 sdeb_write_unlock(sip
);
static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}
static void zbc_finish_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_finish_zone(devip, &devip->zstate[i], false);
}
5200 static int resp_finish_zone(struct scsi_cmnd
*scp
,
5201 struct sdebug_dev_info
*devip
)
5203 struct sdeb_zone_state
*zsp
;
5206 u8
*cmd
= scp
->cmnd
;
5207 bool all
= cmd
[14] & 0x01;
5208 struct sdeb_store_info
*sip
= devip2sip(devip
, false);
5210 if (!sdebug_dev_is_zoned(devip
)) {
5211 mk_sense_invalid_opcode(scp
);
5212 return check_condition_result
;
5215 sdeb_write_lock(sip
);
5218 zbc_finish_all(devip
);
5222 /* Finish the specified zone */
5223 z_id
= get_unaligned_be64(cmd
+ 2);
5224 if (z_id
>= sdebug_capacity
) {
5225 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
5226 res
= check_condition_result
;
5230 zsp
= zbc_zone(devip
, z_id
);
5231 if (z_id
!= zsp
->z_start
) {
5232 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
5233 res
= check_condition_result
;
5236 if (zbc_zone_is_conv(zsp
)) {
5237 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
5238 res
= check_condition_result
;
5242 zbc_finish_zone(devip
, zsp
, true);
5244 sdeb_write_unlock(sip
);
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	if (zsp->z_wp > zsp->z_start)
		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
static void zbc_rwp_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_rwp_zone(devip, &devip->zstate[i]);
}
5281 static int resp_rwp_zone(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
5283 struct sdeb_zone_state
*zsp
;
5286 u8
*cmd
= scp
->cmnd
;
5287 bool all
= cmd
[14] & 0x01;
5288 struct sdeb_store_info
*sip
= devip2sip(devip
, false);
5290 if (!sdebug_dev_is_zoned(devip
)) {
5291 mk_sense_invalid_opcode(scp
);
5292 return check_condition_result
;
5295 sdeb_write_lock(sip
);
5302 z_id
= get_unaligned_be64(cmd
+ 2);
5303 if (z_id
>= sdebug_capacity
) {
5304 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
5305 res
= check_condition_result
;
5309 zsp
= zbc_zone(devip
, z_id
);
5310 if (z_id
!= zsp
->z_start
) {
5311 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
5312 res
= check_condition_result
;
5315 if (zbc_zone_is_conv(zsp
)) {
5316 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
5317 res
= check_condition_result
;
5321 zbc_rwp_zone(devip
, zsp
);
5323 sdeb_write_unlock(sip
);
static u32 get_tag(struct scsi_cmnd *cmnd)
{
	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
}
5332 /* Queued (deferred) command completions converge here. */
5333 static void sdebug_q_cmd_complete(struct sdebug_defer
*sd_dp
)
5335 struct sdebug_queued_cmd
*sqcp
= container_of(sd_dp
, struct sdebug_queued_cmd
, sd_dp
);
5336 unsigned long flags
;
5337 struct scsi_cmnd
*scp
= sqcp
->scmd
;
5338 struct sdebug_scsi_cmd
*sdsc
;
5341 if (sdebug_statistics
) {
5342 atomic_inc(&sdebug_completions
);
5343 if (raw_smp_processor_id() != sd_dp
->issuing_cpu
)
5344 atomic_inc(&sdebug_miss_cpus
);
5348 pr_err("scmd=NULL\n");
5352 sdsc
= scsi_cmd_priv(scp
);
5353 spin_lock_irqsave(&sdsc
->lock
, flags
);
5354 aborted
= sd_dp
->aborted
;
5355 if (unlikely(aborted
))
5356 sd_dp
->aborted
= false;
5357 ASSIGN_QUEUED_CMD(scp
, NULL
);
5359 spin_unlock_irqrestore(&sdsc
->lock
, flags
);
5362 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5363 blk_abort_request(scsi_cmd_to_rq(scp
));
5367 scsi_done(scp
); /* callback to mid level */
5369 sdebug_free_queued_cmd(sqcp
);
/* When high resolution timer goes off this function is called. */
static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
						  hrt);
	sdebug_q_cmd_complete(sd_dp);
	return HRTIMER_NORESTART;
}

/* When work queue schedules work, it calls this function. */
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
{
	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
						  ew.work);
	sdebug_q_cmd_complete(sd_dp);
}
5389 static bool got_shared_uuid
;
5390 static uuid_t shared_uuid
;
5392 static int sdebug_device_create_zones(struct sdebug_dev_info
*devip
)
5394 struct sdeb_zone_state
*zsp
;
5395 sector_t capacity
= get_sdebug_capacity();
5396 sector_t conv_capacity
;
5397 sector_t zstart
= 0;
	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
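	/*
	 * Illustrative example only (not from any spec): with a 256 MiB
	 * capacity, 512-byte logical blocks and a 128 MiB starting zone
	 * size, the zone size is halved once, to 64 MiB, so that at least
	 * four zones fit.
	 */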
5406 if (!sdeb_zbc_zone_size_mb
) {
5407 devip
->zsize
= (DEF_ZBC_ZONE_SIZE_MB
* SZ_1M
)
5408 >> ilog2(sdebug_sector_size
);
5409 while (capacity
< devip
->zsize
<< 2 && devip
->zsize
>= 2)
5411 if (devip
->zsize
< 2) {
5412 pr_err("Device capacity too small\n");
5416 if (!is_power_of_2(sdeb_zbc_zone_size_mb
)) {
5417 pr_err("Zone size is not a power of 2\n");
5420 devip
->zsize
= (sdeb_zbc_zone_size_mb
* SZ_1M
)
5421 >> ilog2(sdebug_sector_size
);
5422 if (devip
->zsize
>= capacity
) {
5423 pr_err("Zone size too large for device capacity\n");
5428 devip
->zsize_shift
= ilog2(devip
->zsize
);
5429 devip
->nr_zones
= (capacity
+ devip
->zsize
- 1) >> devip
->zsize_shift
;
5431 if (sdeb_zbc_zone_cap_mb
== 0) {
5432 devip
->zcap
= devip
->zsize
;
5434 devip
->zcap
= (sdeb_zbc_zone_cap_mb
* SZ_1M
) >>
5435 ilog2(sdebug_sector_size
);
5436 if (devip
->zcap
> devip
->zsize
) {
5437 pr_err("Zone capacity too large\n");
5442 conv_capacity
= (sector_t
)sdeb_zbc_nr_conv
<< devip
->zsize_shift
;
5443 if (conv_capacity
>= capacity
) {
5444 pr_err("Number of conventional zones too large\n");
5447 devip
->nr_conv_zones
= sdeb_zbc_nr_conv
;
5448 devip
->nr_seq_zones
= ALIGN(capacity
- conv_capacity
, devip
->zsize
) >>
5450 devip
->nr_zones
= devip
->nr_conv_zones
+ devip
->nr_seq_zones
;
5452 /* Add gap zones if zone capacity is smaller than the zone size */
5453 if (devip
->zcap
< devip
->zsize
)
5454 devip
->nr_zones
+= devip
->nr_seq_zones
;
5457 /* zbc_max_open_zones can be 0, meaning "not reported" */
5458 if (sdeb_zbc_max_open
>= devip
->nr_zones
- 1)
5459 devip
->max_open
= (devip
->nr_zones
- 1) / 2;
5461 devip
->max_open
= sdeb_zbc_max_open
;
5464 devip
->zstate
= kcalloc(devip
->nr_zones
,
5465 sizeof(struct sdeb_zone_state
), GFP_KERNEL
);
5469 for (i
= 0; i
< devip
->nr_zones
; i
++) {
5470 zsp
= &devip
->zstate
[i
];
5472 zsp
->z_start
= zstart
;
5474 if (i
< devip
->nr_conv_zones
) {
5475 zsp
->z_type
= ZBC_ZTYPE_CNV
;
5476 zsp
->z_cond
= ZBC_NOT_WRITE_POINTER
;
5477 zsp
->z_wp
= (sector_t
)-1;
5479 min_t(u64
, devip
->zsize
, capacity
- zstart
);
5480 } else if ((zstart
& (devip
->zsize
- 1)) == 0) {
5482 zsp
->z_type
= ZBC_ZTYPE_SWR
;
5484 zsp
->z_type
= ZBC_ZTYPE_SWP
;
5485 zsp
->z_cond
= ZC1_EMPTY
;
5486 zsp
->z_wp
= zsp
->z_start
;
5488 min_t(u64
, devip
->zcap
, capacity
- zstart
);
5490 zsp
->z_type
= ZBC_ZTYPE_GAP
;
5491 zsp
->z_cond
= ZBC_NOT_WRITE_POINTER
;
5492 zsp
->z_wp
= (sector_t
)-1;
5493 zsp
->z_size
= min_t(u64
, devip
->zsize
- devip
->zcap
,
5497 WARN_ON_ONCE((int)zsp
->z_size
<= 0);
5498 zstart
+= zsp
->z_size
;
5504 static struct sdebug_dev_info
*sdebug_device_create(
5505 struct sdebug_host_info
*sdbg_host
, gfp_t flags
)
5507 struct sdebug_dev_info
*devip
;
5509 devip
= kzalloc(sizeof(*devip
), flags
);
5511 if (sdebug_uuid_ctl
== 1)
5512 uuid_gen(&devip
->lu_name
);
5513 else if (sdebug_uuid_ctl
== 2) {
5514 if (got_shared_uuid
)
5515 devip
->lu_name
= shared_uuid
;
5517 uuid_gen(&shared_uuid
);
5518 got_shared_uuid
= true;
5519 devip
->lu_name
= shared_uuid
;
5522 devip
->sdbg_host
= sdbg_host
;
5523 if (sdeb_zbc_in_use
) {
5524 devip
->zoned
= sdeb_zbc_model
== BLK_ZONED_HM
;
5525 if (sdebug_device_create_zones(devip
)) {
5530 devip
->zoned
= false;
5532 devip
->create_ts
= ktime_get_boottime();
5533 atomic_set(&devip
->stopped
, (sdeb_tur_ms_to_ready
> 0 ? 2 : 0));
5534 spin_lock_init(&devip
->list_lock
);
5535 INIT_LIST_HEAD(&devip
->inject_err_list
);
5536 list_add_tail(&devip
->dev_list
, &sdbg_host
->dev_info_list
);
5541 static struct sdebug_dev_info
*find_build_dev_info(struct scsi_device
*sdev
)
5543 struct sdebug_host_info
*sdbg_host
;
5544 struct sdebug_dev_info
*open_devip
= NULL
;
5545 struct sdebug_dev_info
*devip
;
5547 sdbg_host
= shost_to_sdebug_host(sdev
->host
);
5549 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
, dev_list
) {
5550 if ((devip
->used
) && (devip
->channel
== sdev
->channel
) &&
5551 (devip
->target
== sdev
->id
) &&
5552 (devip
->lun
== sdev
->lun
))
5555 if ((!devip
->used
) && (!open_devip
))
5559 if (!open_devip
) { /* try and make a new one */
5560 open_devip
= sdebug_device_create(sdbg_host
, GFP_ATOMIC
);
5562 pr_err("out of memory at line %d\n", __LINE__
);
5567 open_devip
->channel
= sdev
->channel
;
5568 open_devip
->target
= sdev
->id
;
5569 open_devip
->lun
= sdev
->lun
;
5570 open_devip
->sdbg_host
= sdbg_host
;
5571 set_bit(SDEBUG_UA_POOCCUR
, open_devip
->uas_bm
);
5572 open_devip
->used
= true;
5576 static int scsi_debug_slave_alloc(struct scsi_device
*sdp
)
5579 pr_info("slave_alloc <%u %u %u %llu>\n",
5580 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
5585 static int scsi_debug_slave_configure(struct scsi_device
*sdp
)
5587 struct sdebug_dev_info
*devip
=
5588 (struct sdebug_dev_info
*)sdp
->hostdata
;
5589 struct dentry
*dentry
;
5592 pr_info("slave_configure <%u %u %u %llu>\n",
5593 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
5594 if (sdp
->host
->max_cmd_len
!= SDEBUG_MAX_CMD_LEN
)
5595 sdp
->host
->max_cmd_len
= SDEBUG_MAX_CMD_LEN
;
5596 if (devip
== NULL
) {
5597 devip
= find_build_dev_info(sdp
);
5599 return 1; /* no resources, will be marked offline */
5601 sdp
->hostdata
= devip
;
5603 sdp
->no_uld_attach
= 1;
5604 config_cdb_len(sdp
);
5606 if (sdebug_allow_restart
)
5607 sdp
->allow_restart
= 1;
5609 devip
->debugfs_entry
= debugfs_create_dir(dev_name(&sdp
->sdev_dev
),
5610 sdebug_debugfs_root
);
5611 if (IS_ERR_OR_NULL(devip
->debugfs_entry
))
5612 pr_info("%s: failed to create debugfs directory for device %s\n",
5613 __func__
, dev_name(&sdp
->sdev_gendev
));
5615 dentry
= debugfs_create_file("error", 0600, devip
->debugfs_entry
, sdp
,
5616 &sdebug_error_fops
);
5617 if (IS_ERR_OR_NULL(dentry
))
5618 pr_info("%s: failed to create error file for device %s\n",
5619 __func__
, dev_name(&sdp
->sdev_gendev
));
5624 static void scsi_debug_slave_destroy(struct scsi_device
*sdp
)
5626 struct sdebug_dev_info
*devip
=
5627 (struct sdebug_dev_info
*)sdp
->hostdata
;
5628 struct sdebug_err_inject
*err
;
5631 pr_info("slave_destroy <%u %u %u %llu>\n",
5632 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
5637 spin_lock(&devip
->list_lock
);
5638 list_for_each_entry_rcu(err
, &devip
->inject_err_list
, list
) {
5639 list_del_rcu(&err
->list
);
5640 call_rcu(&err
->rcu
, sdebug_err_free
);
5642 spin_unlock(&devip
->list_lock
);
5644 debugfs_remove(devip
->debugfs_entry
);
5646 /* make this slot available for re-use */
5647 devip
->used
= false;
5648 sdp
->hostdata
= NULL
;
/* Returns true if we require the queued memory to be freed by the caller. */
static bool stop_qc_helper(struct sdebug_defer *sd_dp,
			   enum sdeb_defer_type defer_t)
{
	if (defer_t == SDEB_DEFER_HRT) {
		int res = hrtimer_try_to_cancel(&sd_dp->hrt);

		switch (res) {
		case 0: /* Not active, it must have already run */
		case -1: /* -1 It's executing the CB */
			return false;
		case 1: /* Was active, we've now cancelled */
		default:
			return true;
		}
	} else if (defer_t == SDEB_DEFER_WQ) {
		/* Cancel if pending */
		if (cancel_work_sync(&sd_dp->ew.work))
			return true;
		/* Was not pending, so it must have run */
		return false;
	} else if (defer_t == SDEB_DEFER_POLL) {
		return true;
	}

	return false;
}

static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
	enum sdeb_defer_type l_defer_t;
	struct sdebug_defer *sd_dp;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);

	lockdep_assert_held(&sdsc->lock);

	if (!sqcp)
		return false;
	sd_dp = &sqcp->sd_dp;
	l_defer_t = READ_ONCE(sd_dp->defer_t);
	ASSIGN_QUEUED_CMD(cmnd, NULL);

	if (stop_qc_helper(sd_dp, l_defer_t))
		sdebug_free_queued_cmd(sqcp);

	return true;
}
/*
 * Called from scsi_debug_abort() only, which is for timed-out cmd.
 */
static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&sdsc->lock, flags);
	res = scsi_debug_stop_cmnd(cmnd);
	spin_unlock_irqrestore(&sdsc->lock, flags);

	return res;
}

/*
 * All we can do is set the cmnd as internally aborted and wait for it to
 * finish. We cannot call scsi_done() as normal completion path may do that.
 */
static bool sdebug_stop_cmnd(struct request *rq, void *data)
{
	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));

	return true;
}
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	struct sdebug_host_info *sdhp;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		struct Scsi_Host *shost = sdhp->shost;

		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
5742 static int sdebug_fail_abort(struct scsi_cmnd
*cmnd
)
5744 struct scsi_device
*sdp
= cmnd
->device
;
5745 struct sdebug_dev_info
*devip
= (struct sdebug_dev_info
*)sdp
->hostdata
;
5746 struct sdebug_err_inject
*err
;
5747 unsigned char *cmd
= cmnd
->cmnd
;
5754 list_for_each_entry_rcu(err
, &devip
->inject_err_list
, list
) {
5755 if (err
->type
== ERR_ABORT_CMD_FAILED
&&
5756 (err
->cmd
== cmd
[0] || err
->cmd
== 0xff)) {
5770 static int scsi_debug_abort(struct scsi_cmnd
*SCpnt
)
5772 bool ok
= scsi_debug_abort_cmnd(SCpnt
);
5773 u8
*cmd
= SCpnt
->cmnd
;
5778 if (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
)
5779 sdev_printk(KERN_INFO
, SCpnt
->device
,
5780 "%s: command%s found\n", __func__
,
5783 if (sdebug_fail_abort(SCpnt
)) {
5784 scmd_printk(KERN_INFO
, SCpnt
, "fail abort command 0x%x\n",
static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
{
	struct scsi_device *sdp = data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);

	if (scmd->device == sdp)
		scsi_debug_abort_cmnd(scmd);

	return true;
}

/* Deletes (stops) timers or work queues of all queued commands per sdev */
static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
{
	struct Scsi_Host *shost = sdp->host;

	blk_mq_tagset_busy_iter(&shost->tag_set,
				scsi_debug_stop_all_queued_iter, sdp);
}
5812 static int sdebug_fail_lun_reset(struct scsi_cmnd
*cmnd
)
5814 struct scsi_device
*sdp
= cmnd
->device
;
5815 struct sdebug_dev_info
*devip
= (struct sdebug_dev_info
*)sdp
->hostdata
;
5816 struct sdebug_err_inject
*err
;
5817 unsigned char *cmd
= cmnd
->cmnd
;
5824 list_for_each_entry_rcu(err
, &devip
->inject_err_list
, list
) {
5825 if (err
->type
== ERR_LUN_RESET_FAILED
&&
5826 (err
->cmd
== cmd
[0] || err
->cmd
== 0xff)) {
5840 static int scsi_debug_device_reset(struct scsi_cmnd
*SCpnt
)
5842 struct scsi_device
*sdp
= SCpnt
->device
;
5843 struct sdebug_dev_info
*devip
= sdp
->hostdata
;
5844 u8
*cmd
= SCpnt
->cmnd
;
5849 if (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
)
5850 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
5852 scsi_debug_stop_all_queued(sdp
);
5854 set_bit(SDEBUG_UA_POR
, devip
->uas_bm
);
5856 if (sdebug_fail_lun_reset(SCpnt
)) {
5857 scmd_printk(KERN_INFO
, SCpnt
, "fail lun reset 0x%x\n", opcode
);
static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
{
	struct scsi_target *starget = scsi_target(cmnd->device);
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip)
		return targetip->reset_fail;

	return 0;
}
5876 static int scsi_debug_target_reset(struct scsi_cmnd
*SCpnt
)
5878 struct scsi_device
*sdp
= SCpnt
->device
;
5879 struct sdebug_host_info
*sdbg_host
= shost_to_sdebug_host(sdp
->host
);
5880 struct sdebug_dev_info
*devip
;
5881 u8
*cmd
= SCpnt
->cmnd
;
5885 ++num_target_resets
;
5886 if (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
)
5887 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
5889 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
, dev_list
) {
5890 if (devip
->target
== sdp
->id
) {
5891 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
5896 if (SDEBUG_OPT_RESET_NOISE
& sdebug_opts
)
5897 sdev_printk(KERN_INFO
, sdp
,
5898 "%s: %d device(s) found in target\n", __func__
, k
);
5900 if (sdebug_fail_target_reset(SCpnt
)) {
5901 scmd_printk(KERN_INFO
, SCpnt
, "fail target reset 0x%x\n",
5909 static int scsi_debug_bus_reset(struct scsi_cmnd
*SCpnt
)
5911 struct scsi_device
*sdp
= SCpnt
->device
;
5912 struct sdebug_host_info
*sdbg_host
= shost_to_sdebug_host(sdp
->host
);
5913 struct sdebug_dev_info
*devip
;
5918 if (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
)
5919 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
5921 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
, dev_list
) {
5922 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
5926 if (SDEBUG_OPT_RESET_NOISE
& sdebug_opts
)
5927 sdev_printk(KERN_INFO
, sdp
,
5928 "%s: %d device(s) found in host\n", __func__
, k
);
5932 static int scsi_debug_host_reset(struct scsi_cmnd
*SCpnt
)
5934 struct sdebug_host_info
*sdbg_host
;
5935 struct sdebug_dev_info
*devip
;
5939 if (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
)
5940 sdev_printk(KERN_INFO
, SCpnt
->device
, "%s\n", __func__
);
5941 mutex_lock(&sdebug_host_list_mutex
);
5942 list_for_each_entry(sdbg_host
, &sdebug_host_list
, host_list
) {
5943 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
,
5945 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
5949 mutex_unlock(&sdebug_host_list_mutex
);
5951 if (SDEBUG_OPT_RESET_NOISE
& sdebug_opts
)
5952 sdev_printk(KERN_INFO
, SCpnt
->device
,
5953 "%s: %d device(s) found\n", __func__
, k
);
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
static void block_unblock_all_queues(bool block)
{
	struct sdebug_host_info *sdhp;

	lockdep_assert_held(&sdebug_host_list_mutex);

	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		struct Scsi_Host *shost = sdhp->shost;

		if (block)
			scsi_block_requests(shost);
		else
			scsi_unblock_requests(shost);
	}
}

/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
 * commands will be processed normally before triggers occur.
 */
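/* For example (illustrative only): with every_nth = 100 and a current
 * count of 537, the count is rounded down to 500, so 99 more commands
 * complete normally before the next trigger fires.
 */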
static void tweak_cmnd_count(void)
{
	unsigned int count, modulo;

	modulo = abs(sdebug_every_nth);
	if (modulo < 2)
		return;

	mutex_lock(&sdebug_host_list_mutex);
	block_unblock_all_queues(true);
	count = atomic_read(&sdebug_cmnd_count);
	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
	block_unblock_all_queues(false);
	mutex_unlock(&sdebug_host_list_mutex);
}

static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}

static bool inject_on_this_cmd(void)
{
	if (sdebug_every_nth == 0)
		return false;
	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}
6060 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
{
	if (sqcp)
		kmem_cache_free(queued_cmd_cache, sqcp);
}

static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
{
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_defer *sd_dp;

	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
	if (!sqcp)
		return NULL;

	sd_dp = &sqcp->sd_dp;

	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);

	sqcp->scmd = scmd;

	return sqcp;
}
6089 /* Complete the processing of the thread that queued a SCSI command to this
6090 * driver. It either completes the command by calling cmnd_done() or
6091 * schedules a hr timer or work queue then returns 0. Returns
6092 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
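 * (As a rough, non-normative note: delta_jiff carries the "delay" module
 * parameter in jiffies and ndelay the "ndelay" parameter in nanoseconds;
 * when both are zero the response is completed in the caller's thread.)
 */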
6094 static int schedule_resp(struct scsi_cmnd
*cmnd
, struct sdebug_dev_info
*devip
,
6096 int (*pfp
)(struct scsi_cmnd
*,
6097 struct sdebug_dev_info
*),
6098 int delta_jiff
, int ndelay
)
6100 struct request
*rq
= scsi_cmd_to_rq(cmnd
);
6101 bool polled
= rq
->cmd_flags
& REQ_POLLED
;
6102 struct sdebug_scsi_cmd
*sdsc
= scsi_cmd_priv(cmnd
);
6103 unsigned long flags
;
6104 u64 ns_from_boot
= 0;
6105 struct sdebug_queued_cmd
*sqcp
;
6106 struct scsi_device
*sdp
;
6107 struct sdebug_defer
*sd_dp
;
6109 if (unlikely(devip
== NULL
)) {
6110 if (scsi_result
== 0)
6111 scsi_result
= DID_NO_CONNECT
<< 16;
6112 goto respond_in_thread
;
6116 if (delta_jiff
== 0)
6117 goto respond_in_thread
;
6120 if (unlikely(sdebug_every_nth
&& (SDEBUG_OPT_RARE_TSF
& sdebug_opts
) &&
6121 (scsi_result
== 0))) {
6122 int num_in_q
= scsi_device_busy(sdp
);
6123 int qdepth
= cmnd
->device
->queue_depth
;
6125 if ((num_in_q
== qdepth
) &&
6126 (atomic_inc_return(&sdebug_a_tsf
) >=
6127 abs(sdebug_every_nth
))) {
6128 atomic_set(&sdebug_a_tsf
, 0);
6129 scsi_result
= device_qfull_result
;
6131 if (unlikely(SDEBUG_OPT_Q_NOISE
& sdebug_opts
))
6132 sdev_printk(KERN_INFO
, sdp
, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6133 __func__
, num_in_q
);
6137 sqcp
= sdebug_alloc_queued_cmd(cmnd
);
6139 pr_err("%s no alloc\n", __func__
);
6140 return SCSI_MLQUEUE_HOST_BUSY
;
6142 sd_dp
= &sqcp
->sd_dp
;
6145 ns_from_boot
= ktime_get_boottime_ns();
6147 /* one of the resp_*() response functions is called here */
6148 cmnd
->result
= pfp
? pfp(cmnd
, devip
) : 0;
6149 if (cmnd
->result
& SDEG_RES_IMMED_MASK
) {
6150 cmnd
->result
&= ~SDEG_RES_IMMED_MASK
;
6151 delta_jiff
= ndelay
= 0;
6153 if (cmnd
->result
== 0 && scsi_result
!= 0)
6154 cmnd
->result
= scsi_result
;
6155 if (cmnd
->result
== 0 && unlikely(sdebug_opts
& SDEBUG_OPT_TRANSPORT_ERR
)) {
6156 if (atomic_read(&sdeb_inject_pending
)) {
6157 mk_sense_buffer(cmnd
, ABORTED_COMMAND
, TRANSPORT_PROBLEM
, ACK_NAK_TO
);
6158 atomic_set(&sdeb_inject_pending
, 0);
6159 cmnd
->result
= check_condition_result
;
6163 if (unlikely(sdebug_verbose
&& cmnd
->result
))
6164 sdev_printk(KERN_INFO
, sdp
, "%s: non-zero result=0x%x\n",
6165 __func__
, cmnd
->result
);
6167 if (delta_jiff
> 0 || ndelay
> 0) {
6170 if (delta_jiff
> 0) {
6171 u64 ns
= jiffies_to_nsecs(delta_jiff
);
6173 if (sdebug_random
&& ns
< U32_MAX
) {
6174 ns
= get_random_u32_below((u32
)ns
);
6175 } else if (sdebug_random
) {
6176 ns
>>= 12; /* scale to 4 usec precision */
6177 if (ns
< U32_MAX
) /* over 4 hours max */
6178 ns
= get_random_u32_below((u32
)ns
);
6181 kt
= ns_to_ktime(ns
);
6182 } else { /* ndelay has a 4.2 second max */
6183 kt
= sdebug_random
? get_random_u32_below((u32
)ndelay
) :
6185 if (ndelay
< INCLUSIVE_TIMING_MAX_NS
) {
6186 u64 d
= ktime_get_boottime_ns() - ns_from_boot
;
6188 if (kt
<= d
) { /* elapsed duration >= kt */
6189 /* call scsi_done() from this thread */
6190 sdebug_free_queued_cmd(sqcp
);
6194 /* otherwise reduce kt by elapsed time */
6198 if (sdebug_statistics
)
6199 sd_dp
->issuing_cpu
= raw_smp_processor_id();
6201 spin_lock_irqsave(&sdsc
->lock
, flags
);
6202 sd_dp
->cmpl_ts
= ktime_add(ns_to_ktime(ns_from_boot
), kt
);
6203 ASSIGN_QUEUED_CMD(cmnd
, sqcp
);
6204 WRITE_ONCE(sd_dp
->defer_t
, SDEB_DEFER_POLL
);
6205 spin_unlock_irqrestore(&sdsc
->lock
, flags
);
6207 /* schedule the invocation of scsi_done() for a later time */
6208 spin_lock_irqsave(&sdsc
->lock
, flags
);
6209 ASSIGN_QUEUED_CMD(cmnd
, sqcp
);
6210 WRITE_ONCE(sd_dp
->defer_t
, SDEB_DEFER_HRT
);
6211 hrtimer_start(&sd_dp
->hrt
, kt
, HRTIMER_MODE_REL_PINNED
);
6213 * The completion handler will try to grab sqcp->lock,
6214 * so there is no chance that the completion handler
6215 * will call scsi_done() until we release the lock
6216 * here (so ok to keep referencing sdsc).
6218 spin_unlock_irqrestore(&sdsc
->lock
, flags
);
6220 } else { /* jdelay < 0, use work queue */
6221 if (unlikely((sdebug_opts
& SDEBUG_OPT_CMD_ABORT
) &&
6222 atomic_read(&sdeb_inject_pending
))) {
6223 sd_dp
->aborted
= true;
6224 atomic_set(&sdeb_inject_pending
, 0);
6225 sdev_printk(KERN_INFO
, sdp
, "abort request tag=%#x\n",
6226 blk_mq_unique_tag_to_tag(get_tag(cmnd
)));
6229 if (sdebug_statistics
)
6230 sd_dp
->issuing_cpu
= raw_smp_processor_id();
6232 spin_lock_irqsave(&sdsc
->lock
, flags
);
6233 ASSIGN_QUEUED_CMD(cmnd
, sqcp
);
6234 sd_dp
->cmpl_ts
= ns_to_ktime(ns_from_boot
);
6235 WRITE_ONCE(sd_dp
->defer_t
, SDEB_DEFER_POLL
);
6236 spin_unlock_irqrestore(&sdsc
->lock
, flags
);
6238 spin_lock_irqsave(&sdsc
->lock
, flags
);
6239 ASSIGN_QUEUED_CMD(cmnd
, sqcp
);
6240 WRITE_ONCE(sd_dp
->defer_t
, SDEB_DEFER_WQ
);
6241 schedule_work(&sd_dp
->ew
.work
);
6242 spin_unlock_irqrestore(&sdsc
->lock
, flags
);
6248 respond_in_thread
: /* call back to mid-layer using invocation thread */
6249 cmnd
->result
= pfp
!= NULL
? pfp(cmnd
, devip
) : 0;
6250 cmnd
->result
&= ~SDEG_RES_IMMED_MASK
;
6251 if (cmnd
->result
== 0 && scsi_result
!= 0)
6252 cmnd
->result
= scsi_result
;
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
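
/* Illustrative usage only (an example, not taken from the parameter list
 * itself): the parameters below are normally supplied at load time, e.g.
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 * and the writable ones can later be changed via files such as
 * /sys/module/scsi_debug/parameters/every_nth .
 */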
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(poll_queues, poll_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(host_max_queue,
		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"" SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];

static const char *scsi_debug_info(struct Scsi_Host *shp)
{
	int k;

	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
		      my_name, SDEBUG_VERSION, sdebug_version_date);
	if (k >= (SDEBUG_INFO_LEN - 1))
		return sdebug_info;
	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
		  "statistics", (int)sdebug_statistics);
	return sdebug_info;
}
/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
				 int length)
{
	char arr[16];
	int opts;
	int minLen = length > 15 ? 15 : length;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	memcpy(arr, buffer, minLen);
	arr[minLen] = '\0';
	if (1 != sscanf(arr, "%d", &opts))
		return -EINVAL;
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	if (sdebug_every_nth != 0)
		tweak_cmnd_count();
	return length;
}
struct sdebug_submit_queue_data {
	int *first;
	int *last;
	int queue_num;
};

static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
{
	struct sdebug_submit_queue_data *data = opaque;
	u32 unique_tag = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
	int queue_num = data->queue_num;

	if (hwq != queue_num)
		return true;

	/* Rely on iter'ing in ascending tag order */
	if (*data->first == -1)
		*data->first = *data->last = tag;
	else
		*data->last = tag;

	return true;
}
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct sdebug_host_info *sdhp;
	int j;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0; j < submit_queues; ++j) {
		int f = -1, l = -1;
		struct sdebug_submit_queue_data data = {
			.queue_num = j,
			.first = &f,
			.last = &l,
		};
		seq_printf(m, "  queue %d:\n", j);
		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
					&data);
		if (f >= 0) {
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? " not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
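/* Illustrative example (usage sketch, not part of the attribute itself):
 * with the driver loaded, the delay can be changed at runtime through the
 * driver attribute, e.g.
 *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * makes responses immediate; the write returns -EBUSY while any host still
 * has queued commands.
 */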
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			struct sdebug_host_info *sdhp;

			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);

static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			struct sdebug_host_info *sdhp;

			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}

			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}

static ssize_t opts_store(struct device_driver *ddp, const char *buf,
			  size_t count)
{
	int opts;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &opts) == 0)
				goto opts_done;
		} else {
			if (kstrtoint(work, 10, &opts) == 0)
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(opts);

static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
}
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int n;

	/* Cannot change from or to TYPE_ZBC with sysfs */
	if (sdebug_ptype == TYPE_ZBC)
		return -EINVAL;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n == TYPE_ZBC)
			return -EINVAL;
		sdebug_ptype = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ptype);

static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
}
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_dsense = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(dsense);

static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
}
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_no_lun_0 = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(no_lun_0);

static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
}
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_num_tgts = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(num_tgts);

static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);

static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
}

static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_per_host_store = v;
	return count;
}
static DRIVER_ATTR_RW(per_host_store);

static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);

static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int nth;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &nth) == 0)
				goto every_nth_done;
		} else {
			if (kstrtoint(work, 10, &nth) == 0)
				goto every_nth_done;
		}
	}
	return -EINVAL;

every_nth_done:
	sdebug_every_nth = nth;
	if (nth && !sdebug_statistics) {
		pr_info("every_nth needs statistics=1, set it\n");
		sdebug_statistics = true;
	}
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(every_nth);
static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);

static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);

static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		mutex_lock(&sdebug_host_list_mutex);

		/* We may only change sdebug_max_queue when we have no shosts */
		if (list_empty(&sdebug_host_list))
			sdebug_max_queue = n;
		else
			count = -EBUSY;
		mutex_unlock(&sdebug_host_list_mutex);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}

static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
}

static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_no_rwlock = v;
	return count;
}
static DRIVER_ATTR_RW(no_rwlock);

/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it.
 */
static DRIVER_ATTR_RO(host_max_queue);

static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);

static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
}
static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(vpd_use_hostno);

static ssize_t statistics_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
}
static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;

	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
		if (n > 0) {
			sdebug_statistics = true;
		} else {
			clear_queue_stats();
			sdebug_statistics = false;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(statistics);

static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);

static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
static ssize_t random_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
}

static ssize_t random_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_random = v;
	return count;
}
static DRIVER_ATTR_RW(random);

static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_removable = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(removable);

static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);

static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_strict = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(strict);

static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);

static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};

static int sdeb_zbc_model_str(const char *cp)
{
	int res = sysfs_match_string(zbc_model_strs_a, cp);

	if (res < 0) {
		res = sysfs_match_string(zbc_model_strs_b, cp);
		if (res < 0) {
			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
				return -EINVAL;
		}
	}
	return res;
}
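
/* Illustrative examples (usage sketch only): sdeb_zbc_model_str() accepts any
 * of the spellings in the three tables above, so each of the following
 * selects a host-managed model at load time:
 *     modprobe scsi_debug zbc=host-managed
 *     modprobe scsi_debug zbc=managed
 *     modprobe scsi_debug zbc=2
 */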
static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);

static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
{
	char *p = buf, *end = buf + PAGE_SIZE;
	int i;

	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
		p += scnprintf(p, end - p, "%d %ld\n", i,
			       atomic_long_read(&writes_by_group_number[i]));

	return p - buf;
}

static ssize_t group_number_stats_store(struct device_driver *ddp,
					const char *buf, size_t count)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
		atomic_long_set(&writes_by_group_number[i], 0);

	return count;
}
static DRIVER_ATTR_RW(group_number_stats);
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
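
/* Illustrative example (usage sketch only): writing to one of these
 * attributes triggers the associated store function, e.g.
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * invokes add_host_store() and adds two more simulated hosts.
 */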
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	&driver_attr_group_number_stats.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
static struct device *pseudo_primary;

static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;
	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}

	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
	if (!queued_cmd_cache) {
		ret = -ENOMEM;
		goto driver_unreg;
	}

	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
		pr_info("%s: failed to create initial debugfs directory\n", __func__);

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

driver_unreg:
	driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
	return ret;
}
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	for (; k > 0; k--)
		sdebug_do_remove_host(true);
	kmem_cache_destroy(queued_cmd_cache);
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	debugfs_remove(sdebug_debugfs_root);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = dev_to_sdebug_host(dev);
	kfree(sdbg_host);
}

/* idx must be valid, if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}

/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}

/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}

static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}

static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}

static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	struct sdebug_dev_info *devip = sdev->hostdata;

	if (!devip)
		return -ENODEV;

	mutex_lock(&sdebug_host_list_mutex);
	block_unblock_all_queues(true);

	if (qdepth > SDEBUG_CANQUEUE) {
		qdepth = SDEBUG_CANQUEUE;
		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
			qdepth, SDEBUG_CANQUEUE);
	}
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth != sdev->queue_depth)
		scsi_change_queue_depth(sdev, qdepth);

	block_unblock_all_queues(false);
	mutex_unlock(&sdebug_host_list_mutex);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);

	return sdev->queue_depth;
}
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true;	/* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true;	/* time out reads and writes */
	}
	return false;
}

/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
static void sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;

	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}

struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;
	int *num_entries;
};

/*
 * We don't handle aborted commands here, but it does not seem possible to have
 * aborted polled commands from schedule_resp()
 */
static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{
	struct sdebug_blk_mq_poll_data *data = opaque;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
	struct sdebug_defer *sd_dp;
	u32 unique_tag = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	struct sdebug_queued_cmd *sqcp;
	unsigned long flags;
	int queue_num = data->queue_num;
	ktime_t time;

	/* We're only interested in one queue for this iteration */
	if (hwq != queue_num)
		return true;

	/* Subsequent checks would fail if this failed, but check anyway */
	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		return true;

	time = ktime_get_boottime();

	spin_lock_irqsave(&sdsc->lock, flags);
	sqcp = TO_QUEUED_CMD(cmd);
	if (!sqcp) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	sd_dp = &sqcp->sd_dp;
	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	if (time < sd_dp->cmpl_ts) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	ASSIGN_QUEUED_CMD(cmd, NULL);
	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	sdebug_free_queued_cmd(sqcp);

	scsi_done(cmd); /* callback to mid level */
	(*data->num_entries)++;
	return true;
}
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int num_entries = 0;
	struct sdebug_blk_mq_poll_data data = {
		.queue_num = queue_num,
		.num_entries = &num_entries,
	};

	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
				&data);

	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}

static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_TMOUT_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = !!err->cnt;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_QUEUE_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = err->cnt ? err->queuecmd_ret : 0;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}
static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
			   struct sdebug_err_inject *info)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;
	int result;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			if (!err->cnt) {
				rcu_read_unlock();
				return 0;
			}

			ret = !!err->cnt;
			rcu_read_unlock();
			goto out_handle;
		}
	}
	rcu_read_unlock();

	return 0;

out_handle:
	if (err->cnt < 0)
		err->cnt++;
	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
	*info = *err;
	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);

	return ret;
}
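
/*
 * queuecommand entry point: validate the CDB against the opcode table,
 * apply any injected errors or artificial delays, then hand the matched
 * resp_* handler to schedule_resp() for immediate or deferred completion.
 */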
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;
	int ret = 0;
	struct sdebug_err_inject err;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}

	if (sdebug_timeout_cmd(scp)) {
		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
		return 0;
	}

	ret = sdebug_fail_queue_cmd(scp);
	if (ret) {
		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
			    opcode, ret);
		return ret;
	}

	if (sdebug_fail_cmd(scp, &ret, &err)) {
		scmd_printk(KERN_INFO, scp,
			    "fail command 0x%x with hostbyte=0x%x, "
			    "driverbyte=0x%x, statusbyte=0x%x, "
			    "sense_key=0x%x, asc=0x%x, asq=0x%x\n",
			    opcode, err.host_byte, err.driver_byte,
			    err.status_byte, err.sense_key, err.asc, err.asq);
		return ret;
	}

	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
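
/* Per-command private data init: just the per-command spinlock. */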
static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);

	spin_lock_init(&sdsc->lock);

	return 0;
}
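
/* Host template describing the pseudo adapter to the SCSI mid level */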
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler =	scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
	.cmd_size =		sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv =	sdebug_init_cmd_priv,
	.target_alloc =		sdebug_target_alloc,
	.target_destroy =	sdebug_target_destroy,
};
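
/*
 * Bus probe callback: allocate and configure a Scsi_Host for this pseudo
 * adapter (queue counts, T10 PI/DIX protection, guard type) and register
 * it with the SCSI mid level.
 */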
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	} else {
		/*
		 * Poll queues don't need interrupts, but we need at least one I/O queue
		 * left over for non-polled I/O.
		 * If condition not met, trim poll_queues to 1 (just for simplicity).
		 */
		if (poll_queues >= submit_queues) {
			if (submit_queues < 3)
				pr_warn("%s: trim poll_queues to 1\n", my_name);
			else
				pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
					my_name, submit_queues - 1);
			poll_queues = 1;
		}
		if (poll_queues)
			hpnt->nr_maps = 3;
	}

	sdbg_host->shost = hpnt;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}
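
/*
 * Bus remove callback: unregister the Scsi_Host and free the per-device
 * state attached to this pseudo adapter.
 */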
static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = dev_to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}
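
/* Pseudo bus on which the fake "adapter" devices are registered */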
static const struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};