]> git.ipfire.org Git - thirdparty/qemu.git/blob - hw/scsi/scsi-generic.c
d82b462be40d656fd9def2b7f2b72b28422131e6
[thirdparty/qemu.git] / hw / scsi / scsi-generic.c
1 /*
2 * Generic SCSI Device support
3 *
4 * Copyright (c) 2007 Bull S.A.S.
5 * Based on code by Paul Brook
6 * Based on code by Fabrice Bellard
7 *
8 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
9 *
10 * This code is licensed under the LGPL.
11 *
12 */
13
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qemu-common.h"
17 #include "qemu/error-report.h"
18 #include "hw/scsi/scsi.h"
19 #include "hw/scsi/emulation.h"
20 #include "sysemu/block-backend.h"
21 #include "trace.h"
22
23 #ifdef __linux__
24
25 #include <scsi/sg.h>
26 #include "scsi/constants.h"
27
28 #ifndef MAX_UINT
29 #define MAX_UINT ((unsigned int)-1)
30 #endif
31
/* Per-request state for the SG_IO pass-through path. */
typedef struct SCSIGenericReq {
    SCSIRequest req;        /* generic SCSI request; must stay first (DO_UPCAST) */
    uint8_t *buf;           /* data buffer for the transfer, g_malloc'ed; NULL if no data phase */
    int buflen;             /* size of buf in bytes */
    int len;                /* remaining transfer length; -1 once the read data
                             * has been delivered (see scsi_read_data) */
    sg_io_hdr_t io_header;  /* Linux SG_IO control block for this command */
} SCSIGenericReq;
39
/*
 * Migration: serialize the request state.  Only outgoing (TO_DEV) data
 * buffers are saved; for other transfers the buffer is refilled by
 * re-issuing the command on the destination.
 */
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* Scatter/gather requests are never migrated with a bounce buffer. */
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}
50
/*
 * Migration: restore the request state saved by scsi_generic_save_request.
 * Must read exactly the same fields in the same order.
 */
static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}
61
62 static void scsi_free_request(SCSIRequest *req)
63 {
64 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
65
66 g_free(r->buf);
67 }
68
/*
 * Helper function for command completion.  Translates the SG_IO result
 * into a SCSI status (plus sense data if needed) and completes the
 * request.  Must be called with the AIO already finished (or never
 * started) and with the BlockBackend's AioContext held by the caller.
 */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    /* Map errno / sg_io_hdr status fields to a SCSI status byte. */
    status = sg_io_sense_from_errno(-ret, &r->io_header, &sense);
    if (status == CHECK_CONDITION) {
        if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
            /* Real sense data arrived from the device: pass it through. */
            r->req.sense_len = r->io_header.sb_len_wr;
        } else {
            /* No device sense; synthesize one from the mapped errno. */
            scsi_req_build_sense(&r->req, sense);
        }
    }

    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    /* Drop the reference taken when the request was submitted. */
    scsi_req_unref(&r->req);
}
96
97 static void scsi_command_complete(void *opaque, int ret)
98 {
99 SCSIGenericReq *r = (SCSIGenericReq *)opaque;
100 SCSIDevice *s = r->req.dev;
101
102 assert(r->req.aiocb != NULL);
103 r->req.aiocb = NULL;
104
105 aio_context_acquire(blk_get_aio_context(s->conf.blk));
106 scsi_command_complete_noio(r, ret);
107 aio_context_release(blk_get_aio_context(s->conf.blk));
108 }
109
/*
 * Fill in the sg_io_hdr_t for this request and submit it to the host
 * device as an asynchronous SG_IO ioctl.
 *
 * @direction: one of the SG_DXFER_* constants.
 * @complete:  AIO callback invoked when the ioctl finishes.
 *
 * Returns 0 on success, -EIO if the AIO request could not be issued.
 */
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    r->io_header.timeout = MAX_UINT;    /* effectively: no timeout */
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}
133
134 static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s)
135 {
136 uint8_t page, page_idx;
137
138 /*
139 * EVPD set to zero returns the standard INQUIRY data.
140 *
141 * Check if scsi_version is unset (-1) to avoid re-defining it
142 * each time an INQUIRY with standard data is received.
143 * scsi_version is initialized with -1 in scsi_generic_reset
144 * and scsi_disk_reset, making sure that we'll set the
145 * scsi_version after a reset. If the version field of the
146 * INQUIRY response somehow changes after a guest reboot,
147 * we'll be able to keep track of it.
148 *
149 * On SCSI-2 and older, first 3 bits of byte 2 is the
150 * ANSI-approved version, while on later versions the
151 * whole byte 2 contains the version. Check if we're dealing
152 * with a newer version and, in that case, assign the
153 * whole byte.
154 */
155 if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
156 s->scsi_version = r->buf[2] & 0x07;
157 if (s->scsi_version > 2) {
158 s->scsi_version = r->buf[2];
159 }
160 }
161
162 if (s->type == TYPE_DISK && (r->req.cmd.buf[1] & 0x01)) {
163 page = r->req.cmd.buf[2];
164 if (page == 0xb0) {
165 uint32_t max_transfer =
166 blk_get_max_transfer(s->conf.blk) / s->blocksize;
167
168 assert(max_transfer);
169 stl_be_p(&r->buf[8], max_transfer);
170 /* Also take care of the opt xfer len. */
171 stl_be_p(&r->buf[12],
172 MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
173 } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
174 /*
175 * Now we're capable of supplying the VPD Block Limits
176 * response if the hardware can't. Add it in the INQUIRY
177 * Supported VPD pages response in case we are using the
178 * emulation for this device.
179 *
180 * This way, the guest kernel will be aware of the support
181 * and will use it to proper setup the SCSI device.
182 *
183 * VPD page numbers must be sorted, so insert 0xb0 at the
184 * right place with an in-place insert. When the while loop
185 * begins the device response is at r[0] to r[page_idx - 1].
186 */
187 page_idx = lduw_be_p(r->buf + 2) + 4;
188 page_idx = MIN(page_idx, r->buflen);
189 while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
190 if (page_idx < r->buflen) {
191 r->buf[page_idx] = r->buf[page_idx - 1];
192 }
193 page_idx--;
194 }
195 if (page_idx < r->buflen) {
196 r->buf[page_idx] = 0xb0;
197 }
198 stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
199 }
200 }
201 }
202
/*
 * Build an emulated VPD Block Limits (0xb0) INQUIRY response in r->buf,
 * used when the hardware rejected the request but the guest still needs
 * the page.  Returns the number of bytes placed in the reply buffer
 * (always r->buflen; the buffer is zero-filled first and the response
 * truncated to fit).
 */
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];    /* staging area; the real page is far smaller */

    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);          /* peripheral device type */
    stb_p(buf + 1, 0xb0);         /* page code: Block Limits */
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);       /* page length */

    /* Copy only what fits in the guest-sized reply buffer. */
    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}
234
/*
 * AIO completion callback for device-to-host transfers.  Besides
 * delivering the data, this snoops several replies (READ CAPACITY,
 * MODE SENSE, INQUIRY) to keep device state in sync with the guest's
 * view, and substitutes an emulated VPD Block Limits page when the
 * hardware cannot provide one.
 */
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* Actual number of bytes the device transferred. */
    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    /* Mark the transfer finished; scsi_read_data completes on next call. */
    r->len = -1;

    /*
     * Check if this is a VPD Block Limits request that
     * resulted in sense error but would need emulation.
     * In this case, emulate a valid VPD response.
     */
    if (s->needs_vpd_bl_emulation && ret == 0 &&
       (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) &&
        r->req.cmd.buf[0] == INQUIRY &&
        (r->req.cmd.buf[1] & 0x01) &&
        r->req.cmd.buf[2] == 0xb0) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);
        if (sense.key == ILLEGAL_REQUEST) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * No need to let scsi_read_complete go on and handle an
             * INQUIRY VPD BL request we created manually.
             */
            goto req_complete;
        }
    }

    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /* Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        /* Set the write-protect (WP) bit in the device-specific parameter. */
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else  {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        scsi_handle_inquiry_reply(r, s);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
320
321 /* Read more data from scsi device into buffer. */
322 static void scsi_read_data(SCSIRequest *req)
323 {
324 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
325 SCSIDevice *s = r->req.dev;
326 int ret;
327
328 trace_scsi_generic_read_data(req->tag);
329
330 /* The request is used as the AIO opaque value, so add a ref. */
331 scsi_req_ref(&r->req);
332 if (r->len == -1) {
333 scsi_command_complete_noio(r, 0);
334 return;
335 }
336
337 ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
338 scsi_read_complete);
339 if (ret < 0) {
340 scsi_command_complete_noio(r, ret);
341 }
342 }
343
/*
 * AIO completion callback for host-to-device transfers.  Snoops
 * MODE SELECT on tapes to pick up a new block size from the mode
 * block descriptor the guest just wrote.
 */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* buf[4] == 12: a 6-byte MODE SELECT with a single block descriptor. */
    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        /* 24-bit big-endian block length from the block descriptor. */
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
372
373 /* Write data to a scsi device. Returns nonzero on failure.
374 The transfer may complete asynchronously. */
375 static void scsi_write_data(SCSIRequest *req)
376 {
377 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
378 SCSIDevice *s = r->req.dev;
379 int ret;
380
381 trace_scsi_generic_write_data(req->tag);
382 if (r->len == 0) {
383 r->len = r->buflen;
384 scsi_req_data(&r->req, r->len);
385 return;
386 }
387
388 /* The request is used as the AIO opaque value, so add a ref. */
389 scsi_req_ref(&r->req);
390 ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
391 if (ret < 0) {
392 scsi_command_complete_noio(r, ret);
393 }
394 }
395
396 /* Return a pointer to the data buffer. */
397 static uint8_t *scsi_get_buf(SCSIRequest *req)
398 {
399 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
400
401 return r->buf;
402 }
403
404 static void scsi_generic_command_dump(uint8_t *cmd, int len)
405 {
406 int i;
407 char *line_buffer, *p;
408
409 line_buffer = g_malloc(len * 5 + 1);
410
411 for (i = 0, p = line_buffer; i < len; i++) {
412 p += sprintf(p, " 0x%02x", cmd[i]);
413 }
414 trace_scsi_generic_send_command(line_buffer);
415
416 g_free(line_buffer);
417 }
418
/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data. */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        /* No data phase: submit immediately, no bounce buffer needed. */
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    /* (Re)size the bounce buffer to match the expected transfer length. */
    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* r->len == 0 signals scsi_write_data's first, fill-buffer pass. */
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}
464
465 static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
466 {
467 int i;
468
469 if ((p[1] & 0xF) == 3) {
470 /* NAA designator type */
471 if (p[3] != 8) {
472 return -EINVAL;
473 }
474 *p_wwn = ldq_be_p(p + 4);
475 return 0;
476 }
477
478 if ((p[1] & 0xF) == 8) {
479 /* SCSI name string designator type */
480 if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
481 return -EINVAL;
482 }
483 if (p[3] > 20 && p[24] != ',') {
484 return -EINVAL;
485 }
486 *p_wwn = 0;
487 for (i = 8; i < 24; i++) {
488 char c = qemu_toupper(p[i]);
489 c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
490 *p_wwn = (*p_wwn << 4) | c;
491 }
492 return 0;
493 }
494
495 return -EINVAL;
496 }
497
/*
 * Issue a synchronous device-to-host SG_IO command.
 *
 * Note the buffer size is a uint8_t, so at most 255 bytes can be read.
 * Returns 0 on success, -1 on any ioctl, driver or host error (the
 * sense data, if any, is discarded).
 */
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    return 0;
}
522
523 /*
524 * Executes an INQUIRY request with EVPD set to retrieve the
525 * available VPD pages of the device. If the device does
526 * not support the Block Limits page (page 0xb0), set
527 * the needs_vpd_bl_emulation flag for future use.
528 */
529 static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
530 {
531 uint8_t cmd[6];
532 uint8_t buf[250];
533 uint8_t page_len;
534 int ret, i;
535
536 memset(cmd, 0, sizeof(cmd));
537 memset(buf, 0, sizeof(buf));
538 cmd[0] = INQUIRY;
539 cmd[1] = 1;
540 cmd[2] = 0x00;
541 cmd[4] = sizeof(buf);
542
543 ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
544 buf, sizeof(buf));
545 if (ret < 0) {
546 /*
547 * Do not assume anything if we can't retrieve the
548 * INQUIRY response to assert the VPD Block Limits
549 * support.
550 */
551 s->needs_vpd_bl_emulation = false;
552 return;
553 }
554
555 page_len = buf[3];
556 for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
557 if (buf[i] == 0xb0) {
558 s->needs_vpd_bl_emulation = false;
559 return;
560 }
561 }
562 s->needs_vpd_bl_emulation = true;
563 }
564
/*
 * Read VPD page 0x83 (Device Identification) from the host device and
 * record any NAA world-wide names found: one associated with the
 * logical unit (s->wwn) and one with the target port (s->port_wwn).
 * Failures are silently ignored; the WWNs simply stay unset.
 */
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;             /* EVPD */
    cmd[2] = 0x83;          /* page: Device Identification */
    cmd[4] = sizeof(buf);   /* allocation length */

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        return;
    }

    /* Page length, clamped to what actually fits in our buffer. */
    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    /* Walk the designation descriptors; each is p[3] + 4 bytes long. */
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        /* Stop if the descriptor claims to extend past the page. */
        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}
609
610 void scsi_generic_read_device_inquiry(SCSIDevice *s)
611 {
612 scsi_generic_read_device_identification(s);
613 if (s->type == TYPE_DISK) {
614 scsi_generic_set_vpd_bl_emulation(s);
615 } else {
616 s->needs_vpd_bl_emulation = false;
617 }
618 }
619
620 static int get_stream_blocksize(BlockBackend *blk)
621 {
622 uint8_t cmd[6];
623 uint8_t buf[12];
624 int ret;
625
626 memset(cmd, 0, sizeof(cmd));
627 memset(buf, 0, sizeof(buf));
628 cmd[0] = MODE_SENSE;
629 cmd[4] = sizeof(buf);
630
631 ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf));
632 if (ret < 0) {
633 return -1;
634 }
635
636 return (buf[9] << 16) | (buf[10] << 8) | buf[11];
637 }
638
/*
 * Device reset: restore the (possibly unset, -1) default SCSI version so
 * it is re-learned from the next standard INQUIRY, and fail all pending
 * requests with RESET sense.
 */
static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}
646
/*
 * Realize the scsi-generic device: validate the backing drive and its
 * error options, verify the host node speaks SG_IO v3+, read the SCSI
 * id/type, pick an initial block size, and probe the device's INQUIRY
 * pages.
 */
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Pass-through cannot retry/stop on errors, so only the default
     * werror=enospc / rerror=report policies are supported. */
    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       blk_is_read_only(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends.
         * READ CAPACITY.  If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean. */
    s->default_scsi_version = -1;
    scsi_generic_read_device_inquiry(s);
}
723
/* Request operations for the SG_IO pass-through path. */
const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};
734
735 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
736 uint8_t *buf, void *hba_private)
737 {
738 return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
739 }
740
/* qdev properties: the backing drive and write-sharing permission. */
static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_END_OF_LIST(),
};
746
/* Delegate CDB parsing to the generic SCSI bus implementation. */
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}
752
/* Class init: wire up the scsi-generic device and qdev callbacks. */
static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_generic_realize;
    sc->alloc_req = scsi_new_request;
    sc->parse_cdb = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    dc->props = scsi_generic_properties;
    dc->vmsd = &vmstate_scsi_device;
}
767
/* QOM type registration data for the "scsi-generic" device. */
static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};
774
/* Register the scsi-generic type with QOM (invoked via type_init). */
static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}
779
780 type_init(scsi_generic_register_types)
781
782 #endif /* __linux__ */