hw/scsi/scsi-disk.c
1 /*
2 * SCSI Device emulation
3 *
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
6 *
7 * Written by Paul Brook
8 * Modifications:
9 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
14 *
15 * This code is licensed under the LGPL.
16 *
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
20 */
21
22 //#define DEBUG_SCSI
23
24 #ifdef DEBUG_SCSI
25 #define DPRINTF(fmt, ...) \
26 do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
27 #else
28 #define DPRINTF(fmt, ...) do {} while(0)
29 #endif
30
31 #include "qemu/osdep.h"
32 #include "qapi/error.h"
33 #include "qemu/error-report.h"
34 #include "hw/scsi/scsi.h"
35 #include "block/scsi.h"
36 #include "sysemu/sysemu.h"
37 #include "sysemu/block-backend.h"
38 #include "sysemu/blockdev.h"
39 #include "hw/block/block.h"
40 #include "sysemu/dma.h"
41 #include "qemu/cutils.h"
42
43 #ifdef __linux__
44 #include <scsi/sg.h>
45 #endif
46
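/*
 * Requests that do not come with a scatter/gather list are bounced
 * through r->iov; SCSI_DMA_BUF_SIZE is the chunk size of that bounce
 * buffer, and SCSI_WRITE_SAME_MAX caps the buffer built for WRITE SAME
 * with a non-zero payload.
 */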
47 #define SCSI_WRITE_SAME_MAX 524288
48 #define SCSI_DMA_BUF_SIZE 131072
49 #define SCSI_MAX_INQUIRY_LEN 256
50 #define SCSI_MAX_MODE_LEN 256
51
52 #define DEFAULT_DISCARD_GRANULARITY 4096
53 #define DEFAULT_MAX_UNMAP_SIZE (1 << 30) /* 1 GB */
54 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
55
56 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
57
58 #define SCSI_DISK_BASE(obj) \
59 OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
60 #define SCSI_DISK_BASE_CLASS(klass) \
61 OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
62 #define SCSI_DISK_BASE_GET_CLASS(obj) \
63 OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)
64
65 typedef struct SCSIDiskClass {
66 SCSIDeviceClass parent_class;
67 DMAIOFunc *dma_readv;
68 DMAIOFunc *dma_writev;
69 bool (*need_fua_emulation)(SCSICommand *cmd);
70 } SCSIDiskClass;
71
72 typedef struct SCSIDiskReq {
73 SCSIRequest req;
74 /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
75 uint64_t sector;
76 uint32_t sector_count;
77 uint32_t buflen;
78 bool started;
79 bool need_fua_emulation;
80 struct iovec iov;
81 QEMUIOVector qiov;
82 BlockAcctCookie acct;
83 unsigned char *status;
84 } SCSIDiskReq;
85
86 #define SCSI_DISK_F_REMOVABLE 0
87 #define SCSI_DISK_F_DPOFUA 1
88 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
89
90 typedef struct SCSIDiskState
91 {
92 SCSIDevice qdev;
93 uint32_t features;
94 bool media_changed;
95 bool media_event;
96 bool eject_request;
97 uint16_t port_index;
98 uint64_t max_unmap_size;
99 uint64_t max_io_size;
100 QEMUBH *bh;
101 char *version;
102 char *serial;
103 char *vendor;
104 char *product;
105 bool tray_open;
106 bool tray_locked;
107 } SCSIDiskState;
108
109 static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);
110
111 static void scsi_free_request(SCSIRequest *req)
112 {
113 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
114
115 qemu_vfree(r->iov.iov_base);
116 }
117
118 /* Helper function for command completion with sense. */
119 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
120 {
121 DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n",
122 r->req.tag, sense.key, sense.asc, sense.ascq);
123 scsi_req_build_sense(&r->req, sense);
124 scsi_req_complete(&r->req, CHECK_CONDITION);
125 }
126
127 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
128 {
129 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
130
131 if (!r->iov.iov_base) {
132 r->buflen = size;
133 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
134 }
135 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
136 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
137 }
138
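/*
 * Serialize the transient request state (current sector, remaining
 * sector count and the bounce-buffer contents) so that in-flight
 * requests can be restored later, e.g. across migration; the load
 * side below rebuilds the iovec accordingly.
 */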
139 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
140 {
141 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
142
143 qemu_put_be64s(f, &r->sector);
144 qemu_put_be32s(f, &r->sector_count);
145 qemu_put_be32s(f, &r->buflen);
146 if (r->buflen) {
147 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
148 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
149 } else if (!req->retry) {
150 uint32_t len = r->iov.iov_len;
151 qemu_put_be32s(f, &len);
152 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
153 }
154 }
155 }
156
157 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
158 {
159 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
160
161 qemu_get_be64s(f, &r->sector);
162 qemu_get_be32s(f, &r->sector_count);
163 qemu_get_be32s(f, &r->buflen);
164 if (r->buflen) {
165 scsi_init_iovec(r, r->buflen);
166 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
167 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
168 } else if (!r->req.retry) {
169 uint32_t len;
170 qemu_get_be32s(f, &len);
171 r->iov.iov_len = len;
172 assert(r->iov.iov_len <= r->buflen);
173 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
174 }
175 }
176
177 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
178 }
179
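/*
 * Common error filter for the completion paths: swallows cancelled
 * requests, routes negative errno values through the configured
 * rerror/werror policy via scsi_handle_rw_error(), and completes the
 * command if a SCSI status byte was already recorded in *r->status.
 * Returns true if the request has been finished (or queued for retry)
 * and the caller must not process it any further.
 */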
180 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
181 {
182 if (r->req.io_canceled) {
183 scsi_req_cancel_complete(&r->req);
184 return true;
185 }
186
187 if (ret < 0) {
188 return scsi_handle_rw_error(r, -ret, acct_failed);
189 }
190
191 if (r->status && *r->status) {
192 if (acct_failed) {
193 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
194 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
195 }
196 scsi_req_complete(&r->req, *r->status);
197 return true;
198 }
199
200 return false;
201 }
202
203 static void scsi_aio_complete(void *opaque, int ret)
204 {
205 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
206 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
207
208 assert(r->req.aiocb != NULL);
209 r->req.aiocb = NULL;
210 if (scsi_disk_req_check_error(r, ret, true)) {
211 goto done;
212 }
213
214 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
215 scsi_req_complete(&r->req, GOOD);
216
217 done:
218 scsi_req_unref(&r->req);
219 }
220
221 static bool scsi_is_cmd_fua(SCSICommand *cmd)
222 {
223 switch (cmd->buf[0]) {
224 case READ_10:
225 case READ_12:
226 case READ_16:
227 case WRITE_10:
228 case WRITE_12:
229 case WRITE_16:
230 return (cmd->buf[1] & 8) != 0;
231
232 case VERIFY_10:
233 case VERIFY_12:
234 case VERIFY_16:
235 case WRITE_VERIFY_10:
236 case WRITE_VERIFY_12:
237 case WRITE_VERIFY_16:
238 return true;
239
240 case READ_6:
241 case WRITE_6:
242 default:
243 return false;
244 }
245 }
246
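/*
 * FUA (Force Unit Access) is emulated by issuing an explicit flush
 * once the data has been written; when no FUA emulation is needed the
 * request completes immediately.
 */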
247 static void scsi_write_do_fua(SCSIDiskReq *r)
248 {
249 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
250
251 assert(r->req.aiocb == NULL);
252 assert(!r->req.io_canceled);
253
254 if (r->need_fua_emulation) {
255 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
256 BLOCK_ACCT_FLUSH);
257 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
258 return;
259 }
260
261 scsi_req_complete(&r->req, GOOD);
262 scsi_req_unref(&r->req);
263 }
264
265 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
266 {
267 assert(r->req.aiocb == NULL);
268 if (scsi_disk_req_check_error(r, ret, false)) {
269 goto done;
270 }
271
272 r->sector += r->sector_count;
273 r->sector_count = 0;
274 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
275 scsi_write_do_fua(r);
276 return;
277 } else {
278 scsi_req_complete(&r->req, GOOD);
279 }
280
281 done:
282 scsi_req_unref(&r->req);
283 }
284
285 static void scsi_dma_complete(void *opaque, int ret)
286 {
287 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
288 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
289
290 assert(r->req.aiocb != NULL);
291 r->req.aiocb = NULL;
292
293 if (ret < 0) {
294 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
295 } else {
296 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
297 }
298 scsi_dma_complete_noio(r, ret);
299 }
300
301 static void scsi_read_complete(void * opaque, int ret)
302 {
303 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
304 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
305 int n;
306
307 assert(r->req.aiocb != NULL);
308 r->req.aiocb = NULL;
309 if (scsi_disk_req_check_error(r, ret, true)) {
310 goto done;
311 }
312
313 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
314 DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);
315
316 n = r->qiov.size / 512;
317 r->sector += n;
318 r->sector_count -= n;
319 scsi_req_data(&r->req, r->qiov.size);
320
321 done:
322 scsi_req_unref(&r->req);
323 }
324
325 /* Actually issue a read to the block device. */
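/*
 * If the HBA supplied a scatter/gather list, dma_blk_io() transfers the
 * data directly into guest memory and completes via scsi_dma_complete();
 * otherwise the data is bounced through r->iov in chunks of at most
 * SCSI_DMA_BUF_SIZE bytes and handed to the HBA with scsi_req_data()
 * from scsi_read_complete().
 */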
326 static void scsi_do_read(SCSIDiskReq *r, int ret)
327 {
328 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
329 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
330
331 assert (r->req.aiocb == NULL);
332 if (scsi_disk_req_check_error(r, ret, false)) {
333 goto done;
334 }
335
336 /* The request is used as the AIO opaque value, so add a ref. */
337 scsi_req_ref(&r->req);
338
339 if (r->req.sg) {
340 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
341 r->req.resid -= r->req.sg->size;
342 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
343 r->req.sg, r->sector << BDRV_SECTOR_BITS,
344 sdc->dma_readv, r, scsi_dma_complete, r,
345 DMA_DIRECTION_FROM_DEVICE);
346 } else {
347 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
348 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
349 r->qiov.size, BLOCK_ACCT_READ);
350 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
351 scsi_read_complete, r, r);
352 }
353
354 done:
355 scsi_req_unref(&r->req);
356 }
357
358 static void scsi_do_read_cb(void *opaque, int ret)
359 {
360 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
361 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
362
363 assert (r->req.aiocb != NULL);
364 r->req.aiocb = NULL;
365
366 if (ret < 0) {
367 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
368 } else {
369 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
370 }
371 scsi_do_read(opaque, ret);
372 }
373
374 /* Read more data from scsi device into buffer. */
375 static void scsi_read_data(SCSIRequest *req)
376 {
377 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
378 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
379 bool first;
380
381 DPRINTF("Read sector_count=%d\n", r->sector_count);
382 if (r->sector_count == 0) {
383 /* This also clears the sense buffer for REQUEST SENSE. */
384 scsi_req_complete(&r->req, GOOD);
385 return;
386 }
387
388 /* No data transfer may already be in progress */
389 assert(r->req.aiocb == NULL);
390
391 /* The request is used as the AIO opaque value, so add a ref. */
392 scsi_req_ref(&r->req);
393 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
394 DPRINTF("Data transfer direction invalid\n");
395 scsi_read_complete(r, -EINVAL);
396 return;
397 }
398
399 if (!blk_is_available(req->dev->conf.blk)) {
400 scsi_read_complete(r, -ENOMEDIUM);
401 return;
402 }
403
404 first = !r->started;
405 r->started = true;
406 if (first && r->need_fua_emulation) {
407 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
408 BLOCK_ACCT_FLUSH);
409 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
410 } else {
411 scsi_do_read(r, 0);
412 }
413 }
414
415 /*
416 * scsi_handle_rw_error has two return values: 0 means the error must be
417 * ignored and the caller should keep processing the request, 1 means it
418 * has been handled and nothing else should be done for it.  Note that
419 * scsi_handle_rw_error always manages its reference counts, independent
420 * of the return value.
421 */
422 static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
423 {
424 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
425 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
426 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
427 is_read, error);
428
429 if (action == BLOCK_ERROR_ACTION_REPORT) {
430 if (acct_failed) {
431 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
432 }
433 switch (error) {
434 case ENOMEDIUM:
435 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
436 break;
437 case ENOMEM:
438 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
439 break;
440 case EINVAL:
441 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
442 break;
443 case ENOSPC:
444 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
445 break;
446 default:
447 scsi_check_condition(r, SENSE_CODE(IO_ERROR));
448 break;
449 }
450 }
451 blk_error_action(s->qdev.conf.blk, action, is_read, error);
452 if (action == BLOCK_ERROR_ACTION_STOP) {
453 scsi_req_retry(&r->req);
454 }
455 return action != BLOCK_ERROR_ACTION_IGNORE;
456 }
457
458 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
459 {
460 uint32_t n;
461
462 assert (r->req.aiocb == NULL);
463 if (scsi_disk_req_check_error(r, ret, false)) {
464 goto done;
465 }
466
467 n = r->qiov.size / 512;
468 r->sector += n;
469 r->sector_count -= n;
470 if (r->sector_count == 0) {
471 scsi_write_do_fua(r);
472 return;
473 } else {
474 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
475 DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
476 scsi_req_data(&r->req, r->qiov.size);
477 }
478
479 done:
480 scsi_req_unref(&r->req);
481 }
482
483 static void scsi_write_complete(void * opaque, int ret)
484 {
485 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
486 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
487
488 assert (r->req.aiocb != NULL);
489 r->req.aiocb = NULL;
490
491 if (ret < 0) {
492 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
493 } else {
494 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
495 }
496 scsi_write_complete_noio(r, ret);
497 }
498
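/*
 * Entered each time the HBA continues a write request: the very first
 * call (empty bounce buffer, no scatter/gather list) only asks the HBA
 * for data, VERIFY commands consume the data without touching the
 * medium, and all other calls submit the scatter/gather list or the
 * filled bounce buffer to the block layer.
 */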
499 static void scsi_write_data(SCSIRequest *req)
500 {
501 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
502 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
503 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
504
505 /* No data transfer may already be in progress */
506 assert(r->req.aiocb == NULL);
507
508 /* The request is used as the AIO opaque value, so add a ref. */
509 scsi_req_ref(&r->req);
510 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
511 DPRINTF("Data transfer direction invalid\n");
512 scsi_write_complete_noio(r, -EINVAL);
513 return;
514 }
515
516 if (!r->req.sg && !r->qiov.size) {
517 /* Called for the first time. Ask the driver to send us more data. */
518 r->started = true;
519 scsi_write_complete_noio(r, 0);
520 return;
521 }
522 if (!blk_is_available(req->dev->conf.blk)) {
523 scsi_write_complete_noio(r, -ENOMEDIUM);
524 return;
525 }
526
527 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
528 r->req.cmd.buf[0] == VERIFY_16) {
529 if (r->req.sg) {
530 scsi_dma_complete_noio(r, 0);
531 } else {
532 scsi_write_complete_noio(r, 0);
533 }
534 return;
535 }
536
537 if (r->req.sg) {
538 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
539 r->req.resid -= r->req.sg->size;
540 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
541 r->req.sg, r->sector << BDRV_SECTOR_BITS,
542 sdc->dma_writev, r, scsi_dma_complete, r,
543 DMA_DIRECTION_TO_DEVICE);
544 } else {
545 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
546 r->qiov.size, BLOCK_ACCT_WRITE);
547 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
548 scsi_write_complete, r, r);
549 }
550 }
551
552 /* Return a pointer to the data buffer. */
553 static uint8_t *scsi_get_buf(SCSIRequest *req)
554 {
555 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
556
557 return (uint8_t *)r->iov.iov_base;
558 }
559
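/*
 * Build the INQUIRY response: either the standard inquiry data or, when
 * EVPD is set, one of the supported vital product data pages (0x00
 * supported pages, 0x80 unit serial number, 0x83 device identification,
 * 0xb0 block limits, 0xb2 thin provisioning).  Returns the number of
 * bytes filled in, or -1 for an unsupported request.
 */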
560 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
561 {
562 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
563 int buflen = 0;
564 int start;
565
566 if (req->cmd.buf[1] & 0x1) {
567 /* Vital product data */
568 uint8_t page_code = req->cmd.buf[2];
569
570 outbuf[buflen++] = s->qdev.type & 0x1f;
571 outbuf[buflen++] = page_code; // this page
572 outbuf[buflen++] = 0x00;
573 outbuf[buflen++] = 0x00;
574 start = buflen;
575
576 switch (page_code) {
577 case 0x00: /* Supported page codes, mandatory */
578 {
579 DPRINTF("Inquiry EVPD[Supported pages] "
580 "buffer size %zd\n", req->cmd.xfer);
581 outbuf[buflen++] = 0x00; // list of supported pages (this page)
582 if (s->serial) {
583 outbuf[buflen++] = 0x80; // unit serial number
584 }
585 outbuf[buflen++] = 0x83; // device identification
586 if (s->qdev.type == TYPE_DISK) {
587 outbuf[buflen++] = 0xb0; // block limits
588 outbuf[buflen++] = 0xb2; // thin provisioning
589 }
590 break;
591 }
592 case 0x80: /* Device serial number, optional */
593 {
594 int l;
595
596 if (!s->serial) {
597 DPRINTF("Inquiry (EVPD[Serial number] not supported\n");
598 return -1;
599 }
600
601 l = strlen(s->serial);
602 if (l > 36) {
603 l = 36;
604 }
605
606 DPRINTF("Inquiry EVPD[Serial number] "
607 "buffer size %zd\n", req->cmd.xfer);
608 memcpy(outbuf+buflen, s->serial, l);
609 buflen += l;
610 break;
611 }
612
613 case 0x83: /* Device identification page, mandatory */
614 {
615 const char *str = s->serial ?: blk_name(s->qdev.conf.blk);
616 int max_len = s->serial ? 20 : 255 - 8;
617 int id_len = strlen(str);
618
619 if (id_len > max_len) {
620 id_len = max_len;
621 }
622 DPRINTF("Inquiry EVPD[Device identification] "
623 "buffer size %zd\n", req->cmd.xfer);
624
625 outbuf[buflen++] = 0x2; // ASCII
626 outbuf[buflen++] = 0; // not officially assigned
627 outbuf[buflen++] = 0; // reserved
628 outbuf[buflen++] = id_len; // length of data following
629 memcpy(outbuf+buflen, str, id_len);
630 buflen += id_len;
631
632 if (s->qdev.wwn) {
633 outbuf[buflen++] = 0x1; // Binary
634 outbuf[buflen++] = 0x3; // NAA
635 outbuf[buflen++] = 0; // reserved
636 outbuf[buflen++] = 8;
637 stq_be_p(&outbuf[buflen], s->qdev.wwn);
638 buflen += 8;
639 }
640
641 if (s->qdev.port_wwn) {
642 outbuf[buflen++] = 0x61; // SAS / Binary
643 outbuf[buflen++] = 0x93; // PIV / Target port / NAA
644 outbuf[buflen++] = 0; // reserved
645 outbuf[buflen++] = 8;
646 stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
647 buflen += 8;
648 }
649
650 if (s->port_index) {
651 outbuf[buflen++] = 0x61; // SAS / Binary
652 outbuf[buflen++] = 0x94; // PIV / Target port / relative target port
653 outbuf[buflen++] = 0; // reserved
654 outbuf[buflen++] = 4;
655 stw_be_p(&outbuf[buflen + 2], s->port_index);
656 buflen += 4;
657 }
658 break;
659 }
660 case 0xb0: /* block limits */
661 {
662 unsigned int unmap_sectors =
663 s->qdev.conf.discard_granularity / s->qdev.blocksize;
664 unsigned int min_io_size =
665 s->qdev.conf.min_io_size / s->qdev.blocksize;
666 unsigned int opt_io_size =
667 s->qdev.conf.opt_io_size / s->qdev.blocksize;
668 unsigned int max_unmap_sectors =
669 s->max_unmap_size / s->qdev.blocksize;
670 unsigned int max_io_sectors =
671 s->max_io_size / s->qdev.blocksize;
672
673 if (s->qdev.type == TYPE_ROM) {
674 DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",
675 page_code);
676 return -1;
677 }
678 /* required VPD size with unmap support */
679 buflen = 0x40;
680 memset(outbuf + 4, 0, buflen - 4);
681
682 outbuf[4] = 0x1; /* wsnz */
683
684 /* optimal transfer length granularity */
685 outbuf[6] = (min_io_size >> 8) & 0xff;
686 outbuf[7] = min_io_size & 0xff;
687
688 /* maximum transfer length */
689 outbuf[8] = (max_io_sectors >> 24) & 0xff;
690 outbuf[9] = (max_io_sectors >> 16) & 0xff;
691 outbuf[10] = (max_io_sectors >> 8) & 0xff;
692 outbuf[11] = max_io_sectors & 0xff;
693
694 /* optimal transfer length */
695 outbuf[12] = (opt_io_size >> 24) & 0xff;
696 outbuf[13] = (opt_io_size >> 16) & 0xff;
697 outbuf[14] = (opt_io_size >> 8) & 0xff;
698 outbuf[15] = opt_io_size & 0xff;
699
700 /* max unmap LBA count, default is 1GB */
701 outbuf[20] = (max_unmap_sectors >> 24) & 0xff;
702 outbuf[21] = (max_unmap_sectors >> 16) & 0xff;
703 outbuf[22] = (max_unmap_sectors >> 8) & 0xff;
704 outbuf[23] = max_unmap_sectors & 0xff;
705
706 /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header. */
707 outbuf[24] = 0;
708 outbuf[25] = 0;
709 outbuf[26] = 0;
710 outbuf[27] = 255;
711
712 /* optimal unmap granularity */
713 outbuf[28] = (unmap_sectors >> 24) & 0xff;
714 outbuf[29] = (unmap_sectors >> 16) & 0xff;
715 outbuf[30] = (unmap_sectors >> 8) & 0xff;
716 outbuf[31] = unmap_sectors & 0xff;
717
718 /* max write same size */
719 outbuf[36] = 0;
720 outbuf[37] = 0;
721 outbuf[38] = 0;
722 outbuf[39] = 0;
723
724 outbuf[40] = (max_io_sectors >> 24) & 0xff;
725 outbuf[41] = (max_io_sectors >> 16) & 0xff;
726 outbuf[42] = (max_io_sectors >> 8) & 0xff;
727 outbuf[43] = max_io_sectors & 0xff;
728 break;
729 }
730 case 0xb2: /* thin provisioning */
731 {
732 buflen = 8;
733 outbuf[4] = 0;
734 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
735 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
736 outbuf[7] = 0;
737 break;
738 }
739 default:
740 return -1;
741 }
742 /* done with EVPD */
743 assert(buflen - start <= 255);
744 outbuf[start - 1] = buflen - start;
745 return buflen;
746 }
747
748 /* Standard INQUIRY data */
749 if (req->cmd.buf[2] != 0) {
750 return -1;
751 }
752
753 /* PAGE CODE == 0 */
754 buflen = req->cmd.xfer;
755 if (buflen > SCSI_MAX_INQUIRY_LEN) {
756 buflen = SCSI_MAX_INQUIRY_LEN;
757 }
758
759 outbuf[0] = s->qdev.type & 0x1f;
760 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
761
762 strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
763 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
764
765 memset(&outbuf[32], 0, 4);
766 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
767 /*
768 * We claim conformance to SPC-3, which is required for guests
769 * to ask for modern features like READ CAPACITY(16) or the
770 * block characteristics VPD page by default. Not all of SPC-3
771 * is actually implemented, but we're good enough.
772 */
773 outbuf[2] = 5;
774 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
775
776 if (buflen > 36) {
777 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
778 } else {
779 /* If the allocation length of CDB is too small,
780 the additional length is not adjusted */
781 outbuf[4] = 36 - 5;
782 }
783
784 /* Sync data transfer and TCQ. */
785 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
786 return buflen;
787 }
788
789 static inline bool media_is_dvd(SCSIDiskState *s)
790 {
791 uint64_t nb_sectors;
792 if (s->qdev.type != TYPE_ROM) {
793 return false;
794 }
795 if (!blk_is_available(s->qdev.conf.blk)) {
796 return false;
797 }
798 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
799 return nb_sectors > CD_MAX_SECTORS;
800 }
801
802 static inline bool media_is_cd(SCSIDiskState *s)
803 {
804 uint64_t nb_sectors;
805 if (s->qdev.type != TYPE_ROM) {
806 return false;
807 }
808 if (!blk_is_available(s->qdev.conf.blk)) {
809 return false;
810 }
811 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
812 return nb_sectors <= CD_MAX_SECTORS;
813 }
814
815 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
816 uint8_t *outbuf)
817 {
818 uint8_t type = r->req.cmd.buf[1] & 7;
819
820 if (s->qdev.type != TYPE_ROM) {
821 return -1;
822 }
823
824 /* Types 1/2 are only defined for Blu-Ray. */
825 if (type != 0) {
826 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
827 return -1;
828 }
829
830 memset(outbuf, 0, 34);
831 outbuf[1] = 32;
832 outbuf[2] = 0xe; /* last session complete, disc finalized */
833 outbuf[3] = 1; /* first track on disc */
834 outbuf[4] = 1; /* # of sessions */
835 outbuf[5] = 1; /* first track of last session */
836 outbuf[6] = 1; /* last track of last session */
837 outbuf[7] = 0x20; /* unrestricted use */
838 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
839 /* bytes 9-11: most significant bytes of the fields at bytes 4-6 */
840 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
841 /* 24-31: disc bar code */
842 /* 32: disc application code */
843 /* 33: number of OPC tables */
844
845 return 34;
846 }
847
848 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
849 uint8_t *outbuf)
850 {
851 static const int rds_caps_size[5] = {
852 [0] = 2048 + 4,
853 [1] = 4 + 4,
854 [3] = 188 + 4,
855 [4] = 2048 + 4,
856 };
857
858 uint8_t media = r->req.cmd.buf[1];
859 uint8_t layer = r->req.cmd.buf[6];
860 uint8_t format = r->req.cmd.buf[7];
861 int size = -1;
862
863 if (s->qdev.type != TYPE_ROM) {
864 return -1;
865 }
866 if (media != 0) {
867 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
868 return -1;
869 }
870
871 if (format != 0xff) {
872 if (!blk_is_available(s->qdev.conf.blk)) {
873 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
874 return -1;
875 }
876 if (media_is_cd(s)) {
877 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
878 return -1;
879 }
880 if (format >= ARRAY_SIZE(rds_caps_size)) {
881 return -1;
882 }
883 size = rds_caps_size[format];
884 memset(outbuf, 0, size);
885 }
886
887 switch (format) {
888 case 0x00: {
889 /* Physical format information */
890 uint64_t nb_sectors;
891 if (layer != 0) {
892 goto fail;
893 }
894 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
895
896 outbuf[4] = 1; /* DVD-ROM, part version 1 */
897 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
898 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
899 outbuf[7] = 0; /* default densities */
900
901 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
902 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
903 break;
904 }
905
906 case 0x01: /* DVD copyright information, all zeros */
907 break;
908
909 case 0x03: /* BCA information - invalid field for no BCA info */
910 return -1;
911
912 case 0x04: /* DVD disc manufacturing information, all zeros */
913 break;
914
915 case 0xff: { /* List capabilities */
916 int i;
917 size = 4;
918 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
919 if (!rds_caps_size[i]) {
920 continue;
921 }
922 outbuf[size] = i;
923 outbuf[size + 1] = 0x40; /* Not writable, readable */
924 stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
925 size += 4;
926 }
927 break;
928 }
929
930 default:
931 return -1;
932 }
933
934 /* Size of buffer, not including 2 byte size field */
935 stw_be_p(outbuf, size - 2);
936 return size;
937
938 fail:
939 return -1;
940 }
941
942 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
943 {
944 uint8_t event_code, media_status;
945
946 media_status = 0;
947 if (s->tray_open) {
948 media_status = MS_TRAY_OPEN;
949 } else if (blk_is_inserted(s->qdev.conf.blk)) {
950 media_status = MS_MEDIA_PRESENT;
951 }
952
953 /* Event notification descriptor */
954 event_code = MEC_NO_CHANGE;
955 if (media_status != MS_TRAY_OPEN) {
956 if (s->media_event) {
957 event_code = MEC_NEW_MEDIA;
958 s->media_event = false;
959 } else if (s->eject_request) {
960 event_code = MEC_EJECT_REQUESTED;
961 s->eject_request = false;
962 }
963 }
964
965 outbuf[0] = event_code;
966 outbuf[1] = media_status;
967
968 /* These fields are reserved, just clear them. */
969 outbuf[2] = 0;
970 outbuf[3] = 0;
971 return 4;
972 }
973
974 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
975 uint8_t *outbuf)
976 {
977 int size;
978 uint8_t *buf = r->req.cmd.buf;
979 uint8_t notification_class_request = buf[4];
980 if (s->qdev.type != TYPE_ROM) {
981 return -1;
982 }
983 if ((buf[1] & 1) == 0) {
984 /* asynchronous */
985 return -1;
986 }
987
988 size = 4;
989 outbuf[0] = outbuf[1] = 0;
990 outbuf[3] = 1 << GESN_MEDIA; /* supported events */
991 if (notification_class_request & (1 << GESN_MEDIA)) {
992 outbuf[2] = GESN_MEDIA;
993 size += scsi_event_status_media(s, &outbuf[size]);
994 } else {
995 outbuf[2] = 0x80;
996 }
997 stw_be_p(outbuf, size - 4);
998 return size;
999 }
1000
1001 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1002 {
1003 int current;
1004
1005 if (s->qdev.type != TYPE_ROM) {
1006 return -1;
1007 }
1008
1009 if (media_is_dvd(s)) {
1010 current = MMC_PROFILE_DVD_ROM;
1011 } else if (media_is_cd(s)) {
1012 current = MMC_PROFILE_CD_ROM;
1013 } else {
1014 current = MMC_PROFILE_NONE;
1015 }
1016
1017 memset(outbuf, 0, 40);
1018 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1019 stw_be_p(&outbuf[6], current);
1020 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1021 outbuf[10] = 0x03; /* persistent, current */
1022 outbuf[11] = 8; /* two profiles */
1023 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1024 outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1025 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1026 outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1027 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1028 stw_be_p(&outbuf[20], 1);
1029 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1030 outbuf[23] = 8;
1031 stl_be_p(&outbuf[24], 1); /* SCSI */
1032 outbuf[28] = 1; /* DBE = 1, mandatory */
1033 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1034 stw_be_p(&outbuf[32], 3);
1035 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1036 outbuf[35] = 4;
1037 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1038 /* TODO: Random readable, CD read, DVD read, drive serial number,
1039 power management */
1040 return 40;
1041 }
1042
1043 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1044 {
1045 if (s->qdev.type != TYPE_ROM) {
1046 return -1;
1047 }
1048 memset(outbuf, 0, 8);
1049 outbuf[5] = 1; /* CD-ROM */
1050 return 8;
1051 }
1052
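/*
 * Emit a single mode page at *p_outbuf and advance the pointer past it.
 * page_control selects current (0) or changeable (1) values.  Returns
 * the page length including the 2-byte page header, or -1 if the page
 * is not valid for this device type.
 */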
1053 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1054 int page_control)
1055 {
1056 static const int mode_sense_valid[0x3f] = {
1057 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1058 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1059 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1060 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1061 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1062 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1063 };
1064
1065 uint8_t *p = *p_outbuf + 2;
1066 int length;
1067
1068 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1069 return -1;
1070 }
1071
1072 /*
1073 * If Changeable Values are requested, a mask denoting those mode parameters
1074 * that are changeable shall be returned. As we currently don't support
1075 * parameter changes via MODE_SELECT, all bits are returned set to zero.
1076 * The buffer was already memset to zero by the caller of this function.
1077 *
1078 * The offsets here are off by two compared to the descriptions in the
1079 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1080 * but it is done so that offsets are consistent within our implementation
1081 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1082 * 2-byte and 4-byte headers.
1083 */
1084 switch (page) {
1085 case MODE_PAGE_HD_GEOMETRY:
1086 length = 0x16;
1087 if (page_control == 1) { /* Changeable Values */
1088 break;
1089 }
1090 /* if a geometry hint is available, use it */
1091 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1092 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1093 p[2] = s->qdev.conf.cyls & 0xff;
1094 p[3] = s->qdev.conf.heads & 0xff;
1095 /* Write precomp start cylinder, disabled */
1096 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1097 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1098 p[6] = s->qdev.conf.cyls & 0xff;
1099 /* Reduced current start cylinder, disabled */
1100 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1101 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1102 p[9] = s->qdev.conf.cyls & 0xff;
1103 /* Device step rate [ns], 200ns */
1104 p[10] = 0;
1105 p[11] = 200;
1106 /* Landing zone cylinder */
1107 p[12] = 0xff;
1108 p[13] = 0xff;
1109 p[14] = 0xff;
1110 /* Medium rotation rate [rpm], 5400 rpm */
1111 p[18] = (5400 >> 8) & 0xff;
1112 p[19] = 5400 & 0xff;
1113 break;
1114
1115 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1116 length = 0x1e;
1117 if (page_control == 1) { /* Changeable Values */
1118 break;
1119 }
1120 /* Transfer rate [kbit/s], 5Mbit/s */
1121 p[0] = 5000 >> 8;
1122 p[1] = 5000 & 0xff;
1123 /* if a geometry hint is available, use it */
1124 p[2] = s->qdev.conf.heads & 0xff;
1125 p[3] = s->qdev.conf.secs & 0xff;
1126 p[4] = s->qdev.blocksize >> 8;
1127 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1128 p[7] = s->qdev.conf.cyls & 0xff;
1129 /* Write precomp start cylinder, disabled */
1130 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1131 p[9] = s->qdev.conf.cyls & 0xff;
1132 /* Reduced current start cylinder, disabled */
1133 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1134 p[11] = s->qdev.conf.cyls & 0xff;
1135 /* Device step rate [100us], 100us */
1136 p[12] = 0;
1137 p[13] = 1;
1138 /* Device step pulse width [us], 1us */
1139 p[14] = 1;
1140 /* Device head settle delay [100us], 100us */
1141 p[15] = 0;
1142 p[16] = 1;
1143 /* Motor on delay [0.1s], 0.1s */
1144 p[17] = 1;
1145 /* Motor off delay [0.1s], 0.1s */
1146 p[18] = 1;
1147 /* Medium rotation rate [rpm], 5400 rpm */
1148 p[26] = (5400 >> 8) & 0xff;
1149 p[27] = 5400 & 0xff;
1150 break;
1151
1152 case MODE_PAGE_CACHING:
1153 length = 0x12;
1154 if (page_control == 1 || /* Changeable Values */
1155 blk_enable_write_cache(s->qdev.conf.blk)) {
1156 p[0] = 4; /* WCE */
1157 }
1158 break;
1159
1160 case MODE_PAGE_R_W_ERROR:
1161 length = 10;
1162 if (page_control == 1) { /* Changeable Values */
1163 break;
1164 }
1165 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1166 if (s->qdev.type == TYPE_ROM) {
1167 p[1] = 0x20; /* Read Retry Count */
1168 }
1169 break;
1170
1171 case MODE_PAGE_AUDIO_CTL:
1172 length = 14;
1173 break;
1174
1175 case MODE_PAGE_CAPABILITIES:
1176 length = 0x14;
1177 if (page_control == 1) { /* Changeable Values */
1178 break;
1179 }
1180
1181 p[0] = 0x3b; /* CD-R & CD-RW read */
1182 p[1] = 0; /* Writing not supported */
1183 p[2] = 0x7f; /* Audio, composite, digital out,
1184 mode 2 form 1&2, multi session */
1185 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1186 RW corrected, C2 errors, ISRC,
1187 UPC, Bar code */
1188 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1189 /* Locking supported, jumper present, eject, tray */
1190 p[5] = 0; /* no volume & mute control, no
1191 changer */
1192 p[6] = (50 * 176) >> 8; /* 50x read speed */
1193 p[7] = (50 * 176) & 0xff;
1194 p[8] = 2 >> 8; /* Two volume levels */
1195 p[9] = 2 & 0xff;
1196 p[10] = 2048 >> 8; /* 2M buffer */
1197 p[11] = 2048 & 0xff;
1198 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1199 p[13] = (16 * 176) & 0xff;
1200 p[16] = (16 * 176) >> 8; /* 16x write speed */
1201 p[17] = (16 * 176) & 0xff;
1202 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1203 p[19] = (16 * 176) & 0xff;
1204 break;
1205
1206 default:
1207 return -1;
1208 }
1209
1210 assert(length < 256);
1211 (*p_outbuf)[0] = page;
1212 (*p_outbuf)[1] = length;
1213 *p_outbuf += length + 2;
1214 return length + 2;
1215 }
1216
1217 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1218 {
1219 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1220 uint64_t nb_sectors;
1221 bool dbd;
1222 int page, buflen, ret, page_control;
1223 uint8_t *p;
1224 uint8_t dev_specific_param;
1225
1226 dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1227 page = r->req.cmd.buf[2] & 0x3f;
1228 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1229 DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n",
1230 (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control);
1231 memset(outbuf, 0, r->req.cmd.xfer);
1232 p = outbuf;
1233
1234 if (s->qdev.type == TYPE_DISK) {
1235 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1236 if (blk_is_read_only(s->qdev.conf.blk)) {
1237 dev_specific_param |= 0x80; /* Readonly. */
1238 }
1239 } else {
1240 /* MMC prescribes that CD/DVD drives have no block descriptors,
1241 * and defines no device-specific parameter. */
1242 dev_specific_param = 0x00;
1243 dbd = true;
1244 }
1245
1246 if (r->req.cmd.buf[0] == MODE_SENSE) {
1247 p[1] = 0; /* Default media type. */
1248 p[2] = dev_specific_param;
1249 p[3] = 0; /* Block descriptor length. */
1250 p += 4;
1251 } else { /* MODE_SENSE_10 */
1252 p[2] = 0; /* Default media type. */
1253 p[3] = dev_specific_param;
1254 p[6] = p[7] = 0; /* Block descriptor length. */
1255 p += 8;
1256 }
1257
1258 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1259 if (!dbd && nb_sectors) {
1260 if (r->req.cmd.buf[0] == MODE_SENSE) {
1261 outbuf[3] = 8; /* Block descriptor length */
1262 } else { /* MODE_SENSE_10 */
1263 outbuf[7] = 8; /* Block descriptor length */
1264 }
1265 nb_sectors /= (s->qdev.blocksize / 512);
1266 if (nb_sectors > 0xffffff) {
1267 nb_sectors = 0;
1268 }
1269 p[0] = 0; /* media density code */
1270 p[1] = (nb_sectors >> 16) & 0xff;
1271 p[2] = (nb_sectors >> 8) & 0xff;
1272 p[3] = nb_sectors & 0xff;
1273 p[4] = 0; /* reserved */
1274 p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1275 p[6] = s->qdev.blocksize >> 8;
1276 p[7] = 0;
1277 p += 8;
1278 }
1279
1280 if (page_control == 3) {
1281 /* Saved Values */
1282 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1283 return -1;
1284 }
1285
1286 if (page == 0x3f) {
1287 for (page = 0; page <= 0x3e; page++) {
1288 mode_sense_page(s, page, &p, page_control);
1289 }
1290 } else {
1291 ret = mode_sense_page(s, page, &p, page_control);
1292 if (ret == -1) {
1293 return -1;
1294 }
1295 }
1296
1297 buflen = p - outbuf;
1298 /*
1299 * The mode data length field specifies the length in bytes of the
1300 * following data that is available to be transferred. The mode data
1301 * length does not include itself.
1302 */
1303 if (r->req.cmd.buf[0] == MODE_SENSE) {
1304 outbuf[0] = buflen - 1;
1305 } else { /* MODE_SENSE_10 */
1306 outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1307 outbuf[1] = (buflen - 2) & 0xff;
1308 }
1309 return buflen;
1310 }
1311
1312 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1313 {
1314 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1315 int start_track, format, msf, toclen;
1316 uint64_t nb_sectors;
1317
1318 msf = req->cmd.buf[1] & 2;
1319 format = req->cmd.buf[2] & 0xf;
1320 start_track = req->cmd.buf[6];
1321 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1322 DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1);
1323 nb_sectors /= s->qdev.blocksize / 512;
1324 switch (format) {
1325 case 0:
1326 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1327 break;
1328 case 1:
1329 /* multi-session: only a single session defined */
1330 toclen = 12;
1331 memset(outbuf, 0, 12);
1332 outbuf[1] = 0x0a;
1333 outbuf[2] = 0x01;
1334 outbuf[3] = 0x01;
1335 break;
1336 case 2:
1337 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1338 break;
1339 default:
1340 return -1;
1341 }
1342 return toclen;
1343 }
1344
1345 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1346 {
1347 SCSIRequest *req = &r->req;
1348 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1349 bool start = req->cmd.buf[4] & 1;
1350 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1351 int pwrcnd = req->cmd.buf[4] & 0xf0;
1352
1353 if (pwrcnd) {
1354 /* eject/load only happens for power condition == 0 */
1355 return 0;
1356 }
1357
1358 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1359 if (!start && !s->tray_open && s->tray_locked) {
1360 scsi_check_condition(r,
1361 blk_is_inserted(s->qdev.conf.blk)
1362 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1363 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1364 return -1;
1365 }
1366
1367 if (s->tray_open != !start) {
1368 blk_eject(s->qdev.conf.blk, !start);
1369 s->tray_open = !start;
1370 }
1371 }
1372 return 0;
1373 }
1374
1375 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1376 {
1377 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1378 int buflen = r->iov.iov_len;
1379
1380 if (buflen) {
1381 DPRINTF("Read buf_len=%d\n", buflen);
1382 r->iov.iov_len = 0;
1383 r->started = true;
1384 scsi_req_data(&r->req, buflen);
1385 return;
1386 }
1387
1388 /* This also clears the sense buffer for REQUEST SENSE. */
1389 scsi_req_complete(&r->req, GOOD);
1390 }
1391
1392 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1393 uint8_t *inbuf, int inlen)
1394 {
1395 uint8_t mode_current[SCSI_MAX_MODE_LEN];
1396 uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1397 uint8_t *p;
1398 int len, expected_len, changeable_len, i;
1399
1400 /* The input buffer does not include the page header, so it is
1401 * off by 2 bytes.
1402 */
1403 expected_len = inlen + 2;
1404 if (expected_len > SCSI_MAX_MODE_LEN) {
1405 return -1;
1406 }
1407
1408 p = mode_current;
1409 memset(mode_current, 0, inlen + 2);
1410 len = mode_sense_page(s, page, &p, 0);
1411 if (len < 0 || len != expected_len) {
1412 return -1;
1413 }
1414
1415 p = mode_changeable;
1416 memset(mode_changeable, 0, inlen + 2);
1417 changeable_len = mode_sense_page(s, page, &p, 1);
1418 assert(changeable_len == len);
1419
1420 /* Check that unchangeable bits are the same as what MODE SENSE
1421 * would return.
1422 */
1423 for (i = 2; i < len; i++) {
1424 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1425 return -1;
1426 }
1427 }
1428 return 0;
1429 }
1430
1431 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1432 {
1433 switch (page) {
1434 case MODE_PAGE_CACHING:
1435 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1436 break;
1437
1438 default:
1439 break;
1440 }
1441 }
1442
1443 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1444 {
1445 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1446
1447 while (len > 0) {
1448 int page, subpage, page_len;
1449
1450 /* Parse both possible formats for the mode page headers. */
1451 page = p[0] & 0x3f;
1452 if (p[0] & 0x40) {
1453 if (len < 4) {
1454 goto invalid_param_len;
1455 }
1456 subpage = p[1];
1457 page_len = lduw_be_p(&p[2]);
1458 p += 4;
1459 len -= 4;
1460 } else {
1461 if (len < 2) {
1462 goto invalid_param_len;
1463 }
1464 subpage = 0;
1465 page_len = p[1];
1466 p += 2;
1467 len -= 2;
1468 }
1469
1470 if (subpage) {
1471 goto invalid_param;
1472 }
1473 if (page_len > len) {
1474 goto invalid_param_len;
1475 }
1476
1477 if (!change) {
1478 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1479 goto invalid_param;
1480 }
1481 } else {
1482 scsi_disk_apply_mode_select(s, page, p);
1483 }
1484
1485 p += page_len;
1486 len -= page_len;
1487 }
1488 return 0;
1489
1490 invalid_param:
1491 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1492 return -1;
1493
1494 invalid_param_len:
1495 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1496 return -1;
1497 }
1498
1499 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1500 {
1501 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1502 uint8_t *p = inbuf;
1503 int cmd = r->req.cmd.buf[0];
1504 int len = r->req.cmd.xfer;
1505 int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1506 int bd_len;
1507 int pass;
1508
1509 /* We only support PF=1, SP=0. */
1510 if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1511 goto invalid_field;
1512 }
1513
1514 if (len < hdr_len) {
1515 goto invalid_param_len;
1516 }
1517
1518 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1519 len -= hdr_len;
1520 p += hdr_len;
1521 if (len < bd_len) {
1522 goto invalid_param_len;
1523 }
1524 if (bd_len != 0 && bd_len != 8) {
1525 goto invalid_param;
1526 }
1527
1528 len -= bd_len;
1529 p += bd_len;
1530
1531 /* Ensure no change is made if there is an error! */
1532 for (pass = 0; pass < 2; pass++) {
1533 if (mode_select_pages(r, p, len, pass == 1) < 0) {
1534 assert(pass == 0);
1535 return;
1536 }
1537 }
1538 if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1539 /* The request is used as the AIO opaque value, so add a ref. */
1540 scsi_req_ref(&r->req);
1541 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1542 BLOCK_ACCT_FLUSH);
1543 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1544 return;
1545 }
1546
1547 scsi_req_complete(&r->req, GOOD);
1548 return;
1549
1550 invalid_param:
1551 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1552 return;
1553
1554 invalid_param_len:
1555 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1556 return;
1557
1558 invalid_field:
1559 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1560 }
1561
1562 static inline bool check_lba_range(SCSIDiskState *s,
1563 uint64_t sector_num, uint32_t nb_sectors)
1564 {
1565 /*
1566 * The first line tests that no overflow happens when computing the last
1567 * sector. The second line tests that the last accessed sector is in
1568 * range.
1569 *
1570 * Careful, the computations should not underflow for nb_sectors == 0,
1571 * and a 0-block read to the first LBA beyond the end of device is
1572 * valid.
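 * For example, with max_lba == 99 a zero-block request at LBA 100 is
 * accepted (100 + 0 <= 100), while a one-block request at LBA 100 is
 * rejected (100 + 1 > 100).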
1573 */
1574 return (sector_num <= sector_num + nb_sectors &&
1575 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1576 }
1577
1578 typedef struct UnmapCBData {
1579 SCSIDiskReq *r;
1580 uint8_t *inbuf;
1581 int count;
1582 } UnmapCBData;
1583
1584 static void scsi_unmap_complete(void *opaque, int ret);
1585
1586 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1587 {
1588 SCSIDiskReq *r = data->r;
1589 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1590 uint64_t sector_num;
1591 uint32_t nb_sectors;
1592
1593 assert(r->req.aiocb == NULL);
1594 if (scsi_disk_req_check_error(r, ret, false)) {
1595 goto done;
1596 }
1597
1598 if (data->count > 0) {
1599 sector_num = ldq_be_p(&data->inbuf[0]);
1600 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
1601 if (!check_lba_range(s, sector_num, nb_sectors)) {
1602 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1603 goto done;
1604 }
1605
1606 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
1607 sector_num * s->qdev.blocksize,
1608 nb_sectors * s->qdev.blocksize,
1609 scsi_unmap_complete, data);
1610 data->count--;
1611 data->inbuf += 16;
1612 return;
1613 }
1614
1615 scsi_req_complete(&r->req, GOOD);
1616
1617 done:
1618 scsi_req_unref(&r->req);
1619 g_free(data);
1620 }
1621
1622 static void scsi_unmap_complete(void *opaque, int ret)
1623 {
1624 UnmapCBData *data = opaque;
1625 SCSIDiskReq *r = data->r;
1626
1627 assert(r->req.aiocb != NULL);
1628 r->req.aiocb = NULL;
1629
1630 scsi_unmap_complete_noio(data, ret);
1631 }
1632
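/*
 * Parse the UNMAP parameter list: after the 8-byte header each 16-byte
 * descriptor holds a starting LBA and a block count.  The ranges are
 * discarded one at a time with blk_aio_pdiscard(), chained through
 * scsi_unmap_complete_noio().
 */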
1633 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1634 {
1635 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1636 uint8_t *p = inbuf;
1637 int len = r->req.cmd.xfer;
1638 UnmapCBData *data;
1639
1640 /* Reject ANCHOR=1. */
1641 if (r->req.cmd.buf[1] & 0x1) {
1642 goto invalid_field;
1643 }
1644
1645 if (len < 8) {
1646 goto invalid_param_len;
1647 }
1648 if (len < lduw_be_p(&p[0]) + 2) {
1649 goto invalid_param_len;
1650 }
1651 if (len < lduw_be_p(&p[2]) + 8) {
1652 goto invalid_param_len;
1653 }
1654 if (lduw_be_p(&p[2]) & 15) {
1655 goto invalid_param_len;
1656 }
1657
1658 if (blk_is_read_only(s->qdev.conf.blk)) {
1659 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1660 return;
1661 }
1662
1663 data = g_new0(UnmapCBData, 1);
1664 data->r = r;
1665 data->inbuf = &p[8];
1666 data->count = lduw_be_p(&p[2]) >> 4;
1667
1668 /* The matching unref is in scsi_unmap_complete, before data is freed. */
1669 scsi_req_ref(&r->req);
1670 scsi_unmap_complete_noio(data, 0);
1671 return;
1672
1673 invalid_param_len:
1674 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1675 return;
1676
1677 invalid_field:
1678 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1679 }
1680
1681 typedef struct WriteSameCBData {
1682 SCSIDiskReq *r;
1683 int64_t sector;
1684 int nb_sectors;
1685 QEMUIOVector qiov;
1686 struct iovec iov;
1687 } WriteSameCBData;
1688
1689 static void scsi_write_same_complete(void *opaque, int ret)
1690 {
1691 WriteSameCBData *data = opaque;
1692 SCSIDiskReq *r = data->r;
1693 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1694
1695 assert(r->req.aiocb != NULL);
1696 r->req.aiocb = NULL;
1697 if (scsi_disk_req_check_error(r, ret, true)) {
1698 goto done;
1699 }
1700
1701 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1702
1703 data->nb_sectors -= data->iov.iov_len / 512;
1704 data->sector += data->iov.iov_len / 512;
1705 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
1706 if (data->iov.iov_len) {
1707 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1708 data->iov.iov_len, BLOCK_ACCT_WRITE);
1709 /* Reinitialize qiov to handle an unaligned WRITE SAME request,
1710 * where the final chunk may need a smaller size */
1711 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1712 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1713 data->sector << BDRV_SECTOR_BITS,
1714 &data->qiov, 0,
1715 scsi_write_same_complete, data);
1716 return;
1717 }
1718
1719 scsi_req_complete(&r->req, GOOD);
1720
1721 done:
1722 scsi_req_unref(&r->req);
1723 qemu_vfree(data->iov.iov_base);
1724 g_free(data);
1725 }
1726
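/*
 * WRITE SAME with an all-zero payload is turned into a (possibly
 * unmapping) blk_aio_pwrite_zeroes(); any other payload is replicated
 * into a bounce buffer of at most SCSI_WRITE_SAME_MAX bytes and written
 * out chunk by chunk via scsi_write_same_complete().
 */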
1727 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1728 {
1729 SCSIRequest *req = &r->req;
1730 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1731 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1732 WriteSameCBData *data;
1733 uint8_t *buf;
1734 int i;
1735
1736 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1737 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1738 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1739 return;
1740 }
1741
1742 if (blk_is_read_only(s->qdev.conf.blk)) {
1743 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1744 return;
1745 }
1746 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1747 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1748 return;
1749 }
1750
1751 if (buffer_is_zero(inbuf, s->qdev.blocksize)) {
1752 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1753
1754 /* The request is used as the AIO opaque value, so add a ref. */
1755 scsi_req_ref(&r->req);
1756 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1757 nb_sectors * s->qdev.blocksize,
1758 BLOCK_ACCT_WRITE);
1759 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1760 r->req.cmd.lba * s->qdev.blocksize,
1761 nb_sectors * s->qdev.blocksize,
1762 flags, scsi_aio_complete, r);
1763 return;
1764 }
1765
1766 data = g_new0(WriteSameCBData, 1);
1767 data->r = r;
1768 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
1769 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
1770 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
1771 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1772 data->iov.iov_len);
1773 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1774
1775 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
1776 memcpy(&buf[i], inbuf, s->qdev.blocksize);
1777 }
1778
1779 scsi_req_ref(&r->req);
1780 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1781 data->iov.iov_len, BLOCK_ACCT_WRITE);
1782 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1783 data->sector << BDRV_SECTOR_BITS,
1784 &data->qiov, 0,
1785 scsi_write_same_complete, data);
1786 }
1787
1788 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1789 {
1790 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1791
1792 if (r->iov.iov_len) {
1793 int buflen = r->iov.iov_len;
1794 DPRINTF("Write buf_len=%d\n", buflen);
1795 r->iov.iov_len = 0;
1796 scsi_req_data(&r->req, buflen);
1797 return;
1798 }
1799
1800 switch (req->cmd.buf[0]) {
1801 case MODE_SELECT:
1802 case MODE_SELECT_10:
1803 /* This also clears the sense buffer for REQUEST SENSE. */
1804 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1805 break;
1806
1807 case UNMAP:
1808 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1809 break;
1810
1811 case VERIFY_10:
1812 case VERIFY_12:
1813 case VERIFY_16:
1814 if (r->req.status == -1) {
1815 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1816 }
1817 break;
1818
1819 case WRITE_SAME_10:
1820 case WRITE_SAME_16:
1821 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1822 break;
1823
1824 default:
1825 abort();
1826 }
1827 }
1828
1829 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1830 {
1831 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1832 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1833 uint64_t nb_sectors;
1834 uint8_t *outbuf;
1835 int buflen;
1836
1837 switch (req->cmd.buf[0]) {
1838 case INQUIRY:
1839 case MODE_SENSE:
1840 case MODE_SENSE_10:
1841 case RESERVE:
1842 case RESERVE_10:
1843 case RELEASE:
1844 case RELEASE_10:
1845 case START_STOP:
1846 case ALLOW_MEDIUM_REMOVAL:
1847 case GET_CONFIGURATION:
1848 case GET_EVENT_STATUS_NOTIFICATION:
1849 case MECHANISM_STATUS:
1850 case REQUEST_SENSE:
1851 break;
1852
1853 default:
1854 if (!blk_is_available(s->qdev.conf.blk)) {
1855 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1856 return 0;
1857 }
1858 break;
1859 }
1860
1861 /*
1862 * FIXME: we shouldn't return anything bigger than 4k, but the code
1863 * requires the buffer to be as big as req->cmd.xfer in several
1864 * places. So, do not allow CDBs with a very large ALLOCATION
1865 * LENGTH. The real fix would be to modify scsi_read_data and
1866 * dma_buf_read, so that they return data beyond the buflen
1867 * as all zeros.
1868 */
1869 if (req->cmd.xfer > 65536) {
1870 goto illegal_request;
1871 }
1872 r->buflen = MAX(4096, req->cmd.xfer);
1873
1874 if (!r->iov.iov_base) {
1875 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
1876 }
1877
1878 buflen = req->cmd.xfer;
1879 outbuf = r->iov.iov_base;
1880 memset(outbuf, 0, r->buflen);
1881 switch (req->cmd.buf[0]) {
1882 case TEST_UNIT_READY:
1883 assert(blk_is_available(s->qdev.conf.blk));
1884 break;
1885 case INQUIRY:
1886 buflen = scsi_disk_emulate_inquiry(req, outbuf);
1887 if (buflen < 0) {
1888 goto illegal_request;
1889 }
1890 break;
1891 case MODE_SENSE:
1892 case MODE_SENSE_10:
1893 buflen = scsi_disk_emulate_mode_sense(r, outbuf);
1894 if (buflen < 0) {
1895 goto illegal_request;
1896 }
1897 break;
1898 case READ_TOC:
1899 buflen = scsi_disk_emulate_read_toc(req, outbuf);
1900 if (buflen < 0) {
1901 goto illegal_request;
1902 }
1903 break;
1904 case RESERVE:
1905 if (req->cmd.buf[1] & 1) {
1906 goto illegal_request;
1907 }
1908 break;
1909 case RESERVE_10:
1910 if (req->cmd.buf[1] & 3) {
1911 goto illegal_request;
1912 }
1913 break;
1914 case RELEASE:
1915 if (req->cmd.buf[1] & 1) {
1916 goto illegal_request;
1917 }
1918 break;
1919 case RELEASE_10:
1920 if (req->cmd.buf[1] & 3) {
1921 goto illegal_request;
1922 }
1923 break;
1924 case START_STOP:
1925 if (scsi_disk_emulate_start_stop(r) < 0) {
1926 return 0;
1927 }
1928 break;
1929 case ALLOW_MEDIUM_REMOVAL:
1930 s->tray_locked = req->cmd.buf[4] & 1;
1931 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
1932 break;
1933 case READ_CAPACITY_10:
1934 /* The normal LEN field for this command is zero. */
1935 memset(outbuf, 0, 8);
1936 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1937 if (!nb_sectors) {
1938 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
1939 return 0;
1940 }
1941 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
1942 goto illegal_request;
1943 }
1944 nb_sectors /= s->qdev.blocksize / 512;
1945 /* Returned value is the address of the last sector. */
1946 nb_sectors--;
1947 /* Remember the new size for read/write sanity checking. */
1948 s->qdev.max_lba = nb_sectors;
1949 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
1950 if (nb_sectors > UINT32_MAX) {
1951 nb_sectors = UINT32_MAX;
1952 }
1953 outbuf[0] = (nb_sectors >> 24) & 0xff;
1954 outbuf[1] = (nb_sectors >> 16) & 0xff;
1955 outbuf[2] = (nb_sectors >> 8) & 0xff;
1956 outbuf[3] = nb_sectors & 0xff;
1957 outbuf[4] = 0;
1958 outbuf[5] = 0;
1959 outbuf[6] = s->qdev.blocksize >> 8;
1960 outbuf[7] = 0;
1961 break;
1962 case REQUEST_SENSE:
1963 /* Just return "NO SENSE". */
1964 buflen = scsi_build_sense(NULL, 0, outbuf, r->buflen,
1965 (req->cmd.buf[1] & 1) == 0);
1966 if (buflen < 0) {
1967 goto illegal_request;
1968 }
1969 break;
1970 case MECHANISM_STATUS:
1971 buflen = scsi_emulate_mechanism_status(s, outbuf);
1972 if (buflen < 0) {
1973 goto illegal_request;
1974 }
1975 break;
1976 case GET_CONFIGURATION:
1977 buflen = scsi_get_configuration(s, outbuf);
1978 if (buflen < 0) {
1979 goto illegal_request;
1980 }
1981 break;
1982 case GET_EVENT_STATUS_NOTIFICATION:
1983 buflen = scsi_get_event_status_notification(s, r, outbuf);
1984 if (buflen < 0) {
1985 goto illegal_request;
1986 }
1987 break;
1988 case READ_DISC_INFORMATION:
1989 buflen = scsi_read_disc_information(s, r, outbuf);
1990 if (buflen < 0) {
1991 goto illegal_request;
1992 }
1993 break;
1994 case READ_DVD_STRUCTURE:
1995 buflen = scsi_read_dvd_structure(s, r, outbuf);
1996 if (buflen < 0) {
1997 goto illegal_request;
1998 }
1999 break;
2000 case SERVICE_ACTION_IN_16:
2001 /* Service Action In subcommands. */
2002 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2003 DPRINTF("SAI READ CAPACITY(16)\n");
2004 memset(outbuf, 0, req->cmd.xfer);
2005 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2006 if (!nb_sectors) {
2007 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2008 return 0;
2009 }
2010 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2011 goto illegal_request;
2012 }
2013 nb_sectors /= s->qdev.blocksize / 512;
2014 /* Returned value is the address of the last sector. */
2015 nb_sectors--;
2016 /* Remember the new size for read/write sanity checking. */
2017 s->qdev.max_lba = nb_sectors;
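/* READ CAPACITY(16) parameter data, filled in below: bytes 0-7 hold the
 * last LBA and bytes 8-11 the logical block length in bytes (big-endian),
 * byte 13 holds the logical-blocks-per-physical-block exponent, and bit 7
 * of byte 14 is the thin-provisioning (TPE) flag used to advertise
 * discard support. */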
2018 outbuf[0] = (nb_sectors >> 56) & 0xff;
2019 outbuf[1] = (nb_sectors >> 48) & 0xff;
2020 outbuf[2] = (nb_sectors >> 40) & 0xff;
2021 outbuf[3] = (nb_sectors >> 32) & 0xff;
2022 outbuf[4] = (nb_sectors >> 24) & 0xff;
2023 outbuf[5] = (nb_sectors >> 16) & 0xff;
2024 outbuf[6] = (nb_sectors >> 8) & 0xff;
2025 outbuf[7] = nb_sectors & 0xff;
2026 outbuf[8] = 0;
2027 outbuf[9] = 0;
2028 outbuf[10] = s->qdev.blocksize >> 8;
2029 outbuf[11] = 0;
2030 outbuf[12] = 0;
2031 outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2032
2033 /* set TPE bit if the format supports discard */
2034 if (s->qdev.conf.discard_granularity) {
2035 outbuf[14] = 0x80;
2036 }
2037
2038 /* Protection, exponent and lowest lba field left blank. */
2039 break;
2040 }
2041 DPRINTF("Unsupported Service Action In\n");
2042 goto illegal_request;
2043 case SYNCHRONIZE_CACHE:
2044 /* The request is used as the AIO opaque value, so add a ref. */
2045 scsi_req_ref(&r->req);
2046 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2047 BLOCK_ACCT_FLUSH);
2048 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2049 return 0;
2050 case SEEK_10:
2051 DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
2052 if (r->req.cmd.lba > s->qdev.max_lba) {
2053 goto illegal_lba;
2054 }
2055 break;
2056 case MODE_SELECT:
2057 DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2058 break;
2059 case MODE_SELECT_10:
2060 DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2061 break;
2062 case UNMAP:
2063 DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2064 break;
2065 case VERIFY_10:
2066 case VERIFY_12:
2067 case VERIFY_16:
2068 DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
2069 if (req->cmd.buf[1] & 6) {
2070 goto illegal_request;
2071 }
2072 break;
2073 case WRITE_SAME_10:
2074 case WRITE_SAME_16:
2075 DPRINTF("WRITE SAME %d (len %lu)\n",
2076 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
2077 (unsigned long)r->req.cmd.xfer);
2078 break;
2079 default:
2080 DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
2081 scsi_command_name(buf[0]));
2082 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2083 return 0;
2084 }
2085 assert(!r->req.aiocb);
2086 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2087 if (r->iov.iov_len == 0) {
2088 scsi_req_complete(&r->req, GOOD);
2089 }
2090 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2091 assert(r->iov.iov_len == req->cmd.xfer);
2092 return -r->iov.iov_len;
2093 } else {
2094 return r->iov.iov_len;
2095 }
2096
2097 illegal_request:
2098 if (r->req.status == -1) {
2099 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2100 }
2101 return 0;
2102
2103 illegal_lba:
2104 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2105 return 0;
2106 }
2107
2108 /* Execute a scsi command. Returns the length of the data expected by the
2109 command. This will be positive for data transfers from the device
2110 (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
2111 and zero if the command does not transfer any data. */
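/* For example, a READ(10) of 8 blocks from a 512-byte-block disk returns
 * 8 * 512 = 4096, while the matching WRITE(10) returns -4096. */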
2112
2113 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2114 {
2115 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2116 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2117 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2118 uint32_t len;
2119 uint8_t command;
2120
2121 command = buf[0];
2122
2123 if (!blk_is_available(s->qdev.conf.blk)) {
2124 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2125 return 0;
2126 }
2127
2128 len = scsi_data_cdb_xfer(r->req.cmd.buf);
2129 switch (command) {
2130 case READ_6:
2131 case READ_10:
2132 case READ_12:
2133 case READ_16:
2134 DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
2135 if (r->req.cmd.buf[1] & 0xe0) {
2136 goto illegal_request;
2137 }
2138 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2139 goto illegal_lba;
2140 }
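/* Convert from device logical blocks to the 512-byte units that
 * SCSIDiskReq uses for sector/sector_count; e.g. with a 4096-byte logical
 * block size, LBA 10 becomes qemu sector 80 and one block counts as 8. */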
2141 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2142 r->sector_count = len * (s->qdev.blocksize / 512);
2143 break;
2144 case WRITE_6:
2145 case WRITE_10:
2146 case WRITE_12:
2147 case WRITE_16:
2148 case WRITE_VERIFY_10:
2149 case WRITE_VERIFY_12:
2150 case WRITE_VERIFY_16:
2151 if (blk_is_read_only(s->qdev.conf.blk)) {
2152 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
2153 return 0;
2154 }
2155 DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
2156 (command & 0xe) == 0xe ? "And Verify " : "",
2157 r->req.cmd.lba, len);
2158 if (r->req.cmd.buf[1] & 0xe0) {
2159 goto illegal_request;
2160 }
2161 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2162 goto illegal_lba;
2163 }
2164 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2165 r->sector_count = len * (s->qdev.blocksize / 512);
2166 break;
2167 default:
2168 abort();
2169 illegal_request:
2170 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2171 return 0;
2172 illegal_lba:
2173 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2174 return 0;
2175 }
2176 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
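/* When the class hook asks for FUA emulation (scsi_is_cmd_fua for
 * scsi-hd/scsi-cd; never for scsi-block, see scsi_block_no_fua below),
 * the write completion path is expected to follow the write with an
 * explicit flush so that Force Unit Access semantics still hold. */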
2177 if (r->sector_count == 0) {
2178 scsi_req_complete(&r->req, GOOD);
2179 }
2180 assert(r->iov.iov_len == 0);
2181 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2182 return -r->sector_count * 512;
2183 } else {
2184 return r->sector_count * 512;
2185 }
2186 }
2187
2188 static void scsi_disk_reset(DeviceState *dev)
2189 {
2190 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2191 uint64_t nb_sectors;
2192
2193 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2194
2195 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2196 nb_sectors /= s->qdev.blocksize / 512;
2197 if (nb_sectors) {
2198 nb_sectors--;
2199 }
2200 s->qdev.max_lba = nb_sectors;
2201 /* reset tray statuses */
2202 s->tray_locked = 0;
2203 s->tray_open = 0;
2204 }
2205
2206 static void scsi_disk_resize_cb(void *opaque)
2207 {
2208 SCSIDiskState *s = opaque;
2209
2210 /* SPC lists this sense code as available only for
2211 * direct-access devices.
2212 */
2213 if (s->qdev.type == TYPE_DISK) {
2214 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2215 }
2216 }
2217
2218 static void scsi_cd_change_media_cb(void *opaque, bool load)
2219 {
2220 SCSIDiskState *s = opaque;
2221
2222 /*
2223 * When a CD gets changed, we have to report an ejected state and
2224 * then a loaded state to guests so that they detect tray
2225 * open/close and media change events. Guests that do not use
2226 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2227 * states rely on this behavior.
2228 *
2229 * media_changed governs the state machine used for unit attention
2230 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2231 */
2232 s->media_changed = load;
2233 s->tray_open = !load;
2234 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2235 s->media_event = true;
2236 s->eject_request = false;
2237 }
2238
2239 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2240 {
2241 SCSIDiskState *s = opaque;
2242
2243 s->eject_request = true;
2244 if (force) {
2245 s->tray_locked = false;
2246 }
2247 }
2248
2249 static bool scsi_cd_is_tray_open(void *opaque)
2250 {
2251 return ((SCSIDiskState *)opaque)->tray_open;
2252 }
2253
2254 static bool scsi_cd_is_medium_locked(void *opaque)
2255 {
2256 return ((SCSIDiskState *)opaque)->tray_locked;
2257 }
2258
2259 static const BlockDevOps scsi_disk_removable_block_ops = {
2260 .change_media_cb = scsi_cd_change_media_cb,
2261 .eject_request_cb = scsi_cd_eject_request_cb,
2262 .is_tray_open = scsi_cd_is_tray_open,
2263 .is_medium_locked = scsi_cd_is_medium_locked,
2264
2265 .resize_cb = scsi_disk_resize_cb,
2266 };
2267
2268 static const BlockDevOps scsi_disk_block_ops = {
2269 .resize_cb = scsi_disk_resize_cb,
2270 };
2271
2272 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2273 {
2274 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2275 if (s->media_changed) {
2276 s->media_changed = false;
2277 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2278 }
2279 }
2280
2281 static void scsi_realize(SCSIDevice *dev, Error **errp)
2282 {
2283 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2284 Error *err = NULL;
2285
2286 if (!s->qdev.conf.blk) {
2287 error_setg(errp, "drive property not set");
2288 return;
2289 }
2290
2291 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2292 !blk_is_inserted(s->qdev.conf.blk)) {
2293 error_setg(errp, "Device needs media, but drive is empty");
2294 return;
2295 }
2296
2297 blkconf_serial(&s->qdev.conf, &s->serial);
2298 blkconf_blocksizes(&s->qdev.conf);
2299 if (dev->type == TYPE_DISK) {
2300 blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, &err);
2301 if (err) {
2302 error_propagate(errp, err);
2303 return;
2304 }
2305 }
2306 blkconf_apply_backend_options(&dev->conf);
2307
2308 if (s->qdev.conf.discard_granularity == -1) {
2309 s->qdev.conf.discard_granularity =
2310 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
2311 }
2312
2313 if (!s->version) {
2314 s->version = g_strdup(qemu_hw_version());
2315 }
2316 if (!s->vendor) {
2317 s->vendor = g_strdup("QEMU");
2318 }
2319
2320 if (blk_is_sg(s->qdev.conf.blk)) {
2321 error_setg(errp, "unwanted /dev/sg*");
2322 return;
2323 }
2324
2325 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2326 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
2327 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
2328 } else {
2329 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
2330 }
2331 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);
2332
2333 blk_iostatus_enable(s->qdev.conf.blk);
2334 }
2335
2336 static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
2337 {
2338 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2339 /* This can happen for devices without a drive. The error message for the
2340 * missing backend will be issued in scsi_realize.
2341 */
2342 if (s->qdev.conf.blk) {
2343 blkconf_blocksizes(&s->qdev.conf);
2344 }
2345 s->qdev.blocksize = s->qdev.conf.logical_block_size;
2346 s->qdev.type = TYPE_DISK;
2347 if (!s->product) {
2348 s->product = g_strdup("QEMU HARDDISK");
2349 }
2350 scsi_realize(&s->qdev, errp);
2351 }
2352
2353 static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
2354 {
2355 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2356
2357 if (!dev->conf.blk) {
2358 dev->conf.blk = blk_new();
2359 }
2360
2361 s->qdev.blocksize = 2048;
2362 s->qdev.type = TYPE_ROM;
2363 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2364 if (!s->product) {
2365 s->product = g_strdup("QEMU CD-ROM");
2366 }
2367 scsi_realize(&s->qdev, errp);
2368 }
2369
2370 static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
2371 {
2372 DriveInfo *dinfo;
2373 Error *local_err = NULL;
2374
2375 if (!dev->conf.blk) {
2376 scsi_realize(dev, &local_err);
2377 assert(local_err);
2378 error_propagate(errp, local_err);
2379 return;
2380 }
2381
2382 dinfo = blk_legacy_dinfo(dev->conf.blk);
2383 if (dinfo && dinfo->media_cd) {
2384 scsi_cd_realize(dev, errp);
2385 } else {
2386 scsi_hd_realize(dev, errp);
2387 }
2388 }
2389
2390 static const SCSIReqOps scsi_disk_emulate_reqops = {
2391 .size = sizeof(SCSIDiskReq),
2392 .free_req = scsi_free_request,
2393 .send_command = scsi_disk_emulate_command,
2394 .read_data = scsi_disk_emulate_read_data,
2395 .write_data = scsi_disk_emulate_write_data,
2396 .get_buf = scsi_get_buf,
2397 };
2398
2399 static const SCSIReqOps scsi_disk_dma_reqops = {
2400 .size = sizeof(SCSIDiskReq),
2401 .free_req = scsi_free_request,
2402 .send_command = scsi_disk_dma_command,
2403 .read_data = scsi_read_data,
2404 .write_data = scsi_write_data,
2405 .get_buf = scsi_get_buf,
2406 .load_request = scsi_disk_load_request,
2407 .save_request = scsi_disk_save_request,
2408 };
2409
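/* Per-opcode dispatch: entries using scsi_disk_emulate_reqops are answered
 * from the bounce buffer filled in scsi_disk_emulate_command, while the
 * READ and WRITE families stream guest data through the DMA helpers.
 * Opcodes missing from the table fall back to scsi_disk_emulate_reqops as
 * well (see scsi_new_request below). */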
2410 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
2411 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
2412 [INQUIRY] = &scsi_disk_emulate_reqops,
2413 [MODE_SENSE] = &scsi_disk_emulate_reqops,
2414 [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
2415 [START_STOP] = &scsi_disk_emulate_reqops,
2416 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
2417 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
2418 [READ_TOC] = &scsi_disk_emulate_reqops,
2419 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
2420 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
2421 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
2422 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
2423 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
2424 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
2425 [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
2426 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
2427 [SEEK_10] = &scsi_disk_emulate_reqops,
2428 [MODE_SELECT] = &scsi_disk_emulate_reqops,
2429 [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
2430 [UNMAP] = &scsi_disk_emulate_reqops,
2431 [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
2432 [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
2433 [VERIFY_10] = &scsi_disk_emulate_reqops,
2434 [VERIFY_12] = &scsi_disk_emulate_reqops,
2435 [VERIFY_16] = &scsi_disk_emulate_reqops,
2436
2437 [READ_6] = &scsi_disk_dma_reqops,
2438 [READ_10] = &scsi_disk_dma_reqops,
2439 [READ_12] = &scsi_disk_dma_reqops,
2440 [READ_16] = &scsi_disk_dma_reqops,
2441 [WRITE_6] = &scsi_disk_dma_reqops,
2442 [WRITE_10] = &scsi_disk_dma_reqops,
2443 [WRITE_12] = &scsi_disk_dma_reqops,
2444 [WRITE_16] = &scsi_disk_dma_reqops,
2445 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
2446 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
2447 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
2448 };
2449
2450 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2451 uint8_t *buf, void *hba_private)
2452 {
2453 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2454 SCSIRequest *req;
2455 const SCSIReqOps *ops;
2456 uint8_t command;
2457
2458 command = buf[0];
2459 ops = scsi_disk_reqops_dispatch[command];
2460 if (!ops) {
2461 ops = &scsi_disk_emulate_reqops;
2462 }
2463 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2464
2465 #ifdef DEBUG_SCSI
2466 DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
2467 {
2468 int i;
2469 for (i = 1; i < scsi_cdb_length(buf); i++) {
2470 printf(" 0x%02x", buf[i]);
2471 }
2472 printf("\n");
2473 }
2474 #endif
2475
2476 return req;
2477 }
2478
2479 #ifdef __linux__
2480 static int get_device_type(SCSIDiskState *s)
2481 {
2482 uint8_t cmd[16];
2483 uint8_t buf[36];
2484 uint8_t sensebuf[8];
2485 sg_io_hdr_t io_header;
2486 int ret;
2487
2488 memset(cmd, 0, sizeof(cmd));
2489 memset(buf, 0, sizeof(buf));
2490 cmd[0] = INQUIRY;
2491 cmd[4] = sizeof(buf);
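/* This is a standard 6-byte INQUIRY CDB: opcode 0x12 with the ALLOCATION
 * LENGTH field (byte 4) set to the 36 bytes of standard INQUIRY data. */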
2492
2493 memset(&io_header, 0, sizeof(io_header));
2494 io_header.interface_id = 'S';
2495 io_header.dxfer_direction = SG_DXFER_FROM_DEV;
2496 io_header.dxfer_len = sizeof(buf);
2497 io_header.dxferp = buf;
2498 io_header.cmdp = cmd;
2499 io_header.cmd_len = sizeof(cmd);
2500 io_header.mx_sb_len = sizeof(sensebuf);
2501 io_header.sbp = sensebuf;
2502 io_header.timeout = 6000; /* XXX */
2503
2504 ret = blk_ioctl(s->qdev.conf.blk, SG_IO, &io_header);
2505 if (ret < 0 || io_header.driver_status || io_header.host_status) {
2506 return -1;
2507 }
2508 s->qdev.type = buf[0];
2509 if (buf[1] & 0x80) {
2510 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2511 }
2512 return 0;
2513 }
2514
2515 static void scsi_block_realize(SCSIDevice *dev, Error **errp)
2516 {
2517 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2518 int sg_version;
2519 int rc;
2520
2521 if (!s->qdev.conf.blk) {
2522 error_setg(errp, "drive property not set");
2523 return;
2524 }
2525
2526 /* Check that we are using a driver that manages SG_IO (version 3 and later). */
2527 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2528 if (rc < 0) {
2529 error_setg(errp, "cannot get SG_IO version number: %s. "
2530 "Is this a SCSI device?",
2531 strerror(-rc));
2532 return;
2533 }
2534 if (sg_version < 30000) {
2535 error_setg(errp, "scsi generic interface too old");
2536 return;
2537 }
2538
2539 /* get device type from INQUIRY data */
2540 rc = get_device_type(s);
2541 if (rc < 0) {
2542 error_setg(errp, "INQUIRY failed");
2543 return;
2544 }
2545
2546 /* Make a guess for the block size; we'll fix it when the guest sends
2547 * READ CAPACITY. If it doesn't, it would likely assume these sizes
2548 * anyway. (TODO: check in /sys).
2549 */
2550 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2551 s->qdev.blocksize = 2048;
2552 } else {
2553 s->qdev.blocksize = 512;
2554 }
2555
2556 /* Make the scsi-block device non-removable via the HMP and QMP eject
2557 * commands.
2558 */
2559 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2560
2561 scsi_realize(&s->qdev, errp);
2562 scsi_generic_read_device_identification(&s->qdev);
2563 }
2564
2565 typedef struct SCSIBlockReq {
2566 SCSIDiskReq req;
2567 sg_io_hdr_t io_header;
2568
2569 /* Selected bytes of the original CDB, copied into our own CDB. */
2570 uint8_t cmd, cdb1, group_number;
2571
2572 /* CDB passed to SG_IO. */
2573 uint8_t cdb[16];
2574 } SCSIBlockReq;
2575
2576 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2577 int64_t offset, QEMUIOVector *iov,
2578 int direction,
2579 BlockCompletionFunc *cb, void *opaque)
2580 {
2581 sg_io_hdr_t *io_header = &req->io_header;
2582 SCSIDiskReq *r = &req->req;
2583 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2584 int nb_logical_blocks;
2585 uint64_t lba;
2586 BlockAIOCB *aiocb;
2587
2588 /* This is not supported yet. It can only happen if the guest does
2589 * reads and writes that are not aligned to the logical sector size
2590 * _and_ cover multiple MemoryRegions.
2591 */
2592 assert(offset % s->qdev.blocksize == 0);
2593 assert(iov->size % s->qdev.blocksize == 0);
2594
2595 io_header->interface_id = 'S';
2596
2597 /* The data transfer comes from the QEMUIOVector. */
2598 io_header->dxfer_direction = direction;
2599 io_header->dxfer_len = iov->size;
2600 io_header->dxferp = (void *)iov->iov;
2601 io_header->iovec_count = iov->niov;
2602 assert(io_header->iovec_count == iov->niov); /* no overflow! */
2603
2604 /* Build a new CDB with the LBA and length patched in, in case
2605 * DMA helpers split the transfer into multiple segments. Do not
2606 * build a CDB smaller than what the guest wanted, and only build
2607 * a larger one if strictly necessary.
2608 */
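/* The opcode's group code (req->cmd >> 5) bounds the choice: group 0 is the
 * 6-byte format, groups 1-2 the 10-byte format, group 5 the 12-byte format
 * and group 4 the 16-byte format, which is why group 4 commands are
 * excluded from the 12-byte branch below. */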
2609 io_header->cmdp = req->cdb;
2610 lba = offset / s->qdev.blocksize;
2611 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;
2612
2613 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
2614 /* 6-byte CDB */
2615 stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
2616 req->cdb[4] = nb_logical_blocks;
2617 req->cdb[5] = 0;
2618 io_header->cmd_len = 6;
2619 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
2620 /* 10-byte CDB */
2621 req->cdb[0] = (req->cmd & 0x1f) | 0x20;
2622 req->cdb[1] = req->cdb1;
2623 stl_be_p(&req->cdb[2], lba);
2624 req->cdb[6] = req->group_number;
2625 stw_be_p(&req->cdb[7], nb_logical_blocks);
2626 req->cdb[9] = 0;
2627 io_header->cmd_len = 10;
2628 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
2629 /* 12-byte CDB */
2630 req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
2631 req->cdb[1] = req->cdb1;
2632 stl_be_p(&req->cdb[2], lba);
2633 stl_be_p(&req->cdb[6], nb_logical_blocks);
2634 req->cdb[10] = req->group_number;
2635 req->cdb[11] = 0;
2636 io_header->cmd_len = 12;
2637 } else {
2638 /* 16-byte CDB */
2639 req->cdb[0] = (req->cmd & 0x1f) | 0x80;
2640 req->cdb[1] = req->cdb1;
2641 stq_be_p(&req->cdb[2], lba);
2642 stl_be_p(&req->cdb[10], nb_logical_blocks);
2643 req->cdb[14] = req->group_number;
2644 req->cdb[15] = 0;
2645 io_header->cmd_len = 16;
2646 }
2647
2648 /* The rest is as in scsi-generic.c. */
2649 io_header->mx_sb_len = sizeof(r->req.sense);
2650 io_header->sbp = r->req.sense;
2651 io_header->timeout = UINT_MAX;
2652 io_header->usr_ptr = r;
2653 io_header->flags |= SG_FLAG_DIRECT_IO;
2654
2655 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
2656 assert(aiocb != NULL);
2657 return aiocb;
2658 }
2659
2660 static bool scsi_block_no_fua(SCSICommand *cmd)
2661 {
2662 return false;
2663 }
2664
2665 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2666 QEMUIOVector *iov,
2667 BlockCompletionFunc *cb, void *cb_opaque,
2668 void *opaque)
2669 {
2670 SCSIBlockReq *r = opaque;
2671 return scsi_block_do_sgio(r, offset, iov,
2672 SG_DXFER_FROM_DEV, cb, cb_opaque);
2673 }
2674
2675 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2676 QEMUIOVector *iov,
2677 BlockCompletionFunc *cb, void *cb_opaque,
2678 void *opaque)
2679 {
2680 SCSIBlockReq *r = opaque;
2681 return scsi_block_do_sgio(r, offset, iov,
2682 SG_DXFER_TO_DEV, cb, cb_opaque);
2683 }
2684
2685 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2686 {
2687 switch (buf[0]) {
2688 case VERIFY_10:
2689 case VERIFY_12:
2690 case VERIFY_16:
2691 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2692 * for the number of logical blocks specified in the length
2693 * field). For other modes, do not use a scatter/gather operation.
2694 */
2695 if ((buf[1] & 6) != 2) {
2696 return false;
2697 }
2698 break;
2699
2700 case READ_6:
2701 case READ_10:
2702 case READ_12:
2703 case READ_16:
2704 case WRITE_6:
2705 case WRITE_10:
2706 case WRITE_12:
2707 case WRITE_16:
2708 case WRITE_VERIFY_10:
2709 case WRITE_VERIFY_12:
2710 case WRITE_VERIFY_16:
2711 /* MMC writing cannot be done via DMA helpers, because it sometimes
2712 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2713 * We might use scsi_disk_dma_reqops as long as no writing commands are
2714 * seen, but performance usually isn't paramount on optical media. So,
2715 * just make scsi-block operate the same as scsi-generic for them.
2716 */
2717 if (s->qdev.type != TYPE_ROM) {
2718 return false;
2719 }
2720 break;
2721
2722 default:
2723 break;
2724 }
2725
2726 return true;
2727 }
2728
2729
2730 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2731 {
2732 SCSIBlockReq *r = (SCSIBlockReq *)req;
2733 r->cmd = req->cmd.buf[0];
2734 switch (r->cmd >> 5) {
2735 case 0:
2736 /* 6-byte CDB. */
2737 r->cdb1 = r->group_number = 0;
2738 break;
2739 case 1:
2740 /* 10-byte CDB. */
2741 r->cdb1 = req->cmd.buf[1];
2742 r->group_number = req->cmd.buf[6];
2743 break;
2744 case 4:
2745 /* 16-byte CDB (group code 4, e.g. READ(16)): GROUP NUMBER is CDB byte 14. */
2746 r->cdb1 = req->cmd.buf[1];
2747 r->group_number = req->cmd.buf[14];
2748 break;
2749 case 5:
2750 /* 12-byte CDB (group code 5, e.g. READ(12)): GROUP NUMBER is CDB byte 10. */
2751 r->cdb1 = req->cmd.buf[1];
2752 r->group_number = req->cmd.buf[10];
2753 break;
2754 default:
2755 abort();
2756 }
2757
2758 if (r->cdb1 & 0xe0) {
2759 /* Protection information is not supported. */
2760 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
2761 return 0;
2762 }
2763
2764 r->req.status = &r->io_header.status;
2765 return scsi_disk_dma_command(req, buf);
2766 }
2767
2768 static const SCSIReqOps scsi_block_dma_reqops = {
2769 .size = sizeof(SCSIBlockReq),
2770 .free_req = scsi_free_request,
2771 .send_command = scsi_block_dma_command,
2772 .read_data = scsi_read_data,
2773 .write_data = scsi_write_data,
2774 .get_buf = scsi_get_buf,
2775 .load_request = scsi_disk_load_request,
2776 .save_request = scsi_disk_save_request,
2777 };
2778
2779 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2780 uint32_t lun, uint8_t *buf,
2781 void *hba_private)
2782 {
2783 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2784
2785 if (scsi_block_is_passthrough(s, buf)) {
2786 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
2787 hba_private);
2788 } else {
2789 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
2790 hba_private);
2791 }
2792 }
2793
2794 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
2795 uint8_t *buf, void *hba_private)
2796 {
2797 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2798
2799 if (scsi_block_is_passthrough(s, buf)) {
2800 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
2801 } else {
2802 return scsi_req_parse_cdb(&s->qdev, cmd, buf);
2803 }
2804 }
2805
2806 #endif
2807
2808 static
2809 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
2810 BlockCompletionFunc *cb, void *cb_opaque,
2811 void *opaque)
2812 {
2813 SCSIDiskReq *r = opaque;
2814 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2815 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2816 }
2817
2818 static
2819 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
2820 BlockCompletionFunc *cb, void *cb_opaque,
2821 void *opaque)
2822 {
2823 SCSIDiskReq *r = opaque;
2824 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2825 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2826 }
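/* scsi_dma_readv and scsi_dma_writev are the default DMAIOFunc
 * implementations used by scsi-hd and scsi-cd; they go straight to the
 * block backend. scsi-block overrides them with the SG_IO based
 * scsi_block_dma_readv/scsi_block_dma_writev (see scsi_block_class_initfn). */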
2827
2828 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
2829 {
2830 DeviceClass *dc = DEVICE_CLASS(klass);
2831 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2832
2833 dc->fw_name = "disk";
2834 dc->reset = scsi_disk_reset;
2835 sdc->dma_readv = scsi_dma_readv;
2836 sdc->dma_writev = scsi_dma_writev;
2837 sdc->need_fua_emulation = scsi_is_cmd_fua;
2838 }
2839
2840 static const TypeInfo scsi_disk_base_info = {
2841 .name = TYPE_SCSI_DISK_BASE,
2842 .parent = TYPE_SCSI_DEVICE,
2843 .class_init = scsi_disk_base_class_initfn,
2844 .instance_size = sizeof(SCSIDiskState),
2845 .class_size = sizeof(SCSIDiskClass),
2846 .abstract = true,
2847 };
2848
2849 #define DEFINE_SCSI_DISK_PROPERTIES() \
2850 DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \
2851 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
2852 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
2853 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
2854 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
2855 DEFINE_PROP_STRING("product", SCSIDiskState, product)
2856
2857 static Property scsi_hd_properties[] = {
2858 DEFINE_SCSI_DISK_PROPERTIES(),
2859 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
2860 SCSI_DISK_F_REMOVABLE, false),
2861 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
2862 SCSI_DISK_F_DPOFUA, false),
2863 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2864 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2865 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2866 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
2867 DEFAULT_MAX_UNMAP_SIZE),
2868 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2869 DEFAULT_MAX_IO_SIZE),
2870 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
2871 DEFINE_PROP_END_OF_LIST(),
2872 };
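/* Illustrative command line using the scsi-hd properties above (assumes a
 * virtio-scsi controller and a backing drive named "d0"; the names and
 * values are examples only):
 *
 *   -device virtio-scsi-pci,id=scsi0
 *   -drive if=none,id=d0,file=disk.qcow2
 *   -device scsi-hd,bus=scsi0.0,drive=d0,serial=QM00001,wwn=0x5000c50015ea71ac
 */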
2873
2874 static const VMStateDescription vmstate_scsi_disk_state = {
2875 .name = "scsi-disk",
2876 .version_id = 1,
2877 .minimum_version_id = 1,
2878 .fields = (VMStateField[]) {
2879 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
2880 VMSTATE_BOOL(media_changed, SCSIDiskState),
2881 VMSTATE_BOOL(media_event, SCSIDiskState),
2882 VMSTATE_BOOL(eject_request, SCSIDiskState),
2883 VMSTATE_BOOL(tray_open, SCSIDiskState),
2884 VMSTATE_BOOL(tray_locked, SCSIDiskState),
2885 VMSTATE_END_OF_LIST()
2886 }
2887 };
2888
2889 static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
2890 {
2891 DeviceClass *dc = DEVICE_CLASS(klass);
2892 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2893
2894 sc->realize = scsi_hd_realize;
2895 sc->alloc_req = scsi_new_request;
2896 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2897 dc->desc = "virtual SCSI disk";
2898 dc->props = scsi_hd_properties;
2899 dc->vmsd = &vmstate_scsi_disk_state;
2900 }
2901
2902 static const TypeInfo scsi_hd_info = {
2903 .name = "scsi-hd",
2904 .parent = TYPE_SCSI_DISK_BASE,
2905 .class_init = scsi_hd_class_initfn,
2906 };
2907
2908 static Property scsi_cd_properties[] = {
2909 DEFINE_SCSI_DISK_PROPERTIES(),
2910 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2911 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2912 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2913 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2914 DEFAULT_MAX_IO_SIZE),
2915 DEFINE_PROP_END_OF_LIST(),
2916 };
2917
2918 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
2919 {
2920 DeviceClass *dc = DEVICE_CLASS(klass);
2921 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2922
2923 sc->realize = scsi_cd_realize;
2924 sc->alloc_req = scsi_new_request;
2925 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2926 dc->desc = "virtual SCSI CD-ROM";
2927 dc->props = scsi_cd_properties;
2928 dc->vmsd = &vmstate_scsi_disk_state;
2929 }
2930
2931 static const TypeInfo scsi_cd_info = {
2932 .name = "scsi-cd",
2933 .parent = TYPE_SCSI_DISK_BASE,
2934 .class_init = scsi_cd_class_initfn,
2935 };
2936
2937 #ifdef __linux__
2938 static Property scsi_block_properties[] = {
2939 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
2940 DEFINE_PROP_END_OF_LIST(),
2941 };
2942
2943 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
2944 {
2945 DeviceClass *dc = DEVICE_CLASS(klass);
2946 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2947 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2948
2949 sc->realize = scsi_block_realize;
2950 sc->alloc_req = scsi_block_new_request;
2951 sc->parse_cdb = scsi_block_parse_cdb;
2952 sdc->dma_readv = scsi_block_dma_readv;
2953 sdc->dma_writev = scsi_block_dma_writev;
2954 sdc->need_fua_emulation = scsi_block_no_fua;
2955 dc->desc = "SCSI block device passthrough";
2956 dc->props = scsi_block_properties;
2957 dc->vmsd = &vmstate_scsi_disk_state;
2958 }
2959
2960 static const TypeInfo scsi_block_info = {
2961 .name = "scsi-block",
2962 .parent = TYPE_SCSI_DISK_BASE,
2963 .class_init = scsi_block_class_initfn,
2964 };
2965 #endif
2966
2967 static Property scsi_disk_properties[] = {
2968 DEFINE_SCSI_DISK_PROPERTIES(),
2969 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
2970 SCSI_DISK_F_REMOVABLE, false),
2971 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
2972 SCSI_DISK_F_DPOFUA, false),
2973 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2974 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2975 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2976 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
2977 DEFAULT_MAX_UNMAP_SIZE),
2978 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2979 DEFAULT_MAX_IO_SIZE),
2980 DEFINE_PROP_END_OF_LIST(),
2981 };
2982
2983 static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
2984 {
2985 DeviceClass *dc = DEVICE_CLASS(klass);
2986 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2987
2988 sc->realize = scsi_disk_realize;
2989 sc->alloc_req = scsi_new_request;
2990 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2991 dc->fw_name = "disk";
2992 dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
2993 dc->reset = scsi_disk_reset;
2994 dc->props = scsi_disk_properties;
2995 dc->vmsd = &vmstate_scsi_disk_state;
2996 }
2997
2998 static const TypeInfo scsi_disk_info = {
2999 .name = "scsi-disk",
3000 .parent = TYPE_SCSI_DISK_BASE,
3001 .class_init = scsi_disk_class_initfn,
3002 };
3003
3004 static void scsi_disk_register_types(void)
3005 {
3006 type_register_static(&scsi_disk_base_info);
3007 type_register_static(&scsi_hd_info);
3008 type_register_static(&scsi_cd_info);
3009 #ifdef __linux__
3010 type_register_static(&scsi_block_info);
3011 #endif
3012 type_register_static(&scsi_disk_info);
3013 }
3014
3015 type_init(scsi_disk_register_types)