1 /*
2 * SCSI Device emulation
3 *
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
6 *
7 * Written by Paul Brook
8 * Modifications:
9 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
14 *
15 * This code is licensed under the LGPL.
16 *
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
20 */
21
22 //#define DEBUG_SCSI
23
24 #ifdef DEBUG_SCSI
25 #define DPRINTF(fmt, ...) \
26 do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
27 #else
28 #define DPRINTF(fmt, ...) do {} while(0)
29 #endif
30
31 #include "qemu/osdep.h"
32 #include "qapi/error.h"
33 #include "qemu/error-report.h"
34 #include "hw/scsi/scsi.h"
35 #include "block/scsi.h"
36 #include "sysemu/sysemu.h"
37 #include "sysemu/block-backend.h"
38 #include "sysemu/blockdev.h"
39 #include "hw/block/block.h"
40 #include "sysemu/dma.h"
41 #include "qemu/cutils.h"
42
43 #ifdef __linux__
44 #include <scsi/sg.h>
45 #endif
46
47 #define SCSI_WRITE_SAME_MAX 524288
48 #define SCSI_DMA_BUF_SIZE 131072
49 #define SCSI_MAX_INQUIRY_LEN 256
50 #define SCSI_MAX_MODE_LEN 256
51
52 #define DEFAULT_DISCARD_GRANULARITY 4096
53 #define DEFAULT_MAX_UNMAP_SIZE (1 << 30) /* 1 GB */
54 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
55
56 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
57
58 #define SCSI_DISK_BASE(obj) \
59 OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
60 #define SCSI_DISK_BASE_CLASS(klass) \
61 OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
62 #define SCSI_DISK_BASE_GET_CLASS(obj) \
63 OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)
64
65 typedef struct SCSIDiskClass {
66 SCSIDeviceClass parent_class;
67 DMAIOFunc *dma_readv;
68 DMAIOFunc *dma_writev;
69 bool (*need_fua_emulation)(SCSICommand *cmd);
70 } SCSIDiskClass;
71
72 typedef struct SCSIDiskReq {
73 SCSIRequest req;
74 /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
75 uint64_t sector;
76 uint32_t sector_count;
77 uint32_t buflen;
78 bool started;
79 bool need_fua_emulation;
80 struct iovec iov;
81 QEMUIOVector qiov;
82 BlockAcctCookie acct;
83 unsigned char *status;
84 } SCSIDiskReq;
85
86 #define SCSI_DISK_F_REMOVABLE 0
87 #define SCSI_DISK_F_DPOFUA 1
88 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
89
90 typedef struct SCSIDiskState
91 {
92 SCSIDevice qdev;
93 uint32_t features;
94 bool media_changed;
95 bool media_event;
96 bool eject_request;
97 uint16_t port_index;
98 uint64_t max_unmap_size;
99 uint64_t max_io_size;
100 QEMUBH *bh;
101 char *version;
102 char *serial;
103 char *vendor;
104 char *product;
105 bool tray_open;
106 bool tray_locked;
107 } SCSIDiskState;
108
109 static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);
110
111 static void scsi_free_request(SCSIRequest *req)
112 {
113 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
114
115 qemu_vfree(r->iov.iov_base);
116 }
117
118 /* Helper function for command completion with sense. */
119 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
120 {
121 DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n",
122 r->req.tag, sense.key, sense.asc, sense.ascq);
123 scsi_req_build_sense(&r->req, sense);
124 scsi_req_complete(&r->req, CHECK_CONDITION);
125 }
126
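/* Lazily allocate the bounce buffer on first use and cap the single-element
 * I/O vector at the smaller of the remaining transfer (sector_count * 512)
 * and the buffer size. */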
127 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
128 {
129 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
130
131 if (!r->iov.iov_base) {
132 r->buflen = size;
133 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
134 }
135 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
136 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
137 }
138
139 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
140 {
141 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
142
143 qemu_put_be64s(f, &r->sector);
144 qemu_put_be32s(f, &r->sector_count);
145 qemu_put_be32s(f, &r->buflen);
146 if (r->buflen) {
147 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
148 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
149 } else if (!req->retry) {
150 uint32_t len = r->iov.iov_len;
151 qemu_put_be32s(f, &len);
152 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
153 }
154 }
155 }
156
157 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
158 {
159 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
160
161 qemu_get_be64s(f, &r->sector);
162 qemu_get_be32s(f, &r->sector_count);
163 qemu_get_be32s(f, &r->buflen);
164 if (r->buflen) {
165 scsi_init_iovec(r, r->buflen);
166 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
167 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
168 } else if (!r->req.retry) {
169 uint32_t len;
170 qemu_get_be32s(f, &len);
171 r->iov.iov_len = len;
172 assert(r->iov.iov_len <= r->buflen);
173 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
174 }
175 }
176
177 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
178 }
179
180 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
181 {
182 if (r->req.io_canceled) {
183 scsi_req_cancel_complete(&r->req);
184 return true;
185 }
186
187 if (ret < 0) {
188 return scsi_handle_rw_error(r, -ret, acct_failed);
189 }
190
191 if (r->status && *r->status) {
192 if (acct_failed) {
193 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
194 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
195 }
196 scsi_req_complete(&r->req, *r->status);
197 return true;
198 }
199
200 return false;
201 }
202
203 static void scsi_aio_complete(void *opaque, int ret)
204 {
205 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
206 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
207
208 assert(r->req.aiocb != NULL);
209 r->req.aiocb = NULL;
210 if (scsi_disk_req_check_error(r, ret, true)) {
211 goto done;
212 }
213
214 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
215 scsi_req_complete(&r->req, GOOD);
216
217 done:
218 scsi_req_unref(&r->req);
219 }
220
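/* Return whether the command requests Force Unit Access semantics: bit 3 of
 * CDB byte 1 for READ/WRITE (10/12/16); VERIFY and WRITE AND VERIFY are
 * always treated as if FUA were set. */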
221 static bool scsi_is_cmd_fua(SCSICommand *cmd)
222 {
223 switch (cmd->buf[0]) {
224 case READ_10:
225 case READ_12:
226 case READ_16:
227 case WRITE_10:
228 case WRITE_12:
229 case WRITE_16:
230 return (cmd->buf[1] & 8) != 0;
231
232 case VERIFY_10:
233 case VERIFY_12:
234 case VERIFY_16:
235 case WRITE_VERIFY_10:
236 case WRITE_VERIFY_12:
237 case WRITE_VERIFY_16:
238 return true;
239
240 case READ_6:
241 case WRITE_6:
242 default:
243 return false;
244 }
245 }
246
247 static void scsi_write_do_fua(SCSIDiskReq *r)
248 {
249 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
250
251 assert(r->req.aiocb == NULL);
252 assert(!r->req.io_canceled);
253
254 if (r->need_fua_emulation) {
255 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
256 BLOCK_ACCT_FLUSH);
257 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
258 return;
259 }
260
261 scsi_req_complete(&r->req, GOOD);
262 scsi_req_unref(&r->req);
263 }
264
265 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
266 {
267 assert(r->req.aiocb == NULL);
268 if (scsi_disk_req_check_error(r, ret, false)) {
269 goto done;
270 }
271
272 r->sector += r->sector_count;
273 r->sector_count = 0;
274 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
275 scsi_write_do_fua(r);
276 return;
277 } else {
278 scsi_req_complete(&r->req, GOOD);
279 }
280
281 done:
282 scsi_req_unref(&r->req);
283 }
284
285 static void scsi_dma_complete(void *opaque, int ret)
286 {
287 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
288 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
289
290 assert(r->req.aiocb != NULL);
291 r->req.aiocb = NULL;
292
293 if (ret < 0) {
294 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
295 } else {
296 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
297 }
298 scsi_dma_complete_noio(r, ret);
299 }
300
301 static void scsi_read_complete(void * opaque, int ret)
302 {
303 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
304 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
305 int n;
306
307 assert(r->req.aiocb != NULL);
308 r->req.aiocb = NULL;
309 if (scsi_disk_req_check_error(r, ret, true)) {
310 goto done;
311 }
312
313 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
314 DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);
315
316 n = r->qiov.size / 512;
317 r->sector += n;
318 r->sector_count -= n;
319 scsi_req_data(&r->req, r->qiov.size);
320
321 done:
322 scsi_req_unref(&r->req);
323 }
324
325 /* Actually issue a read to the block device. */
326 static void scsi_do_read(SCSIDiskReq *r, int ret)
327 {
328 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
329 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
330
331 assert (r->req.aiocb == NULL);
332 if (scsi_disk_req_check_error(r, ret, false)) {
333 goto done;
334 }
335
336 /* The request is used as the AIO opaque value, so add a ref. */
337 scsi_req_ref(&r->req);
338
339 if (r->req.sg) {
340 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
341 r->req.resid -= r->req.sg->size;
342 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
343 r->req.sg, r->sector << BDRV_SECTOR_BITS,
344 sdc->dma_readv, r, scsi_dma_complete, r,
345 DMA_DIRECTION_FROM_DEVICE);
346 } else {
347 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
348 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
349 r->qiov.size, BLOCK_ACCT_READ);
350 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
351 scsi_read_complete, r, r);
352 }
353
354 done:
355 scsi_req_unref(&r->req);
356 }
357
358 static void scsi_do_read_cb(void *opaque, int ret)
359 {
360 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
361 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
362
363 assert (r->req.aiocb != NULL);
364 r->req.aiocb = NULL;
365
366 if (ret < 0) {
367 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
368 } else {
369 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
370 }
371 scsi_do_read(opaque, ret);
372 }
373
374 /* Read more data from scsi device into buffer. */
375 static void scsi_read_data(SCSIRequest *req)
376 {
377 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
378 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
379 bool first;
380
381 DPRINTF("Read sector_count=%d\n", r->sector_count);
382 if (r->sector_count == 0) {
383 /* This also clears the sense buffer for REQUEST SENSE. */
384 scsi_req_complete(&r->req, GOOD);
385 return;
386 }
387
388 /* A data transfer must not already be in progress. */
389 assert(r->req.aiocb == NULL);
390
391 /* The request is used as the AIO opaque value, so add a ref. */
392 scsi_req_ref(&r->req);
393 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
394 DPRINTF("Data transfer direction invalid\n");
395 scsi_read_complete(r, -EINVAL);
396 return;
397 }
398
399 if (s->tray_open) {
400 scsi_read_complete(r, -ENOMEDIUM);
401 return;
402 }
403
404 first = !r->started;
405 r->started = true;
406 if (first && r->need_fua_emulation) {
407 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
408 BLOCK_ACCT_FLUSH);
409 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
410 } else {
411 scsi_do_read(r, 0);
412 }
413 }
414
415 /*
416 * scsi_handle_rw_error has two return values. 0 means that the error
417 * must be ignored, 1 means that the error has been processed and the
418 * caller should not do anything else for this request. Note that
419 * scsi_handle_rw_error always manages its reference counts, independent
420 * of the return value.
421 */
422 static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
423 {
424 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
425 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
426 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
427 is_read, error);
428
429 if (action == BLOCK_ERROR_ACTION_REPORT) {
430 if (acct_failed) {
431 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
432 }
433 switch (error) {
434 case ENOMEDIUM:
435 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
436 break;
437 case ENOMEM:
438 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
439 break;
440 case EINVAL:
441 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
442 break;
443 case ENOSPC:
444 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
445 break;
446 default:
447 scsi_check_condition(r, SENSE_CODE(IO_ERROR));
448 break;
449 }
450 }
451 blk_error_action(s->qdev.conf.blk, action, is_read, error);
452 if (action == BLOCK_ERROR_ACTION_STOP) {
453 scsi_req_retry(&r->req);
454 }
455 return action != BLOCK_ERROR_ACTION_IGNORE;
456 }
457
458 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
459 {
460 uint32_t n;
461
462 assert (r->req.aiocb == NULL);
463 if (scsi_disk_req_check_error(r, ret, false)) {
464 goto done;
465 }
466
467 n = r->qiov.size / 512;
468 r->sector += n;
469 r->sector_count -= n;
470 if (r->sector_count == 0) {
471 scsi_write_do_fua(r);
472 return;
473 } else {
474 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
475 DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
476 scsi_req_data(&r->req, r->qiov.size);
477 }
478
479 done:
480 scsi_req_unref(&r->req);
481 }
482
483 static void scsi_write_complete(void * opaque, int ret)
484 {
485 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
486 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
487
488 assert (r->req.aiocb != NULL);
489 r->req.aiocb = NULL;
490
491 if (ret < 0) {
492 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
493 } else {
494 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
495 }
496 scsi_write_complete_noio(r, ret);
497 }
498
499 static void scsi_write_data(SCSIRequest *req)
500 {
501 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
502 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
503 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
504
505 /* A data transfer must not already be in progress. */
506 assert(r->req.aiocb == NULL);
507
508 /* The request is used as the AIO opaque value, so add a ref. */
509 scsi_req_ref(&r->req);
510 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
511 DPRINTF("Data transfer direction invalid\n");
512 scsi_write_complete_noio(r, -EINVAL);
513 return;
514 }
515
516 if (!r->req.sg && !r->qiov.size) {
517 /* Called for the first time. Ask the driver to send us more data. */
518 r->started = true;
519 scsi_write_complete_noio(r, 0);
520 return;
521 }
522 if (s->tray_open) {
523 scsi_write_complete_noio(r, -ENOMEDIUM);
524 return;
525 }
526
527 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
528 r->req.cmd.buf[0] == VERIFY_16) {
529 if (r->req.sg) {
530 scsi_dma_complete_noio(r, 0);
531 } else {
532 scsi_write_complete_noio(r, 0);
533 }
534 return;
535 }
536
537 if (r->req.sg) {
538 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
539 r->req.resid -= r->req.sg->size;
540 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
541 r->req.sg, r->sector << BDRV_SECTOR_BITS,
542 sdc->dma_writev, r, scsi_dma_complete, r,
543 DMA_DIRECTION_TO_DEVICE);
544 } else {
545 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
546 r->qiov.size, BLOCK_ACCT_WRITE);
547 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
548 scsi_write_complete, r, r);
549 }
550 }
551
552 /* Return a pointer to the data buffer. */
553 static uint8_t *scsi_get_buf(SCSIRequest *req)
554 {
555 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
556
557 return (uint8_t *)r->iov.iov_base;
558 }
559
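/* Build the INQUIRY response: a Vital Product Data page when the EVPD bit is
 * set, otherwise the standard INQUIRY data. Returns the response length, or
 * -1 for an unsupported page or field. */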
560 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
561 {
562 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
563 int buflen = 0;
564 int start;
565
566 if (req->cmd.buf[1] & 0x1) {
567 /* Vital product data */
568 uint8_t page_code = req->cmd.buf[2];
569
570 outbuf[buflen++] = s->qdev.type & 0x1f;
571 outbuf[buflen++] = page_code ; // this page
572 outbuf[buflen++] = 0x00;
573 outbuf[buflen++] = 0x00;
574 start = buflen;
575
576 switch (page_code) {
577 case 0x00: /* Supported page codes, mandatory */
578 {
579 DPRINTF("Inquiry EVPD[Supported pages] "
580 "buffer size %zd\n", req->cmd.xfer);
581 outbuf[buflen++] = 0x00; // list of supported pages (this page)
582 if (s->serial) {
583 outbuf[buflen++] = 0x80; // unit serial number
584 }
585 outbuf[buflen++] = 0x83; // device identification
586 if (s->qdev.type == TYPE_DISK) {
587 outbuf[buflen++] = 0xb0; // block limits
588 outbuf[buflen++] = 0xb2; // thin provisioning
589 }
590 break;
591 }
592 case 0x80: /* Device serial number, optional */
593 {
594 int l;
595
596 if (!s->serial) {
597 DPRINTF("Inquiry (EVPD[Serial number] not supported\n");
598 return -1;
599 }
600
601 l = strlen(s->serial);
602 if (l > 20) {
603 l = 20;
604 }
605
606 DPRINTF("Inquiry EVPD[Serial number] "
607 "buffer size %zd\n", req->cmd.xfer);
608 memcpy(outbuf+buflen, s->serial, l);
609 buflen += l;
610 break;
611 }
612
613 case 0x83: /* Device identification page, mandatory */
614 {
615 const char *str = s->serial ?: blk_name(s->qdev.conf.blk);
616 int max_len = s->serial ? 20 : 255 - 8;
617 int id_len = strlen(str);
618
619 if (id_len > max_len) {
620 id_len = max_len;
621 }
622 DPRINTF("Inquiry EVPD[Device identification] "
623 "buffer size %zd\n", req->cmd.xfer);
624
625 outbuf[buflen++] = 0x2; // ASCII
626 outbuf[buflen++] = 0; // not officially assigned
627 outbuf[buflen++] = 0; // reserved
628 outbuf[buflen++] = id_len; // length of data following
629 memcpy(outbuf+buflen, str, id_len);
630 buflen += id_len;
631
632 if (s->qdev.wwn) {
633 outbuf[buflen++] = 0x1; // Binary
634 outbuf[buflen++] = 0x3; // NAA
635 outbuf[buflen++] = 0; // reserved
636 outbuf[buflen++] = 8;
637 stq_be_p(&outbuf[buflen], s->qdev.wwn);
638 buflen += 8;
639 }
640
641 if (s->qdev.port_wwn) {
642 outbuf[buflen++] = 0x61; // SAS / Binary
643 outbuf[buflen++] = 0x93; // PIV / Target port / NAA
644 outbuf[buflen++] = 0; // reserved
645 outbuf[buflen++] = 8;
646 stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
647 buflen += 8;
648 }
649
650 if (s->port_index) {
651 outbuf[buflen++] = 0x61; // SAS / Binary
652 outbuf[buflen++] = 0x94; // PIV / Target port / relative target port
653 outbuf[buflen++] = 0; // reserved
654 outbuf[buflen++] = 4;
655 stw_be_p(&outbuf[buflen + 2], s->port_index);
656 buflen += 4;
657 }
658 break;
659 }
660 case 0xb0: /* block limits */
661 {
662 unsigned int unmap_sectors =
663 s->qdev.conf.discard_granularity / s->qdev.blocksize;
664 unsigned int min_io_size =
665 s->qdev.conf.min_io_size / s->qdev.blocksize;
666 unsigned int opt_io_size =
667 s->qdev.conf.opt_io_size / s->qdev.blocksize;
668 unsigned int max_unmap_sectors =
669 s->max_unmap_size / s->qdev.blocksize;
670 unsigned int max_io_sectors =
671 s->max_io_size / s->qdev.blocksize;
672
673 if (s->qdev.type == TYPE_ROM) {
674 DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",
675 page_code);
676 return -1;
677 }
678 /* required VPD size with unmap support */
679 buflen = 0x40;
680 memset(outbuf + 4, 0, buflen - 4);
681
682 outbuf[4] = 0x1; /* wsnz */
683
684 /* optimal transfer length granularity */
685 outbuf[6] = (min_io_size >> 8) & 0xff;
686 outbuf[7] = min_io_size & 0xff;
687
688 /* maximum transfer length */
689 outbuf[8] = (max_io_sectors >> 24) & 0xff;
690 outbuf[9] = (max_io_sectors >> 16) & 0xff;
691 outbuf[10] = (max_io_sectors >> 8) & 0xff;
692 outbuf[11] = max_io_sectors & 0xff;
693
694 /* optimal transfer length */
695 outbuf[12] = (opt_io_size >> 24) & 0xff;
696 outbuf[13] = (opt_io_size >> 16) & 0xff;
697 outbuf[14] = (opt_io_size >> 8) & 0xff;
698 outbuf[15] = opt_io_size & 0xff;
699
700 /* max unmap LBA count, default is 1GB */
701 outbuf[20] = (max_unmap_sectors >> 24) & 0xff;
702 outbuf[21] = (max_unmap_sectors >> 16) & 0xff;
703 outbuf[22] = (max_unmap_sectors >> 8) & 0xff;
704 outbuf[23] = max_unmap_sectors & 0xff;
705
706 /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header. */
707 outbuf[24] = 0;
708 outbuf[25] = 0;
709 outbuf[26] = 0;
710 outbuf[27] = 255;
711
712 /* optimal unmap granularity */
713 outbuf[28] = (unmap_sectors >> 24) & 0xff;
714 outbuf[29] = (unmap_sectors >> 16) & 0xff;
715 outbuf[30] = (unmap_sectors >> 8) & 0xff;
716 outbuf[31] = unmap_sectors & 0xff;
717
718 /* max write same size */
719 outbuf[36] = 0;
720 outbuf[37] = 0;
721 outbuf[38] = 0;
722 outbuf[39] = 0;
723
724 outbuf[40] = (max_io_sectors >> 24) & 0xff;
725 outbuf[41] = (max_io_sectors >> 16) & 0xff;
726 outbuf[42] = (max_io_sectors >> 8) & 0xff;
727 outbuf[43] = max_io_sectors & 0xff;
728 break;
729 }
730 case 0xb2: /* thin provisioning */
731 {
732 buflen = 8;
733 outbuf[4] = 0;
734 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
735 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
736 outbuf[7] = 0;
737 break;
738 }
739 default:
740 return -1;
741 }
742 /* done with EVPD */
743 assert(buflen - start <= 255);
744 outbuf[start - 1] = buflen - start;
745 return buflen;
746 }
747
748 /* Standard INQUIRY data */
749 if (req->cmd.buf[2] != 0) {
750 return -1;
751 }
752
753 /* PAGE CODE == 0 */
754 buflen = req->cmd.xfer;
755 if (buflen > SCSI_MAX_INQUIRY_LEN) {
756 buflen = SCSI_MAX_INQUIRY_LEN;
757 }
758
759 outbuf[0] = s->qdev.type & 0x1f;
760 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
761
762 strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
763 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
764
765 memset(&outbuf[32], 0, 4);
766 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
767 /*
768 * We claim conformance to SPC-3, which is required for guests
769 * to ask for modern features like READ CAPACITY(16) or the
770 * block characteristics VPD page by default. Not all of SPC-3
771 * is actually implemented, but we're good enough.
772 */
773 outbuf[2] = 5;
774 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
775
776 if (buflen > 36) {
777 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
778 } else {
779 /* If the allocation length of CDB is too small,
780 the additional length is not adjusted */
781 outbuf[4] = 36 - 5;
782 }
783
784 /* Sync data transfer and TCQ. */
785 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
786 return buflen;
787 }
788
789 static inline bool media_is_dvd(SCSIDiskState *s)
790 {
791 uint64_t nb_sectors;
792 if (s->qdev.type != TYPE_ROM) {
793 return false;
794 }
795 if (!blk_is_inserted(s->qdev.conf.blk)) {
796 return false;
797 }
798 if (s->tray_open) {
799 return false;
800 }
801 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
802 return nb_sectors > CD_MAX_SECTORS;
803 }
804
805 static inline bool media_is_cd(SCSIDiskState *s)
806 {
807 uint64_t nb_sectors;
808 if (s->qdev.type != TYPE_ROM) {
809 return false;
810 }
811 if (!blk_is_inserted(s->qdev.conf.blk)) {
812 return false;
813 }
814 if (s->tray_open) {
815 return false;
816 }
817 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
818 return nb_sectors <= CD_MAX_SECTORS;
819 }
820
821 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
822 uint8_t *outbuf)
823 {
824 uint8_t type = r->req.cmd.buf[1] & 7;
825
826 if (s->qdev.type != TYPE_ROM) {
827 return -1;
828 }
829
830 /* Types 1/2 are only defined for Blu-Ray. */
831 if (type != 0) {
832 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
833 return -1;
834 }
835
836 memset(outbuf, 0, 34);
837 outbuf[1] = 32;
838 outbuf[2] = 0xe; /* last session complete, disc finalized */
839 outbuf[3] = 1; /* first track on disc */
840 outbuf[4] = 1; /* # of sessions */
841 outbuf[5] = 1; /* first track of last session */
842 outbuf[6] = 1; /* last track of last session */
843 outbuf[7] = 0x20; /* unrestricted use */
844 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
845 /* 9-11: most significant bytes of the fields in bytes 4-6 */
846 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
847 /* 24-31: disc bar code */
848 /* 32: disc application code */
849 /* 33: number of OPC tables */
850
851 return 34;
852 }
853
854 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
855 uint8_t *outbuf)
856 {
857 static const int rds_caps_size[5] = {
858 [0] = 2048 + 4,
859 [1] = 4 + 4,
860 [3] = 188 + 4,
861 [4] = 2048 + 4,
862 };
863
864 uint8_t media = r->req.cmd.buf[1];
865 uint8_t layer = r->req.cmd.buf[6];
866 uint8_t format = r->req.cmd.buf[7];
867 int size = -1;
868
869 if (s->qdev.type != TYPE_ROM) {
870 return -1;
871 }
872 if (media != 0) {
873 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
874 return -1;
875 }
876
877 if (format != 0xff) {
878 if (s->tray_open || !blk_is_inserted(s->qdev.conf.blk)) {
879 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
880 return -1;
881 }
882 if (media_is_cd(s)) {
883 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
884 return -1;
885 }
886 if (format >= ARRAY_SIZE(rds_caps_size)) {
887 return -1;
888 }
889 size = rds_caps_size[format];
890 memset(outbuf, 0, size);
891 }
892
893 switch (format) {
894 case 0x00: {
895 /* Physical format information */
896 uint64_t nb_sectors;
897 if (layer != 0) {
898 goto fail;
899 }
900 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
901
902 outbuf[4] = 1; /* DVD-ROM, part version 1 */
903 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
904 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
905 outbuf[7] = 0; /* default densities */
906
907 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
908 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
909 break;
910 }
911
912 case 0x01: /* DVD copyright information, all zeros */
913 break;
914
915 case 0x03: /* BCA information - invalid field for no BCA info */
916 return -1;
917
918 case 0x04: /* DVD disc manufacturing information, all zeros */
919 break;
920
921 case 0xff: { /* List capabilities */
922 int i;
923 size = 4;
924 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
925 if (!rds_caps_size[i]) {
926 continue;
927 }
928 outbuf[size] = i;
929 outbuf[size + 1] = 0x40; /* Not writable, readable */
930 stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
931 size += 4;
932 }
933 break;
934 }
935
936 default:
937 return -1;
938 }
939
940 /* Size of buffer, not including 2 byte size field */
941 stw_be_p(outbuf, size - 2);
942 return size;
943
944 fail:
945 return -1;
946 }
947
948 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
949 {
950 uint8_t event_code, media_status;
951
952 media_status = 0;
953 if (s->tray_open) {
954 media_status = MS_TRAY_OPEN;
955 } else if (blk_is_inserted(s->qdev.conf.blk)) {
956 media_status = MS_MEDIA_PRESENT;
957 }
958
959 /* Event notification descriptor */
960 event_code = MEC_NO_CHANGE;
961 if (media_status != MS_TRAY_OPEN) {
962 if (s->media_event) {
963 event_code = MEC_NEW_MEDIA;
964 s->media_event = false;
965 } else if (s->eject_request) {
966 event_code = MEC_EJECT_REQUESTED;
967 s->eject_request = false;
968 }
969 }
970
971 outbuf[0] = event_code;
972 outbuf[1] = media_status;
973
974 /* These fields are reserved, just clear them. */
975 outbuf[2] = 0;
976 outbuf[3] = 0;
977 return 4;
978 }
979
980 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
981 uint8_t *outbuf)
982 {
983 int size;
984 uint8_t *buf = r->req.cmd.buf;
985 uint8_t notification_class_request = buf[4];
986 if (s->qdev.type != TYPE_ROM) {
987 return -1;
988 }
989 if ((buf[1] & 1) == 0) {
990 /* asynchronous */
991 return -1;
992 }
993
994 size = 4;
995 outbuf[0] = outbuf[1] = 0;
996 outbuf[3] = 1 << GESN_MEDIA; /* supported events */
997 if (notification_class_request & (1 << GESN_MEDIA)) {
998 outbuf[2] = GESN_MEDIA;
999 size += scsi_event_status_media(s, &outbuf[size]);
1000 } else {
1001 outbuf[2] = 0x80;
1002 }
1003 stw_be_p(outbuf, size - 4);
1004 return size;
1005 }
1006
1007 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1008 {
1009 int current;
1010
1011 if (s->qdev.type != TYPE_ROM) {
1012 return -1;
1013 }
1014
1015 if (media_is_dvd(s)) {
1016 current = MMC_PROFILE_DVD_ROM;
1017 } else if (media_is_cd(s)) {
1018 current = MMC_PROFILE_CD_ROM;
1019 } else {
1020 current = MMC_PROFILE_NONE;
1021 }
1022
1023 memset(outbuf, 0, 40);
1024 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1025 stw_be_p(&outbuf[6], current);
1026 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1027 outbuf[10] = 0x03; /* persistent, current */
1028 outbuf[11] = 8; /* two profiles */
1029 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1030 outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1031 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1032 outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1033 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1034 stw_be_p(&outbuf[20], 1);
1035 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1036 outbuf[23] = 8;
1037 stl_be_p(&outbuf[24], 1); /* SCSI */
1038 outbuf[28] = 1; /* DBE = 1, mandatory */
1039 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1040 stw_be_p(&outbuf[32], 3);
1041 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1042 outbuf[35] = 4;
1043 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1044 /* TODO: Random readable, CD read, DVD read, drive serial number,
1045 power management */
1046 return 40;
1047 }
1048
1049 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1050 {
1051 if (s->qdev.type != TYPE_ROM) {
1052 return -1;
1053 }
1054 memset(outbuf, 0, 8);
1055 outbuf[5] = 1; /* CD-ROM */
1056 return 8;
1057 }
1058
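/* Emit a single mode page at *p_outbuf and advance the pointer past it.
 * Returns the number of bytes emitted (page length plus the 2-byte page
 * header), or -1 if the page is not valid for this device type. */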
1059 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1060 int page_control)
1061 {
1062 static const int mode_sense_valid[0x3f] = {
1063 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1064 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1065 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1066 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1067 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1068 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1069 };
1070
1071 uint8_t *p = *p_outbuf + 2;
1072 int length;
1073
1074 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1075 return -1;
1076 }
1077
1078 /*
1079 * If Changeable Values are requested, a mask denoting those mode parameters
1080 * that are changeable shall be returned. As we currently don't support
1081 * parameter changes via MODE_SELECT, all bits are returned set to zero.
1082 * The buffer was already memset to zero by the caller of this function.
1083 *
1084 * The offsets here are off by two compared to the descriptions in the
1085 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1086 * but it is done so that offsets are consistent within our implementation
1087 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1088 * 2-byte and 4-byte headers.
1089 */
1090 switch (page) {
1091 case MODE_PAGE_HD_GEOMETRY:
1092 length = 0x16;
1093 if (page_control == 1) { /* Changeable Values */
1094 break;
1095 }
1096 /* if a geometry hint is available, use it */
1097 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1098 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1099 p[2] = s->qdev.conf.cyls & 0xff;
1100 p[3] = s->qdev.conf.heads & 0xff;
1101 /* Write precomp start cylinder, disabled */
1102 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1103 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1104 p[6] = s->qdev.conf.cyls & 0xff;
1105 /* Reduced current start cylinder, disabled */
1106 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1107 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1108 p[9] = s->qdev.conf.cyls & 0xff;
1109 /* Device step rate [ns], 200ns */
1110 p[10] = 0;
1111 p[11] = 200;
1112 /* Landing zone cylinder */
1113 p[12] = 0xff;
1114 p[13] = 0xff;
1115 p[14] = 0xff;
1116 /* Medium rotation rate [rpm], 5400 rpm */
1117 p[18] = (5400 >> 8) & 0xff;
1118 p[19] = 5400 & 0xff;
1119 break;
1120
1121 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1122 length = 0x1e;
1123 if (page_control == 1) { /* Changeable Values */
1124 break;
1125 }
1126 /* Transfer rate [kbit/s], 5Mbit/s */
1127 p[0] = 5000 >> 8;
1128 p[1] = 5000 & 0xff;
1129 /* if a geometry hint is available, use it */
1130 p[2] = s->qdev.conf.heads & 0xff;
1131 p[3] = s->qdev.conf.secs & 0xff;
1132 p[4] = s->qdev.blocksize >> 8;
1133 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1134 p[7] = s->qdev.conf.cyls & 0xff;
1135 /* Write precomp start cylinder, disabled */
1136 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1137 p[9] = s->qdev.conf.cyls & 0xff;
1138 /* Reduced current start cylinder, disabled */
1139 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1140 p[11] = s->qdev.conf.cyls & 0xff;
1141 /* Device step rate [100us], 100us */
1142 p[12] = 0;
1143 p[13] = 1;
1144 /* Device step pulse width [us], 1us */
1145 p[14] = 1;
1146 /* Device head settle delay [100us], 100us */
1147 p[15] = 0;
1148 p[16] = 1;
1149 /* Motor on delay [0.1s], 0.1s */
1150 p[17] = 1;
1151 /* Motor off delay [0.1s], 0.1s */
1152 p[18] = 1;
1153 /* Medium rotation rate [rpm], 5400 rpm */
1154 p[26] = (5400 >> 8) & 0xff;
1155 p[27] = 5400 & 0xff;
1156 break;
1157
1158 case MODE_PAGE_CACHING:
1159 length = 0x12;
1160 if (page_control == 1 || /* Changeable Values */
1161 blk_enable_write_cache(s->qdev.conf.blk)) {
1162 p[0] = 4; /* WCE */
1163 }
1164 break;
1165
1166 case MODE_PAGE_R_W_ERROR:
1167 length = 10;
1168 if (page_control == 1) { /* Changeable Values */
1169 break;
1170 }
1171 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1172 if (s->qdev.type == TYPE_ROM) {
1173 p[1] = 0x20; /* Read Retry Count */
1174 }
1175 break;
1176
1177 case MODE_PAGE_AUDIO_CTL:
1178 length = 14;
1179 break;
1180
1181 case MODE_PAGE_CAPABILITIES:
1182 length = 0x14;
1183 if (page_control == 1) { /* Changeable Values */
1184 break;
1185 }
1186
1187 p[0] = 0x3b; /* CD-R & CD-RW read */
1188 p[1] = 0; /* Writing not supported */
1189 p[2] = 0x7f; /* Audio, composite, digital out,
1190 mode 2 form 1&2, multi session */
1191 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1192 RW corrected, C2 errors, ISRC,
1193 UPC, Bar code */
1194 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1195 /* Locking supported, jumper present, eject, tray */
1196 p[5] = 0; /* no volume & mute control, no
1197 changer */
1198 p[6] = (50 * 176) >> 8; /* 50x read speed */
1199 p[7] = (50 * 176) & 0xff;
1200 p[8] = 2 >> 8; /* Two volume levels */
1201 p[9] = 2 & 0xff;
1202 p[10] = 2048 >> 8; /* 2M buffer */
1203 p[11] = 2048 & 0xff;
1204 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1205 p[13] = (16 * 176) & 0xff;
1206 p[16] = (16 * 176) >> 8; /* 16x write speed */
1207 p[17] = (16 * 176) & 0xff;
1208 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1209 p[19] = (16 * 176) & 0xff;
1210 break;
1211
1212 default:
1213 return -1;
1214 }
1215
1216 assert(length < 256);
1217 (*p_outbuf)[0] = page;
1218 (*p_outbuf)[1] = length;
1219 *p_outbuf += length + 2;
1220 return length + 2;
1221 }
1222
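/* Build the MODE SENSE(6)/MODE SENSE(10) response: header, optional block
 * descriptor, and the requested mode page(s). Returns the response length,
 * or -1 if the request cannot be satisfied. */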
1223 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1224 {
1225 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1226 uint64_t nb_sectors;
1227 bool dbd;
1228 int page, buflen, ret, page_control;
1229 uint8_t *p;
1230 uint8_t dev_specific_param;
1231
1232 dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1233 page = r->req.cmd.buf[2] & 0x3f;
1234 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1235 DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n",
1236 (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control);
1237 memset(outbuf, 0, r->req.cmd.xfer);
1238 p = outbuf;
1239
1240 if (s->qdev.type == TYPE_DISK) {
1241 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1242 if (blk_is_read_only(s->qdev.conf.blk)) {
1243 dev_specific_param |= 0x80; /* Readonly. */
1244 }
1245 } else {
1246 /* MMC prescribes that CD/DVD drives have no block descriptors,
1247 * and defines no device-specific parameter. */
1248 dev_specific_param = 0x00;
1249 dbd = true;
1250 }
1251
1252 if (r->req.cmd.buf[0] == MODE_SENSE) {
1253 p[1] = 0; /* Default media type. */
1254 p[2] = dev_specific_param;
1255 p[3] = 0; /* Block descriptor length. */
1256 p += 4;
1257 } else { /* MODE_SENSE_10 */
1258 p[2] = 0; /* Default media type. */
1259 p[3] = dev_specific_param;
1260 p[6] = p[7] = 0; /* Block descriptor length. */
1261 p += 8;
1262 }
1263
1264 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1265 if (!dbd && nb_sectors) {
1266 if (r->req.cmd.buf[0] == MODE_SENSE) {
1267 outbuf[3] = 8; /* Block descriptor length */
1268 } else { /* MODE_SENSE_10 */
1269 outbuf[7] = 8; /* Block descriptor length */
1270 }
1271 nb_sectors /= (s->qdev.blocksize / 512);
1272 if (nb_sectors > 0xffffff) {
1273 nb_sectors = 0;
1274 }
1275 p[0] = 0; /* media density code */
1276 p[1] = (nb_sectors >> 16) & 0xff;
1277 p[2] = (nb_sectors >> 8) & 0xff;
1278 p[3] = nb_sectors & 0xff;
1279 p[4] = 0; /* reserved */
1280 p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1281 p[6] = s->qdev.blocksize >> 8;
1282 p[7] = 0;
1283 p += 8;
1284 }
1285
1286 if (page_control == 3) {
1287 /* Saved Values */
1288 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1289 return -1;
1290 }
1291
1292 if (page == 0x3f) {
1293 for (page = 0; page <= 0x3e; page++) {
1294 mode_sense_page(s, page, &p, page_control);
1295 }
1296 } else {
1297 ret = mode_sense_page(s, page, &p, page_control);
1298 if (ret == -1) {
1299 return -1;
1300 }
1301 }
1302
1303 buflen = p - outbuf;
1304 /*
1305 * The mode data length field specifies the length in bytes of the
1306 * following data that is available to be transferred. The mode data
1307 * length does not include itself.
1308 */
1309 if (r->req.cmd.buf[0] == MODE_SENSE) {
1310 outbuf[0] = buflen - 1;
1311 } else { /* MODE_SENSE_10 */
1312 outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1313 outbuf[1] = (buflen - 2) & 0xff;
1314 }
1315 return buflen;
1316 }
1317
1318 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1319 {
1320 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1321 int start_track, format, msf, toclen;
1322 uint64_t nb_sectors;
1323
1324 msf = req->cmd.buf[1] & 2;
1325 format = req->cmd.buf[2] & 0xf;
1326 start_track = req->cmd.buf[6];
1327 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1328 DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1);
1329 nb_sectors /= s->qdev.blocksize / 512;
1330 switch (format) {
1331 case 0:
1332 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1333 break;
1334 case 1:
1335 /* multi-session: only a single session defined */
1336 toclen = 12;
1337 memset(outbuf, 0, 12);
1338 outbuf[1] = 0x0a;
1339 outbuf[2] = 0x01;
1340 outbuf[3] = 0x01;
1341 break;
1342 case 2:
1343 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1344 break;
1345 default:
1346 return -1;
1347 }
1348 return toclen;
1349 }
1350
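/* Handle START STOP UNIT. With power condition 0 and LOEJ set, loading or
 * ejecting the medium is emulated for removable devices; ejecting a closed,
 * locked tray is refused with CHECK CONDITION. */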
1351 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1352 {
1353 SCSIRequest *req = &r->req;
1354 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1355 bool start = req->cmd.buf[4] & 1;
1356 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1357 int pwrcnd = req->cmd.buf[4] & 0xf0;
1358
1359 if (pwrcnd) {
1360 /* eject/load only happens for power condition == 0 */
1361 return 0;
1362 }
1363
1364 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1365 if (!start && !s->tray_open && s->tray_locked) {
1366 scsi_check_condition(r,
1367 blk_is_inserted(s->qdev.conf.blk)
1368 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1369 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1370 return -1;
1371 }
1372
1373 if (s->tray_open != !start) {
1374 blk_eject(s->qdev.conf.blk, !start);
1375 s->tray_open = !start;
1376 }
1377 }
1378 return 0;
1379 }
1380
1381 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1382 {
1383 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1384 int buflen = r->iov.iov_len;
1385
1386 if (buflen) {
1387 DPRINTF("Read buf_len=%d\n", buflen);
1388 r->iov.iov_len = 0;
1389 r->started = true;
1390 scsi_req_data(&r->req, buflen);
1391 return;
1392 }
1393
1394 /* This also clears the sense buffer for REQUEST SENSE. */
1395 scsi_req_complete(&r->req, GOOD);
1396 }
1397
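/* Validate a proposed mode page against the current MODE SENSE values,
 * allowing differences only in bits that are reported as changeable.
 * Returns 0 on success, -1 on a length mismatch or a forbidden change. */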
1398 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1399 uint8_t *inbuf, int inlen)
1400 {
1401 uint8_t mode_current[SCSI_MAX_MODE_LEN];
1402 uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1403 uint8_t *p;
1404 int len, expected_len, changeable_len, i;
1405
1406 /* The input buffer does not include the page header, so it is
1407 * off by 2 bytes.
1408 */
1409 expected_len = inlen + 2;
1410 if (expected_len > SCSI_MAX_MODE_LEN) {
1411 return -1;
1412 }
1413
1414 p = mode_current;
1415 memset(mode_current, 0, inlen + 2);
1416 len = mode_sense_page(s, page, &p, 0);
1417 if (len < 0 || len != expected_len) {
1418 return -1;
1419 }
1420
1421 p = mode_changeable;
1422 memset(mode_changeable, 0, inlen + 2);
1423 changeable_len = mode_sense_page(s, page, &p, 1);
1424 assert(changeable_len == len);
1425
1426 /* Check that unchangeable bits are the same as what MODE SENSE
1427 * would return.
1428 */
1429 for (i = 2; i < len; i++) {
1430 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1431 return -1;
1432 }
1433 }
1434 return 0;
1435 }
1436
1437 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1438 {
1439 switch (page) {
1440 case MODE_PAGE_CACHING:
1441 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1442 break;
1443
1444 default:
1445 break;
1446 }
1447 }
1448
1449 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1450 {
1451 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1452
1453 while (len > 0) {
1454 int page, subpage, page_len;
1455
1456 /* Parse both possible formats for the mode page headers. */
1457 page = p[0] & 0x3f;
1458 if (p[0] & 0x40) {
1459 if (len < 4) {
1460 goto invalid_param_len;
1461 }
1462 subpage = p[1];
1463 page_len = lduw_be_p(&p[2]);
1464 p += 4;
1465 len -= 4;
1466 } else {
1467 if (len < 2) {
1468 goto invalid_param_len;
1469 }
1470 subpage = 0;
1471 page_len = p[1];
1472 p += 2;
1473 len -= 2;
1474 }
1475
1476 if (subpage) {
1477 goto invalid_param;
1478 }
1479 if (page_len > len) {
1480 goto invalid_param_len;
1481 }
1482
1483 if (!change) {
1484 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1485 goto invalid_param;
1486 }
1487 } else {
1488 scsi_disk_apply_mode_select(s, page, p);
1489 }
1490
1491 p += page_len;
1492 len -= page_len;
1493 }
1494 return 0;
1495
1496 invalid_param:
1497 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1498 return -1;
1499
1500 invalid_param_len:
1501 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1502 return -1;
1503 }
1504
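/* Handle MODE SELECT(6)/MODE SELECT(10). The page list is walked twice:
 * the first pass only validates, the second pass applies the changes, so
 * nothing is modified if any page is rejected. */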
1505 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1506 {
1507 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1508 uint8_t *p = inbuf;
1509 int cmd = r->req.cmd.buf[0];
1510 int len = r->req.cmd.xfer;
1511 int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1512 int bd_len;
1513 int pass;
1514
1515 /* We only support PF=1, SP=0. */
1516 if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1517 goto invalid_field;
1518 }
1519
1520 if (len < hdr_len) {
1521 goto invalid_param_len;
1522 }
1523
1524 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1525 len -= hdr_len;
1526 p += hdr_len;
1527 if (len < bd_len) {
1528 goto invalid_param_len;
1529 }
1530 if (bd_len != 0 && bd_len != 8) {
1531 goto invalid_param;
1532 }
1533
1534 len -= bd_len;
1535 p += bd_len;
1536
1537 /* Ensure no change is made if there is an error! */
1538 for (pass = 0; pass < 2; pass++) {
1539 if (mode_select_pages(r, p, len, pass == 1) < 0) {
1540 assert(pass == 0);
1541 return;
1542 }
1543 }
1544 if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1545 /* The request is used as the AIO opaque value, so add a ref. */
1546 scsi_req_ref(&r->req);
1547 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1548 BLOCK_ACCT_FLUSH);
1549 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1550 return;
1551 }
1552
1553 scsi_req_complete(&r->req, GOOD);
1554 return;
1555
1556 invalid_param:
1557 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1558 return;
1559
1560 invalid_param_len:
1561 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1562 return;
1563
1564 invalid_field:
1565 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1566 }
1567
1568 static inline bool check_lba_range(SCSIDiskState *s,
1569 uint64_t sector_num, uint32_t nb_sectors)
1570 {
1571 /*
1572 * The first line tests that no overflow happens when computing the last
1573 * sector. The second line tests that the last accessed sector is in
1574 * range.
1575 *
1576 * Careful, the computations should not underflow for nb_sectors == 0,
1577 * and a 0-block read to the first LBA beyond the end of device is
1578 * valid.
1579 */
1580 return (sector_num <= sector_num + nb_sectors &&
1581 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1582 }
1583
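/* UNMAP descriptors from the parameter list are processed one at a time:
 * each discard completion re-enters scsi_unmap_complete_noio to issue the
 * next descriptor until the list is exhausted or an error occurs. */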
1584 typedef struct UnmapCBData {
1585 SCSIDiskReq *r;
1586 uint8_t *inbuf;
1587 int count;
1588 } UnmapCBData;
1589
1590 static void scsi_unmap_complete(void *opaque, int ret);
1591
1592 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1593 {
1594 SCSIDiskReq *r = data->r;
1595 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1596 uint64_t sector_num;
1597 uint32_t nb_sectors;
1598
1599 assert(r->req.aiocb == NULL);
1600 if (scsi_disk_req_check_error(r, ret, false)) {
1601 goto done;
1602 }
1603
1604 if (data->count > 0) {
1605 sector_num = ldq_be_p(&data->inbuf[0]);
1606 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
1607 if (!check_lba_range(s, sector_num, nb_sectors)) {
1608 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1609 goto done;
1610 }
1611
1612 r->req.aiocb = blk_aio_discard(s->qdev.conf.blk,
1613 sector_num * (s->qdev.blocksize / 512),
1614 nb_sectors * (s->qdev.blocksize / 512),
1615 scsi_unmap_complete, data);
1616 data->count--;
1617 data->inbuf += 16;
1618 return;
1619 }
1620
1621 scsi_req_complete(&r->req, GOOD);
1622
1623 done:
1624 scsi_req_unref(&r->req);
1625 g_free(data);
1626 }
1627
1628 static void scsi_unmap_complete(void *opaque, int ret)
1629 {
1630 UnmapCBData *data = opaque;
1631 SCSIDiskReq *r = data->r;
1632
1633 assert(r->req.aiocb != NULL);
1634 r->req.aiocb = NULL;
1635
1636 scsi_unmap_complete_noio(data, ret);
1637 }
1638
1639 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1640 {
1641 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1642 uint8_t *p = inbuf;
1643 int len = r->req.cmd.xfer;
1644 UnmapCBData *data;
1645
1646 /* Reject ANCHOR=1. */
1647 if (r->req.cmd.buf[1] & 0x1) {
1648 goto invalid_field;
1649 }
1650
1651 if (len < 8) {
1652 goto invalid_param_len;
1653 }
1654 if (len < lduw_be_p(&p[0]) + 2) {
1655 goto invalid_param_len;
1656 }
1657 if (len < lduw_be_p(&p[2]) + 8) {
1658 goto invalid_param_len;
1659 }
1660 if (lduw_be_p(&p[2]) & 15) {
1661 goto invalid_param_len;
1662 }
1663
1664 if (blk_is_read_only(s->qdev.conf.blk)) {
1665 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1666 return;
1667 }
1668
1669 data = g_new0(UnmapCBData, 1);
1670 data->r = r;
1671 data->inbuf = &p[8];
1672 data->count = lduw_be_p(&p[2]) >> 4;
1673
1674 /* The matching unref is in scsi_unmap_complete_noio, before data is freed. */
1675 scsi_req_ref(&r->req);
1676 scsi_unmap_complete_noio(data, 0);
1677 return;
1678
1679 invalid_param_len:
1680 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1681 return;
1682
1683 invalid_field:
1684 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1685 }
1686
1687 typedef struct WriteSameCBData {
1688 SCSIDiskReq *r;
1689 int64_t sector;
1690 int nb_sectors;
1691 QEMUIOVector qiov;
1692 struct iovec iov;
1693 } WriteSameCBData;
1694
1695 static void scsi_write_same_complete(void *opaque, int ret)
1696 {
1697 WriteSameCBData *data = opaque;
1698 SCSIDiskReq *r = data->r;
1699 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1700
1701 assert(r->req.aiocb != NULL);
1702 r->req.aiocb = NULL;
1703 if (scsi_disk_req_check_error(r, ret, true)) {
1704 goto done;
1705 }
1706
1707 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1708
1709 data->nb_sectors -= data->iov.iov_len / 512;
1710 data->sector += data->iov.iov_len / 512;
1711 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
1712 if (data->iov.iov_len) {
1713 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1714 data->iov.iov_len, BLOCK_ACCT_WRITE);
1715 /* Reinitialize qiov to handle an unaligned WRITE SAME request
1716 * where the final qiov may need a smaller size */
1717 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1718 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1719 data->sector << BDRV_SECTOR_BITS,
1720 &data->qiov, 0,
1721 scsi_write_same_complete, data);
1722 return;
1723 }
1724
1725 scsi_req_complete(&r->req, GOOD);
1726
1727 done:
1728 scsi_req_unref(&r->req);
1729 qemu_vfree(data->iov.iov_base);
1730 g_free(data);
1731 }
1732
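/* WRITE SAME: an all-zero payload is turned into a write-zeroes operation
 * (unmapping when the UNMAP bit in CDB byte 1 is set); otherwise the block
 * is replicated into a bounce buffer and written out in chunks of at most
 * SCSI_WRITE_SAME_MAX bytes. */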
1733 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1734 {
1735 SCSIRequest *req = &r->req;
1736 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1737 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1738 WriteSameCBData *data;
1739 uint8_t *buf;
1740 int i;
1741
1742 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1743 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1744 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1745 return;
1746 }
1747
1748 if (blk_is_read_only(s->qdev.conf.blk)) {
1749 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1750 return;
1751 }
1752 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1753 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1754 return;
1755 }
1756
1757 if (buffer_is_zero(inbuf, s->qdev.blocksize)) {
1758 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1759
1760 /* The request is used as the AIO opaque value, so add a ref. */
1761 scsi_req_ref(&r->req);
1762 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1763 nb_sectors * s->qdev.blocksize,
1764 BLOCK_ACCT_WRITE);
1765 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1766 r->req.cmd.lba * s->qdev.blocksize,
1767 nb_sectors * s->qdev.blocksize,
1768 flags, scsi_aio_complete, r);
1769 return;
1770 }
1771
1772 data = g_new0(WriteSameCBData, 1);
1773 data->r = r;
1774 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
1775 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
1776 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
1777 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1778 data->iov.iov_len);
1779 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1780
1781 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
1782 memcpy(&buf[i], inbuf, s->qdev.blocksize);
1783 }
1784
1785 scsi_req_ref(&r->req);
1786 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1787 data->iov.iov_len, BLOCK_ACCT_WRITE);
1788 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1789 data->sector << BDRV_SECTOR_BITS,
1790 &data->qiov, 0,
1791 scsi_write_same_complete, data);
1792 }
1793
1794 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1795 {
1796 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1797
1798 if (r->iov.iov_len) {
1799 int buflen = r->iov.iov_len;
1800 DPRINTF("Write buf_len=%d\n", buflen);
1801 r->iov.iov_len = 0;
1802 scsi_req_data(&r->req, buflen);
1803 return;
1804 }
1805
1806 switch (req->cmd.buf[0]) {
1807 case MODE_SELECT:
1808 case MODE_SELECT_10:
1809 /* This also clears the sense buffer for REQUEST SENSE. */
1810 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1811 break;
1812
1813 case UNMAP:
1814 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1815 break;
1816
1817 case VERIFY_10:
1818 case VERIFY_12:
1819 case VERIFY_16:
1820 if (r->req.status == -1) {
1821 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1822 }
1823 break;
1824
1825 case WRITE_SAME_10:
1826 case WRITE_SAME_16:
1827 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1828 break;
1829
1830 default:
1831 abort();
1832 }
1833 }
1834
1835 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1836 {
1837 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1838 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1839 uint64_t nb_sectors;
1840 uint8_t *outbuf;
1841 int buflen;
1842
1843 switch (req->cmd.buf[0]) {
1844 case INQUIRY:
1845 case MODE_SENSE:
1846 case MODE_SENSE_10:
1847 case RESERVE:
1848 case RESERVE_10:
1849 case RELEASE:
1850 case RELEASE_10:
1851 case START_STOP:
1852 case ALLOW_MEDIUM_REMOVAL:
1853 case GET_CONFIGURATION:
1854 case GET_EVENT_STATUS_NOTIFICATION:
1855 case MECHANISM_STATUS:
1856 case REQUEST_SENSE:
1857 break;
1858
1859 default:
1860 if (s->tray_open || !blk_is_inserted(s->qdev.conf.blk)) {
1861 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1862 return 0;
1863 }
1864 break;
1865 }
1866
1867 /*
1868 * FIXME: we shouldn't return anything bigger than 4k, but the code
1869 * requires the buffer to be as big as req->cmd.xfer in several
1870 * places. So, do not allow CDBs with a very large ALLOCATION
1871 * LENGTH. The real fix would be to modify scsi_read_data and
1872 * dma_buf_read, so that they return data beyond the buflen
1873 * as all zeros.
1874 */
1875 if (req->cmd.xfer > 65536) {
1876 goto illegal_request;
1877 }
1878 r->buflen = MAX(4096, req->cmd.xfer);
1879
1880 if (!r->iov.iov_base) {
1881 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
1882 }
1883
1884 buflen = req->cmd.xfer;
1885 outbuf = r->iov.iov_base;
1886 memset(outbuf, 0, r->buflen);
1887 switch (req->cmd.buf[0]) {
1888 case TEST_UNIT_READY:
1889 assert(!s->tray_open && blk_is_inserted(s->qdev.conf.blk));
1890 break;
1891 case INQUIRY:
1892 buflen = scsi_disk_emulate_inquiry(req, outbuf);
1893 if (buflen < 0) {
1894 goto illegal_request;
1895 }
1896 break;
1897 case MODE_SENSE:
1898 case MODE_SENSE_10:
1899 buflen = scsi_disk_emulate_mode_sense(r, outbuf);
1900 if (buflen < 0) {
1901 goto illegal_request;
1902 }
1903 break;
1904 case READ_TOC:
1905 buflen = scsi_disk_emulate_read_toc(req, outbuf);
1906 if (buflen < 0) {
1907 goto illegal_request;
1908 }
1909 break;
1910 case RESERVE:
1911 if (req->cmd.buf[1] & 1) {
1912 goto illegal_request;
1913 }
1914 break;
1915 case RESERVE_10:
1916 if (req->cmd.buf[1] & 3) {
1917 goto illegal_request;
1918 }
1919 break;
1920 case RELEASE:
1921 if (req->cmd.buf[1] & 1) {
1922 goto illegal_request;
1923 }
1924 break;
1925 case RELEASE_10:
1926 if (req->cmd.buf[1] & 3) {
1927 goto illegal_request;
1928 }
1929 break;
1930 case START_STOP:
1931 if (scsi_disk_emulate_start_stop(r) < 0) {
1932 return 0;
1933 }
1934 break;
1935 case ALLOW_MEDIUM_REMOVAL:
1936 s->tray_locked = req->cmd.buf[4] & 1;
1937 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
1938 break;
1939 case READ_CAPACITY_10:
1940 /* The normal LEN field for this command is zero. */
1941 memset(outbuf, 0, 8);
1942 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1943 if (!nb_sectors) {
1944 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
1945 return 0;
1946 }
1947 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
1948 goto illegal_request;
1949 }
1950 nb_sectors /= s->qdev.blocksize / 512;
1951 /* Returned value is the address of the last sector. */
1952 nb_sectors--;
1953 /* Remember the new size for read/write sanity checking. */
1954 s->qdev.max_lba = nb_sectors;
1955 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
1956 if (nb_sectors > UINT32_MAX) {
1957 nb_sectors = UINT32_MAX;
1958 }
1959 outbuf[0] = (nb_sectors >> 24) & 0xff;
1960 outbuf[1] = (nb_sectors >> 16) & 0xff;
1961 outbuf[2] = (nb_sectors >> 8) & 0xff;
1962 outbuf[3] = nb_sectors & 0xff;
1963 outbuf[4] = 0;
1964 outbuf[5] = 0;
1965 outbuf[6] = s->qdev.blocksize >> 8;
1966 outbuf[7] = 0;
1967 break;
1968 case REQUEST_SENSE:
1969 /* Just return "NO SENSE". */
1970 buflen = scsi_build_sense(NULL, 0, outbuf, r->buflen,
1971 (req->cmd.buf[1] & 1) == 0);
1972 if (buflen < 0) {
1973 goto illegal_request;
1974 }
1975 break;
1976 case MECHANISM_STATUS:
1977 buflen = scsi_emulate_mechanism_status(s, outbuf);
1978 if (buflen < 0) {
1979 goto illegal_request;
1980 }
1981 break;
1982 case GET_CONFIGURATION:
1983 buflen = scsi_get_configuration(s, outbuf);
1984 if (buflen < 0) {
1985 goto illegal_request;
1986 }
1987 break;
1988 case GET_EVENT_STATUS_NOTIFICATION:
1989 buflen = scsi_get_event_status_notification(s, r, outbuf);
1990 if (buflen < 0) {
1991 goto illegal_request;
1992 }
1993 break;
1994 case READ_DISC_INFORMATION:
1995 buflen = scsi_read_disc_information(s, r, outbuf);
1996 if (buflen < 0) {
1997 goto illegal_request;
1998 }
1999 break;
2000 case READ_DVD_STRUCTURE:
2001 buflen = scsi_read_dvd_structure(s, r, outbuf);
2002 if (buflen < 0) {
2003 goto illegal_request;
2004 }
2005 break;
2006 case SERVICE_ACTION_IN_16:
2007 /* Service Action In subcommands. */
2008 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2009 DPRINTF("SAI READ CAPACITY(16)\n");
2010 memset(outbuf, 0, req->cmd.xfer);
2011 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2012 if (!nb_sectors) {
2013 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2014 return 0;
2015 }
2016 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2017 goto illegal_request;
2018 }
2019 nb_sectors /= s->qdev.blocksize / 512;
2020 /* Returned value is the address of the last sector. */
2021 nb_sectors--;
2022 /* Remember the new size for read/write sanity checking. */
2023 s->qdev.max_lba = nb_sectors;
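/*
 * Parameter data built below: bytes 0-7 hold the last LBA (big-endian),
 * bytes 8-11 the logical block length, byte 13 the logical-blocks-per-
 * physical-block exponent, and bit 7 of byte 14 the thin-provisioning
 * (TPE/LBPME) flag when the backend supports discard.
 */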
2024 outbuf[0] = (nb_sectors >> 56) & 0xff;
2025 outbuf[1] = (nb_sectors >> 48) & 0xff;
2026 outbuf[2] = (nb_sectors >> 40) & 0xff;
2027 outbuf[3] = (nb_sectors >> 32) & 0xff;
2028 outbuf[4] = (nb_sectors >> 24) & 0xff;
2029 outbuf[5] = (nb_sectors >> 16) & 0xff;
2030 outbuf[6] = (nb_sectors >> 8) & 0xff;
2031 outbuf[7] = nb_sectors & 0xff;
2032 outbuf[8] = 0;
2033 outbuf[9] = 0;
2034 outbuf[10] = s->qdev.blocksize >> 8;
2035 outbuf[11] = 0;
2036 outbuf[12] = 0;
2037 outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2038
2039 /* set TPE bit if the format supports discard */
2040 if (s->qdev.conf.discard_granularity) {
2041 outbuf[14] = 0x80;
2042 }
2043
2044 /* Protection, exponent and lowest lba field left blank. */
2045 break;
2046 }
2047 DPRINTF("Unsupported Service Action In\n");
2048 goto illegal_request;
2049 case SYNCHRONIZE_CACHE:
2050 /* The request is used as the AIO opaque value, so add a ref. */
2051 scsi_req_ref(&r->req);
2052 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2053 BLOCK_ACCT_FLUSH);
2054 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2055 return 0;
2056 case SEEK_10:
2057 DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
2058 if (r->req.cmd.lba > s->qdev.max_lba) {
2059 goto illegal_lba;
2060 }
2061 break;
2062 case MODE_SELECT:
2063 DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2064 break;
2065 case MODE_SELECT_10:
2066 DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2067 break;
2068 case UNMAP:
2069 DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
2070 break;
2071 case VERIFY_10:
2072 case VERIFY_12:
2073 case VERIFY_16:
2074 DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
2075 if (req->cmd.buf[1] & 6) {
2076 goto illegal_request;
2077 }
2078 break;
2079 case WRITE_SAME_10:
2080 case WRITE_SAME_16:
2081 DPRINTF("WRITE SAME %d (len %lu)\n",
2082 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
2083 (unsigned long)r->req.cmd.xfer);
2084 break;
2085 default:
2086 DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
2087 scsi_command_name(buf[0]));
2088 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2089 return 0;
2090 }
2091 assert(!r->req.aiocb);
2092 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2093 if (r->iov.iov_len == 0) {
2094 scsi_req_complete(&r->req, GOOD);
2095 }
2096 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2097 assert(r->iov.iov_len == req->cmd.xfer);
2098 return -r->iov.iov_len;
2099 } else {
2100 return r->iov.iov_len;
2101 }
2102
2103 illegal_request:
2104 if (r->req.status == -1) {
2105 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2106 }
2107 return 0;
2108
2109 illegal_lba:
2110 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2111 return 0;
2112 }
2113
2114 /* Execute a scsi command. Returns the length of the data expected by the
2115 command. This will be positive for data transfers from the device
2116 (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
2117 and zero if the command does not transfer any data. */
2118
2119 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2120 {
2121 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2122 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2123 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2124 uint32_t len;
2125 uint8_t command;
2126
2127 command = buf[0];
2128
2129 if (s->tray_open || !blk_is_inserted(s->qdev.conf.blk)) {
2130 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2131 return 0;
2132 }
2133
2134 len = scsi_data_cdb_xfer(r->req.cmd.buf);
2135 switch (command) {
2136 case READ_6:
2137 case READ_10:
2138 case READ_12:
2139 case READ_16:
2140 DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
2141 if (r->req.cmd.buf[1] & 0xe0) {
2142 goto illegal_request;
2143 }
2144 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2145 goto illegal_lba;
2146 }
2147 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2148 r->sector_count = len * (s->qdev.blocksize / 512);
2149 break;
2150 case WRITE_6:
2151 case WRITE_10:
2152 case WRITE_12:
2153 case WRITE_16:
2154 case WRITE_VERIFY_10:
2155 case WRITE_VERIFY_12:
2156 case WRITE_VERIFY_16:
2157 if (blk_is_read_only(s->qdev.conf.blk)) {
2158 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
2159 return 0;
2160 }
2161 DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
2162 (command & 0xe) == 0xe ? "And Verify " : "",
2163 r->req.cmd.lba, len);
2164 if (r->req.cmd.buf[1] & 0xe0) {
2165 goto illegal_request;
2166 }
2167 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2168 goto illegal_lba;
2169 }
2170 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2171 r->sector_count = len * (s->qdev.blocksize / 512);
2172 break;
2173 default:
2174 abort();
2175 illegal_request:
2176 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2177 return 0;
2178 illegal_lba:
2179 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2180 return 0;
2181 }
2182 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
2183 if (r->sector_count == 0) {
2184 scsi_req_complete(&r->req, GOOD);
2185 }
2186 assert(r->iov.iov_len == 0);
2187 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2188 return -r->sector_count * 512;
2189 } else {
2190 return r->sector_count * 512;
2191 }
2192 }
2193
2194 static void scsi_disk_reset(DeviceState *dev)
2195 {
2196 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2197 uint64_t nb_sectors;
2198
2199 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2200
2201 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2202 nb_sectors /= s->qdev.blocksize / 512;
2203 if (nb_sectors) {
2204 nb_sectors--;
2205 }
2206 s->qdev.max_lba = nb_sectors;
2207 /* reset tray statuses */
2208 s->tray_locked = 0;
2209 s->tray_open = 0;
2210 }
2211
2212 static void scsi_disk_resize_cb(void *opaque)
2213 {
2214 SCSIDiskState *s = opaque;
2215
2216 /* SPC lists this sense code as available only for
2217 * direct-access devices.
2218 */
2219 if (s->qdev.type == TYPE_DISK) {
2220 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2221 }
2222 }
2223
2224 static void scsi_cd_change_media_cb(void *opaque, bool load)
2225 {
2226 SCSIDiskState *s = opaque;
2227
2228 /*
2229 * When a CD gets changed, we have to report an ejected state and
2230 * then a loaded state to guests so that they detect tray
2231 * open/close and media change events. Guests that do not use
2232 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2233 * states rely on this behavior.
2234 *
2235 * media_changed governs the state machine used for unit attention
2236 * reporting. media_event is used by GET EVENT STATUS NOTIFICATION.
2237 */
2238 s->media_changed = load;
2239 s->tray_open = !load;
2240 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2241 s->media_event = true;
2242 s->eject_request = false;
2243 }
2244
2245 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2246 {
2247 SCSIDiskState *s = opaque;
2248
2249 s->eject_request = true;
2250 if (force) {
2251 s->tray_locked = false;
2252 }
2253 }
2254
2255 static bool scsi_cd_is_tray_open(void *opaque)
2256 {
2257 return ((SCSIDiskState *)opaque)->tray_open;
2258 }
2259
2260 static bool scsi_cd_is_medium_locked(void *opaque)
2261 {
2262 return ((SCSIDiskState *)opaque)->tray_locked;
2263 }
2264
2265 static const BlockDevOps scsi_disk_removable_block_ops = {
2266 .change_media_cb = scsi_cd_change_media_cb,
2267 .eject_request_cb = scsi_cd_eject_request_cb,
2268 .is_tray_open = scsi_cd_is_tray_open,
2269 .is_medium_locked = scsi_cd_is_medium_locked,
2270
2271 .resize_cb = scsi_disk_resize_cb,
2272 };
2273
2274 static const BlockDevOps scsi_disk_block_ops = {
2275 .resize_cb = scsi_disk_resize_cb,
2276 };
2277
2278 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2279 {
2280 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
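/* Second half of the media-change protocol: once the NO MEDIUM unit
 * attention queued by scsi_cd_change_media_cb has been reported, queue
 * MEDIUM CHANGED so the guest sees an eject followed by a load. */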
2281 if (s->media_changed) {
2282 s->media_changed = false;
2283 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2284 }
2285 }
2286
2287 static void scsi_realize(SCSIDevice *dev, Error **errp)
2288 {
2289 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2290 Error *err = NULL;
2291
2292 if (!s->qdev.conf.blk) {
2293 error_setg(errp, "drive property not set");
2294 return;
2295 }
2296
2297 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2298 !blk_is_inserted(s->qdev.conf.blk)) {
2299 error_setg(errp, "Device needs media, but drive is empty");
2300 return;
2301 }
2302
2303 blkconf_serial(&s->qdev.conf, &s->serial);
2304 blkconf_blocksizes(&s->qdev.conf);
2305 if (dev->type == TYPE_DISK) {
2306 blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, &err);
2307 if (err) {
2308 error_propagate(errp, err);
2309 return;
2310 }
2311 }
2312 blkconf_apply_backend_options(&dev->conf);
2313
2314 if (s->qdev.conf.discard_granularity == -1) {
2315 s->qdev.conf.discard_granularity =
2316 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
2317 }
2318
2319 if (!s->version) {
2320 s->version = g_strdup(qemu_hw_version());
2321 }
2322 if (!s->vendor) {
2323 s->vendor = g_strdup("QEMU");
2324 }
2325
2326 if (blk_is_sg(s->qdev.conf.blk)) {
2327 error_setg(errp, "unwanted /dev/sg*");
2328 return;
2329 }
2330
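/* Only removable devices that want them get the tray/medium dev-ops;
 * scsi-block sets SCSI_DISK_F_NO_REMOVABLE_DEVOPS, so like fixed disks
 * it only receives resize notifications. */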
2331 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2332 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
2333 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
2334 } else {
2335 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
2336 }
2337 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);
2338
2339 blk_iostatus_enable(s->qdev.conf.blk);
2340 }
2341
2342 static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
2343 {
2344 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2345 /* This can happen for devices without a drive. The error message for
2346 * the missing backend will be issued in scsi_realize.
2347 */
2348 if (s->qdev.conf.blk) {
2349 blkconf_blocksizes(&s->qdev.conf);
2350 }
2351 s->qdev.blocksize = s->qdev.conf.logical_block_size;
2352 s->qdev.type = TYPE_DISK;
2353 if (!s->product) {
2354 s->product = g_strdup("QEMU HARDDISK");
2355 }
2356 scsi_realize(&s->qdev, errp);
2357 }
2358
2359 static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
2360 {
2361 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2362 s->qdev.blocksize = 2048;
2363 s->qdev.type = TYPE_ROM;
2364 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2365 if (!s->product) {
2366 s->product = g_strdup("QEMU CD-ROM");
2367 }
2368 scsi_realize(&s->qdev, errp);
2369 }
2370
2371 static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
2372 {
2373 DriveInfo *dinfo;
2374 Error *local_err = NULL;
2375
2376 if (!dev->conf.blk) {
2377 scsi_realize(dev, &local_err);
2378 assert(local_err);
2379 error_propagate(errp, local_err);
2380 return;
2381 }
2382
2383 dinfo = blk_legacy_dinfo(dev->conf.blk);
2384 if (dinfo && dinfo->media_cd) {
2385 scsi_cd_realize(dev, errp);
2386 } else {
2387 scsi_hd_realize(dev, errp);
2388 }
2389 }
2390
2391 static const SCSIReqOps scsi_disk_emulate_reqops = {
2392 .size = sizeof(SCSIDiskReq),
2393 .free_req = scsi_free_request,
2394 .send_command = scsi_disk_emulate_command,
2395 .read_data = scsi_disk_emulate_read_data,
2396 .write_data = scsi_disk_emulate_write_data,
2397 .get_buf = scsi_get_buf,
2398 };
2399
2400 static const SCSIReqOps scsi_disk_dma_reqops = {
2401 .size = sizeof(SCSIDiskReq),
2402 .free_req = scsi_free_request,
2403 .send_command = scsi_disk_dma_command,
2404 .read_data = scsi_read_data,
2405 .write_data = scsi_write_data,
2406 .get_buf = scsi_get_buf,
2407 .load_request = scsi_disk_load_request,
2408 .save_request = scsi_disk_save_request,
2409 };
2410
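/*
 * Per-opcode dispatch between the emulation and DMA request ops. Opcodes
 * without an entry fall back to scsi_disk_emulate_reqops in
 * scsi_new_request() below.
 */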
2411 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
2412 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
2413 [INQUIRY] = &scsi_disk_emulate_reqops,
2414 [MODE_SENSE] = &scsi_disk_emulate_reqops,
2415 [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
2416 [START_STOP] = &scsi_disk_emulate_reqops,
2417 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
2418 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
2419 [READ_TOC] = &scsi_disk_emulate_reqops,
2420 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
2421 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
2422 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
2423 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
2424 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
2425 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
2426 [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
2427 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
2428 [SEEK_10] = &scsi_disk_emulate_reqops,
2429 [MODE_SELECT] = &scsi_disk_emulate_reqops,
2430 [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
2431 [UNMAP] = &scsi_disk_emulate_reqops,
2432 [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
2433 [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
2434 [VERIFY_10] = &scsi_disk_emulate_reqops,
2435 [VERIFY_12] = &scsi_disk_emulate_reqops,
2436 [VERIFY_16] = &scsi_disk_emulate_reqops,
2437
2438 [READ_6] = &scsi_disk_dma_reqops,
2439 [READ_10] = &scsi_disk_dma_reqops,
2440 [READ_12] = &scsi_disk_dma_reqops,
2441 [READ_16] = &scsi_disk_dma_reqops,
2442 [WRITE_6] = &scsi_disk_dma_reqops,
2443 [WRITE_10] = &scsi_disk_dma_reqops,
2444 [WRITE_12] = &scsi_disk_dma_reqops,
2445 [WRITE_16] = &scsi_disk_dma_reqops,
2446 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
2447 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
2448 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
2449 };
2450
2451 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2452 uint8_t *buf, void *hba_private)
2453 {
2454 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2455 SCSIRequest *req;
2456 const SCSIReqOps *ops;
2457 uint8_t command;
2458
2459 command = buf[0];
2460 ops = scsi_disk_reqops_dispatch[command];
2461 if (!ops) {
2462 ops = &scsi_disk_emulate_reqops;
2463 }
2464 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2465
2466 #ifdef DEBUG_SCSI
2467 DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
2468 {
2469 int i;
2470 for (i = 1; i < scsi_cdb_length(buf); i++) {
2471 printf(" 0x%02x", buf[i]);
2472 }
2473 printf("\n");
2474 }
2475 #endif
2476
2477 return req;
2478 }
2479
2480 #ifdef __linux__
2481 static int get_device_type(SCSIDiskState *s)
2482 {
2483 uint8_t cmd[16];
2484 uint8_t buf[36];
2485 uint8_t sensebuf[8];
2486 sg_io_hdr_t io_header;
2487 int ret;
2488
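/* Issue a standard 36-byte INQUIRY through SG_IO: byte 0 of the response
 * carries the peripheral device type and bit 7 of byte 1 the RMB
 * (removable medium) flag, which is all we need here. */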
2489 memset(cmd, 0, sizeof(cmd));
2490 memset(buf, 0, sizeof(buf));
2491 cmd[0] = INQUIRY;
2492 cmd[4] = sizeof(buf);
2493
2494 memset(&io_header, 0, sizeof(io_header));
2495 io_header.interface_id = 'S';
2496 io_header.dxfer_direction = SG_DXFER_FROM_DEV;
2497 io_header.dxfer_len = sizeof(buf);
2498 io_header.dxferp = buf;
2499 io_header.cmdp = cmd;
2500 io_header.cmd_len = sizeof(cmd);
2501 io_header.mx_sb_len = sizeof(sensebuf);
2502 io_header.sbp = sensebuf;
2503 io_header.timeout = 6000; /* XXX */
2504
2505 ret = blk_ioctl(s->qdev.conf.blk, SG_IO, &io_header);
2506 if (ret < 0 || io_header.driver_status || io_header.host_status) {
2507 return -1;
2508 }
2509 s->qdev.type = buf[0];
2510 if (buf[1] & 0x80) {
2511 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2512 }
2513 return 0;
2514 }
2515
2516 static void scsi_block_realize(SCSIDevice *dev, Error **errp)
2517 {
2518 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2519 int sg_version;
2520 int rc;
2521
2522 if (!s->qdev.conf.blk) {
2523 error_setg(errp, "drive property not set");
2524 return;
2525 }
2526
2527 /* check we are using a driver managing SG_IO (version 3 and after) */
2528 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2529 if (rc < 0) {
2530 error_setg(errp, "cannot get SG_IO version number: %s. "
2531 "Is this a SCSI device?",
2532 strerror(-rc));
2533 return;
2534 }
2535 if (sg_version < 30000) {
2536 error_setg(errp, "scsi generic interface too old");
2537 return;
2538 }
2539
2540 /* get device type from INQUIRY data */
2541 rc = get_device_type(s);
2542 if (rc < 0) {
2543 error_setg(errp, "INQUIRY failed");
2544 return;
2545 }
2546
2547 /* Make a guess for the block size; we'll fix it when the guest sends
2548 * READ CAPACITY. If it doesn't, it likely assumes these sizes
2549 * anyway. (TODO: check in /sys).
2550 */
2551 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2552 s->qdev.blocksize = 2048;
2553 } else {
2554 s->qdev.blocksize = 512;
2555 }
2556
2557 /* Make the scsi-block device non-removable from the monitor's point of
2558 * view, so that the HMP and QMP eject commands cannot be used on it.
2559 */
2560 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2561
2562 scsi_realize(&s->qdev, errp);
2563 scsi_generic_read_device_identification(&s->qdev);
2564 }
2565
2566 typedef struct SCSIBlockReq {
2567 SCSIDiskReq req;
2568 sg_io_hdr_t io_header;
2569
2570 /* Selected bytes of the original CDB, copied into our own CDB. */
2571 uint8_t cmd, cdb1, group_number;
2572
2573 /* CDB passed to SG_IO. */
2574 uint8_t cdb[16];
2575 } SCSIBlockReq;
2576
2577 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2578 int64_t offset, QEMUIOVector *iov,
2579 int direction,
2580 BlockCompletionFunc *cb, void *opaque)
2581 {
2582 sg_io_hdr_t *io_header = &req->io_header;
2583 SCSIDiskReq *r = &req->req;
2584 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2585 int nb_logical_blocks;
2586 uint64_t lba;
2587 BlockAIOCB *aiocb;
2588
2589 /* This is not supported yet. It can only happen if the guest does
2590 * reads and writes that are not aligned to the logical sector size
2591 * _and_ cover multiple MemoryRegions.
2592 */
2593 assert(offset % s->qdev.blocksize == 0);
2594 assert(iov->size % s->qdev.blocksize == 0);
2595
2596 io_header->interface_id = 'S';
2597
2598 /* The data transfer comes from the QEMUIOVector. */
2599 io_header->dxfer_direction = direction;
2600 io_header->dxfer_len = iov->size;
2601 io_header->dxferp = (void *)iov->iov;
2602 io_header->iovec_count = iov->niov;
2603 assert(io_header->iovec_count == iov->niov); /* no overflow! */
2604
2605 /* Build a new CDB with the LBA and length patched in, in case the
2606 * DMA helpers split the transfer into multiple segments. Do not
2607 * build a CDB smaller than what the guest wanted, and only build
2608 * a larger one if strictly necessary.
2609 */
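/* For example, a READ(10) segment (opcode group 1) is always re-encoded
 * as a 10-byte or larger CDB, while a READ(6) whose segment starts above
 * LBA 0x1ffff is promoted to a 10-byte CDB. */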
2610 io_header->cmdp = req->cdb;
2611 lba = offset / s->qdev.blocksize;
2612 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;
2613
2614 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
2615 /* 6-byte CDB */
2616 stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
2617 req->cdb[4] = nb_logical_blocks;
2618 req->cdb[5] = 0;
2619 io_header->cmd_len = 6;
2620 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
2621 /* 10-byte CDB */
2622 req->cdb[0] = (req->cmd & 0x1f) | 0x20;
2623 req->cdb[1] = req->cdb1;
2624 stl_be_p(&req->cdb[2], lba);
2625 req->cdb[6] = req->group_number;
2626 stw_be_p(&req->cdb[7], nb_logical_blocks);
2627 req->cdb[9] = 0;
2628 io_header->cmd_len = 10;
2629 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
2630 /* 12-byte CDB */
2631 req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
2632 req->cdb[1] = req->cdb1;
2633 stl_be_p(&req->cdb[2], lba);
2634 stl_be_p(&req->cdb[6], nb_logical_blocks);
2635 req->cdb[10] = req->group_number;
2636 req->cdb[11] = 0;
2637 io_header->cmd_len = 12;
2638 } else {
2639 /* 16-byte CDB */
2640 req->cdb[0] = (req->cmd & 0x1f) | 0x80;
2641 req->cdb[1] = req->cdb1;
2642 stq_be_p(&req->cdb[2], lba);
2643 stl_be_p(&req->cdb[10], nb_logical_blocks);
2644 req->cdb[14] = req->group_number;
2645 req->cdb[15] = 0;
2646 io_header->cmd_len = 16;
2647 }
2648
2649 /* The rest is as in scsi-generic.c. */
2650 io_header->mx_sb_len = sizeof(r->req.sense);
2651 io_header->sbp = r->req.sense;
2652 io_header->timeout = UINT_MAX;
2653 io_header->usr_ptr = r;
2654 io_header->flags |= SG_FLAG_DIRECT_IO;
2655
2656 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
2657 assert(aiocb != NULL);
2658 return aiocb;
2659 }
2660
2661 static bool scsi_block_no_fua(SCSICommand *cmd)
2662 {
2663 return false;
2664 }
2665
2666 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2667 QEMUIOVector *iov,
2668 BlockCompletionFunc *cb, void *cb_opaque,
2669 void *opaque)
2670 {
2671 SCSIBlockReq *r = opaque;
2672 return scsi_block_do_sgio(r, offset, iov,
2673 SG_DXFER_FROM_DEV, cb, cb_opaque);
2674 }
2675
2676 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2677 QEMUIOVector *iov,
2678 BlockCompletionFunc *cb, void *cb_opaque,
2679 void *opaque)
2680 {
2681 SCSIBlockReq *r = opaque;
2682 return scsi_block_do_sgio(r, offset, iov,
2683 SG_DXFER_TO_DEV, cb, cb_opaque);
2684 }
2685
2686 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2687 {
2688 switch (buf[0]) {
2689 case VERIFY_10:
2690 case VERIFY_12:
2691 case VERIFY_16:
2692 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2693 * for the number of logical blocks specified in the length
2694 * field). For other modes, do not use a scatter/gather operation.
2695 */
2696 if ((buf[1] & 6) != 2) {
2697 return false;
2698 }
2699 break;
2700
2701 case READ_6:
2702 case READ_10:
2703 case READ_12:
2704 case READ_16:
2705 case WRITE_6:
2706 case WRITE_10:
2707 case WRITE_12:
2708 case WRITE_16:
2709 case WRITE_VERIFY_10:
2710 case WRITE_VERIFY_12:
2711 case WRITE_VERIFY_16:
2712 /* MMC writing cannot be done via DMA helpers, because it sometimes
2713 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2714 * We might use scsi_disk_dma_reqops as long as no writing commands are
2715 * seen, but performance usually isn't paramount on optical media. So,
2716 * just make scsi-block operate the same as scsi-generic for them.
2717 */
2718 if (s->qdev.type != TYPE_ROM) {
2719 return false;
2720 }
2721 break;
2722
2723 default:
2724 break;
2725 }
2726
2727 return true;
2728 }
2729
2730
2731 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2732 {
2733 SCSIBlockReq *r = (SCSIBlockReq *)req;
2734 r->cmd = req->cmd.buf[0];
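/* The top three bits of the opcode encode the command group and thus the
 * CDB size; copy byte 1 (protection/DPO/FUA flags) and the GROUP NUMBER
 * field from the positions that size dictates (both are zero for 6-byte
 * CDBs). */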
2735 switch (r->cmd >> 5) {
2736 case 0:
2737 /* 6-byte CDB. */
2738 r->cdb1 = r->group_number = 0;
2739 break;
2740 case 1:
2741 /* 10-byte CDB. */
2742 r->cdb1 = req->cmd.buf[1];
2743 r->group_number = req->cmd.buf[6];
2744 break;
2745 case 4:
2746 /* 12-byte CDB. */
2747 r->cdb1 = req->cmd.buf[1];
2748 r->group_number = req->cmd.buf[10];
2749 break;
2750 case 5:
2751 /* 16-byte CDB. */
2752 r->cdb1 = req->cmd.buf[1];
2753 r->group_number = req->cmd.buf[14];
2754 break;
2755 default:
2756 abort();
2757 }
2758
2759 if (r->cdb1 & 0xe0) {
2760 /* Protection information is not supported. */
2761 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
2762 return 0;
2763 }
2764
2765 r->req.status = &r->io_header.status;
2766 return scsi_disk_dma_command(req, buf);
2767 }
2768
2769 static const SCSIReqOps scsi_block_dma_reqops = {
2770 .size = sizeof(SCSIBlockReq),
2771 .free_req = scsi_free_request,
2772 .send_command = scsi_block_dma_command,
2773 .read_data = scsi_read_data,
2774 .write_data = scsi_write_data,
2775 .get_buf = scsi_get_buf,
2776 .load_request = scsi_disk_load_request,
2777 .save_request = scsi_disk_save_request,
2778 };
2779
2780 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2781 uint32_t lun, uint8_t *buf,
2782 void *hba_private)
2783 {
2784 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2785
2786 if (scsi_block_is_passthrough(s, buf)) {
2787 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
2788 hba_private);
2789 } else {
2790 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
2791 hba_private);
2792 }
2793 }
2794
2795 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
2796 uint8_t *buf, void *hba_private)
2797 {
2798 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2799
2800 if (scsi_block_is_passthrough(s, buf)) {
2801 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
2802 } else {
2803 return scsi_req_parse_cdb(&s->qdev, cmd, buf);
2804 }
2805 }
2806
2807 #endif
2808
2809 static
2810 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
2811 BlockCompletionFunc *cb, void *cb_opaque,
2812 void *opaque)
2813 {
2814 SCSIDiskReq *r = opaque;
2815 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2816 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2817 }
2818
2819 static
2820 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
2821 BlockCompletionFunc *cb, void *cb_opaque,
2822 void *opaque)
2823 {
2824 SCSIDiskReq *r = opaque;
2825 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2826 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2827 }
2828
2829 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
2830 {
2831 DeviceClass *dc = DEVICE_CLASS(klass);
2832 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2833
2834 dc->fw_name = "disk";
2835 dc->reset = scsi_disk_reset;
2836 sdc->dma_readv = scsi_dma_readv;
2837 sdc->dma_writev = scsi_dma_writev;
2838 sdc->need_fua_emulation = scsi_is_cmd_fua;
2839 }
2840
2841 static const TypeInfo scsi_disk_base_info = {
2842 .name = TYPE_SCSI_DISK_BASE,
2843 .parent = TYPE_SCSI_DEVICE,
2844 .class_init = scsi_disk_base_class_initfn,
2845 .instance_size = sizeof(SCSIDiskState),
2846 .class_size = sizeof(SCSIDiskClass),
2847 .abstract = true,
2848 };
2849
2850 #define DEFINE_SCSI_DISK_PROPERTIES() \
2851 DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \
2852 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
2853 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
2854 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
2855 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
2856 DEFINE_PROP_STRING("product", SCSIDiskState, product)
2857
2858 static Property scsi_hd_properties[] = {
2859 DEFINE_SCSI_DISK_PROPERTIES(),
2860 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
2861 SCSI_DISK_F_REMOVABLE, false),
2862 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
2863 SCSI_DISK_F_DPOFUA, false),
2864 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2865 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2866 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2867 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
2868 DEFAULT_MAX_UNMAP_SIZE),
2869 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2870 DEFAULT_MAX_IO_SIZE),
2871 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
2872 DEFINE_PROP_END_OF_LIST(),
2873 };
2874
2875 static const VMStateDescription vmstate_scsi_disk_state = {
2876 .name = "scsi-disk",
2877 .version_id = 1,
2878 .minimum_version_id = 1,
2879 .fields = (VMStateField[]) {
2880 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
2881 VMSTATE_BOOL(media_changed, SCSIDiskState),
2882 VMSTATE_BOOL(media_event, SCSIDiskState),
2883 VMSTATE_BOOL(eject_request, SCSIDiskState),
2884 VMSTATE_BOOL(tray_open, SCSIDiskState),
2885 VMSTATE_BOOL(tray_locked, SCSIDiskState),
2886 VMSTATE_END_OF_LIST()
2887 }
2888 };
2889
2890 static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
2891 {
2892 DeviceClass *dc = DEVICE_CLASS(klass);
2893 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2894
2895 sc->realize = scsi_hd_realize;
2896 sc->alloc_req = scsi_new_request;
2897 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2898 dc->desc = "virtual SCSI disk";
2899 dc->props = scsi_hd_properties;
2900 dc->vmsd = &vmstate_scsi_disk_state;
2901 }
2902
2903 static const TypeInfo scsi_hd_info = {
2904 .name = "scsi-hd",
2905 .parent = TYPE_SCSI_DISK_BASE,
2906 .class_init = scsi_hd_class_initfn,
2907 };
2908
2909 static Property scsi_cd_properties[] = {
2910 DEFINE_SCSI_DISK_PROPERTIES(),
2911 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2912 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2913 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2914 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2915 DEFAULT_MAX_IO_SIZE),
2916 DEFINE_PROP_END_OF_LIST(),
2917 };
2918
2919 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
2920 {
2921 DeviceClass *dc = DEVICE_CLASS(klass);
2922 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2923
2924 sc->realize = scsi_cd_realize;
2925 sc->alloc_req = scsi_new_request;
2926 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2927 dc->desc = "virtual SCSI CD-ROM";
2928 dc->props = scsi_cd_properties;
2929 dc->vmsd = &vmstate_scsi_disk_state;
2930 }
2931
2932 static const TypeInfo scsi_cd_info = {
2933 .name = "scsi-cd",
2934 .parent = TYPE_SCSI_DISK_BASE,
2935 .class_init = scsi_cd_class_initfn,
2936 };
2937
2938 #ifdef __linux__
2939 static Property scsi_block_properties[] = {
2940 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
2941 DEFINE_PROP_END_OF_LIST(),
2942 };
2943
2944 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
2945 {
2946 DeviceClass *dc = DEVICE_CLASS(klass);
2947 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2948 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2949
2950 sc->realize = scsi_block_realize;
2951 sc->alloc_req = scsi_block_new_request;
2952 sc->parse_cdb = scsi_block_parse_cdb;
2953 sdc->dma_readv = scsi_block_dma_readv;
2954 sdc->dma_writev = scsi_block_dma_writev;
2955 sdc->need_fua_emulation = scsi_block_no_fua;
2956 dc->desc = "SCSI block device passthrough";
2957 dc->props = scsi_block_properties;
2958 dc->vmsd = &vmstate_scsi_disk_state;
2959 }
2960
2961 static const TypeInfo scsi_block_info = {
2962 .name = "scsi-block",
2963 .parent = TYPE_SCSI_DISK_BASE,
2964 .class_init = scsi_block_class_initfn,
2965 };
2966 #endif
2967
2968 static Property scsi_disk_properties[] = {
2969 DEFINE_SCSI_DISK_PROPERTIES(),
2970 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
2971 SCSI_DISK_F_REMOVABLE, false),
2972 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
2973 SCSI_DISK_F_DPOFUA, false),
2974 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2975 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2976 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2977 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
2978 DEFAULT_MAX_UNMAP_SIZE),
2979 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2980 DEFAULT_MAX_IO_SIZE),
2981 DEFINE_PROP_END_OF_LIST(),
2982 };
2983
2984 static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
2985 {
2986 DeviceClass *dc = DEVICE_CLASS(klass);
2987 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2988
2989 sc->realize = scsi_disk_realize;
2990 sc->alloc_req = scsi_new_request;
2991 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2992 dc->fw_name = "disk";
2993 dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
2994 dc->reset = scsi_disk_reset;
2995 dc->props = scsi_disk_properties;
2996 dc->vmsd = &vmstate_scsi_disk_state;
2997 }
2998
2999 static const TypeInfo scsi_disk_info = {
3000 .name = "scsi-disk",
3001 .parent = TYPE_SCSI_DISK_BASE,
3002 .class_init = scsi_disk_class_initfn,
3003 };
3004
3005 static void scsi_disk_register_types(void)
3006 {
3007 type_register_static(&scsi_disk_base_info);
3008 type_register_static(&scsi_hd_info);
3009 type_register_static(&scsi_cd_info);
3010 #ifdef __linux__
3011 type_register_static(&scsi_block_info);
3012 #endif
3013 type_register_static(&scsi_disk_info);
3014 }
3015
3016 type_init(scsi_disk_register_types)