1 /*
2 * SCSI Device emulation
3 *
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
6 *
7 * Written by Paul Brook
8 * Modifications:
9 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
14 *
15 * This code is licensed under the LGPL.
16 *
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
20 */
21
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/module.h"
27 #include "hw/scsi/scsi.h"
28 #include "hw/scsi/emulation.h"
29 #include "scsi/constants.h"
30 #include "sysemu/sysemu.h"
31 #include "sysemu/block-backend.h"
32 #include "sysemu/blockdev.h"
33 #include "hw/block/block.h"
34 #include "sysemu/dma.h"
35 #include "qemu/cutils.h"
36 #include "trace.h"
37
38 #ifdef __linux
39 #include <scsi/sg.h>
40 #endif
41
42 #define SCSI_WRITE_SAME_MAX (512 * KiB)
43 #define SCSI_DMA_BUF_SIZE (128 * KiB)
44 #define SCSI_MAX_INQUIRY_LEN 256
45 #define SCSI_MAX_MODE_LEN 256
46
47 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
48 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
49 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
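/*
 * Worked example (illustrative, assuming the corresponding device properties
 * are left at these defaults and a 512-byte logical block size): the
 * block-limits VPD page built in scsi_disk_emulate_vpd_page() then reports an
 * unmap granularity of 4 KiB / 512 = 8 blocks, max_unmap_sectors of
 * 1 GiB / 512 = 2097152 and max_io_sectors of INT_MAX / 512 = 4194303.
 */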
50
51 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
52
53 #define SCSI_DISK_BASE(obj) \
54 OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
55 #define SCSI_DISK_BASE_CLASS(klass) \
56 OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
57 #define SCSI_DISK_BASE_GET_CLASS(obj) \
58 OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)
59
60 typedef struct SCSIDiskClass {
61 SCSIDeviceClass parent_class;
62 DMAIOFunc *dma_readv;
63 DMAIOFunc *dma_writev;
64 bool (*need_fua_emulation)(SCSICommand *cmd);
65 } SCSIDiskClass;
66
67 typedef struct SCSIDiskReq {
68 SCSIRequest req;
69 /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
70 uint64_t sector;
71 uint32_t sector_count;
72 uint32_t buflen;
73 bool started;
74 bool need_fua_emulation;
75 struct iovec iov;
76 QEMUIOVector qiov;
77 BlockAcctCookie acct;
78 unsigned char *status;
79 } SCSIDiskReq;
80
81 #define SCSI_DISK_F_REMOVABLE 0
82 #define SCSI_DISK_F_DPOFUA 1
83 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
84
85 typedef struct SCSIDiskState
86 {
87 SCSIDevice qdev;
88 uint32_t features;
89 bool media_changed;
90 bool media_event;
91 bool eject_request;
92 uint16_t port_index;
93 uint64_t max_unmap_size;
94 uint64_t max_io_size;
95 QEMUBH *bh;
96 char *version;
97 char *serial;
98 char *vendor;
99 char *product;
100 char *device_id;
101 bool tray_open;
102 bool tray_locked;
103 /*
104 * 0x0000 - rotation rate not reported
105 * 0x0001 - non-rotating medium (SSD)
106 * 0x0002-0x0400 - reserved
107 * 0x0401-0xfffe - rotations per minute
108 * 0xffff - reserved
109 */
110 uint16_t rotation_rate;
111 } SCSIDiskState;
112
113 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);
114
115 static void scsi_free_request(SCSIRequest *req)
116 {
117 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
118
119 qemu_vfree(r->iov.iov_base);
120 }
121
122 /* Helper function for command completion with sense. */
123 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
124 {
125 trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
126 sense.ascq);
127 scsi_req_build_sense(&r->req, sense);
128 scsi_req_complete(&r->req, CHECK_CONDITION);
129 }
130
131 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
132 {
133 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
134
135 if (!r->iov.iov_base) {
136 r->buflen = size;
137 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
138 }
139 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
140 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
141 }
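/*
 * Example (illustrative): for a READ of 2048 sectors handled through the
 * bounce-buffer path, scsi_do_read() calls scsi_init_iovec(r, SCSI_DMA_BUF_SIZE),
 * so buflen is capped at 128 KiB and iov_len = MIN(2048 * 512, 128 KiB), i.e.
 * 256 sectors per pass; the remaining data is moved by subsequent
 * scsi_read_data() calls that reuse the same buffer, eight passes in total.
 */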
142
143 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
144 {
145 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
146
147 qemu_put_be64s(f, &r->sector);
148 qemu_put_be32s(f, &r->sector_count);
149 qemu_put_be32s(f, &r->buflen);
150 if (r->buflen) {
151 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
152 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
153 } else if (!req->retry) {
154 uint32_t len = r->iov.iov_len;
155 qemu_put_be32s(f, &len);
156 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
157 }
158 }
159 }
160
161 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
162 {
163 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
164
165 qemu_get_be64s(f, &r->sector);
166 qemu_get_be32s(f, &r->sector_count);
167 qemu_get_be32s(f, &r->buflen);
168 if (r->buflen) {
169 scsi_init_iovec(r, r->buflen);
170 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
171 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
172 } else if (!r->req.retry) {
173 uint32_t len;
174 qemu_get_be32s(f, &len);
175 r->iov.iov_len = len;
176 assert(r->iov.iov_len <= r->buflen);
177 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
178 }
179 }
180
181 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
182 }
183
184 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
185 {
186 if (r->req.io_canceled) {
187 scsi_req_cancel_complete(&r->req);
188 return true;
189 }
190
191 if (ret < 0 || (r->status && *r->status)) {
192 return scsi_handle_rw_error(r, -ret, acct_failed);
193 }
194
195 return false;
196 }
197
198 static void scsi_aio_complete(void *opaque, int ret)
199 {
200 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
201 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
202
203 assert(r->req.aiocb != NULL);
204 r->req.aiocb = NULL;
205 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
206 if (scsi_disk_req_check_error(r, ret, true)) {
207 goto done;
208 }
209
210 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
211 scsi_req_complete(&r->req, GOOD);
212
213 done:
214 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
215 scsi_req_unref(&r->req);
216 }
217
218 static bool scsi_is_cmd_fua(SCSICommand *cmd)
219 {
220 switch (cmd->buf[0]) {
221 case READ_10:
222 case READ_12:
223 case READ_16:
224 case WRITE_10:
225 case WRITE_12:
226 case WRITE_16:
227 return (cmd->buf[1] & 8) != 0;
228
229 case VERIFY_10:
230 case VERIFY_12:
231 case VERIFY_16:
232 case WRITE_VERIFY_10:
233 case WRITE_VERIFY_12:
234 case WRITE_VERIFY_16:
235 return true;
236
237 case READ_6:
238 case WRITE_6:
239 default:
240 return false;
241 }
242 }
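/*
 * Example (illustrative): a WRITE(10) CDB of 2a 08 00 00 10 00 00 00 08 00
 * has the FUA bit (0x08) set in byte 1, so scsi_is_cmd_fua() returns true and,
 * when FUA emulation is enabled for the request, the completed write is
 * followed by a flush in scsi_write_do_fua(). WRITE(6) carries no FUA bit in
 * its CDB and therefore always returns false.
 */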
243
244 static void scsi_write_do_fua(SCSIDiskReq *r)
245 {
246 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
247
248 assert(r->req.aiocb == NULL);
249 assert(!r->req.io_canceled);
250
251 if (r->need_fua_emulation) {
252 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
253 BLOCK_ACCT_FLUSH);
254 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
255 return;
256 }
257
258 scsi_req_complete(&r->req, GOOD);
259 scsi_req_unref(&r->req);
260 }
261
262 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
263 {
264 assert(r->req.aiocb == NULL);
265 if (scsi_disk_req_check_error(r, ret, false)) {
266 goto done;
267 }
268
269 r->sector += r->sector_count;
270 r->sector_count = 0;
271 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
272 scsi_write_do_fua(r);
273 return;
274 } else {
275 scsi_req_complete(&r->req, GOOD);
276 }
277
278 done:
279 scsi_req_unref(&r->req);
280 }
281
282 static void scsi_dma_complete(void *opaque, int ret)
283 {
284 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
285 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
286
287 assert(r->req.aiocb != NULL);
288 r->req.aiocb = NULL;
289
290 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
291 if (ret < 0) {
292 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
293 } else {
294 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
295 }
296 scsi_dma_complete_noio(r, ret);
297 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
298 }
299
300 static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
301 {
302 uint32_t n;
303
304 assert(r->req.aiocb == NULL);
305 if (scsi_disk_req_check_error(r, ret, false)) {
306 goto done;
307 }
308
309 n = r->qiov.size / 512;
310 r->sector += n;
311 r->sector_count -= n;
312 scsi_req_data(&r->req, r->qiov.size);
313
314 done:
315 scsi_req_unref(&r->req);
316 }
317
318 static void scsi_read_complete(void *opaque, int ret)
319 {
320 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
321 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
322
323 assert(r->req.aiocb != NULL);
324 r->req.aiocb = NULL;
325
326 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
327 if (ret < 0) {
328 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
329 } else {
330 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
331 trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
332 }
333 scsi_read_complete_noio(r, ret);
334 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
335 }
336
337 /* Actually issue a read to the block device. */
338 static void scsi_do_read(SCSIDiskReq *r, int ret)
339 {
340 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
341 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
342
343 assert (r->req.aiocb == NULL);
344 if (scsi_disk_req_check_error(r, ret, false)) {
345 goto done;
346 }
347
348 /* The request is used as the AIO opaque value, so add a ref. */
349 scsi_req_ref(&r->req);
350
351 if (r->req.sg) {
352 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
353 r->req.resid -= r->req.sg->size;
354 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
355 r->req.sg, r->sector << BDRV_SECTOR_BITS,
356 BDRV_SECTOR_SIZE,
357 sdc->dma_readv, r, scsi_dma_complete, r,
358 DMA_DIRECTION_FROM_DEVICE);
359 } else {
360 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
361 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
362 r->qiov.size, BLOCK_ACCT_READ);
363 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
364 scsi_read_complete, r, r);
365 }
366
367 done:
368 scsi_req_unref(&r->req);
369 }
370
371 static void scsi_do_read_cb(void *opaque, int ret)
372 {
373 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
374 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
375
376 assert (r->req.aiocb != NULL);
377 r->req.aiocb = NULL;
378
379 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
380 if (ret < 0) {
381 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
382 } else {
383 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
384 }
385 scsi_do_read(opaque, ret);
386 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
387 }
388
389 /* Read more data from scsi device into buffer. */
390 static void scsi_read_data(SCSIRequest *req)
391 {
392 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
393 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
394 bool first;
395
396 trace_scsi_disk_read_data_count(r->sector_count);
397 if (r->sector_count == 0) {
398 /* This also clears the sense buffer for REQUEST SENSE. */
399 scsi_req_complete(&r->req, GOOD);
400 return;
401 }
402
403 /* A data transfer must not already be in progress */
404 assert(r->req.aiocb == NULL);
405
406 /* The request is used as the AIO opaque value, so add a ref. */
407 scsi_req_ref(&r->req);
408 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
409 trace_scsi_disk_read_data_invalid();
410 scsi_read_complete_noio(r, -EINVAL);
411 return;
412 }
413
414 if (!blk_is_available(req->dev->conf.blk)) {
415 scsi_read_complete_noio(r, -ENOMEDIUM);
416 return;
417 }
418
419 first = !r->started;
420 r->started = true;
421 if (first && r->need_fua_emulation) {
422 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
423 BLOCK_ACCT_FLUSH);
424 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
425 } else {
426 scsi_do_read(r, 0);
427 }
428 }
429
430 /*
431 * scsi_handle_rw_error has two return values. False means that the error
432 * must be ignored, true means that the error has been processed and the
433 * caller should not do anything else for this request. Note that
434 * scsi_handle_rw_error always manages its reference counts, independent
435 * of the return value.
436 */
437 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
438 {
439 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
440 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
441 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
442 is_read, error);
443
444 if (action == BLOCK_ERROR_ACTION_REPORT) {
445 if (acct_failed) {
446 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
447 }
448 switch (error) {
449 case 0:
450 /* A passthrough command has run and has produced sense data; check
451 * whether the error has to be handled by the guest or should rather
452 * pause the host.
453 */
454 assert(r->status && *r->status);
455 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
456 if (error == ECANCELED || error == EAGAIN || error == ENOTCONN ||
457 error == 0) {
458 /* These errors are handled by guest. */
459 scsi_req_complete(&r->req, *r->status);
460 return true;
461 }
462 break;
463 case ENOMEDIUM:
464 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
465 break;
466 case ENOMEM:
467 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
468 break;
469 case EINVAL:
470 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
471 break;
472 case ENOSPC:
473 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
474 break;
475 default:
476 scsi_check_condition(r, SENSE_CODE(IO_ERROR));
477 break;
478 }
479 }
480
481 blk_error_action(s->qdev.conf.blk, action, is_read, error);
482 if (action == BLOCK_ERROR_ACTION_IGNORE) {
483 scsi_req_complete(&r->req, 0);
484 return true;
485 }
486
487 if (action == BLOCK_ERROR_ACTION_STOP) {
488 scsi_req_retry(&r->req);
489 }
490 return true;
491 }
492
493 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
494 {
495 uint32_t n;
496
497 assert (r->req.aiocb == NULL);
498 if (scsi_disk_req_check_error(r, ret, false)) {
499 goto done;
500 }
501
502 n = r->qiov.size / 512;
503 r->sector += n;
504 r->sector_count -= n;
505 if (r->sector_count == 0) {
506 scsi_write_do_fua(r);
507 return;
508 } else {
509 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
510 trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
511 scsi_req_data(&r->req, r->qiov.size);
512 }
513
514 done:
515 scsi_req_unref(&r->req);
516 }
517
518 static void scsi_write_complete(void * opaque, int ret)
519 {
520 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
521 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
522
523 assert (r->req.aiocb != NULL);
524 r->req.aiocb = NULL;
525
526 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
527 if (ret < 0) {
528 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
529 } else {
530 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
531 }
532 scsi_write_complete_noio(r, ret);
533 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
534 }
535
536 static void scsi_write_data(SCSIRequest *req)
537 {
538 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
539 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
540 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
541
542 /* A data transfer must not already be in progress */
543 assert(r->req.aiocb == NULL);
544
545 /* The request is used as the AIO opaque value, so add a ref. */
546 scsi_req_ref(&r->req);
547 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
548 trace_scsi_disk_write_data_invalid();
549 scsi_write_complete_noio(r, -EINVAL);
550 return;
551 }
552
553 if (!r->req.sg && !r->qiov.size) {
554 /* Called for the first time. Ask the driver to send us more data. */
555 r->started = true;
556 scsi_write_complete_noio(r, 0);
557 return;
558 }
559 if (!blk_is_available(req->dev->conf.blk)) {
560 scsi_write_complete_noio(r, -ENOMEDIUM);
561 return;
562 }
563
564 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
565 r->req.cmd.buf[0] == VERIFY_16) {
566 if (r->req.sg) {
567 scsi_dma_complete_noio(r, 0);
568 } else {
569 scsi_write_complete_noio(r, 0);
570 }
571 return;
572 }
573
574 if (r->req.sg) {
575 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
576 r->req.resid -= r->req.sg->size;
577 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
578 r->req.sg, r->sector << BDRV_SECTOR_BITS,
579 BDRV_SECTOR_SIZE,
580 sdc->dma_writev, r, scsi_dma_complete, r,
581 DMA_DIRECTION_TO_DEVICE);
582 } else {
583 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
584 r->qiov.size, BLOCK_ACCT_WRITE);
585 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
586 scsi_write_complete, r, r);
587 }
588 }
589
590 /* Return a pointer to the data buffer. */
591 static uint8_t *scsi_get_buf(SCSIRequest *req)
592 {
593 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
594
595 return (uint8_t *)r->iov.iov_base;
596 }
597
598 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
599 {
600 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
601 uint8_t page_code = req->cmd.buf[2];
602 int start, buflen = 0;
603
604 outbuf[buflen++] = s->qdev.type & 0x1f;
605 outbuf[buflen++] = page_code;
606 outbuf[buflen++] = 0x00;
607 outbuf[buflen++] = 0x00;
608 start = buflen;
609
610 switch (page_code) {
611 case 0x00: /* Supported page codes, mandatory */
612 {
613 trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
614 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
615 if (s->serial) {
616 outbuf[buflen++] = 0x80; /* unit serial number */
617 }
618 outbuf[buflen++] = 0x83; /* device identification */
619 if (s->qdev.type == TYPE_DISK) {
620 outbuf[buflen++] = 0xb0; /* block limits */
621 outbuf[buflen++] = 0xb1; /* block device characteristics */
622 outbuf[buflen++] = 0xb2; /* thin provisioning */
623 }
624 break;
625 }
626 case 0x80: /* Device serial number, optional */
627 {
628 int l;
629
630 if (!s->serial) {
631 trace_scsi_disk_emulate_vpd_page_80_not_supported();
632 return -1;
633 }
634
635 l = strlen(s->serial);
636 if (l > 36) {
637 l = 36;
638 }
639
640 trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
641 memcpy(outbuf + buflen, s->serial, l);
642 buflen += l;
643 break;
644 }
645
646 case 0x83: /* Device identification page, mandatory */
647 {
648 int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;
649
650 trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);
651
652 if (id_len) {
653 outbuf[buflen++] = 0x2; /* ASCII */
654 outbuf[buflen++] = 0; /* not officially assigned */
655 outbuf[buflen++] = 0; /* reserved */
656 outbuf[buflen++] = id_len; /* length of data following */
657 memcpy(outbuf + buflen, s->device_id, id_len);
658 buflen += id_len;
659 }
660
661 if (s->qdev.wwn) {
662 outbuf[buflen++] = 0x1; /* Binary */
663 outbuf[buflen++] = 0x3; /* NAA */
664 outbuf[buflen++] = 0; /* reserved */
665 outbuf[buflen++] = 8;
666 stq_be_p(&outbuf[buflen], s->qdev.wwn);
667 buflen += 8;
668 }
669
670 if (s->qdev.port_wwn) {
671 outbuf[buflen++] = 0x61; /* SAS / Binary */
672 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
673 outbuf[buflen++] = 0; /* reserved */
674 outbuf[buflen++] = 8;
675 stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
676 buflen += 8;
677 }
678
679 if (s->port_index) {
680 outbuf[buflen++] = 0x61; /* SAS / Binary */
681
682 /* PIV/Target port/relative target port */
683 outbuf[buflen++] = 0x94;
684
685 outbuf[buflen++] = 0; /* reserved */
686 outbuf[buflen++] = 4;
687 stw_be_p(&outbuf[buflen + 2], s->port_index);
688 buflen += 4;
689 }
690 break;
691 }
692 case 0xb0: /* block limits */
693 {
694 SCSIBlockLimits bl = {};
695
696 if (s->qdev.type == TYPE_ROM) {
697 trace_scsi_disk_emulate_vpd_page_b0_not_supported();
698 return -1;
699 }
700 bl.wsnz = 1;
701 bl.unmap_sectors =
702 s->qdev.conf.discard_granularity / s->qdev.blocksize;
703 bl.min_io_size =
704 s->qdev.conf.min_io_size / s->qdev.blocksize;
705 bl.opt_io_size =
706 s->qdev.conf.opt_io_size / s->qdev.blocksize;
707 bl.max_unmap_sectors =
708 s->max_unmap_size / s->qdev.blocksize;
709 bl.max_io_sectors =
710 s->max_io_size / s->qdev.blocksize;
711 /* 255 descriptors fit in 4 KiB with an 8-byte header */
712 bl.max_unmap_descr = 255;
713
714 if (s->qdev.type == TYPE_DISK) {
715 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
716 int max_io_sectors_blk =
717 max_transfer_blk / s->qdev.blocksize;
718
719 bl.max_io_sectors =
720 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
721 }
722 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
723 break;
724 }
725 case 0xb1: /* block device characteristics */
726 {
727 buflen = 0x40;
728 outbuf[4] = (s->rotation_rate >> 8) & 0xff;
729 outbuf[5] = s->rotation_rate & 0xff;
730 outbuf[6] = 0; /* PRODUCT TYPE */
731 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
732 outbuf[8] = 0; /* VBULS */
733 break;
734 }
735 case 0xb2: /* thin provisioning */
736 {
737 buflen = 8;
738 outbuf[4] = 0;
739 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
740 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
741 outbuf[7] = 0;
742 break;
743 }
744 default:
745 return -1;
746 }
747 /* done with EVPD */
748 assert(buflen - start <= 255);
749 outbuf[start - 1] = buflen - start;
750 return buflen;
751 }
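/*
 * Example (illustrative, assuming a scsi-hd with a serial property set): the
 * "supported pages" VPD response (page 0x00) built above is
 *   00 00 00 06 00 80 83 b0 b1 b2
 * i.e. a 4-byte header whose PAGE LENGTH byte is filled in at the end
 * (buflen - start = 6), followed by the list of supported page codes.
 */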
752
753 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
754 {
755 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
756 int buflen = 0;
757
758 if (req->cmd.buf[1] & 0x1) {
759 /* Vital product data */
760 return scsi_disk_emulate_vpd_page(req, outbuf);
761 }
762
763 /* Standard INQUIRY data */
764 if (req->cmd.buf[2] != 0) {
765 return -1;
766 }
767
768 /* PAGE CODE == 0 */
769 buflen = req->cmd.xfer;
770 if (buflen > SCSI_MAX_INQUIRY_LEN) {
771 buflen = SCSI_MAX_INQUIRY_LEN;
772 }
773
774 outbuf[0] = s->qdev.type & 0x1f;
775 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
776
777 strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
778 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
779
780 memset(&outbuf[32], 0, 4);
781 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
782 /*
783 * We claim conformance to SPC-3, which is required for guests
784 * to ask for modern features like READ CAPACITY(16) or the
785 * block characteristics VPD page by default. Not all of SPC-3
786 * is actually implemented, but we're good enough.
787 */
788 outbuf[2] = s->qdev.default_scsi_version;
789 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
790
791 if (buflen > 36) {
792 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
793 } else {
794 /* If the allocation length of CDB is too small,
795 the additional length is not adjusted */
796 outbuf[4] = 36 - 5;
797 }
798
799 /* Sync data transfer and TCQ. */
800 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
801 return buflen;
802 }
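/*
 * Example (illustrative): for a standard INQUIRY with a 96-byte allocation
 * length the ADDITIONAL LENGTH byte becomes 96 - 5 = 91; with an allocation
 * length below 36 (the case mentioned in the header comment) it stays at
 * 36 - 5 = 31 rather than being shrunk to match the truncated transfer.
 */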
803
804 static inline bool media_is_dvd(SCSIDiskState *s)
805 {
806 uint64_t nb_sectors;
807 if (s->qdev.type != TYPE_ROM) {
808 return false;
809 }
810 if (!blk_is_available(s->qdev.conf.blk)) {
811 return false;
812 }
813 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
814 return nb_sectors > CD_MAX_SECTORS;
815 }
816
817 static inline bool media_is_cd(SCSIDiskState *s)
818 {
819 uint64_t nb_sectors;
820 if (s->qdev.type != TYPE_ROM) {
821 return false;
822 }
823 if (!blk_is_available(s->qdev.conf.blk)) {
824 return false;
825 }
826 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
827 return nb_sectors <= CD_MAX_SECTORS;
828 }
829
830 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
831 uint8_t *outbuf)
832 {
833 uint8_t type = r->req.cmd.buf[1] & 7;
834
835 if (s->qdev.type != TYPE_ROM) {
836 return -1;
837 }
838
839 /* Types 1/2 are only defined for Blu-Ray. */
840 if (type != 0) {
841 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
842 return -1;
843 }
844
845 memset(outbuf, 0, 34);
846 outbuf[1] = 32;
847 outbuf[2] = 0xe; /* last session complete, disc finalized */
848 outbuf[3] = 1; /* first track on disc */
849 outbuf[4] = 1; /* # of sessions */
850 outbuf[5] = 1; /* first track of last session */
851 outbuf[6] = 1; /* last track of last session */
852 outbuf[7] = 0x20; /* unrestricted use */
853 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
854 /* 9-10-11: most significant bytes of the values in bytes 4-5-6 */
855 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
856 /* 24-31: disc bar code */
857 /* 32: disc application code */
858 /* 33: number of OPC tables */
859
860 return 34;
861 }
862
863 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
864 uint8_t *outbuf)
865 {
866 static const int rds_caps_size[5] = {
867 [0] = 2048 + 4,
868 [1] = 4 + 4,
869 [3] = 188 + 4,
870 [4] = 2048 + 4,
871 };
872
873 uint8_t media = r->req.cmd.buf[1];
874 uint8_t layer = r->req.cmd.buf[6];
875 uint8_t format = r->req.cmd.buf[7];
876 int size = -1;
877
878 if (s->qdev.type != TYPE_ROM) {
879 return -1;
880 }
881 if (media != 0) {
882 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
883 return -1;
884 }
885
886 if (format != 0xff) {
887 if (!blk_is_available(s->qdev.conf.blk)) {
888 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
889 return -1;
890 }
891 if (media_is_cd(s)) {
892 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
893 return -1;
894 }
895 if (format >= ARRAY_SIZE(rds_caps_size)) {
896 return -1;
897 }
898 size = rds_caps_size[format];
899 memset(outbuf, 0, size);
900 }
901
902 switch (format) {
903 case 0x00: {
904 /* Physical format information */
905 uint64_t nb_sectors;
906 if (layer != 0) {
907 goto fail;
908 }
909 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
910
911 outbuf[4] = 1; /* DVD-ROM, part version 1 */
912 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
913 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
914 outbuf[7] = 0; /* default densities */
915
916 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
917 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
918 break;
919 }
920
921 case 0x01: /* DVD copyright information, all zeros */
922 break;
923
924 case 0x03: /* BCA information - invalid field for no BCA info */
925 return -1;
926
927 case 0x04: /* DVD disc manufacturing information, all zeros */
928 break;
929
930 case 0xff: { /* List capabilities */
931 int i;
932 size = 4;
933 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
934 if (!rds_caps_size[i]) {
935 continue;
936 }
937 outbuf[size] = i;
938 outbuf[size + 1] = 0x40; /* Not writable, readable */
939 stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
940 size += 4;
941 }
942 break;
943 }
944
945 default:
946 return -1;
947 }
948
949 /* Size of buffer, not including 2 byte size field */
950 stw_be_p(outbuf, size - 2);
951 return size;
952
953 fail:
954 return -1;
955 }
956
957 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
958 {
959 uint8_t event_code, media_status;
960
961 media_status = 0;
962 if (s->tray_open) {
963 media_status = MS_TRAY_OPEN;
964 } else if (blk_is_inserted(s->qdev.conf.blk)) {
965 media_status = MS_MEDIA_PRESENT;
966 }
967
968 /* Event notification descriptor */
969 event_code = MEC_NO_CHANGE;
970 if (media_status != MS_TRAY_OPEN) {
971 if (s->media_event) {
972 event_code = MEC_NEW_MEDIA;
973 s->media_event = false;
974 } else if (s->eject_request) {
975 event_code = MEC_EJECT_REQUESTED;
976 s->eject_request = false;
977 }
978 }
979
980 outbuf[0] = event_code;
981 outbuf[1] = media_status;
982
983 /* These fields are reserved, just clear them. */
984 outbuf[2] = 0;
985 outbuf[3] = 0;
986 return 4;
987 }
988
989 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
990 uint8_t *outbuf)
991 {
992 int size;
993 uint8_t *buf = r->req.cmd.buf;
994 uint8_t notification_class_request = buf[4];
995 if (s->qdev.type != TYPE_ROM) {
996 return -1;
997 }
998 if ((buf[1] & 1) == 0) {
999 /* asynchronous */
1000 return -1;
1001 }
1002
1003 size = 4;
1004 outbuf[0] = outbuf[1] = 0;
1005 outbuf[3] = 1 << GESN_MEDIA; /* supported events */
1006 if (notification_class_request & (1 << GESN_MEDIA)) {
1007 outbuf[2] = GESN_MEDIA;
1008 size += scsi_event_status_media(s, &outbuf[size]);
1009 } else {
1010 outbuf[2] = 0x80;
1011 }
1012 stw_be_p(outbuf, size - 4);
1013 return size;
1014 }
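/*
 * Example (illustrative): on a CD-ROM device, a polled GET EVENT STATUS
 * NOTIFICATION that requests the media class returns 8 bytes: a 4-byte header
 * whose EVENT DATA LENGTH is set to 4, followed by the 4-byte media event
 * descriptor produced by scsi_event_status_media().
 */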
1015
1016 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1017 {
1018 int current;
1019
1020 if (s->qdev.type != TYPE_ROM) {
1021 return -1;
1022 }
1023
1024 if (media_is_dvd(s)) {
1025 current = MMC_PROFILE_DVD_ROM;
1026 } else if (media_is_cd(s)) {
1027 current = MMC_PROFILE_CD_ROM;
1028 } else {
1029 current = MMC_PROFILE_NONE;
1030 }
1031
1032 memset(outbuf, 0, 40);
1033 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1034 stw_be_p(&outbuf[6], current);
1035 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1036 outbuf[10] = 0x03; /* persistent, current */
1037 outbuf[11] = 8; /* two profiles */
1038 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1039 outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1040 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1041 outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1042 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1043 stw_be_p(&outbuf[20], 1);
1044 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1045 outbuf[23] = 8;
1046 stl_be_p(&outbuf[24], 1); /* SCSI */
1047 outbuf[28] = 1; /* DBE = 1, mandatory */
1048 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1049 stw_be_p(&outbuf[32], 3);
1050 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1051 outbuf[35] = 4;
1052 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1053 /* TODO: Random readable, CD read, DVD read, drive serial number,
1054 power management */
1055 return 40;
1056 }
1057
1058 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1059 {
1060 if (s->qdev.type != TYPE_ROM) {
1061 return -1;
1062 }
1063 memset(outbuf, 0, 8);
1064 outbuf[5] = 1; /* CD-ROM */
1065 return 8;
1066 }
1067
1068 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1069 int page_control)
1070 {
1071 static const int mode_sense_valid[0x3f] = {
1072 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1073 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1074 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1075 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1076 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1077 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1078 };
1079
1080 uint8_t *p = *p_outbuf + 2;
1081 int length;
1082
1083 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1084 return -1;
1085 }
1086
1087 /*
1088 * If Changeable Values are requested, a mask denoting those mode parameters
1089 * that are changeable shall be returned. As we currently don't support
1090 * parameter changes via MODE_SELECT all bits are returned set to zero.
1091 * The buffer was already memset to zero by the caller of this function.
1092 *
1093 * The offsets here are off by two compared to the descriptions in the
1094 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1095 * but it is done so that offsets are consistent within our implementation
1096 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1097 * 2-byte and 4-byte headers.
1098 */
1099 switch (page) {
1100 case MODE_PAGE_HD_GEOMETRY:
1101 length = 0x16;
1102 if (page_control == 1) { /* Changeable Values */
1103 break;
1104 }
1105 /* if a geometry hint is available, use it */
1106 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1107 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1108 p[2] = s->qdev.conf.cyls & 0xff;
1109 p[3] = s->qdev.conf.heads & 0xff;
1110 /* Write precomp start cylinder, disabled */
1111 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1112 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1113 p[6] = s->qdev.conf.cyls & 0xff;
1114 /* Reduced current start cylinder, disabled */
1115 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1116 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1117 p[9] = s->qdev.conf.cyls & 0xff;
1118 /* Device step rate [ns], 200ns */
1119 p[10] = 0;
1120 p[11] = 200;
1121 /* Landing zone cylinder */
1122 p[12] = 0xff;
1123 p[13] = 0xff;
1124 p[14] = 0xff;
1125 /* Medium rotation rate [rpm], 5400 rpm */
1126 p[18] = (5400 >> 8) & 0xff;
1127 p[19] = 5400 & 0xff;
1128 break;
1129
1130 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1131 length = 0x1e;
1132 if (page_control == 1) { /* Changeable Values */
1133 break;
1134 }
1135 /* Transfer rate [kbit/s], 5Mbit/s */
1136 p[0] = 5000 >> 8;
1137 p[1] = 5000 & 0xff;
1138 /* if a geometry hint is available, use it */
1139 p[2] = s->qdev.conf.heads & 0xff;
1140 p[3] = s->qdev.conf.secs & 0xff;
1141 p[4] = s->qdev.blocksize >> 8;
1142 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1143 p[7] = s->qdev.conf.cyls & 0xff;
1144 /* Write precomp start cylinder, disabled */
1145 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1146 p[9] = s->qdev.conf.cyls & 0xff;
1147 /* Reduced current start cylinder, disabled */
1148 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1149 p[11] = s->qdev.conf.cyls & 0xff;
1150 /* Device step rate [100us], 100us */
1151 p[12] = 0;
1152 p[13] = 1;
1153 /* Device step pulse width [us], 1us */
1154 p[14] = 1;
1155 /* Device head settle delay [100us], 100us */
1156 p[15] = 0;
1157 p[16] = 1;
1158 /* Motor on delay [0.1s], 0.1s */
1159 p[17] = 1;
1160 /* Motor off delay [0.1s], 0.1s */
1161 p[18] = 1;
1162 /* Medium rotation rate [rpm], 5400 rpm */
1163 p[26] = (5400 >> 8) & 0xff;
1164 p[27] = 5400 & 0xff;
1165 break;
1166
1167 case MODE_PAGE_CACHING:
1168 length = 0x12;
1169 if (page_control == 1 || /* Changeable Values */
1170 blk_enable_write_cache(s->qdev.conf.blk)) {
1171 p[0] = 4; /* WCE */
1172 }
1173 break;
1174
1175 case MODE_PAGE_R_W_ERROR:
1176 length = 10;
1177 if (page_control == 1) { /* Changeable Values */
1178 break;
1179 }
1180 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1181 if (s->qdev.type == TYPE_ROM) {
1182 p[1] = 0x20; /* Read Retry Count */
1183 }
1184 break;
1185
1186 case MODE_PAGE_AUDIO_CTL:
1187 length = 14;
1188 break;
1189
1190 case MODE_PAGE_CAPABILITIES:
1191 length = 0x14;
1192 if (page_control == 1) { /* Changeable Values */
1193 break;
1194 }
1195
1196 p[0] = 0x3b; /* CD-R & CD-RW read */
1197 p[1] = 0; /* Writing not supported */
1198 p[2] = 0x7f; /* Audio, composite, digital out,
1199 mode 2 form 1&2, multi session */
1200 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1201 RW corrected, C2 errors, ISRC,
1202 UPC, Bar code */
1203 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1204 /* Locking supported, jumper present, eject, tray */
1205 p[5] = 0; /* no volume & mute control, no
1206 changer */
1207 p[6] = (50 * 176) >> 8; /* 50x read speed */
1208 p[7] = (50 * 176) & 0xff;
1209 p[8] = 2 >> 8; /* Two volume levels */
1210 p[9] = 2 & 0xff;
1211 p[10] = 2048 >> 8; /* 2M buffer */
1212 p[11] = 2048 & 0xff;
1213 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1214 p[13] = (16 * 176) & 0xff;
1215 p[16] = (16 * 176) >> 8; /* 16x write speed */
1216 p[17] = (16 * 176) & 0xff;
1217 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1218 p[19] = (16 * 176) & 0xff;
1219 break;
1220
1221 default:
1222 return -1;
1223 }
1224
1225 assert(length < 256);
1226 (*p_outbuf)[0] = page;
1227 (*p_outbuf)[1] = length;
1228 *p_outbuf += length + 2;
1229 return length + 2;
1230 }
1231
1232 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1233 {
1234 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1235 uint64_t nb_sectors;
1236 bool dbd;
1237 int page, buflen, ret, page_control;
1238 uint8_t *p;
1239 uint8_t dev_specific_param;
1240
1241 dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1242 page = r->req.cmd.buf[2] & 0x3f;
1243 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1244
1245 trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
1246 10, page, r->req.cmd.xfer, page_control);
1247 memset(outbuf, 0, r->req.cmd.xfer);
1248 p = outbuf;
1249
1250 if (s->qdev.type == TYPE_DISK) {
1251 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1252 if (blk_is_read_only(s->qdev.conf.blk)) {
1253 dev_specific_param |= 0x80; /* Readonly. */
1254 }
1255 } else {
1256 /* MMC prescribes that CD/DVD drives have no block descriptors,
1257 * and defines no device-specific parameter. */
1258 dev_specific_param = 0x00;
1259 dbd = true;
1260 }
1261
1262 if (r->req.cmd.buf[0] == MODE_SENSE) {
1263 p[1] = 0; /* Default media type. */
1264 p[2] = dev_specific_param;
1265 p[3] = 0; /* Block descriptor length. */
1266 p += 4;
1267 } else { /* MODE_SENSE_10 */
1268 p[2] = 0; /* Default media type. */
1269 p[3] = dev_specific_param;
1270 p[6] = p[7] = 0; /* Block descriptor length. */
1271 p += 8;
1272 }
1273
1274 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1275 if (!dbd && nb_sectors) {
1276 if (r->req.cmd.buf[0] == MODE_SENSE) {
1277 outbuf[3] = 8; /* Block descriptor length */
1278 } else { /* MODE_SENSE_10 */
1279 outbuf[7] = 8; /* Block descriptor length */
1280 }
1281 nb_sectors /= (s->qdev.blocksize / 512);
1282 if (nb_sectors > 0xffffff) {
1283 nb_sectors = 0;
1284 }
1285 p[0] = 0; /* media density code */
1286 p[1] = (nb_sectors >> 16) & 0xff;
1287 p[2] = (nb_sectors >> 8) & 0xff;
1288 p[3] = nb_sectors & 0xff;
1289 p[4] = 0; /* reserved */
1290 p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1291 p[6] = s->qdev.blocksize >> 8;
1292 p[7] = 0;
1293 p += 8;
1294 }
1295
1296 if (page_control == 3) {
1297 /* Saved Values */
1298 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1299 return -1;
1300 }
1301
1302 if (page == 0x3f) {
1303 for (page = 0; page <= 0x3e; page++) {
1304 mode_sense_page(s, page, &p, page_control);
1305 }
1306 } else {
1307 ret = mode_sense_page(s, page, &p, page_control);
1308 if (ret == -1) {
1309 return -1;
1310 }
1311 }
1312
1313 buflen = p - outbuf;
1314 /*
1315 * The mode data length field specifies the length in bytes of the
1316 * following data that is available to be transferred. The mode data
1317 * length does not include itself.
1318 */
1319 if (r->req.cmd.buf[0] == MODE_SENSE) {
1320 outbuf[0] = buflen - 1;
1321 } else { /* MODE_SENSE_10 */
1322 outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1323 outbuf[1] = (buflen - 2) & 0xff;
1324 }
1325 return buflen;
1326 }
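/*
 * Example (illustrative): a MODE SENSE(6) for the caching page
 * (MODE_PAGE_CACHING) on a disk with DBD=0 returns a 4-byte header, an 8-byte
 * block descriptor and a 20-byte page (0x12 + 2), 32 bytes in total, so the
 * MODE DATA LENGTH byte is set to 32 - 1 = 31.
 */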
1327
1328 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1329 {
1330 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1331 int start_track, format, msf, toclen;
1332 uint64_t nb_sectors;
1333
1334 msf = req->cmd.buf[1] & 2;
1335 format = req->cmd.buf[2] & 0xf;
1336 start_track = req->cmd.buf[6];
1337 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1338 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
1339 nb_sectors /= s->qdev.blocksize / 512;
1340 switch (format) {
1341 case 0:
1342 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1343 break;
1344 case 1:
1345 /* multi session : only a single session defined */
1346 toclen = 12;
1347 memset(outbuf, 0, 12);
1348 outbuf[1] = 0x0a;
1349 outbuf[2] = 0x01;
1350 outbuf[3] = 0x01;
1351 break;
1352 case 2:
1353 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1354 break;
1355 default:
1356 return -1;
1357 }
1358 return toclen;
1359 }
1360
1361 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1362 {
1363 SCSIRequest *req = &r->req;
1364 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1365 bool start = req->cmd.buf[4] & 1;
1366 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1367 int pwrcnd = req->cmd.buf[4] & 0xf0;
1368
1369 if (pwrcnd) {
1370 /* eject/load only happens for power condition == 0 */
1371 return 0;
1372 }
1373
1374 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1375 if (!start && !s->tray_open && s->tray_locked) {
1376 scsi_check_condition(r,
1377 blk_is_inserted(s->qdev.conf.blk)
1378 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1379 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1380 return -1;
1381 }
1382
1383 if (s->tray_open != !start) {
1384 blk_eject(s->qdev.conf.blk, !start);
1385 s->tray_open = !start;
1386 }
1387 }
1388 return 0;
1389 }
1390
1391 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1392 {
1393 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1394 int buflen = r->iov.iov_len;
1395
1396 if (buflen) {
1397 trace_scsi_disk_emulate_read_data(buflen);
1398 r->iov.iov_len = 0;
1399 r->started = true;
1400 scsi_req_data(&r->req, buflen);
1401 return;
1402 }
1403
1404 /* This also clears the sense buffer for REQUEST SENSE. */
1405 scsi_req_complete(&r->req, GOOD);
1406 }
1407
1408 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1409 uint8_t *inbuf, int inlen)
1410 {
1411 uint8_t mode_current[SCSI_MAX_MODE_LEN];
1412 uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1413 uint8_t *p;
1414 int len, expected_len, changeable_len, i;
1415
1416 /* The input buffer does not include the page header, so it is
1417 * off by 2 bytes.
1418 */
1419 expected_len = inlen + 2;
1420 if (expected_len > SCSI_MAX_MODE_LEN) {
1421 return -1;
1422 }
1423
1424 p = mode_current;
1425 memset(mode_current, 0, inlen + 2);
1426 len = mode_sense_page(s, page, &p, 0);
1427 if (len < 0 || len != expected_len) {
1428 return -1;
1429 }
1430
1431 p = mode_changeable;
1432 memset(mode_changeable, 0, inlen + 2);
1433 changeable_len = mode_sense_page(s, page, &p, 1);
1434 assert(changeable_len == len);
1435
1436 /* Check that unchangeable bits are the same as what MODE SENSE
1437 * would return.
1438 */
1439 for (i = 2; i < len; i++) {
1440 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1441 return -1;
1442 }
1443 }
1444 return 0;
1445 }
1446
1447 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1448 {
1449 switch (page) {
1450 case MODE_PAGE_CACHING:
1451 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1452 break;
1453
1454 default:
1455 break;
1456 }
1457 }
1458
1459 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1460 {
1461 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1462
1463 while (len > 0) {
1464 int page, subpage, page_len;
1465
1466 /* Parse both possible formats for the mode page headers. */
1467 page = p[0] & 0x3f;
1468 if (p[0] & 0x40) {
1469 if (len < 4) {
1470 goto invalid_param_len;
1471 }
1472 subpage = p[1];
1473 page_len = lduw_be_p(&p[2]);
1474 p += 4;
1475 len -= 4;
1476 } else {
1477 if (len < 2) {
1478 goto invalid_param_len;
1479 }
1480 subpage = 0;
1481 page_len = p[1];
1482 p += 2;
1483 len -= 2;
1484 }
1485
1486 if (subpage) {
1487 goto invalid_param;
1488 }
1489 if (page_len > len) {
1490 goto invalid_param_len;
1491 }
1492
1493 if (!change) {
1494 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1495 goto invalid_param;
1496 }
1497 } else {
1498 scsi_disk_apply_mode_select(s, page, p);
1499 }
1500
1501 p += page_len;
1502 len -= page_len;
1503 }
1504 return 0;
1505
1506 invalid_param:
1507 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1508 return -1;
1509
1510 invalid_param_len:
1511 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1512 return -1;
1513 }
1514
1515 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1516 {
1517 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1518 uint8_t *p = inbuf;
1519 int cmd = r->req.cmd.buf[0];
1520 int len = r->req.cmd.xfer;
1521 int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1522 int bd_len;
1523 int pass;
1524
1525 /* We only support PF=1, SP=0. */
1526 if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1527 goto invalid_field;
1528 }
1529
1530 if (len < hdr_len) {
1531 goto invalid_param_len;
1532 }
1533
1534 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1535 len -= hdr_len;
1536 p += hdr_len;
1537 if (len < bd_len) {
1538 goto invalid_param_len;
1539 }
1540 if (bd_len != 0 && bd_len != 8) {
1541 goto invalid_param;
1542 }
1543
1544 len -= bd_len;
1545 p += bd_len;
1546
1547 /* Ensure no change is made if there is an error! */
1548 for (pass = 0; pass < 2; pass++) {
1549 if (mode_select_pages(r, p, len, pass == 1) < 0) {
1550 assert(pass == 0);
1551 return;
1552 }
1553 }
1554 if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1555 /* The request is used as the AIO opaque value, so add a ref. */
1556 scsi_req_ref(&r->req);
1557 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1558 BLOCK_ACCT_FLUSH);
1559 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1560 return;
1561 }
1562
1563 scsi_req_complete(&r->req, GOOD);
1564 return;
1565
1566 invalid_param:
1567 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1568 return;
1569
1570 invalid_param_len:
1571 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1572 return;
1573
1574 invalid_field:
1575 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1576 }
1577
1578 static inline bool check_lba_range(SCSIDiskState *s,
1579 uint64_t sector_num, uint32_t nb_sectors)
1580 {
1581 /*
1582 * The first line tests that no overflow happens when computing the last
1583 * sector. The second line tests that the last accessed sector is in
1584 * range.
1585 *
1586 * Careful, the computations should not underflow for nb_sectors == 0,
1587 * and a 0-block read to the first LBA beyond the end of device is
1588 * valid.
1589 */
1590 return (sector_num <= sector_num + nb_sectors &&
1591 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1592 }
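/*
 * Example (illustrative): on a device whose last LBA is 99 (max_lba == 99),
 * sector_num = 100 with nb_sectors = 0 is accepted (100 <= 100), while
 * sector_num = 99 with nb_sectors = 2 is rejected (101 > 100). A request with
 * sector_num = UINT64_MAX and nb_sectors = 1 wraps around and is caught by
 * the first comparison.
 */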
1593
1594 typedef struct UnmapCBData {
1595 SCSIDiskReq *r;
1596 uint8_t *inbuf;
1597 int count;
1598 } UnmapCBData;
1599
1600 static void scsi_unmap_complete(void *opaque, int ret);
1601
1602 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1603 {
1604 SCSIDiskReq *r = data->r;
1605 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1606 uint64_t sector_num;
1607 uint32_t nb_sectors;
1608
1609 assert(r->req.aiocb == NULL);
1610 if (scsi_disk_req_check_error(r, ret, false)) {
1611 goto done;
1612 }
1613
1614 if (data->count > 0) {
1615 sector_num = ldq_be_p(&data->inbuf[0]);
1616 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
1617 if (!check_lba_range(s, sector_num, nb_sectors)) {
1618 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1619 goto done;
1620 }
1621
1622 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
1623 sector_num * s->qdev.blocksize,
1624 nb_sectors * s->qdev.blocksize,
1625 scsi_unmap_complete, data);
1626 data->count--;
1627 data->inbuf += 16;
1628 return;
1629 }
1630
1631 scsi_req_complete(&r->req, GOOD);
1632
1633 done:
1634 scsi_req_unref(&r->req);
1635 g_free(data);
1636 }
1637
1638 static void scsi_unmap_complete(void *opaque, int ret)
1639 {
1640 UnmapCBData *data = opaque;
1641 SCSIDiskReq *r = data->r;
1642 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1643
1644 assert(r->req.aiocb != NULL);
1645 r->req.aiocb = NULL;
1646
1647 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1648 scsi_unmap_complete_noio(data, ret);
1649 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1650 }
1651
1652 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1653 {
1654 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1655 uint8_t *p = inbuf;
1656 int len = r->req.cmd.xfer;
1657 UnmapCBData *data;
1658
1659 /* Reject ANCHOR=1. */
1660 if (r->req.cmd.buf[1] & 0x1) {
1661 goto invalid_field;
1662 }
1663
1664 if (len < 8) {
1665 goto invalid_param_len;
1666 }
1667 if (len < lduw_be_p(&p[0]) + 2) {
1668 goto invalid_param_len;
1669 }
1670 if (len < lduw_be_p(&p[2]) + 8) {
1671 goto invalid_param_len;
1672 }
1673 if (lduw_be_p(&p[2]) & 15) {
1674 goto invalid_param_len;
1675 }
1676
1677 if (blk_is_read_only(s->qdev.conf.blk)) {
1678 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1679 return;
1680 }
1681
1682 data = g_new0(UnmapCBData, 1);
1683 data->r = r;
1684 data->inbuf = &p[8];
1685 data->count = lduw_be_p(&p[2]) >> 4;
1686
1687 /* The matching unref is in scsi_unmap_complete_noio, before data is freed. */
1688 scsi_req_ref(&r->req);
1689 scsi_unmap_complete_noio(data, 0);
1690 return;
1691
1692 invalid_param_len:
1693 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1694 return;
1695
1696 invalid_field:
1697 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1698 }
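/*
 * Example (illustrative, per the SBC UNMAP parameter list format): a
 * single-descriptor list is 24 bytes, with UNMAP DATA LENGTH = 22 in bytes 0-1,
 * BLOCK DESCRIPTOR DATA LENGTH = 16 in bytes 2-3, and one 16-byte descriptor
 * (8-byte LBA, 4-byte block count, 4 reserved bytes) starting at byte 8.
 * That satisfies all of the length checks above and yields data->count = 1.
 */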
1699
1700 typedef struct WriteSameCBData {
1701 SCSIDiskReq *r;
1702 int64_t sector;
1703 int nb_sectors;
1704 QEMUIOVector qiov;
1705 struct iovec iov;
1706 } WriteSameCBData;
1707
1708 static void scsi_write_same_complete(void *opaque, int ret)
1709 {
1710 WriteSameCBData *data = opaque;
1711 SCSIDiskReq *r = data->r;
1712 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1713
1714 assert(r->req.aiocb != NULL);
1715 r->req.aiocb = NULL;
1716 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1717 if (scsi_disk_req_check_error(r, ret, true)) {
1718 goto done;
1719 }
1720
1721 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1722
1723 data->nb_sectors -= data->iov.iov_len / 512;
1724 data->sector += data->iov.iov_len / 512;
1725 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
1726 if (data->iov.iov_len) {
1727 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1728 data->iov.iov_len, BLOCK_ACCT_WRITE);
1729 /* Reinitialize qiov to handle an unaligned WRITE SAME request,
1730 * where the final qiov may need a smaller size. */
1731 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1732 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1733 data->sector << BDRV_SECTOR_BITS,
1734 &data->qiov, 0,
1735 scsi_write_same_complete, data);
1736 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1737 return;
1738 }
1739
1740 scsi_req_complete(&r->req, GOOD);
1741
1742 done:
1743 scsi_req_unref(&r->req);
1744 qemu_vfree(data->iov.iov_base);
1745 g_free(data);
1746 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1747 }
1748
1749 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1750 {
1751 SCSIRequest *req = &r->req;
1752 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1753 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1754 WriteSameCBData *data;
1755 uint8_t *buf;
1756 int i;
1757
1758 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1759 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1760 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1761 return;
1762 }
1763
1764 if (blk_is_read_only(s->qdev.conf.blk)) {
1765 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1766 return;
1767 }
1768 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1769 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1770 return;
1771 }
1772
1773 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
1774 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1775
1776 /* The request is used as the AIO opaque value, so add a ref. */
1777 scsi_req_ref(&r->req);
1778 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1779 nb_sectors * s->qdev.blocksize,
1780 BLOCK_ACCT_WRITE);
1781 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1782 r->req.cmd.lba * s->qdev.blocksize,
1783 nb_sectors * s->qdev.blocksize,
1784 flags, scsi_aio_complete, r);
1785 return;
1786 }
1787
1788 data = g_new0(WriteSameCBData, 1);
1789 data->r = r;
1790 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
1791 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
1792 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
1793 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1794 data->iov.iov_len);
1795 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1796
1797 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
1798 memcpy(&buf[i], inbuf, s->qdev.blocksize);
1799 }
1800
1801 scsi_req_ref(&r->req);
1802 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1803 data->iov.iov_len, BLOCK_ACCT_WRITE);
1804 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1805 data->sector << BDRV_SECTOR_BITS,
1806 &data->qiov, 0,
1807 scsi_write_same_complete, data);
1808 }
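/*
 * Example (illustrative): a WRITE SAME(10) covering 4096 blocks of 512 bytes
 * with a non-zero pattern allocates a MIN(4096 * 512, SCSI_WRITE_SAME_MAX) =
 * 512 KiB bounce buffer filled with the pattern, so the region is written in
 * four 1024-block passes driven by scsi_write_same_complete().
 */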
1809
1810 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1811 {
1812 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1813
1814 if (r->iov.iov_len) {
1815 int buflen = r->iov.iov_len;
1816 trace_scsi_disk_emulate_write_data(buflen);
1817 r->iov.iov_len = 0;
1818 scsi_req_data(&r->req, buflen);
1819 return;
1820 }
1821
1822 switch (req->cmd.buf[0]) {
1823 case MODE_SELECT:
1824 case MODE_SELECT_10:
1825 /* This also clears the sense buffer for REQUEST SENSE. */
1826 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1827 break;
1828
1829 case UNMAP:
1830 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1831 break;
1832
1833 case VERIFY_10:
1834 case VERIFY_12:
1835 case VERIFY_16:
1836 if (r->req.status == -1) {
1837 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1838 }
1839 break;
1840
1841 case WRITE_SAME_10:
1842 case WRITE_SAME_16:
1843 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1844 break;
1845
1846 default:
1847 abort();
1848 }
1849 }
1850
1851 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1852 {
1853 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1854 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1855 uint64_t nb_sectors;
1856 uint8_t *outbuf;
1857 int buflen;
1858
1859 switch (req->cmd.buf[0]) {
1860 case INQUIRY:
1861 case MODE_SENSE:
1862 case MODE_SENSE_10:
1863 case RESERVE:
1864 case RESERVE_10:
1865 case RELEASE:
1866 case RELEASE_10:
1867 case START_STOP:
1868 case ALLOW_MEDIUM_REMOVAL:
1869 case GET_CONFIGURATION:
1870 case GET_EVENT_STATUS_NOTIFICATION:
1871 case MECHANISM_STATUS:
1872 case REQUEST_SENSE:
1873 break;
1874
1875 default:
1876 if (!blk_is_available(s->qdev.conf.blk)) {
1877 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1878 return 0;
1879 }
1880 break;
1881 }
1882
1883 /*
1884 * FIXME: we shouldn't return anything bigger than 4k, but the code
1885 * requires the buffer to be as big as req->cmd.xfer in several
1886 * places. So, do not allow CDBs with a very large ALLOCATION
1887 * LENGTH. The real fix would be to modify scsi_read_data and
1888 * dma_buf_read, so that they return data beyond the buflen
1889 * as all zeros.
1890 */
1891 if (req->cmd.xfer > 65536) {
1892 goto illegal_request;
1893 }
1894 r->buflen = MAX(4096, req->cmd.xfer);
1895
1896 if (!r->iov.iov_base) {
1897 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
1898 }
1899
1900 buflen = req->cmd.xfer;
1901 outbuf = r->iov.iov_base;
1902 memset(outbuf, 0, r->buflen);
1903 switch (req->cmd.buf[0]) {
1904 case TEST_UNIT_READY:
1905 assert(blk_is_available(s->qdev.conf.blk));
1906 break;
1907 case INQUIRY:
1908 buflen = scsi_disk_emulate_inquiry(req, outbuf);
1909 if (buflen < 0) {
1910 goto illegal_request;
1911 }
1912 break;
1913 case MODE_SENSE:
1914 case MODE_SENSE_10:
1915 buflen = scsi_disk_emulate_mode_sense(r, outbuf);
1916 if (buflen < 0) {
1917 goto illegal_request;
1918 }
1919 break;
1920 case READ_TOC:
1921 buflen = scsi_disk_emulate_read_toc(req, outbuf);
1922 if (buflen < 0) {
1923 goto illegal_request;
1924 }
1925 break;
1926 case RESERVE:
1927 if (req->cmd.buf[1] & 1) {
1928 goto illegal_request;
1929 }
1930 break;
1931 case RESERVE_10:
1932 if (req->cmd.buf[1] & 3) {
1933 goto illegal_request;
1934 }
1935 break;
1936 case RELEASE:
1937 if (req->cmd.buf[1] & 1) {
1938 goto illegal_request;
1939 }
1940 break;
1941 case RELEASE_10:
1942 if (req->cmd.buf[1] & 3) {
1943 goto illegal_request;
1944 }
1945 break;
1946 case START_STOP:
1947 if (scsi_disk_emulate_start_stop(r) < 0) {
1948 return 0;
1949 }
1950 break;
1951 case ALLOW_MEDIUM_REMOVAL:
1952 s->tray_locked = req->cmd.buf[4] & 1;
1953 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
1954 break;
1955 case READ_CAPACITY_10:
1956 /* The normal LEN field for this command is zero. */
1957 memset(outbuf, 0, 8);
1958 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1959 if (!nb_sectors) {
1960 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
1961 return 0;
1962 }
1963 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
1964 goto illegal_request;
1965 }
1966 nb_sectors /= s->qdev.blocksize / 512;
1967 /* Returned value is the address of the last sector. */
1968 nb_sectors--;
1969 /* Remember the new size for read/write sanity checking. */
1970 s->qdev.max_lba = nb_sectors;
1971 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
1972 if (nb_sectors > UINT32_MAX) {
1973 nb_sectors = UINT32_MAX;
1974 }
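/* READ CAPACITY(10) parameter data: bytes 0-3 hold the last LBA and
* bytes 4-7 the logical block length in bytes, both big-endian.
*/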
1975 outbuf[0] = (nb_sectors >> 24) & 0xff;
1976 outbuf[1] = (nb_sectors >> 16) & 0xff;
1977 outbuf[2] = (nb_sectors >> 8) & 0xff;
1978 outbuf[3] = nb_sectors & 0xff;
1979 outbuf[4] = 0;
1980 outbuf[5] = 0;
1981 outbuf[6] = s->qdev.blocksize >> 8;
1982 outbuf[7] = 0;
1983 break;
1984 case REQUEST_SENSE:
1985 /* Just return "NO SENSE". */
1986 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
1987 (req->cmd.buf[1] & 1) == 0);
1988 if (buflen < 0) {
1989 goto illegal_request;
1990 }
1991 break;
1992 case MECHANISM_STATUS:
1993 buflen = scsi_emulate_mechanism_status(s, outbuf);
1994 if (buflen < 0) {
1995 goto illegal_request;
1996 }
1997 break;
1998 case GET_CONFIGURATION:
1999 buflen = scsi_get_configuration(s, outbuf);
2000 if (buflen < 0) {
2001 goto illegal_request;
2002 }
2003 break;
2004 case GET_EVENT_STATUS_NOTIFICATION:
2005 buflen = scsi_get_event_status_notification(s, r, outbuf);
2006 if (buflen < 0) {
2007 goto illegal_request;
2008 }
2009 break;
2010 case READ_DISC_INFORMATION:
2011 buflen = scsi_read_disc_information(s, r, outbuf);
2012 if (buflen < 0) {
2013 goto illegal_request;
2014 }
2015 break;
2016 case READ_DVD_STRUCTURE:
2017 buflen = scsi_read_dvd_structure(s, r, outbuf);
2018 if (buflen < 0) {
2019 goto illegal_request;
2020 }
2021 break;
2022 case SERVICE_ACTION_IN_16:
2023 /* Service Action In subcommands. */
2024 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2025 trace_scsi_disk_emulate_command_SAI_16();
2026 memset(outbuf, 0, req->cmd.xfer);
2027 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2028 if (!nb_sectors) {
2029 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2030 return 0;
2031 }
2032 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2033 goto illegal_request;
2034 }
2035 nb_sectors /= s->qdev.blocksize / 512;
2036 /* Returned value is the address of the last sector. */
2037 nb_sectors--;
2038 /* Remember the new size for read/write sanity checking. */
2039 s->qdev.max_lba = nb_sectors;
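/* READ CAPACITY(16) parameter data: bytes 0-7 hold the last LBA and
* bytes 8-11 the logical block length in bytes (both big-endian);
* byte 13 is the logical-blocks-per-physical-block exponent and
* bit 7 of byte 14 the thin-provisioning flag set below.
*/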
2040 outbuf[0] = (nb_sectors >> 56) & 0xff;
2041 outbuf[1] = (nb_sectors >> 48) & 0xff;
2042 outbuf[2] = (nb_sectors >> 40) & 0xff;
2043 outbuf[3] = (nb_sectors >> 32) & 0xff;
2044 outbuf[4] = (nb_sectors >> 24) & 0xff;
2045 outbuf[5] = (nb_sectors >> 16) & 0xff;
2046 outbuf[6] = (nb_sectors >> 8) & 0xff;
2047 outbuf[7] = nb_sectors & 0xff;
2048 outbuf[8] = 0;
2049 outbuf[9] = 0;
2050 outbuf[10] = s->qdev.blocksize >> 8;
2051 outbuf[11] = 0;
2052 outbuf[12] = 0;
2053 outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2054
2055 /* set TPE bit if the format supports discard */
2056 if (s->qdev.conf.discard_granularity) {
2057 outbuf[14] = 0x80;
2058 }
2059
2060 /* Protection, exponent and lowest LBA fields are left blank. */
2061 break;
2062 }
2063 trace_scsi_disk_emulate_command_SAI_unsupported();
2064 goto illegal_request;
2065 case SYNCHRONIZE_CACHE:
2066 /* The request is used as the AIO opaque value, so add a ref. */
2067 scsi_req_ref(&r->req);
2068 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2069 BLOCK_ACCT_FLUSH);
2070 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2071 return 0;
2072 case SEEK_10:
2073 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
2074 if (r->req.cmd.lba > s->qdev.max_lba) {
2075 goto illegal_lba;
2076 }
2077 break;
2078 case MODE_SELECT:
2079 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
2080 break;
2081 case MODE_SELECT_10:
2082 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
2083 break;
2084 case UNMAP:
2085 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
2086 break;
2087 case VERIFY_10:
2088 case VERIFY_12:
2089 case VERIFY_16:
2090 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
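/* Only BYTCHK == 00b (verify the medium without a data comparison) is
* emulated; any other BYTCHK value is rejected as an illegal request.
*/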
2091 if (req->cmd.buf[1] & 6) {
2092 goto illegal_request;
2093 }
2094 break;
2095 case WRITE_SAME_10:
2096 case WRITE_SAME_16:
2097 trace_scsi_disk_emulate_command_WRITE_SAME(
2098 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
2099 break;
2100 default:
2101 trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
2102 scsi_command_name(buf[0]));
2103 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2104 return 0;
2105 }
2106 assert(!r->req.aiocb);
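/* Clip the emulated payload to the transfer length requested by the guest. */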
2107 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2108 if (r->iov.iov_len == 0) {
2109 scsi_req_complete(&r->req, GOOD);
2110 }
2111 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2112 assert(r->iov.iov_len == req->cmd.xfer);
2113 return -r->iov.iov_len;
2114 } else {
2115 return r->iov.iov_len;
2116 }
2117
2118 illegal_request:
2119 if (r->req.status == -1) {
2120 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2121 }
2122 return 0;
2123
2124 illegal_lba:
2125 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2126 return 0;
2127 }
2128
2129 /* Execute a SCSI command.  Returns the length of the data expected by the
2130 command.  This will be positive for data transfers from the device
2131 (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
2132 and zero if the command does not transfer any data.  */
2133
2134 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2135 {
2136 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2137 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2138 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2139 uint32_t len;
2140 uint8_t command;
2141
2142 command = buf[0];
2143
2144 if (!blk_is_available(s->qdev.conf.blk)) {
2145 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2146 return 0;
2147 }
2148
2149 len = scsi_data_cdb_xfer(r->req.cmd.buf);
2150 switch (command) {
2151 case READ_6:
2152 case READ_10:
2153 case READ_12:
2154 case READ_16:
2155 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
2156 /* Protection information is not supported. For SCSI versions 2 and
2157 * older (as determined by snooping the guest's INQUIRY commands),
2158 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2159 */
2160 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2161 goto illegal_request;
2162 }
2163 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2164 goto illegal_lba;
2165 }
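/* sector and sector_count are kept in 512-byte units, so scale them
* from the device's logical block size.
*/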
2166 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2167 r->sector_count = len * (s->qdev.blocksize / 512);
2168 break;
2169 case WRITE_6:
2170 case WRITE_10:
2171 case WRITE_12:
2172 case WRITE_16:
2173 case WRITE_VERIFY_10:
2174 case WRITE_VERIFY_12:
2175 case WRITE_VERIFY_16:
2176 if (blk_is_read_only(s->qdev.conf.blk)) {
2177 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
2178 return 0;
2179 }
2180 trace_scsi_disk_dma_command_WRITE(
2181 (command & 0xe) == 0xe ? "And Verify " : "",
2182 r->req.cmd.lba, len);
2183 /* fall through */
2184 case VERIFY_10:
2185 case VERIFY_12:
2186 case VERIFY_16:
2187 /* We get here only for BYTCHK == 0x01 and only for scsi-block.
2188 * As far as DMA is concerned, we can treat it the same as a write;
2189 * scsi_block_do_sgio will send VERIFY commands.
2190 */
2191 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2192 goto illegal_request;
2193 }
2194 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2195 goto illegal_lba;
2196 }
2197 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2198 r->sector_count = len * (s->qdev.blocksize / 512);
2199 break;
2200 default:
2201 abort();
2202 illegal_request:
2203 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2204 return 0;
2205 illegal_lba:
2206 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2207 return 0;
2208 }
2209 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
2210 if (r->sector_count == 0) {
2211 scsi_req_complete(&r->req, GOOD);
2212 }
2213 assert(r->iov.iov_len == 0);
2214 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2215 return -r->sector_count * 512;
2216 } else {
2217 return r->sector_count * 512;
2218 }
2219 }
2220
2221 static void scsi_disk_reset(DeviceState *dev)
2222 {
2223 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2224 uint64_t nb_sectors;
2225
2226 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2227
2228 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2229 nb_sectors /= s->qdev.blocksize / 512;
2230 if (nb_sectors) {
2231 nb_sectors--;
2232 }
2233 s->qdev.max_lba = nb_sectors;
2234 /* reset tray statuses */
2235 s->tray_locked = 0;
2236 s->tray_open = 0;
2237
2238 s->qdev.scsi_version = s->qdev.default_scsi_version;
2239 }
2240
2241 static void scsi_disk_resize_cb(void *opaque)
2242 {
2243 SCSIDiskState *s = opaque;
2244
2245 /* SPC lists this sense code as available only for
2246 * direct-access devices.
2247 */
2248 if (s->qdev.type == TYPE_DISK) {
2249 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2250 }
2251 }
2252
2253 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
2254 {
2255 SCSIDiskState *s = opaque;
2256
2257 /*
2258 * When a CD gets changed, we have to report an ejected state and
2259 * then a loaded state to guests so that they detect tray
2260 * open/close and media change events. Guests that do not use
2261 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2262 * states rely on this behavior.
2263 *
2264 * media_changed governs the state machine used for unit attention
2265 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2266 */
2267 s->media_changed = load;
2268 s->tray_open = !load;
2269 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2270 s->media_event = true;
2271 s->eject_request = false;
2272 }
2273
2274 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2275 {
2276 SCSIDiskState *s = opaque;
2277
2278 s->eject_request = true;
2279 if (force) {
2280 s->tray_locked = false;
2281 }
2282 }
2283
2284 static bool scsi_cd_is_tray_open(void *opaque)
2285 {
2286 return ((SCSIDiskState *)opaque)->tray_open;
2287 }
2288
2289 static bool scsi_cd_is_medium_locked(void *opaque)
2290 {
2291 return ((SCSIDiskState *)opaque)->tray_locked;
2292 }
2293
2294 static const BlockDevOps scsi_disk_removable_block_ops = {
2295 .change_media_cb = scsi_cd_change_media_cb,
2296 .eject_request_cb = scsi_cd_eject_request_cb,
2297 .is_tray_open = scsi_cd_is_tray_open,
2298 .is_medium_locked = scsi_cd_is_medium_locked,
2299
2300 .resize_cb = scsi_disk_resize_cb,
2301 };
2302
2303 static const BlockDevOps scsi_disk_block_ops = {
2304 .resize_cb = scsi_disk_resize_cb,
2305 };
2306
2307 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2308 {
2309 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2310 if (s->media_changed) {
2311 s->media_changed = false;
2312 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2313 }
2314 }
2315
2316 static void scsi_realize(SCSIDevice *dev, Error **errp)
2317 {
2318 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2319
2320 if (!s->qdev.conf.blk) {
2321 error_setg(errp, "drive property not set");
2322 return;
2323 }
2324
2325 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2326 !blk_is_inserted(s->qdev.conf.blk)) {
2327 error_setg(errp, "Device needs media, but drive is empty");
2328 return;
2329 }
2330
2331 blkconf_blocksizes(&s->qdev.conf);
2332
2333 if (s->qdev.conf.logical_block_size >
2334 s->qdev.conf.physical_block_size) {
2335 error_setg(errp,
2336 "logical_block_size > physical_block_size not supported");
2337 return;
2338 }
2339
2340 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
2341 !s->qdev.hba_supports_iothread)
2342 {
2343 error_setg(errp, "HBA does not support iothreads");
2344 return;
2345 }
2346
2347 if (dev->type == TYPE_DISK) {
2348 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
2349 return;
2350 }
2351 }
2352 if (!blkconf_apply_backend_options(&dev->conf,
2353 blk_is_read_only(s->qdev.conf.blk),
2354 dev->type == TYPE_DISK, errp)) {
2355 return;
2356 }
2357
2358 if (s->qdev.conf.discard_granularity == -1) {
2359 s->qdev.conf.discard_granularity =
2360 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
2361 }
2362
2363 if (!s->version) {
2364 s->version = g_strdup(qemu_hw_version());
2365 }
2366 if (!s->vendor) {
2367 s->vendor = g_strdup("QEMU");
2368 }
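/* Derive a default device_id from the serial number (truncated to 20
* characters) or, failing that, from the backend's name.
*/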
2369 if (!s->device_id) {
2370 if (s->serial) {
2371 s->device_id = g_strdup_printf("%.20s", s->serial);
2372 } else {
2373 const char *str = blk_name(s->qdev.conf.blk);
2374 if (str && *str) {
2375 s->device_id = g_strdup(str);
2376 }
2377 }
2378 }
2379
2380 if (blk_is_sg(s->qdev.conf.blk)) {
2381 error_setg(errp, "unwanted /dev/sg*");
2382 return;
2383 }
2384
2385 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2386 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
2387 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
2388 } else {
2389 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
2390 }
2391 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);
2392
2393 blk_iostatus_enable(s->qdev.conf.blk);
2394 }
2395
2396 static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
2397 {
2398 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2399 AioContext *ctx = NULL;
2400 /* This can happen for devices without a drive. The error message for the
2401 * missing backend will be issued in scsi_realize().
2402 */
2403 if (s->qdev.conf.blk) {
2404 ctx = blk_get_aio_context(s->qdev.conf.blk);
2405 aio_context_acquire(ctx);
2406 blkconf_blocksizes(&s->qdev.conf);
2407 }
2408 s->qdev.blocksize = s->qdev.conf.logical_block_size;
2409 s->qdev.type = TYPE_DISK;
2410 if (!s->product) {
2411 s->product = g_strdup("QEMU HARDDISK");
2412 }
2413 scsi_realize(&s->qdev, errp);
2414 if (ctx) {
2415 aio_context_release(ctx);
2416 }
2417 }
2418
2419 static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
2420 {
2421 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2422 AioContext *ctx;
2423 int ret;
2424
2425 if (!dev->conf.blk) {
2426 /* Anonymous BlockBackend for an empty drive. As we put it into
2427 * dev->conf, qdev takes care of detaching on unplug. */
2428 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
2429 ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
2430 assert(ret == 0);
2431 }
2432
2433 ctx = blk_get_aio_context(dev->conf.blk);
2434 aio_context_acquire(ctx);
2435 s->qdev.blocksize = 2048;
2436 s->qdev.type = TYPE_ROM;
2437 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2438 if (!s->product) {
2439 s->product = g_strdup("QEMU CD-ROM");
2440 }
2441 scsi_realize(&s->qdev, errp);
2442 aio_context_release(ctx);
2443 }
2444
2445 static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
2446 {
2447 DriveInfo *dinfo;
2448 Error *local_err = NULL;
2449
2450 if (!dev->conf.blk) {
2451 scsi_realize(dev, &local_err);
2452 assert(local_err);
2453 error_propagate(errp, local_err);
2454 return;
2455 }
2456
2457 dinfo = blk_legacy_dinfo(dev->conf.blk);
2458 if (dinfo && dinfo->media_cd) {
2459 scsi_cd_realize(dev, errp);
2460 } else {
2461 scsi_hd_realize(dev, errp);
2462 }
2463 }
2464
2465 static const SCSIReqOps scsi_disk_emulate_reqops = {
2466 .size = sizeof(SCSIDiskReq),
2467 .free_req = scsi_free_request,
2468 .send_command = scsi_disk_emulate_command,
2469 .read_data = scsi_disk_emulate_read_data,
2470 .write_data = scsi_disk_emulate_write_data,
2471 .get_buf = scsi_get_buf,
2472 };
2473
2474 static const SCSIReqOps scsi_disk_dma_reqops = {
2475 .size = sizeof(SCSIDiskReq),
2476 .free_req = scsi_free_request,
2477 .send_command = scsi_disk_dma_command,
2478 .read_data = scsi_read_data,
2479 .write_data = scsi_write_data,
2480 .get_buf = scsi_get_buf,
2481 .load_request = scsi_disk_load_request,
2482 .save_request = scsi_disk_save_request,
2483 };
2484
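/* Per-opcode dispatch table. Opcodes without an entry fall back to
* scsi_disk_emulate_reqops in scsi_new_request().
*/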
2485 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
2486 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
2487 [INQUIRY] = &scsi_disk_emulate_reqops,
2488 [MODE_SENSE] = &scsi_disk_emulate_reqops,
2489 [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
2490 [START_STOP] = &scsi_disk_emulate_reqops,
2491 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
2492 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
2493 [READ_TOC] = &scsi_disk_emulate_reqops,
2494 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
2495 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
2496 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
2497 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
2498 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
2499 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
2500 [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
2501 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
2502 [SEEK_10] = &scsi_disk_emulate_reqops,
2503 [MODE_SELECT] = &scsi_disk_emulate_reqops,
2504 [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
2505 [UNMAP] = &scsi_disk_emulate_reqops,
2506 [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
2507 [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
2508 [VERIFY_10] = &scsi_disk_emulate_reqops,
2509 [VERIFY_12] = &scsi_disk_emulate_reqops,
2510 [VERIFY_16] = &scsi_disk_emulate_reqops,
2511
2512 [READ_6] = &scsi_disk_dma_reqops,
2513 [READ_10] = &scsi_disk_dma_reqops,
2514 [READ_12] = &scsi_disk_dma_reqops,
2515 [READ_16] = &scsi_disk_dma_reqops,
2516 [WRITE_6] = &scsi_disk_dma_reqops,
2517 [WRITE_10] = &scsi_disk_dma_reqops,
2518 [WRITE_12] = &scsi_disk_dma_reqops,
2519 [WRITE_16] = &scsi_disk_dma_reqops,
2520 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
2521 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
2522 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
2523 };
2524
2525 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
2526 {
2527 int i;
2528 int len = scsi_cdb_length(buf);
2529 char *line_buffer, *p;
2530
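/* Each CDB byte is rendered as " 0xNN" (5 characters), plus a NUL. */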
2531 line_buffer = g_malloc(len * 5 + 1);
2532
2533 for (i = 0, p = line_buffer; i < len; i++) {
2534 p += sprintf(p, " 0x%02x", buf[i]);
2535 }
2536 trace_scsi_disk_new_request(lun, tag, line_buffer);
2537
2538 g_free(line_buffer);
2539 }
2540
2541 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2542 uint8_t *buf, void *hba_private)
2543 {
2544 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2545 SCSIRequest *req;
2546 const SCSIReqOps *ops;
2547 uint8_t command;
2548
2549 command = buf[0];
2550 ops = scsi_disk_reqops_dispatch[command];
2551 if (!ops) {
2552 ops = &scsi_disk_emulate_reqops;
2553 }
2554 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2555
2556 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
2557 scsi_disk_new_request_dump(lun, tag, buf);
2558 }
2559
2560 return req;
2561 }
2562
2563 #ifdef __linux__
2564 static int get_device_type(SCSIDiskState *s)
2565 {
2566 uint8_t cmd[16];
2567 uint8_t buf[36];
2568 int ret;
2569
2570 memset(cmd, 0, sizeof(cmd));
2571 memset(buf, 0, sizeof(buf));
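/* Standard INQUIRY CDB: opcode in byte 0, allocation length in byte 4.
* Byte 0 of the response carries the peripheral device type and bit 7
* of byte 1 the RMB (removable medium) flag, both decoded below.
*/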
2572 cmd[0] = INQUIRY;
2573 cmd[4] = sizeof(buf);
2574
2575 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
2576 buf, sizeof(buf));
2577 if (ret < 0) {
2578 return -1;
2579 }
2580 s->qdev.type = buf[0];
2581 if (buf[1] & 0x80) {
2582 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2583 }
2584 return 0;
2585 }
2586
2587 static void scsi_block_realize(SCSIDevice *dev, Error **errp)
2588 {
2589 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2590 AioContext *ctx;
2591 int sg_version;
2592 int rc;
2593
2594 if (!s->qdev.conf.blk) {
2595 error_setg(errp, "drive property not set");
2596 return;
2597 }
2598
2599 if (s->rotation_rate) {
2600 error_report_once("rotation_rate is specified for scsi-block but is "
2601 "not implemented. This option is deprecated and will "
2602 "be removed in a future version");
2603 }
2604
2605 ctx = blk_get_aio_context(s->qdev.conf.blk);
2606 aio_context_acquire(ctx);
2607
2608 /* check that we are using a driver that supports SG_IO (version 3 and later) */
2609 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2610 if (rc < 0) {
2611 error_setg_errno(errp, -rc, "cannot get SG_IO version number");
2612 if (rc != -EPERM) {
2613 error_append_hint(errp, "Is this a SCSI device?\n");
2614 }
2615 goto out;
2616 }
2617 if (sg_version < 30000) {
2618 error_setg(errp, "scsi generic interface too old");
2619 goto out;
2620 }
2621
2622 /* get device type from INQUIRY data */
2623 rc = get_device_type(s);
2624 if (rc < 0) {
2625 error_setg(errp, "INQUIRY failed");
2626 goto out;
2627 }
2628
2629 /* Make a guess for the block size; we'll fix it when the guest sends
2630 * READ CAPACITY. If it doesn't, it would likely assume these sizes
2631 * anyway. (TODO: check in /sys).
2632 */
2633 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2634 s->qdev.blocksize = 2048;
2635 } else {
2636 s->qdev.blocksize = 512;
2637 }
2638
2639 /* Mark the scsi-block device as not removable, so that it cannot be
2640 * ejected with the HMP and QMP eject commands.
2641 */
2642 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2643
2644 scsi_realize(&s->qdev, errp);
2645 scsi_generic_read_device_inquiry(&s->qdev);
2646
2647 out:
2648 aio_context_release(ctx);
2649 }
2650
2651 typedef struct SCSIBlockReq {
2652 SCSIDiskReq req;
2653 sg_io_hdr_t io_header;
2654
2655 /* Selected bytes of the original CDB, copied into our own CDB. */
2656 uint8_t cmd, cdb1, group_number;
2657
2658 /* CDB passed to SG_IO. */
2659 uint8_t cdb[16];
2660 } SCSIBlockReq;
2661
2662 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2663 int64_t offset, QEMUIOVector *iov,
2664 int direction,
2665 BlockCompletionFunc *cb, void *opaque)
2666 {
2667 sg_io_hdr_t *io_header = &req->io_header;
2668 SCSIDiskReq *r = &req->req;
2669 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2670 int nb_logical_blocks;
2671 uint64_t lba;
2672 BlockAIOCB *aiocb;
2673
2674 /* This is not supported yet. It can only happen if the guest does
2675 * reads and writes that are not aligned to the logical sector size
2676 * _and_ cover multiple MemoryRegions.
2677 */
2678 assert(offset % s->qdev.blocksize == 0);
2679 assert(iov->size % s->qdev.blocksize == 0);
2680
2681 io_header->interface_id = 'S';
2682
2683 /* The data transfer comes from the QEMUIOVector. */
2684 io_header->dxfer_direction = direction;
2685 io_header->dxfer_len = iov->size;
2686 io_header->dxferp = (void *)iov->iov;
2687 io_header->iovec_count = iov->niov;
2688 assert(io_header->iovec_count == iov->niov); /* no overflow! */
2689
2690 /* Build a new CDB with the LBA and length patched in, in case
2691 * DMA helpers split the transfer into multiple segments. Do not
2692 * build a CDB smaller than what the guest wanted, and only build
2693 * a larger one if strictly necessary.
2694 */
2695 io_header->cmdp = req->cdb;
2696 lba = offset / s->qdev.blocksize;
2697 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;
2698
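/* Bits 7-5 of the opcode encode the CDB group (0 = 6-byte, 1/2 = 10-byte,
* 5 = 12-byte, 4 = 16-byte). Keep the opcode's low five bits and pick the
* smallest group that fits the LBA without shrinking the guest's CDB,
* e.g. READ(6) 0x08 becomes READ(10) 0x28 or READ(16) 0x88 as needed.
*/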
2699 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
2700 /* 6-byte CDB */
2701 stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
2702 req->cdb[4] = nb_logical_blocks;
2703 req->cdb[5] = 0;
2704 io_header->cmd_len = 6;
2705 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
2706 /* 10-byte CDB */
2707 req->cdb[0] = (req->cmd & 0x1f) | 0x20;
2708 req->cdb[1] = req->cdb1;
2709 stl_be_p(&req->cdb[2], lba);
2710 req->cdb[6] = req->group_number;
2711 stw_be_p(&req->cdb[7], nb_logical_blocks);
2712 req->cdb[9] = 0;
2713 io_header->cmd_len = 10;
2714 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
2715 /* 12-byte CDB */
2716 req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
2717 req->cdb[1] = req->cdb1;
2718 stl_be_p(&req->cdb[2], lba);
2719 stl_be_p(&req->cdb[6], nb_logical_blocks);
2720 req->cdb[10] = req->group_number;
2721 req->cdb[11] = 0;
2722 io_header->cmd_len = 12;
2723 } else {
2724 /* 16-byte CDB */
2725 req->cdb[0] = (req->cmd & 0x1f) | 0x80;
2726 req->cdb[1] = req->cdb1;
2727 stq_be_p(&req->cdb[2], lba);
2728 stl_be_p(&req->cdb[10], nb_logical_blocks);
2729 req->cdb[14] = req->group_number;
2730 req->cdb[15] = 0;
2731 io_header->cmd_len = 16;
2732 }
2733
2734 /* The rest is as in scsi-generic.c. */
2735 io_header->mx_sb_len = sizeof(r->req.sense);
2736 io_header->sbp = r->req.sense;
2737 io_header->timeout = UINT_MAX;
2738 io_header->usr_ptr = r;
2739 io_header->flags |= SG_FLAG_DIRECT_IO;
2740
2741 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
2742 assert(aiocb != NULL);
2743 return aiocb;
2744 }
2745
2746 static bool scsi_block_no_fua(SCSICommand *cmd)
2747 {
2748 return false;
2749 }
2750
2751 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2752 QEMUIOVector *iov,
2753 BlockCompletionFunc *cb, void *cb_opaque,
2754 void *opaque)
2755 {
2756 SCSIBlockReq *r = opaque;
2757 return scsi_block_do_sgio(r, offset, iov,
2758 SG_DXFER_FROM_DEV, cb, cb_opaque);
2759 }
2760
2761 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2762 QEMUIOVector *iov,
2763 BlockCompletionFunc *cb, void *cb_opaque,
2764 void *opaque)
2765 {
2766 SCSIBlockReq *r = opaque;
2767 return scsi_block_do_sgio(r, offset, iov,
2768 SG_DXFER_TO_DEV, cb, cb_opaque);
2769 }
2770
2771 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2772 {
2773 switch (buf[0]) {
2774 case VERIFY_10:
2775 case VERIFY_12:
2776 case VERIFY_16:
2777 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2778 * for the number of logical blocks specified in the length
2779 * field). For other modes, do not use a scatter/gather operation.
2780 */
2781 if ((buf[1] & 6) == 2) {
2782 return false;
2783 }
2784 break;
2785
2786 case READ_6:
2787 case READ_10:
2788 case READ_12:
2789 case READ_16:
2790 case WRITE_6:
2791 case WRITE_10:
2792 case WRITE_12:
2793 case WRITE_16:
2794 case WRITE_VERIFY_10:
2795 case WRITE_VERIFY_12:
2796 case WRITE_VERIFY_16:
2797 /* MMC writing cannot be done via DMA helpers, because it sometimes
2798 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2799 * We might use scsi_block_dma_reqops as long as no writing commands are
2800 * seen, but performance usually isn't paramount on optical media. So,
2801 * just make scsi-block operate the same as scsi-generic for them.
2802 */
2803 if (s->qdev.type != TYPE_ROM) {
2804 return false;
2805 }
2806 break;
2807
2808 default:
2809 break;
2810 }
2811
2812 return true;
2813 }
2814
2815
2816 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2817 {
2818 SCSIBlockReq *r = (SCSIBlockReq *)req;
2819 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2820
2821 r->cmd = req->cmd.buf[0];
2822 switch (r->cmd >> 5) {
2823 case 0:
2824 /* 6-byte CDB. */
2825 r->cdb1 = r->group_number = 0;
2826 break;
2827 case 1:
2828 /* 10-byte CDB. */
2829 r->cdb1 = req->cmd.buf[1];
2830 r->group_number = req->cmd.buf[6];
2831 break;
2832 case 4:
2833 /* 12-byte CDB. */
2834 r->cdb1 = req->cmd.buf[1];
2835 r->group_number = req->cmd.buf[10];
2836 break;
2837 case 5:
2838 /* 16-byte CDB. */
2839 r->cdb1 = req->cmd.buf[1];
2840 r->group_number = req->cmd.buf[14];
2841 break;
2842 default:
2843 abort();
2844 }
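/* The flag byte and group number saved above are patched back into the
* CDB that scsi_block_do_sgio() rebuilds for each SG_IO submission.
*/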
2845
2846 /* Protection information is not supported. For SCSI versions 2 and
2847 * older (as determined by snooping the guest's INQUIRY commands),
2848 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2849 */
2850 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
2851 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
2852 return 0;
2853 }
2854
2855 r->req.status = &r->io_header.status;
2856 return scsi_disk_dma_command(req, buf);
2857 }
2858
2859 static const SCSIReqOps scsi_block_dma_reqops = {
2860 .size = sizeof(SCSIBlockReq),
2861 .free_req = scsi_free_request,
2862 .send_command = scsi_block_dma_command,
2863 .read_data = scsi_read_data,
2864 .write_data = scsi_write_data,
2865 .get_buf = scsi_get_buf,
2866 .load_request = scsi_disk_load_request,
2867 .save_request = scsi_disk_save_request,
2868 };
2869
2870 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2871 uint32_t lun, uint8_t *buf,
2872 void *hba_private)
2873 {
2874 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2875
2876 if (scsi_block_is_passthrough(s, buf)) {
2877 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
2878 hba_private);
2879 } else {
2880 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
2881 hba_private);
2882 }
2883 }
2884
2885 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
2886 uint8_t *buf, void *hba_private)
2887 {
2888 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2889
2890 if (scsi_block_is_passthrough(s, buf)) {
2891 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
2892 } else {
2893 return scsi_req_parse_cdb(&s->qdev, cmd, buf);
2894 }
2895 }
2896
2897 #endif
2898
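/* Default DMA callbacks for the emulated devices: plain vectored reads and
* writes on the BlockBackend. scsi-block overrides these with the SG_IO
* based variants above.
*/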
2899 static
2900 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
2901 BlockCompletionFunc *cb, void *cb_opaque,
2902 void *opaque)
2903 {
2904 SCSIDiskReq *r = opaque;
2905 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2906 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2907 }
2908
2909 static
2910 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
2911 BlockCompletionFunc *cb, void *cb_opaque,
2912 void *opaque)
2913 {
2914 SCSIDiskReq *r = opaque;
2915 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2916 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2917 }
2918
2919 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
2920 {
2921 DeviceClass *dc = DEVICE_CLASS(klass);
2922 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2923
2924 dc->fw_name = "disk";
2925 dc->reset = scsi_disk_reset;
2926 sdc->dma_readv = scsi_dma_readv;
2927 sdc->dma_writev = scsi_dma_writev;
2928 sdc->need_fua_emulation = scsi_is_cmd_fua;
2929 }
2930
2931 static const TypeInfo scsi_disk_base_info = {
2932 .name = TYPE_SCSI_DISK_BASE,
2933 .parent = TYPE_SCSI_DEVICE,
2934 .class_init = scsi_disk_base_class_initfn,
2935 .instance_size = sizeof(SCSIDiskState),
2936 .class_size = sizeof(SCSIDiskClass),
2937 .abstract = true,
2938 };
2939
2940 #define DEFINE_SCSI_DISK_PROPERTIES() \
2941 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \
2942 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \
2943 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
2944 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
2945 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
2946 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
2947 DEFINE_PROP_STRING("product", SCSIDiskState, product), \
2948 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
2949
2950
2951 static Property scsi_hd_properties[] = {
2952 DEFINE_SCSI_DISK_PROPERTIES(),
2953 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
2954 SCSI_DISK_F_REMOVABLE, false),
2955 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
2956 SCSI_DISK_F_DPOFUA, false),
2957 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
2958 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
2959 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
2960 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
2961 DEFAULT_MAX_UNMAP_SIZE),
2962 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
2963 DEFAULT_MAX_IO_SIZE),
2964 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
2965 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
2966 5),
2967 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
2968 DEFINE_PROP_END_OF_LIST(),
2969 };
2970
2971 static const VMStateDescription vmstate_scsi_disk_state = {
2972 .name = "scsi-disk",
2973 .version_id = 1,
2974 .minimum_version_id = 1,
2975 .fields = (VMStateField[]) {
2976 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
2977 VMSTATE_BOOL(media_changed, SCSIDiskState),
2978 VMSTATE_BOOL(media_event, SCSIDiskState),
2979 VMSTATE_BOOL(eject_request, SCSIDiskState),
2980 VMSTATE_BOOL(tray_open, SCSIDiskState),
2981 VMSTATE_BOOL(tray_locked, SCSIDiskState),
2982 VMSTATE_END_OF_LIST()
2983 }
2984 };
2985
2986 static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
2987 {
2988 DeviceClass *dc = DEVICE_CLASS(klass);
2989 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
2990
2991 sc->realize = scsi_hd_realize;
2992 sc->alloc_req = scsi_new_request;
2993 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
2994 dc->desc = "virtual SCSI disk";
2995 dc->props = scsi_hd_properties;
2996 dc->vmsd = &vmstate_scsi_disk_state;
2997 }
2998
2999 static const TypeInfo scsi_hd_info = {
3000 .name = "scsi-hd",
3001 .parent = TYPE_SCSI_DISK_BASE,
3002 .class_init = scsi_hd_class_initfn,
3003 };
3004
3005 static Property scsi_cd_properties[] = {
3006 DEFINE_SCSI_DISK_PROPERTIES(),
3007 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3008 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3009 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3010 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3011 DEFAULT_MAX_IO_SIZE),
3012 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3013 5),
3014 DEFINE_PROP_END_OF_LIST(),
3015 };
3016
3017 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
3018 {
3019 DeviceClass *dc = DEVICE_CLASS(klass);
3020 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3021
3022 sc->realize = scsi_cd_realize;
3023 sc->alloc_req = scsi_new_request;
3024 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3025 dc->desc = "virtual SCSI CD-ROM";
3026 dc->props = scsi_cd_properties;
3027 dc->vmsd = &vmstate_scsi_disk_state;
3028 }
3029
3030 static const TypeInfo scsi_cd_info = {
3031 .name = "scsi-cd",
3032 .parent = TYPE_SCSI_DISK_BASE,
3033 .class_init = scsi_cd_class_initfn,
3034 };
3035
3036 #ifdef __linux__
3037 static Property scsi_block_properties[] = {
3038 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
3039 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
3040 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
3041 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
3042 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3043 DEFAULT_MAX_UNMAP_SIZE),
3044 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3045 DEFAULT_MAX_IO_SIZE),
3046 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3047 -1),
3048 DEFINE_PROP_END_OF_LIST(),
3049 };
3050
3051 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
3052 {
3053 DeviceClass *dc = DEVICE_CLASS(klass);
3054 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3055 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
3056
3057 sc->realize = scsi_block_realize;
3058 sc->alloc_req = scsi_block_new_request;
3059 sc->parse_cdb = scsi_block_parse_cdb;
3060 sdc->dma_readv = scsi_block_dma_readv;
3061 sdc->dma_writev = scsi_block_dma_writev;
3062 sdc->need_fua_emulation = scsi_block_no_fua;
3063 dc->desc = "SCSI block device passthrough";
3064 dc->props = scsi_block_properties;
3065 dc->vmsd = &vmstate_scsi_disk_state;
3066 }
3067
3068 static const TypeInfo scsi_block_info = {
3069 .name = "scsi-block",
3070 .parent = TYPE_SCSI_DISK_BASE,
3071 .class_init = scsi_block_class_initfn,
3072 };
3073 #endif
3074
3075 static Property scsi_disk_properties[] = {
3076 DEFINE_SCSI_DISK_PROPERTIES(),
3077 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
3078 SCSI_DISK_F_REMOVABLE, false),
3079 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
3080 SCSI_DISK_F_DPOFUA, false),
3081 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3082 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3083 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3084 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3085 DEFAULT_MAX_UNMAP_SIZE),
3086 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3087 DEFAULT_MAX_IO_SIZE),
3088 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3089 5),
3090 DEFINE_PROP_END_OF_LIST(),
3091 };
3092
3093 static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
3094 {
3095 DeviceClass *dc = DEVICE_CLASS(klass);
3096 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3097
3098 sc->realize = scsi_disk_realize;
3099 sc->alloc_req = scsi_new_request;
3100 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3101 dc->fw_name = "disk";
3102 dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
3103 dc->reset = scsi_disk_reset;
3104 dc->props = scsi_disk_properties;
3105 dc->vmsd = &vmstate_scsi_disk_state;
3106 }
3107
3108 static const TypeInfo scsi_disk_info = {
3109 .name = "scsi-disk",
3110 .parent = TYPE_SCSI_DISK_BASE,
3111 .class_init = scsi_disk_class_initfn,
3112 };
3113
3114 static void scsi_disk_register_types(void)
3115 {
3116 type_register_static(&scsi_disk_base_info);
3117 type_register_static(&scsi_hd_info);
3118 type_register_static(&scsi_cd_info);
3119 #ifdef __linux__
3120 type_register_static(&scsi_block_info);
3121 #endif
3122 type_register_static(&scsi_disk_info);
3123 }
3124
3125 type_init(scsi_disk_register_types)