]>
Commit | Line | Data |
---|---|---|
2f0c9fe6 PB |
1 | /* |
2 | * QEMU System Emulator block driver | |
3 | * | |
4 | * Copyright (c) 2011 IBM Corp. | |
5 | * Copyright (c) 2012 Red Hat, Inc. | |
6 | * | |
7 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
8 | * of this software and associated documentation files (the "Software"), to deal | |
9 | * in the Software without restriction, including without limitation the rights | |
10 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
11 | * copies of the Software, and to permit persons to whom the Software is | |
12 | * furnished to do so, subject to the following conditions: | |
13 | * | |
14 | * The above copyright notice and this permission notice shall be included in | |
15 | * all copies or substantial portions of the Software. | |
16 | * | |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
22 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
23 | * THE SOFTWARE. | |
24 | */ | |
25 | ||
d38ea87a | 26 | #include "qemu/osdep.h" |
2f0c9fe6 | 27 | #include "qemu-common.h" |
737e150e | 28 | #include "block/block.h" |
c87621ea | 29 | #include "block/blockjob_int.h" |
737e150e | 30 | #include "block/block_int.h" |
373340b2 | 31 | #include "sysemu/block-backend.h" |
cc7a8ea7 | 32 | #include "qapi/qmp/qerror.h" |
7b1b5d19 | 33 | #include "qapi/qmp/qjson.h" |
10817bf0 | 34 | #include "qemu/coroutine.h" |
7f0317cf | 35 | #include "qemu/id.h" |
2f0c9fe6 | 36 | #include "qmp-commands.h" |
1de7afc9 | 37 | #include "qemu/timer.h" |
5a2d2cbd | 38 | #include "qapi-event.h" |
2f0c9fe6 | 39 | |
8254b6d9 JS |
40 | static void block_job_event_cancelled(BlockJob *job); |
41 | static void block_job_event_completed(BlockJob *job, const char *msg); | |
42 | ||
c55a832f FZ |
43 | /* Transactional group of block jobs */ |
44 | struct BlockJobTxn { | |
45 | ||
46 | /* Is this txn being cancelled? */ | |
47 | bool aborting; | |
48 | ||
49 | /* List of jobs */ | |
50 | QLIST_HEAD(, BlockJob) jobs; | |
51 | ||
52 | /* Reference count */ | |
53 | int refcnt; | |
54 | }; | |
55 | ||
/* Global list of all live block jobs, iterated by block_job_next() */
static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);
57 | ||
88691b37 PB |
58 | /* |
59 | * The block job API is composed of two categories of functions. | |
60 | * | |
61 | * The first includes functions used by the monitor. The monitor is | |
62 | * peculiar in that it accesses the block job list with block_job_get, and | |
63 | * therefore needs consistency across block_job_get and the actual operation | |
64 | * (e.g. block_job_set_speed). The consistency is achieved with | |
65 | * aio_context_acquire/release. These functions are declared in blockjob.h. | |
66 | * | |
67 | * The second includes functions used by the block job drivers and sometimes | |
68 | * by the core block layer. These do not care about locking, because the | |
69 | * whole coroutine runs under the AioContext lock, and are declared in | |
70 | * blockjob_int.h. | |
71 | */ | |
72 | ||
a7112795 AG |
73 | BlockJob *block_job_next(BlockJob *job) |
74 | { | |
75 | if (!job) { | |
76 | return QLIST_FIRST(&block_jobs); | |
77 | } | |
78 | return QLIST_NEXT(job, job_list); | |
79 | } | |
80 | ||
ffb1f10c AG |
81 | BlockJob *block_job_get(const char *id) |
82 | { | |
83 | BlockJob *job; | |
84 | ||
85 | QLIST_FOREACH(job, &block_jobs, job_list) { | |
559b935f | 86 | if (job->id && !strcmp(id, job->id)) { |
ffb1f10c AG |
87 | return job; |
88 | } | |
89 | } | |
90 | ||
91 | return NULL; | |
92 | } | |
93 | ||
/* Request a pause; the job actually pauses at its next pause point.
 * Pauses nest: each call must be matched by a block_job_resume(). */
static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}
98 | ||
99 | static void block_job_resume(BlockJob *job) | |
100 | { | |
101 | assert(job->pause_count > 0); | |
102 | job->pause_count--; | |
103 | if (job->pause_count) { | |
104 | return; | |
105 | } | |
106 | block_job_enter(job); | |
107 | } | |
108 | ||
05b0d8e3 PB |
109 | static void block_job_ref(BlockJob *job) |
110 | { | |
111 | ++job->refcnt; | |
112 | } | |
113 | ||
114 | static void block_job_attached_aio_context(AioContext *new_context, | |
115 | void *opaque); | |
116 | static void block_job_detach_aio_context(void *opaque); | |
117 | ||
/* Drop a reference; on the last one, tear the job down completely:
 * detach it from its BDS, unblock the nodes it blocked, remove the
 * AioContext notifier, release the BlockBackend and free the job.
 * NOTE(review): assumes blk_bs(job->blk) is still non-NULL here —
 * confirm against callers. */
static void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        bs->job = NULL;                    /* BDS no longer owned by a job */
        block_job_remove_all_bdrv(job);    /* unblock + unref all nodes */
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        QLIST_REMOVE(job, job_list);
        g_free(job);
    }
}
134 | ||
/* AioContext-change notifier: called after the job's BlockBackend has been
 * attached to a new context.  Lets the driver react, then resumes the job
 * (it was paused by block_job_detach_aio_context()). */
static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}
146 | ||
/* Force the job to make progress towards its next pause point or
 * completion: kick its coroutine, drain in-flight requests, and let the
 * driver flush any private queues. */
static void block_job_drain(BlockJob *job)
{
    /* If job is !job->busy this kicks it into the next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}
157 | ||
/* AioContext-change notifier: called before the job's BlockBackend is
 * detached from its current context.  Pauses the job and drains it until
 * it has actually reached a pause point (or completed), so no coroutine
 * is left running in the old context. */
static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    /* Drain until the pause request above has taken effect */
    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}
173 | ||
/* BdrvChildRole callback: human-readable description of the job as the
 * parent of a node, e.g. "stream job 'job0'".  Caller frees the string. */
static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_lookup[job->driver->job_type],
                           job->id);
}
181 | ||
/* Role used for the BdrvChild links a job holds on the nodes it touches;
 * stay_at_node keeps the child attached to the node across graph changes. */
static const BdrvChildRole child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .stay_at_node       = true,
};
186 | ||
/* BlockDevOps callback: pause the job while its backend is drained. */
static void block_job_drained_begin(void *opaque)
{
    block_job_pause(opaque);
}
192 | ||
/* BlockDevOps callback: resume the job when the drained section ends. */
static void block_job_drained_end(void *opaque)
{
    block_job_resume(opaque);
}
198 | ||
/* Device ops installed on the job's BlockBackend so drained sections
 * automatically pause/resume the job. */
static const BlockDevOps block_job_dev_ops = {
    .drained_begin = block_job_drained_begin,
    .drained_end   = block_job_drained_end,
};
203 | ||
/* Detach the job from every node it was added to with block_job_add_bdrv():
 * lift the op blockers and drop the BdrvChild references, then free the
 * bookkeeping list. */
void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}
215 | ||
/* Attach @bs to @job with the requested permissions: create a root
 * BdrvChild, record it in job->nodes, and block all operations on the
 * node via job->blocker.  Returns 0 on success, -EPERM if the
 * permissions could not be granted (errp set). */
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}
233 | ||
559b935f JS |
234 | bool block_job_is_internal(BlockJob *job) |
235 | { | |
236 | return (job->id == NULL); | |
237 | } | |
238 | ||
5ccac6f1 JS |
239 | static bool block_job_started(BlockJob *job) |
240 | { | |
241 | return job->co; | |
242 | } | |
243 | ||
/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);     /* honour pre-start pause requests */
    job->driver->start(job);        /* hand control to the driver's main loop */
}
256 | ||
/* Launch a freshly created job: build its coroutine, drop the creation-time
 * pause (pause_count was set to 1 by block_job_create), mark it runnable
 * and enter the coroutine in the node's AioContext. */
void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}
267 | ||
/* Finalize one completed job: run the driver's commit/abort hook depending
 * on job->ret, always run clean, invoke the completion callback, emit the
 * QMP event (only for started, non-internal jobs), leave the transaction
 * and drop the job's base reference. */
static void block_job_completed_single(BlockJob *job)
{
    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    /* clean runs on both the success and the failure path */
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        block_job_txn_unref(job->txn);
    }
    block_job_unref(job);
}
306 | ||
/* Abort path for a job that failed (or was cancelled) inside a transaction:
 * the first failing job cancels every sibling, then finalizes them all.
 * All siblings' AioContexts are acquired up front so the cancel/finalize
 * sweep happens under a consistent set of locks. */
static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job == job || other_job->completed) {
            /* Other jobs are "effectively" cancelled by us, set the status for
             * them; this job, however, may or may not be cancelled, depending
             * on the caller, so leave it. */
            if (other_job != job) {
                other_job->cancelled = true;
            }
            continue;
        }
        block_job_cancel_sync(other_job);
        assert(other_job->completed);
    }
    /* SAFE variant: block_job_completed_single() removes jobs from the list */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}
344 | ||
/* Success path for a transactional job: if any sibling is still running we
 * simply return (the last finisher commits); otherwise finalize every job
 * in the transaction under its own AioContext lock. */
static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}
368 | ||
/* Set the job's rate limit via the driver callback.  Fails with
 * QERR_UNSUPPORTED if the driver has no set_speed hook; job->speed is
 * only updated after the driver accepted the new value. */
void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
}
385 | ||
/* Ask a ready job to complete (QMP block-job-complete).  Rejected when the
 * job is paused, cancelled, not yet started, or its driver does not
 * support manual completion. */
void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job->pause_count || job->cancelled ||
        !block_job_started(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}
399 | ||
0df4ba58 JS |
400 | void block_job_user_pause(BlockJob *job) |
401 | { | |
402 | job->user_paused = true; | |
403 | block_job_pause(job); | |
404 | } | |
405 | ||
/* True if the job is paused at the user's request (as opposed to an
 * internal pause such as a drained section). */
bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}
410 | ||
0df4ba58 JS |
411 | void block_job_user_resume(BlockJob *job) |
412 | { | |
413 | if (job && job->user_paused && job->pause_count > 0) { | |
414 | job->user_paused = false; | |
415 | block_job_resume(job); | |
416 | } | |
417 | } | |
418 | ||
8acc72a4 PB |
419 | void block_job_cancel(BlockJob *job) |
420 | { | |
5ccac6f1 JS |
421 | if (block_job_started(job)) { |
422 | job->cancelled = true; | |
423 | block_job_iostatus_reset(job); | |
424 | block_job_enter(job); | |
425 | } else { | |
426 | block_job_completed(job, -ECANCELED); | |
427 | } | |
8acc72a4 PB |
428 | } |
429 | ||
/* Clear any recorded I/O error state back to OK. */
void block_job_iostatus_reset(BlockJob *job)
{
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
434 | ||
/* Run @finish on the job and then block until the job has completed.
 * Progress is driven by draining while the job is still in its own
 * context; once it defers to the main loop we aio_poll() instead.
 * Returns the job's result, or -ECANCELED if it was cancelled,
 * or -EBUSY if @finish itself failed. */
static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    /* Keep the job alive across completion inside the loops below */
    block_job_ref(job);

    finish(job, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}
465 | ||
/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}
473 | ||
/* Cancel the job and wait for it to finish; returns its final result
 * (see block_job_finish_sync for the -ECANCELED mapping). */
int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}
478 | ||
/* Synchronously cancel every job in the global list, taking each job's
 * AioContext lock around the cancellation.  Completed jobs remove
 * themselves from the list, so looping on QLIST_FIRST terminates. */
void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}
491 | ||
/* Ask the job to complete and wait for it; returns its final result. */
int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}
496 | ||
/* Build the QMP BlockJobInfo for this job.  Internal jobs are not
 * queryable and produce an error.  Caller owns the returned struct. */
BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(BlockJobType_lookup[job->driver->job_type]);
    info->device    = g_strdup(job->id);
    info->len       = job->len;
    info->busy      = job->busy;
    info->paused    = job->pause_count > 0;
    info->offset    = job->offset;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job->ready;
    return info;
}
32c81a4a PB |
517 | |
518 | static void block_job_iostatus_set_err(BlockJob *job, int error) | |
519 | { | |
520 | if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { | |
521 | job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : | |
522 | BLOCK_DEVICE_IO_STATUS_FAILED; | |
523 | } | |
524 | } | |
525 | ||
/* Emit the BLOCK_JOB_CANCELLED QMP event; suppressed for internal jobs. */
static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}
32c81a4a | 539 | |
/* Emit the BLOCK_JOB_COMPLETED QMP event; @msg (may be NULL) carries the
 * error string on failure.  Suppressed for internal jobs. */
static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}
555 | ||
88691b37 PB |
556 | /* |
557 | * API for block job drivers and the block layer. These functions are | |
558 | * declared in blockjob_int.h. | |
559 | */ | |
560 | ||
/* Create (but do not start) a block job on @bs.
 *
 * Validates the job ID (auto-derived from the device name unless the job
 * is internal), creates a BlockBackend with the requested permissions,
 * allocates driver->instance_size bytes for the job, blocks operations
 * on the node and registers AioContext notifiers.  The job starts
 * paused (pause_count == 1); block_job_start() releases it.
 * Returns the job, or NULL with errp set. */
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);   /* NULL for internal jobs */
    job->blk           = blk;
    job->cb            = cb;
    job->opaque        = opaque;
    job->busy          = false;
    job->paused        = true;               /* released by block_job_start() */
    job->pause_count   = 1;
    job->refcnt        = 1;

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_lookup[driver->job_type]);
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    blk_set_dev_ops(blk, &block_job_dev_ops, job);
    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }
    return job;
}
644 | ||
/* Request a pause on every job in the global list, under each job's
 * AioContext lock.  Paired with block_job_resume_all(). */
void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}
656 | ||
/* Dispose of a job that failed before it was started: just drop the
 * base reference taken at creation time. */
void block_job_early_fail(BlockJob *job)
{
    block_job_unref(job);
}
661 | ||
/* Called by the driver when the job's work is done (@ret is its result).
 * Standalone jobs are finalized immediately; transactional jobs go down
 * the txn abort path on failure/cancel, or the txn success path. */
void block_job_completed(BlockJob *job, int ret)
{
    assert(blk_bs(job->blk)->job == job);
    assert(!job->completed);
    job->completed = true;
    job->ret = ret;
    if (!job->txn) {
        block_job_completed_single(job);
    } else if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}
676 | ||
/* True while at least one pause request is outstanding. */
static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}
681 | ||
/* Cooperative pause point, called from the job coroutine.  If a pause was
 * requested (and the job is not cancelled), run the driver's pause hook,
 * mark the job paused and yield until block_job_resume() re-enters the
 * coroutine, then run the driver's resume hook. */
void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;     /* a cancelled job must not get stuck paused */
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    /* Re-check: the pause request may have gone away during ->pause() */
    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        job->paused = true;
        job->busy = false;
        qemu_coroutine_yield(); /* wait for block_job_resume() */
        job->busy = true;
        job->paused = false;
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}
709 | ||
/* Drop one pause request from every job in the global list, under each
 * job's AioContext lock.  Paired with block_job_pause_all(). */
void block_job_resume_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}
721 | ||
/* Re-enter the job coroutine if it exists and is not currently running
 * (busy jobs re-enter themselves; entering them here would be a bug). */
void block_job_enter(BlockJob *job)
{
    if (job->co && !job->busy) {
        bdrv_coroutine_enter(blk_bs(job->blk), job->co);
    }
}
728 | ||
/* True once cancellation has been requested for this job. */
bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}
733 | ||
/* Sleep for @ns nanoseconds on @type's clock, from the job coroutine.
 * Returns immediately if cancelled; skips the sleep (but still visits
 * the pause point) when a pause is pending. */
void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;      /* allow block_job_enter() to wake us */
    if (!block_job_should_pause(job)) {
        co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
    }
    job->busy = true;

    block_job_pause_point(job);
}
751 | ||
/* Yield the job coroutine until something re-enters it (block_job_enter),
 * honouring cancellation and pending pause requests like
 * block_job_sleep_ns(). */
void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;      /* allow block_job_enter() to wake us */
    if (!block_job_should_pause(job)) {
        qemu_coroutine_yield();
    }
    job->busy = true;

    block_job_pause_point(job);
}
769 | ||
/* Mark the job as ready for manual completion and emit the
 * BLOCK_JOB_READY QMP event (suppressed for internal jobs). */
void block_job_event_ready(BlockJob *job)
{
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}
784 | ||
/* Map an I/O error to the action configured by @on_err, emit the
 * BLOCK_JOB_ERROR event (for non-internal jobs), and on STOP put the job
 * into a user-visible pause with the error recorded in its iostatus.
 * Returns the chosen action so the caller can act on it. */
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        /* stop only when the disk ran out of space; report otherwise */
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* make the pause user visible, which will be resumed from QMP. */
        block_job_user_pause(job);
        block_job_iostatus_set_err(job, error);
    }
    return action;
}
dec7d421 SH |
821 | |
822 | typedef struct { | |
823 | BlockJob *job; | |
dec7d421 SH |
824 | AioContext *aio_context; |
825 | BlockJobDeferToMainLoopFn *fn; | |
826 | void *opaque; | |
827 | } BlockJobDeferToMainLoopData; | |
828 | ||
829 | static void block_job_defer_to_main_loop_bh(void *opaque) | |
830 | { | |
831 | BlockJobDeferToMainLoopData *data = opaque; | |
832 | AioContext *aio_context; | |
833 | ||
dec7d421 SH |
834 | /* Prevent race with block_job_defer_to_main_loop() */ |
835 | aio_context_acquire(data->aio_context); | |
836 | ||
837 | /* Fetch BDS AioContext again, in case it has changed */ | |
b6d2e599 | 838 | aio_context = blk_get_aio_context(data->job->blk); |
d79df2a2 PB |
839 | if (aio_context != data->aio_context) { |
840 | aio_context_acquire(aio_context); | |
841 | } | |
dec7d421 | 842 | |
794f0141 | 843 | data->job->deferred_to_main_loop = false; |
dec7d421 SH |
844 | data->fn(data->job, data->opaque); |
845 | ||
d79df2a2 PB |
846 | if (aio_context != data->aio_context) { |
847 | aio_context_release(aio_context); | |
848 | } | |
dec7d421 SH |
849 | |
850 | aio_context_release(data->aio_context); | |
851 | ||
852 | g_free(data); | |
853 | } | |
854 | ||
/* Schedule @fn(job, opaque) to run once in the main loop's AioContext.
 * Sets job->deferred_to_main_loop so block_job_finish_sync() knows to
 * poll the main context instead of draining. */
void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}
c55a832f FZ |
869 | |
870 | BlockJobTxn *block_job_txn_new(void) | |
871 | { | |
872 | BlockJobTxn *txn = g_new0(BlockJobTxn, 1); | |
873 | QLIST_INIT(&txn->jobs); | |
874 | txn->refcnt = 1; | |
875 | return txn; | |
876 | } | |
877 | ||
878 | static void block_job_txn_ref(BlockJobTxn *txn) | |
879 | { | |
880 | txn->refcnt++; | |
881 | } | |
882 | ||
883 | void block_job_txn_unref(BlockJobTxn *txn) | |
884 | { | |
885 | if (txn && --txn->refcnt == 0) { | |
886 | g_free(txn); | |
887 | } | |
888 | } | |
889 | ||
/* Add @job to @txn (no-op if @txn is NULL).  A job may belong to at most
 * one transaction; the txn gains a reference that is dropped when the
 * job completes (block_job_completed_single). */
void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}