1 /*
2 * QEMU dump
3 *
4 * Copyright Fujitsu, Corp. 2011, 2012
5 *
6 * Authors:
7 * Wen Congyang <wency@cn.fujitsu.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 *
12 */
13
14 #include "qemu/osdep.h"
15 #include "qemu/cutils.h"
16 #include "elf.h"
17 #include "cpu.h"
18 #include "exec/hwaddr.h"
19 #include "monitor/monitor.h"
20 #include "sysemu/kvm.h"
21 #include "sysemu/dump.h"
22 #include "sysemu/sysemu.h"
23 #include "sysemu/memory_mapping.h"
24 #include "sysemu/cpus.h"
25 #include "qapi/qmp/qerror.h"
26 #include "qmp-commands.h"
27 #include "qapi-event.h"
28 #include "qemu/error-report.h"
29 #include "hw/misc/vmcoreinfo.h"
30
31 #include <zlib.h>
32 #ifdef CONFIG_LZO
33 #include <lzo/lzo1x.h>
34 #endif
35 #ifdef CONFIG_SNAPPY
36 #include <snappy-c.h>
37 #endif
38 #ifndef ELF_MACHINE_UNAME
39 #define ELF_MACHINE_UNAME "Unknown"
40 #endif
41
42 #define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */
43
44 #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \
45 ((DIV_ROUND_UP((hdr_size), 4) + \
46 DIV_ROUND_UP((name_size), 4) + \
47 DIV_ROUND_UP((desc_size), 4)) * 4)
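/*
 * For example, a 64-bit note with the 12-byte Elf64_Nhdr, the 11-byte name
 * "VMCOREINFO\0" and a 100-byte descriptor occupies (3 + 3 + 25) * 4 = 124
 * bytes, since each part is rounded up to a 4-byte boundary.
 */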
48
49 uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
50 {
51 if (s->dump_info.d_endian == ELFDATA2LSB) {
52 val = cpu_to_le16(val);
53 } else {
54 val = cpu_to_be16(val);
55 }
56
57 return val;
58 }
59
60 uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
61 {
62 if (s->dump_info.d_endian == ELFDATA2LSB) {
63 val = cpu_to_le32(val);
64 } else {
65 val = cpu_to_be32(val);
66 }
67
68 return val;
69 }
70
71 uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
72 {
73 if (s->dump_info.d_endian == ELFDATA2LSB) {
74 val = cpu_to_le64(val);
75 } else {
76 val = cpu_to_be64(val);
77 }
78
79 return val;
80 }
81
82 static int dump_cleanup(DumpState *s)
83 {
84 guest_phys_blocks_free(&s->guest_phys_blocks);
85 memory_mapping_list_free(&s->list);
86 close(s->fd);
87 g_free(s->guest_note);
88 s->guest_note = NULL;
89 if (s->resume) {
90 if (s->detached) {
91 qemu_mutex_lock_iothread();
92 }
93 vm_start();
94 if (s->detached) {
95 qemu_mutex_unlock_iothread();
96 }
97 }
98
99 return 0;
100 }
101
102 static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
103 {
104 DumpState *s = opaque;
105 size_t written_size;
106
107 written_size = qemu_write_full(s->fd, buf, size);
108 if (written_size != size) {
109 return -1;
110 }
111
112 return 0;
113 }
114
115 static void write_elf64_header(DumpState *s, Error **errp)
116 {
117 Elf64_Ehdr elf_header;
118 int ret;
119
120 memset(&elf_header, 0, sizeof(Elf64_Ehdr));
121 memcpy(&elf_header, ELFMAG, SELFMAG);
122 elf_header.e_ident[EI_CLASS] = ELFCLASS64;
123 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
124 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
125 elf_header.e_type = cpu_to_dump16(s, ET_CORE);
126 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
127 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
128 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
129 elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
130 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
131 elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
132 if (s->have_section) {
133 uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;
134
135 elf_header.e_shoff = cpu_to_dump64(s, shoff);
136 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
137 elf_header.e_shnum = cpu_to_dump16(s, 1);
138 }
139
140 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
141 if (ret < 0) {
142 error_setg(errp, "dump: failed to write elf header");
143 }
144 }
145
146 static void write_elf32_header(DumpState *s, Error **errp)
147 {
148 Elf32_Ehdr elf_header;
149 int ret;
150
151 memset(&elf_header, 0, sizeof(Elf32_Ehdr));
152 memcpy(&elf_header, ELFMAG, SELFMAG);
153 elf_header.e_ident[EI_CLASS] = ELFCLASS32;
154 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
155 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
156 elf_header.e_type = cpu_to_dump16(s, ET_CORE);
157 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
158 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
159 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
160 elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
161 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
162 elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
163 if (s->have_section) {
164 uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;
165
166 elf_header.e_shoff = cpu_to_dump32(s, shoff);
167 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
168 elf_header.e_shnum = cpu_to_dump16(s, 1);
169 }
170
171 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
172 if (ret < 0) {
173 error_setg(errp, "dump: failed to write elf header");
174 }
175 }
176
177 static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
178 int phdr_index, hwaddr offset,
179 hwaddr filesz, Error **errp)
180 {
181 Elf64_Phdr phdr;
182 int ret;
183
184 memset(&phdr, 0, sizeof(Elf64_Phdr));
185 phdr.p_type = cpu_to_dump32(s, PT_LOAD);
186 phdr.p_offset = cpu_to_dump64(s, offset);
187 phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
188 phdr.p_filesz = cpu_to_dump64(s, filesz);
189 phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
190 phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr);
191
192 assert(memory_mapping->length >= filesz);
193
194 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
195 if (ret < 0) {
196 error_setg(errp, "dump: failed to write program header table");
197 }
198 }
199
200 static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
201 int phdr_index, hwaddr offset,
202 hwaddr filesz, Error **errp)
203 {
204 Elf32_Phdr phdr;
205 int ret;
206
207 memset(&phdr, 0, sizeof(Elf32_Phdr));
208 phdr.p_type = cpu_to_dump32(s, PT_LOAD);
209 phdr.p_offset = cpu_to_dump32(s, offset);
210 phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
211 phdr.p_filesz = cpu_to_dump32(s, filesz);
212 phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
213 phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr);
214
215 assert(memory_mapping->length >= filesz);
216
217 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
218 if (ret < 0) {
219 error_setg(errp, "dump: failed to write program header table");
220 }
221 }
222
223 static void write_elf64_note(DumpState *s, Error **errp)
224 {
225 Elf64_Phdr phdr;
226 hwaddr begin = s->memory_offset - s->note_size;
227 int ret;
228
229 memset(&phdr, 0, sizeof(Elf64_Phdr));
230 phdr.p_type = cpu_to_dump32(s, PT_NOTE);
231 phdr.p_offset = cpu_to_dump64(s, begin);
232 phdr.p_paddr = 0;
233 phdr.p_filesz = cpu_to_dump64(s, s->note_size);
234 phdr.p_memsz = cpu_to_dump64(s, s->note_size);
235 phdr.p_vaddr = 0;
236
237 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
238 if (ret < 0) {
239 error_setg(errp, "dump: failed to write program header table");
240 }
241 }
242
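/* note ids passed to the per-CPU note writers below are 1-based */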
243 static inline int cpu_index(CPUState *cpu)
244 {
245 return cpu->cpu_index + 1;
246 }
247
248 static void write_guest_note(WriteCoreDumpFunction f, DumpState *s,
249 Error **errp)
250 {
251 int ret;
252
253 if (s->guest_note) {
254 ret = f(s->guest_note, s->guest_note_size, s);
255 if (ret < 0) {
256 error_setg(errp, "dump: failed to write guest note");
257 }
258 }
259 }
260
261 static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
262 Error **errp)
263 {
264 CPUState *cpu;
265 int ret;
266 int id;
267
268 CPU_FOREACH(cpu) {
269 id = cpu_index(cpu);
270 ret = cpu_write_elf64_note(f, cpu, id, s);
271 if (ret < 0) {
272 error_setg(errp, "dump: failed to write elf notes");
273 return;
274 }
275 }
276
277 CPU_FOREACH(cpu) {
278 ret = cpu_write_elf64_qemunote(f, cpu, s);
279 if (ret < 0) {
280 error_setg(errp, "dump: failed to write CPU status");
281 return;
282 }
283 }
284
285 write_guest_note(f, s, errp);
286 }
287
288 static void write_elf32_note(DumpState *s, Error **errp)
289 {
290 hwaddr begin = s->memory_offset - s->note_size;
291 Elf32_Phdr phdr;
292 int ret;
293
294 memset(&phdr, 0, sizeof(Elf32_Phdr));
295 phdr.p_type = cpu_to_dump32(s, PT_NOTE);
296 phdr.p_offset = cpu_to_dump32(s, begin);
297 phdr.p_paddr = 0;
298 phdr.p_filesz = cpu_to_dump32(s, s->note_size);
299 phdr.p_memsz = cpu_to_dump32(s, s->note_size);
300 phdr.p_vaddr = 0;
301
302 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
303 if (ret < 0) {
304 error_setg(errp, "dump: failed to write program header table");
305 }
306 }
307
308 static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
309 Error **errp)
310 {
311 CPUState *cpu;
312 int ret;
313 int id;
314
315 CPU_FOREACH(cpu) {
316 id = cpu_index(cpu);
317 ret = cpu_write_elf32_note(f, cpu, id, s);
318 if (ret < 0) {
319 error_setg(errp, "dump: failed to write elf notes");
320 return;
321 }
322 }
323
324 CPU_FOREACH(cpu) {
325 ret = cpu_write_elf32_qemunote(f, cpu, s);
326 if (ret < 0) {
327 error_setg(errp, "dump: failed to write CPU status");
328 return;
329 }
330 }
331
332 write_guest_note(f, s, errp);
333 }
334
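/*
 * Write the single section header that is emitted when the program header
 * count overflows e_phnum (PN_XNUM); its sh_info field carries the real
 * count. type selects the ELFCLASS32 (0) or ELFCLASS64 (non-zero) layout.
 */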
335 static void write_elf_section(DumpState *s, int type, Error **errp)
336 {
337 Elf32_Shdr shdr32;
338 Elf64_Shdr shdr64;
339 int shdr_size;
340 void *shdr;
341 int ret;
342
343 if (type == 0) {
344 shdr_size = sizeof(Elf32_Shdr);
345 memset(&shdr32, 0, shdr_size);
346 shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
347 shdr = &shdr32;
348 } else {
349 shdr_size = sizeof(Elf64_Shdr);
350 memset(&shdr64, 0, shdr_size);
351 shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
352 shdr = &shdr64;
353 }
354
355     ret = fd_write_vmcore(shdr, shdr_size, s);
356 if (ret < 0) {
357 error_setg(errp, "dump: failed to write section header table");
358 }
359 }
360
361 static void write_data(DumpState *s, void *buf, int length, Error **errp)
362 {
363 int ret;
364
365 ret = fd_write_vmcore(buf, length, s);
366 if (ret < 0) {
367 error_setg(errp, "dump: failed to save memory");
368 } else {
369 s->written_size += length;
370 }
371 }
372
373 /* write the memory to vmcore. 1 page per I/O. */
374 static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
375 int64_t size, Error **errp)
376 {
377 int64_t i;
378 Error *local_err = NULL;
379
380 for (i = 0; i < size / s->dump_info.page_size; i++) {
381 write_data(s, block->host_addr + start + i * s->dump_info.page_size,
382 s->dump_info.page_size, &local_err);
383 if (local_err) {
384 error_propagate(errp, local_err);
385 return;
386 }
387 }
388
389 if ((size % s->dump_info.page_size) != 0) {
390 write_data(s, block->host_addr + start + i * s->dump_info.page_size,
391 size % s->dump_info.page_size, &local_err);
392 if (local_err) {
393 error_propagate(errp, local_err);
394 return;
395 }
396 }
397 }
398
399 /* get the memory's offset and size in the vmcore */
400 static void get_offset_range(hwaddr phys_addr,
401 ram_addr_t mapping_length,
402 DumpState *s,
403 hwaddr *p_offset,
404 hwaddr *p_filesz)
405 {
406 GuestPhysBlock *block;
407 hwaddr offset = s->memory_offset;
408 int64_t size_in_block, start;
409
410 /* When the memory is not stored into vmcore, offset will be -1 */
411 *p_offset = -1;
412 *p_filesz = 0;
413
414 if (s->has_filter) {
415 if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
416 return;
417 }
418 }
419
420 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
421 if (s->has_filter) {
422 if (block->target_start >= s->begin + s->length ||
423 block->target_end <= s->begin) {
424 /* This block is out of the range */
425 continue;
426 }
427
428 if (s->begin <= block->target_start) {
429 start = block->target_start;
430 } else {
431 start = s->begin;
432 }
433
434 size_in_block = block->target_end - start;
435 if (s->begin + s->length < block->target_end) {
436 size_in_block -= block->target_end - (s->begin + s->length);
437 }
438 } else {
439 start = block->target_start;
440 size_in_block = block->target_end - block->target_start;
441 }
442
443 if (phys_addr >= start && phys_addr < start + size_in_block) {
444 *p_offset = phys_addr - start + offset;
445
446 /* The offset range mapped from the vmcore file must not spill over
447 * the GuestPhysBlock, clamp it. The rest of the mapping will be
448 * zero-filled in memory at load time; see
449 * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
450 */
451 *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
452 mapping_length :
453 size_in_block - (phys_addr - start);
454 return;
455 }
456
457 offset += size_in_block;
458 }
459 }
460
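/* write a PT_LOAD program header for every guest memory mapping */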
461 static void write_elf_loads(DumpState *s, Error **errp)
462 {
463 hwaddr offset, filesz;
464 MemoryMapping *memory_mapping;
465 uint32_t phdr_index = 1;
466 uint32_t max_index;
467 Error *local_err = NULL;
468
469 if (s->have_section) {
470 max_index = s->sh_info;
471 } else {
472 max_index = s->phdr_num;
473 }
474
475 QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
476 get_offset_range(memory_mapping->phys_addr,
477 memory_mapping->length,
478 s, &offset, &filesz);
479 if (s->dump_info.d_class == ELFCLASS64) {
480 write_elf64_load(s, memory_mapping, phdr_index++, offset,
481 filesz, &local_err);
482 } else {
483 write_elf32_load(s, memory_mapping, phdr_index++, offset,
484 filesz, &local_err);
485 }
486
487 if (local_err) {
488 error_propagate(errp, local_err);
489 return;
490 }
491
492 if (phdr_index >= max_index) {
493 break;
494 }
495 }
496 }
497
498 /* write elf header, PT_NOTE and elf note to vmcore. */
499 static void dump_begin(DumpState *s, Error **errp)
500 {
501 Error *local_err = NULL;
502
503 /*
504 * the vmcore's format is:
505 * --------------
506 * | elf header |
507 * --------------
508 * | PT_NOTE |
509 * --------------
510 * | PT_LOAD |
511 * --------------
512 * | ...... |
513 * --------------
514 * | PT_LOAD |
515 * --------------
516 * | sec_hdr |
517 * --------------
518 * | elf note |
519 * --------------
520 * | memory |
521 * --------------
522 *
523 * we only know where the memory is saved after we write elf note into
524 * vmcore.
525 */
526
527 /* write elf header to vmcore */
528 if (s->dump_info.d_class == ELFCLASS64) {
529 write_elf64_header(s, &local_err);
530 } else {
531 write_elf32_header(s, &local_err);
532 }
533 if (local_err) {
534 error_propagate(errp, local_err);
535 return;
536 }
537
538 if (s->dump_info.d_class == ELFCLASS64) {
539 /* write PT_NOTE to vmcore */
540 write_elf64_note(s, &local_err);
541 if (local_err) {
542 error_propagate(errp, local_err);
543 return;
544 }
545
546 /* write all PT_LOAD to vmcore */
547 write_elf_loads(s, &local_err);
548 if (local_err) {
549 error_propagate(errp, local_err);
550 return;
551 }
552
553 /* write section to vmcore */
554 if (s->have_section) {
555 write_elf_section(s, 1, &local_err);
556 if (local_err) {
557 error_propagate(errp, local_err);
558 return;
559 }
560 }
561
562 /* write notes to vmcore */
563 write_elf64_notes(fd_write_vmcore, s, &local_err);
564 if (local_err) {
565 error_propagate(errp, local_err);
566 return;
567 }
568 } else {
569 /* write PT_NOTE to vmcore */
570 write_elf32_note(s, &local_err);
571 if (local_err) {
572 error_propagate(errp, local_err);
573 return;
574 }
575
576 /* write all PT_LOAD to vmcore */
577 write_elf_loads(s, &local_err);
578 if (local_err) {
579 error_propagate(errp, local_err);
580 return;
581 }
582
583 /* write section to vmcore */
584 if (s->have_section) {
585 write_elf_section(s, 0, &local_err);
586 if (local_err) {
587 error_propagate(errp, local_err);
588 return;
589 }
590 }
591
592 /* write notes to vmcore */
593 write_elf32_notes(fd_write_vmcore, s, &local_err);
594 if (local_err) {
595 error_propagate(errp, local_err);
596 return;
597 }
598 }
599 }
600
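/*
 * Advance to the next guest-phys block that intersects the dump range,
 * updating s->next_block and s->start. Returns 1 when there are no more
 * blocks, 0 otherwise.
 */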
601 static int get_next_block(DumpState *s, GuestPhysBlock *block)
602 {
603 while (1) {
604 block = QTAILQ_NEXT(block, next);
605 if (!block) {
606             /* no more blocks */
607 return 1;
608 }
609
610 s->start = 0;
611 s->next_block = block;
612 if (s->has_filter) {
613 if (block->target_start >= s->begin + s->length ||
614 block->target_end <= s->begin) {
615 /* This block is out of the range */
616 continue;
617 }
618
619 if (s->begin > block->target_start) {
620 s->start = s->begin - block->target_start;
621 }
622 }
623
624 return 0;
625 }
626 }
627
628 /* write all memory to vmcore */
629 static void dump_iterate(DumpState *s, Error **errp)
630 {
631 GuestPhysBlock *block;
632 int64_t size;
633 Error *local_err = NULL;
634
635 do {
636 block = s->next_block;
637
638 size = block->target_end - block->target_start;
639 if (s->has_filter) {
640 size -= s->start;
641 if (s->begin + s->length < block->target_end) {
642 size -= block->target_end - (s->begin + s->length);
643 }
644 }
645 write_memory(s, block, s->start, size, &local_err);
646 if (local_err) {
647 error_propagate(errp, local_err);
648 return;
649 }
650
651 } while (!get_next_block(s, block));
652 }
653
654 static void create_vmcore(DumpState *s, Error **errp)
655 {
656 Error *local_err = NULL;
657
658 dump_begin(s, &local_err);
659 if (local_err) {
660 error_propagate(errp, local_err);
661 return;
662 }
663
664 dump_iterate(s, errp);
665 }
666
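/*
 * The kdump output is written in makedumpfile's "flattened" format: each
 * chunk of data is preceded by a MakedumpfileDataHeader that records the
 * offset and size it belongs at, so the dump can be produced without
 * seeking (e.g. when writing to a pipe) and later be rearranged into a
 * regular dump file (makedumpfile's -R option).
 */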
667 static int write_start_flat_header(int fd)
668 {
669 MakedumpfileHeader *mh;
670 int ret = 0;
671
672 QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
673 mh = g_malloc0(MAX_SIZE_MDF_HEADER);
674
675 memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
676 MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));
677
678 mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
679 mh->version = cpu_to_be64(VERSION_FLAT_HEADER);
680
681 size_t written_size;
682 written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
683 if (written_size != MAX_SIZE_MDF_HEADER) {
684 ret = -1;
685 }
686
687 g_free(mh);
688 return ret;
689 }
690
691 static int write_end_flat_header(int fd)
692 {
693 MakedumpfileDataHeader mdh;
694
695 mdh.offset = END_FLAG_FLAT_HEADER;
696 mdh.buf_size = END_FLAG_FLAT_HEADER;
697
698 size_t written_size;
699 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
700 if (written_size != sizeof(mdh)) {
701 return -1;
702 }
703
704 return 0;
705 }
706
707 static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
708 {
709 size_t written_size;
710 MakedumpfileDataHeader mdh;
711
712 mdh.offset = cpu_to_be64(offset);
713 mdh.buf_size = cpu_to_be64(size);
714
715 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
716 if (written_size != sizeof(mdh)) {
717 return -1;
718 }
719
720 written_size = qemu_write_full(fd, buf, size);
721 if (written_size != size) {
722 return -1;
723 }
724
725 return 0;
726 }
727
728 static int buf_write_note(const void *buf, size_t size, void *opaque)
729 {
730 DumpState *s = opaque;
731
732     /* note_buf is not large enough */
733 if (s->note_buf_offset + size > s->note_size) {
734 return -1;
735 }
736
737 memcpy(s->note_buf + s->note_buf_offset, buf, size);
738
739 s->note_buf_offset += size;
740
741 return 0;
742 }
743
744 /*
745  * This function retrieves various sizes from an ELF note header.
746  *
747  * @note has to be a valid ELF note. The returned sizes are unmodified
748  * (not padded or rounded up to a multiple of 4).
749 */
750 static void get_note_sizes(DumpState *s, const void *note,
751 uint64_t *note_head_size,
752 uint64_t *name_size,
753 uint64_t *desc_size)
754 {
755 uint64_t note_head_sz;
756 uint64_t name_sz;
757 uint64_t desc_sz;
758
759 if (s->dump_info.d_class == ELFCLASS64) {
760 const Elf64_Nhdr *hdr = note;
761 note_head_sz = sizeof(Elf64_Nhdr);
762 name_sz = tswap64(hdr->n_namesz);
763 desc_sz = tswap64(hdr->n_descsz);
764 } else {
765 const Elf32_Nhdr *hdr = note;
766 note_head_sz = sizeof(Elf32_Nhdr);
767 name_sz = tswap32(hdr->n_namesz);
768 desc_sz = tswap32(hdr->n_descsz);
769 }
770
771 if (note_head_size) {
772 *note_head_size = note_head_sz;
773 }
774 if (name_size) {
775 *name_size = name_sz;
776 }
777 if (desc_size) {
778 *desc_size = desc_sz;
779 }
780 }
781
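/* check whether the ELF note at @note carries the name @name */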
782 static bool note_name_equal(DumpState *s,
783 const uint8_t *note, const char *name)
784 {
785 int len = strlen(name) + 1;
786 uint64_t head_size, name_size;
787
788 get_note_sizes(s, note, &head_size, &name_size, NULL);
789 head_size = ROUND_UP(head_size, 4);
790
791 if (name_size != len ||
792         memcmp(note + head_size, name, len)) {
793 return false;
794 }
795
796 return true;
797 }
798
799 /* write common header, sub header and elf note to vmcore */
800 static void create_header32(DumpState *s, Error **errp)
801 {
802 DiskDumpHeader32 *dh = NULL;
803 KdumpSubHeader32 *kh = NULL;
804 size_t size;
805 uint32_t block_size;
806 uint32_t sub_hdr_size;
807 uint32_t bitmap_blocks;
808 uint32_t status = 0;
809 uint64_t offset_note;
810 Error *local_err = NULL;
811
812     /* write the common header; version 6 of the kdump-compressed format is used */
813 size = sizeof(DiskDumpHeader32);
814 dh = g_malloc0(size);
815
816 strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
817 dh->header_version = cpu_to_dump32(s, 6);
818 block_size = s->dump_info.page_size;
819 dh->block_size = cpu_to_dump32(s, block_size);
820 sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
821 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
822 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
823 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
824 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
825 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
826 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
827 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
828 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
829
830 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
831 status |= DUMP_DH_COMPRESSED_ZLIB;
832 }
833 #ifdef CONFIG_LZO
834 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
835 status |= DUMP_DH_COMPRESSED_LZO;
836 }
837 #endif
838 #ifdef CONFIG_SNAPPY
839 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
840 status |= DUMP_DH_COMPRESSED_SNAPPY;
841 }
842 #endif
843 dh->status = cpu_to_dump32(s, status);
844
845 if (write_buffer(s->fd, 0, dh, size) < 0) {
846 error_setg(errp, "dump: failed to write disk dump header");
847 goto out;
848 }
849
850 /* write sub header */
851 size = sizeof(KdumpSubHeader32);
852 kh = g_malloc0(size);
853
854 /* 64bit max_mapnr_64 */
855 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
856 kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
857 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
858
859 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
860 kh->offset_note = cpu_to_dump64(s, offset_note);
861 kh->note_size = cpu_to_dump32(s, s->note_size);
862
863 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
864 block_size, kh, size) < 0) {
865 error_setg(errp, "dump: failed to write kdump sub header");
866 goto out;
867 }
868
869 /* write note */
870 s->note_buf = g_malloc0(s->note_size);
871 s->note_buf_offset = 0;
872
873 /* use s->note_buf to store notes temporarily */
874 write_elf32_notes(buf_write_note, s, &local_err);
875 if (local_err) {
876 error_propagate(errp, local_err);
877 goto out;
878 }
879 if (write_buffer(s->fd, offset_note, s->note_buf,
880 s->note_size) < 0) {
881 error_setg(errp, "dump: failed to write notes");
882 goto out;
883 }
884
885 /* get offset of dump_bitmap */
886 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
887 block_size;
888
889 /* get offset of page */
890 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
891 block_size;
892
893 out:
894 g_free(dh);
895 g_free(kh);
896 g_free(s->note_buf);
897 }
898
899 /* write common header, sub header and elf note to vmcore */
900 static void create_header64(DumpState *s, Error **errp)
901 {
902 DiskDumpHeader64 *dh = NULL;
903 KdumpSubHeader64 *kh = NULL;
904 size_t size;
905 uint32_t block_size;
906 uint32_t sub_hdr_size;
907 uint32_t bitmap_blocks;
908 uint32_t status = 0;
909 uint64_t offset_note;
910 Error *local_err = NULL;
911
912     /* write the common header; version 6 of the kdump-compressed format is used */
913 size = sizeof(DiskDumpHeader64);
914 dh = g_malloc0(size);
915
916 strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
917 dh->header_version = cpu_to_dump32(s, 6);
918 block_size = s->dump_info.page_size;
919 dh->block_size = cpu_to_dump32(s, block_size);
920 sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
921 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
922 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
923 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
924 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
925 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
926 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
927 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
928 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
929
930 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
931 status |= DUMP_DH_COMPRESSED_ZLIB;
932 }
933 #ifdef CONFIG_LZO
934 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
935 status |= DUMP_DH_COMPRESSED_LZO;
936 }
937 #endif
938 #ifdef CONFIG_SNAPPY
939 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
940 status |= DUMP_DH_COMPRESSED_SNAPPY;
941 }
942 #endif
943 dh->status = cpu_to_dump32(s, status);
944
945 if (write_buffer(s->fd, 0, dh, size) < 0) {
946 error_setg(errp, "dump: failed to write disk dump header");
947 goto out;
948 }
949
950 /* write sub header */
951 size = sizeof(KdumpSubHeader64);
952 kh = g_malloc0(size);
953
954 /* 64bit max_mapnr_64 */
955 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
956 kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
957 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
958
959 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
960 kh->offset_note = cpu_to_dump64(s, offset_note);
961 kh->note_size = cpu_to_dump64(s, s->note_size);
962
963 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
964 block_size, kh, size) < 0) {
965 error_setg(errp, "dump: failed to write kdump sub header");
966 goto out;
967 }
968
969 /* write note */
970 s->note_buf = g_malloc0(s->note_size);
971 s->note_buf_offset = 0;
972
973 /* use s->note_buf to store notes temporarily */
974 write_elf64_notes(buf_write_note, s, &local_err);
975 if (local_err) {
976 error_propagate(errp, local_err);
977 goto out;
978 }
979
980 if (write_buffer(s->fd, offset_note, s->note_buf,
981 s->note_size) < 0) {
982 error_setg(errp, "dump: failed to write notes");
983 goto out;
984 }
985
986 /* get offset of dump_bitmap */
987 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
988 block_size;
989
990 /* get offset of page */
991 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
992 block_size;
993
994 out:
995 g_free(dh);
996 g_free(kh);
997 g_free(s->note_buf);
998 }
999
1000 static void write_dump_header(DumpState *s, Error **errp)
1001 {
1002 Error *local_err = NULL;
1003
1004 if (s->dump_info.d_class == ELFCLASS32) {
1005 create_header32(s, &local_err);
1006 } else {
1007 create_header64(s, &local_err);
1008 }
1009 error_propagate(errp, local_err);
1010 }
1011
1012 static size_t dump_bitmap_get_bufsize(DumpState *s)
1013 {
1014 return s->dump_info.page_size;
1015 }
1016
1017 /*
1018  * set dump_bitmap sequentially. A bit before last_pfn is not allowed to be
1019  * rewritten, so to set the first bit, pass 0 for both last_pfn and pfn.
1020  * set_dump_bitmap will always leave the most recently set bit un-synced; setting
1021  * (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into the
1022  * vmcore, i.e. it synchronizes the un-synced bits into the vmcore.
1023 */
1024 static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
1025 uint8_t *buf, DumpState *s)
1026 {
1027 off_t old_offset, new_offset;
1028 off_t offset_bitmap1, offset_bitmap2;
1029 uint32_t byte, bit;
1030 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
1031 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;
1032
1033     /* we must never go back and set a bit before last_pfn */
1034 assert(last_pfn <= pfn);
1035
1036 /*
1037      * if the bit to be set is not cached in buf, first flush the data in buf
1038      * to the vmcore.
1039      * making new_offset bigger than old_offset also syncs the remaining data
1040      * into the vmcore.
1041 */
1042 old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
1043 new_offset = bitmap_bufsize * (pfn / bits_per_buf);
1044
1045 while (old_offset < new_offset) {
1046 /* calculate the offset and write dump_bitmap */
1047 offset_bitmap1 = s->offset_dump_bitmap + old_offset;
1048 if (write_buffer(s->fd, offset_bitmap1, buf,
1049 bitmap_bufsize) < 0) {
1050 return -1;
1051 }
1052
1053 /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
1054 offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
1055 old_offset;
1056 if (write_buffer(s->fd, offset_bitmap2, buf,
1057 bitmap_bufsize) < 0) {
1058 return -1;
1059 }
1060
1061 memset(buf, 0, bitmap_bufsize);
1062 old_offset += bitmap_bufsize;
1063 }
1064
1065 /* get the exact place of the bit in the buf, and set it */
1066 byte = (pfn % bits_per_buf) / CHAR_BIT;
1067 bit = (pfn % bits_per_buf) % CHAR_BIT;
1068 if (value) {
1069 buf[byte] |= 1u << bit;
1070 } else {
1071 buf[byte] &= ~(1u << bit);
1072 }
1073
1074 return 0;
1075 }
1076
1077 static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
1078 {
1079 int target_page_shift = ctz32(s->dump_info.page_size);
1080
1081 return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
1082 }
1083
1084 static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
1085 {
1086 int target_page_shift = ctz32(s->dump_info.page_size);
1087
1088 return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
1089 }
1090
1091 /*
1092  * examine every page and return the page frame number and the address of the page.
1093  * bufptr can be NULL. note: the blocks here are supposed to reflect guest-phys
1094  * blocks, so block->target_start and block->target_end should be integral
1095  * multiples of the target page size.
1096 */
1097 static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
1098 uint8_t **bufptr, DumpState *s)
1099 {
1100 GuestPhysBlock *block = *blockptr;
1101 hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
1102 uint8_t *buf;
1103
1104 /* block == NULL means the start of the iteration */
1105 if (!block) {
1106 block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1107 *blockptr = block;
1108 assert((block->target_start & ~target_page_mask) == 0);
1109 assert((block->target_end & ~target_page_mask) == 0);
1110 *pfnptr = dump_paddr_to_pfn(s, block->target_start);
1111 if (bufptr) {
1112 *bufptr = block->host_addr;
1113 }
1114 return true;
1115 }
1116
1117 *pfnptr = *pfnptr + 1;
1118 addr = dump_pfn_to_paddr(s, *pfnptr);
1119
1120 if ((addr >= block->target_start) &&
1121 (addr + s->dump_info.page_size <= block->target_end)) {
1122 buf = block->host_addr + (addr - block->target_start);
1123 } else {
1124 /* the next page is in the next block */
1125 block = QTAILQ_NEXT(block, next);
1126 *blockptr = block;
1127 if (!block) {
1128 return false;
1129 }
1130 assert((block->target_start & ~target_page_mask) == 0);
1131 assert((block->target_end & ~target_page_mask) == 0);
1132 *pfnptr = dump_paddr_to_pfn(s, block->target_start);
1133 buf = block->host_addr;
1134 }
1135
1136 if (bufptr) {
1137 *bufptr = buf;
1138 }
1139
1140 return true;
1141 }
1142
1143 static void write_dump_bitmap(DumpState *s, Error **errp)
1144 {
1145 int ret = 0;
1146 uint64_t last_pfn, pfn;
1147 void *dump_bitmap_buf;
1148 size_t num_dumpable;
1149 GuestPhysBlock *block_iter = NULL;
1150 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
1151 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;
1152
1153 /* dump_bitmap_buf is used to store dump_bitmap temporarily */
1154 dump_bitmap_buf = g_malloc0(bitmap_bufsize);
1155
1156 num_dumpable = 0;
1157 last_pfn = 0;
1158
1159 /*
1160      * examine memory page by page, and set the bit in dump_bitmap corresponding
1161      * to each existing page.
1162 */
1163 while (get_next_page(&block_iter, &pfn, NULL, s)) {
1164 ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
1165 if (ret < 0) {
1166 error_setg(errp, "dump: failed to set dump_bitmap");
1167 goto out;
1168 }
1169
1170 last_pfn = pfn;
1171 num_dumpable++;
1172 }
1173
1174 /*
1175      * set_dump_bitmap will always leave the most recently set bit un-synced. Here
1176      * we set the remaining bits from last_pfn to the end of the bitmap buffer to
1177      * 0. With those set, the un-synced bit will be synchronized into the vmcore.
1178 */
1179 if (num_dumpable > 0) {
1180 ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
1181 dump_bitmap_buf, s);
1182 if (ret < 0) {
1183 error_setg(errp, "dump: failed to sync dump_bitmap");
1184 goto out;
1185 }
1186 }
1187
1188 /* number of dumpable pages that will be dumped later */
1189 s->num_dumpable = num_dumpable;
1190
1191 out:
1192 g_free(dump_bitmap_buf);
1193 }
1194
1195 static void prepare_data_cache(DataCache *data_cache, DumpState *s,
1196 off_t offset)
1197 {
1198 data_cache->fd = s->fd;
1199 data_cache->data_size = 0;
1200 data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
1201 data_cache->buf = g_malloc0(data_cache->buf_size);
1202 data_cache->offset = offset;
1203 }
1204
1205 static int write_cache(DataCache *dc, const void *buf, size_t size,
1206 bool flag_sync)
1207 {
1208 /*
1209      * dc->buf_size must not be less than size, otherwise the cache will never
1210      * be large enough
1211 */
1212 assert(size <= dc->buf_size);
1213
1214 /*
1215 * if flag_sync is set, synchronize data in dc->buf into vmcore.
1216 * otherwise check if the space is enough for caching data in buf, if not,
1217 * write the data in dc->buf to dc->fd and reset dc->buf
1218 */
1219 if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
1220 (flag_sync && dc->data_size > 0)) {
1221 if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
1222 return -1;
1223 }
1224
1225 dc->offset += dc->data_size;
1226 dc->data_size = 0;
1227 }
1228
1229 if (!flag_sync) {
1230 memcpy(dc->buf + dc->data_size, buf, size);
1231 dc->data_size += size;
1232 }
1233
1234 return 0;
1235 }
1236
1237 static void free_data_cache(DataCache *data_cache)
1238 {
1239 g_free(data_cache->buf);
1240 }
1241
1242 static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
1243 {
1244 switch (flag_compress) {
1245 case DUMP_DH_COMPRESSED_ZLIB:
1246 return compressBound(page_size);
1247
1248 case DUMP_DH_COMPRESSED_LZO:
1249 /*
1250 * LZO will expand incompressible data by a little amount. Please check
1251 * the following URL to see the expansion calculation:
1252 * http://www.oberhumer.com/opensource/lzo/lzofaq.php
1253 */
1254 return page_size + page_size / 16 + 64 + 3;
1255
1256 #ifdef CONFIG_SNAPPY
1257 case DUMP_DH_COMPRESSED_SNAPPY:
1258 return snappy_max_compressed_length(page_size);
1259 #endif
1260 }
1261 return 0;
1262 }
1263
1264 /*
1265 * check if the page is all 0
1266 */
1267 static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
1268 {
1269 return buffer_is_zero(buf, page_size);
1270 }
1271
1272 static void write_dump_pages(DumpState *s, Error **errp)
1273 {
1274 int ret = 0;
1275 DataCache page_desc, page_data;
1276 size_t len_buf_out, size_out;
1277 #ifdef CONFIG_LZO
1278 lzo_bytep wrkmem = NULL;
1279 #endif
1280 uint8_t *buf_out = NULL;
1281 off_t offset_desc, offset_data;
1282 PageDescriptor pd, pd_zero;
1283 uint8_t *buf;
1284 GuestPhysBlock *block_iter = NULL;
1285 uint64_t pfn_iter;
1286
1287 /* get offset of page_desc and page_data in dump file */
1288 offset_desc = s->offset_page;
1289 offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;
1290
1291 prepare_data_cache(&page_desc, s, offset_desc);
1292 prepare_data_cache(&page_data, s, offset_data);
1293
1294 /* prepare buffer to store compressed data */
1295 len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
1296 assert(len_buf_out != 0);
1297
1298 #ifdef CONFIG_LZO
1299 wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
1300 #endif
1301
1302 buf_out = g_malloc(len_buf_out);
1303
1304 /*
1305 * init zero page's page_desc and page_data, because every zero page
1306 * uses the same page_data
1307 */
1308 pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
1309 pd_zero.flags = cpu_to_dump32(s, 0);
1310 pd_zero.offset = cpu_to_dump64(s, offset_data);
1311 pd_zero.page_flags = cpu_to_dump64(s, 0);
1312 buf = g_malloc0(s->dump_info.page_size);
1313 ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
1314 g_free(buf);
1315 if (ret < 0) {
1316 error_setg(errp, "dump: failed to write page data (zero page)");
1317 goto out;
1318 }
1319
1320 offset_data += s->dump_info.page_size;
1321
1322 /*
1323      * dump memory to the vmcore page by page. zero pages all reside in the
1324      * first page of the page section
1325 */
1326 while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
1327 /* check zero page */
1328 if (is_zero_page(buf, s->dump_info.page_size)) {
1329 ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
1330 false);
1331 if (ret < 0) {
1332 error_setg(errp, "dump: failed to write page desc");
1333 goto out;
1334 }
1335 } else {
1336 /*
1337 * not zero page, then:
1338 * 1. compress the page
1339 * 2. write the compressed page into the cache of page_data
1340 * 3. get page desc of the compressed page and write it into the
1341 * cache of page_desc
1342 *
1343              * only one compression format is used here, because
1344              * s->flag_compress is set. When compression fails,
1345              * we fall back to saving the page in plaintext.
1346 */
1347 size_out = len_buf_out;
1348 if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
1349 (compress2(buf_out, (uLongf *)&size_out, buf,
1350 s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
1351 (size_out < s->dump_info.page_size)) {
1352 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
1353 pd.size = cpu_to_dump32(s, size_out);
1354
1355 ret = write_cache(&page_data, buf_out, size_out, false);
1356 if (ret < 0) {
1357 error_setg(errp, "dump: failed to write page data");
1358 goto out;
1359 }
1360 #ifdef CONFIG_LZO
1361 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
1362 (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
1363 (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
1364 (size_out < s->dump_info.page_size)) {
1365 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
1366 pd.size = cpu_to_dump32(s, size_out);
1367
1368 ret = write_cache(&page_data, buf_out, size_out, false);
1369 if (ret < 0) {
1370 error_setg(errp, "dump: failed to write page data");
1371 goto out;
1372 }
1373 #endif
1374 #ifdef CONFIG_SNAPPY
1375 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
1376 (snappy_compress((char *)buf, s->dump_info.page_size,
1377 (char *)buf_out, &size_out) == SNAPPY_OK) &&
1378 (size_out < s->dump_info.page_size)) {
1379 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
1380 pd.size = cpu_to_dump32(s, size_out);
1381
1382 ret = write_cache(&page_data, buf_out, size_out, false);
1383 if (ret < 0) {
1384 error_setg(errp, "dump: failed to write page data");
1385 goto out;
1386 }
1387 #endif
1388 } else {
1389 /*
1390                  * fall back to saving in plaintext; size_out should be
1391                  * assigned the target's page size
1392 */
1393 pd.flags = cpu_to_dump32(s, 0);
1394 size_out = s->dump_info.page_size;
1395 pd.size = cpu_to_dump32(s, size_out);
1396
1397 ret = write_cache(&page_data, buf,
1398 s->dump_info.page_size, false);
1399 if (ret < 0) {
1400 error_setg(errp, "dump: failed to write page data");
1401 goto out;
1402 }
1403 }
1404
1405 /* get and write page desc here */
1406 pd.page_flags = cpu_to_dump64(s, 0);
1407 pd.offset = cpu_to_dump64(s, offset_data);
1408 offset_data += size_out;
1409
1410 ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
1411 if (ret < 0) {
1412 error_setg(errp, "dump: failed to write page desc");
1413 goto out;
1414 }
1415 }
1416 s->written_size += s->dump_info.page_size;
1417 }
1418
1419 ret = write_cache(&page_desc, NULL, 0, true);
1420 if (ret < 0) {
1421 error_setg(errp, "dump: failed to sync cache for page_desc");
1422 goto out;
1423 }
1424 ret = write_cache(&page_data, NULL, 0, true);
1425 if (ret < 0) {
1426 error_setg(errp, "dump: failed to sync cache for page_data");
1427 goto out;
1428 }
1429
1430 out:
1431 free_data_cache(&page_desc);
1432 free_data_cache(&page_data);
1433
1434 #ifdef CONFIG_LZO
1435 g_free(wrkmem);
1436 #endif
1437
1438 g_free(buf_out);
1439 }
1440
1441 static void create_kdump_vmcore(DumpState *s, Error **errp)
1442 {
1443 int ret;
1444 Error *local_err = NULL;
1445
1446 /*
1447 * the kdump-compressed format is:
1448 * File offset
1449 * +------------------------------------------+ 0x0
1450 * | main header (struct disk_dump_header) |
1451 * |------------------------------------------+ block 1
1452 * | sub header (struct kdump_sub_header) |
1453 * |------------------------------------------+ block 2
1454 * | 1st-dump_bitmap |
1455 * |------------------------------------------+ block 2 + X blocks
1456 * | 2nd-dump_bitmap | (aligned by block)
1457 * |------------------------------------------+ block 2 + 2 * X blocks
1458 * | page desc for pfn 0 (struct page_desc) | (aligned by block)
1459 * | page desc for pfn 1 (struct page_desc) |
1460 * | : |
1461 * |------------------------------------------| (not aligned by block)
1462 * | page data (pfn 0) |
1463 * | page data (pfn 1) |
1464 * | : |
1465 * +------------------------------------------+
1466 */
1467
1468 ret = write_start_flat_header(s->fd);
1469 if (ret < 0) {
1470 error_setg(errp, "dump: failed to write start flat header");
1471 return;
1472 }
1473
1474 write_dump_header(s, &local_err);
1475 if (local_err) {
1476 error_propagate(errp, local_err);
1477 return;
1478 }
1479
1480 write_dump_bitmap(s, &local_err);
1481 if (local_err) {
1482 error_propagate(errp, local_err);
1483 return;
1484 }
1485
1486 write_dump_pages(s, &local_err);
1487 if (local_err) {
1488 error_propagate(errp, local_err);
1489 return;
1490 }
1491
1492 ret = write_end_flat_header(s->fd);
1493 if (ret < 0) {
1494 error_setg(errp, "dump: failed to write end flat header");
1495 return;
1496 }
1497 }
1498
1499 static ram_addr_t get_start_block(DumpState *s)
1500 {
1501 GuestPhysBlock *block;
1502
1503 if (!s->has_filter) {
1504 s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1505 return 0;
1506 }
1507
1508 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
1509 if (block->target_start >= s->begin + s->length ||
1510 block->target_end <= s->begin) {
1511 /* This block is out of the range */
1512 continue;
1513 }
1514
1515 s->next_block = block;
1516 if (s->begin > block->target_start) {
1517 s->start = s->begin - block->target_start;
1518 } else {
1519 s->start = 0;
1520 }
1521 return s->start;
1522 }
1523
1524 return -1;
1525 }
1526
1527 static void get_max_mapnr(DumpState *s)
1528 {
1529 GuestPhysBlock *last_block;
1530
1531 last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
1532 s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
1533 }
1534
1535 static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };
1536
1537 static void dump_state_prepare(DumpState *s)
1538 {
1539 /* zero the struct, setting status to active */
1540 *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
1541 }
1542
1543 bool dump_in_progress(void)
1544 {
1545 DumpState *state = &dump_state_global;
1546 return (atomic_read(&state->status) == DUMP_STATUS_ACTIVE);
1547 }
1548
1549 /* calculate total size of memory to be dumped (taking filter into
1550  * account.) */
1551 static int64_t dump_calculate_size(DumpState *s)
1552 {
1553 GuestPhysBlock *block;
1554 int64_t size = 0, total = 0, left = 0, right = 0;
1555
1556 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
1557 if (s->has_filter) {
1558 /* calculate the overlapped region. */
1559 left = MAX(s->begin, block->target_start);
1560 right = MIN(s->begin + s->length, block->target_end);
1561 size = right - left;
1562 size = size > 0 ? size : 0;
1563 } else {
1564 /* count the whole region in */
1565 size = (block->target_end - block->target_start);
1566 }
1567 total += size;
1568 }
1569
1570 return total;
1571 }
1572
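/*
 * Parse the guest-provided VMCOREINFO note and, if it contains a
 * "NUMBER(phys_base)=<value>" line, use that value instead of the
 * previously guessed phys_base.
 */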
1573 static void vmcoreinfo_update_phys_base(DumpState *s)
1574 {
1575 uint64_t size, note_head_size, name_size, phys_base;
1576 char **lines;
1577 uint8_t *vmci;
1578 size_t i;
1579
1580 if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) {
1581 return;
1582 }
1583
1584 get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size);
1585 note_head_size = ROUND_UP(note_head_size, 4);
1586
1587 vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4);
1588 *(vmci + size) = '\0';
1589
1590 lines = g_strsplit((char *)vmci, "\n", -1);
1591 for (i = 0; lines[i]; i++) {
1592 if (g_str_has_prefix(lines[i], "NUMBER(phys_base)=")) {
1593 if (qemu_strtou64(lines[i] + 18, NULL, 16,
1594 &phys_base) < 0) {
1595 warn_report("Failed to read NUMBER(phys_base)=");
1596 } else {
1597 s->dump_info.phys_base = phys_base;
1598 }
1599 break;
1600 }
1601 }
1602
1603 g_strfreev(lines);
1604 }
1605
1606 static void dump_init(DumpState *s, int fd, bool has_format,
1607 DumpGuestMemoryFormat format, bool paging, bool has_filter,
1608 int64_t begin, int64_t length, Error **errp)
1609 {
1610 VMCoreInfoState *vmci = vmcoreinfo_find();
1611 CPUState *cpu;
1612 int nr_cpus;
1613 Error *err = NULL;
1614 int ret;
1615
1616 s->has_format = has_format;
1617 s->format = format;
1618 s->written_size = 0;
1619
1620     /* kdump-compressed format conflicts with paging and filter */
1621 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1622 assert(!paging && !has_filter);
1623 }
1624
1625 if (runstate_is_running()) {
1626 vm_stop(RUN_STATE_SAVE_VM);
1627 s->resume = true;
1628 } else {
1629 s->resume = false;
1630 }
1631
1632 /* If we use KVM, we should synchronize the registers before we get dump
1633 * info or physmap info.
1634 */
1635 cpu_synchronize_all_states();
1636 nr_cpus = 0;
1637 CPU_FOREACH(cpu) {
1638 nr_cpus++;
1639 }
1640
1641 s->fd = fd;
1642 s->has_filter = has_filter;
1643 s->begin = begin;
1644 s->length = length;
1645
1646 memory_mapping_list_init(&s->list);
1647
1648 guest_phys_blocks_init(&s->guest_phys_blocks);
1649 guest_phys_blocks_append(&s->guest_phys_blocks);
1650 s->total_size = dump_calculate_size(s);
1651 #ifdef DEBUG_DUMP_GUEST_MEMORY
1652 fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
1653 #endif
1654
1655 /* it does not make sense to dump non-existent memory */
1656 if (!s->total_size) {
1657 error_setg(errp, "dump: no guest memory to dump");
1658 goto cleanup;
1659 }
1660
1661 s->start = get_start_block(s);
1662 if (s->start == -1) {
1663 error_setg(errp, QERR_INVALID_PARAMETER, "begin");
1664 goto cleanup;
1665 }
1666
1667 /* get dump info: endian, class and architecture.
1668 * If the target architecture is not supported, cpu_get_dump_info() will
1669 * return -1.
1670 */
1671 ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
1672 if (ret < 0) {
1673 error_setg(errp, QERR_UNSUPPORTED);
1674 goto cleanup;
1675 }
1676
1677 if (!s->dump_info.page_size) {
1678 s->dump_info.page_size = TARGET_PAGE_SIZE;
1679 }
1680
1681 s->note_size = cpu_get_note_size(s->dump_info.d_class,
1682 s->dump_info.d_machine, nr_cpus);
1683 if (s->note_size < 0) {
1684 error_setg(errp, QERR_UNSUPPORTED);
1685 goto cleanup;
1686 }
1687
1688 /*
1689 * The goal of this block is to (a) update the previously guessed
1690 * phys_base, (b) copy the guest note out of the guest.
1691 * Failure to do so is not fatal for dumping.
1692 */
1693 if (vmci) {
1694 uint64_t addr, note_head_size, name_size, desc_size;
1695 uint32_t size;
1696 uint16_t format;
1697
1698 note_head_size = s->dump_info.d_class == ELFCLASS32 ?
1699 sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr);
1700
1701 format = le16_to_cpu(vmci->vmcoreinfo.guest_format);
1702 size = le32_to_cpu(vmci->vmcoreinfo.size);
1703 addr = le64_to_cpu(vmci->vmcoreinfo.paddr);
1704 if (!vmci->has_vmcoreinfo) {
1705 warn_report("guest note is not present");
1706 } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) {
1707 warn_report("guest note size is invalid: %" PRIu32, size);
1708 } else if (format != VMCOREINFO_FORMAT_ELF) {
1709 warn_report("guest note format is unsupported: %" PRIu16, format);
1710 } else {
1711 s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */
1712 cpu_physical_memory_read(addr, s->guest_note, size);
1713
1714 get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size);
1715 s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size,
1716 desc_size);
1717 if (name_size > MAX_GUEST_NOTE_SIZE ||
1718 desc_size > MAX_GUEST_NOTE_SIZE ||
1719 s->guest_note_size > size) {
1720 warn_report("Invalid guest note header");
1721 g_free(s->guest_note);
1722 s->guest_note = NULL;
1723 } else {
1724 vmcoreinfo_update_phys_base(s);
1725 s->note_size += s->guest_note_size;
1726 }
1727 }
1728 }
1729
1730 /* get memory mapping */
1731 if (paging) {
1732 qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
1733 if (err != NULL) {
1734 error_propagate(errp, err);
1735 goto cleanup;
1736 }
1737 } else {
1738 qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
1739 }
1740
1741 s->nr_cpus = nr_cpus;
1742
1743 get_max_mapnr(s);
1744
1745 uint64_t tmp;
1746 tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
1747 s->dump_info.page_size);
1748 s->len_dump_bitmap = tmp * s->dump_info.page_size;
1749
1750 /* init for kdump-compressed format */
1751 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1752 switch (format) {
1753 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
1754 s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
1755 break;
1756
1757 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
1758 #ifdef CONFIG_LZO
1759 if (lzo_init() != LZO_E_OK) {
1760 error_setg(errp, "failed to initialize the LZO library");
1761 goto cleanup;
1762 }
1763 #endif
1764 s->flag_compress = DUMP_DH_COMPRESSED_LZO;
1765 break;
1766
1767 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
1768 s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
1769 break;
1770
1771 default:
1772 s->flag_compress = 0;
1773 }
1774
1775 return;
1776 }
1777
1778 if (s->has_filter) {
1779 memory_mapping_filter(&s->list, s->begin, s->length);
1780 }
1781
1782 /*
1783 * calculate phdr_num
1784 *
1785 * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
1786 */
1787 s->phdr_num = 1; /* PT_NOTE */
1788 if (s->list.num < UINT16_MAX - 2) {
1789 s->phdr_num += s->list.num;
1790 s->have_section = false;
1791 } else {
1792 s->have_section = true;
1793 s->phdr_num = PN_XNUM;
1794 s->sh_info = 1; /* PT_NOTE */
1795
1796 /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
1797 if (s->list.num <= UINT32_MAX - 1) {
1798 s->sh_info += s->list.num;
1799 } else {
1800 s->sh_info = UINT32_MAX;
1801 }
1802 }
1803
1804 if (s->dump_info.d_class == ELFCLASS64) {
1805 if (s->have_section) {
1806 s->memory_offset = sizeof(Elf64_Ehdr) +
1807 sizeof(Elf64_Phdr) * s->sh_info +
1808 sizeof(Elf64_Shdr) + s->note_size;
1809 } else {
1810 s->memory_offset = sizeof(Elf64_Ehdr) +
1811 sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
1812 }
1813 } else {
1814 if (s->have_section) {
1815 s->memory_offset = sizeof(Elf32_Ehdr) +
1816 sizeof(Elf32_Phdr) * s->sh_info +
1817 sizeof(Elf32_Shdr) + s->note_size;
1818 } else {
1819 s->memory_offset = sizeof(Elf32_Ehdr) +
1820 sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
1821 }
1822 }
1823
1824 return;
1825
1826 cleanup:
1827 dump_cleanup(s);
1828 }
1829
1830 /* this operation might be time consuming. */
1831 static void dump_process(DumpState *s, Error **errp)
1832 {
1833 Error *local_err = NULL;
1834 DumpQueryResult *result = NULL;
1835
1836 if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1837 create_kdump_vmcore(s, &local_err);
1838 } else {
1839 create_vmcore(s, &local_err);
1840 }
1841
1842 /* make sure status is written after written_size updates */
1843 smp_wmb();
1844 atomic_set(&s->status,
1845 (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));
1846
1847 /* send DUMP_COMPLETED message (unconditionally) */
1848 result = qmp_query_dump(NULL);
1849 /* should never fail */
1850 assert(result);
1851 qapi_event_send_dump_completed(result, !!local_err, (local_err ? \
1852 error_get_pretty(local_err) : NULL),
1853 &error_abort);
1854 qapi_free_DumpQueryResult(result);
1855
1856 error_propagate(errp, local_err);
1857 dump_cleanup(s);
1858 }
1859
1860 static void *dump_thread(void *data)
1861 {
1862 DumpState *s = (DumpState *)data;
1863 dump_process(s, NULL);
1864 return NULL;
1865 }
1866
1867 DumpQueryResult *qmp_query_dump(Error **errp)
1868 {
1869 DumpQueryResult *result = g_new(DumpQueryResult, 1);
1870 DumpState *state = &dump_state_global;
1871 result->status = atomic_read(&state->status);
1872 /* make sure we are reading status and written_size in order */
1873 smp_rmb();
1874 result->completed = state->written_size;
1875 result->total = state->total_size;
1876 return result;
1877 }
1878
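/*
 * QMP handler for dump-guest-memory. A typical invocation (illustrative
 * only) looks like:
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false,
 *                    "protocol": "file:/tmp/vmcore",
 *                    "format": "kdump-zlib" } }
 */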
1879 void qmp_dump_guest_memory(bool paging, const char *file,
1880 bool has_detach, bool detach,
1881 bool has_begin, int64_t begin, bool has_length,
1882 int64_t length, bool has_format,
1883 DumpGuestMemoryFormat format, Error **errp)
1884 {
1885 const char *p;
1886 int fd = -1;
1887 DumpState *s;
1888 Error *local_err = NULL;
1889 bool detach_p = false;
1890
1891 if (runstate_check(RUN_STATE_INMIGRATE)) {
1892 error_setg(errp, "Dump not allowed during incoming migration.");
1893 return;
1894 }
1895
1896     /* if there is a dump in the background, we should wait until the dump
1897      * has finished */
1898 if (dump_in_progress()) {
1899 error_setg(errp, "There is a dump in process, please wait.");
1900 return;
1901 }
1902
1903 /*
1904      * kdump-compressed format needs the whole memory dumped, so paging or
1905      * filtering is not supported here.
1906 */
1907 if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
1908 (paging || has_begin || has_length)) {
1909 error_setg(errp, "kdump-compressed format doesn't support paging or "
1910 "filter");
1911 return;
1912 }
1913 if (has_begin && !has_length) {
1914 error_setg(errp, QERR_MISSING_PARAMETER, "length");
1915 return;
1916 }
1917 if (!has_begin && has_length) {
1918 error_setg(errp, QERR_MISSING_PARAMETER, "begin");
1919 return;
1920 }
1921 if (has_detach) {
1922 detach_p = detach;
1923 }
1924
1925 /* check whether lzo/snappy is supported */
1926 #ifndef CONFIG_LZO
1927 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
1928 error_setg(errp, "kdump-lzo is not available now");
1929 return;
1930 }
1931 #endif
1932
1933 #ifndef CONFIG_SNAPPY
1934 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
1935 error_setg(errp, "kdump-snappy is not available now");
1936 return;
1937 }
1938 #endif
1939
1940 #if !defined(WIN32)
1941 if (strstart(file, "fd:", &p)) {
1942 fd = monitor_get_fd(cur_mon, p, errp);
1943 if (fd == -1) {
1944 return;
1945 }
1946 }
1947 #endif
1948
1949 if (strstart(file, "file:", &p)) {
1950 fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
1951 if (fd < 0) {
1952 error_setg_file_open(errp, errno, p);
1953 return;
1954 }
1955 }
1956
1957 if (fd == -1) {
1958 error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
1959 return;
1960 }
1961
1962 s = &dump_state_global;
1963 dump_state_prepare(s);
1964
1965 dump_init(s, fd, has_format, format, paging, has_begin,
1966 begin, length, &local_err);
1967 if (local_err) {
1968 error_propagate(errp, local_err);
1969 atomic_set(&s->status, DUMP_STATUS_FAILED);
1970 return;
1971 }
1972
1973 if (detach_p) {
1974 /* detached dump */
1975 s->detached = true;
1976 qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
1977 s, QEMU_THREAD_DETACHED);
1978 } else {
1979 /* sync dump */
1980 dump_process(s, errp);
1981 }
1982 }
1983
1984 DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
1985 {
1986 DumpGuestMemoryFormatList *item;
1987 DumpGuestMemoryCapability *cap =
1988 g_malloc0(sizeof(DumpGuestMemoryCapability));
1989
1990 /* elf is always available */
1991 item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1992 cap->formats = item;
1993 item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;
1994
1995 /* kdump-zlib is always available */
1996 item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1997 item = item->next;
1998 item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;
1999
2000 /* add new item if kdump-lzo is available */
2001 #ifdef CONFIG_LZO
2002 item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
2003 item = item->next;
2004 item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
2005 #endif
2006
2007 /* add new item if kdump-snappy is available */
2008 #ifdef CONFIG_SNAPPY
2009 item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
2010 item = item->next;
2011 item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
2012 #endif
2013
2014 return cap;
2015 }