qemu/opensuse.git / dump.c
/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include <unistd.h>
#include "elf.h"
#include <sys/procfs.h>
#include <glib.h>
#include "cpu.h"
#include "cpu-all.h"
#include "targphys.h"
#include "monitor.h"
#include "kvm.h"
#include "dump.h"
#include "sysemu.h"
#include "bswap.h"
#include "memory_mapping.h"
#include "error.h"
#include "qmp-commands.h"
#include "gdbstub.h"

#if defined(CONFIG_HAVE_CORE_DUMP)
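/*
 * The helpers below convert a value from host byte order to the byte order
 * requested for the dump (ELFDATA2LSB selects little endian, anything else
 * is treated as big endian), so the ELF structures written further down end
 * up in the target's expected encoding.
 */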
static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

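/*
 * State of an in-progress guest memory dump: the target's ELF parameters
 * (dump_info), the guest memory mappings emitted as PT_LOAD entries (list),
 * header bookkeeping (phdr_num, sh_info, have_section, note_size,
 * memory_offset), the output file descriptor, and the current position plus
 * the optional physical-address filter used while iterating over RAM blocks.
 */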
typedef struct DumpState {
    ArchDumpInfo dump_info;
    MemoryMappingList list;
    uint16_t phdr_num;
    uint32_t sh_info;
    bool have_section;
    bool resume;
    size_t note_size;
    target_phys_addr_t memory_offset;
    int fd;

    RAMBlock *block;
    ram_addr_t start;
    bool has_filter;
    int64_t begin;
    int64_t length;
    Error **errp;
} DumpState;

static int dump_cleanup(DumpState *s)
{
    int ret = 0;

    memory_mapping_list_free(&s->list);
    if (s->fd != -1) {
        close(s->fd);
    }
    if (s->resume) {
        vm_start();
    }

    return ret;
}

static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}

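/*
 * Write a buffer to the vmcore file descriptor. This is also used as the
 * write callback for the cpu_write_elf*_note() helpers, which is why it
 * receives the DumpState through an opaque pointer.
 */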
static int fd_write_vmcore(void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    int fd = s->fd;
    size_t writen_size;

    /* The fd may be passed from user, and it can be non-blocked */
    while (size) {
        writen_size = qemu_write_full(fd, buf, size);
        if (writen_size != size && errno != EAGAIN) {
            return -1;
        }

        buf += writen_size;
        size -= writen_size;
    }

    return 0;
}

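/*
 * Write the ELF header. e_phnum normally holds the real program header
 * count; when that count would overflow (see dump_init()), e_phnum is set to
 * PN_XNUM and a single section header carrying the real count in sh_info is
 * emitted instead.
 */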
static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, target_phys_addr_t offset)
{
    Elf64_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target64(offset, endian);
    phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, target_phys_addr_t offset)
{
    Elf32_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target32(offset, endian);
    phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    int endian = s->dump_info.d_endian;
    target_phys_addr_t begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target64(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

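/*
 * Write the per-CPU ELF notes (register state) followed by the QEMU-specific
 * notes for each CPU. The note contents themselves are produced by the
 * target-specific cpu_write_elf64_note()/cpu_write_elf64_qemunote() hooks.
 */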
static int write_elf64_notes(DumpState *s)
{
    CPUArchState *env;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        id = cpu_index(env);
        ret = cpu_write_elf64_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf64_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf32_note(DumpState *s)
{
    target_phys_addr_t begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int endian = s->dump_info.d_endian;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target32(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_notes(DumpState *s)
{
    CPUArchState *env;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        id = cpu_index(env);
        ret = cpu_write_elf32_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf32_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

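/*
 * Write the single section header used to carry the real program header
 * count when e_phnum is PN_XNUM. 'type' selects the ELF class of the header:
 * 0 writes an Elf32_Shdr, anything else an Elf64_Shdr.
 */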
static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int endian = s->dump_info.d_endian;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr64;
    }

    /* write the section header itself, not the address of the pointer */
    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}

static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}

/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
                        int64_t size)
{
    int64_t i;
    int ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

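/*
 * Translate a guest physical address into its offset inside the vmcore.
 * Returns -1 if the address is filtered out or does not belong to any RAM
 * block; the caller then emits a PT_LOAD entry with p_filesz == 0.
 */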
/* get the memory's offset in the vmcore */
static target_phys_addr_t get_offset(target_phys_addr_t phys_addr,
                                     DumpState *s)
{
    RAMBlock *block;
    target_phys_addr_t offset = s->memory_offset;
    int64_t size_in_block, start;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return -1;
        }
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->offset) {
                start = block->offset;
            } else {
                start = s->begin;
            }

            size_in_block = block->length - (start - block->offset);
            if (s->begin + s->length < block->offset + block->length) {
                size_in_block -= block->offset + block->length -
                                 (s->begin + s->length);
            }
        } else {
            start = block->offset;
            size_in_block = block->length;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            return phys_addr - start + offset;
        }

        offset += size_in_block;
    }

    return -1;
}

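/*
 * Emit one PT_LOAD program header per memory mapping. phdr_index starts at 1
 * because index 0 is the PT_NOTE header written by write_elf64_note() or
 * write_elf32_note().
 */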
static int write_elf_loads(DumpState *s)
{
    target_phys_addr_t offset;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    int ret;
    uint32_t max_index;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        offset = get_offset(memory_mapping->phys_addr, s);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(s) < 0) {
            return -1;
        }

    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(s) < 0) {
            return -1;
        }
    }

    return 0;
}

/* write PT_LOAD to vmcore */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);
    return 0;
}

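/*
 * Advance to the RAM block following 'block', skipping blocks outside the
 * dump filter and updating s->block and s->start. Returns 1 when there is
 * no further block to dump, 0 otherwise.
 */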
static int get_next_block(DumpState *s, RAMBlock *block)
{
    while (1) {
        block = QLIST_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->block = block;
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->offset) {
                s->start = s->begin - block->offset;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    RAMBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->block;

        size = block->length;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->offset + block->length) {
                size -= block->offset + block->length - (s->begin + s->length);
            }
        }
        ret = write_memory(s, block, s->start, size);
        if (ret == -1) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            dump_completed(s);
            return 0;
        }
    }
}

static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

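/*
 * Find the first RAM block to dump and set s->block/s->start accordingly.
 * Returns the start offset within that block, or -1 if the requested filter
 * range does not intersect any RAM block.
 */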
static ram_addr_t get_start_block(DumpState *s)
{
    RAMBlock *block;

    if (!s->has_filter) {
        s->block = QLIST_FIRST(&ram_list.blocks);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset >= s->begin + s->length ||
            block->offset + block->length <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->block = block;
        if (s->begin > block->offset) {
            s->start = s->begin - block->offset;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

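/*
 * Prepare a dump: pause the guest if it is running, locate the first RAM
 * block to dump, collect the architecture's dump parameters and the guest
 * memory mappings, and precompute the header counts and the file offset at
 * which guest memory will start.
 */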
static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
                     int64_t begin, int64_t length, Error **errp)
{
    CPUArchState *env;
    int nr_cpus;
    int ret;

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    s->errp = errp;
    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;
    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /*
     * get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     *
     * if we use KVM, we should synchronize the registers before we get the
     * dump info.
     */
    nr_cpus = 0;
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu_synchronize_state(env);
        nr_cpus++;
    }

    ret = cpu_get_dump_info(&s->dump_info);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /* get memory mapping */
    memory_mapping_list_init(&s->list);
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list);
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list);
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;

cleanup:
    if (s->resume) {
        vm_start();
    }

    return -1;
}

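/*
 * QMP handler for the dump-guest-memory command. 'file' selects the output
 * with a protocol prefix: "fd:<name>" reuses a file descriptor previously
 * handed to the monitor, "file:<path>" creates the file on the host.
 * 'begin'/'length' optionally restrict the dump to a physical address range.
 */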
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p);
        if (fd == -1) {
            error_set(errp, QERR_FD_NOT_FOUND, p);
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_set(errp, QERR_OPEN_FILE_FAILED, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc(sizeof(DumpState));

    ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
        error_set(errp, QERR_IO_ERROR);
    }

    g_free(s);
}

#else
/* we need this function in hmp.c */
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    error_set(errp, QERR_UNSUPPORTED);
}
#endif