/*
 * Generic Virtual-Device Fuzzing Target
 *
 * Copyright Red Hat Inc., 2020
 *
 * Authors:
 *  Alexander Bulekov <alxndr@bu.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "hw/core/cpu.h"
#include "tests/qtest/libqos/libqtest.h"
#include "tests/qtest/libqos/pci-pc.h"
#include "fuzz.h"
#include "fork_fuzz.h"
#include "exec/memory.h"
#include "exec/ramblock.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "hw/boards.h"
#include "generic_fuzz_configs.h"
#include "hw/mem/sparse-mem.h"
/*
 * SEPARATOR is used to separate "operations" in the fuzz input
 */
#define SEPARATOR "FUZZ"

enum cmds {
    OP_IN,
    OP_OUT,
    OP_READ,
    OP_WRITE,
    OP_PCI_READ,
    OP_PCI_WRITE,
    OP_DISABLE_PCI,
    OP_ADD_DMA_PATTERN,
    OP_CLEAR_DMA_PATTERNS,
    OP_CLOCK_STEP,
};
#define DEFAULT_TIMEOUT_US 100000
#define USEC_IN_SEC 1000000

#define MAX_DMA_FILL_SIZE 0x10000

#define PCI_HOST_BRIDGE_CFG 0xcf8
#define PCI_HOST_BRIDGE_DATA 0xcfc
typedef struct {
    ram_addr_t addr;
    ram_addr_t size; /* The number of bytes until the end of the I/O region */
} address_range;
static useconds_t timeout = DEFAULT_TIMEOUT_US;

static bool qtest_log_enabled;

MemoryRegion *sparse_mem_mr;
/*
 * A pattern used to populate a DMA region or perform a memwrite. This is
 * useful for e.g. populating tables of unique addresses.
 * Example {.index = 1; .stride = 2; .len = 3; .data = "\x00\x01\x02"}
 * Renders as: 00 01 02 00 03 02 00 05 02 00 07 02 ...
 */
typedef struct {
    uint8_t index;      /* Index of a byte to increment by stride */
    uint8_t stride;     /* Increment each index'th byte by this amount */
    size_t len;
    const uint8_t *data;
} pattern;
/* Avoid filling the same DMA region between MMIO/PIO commands? */
static bool avoid_double_fetches;

static QTestState *qts_global; /* Need a global for the DMA callback */

/*
 * List of memory regions that are children of QOM objects specified by the
 * user for fuzzing.
 */
static GHashTable *fuzzable_memoryregions;
static GPtrArray *fuzzable_pci_devices;
struct get_io_cb_info {
    int index;
    int found;
    address_range result;
};

static bool get_io_address_cb(Int128 start, Int128 size,
                              const MemoryRegion *mr,
                              hwaddr offset_in_region,
                              void *opaque)
{
    struct get_io_cb_info *info = opaque;
    if (g_hash_table_lookup(fuzzable_memoryregions, mr)) {
        if (info->index == 0) {
            info->result.addr = (ram_addr_t)start;
            info->result.size = (ram_addr_t)size;
            info->found = 1;
            return false;
        }
        info->index--;
    }
    return true;
}
/*
 * List of dma regions populated since the last fuzzing command. Used to ensure
 * that we only write to each DMA address once, to avoid race conditions when
 * building reproducers.
 */
static GArray *dma_regions;

static GArray *dma_patterns;
static int dma_pattern_index;
static bool pci_disabled;
/*
 * Allocate a block of memory and populate it with a pattern.
 */
static void *pattern_alloc(pattern p, size_t len)
{
    int i;
    uint8_t *buf = g_malloc(len);
    uint8_t sum = 0;

    for (i = 0; i < len; ++i) {
        buf[i] = p.data[i % p.len];
        if ((i % p.len) == p.index) {
            buf[i] += sum;
            sum += p.stride;
        }
    }
    return buf;
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /*
     * Regions are assumed to support 1-4 byte accesses unless
     * otherwise specified.
     */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address. */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum. */
    if (l > access_size_max) {
        l = access_size_max;
    }

    return l;
}
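/*
 * Worked example of the alignment bound above (addresses are arbitrary):
 * for addr = 0x3001, addr & -addr = 0x1, so accesses are capped at 1 byte;
 * for addr = 0x3004, addr & -addr = 0x4, allowing up to a 4-byte access
 * (still subject to the region's max_access_size).
 */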
/*
 * Call-back for functions that perform DMA reads from guest memory. Confirm
 * that the region has not already been populated since the last loop in
 * generic_fuzz(), avoiding potential race-conditions, which we don't have
 * a good way of reproducing right now.
 */
void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr)
{
    /* Are we in the generic-fuzzer or are we using another fuzz-target? */
    if (!qts_global) {
        return;
    }

    /*
     * Return immediately if:
     * - We have no DMA patterns defined
     * - The length of the DMA read request is zero
     * - The DMA read is hitting an MR other than the machine's main RAM
     * - The DMA request hits past the bounds of our RAM
     */
    if (dma_patterns->len == 0
        || len == 0
        || (mr != current_machine->ram && mr != sparse_mem_mr)) {
        return;
    }

    /*
     * If we overlap with any existing dma_regions, split the range and only
     * populate the non-overlapping parts.
     */
    address_range region;
    bool double_fetch = false;
    for (int i = 0;
         i < dma_regions->len && (avoid_double_fetches || qtest_log_enabled);
         ++i) {
        region = g_array_index(dma_regions, address_range, i);
        if (addr < region.addr + region.size && addr + len > region.addr) {
            double_fetch = true;
            if (addr < region.addr
                && avoid_double_fetches) {
                fuzz_dma_read_cb(addr, region.addr - addr, mr);
            }
            if (addr + len > region.addr + region.size
                && avoid_double_fetches) {
                fuzz_dma_read_cb(region.addr + region.size,
                                 addr + len - (region.addr + region.size), mr);
            }
            return;
        }
    }

    /* Cap the length of the DMA access to something reasonable */
    len = MIN(len, MAX_DMA_FILL_SIZE);

    address_range ar = {addr, len};
    g_array_append_val(dma_regions, ar);
    pattern p = g_array_index(dma_patterns, pattern, dma_pattern_index);
    void *buf_base = pattern_alloc(p, ar.size);
    void *buf = buf_base;
    hwaddr l, addr1;
    MemoryRegion *mr1;
    while (len > 0) {
        l = len;
        mr1 = address_space_translate(first_cpu->as,
                                      addr, &addr1, &l, true,
                                      MEMTXATTRS_UNSPECIFIED);

        /*
         * If mr1 isn't RAM, address_space_translate doesn't update l. Use
         * memory_access_size to identify the number of bytes that it is safe
         * to write without accidentally writing to another MemoryRegion.
         */
        if (!memory_region_is_ram(mr1)) {
            l = memory_access_size(mr1, l, addr1);
        }
        if (memory_region_is_ram(mr1) ||
            memory_region_is_romd(mr1) ||
            mr1 == sparse_mem_mr) {
            if (qtest_log_enabled) {
                /*
                 * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix the
                 * log that will be written by qtest.c with a DMA tag, so we can
                 * reorder the resulting QTest trace so the DMA fills precede
                 * the last PIO/MMIO command.
                 */
                fprintf(stderr, "[DMA] ");
                if (double_fetch) {
                    fprintf(stderr, "[DOUBLE-FETCH] ");
                }
                fflush(stderr);
            }
            qtest_memwrite(qts_global, addr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    g_free(buf_base);

    /* Increment the index of the pattern for the next DMA access */
    dma_pattern_index = (dma_pattern_index + 1) % dma_patterns->len;
}
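/*
 * Illustration of the overlap splitting above, with QEMU_AVOID_DOUBLE_FETCH
 * set (addresses are arbitrary examples): if dma_regions already contains
 * {addr = 0x1000, size = 0x100} and a new DMA read covers [0xf80, 0x1180),
 * only the non-overlapping sub-ranges [0xf80, 0x1000) and [0x1100, 0x1180)
 * are filled; the overlapping middle keeps whatever was written earlier, so
 * reproducers do not depend on the order of racing DMA reads.
 */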
/*
 * Here we want to convert a fuzzer-provided [io-region-index, offset] to
 * a physical address. To do this, we iterate over all of the matched
 * MemoryRegions. Check whether each region exists within the particular io
 * space. Return the absolute address of the offset within the index'th region
 * that is a subregion of the io_space and the distance until the end of the
 * memory region.
 */
static bool get_io_address(address_range *result, AddressSpace *as,
                           uint8_t index, uint32_t offset)
{
    FlatView *view;
    view = as->current_map;
    struct get_io_cb_info cb_info = {};

    cb_info.index = index;

    /*
     * Loop around the FlatView until we match "index" number of
     * fuzzable_memoryregions, or until we know that there are no matching
     * memory-regions.
     */
    do {
        flatview_for_each_range(view, get_io_address_cb, &cb_info);
    } while (cb_info.index != index && !cb_info.found);

    *result = cb_info.result;
    if (cb_info.found) {
        offset = offset % result->size;
        result->addr += offset;
        result->size -= offset;
    }
    return cb_info.found;
}
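/*
 * Example of the [index, offset] lookup (region layout is hypothetical):
 * suppose the flattened memory address space contains two fuzzable regions,
 * one at [0xe0000000, 0xe0001000) and one at [0xfebf0000, 0xfebf1000).
 * Then index = 1, offset = 0x2010 selects the second region, the offset is
 * wrapped to 0x2010 % 0x1000 = 0x10, and the result is
 * {addr = 0xfebf0010, size = 0xff0}.
 */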
static bool get_pio_address(address_range *result,
                            uint8_t index, uint16_t offset)
{
    /*
     * PIO BARs can be set past the maximum port address (0xFFFF). Thus, result
     * can contain an addr that extends past the PIO space. When we pass this
     * address to qtest_in/qtest_out, it is cast to a uint16_t, so we might end
     * up fuzzing a completely different MemoryRegion/Device. Therefore, check
     * that the address here is within the PIO space limits.
     */
    bool found = get_io_address(result, &address_space_io, index, offset);
    return result->addr <= 0xFFFF ? found : false;
}
static bool get_mmio_address(address_range *result,
                             uint8_t index, uint32_t offset)
{
    return get_io_address(result, &address_space_memory, index, offset);
}
static void op_in(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_inw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_inl(s, abs.addr);
        }
        break;
    }
}
static void op_out(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
        uint32_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_outw(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_outl(s, abs.addr, a.value);
        }
        break;
    }
}
static void op_read(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_readb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_readw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_readl(s, abs.addr);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_readq(s, abs.addr);
        }
        break;
    }
}
static void op_write(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
        uint64_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_writeb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_writew(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_writel(s, abs.addr, a.value & 0xFFFFFFFF);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_writeq(s, abs.addr, a.value);
        }
        break;
    }
}
static void op_pci_read(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
    } a;

    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                       a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Word:
        qtest_inw(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Long:
        qtest_inl(s, PCI_HOST_BRIDGE_DATA);
        break;
    }
}
static void op_pci_write(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
        uint32_t value;
    } a;

    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                       a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFF);
        break;
    case Word:
        qtest_outw(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFF);
        break;
    case Long:
        qtest_outl(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFFFFFF);
        break;
    }
}
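/*
 * op_pci_read()/op_pci_write() drive the standard PCI configuration
 * mechanism #1: a dword written to port 0xcf8 (PCI_HOST_BRIDGE_CFG) selects
 * bus/device/function/register, and port 0xcfc (PCI_HOST_BRIDGE_DATA)
 * accesses the selected register. Worked example (the device is arbitrary):
 * for a device at devfn 0x18 (slot 3, function 0) and config offset 0x04,
 * the address written to 0xcf8 is
 * (1U << 31) | (0x18 << 8) | 0x04 = 0x80001804, with bus 0 implied.
 */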
static void op_add_dma_pattern(QTestState *s,
                               const unsigned char *data, size_t len)
{
    struct {
        /*
         * index and stride can be used to increment the index-th byte of the
         * pattern by the value stride, for each loop of the pattern.
         */
        uint8_t index;
        uint8_t stride;
    } a;

    if (len < sizeof(a) + 1) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)};
    p.index = a.index % p.len;
    g_array_append_val(dma_patterns, p);
}
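/*
 * Example (assuming the two-byte index/stride header laid out above): a
 * command payload of 01 02 00 01 02 yields
 * pattern {.index = 1, .stride = 2, .len = 3, .data = "\x00\x01\x02"},
 * i.e. the same pattern that the comment near the pattern struct renders as
 * 00 01 02 00 03 02 00 05 02 ...
 */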
static void op_clear_dma_patterns(QTestState *s,
                                  const unsigned char *data, size_t len)
{
    g_array_set_size(dma_patterns, 0);
    dma_pattern_index = 0;
}
static void op_clock_step(QTestState *s, const unsigned char *data, size_t len)
{
    qtest_clock_step_next(s);
}
static void op_disable_pci(QTestState *s, const unsigned char *data, size_t len)
{
    pci_disabled = true;
}
static void handle_timeout(int sig)
{
    if (qtest_log_enabled) {
        fprintf(stderr, "[Timeout]\n");
        fflush(stderr);
    }

    /*
     * If there is a crash, libfuzzer/ASAN forks a child to run an
     * "llvm-symbolizer" process for printing out a pretty stacktrace. It
     * communicates with this child using a pipe. If we timeout+Exit, while
     * libfuzzer is still communicating with the llvm-symbolizer child, we will
     * be left with an orphan llvm-symbolizer process. Sometimes, this appears
     * to lead to a deadlock in the forkserver. Use waitpid to check if there
     * are any waitable children. If so, exit out of the signal-handler, and
     * let libfuzzer finish communicating with the child, and exit, on its own.
     */
    if (waitpid(-1, NULL, WNOHANG) == 0) {
        _Exit(0);
    }
}
/*
 * Here, we interpret random bytes from the fuzzer, as a sequence of commands.
 * Some commands can be variable-width, so we use a separator, SEPARATOR, to
 * specify the boundaries between commands. SEPARATOR is used to separate
 * "operations" in the fuzz input. Why use a separator, instead of just using
 * the operations' length to identify operation boundaries?
 * 1. This is a simple way to support variable-length operations
 * 2. This adds "stability" to the input.
 *    For example take the input "AbBcgDefg", where there is no separator and
 *    Opcodes are capitalized.
 *    Simply, by removing the first byte, we end up with a very different
 *    input, where every remaining opcode is re-interpreted: "bBcgDefg".
 *
 *    By adding a separator, we avoid this problem:
 *    Ab SEP Bcg SEP Defg -> B SEP Bcg SEP Defg
 *    Since B uses two additional bytes as operands, the first "B" will be
 *    ignored. The fuzzer actively tries to reduce inputs, so such unused
 *    bytes are likely to be pruned, eventually.
 *
 * SEPARATOR is trivial for the fuzzer to discover when using ASan. Optionally,
 * SEPARATOR can be manually specified as a dictionary value (see libfuzzer's
 * -dict), though this should not be necessary.
 *
 * As a result, the stream of bytes is converted into a sequence of commands.
 * In a simplified example where SEPARATOR is 0xFF:
 * 00 01 02 FF 03 04 05 06 FF 01 FF ...
 * becomes this sequence of commands:
 * 00 01 02    -> op00 (0102)   -> in (0102, 2)
 * 03 04 05 06 -> op03 (040506) -> write (040506, 3)
 * 01          -> op01 (-,0)    -> out (-,0)
 * ...
 *
 * Note here that it is the job of the individual opcode functions to check
 * that enough data was provided. I.e. in the last command, out (-,0), op_out
 * needs to detect that there is not enough data provided to select an
 * address/value for the operation, and simply return.
 */
static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
{
    void (*ops[]) (QTestState *s, const unsigned char *, size_t) = {
        [OP_IN]                 = op_in,
        [OP_OUT]                = op_out,
        [OP_READ]               = op_read,
        [OP_WRITE]              = op_write,
        [OP_PCI_READ]           = op_pci_read,
        [OP_PCI_WRITE]          = op_pci_write,
        [OP_DISABLE_PCI]        = op_disable_pci,
        [OP_ADD_DMA_PATTERN]    = op_add_dma_pattern,
        [OP_CLEAR_DMA_PATTERNS] = op_clear_dma_patterns,
        [OP_CLOCK_STEP]         = op_clock_step,
    };
    const unsigned char *cmd = Data;
    const unsigned char *nextcmd;
    size_t cmd_len;
    uint8_t op;

    struct sigaction sact;
    struct itimerval timer;
    sigset_t set;

    /*
     * Sometimes the fuzzer will find inputs that take quite a long time to
     * process. Oftentimes, these inputs do not result in new coverage.
     * Even if these inputs might be interesting, they can slow down the
     * fuzzer, overall. Set a timeout for each command to avoid hurting
     * performance, too much.
     */
    if (timeout) {
        sigemptyset(&sact.sa_mask);
        sact.sa_flags   = SA_NODEFER;
        sact.sa_handler = handle_timeout;
        sigaction(SIGALRM, &sact, NULL);

        sigemptyset(&set);
        sigaddset(&set, SIGALRM);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);

        memset(&timer, 0, sizeof(timer));
        timer.it_value.tv_sec = timeout / USEC_IN_SEC;
        timer.it_value.tv_usec = timeout % USEC_IN_SEC;
    }

    op_clear_dma_patterns(s, NULL, 0);
    pci_disabled = false;

    while (cmd && Size) {
        /* Reset the timeout, each time we run a new command */
        if (timeout) {
            setitimer(ITIMER_REAL, &timer, NULL);
        }

        /* Get the length until the next command or end of input */
        nextcmd = memmem(cmd, Size, SEPARATOR, strlen(SEPARATOR));
        cmd_len = nextcmd ? nextcmd - cmd : Size;

        if (cmd_len > 0) {
            /* Interpret the first byte of the command as an opcode */
            op = *cmd % (sizeof(ops) / sizeof((ops)[0]));
            ops[op](s, cmd + 1, cmd_len - 1);

            /* Run the main loop */
            flush_events(s);
        }
        /* Advance to the next command */
        cmd = nextcmd ? nextcmd + sizeof(SEPARATOR) - 1 : nextcmd;
        Size = Size - (cmd_len + sizeof(SEPARATOR) - 1);
        g_array_set_size(dma_regions, 0);
    }
}
static void usage(void)
{
    printf("Please specify the following environment variables:\n");
    printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
    printf("QEMU_FUZZ_OBJECTS= "
           "a space separated list of QOM type names for objects to fuzz\n");
    printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
           "Try to avoid racy DMA double fetch bugs? %d by default\n",
           avoid_double_fetches);
    printf("Optionally: QEMU_FUZZ_TIMEOUT= Specify a custom timeout (us). "
           "0 to disable. %d by default\n", timeout);
    exit(0);
}
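/*
 * Typical invocation (the device and its arguments are only an example):
 *
 *   QEMU_FUZZ_ARGS='-device virtio-net-pci,netdev=n0 -netdev user,id=n0' \
 *   QEMU_FUZZ_OBJECTS='virtio*' \
 *   ./qemu-fuzz-i386 --fuzz-target=generic-fuzz
 */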
static int locate_fuzz_memory_regions(Object *child, void *opaque)
{
    const char *name;
    MemoryRegion *mr;
    if (object_dynamic_cast(child, TYPE_MEMORY_REGION)) {
        mr = MEMORY_REGION(child);
        if ((memory_region_is_ram(mr) ||
             memory_region_is_ram_device(mr) ||
             memory_region_is_rom(mr)) == false) {
            name = object_get_canonical_path_component(child);
            /*
             * We don't want duplicate pointers to the same MemoryRegion, so
             * try to remove copies of the pointer, before adding it.
             */
            g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
        }
    }

    return 0;
}
static int locate_fuzz_objects(Object *child, void *opaque)
{
    GString *type_name;
    GString *path_name;
    char *pattern = opaque;

    type_name = g_string_new(object_get_typename(child));
    g_string_ascii_down(type_name);
    if (g_pattern_match_simple(pattern, type_name->str)) {
        /* Find and save ptrs to any child MemoryRegions */
        object_child_foreach_recursive(child, locate_fuzz_memory_regions, NULL);

        /*
         * We matched an object. If it's a PCI device, store a pointer to it so
         * we can map BARs and fuzz its config space.
         */
        if (object_dynamic_cast(OBJECT(child), TYPE_PCI_DEVICE)) {
            /*
             * Don't want duplicate pointers to the same PCIDevice, so remove
             * copies of the pointer, before adding it.
             */
            g_ptr_array_remove_fast(fuzzable_pci_devices, PCI_DEVICE(child));
            g_ptr_array_add(fuzzable_pci_devices, PCI_DEVICE(child));
        }
    } else if (object_dynamic_cast(OBJECT(child), TYPE_MEMORY_REGION)) {
        path_name = g_string_new(object_get_canonical_path_component(child));
        g_string_ascii_down(path_name);
        if (g_pattern_match_simple(pattern, path_name->str)) {
            MemoryRegion *mr;
            mr = MEMORY_REGION(child);
            if ((memory_region_is_ram(mr) ||
                 memory_region_is_ram_device(mr) ||
                 memory_region_is_rom(mr)) == false) {
                g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
            }
        }
        g_string_free(path_name, true);
    }
    g_string_free(type_name, true);
    return 0;
}
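/*
 * QEMU_FUZZ_OBJECTS entries are glob-style patterns (only the '*' and '?'
 * wildcards of g_pattern_match_simple) matched, after lower-casing, against
 * QOM type names and MemoryRegion names. For example (names are
 * illustrative), "virtio*" would match an object whose type name is
 * "virtio-net-pci", and "*e1000e*" would match both a device of that type
 * and any MemoryRegion whose name contains "e1000e".
 */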
static void pci_enum(gpointer pcidev, gpointer bus)
{
    PCIDevice *dev = pcidev;
    QPCIDevice *qdev;
    int i;

    qdev = qpci_device_find(bus, dev->devfn);
    g_assert(qdev != NULL);
    for (i = 0; i < 6; i++) {
        if (dev->io_regions[i].size) {
            qpci_iomap(qdev, i, NULL);
        }
    }
    qpci_device_enable(qdev);
    g_free(qdev);
}
static void generic_pre_fuzz(QTestState *s)
{
    GHashTableIter iter;
    MemoryRegion *mr;
    QPCIBus *pcibus;
    char **result;
    GString *name_pattern;

    if (!getenv("QEMU_FUZZ_OBJECTS")) {
        usage();
    }
    if (getenv("QTEST_LOG")) {
        qtest_log_enabled = 1;
    }
    if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
        avoid_double_fetches = 1;
    }
    if (getenv("QEMU_FUZZ_TIMEOUT")) {
        timeout = g_ascii_strtoll(getenv("QEMU_FUZZ_TIMEOUT"), NULL, 0);
    }
    qts_global = s;

    /*
     * Create a special device that we can use to back DMA buffers at very
     * high memory addresses
     */
    sparse_mem_mr = sparse_mem_init(0, UINT64_MAX);

    dma_regions = g_array_new(false, false, sizeof(address_range));
    dma_patterns = g_array_new(false, false, sizeof(pattern));

    fuzzable_memoryregions = g_hash_table_new(NULL, NULL);
    fuzzable_pci_devices = g_ptr_array_new();

    result = g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
    for (int i = 0; result[i] != NULL; i++) {
        name_pattern = g_string_new(result[i]);
        /*
         * Make the pattern lowercase. We do the same for all the MemoryRegion
         * and Type names so the configs are case-insensitive.
         */
        g_string_ascii_down(name_pattern);
        printf("Matching objects by name %s\n", result[i]);
        object_child_foreach_recursive(qdev_get_machine(),
                                       locate_fuzz_objects,
                                       name_pattern->str);
        g_string_free(name_pattern, true);
    }
    g_strfreev(result);

    printf("This process will try to fuzz the following MemoryRegions:\n");

    g_hash_table_iter_init(&iter, fuzzable_memoryregions);
    while (g_hash_table_iter_next(&iter, (gpointer)&mr, NULL)) {
        printf(" * %s (size 0x%" PRIx64 ")\n",
               object_get_canonical_path_component(&(mr->parent_obj)),
               memory_region_size(mr));
    }

    if (!g_hash_table_size(fuzzable_memoryregions)) {
        printf("No fuzzable memory regions found...\n");
        exit(1);
    }

    pcibus = qpci_new_pc(s, NULL);
    g_ptr_array_foreach(fuzzable_pci_devices, pci_enum, pcibus);
    qpci_free_pc(pcibus);
}
/*
 * When libfuzzer gives us two inputs to combine, return a new input with the
 * following structure:
 *
 * Input 1 (data1)
 * SEPARATOR
 * Clear out the DMA Patterns
 * SEPARATOR
 * Disable the pci_read/write instructions
 * SEPARATOR
 * Input 2 (data2)
 *
 * The idea is to collate the core behaviors of the two inputs.
 * For example:
 * Input 1: maps a device's BARs, sets up three DMA patterns, and triggers
 *          device functionality A
 * Input 2: maps a device's BARs, sets up one DMA pattern, and triggers device
 *          functionality B
 *
 * This function attempts to produce an input that:
 * Output:  maps a device's BARs, sets up three DMA patterns, triggers device
 *          functionality A, replaces the DMA patterns with a single pattern,
 *          and triggers device functionality B.
 */
static size_t generic_fuzz_crossover(const uint8_t *data1, size_t size1,
                                     const uint8_t *data2, size_t size2,
                                     uint8_t *out, size_t max_out_size,
                                     unsigned int seed)
{
    size_t copy_len = 0, size = 0;

    /* Check that we have enough space for data1 and at least part of data2 */
    if (max_out_size <= size1 + strlen(SEPARATOR) * 3 + 2) {
        return 0;
    }

    /* Copy in the first input */
    copy_len = size1;
    memcpy(out + size, data1, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Clear out the DMA Patterns */
    copy_len = 1;
    out[size] = OP_CLEAR_DMA_PATTERNS;
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Disable PCI ops. Assume data1 took care of setting up PCI */
    copy_len = 1;
    out[size] = OP_DISABLE_PCI;
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Copy over as much of the second input as will fit */
    copy_len = MIN(size2, max_out_size);
    memcpy(out + size, data2, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    return size;
}
static GString *generic_fuzz_cmdline(FuzzTarget *t)
{
    GString *cmd_line = g_string_new(TARGET_NAME);
    if (!getenv("QEMU_FUZZ_ARGS")) {
        usage();
    }
    g_string_append_printf(cmd_line, " -display none \
                                      -machine accel=qtest, \
                                      -m 512M %s ", getenv("QEMU_FUZZ_ARGS"));
    return cmd_line;
}
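/*
 * With QEMU_FUZZ_ARGS='-device foo' (a stand-in for real device arguments),
 * the string built above expands to roughly:
 *   "<TARGET_NAME> -display none -machine accel=qtest, -m 512M -device foo "
 * where the line continuations inside the literal also contribute the
 * intervening whitespace.
 */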
static GString *generic_fuzz_predefined_config_cmdline(FuzzTarget *t)
{
    gchar *args;
    const generic_fuzz_config *config;
    g_assert(t->opaque);

    config = t->opaque;
    setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1);
    if (config->argfunc) {
        args = config->argfunc();
        setenv("QEMU_FUZZ_ARGS", args, 1);
        g_free(args);
    } else {
        g_assert_nonnull(config->args);
        setenv("QEMU_FUZZ_ARGS", config->args, 1);
    }

    setenv("QEMU_FUZZ_OBJECTS", config->objects, 1);
    return generic_fuzz_cmdline(t);
}
static void register_generic_fuzz_targets(void)
{
    fuzz_add_target(&(FuzzTarget){
            .name = "generic-fuzz",
            .description = "Fuzz based on any qemu command-line args. ",
            .get_init_cmdline = generic_fuzz_cmdline,
            .pre_fuzz = generic_pre_fuzz,
            .fuzz = generic_fuzz,
            .crossover = generic_fuzz_crossover
    });

    GString *name;
    const generic_fuzz_config *config;

    for (int i = 0;
         i < sizeof(predefined_configs) / sizeof(generic_fuzz_config);
         i++) {
        config = predefined_configs + i;
        name = g_string_new("generic-fuzz");
        g_string_append_printf(name, "-%s", config->name);
        fuzz_add_target(&(FuzzTarget){
                .name = name->str,
                .description = "Predefined generic-fuzz config.",
                .get_init_cmdline = generic_fuzz_predefined_config_cmdline,
                .pre_fuzz = generic_pre_fuzz,
                .fuzz = generic_fuzz,
                .crossover = generic_fuzz_crossover,
                .opaque = (void *)config
        });
    }
}

fuzz_target_init(register_generic_fuzz_targets);