#ifndef CPU_COMMON_H
#define CPU_COMMON_H 1

/* CPU interfaces that are target independent. */

#include "hwaddr.h"

#ifndef NEED_CPU_H
#include "poison.h"
#endif

#include "bswap.h"
#include "qemu-queue.h"

#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif
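
/*
 * Illustrative usage sketch (not part of this header): RAM_ADDR_FMT matches
 * whichever ram_addr_t definition was selected above, so it can be used
 * directly in format strings.  The address value here is an assumed example.
 *
 *     ram_addr_t addr = 0x100000;
 *     fprintf(stderr, "bad ram_addr: 0x" RAM_ADDR_FMT "\n", addr);
 */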

/* memory API */

typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
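
/*
 * Illustrative sketch (not part of this header): a device's MMIO callbacks
 * would match these typedefs.  MyDevState and its regs[] array are assumed
 * names used only for this example.
 *
 *     static uint32_t mydev_read(void *opaque, hwaddr addr)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr, uint32_t value)
 *     {
 *         MyDevState *s = opaque;
 *         s->regs[addr >> 2] = value;
 *     }
 */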

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device. */
void *qemu_get_ram_ptr(ram_addr_t addr);
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size);
/* Same but slower, to use for migration, where the order of
 * RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
void qemu_put_ram_ptr(void *addr);
/* This should not be used by devices. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev);
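
/*
 * Illustrative sketch (not part of this header): translating a host pointer
 * back into a ram_addr_t.  This assumes the checked variant reports failure
 * with a non-zero return when the pointer is not inside guest RAM.
 *
 *     ram_addr_t ram_addr;
 *     if (qemu_ram_addr_from_host(host_ptr, &ram_addr) != 0) {
 *         // host_ptr is not within guest RAM
 *     }
 */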

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
                                            void *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(hwaddr addr,
                                             const void *buf, int len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, 1);
}
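
/*
 * Illustrative sketch (not part of this header): round-tripping a small
 * buffer through guest-physical memory with the wrappers above.  GUEST_PA
 * is an assumed example address.
 *
 *     uint8_t buf[64];
 *     cpu_physical_memory_read(GUEST_PA, buf, sizeof(buf));
 *     buf[0] ^= 0xff;
 *     cpu_physical_memory_write(GUEST_PA, buf, sizeof(buf));
 */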

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);
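
/*
 * Illustrative sketch (not part of this header): the usual map/use/unmap
 * pattern.  cpu_physical_memory_map() updates *plen and may return a shorter
 * mapping than requested, or NULL, so callers check both.
 *
 *     hwaddr plen = len;
 *     void *p = cpu_physical_memory_map(addr, &plen, 1);
 *     if (p) {
 *         memset(p, 0, plen);
 *         cpu_physical_memory_unmap(p, plen, 1, plen);
 *     }
 */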

bool cpu_physical_memory_is_io(hwaddr phys_addr);

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);

uint32_t ldub_phys(hwaddr addr);
uint32_t lduw_le_phys(hwaddr addr);
uint32_t lduw_be_phys(hwaddr addr);
uint32_t ldl_le_phys(hwaddr addr);
uint32_t ldl_be_phys(hwaddr addr);
uint64_t ldq_le_phys(hwaddr addr);
uint64_t ldq_be_phys(hwaddr addr);
void stb_phys(hwaddr addr, uint32_t val);
void stw_le_phys(hwaddr addr, uint32_t val);
void stw_be_phys(hwaddr addr, uint32_t val);
void stl_le_phys(hwaddr addr, uint32_t val);
void stl_be_phys(hwaddr addr, uint32_t val);
void stq_le_phys(hwaddr addr, uint64_t val);
void stq_be_phys(hwaddr addr, uint64_t val);
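
/*
 * Illustrative sketch (not part of this header): storing and re-loading a
 * 32-bit little-endian value at an assumed guest-physical address.
 *
 *     hwaddr pa = 0x1000;
 *     stl_le_phys(pa, 0xdeadbeef);
 *     uint32_t v = ldl_le_phys(pa);   // v == 0xdeadbeef
 */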

#ifdef NEED_CPU_H
uint32_t lduw_phys(hwaddr addr);
uint32_t ldl_phys(hwaddr addr);
uint64_t ldq_phys(hwaddr addr);
void stl_phys_notdirty(hwaddr addr, uint32_t val);
void stq_phys_notdirty(hwaddr addr, uint64_t val);
void stw_phys(hwaddr addr, uint32_t val);
void stl_phys(hwaddr addr, uint32_t val);
void stq_phys(hwaddr addr, uint64_t val);
#endif

void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len);
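
/*
 * Illustrative sketch (not part of this header): copying a firmware blob
 * into a ROM-backed region, which this helper can write even though the
 * region is read-only to the guest.  rom_base, blob and blob_size are
 * assumed names for the example only.
 *
 *     cpu_physical_memory_write_rom(rom_base, blob, blob_size);
 */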

extern struct MemoryRegion io_mem_ram;
extern struct MemoryRegion io_mem_rom;
extern struct MemoryRegion io_mem_unassigned;
extern struct MemoryRegion io_mem_notdirty;

#endif

#endif /* !CPU_COMMON_H */