#ifndef CPU_COMMON_H
#define CPU_COMMON_H 1

/* CPU interfaces that are target independent.  */
#ifdef TARGET_PHYS_ADDR_BITS
#include "targphys.h"
#endif
#include "qemu-queue.h"
#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};
/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND) && TARGET_PHYS_ADDR_BITS == 64
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef unsigned long ram_addr_t;
# define RAM_ADDR_MAX ULONG_MAX
# define RAM_ADDR_FMT "%lx"
#endif
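
/*
 * Usage sketch (purely illustrative): RAM_ADDR_FMT keeps format strings
 * portable across the two ram_addr_t definitions above.
 *
 *   ram_addr_t off = 0x1000;
 *   fprintf(stderr, "ram offset: " RAM_ADDR_FMT "\n", off);
 */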
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr,
                                uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty);
static inline void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                                        ram_addr_t size,
                                                        ram_addr_t phys_offset,
                                                        ram_addr_t region_offset)
{
    cpu_register_physical_memory_log(start_addr, size, phys_offset,
                                     region_offset, false);
}
static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                                ram_addr_t size,
                                                ram_addr_t phys_offset)
{
    cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
}
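
/*
 * Usage sketch (illustrative only; the device name, base address and size
 * are invented, and qemu_ram_alloc()/IO_MEM_RAM are declared further down
 * in this header):
 *
 *   ram_addr_t offset = qemu_ram_alloc(NULL, "mydev.ram", 0x800000);
 *   cpu_register_physical_memory(0xa0000000, 0x800000,
 *                                offset | IO_MEM_RAM);
 */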
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host);
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device.  */
void *qemu_get_ram_ptr(ram_addr_t addr);
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size);
/* Same but slower, to use for migration, where the order of
 * RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
void qemu_put_ram_ptr(void *addr);
/* This should not be used by devices.  */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
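
/*
 * Usage sketch (illustrative; "mydev.buf" and the length are invented):
 *
 *   ram_addr_t base = qemu_ram_alloc(NULL, "mydev.buf", 4096);
 *   uint8_t *p = qemu_get_ram_ptr(base);      // host pointer, device-local RAM
 *   memset(p, 0, 4096);
 *
 *   ram_addr_t back;
 *   if (qemu_ram_addr_from_host(p, &back) == 0) {   // assuming 0 == found
 *       // back now holds the ram_addr_t for p
 *   }
 *   qemu_ram_free(base);
 */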
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian);
void cpu_unregister_io_memory(int table_address);
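
/*
 * Usage sketch (a minimal example; "mydev_readl", "mydev_writel", "s" and
 * the addresses are hypothetical and not declared here). The read/write
 * tables are indexed by access size: byte, word, long.
 *
 *   static CPUReadMemoryFunc * const mydev_read[3] = {
 *       NULL, NULL, mydev_readl,
 *   };
 *   static CPUWriteMemoryFunc * const mydev_write[3] = {
 *       NULL, NULL, mydev_writel,
 *   };
 *
 *   int io = cpu_register_io_memory(mydev_read, mydev_write, s,
 *                                   DEVICE_NATIVE_ENDIAN);
 *   cpu_register_physical_memory(0x10000000, 0x1000, io);
 */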
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            void *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const void *buf, int len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, 1);
}
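
/*
 * Usage sketch (address and buffer are illustrative):
 *
 *   uint8_t buf[16];
 *   cpu_physical_memory_read(0x1000, buf, sizeof(buf));    // guest -> host
 *   buf[0] ^= 0xff;
 *   cpu_physical_memory_write(0x1000, buf, sizeof(buf));   // host -> guest
 */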
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);
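
/*
 * Usage sketch (a DMA-style direct access; the address and length are
 * invented). On success, unmap with the same is_write flag and the number
 * of bytes actually accessed:
 *
 *   target_phys_addr_t len = 4096;
 *   void *p = cpu_physical_memory_map(0x2000, &len, 1);    // is_write = 1
 *   if (p) {
 *       memset(p, 0, len);
 *       cpu_physical_memory_unmap(p, len, 1, len);
 *   }
 */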
struct CPUPhysMemoryClient;
typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
struct CPUPhysMemoryClient {
    void (*set_memory)(struct CPUPhysMemoryClient *client,
                       target_phys_addr_t start_addr,
                       ram_addr_t size,
                       ram_addr_t phys_offset,
                       bool log_dirty);
    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
                             target_phys_addr_t start_addr,
                             target_phys_addr_t end_addr);
    int (*migration_log)(struct CPUPhysMemoryClient *client,
                         int enable);
    int (*log_start)(struct CPUPhysMemoryClient *client,
                     target_phys_addr_t phys_addr, ram_addr_t size);
    int (*log_stop)(struct CPUPhysMemoryClient *client,
                    target_phys_addr_t phys_addr, ram_addr_t size);
    QLIST_ENTRY(CPUPhysMemoryClient) list;
};
void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
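
/*
 * Usage sketch (a hypothetical listener; the my_* callbacks are not part
 * of this header):
 *
 *   static CPUPhysMemoryClient my_client = {
 *       .set_memory        = my_set_memory,
 *       .sync_dirty_bitmap = my_sync_dirty_bitmap,
 *       .migration_log     = my_migration_log,
 *       .log_start         = my_log_start,
 *       .log_stop          = my_log_stop,
 *   };
 *
 *   cpu_register_phys_memory_client(&my_client);
 */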
/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_flush_coalesced_mmio_buffer(void);
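
/*
 * Usage sketch (the region is invented; typically set up for a
 * side-effect-free MMIO window and torn down when the device goes away):
 *
 *   qemu_register_coalesced_mmio(0x10001000, 0x100);
 *   ...
 *   qemu_flush_coalesced_mmio_buffer();     // drain pending coalesced writes
 *   qemu_unregister_coalesced_mmio(0x10001000, 0x100);
 */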
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_le_phys(target_phys_addr_t addr);
uint32_t lduw_be_phys(target_phys_addr_t addr);
uint32_t ldl_le_phys(target_phys_addr_t addr);
uint32_t ldl_be_phys(target_phys_addr_t addr);
uint64_t ldq_le_phys(target_phys_addr_t addr);
uint64_t ldq_be_phys(target_phys_addr_t addr);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_le_phys(target_phys_addr_t addr, uint32_t val);
void stw_be_phys(target_phys_addr_t addr, uint32_t val);
void stl_le_phys(target_phys_addr_t addr, uint32_t val);
void stl_be_phys(target_phys_addr_t addr, uint32_t val);
void stq_le_phys(target_phys_addr_t addr, uint64_t val);
void stq_be_phys(target_phys_addr_t addr, uint64_t val);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);
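
/*
 * Usage sketch (address and value are illustrative):
 *
 *   stl_le_phys(0x3000, 0x12345678);        // explicit little-endian store
 *   uint32_t v = ldl_le_phys(0x3000);       // v == 0x12345678
 *
 *   stl_phys(0x3004, 0x1);                  // target-endian variant
 */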
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
#define IO_MEM_SHIFT       3

#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY    (3 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD        (1)
#define IO_MEM_SUBPAGE     (2)
#endif /* !CONFIG_USER_ONLY */

#endif /* !CPU_COMMON_H */