[PATCH 22/57][Arm][GAS] Add support for MVE instructions: vmlaldav, vmlalv, vmlsldav...
[binutils-gdb.git] / sim / aarch64 / memory.c
blob213842b60a522958ac4850b395c67378051880de
1 /* memory.c -- Memory accessor functions for the AArch64 simulator
3 Copyright (C) 2015-2019 Free Software Foundation, Inc.
5 Contributed by Red Hat.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include <sys/types.h>
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <string.h>
28 #include "libiberty.h"
30 #include "memory.h"
31 #include "simulator.h"
33 #include "sim-core.h"
/* Emit an error MESSAGE about a failed access to ADDR on the memory
   trace channel.  Tracing only — the caller decides whether to halt
   the simulation afterwards.  */
static inline void
mem_error (sim_cpu *cpu, const char *message, uint64_t addr)
{
  TRACE_MEMORY (cpu, "ERROR: %s: %" PRIx64, message, addr);
}
41 /* FIXME: AArch64 requires aligned memory access if SCTRLR_ELx.A is set,
42 but we are not implementing that here. */
43 #define FETCH_FUNC64(RETURN_TYPE, ACCESS_TYPE, NAME, N) \
44 RETURN_TYPE \
45 aarch64_get_mem_##NAME (sim_cpu *cpu, uint64_t address) \
46 { \
47 RETURN_TYPE val = (RETURN_TYPE) (ACCESS_TYPE) \
48 sim_core_read_unaligned_##N (cpu, 0, read_map, address); \
49 TRACE_MEMORY (cpu, "read of %" PRIx64 " (%d bytes) from %" PRIx64, \
50 val, N, address); \
52 return val; \
55 FETCH_FUNC64 (uint64_t, uint64_t, u64, 8)
56 FETCH_FUNC64 (int64_t, int64_t, s64, 8)
58 #define FETCH_FUNC32(RETURN_TYPE, ACCESS_TYPE, NAME, N) \
59 RETURN_TYPE \
60 aarch64_get_mem_##NAME (sim_cpu *cpu, uint64_t address) \
61 { \
62 RETURN_TYPE val = (RETURN_TYPE) (ACCESS_TYPE) \
63 sim_core_read_unaligned_##N (cpu, 0, read_map, address); \
64 TRACE_MEMORY (cpu, "read of %8x (%d bytes) from %" PRIx64, \
65 val, N, address); \
67 return val; \
70 FETCH_FUNC32 (uint32_t, uint32_t, u32, 4)
71 FETCH_FUNC32 (int32_t, int32_t, s32, 4)
72 FETCH_FUNC32 (uint32_t, uint16_t, u16, 2)
73 FETCH_FUNC32 (int32_t, int16_t, s16, 2)
74 FETCH_FUNC32 (uint32_t, uint8_t, u8, 1)
75 FETCH_FUNC32 (int32_t, int8_t, s8, 1)
77 void
78 aarch64_get_mem_long_double (sim_cpu *cpu, uint64_t address, FRegister *a)
80 a->v[0] = sim_core_read_unaligned_8 (cpu, 0, read_map, address);
81 a->v[1] = sim_core_read_unaligned_8 (cpu, 0, read_map, address + 8);
84 /* FIXME: Aarch64 requires aligned memory access if SCTRLR_ELx.A is set,
85 but we are not implementing that here. */
86 #define STORE_FUNC(TYPE, NAME, N) \
87 void \
88 aarch64_set_mem_##NAME (sim_cpu *cpu, uint64_t address, TYPE value) \
89 { \
90 TRACE_MEMORY (cpu, \
91 "write of %" PRIx64 " (%d bytes) to %" PRIx64, \
92 (uint64_t) value, N, address); \
94 sim_core_write_unaligned_##N (cpu, 0, write_map, address, value); \
97 STORE_FUNC (uint64_t, u64, 8)
98 STORE_FUNC (int64_t, s64, 8)
99 STORE_FUNC (uint32_t, u32, 4)
100 STORE_FUNC (int32_t, s32, 4)
101 STORE_FUNC (uint16_t, u16, 2)
102 STORE_FUNC (int16_t, s16, 2)
103 STORE_FUNC (uint8_t, u8, 1)
104 STORE_FUNC (int8_t, s8, 1)
106 void
107 aarch64_set_mem_long_double (sim_cpu *cpu, uint64_t address, FRegister a)
109 TRACE_MEMORY (cpu,
110 "write of long double %" PRIx64 " %" PRIx64 " to %" PRIx64,
111 a.v[0], a.v[1], address);
113 sim_core_write_unaligned_8 (cpu, 0, write_map, address, a.v[0]);
114 sim_core_write_unaligned_8 (cpu, 0, write_map, address + 8, a.v[1]);
117 void
118 aarch64_get_mem_blk (sim_cpu * cpu,
119 uint64_t address,
120 char * buffer,
121 unsigned length)
123 unsigned len;
125 len = sim_core_read_buffer (CPU_STATE (cpu), cpu, read_map,
126 buffer, address, length);
127 if (len == length)
128 return;
130 memset (buffer, 0, length);
131 if (cpu)
132 mem_error (cpu, "read of non-existant mem block at", address);
134 sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
135 sim_stopped, SIM_SIGBUS);
138 const char *
139 aarch64_get_mem_ptr (sim_cpu *cpu, uint64_t address)
141 char *addr = sim_core_trans_addr (CPU_STATE (cpu), cpu, read_map, address);
143 if (addr == NULL)
145 mem_error (cpu, "request for non-existant mem addr of", address);
146 sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
147 sim_stopped, SIM_SIGBUS);
150 return addr;
153 /* We implement a combined stack and heap. That way the sbrk()
154 function in libgloss/aarch64/syscalls.c has a chance to detect
155 an out-of-memory condition by noticing a stack/heap collision.
157 The heap starts at the end of loaded memory and carries on up
   to an arbitrary 2Gb limit.  */
160 uint64_t
161 aarch64_get_heap_start (sim_cpu *cpu)
163 uint64_t heap = trace_sym_value (CPU_STATE (cpu), "end");
165 if (heap == 0)
166 heap = trace_sym_value (CPU_STATE (cpu), "_end");
167 if (heap == 0)
169 heap = STACK_TOP - 0x100000;
170 sim_io_eprintf (CPU_STATE (cpu),
171 "Unable to find 'end' symbol - using addr based "
172 "upon stack instead %" PRIx64 "\n",
173 heap);
175 return heap;
178 uint64_t
179 aarch64_get_stack_start (sim_cpu *cpu)
181 if (aarch64_get_heap_start (cpu) >= STACK_TOP)
182 mem_error (cpu, "executable is too big", aarch64_get_heap_start (cpu));
183 return STACK_TOP;