#endif /* DEBUG_REMAP */

#include "exec/user/abitypes.h"

#include "exec/user/thunk.h"
#include "syscall_defs.h"
#include "target_signal.h"
#include "exec/gdbstub.h"
#include "qemu/queue.h"
#if defined(CONFIG_USE_NPTL)
#define THREAD __thread
#else
#define THREAD
#endif
/* This struct is used to hold certain information about the image.
 * Basically, it replicates in user space what would be certain
 * task_struct fields in the kernel
 */
struct image_info {
    abi_ulong start_stack;
    abi_ulong stack_limit;
    abi_ulong code_offset;
    abi_ulong data_offset;
#ifdef CONFIG_USE_FDPIC
    abi_ulong loadmap_addr;
    abi_ulong pt_dynamic_addr;
    struct image_info *other_info;
#endif
};
/* Information about the current linux thread */
struct vm86_saved_state {
    uint32_t eax; /* return code */
    uint16_t cs, ss, ds, es, fs, gs;
};
#include "nwfpe/fpa11.h"

#define MAX_SIGQUEUE_SIZE 1024
struct sigqueue {
    struct sigqueue *next;
    target_siginfo_t info;
};
struct emulated_sigtable {
    int pending; /* true if signal is pending */
    struct sigqueue *first;
    struct sigqueue info; /* in order to always have memory for the
                             first signal, we put it here */
};
/* NOTE: we force a big alignment so that the stack stored after is
   aligned too */
typedef struct TaskState {
    pid_t ts_tid;     /* tid (or pid) of this task */
#ifdef TARGET_UNICORE32
    int swi_errno;
#endif
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
    abi_ulong target_v86;
    struct vm86_saved_state vm86_saved_regs;
    struct target_vm86plus_struct vm86plus;
#endif
#ifdef CONFIG_USE_NPTL
    abi_ulong child_tidptr;
#endif
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    /* Extra fields for semihosted binaries. */
    uint32_t heap_base;
    uint32_t heap_limit;
#endif
    int used; /* non zero if used */
    struct image_info *info;
    struct linux_binprm *bprm;

    struct emulated_sigtable sigtab[TARGET_NSIG];
    struct sigqueue sigqueue_table[MAX_SIGQUEUE_SIZE]; /* siginfo queue */
    struct sigqueue *first_free; /* first free siginfo queue entry */
    int signal_pending; /* non zero if a signal may be pending */
} __attribute__((aligned(16))) TaskState;
extern char *exec_path;
void init_task_state(TaskState *ts);
void task_settid(TaskState *);
void stop_all_tasks(void);
extern const char *qemu_uname_release;
extern unsigned long mmap_min_addr;
/* ??? See if we can avoid exposing so much of the loader internals. */

/*
 * MAX_ARG_PAGES defines the number of pages allocated for arguments
 * and environment for the new program. 32 should suffice, this gives
 * a maximum env+arg of 128kB w/4KB pages!
 */
#define MAX_ARG_PAGES 33

/* Read a good amount of data initially, to hopefully get all the
   program headers loaded. */
#define BPRM_BUF_SIZE 1024
/*
 * This structure is used to hold the arguments that are
 * used when loading binaries.
 */
struct linux_binprm {
    char buf[BPRM_BUF_SIZE] __attribute__((aligned));
    void *page[MAX_ARG_PAGES];
    char *filename;        /* Name of binary */
    int (*core_dump)(int, const CPUArchState *); /* coredump routine */
};
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop);
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
                              abi_ulong stringp, int push_ptr);
int loader_exec(const char *filename, char **argv, char **envp,
                struct target_pt_regs *regs, struct image_info *infop,
                struct linux_binprm *);

int load_elf_binary(struct linux_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info);
int load_flt_binary(struct linux_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info);
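/* Illustrative sketch only: how a caller such as main() might wire the
 * loader together.  "filename", "target_argv" and "target_environ" are
 * made-up names for the caller's data.
 *
 *     struct target_pt_regs regs;
 *     struct image_info info;
 *     struct linux_binprm bprm;
 *
 *     memset(&regs, 0, sizeof(regs));
 *     if (loader_exec(filename, target_argv, target_environ,
 *                     &regs, &info, &bprm) != 0) {
 *         (report the load error and exit)
 *     }
 */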
abi_long memcpy_to_target(abi_ulong dest, const void *src,
                          unsigned long len);
void target_set_brk(abi_ulong new_brk);
abi_long do_brk(abi_ulong new_brk);
void syscall_init(void);
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8);
void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
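/* Illustrative sketch only: a target's cpu_loop() typically forwards a
 * trapped system call to do_syscall() and writes the result back into the
 * guest register file.  The register names below are i386-flavoured
 * examples; unused trailing arguments are passed as 0.
 *
 *     env->regs[R_EAX] = do_syscall(env,
 *                                   env->regs[R_EAX],
 *                                   env->regs[R_EBX], env->regs[R_ECX],
 *                                   env->regs[R_EDX], env->regs[R_ESI],
 *                                   env->regs[R_EDI], env->regs[R_EBP],
 *                                   0, 0);
 */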
extern THREAD CPUArchState *thread_env;
void cpu_loop(CPUArchState *env);
char *target_strerror(int err);
int get_osversion(void);
void fork_start(void);
void fork_end(int child);
/* Creates the initial guest address space in the host memory space using
 * the given host start address hint and size. The guest_start parameter
 * specifies the start address of the guest space. guest_base will be the
 * difference between the host start address computed by this function and
 * guest_start. If fixed is specified, then the mapped address space must
 * start at host_start. The real start address of the mapped memory space is
 * returned or -1 if there was an error.
 */
unsigned long init_guest_space(unsigned long host_start,
                               unsigned long host_size,
                               unsigned long guest_start,
                               bool fixed);
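/* Illustrative sketch only: one way a caller might reserve the guest
 * address space and derive guest_base from the result.  "reserve_size" is
 * a made-up name; guest_base is the offset described in the comment above.
 *
 *     unsigned long real_start;
 *
 *     real_start = init_guest_space(0, reserve_size, 0, false);
 *     if (real_start == (unsigned long)-1) {
 *         (could not reserve the guest address space)
 *     }
 *     guest_base = real_start;  (host start minus guest_start, which is 0 here)
 */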
#include "qemu/log.h"

int host_to_target_waitstatus(int status);

void print_syscall(int num,
                   abi_long arg1, abi_long arg2, abi_long arg3,
                   abi_long arg4, abi_long arg5, abi_long arg6);
void print_syscall_ret(int num, abi_long arg1);
extern int do_strace;
void process_pending_signals(CPUArchState *cpu_env);
void signal_init(void);
int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info);
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo);
int target_to_host_signal(int sig);
int host_to_target_signal(int sig);
long do_sigreturn(CPUArchState *env);
long do_rt_sigreturn(CPUArchState *env);
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp);
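/* Illustrative sketch only: how a cpu_loop() implementation typically
 * raises a guest SIGSEGV.  "fault_addr" is a made-up name; the TARGET_*
 * constants and the target_siginfo_t layout come from syscall_defs.h.
 *
 *     target_siginfo_t info;
 *
 *     info.si_signo = TARGET_SIGSEGV;
 *     info.si_errno = 0;
 *     info.si_code = TARGET_SEGV_MAPERR;
 *     info._sifields._sigfault._addr = fault_addr;
 *     queue_signal(env, info.si_signo, &info);
 */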
#ifdef TARGET_I386
void save_v86_state(CPUX86State *env);
void handle_vm86_trap(CPUX86State *env, int trapno);
void handle_vm86_fault(CPUX86State *env);
int do_vm86(CPUX86State *env, long subfunction, abi_ulong v86_addr);
#elif defined(TARGET_SPARC64)
void sparc64_set_context(CPUSPARCState *env);
void sparc64_get_context(CPUSPARCState *env);
#endif
int target_mprotect(abi_ulong start, abi_ulong len, int prot);
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset);
int target_munmap(abi_ulong start, abi_ulong len);
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr);
int target_msync(abi_ulong start, abi_ulong len, int flags);
extern unsigned long last_brk;
extern abi_ulong mmap_next_start;
void mmap_lock(void);
void mmap_unlock(void);
abi_ulong mmap_find_vma(abi_ulong, abi_ulong);
void cpu_list_lock(void);
void cpu_list_unlock(void);
#if defined(CONFIG_USE_NPTL)
void mmap_fork_start(void);
void mmap_fork_end(int child);
#endif

extern unsigned long guest_stack_size;
#define VERIFY_READ 0
#define VERIFY_WRITE 1 /* implies read access */

static inline int access_ok(int type, abi_ulong addr, abi_ulong size)
{
    return page_check_range((target_ulong)addr, size,
        (type == VERIFY_READ) ? PAGE_READ : (PAGE_READ | PAGE_WRITE)) == 0;
}
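/* Illustrative sketch only: a syscall implementation might validate a
 * guest buffer before touching it.  "addr" and "count" are made-up names.
 *
 *     if (!access_ok(VERIFY_WRITE, addr, count)) {
 *         return -TARGET_EFAULT;
 *     }
 */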
/* NOTE __get_user and __put_user use host pointers and don't check access. */
/* These are usually used to access struct data members once the
 * struct has been locked - usually with lock_user_struct().
 */
#define __put_user(x, hptr)\
({ __typeof(*hptr) pu_ = (x);\
    switch(sizeof(*hptr)) {\
    case 1: break;\
    case 2: pu_ = tswap16(pu_); break; \
    case 4: pu_ = tswap32(pu_); break; \
    case 8: pu_ = tswap64(pu_); break; \
    default: abort();\
    }\
    memcpy(hptr, &pu_, sizeof(pu_)); \
    0;\
})
#define __get_user(x, hptr) \
({ __typeof(*hptr) gu_; \
    memcpy(&gu_, hptr, sizeof(gu_)); \
    switch(sizeof(*hptr)) {\
    case 1: break; \
    case 2: gu_ = tswap16(gu_); break; \
    case 4: gu_ = tswap32(gu_); break; \
    case 8: gu_ = tswap64(gu_); break; \
    default: abort();\
    }\
    (x) = gu_; \
    0;\
})
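/* Illustrative sketch only: the typical pattern once a guest struct has
 * been locked with lock_user_struct() (defined near the end of this
 * header).  The struct tag and field name are made up for the example.
 *
 *     struct target_example *tp;
 *     abi_long value;
 *
 *     if (!lock_user_struct(VERIFY_READ, tp, guest_addr, 1)) {
 *         return -TARGET_EFAULT;
 *     }
 *     __get_user(value, &tp->field);
 *     unlock_user_struct(tp, guest_addr, 0);
 */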
/* put_user()/get_user() take a guest address and check access */
/* These are usually used to access an atomic data type, such as an int,
 * that has been passed by address. These internally perform locking
 * and unlocking on the data type.
 */
#define put_user(x, gaddr, target_type)                                 \
({                                                                      \
    abi_ulong __gaddr = (gaddr);                                        \
    target_type *__hptr;                                                \
    abi_long __ret;                                                     \
    if ((__hptr = lock_user(VERIFY_WRITE, __gaddr, sizeof(target_type), 0))) { \
        __ret = __put_user((x), __hptr);                                \
        unlock_user(__hptr, __gaddr, sizeof(target_type));              \
    } else                                                              \
        __ret = -TARGET_EFAULT;                                         \
    __ret;                                                              \
})
#define get_user(x, gaddr, target_type)                                 \
({                                                                      \
    abi_ulong __gaddr = (gaddr);                                        \
    target_type *__hptr;                                                \
    abi_long __ret;                                                     \
    if ((__hptr = lock_user(VERIFY_READ, __gaddr, sizeof(target_type), 1))) { \
        __ret = __get_user((x), __hptr);                                \
        unlock_user(__hptr, __gaddr, 0);                                \
    } else {                                                            \
        /* avoid warning */                                             \
        (x) = 0;                                                        \
        __ret = -TARGET_EFAULT;                                         \
    }                                                                   \
    __ret;                                                              \
})
#define put_user_ual(x, gaddr) put_user((x), (gaddr), abi_ulong)
#define put_user_sal(x, gaddr) put_user((x), (gaddr), abi_long)
#define put_user_u64(x, gaddr) put_user((x), (gaddr), uint64_t)
#define put_user_s64(x, gaddr) put_user((x), (gaddr), int64_t)
#define put_user_u32(x, gaddr) put_user((x), (gaddr), uint32_t)
#define put_user_s32(x, gaddr) put_user((x), (gaddr), int32_t)
#define put_user_u16(x, gaddr) put_user((x), (gaddr), uint16_t)
#define put_user_s16(x, gaddr) put_user((x), (gaddr), int16_t)
#define put_user_u8(x, gaddr) put_user((x), (gaddr), uint8_t)
#define put_user_s8(x, gaddr) put_user((x), (gaddr), int8_t)

#define get_user_ual(x, gaddr) get_user((x), (gaddr), abi_ulong)
#define get_user_sal(x, gaddr) get_user((x), (gaddr), abi_long)
#define get_user_u64(x, gaddr) get_user((x), (gaddr), uint64_t)
#define get_user_s64(x, gaddr) get_user((x), (gaddr), int64_t)
#define get_user_u32(x, gaddr) get_user((x), (gaddr), uint32_t)
#define get_user_s32(x, gaddr) get_user((x), (gaddr), int32_t)
#define get_user_u16(x, gaddr) get_user((x), (gaddr), uint16_t)
#define get_user_s16(x, gaddr) get_user((x), (gaddr), int16_t)
#define get_user_u8(x, gaddr) get_user((x), (gaddr), uint8_t)
#define get_user_s8(x, gaddr) get_user((x), (gaddr), int8_t)
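/* Illustrative sketch only: reading and updating a single guest word.
 * "arg_addr" is a made-up guest address; both macros evaluate to 0 on
 * success and -TARGET_EFAULT on an access error.
 *
 *     abi_ulong val;
 *
 *     if (get_user_ual(val, arg_addr)) {
 *         return -TARGET_EFAULT;
 *     }
 *     val += 1;
 *     if (put_user_ual(val, arg_addr)) {
 *         return -TARGET_EFAULT;
 *     }
 */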
/* copy_from_user() and copy_to_user() are usually used to copy data
 * buffers between the target and host. These internally perform
 * locking/unlocking of the memory.
 */
abi_long copy_from_user(void *hptr, abi_ulong gaddr, size_t len);
abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
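/* Illustrative sketch only: copying a small guest buffer into host memory.
 * "guest_buf" and "len" are made-up names.
 *
 *     char host_buf[64];
 *
 *     if (len > sizeof(host_buf)) {
 *         return -TARGET_EINVAL;
 *     }
 *     if (copy_from_user(host_buf, guest_buf, len)) {
 *         return -TARGET_EFAULT;
 *     }
 */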
/* Functions for accessing guest memory. The tget and tput functions
   read/write single values, byteswapping as necessary. The lock_user
   function gets a pointer to a contiguous area of guest memory, but does
   not perform any byteswapping. lock_user may return either a pointer to
   the guest memory, or a temporary buffer. */

/* Lock an area of guest memory into the host. If copy is true then the
   host area will have the same contents as the guest. */
static inline void *lock_user(int type, abi_ulong guest_addr, long len, int copy)
{
    if (!access_ok(type, guest_addr, len))
        return NULL;
#ifdef DEBUG_REMAP
    {
        void *addr;
        addr = malloc(len);
        if (copy)
            memcpy(addr, g2h(guest_addr), len);
        else
            memset(addr, 0, len);
        return addr;
    }
#else
    return g2h(guest_addr);
#endif
}
/* Unlock an area of guest memory. The first LEN bytes must be
   flushed back to guest memory. host_ptr = NULL is explicitly
   allowed and does nothing. */
static inline void unlock_user(void *host_ptr, abi_ulong guest_addr,
                               long len)
{
#ifdef DEBUG_REMAP
    if (!host_ptr)
        return;
    if (host_ptr == g2h(guest_addr))
        return;
    if (len > 0)
        memcpy(g2h(guest_addr), host_ptr, len);
    free(host_ptr);
#endif
}
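/* Illustrative sketch only: accessing "len" bytes of guest memory through
 * lock_user()/unlock_user().  "guest_addr" and "len" are made-up names;
 * passing 0 to unlock_user() means nothing needs to be flushed back.
 *
 *     void *p = lock_user(VERIFY_READ, guest_addr, len, 1);
 *
 *     if (!p) {
 *         return -TARGET_EFAULT;
 *     }
 *     (inspect the len bytes at p here)
 *     unlock_user(p, guest_addr, 0);
 */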
/* Return the length of a string in target memory or -TARGET_EFAULT if
   access error. */
abi_long target_strlen(abi_ulong gaddr);
/* Like lock_user but for null terminated strings. */
static inline void *lock_user_string(abi_ulong guest_addr)
{
    abi_long len;
    len = target_strlen(guest_addr);
    if (len < 0)
        return NULL;
    return lock_user(VERIFY_READ, guest_addr, (long)(len + 1), 1);
}
/* Helper macros for locking/unlocking a target struct. */
#define lock_user_struct(type, host_ptr, guest_addr, copy) \
    (host_ptr = lock_user(type, guest_addr, sizeof(*host_ptr), copy))
#define unlock_user_struct(host_ptr, guest_addr, copy) \
    unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0)
#if defined(CONFIG_USE_NPTL)