/*
 * arch/xtensa/kernel/syscall.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2000 Silicon Graphics, Inc.
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Chris Zankel <chris@zankel.net>
 */
#include <asm/uaccess.h>
#include <asm/syscall.h>
#include <asm/unistd.h>
#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shm.h>
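
/*
 * Table of syscall entry points, indexed by syscall number.  Every slot
 * starts out pointing at sys_ni_syscall and is then overridden for each
 * call listed in <uapi/asm/unistd.h> through the __SYSCALL() macro below.
 */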
typedef void (*syscall_t)(void);

syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
        [0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,

#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
#include <uapi/asm/unistd.h>
};
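
/*
 * COLOUR_ALIGN() rounds an address up to an SHMLBA boundary and then adds
 * the cache colour implied by the file offset, so that shared mappings of
 * the same page always land on the same colour and cannot alias in a
 * virtually indexed cache.
 */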
#define COLOUR_ALIGN(addr, pgoff) \
        ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
         (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
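
/*
 * shmat() wrapper: do_shmat() hands the attach address back through a
 * pointer argument, so pass SHMLBA as the required alignment and return
 * the resulting address (or the error code) to user space.
 */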
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
        if (err)
                return err;
        return (long)ret;
}
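
/*
 * fadvise64_64 takes two 64-bit arguments.  The order differs from the
 * generic sys_fadvise64_64() prototype, presumably so that the 64-bit
 * values land in aligned register pairs under the xtensa calling
 * convention; this wrapper simply reorders them.
 */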
asmlinkage long xtensa_fadvise64_64(int fd, int advice,
                unsigned long long offset, unsigned long long len)
{
        return sys_fadvise64_64(fd, offset, len, advice);
}
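
/*
 * Choose an unmapped area for mmap().  Shared mappings are placed at
 * addresses with the same cache colour as their file offset (see
 * COLOUR_ALIGN above) so that they cannot alias in the data cache.
 */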
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                                ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;
        if (!addr)
                addr = TASK_UNMAPPED_BASE;

        if (flags & MAP_SHARED)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point: (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
                if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
                if (flags & MAP_SHARED)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}