/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
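
/*
 * Illustrative sketch, not part of the original file: a userspace
 * caller mapping 8 KiB of a file at a 1 MiB offset passes the offset
 * in 4 KiB units, i.e. pgoff = (1 << 20) >> 12 = 256:
 *
 *	void *p = (void *)syscall(__NR_mmap2, NULL, 8192, PROT_READ,
 *				  MAP_PRIVATE, fd, 256);
 */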

/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
  unsigned long _mmusr, _paddr;						\
									\
  __asm__ __volatile__ (".chip 68040\n\t"				\
			"ptestr (%1)\n\t"				\
			"movec %%mmusr,%0\n\t"				\
			".chip 68k"					\
			: "=r" (_mmusr)					\
			: "a" (vaddr));					\
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
  _paddr;								\
})

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  /* This nop is needed for some broken versions of the 68040.  */
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;
    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
	paddr += addr & ~(PAGE_MASK | 15);
	len = (len + (addr & 15) + 15) >> 4;
      } else {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_040(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
	len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * No need to page align here since it is done by
	       * virt_to_phys_040().
	       */
	      addr += PAGE_SIZE;
	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary.  */
	      for (;;)
		{
		  if ((paddr = virt_to_phys_040(addr)))
		    break;
		  if (len <= i)
		    return 0;
		  len -= i;
		  addr += PAGE_SIZE;
		}
	    }
	  else
	    paddr += 16;
	}
      break;
    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_040(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_060(vaddr)				\
({							\
  unsigned long paddr;					\
  __asm__ __volatile__ (".chip 68060\n\t"		\
			"plpar (%0)\n\t"		\
			".chip 68k"			\
			: "=a" (paddr)			\
			: "0" (vaddr));			\
  (paddr);						\
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;
    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_060(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * We just want to jump to the first cache line
	       * in the next page.
	       */
	      addr += PAGE_SIZE;
	      addr &= PAGE_MASK;

	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary.  */
	      for (;;)
		{
		  if ((paddr = virt_to_phys_060(addr)))
		    break;
		  if (len <= i)
		    return 0;
		  len -= i;
		  addr += PAGE_SIZE;
		}
	    }
	  else
	    paddr += 16;
	}
      break;
    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;	/* Workaround for bug in some
				   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_060(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;
	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}
	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	return ret;
}

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
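
/*
 * Note (added commentary): the asm above follows the m68k syscall
 * convention. The syscall number goes in %d0, the arguments here in
 * %d1-%d3 (the full convention continues through %d5 and %a0),
 * "trap #0" enters the kernel, and the result comes back in %d0,
 * hence the "+d" constraint on __res.
 */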

asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}
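
/*
 * Illustrative sketch, not part of the original file: a threading
 * library would stash its per-thread pointer with set_thread_area and
 * read it back with get_thread_area, since plain m68k has no cheap
 * user-readable thread register:
 *
 *	syscall(__NR_set_thread_area, (unsigned long)tls_block);
 *	void *tls = (void *)syscall(__NR_get_thread_area);
 */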

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

	      bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access, we can get here if
		   a memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page stuff, then re-iterate.
		   Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If the do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}
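
/*
 * Illustrative sketch, not part of the original file: with the m68k
 * syscall argument order (%d1-%d5, then %a0), a six-argument syscall()
 * lands `mem` in A0 as this handler expects; d3-d5 are dummies:
 *
 *	int old = syscall(__NR_atomic_cmpxchg_32, newval, oldval,
 *			  0, 0, 0, mem);
 *
 * The exchange succeeded iff old == oldval.
 */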

asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}