/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
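/*
 * Userspace view (illustrative sketch, not part of the original file):
 * for a byte offset `off`, libc's mmap64() is expected to pass the last
 * argument in 4Kb units regardless of the kernel's PAGE_SIZE:
 *
 *	unsigned long pgoff = (unsigned long)(off >> 12);  // 4Kb units
 *	void *p = (void *)syscall(__NR_mmap2, addr, len, prot, flags,
 *				  fd, pgoff);
 */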
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
  unsigned long _mmusr, _paddr;						\
  __asm__ __volatile__ (".chip 68040\n\t"				\
			"ptestr (%1)\n\t"				\
			"movec %%mmusr,%0\n\t"				\
			".chip 68k"					\
			: "=r" (_mmusr)					\
			: "a" (vaddr));					\
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
  _paddr;								\
})
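/*
 * A zero result from the macro above means the page is unmapped, so
 * callers probe each page before flushing by physical address, e.g.:
 *
 *	if ((paddr = virt_to_phys_040(addr)))
 *		flush_line_at(paddr);	// hypothetical helper
 *
 * as done throughout cache_flush_040() below.
 */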
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          /* This nop is needed for some broken versions of the 68040.  */
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;
    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_040(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
        len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * No need to page align here since it is done by
               * virt_to_phys_040().
               */
              addr += PAGE_SIZE;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary.  */
              for (;;)
                {
                  if ((paddr = virt_to_phys_040(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;
    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_040(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}
#define virt_to_phys_060(vaddr)				\
({							\
  unsigned long paddr;					\
  __asm__ __volatile__ (".chip 68060\n\t"		\
			"plpar (%0)\n\t"		\
			".chip 68k"			\
			: "=a" (paddr)			\
			: "0" (vaddr));			\
  (paddr);						\
})
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;
  /*
   * The 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;
    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_060(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * We just want to jump to the first cache line
               * in the next page.
               */
              addr += PAGE_SIZE;
              addr &= PAGE_MASK;

              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary.  */
              for (;;)
                {
                  if ((paddr = virt_to_phys_060(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;
    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;	/* Workaround for bug in some
				   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_060(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;
	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}
	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3*PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10*PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
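/*
 * Userspace view (illustrative sketch, not part of the original file):
 * after generating code at runtime, a process could flush both caches
 * for just that region (constants from <asm/cachectl.h>; buf/buf_len
 * are hypothetical):
 *
 *	if (syscall(__NR_cacheflush, (unsigned long)buf,
 *		    FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, buf_len) < 0)
 *		perror("cacheflush");
 *
 * EINVAL is returned for a bad scope/cache pair or a region outside the
 * caller's address space; FLUSH_SCOPE_ALL additionally requires
 * CAP_SYS_ADMIN, as checked above.
 */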
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
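/*
 * The trap above uses the regular m68k syscall convention: call number
 * in %d0, arguments in %d1-%d5, result back in %d0. A minimal sketch of
 * the same convention for a one-argument call (illustrative only; `fd`
 * is a hypothetical variable):
 *
 *	register long __res asm ("%d0") = __NR_close;
 *	register long __a asm ("%d1") = (long)fd;
 *	asm volatile ("trap #0" : "+d" (__res) : "d" (__a));
 */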
asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}
asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}
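/*
 * Userspace view (illustrative sketch, not part of the original file):
 * a thread library would store its thread pointer once per thread and
 * read it back cheaply later, e.g. with a hypothetical `tcb` block:
 *
 *	syscall(__NR_set_thread_area, (unsigned long)tcb);
 *	tcb = (struct tcb *)syscall(__NR_get_thread_area);
 */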
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;
		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}
		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;
		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

	bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access; we can get here if
		   the memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page handling, then
		   re-iterate.  Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;
			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}
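/*
 * Userspace view (illustrative sketch, not part of the original file):
 * the `mem` argument travels in %a0 rather than a data register, so a
 * libc-less caller would wrap the trap directly; the return value is
 * the old contents of the location:
 *
 *	static inline int atomic_cmpxchg_32(volatile int *mem,
 *					    int old, int new)
 *	{
 *		register long  res asm ("%d0") = __NR_atomic_cmpxchg_32;
 *		register long  d1  asm ("%d1") = new;
 *		register long  d2  asm ("%d2") = old;
 *		register void *a0  asm ("%a0") = (void *)mem;
 *		asm volatile ("trap #0"
 *			      : "+d" (res)
 *			      : "d" (d1), "d" (d2), "a" (a0));
 *		return res;
 *	}
 */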
asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}