// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_MMU

#include <asm/tlb.h>

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
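/*
 * Illustrative note (not part of the original source): because pgoff is
 * expressed in 4 KiB units, a mapping at file offset 64 KiB is requested
 * with pgoff == 0x10000 >> 12 == 16, independently of the kernel's
 * PAGE_SIZE.
 */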
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)					\
({								\
	unsigned long _mmusr, _paddr;				\
								\
	__asm__ __volatile__ (".chip 68040\n\t"			\
			      "ptestr (%1)\n\t"			\
			      "movec %%mmusr,%0\n\t"		\
			      ".chip 68k"			\
			      : "=r" (_mmusr)			\
			      : "a" (vaddr));			\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
	_paddr;							\
})
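/*
 * Added note: ptestr performs a table search for a user read access and
 * latches the result in the MMUSR; MMU_R_040 is the "resident" bit, so
 * the macro evaluates to 0 when the page is not currently mapped.
 */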
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040.  */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
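/*
 * Added note: the three scopes map directly onto the 68040 cache
 * instructions used above: FLUSH_SCOPE_ALL uses cpusha (whole cache),
 * FLUSH_SCOPE_PAGE uses cpushp (one page per physical address), and
 * FLUSH_SCOPE_LINE uses cpushl on each 16-byte cache line, which is why
 * lengths are converted with ">> 4".
 */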
#define virt_to_phys_060(vaddr)				\
({							\
	unsigned long paddr;				\
	__asm__ __volatile__ (".chip 68060\n\t"		\
			      "plpar (%0)\n\t"		\
			      ".chip 68k"		\
			      : "=a" (paddr)		\
			      : "0" (vaddr));		\
	(paddr); /* XXX */				\
})
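/*
 * Added note: on the 68060, plpar translates the logical address held in
 * the address register in place, so the physical address comes back in
 * the same register (hence the "0" input constraint).  Unlike the '040
 * variant there is no explicit "not mapped" status check here, which the
 * XXX above appears to flag.
 */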
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	int ret = -EINVAL;

	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		struct vm_area_struct *vma;

		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;

		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		mmap_read_lock(current->mm);
		vma = find_vma(current->mm, addr);
		if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out_unlock;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out_unlock;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out_unlock:
	mmap_read_unlock(current->mm);
out:
	return ret;
}
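/*
 * Usage sketch (illustrative, not part of the original source): userspace
 * reaches this handler through the m68k cacheflush() system call, e.g.
 * after writing freshly generated code into a buffer:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <asm/cachectl.h>
 *
 *	// flush the data cache and invalidate the insn cache for buf/buflen
 *	// (buf and buflen are placeholder names)
 *	syscall(__NR_cacheflush, (unsigned long)buf,
 *		FLUSH_SCOPE_PAGE, FLUSH_CACHE_BOTH, buflen);
 *
 * FLUSH_SCOPE_ALL additionally requires CAP_SYS_ADMIN, as enforced above.
 */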
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		mmap_read_lock(mm);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		p4d = p4d_offset(pgd, (unsigned long)mem);
		if (!p4d_present(*p4d))
			goto bad_access;
		pud = pud_offset(p4d, (unsigned long)mem);
		if (!pud_present(*pud))
			goto bad_access;
		pmd = pmd_offset(pud, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		/*
		 * No need to check for EFAULT; we know that the page is
		 * present and writable.
		 */
		__get_user(mem_value, mem);
		if (mem_value == oldval)
			__put_user(newval, mem);

		pte_unmap_unlock(pte, ptl);
		mmap_read_unlock(mm);
		return mem_value;

bad_access:
		mmap_read_unlock(mm);
		/* This is not necessarily a bad access, we can get here if
		   a memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page stuff, then re-iterate.
		   Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If the do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}
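/*
 * Added note: the caller learns whether the exchange happened by comparing
 * the returned value with the oldval it passed in; equality means newval
 * was stored.  Because the arguments travel in registers (mem in %a0,
 * oldval in %d2, newval in %d1), user code normally invokes this through a
 * small assembly stub rather than syscall(2).  A hypothetical C-level
 * wrapper, here called m68k_sys_cmpxchg_32(), would be used like:
 *
 *	do {
 *		old = *counter;
 *	} while (m68k_sys_cmpxchg_32(counter, old, old + 1) != old);
 *
 * The wrapper name and its argument order are illustrative only.
 */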
#else

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	flush_cache_all();
	return 0;
}
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	struct mm_struct *mm = current->mm;
	unsigned long mem_value;

	mmap_read_lock(mm);

	mem_value = *mem;
	if (mem_value == oldval)
		*mem = newval;

	mmap_read_unlock(mm);
	return mem_value;
}
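/*
 * Added note: in the !CONFIG_MMU configuration there is no separate user
 * address space, so the user pointer can be dereferenced directly instead
 * of walking the page tables as the MMU variant above does.
 */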
#endif /* CONFIG_MMU */

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
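/*
 * Added note: m68k has no user-visible thread-pointer register, so the TLS
 * pointer lives in thread_info and is exposed through the two syscalls
 * below; C libraries typically build their thread-pointer helper (e.g.
 * __m68k_read_tp() on GCC targets) on top of them.
 */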
asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}
asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}