treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / arch / csky / abiv1 / mmap.c
blob6792aca49999172fad78b5bdc6a7fc80f3cf4af1
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
4 #include <linux/fs.h>
5 #include <linux/mm.h>
6 #include <linux/mman.h>
7 #include <linux/shm.h>
8 #include <linux/sched.h>
9 #include <linux/random.h>
10 #include <linux/io.h>
/*
 * Round @addr up to the next SHMLBA boundary, then add the sub-SHMLBA
 * offset implied by @pgoff so that the mapping's pages land on the same
 * cache colour as the file offset demands (VIPT alias avoidance).
 *
 * NOTE(review): classic multi-evaluation macro — both arguments are
 * expanded twice; callers must not pass expressions with side effects.
 */
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
17 * We need to ensure that shared mappings are correctly aligned to
18 * avoid aliasing issues with VIPT caches. We need to ensure that
19 * a specific page of an object is always mapped at a multiple of
20 * SHMLBA bytes.
22 * We unconditionally provide this function for all cases.
24 unsigned long
25 arch_get_unmapped_area(struct file *filp, unsigned long addr,
26 unsigned long len, unsigned long pgoff, unsigned long flags)
28 struct mm_struct *mm = current->mm;
29 struct vm_area_struct *vma;
30 int do_align = 0;
31 struct vm_unmapped_area_info info;
34 * We only need to do colour alignment if either the I or D
35 * caches alias.
37 do_align = filp || (flags & MAP_SHARED);
40 * We enforce the MAP_FIXED case.
42 if (flags & MAP_FIXED) {
43 if (flags & MAP_SHARED &&
44 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
45 return -EINVAL;
46 return addr;
49 if (len > TASK_SIZE)
50 return -ENOMEM;
52 if (addr) {
53 if (do_align)
54 addr = COLOUR_ALIGN(addr, pgoff);
55 else
56 addr = PAGE_ALIGN(addr);
58 vma = find_vma(mm, addr);
59 if (TASK_SIZE - len >= addr &&
60 (!vma || addr + len <= vm_start_gap(vma)))
61 return addr;
64 info.flags = 0;
65 info.length = len;
66 info.low_limit = mm->mmap_base;
67 info.high_limit = TASK_SIZE;
68 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
69 info.align_offset = pgoff << PAGE_SHIFT;
70 return vm_unmapped_area(&info);