/*
 * arch/nds32/mm/mmap.c
 * (snapshot taken at commit "treewide: remove redundant IS_ERR() before
 *  error code check"; blob c206b31ce07ac4e239b05a0c8e00cc309666f88e)
 */
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2005-2017 Andes Technology Corporation
4 #include <linux/sched.h>
5 #include <linux/mman.h>
6 #include <linux/shm.h>
/*
 * COLOUR_ALIGN - round @addr up to the next SHMLBA boundary, then add the
 * cache-colour offset implied by @pgoff.
 *
 * The first term aligns @addr up to a multiple of SHMLBA; the second term
 * keeps the mapping's colour consistent with the file offset, so that a
 * given page of an object always lands at the same SHMLBA-relative offset
 * (required to avoid aliasing in VIPT caches).
 */
#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches. We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
21 unsigned long
22 arch_get_unmapped_area(struct file *filp, unsigned long addr,
23 unsigned long len, unsigned long pgoff,
24 unsigned long flags)
26 struct mm_struct *mm = current->mm;
27 struct vm_area_struct *vma;
28 int do_align = 0;
29 struct vm_unmapped_area_info info;
30 int aliasing = 0;
31 if(IS_ENABLED(CONFIG_CPU_CACHE_ALIASING))
32 aliasing = 1;
35 * We only need to do colour alignment if either the I or D
36 * caches alias.
38 if (aliasing)
39 do_align = filp || (flags & MAP_SHARED);
42 * We enforce the MAP_FIXED case.
44 if (flags & MAP_FIXED) {
45 if (aliasing && flags & MAP_SHARED &&
46 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
47 return -EINVAL;
48 return addr;
51 if (len > TASK_SIZE)
52 return -ENOMEM;
54 if (addr) {
55 if (do_align)
56 addr = COLOUR_ALIGN(addr, pgoff);
57 else
58 addr = PAGE_ALIGN(addr);
60 vma = find_vma(mm, addr);
61 if (TASK_SIZE - len >= addr &&
62 (!vma || addr + len <= vma->vm_start))
63 return addr;
66 info.flags = 0;
67 info.length = len;
68 info.low_limit = mm->mmap_base;
69 info.high_limit = TASK_SIZE;
70 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
71 info.align_offset = pgoff << PAGE_SHIFT;
72 return vm_unmapped_area(&info);