arch/powerpc/include/asm/pte-walk.h
#ifndef _ASM_POWERPC_PTE_WALK_H
#define _ASM_POWERPC_PTE_WALK_H

#include <linux/sched.h>

/* Don't use this directly */
extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			       bool *is_thp, unsigned *hshift);

/*
 * Walk the page tables for effective address @ea and return a pointer to
 * the PTE, or NULL if none is present.  *hshift is set to the huge page
 * shift (0 for a normal page) and *is_thp reports whether the entry maps a
 * transparent huge page.  The caller must have interrupts disabled so the
 * tables cannot be split or freed underneath the walk.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
				    bool *is_thp, unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	pte = __find_linux_pte(pgdir, ea, is_thp, hshift);

#if defined(CONFIG_DEBUG_VM) && \
	!(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
	/*
	 * We should not find a huge page if these configs are not enabled.
	 */
	if (hshift)
		WARN_ON(*hshift);
#endif
	return pte;
}

/*
 * Walk the kernel's own (init_mm) page tables; is_thp is not needed here
 * because THP only applies to user mappings.
 */
static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
{
	pgd_t *pgdir = init_mm.pgd;
	return __find_linux_pte(pgdir, ea, NULL, hshift);
}

/*
 * This is what we should always use.  Any other lockless page table lookup
 * needs careful audit against THP split.
 */
static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
					 bool *is_thp, unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	VM_WARN(pgdir != current->mm->pgd,
		"%s lockless page table lookup called on wrong mm\n", __func__);
	pte = __find_linux_pte(pgdir, ea, is_thp, hshift);

#if defined(CONFIG_DEBUG_VM) && \
	!(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
	/*
	 * We should not find a huge page if these configs are not enabled.
	 */
	if (hshift)
		WARN_ON(*hshift);
#endif
	return pte;
}

#endif /* _ASM_POWERPC_PTE_WALK_H */
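
For illustration, here is a minimal caller sketch (not part of pte-walk.h; the
function name lookup_ea() is invented) showing the contract the VM_WARN checks
enforce: interrupts are disabled around the lookup so the page tables cannot be
split or freed, and the pte value is snapshotted before interrupts are
re-enabled.

/* Hypothetical example only -- not from the kernel tree. */
static pte_t lookup_ea(unsigned long ea)
{
	unsigned long flags;
	unsigned int hshift = 0;
	bool is_thp = false;
	pte_t *ptep, pte = __pte(0);

	local_irq_save(flags);		/* hold off THP split / table freeing */
	ptep = find_current_mm_pte(current->mm->pgd, ea, &is_thp, &hshift);
	if (ptep)
		pte = READ_ONCE(*ptep);	/* snapshot while the walk is still valid */
	local_irq_restore(flags);

	return pte;
}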