1 #ifndef _ASM_POWERPC_PTE_WALK_H
2 #define _ASM_POWERPC_PTE_WALK_H
4 #include <linux/sched.h>
6 /* Don't use this directly */
/*
 * Core lockless page table walker: returns the pte entry mapping @ea in
 * the page tables rooted at @pgdir, or NULL if none.
 *
 * NOTE(review): presumably *is_thp is set when the entry is a transparent
 * huge page and *hshift receives the (huge)page shift — confirm against
 * the out-of-line definition; only the signature is visible here.
 */
extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			       bool *is_thp, unsigned *hshift);
10 static inline pte_t
*find_linux_pte(pgd_t
*pgdir
, unsigned long ea
,
11 bool *is_thp
, unsigned *hshift
)
13 VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__
);
14 return __find_linux_pte(pgdir
, ea
, is_thp
, hshift
);
17 static inline pte_t
*find_init_mm_pte(unsigned long ea
, unsigned *hshift
)
19 pgd_t
*pgdir
= init_mm
.pgd
;
20 return __find_linux_pte(pgdir
, ea
, NULL
, hshift
);
23 * This is what we should always use. Any other lockless page table lookup needs
24 * careful audit against THP split.
26 static inline pte_t
*find_current_mm_pte(pgd_t
*pgdir
, unsigned long ea
,
27 bool *is_thp
, unsigned *hshift
)
29 VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__
);
30 VM_WARN(pgdir
!= current
->mm
->pgd
,
31 "%s lock less page table lookup called on wrong mm\n", __func__
);
32 return __find_linux_pte(pgdir
, ea
, is_thp
, hshift
);
35 #endif /* _ASM_POWERPC_PTE_WALK_H */