[linux/fpc-iii.git] / include/asm-alpha/mmzone.h
/*
 * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
 * Adapted for the alpha wildfire architecture Jan 2001.
 */
#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_

#include <asm/smp.h>

struct bootmem_data_t; /* stupid forward decl. */

/*
 * Following are macros that are specific to this numa platform.
 */

extern pg_data_t node_data[];
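
/*
 * These helpers dispatch through the alpha machine vector (alpha_mv);
 * platforms that leave a hook unset fall back to treating node 0 as
 * owning all of memory.
 */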
#define alpha_pa_to_nid(pa)		\
	(alpha_mv.pa_to_nid		\
	 ? alpha_mv.pa_to_nid(pa)	\
	 : (0))
#define node_mem_start(nid)		\
	(alpha_mv.node_mem_start	\
	 ? alpha_mv.node_mem_start(nid)	\
	 : (0UL))
#define node_mem_size(nid)		\
	(alpha_mv.node_mem_size		\
	 ? alpha_mv.node_mem_size(nid)	\
	 : ((nid) ? (0UL) : (~0UL)))
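
/*
 * For example, assuming none of the hooks are set (a non-NUMA machine):
 *	alpha_pa_to_nid(pa)	evaluates to 0
 *	node_mem_start(0)	evaluates to 0UL
 *	node_mem_size(0)	evaluates to ~0UL (node 0 owns all memory)
 *	node_mem_size(1)	evaluates to 0UL
 */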

#define pa_to_nid(pa)		alpha_pa_to_nid(pa)
#define NODE_DATA(nid)		(&node_data[(nid)])
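
/* Convert a global pfn into an index relative to its node's first pfn. */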
#define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
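
/*
 * Two equivalent ways to compute the node-local page number of a
 * physical address; the "#if 1" selects the macro form.
 */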
#if 1
#define PLAT_NODE_DATA_LOCALNR(p, n)	\
	(((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
#else
static inline unsigned long
PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
{
	unsigned long temp;
	temp = p >> PAGE_SHIFT;
	return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
}
#endif

#ifdef CONFIG_DISCONTIGMEM

/*
 * Following are macros that each numa implementation must define.
 */

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))
#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)

/*
 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
 * and returns the kaddr corresponding to the first physical page in the
 * node's mem_map.
 */
#define LOCAL_BASE_ADDR(kaddr)						\
	((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
			     << PAGE_SHIFT))

/* XXX: FIXME -- wli */
#define kern_addr_valid(kaddr)	(0)

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

#define VALID_PAGE(page)	(((page) - mem_map) < max_mapnr)
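
/*
 * Alpha keeps the page frame number in the upper 32 bits of a page
 * table entry, hence the 32-bit shifts in the accessors below.
 */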
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> 32))
#define pgd_page(pgd)		(pfn_to_page(pgd_val(pgd) >> 32))
#define pte_pfn(pte)		(pte_val(pte) >> 32)
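
/*
 * Build a pte from a struct page and protection bits: the pfn goes in
 * the upper 32 bits, the protection bits in the low ones.
 */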
#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
	unsigned long pfn;						\
									\
	pfn = page_to_pfn(page) << 32;					\
	pte_val(pte) = pfn | pgprot_val(pgprot);			\
									\
	pte;								\
})
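
/*
 * Recover the struct page for a pte: convert the pfn held in the upper
 * bits back into a kernel virtual address, then resolve it with
 * virt_to_page().
 */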
#define pte_page(x)							\
({									\
	unsigned long kvirt;						\
	struct page * __xx;						\
									\
	kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT));	\
	__xx = virt_to_page(kvirt);					\
									\
	__xx;								\
})

#define page_to_pa(page)						\
	(page_to_pfn(page) << PAGE_SHIFT)

#define pfn_to_nid(pfn)		pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
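
/* A pfn is valid when it lies within the pages spanned by its node. */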
#define pfn_valid(pfn)							\
	(((pfn) - node_start_pfn(pfn_to_nid(pfn))) <			\
	 node_spanned_pages(pfn_to_nid(pfn)))

#define virt_addr_valid(kaddr)	pfn_valid((__pa(kaddr) >> PAGE_SHIFT))

#endif /* CONFIG_DISCONTIGMEM */

#endif /* _ASM_MMZONE_H_ */