/*
 * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
 * Adapted for the alpha wildfire architecture Jan 2001.
 */
8 #include <linux/config.h>
/* Forward declaration is sufficient here; the full type is defined elsewhere. */
struct bootmem_data_t;
/*
 * Following are macros that are specific to this numa platform.
 */
/* One pg_data_t per NUMA node; indexed via NODE_DATA() below. */
extern pg_data_t node_data[];
/*
 * Translate a physical address to its home node id.  Machine vectors
 * that are not NUMA-aware provide no pa_to_nid hook; everything is
 * then on node 0.
 */
#define alpha_pa_to_nid(pa)		\
	(alpha_mv.pa_to_nid		\
	 ? alpha_mv.pa_to_nid(pa)	\
	 : (0))
/*
 * First physical address of node 'nid's memory.
 * NOTE(review): the fallback arm was truncated in this copy; 0UL
 * (single node, memory starts at physical 0) is consistent with the
 * node_mem_size() fallback below -- confirm against the original tree.
 */
#define node_mem_start(nid)		\
	(alpha_mv.node_mem_start	\
	 ? alpha_mv.node_mem_start(nid)	\
	 : (0UL))
/*
 * Size in bytes of node 'nid's memory.  Without a hook, node 0 owns
 * all memory (~0UL) and every other node is empty.
 */
#define node_mem_size(nid)		\
	(alpha_mv.node_mem_size		\
	 ? alpha_mv.node_mem_size(nid)	\
	 : ((nid) ? (0UL) : (~0UL)))
/* Generic names expected by core NUMA code, in terms of the alpha helpers. */
#define pa_to_nid(pa)		alpha_pa_to_nid(pa)
#define NODE_DATA(nid)		(&node_data[(nid)])

/* Page-frame number 'pfn' relative to the first pfn of node 'nid'. */
#define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
/*
 * Node-local page-frame number of physical address 'p' on node 'n'.
 * Two equivalent definitions: the macro form is the one in use; the
 * type-checked inline is kept (disabled) for reference.  Without this
 * #if guard the two would be a duplicate definition of the same name.
 */
#if 1
#define PLAT_NODE_DATA_LOCALNR(p, n)	\
	(((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
#else
static inline unsigned long
PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
{
	unsigned long temp;

	temp = p >> PAGE_SHIFT;
	return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
}
#endif
50 #ifdef CONFIG_DISCONTIGMEM
/*
 * Following are macros that each numa implementation must define.
 */

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))
/* mem_map slice and first pfn of node 'nid'. */
#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)

/* Index of 'kvaddr' into its owning node's mem_map. */
#define local_mapnr(kvaddr) \
	((__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)))
/*
 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
 * and returns the kaddr corresponding to the first physical page in
 * that node.
 */
/* Kernel virtual address of the first page of the node owning 'kaddr'. */
#define LOCAL_BASE_ADDR(kaddr)						     \
	((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
			     << PAGE_SHIFT))
/* XXX: FIXME -- wli */
/* Placeholder: reports every kernel address as invalid. */
#define kern_addr_valid(kaddr)	(0)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

/* A page is valid when its index into mem_map is below max_mapnr. */
#define VALID_PAGE(page)	(((page) - mem_map) < max_mapnr)

/* On this port the pfn is stored in the upper 32 bits of the pmd/pte. */
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> 32))
#define pte_pfn(pte)		(pte_val(pte) >> 32)
/*
 * Build a pte for 'page' with protection 'pgprot'.  The global pfn
 * (zone-local index plus zone_start_pfn) goes into the upper 32 bits,
 * matching the >> 32 in pte_pfn().  Uses a GCC statement expression so
 * the locals stay private to the macro.
 */
#define mk_pte(page, pgprot)						     \
({									     \
	pte_t pte;							     \
	unsigned long pfn;						     \
									     \
	pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32; \
	pfn += page_zone(page)->zone_start_pfn << 32;			     \
	pte_val(pte) = pfn | pgprot_val(pgprot);			     \
									     \
	pte;								     \
})
/*
 * Map a pte back to its struct page: recover the kernel virtual
 * address from the pfn held in the pte's upper 32 bits, then look the
 * page up through virt_to_page().
 */
#define pte_page(x)							\
({									\
	unsigned long kvirt;						\
	struct page * __xx;						\
									\
	kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT));	\
	__xx = virt_to_page(kvirt);					\
									\
	__xx;								\
})
/*
 * struct page for page-frame number 'pfn', found via its owning node's
 * mem_map.  Needs a statement expression: a local declaration cannot
 * appear in a plain expression macro.
 */
#define pfn_to_page(pfn)						\
({									\
	unsigned long kaddr = (unsigned long)__va((pfn) << PAGE_SHIFT);	\
	(node_mem_map(kvaddr_to_nid(kaddr)) + local_mapnr(kaddr));	\
})
/* Global pfn of 'page': zone-local index plus the zone's first pfn. */
#define page_to_pfn(page) \
	((page) - page_zone(page)->zone_mem_map + \
	 (page_zone(page)->zone_start_pfn))

/* Physical byte address of 'page' (its pfn shifted by PAGE_SHIFT). */
#define page_to_pa(page) \
	((( (page) - page_zone(page)->zone_mem_map ) \
	 + page_zone(page)->zone_start_pfn) << PAGE_SHIFT)
/* Home node of page-frame number 'pfn'. */
#define pfn_to_nid(pfn)		pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
/*
 * 'pfn' is valid when it lies inside its node's spanned range.  The
 * stray trailing backslash that used to follow this macro was removed:
 * it spliced the next #define into this replacement list, so
 * virt_addr_valid() was never actually defined.
 */
#define pfn_valid(pfn)						\
	(((pfn) - node_start_pfn(pfn_to_nid(pfn))) <		\
	 node_spanned_pages(pfn_to_nid(pfn)))

#define virt_addr_valid(kaddr)	pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
129 #endif /* CONFIG_DISCONTIGMEM */
131 #endif /* _ASM_MMZONE_H_ */