arch/s390/kernel/mem_detect.c
/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#define ADDR2G (1ULL << 31)
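
/*
 * Scan storage in increments of the SCLP storage increment size (rzm),
 * probing each increment with tprot and merging consecutive increments
 * that report the same access type into a single mem_chunk entry.
 */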
static void find_memory_chunks(struct mem_chunk chunk[])
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	memsize = rzm * rnmax;
	if (!rzm)
		rzm = 1ULL << 17;
	if (sizeof(long) == 4) {
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
	do {
		size = 0;
		type = tprot(addr);
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}
void detect_memory_layout(struct mem_chunk chunk[])
{
	unsigned long flags, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/* Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	flags = __arch_local_irq_stnsm(0xf8);
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);
	find_memory_chunks(chunk);
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
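
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * fill and walk the chunk array produced by detect_memory_layout(). The
 * array name "memory_chunk" and the example function are assumptions made
 * for illustration; struct mem_chunk and MEMORY_CHUNKS come from
 * <asm/setup.h>.
 *
 *	static struct mem_chunk memory_chunk[MEMORY_CHUNKS] __initdata;
 *
 *	static void __init example_setup_memory(void)
 *	{
 *		int i;
 *
 *		detect_memory_layout(memory_chunk);
 *		for (i = 0; i < MEMORY_CHUNKS; i++) {
 *			if (memory_chunk[i].size == 0)
 *				continue;
 *			pr_info("chunk %d: addr=%lx size=%lx type=%d\n", i,
 *				memory_chunk[i].addr, memory_chunk[i].size,
 *				memory_chunk[i].type);
 *		}
 *	}
 */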
/*
 * Create memory hole with given address, size, and type
 */
void create_mem_hole(struct mem_chunk chunks[], unsigned long addr,
		     unsigned long size, int type)
{
	unsigned long start, end, new_size;
	int i;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (chunks[i].size == 0)
			continue;
		if (addr + size < chunks[i].addr)
			continue;
		if (addr >= chunks[i].addr + chunks[i].size)
			continue;
		start = max(addr, chunks[i].addr);
		end = min(addr + size, chunks[i].addr + chunks[i].size);
		new_size = end - start;
		if (new_size == 0)
			continue;
		if (start == chunks[i].addr &&
		    end == chunks[i].addr + chunks[i].size) {
			/* Remove chunk */
			chunks[i].type = type;
		} else if (start == chunks[i].addr) {
			/* Make chunk smaller at start */
			if (i >= MEMORY_CHUNKS - 1)
				panic("Unable to create memory hole");
			memmove(&chunks[i + 1], &chunks[i],
				sizeof(struct mem_chunk) *
				(MEMORY_CHUNKS - (i + 1)));
			chunks[i + 1].addr = chunks[i].addr + new_size;
			chunks[i + 1].size = chunks[i].size - new_size;
			chunks[i].size = new_size;
			chunks[i].type = type;
			i += 1;
		} else if (end == chunks[i].addr + chunks[i].size) {
			/* Make chunk smaller at end */
			if (i >= MEMORY_CHUNKS - 1)
				panic("Unable to create memory hole");
			memmove(&chunks[i + 1], &chunks[i],
				sizeof(struct mem_chunk) *
				(MEMORY_CHUNKS - (i + 1)));
			chunks[i + 1].addr = start;
			chunks[i + 1].size = new_size;
			chunks[i + 1].type = type;
			chunks[i].size -= new_size;
			i += 1;
		} else {
			/* Create memory hole in the middle of the chunk */
			if (i >= MEMORY_CHUNKS - 2)
				panic("Unable to create memory hole");
			memmove(&chunks[i + 2], &chunks[i],
				sizeof(struct mem_chunk) *
				(MEMORY_CHUNKS - (i + 2)));
			chunks[i + 1].addr = addr;
			chunks[i + 1].size = size;
			chunks[i + 1].type = type;
			chunks[i + 2].addr = addr + size;
			chunks[i + 2].size =
				chunks[i].addr + chunks[i].size - (addr + size);
			chunks[i + 2].type = chunks[i].type;
			chunks[i].size = addr - chunks[i].addr;
			i += 2;
		}
	}
}
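
/*
 * Illustrative sketch (not part of the original file): punching a hole out of
 * the detected layout for a reserved region. The chunk array name and the
 * EXAMPLE_HOLE_* symbols are assumptions used purely for illustration; the
 * chunk type passed in the last argument would be one of the CHUNK_* values
 * from <asm/setup.h>.
 *
 *	detect_memory_layout(memory_chunk);
 *	if (EXAMPLE_HOLE_SIZE)
 *		create_mem_hole(memory_chunk, EXAMPLE_HOLE_BASE,
 *				EXAMPLE_HOLE_SIZE, CHUNK_READ_ONLY);
 */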