arch/s390/kernel/mem_detect.c

/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
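
/* 2 GB boundary; a 31-bit kernel can only address memory below it */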
#define ADDR2G (1ULL << 31)

static void find_memory_chunks(struct mem_chunk chunk[])
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;
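
	/*
	 * The SCLP reports the storage increment size (rzm) and the
	 * maximum number of increments (rnmax); their product is the
	 * maximum memory size. A reported rzm of zero falls back to
	 * the 128 KB minimum increment.
	 */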
	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	memsize = rzm * rnmax;
	if (!rzm)
		rzm = 1ULL << 17;
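	/* A 31-bit kernel cannot address memory beyond 2 GB */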
	if (sizeof(long) == 4) {
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
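	/*
	 * Probe storage in rzm-sized steps with tprot and merge
	 * consecutive increments with the same protection state into
	 * one chunk; only readable chunks are recorded.
	 */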
	do {
		size = 0;
		type = tprot(addr);
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}

void detect_memory_layout(struct mem_chunk chunk[])
{
	unsigned long flags, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/* Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
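	/* stnsm ANDs 0xf8 into the PSW system mask: DAT and IRQs off */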
	flags = __arch_local_irq_stnsm(0xf8);
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);
	find_memory_chunks(chunk);
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);

/*
 * Move memory chunks array from index "from" to index "to"
 */
static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
{
	int cnt = MEMORY_CHUNKS - to;

	memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
}

/*
 * Initialize memory chunk
 */
static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
			   unsigned long size, int type)
{
	chunk->type = type;
	chunk->addr = addr;
	chunk->size = size;
}

/*
 * Create memory hole with given address, size, and type
 */
void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
		     unsigned long size, int type)
{
	unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
	int i, ch_type;
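
	/*
	 * A hole can miss a chunk entirely, cover it completely, overlap
	 * one of its ends, or split it in the middle. The overlap and
	 * split cases need one or two extra array slots, which
	 * mem_chunk_move opens up.
	 */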
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (chunk[i].size == 0)
			continue;

		/* Define chunk properties */
		ch_start = chunk[i].addr;
		ch_size = chunk[i].size;
		ch_end = ch_start + ch_size - 1;
		ch_type = chunk[i].type;

		/* Is memory chunk hit by memory hole? */
		if (addr + size <= ch_start)
			continue; /* No: memory hole in front of chunk */
		if (addr > ch_end)
			continue; /* No: memory hole after chunk */

		/* Yes: Define local hole properties */
		lh_start = max(addr, chunk[i].addr);
		lh_end = min(addr + size - 1, ch_end);
		lh_size = lh_end - lh_start + 1;

		if (lh_start == ch_start && lh_end == ch_end) {
			/* Hole covers complete memory chunk */
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
		} else if (lh_end == ch_end) {
			/* Hole starts in memory chunk and covers chunk end */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
				       ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			i += 1;
		} else if (lh_start == ch_start) {
			/* Hole ends in memory chunk */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 1], lh_end + 1,
				       ch_size - lh_size, ch_type);
			break;
		} else {
			/* Hole splits memory chunk */
			mem_chunk_move(chunk, i + 2, i);
			mem_chunk_init(&chunk[i], ch_start,
				       lh_start - ch_start, ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 2], lh_end + 1,
				       ch_end - lh_end, ch_type);
			break;
		}
	}
}
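
/*
 * Usage sketch (hypothetical caller; the OLDMEM_BASE/OLDMEM_SIZE
 * constants and CHUNK_OLDMEM type below are illustrative assumptions):
 * detect the layout first, then punch a hole for a reserved region
 * such as a kdump/zfcpdump oldmem area:
 *
 *	struct mem_chunk chunks[MEMORY_CHUNKS];
 *
 *	detect_memory_layout(chunks);
 *	create_mem_hole(chunks, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
 */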