/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#define ADDR2G (1ULL << 31)

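/*
 * Scan real storage in rzm-sized steps and merge adjacent regions that
 * tprot() reports with the same protection state (read-write, read-only,
 * or inaccessible) into mem_chunk entries.  rzm is the storage increment
 * size and rnmax the maximum number of increments reported by SCLP, so
 * rzm * rnmax is the highest possible memory size.
 */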
static void find_memory_chunks(struct mem_chunk chunk[])
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	memsize = rzm * rnmax;
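	/*
	 * If SCLP did not report an increment size, fall back to probing
	 * in 128KB steps; on 31-bit kernels clamp both the increment size
	 * and the total size to the 2G address limit.
	 */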
	if (!rzm)
		rzm = 1ULL << 17;
	if (sizeof(long) == 4) {
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
	do {
		size = 0;
		type = tprot(addr);
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}

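/*
 * Fill the chunk array with the machine's memory layout.  The CPU state
 * needed by the tprot probes (DAT and low address protection off, IRQs
 * disabled) is set up around find_memory_chunks() and restored afterwards.
 */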
void detect_memory_layout(struct mem_chunk chunk[])
{
	unsigned long flags, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/* Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	flags = __arch_local_irq_stnsm(0xf8);
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);
	find_memory_chunks(chunk);
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);

/*
 * Move the memory chunk array entries from index "from" to index "to"
 */
static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
{
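	/*
	 * Note: only MEMORY_CHUNKS - "to" entries are copied, so when the
	 * array is shifted towards higher indexes the last ("to" - "from")
	 * entries fall off the end of the array and are lost.
	 */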
	int cnt = MEMORY_CHUNKS - to;

	memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
}

/*
 * Initialize memory chunk
 */
static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
			   unsigned long size, int type)
{
	chunk->addr = addr;
	chunk->size = size;
	chunk->type = type;
}

/*
 * Create a memory hole with the given address, size, and type
 */
void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
		     unsigned long size, int type)
{
	unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
	int i, ch_type;

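	/*
	 * Each chunk that overlaps [addr, addr + size - 1] falls into one of
	 * four cases below: the hole covers the whole chunk, covers only its
	 * end, covers only its start, or splits it in two.  All but the first
	 * case shift the remaining entries with mem_chunk_move() to make room
	 * for the new ones.
	 */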
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (chunk[i].size == 0)
			continue;

		/* Define chunk properties */
		ch_start = chunk[i].addr;
		ch_size = chunk[i].size;
		ch_end = ch_start + ch_size - 1;
		ch_type = chunk[i].type;

		/* Is memory chunk hit by memory hole? */
		if (addr + size <= ch_start)
			continue; /* No: memory hole in front of chunk */
		if (addr > ch_end)
			continue; /* No: memory hole after chunk */

		/* Yes: Define local hole properties */
		lh_start = max(addr, chunk[i].addr);
		lh_end = min(addr + size - 1, ch_end);
		lh_size = lh_end - lh_start + 1;

		if (lh_start == ch_start && lh_end == ch_end) {
			/* Hole covers complete memory chunk */
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
		} else if (lh_end == ch_end) {
			/* Hole starts in memory chunk and covers chunk end */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
				       ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			i += 1;
		} else if (lh_start == ch_start) {
			/* Hole ends in memory chunk */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 1], lh_end + 1,
				       ch_size - lh_size, ch_type);
			break;
		} else {
			/* Hole splits memory chunk */
			mem_chunk_move(chunk, i + 2, i);
			mem_chunk_init(&chunk[i], ch_start,
				       lh_start - ch_start, ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 2], lh_end + 1,
				       ch_end - lh_end, ch_type);
			break;
		}
	}
}