/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.  June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <asm/page.h>		/* for _ALIGN_DOWN() */
#include <asm/lmb.h>		/* struct lmb, MAX_LMB_REGIONS */
#include <asm/udbg.h>		/* for udbg_printf() */
#include "mmu_decl.h"		/* for __max_low_memory */

struct lmb lmb;

#define DBG(fmt...) udbg_printf(fmt)

#define LMB_ALLOC_ANYWHERE	0
void lmb_dump_all(void)
{
	unsigned long i;

	DBG("lmb_dump_all:\n");
	DBG("    memory.cnt  = 0x%lx\n", lmb.memory.cnt);
	DBG("    memory.size = 0x%lx\n", lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
		DBG("    memory.region[0x%lx].base = 0x%lx\n",
		    i, lmb.memory.region[i].base);
		DBG("                       .size = 0x%lx\n",
		    lmb.memory.region[i].size);
	}

	DBG("\n    reserved.cnt  = 0x%lx\n", lmb.reserved.cnt);
	DBG("    reserved.size = 0x%lx\n", lmb.reserved.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
		DBG("    reserved.region[0x%lx].base = 0x%lx\n",
		    i, lmb.reserved.region[i].base);
		DBG("                         .size = 0x%lx\n",
		    lmb.reserved.region[i].size);
	}
}
static unsigned long __init lmb_addrs_overlap(unsigned long base1,
		unsigned long size1, unsigned long base2, unsigned long size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
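/*
 * Worked example (illustrative addresses): [0x1000, 0x2000) and
 * [0x1800, 0x3000) overlap since 0x1000 < 0x3000 and 0x1800 < 0x2000,
 * but [0x1000, 0x2000) and [0x2000, 0x3000) do not -- ranges that
 * merely touch end-to-start are "adjacent", which lmb_addrs_adjacent()
 * below detects separately.
 */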
static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
		unsigned long base2, unsigned long size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
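/*
 * For instance, lmb_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000)
 * returns 1 because the second range starts exactly where the first
 * one ends; swapping the two ranges returns -1; any gap or overlap
 * between them returns 0.
 */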
static long __init lmb_regions_adjacent(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	unsigned long base1 = rgn->region[r1].base;
	unsigned long size1 = rgn->region[r1].size;
	unsigned long base2 = rgn->region[r2].base;
	unsigned long size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}
static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	/* Shift every later entry down one slot, then shrink the count. */
	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}
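/*
 * Example with illustrative values: coalescing r1 = [0x0, 0x100000)
 * and r2 = [0x100000, 0x200000) leaves r1 covering 0x200000 bytes and
 * shifts all later entries down one slot, keeping the table sorted
 * and contiguous.
 */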
/* This routine is called with relocation disabled. */
void __init lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto for the reserved table. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}
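/*
 * After lmb_init(), both tables contain a single zero-size entry at
 * base 0, so the coalescing loop in lmb_add_region() always has at
 * least one region to compare against and needs no empty-table
 * special case.
 */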
/* This routine may be called with relocation disabled. */
void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}
/* This routine is called with relocation disabled. */
static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
				  unsigned long size)
{
	long i, coalesced = 0;
	long adjacent;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			/* New region sits immediately below region i:
			 * grow region i downward. */
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			/* New region sits immediately above region i:
			 * grow region i upward. */
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	/* The grown region may now also touch its upper neighbour. */
	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	/* If the new region is the lowest of all, the loop above exits
	 * without writing slot 0; patch it in here. */
	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}
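/*
 * Behavior sketch (hypothetical values): starting from a fresh table,
 * lmb_add_region(&lmb.memory, 0x0, 0x8000000) fills one slot; a later
 * lmb_add_region(&lmb.memory, 0x8000000, 0x8000000) coalesces into
 * that same slot, while a disjoint lmb_add_region(&lmb.memory,
 * 0x20000000, 0x1000000) is insertion-sorted into a slot of its own.
 */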
/* This routine may be called with relocation disabled. */
long __init lmb_add(unsigned long base, unsigned long size)
{
	struct lmb_region *_rgn = &(lmb.memory);

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}
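/*
 * Typical boot-time call sequence, sketched with illustrative values
 * (the real bases and sizes come from the firmware device tree):
 *
 *	lmb_init();
 *	lmb_add(0x0, 0x40000000);	    <- one call per memory range
 *	lmb_analyze();
 *	lmb_reserve(0x0, 0x400000);	    <- e.g. the kernel image
 *	addr = lmb_alloc(0x4000, 0x1000);   <- early permanent allocation
 */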
long __init lmb_reserve(unsigned long base, unsigned long size)
{
	struct lmb_region *_rgn = &(lmb.reserved);

	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
}
long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
				unsigned long size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}
unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}
unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
				    unsigned long max_addr)
{
	unsigned long alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		      size, max_addr);

	return alloc;
}
unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
				      unsigned long max_addr)
{
	long i, j;
	unsigned long base = 0;

	BUG_ON(0 == size);

	/* On 32-bit, make sure we allocate lowmem */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = __max_low_memory;

	/* Walk the memory regions from the top down. */
	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		unsigned long lmbbase = lmb.memory.region[i].base;
		unsigned long lmbsize = lmb.memory.region[i].size;

		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = _ALIGN_DOWN(base - size, align);
		} else
			continue;

		/* Slide below any reserved region we collide with. */
		while ((lmbbase <= base) &&
		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
			base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
					   align);

		if ((base != 0) && (lmbbase <= base))
			break;
	}

	if (i < 0)
		return 0;

	lmb_add_region(&lmb.reserved, base, size);

	return base;
}
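/*
 * Search example with illustrative numbers: given one memory region
 * [0x0, 0x10000000), a reserved range [0x0, 0x400000), and lowmem
 * covering the whole region, __lmb_alloc_base(0x4000, 0x1000,
 * LMB_ALLOC_ANYWHERE) proposes base = 0xfffc000 (top of the region,
 * aligned down); that does not overlap any reserve, so it is recorded
 * in lmb.reserved and returned.
 */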
/* You must call lmb_analyze() before this. */
unsigned long __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}
unsigned long __init lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}
/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(unsigned long memory_limit)
{
	unsigned long i, limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			/* Fully above the limit: drop the entry and
			 * re-check whatever slid into this slot. */
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}
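/*
 * For example, a memory_limit of 0x20000000 (512MB, e.g. from the
 * mem= command line option) clips the memory table at 512MB, shrinks
 * rmo_size if it now exceeds the first region, and trims or drops any
 * reserved region lying partly or wholly above the limit.
 */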
int __init lmb_is_reserved(unsigned long addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long upper = lmb.reserved.region[i].base +
			lmb.reserved.region[i].size - 1;
		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}
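/*
 * Callers use this late in boot (for instance when deciding whether a
 * page can be handed to the page allocator) to test a single physical
 * address against the reserved table; the linear scan is cheap since
 * the table is small and bounded by MAX_LMB_REGIONS.
 */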