1 /* SPDX-License-Identifier: GPL-2.0-only */
5 #include <console/console.h>
9 #include <imd_private.h>
12 /* For more details on implementation and usage please see the imd.h header. */
/*
 * Compute base + offset through integer arithmetic so that negative
 * offsets are well-defined regardless of the pointed-to type.
 * NOTE(review): the offset-add and return were truncated in the garbled
 * source text and have been reconstructed — confirm against upstream.
 */
static void *relative_pointer(void *base, ssize_t offset)
{
	intptr_t b = (intptr_t)base;

	b += offset;

	return (void *)b;
}
21 static bool imd_root_pointer_valid(const struct imd_root_pointer
*rp
)
23 return !!(rp
->magic
== IMD_ROOT_PTR_MAGIC
);
26 static struct imd_root
*imdr_root(const struct imdr
*imdr
)
32 * The root pointer is relative to the upper limit of the imd. i.e. It sits
33 * just below the upper limit.
35 static struct imd_root_pointer
*imdr_get_root_pointer(const struct imdr
*imdr
)
37 struct imd_root_pointer
*rp
;
39 rp
= relative_pointer((void *)imdr
->limit
, -sizeof(*rp
));
44 static void imd_link_root(struct imd_root_pointer
*rp
, struct imd_root
*r
)
46 rp
->magic
= IMD_ROOT_PTR_MAGIC
;
47 rp
->root_offset
= (int32_t)((intptr_t)r
- (intptr_t)rp
);
50 static struct imd_entry
*root_last_entry(struct imd_root
*r
)
52 return &r
->entries
[r
->num_entries
- 1];
55 static size_t root_num_entries(size_t root_size
)
59 entries_size
= root_size
;
60 entries_size
-= sizeof(struct imd_root_pointer
);
61 entries_size
-= sizeof(struct imd_root
);
63 return entries_size
/ sizeof(struct imd_entry
);
66 static size_t imd_root_data_left(struct imd_root
*r
)
68 struct imd_entry
*last_entry
;
70 last_entry
= root_last_entry(r
);
72 if (r
->max_offset
!= 0)
73 return last_entry
->start_offset
- r
->max_offset
;
78 static bool root_is_locked(const struct imd_root
*r
)
80 return !!(r
->flags
& IMD_FLAG_LOCKED
);
83 static void imd_entry_assign(struct imd_entry
*e
, uint32_t id
,
84 ssize_t offset
, size_t size
)
86 e
->magic
= IMD_ENTRY_MAGIC
;
87 e
->start_offset
= offset
;
92 static void imdr_init(struct imdr
*ir
, void *upper_limit
)
94 uintptr_t limit
= (uintptr_t)upper_limit
;
95 /* Upper limit is aligned down to 4KiB */
96 ir
->limit
= ALIGN_DOWN(limit
, LIMIT_ALIGN
);
100 static int imdr_create_empty(struct imdr
*imdr
, size_t root_size
,
103 struct imd_root_pointer
*rp
;
111 /* root_size and entry_align should be a power of 2. */
112 assert(IS_POWER_OF_2(root_size
));
113 assert(IS_POWER_OF_2(entry_align
));
116 * root_size needs to be large enough to accommodate root pointer and
117 * root book keeping structure. Furthermore, there needs to be a space
118 * for at least one entry covering root region. The caller needs to
119 * ensure there's enough room for tracking individual allocations.
121 if (root_size
< (sizeof(*rp
) + sizeof(*r
) + sizeof(*e
)))
124 /* For simplicity don't allow sizes or alignments to exceed LIMIT_ALIGN.
126 if (root_size
> LIMIT_ALIGN
|| entry_align
> LIMIT_ALIGN
)
129 /* Additionally, don't handle an entry alignment > root_size. */
130 if (entry_align
> root_size
)
133 rp
= imdr_get_root_pointer(imdr
);
135 root_offset
= -(ssize_t
)root_size
;
136 /* Set root pointer. */
137 imdr
->r
= relative_pointer((void *)imdr
->limit
, root_offset
);
139 imd_link_root(rp
, r
);
141 memset(r
, 0, sizeof(*r
));
142 r
->entry_align
= entry_align
;
144 /* Calculate size left for entries. */
145 r
->max_entries
= root_num_entries(root_size
);
147 /* Fill in first entry covering the root region. */
150 imd_entry_assign(e
, CBMEM_ID_IMD_ROOT
, 0, root_size
);
152 printk(BIOS_DEBUG
, "IMD: root @ %p %u entries.\n", r
, r
->max_entries
);
157 static int imdr_recover(struct imdr
*imdr
)
159 struct imd_root_pointer
*rp
;
167 rp
= imdr_get_root_pointer(imdr
);
169 if (!imd_root_pointer_valid(rp
))
172 r
= relative_pointer(rp
, rp
->root_offset
);
174 /* Ensure that root is just under the root pointer */
175 if ((intptr_t)rp
- (intptr_t)&r
->entries
[r
->max_entries
] > sizeof(struct imd_entry
))
178 if (r
->num_entries
> r
->max_entries
)
181 /* Entry alignment should be power of 2. */
182 if (!IS_POWER_OF_2(r
->entry_align
))
185 low_limit
= (uintptr_t)relative_pointer(r
, r
->max_offset
);
187 /* If no max_offset then lowest limit is 0. */
188 if (low_limit
== (uintptr_t)r
)
191 for (i
= 0; i
< r
->num_entries
; i
++) {
192 uintptr_t start_addr
;
193 const struct imd_entry
*e
= &r
->entries
[i
];
195 if (e
->magic
!= IMD_ENTRY_MAGIC
)
198 start_addr
= (uintptr_t)relative_pointer(r
, e
->start_offset
);
199 if (start_addr
< low_limit
)
201 if (start_addr
>= imdr
->limit
||
202 (start_addr
+ e
->size
) > imdr
->limit
)
206 /* Set root pointer. */
212 static const struct imd_entry
*imdr_entry_find(const struct imdr
*imdr
,
225 /* Skip first entry covering the root. */
226 for (i
= 1; i
< r
->num_entries
; i
++) {
227 if (id
!= r
->entries
[i
].id
)
236 static int imdr_limit_size(struct imdr
*imdr
, size_t max_size
)
246 root_size
= imdr
->limit
- (uintptr_t)r
;
248 if (max_size
< root_size
)
251 /* Take into account the root size. */
252 smax_size
= max_size
- root_size
;
253 smax_size
= -smax_size
;
255 r
->max_offset
= smax_size
;
260 static size_t imdr_entry_size(const struct imd_entry
*e
)
265 static void *imdr_entry_at(const struct imdr
*imdr
, const struct imd_entry
*e
)
267 return relative_pointer(imdr_root(imdr
), e
->start_offset
);
270 static struct imd_entry
*imd_entry_add_to_root(struct imd_root
*r
, uint32_t id
,
273 struct imd_entry
*entry
;
274 struct imd_entry
*last_entry
;
278 if (r
->num_entries
== r
->max_entries
)
281 /* Determine total size taken up by entry. */
282 used_size
= ALIGN_UP(size
, r
->entry_align
);
284 /* See if size overflows imd total size. */
285 if (used_size
> imd_root_data_left(r
))
289 * Determine if offset field overflows. All offsets should be lower
290 * than the previous one.
292 last_entry
= root_last_entry(r
);
293 e_offset
= last_entry
->start_offset
;
294 e_offset
-= (ssize_t
)used_size
;
295 if (e_offset
>= last_entry
->start_offset
)
298 entry
= root_last_entry(r
) + 1;
301 imd_entry_assign(entry
, id
, e_offset
, size
);
/*
 * Add an entry to this region unless there is no root or the root has been
 * locked. Returns the new entry, or NULL on failure.
 * NOTE(review): the root lookup and its NULL guard were truncated in the
 * garbled source and have been reconstructed.
 */
static const struct imd_entry *imdr_entry_add(const struct imdr *imdr,
						uint32_t id, size_t size)
{
	struct imd_root *r;

	r = imdr_root(imdr);

	if (r == NULL)
		return NULL;

	if (root_is_locked(r))
		return NULL;

	return imd_entry_add_to_root(r, id, size);
}
322 static bool imdr_has_entry(const struct imdr
*imdr
, const struct imd_entry
*e
)
331 /* Determine if the entry is within this root structure. */
332 idx
= e
- &r
->entries
[0];
333 if (idx
>= r
->num_entries
)
339 static const struct imdr
*imd_entry_to_imdr(const struct imd
*imd
,
340 const struct imd_entry
*entry
)
342 if (imdr_has_entry(&imd
->lg
, entry
))
345 if (imdr_has_entry(&imd
->sm
, entry
))
351 /* Initialize imd handle. */
352 void imd_handle_init(struct imd
*imd
, void *upper_limit
)
354 imdr_init(&imd
->lg
, upper_limit
);
355 imdr_init(&imd
->sm
, NULL
);
358 void imd_handle_init_partial_recovery(struct imd
*imd
)
360 const struct imd_entry
*e
;
361 struct imd_root_pointer
*rp
;
364 if (imd
->lg
.limit
== 0)
367 imd_handle_init(imd
, (void *)imd
->lg
.limit
);
369 /* Initialize root pointer for the large regions. */
371 rp
= imdr_get_root_pointer(imdr
);
372 imdr
->r
= relative_pointer(rp
, rp
->root_offset
);
374 e
= imdr_entry_find(imdr
, SMALL_REGION_ID
);
379 imd
->sm
.limit
= (uintptr_t)imdr_entry_at(imdr
, e
);
380 imd
->sm
.limit
+= imdr_entry_size(e
);
382 rp
= imdr_get_root_pointer(imdr
);
383 imdr
->r
= relative_pointer(rp
, rp
->root_offset
);
386 int imd_create_empty(struct imd
*imd
, size_t root_size
, size_t entry_align
)
388 return imdr_create_empty(&imd
->lg
, root_size
, entry_align
);
391 int imd_create_tiered_empty(struct imd
*imd
,
392 size_t lg_root_size
, size_t lg_entry_align
,
393 size_t sm_root_size
, size_t sm_entry_align
)
395 size_t sm_region_size
;
396 const struct imd_entry
*e
;
401 if (imdr_create_empty(imdr
, lg_root_size
, lg_entry_align
) != 0)
404 /* Calculate the size of the small region to request. */
405 sm_region_size
= root_num_entries(sm_root_size
) * sm_entry_align
;
406 sm_region_size
+= sm_root_size
;
407 sm_region_size
= ALIGN_UP(sm_region_size
, lg_entry_align
);
409 /* Add a new entry to the large region to cover the root and entries. */
410 e
= imdr_entry_add(imdr
, SMALL_REGION_ID
, sm_region_size
);
415 imd
->sm
.limit
= (uintptr_t)imdr_entry_at(imdr
, e
);
416 imd
->sm
.limit
+= sm_region_size
;
418 if (imdr_create_empty(&imd
->sm
, sm_root_size
, sm_entry_align
) != 0 ||
419 imdr_limit_size(&imd
->sm
, sm_region_size
))
424 imd_handle_init(imd
, (void *)imdr
->limit
);
428 int imd_recover(struct imd
*imd
)
430 const struct imd_entry
*e
;
431 uintptr_t small_upper_limit
;
435 if (imdr_recover(imdr
) != 0)
438 /* Determine if small region is present. */
439 e
= imdr_entry_find(imdr
, SMALL_REGION_ID
);
444 small_upper_limit
= (uintptr_t)imdr_entry_at(imdr
, e
);
445 small_upper_limit
+= imdr_entry_size(e
);
447 imd
->sm
.limit
= small_upper_limit
;
449 /* Tear down any changes on failure. */
450 if (imdr_recover(&imd
->sm
) != 0) {
451 imd_handle_init(imd
, (void *)imd
->lg
.limit
);
458 int imd_limit_size(struct imd
*imd
, size_t max_size
)
460 return imdr_limit_size(&imd
->lg
, max_size
);
463 int imd_lockdown(struct imd
*imd
)
467 r
= imdr_root(&imd
->lg
);
471 r
->flags
|= IMD_FLAG_LOCKED
;
473 r
= imdr_root(&imd
->sm
);
475 r
->flags
|= IMD_FLAG_LOCKED
;
480 int imd_region_used(struct imd
*imd
, void **base
, size_t *size
)
490 r
= imdr_root(&imd
->lg
);
495 /* Use last entry to obtain lowest address. */
496 e
= root_last_entry(r
);
498 low_addr
= relative_pointer(r
, e
->start_offset
);
500 /* Total size used is the last entry's base up to the limit. */
501 sz_used
= imd
->lg
.limit
- (uintptr_t)low_addr
;
509 const struct imd_entry
*imd_entry_add(const struct imd
*imd
, uint32_t id
,
513 const struct imdr
*imdr
;
514 const struct imd_entry
*e
= NULL
;
517 * Determine if requested size is less than 1/4 of small data
523 /* No small region. Use the large region. */
525 return imdr_entry_add(&imd
->lg
, id
, size
);
526 else if (size
<= r
->entry_align
|| size
<= imd_root_data_left(r
) / 4)
527 e
= imdr_entry_add(imdr
, id
, size
);
529 /* Fall back on large region allocation. */
531 e
= imdr_entry_add(&imd
->lg
, id
, size
);
536 const struct imd_entry
*imd_entry_find(const struct imd
*imd
, uint32_t id
)
538 const struct imd_entry
*e
;
540 /* Many of the smaller allocations are used a lot. Therefore, try
541 * the small region first. */
542 e
= imdr_entry_find(&imd
->sm
, id
);
545 e
= imdr_entry_find(&imd
->lg
, id
);
/*
 * Return the existing entry for id, or allocate a new one of the given
 * size when absent.
 * NOTE(review): the found-entry early return was truncated in the garbled
 * source and has been reconstructed.
 */
const struct imd_entry *imd_entry_find_or_add(const struct imd *imd,
						uint32_t id, size_t size)
{
	const struct imd_entry *e;

	e = imd_entry_find(imd, id);

	if (e != NULL)
		return e;

	return imd_entry_add(imd, id, size);
}
/* Public wrapper for an entry's recorded size. */
size_t imd_entry_size(const struct imd_entry *entry)
{
	return imdr_entry_size(entry);
}
/*
 * Resolve an entry to its data pointer, or NULL if the entry does not
 * belong to either region of this imd.
 * NOTE(review): the NULL guard on the owning region was truncated in the
 * garbled source and has been reconstructed.
 */
void *imd_entry_at(const struct imd *imd, const struct imd_entry *entry)
{
	const struct imdr *imdr;

	imdr = imd_entry_to_imdr(imd, entry);

	if (imdr == NULL)
		return NULL;

	return imdr_entry_at(imdr, entry);
}
580 uint32_t imd_entry_id(const struct imd_entry
*entry
)
585 int imd_entry_remove(const struct imd
*imd
, const struct imd_entry
*entry
)
588 const struct imdr
*imdr
;
590 imdr
= imd_entry_to_imdr(imd
, entry
);
597 if (root_is_locked(r
))
600 if (entry
!= root_last_entry(r
))
603 /* Don't remove entry covering root region */
604 if (r
->num_entries
== 1)
612 static void imdr_print_entries(const struct imdr
*imdr
, const char *indent
,
613 const struct imd_lookup
*lookup
, size_t size
)
624 for (i
= 0; i
< r
->num_entries
; i
++) {
625 const char *name
= NULL
;
626 const struct imd_entry
*e
= &r
->entries
[i
];
628 for (j
= 0; j
< size
; j
++) {
629 if (lookup
[j
].id
== e
->id
) {
630 name
= lookup
[j
].name
;
635 printk(BIOS_DEBUG
, "%s", indent
);
638 printk(BIOS_DEBUG
, "%08x ", e
->id
);
640 printk(BIOS_DEBUG
, "%s", name
);
641 printk(BIOS_DEBUG
, "%2zu. ", i
);
642 printk(BIOS_DEBUG
, "%p ", imdr_entry_at(imdr
, e
));
643 printk(BIOS_DEBUG
, "0x%08zx\n", imdr_entry_size(e
));
647 int imd_print_entries(const struct imd
*imd
, const struct imd_lookup
*lookup
,
650 if (imdr_root(&imd
->lg
) == NULL
)
653 imdr_print_entries(&imd
->lg
, "", lookup
, size
);
654 if (imdr_root(&imd
->sm
) != NULL
) {
655 printk(BIOS_DEBUG
, "IMD small region:\n");
656 imdr_print_entries(&imd
->sm
, " ", lookup
, size
);
662 int imd_cursor_init(const struct imd
*imd
, struct imd_cursor
*cursor
)
664 if (imd
== NULL
|| cursor
== NULL
)
667 memset(cursor
, 0, sizeof(*cursor
));
669 cursor
->imdr
[0] = &imd
->lg
;
670 cursor
->imdr
[1] = &imd
->sm
;
/*
 * imd_cursor_next(): yields the next entry across the cursor's imdr[]
 * regions; when the current root's entries are exhausted it advances
 * current_imdr, resets current_entry, and recurses into the next region.
 * NOTE(review): this chunk is garbled by extraction — original line
 * numbers are fused into the code and statements are split across lines.
 * The function also continues past the visible end of the chunk (its
 * final return is not shown), so the code below is left byte-identical.
 */
675 const struct imd_entry
*imd_cursor_next(struct imd_cursor
*cursor
)
678 const struct imd_entry
*e
;
/* Stop once every region in cursor->imdr[] has been visited. */
680 if (cursor
->current_imdr
>= ARRAY_SIZE(cursor
->imdr
))
/* Root of the region currently being iterated. */
683 r
= imdr_root(cursor
->imdr
[cursor
->current_imdr
]);
/* Current region exhausted: move to the next one and recurse. */
688 if (cursor
->current_entry
>= r
->num_entries
) {
690 cursor
->current_imdr
++;
691 cursor
->current_entry
= 0;
692 return imd_cursor_next(cursor
);
/* Fetch the current entry and advance for the next call. */
695 e
= &r
->entries
[cursor
->current_entry
];
696 cursor
->current_entry
++;