mb/hardkernel/odroid-h4: Correct number of jacks in hda_verb.c
[coreboot.git] / src / lib / imd.c
blob5cba121df2e06bd71891f0b59d454ec95ccf2920
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #include <assert.h>
4 #include <cbmem.h>
5 #include <console/console.h>
6 #include <imd.h>
7 #include <string.h>
8 #include <types.h>
9 #include <imd_private.h>
12 /* For more details on implementation and usage please see the imd.h header. */
/* Return base displaced by offset bytes, avoiding arithmetic on void *. */
static void *relative_pointer(void *base, ssize_t offset)
{
	intptr_t addr = (intptr_t)base;

	addr += offset;
	return (void *)addr;
}
21 static bool imd_root_pointer_valid(const struct imd_root_pointer *rp)
23 return !!(rp->magic == IMD_ROOT_PTR_MAGIC);
26 static struct imd_root *imdr_root(const struct imdr *imdr)
28 return imdr->r;
32 * The root pointer is relative to the upper limit of the imd. i.e. It sits
33 * just below the upper limit.
35 static struct imd_root_pointer *imdr_get_root_pointer(const struct imdr *imdr)
37 struct imd_root_pointer *rp;
39 rp = relative_pointer((void *)imdr->limit, -sizeof(*rp));
41 return rp;
44 static void imd_link_root(struct imd_root_pointer *rp, struct imd_root *r)
46 rp->magic = IMD_ROOT_PTR_MAGIC;
47 rp->root_offset = (int32_t)((intptr_t)r - (intptr_t)rp);
50 static struct imd_entry *root_last_entry(struct imd_root *r)
52 return &r->entries[r->num_entries - 1];
55 static size_t root_num_entries(size_t root_size)
57 size_t entries_size;
59 entries_size = root_size;
60 entries_size -= sizeof(struct imd_root_pointer);
61 entries_size -= sizeof(struct imd_root);
63 return entries_size / sizeof(struct imd_entry);
66 static size_t imd_root_data_left(struct imd_root *r)
68 struct imd_entry *last_entry;
70 last_entry = root_last_entry(r);
72 if (r->max_offset != 0)
73 return last_entry->start_offset - r->max_offset;
75 return ~(size_t)0;
78 static bool root_is_locked(const struct imd_root *r)
80 return !!(r->flags & IMD_FLAG_LOCKED);
83 static void imd_entry_assign(struct imd_entry *e, uint32_t id,
84 ssize_t offset, size_t size)
86 e->magic = IMD_ENTRY_MAGIC;
87 e->start_offset = offset;
88 e->size = size;
89 e->id = id;
92 static void imdr_init(struct imdr *ir, void *upper_limit)
94 uintptr_t limit = (uintptr_t)upper_limit;
95 /* Upper limit is aligned down to 4KiB */
96 ir->limit = ALIGN_DOWN(limit, LIMIT_ALIGN);
97 ir->r = NULL;
100 static int imdr_create_empty(struct imdr *imdr, size_t root_size,
101 size_t entry_align)
103 struct imd_root_pointer *rp;
104 struct imd_root *r;
105 struct imd_entry *e;
106 ssize_t root_offset;
108 if (!imdr->limit)
109 return -1;
111 /* root_size and entry_align should be a power of 2. */
112 assert(IS_POWER_OF_2(root_size));
113 assert(IS_POWER_OF_2(entry_align));
116 * root_size needs to be large enough to accommodate root pointer and
117 * root book keeping structure. Furthermore, there needs to be a space
118 * for at least one entry covering root region. The caller needs to
119 * ensure there's enough room for tracking individual allocations.
121 if (root_size < (sizeof(*rp) + sizeof(*r) + sizeof(*e)))
122 return -1;
124 /* For simplicity don't allow sizes or alignments to exceed LIMIT_ALIGN.
126 if (root_size > LIMIT_ALIGN || entry_align > LIMIT_ALIGN)
127 return -1;
129 /* Additionally, don't handle an entry alignment > root_size. */
130 if (entry_align > root_size)
131 return -1;
133 rp = imdr_get_root_pointer(imdr);
135 root_offset = -(ssize_t)root_size;
136 /* Set root pointer. */
137 imdr->r = relative_pointer((void *)imdr->limit, root_offset);
138 r = imdr_root(imdr);
139 imd_link_root(rp, r);
141 memset(r, 0, sizeof(*r));
142 r->entry_align = entry_align;
144 /* Calculate size left for entries. */
145 r->max_entries = root_num_entries(root_size);
147 /* Fill in first entry covering the root region. */
148 r->num_entries = 1;
149 e = &r->entries[0];
150 imd_entry_assign(e, CBMEM_ID_IMD_ROOT, 0, root_size);
152 printk(BIOS_DEBUG, "IMD: root @ %p %u entries.\n", r, r->max_entries);
154 return 0;
157 static int imdr_recover(struct imdr *imdr)
159 struct imd_root_pointer *rp;
160 struct imd_root *r;
161 uintptr_t low_limit;
162 size_t i;
164 if (!imdr->limit)
165 return -1;
167 rp = imdr_get_root_pointer(imdr);
169 if (!imd_root_pointer_valid(rp))
170 return -1;
172 r = relative_pointer(rp, rp->root_offset);
174 /* Ensure that root is just under the root pointer */
175 if ((intptr_t)rp - (intptr_t)&r->entries[r->max_entries] > sizeof(struct imd_entry))
176 return -1;
178 if (r->num_entries > r->max_entries)
179 return -1;
181 /* Entry alignment should be power of 2. */
182 if (!IS_POWER_OF_2(r->entry_align))
183 return -1;
185 low_limit = (uintptr_t)relative_pointer(r, r->max_offset);
187 /* If no max_offset then lowest limit is 0. */
188 if (low_limit == (uintptr_t)r)
189 low_limit = 0;
191 for (i = 0; i < r->num_entries; i++) {
192 uintptr_t start_addr;
193 const struct imd_entry *e = &r->entries[i];
195 if (e->magic != IMD_ENTRY_MAGIC)
196 return -1;
198 start_addr = (uintptr_t)relative_pointer(r, e->start_offset);
199 if (start_addr < low_limit)
200 return -1;
201 if (start_addr >= imdr->limit ||
202 (start_addr + e->size) > imdr->limit)
203 return -1;
206 /* Set root pointer. */
207 imdr->r = r;
209 return 0;
212 static const struct imd_entry *imdr_entry_find(const struct imdr *imdr,
213 uint32_t id)
215 struct imd_root *r;
216 struct imd_entry *e;
217 size_t i;
219 r = imdr_root(imdr);
221 if (r == NULL)
222 return NULL;
224 e = NULL;
225 /* Skip first entry covering the root. */
226 for (i = 1; i < r->num_entries; i++) {
227 if (id != r->entries[i].id)
228 continue;
229 e = &r->entries[i];
230 break;
233 return e;
236 static int imdr_limit_size(struct imdr *imdr, size_t max_size)
238 struct imd_root *r;
239 ssize_t smax_size;
240 size_t root_size;
242 r = imdr_root(imdr);
243 if (r == NULL)
244 return -1;
246 root_size = imdr->limit - (uintptr_t)r;
248 if (max_size < root_size)
249 return -1;
251 /* Take into account the root size. */
252 smax_size = max_size - root_size;
253 smax_size = -smax_size;
255 r->max_offset = smax_size;
257 return 0;
260 static size_t imdr_entry_size(const struct imd_entry *e)
262 return e->size;
265 static void *imdr_entry_at(const struct imdr *imdr, const struct imd_entry *e)
267 return relative_pointer(imdr_root(imdr), e->start_offset);
270 static struct imd_entry *imd_entry_add_to_root(struct imd_root *r, uint32_t id,
271 size_t size)
273 struct imd_entry *entry;
274 struct imd_entry *last_entry;
275 ssize_t e_offset;
276 size_t used_size;
278 if (r->num_entries == r->max_entries)
279 return NULL;
281 /* Determine total size taken up by entry. */
282 used_size = ALIGN_UP(size, r->entry_align);
284 /* See if size overflows imd total size. */
285 if (used_size > imd_root_data_left(r))
286 return NULL;
289 * Determine if offset field overflows. All offsets should be lower
290 * than the previous one.
292 last_entry = root_last_entry(r);
293 e_offset = last_entry->start_offset;
294 e_offset -= (ssize_t)used_size;
295 if (e_offset >= last_entry->start_offset)
296 return NULL;
298 entry = root_last_entry(r) + 1;
299 r->num_entries++;
301 imd_entry_assign(entry, id, e_offset, size);
303 return entry;
306 static const struct imd_entry *imdr_entry_add(const struct imdr *imdr,
307 uint32_t id, size_t size)
309 struct imd_root *r;
311 r = imdr_root(imdr);
313 if (r == NULL)
314 return NULL;
316 if (root_is_locked(r))
317 return NULL;
319 return imd_entry_add_to_root(r, id, size);
322 static bool imdr_has_entry(const struct imdr *imdr, const struct imd_entry *e)
324 struct imd_root *r;
325 size_t idx;
327 r = imdr_root(imdr);
328 if (r == NULL)
329 return false;
331 /* Determine if the entry is within this root structure. */
332 idx = e - &r->entries[0];
333 if (idx >= r->num_entries)
334 return false;
336 return true;
339 static const struct imdr *imd_entry_to_imdr(const struct imd *imd,
340 const struct imd_entry *entry)
342 if (imdr_has_entry(&imd->lg, entry))
343 return &imd->lg;
345 if (imdr_has_entry(&imd->sm, entry))
346 return &imd->sm;
348 return NULL;
351 /* Initialize imd handle. */
352 void imd_handle_init(struct imd *imd, void *upper_limit)
354 imdr_init(&imd->lg, upper_limit);
355 imdr_init(&imd->sm, NULL);
358 void imd_handle_init_partial_recovery(struct imd *imd)
360 const struct imd_entry *e;
361 struct imd_root_pointer *rp;
362 struct imdr *imdr;
364 if (imd->lg.limit == 0)
365 return;
367 imd_handle_init(imd, (void *)imd->lg.limit);
369 /* Initialize root pointer for the large regions. */
370 imdr = &imd->lg;
371 rp = imdr_get_root_pointer(imdr);
372 imdr->r = relative_pointer(rp, rp->root_offset);
374 e = imdr_entry_find(imdr, SMALL_REGION_ID);
376 if (e == NULL)
377 return;
379 imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
380 imd->sm.limit += imdr_entry_size(e);
381 imdr = &imd->sm;
382 rp = imdr_get_root_pointer(imdr);
383 imdr->r = relative_pointer(rp, rp->root_offset);
386 int imd_create_empty(struct imd *imd, size_t root_size, size_t entry_align)
388 return imdr_create_empty(&imd->lg, root_size, entry_align);
391 int imd_create_tiered_empty(struct imd *imd,
392 size_t lg_root_size, size_t lg_entry_align,
393 size_t sm_root_size, size_t sm_entry_align)
395 size_t sm_region_size;
396 const struct imd_entry *e;
397 struct imdr *imdr;
399 imdr = &imd->lg;
401 if (imdr_create_empty(imdr, lg_root_size, lg_entry_align) != 0)
402 return -1;
404 /* Calculate the size of the small region to request. */
405 sm_region_size = root_num_entries(sm_root_size) * sm_entry_align;
406 sm_region_size += sm_root_size;
407 sm_region_size = ALIGN_UP(sm_region_size, lg_entry_align);
409 /* Add a new entry to the large region to cover the root and entries. */
410 e = imdr_entry_add(imdr, SMALL_REGION_ID, sm_region_size);
412 if (e == NULL)
413 goto fail;
415 imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
416 imd->sm.limit += sm_region_size;
418 if (imdr_create_empty(&imd->sm, sm_root_size, sm_entry_align) != 0 ||
419 imdr_limit_size(&imd->sm, sm_region_size))
420 goto fail;
422 return 0;
423 fail:
424 imd_handle_init(imd, (void *)imdr->limit);
425 return -1;
428 int imd_recover(struct imd *imd)
430 const struct imd_entry *e;
431 uintptr_t small_upper_limit;
432 struct imdr *imdr;
434 imdr = &imd->lg;
435 if (imdr_recover(imdr) != 0)
436 return -1;
438 /* Determine if small region is present. */
439 e = imdr_entry_find(imdr, SMALL_REGION_ID);
441 if (e == NULL)
442 return 0;
444 small_upper_limit = (uintptr_t)imdr_entry_at(imdr, e);
445 small_upper_limit += imdr_entry_size(e);
447 imd->sm.limit = small_upper_limit;
449 /* Tear down any changes on failure. */
450 if (imdr_recover(&imd->sm) != 0) {
451 imd_handle_init(imd, (void *)imd->lg.limit);
452 return -1;
455 return 0;
458 int imd_limit_size(struct imd *imd, size_t max_size)
460 return imdr_limit_size(&imd->lg, max_size);
463 int imd_lockdown(struct imd *imd)
465 struct imd_root *r;
467 r = imdr_root(&imd->lg);
468 if (r == NULL)
469 return -1;
471 r->flags |= IMD_FLAG_LOCKED;
473 r = imdr_root(&imd->sm);
474 if (r != NULL)
475 r->flags |= IMD_FLAG_LOCKED;
477 return 0;
480 int imd_region_used(struct imd *imd, void **base, size_t *size)
482 struct imd_root *r;
483 struct imd_entry *e;
484 void *low_addr;
485 size_t sz_used;
487 if (!imd->lg.limit)
488 return -1;
490 r = imdr_root(&imd->lg);
492 if (r == NULL)
493 return -1;
495 /* Use last entry to obtain lowest address. */
496 e = root_last_entry(r);
498 low_addr = relative_pointer(r, e->start_offset);
500 /* Total size used is the last entry's base up to the limit. */
501 sz_used = imd->lg.limit - (uintptr_t)low_addr;
503 *base = low_addr;
504 *size = sz_used;
506 return 0;
509 const struct imd_entry *imd_entry_add(const struct imd *imd, uint32_t id,
510 size_t size)
512 struct imd_root *r;
513 const struct imdr *imdr;
514 const struct imd_entry *e = NULL;
517 * Determine if requested size is less than 1/4 of small data
518 * region is left.
520 imdr = &imd->sm;
521 r = imdr_root(imdr);
523 /* No small region. Use the large region. */
524 if (r == NULL)
525 return imdr_entry_add(&imd->lg, id, size);
526 else if (size <= r->entry_align || size <= imd_root_data_left(r) / 4)
527 e = imdr_entry_add(imdr, id, size);
529 /* Fall back on large region allocation. */
530 if (e == NULL)
531 e = imdr_entry_add(&imd->lg, id, size);
533 return e;
536 const struct imd_entry *imd_entry_find(const struct imd *imd, uint32_t id)
538 const struct imd_entry *e;
540 /* Many of the smaller allocations are used a lot. Therefore, try
541 * the small region first. */
542 e = imdr_entry_find(&imd->sm, id);
544 if (e == NULL)
545 e = imdr_entry_find(&imd->lg, id);
547 return e;
550 const struct imd_entry *imd_entry_find_or_add(const struct imd *imd,
551 uint32_t id, size_t size)
553 const struct imd_entry *e;
555 e = imd_entry_find(imd, id);
557 if (e != NULL)
558 return e;
560 return imd_entry_add(imd, id, size);
563 size_t imd_entry_size(const struct imd_entry *entry)
565 return imdr_entry_size(entry);
568 void *imd_entry_at(const struct imd *imd, const struct imd_entry *entry)
570 const struct imdr *imdr;
572 imdr = imd_entry_to_imdr(imd, entry);
574 if (imdr == NULL)
575 return NULL;
577 return imdr_entry_at(imdr, entry);
580 uint32_t imd_entry_id(const struct imd_entry *entry)
582 return entry->id;
585 int imd_entry_remove(const struct imd *imd, const struct imd_entry *entry)
587 struct imd_root *r;
588 const struct imdr *imdr;
590 imdr = imd_entry_to_imdr(imd, entry);
592 if (imdr == NULL)
593 return -1;
595 r = imdr_root(imdr);
597 if (root_is_locked(r))
598 return -1;
600 if (entry != root_last_entry(r))
601 return -1;
603 /* Don't remove entry covering root region */
604 if (r->num_entries == 1)
605 return -1;
607 r->num_entries--;
609 return 0;
612 static void imdr_print_entries(const struct imdr *imdr, const char *indent,
613 const struct imd_lookup *lookup, size_t size)
615 struct imd_root *r;
616 size_t i;
617 size_t j;
619 if (imdr == NULL)
620 return;
622 r = imdr_root(imdr);
624 for (i = 0; i < r->num_entries; i++) {
625 const char *name = NULL;
626 const struct imd_entry *e = &r->entries[i];
628 for (j = 0; j < size; j++) {
629 if (lookup[j].id == e->id) {
630 name = lookup[j].name;
631 break;
635 printk(BIOS_DEBUG, "%s", indent);
637 if (name == NULL)
638 printk(BIOS_DEBUG, "%08x ", e->id);
639 else
640 printk(BIOS_DEBUG, "%s", name);
641 printk(BIOS_DEBUG, "%2zu. ", i);
642 printk(BIOS_DEBUG, "%p ", imdr_entry_at(imdr, e));
643 printk(BIOS_DEBUG, "0x%08zx\n", imdr_entry_size(e));
647 int imd_print_entries(const struct imd *imd, const struct imd_lookup *lookup,
648 size_t size)
650 if (imdr_root(&imd->lg) == NULL)
651 return -1;
653 imdr_print_entries(&imd->lg, "", lookup, size);
654 if (imdr_root(&imd->sm) != NULL) {
655 printk(BIOS_DEBUG, "IMD small region:\n");
656 imdr_print_entries(&imd->sm, " ", lookup, size);
659 return 0;
662 int imd_cursor_init(const struct imd *imd, struct imd_cursor *cursor)
664 if (imd == NULL || cursor == NULL)
665 return -1;
667 memset(cursor, 0, sizeof(*cursor));
669 cursor->imdr[0] = &imd->lg;
670 cursor->imdr[1] = &imd->sm;
672 return 0;
675 const struct imd_entry *imd_cursor_next(struct imd_cursor *cursor)
677 struct imd_root *r;
678 const struct imd_entry *e;
680 if (cursor->current_imdr >= ARRAY_SIZE(cursor->imdr))
681 return NULL;
683 r = imdr_root(cursor->imdr[cursor->current_imdr]);
685 if (r == NULL)
686 return NULL;
688 if (cursor->current_entry >= r->num_entries) {
689 /* Try next imdr. */
690 cursor->current_imdr++;
691 cursor->current_entry = 0;
692 return imd_cursor_next(cursor);
695 e = &r->entries[cursor->current_entry];
696 cursor->current_entry++;
698 return e;