mtw(4) remove misplaced DEBUG_FLAGS
[freebsd/src.git] / sys / arm64 / acpica / acpi_iort.c
bloba0e24788b7754d9cb3f70bc8ae85559a4144e55a
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2018 Marvell International Ltd.
6 * Author: Jayachandran C Nair <jchandra@freebsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
30 #include "opt_acpi.h"
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
37 #include <machine/intr.h>
39 #include <contrib/dev/acpica/include/acpi.h>
40 #include <contrib/dev/acpica/include/accommon.h>
41 #include <contrib/dev/acpica/include/actables.h>
43 #include <dev/acpica/acpivar.h>
/*
 * Track next XREF available for ITS groups.
 */
static u_int acpi_its_xref = ACPI_MSI_XREF;
51 * Some types of IORT nodes have a set of mappings. Each of them map
52 * a range of device IDs [base..end] from the current node to another
53 * node. The corresponding device IDs on destination node starts at
54 * outbase.
56 struct iort_map_entry {
57 u_int base;
58 u_int end;
59 u_int outbase;
60 u_int flags;
61 u_int out_node_offset;
62 struct iort_node *out_node;
66 * The ITS group node does not have any outgoing mappings. It has a
67 * of a list of GIC ITS blocks which can handle the device ID. We
68 * will store the PIC XREF used by the block and the blocks proximity
69 * data here, so that it can be retrieved together.
71 struct iort_its_entry {
72 u_int its_id;
73 u_int xref;
74 int pxm;
77 struct iort_named_component
79 UINT32 NodeFlags;
80 UINT64 MemoryProperties;
81 UINT8 MemoryAddressLimit;
82 char DeviceName[32]; /* Path of namespace object */
86 * IORT node. Each node has some device specific data depending on the
87 * type of the node. The node can also have a set of mappings, OR in
88 * case of ITS group nodes a set of ITS entries.
89 * The nodes are kept in a TAILQ by type.
91 struct iort_node {
92 TAILQ_ENTRY(iort_node) next; /* next entry with same type */
93 enum AcpiIortNodeType type; /* ACPI type */
94 u_int node_offset; /* offset in IORT - node ID */
95 u_int nentries; /* items in array below */
96 u_int usecount; /* for bookkeeping */
97 u_int revision; /* node revision */
98 union {
99 struct iort_map_entry *mappings; /* node mappings */
100 struct iort_its_entry *its; /* ITS IDs array */
101 } entries;
102 union {
103 ACPI_IORT_ROOT_COMPLEX pci_rc; /* PCI root complex */
104 ACPI_IORT_SMMU smmu;
105 ACPI_IORT_SMMU_V3 smmu_v3;
106 struct iort_named_component named_comp;
107 } data;
/* Lists for each of the types. */
static TAILQ_HEAD(, iort_node) pci_nodes = TAILQ_HEAD_INITIALIZER(pci_nodes);
static TAILQ_HEAD(, iort_node) smmu_nodes = TAILQ_HEAD_INITIALIZER(smmu_nodes);
static TAILQ_HEAD(, iort_node) its_groups = TAILQ_HEAD_INITIALIZER(its_groups);
static TAILQ_HEAD(, iort_node) named_nodes = TAILQ_HEAD_INITIALIZER(named_nodes);
116 static int
117 iort_entry_get_id_mapping_index(struct iort_node *node)
120 switch(node->type) {
121 case ACPI_IORT_NODE_SMMU_V3:
122 /* The ID mapping field was added in version 1 */
123 if (node->revision < 1)
124 return (-1);
127 * If all the control interrupts are GISCV based the ID
128 * mapping field is ignored.
130 if (node->data.smmu_v3.EventGsiv != 0 &&
131 node->data.smmu_v3.PriGsiv != 0 &&
132 node->data.smmu_v3.GerrGsiv != 0 &&
133 node->data.smmu_v3.SyncGsiv != 0)
134 return (-1);
136 if (node->data.smmu_v3.IdMappingIndex >= node->nentries)
137 return (-1);
139 return (node->data.smmu_v3.IdMappingIndex);
140 case ACPI_IORT_NODE_PMCG:
141 return (0);
142 default:
143 break;
146 return (-1);
150 * Lookup an ID in the mappings array. If successful, map the input ID
151 * to the output ID and return the output node found.
153 static struct iort_node *
154 iort_entry_lookup(struct iort_node *node, u_int id, u_int *outid)
156 struct iort_map_entry *entry;
157 int i, id_map;
159 id_map = iort_entry_get_id_mapping_index(node);
160 entry = node->entries.mappings;
161 for (i = 0; i < node->nentries; i++, entry++) {
162 if (i == id_map)
163 continue;
164 if (entry->base <= id && id <= entry->end)
165 break;
167 if (i == node->nentries)
168 return (NULL);
169 if ((entry->flags & ACPI_IORT_ID_SINGLE_MAPPING) == 0)
170 *outid = entry->outbase + (id - entry->base);
171 else
172 *outid = entry->outbase;
173 return (entry->out_node);
177 * Perform an additional lookup in case of SMMU node and ITS outtype.
179 static struct iort_node *
180 iort_smmu_trymap(struct iort_node *node, u_int outtype, u_int *outid)
182 /* Original node can be not found. */
183 if (!node)
184 return (NULL);
186 /* Node can be SMMU or ITS. If SMMU, we need another lookup. */
187 if (outtype == ACPI_IORT_NODE_ITS_GROUP &&
188 (node->type == ACPI_IORT_NODE_SMMU_V3 ||
189 node->type == ACPI_IORT_NODE_SMMU)) {
190 node = iort_entry_lookup(node, *outid, outid);
191 if (node == NULL)
192 return (NULL);
195 KASSERT(node->type == outtype, ("mapping fail"));
196 return (node);
200 * Map a PCI RID to a SMMU node or an ITS node, based on outtype.
202 static struct iort_node *
203 iort_pci_rc_map(u_int seg, u_int rid, u_int outtype, u_int *outid)
205 struct iort_node *node, *out_node;
206 u_int nxtid;
208 out_node = NULL;
209 TAILQ_FOREACH(node, &pci_nodes, next) {
210 if (node->data.pci_rc.PciSegmentNumber != seg)
211 continue;
212 out_node = iort_entry_lookup(node, rid, &nxtid);
213 if (out_node != NULL)
214 break;
217 out_node = iort_smmu_trymap(out_node, outtype, &nxtid);
218 if (out_node)
219 *outid = nxtid;
221 return (out_node);
225 * Map a named component node to a SMMU node or an ITS node, based on outtype.
227 static struct iort_node *
228 iort_named_comp_map(const char *devname, u_int rid, u_int outtype, u_int *outid)
230 struct iort_node *node, *out_node;
231 u_int nxtid;
233 out_node = NULL;
234 TAILQ_FOREACH(node, &named_nodes, next) {
235 if (strstr(node->data.named_comp.DeviceName, devname) == NULL)
236 continue;
237 out_node = iort_entry_lookup(node, rid, &nxtid);
238 if (out_node != NULL)
239 break;
242 out_node = iort_smmu_trymap(out_node, outtype, &nxtid);
243 if (out_node)
244 *outid = nxtid;
246 return (out_node);
#ifdef notyet
/*
 * Not implemented, map a PCIe device to the SMMU it is associated with.
 */
int
acpi_iort_map_smmu(u_int seg, u_int devid, void **smmu, u_int *sid)
{
	/* XXX: convert oref to SMMU device */
	return (ENXIO);
}
#endif
262 * Allocate memory for a node, initialize and copy mappings. 'start'
263 * argument provides the table start used to calculate the node offset.
265 static void
266 iort_copy_data(struct iort_node *node, ACPI_IORT_NODE *node_entry)
268 ACPI_IORT_ID_MAPPING *map_entry;
269 struct iort_map_entry *mapping;
270 int i;
272 map_entry = ACPI_ADD_PTR(ACPI_IORT_ID_MAPPING, node_entry,
273 node_entry->MappingOffset);
274 node->nentries = node_entry->MappingCount;
275 node->usecount = 0;
276 mapping = malloc(sizeof(*mapping) * node->nentries, M_DEVBUF,
277 M_WAITOK | M_ZERO);
278 node->entries.mappings = mapping;
279 for (i = 0; i < node->nentries; i++, mapping++, map_entry++) {
280 mapping->base = map_entry->InputBase;
282 * IdCount means "The number of IDs in the range minus one" (ARM DEN 0049D).
283 * We use <= for comparison against this field, so don't add one here.
285 mapping->end = map_entry->InputBase + map_entry->IdCount;
286 mapping->outbase = map_entry->OutputBase;
287 mapping->out_node_offset = map_entry->OutputReference;
288 mapping->flags = map_entry->Flags;
289 mapping->out_node = NULL;
294 * Allocate and copy an ITS group.
296 static void
297 iort_copy_its(struct iort_node *node, ACPI_IORT_NODE *node_entry)
299 struct iort_its_entry *its;
300 ACPI_IORT_ITS_GROUP *itsg_entry;
301 UINT32 *id;
302 int i;
304 itsg_entry = (ACPI_IORT_ITS_GROUP *)node_entry->NodeData;
305 node->nentries = itsg_entry->ItsCount;
306 node->usecount = 0;
307 its = malloc(sizeof(*its) * node->nentries, M_DEVBUF, M_WAITOK | M_ZERO);
308 node->entries.its = its;
309 id = &itsg_entry->Identifiers[0];
310 for (i = 0; i < node->nentries; i++, its++, id++) {
311 its->its_id = *id;
312 its->pxm = -1;
313 its->xref = 0;
318 * Walk the IORT table and add nodes to corresponding list.
320 static void
321 iort_add_nodes(ACPI_IORT_NODE *node_entry, u_int node_offset)
323 ACPI_IORT_ROOT_COMPLEX *pci_rc;
324 ACPI_IORT_SMMU *smmu;
325 ACPI_IORT_SMMU_V3 *smmu_v3;
326 ACPI_IORT_NAMED_COMPONENT *named_comp;
327 struct iort_node *node;
329 node = malloc(sizeof(*node), M_DEVBUF, M_WAITOK | M_ZERO);
330 node->type = node_entry->Type;
331 node->node_offset = node_offset;
332 node->revision = node_entry->Revision;
334 /* copy nodes depending on type */
335 switch(node_entry->Type) {
336 case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
337 pci_rc = (ACPI_IORT_ROOT_COMPLEX *)node_entry->NodeData;
338 memcpy(&node->data.pci_rc, pci_rc, sizeof(*pci_rc));
339 iort_copy_data(node, node_entry);
340 TAILQ_INSERT_TAIL(&pci_nodes, node, next);
341 break;
342 case ACPI_IORT_NODE_SMMU:
343 smmu = (ACPI_IORT_SMMU *)node_entry->NodeData;
344 memcpy(&node->data.smmu, smmu, sizeof(*smmu));
345 iort_copy_data(node, node_entry);
346 TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
347 break;
348 case ACPI_IORT_NODE_SMMU_V3:
349 smmu_v3 = (ACPI_IORT_SMMU_V3 *)node_entry->NodeData;
350 memcpy(&node->data.smmu_v3, smmu_v3, sizeof(*smmu_v3));
351 iort_copy_data(node, node_entry);
352 TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
353 break;
354 case ACPI_IORT_NODE_ITS_GROUP:
355 iort_copy_its(node, node_entry);
356 TAILQ_INSERT_TAIL(&its_groups, node, next);
357 break;
358 case ACPI_IORT_NODE_NAMED_COMPONENT:
359 named_comp = (ACPI_IORT_NAMED_COMPONENT *)node_entry->NodeData;
360 memcpy(&node->data.named_comp, named_comp, sizeof(*named_comp));
362 /* Copy name of the node separately. */
363 strncpy(node->data.named_comp.DeviceName,
364 named_comp->DeviceName,
365 sizeof(node->data.named_comp.DeviceName));
366 node->data.named_comp.DeviceName[31] = 0;
368 iort_copy_data(node, node_entry);
369 TAILQ_INSERT_TAIL(&named_nodes, node, next);
370 break;
371 default:
372 printf("ACPI: IORT: Dropping unhandled type %u\n",
373 node_entry->Type);
374 free(node, M_DEVBUF);
375 break;
380 * For the mapping entry given, walk thru all the possible destination
381 * nodes and resolve the output reference.
383 static void
384 iort_resolve_node(struct iort_map_entry *entry, int check_smmu)
386 struct iort_node *node, *np;
388 node = NULL;
389 if (check_smmu) {
390 TAILQ_FOREACH(np, &smmu_nodes, next) {
391 if (entry->out_node_offset == np->node_offset) {
392 node = np;
393 break;
397 if (node == NULL) {
398 TAILQ_FOREACH(np, &its_groups, next) {
399 if (entry->out_node_offset == np->node_offset) {
400 node = np;
401 break;
405 if (node != NULL) {
406 node->usecount++;
407 entry->out_node = node;
408 } else {
409 printf("ACPI: IORT: Firmware Bug: no mapping for node %u\n",
410 entry->out_node_offset);
415 * Resolve all output node references to node pointers.
417 static void
418 iort_post_process_mappings(void)
420 struct iort_node *node;
421 int i;
423 TAILQ_FOREACH(node, &pci_nodes, next)
424 for (i = 0; i < node->nentries; i++)
425 iort_resolve_node(&node->entries.mappings[i], TRUE);
426 TAILQ_FOREACH(node, &smmu_nodes, next)
427 for (i = 0; i < node->nentries; i++)
428 iort_resolve_node(&node->entries.mappings[i], FALSE);
429 TAILQ_FOREACH(node, &named_nodes, next)
430 for (i = 0; i < node->nentries; i++)
431 iort_resolve_node(&node->entries.mappings[i], TRUE);
435 * Walk MADT table, assign PIC xrefs to all ITS entries.
437 static void
438 madt_resolve_its_xref(ACPI_SUBTABLE_HEADER *entry, void *arg)
440 ACPI_MADT_GENERIC_TRANSLATOR *gict;
441 struct iort_node *its_node;
442 struct iort_its_entry *its_entry;
443 u_int xref;
444 int i, matches;
446 if (entry->Type != ACPI_MADT_TYPE_GENERIC_TRANSLATOR)
447 return;
449 gict = (ACPI_MADT_GENERIC_TRANSLATOR *)entry;
450 matches = 0;
451 xref = acpi_its_xref++;
452 TAILQ_FOREACH(its_node, &its_groups, next) {
453 its_entry = its_node->entries.its;
454 for (i = 0; i < its_node->nentries; i++, its_entry++) {
455 if (its_entry->its_id == gict->TranslationId) {
456 its_entry->xref = xref;
457 matches++;
461 if (matches == 0)
462 printf("ACPI: IORT: Unused ITS block, ID %u\n",
463 gict->TranslationId);
467 * Walk SRAT, assign proximity to all ITS entries.
469 static void
470 srat_resolve_its_pxm(ACPI_SUBTABLE_HEADER *entry, void *arg)
472 ACPI_SRAT_GIC_ITS_AFFINITY *gicits;
473 struct iort_node *its_node;
474 struct iort_its_entry *its_entry;
475 int *map_counts;
476 int i, matches, dom;
478 if (entry->Type != ACPI_SRAT_TYPE_GIC_ITS_AFFINITY)
479 return;
481 matches = 0;
482 map_counts = arg;
483 gicits = (ACPI_SRAT_GIC_ITS_AFFINITY *)entry;
484 dom = acpi_map_pxm_to_vm_domainid(gicits->ProximityDomain);
487 * Catch firmware and config errors. map_counts keeps a
488 * count of ProximityDomain values mapping to a domain ID
490 #if MAXMEMDOM > 1
491 if (dom == -1)
492 printf("Firmware Error: Proximity Domain %d could not be"
493 " mapped for GIC ITS ID %d!\n",
494 gicits->ProximityDomain, gicits->ItsId);
495 #endif
496 /* use dom + 1 as index to handle the case where dom == -1 */
497 i = ++map_counts[dom + 1];
498 if (i > 1) {
499 #ifdef NUMA
500 if (dom != -1)
501 printf("ERROR: Multiple Proximity Domains map to the"
502 " same NUMA domain %d!\n", dom);
503 #else
504 printf("WARNING: multiple Proximity Domains in SRAT but NUMA"
505 " NOT enabled!\n");
506 #endif
508 TAILQ_FOREACH(its_node, &its_groups, next) {
509 its_entry = its_node->entries.its;
510 for (i = 0; i < its_node->nentries; i++, its_entry++) {
511 if (its_entry->its_id == gicits->ItsId) {
512 its_entry->pxm = dom;
513 matches++;
517 if (matches == 0)
518 printf("ACPI: IORT: ITS block %u in SRAT not found in IORT!\n",
519 gicits->ItsId);
523 * Cross check the ITS Id with MADT and (if available) SRAT.
525 static int
526 iort_post_process_its(void)
528 ACPI_TABLE_MADT *madt;
529 ACPI_TABLE_SRAT *srat;
530 vm_paddr_t madt_pa, srat_pa;
531 int map_counts[MAXMEMDOM + 1] = { 0 };
533 /* Check ITS block in MADT */
534 madt_pa = acpi_find_table(ACPI_SIG_MADT);
535 KASSERT(madt_pa != 0, ("no MADT!"));
536 madt = acpi_map_table(madt_pa, ACPI_SIG_MADT);
537 KASSERT(madt != NULL, ("can't map MADT!"));
538 acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
539 madt_resolve_its_xref, NULL);
540 acpi_unmap_table(madt);
542 /* Get proximtiy if available */
543 srat_pa = acpi_find_table(ACPI_SIG_SRAT);
544 if (srat_pa != 0) {
545 srat = acpi_map_table(srat_pa, ACPI_SIG_SRAT);
546 KASSERT(srat != NULL, ("can't map SRAT!"));
547 acpi_walk_subtables(srat + 1, (char *)srat + srat->Header.Length,
548 srat_resolve_its_pxm, map_counts);
549 acpi_unmap_table(srat);
551 return (0);
555 * Find, parse, and save IO Remapping Table ("IORT").
557 static int
558 acpi_parse_iort(void *dummy __unused)
560 ACPI_TABLE_IORT *iort;
561 ACPI_IORT_NODE *node_entry;
562 vm_paddr_t iort_pa;
563 u_int node_offset;
565 iort_pa = acpi_find_table(ACPI_SIG_IORT);
566 if (iort_pa == 0)
567 return (ENXIO);
569 iort = acpi_map_table(iort_pa, ACPI_SIG_IORT);
570 if (iort == NULL) {
571 printf("ACPI: Unable to map the IORT table!\n");
572 return (ENXIO);
574 for (node_offset = iort->NodeOffset;
575 node_offset < iort->Header.Length;
576 node_offset += node_entry->Length) {
577 node_entry = ACPI_ADD_PTR(ACPI_IORT_NODE, iort, node_offset);
578 iort_add_nodes(node_entry, node_offset);
580 acpi_unmap_table(iort);
581 iort_post_process_mappings();
582 iort_post_process_its();
583 return (0);
585 SYSINIT(acpi_parse_iort, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_parse_iort, NULL);
588 * Provide ITS ID to PIC xref mapping.
591 acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm)
593 struct iort_node *its_node;
594 struct iort_its_entry *its_entry;
595 int i;
597 TAILQ_FOREACH(its_node, &its_groups, next) {
598 its_entry = its_node->entries.its;
599 for (i = 0; i < its_node->nentries; i++, its_entry++) {
600 if (its_entry->its_id == its_id) {
601 *xref = its_entry->xref;
602 *pxm = its_entry->pxm;
603 return (0);
607 return (ENOENT);
611 * Find mapping for a PCIe device given segment and device ID
612 * returns the XREF for MSI interrupt setup and the device ID to
613 * use for the interrupt setup
616 acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid)
618 struct iort_node *node;
620 node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_ITS_GROUP, devid);
621 if (node == NULL)
622 return (ENOENT);
624 /* This should be an ITS node */
625 KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));
627 /* return first node, we don't handle more than that now. */
628 *xref = node->entries.its[0].xref;
629 return (0);
633 acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref, u_int *sid)
635 ACPI_IORT_SMMU_V3 *smmu;
636 struct iort_node *node;
638 node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_SMMU_V3, sid);
639 if (node == NULL)
640 return (ENOENT);
642 /* This should be an SMMU node. */
643 KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));
645 smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
646 *xref = smmu->BaseAddress;
648 return (0);
652 * Finds mapping for a named node given name and resource ID and returns the
653 * XREF for MSI interrupt setup and the device ID to use for the interrupt setup.
656 acpi_iort_map_named_msi(const char *devname, u_int rid, u_int *xref,
657 u_int *devid)
659 struct iort_node *node;
661 node = iort_named_comp_map(devname, rid, ACPI_IORT_NODE_ITS_GROUP,
662 devid);
663 if (node == NULL)
664 return (ENOENT);
666 /* This should be an ITS node */
667 KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));
669 /* Return first node, we don't handle more than that now. */
670 *xref = node->entries.its[0].xref;
671 return (0);
675 acpi_iort_map_named_smmuv3(const char *devname, u_int rid, u_int *xref,
676 u_int *devid)
678 ACPI_IORT_SMMU_V3 *smmu;
679 struct iort_node *node;
681 node = iort_named_comp_map(devname, rid, ACPI_IORT_NODE_SMMU_V3, devid);
682 if (node == NULL)
683 return (ENOENT);
685 /* This should be an SMMU node. */
686 KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));
688 smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
689 *xref = smmu->BaseAddress;
691 return (0);