// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2024 Linaro Ltd.
 */

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include <linux/soc/qcom/smem.h>

#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_reg.h"
#include "ipa_table.h"

/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL	cpu_to_le32(0xdeadbeef)

/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM	1
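
/* Return the descriptor for the memory region with the given ID, or NULL
 * if no such region is defined for this platform.
 */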
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->id == mem_id)
			return mem;
	}

	return NULL;
}

/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
	dma_addr_t addr = ipa->zero_addr;

	/* Nothing to do for a zero-length region */
	if (!mem->size)
		return;

	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
}

/**
 * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
 * @ipa:	IPA pointer
 *
 * Set up the shared memory regions in IPA local memory.  This involves
 * zero-filling memory regions, and in the case of header memory, telling
 * the IPA where it's located.
 *
 * This function performs the initial setup of this memory.  If the modem
 * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
 *
 * The AP informs the modem where its portions of memory are located
 * in a QMI exchange that occurs at modem startup.
 *
 * There is no need for a matching ipa_mem_teardown() function.
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_setup(struct ipa *ipa)
{
	dma_addr_t addr = ipa->zero_addr;
	const struct ipa_mem *mem;
	struct gsi_trans *trans;
	const struct reg *reg;
	u32 offset;
	u16 size;
	u32 val;

	/* Get a transaction to define the header memory region and to zero
	 * the processing context and modem memory regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 4);
	if (!trans) {
		dev_err(ipa->dev, "no transaction for memory setup\n");
		return -EBUSY;
	}

	/* Initialize IPA-local header memory.  The AP header region, if
	 * present, is contiguous with and follows the modem header region,
	 * and they are initialized together.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	/* Tell the hardware where the processing context area is located */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
	offset = ipa->mem_offset + mem->offset;

	reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
	val = reg_encode(reg, IPA_BASE_ADDR, offset);
	iowrite32(val, ipa->reg_virt + reg_offset(reg));

	return 0;
}

/* Is the given memory region ID valid for the current IPA version? */
static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	enum ipa_version version = ipa->version;

	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_AP_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
	case IPA_MEM_UC_EVENT_RING:
	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
	case IPA_MEM_STATS_QUOTA_AP:
	case IPA_MEM_END_MARKER:	/* pseudo region */
		break;

	case IPA_MEM_STATS_TETHERING:
	case IPA_MEM_STATS_DROP:
		if (version < IPA_VERSION_4_0)
			return false;
		break;

	case IPA_MEM_STATS_V4_FILTER:
	case IPA_MEM_STATS_V6_FILTER:
	case IPA_MEM_STATS_V4_ROUTE:
	case IPA_MEM_STATS_V6_ROUTE:
		if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
			return false;
		break;

	case IPA_MEM_AP_V4_FILTER:
	case IPA_MEM_AP_V6_FILTER:
		if (version < IPA_VERSION_5_0)
			return false;
		break;

	case IPA_MEM_NAT_TABLE:
	case IPA_MEM_STATS_FILTER_ROUTE:
		if (version < IPA_VERSION_4_5)
			return false;
		break;

	default:
		return false;
	}

	return true;
}

/* Must the given memory region be present in the configuration? */
static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
		return true;

	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
		return ipa->version >= IPA_VERSION_4_0;

	case IPA_MEM_STATS_TETHERING:
		return ipa->version >= IPA_VERSION_4_0 &&
			ipa->version != IPA_VERSION_5_0;

	default:
		return false;		/* Anything else is optional */
	}
}
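
/* Validate one region descriptor: ID, size, alignment, and canary count */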
static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
	enum ipa_mem_id mem_id = mem->id;
	struct device *dev = ipa->dev;
	u16 size_multiple;

	/* Make sure the memory region is valid for this version of IPA */
	if (!ipa_mem_id_valid(ipa, mem_id)) {
		dev_err(dev, "region id %u not valid\n", mem_id);
		return false;
	}

	if (!mem->size && !mem->canary_count) {
		dev_err(dev, "empty memory region %u\n", mem_id);
		return false;
	}

	/* Other than modem memory, sizes must be a multiple of 8 */
	size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
	if (mem->size % size_multiple)
		dev_err(dev, "region %u size not a multiple of %u bytes\n",
			mem_id, size_multiple);
	else if (mem->offset % 8)
		dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
	else if (mem->offset < mem->canary_count * sizeof(__le32))
		dev_err(dev, "region %u offset too small for %hu canaries\n",
			mem_id, mem->canary_count);
	else if (mem_id == IPA_MEM_END_MARKER && mem->size)
		dev_err(dev, "non-zero end marker region size\n");
	else
		return true;

	return false;
}

/* Verify each defined memory region is valid. */
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
	DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
	struct device *dev = ipa->dev;
	enum ipa_mem_id mem_id;
	u32 i;

	if (mem_data->local_count > IPA_MEM_COUNT) {
		dev_err(dev, "too many memory regions (%u > %u)\n",
			mem_data->local_count, IPA_MEM_COUNT);
		return false;
	}

	for (i = 0; i < mem_data->local_count; i++) {
		const struct ipa_mem *mem = &mem_data->local[i];

		if (__test_and_set_bit(mem->id, regions)) {
			dev_err(dev, "duplicate memory region %u\n", mem->id);
			return false;
		}

		/* Defined regions have non-zero size and/or canary count */
		if (!ipa_mem_valid_one(ipa, mem))
			return false;
	}

	/* Now see if any required regions are not defined */
	for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
		if (ipa_mem_id_required(ipa, mem_id))
			dev_err(dev, "required memory region %u missing\n",
				mem_id);
	}

	return true;
}

/* Do all memory regions fit within the IPA local memory? */
static bool ipa_mem_size_valid(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	u32 limit = ipa->mem_size;
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->offset + mem->size <= limit)
			continue;

		dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
			mem->id, limit);

		return false;
	}

	return true;
}

/**
 * ipa_mem_config() - Configure IPA shared memory
 * @ipa:	IPA pointer
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_config(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	const struct ipa_mem *mem;
	const struct reg *reg;
	dma_addr_t addr;
	u32 mem_size;
	void *virt;
	u32 val;
	u32 i;

	/* Check the advertised location and size of the shared memory area */
	reg = ipa_reg(ipa, SHARED_MEM_SIZE);
	val = ioread32(ipa->reg_virt + reg_offset(reg));

	/* The fields in the register are in 8 byte units */
	ipa->mem_offset = 8 * reg_decode(reg, MEM_BADDR, val);

	/* Make sure the end is within the region's mapped space */
	mem_size = 8 * reg_decode(reg, MEM_SIZE, val);

	/* If the sizes don't match, issue a warning */
	if (ipa->mem_offset + mem_size < ipa->mem_size) {
		dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
			 mem_size);
		ipa->mem_size = mem_size;
	} else if (ipa->mem_offset + mem_size > ipa->mem_size) {
		dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
			mem_size);
	}

	/* We know our memory size; make sure regions are all in range */
	if (!ipa_mem_size_valid(ipa))
		return -EINVAL;

	/* Prealloc DMA memory for zeroing regions */
	virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	ipa->zero_addr = addr;
	ipa->zero_virt = virt;
	ipa->zero_size = IPA_MEM_MAX;

	/* For each defined region, write "canary" values in the
	 * space prior to the region's base address if indicated.
	 */
	for (i = 0; i < ipa->mem_count; i++) {
		u16 canary_count = ipa->mem[i].canary_count;
		__le32 *canary;

		if (!canary_count)
			continue;

		/* Write canary values in the space before the region */
		canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
		do
			*--canary = IPA_MEM_CANARY_VAL;
		while (--canary_count);
	}

	/* Verify the microcontroller ring alignment (if defined) */
	mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
	if (mem && mem->offset % 1024) {
		dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
		goto err_dma_free;
	}

	return 0;

err_dma_free:
	dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);

	return -EINVAL;
}

/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
	struct device *dev = ipa->dev;

	dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
	ipa->zero_size = 0;
	ipa->zero_virt = NULL;
	ipa->zero_addr = 0;
}

/**
 * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
 * @ipa:	IPA pointer
 *
 * Zero regions of IPA-local memory used by the modem.  These are configured
 * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
 * restarts via SSR we need to re-initialize them.  A QMI message tells the
 * modem where to find regions of IPA local memory it needs to know about
 * (these included).
 */
int ipa_mem_zero_modem(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* Get a transaction to zero the modem memory, modem header,
	 * and modem processing context regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 3);
	if (!trans) {
		dev_err(ipa->dev, "no transaction to zero modem memory\n");
		return -EBUSY;
	}

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	return 0;
}

/**
 * ipa_imem_init() - Initialize IMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @addr:	Physical address of the IPA region in IMEM
 * @size:	Size (bytes) of the IPA region in IMEM
 *
 * IMEM is a block of shared memory separate from system DRAM, and
 * a portion of this memory is available for the IPA to use.  The
 * modem accesses this memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If this region exists (size > 0) we map it for read/write access
 * through the IOMMU using the IPA device.
 *
 * Note: @addr and @size are not guaranteed to be page-aligned.
 */
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	int ret;

	if (!size)
		return 0;	/* IMEM memory not used */

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for IMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to page boundaries */
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
			GFP_KERNEL);
	if (ret)
		return ret;

	ipa->imem_iova = iova;
	ipa->imem_size = size;

	return 0;
}
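
/* Inverse of ipa_imem_init(); unmap the IMEM region if it was mapped */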
static void ipa_imem_exit(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;

	if (!ipa->imem_size)
		return;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
		if (size != ipa->imem_size)
			dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
				 size, ipa->imem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
	}

	ipa->imem_size = 0;
	ipa->imem_iova = 0;
}

/**
 * ipa_smem_init() - Initialize SMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @item:	Item ID of SMEM memory
 * @size:	Size (bytes) of SMEM memory region
 *
 * SMEM is a managed block of shared DRAM, from which numbered "items"
 * can be allocated.  One item is designated for use by the IPA.
 *
 * The modem accesses SMEM memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If the size provided is non-zero, we allocate the item and map it
 * for access through the IOMMU.
 *
 * Note: @size and the item address are not guaranteed to be page-aligned.
 */
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	phys_addr_t addr;
	size_t actual;
	void *virt;
	int ret;

	if (!size)
		return 0;	/* SMEM memory not used */

	/* SMEM is memory shared between the AP and another system entity
	 * (in this case, the modem).  An allocation from SMEM is persistent
	 * until the AP reboots; there is no way to free an allocated SMEM
	 * region.  Allocation only reserves the space; to use it you need
	 * to "get" a pointer to it (this does not imply reference counting).
	 * The item might have already been allocated, in which case we
	 * use it unless the size isn't what we expect.
	 */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
			ret, size, item);
		return ret;
	}

	/* Now get the address of the SMEM memory region */
	virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
	if (IS_ERR(virt)) {
		ret = PTR_ERR(virt);
		dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
		return ret;
	}

	/* In case the region was already allocated, verify the size */
	if (ret && actual != size) {
		dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
			item, actual, size);
		return -EINVAL;
	}

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for SMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to a page boundary */
	addr = qcom_smem_virt_to_phys(virt);
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
			GFP_KERNEL);
	if (ret)
		return ret;

	ipa->smem_iova = iova;
	ipa->smem_size = size;

	return 0;
}
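
/* Inverse of ipa_smem_init(); the SMEM item itself cannot be freed */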
static void ipa_smem_exit(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
		if (size != ipa->smem_size)
			dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
				 size, ipa->smem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
	}

	ipa->smem_size = 0;
	ipa->smem_iova = 0;
}

/* Perform memory region-related initialization */
int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
		 const struct ipa_mem_data *mem_data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	/* Make sure the set of defined memory regions is valid */
	if (!ipa_mem_valid(ipa, mem_data))
		return -EINVAL;

	ipa->mem_count = mem_data->local_count;
	ipa->mem = mem_data->local;

	/* Check the route and filter table memory regions */
	if (!ipa_table_mem_valid(ipa, false))
		return -EINVAL;
	if (!ipa_table_mem_valid(ipa, true))
		return -EINVAL;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "error %d setting DMA mask\n", ret);
		return ret;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-shared");
	if (!res) {
		dev_err(dev,
			"DT error getting \"ipa-shared\" memory property\n");
		return -ENODEV;
	}

	ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
	if (!ipa->mem_virt) {
		dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
		return -ENOMEM;
	}

	ipa->mem_addr = res->start;
	ipa->mem_size = resource_size(res);

	ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
	if (ret)
		goto err_unmap;

	ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
	if (ret)
		goto err_imem_exit;

	return 0;

err_imem_exit:
	ipa_imem_exit(ipa);
err_unmap:
	memunmap(ipa->mem_virt);

	return ret;
}

/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
	ipa_smem_exit(ipa);
	ipa_imem_exit(ipa);
	memunmap(ipa->mem_virt);
}