// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Arm Limited. All rights reserved.
 *
 * Coresight Address Translation Unit support
 *
 * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
 */
10 #include <linux/amba/bus.h>
11 #include <linux/device.h>
12 #include <linux/dma-mapping.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
17 #include "coresight-catu.h"
18 #include "coresight-priv.h"
19 #include "coresight-tmc.h"
21 #define csdev_to_catu_drvdata(csdev) \
22 dev_get_drvdata(csdev->dev.parent)
/*
 * Verbose output for CATU table contents: compiled away unless
 * CATU_DEBUG is defined, so the dump helpers cost nothing in
 * production builds.
 */
#ifdef CATU_DEBUG
#define catu_dbg(x, ...) dev_dbg(x, __VA_ARGS__)
#else
#define catu_dbg(x, ...) do {} while (0)
#endif
31 DEFINE_CORESIGHT_DEVLIST(catu_devs
, "catu");
34 struct tmc_sg_table
*catu_table
;
/*
 * CATU uses a page size of 4KB for page tables as well as data pages.
 * Each 64bit entry in the table has the following format.
 *
 *	63			12	1  0
 *	------------------------------------
 *	|	 Address [63-12] | SBZ	| V|
 *	------------------------------------
 *
 * Where bit[0] V indicates if the address is valid or not.
 * Each 4K table pages have upto 256 data page pointers, taking upto 2K
 * size. There are two Link pointers, pointing to the previous and next
 * table pages respectively at the end of the 4K page. (i.e, entry 510
 * and 511).
 *
 * E.g, a table of two pages could look like :
 *
 *                 Table Page 0            Table Page 1
 * SLADDR ===> x------------------x  x--> x-----------------x
 * INADDR    ->|  Page 0      | V |  |    | Page 256    | V | <- INADDR+1M
 *             |------------------|  |    |-----------------|
 * INADDR+4K ->|  Page 1      | V |  |    |                 |
 *             |------------------|  |    |-----------------|
 *             |  Page 2      | V |  |    |                 |
 *             |------------------|  |    |-----------------|
 *             |   ...        | V |  |    |    ...          |
 *             |------------------|  |    |-----------------|
 * INADDR+1020K|  Page 255    | V |  |    |  Page 511   | V |
 * SLADDR+2K==>|------------------|  |    |-----------------|
 *             |                  |  |    |                 |
 *             |------------------|  |    |                 |
 *             |                  |  |    |                 |
 *             |------------------|  |    |-----------------|
 *             |   IGNORED    | 0 |  |    | Table Page 0| 1 |
 *             |------------------|  |    |-----------------|
 *             |  Table Page 1| 1 |--x    | IGNORED     | 0 |
 *             x------------------x       x-----------------x
 *
 * The base input address (used by the ETR, programmed in INADDR_{LO,HI})
 * must be aligned to 1MB (the size addressable by a single page table).
 * The CATU maps INADDR{LO:HI} to the first page in the table pointed
 * to by SLADDR{LO:HI} and so on.
 */
86 #define CATU_PAGE_SHIFT 12
87 #define CATU_PAGE_SIZE (1UL << CATU_PAGE_SHIFT)
88 #define CATU_PAGES_PER_SYSPAGE (PAGE_SIZE / CATU_PAGE_SIZE)
90 /* Page pointers are only allocated in the first 2K half */
91 #define CATU_PTRS_PER_PAGE ((CATU_PAGE_SIZE >> 1) / sizeof(cate_t))
92 #define CATU_PTRS_PER_SYSPAGE (CATU_PAGES_PER_SYSPAGE * CATU_PTRS_PER_PAGE)
93 #define CATU_LINK_PREV ((CATU_PAGE_SIZE / sizeof(cate_t)) - 2)
94 #define CATU_LINK_NEXT ((CATU_PAGE_SIZE / sizeof(cate_t)) - 1)
96 #define CATU_ADDR_SHIFT 12
97 #define CATU_ADDR_MASK ~(((cate_t)1 << CATU_ADDR_SHIFT) - 1)
98 #define CATU_ENTRY_VALID ((cate_t)0x1)
99 #define CATU_VALID_ENTRY(addr) \
100 (((cate_t)(addr) & CATU_ADDR_MASK) | CATU_ENTRY_VALID)
101 #define CATU_ENTRY_ADDR(entry) ((cate_t)(entry) & ~((cate_t)CATU_ENTRY_VALID))
103 /* CATU expects the INADDR to be aligned to 1M. */
104 #define CATU_DEFAULT_INADDR (1ULL << 20)
107 * catu_get_table : Retrieve the table pointers for the given @offset
108 * within the buffer. The buffer is wrapped around to a valid offset.
110 * Returns : The CPU virtual address for the beginning of the table
111 * containing the data page pointer for @offset. If @daddrp is not NULL,
112 * @daddrp points the DMA address of the beginning of the table.
114 static inline cate_t
*catu_get_table(struct tmc_sg_table
*catu_table
,
115 unsigned long offset
,
118 unsigned long buf_size
= tmc_sg_table_buf_size(catu_table
);
119 unsigned int table_nr
, pg_idx
, pg_offset
;
120 struct tmc_pages
*table_pages
= &catu_table
->table_pages
;
123 /* Make sure offset is within the range */
127 * Each table can address 1MB and a single kernel page can
128 * contain "CATU_PAGES_PER_SYSPAGE" CATU tables.
130 table_nr
= offset
>> 20;
131 /* Find the table page where the table_nr lies in */
132 pg_idx
= table_nr
/ CATU_PAGES_PER_SYSPAGE
;
133 pg_offset
= (table_nr
% CATU_PAGES_PER_SYSPAGE
) * CATU_PAGE_SIZE
;
135 *daddrp
= table_pages
->daddrs
[pg_idx
] + pg_offset
;
136 ptr
= page_address(table_pages
->pages
[pg_idx
]);
137 return (cate_t
*)((unsigned long)ptr
+ pg_offset
);
#ifdef CATU_DEBUG
/* Dump every sub-table (1MB worth of entries plus its link pointers). */
static void catu_dump_table(struct tmc_sg_table *catu_table)
{
	int i;
	cate_t *table;
	unsigned long table_end, buf_size, offset = 0;

	buf_size = tmc_sg_table_buf_size(catu_table);
	dev_dbg(catu_table->dev,
		"Dump table %p, tdaddr: %llx\n",
		catu_table, catu_table->table_daddr);

	while (offset < buf_size) {
		/* The last sub-table may cover less than 1MB */
		table_end = offset + SZ_1M < buf_size ?
			    offset + SZ_1M : buf_size;
		table = catu_get_table(catu_table, offset, NULL);
		for (i = 0; offset < table_end; i++, offset += CATU_PAGE_SIZE)
			dev_dbg(catu_table->dev, "%d: %llx\n", i, table[i]);
		dev_dbg(catu_table->dev, "Prev : %llx, Next: %llx\n",
			table[CATU_LINK_PREV], table[CATU_LINK_NEXT]);
		dev_dbg(catu_table->dev, "== End of sub-table ===");
	}
	dev_dbg(catu_table->dev, "== End of Table ===");
}
#else
static inline void catu_dump_table(struct tmc_sg_table *catu_table)
{
}
#endif
171 static inline cate_t
catu_make_entry(dma_addr_t addr
)
173 return addr
? CATU_VALID_ENTRY(addr
) : 0;
177 * catu_populate_table : Populate the given CATU table.
178 * The table is always populated as a circular table.
179 * i.e, the "prev" link of the "first" table points to the "last"
180 * table and the "next" link of the "last" table points to the
181 * "first" table. The buffer should be made linear by calling
185 catu_populate_table(struct tmc_sg_table
*catu_table
)
188 int sys_pidx
; /* Index to current system data page */
189 int catu_pidx
; /* Index of CATU page within the system data page */
190 unsigned long offset
, buf_size
, table_end
;
191 dma_addr_t data_daddr
;
192 dma_addr_t prev_taddr
, next_taddr
, cur_taddr
;
193 cate_t
*table_ptr
, *next_table
;
195 buf_size
= tmc_sg_table_buf_size(catu_table
);
196 sys_pidx
= catu_pidx
= 0;
199 table_ptr
= catu_get_table(catu_table
, 0, &cur_taddr
);
200 prev_taddr
= 0; /* Prev link for the first table */
202 while (offset
< buf_size
) {
204 * The @offset is always 1M aligned here and we have an
205 * empty table @table_ptr to fill. Each table can address
206 * upto 1MB data buffer. The last table may have fewer
207 * entries if the buffer size is not aligned.
209 table_end
= (offset
+ SZ_1M
) < buf_size
?
210 (offset
+ SZ_1M
) : buf_size
;
211 for (i
= 0; offset
< table_end
;
212 i
++, offset
+= CATU_PAGE_SIZE
) {
214 data_daddr
= catu_table
->data_pages
.daddrs
[sys_pidx
] +
215 catu_pidx
* CATU_PAGE_SIZE
;
216 catu_dbg(catu_table
->dev
,
217 "[table %5ld:%03d] 0x%llx\n",
218 (offset
>> 20), i
, data_daddr
);
219 table_ptr
[i
] = catu_make_entry(data_daddr
);
220 /* Move the pointers for data pages */
221 catu_pidx
= (catu_pidx
+ 1) % CATU_PAGES_PER_SYSPAGE
;
227 * If we have finished all the valid entries, fill the rest of
228 * the table (i.e, last table page) with invalid entries,
229 * to fail the lookups.
231 if (offset
== buf_size
) {
232 memset(&table_ptr
[i
], 0,
233 sizeof(cate_t
) * (CATU_PTRS_PER_PAGE
- i
));
236 next_table
= catu_get_table(catu_table
,
237 offset
, &next_taddr
);
240 table_ptr
[CATU_LINK_PREV
] = catu_make_entry(prev_taddr
);
241 table_ptr
[CATU_LINK_NEXT
] = catu_make_entry(next_taddr
);
243 catu_dbg(catu_table
->dev
,
244 "[table%5ld]: Cur: 0x%llx Prev: 0x%llx, Next: 0x%llx\n",
245 (offset
>> 20) - 1, cur_taddr
, prev_taddr
, next_taddr
);
247 /* Update the prev/next addresses */
249 prev_taddr
= cur_taddr
;
250 cur_taddr
= next_taddr
;
251 table_ptr
= next_table
;
255 /* Sync the table for device */
256 tmc_sg_table_sync_table(catu_table
);
259 static struct tmc_sg_table
*
260 catu_init_sg_table(struct device
*catu_dev
, int node
,
261 ssize_t size
, void **pages
)
264 struct tmc_sg_table
*catu_table
;
267 * Each table can address upto 1MB and we can have
268 * CATU_PAGES_PER_SYSPAGE tables in a system page.
270 nr_tpages
= DIV_ROUND_UP(size
, SZ_1M
) / CATU_PAGES_PER_SYSPAGE
;
271 catu_table
= tmc_alloc_sg_table(catu_dev
, node
, nr_tpages
,
272 size
>> PAGE_SHIFT
, pages
);
273 if (IS_ERR(catu_table
))
276 catu_populate_table(catu_table
);
278 "Setup table %p, size %ldKB, %d table pages\n",
279 catu_table
, (unsigned long)size
>> 10, nr_tpages
);
280 catu_dump_table(catu_table
);
284 static void catu_free_etr_buf(struct etr_buf
*etr_buf
)
286 struct catu_etr_buf
*catu_buf
;
288 if (!etr_buf
|| etr_buf
->mode
!= ETR_MODE_CATU
|| !etr_buf
->private)
291 catu_buf
= etr_buf
->private;
292 tmc_free_sg_table(catu_buf
->catu_table
);
296 static ssize_t
catu_get_data_etr_buf(struct etr_buf
*etr_buf
, u64 offset
,
297 size_t len
, char **bufpp
)
299 struct catu_etr_buf
*catu_buf
= etr_buf
->private;
301 return tmc_sg_table_get_data(catu_buf
->catu_table
, offset
, len
, bufpp
);
304 static void catu_sync_etr_buf(struct etr_buf
*etr_buf
, u64 rrp
, u64 rwp
)
306 struct catu_etr_buf
*catu_buf
= etr_buf
->private;
307 struct tmc_sg_table
*catu_table
= catu_buf
->catu_table
;
308 u64 r_offset
, w_offset
;
311 * ETR started off at etr_buf->hwaddr. Convert the RRP/RWP to
312 * offsets within the trace buffer.
314 r_offset
= rrp
- etr_buf
->hwaddr
;
315 w_offset
= rwp
- etr_buf
->hwaddr
;
317 if (!etr_buf
->full
) {
318 etr_buf
->len
= w_offset
- r_offset
;
319 if (w_offset
< r_offset
)
320 etr_buf
->len
+= etr_buf
->size
;
322 etr_buf
->len
= etr_buf
->size
;
325 etr_buf
->offset
= r_offset
;
326 tmc_sg_table_sync_data_range(catu_table
, r_offset
, etr_buf
->len
);
329 static int catu_alloc_etr_buf(struct tmc_drvdata
*tmc_drvdata
,
330 struct etr_buf
*etr_buf
, int node
, void **pages
)
332 struct coresight_device
*csdev
;
333 struct tmc_sg_table
*catu_table
;
334 struct catu_etr_buf
*catu_buf
;
336 csdev
= tmc_etr_get_catu_device(tmc_drvdata
);
339 catu_buf
= kzalloc(sizeof(*catu_buf
), GFP_KERNEL
);
343 catu_table
= catu_init_sg_table(&csdev
->dev
, node
,
344 etr_buf
->size
, pages
);
345 if (IS_ERR(catu_table
)) {
347 return PTR_ERR(catu_table
);
350 etr_buf
->mode
= ETR_MODE_CATU
;
351 etr_buf
->private = catu_buf
;
352 etr_buf
->hwaddr
= CATU_DEFAULT_INADDR
;
354 catu_buf
->catu_table
= catu_table
;
355 /* Get the table base address */
356 catu_buf
->sladdr
= catu_table
->table_daddr
;
361 static const struct etr_buf_operations etr_catu_buf_ops
= {
362 .alloc
= catu_alloc_etr_buf
,
363 .free
= catu_free_etr_buf
,
364 .sync
= catu_sync_etr_buf
,
365 .get_data
= catu_get_data_etr_buf
,
368 coresight_simple_reg32(struct catu_drvdata
, devid
, CORESIGHT_DEVID
);
369 coresight_simple_reg32(struct catu_drvdata
, control
, CATU_CONTROL
);
370 coresight_simple_reg32(struct catu_drvdata
, status
, CATU_STATUS
);
371 coresight_simple_reg32(struct catu_drvdata
, mode
, CATU_MODE
);
372 coresight_simple_reg32(struct catu_drvdata
, axictrl
, CATU_AXICTRL
);
373 coresight_simple_reg32(struct catu_drvdata
, irqen
, CATU_IRQEN
);
374 coresight_simple_reg64(struct catu_drvdata
, sladdr
,
375 CATU_SLADDRLO
, CATU_SLADDRHI
);
376 coresight_simple_reg64(struct catu_drvdata
, inaddr
,
377 CATU_INADDRLO
, CATU_INADDRHI
);
379 static struct attribute
*catu_mgmt_attrs
[] = {
380 &dev_attr_devid
.attr
,
381 &dev_attr_control
.attr
,
382 &dev_attr_status
.attr
,
384 &dev_attr_axictrl
.attr
,
385 &dev_attr_irqen
.attr
,
386 &dev_attr_sladdr
.attr
,
387 &dev_attr_inaddr
.attr
,
391 static const struct attribute_group catu_mgmt_group
= {
392 .attrs
= catu_mgmt_attrs
,
396 static const struct attribute_group
*catu_groups
[] = {
402 static inline int catu_wait_for_ready(struct catu_drvdata
*drvdata
)
404 return coresight_timeout(drvdata
->base
,
405 CATU_STATUS
, CATU_STATUS_READY
, 1);
408 static int catu_enable_hw(struct catu_drvdata
*drvdata
, void *data
)
412 struct etr_buf
*etr_buf
= data
;
413 struct device
*dev
= &drvdata
->csdev
->dev
;
415 if (catu_wait_for_ready(drvdata
))
416 dev_warn(dev
, "Timeout while waiting for READY\n");
418 control
= catu_read_control(drvdata
);
419 if (control
& BIT(CATU_CONTROL_ENABLE
)) {
420 dev_warn(dev
, "CATU is already enabled\n");
424 rc
= coresight_claim_device_unlocked(drvdata
->base
);
428 control
|= BIT(CATU_CONTROL_ENABLE
);
430 if (etr_buf
&& etr_buf
->mode
== ETR_MODE_CATU
) {
431 struct catu_etr_buf
*catu_buf
= etr_buf
->private;
433 mode
= CATU_MODE_TRANSLATE
;
434 catu_write_axictrl(drvdata
, CATU_OS_AXICTRL
);
435 catu_write_sladdr(drvdata
, catu_buf
->sladdr
);
436 catu_write_inaddr(drvdata
, CATU_DEFAULT_INADDR
);
438 mode
= CATU_MODE_PASS_THROUGH
;
439 catu_write_sladdr(drvdata
, 0);
440 catu_write_inaddr(drvdata
, 0);
443 catu_write_irqen(drvdata
, 0);
444 catu_write_mode(drvdata
, mode
);
445 catu_write_control(drvdata
, control
);
446 dev_dbg(dev
, "Enabled in %s mode\n",
447 (mode
== CATU_MODE_PASS_THROUGH
) ?
453 static int catu_enable(struct coresight_device
*csdev
, void *data
)
456 struct catu_drvdata
*catu_drvdata
= csdev_to_catu_drvdata(csdev
);
458 CS_UNLOCK(catu_drvdata
->base
);
459 rc
= catu_enable_hw(catu_drvdata
, data
);
460 CS_LOCK(catu_drvdata
->base
);
464 static int catu_disable_hw(struct catu_drvdata
*drvdata
)
467 struct device
*dev
= &drvdata
->csdev
->dev
;
469 catu_write_control(drvdata
, 0);
470 coresight_disclaim_device_unlocked(drvdata
->base
);
471 if (catu_wait_for_ready(drvdata
)) {
472 dev_info(dev
, "Timeout while waiting for READY\n");
476 dev_dbg(dev
, "Disabled\n");
480 static int catu_disable(struct coresight_device
*csdev
, void *__unused
)
483 struct catu_drvdata
*catu_drvdata
= csdev_to_catu_drvdata(csdev
);
485 CS_UNLOCK(catu_drvdata
->base
);
486 rc
= catu_disable_hw(catu_drvdata
);
487 CS_LOCK(catu_drvdata
->base
);
491 static const struct coresight_ops_helper catu_helper_ops
= {
492 .enable
= catu_enable
,
493 .disable
= catu_disable
,
496 static const struct coresight_ops catu_ops
= {
497 .helper_ops
= &catu_helper_ops
,
500 static int catu_probe(struct amba_device
*adev
, const struct amba_id
*id
)
504 struct catu_drvdata
*drvdata
;
505 struct coresight_desc catu_desc
;
506 struct coresight_platform_data
*pdata
= NULL
;
507 struct device
*dev
= &adev
->dev
;
510 catu_desc
.name
= coresight_alloc_device_name(&catu_devs
, dev
);
514 drvdata
= devm_kzalloc(dev
, sizeof(*drvdata
), GFP_KERNEL
);
520 dev_set_drvdata(dev
, drvdata
);
521 base
= devm_ioremap_resource(dev
, &adev
->res
);
527 /* Setup dma mask for the device */
528 dma_mask
= readl_relaxed(base
+ CORESIGHT_DEVID
) & 0x3f;
539 /* Default to the 40bits as supported by TMC-ETR */
542 ret
= dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(dma_mask
));
546 pdata
= coresight_get_platform_data(dev
);
548 ret
= PTR_ERR(pdata
);
551 dev
->platform_data
= pdata
;
553 drvdata
->base
= base
;
554 catu_desc
.pdata
= pdata
;
556 catu_desc
.groups
= catu_groups
;
557 catu_desc
.type
= CORESIGHT_DEV_TYPE_HELPER
;
558 catu_desc
.subtype
.helper_subtype
= CORESIGHT_DEV_SUBTYPE_HELPER_CATU
;
559 catu_desc
.ops
= &catu_ops
;
561 drvdata
->csdev
= coresight_register(&catu_desc
);
562 if (IS_ERR(drvdata
->csdev
))
563 ret
= PTR_ERR(drvdata
->csdev
);
565 pm_runtime_put(&adev
->dev
);
570 static int catu_remove(struct amba_device
*adev
)
572 struct catu_drvdata
*drvdata
= dev_get_drvdata(&adev
->dev
);
574 coresight_unregister(drvdata
->csdev
);
578 static struct amba_id catu_ids
[] = {
579 CS_AMBA_ID(0x000bb9ee),
583 MODULE_DEVICE_TABLE(amba
, catu_ids
);
585 static struct amba_driver catu_driver
= {
587 .name
= "coresight-catu",
588 .owner
= THIS_MODULE
,
589 .suppress_bind_attrs
= true,
592 .remove
= catu_remove
,
593 .id_table
= catu_ids
,
596 static int __init
catu_init(void)
600 ret
= amba_driver_register(&catu_driver
);
602 pr_info("Error registering catu driver\n");
603 tmc_etr_set_catu_ops(&etr_catu_buf_ops
);
607 static void __exit
catu_exit(void)
609 tmc_etr_remove_catu_ops();
610 amba_driver_unregister(&catu_driver
);
613 module_init(catu_init
);
614 module_exit(catu_exit
);
616 MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>");
617 MODULE_DESCRIPTION("Arm CoreSight Address Translation Unit (CATU) Driver");
618 MODULE_LICENSE("GPL v2");