/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING           (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375       (1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144       (1ULL << 2)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING     (1 << 0)

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
        u64                     target_address;
        u16                     col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
        void            *base;
        u64             val;
        u32             order;
        u32             psz;
};

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
        raw_spinlock_t          lock;
        struct list_head        entry;
        void __iomem            *base;
        phys_addr_t             phys_base;
        struct its_cmd_block    *cmd_base;
        struct its_cmd_block    *cmd_write;
        struct its_baser        tables[GITS_BASER_NR_REGS];
        struct its_collection   *collections;
        struct list_head        its_device_list;
        u64                     flags;
        u32                     ite_size;
        u32                     device_ids;
        int                     numa_node;
};

#define ITS_ITT_ALIGN           SZ_256

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)   (PAGE_SIZE << (o))

struct event_lpi_map {
        unsigned long           *lpi_map;
        u16                     *col_map;
        irq_hw_number_t         lpi_base;
        int                     nr_lpis;
};

/*
 * The ITS view of a device - belongs to an ITS, a collection, owns an
 * interrupt translation table, and a list of interrupts.
 */
struct its_device {
        struct list_head        entry;
        struct its_node         *its;
        struct event_lpi_map    event_map;
        void                    *itt;
        u32                     nr_ites;
        u32                     device_id;
};

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

#define gic_data_rdist()                (raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
                                               u32 event)
{
        struct its_node *its = its_dev->its;

        return its->collections + its_dev->event_map.col_map[event];
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
        union {
                struct {
                        struct its_device *dev;
                        u32 event_id;
                } its_inv_cmd;

                struct {
                        struct its_device *dev;
                        u32 event_id;
                } its_int_cmd;

                struct {
                        struct its_device *dev;
                        int valid;
                } its_mapd_cmd;

                struct {
                        struct its_collection *col;
                        int valid;
                } its_mapc_cmd;

                struct {
                        struct its_device *dev;
                        u32 phys_id;
                        u32 event_id;
                } its_mapti_cmd;

                struct {
                        struct its_device *dev;
                        struct its_collection *col;
                        u32 event_id;
                } its_movi_cmd;

                struct {
                        struct its_device *dev;
                        u32 event_id;
                } its_discard_cmd;

                struct {
                        struct its_collection *col;
                } its_invall_cmd;
        };
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
        u64     raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ                SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES        (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
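
/*
 * Each command block is four 64-bit doublewords (32 bytes), so the
 * 64K command queue above holds 2048 entries.
 */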

typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
                                                    struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
        u64 mask = GENMASK_ULL(h, l);

        *raw_cmd &= ~mask;
        *raw_cmd |= (val << l) & mask;
}
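
/*
 * Field encoders: each helper below packs one command field into the bit
 * positions mandated by the GICv3 ITS command format (e.g. the DeviceID
 * lives in bits [63:32] of the first doubleword).
 */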

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
        its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
        its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
        its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
        its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
        its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
        its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
        its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
        its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
        its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
        /* Let's fixup BE commands */
        cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
        cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
        cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
        cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
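
/*
 * Command builders: each encodes one ITS command from a descriptor and
 * returns the collection a SYNC must target afterwards, or NULL when no
 * synchronization point is required.
 */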

static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
                                                 struct its_cmd_desc *desc)
{
        unsigned long itt_addr;
        u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

        itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
        itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

        its_encode_cmd(cmd, GITS_CMD_MAPD);
        its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
        its_encode_size(cmd, size - 1);
        its_encode_itt(cmd, itt_addr);
        its_encode_valid(cmd, desc->its_mapd_cmd.valid);

        its_fixup_cmd(cmd);

        return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
                                                 struct its_cmd_desc *desc)
{
        its_encode_cmd(cmd, GITS_CMD_MAPC);
        its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
        its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
        its_encode_valid(cmd, desc->its_mapc_cmd.valid);

        its_fixup_cmd(cmd);

        return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
                                                  struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_mapti_cmd.dev,
                               desc->its_mapti_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_MAPTI);
        its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
        its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
        its_encode_collection(cmd, col->col_id);

        its_fixup_cmd(cmd);

        return col;
}

static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
                                                 struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_movi_cmd.dev,
                               desc->its_movi_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_MOVI);
        its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
        its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

        its_fixup_cmd(cmd);

        return col;
}

static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
                                                    struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_discard_cmd.dev,
                               desc->its_discard_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_DISCARD);
        its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

        its_fixup_cmd(cmd);

        return col;
}

static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
                                                struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_inv_cmd.dev,
                               desc->its_inv_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_INV);
        its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

        its_fixup_cmd(cmd);

        return col;
}

static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
                                                   struct its_cmd_desc *desc)
{
        its_encode_cmd(cmd, GITS_CMD_INVALL);
        its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

        its_fixup_cmd(cmd);

        return NULL;
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
                                 struct its_cmd_block *ptr)
{
        return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
        int widx;
        int ridx;

        widx = its->cmd_write - its->cmd_base;
        ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

        /* This is incredibly unlikely to happen, unless the ITS locks up. */
        if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
                return 1;

        return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
        struct its_cmd_block *cmd;
        u32 count = 1000000;    /* 1s! */

        while (its_queue_full(its)) {
                count--;
                if (!count) {
                        pr_err_ratelimited("ITS queue not draining\n");
                        return NULL;
                }
                cpu_relax();
                udelay(1);
        }

        cmd = its->cmd_write++;

        /* Handle queue wrapping */
        if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
                its->cmd_write = its->cmd_base;

        /* Clear command */
        cmd->raw_cmd[0] = 0;
        cmd->raw_cmd[1] = 0;
        cmd->raw_cmd[2] = 0;
        cmd->raw_cmd[3] = 0;

        return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
        u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

        writel_relaxed(wr, its->base + GITS_CWRITER);

        return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
        /*
         * Make sure the commands written to memory are observable by
         * the ITS.
         */
        if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
                gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
        else
                dsb(ishst);
}

static void its_wait_for_range_completion(struct its_node *its,
                                          struct its_cmd_block *from,
                                          struct its_cmd_block *to)
{
        u64 rd_idx, from_idx, to_idx;
        u32 count = 1000000;    /* 1s! */

        from_idx = its_cmd_ptr_to_offset(its, from);
        to_idx = its_cmd_ptr_to_offset(its, to);

        while (1) {
                rd_idx = readl_relaxed(its->base + GITS_CREADR);
                if (rd_idx >= to_idx || rd_idx < from_idx)
                        break;

                count--;
                if (!count) {
                        pr_err_ratelimited("ITS queue timeout\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}

static void its_send_single_command(struct its_node *its,
                                    its_cmd_builder_t builder,
                                    struct its_cmd_desc *desc)
{
        struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
        struct its_collection *sync_col;
        unsigned long flags;

        raw_spin_lock_irqsave(&its->lock, flags);

        cmd = its_allocate_entry(its);
        if (!cmd) {             /* We're soooooo screwed... */
                pr_err_ratelimited("ITS can't allocate, dropping command\n");
                raw_spin_unlock_irqrestore(&its->lock, flags);
                return;
        }
        sync_col = builder(cmd, desc);
        its_flush_cmd(its, cmd);

        if (sync_col) {
                sync_cmd = its_allocate_entry(its);
                if (!sync_cmd) {
                        pr_err_ratelimited("ITS can't SYNC, skipping\n");
                        goto post;
                }
                its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
                its_encode_target(sync_cmd, sync_col->target_address);
                its_fixup_cmd(sync_cmd);
                its_flush_cmd(its, sync_cmd);
        }

post:
        next_cmd = its_post_commands(its);
        raw_spin_unlock_irqrestore(&its->lock, flags);

        its_wait_for_range_completion(its, cmd, next_cmd);
}
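
/*
 * Thin wrappers: each fills in a descriptor for one command type and
 * funnels it through its_send_single_command().
 */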

static void its_send_inv(struct its_device *dev, u32 event_id)
{
        struct its_cmd_desc desc;

        desc.its_inv_cmd.dev = dev;
        desc.its_inv_cmd.event_id = event_id;

        its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
        struct its_cmd_desc desc;

        desc.its_mapd_cmd.dev = dev;
        desc.its_mapd_cmd.valid = !!valid;

        its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
                          int valid)
{
        struct its_cmd_desc desc;

        desc.its_mapc_cmd.col = col;
        desc.its_mapc_cmd.valid = !!valid;

        its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
        struct its_cmd_desc desc;

        desc.its_mapti_cmd.dev = dev;
        desc.its_mapti_cmd.phys_id = irq_id;
        desc.its_mapti_cmd.event_id = id;

        its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
                          struct its_collection *col, u32 id)
{
        struct its_cmd_desc desc;

        desc.its_movi_cmd.dev = dev;
        desc.its_movi_cmd.col = col;
        desc.its_movi_cmd.event_id = id;

        its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
        struct its_cmd_desc desc;

        desc.its_discard_cmd.dev = dev;
        desc.its_discard_cmd.event_id = id;

        its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
        struct its_cmd_desc desc;

        desc.its_invall_cmd.col = col;

        its_send_single_command(its, its_build_invall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

static inline u32 its_get_event_id(struct irq_data *d)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        return d->hwirq - its_dev->event_map.lpi_base;
}

static void lpi_set_config(struct irq_data *d, bool enable)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        irq_hw_number_t hwirq = d->hwirq;
        u32 id = its_get_event_id(d);
        u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;

        if (enable)
                *cfg |= LPI_PROP_ENABLED;
        else
                *cfg &= ~LPI_PROP_ENABLED;

        /*
         * Make the above write visible to the redistributors.
         * And yes, we're flushing exactly: One. Single. Byte.
         */
        if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
                gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
        else
                dsb(ishst);
        its_send_inv(its_dev, id);
}

static void its_mask_irq(struct irq_data *d)
{
        lpi_set_config(d, false);
}

static void its_unmask_irq(struct irq_data *d)
{
        lpi_set_config(d, true);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu;
        const struct cpumask *cpu_mask = cpu_online_mask;
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        struct its_collection *target_col;
        u32 id = its_get_event_id(d);

        /* An LPI cannot be routed to a redistributor on a foreign node */
        if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
                if (its_dev->its->numa_node >= 0) {
                        cpu_mask = cpumask_of_node(its_dev->its->numa_node);
                        if (!cpumask_intersects(mask_val, cpu_mask))
                                return -EINVAL;
                }
        }

        cpu = cpumask_any_and(mask_val, cpu_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        /* Don't issue a MOVI if the target CPU is already the current one */
        if (cpu != its_dev->event_map.col_map[id]) {
                target_col = &its_dev->its->collections[cpu];
                its_send_movi(its_dev, target_col, id);
                its_dev->event_map.col_map[id] = cpu;
        }

        return IRQ_SET_MASK_OK_DONE;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        struct its_node *its;
        u64 addr;

        its = its_dev->its;
        addr = its->phys_base + GITS_TRANSLATER;

        msg->address_lo         = lower_32_bits(addr);
        msg->address_hi         = upper_32_bits(addr);
        msg->data               = its_get_event_id(d);

        iommu_dma_map_msi_msg(d->irq, msg);
}

static struct irq_chip its_irq_chip = {
        .name                   = "ITS",
        .irq_mask               = its_mask_irq,
        .irq_unmask             = its_unmask_irq,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = its_set_affinity,
        .irq_compose_msi_msg    = its_irq_compose_msi_msg,
};

/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 * bits and deal with chunk numbers instead.
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */
#define IRQS_PER_CHUNK_SHIFT    5
#define IRQS_PER_CHUNK          (1 << IRQS_PER_CHUNK_SHIFT)
#define ITS_MAX_LPI_NRBITS      16 /* 64K LPIs */
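
/*
 * Worked example: with id_bits = 16 this is ((1UL << 16) - 8192) >> 5
 * = 1792 chunks, i.e. 57344 LPIs handed out 32 at a time.
 */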

static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static u32 lpi_id_bits;
static DEFINE_SPINLOCK(lpi_lock);

static int its_lpi_to_chunk(int lpi)
{
        return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

static int its_chunk_to_lpi(int chunk)
{
        return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}

static int __init its_lpi_init(u32 id_bits)
{
        lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

        lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
                             GFP_KERNEL);
        if (!lpi_bitmap) {
                lpi_chunks = 0;
                return -ENOMEM;
        }

        pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
        return 0;
}

static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
        unsigned long *bitmap = NULL;
        int chunk_id;
        int nr_chunks;
        int i;

        nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

        spin_lock(&lpi_lock);

        do {
                chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
                                                      0, nr_chunks, 0);
                if (chunk_id < lpi_chunks)
                        break;

                nr_chunks--;
        } while (nr_chunks > 0);

        if (!nr_chunks)
                goto out;

        bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof(long),
                         GFP_ATOMIC);
        if (!bitmap)
                goto out;

        for (i = 0; i < nr_chunks; i++)
                set_bit(chunk_id + i, lpi_bitmap);

        *base = its_chunk_to_lpi(chunk_id);
        *nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
        spin_unlock(&lpi_lock);

        if (!bitmap)
                *base = *nr_ids = 0;

        return bitmap;
}

static void its_lpi_free(struct event_lpi_map *map)
{
        int base = map->lpi_base;
        int nr_ids = map->nr_lpis;
        int lpi;

        spin_lock(&lpi_lock);

        for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
                int chunk = its_lpi_to_chunk(lpi);

                BUG_ON(chunk > lpi_chunks);
                if (test_bit(chunk, lpi_bitmap)) {
                        clear_bit(chunk, lpi_bitmap);
                } else {
                        pr_err("Bad LPI chunk %d\n", chunk);
                }
        }

        spin_unlock(&lpi_lock);

        kfree(map->lpi_map);
        kfree(map->col_map);
}

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS              lpi_id_bits
#define LPI_PROPBASE_SZ         ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ         ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

#define LPI_PROP_DEFAULT_PRIO   0xa0
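
/*
 * Example sizing: with lpi_id_bits = 16, PROPBASE needs 64K (one config
 * byte per ID) while PENDBASE needs 8K of pending bits, rounded up to 64K.
 */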

static int __init its_alloc_lpi_tables(void)
{
        phys_addr_t paddr;

        lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
        gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
                                            get_order(LPI_PROPBASE_SZ));
        if (!gic_rdists->prop_page) {
                pr_err("Failed to allocate PROPBASE\n");
                return -ENOMEM;
        }

        paddr = page_to_phys(gic_rdists->prop_page);
        pr_info("GIC: using LPI property table @%pa\n", &paddr);

        /* Priority 0xa0, Group-1, disabled */
        memset(page_address(gic_rdists->prop_page),
               LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
               LPI_PROPBASE_SZ);

        /* Make sure the GIC will observe the written configuration */
        gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page),
                                LPI_PROPBASE_SZ);

        return its_lpi_init(lpi_id_bits);
}

static const char *its_base_type_string[] = {
        [GITS_BASER_TYPE_DEVICE]        = "Devices",
        [GITS_BASER_TYPE_VCPU]          = "Virtual CPUs",
        [GITS_BASER_TYPE_RESERVED3]     = "Reserved (3)",
        [GITS_BASER_TYPE_COLLECTION]    = "Interrupt Collections",
        [GITS_BASER_TYPE_RESERVED5]     = "Reserved (5)",
        [GITS_BASER_TYPE_RESERVED6]     = "Reserved (6)",
        [GITS_BASER_TYPE_RESERVED7]     = "Reserved (7)",
};

static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
        u32 idx = baser - its->tables;

        return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

static void its_write_baser(struct its_node *its, struct its_baser *baser,
                            u64 val)
{
        u32 idx = baser - its->tables;

        gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
        baser->val = its_read_baser(its, baser);
}
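
/*
 * Note that its_write_baser() reads the register back, so baser->val
 * always caches what the hardware actually accepted, not what was asked
 * for. The fallback logic in its_setup_baser() relies on this.
 */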

static int its_setup_baser(struct its_node *its, struct its_baser *baser,
                           u64 cache, u64 shr, u32 psz, u32 order,
                           bool indirect)
{
        u64 val = its_read_baser(its, baser);
        u64 esz = GITS_BASER_ENTRY_SIZE(val);
        u64 type = GITS_BASER_TYPE(val);
        u32 alloc_pages;
        void *base;
        u64 tmp;

retry_alloc_baser:
        alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
        if (alloc_pages > GITS_BASER_PAGES_MAX) {
                pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
                        &its->phys_base, its_base_type_string[type],
                        alloc_pages, GITS_BASER_PAGES_MAX);
                alloc_pages = GITS_BASER_PAGES_MAX;
                order = get_order(GITS_BASER_PAGES_MAX * psz);
        }

        base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!base)
                return -ENOMEM;

retry_baser:
        val = (virt_to_phys(base)                                |
                (type << GITS_BASER_TYPE_SHIFT)                  |
                ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)       |
                ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)    |
                cache                                            |
                shr                                              |
                GITS_BASER_VALID);

        val |= indirect ? GITS_BASER_INDIRECT : 0x0;

        switch (psz) {
        case SZ_4K:
                val |= GITS_BASER_PAGE_SIZE_4K;
                break;
        case SZ_16K:
                val |= GITS_BASER_PAGE_SIZE_16K;
                break;
        case SZ_64K:
                val |= GITS_BASER_PAGE_SIZE_64K;
                break;
        }

        its_write_baser(its, baser, val);
        tmp = baser->val;

        if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
                /*
                 * Shareability didn't stick. Just use
                 * whatever the read reported, which is likely
                 * to be the only thing this redistributor
                 * supports. If that's zero, make it
                 * non-cacheable as well.
                 */
                shr = tmp & GITS_BASER_SHAREABILITY_MASK;
                if (!shr) {
                        cache = GITS_BASER_nC;
                        gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
                }
                goto retry_baser;
        }

        if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
                /*
                 * Page size didn't stick. Let's try a smaller
                 * size and retry. If we reach 4K, then
                 * something is horribly wrong...
                 */
                free_pages((unsigned long)base, order);
                baser->base = NULL;

                switch (psz) {
                case SZ_16K:
                        psz = SZ_4K;
                        goto retry_alloc_baser;
                case SZ_64K:
                        psz = SZ_16K;
                        goto retry_alloc_baser;
                }
        }

        if (val != tmp) {
                pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
                       &its->phys_base, its_base_type_string[type],
                       val, tmp);
                free_pages((unsigned long)base, order);
                return -ENXIO;
        }

        baser->order = order;
        baser->base = base;
        baser->psz = psz;
        tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

        pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
                &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
                its_base_type_string[type],
                (unsigned long)virt_to_phys(base),
                indirect ? "indirect" : "flat", (int)esz,
                psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

        return 0;
}
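
/*
 * Probing for a two-level Device table: the Indirect bit (bit 62 of
 * GITS_BASER) is RAZ/WI when only flat tables are supported, so writing
 * it and reading it back tells us what the ITS can do.
 */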

static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
                                   u32 psz, u32 *order)
{
        u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
        u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
        u32 ids = its->device_ids;
        u32 new_order = *order;
        bool indirect = false;

        /* No need to enable Indirection if memory requirement < (psz*2)bytes */
        if ((esz << ids) > (psz * 2)) {
                /*
                 * Find out whether hw supports a single or two-level table
                 * by reading bit at offset '62' after writing '1' to it.
                 */
                its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
                indirect = !!(baser->val & GITS_BASER_INDIRECT);

                if (indirect) {
                        /*
                         * The size of the lvl2 table is equal to ITS page size
                         * which is 'psz'. For computing lvl1 table size,
                         * subtract ID bits that sparse lvl2 table from 'ids'
                         * which is reported by ITS hardware times lvl1 table
                         * entry size.
                         */
                        ids -= ilog2(psz / (int)esz);
                        esz = GITS_LVL1_ENTRY_SIZE;
                }
        }

        /*
         * Allocate as many entries as required to fit the
         * range of device IDs that the ITS can grok... The ID
         * space being incredibly sparse, this results in a
         * massive waste of memory if two-level device table
         * feature is not supported by hardware.
         */
        new_order = max_t(u32, get_order(esz << ids), new_order);
        if (new_order >= MAX_ORDER) {
                new_order = MAX_ORDER - 1;
                ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
                pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
                        &its->phys_base, its->device_ids, ids);
        }

        *order = new_order;

        return indirect;
}

static void its_free_tables(struct its_node *its)
{
        int i;

        for (i = 0; i < GITS_BASER_NR_REGS; i++) {
                if (its->tables[i].base) {
                        free_pages((unsigned long)its->tables[i].base,
                                   its->tables[i].order);
                        its->tables[i].base = NULL;
                }
        }
}

static int its_alloc_tables(struct its_node *its)
{
        u64 typer = gic_read_typer(its->base + GITS_TYPER);
        u32 ids = GITS_TYPER_DEVBITS(typer);
        u64 shr = GITS_BASER_InnerShareable;
        u64 cache = GITS_BASER_RaWaWb;
        u32 psz = SZ_64K;
        int err, i;

        if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
                /*
                 * erratum 22375: only alloc 8MB table size
                 * erratum 24313: ignore memory access type
                 */
                cache = GITS_BASER_nCnB;
                ids = 0x14;             /* 20 bits, 8MB */
        }

        its->device_ids = ids;

        for (i = 0; i < GITS_BASER_NR_REGS; i++) {
                struct its_baser *baser = its->tables + i;
                u64 val = its_read_baser(its, baser);
                u64 type = GITS_BASER_TYPE(val);
                u32 order = get_order(psz);
                bool indirect = false;

                if (type == GITS_BASER_TYPE_NONE)
                        continue;

                if (type == GITS_BASER_TYPE_DEVICE)
                        indirect = its_parse_baser_device(its, baser, psz, &order);

                err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
                if (err < 0) {
                        its_free_tables(its);
                        return err;
                }

                /* Update settings which will be used for next BASERn */
                psz = baser->psz;
                cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
                shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
        }

        return 0;
}

static int its_alloc_collections(struct its_node *its)
{
        its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
                                   GFP_KERNEL);
        if (!its->collections)
                return -ENOMEM;

        return 0;
}
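
/*
 * Per-CPU redistributor setup below, run via its_cpu_init() on each CPU
 * as it is brought online.
 */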

static void its_cpu_init_lpis(void)
{
        void __iomem *rbase = gic_data_rdist_rd_base();
        struct page *pend_page;
        u64 val, tmp;

        /* If we didn't allocate the pending table yet, do it now */
        pend_page = gic_data_rdist()->pend_page;
        if (!pend_page) {
                phys_addr_t paddr;
                /*
                 * The pending pages have to be at least 64kB aligned,
                 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
                 */
                pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
                                        get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
                if (!pend_page) {
                        pr_err("Failed to allocate PENDBASE for CPU%d\n",
                               smp_processor_id());
                        return;
                }

                /* Make sure the GIC will observe the zero-ed page */
                gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);

                paddr = page_to_phys(pend_page);
                pr_info("CPU%d: using LPI pending table @%pa\n",
                        smp_processor_id(), &paddr);
                gic_data_rdist()->pend_page = pend_page;
        }

        /* Disable LPIs */
        val = readl_relaxed(rbase + GICR_CTLR);
        val &= ~GICR_CTLR_ENABLE_LPIS;
        writel_relaxed(val, rbase + GICR_CTLR);

        /*
         * Make sure any change to the table is observable by the GIC.
         */
        dsb(sy);

        /* set PROPBASE */
        val = (page_to_phys(gic_rdists->prop_page) |
               GICR_PROPBASER_InnerShareable |
               GICR_PROPBASER_RaWaWb |
               ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

        gicr_write_propbaser(val, rbase + GICR_PROPBASER);
        tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);

        if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
                if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
                        /*
                         * The HW reports non-shareable, we must
                         * remove the cacheability attributes as
                         * well.
                         */
                        val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
                                 GICR_PROPBASER_CACHEABILITY_MASK);
                        val |= GICR_PROPBASER_nC;
                        gicr_write_propbaser(val, rbase + GICR_PROPBASER);
                }
                pr_info_once("GIC: using cache flushing for LPI property table\n");
                gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
        }

        /* set PENDBASE */
        val = (page_to_phys(pend_page) |
               GICR_PENDBASER_InnerShareable |
               GICR_PENDBASER_RaWaWb);

        gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
        tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);

        if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
                /*
                 * The HW reports non-shareable, we must remove the
                 * cacheability attributes as well.
                 */
                val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
                         GICR_PENDBASER_CACHEABILITY_MASK);
                val |= GICR_PENDBASER_nC;
                gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
        }

        /* Enable LPIs */
        val = readl_relaxed(rbase + GICR_CTLR);
        val |= GICR_CTLR_ENABLE_LPIS;
        writel_relaxed(val, rbase + GICR_CTLR);

        /* Make sure the GIC has seen the above */
        dsb(sy);
}

static void its_cpu_init_collection(void)
{
        struct its_node *its;
        int cpu;

        spin_lock(&its_lock);
        cpu = smp_processor_id();

        list_for_each_entry(its, &its_nodes, entry) {
                u64 target;

                /* avoid cross node collections and its mapping */
                if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
                        struct device_node *cpu_node;

                        cpu_node = of_get_cpu_node(cpu, NULL);
                        if (its->numa_node != NUMA_NO_NODE &&
                            its->numa_node != of_node_to_nid(cpu_node))
                                continue;
                }

                /*
                 * We now have to bind each collection to its target
                 * redistributor.
                 */
                if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
                        /*
                         * This ITS wants the physical address of the
                         * redistributor.
                         */
                        target = gic_data_rdist()->phys_base;
                } else {
                        /*
                         * This ITS wants a linear CPU number.
                         */
                        target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
                        target = GICR_TYPER_CPU_NUMBER(target) << 16;
                }

                /* Perform collection mapping */
                its->collections[cpu].target_address = target;
                its->collections[cpu].col_id = cpu;

                its_send_mapc(its, &its->collections[cpu], 1);
                its_send_invall(its, &its->collections[cpu]);
        }

        spin_unlock(&its_lock);
}

static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
        struct its_device *its_dev = NULL, *tmp;
        unsigned long flags;

        raw_spin_lock_irqsave(&its->lock, flags);

        list_for_each_entry(tmp, &its->its_device_list, entry) {
                if (tmp->device_id == dev_id) {
                        its_dev = tmp;
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&its->lock, flags);

        return its_dev;
}

static struct its_baser *its_get_baser(struct its_node *its, u32 type)
{
        int i;

        for (i = 0; i < GITS_BASER_NR_REGS; i++) {
                if (GITS_BASER_TYPE(its->tables[i].val) == type)
                        return &its->tables[i];
        }

        return NULL;
}

static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
{
        struct its_baser *baser;
        struct page *page;
        u32 esz, idx;
        __le64 *table;

        baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);

        /* Don't allow device id that exceeds ITS hardware limit */
        if (!baser)
                return (ilog2(dev_id) < its->device_ids);

        /* Don't allow device id that exceeds single, flat table limit */
        esz = GITS_BASER_ENTRY_SIZE(baser->val);
        if (!(baser->val & GITS_BASER_INDIRECT))
                return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));

        /* Compute 1st level table index & check if that exceeds table limit */
        idx = dev_id >> ilog2(baser->psz / esz);
        if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
                return false;

        table = baser->base;

        /* Allocate memory for 2nd level table */
        if (!table[idx]) {
                page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
                if (!page)
                        return false;

                /* Flush Lvl2 table to PoC if hw doesn't support coherency */
                if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
                        gic_flush_dcache_to_poc(page_address(page), baser->psz);

                table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);

                /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
                if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
                        gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);

                /* Ensure updated table contents are visible to ITS hardware */
                dsb(sy);
        }

        return true;
}

static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
                                            int nvecs)
{
        struct its_device *dev;
        unsigned long *lpi_map;
        unsigned long flags;
        u16 *col_map = NULL;
        void *itt;
        int lpi_base;
        int nr_lpis;
        int nr_ites;
        int sz;

        if (!its_alloc_device_table(its, dev_id))
                return NULL;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        /*
         * At least one bit of EventID is being used, hence a minimum
         * of two entries. No, the architecture doesn't let you
         * express an ITT with a single entry.
         */
        nr_ites = max(2UL, roundup_pow_of_two(nvecs));
        sz = nr_ites * its->ite_size;
        sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
        itt = kzalloc(sz, GFP_KERNEL);
        lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
        if (lpi_map)
                col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);

        if (!dev || !itt || !lpi_map || !col_map) {
                kfree(dev);
                kfree(itt);
                kfree(lpi_map);
                kfree(col_map);
                return NULL;
        }

        gic_flush_dcache_to_poc(itt, sz);

        dev->its = its;
        dev->itt = itt;
        dev->nr_ites = nr_ites;
        dev->event_map.lpi_map = lpi_map;
        dev->event_map.col_map = col_map;
        dev->event_map.lpi_base = lpi_base;
        dev->event_map.nr_lpis = nr_lpis;
        dev->device_id = dev_id;
        INIT_LIST_HEAD(&dev->entry);

        raw_spin_lock_irqsave(&its->lock, flags);
        list_add(&dev->entry, &its->its_device_list);
        raw_spin_unlock_irqrestore(&its->lock, flags);

        /* Map device to its ITT */
        its_send_mapd(dev, 1);

        return dev;
}

static void its_free_device(struct its_device *its_dev)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&its_dev->its->lock, flags);
        list_del(&its_dev->entry);
        raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
        kfree(its_dev->itt);
        kfree(its_dev);
}

static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
{
        int idx;

        idx = find_first_zero_bit(dev->event_map.lpi_map,
                                  dev->event_map.nr_lpis);
        if (idx == dev->event_map.nr_lpis)
                return -ENOSPC;

        *hwirq = dev->event_map.lpi_base + idx;
        set_bit(idx, dev->event_map.lpi_map);

        return 0;
}
*domain
, struct device
*dev
,
1397 int nvec
, msi_alloc_info_t
*info
)
1399 struct its_node
*its
;
1400 struct its_device
*its_dev
;
1401 struct msi_domain_info
*msi_info
;
1405 * We ignore "dev" entierely, and rely on the dev_id that has
1406 * been passed via the scratchpad. This limits this domain's
1407 * usefulness to upper layers that definitely know that they
1408 * are built on top of the ITS.
1410 dev_id
= info
->scratchpad
[0].ul
;
1412 msi_info
= msi_get_domain_info(domain
);
1413 its
= msi_info
->data
;
1415 its_dev
= its_find_device(its
, dev_id
);
1418 * We already have seen this ID, probably through
1419 * another alias (PCI bridge of some sort). No need to
1420 * create the device.
1422 pr_debug("Reusing ITT for devID %x\n", dev_id
);
1426 its_dev
= its_create_device(its
, dev_id
, nvec
);
1430 pr_debug("ITT %d entries, %d bits\n", nvec
, ilog2(nvec
));
1432 info
->scratchpad
[0].ptr
= its_dev
;
1436 static struct msi_domain_ops its_msi_domain_ops
= {
1437 .msi_prepare
= its_msi_prepare
,

static int its_irq_gic_domain_alloc(struct irq_domain *domain,
                                    unsigned int virq,
                                    irq_hw_number_t hwirq)
{
        struct irq_fwspec fwspec;

        if (irq_domain_get_of_node(domain->parent)) {
                fwspec.fwnode = domain->parent->fwnode;
                fwspec.param_count = 3;
                fwspec.param[0] = GIC_IRQ_TYPE_LPI;
                fwspec.param[1] = hwirq;
                fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
        } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
                fwspec.fwnode = domain->parent->fwnode;
                fwspec.param_count = 2;
                fwspec.param[0] = hwirq;
                fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
        } else {
                return -EINVAL;
        }

        return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
}

static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *args)
{
        msi_alloc_info_t *info = args;
        struct its_device *its_dev = info->scratchpad[0].ptr;
        irq_hw_number_t hwirq;
        int err;
        int i;

        for (i = 0; i < nr_irqs; i++) {
                err = its_alloc_device_irq(its_dev, &hwirq);
                if (err)
                        return err;

                err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
                if (err)
                        return err;

                irq_domain_set_hwirq_and_chip(domain, virq + i,
                                              hwirq, &its_irq_chip, its_dev);
                pr_debug("ID:%d pID:%d vID:%d\n",
                         (int)(hwirq - its_dev->event_map.lpi_base),
                         (int)hwirq, virq + i);
        }

        return 0;
}

static void its_irq_domain_activate(struct irq_domain *domain,
                                    struct irq_data *d)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
        const struct cpumask *cpu_mask = cpu_online_mask;

        /* get the cpu_mask of local node */
        if (its_dev->its->numa_node >= 0)
                cpu_mask = cpumask_of_node(its_dev->its->numa_node);

        /* Bind the LPI to the first possible CPU */
        its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);

        /* Map the GIC IRQ and event to the device */
        its_send_mapti(its_dev, d->hwirq, event);
}

static void its_irq_domain_deactivate(struct irq_domain *domain,
                                      struct irq_data *d)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);

        /* Stop the delivery of interrupts */
        its_send_discard(its_dev, event);
}

static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        int i;

        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *data = irq_domain_get_irq_data(domain,
                                                                virq + i);
                u32 event = its_get_event_id(data);

                /* Mark interrupt index as unused */
                clear_bit(event, its_dev->event_map.lpi_map);

                /* Nuke the entry in the domain */
                irq_domain_reset_irq_data(data);
        }

        /* If all interrupts have been freed, start mopping the floor */
        if (bitmap_empty(its_dev->event_map.lpi_map,
                         its_dev->event_map.nr_lpis)) {
                its_lpi_free(&its_dev->event_map);

                /* Unmap device/itt */
                its_send_mapd(its_dev, 0);
                its_free_device(its_dev);
        }

        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops its_domain_ops = {
        .alloc                  = its_irq_domain_alloc,
        .free                   = its_irq_domain_free,
        .activate               = its_irq_domain_activate,
        .deactivate             = its_irq_domain_deactivate,
};

static int its_force_quiescent(void __iomem *base)
{
        u32 count = 1000000;    /* 1s */
        u32 val;

        val = readl_relaxed(base + GITS_CTLR);
        /*
         * GIC architecture specification requires the ITS to be both
         * disabled and quiescent for writes to GITS_BASER<n> or
         * GITS_CBASER to not have UNPREDICTABLE results.
         */
        if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
                return 0;

        /* Disable the generation of all interrupts to this ITS */
        val &= ~GITS_CTLR_ENABLE;
        writel_relaxed(val, base + GITS_CTLR);

        /* Poll GITS_CTLR and wait until ITS becomes quiescent */
        while (1) {
                val = readl_relaxed(base + GITS_CTLR);
                if (val & GITS_CTLR_QUIESCENT)
                        return 0;

                count--;
                if (!count)
                        return -EBUSY;

                cpu_relax();
                udelay(1);
        }
}

static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
        struct its_node *its = data;

        its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}

static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
        struct its_node *its = data;

        its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
}

static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
        struct its_node *its = data;

        /* On QDF2400, the size of the ITE is 16Bytes */
        its->ite_size = 16;
}

static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
        {
                .desc   = "ITS: Cavium errata 22375, 24313",
                .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
                .mask   = 0xffff0fff,
                .init   = its_enable_quirk_cavium_22375,
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
        {
                .desc   = "ITS: Cavium erratum 23144",
                .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
                .mask   = 0xffff0fff,
                .init   = its_enable_quirk_cavium_23144,
        },
#endif
#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
        {
                .desc   = "ITS: QDF2400 erratum 0065",
                .iidr   = 0x00001070,   /* QDF2400 ITS rev 1.x */
                .mask   = 0xffffffff,
                .init   = its_enable_quirk_qdf2400_e0065,
        },
#endif
        {
        }
};

static void its_enable_quirks(struct its_node *its)
{
        u32 iidr = readl_relaxed(its->base + GITS_IIDR);

        gic_enable_quirks(iidr, its_quirks, its);
}

static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
        struct irq_domain *inner_domain;
        struct msi_domain_info *info;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
        if (!inner_domain) {
                kfree(info);
                return -ENOMEM;
        }

        inner_domain->parent = its_parent;
        irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
        inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
        info->ops = &its_msi_domain_ops;
        info->data = its;
        inner_domain->host_data = info;

        return 0;
}

static int __init its_probe_one(struct resource *res,
                                struct fwnode_handle *handle, int numa_node)
{
        struct its_node *its;
        void __iomem *its_base;
        u32 val;
        u64 baser, tmp;
        int err;

        its_base = ioremap(res->start, resource_size(res));
        if (!its_base) {
                pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
                return -ENOMEM;
        }

        val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
        if (val != 0x30 && val != 0x40) {
                pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
                err = -ENODEV;
                goto out_unmap;
        }

        err = its_force_quiescent(its_base);
        if (err) {
                pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
                goto out_unmap;
        }

        pr_info("ITS %pR\n", res);

        its = kzalloc(sizeof(*its), GFP_KERNEL);
        if (!its) {
                err = -ENOMEM;
                goto out_unmap;
        }

        raw_spin_lock_init(&its->lock);
        INIT_LIST_HEAD(&its->entry);
        INIT_LIST_HEAD(&its->its_device_list);
        its->base = its_base;
        its->phys_base = res->start;
        its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
        its->numa_node = numa_node;

        its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 get_order(ITS_CMD_QUEUE_SZ));
        if (!its->cmd_base) {
                err = -ENOMEM;
                goto out_free_its;
        }
        its->cmd_write = its->cmd_base;

        its_enable_quirks(its);

        err = its_alloc_tables(its);
        if (err)
                goto out_free_cmd;

        err = its_alloc_collections(its);
        if (err)
                goto out_free_tables;

        baser = (virt_to_phys(its->cmd_base)    |
                 GITS_CBASER_RaWaWb             |
                 GITS_CBASER_InnerShareable     |
                 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
                 GITS_CBASER_VALID);

        gits_write_cbaser(baser, its->base + GITS_CBASER);
        tmp = gits_read_cbaser(its->base + GITS_CBASER);

        if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
                if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
                        /*
                         * The HW reports non-shareable, we must
                         * remove the cacheability attributes as
                         * well.
                         */
                        baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
                                   GITS_CBASER_CACHEABILITY_MASK);
                        baser |= GITS_CBASER_nC;
                        gits_write_cbaser(baser, its->base + GITS_CBASER);
                }
                pr_info("ITS: using cache flushing for cmd queue\n");
                its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
        }

        gits_write_cwriter(0, its->base + GITS_CWRITER);
        writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);

        err = its_init_domain(handle, its);
        if (err)
                goto out_free_tables;

        spin_lock(&its_lock);
        list_add(&its->entry, &its_nodes);
        spin_unlock(&its_lock);

        return 0;

out_free_tables:
        its_free_tables(its);
out_free_cmd:
        free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_free_its:
        kfree(its);
out_unmap:
        iounmap(its_base);
        pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
        return err;
}

static bool gic_rdists_supports_plpis(void)
{
        return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

int its_cpu_init(void)
{
        if (!list_empty(&its_nodes)) {
                if (!gic_rdists_supports_plpis()) {
                        pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
                        return -ENXIO;
                }
                its_cpu_init_lpis();
                its_cpu_init_collection();
        }

        return 0;
}

static const struct of_device_id its_device_id[] = {
        {       .compatible     = "arm,gic-v3-its",     },
        {},
};

static int __init its_of_probe(struct device_node *node)
{
        struct device_node *np;
        struct resource res;

        for (np = of_find_matching_node(node, its_device_id); np;
             np = of_find_matching_node(np, its_device_id)) {
                if (!of_property_read_bool(np, "msi-controller")) {
                        pr_warn("%s: no msi-controller property, ITS ignored\n",
                                np->full_name);
                        continue;
                }

                if (of_address_to_resource(np, 0, &res)) {
                        pr_warn("%s: no regs?\n", np->full_name);
                        continue;
                }

                its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
        }
        return 0;
}

#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)

#if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531)
struct its_srat_map {
        /* numa node id */
        int     numa_node;
        /* GIC ITS ID */
        u32     its_id;
};

static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
        int i;

        for (i = 0; i < its_in_srat; i++) {
                if (its_id == its_srat_maps[i].its_id)
                        return its_srat_maps[i].numa_node;
        }
        return NUMA_NO_NODE;
}

static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
                                          const unsigned long end)
{
        int node;
        struct acpi_srat_gic_its_affinity *its_affinity;

        its_affinity = (struct acpi_srat_gic_its_affinity *)header;
        if (!its_affinity)
                return -EINVAL;

        if (its_affinity->header.length < sizeof(*its_affinity)) {
                pr_err("SRAT: Invalid header length %d in ITS affinity\n",
                       its_affinity->header.length);
                return -EINVAL;
        }

        if (its_in_srat >= MAX_NUMNODES) {
                pr_err("SRAT: ITS affinity exceeding max count[%d]\n",
                       MAX_NUMNODES);
                return -EINVAL;
        }

        node = acpi_map_pxm_to_node(its_affinity->proximity_domain);

        if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
                pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
                return 0;
        }

        its_srat_maps[its_in_srat].numa_node = node;
        its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
        its_in_srat++;
        pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
                its_affinity->proximity_domain, its_affinity->its_id, node);

        return 0;
}

static void __init acpi_table_parse_srat_its(void)
{
        acpi_table_parse_entries(ACPI_SIG_SRAT,
                        sizeof(struct acpi_table_srat),
                        ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
                        gic_acpi_parse_srat_its, 0);
}
#else
static void __init acpi_table_parse_srat_its(void) { }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
#endif

static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
                                          const unsigned long end)
{
        struct acpi_madt_generic_translator *its_entry;
        struct fwnode_handle *dom_handle;
        struct resource res;
        int err;

        its_entry = (struct acpi_madt_generic_translator *)header;
        memset(&res, 0, sizeof(res));
        res.start = its_entry->base_address;
        res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
        res.flags = IORESOURCE_MEM;

        dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
        if (!dom_handle) {
                pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
                       &res.start);
                return -ENOMEM;
        }

        err = iort_register_domain_token(its_entry->translation_id, dom_handle);
        if (err) {
                pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
                       &res.start, its_entry->translation_id);
                goto dom_err;
        }

        err = its_probe_one(&res, dom_handle,
                            acpi_get_its_numa_node(its_entry->translation_id));
        if (!err)
                return 0;

        iort_deregister_domain_token(its_entry->translation_id);
dom_err:
        irq_domain_free_fwnode(dom_handle);
        return err;
}

static void __init its_acpi_probe(void)
{
        acpi_table_parse_srat_its();
        acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
                              gic_acpi_parse_madt_its, 0);
}
#else
static void __init its_acpi_probe(void) { }
#endif

int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
                    struct irq_domain *parent_domain)
{
        struct device_node *of_node;

        its_parent = parent_domain;
        of_node = to_of_node(handle);
        if (of_node)
                its_of_probe(of_node);
        else
                its_acpi_probe();

        if (list_empty(&its_nodes)) {
                pr_warn("ITS: No ITS available, not enabling LPIs\n");
                return -ENXIO;
        }

        gic_rdists = rdists;
        return its_alloc_lpi_tables();
}