1 /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2016,2017 IBM Corporation.
 */
5 #ifndef __XIVE_INTERNAL_H
6 #define __XIVE_INTERNAL_H
8 /* Each CPU carry one of these with various per-CPU state */
11 /* HW irq number and data of IPI */
13 struct xive_irq_data ipi_data
;
14 #endif /* CONFIG_SMP */
18 /* Queue datas. Only one is populated */
19 #define XIVE_MAX_QUEUES 8
20 struct xive_q queue
[XIVE_MAX_QUEUES
];
23 * Pending mask. Each bit corresponds to a priority that
24 * potentially has pending interrupts.
28 /* Cache of HW CPPR */
34 int (*populate_irq_data
)(u32 hw_irq
, struct xive_irq_data
*data
);
35 int (*configure_irq
)(u32 hw_irq
, u32 target
, u8 prio
, u32 sw_irq
);
36 int (*get_irq_config
)(u32 hw_irq
, u32
*target
, u8
*prio
,
38 int (*setup_queue
)(unsigned int cpu
, struct xive_cpu
*xc
, u8 prio
);
39 void (*cleanup_queue
)(unsigned int cpu
, struct xive_cpu
*xc
, u8 prio
);
40 void (*setup_cpu
)(unsigned int cpu
, struct xive_cpu
*xc
);
41 void (*teardown_cpu
)(unsigned int cpu
, struct xive_cpu
*xc
);
42 bool (*match
)(struct device_node
*np
);
43 void (*shutdown
)(void);
45 void (*update_pending
)(struct xive_cpu
*xc
);
46 void (*eoi
)(u32 hw_irq
);
47 void (*sync_source
)(u32 hw_irq
);
48 u64 (*esb_rw
)(u32 hw_irq
, u32 offset
, u64 data
, bool write
);
50 int (*get_ipi
)(unsigned int cpu
, struct xive_cpu
*xc
);
51 void (*put_ipi
)(unsigned int cpu
, struct xive_cpu
*xc
);
56 bool xive_core_init(const struct xive_ops
*ops
, void __iomem
*area
, u32 offset
,
58 __be32
*xive_queue_page_alloc(unsigned int cpu
, u32 queue_shift
);
60 static inline u32
xive_alloc_order(u32 queue_shift
)
62 return (queue_shift
> PAGE_SHIFT
) ? (queue_shift
- PAGE_SHIFT
) : 0;
/* NOTE(review): presumably set when XIVE is disabled on the kernel
 * command line; defined in a .c file — confirm. */
extern bool xive_cmdline_disabled;
67 #endif /* __XIVE_INTERNAL_H */