/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_hw.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 */

#ifndef __EHEA_HW_H__
#define __EHEA_HW_H__

#define QPX_SQA_VALUE   EHEA_BMASK_IBM(48, 63)
#define QPX_RQ1A_VALUE  EHEA_BMASK_IBM(48, 63)
#define QPX_RQ2A_VALUE  EHEA_BMASK_IBM(48, 63)
#define QPX_RQ3A_VALUE  EHEA_BMASK_IBM(48, 63)

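/*
 * These masks use IBM bit numbering: bit 0 is the most significant bit
 * of the 64-bit register and bit 63 the least significant, so
 * EHEA_BMASK_IBM(48, 63) selects the low-order 16 bits, matching the
 * u16 WQE counts passed to the doorbell helpers below. A minimal
 * sketch, assuming the EHEA_BMASK_* helpers defined in ehea.h:
 *
 *	u64 v = EHEA_BMASK_SET(QPX_SQA_VALUE, 5);	yields v == 0x5
 */
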
#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)

	u64 qpx_reserved1[(0x098 - 0x058) / 8];
	u64 qpx_reserved2[(0x100 - 0x0A0) / 8];
	u64 qpx_reserved3[(0x140 - 0x118) / 8];
	u64 qpx_reserved4[(0x170 - 0x148) / 8];
	u64 qpx_reserved5[(0x1B0 - 0x178) / 8];
	u64 qpx_reserved6[(0x220 - 0x1D8) / 8];
	u64 qpx_reserved7[(0x240 - 0x228) / 8];
	u64 qpx_reserved8[(0x300 - 0x270) / 8];
	u64 qpx_reserved9[(0x400 - 0x378) / 8];
	u64 reserved_ext[(0x500 - 0x400) / 8];
	u64 reserved2[(0x1000 - 0x500) / 8];

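/*
 * The reserved members above are sized from register offsets: for
 * example, u64 qpx_reserved1[(0x098 - 0x058) / 8] pads the structure
 * from byte offset 0x058 up to 0x098 with (0x98 - 0x58) / 8 = 8 unused
 * 64-bit words, keeping the members that follow at their
 * hardware-defined offsets within the register page.
 */
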
#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0)

#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x)

	u64 reserved4[(0x200 - 0x40) / 8];

#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x)

	u64 reserved0[(0x400) / 8];

#define CQX_FECADDER EHEA_BMASK_IBM(32, 63)
#define CQX_FEC_CQE_CNT EHEA_BMASK_IBM(32, 63)
#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0)
#define CQX_EP_EVENT_PENDING EHEA_BMASK_IBM(0, 0)

#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x)

	u64 reserved2[(0x1000 - 0x60) / 8];

#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x)

/*
 * These access functions will be changed when the discussion about the
 * new access methods for POWER has settled.
 */

/* Read a 64-bit eHEA register at byte @offset within the mapped page */
static inline u64 epa_load(struct h_epa epa, u32 offset)
{
	return __raw_readq((void __iomem *)(epa.addr + offset));
}

static inline void epa_store(struct h_epa epa, u32 offset, u64 value)
{
	__raw_writeq(value, (void __iomem *)(epa.addr + offset));
	epa_load(epa, offset);	/* synchronize explicitly to eHEA */
}

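/*
 * The read-back in epa_store() is what makes the store synchronous:
 * MMIO writes may be posted, and a subsequent load from the same device
 * forces the write to complete before the function returns.
 * epa_store_acc() below omits the read-back, for doorbell-style
 * registers whose callers do not need that guarantee.
 */
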
/* As epa_store(), but without the synchronizing read-back */
static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
{
	__raw_writeq(value, (void __iomem *)(epa.addr + offset));
}

#define epa_store_cq(epa, offset, value)\
	epa_store(epa, CQTEMM_OFFSET(offset), value)
#define epa_load_cq(epa, offset)\
	epa_load(epa, CQTEMM_OFFSET(offset))

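/*
 * Wrappers that take a struct ehea_cqtemm member name instead of a raw
 * byte offset. ehea_reset_cq_n1() below, for example, expands to:
 *
 *	epa_store(epa, CQTEMM_OFFSET(cqx_n1),
 *		  EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1));
 */
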
static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_sqa),
		      EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes));
}

static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a),
		      EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes));
}

static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq2a),
		      EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes));
}

static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a),
		      EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes));
}

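/*
 * The ehea_update_*() helpers behave as doorbells: the value written is
 * the number of newly posted WQEs, not an absolute index. A minimal
 * sketch of typical caller usage, where ehea_refill_rq1() stands in for
 * a hypothetical routine that posted 'count' receive WQEs to RQ1:
 *
 *	ehea_refill_rq1(pr, index, count);
 *	ehea_update_rq1a(pr->qp, count);
 */
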
static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes)
{
	struct h_epa epa = cq->epas.kernel;
	epa_store_acc(epa, CQTEMM_OFFSET(cqx_feca),
		      EHEA_BMASK_SET(CQX_FECADDER, nr_cqes));
}

static inline void ehea_reset_cq_n1(struct ehea_cq *cq)
{
	struct h_epa epa = cq->epas.kernel;
	epa_store_cq(epa, cqx_n1,
		     EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1));
}

static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq)
{
	struct h_epa epa = my_cq->epas.kernel;
	epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep),
		      EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0));
}

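/*
 * Sketch of how a completion path would combine the CQ helpers above,
 * assuming a hypothetical ehea_poll_cqes() that consumed 'processed'
 * CQEs: return the entries to hardware via the free-entry-count adder,
 * clear the event-pending flag, then re-arm completion event
 * generation.
 *
 *	processed = ehea_poll_cqes(cq);
 *	ehea_update_feca(cq, processed);
 *	ehea_reset_cq_ep(cq);
 *	ehea_reset_cq_n1(cq);
 */
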
#endif /* __EHEA_HW_H__ */