/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include <linux/prefetch.h>
#include "ehea.h"
#include "ehea_hw.h"
/*
 * page size of ehea hardware queues
 */
#define EHEA_PAGESHIFT         12
#define EHEA_PAGESIZE          (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE          (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
#define EHEA_HUGEPAGESHIFT     34
#define EHEA_HUGEPAGE_SIZE     (1UL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif
/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */
/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE    0x1
#define EHEA_SWQE3_TYPE    0x2
#define EHEA_RWQE2_TYPE    0x3
#define EHEA_RWQE3_TYPE    0x4
#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
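
/*
 * Usage sketch (illustrative, not part of the hardware interface): assuming
 * the EHEA_BMASK_SET()/EHEA_BMASK_GET() helpers from ehea_hw.h, a format-2
 * send WQE can tag its work request ID so the completion handler can map a
 * CQE back to a ring index, roughly:
 *
 *	swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 *		    | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, count)
 *		    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 *	...
 *	index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
 */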
struct ehea_vsgentry {
	u64 vaddr;
	u32 l_key;
	u32 len;
};
/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES	252
#define SWQE2_MAX_IMM		(0xD0 - 0x30)
#define SWQE3_MAX_IMM		224
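
/*
 * Note: SWQE2_MAX_IMM is the gap between WQE offset 0x30 (end of the
 * leading sg_entry in format 2) and offset 0xD0 (start of the trailing
 * sg_list), i.e. 0xA0 = 160 bytes of immediate data.
 */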
/* tx control flags for swqe */
#define EHEA_SWQE_CRC			0x8000
#define EHEA_SWQE_IP_CHECKSUM		0x4000
#define EHEA_SWQE_TCP_CHECKSUM		0x2000
#define EHEA_SWQE_TSO			0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION	0x0800
#define EHEA_SWQE_VLAN_INSERT		0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT	0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT	0x0100
#define EHEA_SWQE_WRAP_CTL_REC		0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE	0x0040
#define EHEA_SWQE_BIND			0x0020
#define EHEA_SWQE_PURGE			0x0010
/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE	32
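
/*
 * A send WQE starts with a fixed 32-byte header (SWQE_HEADER_SIZE) and
 * ends in a union selecting one of three formats: format 1 carries only a
 * scatter-gather list, format 2 carries one sg entry plus immediate data
 * plus further descriptors, and format 3 carries immediate data only.
 * Which member is in use is indicated through the tx_control flags above
 * (see EHEA_SWQE_IMM_DATA_PRESENT and EHEA_SWQE_DESCRIPTORS_PRESENT).
 */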
struct ehea_swqe {
	u64 wr_id;
	u16 tx_control;
	u16 vlan_tag;
	u8 reserved1;
	u8 ip_start;
	u8 ip_end;
	u8 immediate_data_length;
	u8 tcp_offset;
	u8 reserved2;
	u16 reserved2b;
	u8 wrap_tag;
	u8 descriptors;		/* number of valid descriptors in WQE */
	u16 reserved3;
	u16 reserved4;
	u16 mss;
	u32 reserved5;
	union {
		/*  Send WQE Format 1 */
		struct {
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
		} no_immediate_data;

		/*  Send WQE Format 2 */
		struct {
			struct ehea_vsgentry sg_entry;
			/* 0x30 */
			u8 immediate_data[SWQE2_MAX_IMM];
			/* 0xd0 */
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
		} immdata_desc __packed;

		/*  Send WQE Format 3 */
		struct {
			u8 immediate_data[SWQE3_MAX_IMM];
		} immdata_nodesc;
	} u;
};
struct ehea_rwqe {
	u64 wr_id;		/* work request ID */
	u8 reserved1[5];
	u8 data_segments;
	u16 reserved2;
	u64 reserved3;
	u64 reserved4;
	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};
#define EHEA_CQE_VLAN_TAG_XTRACT	0x0400

#define EHEA_CQE_TYPE_RQ		0x60
#define EHEA_CQE_STAT_ERR_MASK		0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK	0xF
#define EHEA_CQE_BLIND_CKSUM		0x8000
#define EHEA_CQE_STAT_ERR_TCP		0x4000
#define EHEA_CQE_STAT_ERR_IP		0x2000
#define EHEA_CQE_STAT_ERR_CRC		0x1000

/* Defines which bad send cqe statuses lead to a port reset */
#define EHEA_CQE_STAT_RESET_MASK	0x0002
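
/*
 * Illustrative check (a sketch, not taken from this file): a send
 * completion whose status carries a bit from EHEA_CQE_STAT_RESET_MASK
 * signals a fatal condition, roughly:
 *
 *	if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
 *		if (cqe->status & EHEA_CQE_STAT_RESET_MASK)
 *			... schedule a port reset ...
 *	}
 */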
struct ehea_cqe {
	u64 wr_id;		/* work request ID from WQE */
	u8 type;
	u8 valid;
	u16 status;
	u16 reserved1;
	u16 num_bytes_transfered;
	u16 vlan_tag;
	u16 inet_checksum_value;
	u8 reserved2;
	u8 header_length;
	u16 reserved3;
	u16 page_offset;
	u16 wqe_count;
	u32 qp_token;
	u32 timestamp;
	u32 reserved4;
	u64 reserved5[3];
};
#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)
#define EHEA_AER_RESTYPE_QP	0x8
#define EHEA_AER_RESTYPE_CQ	0x4
#define EHEA_AER_RESTYPE_EQ	0x3
/* Defines which affiliated errors lead to a port reset */
#define EHEA_AER_RESET_MASK	0xFFFFFFFFFEFFFFFFULL
#define EHEA_AERR_RESET_MASK	0xFFFFFFFFFFFFFFFFULL

struct ehea_eqe {
	u64 entry;
};

#define ERROR_DATA_LENGTH	EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE		EHEA_BMASK_IBM(0, 7)
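
/*
 * The inline helpers below iterate over a hardware queue (struct hw_queue,
 * see ehea.h): the queue body is an array of EHEA_PAGESIZE pages, so an
 * offset is resolved into a page number plus an offset within that page.
 * Offsets wrap around at queue_length, and each wrap flips toggle_state so
 * that entries written in the previous lap can be told apart from fresh
 * ones.
 */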
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
	struct ehea_page *current_page;

	if (q_offset >= queue->queue_length)
		q_offset -= queue->queue_length;
	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}
static inline void *hw_qeit_get(struct hw_queue *queue)
{
	return hw_qeit_calc(queue, queue->current_q_offset);
}
static inline void hw_qeit_inc(struct hw_queue *queue)
{
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		/* toggle the valid flag */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
}
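
/*
 * Validity convention used by the helpers below: the high bit of a CQE's
 * valid byte must equal the queue's toggle_state for the entry to be
 * current.  Because toggle_state flips on every wrap, a stale entry left
 * over from the previous lap fails the comparison and polling returns
 * NULL instead of reprocessing it.
 */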
static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	hw_qeit_inc(queue);
	return retvalue;
}
static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	u8 valid = retvalue->valid;
	void *pref;

	if ((valid >> 7) == (queue->toggle_state & 1)) {
		/* this is a good one */
		hw_qeit_inc(queue);
		pref = hw_qeit_calc(queue, queue->current_q_offset);
		prefetch(pref);
		prefetch(pref + 128);
	} else
		retvalue = NULL;
	return retvalue;
}
static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	void *pref;
	u8 valid;

	pref = hw_qeit_calc(queue, queue->current_q_offset);
	prefetch(pref);
	prefetch(pref + 128);
	prefetch(pref + 256);
	valid = retvalue->valid;

	if (!((valid >> 7) == (queue->toggle_state & 1)))
		retvalue = NULL;
	return retvalue;
}
static inline void *hw_qeit_reset(struct hw_queue *queue)
{
	queue->current_q_offset = 0;
	return hw_qeit_get(queue);
}
static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
	void *retvalue;

	retvalue = hw_qeit_get(queue);
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
	return retvalue;
}
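
/*
 * Event queue variant: an EQE has no separate valid byte; its valid bit is
 * the top bit of the first byte of the entry (cf. EHEA_EQE_VALID, bit 0 in
 * IBM bit numbering), which is what the helper below tests before
 * consuming the entry.
 */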
static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	u32 qe = *(u8 *)retvalue;
	if ((qe >> 7) == (queue->toggle_state & 1))
		hw_qeit_eq_get_inc(queue);
	else
		retvalue = NULL;
	return retvalue;
}
static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
						   int rq_nr)
{
	struct hw_queue *queue;

	if (rq_nr == 1)
		queue = &qp->hw_rqueue1;
	else if (rq_nr == 2)
		queue = &qp->hw_rqueue2;
	else
		queue = &qp->hw_rqueue3;

	return hw_qeit_get_inc(queue);
}
static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
					      int *wqe_index)
{
	struct hw_queue *queue = &my_qp->hw_squeue;
	struct ehea_swqe *wqe_p;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

	return wqe_p;
}
static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
	iosync();
	ehea_update_sqa(my_qp, 1);
}
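
/*
 * Note: iosync() orders the WQE stores ahead of the doorbell, and
 * ehea_update_sqa() (ehea_hw.h) then tells the adapter that one more send
 * WQE is ready.  A transmit path built on these helpers looks roughly
 * like (a sketch, not taken from this file):
 *
 *	swqe = ehea_get_swqe(qp, &index);
 *	... fill in wr_id, tx_control and the chosen WQE format ...
 *	ehea_post_swqe(qp, swqe);
 */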
static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
	struct hw_queue *queue = &qp->hw_rqueue1;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
	return hw_qeit_get_valid(queue);
}
static inline void ehea_inc_cq(struct ehea_cq *cq)
{
	hw_qeit_inc(&cq->hw_queue);
}
static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
	hw_qeit_inc(&qp->hw_rqueue1);
}
static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
	return hw_qeit_get_valid(&my_cq->hw_queue);
}
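
/*
 * Polling sketch (illustrative): completions are consumed by polling and
 * advancing the CQ until no valid entry remains, roughly:
 *
 *	struct ehea_cqe *cqe = ehea_poll_cq(cq);
 *	while (cqe) {
 *		... process the completion ...
 *		ehea_inc_cq(cq);
 *		cqe = ehea_poll_cq(cq);
 *	}
 */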
#define EHEA_CQ_REGISTER_ORIG	0
#define EHEA_EQ_REGISTER_ORIG	0
enum ehea_eq_type {
	EHEA_EQ = 0,		/* event queue */
	EHEA_NEQ		/* notification event queue */
};
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       enum ehea_eq_type type,
			       const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
			       u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
			       struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr);
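
/*
 * The busmap functions below maintain a bitmap of memory sections (of
 * EHEA_SECTSIZE each) registered with the adapter; ehea_map_vaddr() uses
 * it to translate a kernel address into the adapter's bus address space
 * (implemented in ehea_qmr.c).
 */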
int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);
#endif	/* __EHEA_QMR_H__ */