/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__
#include <linux/prefetch.h>
#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT		12
#define EHEA_PAGESIZE		(1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE		(1UL << 24)
#define EHEA_PAGES_PER_SECTION	(EHEA_SECTSIZE >> EHEA_PAGESHIFT)
#define EHEA_HUGEPAGESHIFT	34
#define EHEA_HUGEPAGE_SIZE	(1UL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK	((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
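
/*
 * For reference, the sizes these shifts encode: hardware queue pages
 * are 4 KiB (1 << 12), memory is tracked in 16 MiB sections (1 << 24),
 * and huge pages are taken to be 16 GiB (1 << 34).
 */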

#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT	EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE		EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE		0x1
#define EHEA_SWQE3_TYPE		0x2
#define EHEA_RWQE2_TYPE		0x3
#define EHEA_RWQE3_TYPE		0x4
#define EHEA_WR_ID_INDEX	EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL	EHEA_BMASK_IBM(48, 63)
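
/*
 * Illustrative use only (EHEA_BMASK_SET()/EHEA_BMASK_GET() come from
 * ehea_hw.h): the 64-bit wr_id packs a counter, a WQE type tag and a
 * queue index, e.g.
 *
 *	wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 *	      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 *	type  = EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id);
 */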

struct ehea_vsgentry {
	u64 vaddr;
	u32 l_key;
	u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES		252
#define SWQE2_MAX_IMM			(0xD0 - 0x30)
#define SWQE3_MAX_IMM			224

/* tx control flags for swqe */
#define EHEA_SWQE_CRC			0x8000
#define EHEA_SWQE_IP_CHECKSUM		0x4000
#define EHEA_SWQE_TCP_CHECKSUM		0x2000
#define EHEA_SWQE_TSO			0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION	0x0800
#define EHEA_SWQE_VLAN_INSERT		0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT	0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT	0x0100
#define EHEA_SWQE_WRAP_CTL_REC		0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE	0x0040
#define EHEA_SWQE_BIND			0x0020
#define EHEA_SWQE_PURGE			0x0010

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE		32
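
/*
 * The union below gives the three send WQE layouts. A sketch of the
 * convention, inferred from the flag names above: format 1 carries
 * scatter/gather descriptors only, format 2 one descriptor plus
 * immediate data (EHEA_SWQE_IMM_DATA_PRESENT |
 * EHEA_SWQE_DESCRIPTORS_PRESENT set in tx_control), and format 3
 * immediate data only (EHEA_SWQE_IMM_DATA_PRESENT).
 */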

struct ehea_swqe {
	u64 wr_id;
	u16 tx_control;
	u16 vlan_tag;
	u8 reserved1;
	u8 ip_start;
	u8 ip_end;
	u8 immediate_data_length;
	u8 tcp_offset;
	u8 reserved2;
	u16 reserved2b;
	u8 wrap_tag;
	u8 descriptors;		/* number of valid descriptors in WQE */
	u16 reserved3;
	u16 reserved4;
	u16 mss;
	u32 reserved5;
	union {
		/*  Send WQE Format 1 */
		struct {
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
		} no_immediate_data;

		/*  Send WQE Format 2 */
		struct {
			struct ehea_vsgentry sg_entry;
			/* 0x30 */
			u8 immediate_data[SWQE2_MAX_IMM];
			/* 0xd0 */
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
		} immdata_desc __packed;

		/*  Send WQE Format 3 */
		struct {
			u8 immediate_data[SWQE3_MAX_IMM];
		} immdata_nodesc;
	} u;
};

struct ehea_rwqe {
	u64 wr_id;		/* work request ID */
	u8 reserved1[5];
	u8 data_segments;
	u16 reserved2;
	u64 reserved3;
	u64 reserved4;
	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT	0x0400

#define EHEA_CQE_TYPE_RQ		0x60
#define EHEA_CQE_STAT_ERR_MASK		0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK	0xF
#define EHEA_CQE_BLIND_CKSUM		0x8000
#define EHEA_CQE_STAT_ERR_TCP		0x4000
#define EHEA_CQE_STAT_ERR_IP		0x2000
#define EHEA_CQE_STAT_ERR_CRC		0x1000

/* Defines which bad send cqe statuses lead to a port reset */
#define EHEA_CQE_STAT_RESET_MASK	0x0002

struct ehea_cqe {
	u64 wr_id;		/* work request ID from WQE */
	u8 type;
	u8 valid;
	u16 status;
	u16 reserved1;
	u16 num_bytes_transfered;
	u16 vlan_tag;
	u16 inet_checksum_value;
	u8 reserved2;
	u8 header_length;
	u16 reserved3;
	u16 page_offset;
	u16 wqe_count;
	u32 qp_token;
	u32 timestamp;
	u32 reserved4;
	u64 reserved5[3];
};
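
/*
 * Illustrative status check (a sketch; the real handling lives in
 * ehea_main.c): a completion is bad when any bit of
 * EHEA_CQE_STAT_ERR_MASK is set in cqe->status, and
 * EHEA_CQE_STAT_RESET_MASK selects the send errors that force a port
 * reset.
 *
 *	if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
 *		if (cqe->status & EHEA_CQE_STAT_RESET_MASK)
 *			... schedule a port reset ...
 *	}
 */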

#define EHEA_EQE_VALID		EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE		EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER	EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER	EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN	EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN	EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY		EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER	EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER	EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID		EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER	EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER	EHEA_BMASK_IBM(56, 63)

#define EHEA_AER_RESTYPE_QP	0x8
#define EHEA_AER_RESTYPE_CQ	0x4
#define EHEA_AER_RESTYPE_EQ	0x3

/* Defines which affiliated errors lead to a port reset */
#define EHEA_AER_RESET_MASK	0xFFFFFFFFFEFFFFFFULL
#define EHEA_AERR_RESET_MASK	0xFFFFFFFFFFFFFFFFULL

struct ehea_eqe {
	u64 entry;
};
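
/*
 * Every EQE field is packed into the single u64 above and is meant to
 * be decoded with the EHEA_EQE_* masks, e.g. (illustrative):
 *
 *	if (EHEA_BMASK_GET(EHEA_EQE_VALID, eqe->entry))
 *		token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
 */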

#define ERROR_DATA_LENGTH	EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE		EHEA_BMASK_IBM(0, 7)
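
/*
 * Queue entry iterators. A hw_queue (see ehea.h) is a ring of
 * EHEA_PAGESIZE pages; current_q_offset wraps at queue_length, and
 * toggle_state flips on every wrap. An entry is fresh when the valid
 * bit in its first byte matches the current toggle state, which is how
 * the *_valid() helpers below tell new entries from stale ones.
 */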

static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
	struct ehea_page *current_page;

	if (q_offset >= queue->queue_length)
		q_offset -= queue->queue_length;
	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}

static inline void *hw_qeit_get(struct hw_queue *queue)
{
	return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		/* toggle the valid flag */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	hw_qeit_inc(queue);
	return retvalue;
}

static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	u8 valid = retvalue->valid;
	void *pref;

	if ((valid >> 7) == (queue->toggle_state & 1)) {
		/* this is a good one */
		hw_qeit_inc(queue);
		pref = hw_qeit_calc(queue, queue->current_q_offset);
		prefetch(pref);
		prefetch(pref + 128);
	} else
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	void *pref;
	u8 valid;

	pref = hw_qeit_calc(queue, queue->current_q_offset);
	prefetch(pref);
	prefetch(pref + 128);
	prefetch(pref + 256);
	valid = retvalue->valid;
	if (!((valid >> 7) == (queue->toggle_state & 1)))
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
	queue->current_q_offset = 0;
	return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
	void *retvalue;

	retvalue = hw_qeit_get(queue);
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
	return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	u32 qe = *(u8 *)retvalue;

	if ((qe >> 7) == (queue->toggle_state & 1))
		hw_qeit_eq_get_inc(queue);
	else
		retvalue = NULL;
	return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
						   int rq_nr)
{
	struct hw_queue *queue;

	if (rq_nr == 1)
		queue = &qp->hw_rqueue1;
	else if (rq_nr == 2)
		queue = &qp->hw_rqueue2;
	else
		queue = &qp->hw_rqueue3;

	return hw_qeit_get_inc(queue);
}

static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
					      int *wqe_index)
{
	struct hw_queue *queue = &my_qp->hw_squeue;
	struct ehea_swqe *wqe_p;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

	return wqe_p;
}

static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
	iosync();
	ehea_update_sqa(my_qp, 1);
}

static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
	struct hw_queue *queue = &qp->hw_rqueue1;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
	return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
	hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
	hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
	return hw_qeit_get_valid(&my_cq->hw_queue);
}
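
/*
 * Typical polling pattern (a sketch, not the exact ehea_main.c code):
 * ehea_poll_cq() only peeks at the next CQE, so the caller advances
 * with ehea_inc_cq() once the entry has been processed.
 *
 *	struct ehea_cqe *cqe;
 *
 *	while ((cqe = ehea_poll_cq(cq))) {
 *		... process cqe ...
 *		ehea_inc_cq(cq);
 *	}
 */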

#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
	EHEA_EQ = 0,		/* event queue */
	EHEA_NEQ		/* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       enum ehea_eq_type type,
			       const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
			       u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
			       struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr);

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);

#endif	/* __EHEA_QMR_H__ */