/*
 * Extracted from gitweb: linux-ginger.git / drivers/net/ehea/ehea_phyp.h
 * blob f3628c803567674c4b5650a9d71389b5aea0290e
 * (The gitweb page header also carried an unrelated commit title:
 *  "DaVinci: EDMA: Add queue 2 and 3 for DM365 and DM6467".)
 */
/*
 *  linux/drivers/net/ehea/ehea_phyp.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
29 #ifndef __EHEA_PHYP_H__
30 #define __EHEA_PHYP_H__
32 #include <linux/delay.h>
33 #include <asm/hvcall.h>
34 #include "ehea.h"
35 #include "ehea_hw.h"
36 #include "ehea_hcall.h"
38 /* Some abbreviations used here:
40 * hcp_* - structures, variables and functions releated to Hypervisor Calls
43 static inline u32 get_longbusy_msecs(int long_busy_ret_code)
45 switch (long_busy_ret_code) {
46 case H_LONG_BUSY_ORDER_1_MSEC:
47 return 1;
48 case H_LONG_BUSY_ORDER_10_MSEC:
49 return 10;
50 case H_LONG_BUSY_ORDER_100_MSEC:
51 return 100;
52 case H_LONG_BUSY_ORDER_1_SEC:
53 return 1000;
54 case H_LONG_BUSY_ORDER_10_SEC:
55 return 10000;
56 case H_LONG_BUSY_ORDER_100_SEC:
57 return 100000;
58 default:
59 return 1;
/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
#define EHEA_MAX_RPAGE 512

/* Notification Event Queue (NEQ) Entry bit masks */
#define NEQE_EVENT_CODE		EHEA_BMASK_IBM(2, 7)
#define NEQE_PORTNUM		EHEA_BMASK_IBM(32, 47)
#define NEQE_PORT_UP		EHEA_BMASK_IBM(16, 16)
#define NEQE_EXTSWITCH_PORT_UP	EHEA_BMASK_IBM(17, 17)
#define NEQE_EXTSWITCH_PRIMARY	EHEA_BMASK_IBM(18, 18)
#define NEQE_PLID		EHEA_BMASK_IBM(16, 47)

/* Notification Event Codes */
#define EHEA_EC_PORTSTATE_CHG	0x30
#define EHEA_EC_ADAPTER_MALFUNC	0x32
#define EHEA_EC_PORT_MALFUNC	0x33

/* Notification Event Log Register (NELR) bit masks */
#define NELR_PORT_MALFUNC	EHEA_BMASK_IBM(61, 61)
#define NELR_ADAPTER_MALFUNC	EHEA_BMASK_IBM(62, 62)
#define NELR_PORTSTATE_CHG	EHEA_BMASK_IBM(63, 63)
84 static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
85 u64 paddr_user)
87 /* To support 64k pages we must round to 64k page boundary */
88 epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) +
89 (paddr_kernel & ~PAGE_MASK);
90 epas->user.addr = paddr_user;
93 static inline void hcp_epas_dtor(struct h_epas *epas)
95 if (epas->kernel.addr)
96 iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK));
98 epas->user.addr = 0;
99 epas->kernel.addr = 0;
102 struct hcp_modify_qp_cb0 {
103 u64 qp_ctl_reg; /* 00 */
104 u32 max_swqe; /* 02 */
105 u32 max_rwqe; /* 03 */
106 u32 port_nb; /* 04 */
107 u32 reserved0; /* 05 */
108 u64 qp_aer; /* 06 */
109 u64 qp_tenure; /* 08 */
112 /* Hcall Query/Modify Queue Pair Control Block 0 Selection Mask Bits */
113 #define H_QPCB0_ALL EHEA_BMASK_IBM(0, 5)
114 #define H_QPCB0_QP_CTL_REG EHEA_BMASK_IBM(0, 0)
115 #define H_QPCB0_MAX_SWQE EHEA_BMASK_IBM(1, 1)
116 #define H_QPCB0_MAX_RWQE EHEA_BMASK_IBM(2, 2)
117 #define H_QPCB0_PORT_NB EHEA_BMASK_IBM(3, 3)
118 #define H_QPCB0_QP_AER EHEA_BMASK_IBM(4, 4)
119 #define H_QPCB0_QP_TENURE EHEA_BMASK_IBM(5, 5)
121 /* Queue Pair Control Register Status Bits */
122 #define H_QP_CR_ENABLED 0x8000000000000000ULL /* QP enabled */
123 /* QP States: */
124 #define H_QP_CR_STATE_RESET 0x0000010000000000ULL /* Reset */
125 #define H_QP_CR_STATE_INITIALIZED 0x0000020000000000ULL /* Initialized */
126 #define H_QP_CR_STATE_RDY2RCV 0x0000030000000000ULL /* Ready to recv */
127 #define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* Ready to send */
128 #define H_QP_CR_STATE_ERROR 0x0000800000000000ULL /* Error */
129 #define H_QP_CR_RES_STATE 0x0000007F00000000ULL /* Resultant state */
131 struct hcp_modify_qp_cb1 {
132 u32 qpn; /* 00 */
133 u32 qp_asyn_ev_eq_nb; /* 01 */
134 u64 sq_cq_handle; /* 02 */
135 u64 rq_cq_handle; /* 04 */
136 /* sgel = scatter gather element */
137 u32 sgel_nb_sq; /* 06 */
138 u32 sgel_nb_rq1; /* 07 */
139 u32 sgel_nb_rq2; /* 08 */
140 u32 sgel_nb_rq3; /* 09 */
143 /* Hcall Query/Modify Queue Pair Control Block 1 Selection Mask Bits */
144 #define H_QPCB1_ALL EHEA_BMASK_IBM(0, 7)
145 #define H_QPCB1_QPN EHEA_BMASK_IBM(0, 0)
146 #define H_QPCB1_ASYN_EV_EQ_NB EHEA_BMASK_IBM(1, 1)
147 #define H_QPCB1_SQ_CQ_HANDLE EHEA_BMASK_IBM(2, 2)
148 #define H_QPCB1_RQ_CQ_HANDLE EHEA_BMASK_IBM(3, 3)
149 #define H_QPCB1_SGEL_NB_SQ EHEA_BMASK_IBM(4, 4)
150 #define H_QPCB1_SGEL_NB_RQ1 EHEA_BMASK_IBM(5, 5)
151 #define H_QPCB1_SGEL_NB_RQ2 EHEA_BMASK_IBM(6, 6)
152 #define H_QPCB1_SGEL_NB_RQ3 EHEA_BMASK_IBM(7, 7)
154 struct hcp_query_ehea {
155 u32 cur_num_qps; /* 00 */
156 u32 cur_num_cqs; /* 01 */
157 u32 cur_num_eqs; /* 02 */
158 u32 cur_num_mrs; /* 03 */
159 u32 auth_level; /* 04 */
160 u32 max_num_qps; /* 05 */
161 u32 max_num_cqs; /* 06 */
162 u32 max_num_eqs; /* 07 */
163 u32 max_num_mrs; /* 08 */
164 u32 reserved0; /* 09 */
165 u32 int_clock_freq; /* 10 */
166 u32 max_num_pds; /* 11 */
167 u32 max_num_addr_handles; /* 12 */
168 u32 max_num_cqes; /* 13 */
169 u32 max_num_wqes; /* 14 */
170 u32 max_num_sgel_rq1wqe; /* 15 */
171 u32 max_num_sgel_rq2wqe; /* 16 */
172 u32 max_num_sgel_rq3wqe; /* 17 */
173 u32 mr_page_size; /* 18 */
174 u32 reserved1; /* 19 */
175 u64 max_mr_size; /* 20 */
176 u64 reserved2; /* 22 */
177 u32 num_ports; /* 24 */
178 u32 reserved3; /* 25 */
179 u32 reserved4; /* 26 */
180 u32 reserved5; /* 27 */
181 u64 max_mc_mac; /* 28 */
182 u64 ehea_cap; /* 30 */
183 u32 max_isn_per_eq; /* 32 */
184 u32 max_num_neq; /* 33 */
185 u64 max_num_vlan_ids; /* 34 */
186 u32 max_num_port_group; /* 36 */
187 u32 max_num_phys_port; /* 37 */
/* Hcall Query/Modify Port Control Block defines */
#define H_PORT_CB0	0
#define H_PORT_CB1	1
#define H_PORT_CB2	2
#define H_PORT_CB3	3
#define H_PORT_CB4	4
#define H_PORT_CB5	5
#define H_PORT_CB6	6
#define H_PORT_CB7	7
201 struct hcp_ehea_port_cb0 {
202 u64 port_mac_addr;
203 u64 port_rc;
204 u64 reserved0;
205 u32 port_op_state;
206 u32 port_speed;
207 u32 ext_swport_op_state;
208 u32 neg_tpf_prpf;
209 u32 num_default_qps;
210 u32 reserved1;
211 u64 default_qpn_arr[16];
214 /* Hcall Query/Modify Port Control Block 0 Selection Mask Bits */
215 #define H_PORT_CB0_ALL EHEA_BMASK_IBM(0, 7) /* Set all bits */
216 #define H_PORT_CB0_MAC EHEA_BMASK_IBM(0, 0) /* MAC address */
217 #define H_PORT_CB0_PRC EHEA_BMASK_IBM(1, 1) /* Port Recv Control */
218 #define H_PORT_CB0_DEFQPNARRAY EHEA_BMASK_IBM(7, 7) /* Default QPN Array */
220 /* Hcall Query Port: Returned port speed values */
221 #define H_SPEED_10M_H 1 /* 10 Mbps, Half Duplex */
222 #define H_SPEED_10M_F 2 /* 10 Mbps, Full Duplex */
223 #define H_SPEED_100M_H 3 /* 100 Mbps, Half Duplex */
224 #define H_SPEED_100M_F 4 /* 100 Mbps, Full Duplex */
225 #define H_SPEED_1G_F 6 /* 1 Gbps, Full Duplex */
226 #define H_SPEED_10G_F 8 /* 10 Gbps, Full Duplex */
228 /* Port Receive Control Status Bits */
229 #define PXLY_RC_VALID EHEA_BMASK_IBM(49, 49)
230 #define PXLY_RC_VLAN_XTRACT EHEA_BMASK_IBM(50, 50)
231 #define PXLY_RC_TCP_6_TUPLE EHEA_BMASK_IBM(51, 51)
232 #define PXLY_RC_UDP_6_TUPLE EHEA_BMASK_IBM(52, 52)
233 #define PXLY_RC_TCP_3_TUPLE EHEA_BMASK_IBM(53, 53)
234 #define PXLY_RC_TCP_2_TUPLE EHEA_BMASK_IBM(54, 54)
235 #define PXLY_RC_LLC_SNAP EHEA_BMASK_IBM(55, 55)
236 #define PXLY_RC_JUMBO_FRAME EHEA_BMASK_IBM(56, 56)
237 #define PXLY_RC_FRAG_IP_PKT EHEA_BMASK_IBM(57, 57)
238 #define PXLY_RC_TCP_UDP_CHKSUM EHEA_BMASK_IBM(58, 58)
239 #define PXLY_RC_IP_CHKSUM EHEA_BMASK_IBM(59, 59)
240 #define PXLY_RC_MAC_FILTER EHEA_BMASK_IBM(60, 60)
241 #define PXLY_RC_UNTAG_FILTER EHEA_BMASK_IBM(61, 61)
242 #define PXLY_RC_VLAN_TAG_FILTER EHEA_BMASK_IBM(62, 63)
244 #define PXLY_RC_VLAN_FILTER 2
245 #define PXLY_RC_VLAN_PERM 0
248 #define H_PORT_CB1_ALL 0x8000000000000000ULL
250 struct hcp_ehea_port_cb1 {
251 u64 vlan_filter[64];
254 #define H_PORT_CB2_ALL 0xFFE0000000000000ULL
256 struct hcp_ehea_port_cb2 {
257 u64 rxo;
258 u64 rxucp;
259 u64 rxufd;
260 u64 rxuerr;
261 u64 rxftl;
262 u64 rxmcp;
263 u64 rxbcp;
264 u64 txo;
265 u64 txucp;
266 u64 txmcp;
267 u64 txbcp;
270 struct hcp_ehea_port_cb3 {
271 u64 vlan_bc_filter[64];
272 u64 vlan_mc_filter[64];
273 u64 vlan_un_filter[64];
274 u64 port_mac_hash_array[64];
277 #define H_PORT_CB4_ALL 0xF000000000000000ULL
278 #define H_PORT_CB4_JUMBO 0x1000000000000000ULL
279 #define H_PORT_CB4_SPEED 0x8000000000000000ULL
281 struct hcp_ehea_port_cb4 {
282 u32 port_speed;
283 u32 pause_frame;
284 u32 ens_port_op_state;
285 u32 jumbo_frame;
286 u32 ens_port_wrap;
289 /* Hcall Query/Modify Port Control Block 5 Selection Mask Bits */
290 #define H_PORT_CB5_RCU 0x0001000000000000ULL
291 #define PXS_RCU EHEA_BMASK_IBM(61, 63)
293 struct hcp_ehea_port_cb5 {
294 u64 prc; /* 00 */
295 u64 uaa; /* 01 */
296 u64 macvc; /* 02 */
297 u64 xpcsc; /* 03 */
298 u64 xpcsp; /* 04 */
299 u64 pcsid; /* 05 */
300 u64 xpcsst; /* 06 */
301 u64 pthlb; /* 07 */
302 u64 pthrb; /* 08 */
303 u64 pqu; /* 09 */
304 u64 pqd; /* 10 */
305 u64 prt; /* 11 */
306 u64 wsth; /* 12 */
307 u64 rcb; /* 13 */
308 u64 rcm; /* 14 */
309 u64 rcu; /* 15 */
310 u64 macc; /* 16 */
311 u64 pc; /* 17 */
312 u64 pst; /* 18 */
313 u64 ducqpn; /* 19 */
314 u64 mcqpn; /* 20 */
315 u64 mma; /* 21 */
316 u64 pmc0h; /* 22 */
317 u64 pmc0l; /* 23 */
318 u64 lbc; /* 24 */
321 #define H_PORT_CB6_ALL 0xFFFFFE7FFFFF8000ULL
323 struct hcp_ehea_port_cb6 {
324 u64 rxo; /* 00 */
325 u64 rx64; /* 01 */
326 u64 rx65; /* 02 */
327 u64 rx128; /* 03 */
328 u64 rx256; /* 04 */
329 u64 rx512; /* 05 */
330 u64 rx1024; /* 06 */
331 u64 rxbfcs; /* 07 */
332 u64 rxime; /* 08 */
333 u64 rxrle; /* 09 */
334 u64 rxorle; /* 10 */
335 u64 rxftl; /* 11 */
336 u64 rxjab; /* 12 */
337 u64 rxse; /* 13 */
338 u64 rxce; /* 14 */
339 u64 rxrf; /* 15 */
340 u64 rxfrag; /* 16 */
341 u64 rxuoc; /* 17 */
342 u64 rxcpf; /* 18 */
343 u64 rxsb; /* 19 */
344 u64 rxfd; /* 20 */
345 u64 rxoerr; /* 21 */
346 u64 rxaln; /* 22 */
347 u64 ducqpn; /* 23 */
348 u64 reserved0; /* 24 */
349 u64 rxmcp; /* 25 */
350 u64 rxbcp; /* 26 */
351 u64 txmcp; /* 27 */
352 u64 txbcp; /* 28 */
353 u64 txo; /* 29 */
354 u64 tx64; /* 30 */
355 u64 tx65; /* 31 */
356 u64 tx128; /* 32 */
357 u64 tx256; /* 33 */
358 u64 tx512; /* 34 */
359 u64 tx1024; /* 35 */
360 u64 txbfcs; /* 36 */
361 u64 txcpf; /* 37 */
362 u64 txlf; /* 38 */
363 u64 txrf; /* 39 */
364 u64 txime; /* 40 */
365 u64 txsc; /* 41 */
366 u64 txmc; /* 42 */
367 u64 txsqe; /* 43 */
368 u64 txdef; /* 44 */
369 u64 txlcol; /* 45 */
370 u64 txexcol; /* 46 */
371 u64 txcse; /* 47 */
372 u64 txbor; /* 48 */
375 #define H_PORT_CB7_DUCQPN 0x8000000000000000ULL
377 struct hcp_ehea_port_cb7 {
378 u64 def_uc_qpn;
381 u64 ehea_h_query_ehea_qp(const u64 adapter_handle,
382 const u8 qp_category,
383 const u64 qp_handle, const u64 sel_mask,
384 void *cb_addr);
386 u64 ehea_h_modify_ehea_qp(const u64 adapter_handle,
387 const u8 cat,
388 const u64 qp_handle,
389 const u64 sel_mask,
390 void *cb_addr,
391 u64 *inv_attr_id,
392 u64 *proc_mask, u16 *out_swr, u16 *out_rwr);
394 u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
395 struct ehea_eq_attr *eq_attr, u64 *eq_handle);
397 u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
398 struct ehea_cq_attr *cq_attr,
399 u64 *cq_handle, struct h_epas *epas);
401 u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
402 struct ehea_qp_init_attr *init_attr,
403 const u32 pd,
404 u64 *qp_handle, struct h_epas *h_epas);
406 #define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55)
407 #define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63)
409 u64 ehea_h_register_rpage(const u64 adapter_handle,
410 const u8 pagesize,
411 const u8 queue_type,
412 const u64 resource_handle,
413 const u64 log_pageaddr, u64 count);
415 #define H_DISABLE_GET_EHEA_WQE_P 1
416 #define H_DISABLE_GET_SQ_WQE_P 2
417 #define H_DISABLE_GET_RQC 3
419 u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle);
421 #define FORCE_FREE 1
422 #define NORMAL_FREE 0
424 u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
425 u64 force_bit);
427 u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
428 const u64 length, const u32 access_ctrl,
429 const u32 pd, u64 *mr_handle, u32 *lkey);
431 u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
432 const u8 pagesize, const u8 queue_type,
433 const u64 log_pageaddr, const u64 count);
435 u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
436 const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
437 struct ehea_mr *mr);
439 u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr);
441 /* output param R5 */
442 #define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47)
443 #define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63)
445 u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
446 const u8 cb_cat, const u64 select_mask,
447 void *cb_addr);
449 u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
450 const u8 cb_cat, const u64 select_mask,
451 void *cb_addr);
453 #define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63)
454 #define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(61, 63)
455 #define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63)
456 #define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63)
458 u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
459 const u8 reg_type, const u64 mc_mac_addr,
460 const u16 vlan_id, const u32 hcall_id);
462 u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
463 const u64 event_mask);
465 u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
466 void *rblock);
468 #endif /* __EHEA_PHYP_H__ */