/*
 * Source: drivers/net/ethernet/ibm/ehea/ehea.h
 * (blob 208c440a602ba7e64f73e6ab344a92b34dd6b95a; gitweb navigation
 * header removed — the stray drm commit title belonged to the web page,
 * not to this file)
 */
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * linux/drivers/net/ethernet/ibm/ehea/ehea.h
5 * eHEA ethernet device driver for IBM eServer System p
7 * (C) Copyright IBM Corp. 2006
9 * Authors:
10 * Christoph Raisch <raisch@de.ibm.com>
11 * Jan-Bernd Themann <themann@de.ibm.com>
12 * Thomas Klein <tklein@de.ibm.com>
15 #ifndef __EHEA_H__
16 #define __EHEA_H__
18 #include <linux/module.h>
19 #include <linux/ethtool.h>
20 #include <linux/vmalloc.h>
21 #include <linux/if_vlan.h>
22 #include <linux/platform_device.h>
24 #include <asm/ibmebus.h>
25 #include <asm/io.h>
27 #define DRV_NAME "ehea"
28 #define DRV_VERSION "EHEA_0107"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM	1
#define DLPAR_MEM_ADD		2
#define DLPAR_MEM_REM		4
#define EHEA_CAPABILITIES	(DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)

/* default netif_msg bitmask: link and error events only */
#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* hardware limits on queue depths (entries) */
#define EHEA_MAX_ENTRIES_RQ1	32767
#define EHEA_MAX_ENTRIES_RQ2	16383
#define EHEA_MAX_ENTRIES_RQ3	16383
#define EHEA_MAX_ENTRIES_SQ	32767
#define EHEA_MIN_ENTRIES_QP	127

#define EHEA_SMALL_QUEUES

#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT	1023
#define EHEA_DEF_ENTRIES_SQ	1023
#define EHEA_DEF_ENTRIES_RQ1	1023
#define EHEA_DEF_ENTRIES_RQ2	1023
#define EHEA_DEF_ENTRIES_RQ3	511
#else
#define EHEA_MAX_CQE_COUNT	4080
#define EHEA_DEF_ENTRIES_SQ	4080
#define EHEA_DEF_ENTRIES_RQ1	8160
#define EHEA_DEF_ENTRIES_RQ2	2040
#define EHEA_DEF_ENTRIES_RQ3	2040
#endif

#define EHEA_MAX_ENTRIES_EQ	20

/* scatter/gather entries per WQE type */
#define EHEA_SG_SQ	2
#define EHEA_SG_RQ1	1
#define EHEA_SG_RQ2	0
#define EHEA_SG_RQ3	0

#define EHEA_MAX_PACKET_SIZE	9022	/* for jumbo frames */
#define EHEA_RQ2_PKT_SIZE	2048
#define EHEA_L_PKT_SIZE		256	/* low latency */
/* Send completion signaling */

/* Protection Domain Identifier */
#define EHEA_PD_ID		0xaabcdeff

/* receive-queue selection thresholds */
#define EHEA_RQ2_THRESHOLD	1
#define EHEA_RQ3_THRESHOLD	4	/* use RQ3 threshold of 2048 bytes */

#define EHEA_SPEED_10G		10000
#define EHEA_SPEED_1G		1000
#define EHEA_SPEED_100M		100
#define EHEA_SPEED_10M		10
#define EHEA_SPEED_AUTONEG	0

/* Broadcast/Multicast registration types */
#define EHEA_BCMC_SCOPE_ALL	0x08
#define EHEA_BCMC_SCOPE_SINGLE	0x00
#define EHEA_BCMC_MULTICAST	0x04
#define EHEA_BCMC_BROADCAST	0x00
#define EHEA_BCMC_UNTAGGED	0x02
#define EHEA_BCMC_TAGGED	0x00
#define EHEA_BCMC_VLANID_ALL	0x01
#define EHEA_BCMC_VLANID_SINGLE	0x00

#define EHEA_CACHE_LINE		128

/* Memory Regions */
#define EHEA_MR_ACC_CTRL	0x00800000

/* busmap (memory map) constants */
#define EHEA_BUSMAP_START	0x8000000000000000ULL
#define EHEA_INVAL_ADDR		0xFFFFFFFFFFFFFFFFULL
#define EHEA_DIR_INDEX_SHIFT	13	/* 8k Entries in 64k block */
#define EHEA_TOP_INDEX_SHIFT	(EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES	(1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_MAP_SIZE		(0x10000)	/* currently fixed map size */
#define EHEA_INDEX_MASK		(EHEA_MAP_ENTRIES - 1)

/* parenthesized so the expansion composes safely in larger expressions
 * (original `10*HZ` would mis-bind under division or higher precedence) */
#define EHEA_WATCH_DOG_TIMEOUT	(10 * HZ)
/* utility functions */

/* debug helper: dumps @len bytes at @adr tagged with @msg
 * (implemented elsewhere in the driver) */
void ehea_dump(void *adr, int len, char *msg);

/*
 * Bit-field helpers. A "mask" descriptor packs the field's shift
 * position into the upper 16 bits and its length (in bits) into the
 * lower 16 bits.
 */
#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))

/* IBM bit numbering: bit 0 is the MSB of the 64-bit word.
 * `to` is now parenthesized — the original `63 - to` mis-expanded for
 * compound arguments such as EHEA_BMASK_IBM(a, b + c). */
#define EHEA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))

#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)

/* the & 0xffff discards the packed shift, leaving (64 - length) */
#define EHEA_BMASK_MASK(mask) \
	(0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))

#define EHEA_BMASK_SET(mask, value) \
	((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))

#define EHEA_BMASK_GET(mask, value) \
	(EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
132 * Generic ehea page
134 struct ehea_page {
135 u8 entries[PAGE_SIZE];
139 * Generic queue in linux kernel virtual memory
141 struct hw_queue {
142 u64 current_q_offset; /* current queue entry */
143 struct ehea_page **queue_pages; /* array of pages belonging to queue */
144 u32 qe_size; /* queue entry size */
145 u32 queue_length; /* queue length allocated in bytes */
146 u32 pagesize;
147 u32 toggle_state; /* toggle flag - per page */
148 u32 reserved; /* 64 bit alignment */
152 * For pSeries this is a 64bit memory address where
153 * I/O memory is mapped into CPU address space
155 struct h_epa {
156 void __iomem *addr;
159 struct h_epa_user {
160 u64 addr;
163 struct h_epas {
164 struct h_epa kernel; /* kernel space accessible resource,
165 set to 0 if unused */
166 struct h_epa_user user; /* user space accessible resource
167 set to 0 if unused */
171 * Memory map data structures
173 struct ehea_dir_bmap
175 u64 ent[EHEA_MAP_ENTRIES];
177 struct ehea_top_bmap
179 struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
181 struct ehea_bmap
183 struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
/* forward declarations */
struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
struct ehea_port;
struct ehea_av;
193 * Queue attributes passed to ehea_create_qp()
195 struct ehea_qp_init_attr {
196 /* input parameter */
197 u32 qp_token; /* queue token */
198 u8 low_lat_rq1;
199 u8 signalingtype; /* cqe generation flag */
200 u8 rq_count; /* num of receive queues */
201 u8 eqe_gen; /* eqe generation flag */
202 u16 max_nr_send_wqes; /* max number of send wqes */
203 u16 max_nr_rwqes_rq1; /* max number of receive wqes */
204 u16 max_nr_rwqes_rq2;
205 u16 max_nr_rwqes_rq3;
206 u8 wqe_size_enc_sq;
207 u8 wqe_size_enc_rq1;
208 u8 wqe_size_enc_rq2;
209 u8 wqe_size_enc_rq3;
210 u8 swqe_imm_data_len; /* immediate data length for swqes */
211 u16 port_nr;
212 u16 rq2_threshold;
213 u16 rq3_threshold;
214 u64 send_cq_handle;
215 u64 recv_cq_handle;
216 u64 aff_eq_handle;
218 /* output parameter */
219 u32 qp_nr;
220 u16 act_nr_send_wqes;
221 u16 act_nr_rwqes_rq1;
222 u16 act_nr_rwqes_rq2;
223 u16 act_nr_rwqes_rq3;
224 u8 act_wqe_size_enc_sq;
225 u8 act_wqe_size_enc_rq1;
226 u8 act_wqe_size_enc_rq2;
227 u8 act_wqe_size_enc_rq3;
228 u32 nr_sq_pages;
229 u32 nr_rq1_pages;
230 u32 nr_rq2_pages;
231 u32 nr_rq3_pages;
232 u32 liobn_sq;
233 u32 liobn_rq1;
234 u32 liobn_rq2;
235 u32 liobn_rq3;
239 * Event Queue attributes, passed as parameter
241 struct ehea_eq_attr {
242 u32 type;
243 u32 max_nr_of_eqes;
244 u8 eqe_gen; /* generate eqe flag */
245 u64 eq_handle;
246 u32 act_nr_of_eqes;
247 u32 nr_pages;
248 u32 ist1; /* Interrupt service token */
249 u32 ist2;
250 u32 ist3;
251 u32 ist4;
256 * Event Queue
258 struct ehea_eq {
259 struct ehea_adapter *adapter;
260 struct hw_queue hw_queue;
261 u64 fw_handle;
262 struct h_epas epas;
263 spinlock_t spinlock;
264 struct ehea_eq_attr attr;
268 * HEA Queues
270 struct ehea_qp {
271 struct ehea_adapter *adapter;
272 u64 fw_handle; /* QP handle for firmware calls */
273 struct hw_queue hw_squeue;
274 struct hw_queue hw_rqueue1;
275 struct hw_queue hw_rqueue2;
276 struct hw_queue hw_rqueue3;
277 struct h_epas epas;
278 struct ehea_qp_init_attr init_attr;
282 * Completion Queue attributes
284 struct ehea_cq_attr {
285 /* input parameter */
286 u32 max_nr_of_cqes;
287 u32 cq_token;
288 u64 eq_handle;
290 /* output parameter */
291 u32 act_nr_of_cqes;
292 u32 nr_pages;
296 * Completion Queue
298 struct ehea_cq {
299 struct ehea_adapter *adapter;
300 u64 fw_handle;
301 struct hw_queue hw_queue;
302 struct h_epas epas;
303 struct ehea_cq_attr attr;
307 * Memory Region
309 struct ehea_mr {
310 struct ehea_adapter *adapter;
311 u64 handle;
312 u64 vaddr;
313 u32 lkey;
/*
 * Port state information
 */
struct port_stats {
	int poll_receive_errors;
	int queue_stopped;
	int err_tcp_cksum;
	int err_ip_cksum;
	int err_frame_crc;
};

#define EHEA_IRQ_NAME_SIZE 20
/*
 * Queue SKB Array
 */
struct ehea_q_skb_arr {
	struct sk_buff **arr;	/* skb array for queue */
	int len;		/* array length */
	int index;		/* array index */
	int os_skbs;		/* rq2/rq3 only: outstanding skbs */
};
340 * Port resources
342 struct ehea_port_res {
343 struct napi_struct napi;
344 struct port_stats p_stats;
345 struct ehea_mr send_mr; /* send memory region */
346 struct ehea_mr recv_mr; /* receive memory region */
347 struct ehea_port *port;
348 char int_recv_name[EHEA_IRQ_NAME_SIZE];
349 char int_send_name[EHEA_IRQ_NAME_SIZE];
350 struct ehea_qp *qp;
351 struct ehea_cq *send_cq;
352 struct ehea_cq *recv_cq;
353 struct ehea_eq *eq;
354 struct ehea_q_skb_arr rq1_skba;
355 struct ehea_q_skb_arr rq2_skba;
356 struct ehea_q_skb_arr rq3_skba;
357 struct ehea_q_skb_arr sq_skba;
358 int sq_skba_size;
359 int swqe_refill_th;
360 atomic_t swqe_avail;
361 int swqe_ll_count;
362 u32 swqe_id_counter;
363 u64 tx_packets;
364 u64 tx_bytes;
365 u64 rx_packets;
366 u64 rx_bytes;
367 int sq_restart_flag;
#define EHEA_MAX_PORTS 16

#define EHEA_NUM_PORTRES_FW_HANDLES	6	/* QP handle, SendCQ handle,
						   RecvCQ handle, EQ handle,
						   SendMR handle, RecvMR handle */
#define EHEA_NUM_PORT_FW_HANDLES	1	/* EQ handle */
#define EHEA_NUM_ADAPTER_FW_HANDLES	2	/* MR handle, NEQ handle */
379 struct ehea_adapter {
380 u64 handle;
381 struct platform_device *ofdev;
382 struct ehea_port *port[EHEA_MAX_PORTS];
383 struct ehea_eq *neq; /* notification event queue */
384 struct tasklet_struct neq_tasklet;
385 struct ehea_mr mr;
386 u32 pd; /* protection domain */
387 u64 max_mc_mac; /* max number of multicast mac addresses */
388 int active_ports;
389 struct list_head list;
393 struct ehea_mc_list {
394 struct list_head list;
395 u64 macaddr;
398 /* kdump support */
399 struct ehea_fw_handle_entry {
400 u64 adh; /* Adapter Handle */
401 u64 fwh; /* Firmware Handle */
404 struct ehea_fw_handle_array {
405 struct ehea_fw_handle_entry *arr;
406 int num_entries;
407 struct mutex lock;
410 struct ehea_bcmc_reg_entry {
411 u64 adh; /* Adapter Handle */
412 u32 port_id; /* Logical Port Id */
413 u8 reg_type; /* Registration Type */
414 u64 macaddr;
417 struct ehea_bcmc_reg_array {
418 struct ehea_bcmc_reg_entry *arr;
419 int num_entries;
420 spinlock_t lock;
/* administrative port state */
#define EHEA_PORT_UP		1
#define EHEA_PORT_DOWN		0
/* physical link state */
#define EHEA_PHY_LINK_UP	1
#define EHEA_PHY_LINK_DOWN	0
#define EHEA_MAX_PORT_RES	16
428 struct ehea_port {
429 struct ehea_adapter *adapter; /* adapter that owns this port */
430 struct net_device *netdev;
431 struct rtnl_link_stats64 stats;
432 struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
433 struct platform_device ofdev; /* Open Firmware Device */
434 struct ehea_mc_list *mc_list; /* Multicast MAC addresses */
435 struct ehea_eq *qp_eq;
436 struct work_struct reset_task;
437 struct delayed_work stats_work;
438 struct mutex port_lock;
439 char int_aff_name[EHEA_IRQ_NAME_SIZE];
440 int allmulti; /* Indicates IFF_ALLMULTI state */
441 int promisc; /* Indicates IFF_PROMISC state */
442 int num_mcs;
443 int resets;
444 unsigned long flags;
445 u64 mac_addr;
446 u32 logical_port_id;
447 u32 port_speed;
448 u32 msg_enable;
449 u32 sig_comp_iv;
450 u32 state;
451 u8 phy_link;
452 u8 full_duplex;
453 u8 autoneg;
454 u8 num_def_qps;
455 wait_queue_head_t swqe_avail_wq;
456 wait_queue_head_t restart_wq;
/* queue-depth configuration for one port resource set */
struct port_res_cfg {
	int max_entries_rcq;	/* receive completion queue */
	int max_entries_scq;	/* send completion queue */
	int max_entries_sq;
	int max_entries_rq1;
	int max_entries_rq2;
	int max_entries_rq3;
};
/* bit numbers for struct ehea_port.flags */
enum ehea_flag_bits {
	__EHEA_STOP_XFER,
	__EHEA_DISABLE_PORT_RESET
};
473 void ehea_set_ethtool_ops(struct net_device *netdev);
474 int ehea_sense_port_attr(struct ehea_port *port);
475 int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
477 #endif /* __EHEA_H__ */