/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
/*!  \file  octeon_network.h
 *   \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
 */
#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
/* Bit mask values for lio->ifstate */
#define   LIO_IFSTATE_DROQ_OPS             0x01
#define   LIO_IFSTATE_REGISTERED           0x02
#define   LIO_IFSTATE_RUNNING              0x04
#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
#define   LIO_IFSTATE_RESETTING            0x10
struct oct_nic_stats_resp {
	u64 rh;
	struct oct_link_stats stats;
	u64 status;
};

struct oct_nic_stats_ctrl {
	struct completion complete;
	struct net_device *netdev;
};
/** LiquidIO per-interface network private data */
struct lio {
	/** State of the interface. Rx/Tx happens only in the RUNNING state. */
	atomic_t ifstate;

	/** Octeon Interface index number. This device will be represented as
	 *  oct<ifidx> in the system.
	 */
	int ifidx;

	/** Octeon Input queue to use to transmit for this network interface. */
	int txq;

	/** Octeon Output queue from which pkts arrive
	 * for this network interface.
	 */
	int rxq;

	/** Guards each glist */
	spinlock_t *glist_lock;

	/** Array of gather component linked lists */
	struct list_head *glist;
	void **glists_virt_base;
	dma_addr_t *glists_dma_base;
	u32 glist_entry_size;

	/** Pointer to the NIC properties for the Octeon device this network
	 *  interface is associated with.
	 */
	struct octdev_props *octprops;

	/** Pointer to the octeon device structure. */
	struct octeon_device *oct_dev;

	struct net_device *netdev;

	/** Link information sent by the core application for this interface. */
	struct oct_link_info linfo;

	/** counter of link changes */
	u64 link_changes;

	/** Size of Tx queue for this octeon device. */
	u32 tx_qsize;

	/** Size of Rx queue for this octeon device. */
	u32 rx_qsize;

	/** Size of MTU of this octeon device. */
	u32 mtu;

	/** msg level flag per interface. */
	u32 msg_enable;

	/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
	u64 dev_capability;

	/* Copy of transmit encapsulation capabilities:
	 * TSO, TSO6, Checksums for this device for Kernel
	 * TSO mode, TSO6 mode, Checksum mode
	 */
	u64 enc_dev_capability;

	/** Copy of beacon reg in phy */
	u32 phy_beacon_val;

	/** Copy of ctrl reg in phy */
	u32 led_ctrl_val;

	/* PTP clock information */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	s64 ptp_adjust;

	/* for atomic access to Octeon PTP reg and data struct */
	spinlock_t ptp_lock;

	/* Interface info */
	u32 intf_open;

	/* work queue for txq status */
	struct cavium_wq txq_status_wq;

	/* work queue for rxq oom status */
	struct cavium_wq rxq_status_wq;

	/* work queue for link status */
	struct cavium_wq link_status_wq;

	/* work queue to regularly send local time to octeon firmware */
	struct cavium_wq sync_octeon_time_wq;

	int netdev_uc_count;
};
#define LIO_SIZE         (sizeof(struct lio))
#define GET_LIO(netdev)  ((struct lio *)netdev_priv(netdev))

#define LIO_MAX_CORES    12
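/* Illustrative sketch (assumes "netdev" is a net_device registered by this
 * driver, so its private area really is a struct lio):
 *
 *	struct lio *lio = GET_LIO(netdev);
 *
 *	netif_info(lio, probe, lio->netdev, "ifidx %d\n", lio->ifidx);
 */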
/**
 * \brief Enable or disable feature
 * @param netdev    pointer to network device
 * @param cmd       Command that just requires acknowledgment
 * @param param1    Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
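/* Illustrative call sketch: enabling LRO through this acknowledged-command
 * path. OCTNET_CMD_LRO_ENABLE and the OCTNIC_LROIPV4/OCTNIC_LROIPV6
 * parameter bits are assumed to come from liquidio_common.h, as elsewhere
 * in the driver:
 *
 *	liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
 *			     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 */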
int setup_rx_oom_poll_fn(struct net_device *netdev);

void cleanup_rx_oom_poll_fn(struct net_device *netdev);
/**
 * \brief Link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * core app completes. The nctrl_ptr contains a copy of the command type
 * and data sent to the core app. This routine is only called if the ctrl
 * pkt was sent successfully to the core app.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
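/* Sketch of how a sender hooks this completion (assumes the
 * struct octnic_ctrl_pkt layout from octeon_nic.h):
 *
 *	struct octnic_ctrl_pkt nctrl;
 *
 *	memset(&nctrl, 0, sizeof(nctrl));
 *	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_DEVFLAGS;
 *	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 *	nctrl.netpndev = (u64)netdev;
 */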
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs);

irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
				       void *dev);

int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);
/**
 * \brief Register ethtool operations
 * @param netdev    pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);
#define SKB_ADJ_MASK  0x3F
#define SKB_ADJ       (SKB_ADJ_MASK + 1)

#define MIN_SKB_SIZE       256 /* 8 bytes and more - 8 bytes for PTP */
#define LIO_RXBUFFER_SZ    2048
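/* SKB_ADJ forces skb->data onto a 64-byte boundary: SKB_ADJ_MASK is 0x3F,
 * so SKB_ADJ is 0x40 (64). Worked example of the adjustment done by the
 * allocators below, assuming dev_alloc_skb() returned data at ...0x1028:
 *
 *	r = SKB_ADJ - (0x1028 & SKB_ADJ_MASK)
 *	  = 0x40 - 0x28 = 0x18;
 *	skb_reserve(skb, r);	=> skb->data is now 64-byte aligned
 */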
static inline void
*recv_buffer_alloc(struct octeon_device *oct,
		   struct octeon_skb_page_info *pg_info)
{
	struct page *page;
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	page = alloc_page(GFP_ATOMIC);
	if (unlikely(!page))
		return NULL;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		__free_page(page);
		pg_info->page = NULL;
		return NULL;
	}

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	/* Get DMA info */
	pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
				    PAGE_SIZE, DMA_FROM_DEVICE);

	/* Mapping failed!! */
	if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
		__free_page(page);
		dev_kfree_skb_any((struct sk_buff *)skb);
		pg_info->page = NULL;
		return NULL;
	}

	pg_info->page = page;
	pg_info->page_offset = 0;
	skb_pg_info->page = page;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = pg_info->dma;

	return (void *)skb;
}
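/* Design note: recv_buffer_alloc() maps the entire page once. With 4 KB
 * pages that single mapping covers both receive buffers carved from the
 * page:
 *
 *	PAGE_SIZE / LIO_RXBUFFER_SZ = 4096 / 2048 = 2 buffers per page
 *	half 0 at dma + 0, half 1 at dma + LIO_RXBUFFER_SZ
 *
 * lio_map_ring() below simply adds pg_info->page_offset to this DMA base.
 */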
static inline void
*recv_buffer_fast_alloc(u32 size)
{
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	skb = dev_alloc_skb(size + SKB_ADJ);
	if (unlikely(!skb))
		return NULL;

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = NULL;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = 0;

	return skb;
}
static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf;

	if (!pg_info->page) {
		dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
			__func__);
		return -ENOMEM;
	}

	if (unlikely(page_count(pg_info->page) != 1) ||
	    unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
		return -ENOMEM;
	}

	/* Flip to other half of the buffer */
	if (pg_info->page_offset == 0)
		pg_info->page_offset = LIO_RXBUFFER_SZ;
	else
		pg_info->page_offset = 0;
	page_ref_inc(pg_info->page);

	return 0;
}
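/* Recycle flow, sketched: a buffer starts with page_offset 0 and a page
 * refcount of 1. A successful recycle flips the offset to the unused 2 KB
 * half and takes an extra page reference for the next receive:
 *
 *	if (!recv_buffer_recycle(oct, pg_info))
 *		;	=> pg_info now addresses the other half of the page
 *
 * If the stack still holds a reference (page_count() != 1) or the page
 * sits on a remote NUMA node, the mapping is torn down instead and the
 * caller must allocate a fresh buffer via recv_buffer_alloc().
 */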
static inline void
*recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
	struct sk_buff *skb;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		return NULL;
	}

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = pg_info->page;
	skb_pg_info->page_offset = pg_info->page_offset;
	skb_pg_info->dma = pg_info->dma;

	return skb;
}
static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;

	put_page(pg_info->page);
	pg_info->dma = 0;
	pg_info->page = NULL;
	pg_info->page_offset = 0;

	if (skb)
		dev_kfree_skb_any(skb);
}
static inline void recv_buffer_free(void *buffer)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));

	if (pg_info->page) {
		put_page(pg_info->page);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
	}

	dev_kfree_skb_any((struct sk_buff *)buffer);
}
static inline void
recv_buffer_fast_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}
static inline void tx_buffer_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}
#define lio_dma_alloc(oct, size, dma_addr) \
	dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
	dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
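/* Illustrative usage; "ring", "ring_size" and "ring_dma" are hypothetical
 * names for this sketch only:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = lio_dma_alloc(oct, ring_size, &ring_dma);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	lio_dma_free(oct, ring_size, ring, ring_dma);
 */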
static inline
void *get_rbd(struct sk_buff *skb)
{
	struct octeon_skb_page_info *pg_info;
	unsigned char *va;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	va = page_address(pg_info->page) + pg_info->page_offset;

	return va;
}
static inline u64
lio_map_ring(void *buf)
{
	dma_addr_t dma_addr;

	struct sk_buff *skb = (struct sk_buff *)buf;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	if (!pg_info->page) {
		pr_err("%s: pg_info->page NULL\n", __func__);
		WARN_ON(1);
	}

	/* Get DMA info */
	dma_addr = pg_info->dma;
	if (!pg_info->dma) {
		pr_err("%s: ERROR it should be already available\n",
		       __func__);
		WARN_ON(1);
	}
	dma_addr += pg_info->page_offset;

	return (u64)dma_addr;
}
static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
	       u64 buf_ptr)
{
	dma_unmap_page(&pci_dev->dev,
		       buf_ptr, (PAGE_SIZE << 0),
		       DMA_FROM_DEVICE);
}
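/* lio_map_ring() and lio_unmap_ring() are paired: the former publishes
 * dma + page_offset for the 2 KB half in use, the latter drops the
 * whole-page mapping when a buffer leaves the ring. Roughly (sketch; the
 * descriptor-ring names are assumed from octeon_droq.h):
 *
 *	droq->desc_ring[i].buffer_ptr = lio_map_ring(buffer);
 *	...
 *	lio_unmap_ring(oct->pci_dev, (u64)droq->desc_ring[i].buffer_ptr);
 */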
static inline void *octeon_fast_packet_alloc(u32 size)
{
	return recv_buffer_fast_alloc(size);
}
static inline void octeon_fast_packet_next(struct octeon_droq *droq,
					   struct sk_buff *nicbuf,
					   int copy_len,
					   int idx)
{
	skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
		     copy_len);
}
/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}
/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}
/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
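/* Lifecycle sketch: the open/stop paths use these helpers roughly as
 * follows ("netdev" assumed to be a LiquidIO device):
 *
 *	struct lio *lio = GET_LIO(netdev);
 *
 *	ifstate_set(lio, LIO_IFSTATE_RUNNING);		=> ndo_open
 *	if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
 *		;					=> Rx/Tx may proceed
 *	ifstate_reset(lio, LIO_IFSTATE_RUNNING);	=> ndo_stop
 *
 * Note: set/reset are read-modify-write on an atomic_t, so they are not
 * atomic against each other; they rely on outer serialization (e.g. rtnl
 * in the ndo_open/ndo_stop paths).
 */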
/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static inline int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
		pcount = atomic_read(
		    &oct->response_list[OCTEON_ORDERED_SC_LIST]
			 .pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}

#endif