/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/

/*!  \file  octeon_network.h
 *   \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
 */
#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
/* Bit mask values for lio->ifstate */
#define   LIO_IFSTATE_DROQ_OPS             0x01
#define   LIO_IFSTATE_REGISTERED           0x02
#define   LIO_IFSTATE_RUNNING              0x04
#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
#define   LIO_IFSTATE_RESETTING            0x10
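
/* The ifstate flags are independent bits, so an interface can be, e.g.,
 * REGISTERED and RUNNING at the same time. A hypothetical check for
 * "up and safe to pass traffic" would be:
 *
 *	if (ifstate_check(lio, LIO_IFSTATE_RUNNING) &&
 *	    !ifstate_check(lio, LIO_IFSTATE_RESETTING))
 *		... Rx/Tx may proceed ...
 *
 * (illustrative only; see ifstate_check()/ifstate_set() below)
 */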
struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};
#define LIO_IFCFG_WAIT_TIME    3000 /* In milliseconds */
#define LIQUIDIO_NDEV_STATS_POLL_TIME_MS 200
/* Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/* List manipulation. Next and prev pointers. */
	struct list_head list;

	/* Size of the gather component at sg in bytes. */
	int sg_size;

	/* Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/* Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	dma_addr_t sg_dma_ptr;
};
struct oct_nic_stats_resp {
	u64 rh;
	struct oct_link_stats stats;
	u64 status;
};

struct oct_nic_vf_stats_resp {
	u64 rh;
	u64 spoofmac_cnt;
	u64 status;
};

struct oct_nic_stats_ctrl {
	struct completion complete;
	struct net_device *netdev;
};

struct oct_nic_seapi_resp {
	u64 rh;
	union {
		u32 fec_setting;
		u32 speed;
	};
	u64 status;
};
/** LiquidIO per-interface network private data */
struct lio {
	/** State of the interface. Rx/Tx happens only in the RUNNING state. */
	atomic_t ifstate;

	/** Octeon Interface index number. This device will be represented as
	 *  oct<ifidx> in the system.
	 */
	int ifidx;

	/** Octeon Input queue to use to transmit for this network interface. */
	int txq;

	/** Octeon Output queue from which pkts arrive
	 *  for this network interface.
	 */
	int rxq;

	/** Guards each glist */
	spinlock_t *glist_lock;

	/** Array of gather component linked lists */
	struct list_head *glist;
	void **glists_virt_base;
	dma_addr_t *glists_dma_base;
	u32 glist_entry_size;

	/** Pointer to the NIC properties for the Octeon device this network
	 *  interface is associated with.
	 */
	struct octdev_props *octprops;

	/** Pointer to the octeon device structure. */
	struct octeon_device *oct_dev;

	struct net_device *netdev;

	/** Link information sent by the core application for this interface. */
	struct oct_link_info linfo;

	/** counter of link changes */
	u64 link_changes;

	/** Size of Tx queue for this octeon device. */
	u32 tx_qsize;

	/** Size of Rx queue for this octeon device. */
	u32 rx_qsize;

	/** Size of MTU of this octeon device. */
	u32 mtu;

	/** msg level flag per interface. */
	u32 msg_enable;

	/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
	u64 dev_capability;

	/* Copy of transmit encapsulation capabilities:
	 * TSO, TSO6, Checksums for this device for Kernel
	 * TSO can be enabled or disabled
	 */
	u64 enc_dev_capability;

	/** Copy of beacon reg in phy */
	u32 phy_beacon_val;

	/** Copy of ctrl reg in phy */
	u32 led_ctrl_val;

	/* PTP clock information */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;

	/* for atomic access to Octeon PTP reg and data struct */
	spinlock_t ptp_lock;

	/* work queue for txq status */
	struct cavium_wq txq_status_wq;

	/* work queue for rxq oom status */
	struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];

	/* work queue for link status */
	struct cavium_wq link_status_wq;

	/* work queue to regularly send local time to octeon firmware */
	struct cavium_wq sync_octeon_time_wq;

	struct cavium_wk stats_wk;
};
#define LIO_SIZE         (sizeof(struct lio))
#define GET_LIO(netdev)  ((struct lio *)netdev_priv(netdev))

#define LIO_MAX_CORES    16
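
/* GET_LIO() is just netdev_priv() with a cast: the struct lio instance
 * lives in the private area reserved after struct net_device at
 * allocation time. A typical (illustrative) access pattern:
 *
 *	struct lio *lio = GET_LIO(netdev);
 *	struct octeon_device *oct = lio->oct_dev;
 */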
/**
 * \brief Enable or disable feature
 * @param netdev  pointer to network device
 * @param cmd     Command that just requires acknowledgment
 * @param param1  Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
int setup_rx_oom_poll_fn(struct net_device *netdev);

void cleanup_rx_oom_poll_fn(struct net_device *netdev);
/**
 * \brief Link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * the core app completes. The nctrl_ptr contains a copy of the command type
 * and data sent to the core app. This routine is only called if the ctrl
 * pkt was sent successfully to the core app.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs);

irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
				       void *dev);

int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);

void lio_fetch_stats(struct work_struct *work);

int lio_wait_for_clean_oq(struct octeon_device *oct);
/**
 * \brief Register ethtool operations
 * @param netdev  pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);
void lio_delete_glists(struct lio *lio);

int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);

int liquidio_get_speed(struct lio *lio);
int liquidio_set_speed(struct lio *lio, int speed);
int liquidio_get_fec(struct lio *lio);
int liquidio_set_fec(struct lio *lio, int on_off);
/**
 * \brief Net device change_mtu
 * @param netdev  network device
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu);
#define LIO_CHANGE_MTU_SUCCESS 1
#define LIO_CHANGE_MTU_FAIL    2

#define SKB_ADJ_MASK 0x3F
#define SKB_ADJ      (SKB_ADJ_MASK + 1)

#define MIN_SKB_SIZE    256 /* 8 bytes and more - 8 bytes for PTP */
#define LIO_RXBUFFER_SZ 2048
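
/* SKB_ADJ_MASK of 0x3F means receive buffers are aligned to 64 bytes.
 * The alloc helpers below over-allocate by SKB_ADJ (64) and then
 * skb_reserve() the remainder. For example, if skb->data ends in 0x28:
 *
 *	r = SKB_ADJ - (0x28 & SKB_ADJ_MASK) = 64 - 40 = 24 bytes reserved,
 *
 * which pushes skb->data to the next 64-byte boundary.
 */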
static inline void
*recv_buffer_alloc(struct octeon_device *oct,
		   struct octeon_skb_page_info *pg_info)
{
	struct page *page;
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	page = alloc_page(GFP_ATOMIC);
	if (unlikely(!page))
		return NULL;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		__free_page(page);
		pg_info->page = NULL;
		return NULL;
	}

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	/* Get DMA info */
	pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
				    PAGE_SIZE, DMA_FROM_DEVICE);

	/* Mapping failed!! */
	if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
		__free_page(page);
		dev_kfree_skb_any((struct sk_buff *)skb);
		pg_info->page = NULL;
		return NULL;
	}

	pg_info->page = page;
	pg_info->page_offset = 0;
	skb_pg_info->page = page;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = pg_info->dma;

	return (void *)skb;
}
static inline void
*recv_buffer_fast_alloc(u32 size)
{
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	skb = dev_alloc_skb(size + SKB_ADJ);
	if (unlikely(!skb))
		return NULL;

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = NULL;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = 0;

	return skb;
}
static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf;

	if (!pg_info->page) {
		dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
			__func__);
		return -ENOMEM;
	}

	if (unlikely(page_count(pg_info->page) != 1) ||
	    unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
		return -ENOMEM;
	}

	/* Flip to other half of the buffer */
	if (pg_info->page_offset == 0)
		pg_info->page_offset = LIO_RXBUFFER_SZ;
	else
		pg_info->page_offset = 0;
	page_ref_inc(pg_info->page);

	return 0;
}
static inline void
*recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
	struct sk_buff *skb;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		return NULL;
	}

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = pg_info->page;
	skb_pg_info->page_offset = pg_info->page_offset;
	skb_pg_info->dma = pg_info->dma;

	return skb;
}
static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;

	put_page(pg_info->page);
	pg_info->dma = 0;
	pg_info->page = NULL;
	pg_info->page_offset = 0;

	if (skb)
		dev_kfree_skb_any(skb);
}
static inline void recv_buffer_free(void *buffer)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));

	if (pg_info->page) {
		put_page(pg_info->page);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
	}

	dev_kfree_skb_any((struct sk_buff *)buffer);
}
static inline void
recv_buffer_fast_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void tx_buffer_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}
#define lio_dma_alloc(oct, size, dma_addr) \
	dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
	dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
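
/* These wrap the coherent DMA API against the Octeon PCI device. A
 * hypothetical caller allocating a 4 KB shared region would look like:
 *
 *	dma_addr_t dma;
 *	void *virt = lio_dma_alloc(oct, 4096, &dma);
 *	if (virt)
 *		... use virt (CPU side) and dma (device side) ...
 *	lio_dma_free(oct, 4096, virt, dma);
 */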
static inline
void *get_rbd(struct sk_buff *skb)
{
	struct octeon_skb_page_info *pg_info;
	unsigned char *va;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	va = page_address(pg_info->page) + pg_info->page_offset;

	return va;
}
static inline u64
lio_map_ring(void *buf)
{
	dma_addr_t dma_addr;

	struct sk_buff *skb = (struct sk_buff *)buf;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	if (!pg_info->page) {
		pr_err("%s: pg_info->page NULL\n", __func__);
		WARN_ON(1);
	}

	/* Get DMA info */
	dma_addr = pg_info->dma;
	if (!pg_info->dma) {
		pr_err("%s: ERROR it should be already available\n",
		       __func__);
		WARN_ON(1);
	}
	dma_addr += pg_info->page_offset;

	return (u64)dma_addr;
}
static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
	       u64 buf_ptr)
{
	dma_unmap_page(&pci_dev->dev,
		       buf_ptr, (PAGE_SIZE << 0),
		       DMA_FROM_DEVICE);
}
static inline void *octeon_fast_packet_alloc(u32 size)
{
	return recv_buffer_fast_alloc(size);
}
static inline void octeon_fast_packet_next(struct octeon_droq *droq,
					   struct sk_buff *nicbuf,
					   int copy_len,
					   int idx)
{
	skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
		     copy_len);
}
/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}
/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}
/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static inline int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
		pcount = atomic_read(
		    &oct->response_list[OCTEON_ORDERED_SC_LIST]
			 .pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}
/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void stop_txqs(struct net_device *netdev)
{
	int i;

	for (i = 0; i < netdev->real_num_tx_queues; i++)
		netif_stop_subqueue(netdev, i);
}
/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void wake_txqs(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	int i, qno;

	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;

		if (__netif_subqueue_stopped(netdev, i)) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, i);
		}
	}
}
/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void start_txqs(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	int i;

	if (lio->linfo.link.s.link_up) {
		for (i = 0; i < netdev->real_num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	}
}
static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
{
	return skb->queue_mapping % oct->num_iqs;
}
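
/* skb_iq() folds the kernel-selected Tx queue onto the (possibly
 * smaller) set of Octeon instruction queues: with num_iqs == 4, for
 * example, queue_mapping values 0..7 map to IQs 0,1,2,3,0,1,2,3.
 */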
/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *lio_list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if (root->prev == root && root->next == root)
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}

#endif