/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
/*!  \file  octeon_network.h
 *   \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
 */

#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__

#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
/* Bit mask values for lio->ifstate */
#define LIO_IFSTATE_DROQ_OPS			0x01
#define LIO_IFSTATE_REGISTERED			0x02
#define LIO_IFSTATE_RUNNING			0x04
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED	0x08
#define LIO_IFSTATE_RESETTING			0x10
struct oct_nic_stats_resp {
	u64 rh;
	struct oct_link_stats stats;
	u64 status;
};

struct oct_nic_stats_ctrl {
	struct completion complete;
	struct net_device *netdev;
};
/** LiquidIO per-interface network private data */
struct lio {
	/** State of the interface. Rx/Tx happens only in the RUNNING state. */
	atomic_t ifstate;

	/** Octeon Interface index number. This device will be represented as
	 * oct<ifidx> in the system.
	 */
	int ifidx;

	/** Octeon Input queue to use to transmit for this network interface. */
	int txq;

	/** Octeon Output queue from which pkts arrive
	 * for this network interface.
	 */
	int rxq;

	/** Guards each glist */
	spinlock_t *glist_lock;

	/** Array of gather component linked lists */
	struct list_head *glist;
	void **glists_virt_base;
	dma_addr_t *glists_dma_base;
	u32 glist_entry_size;

	/** Pointer to the NIC properties for the Octeon device this network
	 * interface is associated with.
	 */
	struct octdev_props *octprops;

	/** Pointer to the octeon device structure. */
	struct octeon_device *oct_dev;

	struct net_device *netdev;

	/** Link information sent by the core application for this interface. */
	struct oct_link_info linfo;

	/** counter of link changes */
	u64 link_changes;

	/** Size of Tx queue for this octeon device. */
	u32 tx_qsize;

	/** Size of Rx queue for this octeon device. */
	u32 rx_qsize;
	/** Size of MTU of this octeon device. */
	u32 mtu;

	/** msg level flag per interface. */
	u32 msg_enable;

	/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
	u64 dev_capability;

	/* Copy of transmit encapsulation capabilities:
	 * TSO, TSO6, Checksums for this device for Kernel
	 * 3.10.0 onwards
	 */
	u64 enc_dev_capability;

	/** Copy of beacon reg in phy */
	u32 phy_beacon_val;
	/** Copy of ctrl reg in phy */
	u32 led_ctrl_val;

	/* PTP clock information */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	s64 ptp_adjust;

	/* for atomic access to Octeon PTP reg and data struct */
	spinlock_t ptp_lock;

	/* Interface info */
	u32 intf_open;

	/* work queue for txq status */
	struct cavium_wq txq_status_wq;

	/* work queue for rxq oom status */
	struct cavium_wq rxq_status_wq;

	/* work queue for link status */
	struct cavium_wq link_status_wq;

	/* work queue to regularly send local time to octeon firmware */
	struct cavium_wq sync_octeon_time_wq;

	int netdev_uc_count;
};
#define LIO_SIZE	(sizeof(struct lio))
#define GET_LIO(netdev)	((struct lio *)netdev_priv(netdev))

#define LIO_MAX_CORES	12
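/*
 * Illustrative sketch only (not a call site in this driver): netdev
 * callbacks recover the per-interface private data with GET_LIO() before
 * touching any lio fields.  The surrounding handler is hypothetical.
 *
 *	struct lio *lio = GET_LIO(netdev);
 *
 *	if (lio->msg_enable & NETIF_MSG_DRV)
 *		netdev_info(netdev, "interface index %d\n", lio->ifidx);
 */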
/**
 * \brief Enable or disable feature
 * @param netdev  pointer to network device
 * @param cmd     Command that just requires acknowledgment
 * @param param1  Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
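/*
 * Illustrative sketch, assuming one of the OCTNET_CMD_* control codes
 * defined elsewhere in this driver; the specific command and parameter
 * below are an example only, not the required usage:
 *
 *	liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
 *			     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 */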
int setup_rx_oom_poll_fn(struct net_device *netdev);

void cleanup_rx_oom_poll_fn(struct net_device *netdev);

/**
 * \brief Link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * core app completes. The nctrl_ptr contains a copy of the command type
 * and data sent to the core app. This routine is only called if the ctrl
 * pkt was sent successfully to the core app.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs);

irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
				       void *dev);

int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);

/**
 * \brief Register ethtool operations
 * @param netdev    pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);
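/* Receive skbs are padded so that skb->data ends up on a 64-byte boundary;
 * SKB_ADJ_MASK/SKB_ADJ below drive the skb_reserve() adjustment done in the
 * recv_buffer_*() helpers further down.
 */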
#define SKB_ADJ_MASK	0x3F
#define SKB_ADJ		(SKB_ADJ_MASK + 1)

#define MIN_SKB_SIZE	256 /* 8 bytes and more - 8 bytes for PTP */
#define LIO_RXBUFFER_SZ	2048
static inline void
*recv_buffer_alloc(struct octeon_device *oct,
		   struct octeon_skb_page_info *pg_info)
{
	struct page *page;
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	page = alloc_page(GFP_ATOMIC);
	if (unlikely(!page))
		return NULL;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		__free_page(page);
		pg_info->page = NULL;
		return NULL;
	}

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	/* Get DMA info */
	pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
				    PAGE_SIZE, DMA_FROM_DEVICE);

	/* Mapping failed!! */
	if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
		__free_page(page);
		dev_kfree_skb_any((struct sk_buff *)skb);
		pg_info->page = NULL;
		return NULL;
	}

	pg_info->page = page;
	pg_info->page_offset = 0;
	skb_pg_info->page = page;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = pg_info->dma;

	return (void *)skb;
}

static inline void
*recv_buffer_fast_alloc(u32 size)
{
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	skb = dev_alloc_skb(size + SKB_ADJ);
	if (unlikely(!skb))
		return NULL;

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = NULL;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = 0;

	return skb;
}
static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf;

	if (!pg_info->page) {
		dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
			__func__);
		return -ENOMEM;
	}

	if (unlikely(page_count(pg_info->page) != 1) ||
	    unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
		return -ENOMEM;
	}

	/* Flip to other half of the buffer */
	if (pg_info->page_offset == 0)
		pg_info->page_offset = LIO_RXBUFFER_SZ;
	else
		pg_info->page_offset = 0;
	page_ref_inc(pg_info->page);

	return 0;
}

static inline void
*recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
	struct sk_buff *skb;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		return NULL;
	}

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = pg_info->page;
	skb_pg_info->page_offset = pg_info->page_offset;
	skb_pg_info->dma = pg_info->dma;

	return skb;
}
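/*
 * Illustrative sketch of the receive-buffer lifecycle (hypothetical refill
 * loop, not a specific call site): a page is allocated and DMA-mapped once
 * by recv_buffer_alloc(); recv_buffer_recycle() then flips between the two
 * LIO_RXBUFFER_SZ halves of that page while the page is still exclusively
 * owned, and recv_buffer_reuse() wraps the recycled half in a fresh skb.
 *
 *	struct octeon_skb_page_info pg_info;
 *	void *buf = recv_buffer_alloc(oct, &pg_info);
 *
 *	...
 *	if (recv_buffer_recycle(oct, &pg_info) == 0)
 *		buf = recv_buffer_reuse(oct, &pg_info);
 *	else
 *		buf = recv_buffer_alloc(oct, &pg_info);
 */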
static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;

	put_page(pg_info->page);
	pg_info->dma = 0;
	pg_info->page = NULL;
	pg_info->page_offset = 0;

	if (skb)
		dev_kfree_skb_any(skb);
}

static inline void recv_buffer_free(void *buffer)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));

	if (pg_info->page) {
		put_page(pg_info->page);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
	}

	dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void
recv_buffer_fast_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void tx_buffer_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}
#define lio_dma_alloc(oct, size, dma_addr) \
	dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
	dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)

static inline
void *get_rbd(struct sk_buff *skb)
{
	struct octeon_skb_page_info *pg_info;
	unsigned char *va;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	va = page_address(pg_info->page) + pg_info->page_offset;

	return va;
}
static inline u64
lio_map_ring(void *buf)
{
	dma_addr_t dma_addr;

	struct sk_buff *skb = (struct sk_buff *)buf;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	if (!pg_info->page) {
		pr_err("%s: pg_info->page NULL\n", __func__);
		WARN_ON(1);
	}

	/* Get DMA info */
	dma_addr = pg_info->dma;
	if (!pg_info->dma) {
		pr_err("%s: ERROR it should be already available\n",
		       __func__);
		WARN_ON(1);
	}
	dma_addr += pg_info->page_offset;

	return (u64)dma_addr;
}

static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
	       u64 buf_ptr)
{
	dma_unmap_page(&pci_dev->dev,
		       buf_ptr, (PAGE_SIZE << 0),
		       DMA_FROM_DEVICE);
}
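/*
 * Illustrative sketch (names assumed, not a specific call site): the value
 * returned by lio_map_ring() is what gets programmed into a ring descriptor,
 * and the same value is handed back to lio_unmap_ring() when the entry is
 * torn down (page_offset is zero for a freshly allocated buffer).
 *
 *	void *buf = recv_buffer_alloc(oct, &pg_info);
 *	u64 buf_ptr = lio_map_ring(buf);
 *	...
 *	lio_unmap_ring(oct->pci_dev, buf_ptr);
 */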
static inline void *octeon_fast_packet_alloc(u32 size)
{
	return recv_buffer_fast_alloc(size);
}

static inline void octeon_fast_packet_next(struct octeon_droq *droq,
					    struct sk_buff *nicbuf,
					    int copy_len,
					    int idx)
{
	skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
		     copy_len);
}
/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}

/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
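/*
 * Illustrative sketch (hypothetical fast-path check, not a specific call
 * site): data-path code typically gates on the RUNNING bit with these
 * helpers before touching the queues.
 *
 *	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
 *		return NETDEV_TX_BUSY;
 */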
/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static inline int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
		pcount = atomic_read(
		    &oct->response_list[OCTEON_ORDERED_SC_LIST]
			 .pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}
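/*
 * Illustrative sketch (hypothetical teardown order, not a specific call
 * site): stop the interface first, then wait for outstanding control
 * requests before releasing the queues.
 *
 *	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
 *	if (wait_for_pending_requests(oct))
 *		dev_err(&oct->pci_dev->dev, "requests still pending\n");
 */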
#endif