/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
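	/*
	 * __ALIGN_MASK(1, ~vmdq->mask) evaluates to the number of queues in
	 * each VMDq pool, so reg_idx starts at the first ring register of
	 * the first pool assigned to the PF.
	 */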
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
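		/*
		 * (vmdq->offset + vmdq->indices) is one past the last VMDq
		 * pool, so the dedicated FCoE rings land on registers beyond
		 * the pools, aligned to the TC used for FCoE traffic.
		 */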
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs > 4) {
			/* TCs    : TC0/1 TC2/3 TC4-7 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/* TCs    : TC0 TC1 TC2/3 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;
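	/*
	 * Each traffic class owns a contiguous block of rss_i rings in the
	 * adapter's ring arrays; the hardware register index for the start
	 * of each block comes from ixgbe_get_first_reg_idx() above.
	 */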
	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 **/
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;

#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;

#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif
	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
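	/*
	 * Here 128 is the total number of Rx/Tx queue pairs on 82599/X540
	 * class hardware; dividing by the queues-per-pool size gives the
	 * pool count, and anything beyond the VMDq pools can back FCoE.
	 */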

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
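	/*
	 * Each TC now owns a block of rss_i Tx queues starting at queue
	 * rss_i * i; the stack uses this mapping to select a queue for a
	 * given priority.
	 */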

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (rss_i < 4)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		rss_i = 4;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= fcoe->indices;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;

		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;
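	/*
	 * Note: with the legacy pci_enable_msix() API a positive return
	 * value is the number of vectors that could have been allocated,
	 * which is why the loop below retries with that smaller count.
	 */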

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		vectors -= NON_Q_VECTORS;
		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
	}
}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);
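	/*
	 * The rings are laid out directly after the q_vector struct in a
	 * single allocation, so a vector and all of its rings end up in
	 * the same (ideally NUMA-local) block of memory.
	 */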

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);
	napi_hash_add(&q_vector->napi);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_10K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int vector, v_budget, err;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			return;
	}

	/* disable DCB if number of TCs exceeds 1 */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* disable SR-IOV */
	ixgbe_disable_sriov(adapter);

	/* disable RSS */
	adapter->ring_feature[RING_F_RSS].limit = 1;

	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy.  Error: %d\n", err);
		return;
	}
	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
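	/* wrap next_to_use back to descriptor 0 once the end of the ring is
	 * reached, since the context descriptor consumed one entry */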

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}