/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

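	/*
	 * Worked example of the mapping above (illustrative values): with
	 * 8 queues per pool (~vmdq->mask == 0x7), vmdq->offset == 0 and
	 * tcs == 4, rings 0-3 receive register indices 0-3.  At i == 4 the
	 * test (reg_idx & ~vmdq->mask) >= tcs fires and
	 * __ALIGN_MASK(4, 0x7) rounds reg_idx up to 8, so ring 4 lands on
	 * the first queue of the next pool; queues 4-7 of each pool are
	 * left unused.
	 */
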
#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}
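/*
 * Illustrative example of the shifts above (values chosen for clarity): on
 * 82599 with 8 traffic classes, TC6 starts at Tx register index
 * (6 + 8) << 3 = 112 and Rx register index 6 << 4 = 96; in 4-TC mode, TC2
 * starts at Tx index (2 + 4) << 4 = 96 and Rx index 2 << 5 = 64.
 */
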
/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
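/*
 * Example of the DCB mapping performed by ixgbe_cache_ring_dcb() above
 * (illustrative values): with num_tcs == 4 and rss_i == 16 on 82599, the
 * rings for TC1 occupy array slots 16-31 and are pointed at Tx register
 * indices 64-79 and Rx register indices 32-47, per ixgbe_get_first_reg_idx().
 */
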
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 **/
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}
/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}
/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0
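/*
 * Note (illustrative): each RSS mask above is the per-region queue count
 * minus one, e.g. IXGBE_RSS_4Q_MASK (0x3) describes 4 RSS queues, so
 * (reg_idx & mask) selects a queue within its RSS region.
 */
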
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FcoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
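/*
 * Worked example of the pool sizing above (illustrative values): with
 * tcs == 8 the device is carved into 16 pools of 8 queues
 * (IXGBE_82599_VMDQ_8Q_MASK), so with vmdq_i == 4 pools in use,
 * fcoe_i = (128 / 8) - 4 = 12 pools remain available for dedicated FCoE
 * rings before the CPU and feature limits are applied.
 */
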
static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}
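/*
 * Worked example (illustrative values): with 4 traffic classes and a netdev
 * exposing 64 Tx queues, rss_i = 64 / 4 = 16 and rss_m = IXGBE_RSS_16Q_MASK,
 * so each TC gets 16 queue pairs and netdev_set_tc_queue() maps TC i to
 * queues [16 * i, 16 * i + 15]; an FCoE TC of 3 would then start at offset
 * 16 * 3 = 48.
 */
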
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (rss_i < 4)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		rss_i = 4;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* reserve no more than number of CPUs */
			fcoe_i = min_t(u16, fcoe_i, num_online_cpus());

			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}
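/*
 * Worked example (illustrative values): requesting 40 pools makes
 * vmdq_i > 32, so the 64-pool/2-queue mode is chosen and rss_i is clamped
 * to 2; the 128 hardware queues then leave fcoe_i = 128 - (40 * 2) = 48
 * queues for FCoE before the per-CPU and feature limits above are applied.
 */
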
/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		f->indices = min_t(u16, num_online_cpus(), f->limit);
		rss_i = max_t(u16, rss_i, f->indices);

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;

		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}
/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		vectors -= NON_Q_VECTORS;
		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
	}
}
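/*
 * Note on the retry loop above: in this kernel generation pci_enable_msix()
 * returns 0 on success, a negative errno on hard failure, or a positive
 * count of the vectors that could be allocated, which is why the loop
 * shrinks the request and tries again (e.g. ask for 10, get back 6, retry
 * with 6).
 */
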
static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}
/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		if (cpu_online(v_idx)) {
			cpu = v_idx;
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	else
		cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
	q_vector->numa_node = node;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}
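/*
 * Note: the rings are carved out of the same allocation as the q_vector
 * itself (size = sizeof(struct ixgbe_q_vector) + ring_count *
 * sizeof(struct ixgbe_ring)), which is why the loops above simply walk a
 * ring pointer with ring++ instead of allocating each ring separately.
 */
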
/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}
/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
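/*
 * Worked example of the ring distribution above (illustrative values): with
 * num_q_vectors == 4 and 8 Tx/8 Rx queues, each vector gets
 * DIV_ROUND_UP(8, 4) == 2 rings of each type, and because
 * ixgbe_alloc_q_vector() advances the ring index by v_count, vector 0 ends
 * up owning Tx/Rx queues 0 and 4, vector 1 owns queues 1 and 5, and so on.
 */
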
/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int vector, v_budget, err;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			return;
	}

	/* disable DCB if number of TCs exceeds 1 */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* disable SR-IOV */
	ixgbe_disable_sriov(adapter);

	/* disable RSS */
	adapter->ring_feature[RING_F_RSS].limit = 1;

	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy.  Error: %d\n", err);
		return;
	}
	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
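/*
 * Worked example of the MSI-X budget above (illustrative values): with 16 Rx
 * and 16 Tx queues on an 8-CPU system, v_budget = min(max(16, 16), 8) +
 * NON_Q_VECTORS, further clamped to hw->mac.max_msix_vectors, so at most
 * 8 queue vectors plus the non-queue vector(s) are requested from
 * pci_enable_msix().
 */
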
/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}
/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}
);