/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
/**
 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
 * @hw: pointer to hardware structure
 * @rx_pba: method to distribute packet buffer
 *
 * Configure packet buffers for DCB mode.
 */
static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
{
	int num_tcs = IXGBE_MAX_PACKET_BUFFERS;
	u32 rx_pb_size = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
	u32 rxpktsize;
	u32 txpktsize;
	u32 txpbthresh;
	u8 i = 0;
	/*
	 * This really means configure the first half of the TCs
	 * (Traffic Classes) to use 5/8 of the Rx packet buffer
	 * space.  To determine the size of the buffer for each TC,
	 * we are multiplying the average size by 5/4 and applying
	 * it to half of the traffic classes.
	 */
	if (rx_pba == pba_80_48) {
		rxpktsize = (rx_pb_size * 5) / (num_tcs * 4);
		rx_pb_size -= rxpktsize * (num_tcs / 2);
		for (; i < (num_tcs / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
	}
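	/*
	 * Worked example (illustrative; assumes the 82599's 512 KB Rx
	 * packet buffer and eight TCs): rxpktsize = (512 KB * 5) / 32 =
	 * 80 KB for each of TC0-TC3 (5/8 of the buffer in total), which
	 * leaves 192 KB, i.e. 48 KB for each of TC4-TC7 below -- hence
	 * the name pba_80_48.
	 */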
	/* Divide the remaining Rx packet buffer evenly among the TCs */
	rxpktsize = rx_pb_size / (num_tcs - i);
	for (; i < num_tcs; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
	/*
	 * Set up the Tx packet buffer and threshold equally for all TCs.
	 * The TXPBTHRESH register is set in KB, so divide by 1024 and
	 * subtract 10 since the largest packet we support is just over 9K.
	 */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_tcs;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_tcs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}
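	/*
	 * Illustrative arithmetic (assuming IXGBE_TXPBSIZE_MAX is 160 KB
	 * and IXGBE_TXPKT_SIZE_MAX is 10, per ixgbe_type.h): each of the
	 * eight TCs gets a 20 KB Tx packet buffer, and txpbthresh =
	 * (20480 / 1024) - 10 = 10 KB.
	 */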
	/* Clear unused TCs, if any, to zero buffer size */
	for (; i < MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}

	return 0;
}
/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 * @prio_tc: priority to tc assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
				      u16 *refill,
				      u16 *max,
				      u8 *bwg_id,
				      u8 *prio_type,
				      u8 *prio_tc)
{
	u32 reg = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u8 i = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
	/* Map all traffic classes to their UP, 1 to 1 */
	reg = 0;
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
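	/*
	 * For example (illustrative; IXGBE_RTRUP2TC_UP_SHIFT is assumed
	 * to be 3): an identity mapping prio_tc = {0, 1, 2, 3, 4, 5, 6, 7}
	 * packs eight 3-bit TC fields into reg as 0x00FAC688.
	 */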
	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		credit_refill = refill[i];
		credit_max = max[i];
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

		if (prio_type[i] == prio_link)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return 0;
}
/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
					   u16 *refill,
					   u16 *max,
					   u8 *bwg_id,
					   u8 *prio_type)
{
	u32 reg, max_credits;
	u8 i;

	/* Clear the per-Tx queue credits; we use per-TC instead */
	for (i = 0; i < 128; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}
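	/*
	 * Note: the 82599 exposes 128 Tx queues; RTTDQSEL selects the
	 * queue whose per-queue credit register (RTTDT1C) the write
	 * above clears, so the loop touches every queue once.
	 */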
	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		max_credits = max[i];
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= refill[i];
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

		if (prio_type[i] == prio_group)
			reg |= IXGBE_RTTDT2C_GSP;

		if (prio_type[i] == prio_link)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return 0;
}
/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 * @prio_tc: priority to tc assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
					   u16 *refill,
					   u16 *max,
					   u8 *bwg_id,
					   u8 *prio_type,
					   u8 *prio_tc)
{
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/* Map all traffic classes to their UP, 1 to 1 */
	reg = 0;
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		reg = refill[i];
		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

		if (prio_type[i] == prio_group)
			reg |= IXGBE_RTTPT2C_GSP;

		if (prio_type[i] == prio_link)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return 0;
}
/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
{
	u32 i, reg, rx_pba_size;

	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		int enabled = pfc_en & (1 << i);
		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;

		reg = (rx_pba_size - hw->fc.low_water) << 10;

		if (enabled)
			reg |= IXGBE_FCRTL_XONE;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);

		reg = (rx_pba_size - hw->fc.high_water) << 10;
		if (enabled)
			reg |= IXGBE_FCRTH_FCEN;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}
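	/*
	 * Illustrative numbers (hypothetical water marks): with an 80 KB
	 * TC buffer, rx_pba_size is 80 after the shift, so a low_water
	 * of 8 puts the XON threshold at (80 - 8) << 10 = 73728 bytes;
	 * the XOFF threshold is derived from high_water the same way.
	 */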
	if (pfc_en) {
		/* Configure pause time (2 TCs per register) */
		reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
		for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

		/* Configure flow control refresh threshold value */
		IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

		reg = IXGBE_FCCFG_TFCE_PRIORITY;
		IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);

		/*
		 * Enable Receive PFC
		 * 82599 will always honor XOFF frames we receive when
		 * we are in PFC mode; however, X540 only honors enabled
		 * traffic classes.
		 */
		reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
		reg &= ~IXGBE_MFLCN_RFCE;
		reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;

		if (hw->mac.type == ixgbe_mac_X540)
			reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
	} else {
		for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
			hw->mac.ops.fc_enable(hw, i);
	}

	return 0;
}
/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
	u32 reg = 0;
	u8 i = 0;

	/*
	 * Receive Queues stats setting
	 * 32 RQSMR registers, each configuring 4 queues.
	 * Set all 16 queues of each TC to the same stat
	 * with TC 'n' going to stat 'n'.
	 */
	for (i = 0; i < 32; i++) {
		reg = 0x01010101 * (i / 4);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
	}
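	/*
	 * Each byte of RQSMR(i) maps one of Rx queues 4*i..4*i+3 to a
	 * stat counter, and i / 4 is the TC number because every TC owns
	 * 16 consecutive queues. For example, RQSMR(5) covers queues
	 * 20-23, which all belong to TC1, so it is written with
	 * 0x01010101.
	 */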
	/*
	 * Transmit Queues stats setting
	 * 32 TQSM registers, each controlling 4 queues.
	 * Set all queues of each TC to the same stat
	 * with TC 'n' going to stat 'n'.
	 * Tx queues are allocated non-uniformly to TCs:
	 * 32, 32, 16, 16, 8, 8, 8, 8.
	 */
	for (i = 0; i < 32; i++) {
		if (i < 8)
			reg = 0x00000000;
		else if (i < 16)
			reg = 0x01010101;
		else if (i < 20)
			reg = 0x02020202;
		else if (i < 24)
			reg = 0x03030303;
		else if (i < 26)
			reg = 0x04040404;
		else if (i < 28)
			reg = 0x05050505;
		else if (i < 30)
			reg = 0x06060606;
		else
			reg = 0x07070707;
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
	}
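	/*
	 * Sanity check on the mapping above (illustrative): TC2 owns the
	 * 16 Tx queues 64-79, i.e. TQSM registers 16-19, which is exactly
	 * the "i < 20" branch writing stat counter 2 as 0x02020202.
	 */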
	return 0;
}
/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 *
 * Configure general DCB parameters.
 */
static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
{
	u32 reg;
	u32 q;

	/* Disable the Tx desc arbiter so that MTQC can be changed */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable DCB for Rx with 8 TCs */
	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
	switch (reg & IXGBE_MRQC_MRQE_MASK) {
	case 0:
	case IXGBE_MRQC_RT4TCEN:
		/* RSS disabled cases */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
		break;
	case IXGBE_MRQC_RSSEN:
	case IXGBE_MRQC_RTRSS4TCEN:
		/* RSS enabled cases */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN;
		break;
	default:
		/* Unsupported value, assume stale data, overwrite no RSS */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
	/* Enable DCB for Tx with 8 TCs */
	reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

	/* Disable drop for all queues */
	for (q = 0; q < 128; q++)
		IXGBE_WRITE_REG(hw, IXGBE_QDE, q << IXGBE_QDE_IDX_SHIFT);

	/* Enable the Tx desc arbiter */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable Security TX Buffer IFG for DCB */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg |= IXGBE_SECTX_DCB;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	return 0;
}
/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @rx_pba: method to distribute packet buffer
 * @pfc_en: enabled pfc bitmask
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 * @prio_tc: priority to tc assignments indexed by priority
 *
 * Configure dcb settings and enable dcb mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
			      u8 rx_pba, u8 pfc_en, u16 *refill,
			      u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
	ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
	ixgbe_dcb_config_82599(hw);
	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
					  prio_type, prio_tc);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
					       bwg_id, prio_type);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
					       bwg_id, prio_type, prio_tc);
	ixgbe_dcb_config_pfc_82599(hw, pfc_en);
	ixgbe_dcb_config_tc_stats_82599(hw);

	return 0;
}
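/*
 * Usage sketch (hypothetical values, for illustration only -- the driver
 * actually derives these tables from its DCB configuration before calling
 * this entry point):
 *
 *	u16 refill[MAX_TRAFFIC_CLASS]   = { 32, 32, 32, 32, 32, 32, 32, 32 };
 *	u16 max[MAX_TRAFFIC_CLASS]      = { 512, 512, 512, 512,
 *					    512, 512, 512, 512 };
 *	u8 bwg_id[MAX_TRAFFIC_CLASS]    = { 0, 0, 0, 0, 1, 1, 1, 1 };
 *	u8 prio_type[MAX_TRAFFIC_CLASS] = { prio_group, prio_group,
 *					    prio_group, prio_group,
 *					    prio_group, prio_group,
 *					    prio_group, prio_group };
 *	u8 prio_tc[MAX_TRAFFIC_CLASS]   = { 0, 1, 2, 3, 4, 5, 6, 7 };
 *
 *	ixgbe_dcb_hw_config_82599(hw, pba_equal, 0xff, refill, max,
 *				  bwg_id, prio_type, prio_tc);
 */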