/* drivers/net/ixgbe/ixgbe_dcb_82599.c */
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes in use
 *
 * This function returns the status data for each of the Traffic Classes in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
                                 struct ixgbe_hw_stats *stats,
                                 u8 tc_count)
{
        int tc;

        if (tc_count > MAX_TRAFFIC_CLASS)
                return DCB_ERR_PARAM;
        /* Statistics pertaining to each traffic class */
        for (tc = 0; tc < tc_count; tc++) {
                /* Transmitted Packets */
                stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
                /* Transmitted Bytes */
                stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
                /* Received Packets */
                stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
                /* Received Bytes */
                stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
        }

        return 0;
}
/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes in use
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
                                  struct ixgbe_hw_stats *stats,
                                  u8 tc_count)
{
        int tc;

        if (tc_count > MAX_TRAFFIC_CLASS)
                return DCB_ERR_PARAM;
        for (tc = 0; tc < tc_count; tc++) {
                /* Priority XOFF Transmitted */
                stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
                /* Priority XOFF Received */
                stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
        }

        return 0;
}
/**
 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure packet buffers for DCB mode.
 */
s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
                                          struct ixgbe_dcb_config *dcb_config)
{
        s32 ret_val = 0;
        u32 value = IXGBE_RXPBSIZE_64KB;
        u8 i = 0;

        /* Setup Rx packet buffer sizes */
        switch (dcb_config->rx_pba_cfg) {
        case pba_80_48:
                /* Setup the first four at 80KB */
                value = IXGBE_RXPBSIZE_80KB;
                for (; i < 4; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
                /* Setup the last four at 48KB...don't re-init i */
                value = IXGBE_RXPBSIZE_48KB;
                /* Fall Through */
        case pba_equal:
        default:
                for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);

                /* Setup Tx packet buffer sizes */
                for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
                        IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
                                        IXGBE_TXPBSIZE_20KB);
                        IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i),
                                        IXGBE_TXPBTHRESH_DCB);
                }
                break;
        }

        return ret_val;
}
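
/*
 * Worked example (reviewer note, not part of the original driver): the 82599
 * exposes eight Rx packet buffers. With pba_80_48 they are sized
 * 4 x 80KB + 4 x 48KB = 512KB, the same total as the pba_equal layout of
 * 8 x 64KB; the larger front buffers are assumed to back the traffic classes
 * expected to carry most of the traffic. The Tx packet buffers stay at a flat
 * 20KB each with the DCB transmit threshold applied.
 */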
/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
                                      struct ixgbe_dcb_config *dcb_config)
{
        struct tc_bw_alloc *p;
        u32 reg           = 0;
        u32 credit_refill = 0;
        u32 credit_max    = 0;
        u8  i             = 0;

        /*
         * Disable the arbiter before changing parameters
         * (always enable recycle mode; WSP)
         */
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

        /* Map all traffic classes to their UP, 1 to 1 */
        reg = 0;
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT));
        IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];

                credit_refill = p->data_credits_refill;
                credit_max    = p->data_credits_max;
                reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

                reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;

                if (p->prio_type == prio_link)
                        reg |= IXGBE_RTRPT4C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
        }

        /*
         * Configure Rx packet plane (recycle mode; WSP) and
         * enable arbiter
         */
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

        return 0;
}
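
/*
 * Illustrative note (reviewer note, not part of the original driver): each
 * RTRPT4C entry packs the refill credits in the low bits, the max credit
 * limit at the MCL shift, the bandwidth-group id at the BWG shift, and the
 * LSP bit for link-strict priority. Assuming a 3-bit field per user priority
 * in RTRUP2TC, the 1:1 UP-to-TC map programmed above works out to
 * (0 << 0) | (1 << 3) | ... | (7 << 21) = 0x00FAC688.
 */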
/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
                                           struct ixgbe_dcb_config *dcb_config)
{
        struct tc_bw_alloc *p;
        u32 reg, max_credits;
        u8 i;

        /* Clear the per-Tx queue credits; we use per-TC instead */
        for (i = 0; i < 128; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
                IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
        }

        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
                max_credits = dcb_config->tc_config[i].desc_credits_max;
                reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
                reg |= p->data_credits_refill;
                reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;

                if (p->prio_type == prio_group)
                        reg |= IXGBE_RTTDT2C_GSP;

                if (p->prio_type == prio_link)
                        reg |= IXGBE_RTTDT2C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
        }

        /*
         * Configure Tx descriptor plane (recycle mode; WSP) and
         * enable arbiter
         */
        reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        return 0;
}
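
/*
 * Illustrative note (reviewer note, not part of the original driver): RTTDQSEL
 * is an index register; each write selects one of the 128 Tx queues, and the
 * RTTDT1C write that follows clears that queue's descriptor credits. Zeroing
 * the per-queue credits is assumed to leave descriptor arbitration entirely
 * to the per-TC RTTDT2C values programmed in the same function.
 */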
/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
                                           struct ixgbe_dcb_config *dcb_config)
{
        struct tc_bw_alloc *p;
        u32 reg;
        u8 i;

        /*
         * Disable the arbiter before changing parameters
         * (always enable recycle mode; SP; arb delay)
         */
        reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
              (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
              IXGBE_RTTPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

        /* Map all traffic classes to their UP, 1 to 1 */
        reg = 0;
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT));
        IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
                reg = p->data_credits_refill;
                reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
                reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;

                if (p->prio_type == prio_group)
                        reg |= IXGBE_RTTPT2C_GSP;

                if (p->prio_type == prio_link)
                        reg |= IXGBE_RTTPT2C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
        }

        /*
         * Configure Tx packet plane (recycle mode; SP; arb delay) and
         * enable arbiter
         */
        reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
              (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
        IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

        return 0;
}
/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
                               struct ixgbe_dcb_config *dcb_config)
{
        u32 i, reg, rx_pba_size;

        /* If PFC is disabled globally then fall back to LFC. */
        if (!dcb_config->pfc_mode_enable) {
                for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                        hw->mac.ops.fc_enable(hw, i);
                goto out;
        }

        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                if (dcb_config->rx_pba_cfg == pba_equal)
                        rx_pba_size = IXGBE_RXPBSIZE_64KB;
                else
                        rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
                                              : IXGBE_RXPBSIZE_48KB;

                reg = ((rx_pba_size >> 5) & 0xFFE0);
                if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
                    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
                        reg |= IXGBE_FCRTL_XONE;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);

                reg = ((rx_pba_size >> 2) & 0xFFE0);
                if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
                    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
                        reg |= IXGBE_FCRTH_FCEN;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
        }

        /* Configure pause time (2 TCs per register) */
        reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
        for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
                IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

        /* Configure flow control refresh threshold value */
        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

        /* Enable Transmit PFC */
        reg = IXGBE_FCCFG_TFCE_PRIORITY;
        IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);

        /*
         * Enable Receive PFC
         * We will always honor XOFF frames we receive when
         * we are in PFC mode.
         */
        reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
        reg &= ~IXGBE_MFLCN_RFCE;
        reg |= IXGBE_MFLCN_RPFCE;
        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
out:
        return 0;
}
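
/*
 * Worked example (reviewer note, not part of the original driver): the
 * RXPBSIZE constants used above appear to be numerically equal to the buffer
 * size in bytes, so the thresholds come out to roughly 1/32 of the Rx packet
 * buffer for XON (rx_pba_size >> 5) and 1/4 for XOFF (rx_pba_size >> 2),
 * rounded down to 32-byte granularity by the 0xFFE0 mask. For a 64KB buffer
 * that is about 2KB and 16KB respectively.
 */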
/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
        u32 reg = 0;
        u8 i = 0;

        /*
         * Receive Queues stats setting
         * 32 RQSMR registers, each configuring 4 queues.
         * Set all 16 queues of each TC to the same stat
         * with TC 'n' going to stat 'n'.
         */
        for (i = 0; i < 32; i++) {
                reg = 0x01010101 * (i / 4);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
        }
        /*
         * Transmit Queues stats setting
         * 32 TQSM registers, each controlling 4 queues.
         * Set all queues of each TC to the same stat
         * with TC 'n' going to stat 'n'.
         * Tx queues are allocated non-uniformly to TCs:
         * 32, 32, 16, 16, 8, 8, 8, 8.
         */
        for (i = 0; i < 32; i++) {
                if (i < 8)
                        reg = 0x00000000;
                else if (i < 16)
                        reg = 0x01010101;
                else if (i < 20)
                        reg = 0x02020202;
                else if (i < 24)
                        reg = 0x03030303;
                else if (i < 26)
                        reg = 0x04040404;
                else if (i < 28)
                        reg = 0x05050505;
                else if (i < 30)
                        reg = 0x06060606;
                else
                        reg = 0x07070707;
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
        }

        return 0;
}
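
/*
 * Worked example (reviewer note, not part of the original driver): each RQSMR
 * register holds four byte-wide stat-set indices, one per queue, so
 * multiplying (i / 4) by 0x01010101 replicates the same index into all four
 * byte lanes. With 16 Rx queues per TC, registers 4n..4n+3 (queues
 * 16n..16n+15) all point at stat set n; e.g. i = 5 yields 0x01010101,
 * mapping queues 20-23 to stat set 1.
 */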
/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
{
        u32 reg;
        u32 q;

        /* Disable the Tx desc arbiter so that MTQC can be changed */
        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        reg |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        /* Enable DCB for Rx with 8 TCs */
        reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
        switch (reg & IXGBE_MRQC_MRQE_MASK) {
        case 0:
        case IXGBE_MRQC_RT4TCEN:
                /* RSS disabled cases */
                reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
                break;
        case IXGBE_MRQC_RSSEN:
        case IXGBE_MRQC_RTRSS4TCEN:
                /* RSS enabled cases */
                reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN;
                break;
        default:
                /* Unsupported value, assume stale data, overwrite no RSS */
                reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
        }
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

        /* Enable DCB for Tx with 8 TCs */
        reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
        IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

        /* Disable drop for all queues */
        for (q = 0; q < 128; q++)
                IXGBE_WRITE_REG(hw, IXGBE_QDE, q << IXGBE_QDE_IDX_SHIFT);

        /* Enable the Tx desc arbiter */
        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        reg &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        return 0;
}
/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure dcb settings and enable dcb mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
                              struct ixgbe_dcb_config *dcb_config)
{
        ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
        ixgbe_dcb_config_82599(hw);
        ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
        ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
        ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
        ixgbe_dcb_config_pfc_82599(hw, dcb_config);
        ixgbe_dcb_config_tc_stats_82599(hw);

        return 0;
}
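
/*
 * Usage sketch (reviewer note, not part of the original driver): a rough
 * picture of how the generic DCB layer in ixgbe_dcb.c is assumed to invoke
 * this 82599 entry point after the per-TC credits have been calculated.
 * Helper names outside this file are from memory and may differ:
 *
 *	struct ixgbe_hw *hw = &adapter->hw;
 *	struct ixgbe_dcb_config *cfg = &adapter->dcb_cfg;
 *
 *	ixgbe_dcb_calculate_tc_credits(cfg, DCB_TX_CONFIG);
 *	ixgbe_dcb_calculate_tc_credits(cfg, DCB_RX_CONFIG);
 *	if (hw->mac.type == ixgbe_mac_82599EB)
 *		ixgbe_dcb_hw_config_82599(hw, cfg);
 */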