1 /*******************************************************************************
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
more details.

You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
40 * ixgbe_fcoe_clear_ddp - clear the given ddp context
41 * @ddp: ptr to the ixgbe_fcoe_ddp
46 static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp
*ddp
)
57 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
58 * @netdev: the corresponding net_device
59 * @xid: the xid that corresponding ddp will be freed
61 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
62 * and it is expected to be called by ULD, i.e., FCP layer of libfc
63 * to release the corresponding ddp context when the I/O is done.
65 * Returns : data length already ddp-ed in bytes
67 int ixgbe_fcoe_ddp_put(struct net_device
*netdev
, u16 xid
)
70 struct ixgbe_fcoe
*fcoe
;
71 struct ixgbe_adapter
*adapter
;
72 struct ixgbe_fcoe_ddp
*ddp
;
78 if (xid
>= IXGBE_FCOE_DDP_MAX
)
81 adapter
= netdev_priv(netdev
);
82 fcoe
= &adapter
->fcoe
;
83 ddp
= &fcoe
->ddp
[xid
];
88 /* if there an error, force to invalidate ddp context */
90 spin_lock_bh(&fcoe
->lock
);
91 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_FCFLT
, 0);
92 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_FCFLTRW
,
93 (xid
| IXGBE_FCFLTRW_WE
));
94 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_FCBUFF
, 0);
95 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_FCDMARW
,
96 (xid
| IXGBE_FCDMARW_WE
));
98 /* guaranteed to be invalidated after 100us */
99 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_FCDMARW
,
100 (xid
| IXGBE_FCDMARW_RE
));
101 fcbuff
= IXGBE_READ_REG(&adapter
->hw
, IXGBE_FCBUFF
);
102 spin_unlock_bh(&fcoe
->lock
);
103 if (fcbuff
& IXGBE_FCBUFF_VALID
)
107 dma_unmap_sg(&adapter
->pdev
->dev
, ddp
->sgl
, ddp
->sgc
,
110 dma_pool_free(ddp
->pool
, ddp
->udl
, ddp
->udp
);
114 ixgbe_fcoe_clear_ddp(ddp
);
121 * ixgbe_fcoe_ddp_setup - called to set up ddp context
122 * @netdev: the corresponding net_device
123 * @xid: the exchange id requesting ddp
124 * @sgl: the scatter-gather list for this request
125 * @sgc: the number of scatter-gather items
127 * Returns : 1 for success and 0 for no ddp
129 static int ixgbe_fcoe_ddp_setup(struct net_device
*netdev
, u16 xid
,
130 struct scatterlist
*sgl
, unsigned int sgc
,
133 struct ixgbe_adapter
*adapter
;
135 struct ixgbe_fcoe
*fcoe
;
136 struct ixgbe_fcoe_ddp
*ddp
;
137 struct ixgbe_fcoe_ddp_pool
*ddp_pool
;
138 struct scatterlist
*sg
;
139 unsigned int i
, j
, dmacount
;
141 static const unsigned int bufflen
= IXGBE_FCBUFF_MIN
;
142 unsigned int firstoff
= 0;
143 unsigned int lastsize
;
144 unsigned int thisoff
= 0;
145 unsigned int thislen
= 0;
146 u32 fcbuff
, fcdmarw
, fcfltrw
, fcrxctl
;
152 adapter
= netdev_priv(netdev
);
153 if (xid
>= IXGBE_FCOE_DDP_MAX
) {
154 e_warn(drv
, "xid=0x%x out-of-range\n", xid
);
158 /* no DDP if we are already down or resetting */
159 if (test_bit(__IXGBE_DOWN
, &adapter
->state
) ||
160 test_bit(__IXGBE_RESETTING
, &adapter
->state
))
163 fcoe
= &adapter
->fcoe
;
164 ddp
= &fcoe
->ddp
[xid
];
166 e_err(drv
, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
167 xid
, ddp
->sgl
, ddp
->sgc
);
170 ixgbe_fcoe_clear_ddp(ddp
);
173 if (!fcoe
->ddp_pool
) {
174 e_warn(drv
, "No ddp_pool resources allocated\n");
178 ddp_pool
= per_cpu_ptr(fcoe
->ddp_pool
, get_cpu());
179 if (!ddp_pool
->pool
) {
180 e_warn(drv
, "xid=0x%x no ddp pool for fcoe\n", xid
);
184 /* setup dma from scsi command sgl */
185 dmacount
= dma_map_sg(&adapter
->pdev
->dev
, sgl
, sgc
, DMA_FROM_DEVICE
);
187 e_err(drv
, "xid 0x%x DMA map error\n", xid
);
191 /* alloc the udl from per cpu ddp pool */
192 ddp
->udl
= dma_pool_alloc(ddp_pool
->pool
, GFP_ATOMIC
, &ddp
->udp
);
194 e_err(drv
, "failed allocated ddp context\n");
195 goto out_noddp_unmap
;
197 ddp
->pool
= ddp_pool
->pool
;
202 for_each_sg(sgl
, sg
, dmacount
, i
) {
203 addr
= sg_dma_address(sg
);
204 len
= sg_dma_len(sg
);
206 /* max number of buffers allowed in one DDP context */
207 if (j
>= IXGBE_BUFFCNT_MAX
) {
212 /* get the offset of length of current buffer */
213 thisoff
= addr
& ((dma_addr_t
)bufflen
- 1);
214 thislen
= min((bufflen
- thisoff
), len
);
216 * all but the 1st buffer (j == 0)
217 * must be aligned on bufflen
219 if ((j
!= 0) && (thisoff
))
222 * all but the last buffer
223 * ((i == (dmacount - 1)) && (thislen == len))
224 * must end at bufflen
226 if (((i
!= (dmacount
- 1)) || (thislen
!= len
))
227 && ((thislen
+ thisoff
) != bufflen
))
230 ddp
->udl
[j
] = (u64
)(addr
- thisoff
);
231 /* only the first buffer may have none-zero offset */
239 /* only the last buffer may have non-full bufflen */
240 lastsize
= thisoff
+ thislen
;
243 * lastsize can not be buffer len.
244 * If it is then adding another buffer with lastsize = 1.
246 if (lastsize
== bufflen
) {
247 if (j
>= IXGBE_BUFFCNT_MAX
) {
248 ddp_pool
->noddp_ext_buff
++;
252 ddp
->udl
[j
] = (u64
)(fcoe
->extra_ddp_buffer_dma
);
258 fcbuff
= (IXGBE_FCBUFF_4KB
<< IXGBE_FCBUFF_BUFFSIZE_SHIFT
);
259 fcbuff
|= ((j
& 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT
);
260 fcbuff
|= (firstoff
<< IXGBE_FCBUFF_OFFSET_SHIFT
);
261 /* Set WRCONTX bit to allow DDP for target */
263 fcbuff
|= (IXGBE_FCBUFF_WRCONTX
);
264 fcbuff
|= (IXGBE_FCBUFF_VALID
);
267 fcdmarw
|= IXGBE_FCDMARW_WE
;
268 fcdmarw
|= (lastsize
<< IXGBE_FCDMARW_LASTSIZE_SHIFT
);
271 fcfltrw
|= IXGBE_FCFLTRW_WE
;
273 /* program DMA context */
275 spin_lock_bh(&fcoe
->lock
);
277 /* turn on last frame indication for target mode as FCP_RSPtarget is
278 * supposed to send FCP_RSP when it is done. */
279 if (target_mode
&& !test_bit(__IXGBE_FCOE_TARGET
, &fcoe
->mode
)) {
280 set_bit(__IXGBE_FCOE_TARGET
, &fcoe
->mode
);
281 fcrxctl
= IXGBE_READ_REG(hw
, IXGBE_FCRXCTRL
);
282 fcrxctl
|= IXGBE_FCRXCTRL_LASTSEQH
;
283 IXGBE_WRITE_REG(hw
, IXGBE_FCRXCTRL
, fcrxctl
);
286 IXGBE_WRITE_REG(hw
, IXGBE_FCPTRL
, ddp
->udp
& DMA_BIT_MASK(32));
287 IXGBE_WRITE_REG(hw
, IXGBE_FCPTRH
, (u64
)ddp
->udp
>> 32);
288 IXGBE_WRITE_REG(hw
, IXGBE_FCBUFF
, fcbuff
);
289 IXGBE_WRITE_REG(hw
, IXGBE_FCDMARW
, fcdmarw
);
290 /* program filter context */
291 IXGBE_WRITE_REG(hw
, IXGBE_FCPARAM
, 0);
292 IXGBE_WRITE_REG(hw
, IXGBE_FCFLT
, IXGBE_FCFLT_VALID
);
293 IXGBE_WRITE_REG(hw
, IXGBE_FCFLTRW
, fcfltrw
);
295 spin_unlock_bh(&fcoe
->lock
);
300 dma_pool_free(ddp
->pool
, ddp
->udl
, ddp
->udp
);
301 ixgbe_fcoe_clear_ddp(ddp
);
304 dma_unmap_sg(&adapter
->pdev
->dev
, sgl
, sgc
, DMA_FROM_DEVICE
);
311 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
312 * @netdev: the corresponding net_device
313 * @xid: the exchange id requesting ddp
314 * @sgl: the scatter-gather list for this request
315 * @sgc: the number of scatter-gather items
317 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
318 * and is expected to be called from ULD, e.g., FCP layer of libfc
319 * to set up ddp for the corresponding xid of the given sglist for
320 * the corresponding I/O.
322 * Returns : 1 for success and 0 for no ddp
324 int ixgbe_fcoe_ddp_get(struct net_device
*netdev
, u16 xid
,
325 struct scatterlist
*sgl
, unsigned int sgc
)
327 return ixgbe_fcoe_ddp_setup(netdev
, xid
, sgl
, sgc
, 0);
331 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
332 * @netdev: the corresponding net_device
333 * @xid: the exchange id requesting ddp
334 * @sgl: the scatter-gather list for this request
335 * @sgc: the number of scatter-gather items
337 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
338 * and is expected to be called from ULD, e.g., FCP layer of libfc
339 * to set up ddp for the corresponding xid of the given sglist for
340 * the corresponding I/O. The DDP in target mode is a write I/O request
341 * from the initiator.
343 * Returns : 1 for success and 0 for no ddp
345 int ixgbe_fcoe_ddp_target(struct net_device
*netdev
, u16 xid
,
346 struct scatterlist
*sgl
, unsigned int sgc
)
348 return ixgbe_fcoe_ddp_setup(netdev
, xid
, sgl
, sgc
, 1);
352 * ixgbe_fcoe_ddp - check ddp status and mark it done
353 * @adapter: ixgbe adapter
354 * @rx_desc: advanced rx descriptor
355 * @skb: the skb holding the received data
357 * This checks ddp status.
359 * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates
360 * not passing the skb to ULD, > 0 indicates is the length of data
363 int ixgbe_fcoe_ddp(struct ixgbe_adapter
*adapter
,
364 union ixgbe_adv_rx_desc
*rx_desc
,
368 struct ixgbe_fcoe
*fcoe
;
369 struct ixgbe_fcoe_ddp
*ddp
;
370 struct fc_frame_header
*fh
;
371 struct fcoe_crc_eof
*crc
;
372 __le32 fcerr
= ixgbe_test_staterr(rx_desc
, IXGBE_RXDADV_ERR_FCERR
);
377 if (fcerr
== cpu_to_le32(IXGBE_FCERR_BADCRC
))
378 skb
->ip_summed
= CHECKSUM_NONE
;
380 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
382 if (eth_hdr(skb
)->h_proto
== htons(ETH_P_8021Q
))
383 fh
= (struct fc_frame_header
*)(skb
->data
+
384 sizeof(struct vlan_hdr
) + sizeof(struct fcoe_hdr
));
386 fh
= (struct fc_frame_header
*)(skb
->data
+
387 sizeof(struct fcoe_hdr
));
389 fctl
= ntoh24(fh
->fh_f_ctl
);
390 if (fctl
& FC_FC_EX_CTX
)
391 xid
= be16_to_cpu(fh
->fh_ox_id
);
393 xid
= be16_to_cpu(fh
->fh_rx_id
);
395 if (xid
>= IXGBE_FCOE_DDP_MAX
)
398 fcoe
= &adapter
->fcoe
;
399 ddp
= &fcoe
->ddp
[xid
];
403 ddp_err
= ixgbe_test_staterr(rx_desc
, IXGBE_RXDADV_ERR_FCEOFE
|
404 IXGBE_RXDADV_ERR_FCERR
);
408 switch (ixgbe_test_staterr(rx_desc
, IXGBE_RXDADV_STAT_FCSTAT
)) {
409 /* return 0 to bypass going to ULD for DDPed data */
410 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP
):
411 /* update length of DDPed data */
412 ddp
->len
= le32_to_cpu(rx_desc
->wb
.lower
.hi_dword
.rss
);
415 /* unmap the sg list when FCPRSP is received */
416 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP
):
417 dma_unmap_sg(&adapter
->pdev
->dev
, ddp
->sgl
,
418 ddp
->sgc
, DMA_FROM_DEVICE
);
423 /* if DDP length is present pass it through to ULD */
424 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP
):
425 /* update length of DDPed data */
426 ddp
->len
= le32_to_cpu(rx_desc
->wb
.lower
.hi_dword
.rss
);
430 /* no match will return as an error */
431 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH
):
436 /* In target mode, check the last data frame of the sequence.
437 * For DDP in target mode, data is already DDPed but the header
438 * indication of the last data frame ould allow is to tell if we
439 * got all the data and the ULP can send FCP_RSP back, as this is
440 * not a full fcoe frame, we fill the trailer here so it won't be
441 * dropped by the ULP stack.
443 if ((fh
->fh_r_ctl
== FC_RCTL_DD_SOL_DATA
) &&
444 (fctl
& FC_FC_END_SEQ
)) {
446 crc
= (struct fcoe_crc_eof
*)skb_put(skb
, sizeof(*crc
));
447 crc
->fcoe_eof
= FC_EOF_T
;
454 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
455 * @tx_ring: tx desc ring
456 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
457 * @hdr_len: hdr_len to be returned
459 * This sets up large send offload for FCoE
461 * Returns : 0 indicates success, < 0 for error
463 int ixgbe_fso(struct ixgbe_ring
*tx_ring
,
464 struct ixgbe_tx_buffer
*first
,
467 struct sk_buff
*skb
= first
->skb
;
468 struct fc_frame_header
*fh
;
470 u32 fcoe_sof_eof
= 0;
474 if (skb_is_gso(skb
) && (skb_shinfo(skb
)->gso_type
!= SKB_GSO_FCOE
)) {
475 dev_err(tx_ring
->dev
, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
476 skb_shinfo(skb
)->gso_type
);
480 /* resets the header to point fcoe/fc */
481 skb_set_network_header(skb
, skb
->mac_len
);
482 skb_set_transport_header(skb
, skb
->mac_len
+
483 sizeof(struct fcoe_hdr
));
485 /* sets up SOF and ORIS */
486 sof
= ((struct fcoe_hdr
*)skb_network_header(skb
))->fcoe_sof
;
489 fcoe_sof_eof
= IXGBE_ADVTXD_FCOEF_ORIS
;
492 fcoe_sof_eof
= IXGBE_ADVTXD_FCOEF_SOF
|
493 IXGBE_ADVTXD_FCOEF_ORIS
;
498 fcoe_sof_eof
= IXGBE_ADVTXD_FCOEF_SOF
;
501 dev_warn(tx_ring
->dev
, "unknown sof = 0x%x\n", sof
);
505 /* the first byte of the last dword is EOF */
506 skb_copy_bits(skb
, skb
->len
- 4, &eof
, 1);
507 /* sets up EOF and ORIE */
510 fcoe_sof_eof
|= IXGBE_ADVTXD_FCOEF_EOF_N
;
515 fcoe_sof_eof
|= IXGBE_ADVTXD_FCOEF_EOF_N
|
516 IXGBE_ADVTXD_FCOEF_ORIE
;
518 fcoe_sof_eof
|= IXGBE_ADVTXD_FCOEF_EOF_T
;
521 fcoe_sof_eof
|= IXGBE_ADVTXD_FCOEF_EOF_NI
;
524 fcoe_sof_eof
|= IXGBE_ADVTXD_FCOEF_EOF_A
;
527 dev_warn(tx_ring
->dev
, "unknown eof = 0x%x\n", eof
);
531 /* sets up PARINC indicating data offset */
532 fh
= (struct fc_frame_header
*)skb_transport_header(skb
);
533 if (fh
->fh_f_ctl
[2] & FC_FC_REL_OFF
)
534 fcoe_sof_eof
|= IXGBE_ADVTXD_FCOEF_PARINC
;
536 /* include trailer in headlen as it is replicated per frame */
537 *hdr_len
= sizeof(struct fcoe_crc_eof
);
539 /* hdr_len includes fc_hdr if FCoE LSO is enabled */
540 if (skb_is_gso(skb
)) {
541 *hdr_len
+= skb_transport_offset(skb
) +
542 sizeof(struct fc_frame_header
);
543 /* update gso_segs and bytecount */
544 first
->gso_segs
= DIV_ROUND_UP(skb
->len
- *hdr_len
,
545 skb_shinfo(skb
)->gso_size
);
546 first
->bytecount
+= (first
->gso_segs
- 1) * *hdr_len
;
547 first
->tx_flags
|= IXGBE_TX_FLAGS_TSO
;
550 /* set flag indicating FCOE to ixgbe_tx_map call */
551 first
->tx_flags
|= IXGBE_TX_FLAGS_FCOE
| IXGBE_TX_FLAGS_CC
;
553 /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
554 mss_l4len_idx
= skb_shinfo(skb
)->gso_size
<< IXGBE_ADVTXD_MSS_SHIFT
;
556 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
557 vlan_macip_lens
= skb_transport_offset(skb
) +
558 sizeof(struct fc_frame_header
);
559 vlan_macip_lens
|= (skb_transport_offset(skb
) - 4)
560 << IXGBE_ADVTXD_MACLEN_SHIFT
;
561 vlan_macip_lens
|= first
->tx_flags
& IXGBE_TX_FLAGS_VLAN_MASK
;
563 /* write context desc */
564 ixgbe_tx_ctxtdesc(tx_ring
, vlan_macip_lens
, fcoe_sof_eof
,
565 IXGBE_ADVTXT_TUCMD_FCOE
, mss_l4len_idx
);
570 static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe
*fcoe
, unsigned int cpu
)
572 struct ixgbe_fcoe_ddp_pool
*ddp_pool
;
574 ddp_pool
= per_cpu_ptr(fcoe
->ddp_pool
, cpu
);
576 dma_pool_destroy(ddp_pool
->pool
);
577 ddp_pool
->pool
= NULL
;
580 static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe
*fcoe
,
584 struct ixgbe_fcoe_ddp_pool
*ddp_pool
;
585 struct dma_pool
*pool
;
588 snprintf(pool_name
, 32, "ixgbe_fcoe_ddp_%d", cpu
);
590 pool
= dma_pool_create(pool_name
, dev
, IXGBE_FCPTR_MAX
,
591 IXGBE_FCPTR_ALIGN
, PAGE_SIZE
);
595 ddp_pool
= per_cpu_ptr(fcoe
->ddp_pool
, cpu
);
596 ddp_pool
->pool
= pool
;
598 ddp_pool
->noddp_ext_buff
= 0;
604 * ixgbe_configure_fcoe - configures registers for fcoe at start
605 * @adapter: ptr to ixgbe adapter
607 * This sets up FCoE related registers
611 void ixgbe_configure_fcoe(struct ixgbe_adapter
*adapter
)
613 struct ixgbe_ring_feature
*fcoe
= &adapter
->ring_feature
[RING_F_FCOE
];
614 struct ixgbe_hw
*hw
= &adapter
->hw
;
615 int i
, fcoe_q
, fcoe_i
;
618 /* Minimal functionality for FCoE requires at least CRC offloads */
619 if (!(adapter
->netdev
->features
& NETIF_F_FCOE_CRC
))
622 /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
623 etqf
= ETH_P_FCOE
| IXGBE_ETQF_FCOE
| IXGBE_ETQF_FILTER_EN
;
624 if (adapter
->flags
& IXGBE_FLAG_SRIOV_ENABLED
) {
625 etqf
|= IXGBE_ETQF_POOL_ENABLE
;
626 etqf
|= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT
;
628 IXGBE_WRITE_REG(hw
, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE
), etqf
);
629 IXGBE_WRITE_REG(hw
, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE
), 0);
631 /* leave registers un-configured if FCoE is disabled */
632 if (!(adapter
->flags
& IXGBE_FLAG_FCOE_ENABLED
))
635 /* Use one or more Rx queues for FCoE by redirection table */
636 for (i
= 0; i
< IXGBE_FCRETA_SIZE
; i
++) {
637 fcoe_i
= fcoe
->offset
+ (i
% fcoe
->indices
);
638 fcoe_i
&= IXGBE_FCRETA_ENTRY_MASK
;
639 fcoe_q
= adapter
->rx_ring
[fcoe_i
]->reg_idx
;
640 IXGBE_WRITE_REG(hw
, IXGBE_FCRETA(i
), fcoe_q
);
642 IXGBE_WRITE_REG(hw
, IXGBE_FCRECTL
, IXGBE_FCRECTL_ENA
);
644 /* Enable L2 EtherType filter for FIP */
645 etqf
= ETH_P_FIP
| IXGBE_ETQF_FILTER_EN
;
646 if (adapter
->flags
& IXGBE_FLAG_SRIOV_ENABLED
) {
647 etqf
|= IXGBE_ETQF_POOL_ENABLE
;
648 etqf
|= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT
;
650 IXGBE_WRITE_REG(hw
, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP
), etqf
);
652 /* Send FIP frames to the first FCoE queue */
653 fcoe_q
= adapter
->rx_ring
[fcoe
->offset
]->reg_idx
;
654 IXGBE_WRITE_REG(hw
, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP
),
655 IXGBE_ETQS_QUEUE_EN
|
656 (fcoe_q
<< IXGBE_ETQS_RX_QUEUE_SHIFT
));
658 /* Configure FCoE Rx control */
659 IXGBE_WRITE_REG(hw
, IXGBE_FCRXCTRL
,
660 IXGBE_FCRXCTRL_FCCRCBO
|
661 (FC_FCOE_VER
<< IXGBE_FCRXCTRL_FCOEVER_SHIFT
));
665 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
666 * @adapter : ixgbe adapter
668 * Cleans up outstanding ddp context resources
672 void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter
*adapter
)
674 struct ixgbe_fcoe
*fcoe
= &adapter
->fcoe
;
677 /* do nothing if no DDP pools were allocated */
681 for (i
= 0; i
< IXGBE_FCOE_DDP_MAX
; i
++)
682 ixgbe_fcoe_ddp_put(adapter
->netdev
, i
);
684 for_each_possible_cpu(cpu
)
685 ixgbe_fcoe_dma_pool_free(fcoe
, cpu
);
687 dma_unmap_single(&adapter
->pdev
->dev
,
688 fcoe
->extra_ddp_buffer_dma
,
691 kfree(fcoe
->extra_ddp_buffer
);
693 fcoe
->extra_ddp_buffer
= NULL
;
694 fcoe
->extra_ddp_buffer_dma
= 0;
698 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
699 * @adapter: ixgbe adapter
701 * Sets up ddp context resouces
703 * Returns : 0 indicates success or -EINVAL on failure
705 int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter
*adapter
)
707 struct ixgbe_fcoe
*fcoe
= &adapter
->fcoe
;
708 struct device
*dev
= &adapter
->pdev
->dev
;
713 /* do nothing if no DDP pools were allocated */
717 /* Extra buffer to be shared by all DDPs for HW work around */
718 buffer
= kmalloc(IXGBE_FCBUFF_MIN
, GFP_ATOMIC
);
722 dma
= dma_map_single(dev
, buffer
, IXGBE_FCBUFF_MIN
, DMA_FROM_DEVICE
);
723 if (dma_mapping_error(dev
, dma
)) {
724 e_err(drv
, "failed to map extra DDP buffer\n");
729 fcoe
->extra_ddp_buffer
= buffer
;
730 fcoe
->extra_ddp_buffer_dma
= dma
;
732 /* allocate pci pool for each cpu */
733 for_each_possible_cpu(cpu
) {
734 int err
= ixgbe_fcoe_dma_pool_alloc(fcoe
, dev
, cpu
);
738 e_err(drv
, "failed to alloc DDP pool on cpu:%d\n", cpu
);
739 ixgbe_free_fcoe_ddp_resources(adapter
);
746 static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter
*adapter
)
748 struct ixgbe_fcoe
*fcoe
= &adapter
->fcoe
;
750 if (!(adapter
->flags
& IXGBE_FLAG_FCOE_CAPABLE
))
753 fcoe
->ddp_pool
= alloc_percpu(struct ixgbe_fcoe_ddp_pool
);
755 if (!fcoe
->ddp_pool
) {
756 e_err(drv
, "failed to allocate percpu DDP resources\n");
760 adapter
->netdev
->fcoe_ddp_xid
= IXGBE_FCOE_DDP_MAX
- 1;
765 static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter
*adapter
)
767 struct ixgbe_fcoe
*fcoe
= &adapter
->fcoe
;
769 adapter
->netdev
->fcoe_ddp_xid
= 0;
774 free_percpu(fcoe
->ddp_pool
);
775 fcoe
->ddp_pool
= NULL
;
779 * ixgbe_fcoe_enable - turn on FCoE offload feature
780 * @netdev: the corresponding netdev
782 * Turns on FCoE offload feature in 82599.
784 * Returns : 0 indicates success or -EINVAL on failure
786 int ixgbe_fcoe_enable(struct net_device
*netdev
)
788 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
789 struct ixgbe_fcoe
*fcoe
= &adapter
->fcoe
;
791 atomic_inc(&fcoe
->refcnt
);
793 if (!(adapter
->flags
& IXGBE_FLAG_FCOE_CAPABLE
))
796 if (adapter
->flags
& IXGBE_FLAG_FCOE_ENABLED
)
799 e_info(drv
, "Enabling FCoE offload features.\n");
801 if (adapter
->flags
& IXGBE_FLAG_SRIOV_ENABLED
)
802 e_warn(probe
, "Enabling FCoE on PF will disable legacy VFs\n");
804 if (netif_running(netdev
))
805 netdev
->netdev_ops
->ndo_stop(netdev
);
807 /* Allocate per CPU memory to track DDP pools */
808 ixgbe_fcoe_ddp_enable(adapter
);
810 /* enable FCoE and notify stack */
811 adapter
->flags
|= IXGBE_FLAG_FCOE_ENABLED
;
812 netdev
->features
|= NETIF_F_FCOE_MTU
;
813 netdev_features_change(netdev
);
815 /* release existing queues and reallocate them */
816 ixgbe_clear_interrupt_scheme(adapter
);
817 ixgbe_init_interrupt_scheme(adapter
);
819 if (netif_running(netdev
))
820 netdev
->netdev_ops
->ndo_open(netdev
);
826 * ixgbe_fcoe_disable - turn off FCoE offload feature
827 * @netdev: the corresponding netdev
829 * Turns off FCoE offload feature in 82599.
831 * Returns : 0 indicates success or -EINVAL on failure
833 int ixgbe_fcoe_disable(struct net_device
*netdev
)
835 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
837 if (!atomic_dec_and_test(&adapter
->fcoe
.refcnt
))
840 if (!(adapter
->flags
& IXGBE_FLAG_FCOE_ENABLED
))
843 e_info(drv
, "Disabling FCoE offload features.\n");
844 if (netif_running(netdev
))
845 netdev
->netdev_ops
->ndo_stop(netdev
);
847 /* Free per CPU memory to track DDP pools */
848 ixgbe_fcoe_ddp_disable(adapter
);
850 /* disable FCoE and notify stack */
851 adapter
->flags
&= ~IXGBE_FLAG_FCOE_ENABLED
;
852 netdev
->features
&= ~NETIF_F_FCOE_MTU
;
854 netdev_features_change(netdev
);
856 /* release existing queues and reallocate them */
857 ixgbe_clear_interrupt_scheme(adapter
);
858 ixgbe_init_interrupt_scheme(adapter
);
860 if (netif_running(netdev
))
861 netdev
->netdev_ops
->ndo_open(netdev
);
867 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
868 * @netdev : ixgbe adapter
869 * @wwn : the world wide name
870 * @type: the type of world wide name
872 * Returns the node or port world wide name if both the prefix and the san
873 * mac address are valid, then the wwn is formed based on the NAA-2 for
874 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
876 * Returns : 0 on success
878 int ixgbe_fcoe_get_wwn(struct net_device
*netdev
, u64
*wwn
, int type
)
882 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
883 struct ixgbe_mac_info
*mac
= &adapter
->hw
.mac
;
886 case NETDEV_FCOE_WWNN
:
887 prefix
= mac
->wwnn_prefix
;
889 case NETDEV_FCOE_WWPN
:
890 prefix
= mac
->wwpn_prefix
;
896 if ((prefix
!= 0xffff) &&
897 is_valid_ether_addr(mac
->san_addr
)) {
898 *wwn
= ((u64
) prefix
<< 48) |
899 ((u64
) mac
->san_addr
[0] << 40) |
900 ((u64
) mac
->san_addr
[1] << 32) |
901 ((u64
) mac
->san_addr
[2] << 24) |
902 ((u64
) mac
->san_addr
[3] << 16) |
903 ((u64
) mac
->san_addr
[4] << 8) |
904 ((u64
) mac
->san_addr
[5]);
911 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
912 * @netdev : ixgbe adapter
913 * @info : HBA information
915 * Returns ixgbe HBA information
917 * Returns : 0 on success
919 int ixgbe_fcoe_get_hbainfo(struct net_device
*netdev
,
920 struct netdev_fcoe_hbainfo
*info
)
922 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
923 struct ixgbe_hw
*hw
= &adapter
->hw
;
930 /* Don't return information on unsupported devices */
931 if (hw
->mac
.type
!= ixgbe_mac_82599EB
&&
932 hw
->mac
.type
!= ixgbe_mac_X540
)
936 snprintf(info
->manufacturer
, sizeof(info
->manufacturer
),
937 "Intel Corporation");
941 /* Get the PCI-e Device Serial Number Capability */
942 pos
= pci_find_ext_capability(adapter
->pdev
, PCI_EXT_CAP_ID_DSN
);
945 for (i
= 0; i
< 8; i
++)
946 pci_read_config_byte(adapter
->pdev
, pos
+ i
, &buf
[i
]);
948 snprintf(info
->serial_number
, sizeof(info
->serial_number
),
949 "%02X%02X%02X%02X%02X%02X%02X%02X",
950 buf
[7], buf
[6], buf
[5], buf
[4],
951 buf
[3], buf
[2], buf
[1], buf
[0]);
953 snprintf(info
->serial_number
, sizeof(info
->serial_number
),
956 /* Hardware Version */
957 snprintf(info
->hardware_version
,
958 sizeof(info
->hardware_version
),
959 "Rev %d", hw
->revision_id
);
960 /* Driver Name/Version */
961 snprintf(info
->driver_version
,
962 sizeof(info
->driver_version
),
965 ixgbe_driver_version
);
966 /* Firmware Version */
967 snprintf(info
->firmware_version
,
968 sizeof(info
->firmware_version
),
970 (adapter
->eeprom_verh
<< 16) |
971 adapter
->eeprom_verl
);
974 if (hw
->mac
.type
== ixgbe_mac_82599EB
) {
975 snprintf(info
->model
,
979 snprintf(info
->model
,
984 /* Model Description */
985 snprintf(info
->model_description
,
986 sizeof(info
->model_description
),
988 ixgbe_default_device_descr
);
994 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
995 * @adapter - pointer to the device adapter structure
997 * Return : TC that FCoE is mapped to
999 u8
ixgbe_fcoe_get_tc(struct ixgbe_adapter
*adapter
)
1001 #ifdef CONFIG_IXGBE_DCB
1002 return netdev_get_prio_tc_map(adapter
->netdev
, adapter
->fcoe
.up
);