/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns : none
 *
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}
/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_hw *hw;
	u32 fcbuff;

	if (!netdev)
		return 0;

	if (xid >= netdev->fcoe_ddp_xid)
		return 0;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		return 0;

	hw = &adapter->hw;
	len = ddp->len;
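	/*
	 * An I/O that completed with an error leaves its DDP context
	 * programmed in hardware, so it must be invalidated below before
	 * the descriptor list and scatterlist can be released.
	 */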
	/* if no error then skip ddp context invalidation */
	if (!ddp->err)
		goto skip_ddpinv;

	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require DDP FCoE lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid),
				(xid | IXGBE_FCFLTRW_WE));

		/* program FCBUFF */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0);

		/* program FCDMARW */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_WE));

		/* read FCBUFF to check context invalidated */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid));
	} else {
		/* other hardware requires DDP FCoE lock */
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
	}

	if (fcbuff & IXGBE_FCBUFF_VALID)
		usleep_range(100, 150);
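	/* by now the hardware context is no longer in use; release DMA resources */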
skip_ddpinv:
	if (ddp->sgl)
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

	return len;
}
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to set up target mode, 0 to set up initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= netdev->fcoe_ddp_xid) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
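	/*
	 * Build the user descriptor list (udl): one bus address per
	 * bufflen-sized (4KB) chunk of the mapped scatterlist. Only the
	 * first chunk may start at a non-zero offset and only the last
	 * may end short of bufflen; any other layout cannot be described
	 * by a single DDP context and falls back to the no-DDP path.
	 */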
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be bufflen.
	 * If it is, add another buffer with lastsize = 1.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			ddp_pool->noddp_ext_buff++;
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();
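	/*
	 * Pack the context for the hardware: FCBUFF describes buffer
	 * size, count and the first-buffer offset; FCDMARW carries the
	 * xid and the last buffer's size; FCFLTRW arms the filter for
	 * this xid. The values are written to the registers further down.
	 */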
	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;
	/* program DMA context */
	hw = &adapter->hw;

	/* turn on last frame indication for target mode as the target is
	 * supposed to send FCP_RSP when it is done. */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}
	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require DDP lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid),
				ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw);
	} else {
		/* DDP lock for indirect DDP context access */
		spin_lock_bh(&fcoe->lock);

		IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

		spin_unlock_bh(&fcoe->lock);
	}

	return 1;

out_noddp_free:
	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
	put_cpu();
	return 0;
}
/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}
/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates the length of data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;
	__le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
	__le32 ddp_err;
	int ddp_max;
	u32 fctl;
	u16 xid;

	if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));

	/* FC_FC_EX_CTX set means the sender is the exchange responder,
	 * so our end's xid is the OX_ID; otherwise it is the RX_ID.
	 */
	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);
	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;

	if (xid >= ddp_max)
		return -EINVAL;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		return -EINVAL;

	ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
					      IXGBE_RXDADV_ERR_FCERR);
	if (ddp_err)
		return -EINVAL;
	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
	/* return 0 to bypass going to ULD for DDPed data */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		rc = 0;
		break;
	/* unmap the sg list when FCPRSP is received */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
			     ddp->sgc, DMA_FROM_DEVICE);
		ddp->err = ddp_err;
		ddp->sgl = NULL;
		ddp->sgc = 0;
		/* fall through */
	/* if DDP length is present pass it through to ULD */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		if (ddp->len)
			rc = ddp->len;
		break;
	/* no match will return as an error */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
	default:
		break;
	}
	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data and the ULP can send FCP_RSP back.
	 * As this is not a full fcoe frame, we fill the trailer here so it
	 * won't be dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		skb_linearize(skb);
		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}

	return rc;
}
/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring,
	      struct ixgbe_tx_buffer *first,
	      u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct fc_frame_header *fh;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 mss_l4len_idx;
	u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE;
	u8 sof, eof;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
			skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}
	/* resets the header to point fcoe/fc */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* sets up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
			       IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}
	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb))
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
					IXGBE_ADVTXD_FCOEF_ORIE;
		else
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}
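	/*
	 * Note: in the FC_EOF_T case under LSO above, the frames the
	 * hardware generates are marked EOF_N and ORIE lets it emit
	 * EOF_T on the final frame of the sequence instead.
	 */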
	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* include trailer in headlen as it is replicated per frame */
	*hdr_len = sizeof(struct fcoe_crc_eof);

	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
	if (skb_is_gso(skb)) {
		*hdr_len += skb_transport_offset(skb) +
			    sizeof(struct fc_frame_header);
		/* update gso_segs and bytecount */
		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
					       skb_shinfo(skb)->gso_size);
		first->bytecount += (first->gso_segs - 1) * *hdr_len;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO;
		/* Hardware expects L4T to be RSV for FCoE TSO */
		type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV;
	}

	/* set flag indicating FCOE to ixgbe_tx_map call */
	first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;

	/* mss_l4len_idx: use 0 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_transport_offset(skb) +
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	/* write context desc */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  type_tucmd, mss_l4len_idx);

	return 0;
}
static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	dma_pool_destroy(ddp_pool->pool);
	ddp_pool->pool = NULL;
}
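/*
 * DDP descriptor lists are carved out of per-CPU DMA pools;
 * ixgbe_fcoe_ddp_setup() allocates from the pool of the CPU it runs on
 * (via get_cpu()), so no locking is needed around pool allocation.
 */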
static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
				     struct device *dev,
				     unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct dma_pool *pool;
	char pool_name[32];

	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);

	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
			       IXGBE_FCPTR_ALIGN, PAGE_SIZE);
	if (!pool)
		return -ENOMEM;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	ddp_pool->pool = pool;
	ddp_pool->noddp = 0;
	ddp_pool->noddp_ext_buff = 0;

	return 0;
}
/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
	struct ixgbe_hw *hw = &adapter->hw;
	int i, fcoe_q, fcoe_i, fcoe_q_h = 0;
	int fcreta_size;
	u32 etqf;
	/* Minimal functionality for FCoE requires at least CRC offloads */
	if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
		return;

	/* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
	etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);

	/* leave registers un-configured if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return;
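	/*
	 * Each FCRETA entry maps a hash index to an FCoE Rx queue. On
	 * X550 an entry additionally carries a second queue index in its
	 * high half, which the loop below builds from an offset position
	 * within the FCoE ring range.
	 */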
	/* Use one or more Rx queues for FCoE by redirection table */
	fcreta_size = IXGBE_FCRETA_SIZE;
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		fcreta_size = IXGBE_FCRETA_SIZE_X550;

	for (i = 0; i < fcreta_size; i++) {
		if (adapter->hw.mac.type == ixgbe_mac_X550) {
			int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
						       fcoe->indices);
			fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
			fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
				   IXGBE_FCRETA_ENTRY_HIGH_MASK;
		}

		fcoe_i = fcoe->offset + (i % fcoe->indices);
		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		fcoe_q |= fcoe_q_h;
		IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
	/* Enable L2 EtherType filter for FIP */
	etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);

	/* Send FIP frames to the first FCoE queue */
	fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	/* Configure FCoE Rx control */
	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
}
/**
 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
 * @adapter : ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	int cpu, i, ddp_max;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return;

	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;

	for (i = 0; i < ddp_max; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);

	for_each_possible_cpu(cpu)
		ixgbe_fcoe_dma_pool_free(fcoe, cpu);

	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	kfree(fcoe->extra_ddp_buffer);

	fcoe->extra_ddp_buffer = NULL;
	fcoe->extra_ddp_buffer_dma = 0;
}
/**
 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Sets up ddp context resources
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct device *dev = &adapter->pdev->dev;
	void *buffer;
	dma_addr_t dma;
	unsigned int cpu;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return 0;

	/* Extra buffer to be shared by all DDPs for HW work around */
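	/*
	 * ixgbe_fcoe_ddp_setup() appends this buffer to any context whose
	 * last chunk exactly fills its buffer, giving the hardware a safe
	 * scratch target for writes past the end of the real buffers.
	 */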
	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		e_err(drv, "failed to map extra DDP buffer\n");
		kfree(buffer);
		return -ENOMEM;
	}

	fcoe->extra_ddp_buffer = buffer;
	fcoe->extra_ddp_buffer_dma = dma;

	/* allocate pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
		if (!err)
			continue;

		e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
		ixgbe_free_fcoe_ddp_resources(adapter);
		return -ENOMEM;
	}

	return 0;
}
static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;

	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);

	if (!fcoe->ddp_pool) {
		e_err(drv, "failed to allocate percpu DDP resources\n");
		return -ENOMEM;
	}

	adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1;

	return 0;
}
static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	adapter->netdev->fcoe_ddp_xid = 0;

	if (!fcoe->ddp_pool)
		return;

	free_percpu(fcoe->ddp_pool);
	fcoe->ddp_pool = NULL;
}
/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	atomic_inc(&fcoe->refcnt);

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;

	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		return -EINVAL;

	e_info(drv, "Enabling FCoE offload features.\n");

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	/* Allocate per CPU memory to track DDP pools */
	ixgbe_fcoe_ddp_enable(adapter);

	/* enable FCoE and notify stack */
	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev_features_change(netdev);

	/* release existing queues and reallocate them */
	ixgbe_clear_interrupt_scheme(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);

	return 0;
}
/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
		return -EINVAL;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return -EINVAL;

	e_info(drv, "Disabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	/* Free per CPU memory to track DDP pools */
	ixgbe_fcoe_ddp_disable(adapter);

	/* disable FCoE and notify stack */
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	netdev->features &= ~NETIF_F_FCOE_MTU;

	netdev_features_change(netdev);

	/* release existing queues and reallocate them */
	ixgbe_clear_interrupt_scheme(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);

	return 0;
}
/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev : ixgbe adapter
 * @wwn : the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name if both the prefix and the san
 * mac address are valid. The wwn is formed based on the NAA-2 for
 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

	/* NAA-2: the 16-bit prefix followed by the 48-bit SAN MAC address */
	if ((prefix != 0xffff) &&
	    is_valid_ether_addr(mac->san_addr)) {
		*wwn = ((u64) prefix << 48) |
		       ((u64) mac->san_addr[0] << 40) |
		       ((u64) mac->san_addr[1] << 32) |
		       ((u64) mac->san_addr[2] << 24) |
		       ((u64) mac->san_addr[3] << 16) |
		       ((u64) mac->san_addr[4] << 8) |
		       ((u64) mac->san_addr[5]);
		return 0;
	}
	return -EINVAL;
}
/**
 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
 * @netdev : ixgbe adapter
 * @info : HBA information
 *
 * Returns ixgbe HBA information
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int i, pos;
	u8 buf[8];

	if (!info)
		return -EINVAL;

	/* Don't return information on unsupported devices */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return -EINVAL;

	/* Manufacturer */
	snprintf(info->manufacturer, sizeof(info->manufacturer),
		 "Intel Corporation");

	/* Serial Number */

	/* Get the PCI-e Device Serial Number Capability */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
	if (pos) {
		pos += 4;
		for (i = 0; i < 8; i++)
			pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);

		snprintf(info->serial_number, sizeof(info->serial_number),
			 "%02X%02X%02X%02X%02X%02X%02X%02X",
			 buf[7], buf[6], buf[5], buf[4],
			 buf[3], buf[2], buf[1], buf[0]);
	} else
		snprintf(info->serial_number, sizeof(info->serial_number),
			 "Unknown");

	/* Hardware Version */
	snprintf(info->hardware_version,
		 sizeof(info->hardware_version),
		 "Rev %d", hw->revision_id);
	/* Driver Name/Version */
	snprintf(info->driver_version,
		 sizeof(info->driver_version),
		 "%s v%s",
		 ixgbe_driver_name,
		 ixgbe_driver_version);
	/* Firmware Version */
	snprintf(info->firmware_version,
		 sizeof(info->firmware_version),
		 "0x%08x",
		 (adapter->eeprom_verh << 16) |
		  adapter->eeprom_verl);

	/* Model */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel 82599");
	} else if (hw->mac.type == ixgbe_mac_X550) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X550");
	} else {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X540");
	}

	/* Model Description */
	snprintf(info->model_description,
		 sizeof(info->model_description),
		 "%s",
		 ixgbe_default_device_descr);

	return 0;
}
/**
 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
 * @adapter - pointer to the device adapter structure
 *
 * Return : TC that FCoE is mapped to
 */
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
	return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
#else
	return 0;
#endif
}