/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}
/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_hw *hw;
	u32 fcbuff;

	if (!netdev)
		return 0;

	if (xid >= netdev->fcoe_ddp_xid)
		return 0;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		return 0;

	hw = &adapter->hw;
	len = ddp->len;
	/* if no error then skip ddp context invalidation */
	if (!ddp->err)
		goto skip_ddpinv;

	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require DDP FCoE lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid),
				(xid | IXGBE_FCFLTRW_WE));

		/* program FCBUFF */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0);

		/* program FCDMARW */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_WE));

		/* read FCBUFF to check context invalidated */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid));
	} else {
		/* other hardware requires DDP FCoE lock */
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
	}

	if (fcbuff & IXGBE_FCBUFF_VALID)
		usleep_range(100, 150);

skip_ddpinv:
	if (ddp->sgl)
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

	return len;
}
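
/*
 * Illustrative sketch (not part of this file): ixgbe_fcoe_ddp_put() is
 * reached through the ndo_fcoe_ddp_done hook, so an upper-layer driver such
 * as libfc's FCP layer would call it roughly like this once an exchange
 * completes; the variable names here are hypothetical:
 *
 *	const struct net_device_ops *ops = netdev->netdev_ops;
 *	int ddped_len = 0;
 *
 *	if (ops->ndo_fcoe_ddp_done)
 *		ddped_len = ops->ndo_fcoe_ddp_done(netdev, xid);
 *	// ddped_len bytes were placed directly into the command's
 *	// scatter-gather list and need no further copying
 */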
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to setup target mode, 0 to setup initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= netdev->fcoe_ddp_xid) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset and length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be bufflen.
	 * If it is, then add another buffer with lastsize = 1.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			ddp_pool->noddp_ext_buff++;
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();

	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;

	/* turn on last frame indication for target mode, as the target is
	 * supposed to send FCP_RSP when it is done. */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}

	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require DDP lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid),
				ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw);
	} else {
		/* DDP lock for indirect DDP context access */
		spin_lock_bh(&fcoe->lock);

		IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

		spin_unlock_bh(&fcoe->lock);
	}

	return 1;

out_noddp_free:
	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
	put_cpu();
	return 0;
}
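
/*
 * A worked example of the user descriptor list (UDL) built above, with
 * hypothetical addresses and bufflen = IXGBE_FCBUFF_MIN (4KB): a 10KB read
 * whose single DMA segment starts at 0x10000200 becomes
 *
 *	udl[0] = 0x10000000	first buffer, firstoff = 0x200
 *	udl[1] = 0x10001000	interior buffer, fully aligned and full
 *	udl[2] = 0x10002000	last buffer, lastsize = 0xa00
 *
 * The hardware places frame payloads into these buffers at the FC relative
 * offset, which is why every buffer except the first must start on a 4KB
 * boundary and every buffer except the last must fill all 4KB.
 */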
/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}
/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
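
/*
 * For context, a sketch (abbreviated) of how the three DDP entry points
 * above are plumbed into the driver's net_device_ops table elsewhere in
 * ixgbe:
 *
 *	static const struct net_device_ops ixgbe_netdev_ops = {
 *		...
 *		.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
 *		.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
 *		.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
 *		...
 *	};
 */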
/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates the length of the data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;
	__le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
	__le32 ddp_err;
	int ddp_max;
	u32 fctl;
	u16 xid;

	if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));

	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;
	if (xid >= ddp_max)
		return -EINVAL;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		return -EINVAL;

	ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
					      IXGBE_RXDADV_ERR_FCERR);
	if (ddp_err)
		return -EINVAL;

	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
	/* return 0 to bypass going to ULD for DDPed data */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		rc = 0;
		break;
	/* unmap the sg list when FCPRSP is received */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
			     ddp->sgc, DMA_FROM_DEVICE);
		ddp->err = ddp_err;
		ddp->sgl = NULL;
		ddp->sgc = 0;
		/* fall through */
	/* if DDP length is present pass it through to ULD */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		if (ddp->len)
			rc = ddp->len;
		break;
	/* no match will return as an error */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
	default:
		break;
	}

	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data and the ULP can send FCP_RSP back, as this is
	 * not a full fcoe frame, we fill the trailer here so it won't be
	 * dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		skb_linearize(skb);
		crc = skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}

	return rc;
}
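
/*
 * Illustrative sketch (simplified, hypothetical variable names): the Rx
 * clean-up path calls ixgbe_fcoe_ddp() on every FCoE-tagged descriptor and
 * uses the return code to decide whether the skb still needs to travel up
 * the stack:
 *
 *	int ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
 *
 *	if (!ddp_bytes) {
 *		dev_kfree_skb_any(skb);	// payload already placed by DDP
 *		continue;
 *	}
 *	// otherwise hand the frame to the FCoE/ULD stack as usual
 */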
/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring,
	      struct ixgbe_tx_buffer *first,
	      u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct fc_frame_header *fh;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 mss_l4len_idx;
	u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE;
	u8 sof, eof;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
			skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* resets the header to point to fcoe/fc */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* sets up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
			       IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}

	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb))
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
					IXGBE_ADVTXD_FCOEF_ORIE;
		else
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}

	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* include trailer in headlen as it is replicated per frame */
	*hdr_len = sizeof(struct fcoe_crc_eof);

	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
	if (skb_is_gso(skb)) {
		*hdr_len += skb_transport_offset(skb) +
			    sizeof(struct fc_frame_header);
		/* update gso_segs and bytecount */
		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
					       skb_shinfo(skb)->gso_size);
		first->bytecount += (first->gso_segs - 1) * *hdr_len;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO;
		/* Hardware expects L4T to be RSV for FCoE TSO */
		type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV;
	}

	/* set flag indicating FCOE to ixgbe_tx_map call */
	first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;

	/* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_transport_offset(skb) +
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	/* write context desc */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  type_tucmd, mss_l4len_idx);

	return 0;
}
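
/*
 * Illustrative sketch (simplified): on transmit, frames flagged as FCoE are
 * run through ixgbe_fso() before their descriptors are mapped, and a
 * negative return aborts the transmit:
 *
 *	if (first->tx_flags & IXGBE_TX_FLAGS_FCOE) {
 *		u8 hdr_len = 0;
 *
 *		if (ixgbe_fso(tx_ring, first, &hdr_len) < 0)
 *			goto out_drop;	// bad gso type or unknown SOF/EOF
 *	}
 */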
static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	dma_pool_destroy(ddp_pool->pool);
	ddp_pool->pool = NULL;
}
static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
				     struct device *dev,
				     unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct dma_pool *pool;
	char pool_name[32];

	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);

	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
			       IXGBE_FCPTR_ALIGN, PAGE_SIZE);
	if (!pool)
		return -ENOMEM;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	ddp_pool->pool = pool;
	ddp_pool->noddp = 0;
	ddp_pool->noddp_ext_buff = 0;

	return 0;
}
/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
	struct ixgbe_hw *hw = &adapter->hw;
	int i, fcoe_q, fcoe_i, fcoe_q_h = 0;
	int fcreta_size;
	u32 etqf;

	/* Minimal functionality for FCoE requires at least CRC offloads */
	if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
		return;

	/* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
	etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);

	/* leave registers un-configured if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return;

	/* Use one or more Rx queues for FCoE by redirection table */
	fcreta_size = IXGBE_FCRETA_SIZE;
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		fcreta_size = IXGBE_FCRETA_SIZE_X550;

	for (i = 0; i < fcreta_size; i++) {
		if (adapter->hw.mac.type == ixgbe_mac_X550) {
			int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
						       fcoe->indices);
			fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
			fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
				   IXGBE_FCRETA_ENTRY_HIGH_MASK;
		}

		fcoe_i = fcoe->offset + (i % fcoe->indices);
		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		fcoe_q |= fcoe_q_h;
		IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);

	/* Enable L2 EtherType filter for FIP */
	etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);

	/* Send FIP frames to the first FCoE queue */
	fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	/* Configure FCoE Rx control */
	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
}
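
/*
 * A worked example of the redirection table fill above, with hypothetical
 * values fcoe->offset = 32, fcoe->indices = 4 and fcreta_size = 8: entries
 * i = 0..7 cycle through the reg_idx of Rx rings 32..35,
 *
 *	IXGBE_FCRETA(0) <- rx_ring[32]->reg_idx
 *	IXGBE_FCRETA(1) <- rx_ring[33]->reg_idx
 *	...
 *	IXGBE_FCRETA(7) <- rx_ring[35]->reg_idx
 *
 * so incoming exchanges are spread across the small pool of FCoE Rx queues
 * (X550 additionally packs a second, shifted queue index into the high bits
 * of each entry).
 */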
/**
 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
 * @adapter : ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	int cpu, i, ddp_max;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return;

	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;

	for (i = 0; i < ddp_max; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);

	for_each_possible_cpu(cpu)
		ixgbe_fcoe_dma_pool_free(fcoe, cpu);

	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	kfree(fcoe->extra_ddp_buffer);

	fcoe->extra_ddp_buffer = NULL;
	fcoe->extra_ddp_buffer_dma = 0;
}
/**
 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Sets up ddp context resources
 *
 * Returns : 0 indicates success or -ENOMEM on failure
 */
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct device *dev = &adapter->pdev->dev;
	void *buffer;
	dma_addr_t dma;
	unsigned int cpu;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return 0;

	/* Extra buffer to be shared by all DDPs for HW work around */
	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		e_err(drv, "failed to map extra DDP buffer\n");
		kfree(buffer);
		return -ENOMEM;
	}

	fcoe->extra_ddp_buffer = buffer;
	fcoe->extra_ddp_buffer_dma = dma;

	/* allocate pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);

		if (!err)
			continue;

		e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
		ixgbe_free_fcoe_ddp_resources(adapter);
		return -ENOMEM;
	}

	return 0;
}
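
/*
 * For context, the intended lifecycle of the DDP resources, as a sketch;
 * the actual call sites live elsewhere in the driver:
 *
 *	ixgbe_fcoe_ddp_enable(adapter);		// percpu bookkeeping
 *	ixgbe_setup_fcoe_ddp_resources(adapter);	// dma pools + extra buffer
 *	// ... FCoE I/O ...
 *	ixgbe_free_fcoe_ddp_resources(adapter);	// undo setup
 *	ixgbe_fcoe_ddp_disable(adapter);	// free percpu bookkeeping
 */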
static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;

	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);

	if (!fcoe->ddp_pool) {
		e_err(drv, "failed to allocate percpu DDP resources\n");
		return -ENOMEM;
	}

	adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1;

	return 0;
}
static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	adapter->netdev->fcoe_ddp_xid = 0;

	if (!fcoe->ddp_pool)
		return;

	free_percpu(fcoe->ddp_pool);
	fcoe->ddp_pool = NULL;
}
/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	atomic_inc(&fcoe->refcnt);

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;

	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		return -EINVAL;

	e_info(drv, "Enabling FCoE offload features.\n");

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	/* Allocate per CPU memory to track DDP pools */
	ixgbe_fcoe_ddp_enable(adapter);

	/* enable FCoE and notify stack */
	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev_features_change(netdev);

	/* release existing queues and reallocate them */
	ixgbe_clear_interrupt_scheme(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);

	return 0;
}
/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
		return -EINVAL;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return -EINVAL;

	e_info(drv, "Disabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	/* Free per CPU memory to track DDP pools */
	ixgbe_fcoe_ddp_disable(adapter);

	/* disable FCoE and notify stack */
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	netdev->features &= ~NETIF_F_FCOE_MTU;

	netdev_features_change(netdev);

	/* release existing queues and reallocate them */
	ixgbe_clear_interrupt_scheme(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);

	return 0;
}
/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev : ixgbe adapter
 * @wwn : the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name. If both the prefix and the
 * SAN MAC address are valid, the wwn is formed based on the NAA-2 IEEE
 * Extended name identifier (ref. T10 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

	if ((prefix != 0xffff) &&
	    is_valid_ether_addr(mac->san_addr)) {
		*wwn = ((u64) prefix << 48) |
		       ((u64) mac->san_addr[0] << 40) |
		       ((u64) mac->san_addr[1] << 32) |
		       ((u64) mac->san_addr[2] << 24) |
		       ((u64) mac->san_addr[3] << 16) |
		       ((u64) mac->san_addr[4] << 8) |
		       ((u64) mac->san_addr[5]);
		return 0;
	}
	return -EINVAL;
}
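
/*
 * A worked example of the WWN formation above, using a hypothetical SAN MAC
 * of 00:1b:21:aa:bb:cc and a WWPN prefix of 0x2000: the prefix lands in
 * bits 63:48 and the six MAC bytes in bits 47:0, giving
 *
 *	*wwn = 0x2000001b21aabbcc
 *
 * i.e. an NAA-2 style IEEE Extended identifier derived from the SAN MAC.
 */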
/**
 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
 * @netdev : ixgbe adapter
 * @info : HBA information
 *
 * Returns ixgbe HBA information
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int i, pos;
	u8 buf[8];

	if (!info)
		return -EINVAL;

	/* Don't return information on unsupported devices */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return -EINVAL;

	/* Manufacturer */
	snprintf(info->manufacturer, sizeof(info->manufacturer),
		 "Intel Corporation");

	/* Serial Number */

	/* Get the PCI-e Device Serial Number Capability */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
	if (pos) {
		pos += 4;
		for (i = 0; i < 8; i++)
			pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);

		snprintf(info->serial_number, sizeof(info->serial_number),
			 "%02X%02X%02X%02X%02X%02X%02X%02X",
			 buf[7], buf[6], buf[5], buf[4],
			 buf[3], buf[2], buf[1], buf[0]);
	} else
		snprintf(info->serial_number, sizeof(info->serial_number),
			 "Unknown");

	/* Hardware Version */
	snprintf(info->hardware_version,
		 sizeof(info->hardware_version),
		 "Rev %d", hw->revision_id);
	/* Driver Name/Version */
	snprintf(info->driver_version,
		 sizeof(info->driver_version),
		 "%s v%s",
		 ixgbe_driver_name,
		 ixgbe_driver_version);
	/* Firmware Version */
	strlcpy(info->firmware_version, adapter->eeprom_id,
		sizeof(info->firmware_version));

	/* Model */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel 82599");
	} else if (hw->mac.type == ixgbe_mac_X550) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X550");
	} else {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X540");
	}

	/* Model Description */
	snprintf(info->model_description,
		 sizeof(info->model_description),
		 "%s",
		 ixgbe_default_device_descr);

	return 0;
}
/**
 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
 * @adapter: pointer to the device adapter structure
 *
 * Return : TC that FCoE is mapped to
 */
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
	return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
#else
	return 0;
#endif
}