/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
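{
	/* Body elided in this excerpt. A minimal sketch, assuming the
	 * ixgbe_fcoe_ddp fields used elsewhere in this file (len, err,
	 * udl/udp, sgl/sgc): reset the context so the slot can be reused. */
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}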
/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len = 0;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	u32 fcbuff;
	if (!netdev)
		goto out_ddp_put;

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto out_ddp_put;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto out_ddp_put;

	len = ddp->len;
	/* if there is an error, force to invalidate ddp context */
	if (ddp->err) {
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
		if (fcbuff & IXGBE_FCBUFF_VALID)
			udelay(100);
	}
	if (ddp->sgl)
		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);
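
	/* Common exit path (elided in the excerpt); the early checks above
	 * are assumed to branch here, returning the ddp-ed length: */
out_ddp_put:
	return len;
}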
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to set up DDP for target mode, 0 for initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;
	struct pci_pool *pool;
	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	if (!fcoe->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		return 0;
	}

	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);
	/* setup dma from scsi command sgl */
	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		return 0;
	}

	/* alloc the udl from per cpu ddp pool */
	pool = *per_cpu_ptr(fcoe->pool, get_cpu());
	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
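
	/* Bookkeeping elided in the excerpt: record the pool and sgl in the
	 * context (ixgbe_fcoe_ddp_put and ixgbe_fcoe_ddp rely on these
	 * fields) and start with an empty user descriptor list: */
	ddp->pool = pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;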
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
				      "not enough descriptors\n",
				      xid, i, j, dmacount, (u64)addr);
				goto out_noddp_free;
			}
			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;
	/*
	 * lastsize cannot be bufflen: if it is, add another
	 * buffer with lastsize = 1.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			printk_once("Will NOT use DDP since there are not "
				    "enough user buffers. We need an extra "
				    "buffer because lastsize is bufflen. "
				    "xid=%x:%d,%d,%d:addr=%llx\n",
				    xid, i, j, dmacount, (u64)addr);
			goto out_noddp_free;
		}
		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();
	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);
	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;
	/* program DMA context */
	hw = &adapter->hw;
	spin_lock_bh(&fcoe->lock);

	/* turn on last frame indication for target mode, as the target is
	 * supposed to send FCP_RSP when it is done. */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
	/* program filter context */
	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

	spin_unlock_bh(&fcoe->lock);
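
	return 1;

	/* Error paths below are reconstructed from the gotos above; the
	 * label names follow the out_noddp_unmap target used earlier. */
out_noddp_free: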
	pci_pool_free(pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
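	put_cpu();
	return 0;
}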
/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}
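
/*
 * Usage sketch (illustrative only, not part of the driver): as the
 * kernel-doc above notes, the FCP layer of libfc reaches these handlers
 * through the fcoe hooks in net_device_ops, roughly:
 *
 *	ddp_ok = netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev, xid,
 *							sgl, sgc);
 *	...	(I/O completes)
 *	len = netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
 */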
/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 * @staterr: the status/error bits from the rx descriptor
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates the length of data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb, u32 staterr)
{
	u16 xid;
	u32 fctl;
	u32 fceofe, fcerr, fcstat;
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;
	fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
	fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
	if (fcerr == IXGBE_FCERR_BADCRC)
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));
	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto ddp_out;
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto ddp_out;

	if (fcerr | fceofe)
		goto ddp_out;

	fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
	if (fcstat) {
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		/* unmap the sg list when FCP_RSP is received */
		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
			pci_unmap_sg(adapter->pdev, ddp->sgl,
				     ddp->sgc, DMA_FROM_DEVICE);
			ddp->err = (fcerr | fceofe);
			ddp->sgl = NULL;
			ddp->sgc = 0;
		}
		/* return 0 to bypass going to ULD for DDPed data */
		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
			rc = 0;
		else if (ddp->len)
			rc = ddp->len;
	}
	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data and the ULP can send FCP_RSP back. As this is
	 * not a full fcoe frame, we fill the trailer here so it won't be
	 * dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}
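
	/* Common exit (elided in the excerpt), targeted by the gotos above: */
ddp_out:
	return rc;
}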
/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @skb: associated skb
 * @tx_flags: tx flags
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
	      u32 tx_flags, u8 *hdr_len)
{
	struct fc_frame_header *fh;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 mss_l4len_idx;
	u8 sof, eof;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
			skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}
	/* resets the header to point fcoe/fc */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* sets up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
			       IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}
	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb))
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
					IXGBE_ADVTXD_FCOEF_ORIE;
		else
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}
	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* include trailer in headlen as it is replicated per frame */
	*hdr_len = sizeof(struct fcoe_crc_eof);
	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
	if (skb_is_gso(skb))
		*hdr_len += (skb_transport_offset(skb) +
			     sizeof(struct fc_frame_header));
	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_transport_offset(skb) +
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	/* write context desc */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);

	return skb_is_gso(skb);
}
static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
{
	unsigned int cpu;
	struct pci_pool **pool;

	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(fcoe->pool, cpu);
		if (*pool)
			pci_pool_destroy(*pool);
	}
	free_percpu(fcoe->pool);
	fcoe->pool = NULL;
}
static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	unsigned int cpu;
	struct pci_pool **pool;
	char pool_name[32];

	fcoe->pool = alloc_percpu(struct pci_pool *);
	if (!fcoe->pool)
		return;

	/* allocate pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
		pool = per_cpu_ptr(fcoe->pool, cpu);
		*pool = pci_pool_create(pool_name, adapter->pdev,
					IXGBE_FCPTR_MAX, IXGBE_FCPTR_ALIGN,
					PAGE_SIZE);
		if (!*pool) {
			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
			ixgbe_fcoe_ddp_pools_free(fcoe);
			return;
		}
	}
}
/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	int i, fcoe_q, fcoe_i;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
	if (!fcoe->pool) {
		spin_lock_init(&fcoe->lock);

		ixgbe_fcoe_ddp_pools_alloc(adapter);
		if (!fcoe->pool) {
			e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
			return;
		}

		/* Extra buffer to be shared by all DDPs for HW work around */
		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
		if (fcoe->extra_ddp_buffer == NULL) {
			e_err(drv, "failed to allocate extra DDP buffer\n");
			goto out_ddp_pools;
		}

		fcoe->extra_ddp_buffer_dma =
			dma_map_single(&adapter->pdev->dev,
				       fcoe->extra_ddp_buffer,
				       IXGBE_FCBUFF_MIN,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev,
				      fcoe->extra_ddp_buffer_dma)) {
			e_err(drv, "failed to map extra DDP buffer\n");
			goto out_extra_ddp_buffer;
		}
	}
	/* Enable L2 eth type filter for FCoE */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
	/* Enable L2 eth type filter for FIP */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
	if (adapter->ring_feature[RING_F_FCOE].indices) {
		/* Use multiple rx queues for FCoE by redirection table */
		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
			fcoe_i = f->mask + i % f->indices;
			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
		}
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
	} else {
		/* Use single rx queue for FCoE */
		fcoe_i = f->mask;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
				IXGBE_ETQS_QUEUE_EN |
				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
	}
	/* send FIP frames to the first FCoE queue */
	fcoe_i = f->mask;
	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCOELLI |
			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
	return;

out_extra_ddp_buffer:
	kfree(fcoe->extra_ddp_buffer);
out_ddp_pools:
	ixgbe_fcoe_ddp_pools_free(fcoe);
}
/**
 * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
{
	int i;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	if (!fcoe->pool)
		return;

	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);
	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	kfree(fcoe->extra_ddp_buffer);
	ixgbe_fcoe_ddp_pools_free(fcoe);
}
/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_enable;

	atomic_inc(&fcoe->refcnt);
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		goto out_enable;

	e_info(drv, "Enabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);
	ixgbe_clear_interrupt_scheme(adapter);

	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
	netdev->features |= NETIF_F_FCOE_CRC;
	netdev->features |= NETIF_F_FSO;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;

	ixgbe_init_interrupt_scheme(adapter);
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_enable:
	return rc;
}
/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_disable;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		goto out_disable;

	if (!atomic_dec_and_test(&fcoe->refcnt))
		goto out_disable;

	e_info(drv, "Disabling FCoE offload features.\n");
	netdev->features &= ~NETIF_F_FCOE_CRC;
	netdev->features &= ~NETIF_F_FSO;
	netdev->features &= ~NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = 0;
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = 0;
	ixgbe_cleanup_fcoe(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_disable:
	return rc;
}
/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev: the corresponding netdev
 * @wwn: the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name. If both the prefix and the
 * SAN MAC address are valid, the wwn is formed based on the NAA-2 for
 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	int rc = -EINVAL;
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}
	if ((prefix != 0xffff) &&
	    is_valid_ether_addr(mac->san_addr)) {
		*wwn = ((u64) prefix << 48) |
		       ((u64) mac->san_addr[0] << 40) |
		       ((u64) mac->san_addr[1] << 32) |
		       ((u64) mac->san_addr[2] << 24) |
		       ((u64) mac->san_addr[3] << 16) |
		       ((u64) mac->san_addr[4] << 8)  |
		       ((u64) mac->san_addr[5]);
		rc = 0;
	}
	return rc;
}