/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
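
/* Acquire a slowpath-queue (SPQ) entry and initialize its common ramrod
 * header (CID, command ID and protocol ID) plus the requested completion
 * mode. On success, *pp_ent points at the entry and its ramrod payload
 * has been zeroed, ready for command-specific configuration.
 */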
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	if (!pp_ent)
		return -ENOMEM;

	rc = qed_spq_get_entry(p_hwfn, pp_ent);
	if (rc)
		return rc;

	p_ent = *pp_ent;

	p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
	p_ent->elem.hdr.cmd_id = cmd;
	p_ent->elem.hdr.protocol_id = protocol;

	p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode = p_data->comp_mode;
	p_ent->comp_done.done = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case QED_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			return -EINVAL;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case QED_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
		   opaque_cid, cmd, protocol,
		   (unsigned long)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return 0;
}
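
/* Translate the driver's tunnel classification enum into the firmware's
 * tunnel_clss encoding; unknown values fall back to MAC/VLAN
 * classification.
 */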
static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
	switch (type) {
	case QED_TUNN_CLSS_MAC_VLAN:
		return TUNNEL_CLSS_MAC_VLAN;
	case QED_TUNN_CLSS_MAC_VNI:
		return TUNNEL_CLSS_MAC_VNI;
	case QED_TUNN_CLSS_INNER_MAC_VLAN:
		return TUNNEL_CLSS_INNER_MAC_VLAN;
	case QED_TUNN_CLSS_INNER_MAC_VNI:
		return TUNNEL_CLSS_INNER_MAC_VNI;
	case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
		return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
	default:
		return TUNNEL_CLSS_MAC_VLAN;
	}
}
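
/* Copy each tunnel's enable state from p_src into the cached info, but
 * only where an update was requested; b_pf_start forces a full copy so
 * that PF start always latches every mode.
 */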
static void
qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
			    struct qed_tunnel_info *p_src, bool b_pf_start)
{
	if (p_src->vxlan.b_update_mode || b_pf_start)
		p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

	if (p_src->l2_gre.b_update_mode || b_pf_start)
		p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

	if (p_src->ip_gre.b_update_mode || b_pf_start)
		p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

	if (p_src->l2_geneve.b_update_mode || b_pf_start)
		p_tun->l2_geneve.b_mode_enabled =
		    p_src->l2_geneve.b_mode_enabled;

	if (p_src->ip_geneve.b_update_mode || b_pf_start)
		p_tun->ip_geneve.b_mode_enabled =
		    p_src->ip_geneve.b_mode_enabled;
}
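
/* Refresh the cached per-tunnel classification values, converting each
 * one to the firmware encoding on the way in.
 */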
static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
				  struct qed_tunnel_info *p_src)
{
	enum tunnel_clss type;

	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

	type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
	p_tun->vxlan.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
	p_tun->l2_gre.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
	p_tun->ip_gre.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
	p_tun->l2_geneve.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
	p_tun->ip_geneve.tun_cls = type;
}
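
/* Cache the VXLAN/GENEVE UDP destination ports; the port values
 * themselves are copied only when their update flag is set.
 */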
static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
			       struct qed_tunnel_info *p_src)
{
	p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
	p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

	if (p_src->geneve_port.b_update_port)
		p_tun->geneve_port.port = p_src->geneve_port.port;

	if (p_src->vxlan_port.b_update_port)
		p_tun->vxlan_port.port = p_src->vxlan_port.port;
}
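
/* Helpers for filling tunnel fields inside a ramrod: the __ variant sets
 * only the classification byte, while the full variant also latches the
 * UDP destination port when an update is requested.
 */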
static void
__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			      struct qed_tunn_update_type *tun_type)
{
	*p_tunn_cls = tun_type->tun_cls;
}

static void
qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			    struct qed_tunn_update_type *tun_type,
			    u8 *p_update_port,
			    __le16 *p_port,
			    struct qed_tunn_update_udp_port *p_udp_port)
{
	__qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
	if (p_udp_port->b_update_port) {
		*p_update_port = 1;
		*p_port = cpu_to_le16(p_udp_port->port);
	}
}
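
/* Merge the requested tunnel changes into the per-device cache, then
 * mirror the resulting state into a PF_UPDATE ramrod's tunnel config.
 */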
static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
			      struct qed_tunnel_info *p_src,
			      struct pf_update_tunnel_config *p_tunn_cfg)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

	qed_set_pf_update_tunn_mode(p_tun, p_src, false);
	qed_set_tunn_cls_info(p_tun, p_src);
	qed_set_tunn_ports(p_tun, p_src);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				    &p_tun->vxlan,
				    &p_tunn_cfg->set_vxlan_udp_port_flg,
				    &p_tunn_cfg->vxlan_udp_port,
				    &p_tun->vxlan_port);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				    &p_tun->l2_geneve,
				    &p_tunn_cfg->set_geneve_udp_port_flg,
				    &p_tunn_cfg->geneve_udp_port,
				    &p_tun->geneve_port);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
				      &p_tun->ip_geneve);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
				      &p_tun->l2_gre);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
				      &p_tun->ip_gre);

	p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
	p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
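
/* Program the hardware with the cached tunnel enable state so that GRE,
 * VXLAN and GENEVE traffic is recognized per the current configuration.
 */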
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_tunnel_info *p_tun)
{
	qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
			   p_tun->ip_gre.b_mode_enabled);
	qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

	qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
			      p_tun->ip_geneve.b_mode_enabled);
}
static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_tunnel_info *p_tunn)
{
	if (p_tunn->vxlan_port.b_update_port)
		qed_set_vxlan_dest_port(p_hwfn, p_ptt,
					p_tunn->vxlan_port.port);

	if (p_tunn->geneve_port.b_update_port)
		qed_set_geneve_dest_port(p_hwfn, p_ptt,
					 p_tunn->geneve_port.port);

	qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}
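
/* Same as the PF_UPDATE variant above, but for the PF_START ramrod:
 * every tunnel mode is latched (b_pf_start), and a NULL p_src leaves the
 * configuration untouched.
 */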
static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
			     struct qed_tunnel_info *p_src,
			     struct pf_start_tunnel_config *p_tunn_cfg)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

	if (!p_src)
		return;

	qed_set_pf_update_tunn_mode(p_tun, p_src, true);
	qed_set_tunn_cls_info(p_tun, p_src);
	qed_set_tunn_ports(p_tun, p_src);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				    &p_tun->vxlan,
				    &p_tunn_cfg->set_vxlan_udp_port_flg,
				    &p_tunn_cfg->vxlan_udp_port,
				    &p_tun->vxlan_port);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				    &p_tun->l2_geneve,
				    &p_tunn_cfg->set_geneve_udp_port_flg,
				    &p_tunn_cfg->geneve_udp_port,
				    &p_tun->geneve_port);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
				      &p_tun->ip_geneve);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
				      &p_tun->l2_gre);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
				      &p_tun->ip_gre);
}
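
/* Send the COMMON_RAMROD_PF_START ramrod: set up the event queue and
 * consolidation queue PBLs, multi-function mode, tunnel configuration,
 * SR-IOV VF range and personality, then post the request and program
 * the tunnel hardware state.
 */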
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_tunnel_info *p_tunn,
		    enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 page_cnt;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = cpu_to_le16(0xf);

	switch (mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case QED_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}

	p_ramrod->outer_tag_config.outer_tag.tci =
	    cpu_to_le16(p_hwfn->hw_info.ovlan);

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

	if (IS_MF_SI(p_hwfn))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_FCOE:
		p_ramrod->personality = PERSONALITY_FCOE;
		break;
	case QED_PCI_ISCSI:
		p_ramrod->personality = PERSONALITY_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
	case QED_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8)p_iov->total_vfs;
	}
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	if (p_tunn)
		qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
					  &p_hwfn->cdev->tunnel);

	return rc;
}
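
/* Post a PF_UPDATE ramrod carrying the current DCBx negotiation results
 * to the firmware; completion is delivered via callback
 * (QED_SPQ_MODE_CB).
 */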
int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
				      &p_ent->ramrod.pf_update);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
/* Set pf update ramrod command params; a VF cannot post ramrods itself
 * and instead forwards the tunnel update to its PF.
 */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct qed_tunnel_info *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

	if (!p_tunn)
		return -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
				      &p_ent->ramrod.pf_update.tunnel_config);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);

	return rc;
}
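
/* Send the COMMON_RAMROD_PF_STOP ramrod and block (QED_SPQ_MODE_EBLOCK)
 * until the firmware acknowledges the PF teardown.
 */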
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
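
/* Post an empty ramrod; the completed round trip through the slowpath
 * queue serves as a firmware heartbeat.
 */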
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
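
/* Push an updated S-tag (outer VLAN) value to the firmware through a
 * PF_UPDATE ramrod.
 */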
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
	p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}