/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
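
/* Check that the opcode class in the BTH matches the type of the
 * destination QP and that the QP is in a state that can accept the
 * packet; anything else is reported to the caller so it can be dropped.
 */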
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
			    struct rxe_qp *qp)
{
	if (unlikely(!qp->valid))
		goto err1;

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	case IB_QPT_UC:
		if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	default:
		pr_warn_ratelimited("unsupported qp type\n");
		goto err1;
	}

	if (pkt->mask & RXE_REQ_MASK) {
		if (unlikely(qp->resp.state != QP_STATE_READY))
			goto err1;
	} else if (unlikely(qp->req.state < QP_STATE_READY ||
			    qp->req.state > QP_STATE_DRAINED)) {
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}
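
/* The two helpers below bump the port's bad P_Key and Q_Key violation
 * counters, saturating at 0xffff since the port attributes expose them
 * as 16-bit values; the port lock serializes concurrent updates.
 */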
static void set_bad_pkey_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.bad_pkey_cntr = min((u32)0xffff,
				       port->attr.bad_pkey_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}

static void set_qkey_viol_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.qkey_viol_cntr = min((u32)0xffff,
				       port->attr.qkey_viol_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}
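
/* Check the BTH P_Key against the port's P_Key table (any entry may
 * match for the GSI QP, otherwise the QP's own P_Key index must match)
 * and, for UD/GSI QPs, check the DETH Q_Key against the expected value.
 */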
static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      u32 qpn, struct rxe_qp *qp)
{
	int i;
	int found_pkey = 0;
	struct rxe_port *port = &rxe->port;
	u16 pkey = bth_pkey(pkt);

	pkt->pkey_index = 0;

	if (qpn == 1) {
		for (i = 0; i < port->attr.pkey_tbl_len; i++) {
			if (pkey_match(pkey, port->pkey_tbl[i])) {
				pkt->pkey_index = i;
				found_pkey = 1;
				break;
			}
		}

		if (!found_pkey) {
			pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
			set_bad_pkey_cntr(port);
			goto err1;
		}
	} else {
		if (unlikely(!pkey_match(pkey,
					 port->pkey_tbl[qp->attr.pkey_index]
					))) {
			pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
			set_bad_pkey_cntr(port);
			goto err1;
		}
		pkt->pkey_index = qp->attr.pkey_index;
	}

	if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
	    pkt->mask) {
		u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;

		if (unlikely(deth_qkey(pkt) != qkey)) {
			pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
					    deth_qkey(pkt), qkey, qpn);
			set_qkey_viol_cntr(port);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}
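
/* For connected QPs (RC/UC), check that the packet arrived on the port
 * and between the IPv4 or IPv6 address pair recorded in the QP's
 * primary address vector.
 */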
static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      struct rxe_qp *qp)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
		goto done;

	if (unlikely(pkt->port_num != qp->attr.port_num)) {
		pr_warn_ratelimited("port %d != qp port %d\n",
				    pkt->port_num, qp->attr.port_num);
		goto err1;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct in_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
		struct in_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;

		if (ip_hdr(skb)->daddr != saddr->s_addr) {
			pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
					    &ip_hdr(skb)->daddr,
					    &saddr->s_addr);
			goto err1;
		}

		if (ip_hdr(skb)->saddr != daddr->s_addr) {
			pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
					    &ip_hdr(skb)->saddr,
					    &daddr->s_addr);
			goto err1;
		}

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct in6_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
		struct in6_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;

		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
			pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
					    &ipv6_hdr(skb)->daddr, saddr);
			goto err1;
		}

		if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
			pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
					    &ipv6_hdr(skb)->saddr, daddr);
			goto err1;
		}
	}

done:
	return 0;

err1:
	return -EINVAL;
}
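
/* Validate the transport header version and QPN, look up the
 * destination QP (taking a reference that is stored in pkt->qp on
 * success) and run the type/state, address and key checks against it.
 */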
static int hdr_check(struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = pkt->rxe;
	struct rxe_port *port = &rxe->port;
	struct rxe_qp *qp = NULL;
	u32 qpn = bth_qpn(pkt);
	int index;
	int err;

	if (unlikely(bth_tver(pkt) != BTH_TVER)) {
		pr_warn_ratelimited("bad tver\n");
		goto err1;
	}

	if (unlikely(qpn == 0)) {
		pr_warn_once("QP 0 not supported");
		goto err1;
	}

	if (qpn != IB_MULTICAST_QPN) {
		index = (qpn == 1) ? port->qp_gsi_index : qpn;

		qp = rxe_pool_get_index(&rxe->qp_pool, index);
		if (unlikely(!qp)) {
			pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
			goto err1;
		}

		err = check_type_state(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_addr(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_keys(rxe, pkt, qpn, qp);
		if (unlikely(err))
			goto err2;
	} else {
		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
			pr_warn_ratelimited("no grh for mcast qpn\n");
			goto err1;
		}
	}

	pkt->qp = qp;
	return 0;

err2:
	rxe_drop_ref(qp);
err1:
	return -EINVAL;
}
static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	if (pkt->mask & RXE_REQ_MASK)
		rxe_resp_queue_pkt(pkt->qp, skb);
	else
		rxe_comp_queue_pkt(pkt->qp, skb);
}
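
/* Deliver a multicast packet to each QP attached to the group that the
 * destination GID maps to, taking an extra reference on the skb for
 * every QP except the last one in the list.
 */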
static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_mc_grp *mcg;
	struct rxe_mc_elem *mce;
	struct rxe_qp *qp;
	union ib_gid dgid;
	int err;

	if (skb->protocol == htons(ETH_P_IP))
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));

	/* lookup mcast group corresponding to mgid, takes a ref */
	mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
	if (!mcg)
		goto err1;	/* mcast group not registered */

	spin_lock_bh(&mcg->mcg_lock);

	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
		qp = mce->qp;
		pkt = SKB_TO_PKT(skb);

		/* validate qp for incoming packet */
		err = check_type_state(rxe, pkt, qp);
		if (err)
			continue;

		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
		if (err)
			continue;

		/* if *not* the last qp in the list
		 * increase the users of the skb then post to the next qp
		 */
		if (mce->qp_list.next != &mcg->qp_list)
			skb_get(skb);

		pkt->qp = qp;
		rxe_add_ref(qp);
		rxe_rcv_pkt(pkt, skb);
	}

	spin_unlock_bh(&mcg->mcg_lock);

	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */

err1:
	kfree_skb(skb);
}
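
/* Check that the packet's destination address corresponds to a GID
 * table entry of the device; for RoCEv2 the GID is carried in the
 * outer IP destination address.
 */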
static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
	const struct ib_gid_attr *gid_attr;
	union ib_gid dgid;
	union ib_gid *pdgid;

	if (skb->protocol == htons(ETH_P_IP)) {
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
		pdgid = &dgid;
	} else {
		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
	}

	gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
					 IB_GID_TYPE_ROCE_UDP_ENCAP,
					 1, skb->dev);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	rdma_put_gid_attr(gid_attr);
	return 0;
}

/* rxe_rcv is called from the interface driver */
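/* It checks the length, destination GID, headers and ICRC of the skb,
 * then hands it to the responder or the completer, fanning out per QP
 * for multicast QPNs; bad packets are dropped and the skb freed.
 */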
void rxe_rcv(struct sk_buff *skb)
{
	int err;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_dev *rxe = pkt->rxe;
	__be32 *icrcp;
	u32 calc_icrc, pack_icrc;

	pkt->offset = 0;

	if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
		goto drop;

	if (unlikely(rxe_match_dgid(rxe, skb) < 0)) {
		pr_warn_ratelimited("failed matching dgid\n");
		goto drop;
	}

	pkt->opcode = bth_opcode(pkt);
	pkt->psn = bth_psn(pkt);
	pkt->qp = NULL;
	pkt->mask |= rxe_opcode[pkt->opcode].mask;

	if (unlikely(skb->len < header_size(pkt)))
		goto drop;

	err = hdr_check(pkt);
	if (unlikely(err))
		goto drop;

	/* compare the ICRC carried at the end of the packet against one
	 * computed over the received headers, payload and pad bytes
	 */
	icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
	pack_icrc = be32_to_cpu(*icrcp);

	calc_icrc = rxe_icrc_hdr(pkt, skb);
	calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
			      payload_size(pkt) + bth_pad(pkt));
	calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
	if (unlikely(calc_icrc != pack_icrc)) {
		if (skb->protocol == htons(ETH_P_IPV6))
			pr_warn_ratelimited("bad ICRC from %pI6c\n",
					    &ipv6_hdr(skb)->saddr);
		else if (skb->protocol == htons(ETH_P_IP))
			pr_warn_ratelimited("bad ICRC from %pI4\n",
					    &ip_hdr(skb)->saddr);
		else
			pr_warn_ratelimited("bad ICRC from unknown\n");

		goto drop;
	}

	rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);

	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
		rxe_rcv_mcast_pkt(rxe, skb);
	else
		rxe_rcv_pkt(pkt, skb);

	return;

drop:
	if (pkt->qp)
		rxe_drop_ref(pkt->qp);

	kfree_skb(skb);
}