/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
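/* Check that the packet opcode is consistent with the QP type and that
 * the QP is in a state that can accept this packet.
 */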
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
			    struct rxe_qp *qp)
{
	if (unlikely(!qp->valid))
		goto err1;

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	case IB_QPT_UC:
		if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	default:
		pr_warn_ratelimited("unsupported qp type\n");
		goto err1;
	}

	if (pkt->mask & RXE_REQ_MASK) {
		if (unlikely(qp->resp.state != QP_STATE_READY))
			goto err1;
	} else if (unlikely(qp->req.state < QP_STATE_READY ||
			    qp->req.state > QP_STATE_DRAINED)) {
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}
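/* Bump the port's bad P_Key counter, saturating at 0xffff. */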
static void set_bad_pkey_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.bad_pkey_cntr = min((u32)0xffff,
				       port->attr.bad_pkey_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}
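/* Bump the port's Q_Key violation counter, saturating at 0xffff. */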
static void set_qkey_viol_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.qkey_viol_cntr = min((u32)0xffff,
				       port->attr.qkey_viol_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}
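/* Check the packet's P_Key, and for UD/GSI packets its Q_Key, against
 * the port's P_Key table and the QP attributes; a mismatch bumps the
 * corresponding violation counter and rejects the packet.
 */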
static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      u32 qpn, struct rxe_qp *qp)
{
	int i;
	int found_pkey = 0;
	struct rxe_port *port = &rxe->port;
	u16 pkey = bth_pkey(pkt);

	pkt->pkey_index = 0;

	if (qpn == 1) {
		for (i = 0; i < port->attr.pkey_tbl_len; i++) {
			if (pkey_match(pkey, port->pkey_tbl[i])) {
				pkt->pkey_index = i;
				found_pkey = 1;
				break;
			}
		}

		if (!found_pkey) {
			pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
			set_bad_pkey_cntr(port);
			goto err1;
		}
	} else if (qpn != 0) {
		if (unlikely(!pkey_match(pkey,
					 port->pkey_tbl[qp->attr.pkey_index]
					))) {
			pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
			set_bad_pkey_cntr(port);
			goto err1;
		}
		pkt->pkey_index = qp->attr.pkey_index;
	}

	if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
	    qpn != 0 && pkt->mask) {
		u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;

		if (unlikely(deth_qkey(pkt) != qkey)) {
			pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
					    deth_qkey(pkt), qkey, qpn);
			set_qkey_viol_cntr(port);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}
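/* For RC and UC QPs, check that the packet arrived on the port the QP
 * is bound to and that its IP source/destination addresses match the
 * QP's primary address vector. Other QP types skip the address check.
 */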
static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      struct rxe_qp *qp)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
		goto done;

	if (unlikely(pkt->port_num != qp->attr.port_num)) {
		pr_warn_ratelimited("port %d != qp port %d\n",
				    pkt->port_num, qp->attr.port_num);
		goto err1;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct in_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
		struct in_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;

		if (ip_hdr(skb)->daddr != saddr->s_addr) {
			pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
					    &ip_hdr(skb)->daddr,
					    &saddr->s_addr);
			goto err1;
		}

		if (ip_hdr(skb)->saddr != daddr->s_addr) {
			pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
					    &ip_hdr(skb)->saddr,
					    &daddr->s_addr);
			goto err1;
		}

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct in6_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
		struct in6_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;

		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
			pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
					    &ipv6_hdr(skb)->daddr, saddr);
			goto err1;
		}

		if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
			pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
					    &ipv6_hdr(skb)->saddr, daddr);
			goto err1;
		}
	}

done:
	return 0;

err1:
	return -EINVAL;
}
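/* Look up the destination QP from the BTH QPN and run the per-packet
 * type/state, address and key checks. On success a reference to the QP
 * is held via pkt->qp; the multicast QPN is resolved later, per group
 * member, in rxe_rcv_mcast_pkt().
 */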
static int hdr_check(struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = pkt->rxe;
	struct rxe_port *port = &rxe->port;
	struct rxe_qp *qp = NULL;
	u32 qpn = bth_qpn(pkt);
	int index;
	int err;

	if (unlikely(bth_tver(pkt) != BTH_TVER)) {
		pr_warn_ratelimited("bad tver\n");
		goto err1;
	}

	if (unlikely(qpn == 0)) {
		pr_warn_once("QP 0 not supported");
		goto err1;
	}

	if (qpn != IB_MULTICAST_QPN) {
		index = (qpn == 1) ? port->qp_gsi_index : qpn;

		qp = rxe_pool_get_index(&rxe->qp_pool, index);
		if (unlikely(!qp)) {
			pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
			goto err1;
		}

		err = check_type_state(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_addr(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_keys(rxe, pkt, qpn, qp);
		if (unlikely(err))
			goto err2;
	} else {
		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
			pr_warn_ratelimited("no grh for mcast qpn\n");
			goto err1;
		}
	}

	pkt->qp = qp;
	return 0;

err2:
	rxe_drop_ref(qp);
err1:
	return -EINVAL;
}
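/* Queue a validated packet to its QP: requests go to the responder,
 * responses and acks go to the completer.
 */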
static inline void rxe_rcv_pkt(struct rxe_dev *rxe,
			       struct rxe_pkt_info *pkt,
			       struct sk_buff *skb)
{
	if (pkt->mask & RXE_REQ_MASK)
		rxe_resp_queue_pkt(rxe, pkt->qp, skb);
	else
		rxe_comp_queue_pkt(rxe, pkt->qp, skb);
}
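/* Deliver a multicast packet to every QP attached to the group that
 * matches the destination GID, taking an extra skb reference for all
 * but the last QP in the list.
 */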
static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_mc_grp *mcg;
	struct rxe_mc_elem *mce;
	struct rxe_qp *qp;
	union ib_gid dgid;
	int err;

	if (skb->protocol == htons(ETH_P_IP))
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));

	/* lookup mcast group corresponding to mgid, takes a ref */
	mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
	if (!mcg)
		goto err1;	/* mcast group not registered */

	spin_lock_bh(&mcg->mcg_lock);

	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
		qp = mce->qp;
		pkt = SKB_TO_PKT(skb);

		/* validate qp for incoming packet */
		err = check_type_state(rxe, pkt, qp);
		if (err)
			continue;

		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
		if (err)
			continue;

		/* if *not* the last qp in the list
		 * increase the users of the skb then post to the next qp
		 */
		if (mce->qp_list.next != &mcg->qp_list)
			skb_get(skb);

		pkt->qp = qp;
		rxe_add_ref(qp);
		rxe_rcv_pkt(rxe, pkt, skb);
	}

	spin_unlock_bh(&mcg->mcg_lock);

	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */

err1:
	kfree_skb(skb);
}
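/* Check that the packet's destination GID matches a GID entry on the
 * receiving port, so that traffic not addressed to us is dropped.
 */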
static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
	const struct ib_gid_attr *gid_attr;
	union ib_gid dgid;
	union ib_gid *pdgid;

	if (skb->protocol == htons(ETH_P_IP)) {
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
		pdgid = &dgid;
	} else {
		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
	}

	gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
					 IB_GID_TYPE_ROCE_UDP_ENCAP,
					 1, skb->dev);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	rdma_put_gid_attr(gid_attr);
	return 0;
}
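/* Receive entry point: sanity-check lengths, match the destination
 * GID, validate the headers, verify the ICRC, then dispatch to the
 * unicast or multicast receive path.
 */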
/* rxe_rcv is called from the interface driver */
void rxe_rcv(struct sk_buff *skb)
{
	int err;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_dev *rxe = pkt->rxe;
	__be32 *icrcp;
	u32 calc_icrc, pack_icrc;

	if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
		goto drop;

	if (unlikely(rxe_match_dgid(rxe, skb) < 0)) {
		pr_warn_ratelimited("failed matching dgid\n");
		goto drop;
	}

	pkt->opcode = bth_opcode(pkt);
	pkt->psn = bth_psn(pkt);
	pkt->qp = NULL;
	pkt->mask |= rxe_opcode[pkt->opcode].mask;

	if (unlikely(skb->len < header_size(pkt)))
		goto drop;

	err = hdr_check(pkt);
	if (unlikely(err))
		goto drop;

	/* Verify ICRC */
	icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
	pack_icrc = be32_to_cpu(*icrcp);

	calc_icrc = rxe_icrc_hdr(pkt, skb);
	calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
			      payload_size(pkt) + bth_pad(pkt));
	calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
	if (unlikely(calc_icrc != pack_icrc)) {
		if (skb->protocol == htons(ETH_P_IPV6))
			pr_warn_ratelimited("bad ICRC from %pI6c\n",
					    &ipv6_hdr(skb)->saddr);
		else if (skb->protocol == htons(ETH_P_IP))
			pr_warn_ratelimited("bad ICRC from %pI4\n",
					    &ip_hdr(skb)->saddr);
		else
			pr_warn_ratelimited("bad ICRC from unknown\n");

		goto drop;
	}

	rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);

	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
		rxe_rcv_mcast_pkt(rxe, skb);
	else
		rxe_rcv_pkt(rxe, pkt, skb);

	return;

drop:
	if (pkt->qp)
		rxe_drop_ref(pkt->qp);

	kfree_skb(skb);
}