/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/rdma_netlink.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");
/* free resources for all ports on a device */
static void rxe_cleanup_ports(struct rxe_dev *rxe)
{
        kfree(rxe->port.pkey_tbl);
        rxe->port.pkey_tbl = NULL;
}
/* free resources for a rxe device all objects created for this device must
 * have been destroyed
 */
void rxe_dealloc(struct ib_device *ib_dev)
{
        struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

        rxe_pool_cleanup(&rxe->uc_pool);
        rxe_pool_cleanup(&rxe->pd_pool);
        rxe_pool_cleanup(&rxe->ah_pool);
        rxe_pool_cleanup(&rxe->srq_pool);
        rxe_pool_cleanup(&rxe->qp_pool);
        rxe_pool_cleanup(&rxe->cq_pool);
        rxe_pool_cleanup(&rxe->mr_pool);
        rxe_pool_cleanup(&rxe->mw_pool);
        rxe_pool_cleanup(&rxe->mc_grp_pool);
        rxe_pool_cleanup(&rxe->mc_elem_pool);

        rxe_cleanup_ports(rxe);

        if (rxe->tfm)
                crypto_free_shash(rxe->tfm);
}
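/* rxe_dealloc() is the driver's teardown hook for the ib core (registered
 * elsewhere, presumably as ib_device_ops.dealloc_driver); it runs once the
 * last reference to the ib_device is dropped, by which point every object
 * in the pools above must already have been destroyed.
 */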
/* initialize rxe device parameters */
static void rxe_init_device_param(struct rxe_dev *rxe)
{
        rxe->max_inline_data = RXE_MAX_INLINE_DATA;

        rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
        rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
        rxe->attr.max_qp = RXE_MAX_QP;
        rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
        rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
        rxe->attr.max_send_sge = RXE_MAX_SGE;
        rxe->attr.max_recv_sge = RXE_MAX_SGE;
        rxe->attr.max_sge_rd = RXE_MAX_SGE_RD;
        rxe->attr.max_cq = RXE_MAX_CQ;
        rxe->attr.max_cqe = (1 << RXE_MAX_LOG_CQE) - 1;
        rxe->attr.max_mr = RXE_MAX_MR;
        rxe->attr.max_pd = RXE_MAX_PD;
        rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
        rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
        rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
        rxe->attr.atomic_cap = IB_ATOMIC_HCA;
        rxe->attr.max_mcast_grp = RXE_MAX_MCAST_GRP;
        rxe->attr.max_mcast_qp_attach = RXE_MAX_MCAST_QP_ATTACH;
        rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
        rxe->attr.max_ah = RXE_MAX_AH;
        rxe->attr.max_srq = RXE_MAX_SRQ;
        rxe->attr.max_srq_wr = RXE_MAX_SRQ_WR;
        rxe->attr.max_srq_sge = RXE_MAX_SRQ_SGE;
        rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
        rxe->attr.max_pkeys = RXE_MAX_PKEYS;
        rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;

        rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
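/* A note on the values above: they are fixed compile-time ceilings (the
 * RXE_* constants, presumably from rxe_param.h) rather than probed
 * hardware limits, and they are what a query of the device attributes,
 * e.g. via ibv_devinfo, will report back to the user.
 */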
/* initialize port attributes */
static int rxe_init_port_param(struct rxe_port *port)
{
        port->attr.state = IB_PORT_DOWN;
        port->attr.max_mtu = IB_MTU_4096;
        port->attr.active_mtu = IB_MTU_256;
        port->attr.gid_tbl_len = RXE_PORT_GID_TBL_LEN;
        port->attr.port_cap_flags = RXE_PORT_PORT_CAP_FLAGS;
        port->attr.max_msg_sz = RXE_PORT_MAX_MSG_SZ;
        port->attr.bad_pkey_cntr = RXE_PORT_BAD_PKEY_CNTR;
        port->attr.qkey_viol_cntr = RXE_PORT_QKEY_VIOL_CNTR;
        port->attr.pkey_tbl_len = RXE_PORT_PKEY_TBL_LEN;
        port->attr.lid = RXE_PORT_LID;
        port->attr.sm_lid = RXE_PORT_SM_LID;
        port->attr.lmc = RXE_PORT_LMC;
        port->attr.max_vl_num = RXE_PORT_MAX_VL_NUM;
        port->attr.sm_sl = RXE_PORT_SM_SL;
        port->attr.subnet_timeout = RXE_PORT_SUBNET_TIMEOUT;
        port->attr.init_type_reply = RXE_PORT_INIT_TYPE_REPLY;
        port->attr.active_width = RXE_PORT_ACTIVE_WIDTH;
        port->attr.active_speed = RXE_PORT_ACTIVE_SPEED;
        port->attr.phys_state = RXE_PORT_PHYS_STATE;
        port->mtu_cap = ib_mtu_enum_to_int(IB_MTU_256);
        port->subnet_prefix = cpu_to_be64(RXE_PORT_SUBNET_PREFIX);

        return 0;
}
/* initialize port state, note IB convention that HCA ports are always
 * numbered from 1
 */
static int rxe_init_ports(struct rxe_dev *rxe)
{
        struct rxe_port *port = &rxe->port;

        rxe_init_port_param(port);

        if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len)
                return -EINVAL;

        port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len,
                                 sizeof(*port->pkey_tbl), GFP_KERNEL);

        if (!port->pkey_tbl)
                return -ENOMEM;

        port->pkey_tbl[0] = 0xffff;
        addrconf_addr_eui48((unsigned char *)&port->port_guid,
                            rxe->ndev->dev_addr);

        spin_lock_init(&port->port_lock);

        return 0;
}
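/* Two defaults above are worth noting: 0xffff in pkey_tbl[0] is the IBA
 * default full-membership pkey, and addrconf_addr_eui48() expands the
 * underlying netdev's 48-bit MAC into an EUI-64, so the port GUID tracks
 * the Ethernet device that rxe is layered on.
 */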
/* init pools of managed objects */
static int rxe_init_pools(struct rxe_dev *rxe)
{
        int err;

        err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
                            rxe->max_ucontext);
        if (err)
                goto err1;

        err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD,
                            rxe->attr.max_pd);
        if (err)
                goto err2;

        err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH,
                            rxe->attr.max_ah);
        if (err)
                goto err3;

        err = rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ,
                            rxe->attr.max_srq);
        if (err)
                goto err4;

        err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
                            rxe->attr.max_qp);
        if (err)
                goto err5;

        err = rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ,
                            rxe->attr.max_cq);
        if (err)
                goto err6;

        err = rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR,
                            rxe->attr.max_mr);
        if (err)
                goto err7;

        err = rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW,
                            rxe->attr.max_mw);
        if (err)
                goto err8;

        err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
                            rxe->attr.max_mcast_grp);
        if (err)
                goto err9;

        err = rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM,
                            rxe->attr.max_total_mcast_qp_attach);
        if (err)
                goto err10;

        return 0;

err10:
        rxe_pool_cleanup(&rxe->mc_grp_pool);
err9:
        rxe_pool_cleanup(&rxe->mw_pool);
err8:
        rxe_pool_cleanup(&rxe->mr_pool);
err7:
        rxe_pool_cleanup(&rxe->cq_pool);
err6:
        rxe_pool_cleanup(&rxe->qp_pool);
err5:
        rxe_pool_cleanup(&rxe->srq_pool);
err4:
        rxe_pool_cleanup(&rxe->ah_pool);
err3:
        rxe_pool_cleanup(&rxe->pd_pool);
err2:
        rxe_pool_cleanup(&rxe->uc_pool);
err1:
        return err;
}
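/* The unwind labels above follow the usual kernel goto-cleanup pattern:
 * a failure initializing pool N jumps to the label that releases pools
 * N-1 down to 1 in strict reverse order of creation, so nothing is ever
 * freed twice and nothing already allocated is leaked.
 */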
/* initialize rxe device state */
static int rxe_init(struct rxe_dev *rxe)
{
        int err;

        /* init default device parameters */
        rxe_init_device_param(rxe);

        err = rxe_init_ports(rxe);
        if (err)
                goto err1;

        err = rxe_init_pools(rxe);
        if (err)
                goto err2;

        /* init pending mmap list */
        spin_lock_init(&rxe->mmap_offset_lock);
        spin_lock_init(&rxe->pending_lock);
        INIT_LIST_HEAD(&rxe->pending_mmaps);

        mutex_init(&rxe->usdev_lock);

        return 0;

err2:
        rxe_cleanup_ports(rxe);
err1:
        return err;
}
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
{
        struct rxe_port *port = &rxe->port;
        enum ib_mtu mtu;

        mtu = eth_mtu_int_to_enum(ndev_mtu);

        /* Make sure that the new MTU is in range */
        mtu = mtu ? min_t(enum ib_mtu, mtu, IB_MTU_4096) : IB_MTU_256;

        port->attr.active_mtu = mtu;
        port->mtu_cap = ib_mtu_enum_to_int(mtu);
}
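/* Worked example (assuming eth_mtu_int_to_enum() first deducts rxe's
 * worst-case header overhead, as its definition in the rxe headers
 * suggests): a standard 1500-byte Ethernet MTU leaves room for 1024-byte
 * payloads, so active_mtu becomes IB_MTU_1024, while a 9000-byte jumbo
 * frame is clamped to the IB_MTU_4096 ceiling.
 */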
/* called by ifc layer to create new rxe device.
 * The caller should allocate memory for rxe by calling ib_alloc_device.
 */
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
{
        int err;

        err = rxe_init(rxe);
        if (err)
                return err;

        rxe_set_mtu(rxe, mtu);

        return rxe_register_device(rxe, ibdev_name);
}
static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
{
        struct rxe_dev *exists;
        int err = 0;

        exists = rxe_get_dev_from_net(ndev);
        if (exists) {
                ib_device_put(&exists->ib_dev);
                pr_err("already configured on %s\n", ndev->name);
                err = -EEXIST;
                goto err;
        }

        err = rxe_net_add(ibdev_name, ndev);
        if (err) {
                pr_err("failed to add %s\n", ndev->name);
                goto err;
        }

err:
        return err;
}
static struct rdma_link_ops rxe_link_ops = {
        .type = "rxe",
        .newlink = rxe_newlink,
};
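/* Once these link ops are registered, a device is typically instantiated
 * from userspace with iproute2, e.g.:
 *
 *	rdma link add rxe0 type rxe netdev eth0
 *
 * where "rxe0" and "eth0" are illustrative names; the request is routed
 * to rxe_newlink() above.
 */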
static int __init rxe_module_init(void)
{
        int err;

        /* initialize slab caches for managed objects */
        err = rxe_cache_init();
        if (err) {
                pr_err("unable to init object pools\n");
                return err;
        }

        err = rxe_net_init();
        if (err)
                return err;

        rdma_link_register(&rxe_link_ops);
        pr_info("loaded\n");
        return 0;
}
static void __exit rxe_module_exit(void)
{
        rdma_link_unregister(&rxe_link_ops);
        ib_unregister_driver(RDMA_DRIVER_RXE);
        rxe_net_exit();
        rxe_cache_exit();

        pr_info("unloaded\n");
}
late_initcall(rxe_module_init);
module_exit(rxe_module_exit);

MODULE_ALIAS_RDMA_LINK("rxe");
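/* The alias above expands to "rdma-link-rxe", which should let the RDMA
 * netlink core demand-load this module the first time a link of type
 * "rxe" is requested, with no explicit modprobe required.
 */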