/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");
/* free resources for all ports on a device */
static void rxe_cleanup_ports(struct rxe_dev *rxe)
{
	kfree(rxe->port.pkey_tbl);
	rxe->port.pkey_tbl = NULL;
}
/* free resources for a rxe device all objects created for this device must
 * have been destroyed
 */
static void rxe_cleanup(struct rxe_dev *rxe)
{
	rxe_pool_cleanup(&rxe->uc_pool);
	rxe_pool_cleanup(&rxe->pd_pool);
	rxe_pool_cleanup(&rxe->ah_pool);
	rxe_pool_cleanup(&rxe->srq_pool);
	rxe_pool_cleanup(&rxe->qp_pool);
	rxe_pool_cleanup(&rxe->cq_pool);
	rxe_pool_cleanup(&rxe->mr_pool);
	rxe_pool_cleanup(&rxe->mw_pool);
	rxe_pool_cleanup(&rxe->mc_grp_pool);
	rxe_pool_cleanup(&rxe->mc_elem_pool);

	rxe_cleanup_ports(rxe);

	crypto_free_shash(rxe->tfm);
}
/* called when all references have been dropped */
void rxe_release(struct kref *kref)
{
	struct rxe_dev *rxe = container_of(kref, struct rxe_dev, ref_cnt);

	rxe_cleanup(rxe);
	ib_dealloc_device(&rxe->ib_dev);
}
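/*
 * Note on the matching "put" (an assumption about rxe.h, not something
 * visible in this file): callers drop their reference via rxe_dev_put(),
 * expected to be a thin wrapper along the lines of
 *
 *	static inline void rxe_dev_put(struct rxe_dev *rxe)
 *	{
 *		kref_put(&rxe->ref_cnt, rxe_release);
 *	}
 *
 * so the final put on a device lands in rxe_release() above.
 */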
/* initialize rxe device parameters */
static int rxe_init_device_param(struct rxe_dev *rxe)
{
	rxe->max_inline_data			= RXE_MAX_INLINE_DATA;

	rxe->attr.fw_ver			= RXE_FW_VER;
	rxe->attr.max_mr_size			= RXE_MAX_MR_SIZE;
	rxe->attr.page_size_cap			= RXE_PAGE_SIZE_CAP;
	rxe->attr.vendor_id			= RXE_VENDOR_ID;
	rxe->attr.vendor_part_id		= RXE_VENDOR_PART_ID;
	rxe->attr.hw_ver			= RXE_HW_VER;
	rxe->attr.max_qp			= RXE_MAX_QP;
	rxe->attr.max_qp_wr			= RXE_MAX_QP_WR;
	rxe->attr.device_cap_flags		= RXE_DEVICE_CAP_FLAGS;
	rxe->attr.max_sge			= RXE_MAX_SGE;
	rxe->attr.max_sge_rd			= RXE_MAX_SGE_RD;
	rxe->attr.max_cq			= RXE_MAX_CQ;
	rxe->attr.max_cqe			= (1 << RXE_MAX_LOG_CQE) - 1;
	rxe->attr.max_mr			= RXE_MAX_MR;
	rxe->attr.max_pd			= RXE_MAX_PD;
	rxe->attr.max_qp_rd_atom		= RXE_MAX_QP_RD_ATOM;
	rxe->attr.max_ee_rd_atom		= RXE_MAX_EE_RD_ATOM;
	rxe->attr.max_res_rd_atom		= RXE_MAX_RES_RD_ATOM;
	rxe->attr.max_qp_init_rd_atom		= RXE_MAX_QP_INIT_RD_ATOM;
	rxe->attr.max_ee_init_rd_atom		= RXE_MAX_EE_INIT_RD_ATOM;
	rxe->attr.atomic_cap			= RXE_ATOMIC_CAP;
	rxe->attr.max_ee			= RXE_MAX_EE;
	rxe->attr.max_rdd			= RXE_MAX_RDD;
	rxe->attr.max_mw			= RXE_MAX_MW;
	rxe->attr.max_raw_ipv6_qp		= RXE_MAX_RAW_IPV6_QP;
	rxe->attr.max_raw_ethy_qp		= RXE_MAX_RAW_ETHY_QP;
	rxe->attr.max_mcast_grp			= RXE_MAX_MCAST_GRP;
	rxe->attr.max_mcast_qp_attach		= RXE_MAX_MCAST_QP_ATTACH;
	rxe->attr.max_total_mcast_qp_attach	= RXE_MAX_TOT_MCAST_QP_ATTACH;
	rxe->attr.max_ah			= RXE_MAX_AH;
	rxe->attr.max_fmr			= RXE_MAX_FMR;
	rxe->attr.max_map_per_fmr		= RXE_MAX_MAP_PER_FMR;
	rxe->attr.max_srq			= RXE_MAX_SRQ;
	rxe->attr.max_srq_wr			= RXE_MAX_SRQ_WR;
	rxe->attr.max_srq_sge			= RXE_MAX_SRQ_SGE;
	rxe->attr.max_fast_reg_page_list_len	= RXE_MAX_FMR_PAGE_LIST_LEN;
	rxe->attr.max_pkeys			= RXE_MAX_PKEYS;
	rxe->attr.local_ca_ack_delay		= RXE_LOCAL_CA_ACK_DELAY;

	rxe->max_ucontext			= RXE_MAX_UCONTEXT;

	return 0;
}
/* initialize port attributes */
static int rxe_init_port_param(struct rxe_port *port)
{
	port->attr.state		= RXE_PORT_STATE;
	port->attr.max_mtu		= RXE_PORT_MAX_MTU;
	port->attr.active_mtu		= RXE_PORT_ACTIVE_MTU;
	port->attr.gid_tbl_len		= RXE_PORT_GID_TBL_LEN;
	port->attr.port_cap_flags	= RXE_PORT_PORT_CAP_FLAGS;
	port->attr.max_msg_sz		= RXE_PORT_MAX_MSG_SZ;
	port->attr.bad_pkey_cntr	= RXE_PORT_BAD_PKEY_CNTR;
	port->attr.qkey_viol_cntr	= RXE_PORT_QKEY_VIOL_CNTR;
	port->attr.pkey_tbl_len		= RXE_PORT_PKEY_TBL_LEN;
	port->attr.lid			= RXE_PORT_LID;
	port->attr.sm_lid		= RXE_PORT_SM_LID;
	port->attr.lmc			= RXE_PORT_LMC;
	port->attr.max_vl_num		= RXE_PORT_MAX_VL_NUM;
	port->attr.sm_sl		= RXE_PORT_SM_SL;
	port->attr.subnet_timeout	= RXE_PORT_SUBNET_TIMEOUT;
	port->attr.init_type_reply	= RXE_PORT_INIT_TYPE_REPLY;
	port->attr.active_width		= RXE_PORT_ACTIVE_WIDTH;
	port->attr.active_speed		= RXE_PORT_ACTIVE_SPEED;
	port->attr.phys_state		= RXE_PORT_PHYS_STATE;
	port->mtu_cap			= ib_mtu_enum_to_int(RXE_PORT_ACTIVE_MTU);
	port->subnet_prefix		= cpu_to_be64(RXE_PORT_SUBNET_PREFIX);

	return 0;
}
/* initialize port state, note IB convention that HCA ports are always
 * numbered from 1
 */
static int rxe_init_ports(struct rxe_dev *rxe)
{
	struct rxe_port *port = &rxe->port;

	rxe_init_port_param(port);

	if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len)
		return -EINVAL;

	port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len,
				 sizeof(*port->pkey_tbl), GFP_KERNEL);
	if (!port->pkey_tbl)
		return -ENOMEM;

	/* entry 0 is the IBA default (full-membership) partition key */
	port->pkey_tbl[0] = 0xffff;
	addrconf_addr_eui48((unsigned char *)&port->port_guid,
			    rxe->ndev->dev_addr);

	spin_lock_init(&port->port_lock);

	return 0;
}
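/*
 * Worked example of the GUID derivation above: addrconf_addr_eui48()
 * builds an EUI-64 from the 6-byte netdev MAC by inserting 0xff,0xfe in
 * the middle and flipping the universal/local bit of the first octet:
 *
 *	MAC  02:00:5e:10:20:30  ->  port_guid 00:00:5e:ff:fe:10:20:30
 */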
/* init pools of managed objects */
static int rxe_init_pools(struct rxe_dev *rxe)
{
	int err;

	err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
			    rxe->max_ucontext);
	if (err)
		goto err1;

	err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD,
			    rxe->attr.max_pd);
	if (err)
		goto err2;

	err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH,
			    rxe->attr.max_ah);
	if (err)
		goto err3;

	err = rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ,
			    rxe->attr.max_srq);
	if (err)
		goto err4;

	err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
			    rxe->attr.max_qp);
	if (err)
		goto err5;

	err = rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ,
			    rxe->attr.max_cq);
	if (err)
		goto err6;

	err = rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR,
			    rxe->attr.max_mr);
	if (err)
		goto err7;

	err = rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW,
			    rxe->attr.max_mw);
	if (err)
		goto err8;

	err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
			    rxe->attr.max_mcast_grp);
	if (err)
		goto err9;

	err = rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM,
			    rxe->attr.max_total_mcast_qp_attach);
	if (err)
		goto err10;

	return 0;

	/* unwind in reverse order: each label tears down only the pools
	 * that were successfully initialized before the failure
	 */
err10:
	rxe_pool_cleanup(&rxe->mc_grp_pool);
err9:
	rxe_pool_cleanup(&rxe->mw_pool);
err8:
	rxe_pool_cleanup(&rxe->mr_pool);
err7:
	rxe_pool_cleanup(&rxe->cq_pool);
err6:
	rxe_pool_cleanup(&rxe->qp_pool);
err5:
	rxe_pool_cleanup(&rxe->srq_pool);
err4:
	rxe_pool_cleanup(&rxe->ah_pool);
err3:
	rxe_pool_cleanup(&rxe->pd_pool);
err2:
	rxe_pool_cleanup(&rxe->uc_pool);
err1:
	return err;
}
/* initialize rxe device state */
static int rxe_init(struct rxe_dev *rxe)
{
	int err;

	/* init default device parameters */
	rxe_init_device_param(rxe);

	err = rxe_init_ports(rxe);
	if (err)
		goto err1;

	err = rxe_init_pools(rxe);
	if (err)
		goto err2;

	/* init pending mmap list */
	spin_lock_init(&rxe->mmap_offset_lock);
	spin_lock_init(&rxe->pending_lock);
	INIT_LIST_HEAD(&rxe->pending_mmaps);
	INIT_LIST_HEAD(&rxe->list);

	mutex_init(&rxe->usdev_lock);

	return 0;

err2:
	rxe_cleanup_ports(rxe);
err1:
	return err;
}
int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
{
	struct rxe_port *port = &rxe->port;
	enum ib_mtu mtu;

	mtu = eth_mtu_int_to_enum(ndev_mtu);

	/* Make sure that the new MTU is in range */
	mtu = mtu ? min_t(enum ib_mtu, mtu, RXE_PORT_MAX_MTU) : IB_MTU_256;

	port->attr.active_mtu = mtu;
	port->mtu_cap = ib_mtu_enum_to_int(mtu);

	return 0;
}
EXPORT_SYMBOL(rxe_set_mtu);
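/*
 * Worked example (assuming eth_mtu_int_to_enum() picks the largest IB MTU
 * whose payload plus RXE headers fits in one Ethernet frame): a standard
 * 1500-byte netdev MTU maps to IB_MTU_1024, while a 9000-byte jumbo MTU
 * maps to IB_MTU_4096, the RXE_PORT_MAX_MTU ceiling.
 */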
/* called by ifc layer to create new rxe device.
 * The caller should allocate memory for rxe by calling ib_alloc_device.
 */
int rxe_add(struct rxe_dev *rxe, unsigned int mtu)
{
	int err;

	kref_init(&rxe->ref_cnt);

	err = rxe_init(rxe);
	if (err)
		goto err1;

	err = rxe_set_mtu(rxe, mtu);
	if (err)
		goto err1;

	err = rxe_register_device(rxe);
	if (err)
		goto err1;

	return 0;

err1:
	rxe_dev_put(rxe);
	return err;
}
EXPORT_SYMBOL(rxe_add);
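/*
 * Sketch of the expected caller, modeled on the upstream net glue
 * (rxe_net.c); names outside this file are assumptions, only
 * ib_alloc_device() and rxe_add() are contractual:
 *
 *	struct rxe_dev *rxe;
 *
 *	rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
 *	if (!rxe)
 *		return -ENOMEM;
 *
 *	rxe->ndev = ndev;
 *	err = rxe_add(rxe, ndev->mtu);
 *
 * On failure rxe_add() drops the last reference itself (err1 path), so
 * the caller must not free the device again.
 */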
/* called by the ifc layer to remove a device */
void rxe_remove(struct rxe_dev *rxe)
{
	rxe_unregister_device(rxe);

	rxe_dev_put(rxe);
}
EXPORT_SYMBOL(rxe_remove);
static int __init rxe_module_init(void)
{
	int err;

	/* initialize slab caches for managed objects */
	err = rxe_cache_init();
	if (err) {
		pr_err("unable to init object pools\n");
		return err;
	}

	err = rxe_net_init();
	if (err)
		return err;

	pr_info("loaded\n");
	return 0;
}
static void __exit rxe_module_exit(void)
{
	rxe_remove_all();
	rxe_net_exit();
	rxe_cache_exit();
	pr_info("unloaded\n");
}
late_initcall(rxe_module_init);
module_exit(rxe_module_exit);
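/*
 * Usage sketch, not part of this file: assuming the module is built as
 * rdma_rxe with the usual "add" module parameter (see rxe_sysfs.c
 * upstream), a soft-RoCE device is bound to an Ethernet interface with:
 *
 *	# modprobe rdma_rxe
 *	# echo eth0 > /sys/module/rdma_rxe/parameters/add
 */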