/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
37 MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
38 MODULE_DESCRIPTION("Soft RDMA transport");
39 MODULE_LICENSE("Dual BSD/GPL");
40 MODULE_VERSION("0.2");
42 /* free resources for all ports on a device */
43 static void rxe_cleanup_ports(struct rxe_dev
*rxe
)
45 kfree(rxe
->port
.pkey_tbl
);
46 rxe
->port
.pkey_tbl
= NULL
;
50 /* free resources for a rxe device all objects created for this device must
53 static void rxe_cleanup(struct rxe_dev
*rxe
)
55 rxe_pool_cleanup(&rxe
->uc_pool
);
56 rxe_pool_cleanup(&rxe
->pd_pool
);
57 rxe_pool_cleanup(&rxe
->ah_pool
);
58 rxe_pool_cleanup(&rxe
->srq_pool
);
59 rxe_pool_cleanup(&rxe
->qp_pool
);
60 rxe_pool_cleanup(&rxe
->cq_pool
);
61 rxe_pool_cleanup(&rxe
->mr_pool
);
62 rxe_pool_cleanup(&rxe
->mw_pool
);
63 rxe_pool_cleanup(&rxe
->mc_grp_pool
);
64 rxe_pool_cleanup(&rxe
->mc_elem_pool
);
66 rxe_cleanup_ports(rxe
);
69 /* called when all references have been dropped */
70 void rxe_release(struct kref
*kref
)
72 struct rxe_dev
*rxe
= container_of(kref
, struct rxe_dev
, ref_cnt
);
75 ib_dealloc_device(&rxe
->ib_dev
);
78 void rxe_dev_put(struct rxe_dev
*rxe
)
80 kref_put(&rxe
->ref_cnt
, rxe_release
);
82 EXPORT_SYMBOL_GPL(rxe_dev_put
);
84 /* initialize rxe device parameters */
85 static int rxe_init_device_param(struct rxe_dev
*rxe
)
87 rxe
->max_inline_data
= RXE_MAX_INLINE_DATA
;
89 rxe
->attr
.fw_ver
= RXE_FW_VER
;
90 rxe
->attr
.max_mr_size
= RXE_MAX_MR_SIZE
;
91 rxe
->attr
.page_size_cap
= RXE_PAGE_SIZE_CAP
;
92 rxe
->attr
.vendor_id
= RXE_VENDOR_ID
;
93 rxe
->attr
.vendor_part_id
= RXE_VENDOR_PART_ID
;
94 rxe
->attr
.hw_ver
= RXE_HW_VER
;
95 rxe
->attr
.max_qp
= RXE_MAX_QP
;
96 rxe
->attr
.max_qp_wr
= RXE_MAX_QP_WR
;
97 rxe
->attr
.device_cap_flags
= RXE_DEVICE_CAP_FLAGS
;
98 rxe
->attr
.max_sge
= RXE_MAX_SGE
;
99 rxe
->attr
.max_sge_rd
= RXE_MAX_SGE_RD
;
100 rxe
->attr
.max_cq
= RXE_MAX_CQ
;
101 rxe
->attr
.max_cqe
= (1 << RXE_MAX_LOG_CQE
) - 1;
102 rxe
->attr
.max_mr
= RXE_MAX_MR
;
103 rxe
->attr
.max_pd
= RXE_MAX_PD
;
104 rxe
->attr
.max_qp_rd_atom
= RXE_MAX_QP_RD_ATOM
;
105 rxe
->attr
.max_ee_rd_atom
= RXE_MAX_EE_RD_ATOM
;
106 rxe
->attr
.max_res_rd_atom
= RXE_MAX_RES_RD_ATOM
;
107 rxe
->attr
.max_qp_init_rd_atom
= RXE_MAX_QP_INIT_RD_ATOM
;
108 rxe
->attr
.max_ee_init_rd_atom
= RXE_MAX_EE_INIT_RD_ATOM
;
109 rxe
->attr
.atomic_cap
= RXE_ATOMIC_CAP
;
110 rxe
->attr
.max_ee
= RXE_MAX_EE
;
111 rxe
->attr
.max_rdd
= RXE_MAX_RDD
;
112 rxe
->attr
.max_mw
= RXE_MAX_MW
;
113 rxe
->attr
.max_raw_ipv6_qp
= RXE_MAX_RAW_IPV6_QP
;
114 rxe
->attr
.max_raw_ethy_qp
= RXE_MAX_RAW_ETHY_QP
;
115 rxe
->attr
.max_mcast_grp
= RXE_MAX_MCAST_GRP
;
116 rxe
->attr
.max_mcast_qp_attach
= RXE_MAX_MCAST_QP_ATTACH
;
117 rxe
->attr
.max_total_mcast_qp_attach
= RXE_MAX_TOT_MCAST_QP_ATTACH
;
118 rxe
->attr
.max_ah
= RXE_MAX_AH
;
119 rxe
->attr
.max_fmr
= RXE_MAX_FMR
;
120 rxe
->attr
.max_map_per_fmr
= RXE_MAX_MAP_PER_FMR
;
121 rxe
->attr
.max_srq
= RXE_MAX_SRQ
;
122 rxe
->attr
.max_srq_wr
= RXE_MAX_SRQ_WR
;
123 rxe
->attr
.max_srq_sge
= RXE_MAX_SRQ_SGE
;
124 rxe
->attr
.max_fast_reg_page_list_len
= RXE_MAX_FMR_PAGE_LIST_LEN
;
125 rxe
->attr
.max_pkeys
= RXE_MAX_PKEYS
;
126 rxe
->attr
.local_ca_ack_delay
= RXE_LOCAL_CA_ACK_DELAY
;
128 rxe
->max_ucontext
= RXE_MAX_UCONTEXT
;
133 /* initialize port attributes */
134 static int rxe_init_port_param(struct rxe_port
*port
)
136 port
->attr
.state
= RXE_PORT_STATE
;
137 port
->attr
.max_mtu
= RXE_PORT_MAX_MTU
;
138 port
->attr
.active_mtu
= RXE_PORT_ACTIVE_MTU
;
139 port
->attr
.gid_tbl_len
= RXE_PORT_GID_TBL_LEN
;
140 port
->attr
.port_cap_flags
= RXE_PORT_PORT_CAP_FLAGS
;
141 port
->attr
.max_msg_sz
= RXE_PORT_MAX_MSG_SZ
;
142 port
->attr
.bad_pkey_cntr
= RXE_PORT_BAD_PKEY_CNTR
;
143 port
->attr
.qkey_viol_cntr
= RXE_PORT_QKEY_VIOL_CNTR
;
144 port
->attr
.pkey_tbl_len
= RXE_PORT_PKEY_TBL_LEN
;
145 port
->attr
.lid
= RXE_PORT_LID
;
146 port
->attr
.sm_lid
= RXE_PORT_SM_LID
;
147 port
->attr
.lmc
= RXE_PORT_LMC
;
148 port
->attr
.max_vl_num
= RXE_PORT_MAX_VL_NUM
;
149 port
->attr
.sm_sl
= RXE_PORT_SM_SL
;
150 port
->attr
.subnet_timeout
= RXE_PORT_SUBNET_TIMEOUT
;
151 port
->attr
.init_type_reply
= RXE_PORT_INIT_TYPE_REPLY
;
152 port
->attr
.active_width
= RXE_PORT_ACTIVE_WIDTH
;
153 port
->attr
.active_speed
= RXE_PORT_ACTIVE_SPEED
;
154 port
->attr
.phys_state
= RXE_PORT_PHYS_STATE
;
156 ib_mtu_enum_to_int(RXE_PORT_ACTIVE_MTU
);
157 port
->subnet_prefix
= cpu_to_be64(RXE_PORT_SUBNET_PREFIX
);
162 /* initialize port state, note IB convention that HCA ports are always
165 static int rxe_init_ports(struct rxe_dev
*rxe
)
167 struct rxe_port
*port
= &rxe
->port
;
169 rxe_init_port_param(port
);
171 if (!port
->attr
.pkey_tbl_len
|| !port
->attr
.gid_tbl_len
)
174 port
->pkey_tbl
= kcalloc(port
->attr
.pkey_tbl_len
,
175 sizeof(*port
->pkey_tbl
), GFP_KERNEL
);
180 port
->pkey_tbl
[0] = 0xffff;
181 port
->port_guid
= rxe_port_guid(rxe
);
183 spin_lock_init(&port
->port_lock
);
188 /* init pools of managed objects */
189 static int rxe_init_pools(struct rxe_dev
*rxe
)
193 err
= rxe_pool_init(rxe
, &rxe
->uc_pool
, RXE_TYPE_UC
,
198 err
= rxe_pool_init(rxe
, &rxe
->pd_pool
, RXE_TYPE_PD
,
203 err
= rxe_pool_init(rxe
, &rxe
->ah_pool
, RXE_TYPE_AH
,
208 err
= rxe_pool_init(rxe
, &rxe
->srq_pool
, RXE_TYPE_SRQ
,
213 err
= rxe_pool_init(rxe
, &rxe
->qp_pool
, RXE_TYPE_QP
,
218 err
= rxe_pool_init(rxe
, &rxe
->cq_pool
, RXE_TYPE_CQ
,
223 err
= rxe_pool_init(rxe
, &rxe
->mr_pool
, RXE_TYPE_MR
,
228 err
= rxe_pool_init(rxe
, &rxe
->mw_pool
, RXE_TYPE_MW
,
233 err
= rxe_pool_init(rxe
, &rxe
->mc_grp_pool
, RXE_TYPE_MC_GRP
,
234 rxe
->attr
.max_mcast_grp
);
238 err
= rxe_pool_init(rxe
, &rxe
->mc_elem_pool
, RXE_TYPE_MC_ELEM
,
239 rxe
->attr
.max_total_mcast_qp_attach
);
246 rxe_pool_cleanup(&rxe
->mc_grp_pool
);
248 rxe_pool_cleanup(&rxe
->mw_pool
);
250 rxe_pool_cleanup(&rxe
->mr_pool
);
252 rxe_pool_cleanup(&rxe
->cq_pool
);
254 rxe_pool_cleanup(&rxe
->qp_pool
);
256 rxe_pool_cleanup(&rxe
->srq_pool
);
258 rxe_pool_cleanup(&rxe
->ah_pool
);
260 rxe_pool_cleanup(&rxe
->pd_pool
);
262 rxe_pool_cleanup(&rxe
->uc_pool
);
267 /* initialize rxe device state */
268 static int rxe_init(struct rxe_dev
*rxe
)
272 /* init default device parameters */
273 rxe_init_device_param(rxe
);
275 err
= rxe_init_ports(rxe
);
279 err
= rxe_init_pools(rxe
);
283 /* init pending mmap list */
284 spin_lock_init(&rxe
->mmap_offset_lock
);
285 spin_lock_init(&rxe
->pending_lock
);
286 INIT_LIST_HEAD(&rxe
->pending_mmaps
);
287 INIT_LIST_HEAD(&rxe
->list
);
289 mutex_init(&rxe
->usdev_lock
);
294 rxe_cleanup_ports(rxe
);
299 int rxe_set_mtu(struct rxe_dev
*rxe
, unsigned int ndev_mtu
)
301 struct rxe_port
*port
= &rxe
->port
;
304 mtu
= eth_mtu_int_to_enum(ndev_mtu
);
306 /* Make sure that new MTU in range */
307 mtu
= mtu
? min_t(enum ib_mtu
, mtu
, RXE_PORT_MAX_MTU
) : IB_MTU_256
;
309 port
->attr
.active_mtu
= mtu
;
310 port
->mtu_cap
= ib_mtu_enum_to_int(mtu
);
314 EXPORT_SYMBOL(rxe_set_mtu
);
316 /* called by ifc layer to create new rxe device.
317 * The caller should allocate memory for rxe by calling ib_alloc_device.
319 int rxe_add(struct rxe_dev
*rxe
, unsigned int mtu
)
323 kref_init(&rxe
->ref_cnt
);
329 err
= rxe_set_mtu(rxe
, mtu
);
333 err
= rxe_register_device(rxe
);
343 EXPORT_SYMBOL(rxe_add
);
/* called by the ifc layer to remove a device */
void rxe_remove(struct rxe_dev *rxe)
{
	rxe_unregister_device(rxe);

	/* drop the initial reference taken in rxe_add() */
	rxe_dev_put(rxe);
}
EXPORT_SYMBOL(rxe_remove);
354 static int __init
rxe_module_init(void)
358 /* initialize slab caches for managed objects */
359 err
= rxe_cache_init();
361 pr_err("unable to init object pools\n");
365 err
= rxe_net_init();
373 static void __exit
rxe_module_exit(void)
379 pr_info("unloaded\n");
/* late_initcall ensures the networking stack is up before rxe loads */
late_initcall(rxe_module_init);
module_exit(rxe_module_exit);