// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
7 #include <rdma/rdma_netlink.h>
8 #include <net/addrconf.h>
12 MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
13 MODULE_DESCRIPTION("Soft RDMA transport");
14 MODULE_LICENSE("Dual BSD/GPL");
18 /* free resources for a rxe device all objects created for this device must
21 void rxe_dealloc(struct ib_device
*ib_dev
)
23 struct rxe_dev
*rxe
= container_of(ib_dev
, struct rxe_dev
, ib_dev
);
25 rxe_pool_cleanup(&rxe
->uc_pool
);
26 rxe_pool_cleanup(&rxe
->pd_pool
);
27 rxe_pool_cleanup(&rxe
->ah_pool
);
28 rxe_pool_cleanup(&rxe
->srq_pool
);
29 rxe_pool_cleanup(&rxe
->qp_pool
);
30 rxe_pool_cleanup(&rxe
->cq_pool
);
31 rxe_pool_cleanup(&rxe
->mr_pool
);
32 rxe_pool_cleanup(&rxe
->mw_pool
);
33 rxe_pool_cleanup(&rxe
->mc_grp_pool
);
34 rxe_pool_cleanup(&rxe
->mc_elem_pool
);
37 crypto_free_shash(rxe
->tfm
);
40 /* initialize rxe device parameters */
41 static void rxe_init_device_param(struct rxe_dev
*rxe
)
43 rxe
->max_inline_data
= RXE_MAX_INLINE_DATA
;
45 rxe
->attr
.vendor_id
= RXE_VENDOR_ID
;
46 rxe
->attr
.max_mr_size
= RXE_MAX_MR_SIZE
;
47 rxe
->attr
.page_size_cap
= RXE_PAGE_SIZE_CAP
;
48 rxe
->attr
.max_qp
= RXE_MAX_QP
;
49 rxe
->attr
.max_qp_wr
= RXE_MAX_QP_WR
;
50 rxe
->attr
.device_cap_flags
= RXE_DEVICE_CAP_FLAGS
;
51 rxe
->attr
.max_send_sge
= RXE_MAX_SGE
;
52 rxe
->attr
.max_recv_sge
= RXE_MAX_SGE
;
53 rxe
->attr
.max_sge_rd
= RXE_MAX_SGE_RD
;
54 rxe
->attr
.max_cq
= RXE_MAX_CQ
;
55 rxe
->attr
.max_cqe
= (1 << RXE_MAX_LOG_CQE
) - 1;
56 rxe
->attr
.max_mr
= RXE_MAX_MR
;
57 rxe
->attr
.max_pd
= RXE_MAX_PD
;
58 rxe
->attr
.max_qp_rd_atom
= RXE_MAX_QP_RD_ATOM
;
59 rxe
->attr
.max_res_rd_atom
= RXE_MAX_RES_RD_ATOM
;
60 rxe
->attr
.max_qp_init_rd_atom
= RXE_MAX_QP_INIT_RD_ATOM
;
61 rxe
->attr
.atomic_cap
= IB_ATOMIC_HCA
;
62 rxe
->attr
.max_mcast_grp
= RXE_MAX_MCAST_GRP
;
63 rxe
->attr
.max_mcast_qp_attach
= RXE_MAX_MCAST_QP_ATTACH
;
64 rxe
->attr
.max_total_mcast_qp_attach
= RXE_MAX_TOT_MCAST_QP_ATTACH
;
65 rxe
->attr
.max_ah
= RXE_MAX_AH
;
66 rxe
->attr
.max_srq
= RXE_MAX_SRQ
;
67 rxe
->attr
.max_srq_wr
= RXE_MAX_SRQ_WR
;
68 rxe
->attr
.max_srq_sge
= RXE_MAX_SRQ_SGE
;
69 rxe
->attr
.max_fast_reg_page_list_len
= RXE_MAX_FMR_PAGE_LIST_LEN
;
70 rxe
->attr
.max_pkeys
= RXE_MAX_PKEYS
;
71 rxe
->attr
.local_ca_ack_delay
= RXE_LOCAL_CA_ACK_DELAY
;
72 addrconf_addr_eui48((unsigned char *)&rxe
->attr
.sys_image_guid
,
75 rxe
->max_ucontext
= RXE_MAX_UCONTEXT
;
78 /* initialize port attributes */
79 static void rxe_init_port_param(struct rxe_port
*port
)
81 port
->attr
.state
= IB_PORT_DOWN
;
82 port
->attr
.max_mtu
= IB_MTU_4096
;
83 port
->attr
.active_mtu
= IB_MTU_256
;
84 port
->attr
.gid_tbl_len
= RXE_PORT_GID_TBL_LEN
;
85 port
->attr
.port_cap_flags
= RXE_PORT_PORT_CAP_FLAGS
;
86 port
->attr
.max_msg_sz
= RXE_PORT_MAX_MSG_SZ
;
87 port
->attr
.bad_pkey_cntr
= RXE_PORT_BAD_PKEY_CNTR
;
88 port
->attr
.qkey_viol_cntr
= RXE_PORT_QKEY_VIOL_CNTR
;
89 port
->attr
.pkey_tbl_len
= RXE_PORT_PKEY_TBL_LEN
;
90 port
->attr
.lid
= RXE_PORT_LID
;
91 port
->attr
.sm_lid
= RXE_PORT_SM_LID
;
92 port
->attr
.lmc
= RXE_PORT_LMC
;
93 port
->attr
.max_vl_num
= RXE_PORT_MAX_VL_NUM
;
94 port
->attr
.sm_sl
= RXE_PORT_SM_SL
;
95 port
->attr
.subnet_timeout
= RXE_PORT_SUBNET_TIMEOUT
;
96 port
->attr
.init_type_reply
= RXE_PORT_INIT_TYPE_REPLY
;
97 port
->attr
.active_width
= RXE_PORT_ACTIVE_WIDTH
;
98 port
->attr
.active_speed
= RXE_PORT_ACTIVE_SPEED
;
99 port
->attr
.phys_state
= RXE_PORT_PHYS_STATE
;
100 port
->mtu_cap
= ib_mtu_enum_to_int(IB_MTU_256
);
101 port
->subnet_prefix
= cpu_to_be64(RXE_PORT_SUBNET_PREFIX
);
104 /* initialize port state, note IB convention that HCA ports are always
107 static void rxe_init_ports(struct rxe_dev
*rxe
)
109 struct rxe_port
*port
= &rxe
->port
;
111 rxe_init_port_param(port
);
112 addrconf_addr_eui48((unsigned char *)&port
->port_guid
,
113 rxe
->ndev
->dev_addr
);
114 spin_lock_init(&port
->port_lock
);
117 /* init pools of managed objects */
118 static int rxe_init_pools(struct rxe_dev
*rxe
)
122 err
= rxe_pool_init(rxe
, &rxe
->uc_pool
, RXE_TYPE_UC
,
127 err
= rxe_pool_init(rxe
, &rxe
->pd_pool
, RXE_TYPE_PD
,
132 err
= rxe_pool_init(rxe
, &rxe
->ah_pool
, RXE_TYPE_AH
,
137 err
= rxe_pool_init(rxe
, &rxe
->srq_pool
, RXE_TYPE_SRQ
,
142 err
= rxe_pool_init(rxe
, &rxe
->qp_pool
, RXE_TYPE_QP
,
147 err
= rxe_pool_init(rxe
, &rxe
->cq_pool
, RXE_TYPE_CQ
,
152 err
= rxe_pool_init(rxe
, &rxe
->mr_pool
, RXE_TYPE_MR
,
157 err
= rxe_pool_init(rxe
, &rxe
->mw_pool
, RXE_TYPE_MW
,
162 err
= rxe_pool_init(rxe
, &rxe
->mc_grp_pool
, RXE_TYPE_MC_GRP
,
163 rxe
->attr
.max_mcast_grp
);
167 err
= rxe_pool_init(rxe
, &rxe
->mc_elem_pool
, RXE_TYPE_MC_ELEM
,
168 rxe
->attr
.max_total_mcast_qp_attach
);
175 rxe_pool_cleanup(&rxe
->mc_grp_pool
);
177 rxe_pool_cleanup(&rxe
->mw_pool
);
179 rxe_pool_cleanup(&rxe
->mr_pool
);
181 rxe_pool_cleanup(&rxe
->cq_pool
);
183 rxe_pool_cleanup(&rxe
->qp_pool
);
185 rxe_pool_cleanup(&rxe
->srq_pool
);
187 rxe_pool_cleanup(&rxe
->ah_pool
);
189 rxe_pool_cleanup(&rxe
->pd_pool
);
191 rxe_pool_cleanup(&rxe
->uc_pool
);
196 /* initialize rxe device state */
197 static int rxe_init(struct rxe_dev
*rxe
)
201 /* init default device parameters */
202 rxe_init_device_param(rxe
);
206 err
= rxe_init_pools(rxe
);
210 /* init pending mmap list */
211 spin_lock_init(&rxe
->mmap_offset_lock
);
212 spin_lock_init(&rxe
->pending_lock
);
213 INIT_LIST_HEAD(&rxe
->pending_mmaps
);
215 mutex_init(&rxe
->usdev_lock
);
220 void rxe_set_mtu(struct rxe_dev
*rxe
, unsigned int ndev_mtu
)
222 struct rxe_port
*port
= &rxe
->port
;
225 mtu
= eth_mtu_int_to_enum(ndev_mtu
);
227 /* Make sure that new MTU in range */
228 mtu
= mtu
? min_t(enum ib_mtu
, mtu
, IB_MTU_4096
) : IB_MTU_256
;
230 port
->attr
.active_mtu
= mtu
;
231 port
->mtu_cap
= ib_mtu_enum_to_int(mtu
);
/* called by ifc layer to create new rxe device.
 * The caller should allocate memory for rxe by calling ib_alloc_device.
 */
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
{
	int err;

	err = rxe_init(rxe);
	if (err)
		return err;

	rxe_set_mtu(rxe, mtu);

	return rxe_register_device(rxe, ibdev_name);
}
250 static int rxe_newlink(const char *ibdev_name
, struct net_device
*ndev
)
252 struct rxe_dev
*exists
;
255 if (is_vlan_dev(ndev
)) {
256 pr_err("rxe creation allowed on top of a real device only\n");
261 exists
= rxe_get_dev_from_net(ndev
);
263 ib_device_put(&exists
->ib_dev
);
264 pr_err("already configured on %s\n", ndev
->name
);
269 err
= rxe_net_add(ibdev_name
, ndev
);
271 pr_err("failed to add %s\n", ndev
->name
);
278 static struct rdma_link_ops rxe_link_ops
= {
280 .newlink
= rxe_newlink
,
283 static int __init
rxe_module_init(void)
287 err
= rxe_net_init();
291 rdma_link_register(&rxe_link_ops
);
292 rxe_initialized
= true;
297 static void __exit
rxe_module_exit(void)
299 rdma_link_unregister(&rxe_link_ops
);
300 ib_unregister_driver(RDMA_DRIVER_RXE
);
303 rxe_initialized
= false;
304 pr_info("unloaded\n");
307 late_initcall(rxe_module_init
);
308 module_exit(rxe_module_exit
);
310 MODULE_ALIAS_RDMA_LINK("rxe");