// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications Direct over loopback-ism device.
 *
 * Functions for loopback-ism device.
 *
 * Copyright (c) 2024, Alibaba Inc.
 *
 * Author: Wen Gu <guwen@linux.alibaba.com>
 *         Tony Lu <tonylu@linux.alibaba.com>
 */

#include <linux/device.h>
#include <linux/types.h>
#include <net/smc.h>

#include "smc_cdc.h"
#include "smc_ism.h"
#include "smc_loopback.h"

#define SMC_LO_V2_CAPABLE	0x1 /* loopback-ism acts as ISMv2 */
#define SMC_LO_SUPPORT_NOCOPY	0x1
#define SMC_DMA_ADDR_INVALID	(~(dma_addr_t)0)

static const char smc_lo_dev_name[] = "loopback-ism";
static struct smc_lo_dev *lo_dev;

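/* Generate the identity of the loopback-ism device: the 128-bit local
 * GID (gid + gid_ext) is taken from a freshly generated random UUID,
 * and the CHID is set to the reserved loopback value, so loopback-ism
 * devices are only ever matched with each other.
 */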
static void smc_lo_generate_ids(struct smc_lo_dev *ldev)
{
	struct smcd_gid *lgid = &ldev->local_gid;
	uuid_t uuid;

	uuid_gen(&uuid);
	memcpy(&lgid->gid, &uuid, sizeof(lgid->gid));
	memcpy(&lgid->gid_ext, (u8 *)&uuid + sizeof(lgid->gid),
	       sizeof(lgid->gid_ext));

	ldev->chid = SMC_LO_RESERVED_CHID;
}

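/* A loopback device can only reach itself, so a remote GID query
 * succeeds iff the queried GID equals our own local GID; the VLAN
 * arguments are irrelevant here and ignored.
 */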
static int smc_lo_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
			     u32 vid_valid, u32 vid)
{
	struct smc_lo_dev *ldev = smcd->priv;

	/* rgid should be the same as lgid */
	if (!ldev || rgid->gid != ldev->local_gid.gid ||
	    rgid->gid_ext != ldev->local_gid.gid_ext)
		return -ENETUNREACH;
	return 0;
}

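/* Register a new DMB: claim a free slot in the sba_idx bitmap, back
 * the DMB with plain kernel memory, and publish the node in the
 * per-device hash table under a random 64-bit token, retrying in the
 * (unlikely) event of a token collision.
 */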
static int smc_lo_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
			       void *client_priv)
{
	struct smc_lo_dmb_node *dmb_node, *tmp_node;
	struct smc_lo_dev *ldev = smcd->priv;
	int sba_idx, rc;

	/* check space for new dmb */
	for_each_clear_bit(sba_idx, ldev->sba_idx_mask, SMC_LO_MAX_DMBS) {
		if (!test_and_set_bit(sba_idx, ldev->sba_idx_mask))
			break;
	}
	if (sba_idx == SMC_LO_MAX_DMBS)
		return -ENOSPC;

	dmb_node = kzalloc(sizeof(*dmb_node), GFP_KERNEL);
	if (!dmb_node) {
		rc = -ENOMEM;
		goto err_bit;
	}

	dmb_node->sba_idx = sba_idx;
	dmb_node->len = dmb->dmb_len;
	dmb_node->cpu_addr = kzalloc(dmb_node->len, GFP_KERNEL |
				     __GFP_NOWARN | __GFP_NORETRY |
				     __GFP_NOMEMALLOC);
	if (!dmb_node->cpu_addr) {
		rc = -ENOMEM;
		goto err_node;
	}
	dmb_node->dma_addr = SMC_DMA_ADDR_INVALID;
	refcount_set(&dmb_node->refcnt, 1);

again:
	/* add new dmb into hash table */
	get_random_bytes(&dmb_node->token, sizeof(dmb_node->token));
	write_lock_bh(&ldev->dmb_ht_lock);
	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_node->token) {
		if (tmp_node->token == dmb_node->token) {
			write_unlock_bh(&ldev->dmb_ht_lock);
			goto again;
		}
	}
	hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
	write_unlock_bh(&ldev->dmb_ht_lock);
	atomic_inc(&ldev->dmb_cnt);

	dmb->sba_idx = dmb_node->sba_idx;
	dmb->dmb_tok = dmb_node->token;
	dmb->cpu_addr = dmb_node->cpu_addr;
	dmb->dma_addr = dmb_node->dma_addr;
	dmb->dmb_len = dmb_node->len;

	return 0;

err_node:
	kfree(dmb_node);
err_bit:
	clear_bit(sba_idx, ldev->sba_idx_mask);
	return rc;
}

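/* Final teardown of a DMB node, called once its refcount has dropped
 * to zero: unlink it from the hash table, release its bitmap slot and
 * buffer, and wake up anyone waiting in smc_lo_dev_exit() when the
 * last DMB of the device is gone.
 */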
static void __smc_lo_unregister_dmb(struct smc_lo_dev *ldev,
				    struct smc_lo_dmb_node *dmb_node)
{
	/* remove dmb from hash table */
	write_lock_bh(&ldev->dmb_ht_lock);
	hash_del(&dmb_node->list);
	write_unlock_bh(&ldev->dmb_ht_lock);

	clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
	kvfree(dmb_node->cpu_addr);
	kfree(dmb_node);

	if (atomic_dec_and_test(&ldev->dmb_cnt))
		wake_up(&ldev->ldev_release);
}

static int smc_lo_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
	struct smc_lo_dev *ldev = smcd->priv;

	/* find dmb from hash table */
	read_lock_bh(&ldev->dmb_ht_lock);
	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
		if (tmp_node->token == dmb->dmb_tok) {
			dmb_node = tmp_node;
			break;
		}
	}
	if (!dmb_node) {
		read_unlock_bh(&ldev->dmb_ht_lock);
		return -EINVAL;
	}
	read_unlock_bh(&ldev->dmb_ht_lock);

	if (refcount_dec_and_test(&dmb_node->refcnt))
		__smc_lo_unregister_dmb(ldev, dmb_node);
	return 0;
}

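/* Loopback-ism supports merging the local sndbuf with the peer DMB
 * (the "nocopy" mode): smc_lo_attach_dmb() takes an extra reference
 * on the peer DMB and exposes its buffer to the sender, and
 * smc_lo_detach_dmb() drops that reference again.
 */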
static int smc_lo_support_dmb_nocopy(struct smcd_dev *smcd)
{
	return SMC_LO_SUPPORT_NOCOPY;
}

static int smc_lo_attach_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
	struct smc_lo_dev *ldev = smcd->priv;

	/* find dmb_node according to dmb->dmb_tok */
	read_lock_bh(&ldev->dmb_ht_lock);
	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
		if (tmp_node->token == dmb->dmb_tok) {
			dmb_node = tmp_node;
			break;
		}
	}
	if (!dmb_node) {
		read_unlock_bh(&ldev->dmb_ht_lock);
		return -EINVAL;
	}
	read_unlock_bh(&ldev->dmb_ht_lock);

	if (!refcount_inc_not_zero(&dmb_node->refcnt))
		/* the dmb is being unregistered, but has
		 * not been removed from the hash table.
		 */
		return -EINVAL;

	/* provide dmb information */
	dmb->sba_idx = dmb_node->sba_idx;
	dmb->dmb_tok = dmb_node->token;
	dmb->cpu_addr = dmb_node->cpu_addr;
	dmb->dma_addr = dmb_node->dma_addr;
	dmb->dmb_len = dmb_node->len;
	return 0;
}

static int smc_lo_detach_dmb(struct smcd_dev *smcd, u64 token)
{
	struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
	struct smc_lo_dev *ldev = smcd->priv;

	/* find dmb_node according to the token */
	read_lock_bh(&ldev->dmb_ht_lock);
	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
		if (tmp_node->token == token) {
			dmb_node = tmp_node;
			break;
		}
	}
	if (!dmb_node) {
		read_unlock_bh(&ldev->dmb_ht_lock);
		return -EINVAL;
	}
	read_unlock_bh(&ldev->dmb_ht_lock);

	if (refcount_dec_and_test(&dmb_node->refcnt))
		__smc_lo_unregister_dmb(ldev, dmb_node);
	return 0;
}

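/* With nocopy in effect the payload is written into the peer DMB
 * directly, so a plain data move is a no-op here; only moves that
 * carry the signal flag (sf), e.g. CDC messages, are copied into the
 * target DMB and the receiver is notified via its RX tasklet.
 */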
static int smc_lo_move_data(struct smcd_dev *smcd, u64 dmb_tok,
			    unsigned int idx, bool sf, unsigned int offset,
			    void *data, unsigned int size)
{
	struct smc_lo_dmb_node *rmb_node = NULL, *tmp_node;
	struct smc_lo_dev *ldev = smcd->priv;
	struct smc_connection *conn;

	if (!sf)
		/* since sndbuf is merged with peer DMB, there is
		 * no need to copy data from sndbuf to peer DMB.
		 */
		return 0;

	read_lock_bh(&ldev->dmb_ht_lock);
	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
		if (tmp_node->token == dmb_tok) {
			rmb_node = tmp_node;
			break;
		}
	}
	if (!rmb_node) {
		read_unlock_bh(&ldev->dmb_ht_lock);
		return -EINVAL;
	}
	memcpy((char *)rmb_node->cpu_addr + offset, data, size);
	read_unlock_bh(&ldev->dmb_ht_lock);

	conn = smcd->conn[rmb_node->sba_idx];
	if (!conn || conn->killed)
		return -EPIPE;
	tasklet_schedule(&conn->rx_tsklet);
	return 0;
}

static int smc_lo_supports_v2(void)
{
	return SMC_LO_V2_CAPABLE;
}

static void smc_lo_get_local_gid(struct smcd_dev *smcd,
				 struct smcd_gid *smcd_gid)
{
	struct smc_lo_dev *ldev = smcd->priv;

	smcd_gid->gid = ldev->local_gid.gid;
	smcd_gid->gid_ext = ldev->local_gid.gid_ext;
}

static u16 smc_lo_get_chid(struct smcd_dev *smcd)
{
	return ((struct smc_lo_dev *)smcd->priv)->chid;
}

static struct device *smc_lo_get_dev(struct smcd_dev *smcd)
{
	return &((struct smc_lo_dev *)smcd->priv)->dev;
}

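/* smcd_ops of the loopback-ism device. VLAN handling and event
 * signalling are not needed on loopback, so those callbacks stay NULL.
 */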
static const struct smcd_ops lo_ops = {
	.query_remote_gid = smc_lo_query_rgid,
	.register_dmb = smc_lo_register_dmb,
	.unregister_dmb = smc_lo_unregister_dmb,
	.support_dmb_nocopy = smc_lo_support_dmb_nocopy,
	.attach_dmb = smc_lo_attach_dmb,
	.detach_dmb = smc_lo_detach_dmb,
	.add_vlan_id = NULL,
	.del_vlan_id = NULL,
	.set_vlan_required = NULL,
	.reset_vlan_required = NULL,
	.signal_event = NULL,
	.move_data = smc_lo_move_data,
	.supports_v2 = smc_lo_supports_v2,
	.get_local_gid = smc_lo_get_local_gid,
	.get_chid = smc_lo_get_chid,
	.get_dev = smc_lo_get_dev,
};

static struct smcd_dev *smcd_lo_alloc_dev(const struct smcd_ops *ops,
					  int max_dmbs)
{
	struct smcd_dev *smcd;

	smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
	if (!smcd)
		return NULL;

	smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
			     GFP_KERNEL);
	if (!smcd->conn)
		goto out_smcd;

	smcd->ops = ops;

	spin_lock_init(&smcd->lock);
	spin_lock_init(&smcd->lgr_lock);
	INIT_LIST_HEAD(&smcd->vlan);
	INIT_LIST_HEAD(&smcd->lgr_list);
	init_waitqueue_head(&smcd->lgrs_deleted);
	return smcd;

out_smcd:
	kfree(smcd);
	return NULL;
}

static int smcd_lo_register_dev(struct smc_lo_dev *ldev)
{
	struct smcd_dev *smcd;

	smcd = smcd_lo_alloc_dev(&lo_ops, SMC_LO_MAX_DMBS);
	if (!smcd)
		return -ENOMEM;
	ldev->smcd = smcd;
	smcd->priv = ldev;
	smc_ism_set_v2_capable();
	mutex_lock(&smcd_dev_list.mutex);
	list_add(&smcd->list, &smcd_dev_list.list);
	mutex_unlock(&smcd_dev_list.mutex);
	pr_warn_ratelimited("smc: adding smcd device %s\n",
			    dev_name(&ldev->dev));
	return 0;
}

static void smcd_lo_unregister_dev(struct smc_lo_dev *ldev)
{
	struct smcd_dev *smcd = ldev->smcd;

	pr_warn_ratelimited("smc: removing smcd device %s\n",
			    dev_name(&ldev->dev));
	smcd->going_away = 1;
	smc_smcd_terminate_all(smcd);
	mutex_lock(&smcd_dev_list.mutex);
	list_del_init(&smcd->list);
	mutex_unlock(&smcd_dev_list.mutex);
	kfree(smcd->conn);
	kfree(smcd);
}

static int smc_lo_dev_init(struct smc_lo_dev *ldev)
{
	smc_lo_generate_ids(ldev);
	rwlock_init(&ldev->dmb_ht_lock);
	hash_init(ldev->dmb_ht);
	atomic_set(&ldev->dmb_cnt, 0);
	init_waitqueue_head(&ldev->ldev_release);

	return smcd_lo_register_dev(ldev);
}

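/* Device teardown first unregisters the smcd device (terminating all
 * link groups on it) and then waits until every remaining DMB has been
 * released, so the DMB bookkeeping cannot outlive the device.
 */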
static void smc_lo_dev_exit(struct smc_lo_dev *ldev)
{
	smcd_lo_unregister_dev(ldev);
	if (atomic_read(&ldev->dmb_cnt))
		wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
}

static void smc_lo_dev_release(struct device *dev)
{
	struct smc_lo_dev *ldev =
		container_of(dev, struct smc_lo_dev, dev);

	kfree(ldev);
}

static int smc_lo_dev_probe(void)
{
	struct smc_lo_dev *ldev;
	int ret;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return -ENOMEM;

	ldev->dev.parent = NULL;
	ldev->dev.release = smc_lo_dev_release;
	device_initialize(&ldev->dev);
	dev_set_name(&ldev->dev, smc_lo_dev_name);

	ret = smc_lo_dev_init(ldev);
	if (ret)
		goto free_dev;

	lo_dev = ldev; /* global loopback device */
	return 0;

free_dev:
	put_device(&ldev->dev);
	return ret;
}

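/* Dropping the reference taken by device_initialize() ends up in
 * smc_lo_dev_release(), which frees the loopback device itself.
 */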
static void smc_lo_dev_remove(void)
{
	if (!lo_dev)
		return;

	smc_lo_dev_exit(lo_dev);
	put_device(&lo_dev->dev); /* device_initialize in smc_lo_dev_probe */
}

int smc_loopback_init(void)
{
	return smc_lo_dev_probe();
}

void smc_loopback_exit(void)
{
	smc_lo_dev_remove();
}