// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;
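/* All AE engines are exposed as devices on this class; find_ae() below walks
 * it to match a consumer's fwnode against a registered engine.
 */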
static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(node, head);
	spin_unlock_irqrestore(lock, flags);
}
static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del_rcu(node);
	spin_unlock_irqrestore(lock, flags);
}
static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p = dev_alloc_pages(order);

	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}
static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (unlikely(!cb->priv))
		return;

	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)cb->priv);

	cb->priv = NULL;
}
static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}
static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}
static struct hnae_buf_ops hnae_bops = {
	.alloc_buffer = hnae_alloc_buffer,
	.free_buffer = hnae_free_buffer,
	.map_buffer = hnae_map_buffer,
	.unmap_buffer = hnae_unmap_buffer,
};
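/* These are the defaults: hnae_get_handle() falls back to &hnae_bops when
 * the caller passes a NULL @bops.
 */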
static int __ae_match(struct device *dev, const void *data)
{
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

	if (dev_of_node(hdev->dev))
		return (data == &hdev->dev->of_node->fwnode);
	else if (is_acpi_node(hdev->dev->fwnode))
		return (data == hdev->dev->fwnode);

	dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
	return 0;
}
static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
	struct device *dev;

	WARN_ON(!fwnode);

	/* class_find_device() takes a reference on the matched class device;
	 * it is dropped via put_device(&dev->cls_dev) when the handle is
	 * released or when getting a handle fails.
	 */
	dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);

	return dev ? cls_to_ae_dev(dev) : NULL;
}
static void hnae_free_buffers(struct hnae_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hnae_free_buffer_detach(ring, i);
}
/* Allocate memory for the raw packets and map it for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	/* roll back the buffers attached so far */
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j);
	return ret;
}
/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 ring_to_dma_dir(ring));
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}
/* alloc desc, without buffer attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
					     ring->desc, size,
					     ring_to_dma_dir(ring));
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}
/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
	if (is_rx_ring(ring))
		hnae_free_buffers(ring);

	hnae_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}
/* init ring, and with buffer for rx ring */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->q = q;
	ring->flags = flags;
	ring->coal_param = q->handle->coal_param;
	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

	/* no matter tx or rx ring, the ntc and ntu start from 0 */
	assert(ring->next_to_use == 0);
	assert(ring->next_to_clean == 0);

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hnae_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (is_rx_ring(ring)) {
		ret = hnae_alloc_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hnae_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}
static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
			   struct hnae_ae_dev *dev)
{
	int ret;

	q->dev = dev;
	q->handle = h;

	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
	if (ret)
		return ret;

	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
	if (ret)
		goto out_with_tx_ring;

	if (dev->ops->init_queue)
		dev->ops->init_queue(q);

	return 0;

out_with_tx_ring:
	hnae_fini_ring(&q->tx_ring);

	return ret;
}
static void hnae_fini_queue(struct hnae_queue *q)
{
	if (q->dev->ops->fini_queue)
		q->dev->ops->fini_queue(q);

	hnae_fini_ring(&q->tx_ring);
	hnae_fini_ring(&q->rx_ring);
}
/*
 * ae_chain - define ae chain head
 */
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
	if (raw_notifier_chain_unregister(&ae_chain, nb))
		dev_err(NULL, "notifier chain unregister fail\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);
int hnae_reinit_handle(struct hnae_handle *handle)
{
	int i, j;
	int ret;

	for (i = 0; i < handle->q_num; i++) /* free rings */
		hnae_fini_queue(handle->qs[i]);

	if (handle->dev->ops->reset)
		handle->dev->ops->reset(handle);

	for (i = 0; i < handle->q_num; i++) { /* reinit rings */
		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
		if (ret)
			goto out_when_init_queue;
	}

	return 0;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);
	return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);
/* hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev which uses this handle
 * @fwnode: the fwnode of the AE to be used
 * @port_id: the port id of the handle
 * @bops: the callbacks for buffer management, or NULL for the defaults
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct fwnode_handle *fwnode,
				    u32 port_id,
				    struct hnae_buf_ops *bops)
{
	struct hnae_ae_dev *dev;
	struct hnae_handle *handle;
	int i, j;
	int ret;

	dev = find_ae(fwnode);
	if (!dev)
		return ERR_PTR(-ENODEV);

	handle = dev->ops->get_handle(dev, port_id);
	if (IS_ERR(handle)) {
		put_device(&dev->cls_dev);
		return handle;
	}

	handle->dev = dev;
	handle->owner_dev = owner_dev;
	handle->bops = bops ? bops : &hnae_bops;
	handle->eport_id = port_id;

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], dev);
		if (ret)
			goto out_when_init_queue;
	}

	__module_get(dev->owner);

	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

	return handle;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);

	put_device(&dev->cls_dev);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(hnae_get_handle);
void hnae_put_handle(struct hnae_handle *h)
{
	struct hnae_ae_dev *dev = h->dev;
	int i;

	for (i = 0; i < h->q_num; i++)
		hnae_fini_queue(h->qs[i]);

	if (h->dev->ops->reset)
		h->dev->ops->reset(h);

	hnae_list_del(&dev->lock, &h->node);

	if (dev->ops->put_handle)
		dev->ops->put_handle(h);

	module_put(dev->owner);

	put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);
static void hnae_release(struct device *dev)
{
}
/**
 * hnae_ae_register - register an AE engine to the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module who provides this dev
 *
 * NOTE: the duplicated name will not be checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
	static atomic_t id = ATOMIC_INIT(-1);
	int ret;

	if (!hdev->dev)
		return -ENODEV;

	if (!hdev->ops || !hdev->ops->get_handle ||
	    !hdev->ops->toggle_ring_irq ||
	    !hdev->ops->get_status || !hdev->ops->adjust_link)
		return -EINVAL;

	hdev->owner = owner;
	hdev->id = (int)atomic_inc_return(&id);
	hdev->cls_dev.parent = hdev->dev;
	hdev->cls_dev.class = hnae_class;
	hdev->cls_dev.release = hnae_release;
	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
	ret = device_register(&hdev->cls_dev);
	if (ret)
		return ret;

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&hdev->handle_list);
	spin_lock_init(&hdev->lock);

	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
	if (ret)
		dev_dbg(hdev->dev,
			"no notifier registered for AE: %s\n", hdev->name);

	return 0;
}
EXPORT_SYMBOL(hnae_ae_register);
/**
 * hnae_ae_unregister - unregisters a HNAE AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
	device_unregister(&hdev->cls_dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);
static int __init hnae_init(void)
{
	hnae_class = class_create(THIS_MODULE, "hnae");
	return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
	class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */