// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static const struct class hnae_class = {
	.name = "hnae",
};

static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(node, head);
	spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del_rcu(node);
	spin_unlock_irqrestore(lock, flags);
}

static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p = dev_alloc_pages(order);

	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (unlikely(!cb->priv))
		return;

	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)cb->priv);

	cb->priv = NULL;
}

static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -ENOMEM;

	return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

static struct hnae_buf_ops hnae_bops = {
	.alloc_buffer = hnae_alloc_buffer,
	.free_buffer = hnae_free_buffer,
	.map_buffer = hnae_map_buffer,
	.unmap_buffer = hnae_unmap_buffer,
};

static int __ae_match(struct device *dev, const void *data)
{
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

	if (dev_of_node(hdev->dev))
		return (data == &hdev->dev->of_node->fwnode);
	else if (is_acpi_node(hdev->dev->fwnode))
		return (data == hdev->dev->fwnode);

	dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
	return 0;
}

static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
	struct device *dev;

	WARN_ON(!fwnode);

	dev = class_find_device(&hnae_class, NULL, fwnode, __ae_match);

	return dev ? cls_to_ae_dev(dev) : NULL;
}

static void hnae_free_buffers(struct hnae_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hnae_free_buffer_detach(ring, i);
}

/* Allocate memory for raw pkg, and map with dma */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j);
	return ret;
}

/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 ring_to_dma_dir(ring));
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

/* alloc desc, without buffer attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
		ring->desc, size, ring_to_dma_dir(ring));
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
	if (is_rx_ring(ring))
		hnae_free_buffers(ring);

	hnae_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/* init ring, and with buffer for rx ring */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -ENODEV;

	ring->q = q;
	ring->flags = flags;
	ring->coal_param = q->handle->coal_param;
	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

	/* no matter for tx or rx ring, the ntu and ntc start from 0 */
	assert(ring->next_to_use == 0);
	assert(ring->next_to_clean == 0);

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
			GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hnae_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (is_rx_ring(ring)) {
		ret = hnae_alloc_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hnae_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
			   struct hnae_ae_dev *dev)
{
	int ret;

	q->dev = dev;
	q->handle = h;

	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
	if (ret)
		goto out;

	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
	if (ret)
		goto out_with_tx_ring;

	if (dev->ops->init_queue)
		dev->ops->init_queue(q);

	return 0;

out_with_tx_ring:
	hnae_fini_ring(&q->tx_ring);
out:
	return ret;
}

static void hnae_fini_queue(struct hnae_queue *q)
{
	if (q->dev->ops->fini_queue)
		q->dev->ops->fini_queue(q);

	hnae_fini_ring(&q->tx_ring);
	hnae_fini_ring(&q->rx_ring);
}

/*
 * ae_chain - define ae chain head
 */
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
	if (raw_notifier_chain_unregister(&ae_chain, nb))
		dev_err(NULL, "notifier chain unregister fail\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);

int hnae_reinit_handle(struct hnae_handle *handle)
{
	int i, j;
	int ret;

	for (i = 0; i < handle->q_num; i++) /* free ring*/
		hnae_fini_queue(handle->qs[i]);

	if (handle->dev->ops->reset)
		handle->dev->ops->reset(handle);

	for (i = 0; i < handle->q_num; i++) {/* reinit ring*/
		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
		if (ret)
			goto out_when_init_queue;
	}
	return 0;
out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);
	return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);

/* hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev that will use this handle
 * @fwnode: the fwnode used to find the matching AE device
 * @port_id: the port id for the handle
 * @bops: the callbacks for buffer management
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct fwnode_handle *fwnode,
				    u32 port_id,
				    struct hnae_buf_ops *bops)
{
	struct hnae_ae_dev *dev;
	struct hnae_handle *handle;
	int i, j;
	int ret;

	dev = find_ae(fwnode);
	if (!dev)
		return ERR_PTR(-ENODEV);

	handle = dev->ops->get_handle(dev, port_id);
	if (IS_ERR(handle)) {
		put_device(&dev->cls_dev);
		return handle;
	}

	handle->dev = dev;
	handle->owner_dev = owner_dev;
	handle->bops = bops ? bops : &hnae_bops;
	handle->eport_id = port_id;

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], dev);
		if (ret)
			goto out_when_init_queue;
	}

	__module_get(dev->owner);

	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

	return handle;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);

	put_device(&dev->cls_dev);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(hnae_get_handle);

void hnae_put_handle(struct hnae_handle *h)
{
	struct hnae_ae_dev *dev = h->dev;
	int i;

	for (i = 0; i < h->q_num; i++)
		hnae_fini_queue(h->qs[i]);

	if (h->dev->ops->reset)
		h->dev->ops->reset(h);

	hnae_list_del(&dev->lock, &h->node);

	if (dev->ops->put_handle)
		dev->ops->put_handle(h);

	module_put(dev->owner);

	put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);

static void hnae_release(struct device *dev)
{
}

/**
 * hnae_ae_register - register an AE engine to the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module who provides this dev
 * NOTE: the duplicated name will not be checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
	static atomic_t id = ATOMIC_INIT(-1);
	int ret;

	if (!hdev->dev)
		return -ENODEV;

	if (!hdev->ops || !hdev->ops->get_handle ||
	    !hdev->ops->toggle_ring_irq ||
	    !hdev->ops->get_status || !hdev->ops->adjust_link)
		return -EINVAL;

	hdev->owner = owner;
	hdev->id = (int)atomic_inc_return(&id);
	hdev->cls_dev.parent = hdev->dev;
	hdev->cls_dev.class = &hnae_class;
	hdev->cls_dev.release = hnae_release;
	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
	ret = device_register(&hdev->cls_dev);
	if (ret) {
		put_device(&hdev->cls_dev);
		return ret;
	}

	INIT_LIST_HEAD(&hdev->handle_list);
	spin_lock_init(&hdev->lock);

	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
	if (ret)
		dev_dbg(hdev->dev,
			"has not notifier for AE: %s\n", hdev->name);

	return 0;
}
EXPORT_SYMBOL(hnae_ae_register);

/**
 * hnae_ae_unregister - unregisters a HNAE AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
	device_unregister(&hdev->cls_dev);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
	return class_register(&hnae_class);
}

static void __exit hnae_exit(void)
{
	class_unregister(&hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */