/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include "hnae.h"
#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
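/* the class under which every registered AE engine is exposed as hnae%d */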
static struct class *hnae_class;
static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(node, head);
	spin_unlock_irqrestore(lock, flags);
}
static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del_rcu(node);
	spin_unlock_irqrestore(lock, flags);
}
static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p = dev_alloc_pages(order);

	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;

	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}
static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)cb->priv);
	memset(cb, 0, sizeof(*cb));
}
static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -ENOMEM;

	return 0;
}
static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}
static struct hnae_buf_ops hnae_bops = {
	.alloc_buffer = hnae_alloc_buffer,
	.free_buffer = hnae_free_buffer,
	.map_buffer = hnae_map_buffer,
	.unmap_buffer = hnae_unmap_buffer,
};
static int __ae_match(struct device *dev, const void *data)
{
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

	if (dev_of_node(hdev->dev))
		return (data == &hdev->dev->of_node->fwnode);
	else if (is_acpi_node(hdev->dev->fwnode))
		return (data == hdev->dev->fwnode);

	dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
	return 0;
}
static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
	struct device *dev;

	WARN_ON(!fwnode);

	dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);

	/* class_find_device() took a reference on the class device; the
	 * caller must balance it with put_device(&dev->cls_dev)
	 */
	return dev ? cls_to_ae_dev(dev) : NULL;
}
/* detach and free every buffer attached to the ring's descriptors */
static void hnae_free_buffers(struct hnae_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hnae_free_buffer_detach(ring, i);
}
/* allocate memory for the raw packet buffers, and map them for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j);
	return ret;
}
/* free the descriptor array along with its attached buffers */
static void hnae_free_desc(struct hnae_ring *ring)
{
	hnae_free_buffers(ring);
	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 ring_to_dma_dir(ring));
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}
/* allocate the descriptor array, without any buffer attached yet */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
		ring->desc, size, ring_to_dma_dir(ring));
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}
/* finalize the ring, freeing its descriptors and attached buffers */
static void hnae_fini_ring(struct hnae_ring *ring)
{
	hnae_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}
/* initialize the ring; rx rings also get their buffers attached here */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->q = q;
	ring->flags = flags;
	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

	/* no matter whether it is a tx or rx ring, ntu and ntc start from 0 */
	assert(ring->next_to_use == 0);
	assert(ring->next_to_clean == 0);

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
			GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hnae_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (is_rx_ring(ring)) {
		ret = hnae_alloc_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hnae_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}
static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
			   struct hnae_ae_dev *dev)
{
	int ret;

	q->dev = dev;
	q->handle = h;

	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
	if (ret)
		return ret;

	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
	if (ret)
		goto out_with_tx_ring;

	if (dev->ops->init_queue)
		dev->ops->init_queue(q);

	return 0;

out_with_tx_ring:
	hnae_fini_ring(&q->tx_ring);

	return ret;
}
static void hnae_fini_queue(struct hnae_queue *q)
{
	if (q->dev->ops->fini_queue)
		q->dev->ops->fini_queue(q);

	hnae_fini_ring(&q->tx_ring);
	hnae_fini_ring(&q->rx_ring);
}
/*
 * ae_chain - the notifier chain head through which AE registration events
 * are reported
 */
static RAW_NOTIFIER_HEAD(ae_chain);
int hnae_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);
void hnae_unregister_notifier(struct notifier_block *nb)
{
	if (raw_notifier_chain_unregister(&ae_chain, nb))
		dev_err(NULL, "notifier chain unregister fail\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);
int hnae_reinit_handle(struct hnae_handle *handle)
{
	int i, j;
	int ret;

	for (i = 0; i < handle->q_num; i++) /* free ring */
		hnae_fini_queue(handle->qs[i]);

	if (handle->dev->ops->reset)
		handle->dev->ops->reset(handle);

	for (i = 0; i < handle->q_num; i++) { /* reinit ring */
		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
		if (ret)
			goto out_when_init_queue;
	}
	return 0;
out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);
	return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);
/* hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev which uses this handle
 * @fwnode: the fwnode of the AE to be used
 * @port_id: the port id of the handle
 * @bops: the callbacks for buffer management; NULL selects hnae_bops
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct fwnode_handle *fwnode,
				    u32 port_id,
				    struct hnae_buf_ops *bops)
{
	struct hnae_ae_dev *dev;
	struct hnae_handle *handle;
	int i, j;
	int ret;

	dev = find_ae(fwnode);
	if (!dev)
		return ERR_PTR(-ENODEV);

	handle = dev->ops->get_handle(dev, port_id);
	if (IS_ERR(handle)) {
		put_device(&dev->cls_dev);
		return handle;
	}

	handle->dev = dev;
	handle->owner_dev = owner_dev;
	handle->bops = bops ? bops : &hnae_bops;
	handle->eport_id = port_id;

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], dev);
		if (ret)
			goto out_when_init_queue;
	}

	__module_get(dev->owner);

	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

	return handle;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);

	put_device(&dev->cls_dev);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(hnae_get_handle);
void hnae_put_handle(struct hnae_handle *h)
{
	struct hnae_ae_dev *dev = h->dev;
	int i;

	for (i = 0; i < h->q_num; i++)
		hnae_fini_queue(h->qs[i]);

	if (h->dev->ops->reset)
		h->dev->ops->reset(h);

	hnae_list_del(&dev->lock, &h->node);

	if (dev->ops->put_handle)
		dev->ops->put_handle(h);

	module_put(dev->owner);

	put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);
static void hnae_release(struct device *dev)
{
}
/**
 * hnae_ae_register - register an AE engine to the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module that provides this dev
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
	static atomic_t id = ATOMIC_INIT(-1);
	int ret;

	if (!hdev->dev)
		return -ENODEV;

	if (!hdev->ops || !hdev->ops->get_handle ||
	    !hdev->ops->toggle_ring_irq ||
	    !hdev->ops->get_status || !hdev->ops->adjust_link)
		return -EINVAL;

	hdev->owner = owner;
	hdev->id = (int)atomic_inc_return(&id);
	hdev->cls_dev.parent = hdev->dev;
	hdev->cls_dev.class = hnae_class;
	hdev->cls_dev.release = hnae_release;
	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
	ret = device_register(&hdev->cls_dev);
	if (ret)
		return ret;

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&hdev->handle_list);
	spin_lock_init(&hdev->lock);

	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
	if (ret)
		dev_dbg(hdev->dev,
			"no notifier for AE: %s\n", hdev->name);

	return 0;
}
EXPORT_SYMBOL(hnae_ae_register);
/**
 * hnae_ae_unregister - unregisters an HNAE AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
	device_unregister(&hdev->cls_dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);
static int __init hnae_init(void)
{
	hnae_class = class_create(THIS_MODULE, "hnae");
	return PTR_ERR_OR_ZERO(hnae_class);
}
static void __exit hnae_exit(void)
{
	class_destroy(hnae_class);
}
subsys_initcall(hnae_init);
module_exit(hnae_exit);
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */