/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include "hnae.h"

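/*
 * An AE device embeds its class device (cls_dev); cls_to_ae_dev() recovers
 * the enclosing struct hnae_ae_dev from the class-device pointer that the
 * driver core hands back (see __ae_match() below).
 */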
#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

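/*
 * Handles live on their AE device's handle_list. Writers serialize with the
 * device's IRQ-safe spinlock, while the _rcu list ops keep the list walkable
 * by lockless RCU readers.
 */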
static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_add_tail_rcu(node, head);
        spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_del_rcu(node);
        spin_unlock_irqrestore(lock, flags);
}

static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        unsigned int order = hnae_page_order(ring);
        struct page *p = dev_alloc_pages(order);

        if (!p)
                return -ENOMEM;

        cb->priv = p;
        cb->page_offset = 0;
        cb->reuse_flag = 0;
        cb->buf = page_address(p);
        cb->length = hnae_page_size(ring);
        cb->type = DESC_TYPE_PAGE;

        return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        if (cb->type == DESC_TYPE_SKB)
                dev_kfree_skb_any((struct sk_buff *)cb->priv);
        else if (unlikely(is_rx_ring(ring)))
                put_page((struct page *)cb->priv);
        memset(cb, 0, sizeof(*cb));
}

static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
                               cb->length, ring_to_dma_dir(ring));

        if (dma_mapping_error(ring_to_dev(ring), cb->dma))
                return -ENOMEM;

        return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        if (cb->type == DESC_TYPE_SKB)
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
        else
                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
                               ring_to_dma_dir(ring));
}

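/*
 * Default buffer-management callbacks. hnae_get_handle() falls back to these
 * when the caller passes a NULL bops, so ENET drivers only supply their own
 * struct hnae_buf_ops when they need custom buffer handling.
 */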
static struct hnae_buf_ops hnae_bops = {
        .alloc_buffer = hnae_alloc_buffer,
        .free_buffer = hnae_free_buffer,
        .map_buffer = hnae_map_buffer,
        .unmap_buffer = hnae_unmap_buffer,
};

static int __ae_match(struct device *dev, const void *data)
{
        struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

        if (dev_of_node(hdev->dev))
                return (data == &hdev->dev->of_node->fwnode);
        else if (is_acpi_node(hdev->dev->fwnode))
                return (data == hdev->dev->fwnode);

        dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
        return 0;
}

static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
        struct device *dev;

        WARN_ON(!fwnode);

        dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);

        return dev ? cls_to_ae_dev(dev) : NULL;
}

static void hnae_free_buffers(struct hnae_ring *ring)
{
        int i;

        for (i = 0; i < ring->desc_num; i++)
                hnae_free_buffer_detach(ring, i);
}

/* allocate memory for the raw packet buffers, and map them for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
        int i, j, ret;

        for (i = 0; i < ring->desc_num; i++) {
                ret = hnae_alloc_buffer_attach(ring, i);
                if (ret)
                        goto out_buffer_fail;
        }

        return 0;

out_buffer_fail:
        for (j = i - 1; j >= 0; j--)
                hnae_free_buffer_detach(ring, j);
        return ret;
}

/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
                         ring->desc_num * sizeof(ring->desc[0]),
                         ring_to_dma_dir(ring));
        ring->desc_dma_addr = 0;
        kfree(ring->desc);
        ring->desc = NULL;
}

/* alloc desc, without buffer attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
        int size = ring->desc_num * sizeof(ring->desc[0]);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
                ring->desc, size, ring_to_dma_dir(ring));
        if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
        if (is_rx_ring(ring))
                hnae_free_buffers(ring);

        hnae_free_desc(ring);
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;
}

/* init ring, and with buffer for rx ring */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
        int ret;

        if (ring->desc_num <= 0 || ring->buf_size <= 0)
                return -EINVAL;

        ring->q = q;
        ring->flags = flags;
        assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

        /* no matter whether it is a tx or rx ring, ntc and ntu start from 0 */
        assert(ring->next_to_use == 0);
        assert(ring->next_to_clean == 0);

        ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
                                GFP_KERNEL);
        if (!ring->desc_cb) {
                ret = -ENOMEM;
                goto out;
        }

        ret = hnae_alloc_desc(ring);
        if (ret)
                goto out_with_desc_cb;

        if (is_rx_ring(ring)) {
                ret = hnae_alloc_buffers(ring);
                if (ret)
                        goto out_with_desc;
        }

        return 0;

out_with_desc:
        hnae_free_desc(ring);
out_with_desc_cb:
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
out:
        return ret;
}

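/*
 * The tx and rx rings of a queue share one flags layout: RINGF_DIR set marks
 * a tx ring, cleared marks rx (compare is_rx_ring() above), which is why
 * hnae_init_queue() ORs the bit in for tx and masks it off for rx.
 */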
static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
                           struct hnae_ae_dev *dev)
{
        int ret;

        q->dev = dev;
        q->handle = h;

        ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
        if (ret)
                return ret;

        ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
        if (ret)
                goto out_with_tx_ring;

        if (dev->ops->init_queue)
                dev->ops->init_queue(q);

        return 0;

out_with_tx_ring:
        hnae_fini_ring(&q->tx_ring);

        return ret;
}

static void hnae_fini_queue(struct hnae_queue *q)
{
        if (q->dev->ops->fini_queue)
                q->dev->ops->fini_queue(q);

        hnae_fini_ring(&q->tx_ring);
        hnae_fini_ring(&q->rx_ring);
}

/*
 * ae_chain - define ae chain head
 */
static RAW_NOTIFIER_HEAD(ae_chain);

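/*
 * ENET-side consumers subscribe to AE arrival events through this chain.
 * A minimal sketch (my_notifier_call and my_nb are illustrative names, not
 * part of this file):
 *
 *      static int my_notifier_call(struct notifier_block *nb,
 *                                  unsigned long action, void *data)
 *      {
 *              if (action == HNAE_AE_REGISTER)
 *                      ;       // re-probe for the newly registered AE dev
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_notifier_call,
 *      };
 *
 *      hnae_register_notifier(&my_nb);
 */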
int hnae_register_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
        if (raw_notifier_chain_unregister(&ae_chain, nb))
                dev_err(NULL, "notifier chain unregister fail\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);

int hnae_reinit_handle(struct hnae_handle *handle)
{
        int i, j;
        int ret;

        for (i = 0; i < handle->q_num; i++) /* free ring */
                hnae_fini_queue(handle->qs[i]);

        if (handle->dev->ops->reset)
                handle->dev->ops->reset(handle);

        for (i = 0; i < handle->q_num; i++) { /* reinit ring */
                ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
                if (ret)
                        goto out_when_init_queue;
        }

        return 0;

out_when_init_queue:
        for (j = i - 1; j >= 0; j--)
                hnae_fini_queue(handle->qs[j]);

        return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);

/* hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev that will use this handle
 * @fwnode: the fwnode of the AE device to attach to
 * @port_id: the port id of the handle
 * @bops: the callbacks for buffer management, or NULL for the defaults
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
                                    const struct fwnode_handle *fwnode,
                                    u32 port_id,
                                    struct hnae_buf_ops *bops)
{
        struct hnae_ae_dev *dev;
        struct hnae_handle *handle;
        int i, j;
        int ret;

        dev = find_ae(fwnode);
        if (!dev)
                return ERR_PTR(-ENODEV);

        handle = dev->ops->get_handle(dev, port_id);
        if (IS_ERR(handle)) {
                put_device(&dev->cls_dev);
                return handle;
        }

        handle->dev = dev;
        handle->owner_dev = owner_dev;
        handle->bops = bops ? bops : &hnae_bops;
        handle->eport_id = port_id;

        for (i = 0; i < handle->q_num; i++) {
                ret = hnae_init_queue(handle, handle->qs[i], dev);
                if (ret)
                        goto out_when_init_queue;
        }

        __module_get(dev->owner);

        hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

        return handle;

out_when_init_queue:
        for (j = i - 1; j >= 0; j--)
                hnae_fini_queue(handle->qs[j]);

        put_device(&dev->cls_dev);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(hnae_get_handle);

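/*
 * Typical ENET-side usage (a sketch; the variable names are illustrative):
 *
 *      struct hnae_handle *h;
 *
 *      h = hnae_get_handle(&pdev->dev, fwnode, port_id, NULL);
 *      if (IS_ERR(h))
 *              return PTR_ERR(h);
 *      // ... drive h->qs[i]->tx_ring / rx_ring ...
 *      hnae_put_handle(h);
 */
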
void hnae_put_handle(struct hnae_handle *h)
{
        struct hnae_ae_dev *dev = h->dev;
        int i;

        for (i = 0; i < h->q_num; i++)
                hnae_fini_queue(h->qs[i]);

        if (h->dev->ops->reset)
                h->dev->ops->reset(h);

        hnae_list_del(&dev->lock, &h->node);

        if (dev->ops->put_handle)
                dev->ops->put_handle(h);

        module_put(dev->owner);

        put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);

static void hnae_release(struct device *dev)
{
}

/**
 * hnae_ae_register - register an AE engine to the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module that provides this dev
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
        static atomic_t id = ATOMIC_INIT(-1);
        int ret;

        if (!hdev->dev)
                return -ENODEV;

        if (!hdev->ops || !hdev->ops->get_handle ||
            !hdev->ops->toggle_ring_irq ||
            !hdev->ops->get_status || !hdev->ops->adjust_link)
                return -EINVAL;

        hdev->owner = owner;
        hdev->id = (int)atomic_inc_return(&id);
        hdev->cls_dev.parent = hdev->dev;
        hdev->cls_dev.class = hnae_class;
        hdev->cls_dev.release = hnae_release;
        (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
        ret = device_register(&hdev->cls_dev);
        if (ret)
                return ret;

        __module_get(THIS_MODULE);

        INIT_LIST_HEAD(&hdev->handle_list);
        spin_lock_init(&hdev->lock);

        ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
        if (ret)
                dev_dbg(hdev->dev,
                        "has not notifier for AE: %s\n", hdev->name);

        return 0;
}
EXPORT_SYMBOL(hnae_ae_register);

/**
 * hnae_ae_unregister - unregister an hnae AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
        device_unregister(&hdev->cls_dev);
        module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
        hnae_class = class_create(THIS_MODULE, "hnae");
        return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
        class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */