/* drivers/crypto/qat/qat_common/adf_transport.c */
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
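
/*
 * Ring sizes are powers of two, so the head/tail arithmetic below can use a
 * shift-based modulo: adf_modulo() returns data % (1 << shift) without a
 * division.
 */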
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
        uint32_t div = data >> shift;
        uint32_t mult = div << shift;

        return data - mult;
}
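
/*
 * The ring base address must be naturally aligned to the ring size: for a
 * power-of-two size, (addr & (size - 1)) has to be zero.
 */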
static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
{
        if (((size - 1) & addr) != 0)
                return -EFAULT;
        return 0;
}
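
/*
 * Translate the requested msg_size * msg_num byte count into one of the
 * discrete ring-size encodings; fall back to the default encoding when no
 * exact match exists.
 */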
static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
{
        int i = ADF_MIN_RING_SIZE;

        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
                        return i;

        return ADF_DEFAULT_RING_SIZE;
}
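
/*
 * Ring slots within a bank are tracked in bank->ring_mask under the bank
 * lock; reserving an already-claimed slot fails, so a ring number cannot be
 * handed out twice.
 */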
static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        if (bank->ring_mask & (1 << ring)) {
                spin_unlock(&bank->lock);
                return -EFAULT;
        }
        bank->ring_mask |= (1 << ring);
        spin_unlock(&bank->lock);
        return 0;
}
static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        bank->ring_mask &= ~(1 << ring);
        spin_unlock(&bank->lock);
}
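
/*
 * Per-ring interrupts are controlled through the bank's irq_mask, which is
 * mirrored into the interrupt-coalescing enable CSR; enabling a ring also
 * (re)programs the coalescing timer.
 */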
static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask |= (1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
        WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
                              bank->irq_coalesc_timer);
}
static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask &= ~(1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}
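
/*
 * adf_send_message() - enqueue one request on a TX ring.  The inflight
 * counter is bumped first and rolled back on overflow, so a full ring is
 * reported with -EAGAIN before anything is copied; otherwise the message is
 * copied at the tail, the tail is advanced modulo the ring size and written
 * to the CSR to notify the device.
 */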
int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
{
        if (atomic_add_return(1, ring->inflights) >
            ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
                atomic_dec(ring->inflights);
                return -EAGAIN;
        }
        spin_lock_bh(&ring->lock);
        memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
               ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

        ring->tail = adf_modulo(ring->tail +
                                ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                ADF_RING_SIZE_MODULO(ring->ring_size));
        WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring->tail);
        spin_unlock_bh(&ring->lock);
        return 0;
}
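
/*
 * Drain one response ring: hand every message to the ring's callback until
 * the empty-slot signature is seen again, restore the signature behind each
 * consumed message, and write the new head to the CSR once at the end.
 */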
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
        uint32_t msg_counter = 0;
        uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);

        while (*msg != ADF_RING_EMPTY_SIG) {
                ring->callback((uint32_t *)msg);
                atomic_dec(ring->inflights);
                *msg = ADF_RING_EMPTY_SIG;
                ring->head = adf_modulo(ring->head +
                                        ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                        ADF_RING_SIZE_MODULO(ring->ring_size));
                msg_counter++;
                msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
        }
        if (msg_counter > 0)
                WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
                                    ring->bank->bank_number,
                                    ring->ring_number, ring->head);
        return 0;
}
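
/*
 * TX rings are programmed with the plain ring-config word; RX (response)
 * rings additionally carry near-watermark thresholds (512 and 0 here).
 */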
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}
static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config =
                        BUILD_RESP_RING_CONFIG(ring->ring_size,
                                               ADF_RING_NEAR_WATERMARK_512,
                                               ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}
static int adf_init_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;
        struct adf_accel_dev *accel_dev = bank->accel_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        uint64_t ring_base;
        uint32_t ring_size_bytes =
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
        ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
                                             ring_size_bytes, &ring->dma_addr,
                                             GFP_KERNEL);
        if (!ring->base_addr)
                return -ENOMEM;

        memset(ring->base_addr, 0x7F, ring_size_bytes);
        /* The base_addr has to be aligned to the size of the buffer */
        if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
                dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
                dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
                                  ring->base_addr, ring->dma_addr);
                return -EFAULT;
        }

        if (hw_data->tx_rings_mask & (1 << ring->ring_number))
                adf_configure_tx_ring(ring);
        else
                adf_configure_rx_ring(ring);

        ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
        WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring_base);
        spin_lock_init(&ring->lock);
        return 0;
}
static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_size_bytes =
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

        if (ring->base_addr) {
                memset(ring->base_addr, 0x7F, ring_size_bytes);
                dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
                                  ring_size_bytes, ring->base_addr,
                                  ring->dma_addr);
        }
}
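
/*
 * adf_create_ring() - allocate and configure one ring in the given bank.
 * The ring number is looked up under ring_name in the given config section;
 * the slot is reserved, ring memory is DMA-allocated and programmed into the
 * CSRs, HW arbitration is enabled and, unless poll_mode is set, the ring
 * interrupt is unmasked.
 */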
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
                    uint32_t bank_num, uint32_t num_msgs,
                    uint32_t msg_size, const char *ring_name,
                    adf_callback_fn callback, int poll_mode,
                    struct adf_etr_ring_data **ring_ptr)
{
        struct adf_etr_data *transport_data = accel_dev->transport;
        struct adf_etr_bank_data *bank;
        struct adf_etr_ring_data *ring;
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        uint32_t ring_num;
        int ret;

        if (bank_num >= GET_MAX_BANKS(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
                return -EFAULT;
        }
        if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
                return -EFAULT;
        }
        if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
                              ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
                dev_err(&GET_DEV(accel_dev),
                        "Invalid ring size for given msg size\n");
                return -EFAULT;
        }
        if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
                dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
                        section, ring_name);
                return -EFAULT;
        }
        if (kstrtouint(val, 10, &ring_num)) {
                dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
                return -EFAULT;
        }
        if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
                dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
                return -EFAULT;
        }

        bank = &transport_data->banks[bank_num];
        if (adf_reserve_ring(bank, ring_num)) {
                dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
                        ring_num, ring_name);
                return -EFAULT;
        }
        ring = &bank->rings[ring_num];
        ring->ring_number = ring_num;
        ring->bank = bank;
        ring->callback = callback;
        ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
        ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
        ring->head = 0;
        ring->tail = 0;
        atomic_set(ring->inflights, 0);
        ret = adf_init_ring(ring);
        if (ret)
                goto err;

        /* Enable HW arbitration for the given ring */
        adf_update_ring_arb(ring);

        if (adf_ring_debugfs_add(ring, ring_name)) {
                dev_err(&GET_DEV(accel_dev),
                        "Couldn't add ring debugfs entry\n");
                ret = -EFAULT;
                goto err;
        }

        /* Enable interrupts if needed */
        if (callback && (!poll_mode))
                adf_enable_ring_irq(bank, ring->ring_number);
        *ring_ptr = ring;
        return 0;
err:
        adf_cleanup_ring(ring);
        adf_unreserve_ring(bank, ring_num);
        adf_update_ring_arb(ring);
        return ret;
}
void adf_remove_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;

        /* Disable interrupts for the given ring */
        adf_disable_ring_irq(bank, ring->ring_number);

        /* Clear PCI config space */
        WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
                              ring->ring_number, 0);
        WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
                            ring->ring_number, 0);
        adf_ring_debugfs_rm(ring);
        adf_unreserve_ring(bank, ring->ring_number);
        /* Disable HW arbitration for the given ring */
        adf_update_ring_arb(ring);
        adf_cleanup_ring(ring);
}
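
/*
 * Scan the bank's empty-ring status register and drain every ring that is
 * both non-empty and has its interrupt enabled in irq_mask.
 */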
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
        uint32_t empty_rings, i;

        empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
        empty_rings = ~empty_rings & bank->irq_mask;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
                if (empty_rings & (1 << i))
                        adf_handle_response(&bank->rings[i]);
        }
}
void adf_response_handler(uintptr_t bank_addr)
{
        struct adf_etr_bank_data *bank = (void *)bank_addr;

        /* Handle all the responses and reenable IRQs */
        adf_ring_response_handler(bank);
        WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
                                   bank->irq_mask);
}
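
/*
 * Build a config key from the printf-style format and numeric key, look it
 * up in the given section and parse the value as a decimal integer.
 */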
static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
                                  const char *section, const char *format,
                                  uint32_t key, uint32_t *value)
{
        char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

        snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

        if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
                return -EFAULT;

        if (kstrtouint(val_buf, 10, value))
                return -EFAULT;
        return 0;
}
static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
                                  const char *section,
                                  uint32_t bank_num_in_accel)
{
        if (adf_get_cfg_int(bank->accel_dev, section,
                            ADF_ETRMGR_COALESCE_TIMER_FORMAT,
                            bank_num_in_accel, &bank->irq_coalesc_timer))
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

        if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
            ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}
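
/*
 * adf_init_bank() - reset and set up one ring bank: clear every ring's
 * config and base CSRs, allocate an inflight counter per TX ring (an RX
 * ring shares the counter of its paired TX ring tx_rx_gap slots below),
 * pick the IRQ coalescing timer and register the bank's debugfs entry.
 */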
static int adf_init_bank(struct adf_accel_dev *accel_dev,
                         struct adf_etr_bank_data *bank,
                         uint32_t bank_num, void __iomem *csr_addr)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_etr_ring_data *ring;
        struct adf_etr_ring_data *tx_ring;
        uint32_t i, coalesc_enabled = 0;

        memset(bank, 0, sizeof(*bank));
        bank->bank_number = bank_num;
        bank->csr_addr = csr_addr;
        bank->accel_dev = accel_dev;
        spin_lock_init(&bank->lock);

        /* Always enable IRQ coalescing so the optimised flag-and-coalesce
         * register can be used.  If coalescing is disabled in the config
         * file, just use the minimum time value. */
        if ((adf_get_cfg_int(accel_dev, "Accelerator0",
                             ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
                             &coalesc_enabled) == 0) && coalesc_enabled)
                adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
        else
                bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
                WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i)) {
                        ring->inflights =
                                kzalloc_node(sizeof(atomic_t),
                                             GFP_KERNEL,
                                             dev_to_node(&GET_DEV(accel_dev)));
                        if (!ring->inflights)
                                goto err;
                } else {
                        if (i < hw_data->tx_rx_gap) {
                                dev_err(&GET_DEV(accel_dev),
                                        "Invalid tx rings mask config\n");
                                goto err;
                        }
                        tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
                        ring->inflights = tx_ring->inflights;
                }
        }
        if (adf_bank_debugfs_add(bank)) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to add bank debugfs entry\n");
                goto err;
        }

        WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
        WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
        return 0;
err:
        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        return -ENOMEM;
}
/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communications channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *csr_addr;
        uint32_t size;
        uint32_t num_banks = 0;
        int i, ret;

        etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
                                dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data)
                return -ENOMEM;

        num_banks = GET_MAX_BANKS(accel_dev);
        size = num_banks * sizeof(struct adf_etr_bank_data);
        etr_data->banks = kzalloc_node(size, GFP_KERNEL,
                                       dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data->banks) {
                ret = -ENOMEM;
                goto err_bank;
        }

        accel_dev->transport = etr_data;
        i = hw_data->get_etr_bar_id(hw_data);
        csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

        /* accel_dev->debugfs_dir should always be non-NULL here */
        etr_data->debug = debugfs_create_dir("transport",
                                             accel_dev->debugfs_dir);
        if (!etr_data->debug) {
                dev_err(&GET_DEV(accel_dev),
                        "Unable to create transport debugfs entry\n");
                ret = -ENOENT;
                goto err_bank_debug;
        }

        for (i = 0; i < num_banks; i++) {
                ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
                                    csr_addr);
                if (ret)
                        goto err_bank_all;
        }

        return 0;

err_bank_all:
        debugfs_remove(etr_data->debug);
err_bank_debug:
        kfree(etr_data->banks);
err_bank:
        kfree(etr_data);
        accel_dev->transport = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);
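
/*
 * Undo adf_init_bank() for one bank: free every ring that is still marked
 * in ring_mask, free the TX-ring inflight counters and drop the bank's
 * debugfs entry.
 */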
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
        uint32_t i;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                struct adf_accel_dev *accel_dev = bank->accel_dev;
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_etr_ring_data *ring = &bank->rings[i];

                if (bank->ring_mask & (1 << i))
                        adf_cleanup_ring(ring);

                if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        adf_bank_debugfs_rm(bank);
        memset(bank, 0, sizeof(*bank));
}
static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;
        uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);

        for (i = 0; i < num_banks; i++)
                cleanup_bank(&etr_data->banks[i]);
}
/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communications channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;

        if (etr_data) {
                adf_cleanup_etr_handles(accel_dev);
                debugfs_remove(etr_data->debug);
                kfree(etr_data->banks);
                kfree(etr_data);
                accel_dev->transport = NULL;
        }
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
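
/*
 * Illustrative pairing of the two exported entry points (a sketch only, not
 * taken from any specific device driver):
 *
 *	ret = adf_init_etr_data(accel_dev);	(during device bring-up)
 *	if (ret)
 *		goto out_err;
 *	...
 *	adf_cleanup_etr_data(accel_dev);	(during device shutdown)
 */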