// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
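
/*
 * Ring head/tail offsets wrap around power-of-two ring sizes, so modulo
 * reduction can be done with shifts: adf_modulo() returns data mod 2^shift.
 */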
static inline u32 adf_modulo(u32 data, u32 shift)
{
	u32 div = data >> shift;
	u32 mult = div << shift;

	return data - mult;
}
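
/*
 * With a power-of-two size, (size - 1) & addr is non-zero exactly when addr
 * is not aligned to size, so this checks natural alignment of a ring's DMA
 * address.
 */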
static inline int adf_check_ring_alignment(u64 addr, u64 size)
{
	if (((size - 1) & addr) != 0)
		return -EFAULT;
	return 0;
}
static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
	int i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return ADF_DEFAULT_RING_SIZE;
}
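
/*
 * Each bank tracks which of its rings are in use in the ring_mask bitmap,
 * protected by the bank lock.
 */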
static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock(&bank->lock);
	if (bank->ring_mask & (1 << ring)) {
		spin_unlock(&bank->lock);
		return -EFAULT;
	}
	bank->ring_mask |= (1 << ring);
	spin_unlock(&bank->lock);
	return 0;
}
static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock(&bank->lock);
	bank->ring_mask &= ~(1 << ring);
	spin_unlock(&bank->lock);
}
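
/*
 * irq_mask selects which rings in the bank generate (coalesced) interrupts.
 * Enabling a ring also reprograms the interrupt coalescing timer.
 */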
static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	spin_lock_bh(&bank->lock);
	bank->irq_mask |= (1 << ring);
	spin_unlock_bh(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
				      bank->irq_mask);
	csr_ops->write_csr_int_col_ctl(bank->csr_addr, bank->bank_number,
				       bank->irq_coalesc_timer);
}
static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	spin_lock_bh(&bank->lock);
	bank->irq_mask &= ~(1 << ring);
	spin_unlock_bh(&bank->lock);
	csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
				      bank->irq_mask);
}
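
/*
 * Submit one request to a TX ring: the inflight counter provides
 * backpressure (-EAGAIN when the ring is full), the message is copied at
 * the current tail, and the tail CSR is updated to hand it to the device.
 */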
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);

	if (atomic_add_return(1, ring->inflights) >
	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
		atomic_dec(ring->inflights);
		return -EAGAIN;
	}
	spin_lock_bh(&ring->lock);
	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
	       ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

	ring->tail = adf_modulo(ring->tail +
				ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
				ADF_RING_SIZE_MODULO(ring->ring_size));
	csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
				     ring->bank->bank_number,
				     ring->ring_number, ring->tail);
	spin_unlock_bh(&ring->lock);

	return 0;
}
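
/*
 * Drain a response ring: invoke the callback for every message until the
 * empty-slot signature is found, restore the signature behind each message,
 * and write the new head back to the CSR once at the end.
 */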
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 msg_counter = 0;
	u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);

	while (*msg != ADF_RING_EMPTY_SIG) {
		ring->callback((u32 *)msg);
		atomic_dec(ring->inflights);
		*msg = ADF_RING_EMPTY_SIG;
		ring->head = adf_modulo(ring->head +
					ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
					ADF_RING_SIZE_MODULO(ring->ring_size));
		msg_counter++;
		msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
	}
	if (msg_counter > 0) {
		csr_ops->write_csr_ring_head(ring->bank->csr_addr,
					     ring->bank->bank_number,
					     ring->ring_number, ring->head);
	}
	return 0;
}
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number, ring_config);
}
static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
	u32 ring_config =
			BUILD_RESP_RING_CONFIG(ring->ring_size,
					       ADF_RING_NEAR_WATERMARK_512,
					       ADF_RING_NEAR_WATERMARK_0);

	csr_ops->write_csr_ring_config(ring->bank->csr_addr,
				       ring->bank->bank_number,
				       ring->ring_number, ring_config);
}
static int adf_init_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u64 ring_base;
	u32 ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
	ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
					     ring_size_bytes, &ring->dma_addr,
					     GFP_KERNEL);
	if (!ring->base_addr)
		return -ENOMEM;

	memset(ring->base_addr, 0x7F, ring_size_bytes);
	/* The base_addr has to be aligned to the size of the buffer */
	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
		dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
				  ring->base_addr, ring->dma_addr);
		ring->base_addr = NULL;
		return -EFAULT;
	}

	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
		adf_configure_tx_ring(ring);
	else
		adf_configure_rx_ring(ring);

	ring_base = csr_ops->build_csr_ring_base_addr(ring->dma_addr,
						      ring->ring_size);

	csr_ops->write_csr_ring_base(ring->bank->csr_addr,
				     ring->bank->bank_number,
				     ring->ring_number, ring_base);
	spin_lock_init(&ring->lock);
	return 0;
}
static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	if (ring->base_addr) {
		memset(ring->base_addr, 0x7F, ring_size_bytes);
		dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
				  ring_size_bytes, ring->base_addr,
				  ring->dma_addr);
	}
}
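
/*
 * Create a ring in the given bank: validate the requested sizes, look up the
 * ring number for ring_name in the device configuration, reserve the ring,
 * allocate and program it, enable arbitration and debugfs, and optionally
 * enable its interrupt when a callback is used in non-polled mode.
 */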
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
		    u32 bank_num, u32 num_msgs,
		    u32 msg_size, const char *ring_name,
		    adf_callback_fn callback, int poll_mode,
		    struct adf_etr_ring_data **ring_ptr)
{
	struct adf_etr_data *transport_data = accel_dev->transport;
	u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 ring_num;
	int ret;

	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
		return -EFAULT;
	}
	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
		return -EFAULT;
	}
	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
		dev_err(&GET_DEV(accel_dev),
			"Invalid ring size for given msg size\n");
		return -EFAULT;
	}
	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
		dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
			section, ring_name);
		return -EFAULT;
	}
	if (kstrtouint(val, 10, &ring_num)) {
		dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
		return -EFAULT;
	}
	if (ring_num >= num_rings_per_bank) {
		dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
		return -EFAULT;
	}

	bank = &transport_data->banks[bank_num];
	if (adf_reserve_ring(bank, ring_num)) {
		dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
			ring_num, ring_name);
		return -EFAULT;
	}
	ring = &bank->rings[ring_num];
	ring->ring_number = ring_num;
	ring->bank = bank;
	ring->callback = callback;
	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
	ring->head = 0;
	ring->tail = 0;
	atomic_set(ring->inflights, 0);
	ret = adf_init_ring(ring);
	if (ret)
		goto err;

	/* Enable HW arbitration for the given ring */
	adf_update_ring_arb(ring);

	if (adf_ring_debugfs_add(ring, ring_name)) {
		dev_err(&GET_DEV(accel_dev),
			"Couldn't add ring debugfs entry\n");
		ret = -EFAULT;
		goto err;
	}

	/* Enable interrupts if needed */
	if (callback && (!poll_mode))
		adf_enable_ring_irq(bank, ring->ring_number);
	*ring_ptr = ring;
	return 0;
err:
	adf_cleanup_ring(ring);
	adf_unreserve_ring(bank, ring_num);
	adf_update_ring_arb(ring);
	return ret;
}
void adf_remove_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Disable interrupts for the given ring */
	adf_disable_ring_irq(bank, ring->ring_number);

	/* Clear PCI config space */

	csr_ops->write_csr_ring_config(bank->csr_addr, bank->bank_number,
				       ring->ring_number, 0);
	csr_ops->write_csr_ring_base(bank->csr_addr, bank->bank_number,
				     ring->ring_number, 0);
	adf_ring_debugfs_rm(ring);
	adf_unreserve_ring(bank, ring->ring_number);
	/* Disable HW arbitration for the given ring */
	adf_update_ring_arb(ring);
	adf_cleanup_ring(ring);
}
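
/*
 * The e_stat CSR reports a bit per empty ring; inverting it and masking with
 * irq_mask yields the rings that both have responses pending and have their
 * interrupt enabled.
 */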
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	unsigned long empty_rings;
	u32 i;

	empty_rings = csr_ops->read_csr_e_stat(bank->csr_addr,
					       bank->bank_number);
	empty_rings = ~empty_rings & bank->irq_mask;

	for_each_set_bit(i, &empty_rings, num_rings_per_bank)
		adf_handle_response(&bank->rings[i]);
}
void adf_response_handler(uintptr_t bank_addr)
{
	struct adf_etr_bank_data *bank = (void *)bank_addr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	/* Handle all the responses and reenable IRQs */
	adf_ring_response_handler(bank);

	csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
					    bank->irq_mask);
}
static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
				  const char *section, const char *format,
				  u32 key, u32 *value)
{
	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
		return -EFAULT;

	if (kstrtouint(val_buf, 10, value))
		return -EFAULT;
	return 0;
}
static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
				  const char *section,
				  u32 bank_num_in_accel)
{
	if (adf_get_cfg_int(bank->accel_dev, section,
			    ADF_ETRMGR_COALESCE_TIMER_FORMAT,
			    bank_num_in_accel, &bank->irq_coalesc_timer))
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

	if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
	    ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}
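
/*
 * Per-bank setup: clear every ring's config and base CSRs, then allocate an
 * inflight counter for each TX ring; an RX ring reuses the counter of its
 * paired TX ring, which sits tx_rx_gap ring numbers below it.
 */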
static int adf_init_bank(struct adf_accel_dev *accel_dev,
			 struct adf_etr_bank_data *bank,
			 u32 bank_num, void __iomem *csr_addr)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
	u32 irq_mask = BIT(num_rings_per_bank) - 1;
	struct adf_etr_ring_data *ring;
	struct adf_etr_ring_data *tx_ring;
	u32 i, coalesc_enabled = 0;
	unsigned long ring_mask;
	int size;

	memset(bank, 0, sizeof(*bank));
	bank->bank_number = bank_num;
	bank->csr_addr = csr_addr;
	bank->accel_dev = accel_dev;
	spin_lock_init(&bank->lock);

	/* Allocate the rings in the bank */
	size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
	bank->rings = kzalloc_node(size, GFP_KERNEL,
				   dev_to_node(&GET_DEV(accel_dev)));
	if (!bank->rings)
		return -ENOMEM;

	/* Always enable IRQ coalescing. This allows use of the optimised
	 * flag and coalesc register.
	 * If it is disabled in the config file just use the min time value */
	if ((adf_get_cfg_int(accel_dev, "Accelerator0",
			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
			     &coalesc_enabled) == 0) && coalesc_enabled)
		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
	else
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

	for (i = 0; i < num_rings_per_bank; i++) {
		csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
		csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);

		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			ring->inflights =
				kzalloc_node(sizeof(atomic_t),
					     GFP_KERNEL,
					     dev_to_node(&GET_DEV(accel_dev)));
			if (!ring->inflights)
				goto err;
		} else {
			if (i < hw_data->tx_rx_gap) {
				dev_err(&GET_DEV(accel_dev),
					"Invalid tx rings mask config\n");
				goto err;
			}
			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
			ring->inflights = tx_ring->inflights;
		}
	}
	if (adf_bank_debugfs_add(bank)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to add bank debugfs entry\n");
		goto err;
	}

	csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
	csr_ops->write_csr_int_srcsel(csr_addr, bank_num);

	return 0;
err:
	ring_mask = hw_data->tx_rings_mask;
	for_each_set_bit(i, &ring_mask, num_rings_per_bank) {
		ring = &bank->rings[i];
		kfree(ring->inflights);
		ring->inflights = NULL;
	}
	kfree(bank->rings);
	return -ENOMEM;
}
/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communications channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *csr_addr;
	u32 size;
	u32 num_banks = 0;
	int i, ret;

	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
				dev_to_node(&GET_DEV(accel_dev)));
	if (!etr_data)
		return -ENOMEM;

	num_banks = GET_MAX_BANKS(accel_dev);
	size = num_banks * sizeof(struct adf_etr_bank_data);
	etr_data->banks = kzalloc_node(size, GFP_KERNEL,
				       dev_to_node(&GET_DEV(accel_dev)));
	if (!etr_data->banks) {
		ret = -ENOMEM;
		goto err_bank;
	}

	accel_dev->transport = etr_data;
	i = hw_data->get_etr_bar_id(hw_data);
	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

	/* accel_dev->debugfs_dir should always be non-NULL here */
	etr_data->debug = debugfs_create_dir("transport",
					     accel_dev->debugfs_dir);

	for (i = 0; i < num_banks; i++) {
		ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
				    csr_addr);
		if (ret)
			goto err_bank_all;
	}

	return 0;

err_bank_all:
	debugfs_remove(etr_data->debug);
	kfree(etr_data->banks);
err_bank:
	kfree(etr_data);
	accel_dev->transport = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);
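
/*
 * Typical call order in a device specific driver (illustrative only): call
 * adf_init_etr_data() once at device init, create each ring with
 * adf_create_ring() using a ring name from the device configuration, submit
 * requests with adf_send_message(), and tear down with adf_remove_ring() and
 * adf_cleanup_etr_data() on shutdown.
 */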
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_rings_per_bank = hw_data->num_rings_per_bank;
	u32 i;

	for (i = 0; i < num_rings_per_bank; i++) {
		struct adf_etr_ring_data *ring = &bank->rings[i];

		if (bank->ring_mask & (1 << i))
			adf_cleanup_ring(ring);

		if (hw_data->tx_rings_mask & (1 << i))
			kfree(ring->inflights);
	}
	kfree(bank->rings);
	adf_bank_debugfs_rm(bank);
	memset(bank, 0, sizeof(*bank));
}
static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;
	u32 i, num_banks = GET_MAX_BANKS(accel_dev);

	for (i = 0; i < num_banks; i++)
		cleanup_bank(&etr_data->banks[i]);
}
/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communications channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;

	if (etr_data) {
		adf_cleanup_etr_handles(accel_dev);
		debugfs_remove(etr_data->debug);
		kfree(etr_data->banks->rings);
		kfree(etr_data->banks);
		kfree(etr_data);
		accel_dev->transport = NULL;
	}
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);