// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport_internal.h"

#define ADF_ARB_NUM 4
#define ADF_ARB_REG_SIZE 0x4
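
/*
 * Arbiter CSRs are 32-bit registers laid out back to back: register N of a
 * block lives at block_offset + ADF_ARB_REG_SIZE * N. The helpers below
 * encode that layout for the SARCONFIG block and for the worker-thread to
 * service-arbiter map (WT2SAM), which sits wt_offset bytes past it.
 */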
#define WRITE_CSR_ARB_SARCONFIG(csr_addr, arb_offset, index, value) \
	ADF_CSR_WR(csr_addr, (arb_offset) + \
	(ADF_ARB_REG_SIZE * (index)), value)

#define WRITE_CSR_ARB_WT2SAM(csr_addr, arb_offset, wt_offset, index, value) \
	ADF_CSR_WR(csr_addr, ((arb_offset) + (wt_offset)) + \
	(ADF_ARB_REG_SIZE * (index)), value)
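
/*
 * adf_init_arb() - program every service arbiter with the device-specific
 * configuration, then map each engine's worker threads to an arbiter.
 * The transport must already be initialized, since the CSR base is taken
 * from ring bank 0.
 */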
int adf_init_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
	u32 arb_off, wt_off, arb_cfg;
	const u32 *thd_2_arb_cfg;
	struct arb_info info;
	int arb, i;
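
	/*
	 * Arbiter CSR offsets differ between device generations, so query
	 * them from the per-device hw_data callback rather than hardcoding.
	 */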
	hw_data->get_arb_info(&info);
	arb_cfg = info.arb_cfg;
	arb_off = info.arb_offset;
	wt_off = info.wt2sam_offset;

	/* Service arb configured for 32 bytes responses and
	 * ring flow control check enabled. */
	for (arb = 0; arb < ADF_ARB_NUM; arb++)
		WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);

	/* Map worker threads to service arbiters */
	hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);

	/* Bail out if the device provided no thread-to-arbiter mapping */
	if (!thd_2_arb_cfg)
		return -EFAULT;

	for (i = 0; i < hw_data->num_engines; i++)
		WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);

	return 0;
}
EXPORT_SYMBOL_GPL(adf_init_arb);
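
/*
 * Usage sketch (illustrative, not part of this file): adf_init_arb() must
 * run after the transport has been brought up, since it dereferences
 * accel_dev->transport. A device init path could therefore do:
 *
 *	ret = adf_init_etr_data(accel_dev);
 *	if (!ret)
 *		ret = adf_init_arb(accel_dev);
 */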
void adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
	struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u32 tx_ring_mask = hw_data->tx_rings_mask;
	u32 shift = hw_data->tx_rx_gap;
	u32 arben, arben_tx, arben_rx;
	u32 rx_ring_mask;

	/*
	 * Enable arbitration on a ring only if the TX half of the ring mask
	 * matches the RX part. This results in writes to CSR on both TX and
	 * RX update - only one is necessary, but both are done for
	 * simplicity.
	 */
	rx_ring_mask = tx_ring_mask << shift;
	arben_tx = (ring->bank->ring_mask & tx_ring_mask) >> 0;
	arben_rx = (ring->bank->ring_mask & rx_ring_mask) >> shift;
	arben = arben_tx & arben_rx;

	csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
					   ring->bank->bank_number, arben);
}
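
/*
 * Worked example for adf_update_ring_arb() with illustrative values: if
 * hw_data->tx_rings_mask is 0xff and hw_data->tx_rx_gap is 8, TX ring i
 * pairs with RX ring i + 8. A bank ring_mask of 0x0101 (TX ring 0 plus its
 * RX partner, ring 8) gives arben_tx = 0x01 and arben_rx = 0x01, so
 * arben = 0x01 and arbitration is enabled for the pair. With only the TX
 * half set (ring_mask = 0x0001), arben_rx = 0 and arbitration stays off.
 */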
void adf_exit_arb(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	u32 arb_off, wt_off;
	struct arb_info info;
	void __iomem *csr;
	unsigned int i;

	hw_data->get_arb_info(&info);
	arb_off = info.arb_offset;
	wt_off = info.wt2sam_offset;

	if (!accel_dev->transport)
		return;

	csr = accel_dev->transport->banks[0].csr_addr;

	hw_data->get_arb_info(&info);

	/* Reset arbiter configuration */
	for (i = 0; i < ADF_ARB_NUM; i++)
		WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);

	/* Unmap worker threads from service arbiters */
	for (i = 0; i < hw_data->num_engines; i++)
		WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);

	/* Disable arbitration on all rings */
	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
		csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}
EXPORT_SYMBOL_GPL(adf_exit_arb);
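
/*
 * Teardown sketch (illustrative, not part of this file): adf_exit_arb() is
 * safe to call even if the transport never came up, because it returns early
 * when accel_dev->transport is NULL. A shutdown path could therefore do:
 *
 *	adf_exit_arb(accel_dev);
 *	adf_cleanup_etr_data(accel_dev);
 */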