drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
#include <adf_gen2_hw_data.h>
#include "adf_c3xxx_hw_data.h"
#include "icp_qat_hw.h"

/* Worker thread to service arbiter mappings based on dev SKUs */
static const u32 thrd_to_arb_map_6_me_sku[] = {
	0x12222AAA, 0x11222AAA, 0x12222AAA,
	0x11222AAA, 0x12222AAA, 0x11222AAA
};
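
/* Device class descriptor shared by all c3xxx devices; .instances counts them */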
static struct adf_hw_device_class c3xxx_class = {
	.name = ADF_C3XXX_DEVICE_NAME,
	.type = DEV_C3XXX,
	.instances = 0
};
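
/*
 * Derive the mask of enabled accelerators from the fuse and soft-strap
 * registers (a set bit in either register means the unit is disabled).
 */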
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
	u32 straps = self->straps;
	u32 fuses = self->fuses;
	u32 accel;

	accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET;
	accel &= ADF_C3XXX_ACCELERATORS_MASK;

	return accel;
}
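
/*
 * Derive the mask of enabled accel engines (AEs). Each accelerator drives a
 * pair of AEs, so the AEs behind a disabled accelerator are masked out too.
 */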
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
	u32 straps = self->straps;
	u32 fuses = self->fuses;
	unsigned long disabled;
	u32 ae_disable;
	int accel;

	/* If an accel is disabled, then disable the corresponding two AEs */
	disabled = ~get_accel_mask(self) & ADF_C3XXX_ACCELERATORS_MASK;
	ae_disable = BIT(1) | BIT(0);
	for_each_set_bit(accel, &disabled, ADF_C3XXX_MAX_ACCELERATORS)
		straps |= ae_disable << (accel << 1);

	return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
}
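
/* Count the bits set in the accelerator and accel engine masks */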
static u32 get_num_accels(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->accel_mask)
		return 0;

	for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) {
		if (self->accel_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

static u32 get_num_aes(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->ae_mask)
		return 0;

	for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) {
		if (self->ae_mask & (1 << i))
			ctr++;
	}
	return ctr;
}
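
/*
 * PCI BAR index helpers: PMISC (CSRs) and ETR (ring banks). The c3xxx has no
 * dedicated SRAM BAR, so get_sram_bar_id() simply returns 0.
 */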
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_C3XXX_PMISC_BAR;
}

static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_C3XXX_ETR_BAR;
}

static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return 0;
}
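
/* Map the number of enabled AEs to a device SKU; only the 6-AE SKU is known */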
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
	int aes = get_num_aes(self);

	if (aes == 6)
		return DEV_SKU_4;

	return DEV_SKU_UNKNOWN;
}
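
/* Return the worker-thread to arbiter mapping table for the detected SKU */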
static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
				    u32 const **arb_map_config)
{
	switch (accel_dev->accel_pci_dev.sku) {
	case DEV_SKU_4:
		*arb_map_config = thrd_to_arb_map_6_me_sku;
		break;
	default:
		dev_err(&GET_DEV(accel_dev),
			"The configuration doesn't match any SKU");
		*arb_map_config = NULL;
	}
}
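
/* Per-VF CSR offsets for PF-to-VF messaging and VF interrupt masking */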
static u32 get_pf2vf_offset(u32 i)
{
	return ADF_C3XXX_PF2VF_OFFSET(i);
}

static u32 get_vintmsk_offset(u32 i)
{
	return ADF_C3XXX_VINTMSK_OFFSET(i);
}
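
/*
 * Enable ECC error detection and correction on every enabled accel engine and
 * on each accelerator's shared memory (correctable and uncorrectable paths).
 */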
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR];
	unsigned long accel_mask = hw_device->accel_mask;
	unsigned long ae_mask = hw_device->ae_mask;
	void __iomem *csr = misc_bar->virt_addr;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
		val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i));
		val |= ADF_C3XXX_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i));
		val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for_each_set_bit(i, &accel_mask, ADF_C3XXX_MAX_ACCELERATORS) {
		val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i));
		val |= ADF_C3XXX_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i));
		val |= ADF_C3XXX_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val);
	}
}
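
/* Unmask bundle and misc interrupts via the SMIA registers in the PMISC BAR */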
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr;

	/* Enable bundle and misc interrupts */
	ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET,
		   ADF_C3XXX_SMIA0_MASK);
	ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET,
		   ADF_C3XXX_SMIA1_MASK);
}
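
/* No PF-side setup is needed here to enable VF-to-PF messaging; stub returns 0 */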
static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	return 0;
}
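
/* Delegate IOV thread configuration to the common gen2 helper with c3xxx register counts */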
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
	adf_gen2_cfg_iov_thds(accel_dev, enable,
			      ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS,
			      ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS);
}
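
/*
 * Fill in the c3xxx-specific constants and callbacks consumed by the common
 * QAT driver core.
 */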
void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &c3xxx_class;
	hw_data->instance_id = c3xxx_class.instances++;
	hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
	hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_accel_cap = adf_gen2_get_accel_cap;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_pf2vf_offset = get_pf2vf_offset;
	hw_data->get_vintmsk_offset = get_vintmsk_offset;
	hw_data->get_admin_info = adf_gen2_get_admin_info;
	hw_data->get_arb_info = adf_gen2_get_arb_info;
	hw_data->get_sku = get_sku;
	hw_data->fw_name = ADF_C3XXX_FW;
	hw_data->fw_mmp_name = ADF_C3XXX_MMP;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->configure_iov_threads = configure_iov_threads;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
	hw_data->reset_device = adf_reset_flr;
	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
	adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
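
/* Undo the instance accounting done in adf_init_hw_data_c3xxx() */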
void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}