// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include "adf_cfg.h"
#include "adf_common_drv.h"

static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static u32 num_devices;
static u8 id_map[ADF_MAX_DEVICES];

struct vf_id_map {
	u32 bdf;
	u32 id;
	u32 fake_id;
	bool attached;
	struct list_head list;
};

/* Map a VF's devfn to a 0-based VF index: eight functions per slot,
 * with VFs starting at slot 1, i.e. 8 * (slot - 1) + func.
 */
static int adf_get_vf_id(struct adf_accel_dev *vf)
{
	return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
		PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
		(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
}

static int adf_get_vf_num(struct adf_accel_dev *vf)
{
	return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
}
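
/*
 * Worked example (hypothetical values, for illustration only): a VF at
 * bus 0x3f, slot 2, function 5 has adf_get_vf_id() = 7 * 1 + 5 + 1 = 13,
 * so adf_get_vf_num() packs it as (0x3f << 8) | 13 = 0x3f0d.
 */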

static struct vf_id_map *adf_find_vf(u32 bdf)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);

		if (ptr->bdf == bdf)
			return ptr;
	}
	return NULL;
}

static int adf_get_vf_real_id(u32 fake)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);
		if (ptr->fake_id == fake)
			return ptr->id;
	}
	return -1;
}

/**
 * adf_clean_vf_map() - Cleans VF id mappings
 *
 * Function cleans internal ids for virtual functions.
 * @vf: flag indicating whether mappings are cleaned
 *	for vfs only or for vfs and pfs
 */
void adf_clean_vf_map(bool vf)
{
	struct vf_id_map *map;
	struct list_head *ptr, *tmp;

	mutex_lock(&table_lock);
	list_for_each_safe(ptr, tmp, &vfs_table) {
		map = list_entry(ptr, struct vf_id_map, list);
		if (map->bdf != -1) {
			id_map[map->id] = 0;
			num_devices--;
		}

		if (vf && map->bdf == -1)
			continue;

		list_del(ptr);
		kfree(map);
	}
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_clean_vf_map);

/**
 * adf_devmgr_update_class_index() - Update internal index
 * @hw_data: Pointer to internal device data.
 *
 * Function updates internal dev index for VFs
 */
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
	struct adf_hw_device_class *class = hw_data->dev_class;
	struct list_head *itr;
	int i = 0;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

		if (ptr->hw_device->dev_class == class)
			ptr->hw_device->instance_id = i++;

		if (i == class->instances)
			break;
	}
}
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);

static unsigned int adf_find_free_id(void)
{
	unsigned int i;

	for (i = 0; i < ADF_MAX_DEVICES; i++) {
		if (!id_map[i]) {
			id_map[i] = 1;
			return i;
		}
	}
	return ADF_MAX_DEVICES + 1;
}
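
/*
 * For illustration: with id_map = {1, 1, 0, ...} the next call returns 2
 * and marks that slot used; once every slot is taken, the out-of-range
 * value ADF_MAX_DEVICES + 1 tells the caller that allocation failed.
 */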

/**
 * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
 * @accel_dev: Pointer to acceleration device.
 * @pf: Corresponding PF if the accel_dev is a VF
 *
 * Function adds acceleration device to the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	struct list_head *itr;
	int ret = 0;

	if (num_devices == ADF_MAX_DEVICES) {
		dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
			ADF_MAX_DEVICES);
		return -EFAULT;
	}

	mutex_lock(&table_lock);
	atomic_set(&accel_dev->ref_count, 0);

	/* PF on host or VF on guest - optimized to remove redundant is_vf */
	if (!accel_dev->is_vf || !pf) {
		struct vf_id_map *map;

		list_for_each(itr, &accel_table) {
			struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

			if (ptr == accel_dev) {
				ret = -EEXIST;
				goto unlock;
			}
		}

		list_add_tail(&accel_dev->list, &accel_table);
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
		map->bdf = ~0;
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	} else if (accel_dev->is_vf && pf) {
		/* VF on host */
		struct vf_id_map *map;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (map) {
			struct vf_id_map *next;

			accel_dev->accel_id = map->id;
			list_add_tail(&accel_dev->list, &accel_table);
			map->fake_id++;
			map->attached = true;
			next = list_next_entry(map, list);
			while (next && &next->list != &vfs_table) {
				next->fake_id++;
				next = list_next_entry(next, list);
			}

			ret = 0;
			goto unlock;
		}

		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			kfree(map);
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		list_add_tail(&accel_dev->list, &accel_table);
		map->bdf = adf_get_vf_num(accel_dev);
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	}
unlock:
	mutex_unlock(&table_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
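
/*
 * Minimal usage sketch (hypothetical driver code; my_qat_probe and its
 * details are invented for illustration): a device specific driver
 * registers with the framework early in probe, passing NULL as @pf for
 * a PF, and unwinds on failure.
 *
 *	static int my_qat_probe(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		struct adf_accel_dev *accel_dev;
 *		int ret;
 *
 *		accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
 *					 dev_to_node(&pdev->dev));
 *		if (!accel_dev)
 *			return -ENOMEM;
 *
 *		ret = adf_devmgr_add_dev(accel_dev, NULL);
 *		if (ret) {
 *			kfree(accel_dev);
 *			return ret;
 *		}
 *		return 0;
 *	}
 */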

struct list_head *adf_devmgr_get_head(void)
{
	return &accel_table;
}

/**
 * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
 * @accel_dev: Pointer to acceleration device.
 * @pf: Corresponding PF if the accel_dev is a VF
 *
 * Function removes acceleration device from the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	mutex_lock(&table_lock);
	/* PF on host or VF on guest - optimized to remove redundant is_vf */
	if (!accel_dev->is_vf || !pf) {
		id_map[accel_dev->accel_id] = 0;
		num_devices--;
	} else if (accel_dev->is_vf && pf) {
		struct vf_id_map *map, *next;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (!map) {
			dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
			goto unlock;
		}
		map->fake_id--;
		map->attached = false;
		next = list_next_entry(map, list);
		while (next && &next->list != &vfs_table) {
			next->fake_id--;
			next = list_next_entry(next, list);
		}
	}
unlock:
	list_del(&accel_dev->list);
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
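
/*
 * For illustration: if vfs_table holds three attached VFs with fake_ids
 * {0, 1, 2} and the first one is removed, its fake_id drops out of the
 * live range while the entries after it shift down to {0, 1}, keeping
 * the user-visible ids contiguous; the real ids in map->id never change.
 */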

struct adf_accel_dev *adf_devmgr_get_first(void)
{
	struct adf_accel_dev *dev = NULL;

	if (!list_empty(&accel_table))
		dev = list_first_entry(&accel_table, struct adf_accel_dev,
				       list);
	return dev;
}

/**
 * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
 * @pci_dev: Pointer to PCI device.
 *
 * Function returns acceleration device associated with the given PCI device.
 * To be used by QAT device specific drivers.
 *
 * Return: pointer to accel_dev or NULL if not found.
 */
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
{
	struct list_head *itr;

	mutex_lock(&table_lock);
	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

		if (ptr->accel_pci_dev.pci_dev == pci_dev) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
	mutex_unlock(&table_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
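
/*
 * Usage sketch (hypothetical my_qat_remove, invented for illustration):
 * a remove path looks the device up from its pci_dev before detaching it.
 *
 *	static void my_qat_remove(struct pci_dev *pdev)
 *	{
 *		struct adf_accel_dev *accel_dev =
 *			adf_devmgr_pci_to_accel_dev(pdev);
 *
 *		if (!accel_dev)
 *			return;
 *		adf_devmgr_rm_dev(accel_dev, NULL);
 *		kfree(accel_dev);
 *	}
 */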

struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
{
	struct list_head *itr;
	int real_id;

	mutex_lock(&table_lock);
	real_id = adf_get_vf_real_id(id);
	if (real_id < 0)
		goto unlock;

	id = real_id;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);
		if (ptr->accel_id == id) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
unlock:
	mutex_unlock(&table_lock);
	return NULL;
}

int adf_devmgr_verify_id(u32 id)
{
	if (id == ADF_CFG_ALL_DEVICES)
		return 0;

	if (adf_devmgr_get_dev_by_id(id))
		return 0;

	return -ENODEV;
}

static int adf_get_num_dettached_vfs(void)
{
	struct list_head *itr;
	int vfs = 0;

	mutex_lock(&table_lock);
	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);
		if (ptr->bdf != ~0 && !ptr->attached)
			vfs++;
	}
	mutex_unlock(&table_lock);
	return vfs;
}

void adf_devmgr_get_num_dev(u32 *num)
{
	*num = num_devices - adf_get_num_dettached_vfs();
}

/**
 * adf_dev_in_use() - Check whether accel_dev is currently in use
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
	return atomic_read(&accel_dev->ref_count) != 0;
}
EXPORT_SYMBOL_GPL(adf_dev_in_use);

/**
 * adf_dev_get() - Increment accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Increment the accel_dev refcount and if this is the first time
 * incrementing it during this period the accel_dev is in use,
 * increment the module refcount too.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 when successful, -EFAULT when the module refcount
 *	cannot be bumped.
 */
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
		if (!try_module_get(accel_dev->owner))
			return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_get);

/**
 * adf_dev_put() - Decrement accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Decrement the accel_dev refcount and if this is the last time
 * decrementing it during this period the accel_dev is in use,
 * decrement the module refcount too.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
		module_put(accel_dev->owner);
}
EXPORT_SYMBOL_GPL(adf_dev_put);
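
/*
 * Typical pattern (sketch; the caller code and submit_work() are invented
 * for illustration): hold a reference for as long as work is outstanding
 * on the device, and drop it when done.
 *
 *	if (adf_dev_get(accel_dev))
 *		return -EFAULT;
 *	ret = submit_work(accel_dev);
 *	adf_dev_put(accel_dev);
 */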

/**
 * adf_devmgr_in_reset() - Check whether device is in reset
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device is being reset, 0 otherwise.
 */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);

/**
 * adf_dev_started() - Check whether device has started
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise
 */
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_dev_started);
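
/*
 * Sketch (hypothetical caller, for illustration): a service can combine
 * these state checks before submitting requests to the device.
 *
 *	if (!adf_dev_started(accel_dev) || adf_devmgr_in_reset(accel_dev))
 *		return -EBUSY;
 */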