// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Device Feature List (DFL) Support
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Zhang Yi <yi.z.zhang@intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */
#include <linux/fpga-dfl.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include "dfl.h"
static DEFINE_MUTEX(dfl_id_mutex);
/*
 * When adding support for a new feature device to the DFL framework, add a
 * new item to enum dfl_id_type and provide the related information in the
 * dfl_devs table below, which is indexed by dfl_id_type, e.g. the name string
 * used for platform device creation (define name strings in dfl.h, as they
 * could be reused by platform device drivers).
 *
 * If the new feature device needs chardev support, also add a new item to the
 * dfl_chrdevs table and configure dfl_devs[i].devt_type as the index into
 * dfl_chrdevs. If no chardev support is needed, just set devt_type to an
 * invalid index (DFL_FPGA_DEVT_MAX).
 */
enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,
	DFL_FPGA_DEVT_PORT,
	DFL_FPGA_DEVT_MAX,
};
static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];

static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
	"dfl-fme-pdata",
	"dfl-port-pdata",
};
/**
 * struct dfl_dev_info - dfl feature device information.
 * @name: name string of the feature platform device.
 * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
 * @id: idr id of the feature dev.
 * @devt_type: index to dfl_chrdevs[].
 */
struct dfl_dev_info {
	const char *name;
	u16 dfh_id;
	struct idr id;
	enum dfl_fpga_devt_type devt_type;
};
/* it is indexed by dfl_id_type */
static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
};
/**
 * struct dfl_chardev_info - chardev information of dfl feature device
 * @name: name string of the char device.
 * @devt: devt of the char device.
 */
struct dfl_chardev_info {
	const char *name;
	dev_t devt;
};

/* indexed by enum dfl_fpga_devt_type */
static struct dfl_chardev_info dfl_chrdevs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT},
};
static void dfl_ids_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		idr_init(&dfl_devs[i].id);
}

static void dfl_ids_destroy(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		idr_destroy(&dfl_devs[i].id);
}
static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
{
	int id;

	WARN_ON(type >= DFL_ID_MAX);
	mutex_lock(&dfl_id_mutex);
	id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
	mutex_unlock(&dfl_id_mutex);

	return id;
}
static void dfl_id_free(enum dfl_id_type type, int id)
{
	WARN_ON(type >= DFL_ID_MAX);
	mutex_lock(&dfl_id_mutex);
	idr_remove(&dfl_devs[type].id, id);
	mutex_unlock(&dfl_id_mutex);
}
static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		if (!strcmp(dfl_devs[i].name, pdev->name))
			return i;

	return DFL_ID_MAX;
}
static enum dfl_id_type dfh_id_to_type(u16 id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		if (dfl_devs[i].dfh_id == id)
			return i;

	return DFL_ID_MAX;
}
/*
 * Introduce a global port_ops list which allows port drivers to register their
 * ops, so that other feature devices (e.g. FME) can use the port functions
 * even when the related port platform device is hidden. One example is the
 * virtualization case of a PCIe-based FPGA DFL device: when SRIOV is enabled,
 * a port (and its AFU) is turned into a VF and the port platform device is
 * hidden from the system, but the FME still needs to access the port to finish
 * FPGA reconfiguration.
 */
static DEFINE_MUTEX(dfl_port_ops_mutex);
static LIST_HEAD(dfl_port_ops_list);
/**
 * dfl_fpga_port_ops_get - get matched port ops from the global list
 * @pdev: platform device to match with associated port ops.
 * Return: matched port ops on success, NULL otherwise.
 *
 * Please note that you must call dfl_fpga_port_ops_put() after using the
 * port_ops.
 */
struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
{
	struct dfl_fpga_port_ops *ops = NULL;

	mutex_lock(&dfl_port_ops_mutex);
	if (list_empty(&dfl_port_ops_list))
		goto done;

	list_for_each_entry(ops, &dfl_port_ops_list, node) {
		/* match port_ops using the name of platform device */
		if (!strcmp(pdev->name, ops->name)) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			goto done;
		}
	}

	ops = NULL;
done:
	mutex_unlock(&dfl_port_ops_mutex);
	return ops;
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
/**
 * dfl_fpga_port_ops_put - put port ops
 * @ops: port ops.
 */
void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
{
	if (ops && ops->owner)
		module_put(ops->owner);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
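
/*
 * Usage note (illustrative sketch, not additional API): a caller that needs
 * the port id typically brackets the ops with get/put, where "pdev" is the
 * port platform device:
 *
 *	struct dfl_fpga_port_ops *ops = dfl_fpga_port_ops_get(pdev);
 *
 *	if (ops && ops->get_id)
 *		port_id = ops->get_id(pdev);
 *	dfl_fpga_port_ops_put(ops);
 */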
/**
 * dfl_fpga_port_ops_add - add port_ops to global list
 * @ops: port ops to add.
 */
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_add_tail(&ops->node, &dfl_port_ops_list);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
/**
 * dfl_fpga_port_ops_del - remove port_ops from global list
 * @ops: port ops to del.
 */
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_del(&ops->node);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
/**
 * dfl_fpga_check_port_id - check the port id
 * @pdev: port platform device.
 * @pport_id: port id to compare.
 *
 * Return: 1 if port device matches with given port id, otherwise 0.
 */
int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fpga_port_ops *port_ops;

	if (pdata->id != FEATURE_DEV_ID_UNUSED)
		return pdata->id == *(int *)pport_id;

	port_ops = dfl_fpga_port_ops_get(pdev);
	if (!port_ops || !port_ops->get_id)
		return 0;

	pdata->id = port_ops->get_id(pdev);
	dfl_fpga_port_ops_put(port_ops);

	return pdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
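
/*
 * Note: this helper is shaped so that it can be passed directly as the @match
 * callback of __dfl_fpga_cdev_find_port(), e.g. (sketch):
 *
 *	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
 *					      dfl_fpga_check_port_id);
 */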
static DEFINE_IDA(dfl_device_ida);
static const struct dfl_device_id *
dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev)
{
	if (id->type == ddev->type && id->feature_id == ddev->feature_id)
		return id;

	return NULL;
}

static int dfl_bus_match(struct device *dev, struct device_driver *drv)
{
	struct dfl_device *ddev = to_dfl_dev(dev);
	struct dfl_driver *ddrv = to_dfl_drv(drv);
	const struct dfl_device_id *id_entry;

	id_entry = ddrv->id_table;
	if (id_entry) {
		while (id_entry->feature_id) {
			if (dfl_match_one_device(id_entry, ddev)) {
				ddev->id_entry = id_entry;
				return 1;
			}
			id_entry++;
		}
	}

	return 0;
}
static int dfl_bus_probe(struct device *dev)
{
	struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
	struct dfl_device *ddev = to_dfl_dev(dev);

	return ddrv->probe(ddev);
}
static int dfl_bus_remove(struct device *dev)
{
	struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
	struct dfl_device *ddev = to_dfl_dev(dev);

	if (ddrv->remove)
		ddrv->remove(ddev);

	return 0;
}
static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	/* The type has 4 valid bits and feature_id has 12 valid bits */
	return add_uevent_var(env, "MODALIAS=dfl:t%01Xf%03X",
			      ddev->type, ddev->feature_id);
}
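
/*
 * For example (illustrative values): a dfl_device with type 0x1 and
 * feature_id 0xf reports "MODALIAS=dfl:t1f00F", which is the string that
 * module aliases generated from struct dfl_device_id are matched against.
 */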
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	return sprintf(buf, "0x%x\n", ddev->type);
}
static DEVICE_ATTR_RO(type);
static ssize_t
feature_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	return sprintf(buf, "0x%x\n", ddev->feature_id);
}
static DEVICE_ATTR_RO(feature_id);
static struct attribute *dfl_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_feature_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dfl_dev);
static struct bus_type dfl_bus_type = {
	.name		= "dfl",
	.match		= dfl_bus_match,
	.probe		= dfl_bus_probe,
	.remove		= dfl_bus_remove,
	.uevent		= dfl_bus_uevent,
	.dev_groups	= dfl_dev_groups,
};
static void release_dfl_dev(struct device *dev)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	if (ddev->mmio_res.parent)
		release_resource(&ddev->mmio_res);

	ida_simple_remove(&dfl_device_ida, ddev->id);
	kfree(ddev->irqs);
	kfree(ddev);
}
static struct dfl_device *
dfl_dev_add(struct dfl_feature_platform_data *pdata,
	    struct dfl_feature *feature)
{
	struct platform_device *pdev = pdata->dev;
	struct resource *parent_res;
	struct dfl_device *ddev;
	int id, i, ret;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return ERR_PTR(-ENOMEM);

	id = ida_simple_get(&dfl_device_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(&pdev->dev, "unable to get id\n");
		kfree(ddev);
		return ERR_PTR(id);
	}

	/* freeing resources by put_device() after device_initialize() */
	device_initialize(&ddev->dev);
	ddev->dev.parent = &pdev->dev;
	ddev->dev.bus = &dfl_bus_type;
	ddev->dev.release = release_dfl_dev;
	ddev->id = id;
	ret = dev_set_name(&ddev->dev, "dfl_dev.%d", id);
	if (ret)
		goto put_dev;

	ddev->type = feature_dev_id_type(pdev);
	ddev->feature_id = feature->id;
	ddev->cdev = pdata->dfl_cdev;

	/* add mmio resource */
	parent_res = &pdev->resource[feature->resource_index];
	ddev->mmio_res.flags = IORESOURCE_MEM;
	ddev->mmio_res.start = parent_res->start;
	ddev->mmio_res.end = parent_res->end;
	ddev->mmio_res.name = dev_name(&ddev->dev);
	ret = insert_resource(parent_res, &ddev->mmio_res);
	if (ret) {
		dev_err(&pdev->dev, "%s failed to claim resource: %pR\n",
			dev_name(&ddev->dev), &ddev->mmio_res);
		goto put_dev;
	}

	/* then add irq resource */
	if (feature->nr_irqs) {
		ddev->irqs = kcalloc(feature->nr_irqs,
				     sizeof(*ddev->irqs), GFP_KERNEL);
		if (!ddev->irqs) {
			ret = -ENOMEM;
			goto put_dev;
		}

		for (i = 0; i < feature->nr_irqs; i++)
			ddev->irqs[i] = feature->irq_ctx[i].irq;

		ddev->num_irqs = feature->nr_irqs;
	}

	ret = device_add(&ddev->dev);
	if (ret)
		goto put_dev;

	dev_dbg(&pdev->dev, "add dfl_dev: %s\n", dev_name(&ddev->dev));
	return ddev;

put_dev:
	/* calls release_dfl_dev() which does the clean up */
	put_device(&ddev->dev);
	return ERR_PTR(ret);
}
static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
{
	struct dfl_feature *feature;

	dfl_fpga_dev_for_each_feature(pdata, feature) {
		if (feature->ddev) {
			device_unregister(&feature->ddev->dev);
			feature->ddev = NULL;
		}
	}
}
static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
{
	struct dfl_feature *feature;
	struct dfl_device *ddev;
	int ret;

	dfl_fpga_dev_for_each_feature(pdata, feature) {
		if (feature->ioaddr)
			continue;

		if (feature->ddev) {
			ret = -EEXIST;
			goto err;
		}

		ddev = dfl_dev_add(pdata, feature);
		if (IS_ERR(ddev)) {
			ret = PTR_ERR(ddev);
			goto err;
		}

		feature->ddev = ddev;
	}

	return 0;

err:
	dfl_devs_remove(pdata);
	return ret;
}
int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner)
{
	if (!dfl_drv || !dfl_drv->probe || !dfl_drv->id_table)
		return -EINVAL;

	dfl_drv->drv.owner = owner;
	dfl_drv->drv.bus = &dfl_bus_type;

	return driver_register(&dfl_drv->drv);
}
EXPORT_SYMBOL(__dfl_driver_register);

void dfl_driver_unregister(struct dfl_driver *dfl_drv)
{
	driver_unregister(&dfl_drv->drv);
}
EXPORT_SYMBOL(dfl_driver_unregister);
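
/*
 * Illustrative sketch of a minimal DFL driver (assumptions: the
 * dfl_driver_register() convenience wrapper around __dfl_driver_register()
 * from dfl.h, and a hypothetical private feature id 0x10 on the FME):
 *
 *	static const struct dfl_device_id my_ids[] = {
 *		{ .type = FME_ID, .feature_id = 0x10 },
 *		{ }
 *	};
 *
 *	static int my_probe(struct dfl_device *ddev)
 *	{
 *		void __iomem *base = devm_ioremap_resource(&ddev->dev,
 *							   &ddev->mmio_res);
 *		return PTR_ERR_OR_ZERO(base);
 *	}
 *
 *	static struct dfl_driver my_drv = {
 *		.drv		= { .name = "my-feature" },
 *		.id_table	= my_ids,
 *		.probe		= my_probe,
 *	};
 *
 * registered with dfl_driver_register(&my_drv) from module init and removed
 * with dfl_driver_unregister(&my_drv) on exit.
 */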
#define is_header_feature(feature) ((feature)->id == FEATURE_ID_FIU_HEADER)

/**
 * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
 * @pdev: feature device.
 */
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_feature *feature;

	dfl_devs_remove(pdata);

	dfl_fpga_dev_for_each_feature(pdata, feature) {
		if (feature->ops) {
			if (feature->ops->uinit)
				feature->ops->uinit(pdev, feature);
			feature->ops = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
static int dfl_feature_instance_init(struct platform_device *pdev,
				     struct dfl_feature_platform_data *pdata,
				     struct dfl_feature *feature,
				     struct dfl_feature_driver *drv)
{
	void __iomem *base;
	int ret = 0;

	if (!is_header_feature(feature)) {
		base = devm_platform_ioremap_resource(pdev,
						      feature->resource_index);
		if (IS_ERR(base)) {
			dev_err(&pdev->dev,
				"ioremap failed for feature 0x%x!\n",
				feature->id);
			return PTR_ERR(base);
		}

		feature->ioaddr = base;
	}

	if (drv->ops->init) {
		ret = drv->ops->init(pdev, feature);
		if (ret)
			return ret;
	}

	feature->ops = drv->ops;

	return ret;
}
static bool dfl_feature_drv_match(struct dfl_feature *feature,
				  struct dfl_feature_driver *driver)
{
	const struct dfl_feature_id *ids = driver->id_table;

	if (ids) {
		while (ids->id) {
			if (ids->id == feature->id)
				return true;
			ids++;
		}
	}
	return false;
}
/**
 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
 * @pdev: feature device.
 * @feature_drvs: drvs for sub features.
 *
 * This function will match sub features with given feature drvs list and
 * use matched drv to init related sub feature.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
			      struct dfl_feature_driver *feature_drvs)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_feature_driver *drv = feature_drvs;
	struct dfl_feature *feature;
	int ret;

	while (drv->ops) {
		dfl_fpga_dev_for_each_feature(pdata, feature) {
			if (dfl_feature_drv_match(feature, drv)) {
				ret = dfl_feature_instance_init(pdev, pdata,
								feature, drv);
				if (ret)
					goto exit;
			}
		}
		drv++;
	}

	ret = dfl_devs_add(pdata);
	if (ret)
		goto exit;

	return 0;
exit:
	dfl_fpga_dev_feature_uinit(pdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
static void dfl_chardev_uinit(void)
{
	int i;

	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
		if (MAJOR(dfl_chrdevs[i].devt)) {
			unregister_chrdev_region(dfl_chrdevs[i].devt,
						 MINORMASK + 1);
			dfl_chrdevs[i].devt = MKDEV(0, 0);
		}
}
static int dfl_chardev_init(void)
{
	int i, ret;

	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
		ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
					  MINORMASK + 1, dfl_chrdevs[i].name);
		if (ret)
			goto exit;
	}

	return 0;

exit:
	dfl_chardev_uinit();
	return ret;
}
static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
{
	if (type >= DFL_FPGA_DEVT_MAX)
		return 0;

	return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
}
/**
 * dfl_fpga_dev_ops_register - register cdev ops for feature dev
 *
 * @pdev: feature dev.
 * @fops: file operations for feature dev's cdev.
 * @owner: owning module/driver.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_ops_register(struct platform_device *pdev,
			      const struct file_operations *fops,
			      struct module *owner)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_init(&pdata->cdev, fops);
	pdata->cdev.owner = owner;

	/*
	 * set parent to the feature device so that its refcount is
	 * decreased after the last refcount of cdev is gone, that
	 * makes sure the feature device is valid during device
	 * file's life-cycle.
	 */
	pdata->cdev.kobj.parent = &pdev->dev.kobj;

	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
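
/*
 * Illustrative sketch (hypothetical names): a feature device driver's probe
 * typically registers its file_operations with the helper above,
 *
 *	static int afu_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
 *	}
 *
 * and calls dfl_fpga_dev_ops_unregister(pdev) on its remove path.
 */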
/**
 * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
 * @pdev: feature dev.
 */
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_del(&pdata->cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
/**
 * struct build_feature_devs_info - info collected during feature dev build.
 *
 * @dev: device to enumerate.
 * @cdev: the container device for all feature devices.
 * @nr_irqs: number of irqs for all feature devices.
 * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
 *	       this device.
 * @feature_dev: current feature device.
 * @ioaddr: header register region address of current FIU in enumeration.
 * @start: register resource start of current FIU.
 * @len: max register resource length of current FIU.
 * @sub_features: a sub features linked list for feature device in enumeration.
 * @feature_num: number of sub features for feature device in enumeration.
 */
struct build_feature_devs_info {
	struct device *dev;
	struct dfl_fpga_cdev *cdev;
	unsigned int nr_irqs;
	int *irq_table;

	struct platform_device *feature_dev;
	void __iomem *ioaddr;
	resource_size_t start;
	resource_size_t len;
	struct list_head sub_features;
	int feature_num;
};
/**
 * struct dfl_feature_info - sub feature info collected during feature dev build
 *
 * @fid: id of this sub feature.
 * @mmio_res: mmio resource of this sub feature.
 * @ioaddr: mapped base address of mmio resource.
 * @node: node in sub_features linked list.
 * @irq_base: start of irq index in this sub feature.
 * @nr_irqs: number of irqs of this sub feature.
 */
struct dfl_feature_info {
	u16 fid;
	struct resource mmio_res;
	void __iomem *ioaddr;
	struct list_head node;
	unsigned int irq_base;
	unsigned int nr_irqs;
};
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
				       struct platform_device *port)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);

	mutex_lock(&cdev->lock);
	list_add(&pdata->node, &cdev->port_dev_list);
	get_device(&pdata->dev->dev);
	mutex_unlock(&cdev->lock);
}
/*
 * register current feature device, it is called when we need to switch to
 * another feature parsing or we have parsed all features on given device
 * feature list.
 */
static int build_info_commit_dev(struct build_feature_devs_info *binfo)
{
	struct platform_device *fdev = binfo->feature_dev;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature_info *finfo, *p;
	enum dfl_id_type type;
	int ret, index = 0, res_idx = 0;

	type = feature_dev_id_type(fdev);
	if (WARN_ON_ONCE(type >= DFL_ID_MAX))
		return -EINVAL;

	/*
	 * we do not need to care for the memory which is associated with
	 * the platform device. After calling platform_device_unregister(),
	 * it will be automatically freed by device's release() callback,
	 * platform_device_release().
	 */
	pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->dev = fdev;
	pdata->num = binfo->feature_num;
	pdata->dfl_cdev = binfo->cdev;
	pdata->id = FEATURE_DEV_ID_UNUSED;
	mutex_init(&pdata->lock);
	lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
				   dfl_pdata_key_strings[type]);

	/*
	 * the count should be initialized to 0 to make sure
	 * __fpga_port_enable() following __fpga_port_disable()
	 * works properly for port device.
	 * and it should always be 0 for fme device.
	 */
	WARN_ON(pdata->disable_count);

	fdev->dev.platform_data = pdata;

	/* each sub feature has one MMIO resource */
	fdev->num_resources = binfo->feature_num;
	fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
				 GFP_KERNEL);
	if (!fdev->resource)
		return -ENOMEM;

	/* fill features and resource information for feature dev */
	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
		struct dfl_feature *feature = &pdata->features[index++];
		struct dfl_feature_irq_ctx *ctx;
		unsigned int i;

		/* save resource information for each feature */
		feature->dev = fdev;
		feature->id = finfo->fid;

		/*
		 * the FIU header feature has some fundamental functions (sriov
		 * set, port enable/disable) needed for the dfl bus device and
		 * other sub features. So its mmio resource should be mapped by
		 * DFL bus device. And we should not assign it to feature
		 * devices (dfl-fme/afu) again.
		 */
		if (is_header_feature(feature)) {
			feature->resource_index = -1;
			feature->ioaddr =
				devm_ioremap_resource(binfo->dev,
						      &finfo->mmio_res);
			if (IS_ERR(feature->ioaddr))
				return PTR_ERR(feature->ioaddr);
		} else {
			feature->resource_index = res_idx;
			fdev->resource[res_idx++] = finfo->mmio_res;
		}

		if (finfo->nr_irqs) {
			ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
					   sizeof(*ctx), GFP_KERNEL);
			if (!ctx)
				return -ENOMEM;

			for (i = 0; i < finfo->nr_irqs; i++)
				ctx[i].irq =
					binfo->irq_table[finfo->irq_base + i];

			feature->irq_ctx = ctx;
			feature->nr_irqs = finfo->nr_irqs;
		}

		list_del(&finfo->node);
		kfree(finfo);
	}

	ret = platform_device_add(binfo->feature_dev);
	if (!ret) {
		if (type == PORT_ID)
			dfl_fpga_cdev_add_port_dev(binfo->cdev,
						   binfo->feature_dev);
		else
			binfo->cdev->fme_dev =
					get_device(&binfo->feature_dev->dev);
		/*
		 * reset it to avoid build_info_free() freeing their resource.
		 *
		 * The resource of successfully registered feature devices
		 * will be freed by platform_device_unregister(). See the
		 * comments in build_info_create_dev().
		 */
		binfo->feature_dev = NULL;
	}

	return ret;
}
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
		      enum dfl_id_type type)
{
	struct platform_device *fdev;

	if (type >= DFL_ID_MAX)
		return -EINVAL;

	/*
	 * we use -ENODEV as the initialization indicator which indicates
	 * whether the id need to be reclaimed
	 */
	fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
	if (!fdev)
		return -ENOMEM;

	binfo->feature_dev = fdev;
	binfo->feature_num = 0;

	INIT_LIST_HEAD(&binfo->sub_features);

	fdev->id = dfl_id_alloc(type, &fdev->dev);
	if (fdev->id < 0)
		return fdev->id;

	fdev->dev.parent = &binfo->cdev->region->dev;
	fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);

	return 0;
}
static void build_info_free(struct build_feature_devs_info *binfo)
{
	struct dfl_feature_info *finfo, *p;

	/*
	 * it is a valid id, free it. See comments in
	 * build_info_create_dev()
	 */
	if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
		dfl_id_free(feature_dev_id_type(binfo->feature_dev),
			    binfo->feature_dev->id);

		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
			list_del(&finfo->node);
			kfree(finfo);
		}
	}

	platform_device_put(binfo->feature_dev);

	devm_kfree(binfo->dev, binfo);
}
static inline u32 feature_size(void __iomem *start)
{
	u64 v = readq(start + DFH);
	u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
	/* workaround for private features with invalid size, use 4K instead */
	return ofst ? ofst : 4096;
}
static u16 feature_id(void __iomem *start)
{
	u64 v = readq(start + DFH);
	u16 id = FIELD_GET(DFH_ID, v);
	u8 type = FIELD_GET(DFH_TYPE, v);

	if (type == DFH_TYPE_FIU)
		return FEATURE_ID_FIU_HEADER;
	else if (type == DFH_TYPE_PRIVATE)
		return id;
	else if (type == DFH_TYPE_AFU)
		return FEATURE_ID_AFU;

	WARN_ON(1);
	return 0;
}
static int parse_feature_irqs(struct build_feature_devs_info *binfo,
			      resource_size_t ofst, u16 fid,
			      unsigned int *irq_base, unsigned int *nr_irqs)
{
	void __iomem *base = binfo->ioaddr + ofst;
	unsigned int i, ibase, inr = 0;
	int virq;
	u64 v;

	/*
	 * Ideally the DFL framework should only read info from the DFL header,
	 * but the current version of DFL only provides mmio resource
	 * information for each feature in the DFL header, with no field for
	 * interrupt resources. Interrupt resource information is provided by
	 * specific mmio registers of each private feature which supports
	 * interrupts. So in order to parse and assign irq resources, the DFL
	 * framework has to look into specific capability registers of these
	 * private features.
	 *
	 * Once a future DFL version supports generic interrupt resource
	 * information in common DFL headers, the generic interrupt parsing
	 * code will be added. But in order to stay compatible with old version
	 * DFL, the driver may still fall back to these quirks.
	 */
	switch (fid) {
	case PORT_FEATURE_ID_UINT:
		v = readq(base + PORT_UINT_CAP);
		ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
		inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
		break;
	case PORT_FEATURE_ID_ERROR:
		v = readq(base + PORT_ERROR_CAP);
		ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
		inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
		break;
	case FME_FEATURE_ID_GLOBAL_ERR:
		v = readq(base + FME_ERROR_CAP);
		ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
		inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
		break;
	default:
		*irq_base = 0;
		*nr_irqs = 0;
		return 0;
	}

	dev_dbg(binfo->dev, "feature: 0x%x, irq_base: %u, nr_irqs: %u\n",
		fid, ibase, inr);

	if (ibase + inr > binfo->nr_irqs) {
		dev_err(binfo->dev,
			"Invalid interrupt number in feature 0x%x\n", fid);
		return -EINVAL;
	}

	for (i = 0; i < inr; i++) {
		virq = binfo->irq_table[ibase + i];
		if (virq < 0 || virq > NR_IRQS) {
			dev_err(binfo->dev,
				"Invalid irq table entry for feature 0x%x\n",
				fid);
			return -EINVAL;
		}
	}

	*irq_base = ibase;
	*nr_irqs = inr;

	return 0;
}
/*
 * When creating sub feature instances, private features do not need to
 * provide a resource size and feature id, as they can be read from the DFH
 * register. For the AFU sub feature, its register region only contains user
 * defined registers, so never trust any information from it; just use the
 * resource size information provided by its parent FIU.
 */
static int
create_feature_instance(struct build_feature_devs_info *binfo,
			resource_size_t ofst, resource_size_t size, u16 fid)
{
	unsigned int irq_base, nr_irqs;
	struct dfl_feature_info *finfo;
	int ret;

	/* read feature size and id if inputs are invalid */
	size = size ? size : feature_size(binfo->ioaddr + ofst);
	fid = fid ? fid : feature_id(binfo->ioaddr + ofst);

	if (binfo->len - ofst < size)
		return -EINVAL;

	ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
	if (ret)
		return ret;

	finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
	if (!finfo)
		return -ENOMEM;

	finfo->fid = fid;
	finfo->mmio_res.start = binfo->start + ofst;
	finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
	finfo->mmio_res.flags = IORESOURCE_MEM;
	finfo->irq_base = irq_base;
	finfo->nr_irqs = nr_irqs;

	list_add_tail(&finfo->node, &binfo->sub_features);
	binfo->feature_num++;

	return 0;
}
static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
				  resource_size_t ofst)
{
	u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
	u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;

	WARN_ON(!size);

	return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
}
#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)

static int parse_feature_afu(struct build_feature_devs_info *binfo,
			     resource_size_t ofst)
{
	if (!is_feature_dev_detected(binfo)) {
		dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
		return -EINVAL;
	}

	switch (feature_dev_id_type(binfo->feature_dev)) {
	case PORT_ID:
		return parse_feature_port_afu(binfo, ofst);
	default:
		dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
			 binfo->feature_dev->name);
	}

	return 0;
}
static int build_info_prepare(struct build_feature_devs_info *binfo,
			      resource_size_t start, resource_size_t len)
{
	struct device *dev = binfo->dev;
	void __iomem *ioaddr;

	if (!devm_request_mem_region(dev, start, len, dev_name(dev))) {
		dev_err(dev, "request region fail, start:%pa, len:%pa\n",
			&start, &len);
		return -EBUSY;
	}

	ioaddr = devm_ioremap(dev, start, len);
	if (!ioaddr) {
		dev_err(dev, "ioremap region fail, start:%pa, len:%pa\n",
			&start, &len);
		return -ENOMEM;
	}

	binfo->start = start;
	binfo->len = len;
	binfo->ioaddr = ioaddr;

	return 0;
}
static void build_info_complete(struct build_feature_devs_info *binfo)
{
	devm_iounmap(binfo->dev, binfo->ioaddr);
	devm_release_mem_region(binfo->dev, binfo->start, binfo->len);
}
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
			     resource_size_t ofst)
{
	int ret = 0;
	u32 offset;
	u16 id;
	u64 v;

	if (is_feature_dev_detected(binfo)) {
		build_info_complete(binfo);

		ret = build_info_commit_dev(binfo);
		if (ret)
			return ret;

		ret = build_info_prepare(binfo, binfo->start + ofst,
					 binfo->len - ofst);
		if (ret)
			return ret;
	}

	v = readq(binfo->ioaddr + DFH);
	id = FIELD_GET(DFH_ID, v);

	/* create platform device for dfl feature dev */
	ret = build_info_create_dev(binfo, dfh_id_to_type(id));
	if (ret)
		return ret;

	ret = create_feature_instance(binfo, 0, 0, 0);
	if (ret)
		return ret;
	/*
	 * find and parse FIU's child AFU via its NEXT_AFU register.
	 * please note that only Port has valid NEXT_AFU pointer per spec.
	 */
	v = readq(binfo->ioaddr + NEXT_AFU);

	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
	if (offset)
		return parse_feature_afu(binfo, offset);

	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);

	return ret;
}
static int parse_feature_private(struct build_feature_devs_info *binfo,
				 resource_size_t ofst)
{
	if (!is_feature_dev_detected(binfo)) {
		dev_err(binfo->dev, "the private feature 0x%x does not belong to any AFU.\n",
			feature_id(binfo->ioaddr + ofst));
		return -EINVAL;
	}

	return create_feature_instance(binfo, ofst, 0, 0);
}
/**
 * parse_feature - parse a feature on given device feature list
 *
 * @binfo: build feature devices information.
 * @ofst: offset to current FIU header
 */
static int parse_feature(struct build_feature_devs_info *binfo,
			 resource_size_t ofst)
{
	u64 v;
	u32 type;

	v = readq(binfo->ioaddr + ofst + DFH);
	type = FIELD_GET(DFH_TYPE, v);

	switch (type) {
	case DFH_TYPE_AFU:
		return parse_feature_afu(binfo, ofst);
	case DFH_TYPE_PRIVATE:
		return parse_feature_private(binfo, ofst);
	case DFH_TYPE_FIU:
		return parse_feature_fiu(binfo, ofst);
	default:
		dev_info(binfo->dev,
			 "Feature Type %x is not supported.\n", type);
	}

	return 0;
}
static int parse_feature_list(struct build_feature_devs_info *binfo,
			      resource_size_t start, resource_size_t len)
{
	resource_size_t end = start + len;
	int ret = 0;
	u32 ofst = 0;
	u64 v;

	ret = build_info_prepare(binfo, start, len);
	if (ret)
		return ret;

	/* walk through the device feature list via DFH's next DFH pointer. */
	for (; start < end; start += ofst) {
		if (end - start < DFH_SIZE) {
			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
			return -EINVAL;
		}

		ret = parse_feature(binfo, start - binfo->start);
		if (ret)
			return ret;

		v = readq(binfo->ioaddr + start - binfo->start + DFH);
		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

		/* stop parsing if EOL(End of List) is set or offset is 0 */
		if ((v & DFH_EOL) || !ofst)
			break;
	}

	/* commit current feature device when reach the end of list */
	build_info_complete(binfo);

	if (is_feature_dev_detected(binfo))
		ret = build_info_commit_dev(binfo);

	return ret;
}
struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
{
	struct dfl_fpga_enum_info *info;

	get_device(dev);

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		put_device(dev);
		return NULL;
	}

	info->dev = dev;
	INIT_LIST_HEAD(&info->dfls);

	return info;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
{
	struct dfl_fpga_enum_dfl *tmp, *dfl;
	struct device *dev;

	if (!info)
		return;

	dev = info->dev;

	/* remove all device feature lists in the list. */
	list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
		list_del(&dfl->node);
		devm_kfree(dev, dfl);
	}

	/* remove irq table */
	if (info->irq_table)
		devm_kfree(dev, info->irq_table);

	devm_kfree(dev, info);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
/**
 * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @start: mmio resource address of the device feature list.
 * @len: mmio resource length of the device feature list.
 *
 * One FPGA device may have one or more Device Feature Lists (DFLs), use this
 * function to add information of each DFL to common data structure for next
 * step enumeration.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
			       resource_size_t start, resource_size_t len)
{
	struct dfl_fpga_enum_dfl *dfl;

	dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
	if (!dfl)
		return -ENOMEM;

	dfl->start = start;
	dfl->len = len;

	list_add_tail(&dfl->node, &info->dfls);

	return 0;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
/**
 * dfl_fpga_enum_info_add_irq - add irq table to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @nr_irqs: number of irqs of the DFL fpga device to be enumerated.
 * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
 *	       this device.
 *
 * One FPGA device may have several interrupts. This function adds irq
 * information of the DFL fpga device to enum info for next step enumeration.
 * This function should be called before dfl_fpga_feature_devs_enumerate().
 * As we only support one irq domain for all DFLs in the same enum info, adding
 * an irq table a second time for the same enum info will return an error.
 *
 * If we need to enumerate DFLs which belong to different irq domains, we
 * should fill more enum info and enumerate them one by one.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
			       unsigned int nr_irqs, int *irq_table)
{
	if (!nr_irqs || !irq_table)
		return -EINVAL;

	if (info->irq_table)
		return -EEXIST;

	info->irq_table = devm_kmemdup(info->dev, irq_table,
				       sizeof(int) * nr_irqs, GFP_KERNEL);
	if (!info->irq_table)
		return -ENOMEM;

	info->nr_irqs = nr_irqs;

	return 0;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
static int remove_feature_dev(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	enum dfl_id_type type = feature_dev_id_type(pdev);
	int id = pdev->id;

	platform_device_unregister(pdev);

	dfl_id_free(type, id);

	return 0;
}

static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
{
	device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
}
/**
 * dfl_fpga_feature_devs_enumerate - enumerate feature devices
 * @info: information for enumeration.
 *
 * This function creates a container device (base FPGA region), enumerates
 * feature devices based on the enumeration info and creates platform devices
 * under the container device.
 *
 * Return: dfl_fpga_cdev struct on success, -errno on failure
 */
struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
{
	struct build_feature_devs_info *binfo;
	struct dfl_fpga_enum_dfl *dfl;
	struct dfl_fpga_cdev *cdev;
	int ret = 0;

	if (!info->dev)
		return ERR_PTR(-ENODEV);

	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
	if (!cdev->region) {
		ret = -ENOMEM;
		goto free_cdev_exit;
	}

	cdev->parent = info->dev;
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->port_dev_list);

	ret = fpga_region_register(cdev->region);
	if (ret)
		goto free_cdev_exit;

	/* create and init build info for enumeration */
	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		ret = -ENOMEM;
		goto unregister_region_exit;
	}

	binfo->dev = info->dev;
	binfo->cdev = cdev;

	binfo->nr_irqs = info->nr_irqs;
	if (info->nr_irqs)
		binfo->irq_table = info->irq_table;

	/*
	 * start enumeration for all feature devices based on Device Feature
	 * Lists.
	 */
	list_for_each_entry(dfl, &info->dfls, node) {
		ret = parse_feature_list(binfo, dfl->start, dfl->len);
		if (ret) {
			remove_feature_devs(cdev);
			build_info_free(binfo);
			goto unregister_region_exit;
		}
	}

	build_info_free(binfo);

	return cdev;

unregister_region_exit:
	fpga_region_unregister(cdev->region);
free_cdev_exit:
	devm_kfree(info->dev, cdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
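
/*
 * Illustrative sketch of the enumeration flow (hypothetical names: "pcidev",
 * "dfl_start", "dfl_len", "nr_irqs" and "irq_table" stand in for whatever the
 * bus driver discovered): a PCIe (or other bus) driver would typically do
 *
 *	struct dfl_fpga_enum_info *info;
 *	struct dfl_fpga_cdev *cdev;
 *
 *	info = dfl_fpga_enum_info_alloc(&pcidev->dev);
 *	if (!info)
 *		return -ENOMEM;
 *
 *	dfl_fpga_enum_info_add_dfl(info, dfl_start, dfl_len);
 *	dfl_fpga_enum_info_add_irq(info, nr_irqs, irq_table);
 *
 *	cdev = dfl_fpga_feature_devs_enumerate(info);
 *	dfl_fpga_enum_info_free(info);
 *	if (IS_ERR(cdev))
 *		return PTR_ERR(cdev);
 *
 * and later tear everything down with dfl_fpga_feature_devs_remove(cdev).
 */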
/**
 * dfl_fpga_feature_devs_remove - remove all feature devices
 * @cdev: fpga container device.
 *
 * Remove the container device and all feature devices under given container
 * device.
 */
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
	struct dfl_feature_platform_data *pdata, *ptmp;

	mutex_lock(&cdev->lock);
	if (cdev->fme_dev)
		put_device(cdev->fme_dev);

	list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
		struct platform_device *port_dev = pdata->dev;

		/* remove released ports */
		if (!device_is_registered(&port_dev->dev)) {
			dfl_id_free(feature_dev_id_type(port_dev),
				    port_dev->id);
			platform_device_put(port_dev);
		}

		list_del(&pdata->node);
		put_device(&port_dev->dev);
	}
	mutex_unlock(&cdev->lock);

	remove_feature_devs(cdev);

	fpga_region_unregister(cdev->region);
	devm_kfree(cdev->parent, cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
/**
 * __dfl_fpga_cdev_find_port - find a port under given container device
 *
 * @cdev: container device
 * @data: data passed to match function
 * @match: match function used to find specific port from the port device list
 *
 * Find a port device under container device. This function needs to be
 * invoked with lock held.
 *
 * Return: pointer to port's platform device if successful, NULL otherwise.
 *
 * NOTE: you will need to drop the device reference with put_device() after use.
 */
struct platform_device *
__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
			  int (*match)(struct platform_device *, void *))
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_dev;

	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		port_dev = pdata->dev;

		if (match(port_dev, data) && get_device(&port_dev->dev))
			return port_dev;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
static int __init dfl_fpga_init(void)
{
	int ret;

	ret = bus_register(&dfl_bus_type);
	if (ret)
		return ret;

	dfl_ids_init();

	ret = dfl_chardev_init();
	if (ret) {
		dfl_ids_destroy();
		bus_unregister(&dfl_bus_type);
	}

	return ret;
}
/**
 * dfl_fpga_cdev_release_port - release a port platform device
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows user to release a port platform device. This is a
 * mandatory step before turning a port from PF into VF for SRIOV support.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_pdev;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
					      dfl_fpga_check_port_id);
	if (!port_pdev)
		goto unlock_exit;

	if (!device_is_registered(&port_pdev->dev)) {
		ret = -EBUSY;
		goto put_dev_exit;
	}

	pdata = dev_get_platdata(&port_pdev->dev);

	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, true);
	mutex_unlock(&pdata->lock);
	if (ret)
		goto put_dev_exit;

	platform_device_del(port_pdev);
	cdev->released_port_num++;
put_dev_exit:
	put_device(&port_pdev->dev);
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
/**
 * dfl_fpga_cdev_assign_port - assign a port platform device back
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows user to assign a port platform device back. This is
 * a mandatory step after disabling SRIOV support.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_pdev;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
					      dfl_fpga_check_port_id);
	if (!port_pdev)
		goto unlock_exit;

	if (device_is_registered(&port_pdev->dev)) {
		ret = -EBUSY;
		goto put_dev_exit;
	}

	ret = platform_device_add(port_pdev);
	if (ret)
		goto put_dev_exit;

	pdata = dev_get_platdata(&port_pdev->dev);

	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);
	mutex_unlock(&pdata->lock);

	cdev->released_port_num--;
put_dev_exit:
	put_device(&port_pdev->dev);
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
static void config_port_access_mode(struct device *fme_dev, int port_id,
				    bool is_vf)
{
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);

	v = readq(base + FME_HDR_PORT_OFST(port_id));

	v &= ~FME_PORT_OFST_ACC_CTRL;
	v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
			is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);

	writeq(v, base + FME_HDR_PORT_OFST(port_id));
}

#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
/**
 * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
 *
 * @cdev: parent container device.
 *
 * This function is needed in the sriov configuration routine. It can be used
 * to configure all released ports from VF access mode to PF.
 */
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
{
	struct dfl_feature_platform_data *pdata;

	mutex_lock(&cdev->lock);
	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		if (device_is_registered(&pdata->dev->dev))
			continue;

		config_port_pf_mode(cdev->fme_dev, pdata->id);
	}
	mutex_unlock(&cdev->lock);
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
/**
 * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
 *
 * @cdev: parent container device.
 * @num_vfs: VF device number.
 *
 * This function is needed in the sriov configuration routine. It can be used
 * to configure the released ports from PF access mode to VF.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
{
	struct dfl_feature_platform_data *pdata;
	int ret = 0;

	mutex_lock(&cdev->lock);
	/*
	 * can't turn multiple ports into 1 VF device, only 1 port for 1 VF
	 * device, so if released port number doesn't match VF device number,
	 * then reject the request with -EINVAL error code.
	 */
	if (cdev->released_port_num != num_vfs) {
		ret = -EINVAL;
		goto done;
	}

	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		if (device_is_registered(&pdata->dev->dev))
			continue;

		config_port_vf_mode(cdev->fme_dev, pdata->id);
	}
done:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
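
/*
 * Illustrative sketch of the SRIOV sequence built from the helpers above
 * (assumptions: a PCIe parent driver with "pcidev" and a requested "num_vfs"):
 * ports are first released (normally via the port release ioctl), then the
 * released ports are switched to VF access mode before SRIOV is enabled:
 *
 *	dfl_fpga_cdev_release_port(cdev, port_id);	// for each port
 *	...
 *	ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
 *	if (!ret)
 *		ret = pci_enable_sriov(pcidev, num_vfs);
 *
 * The disable path calls pci_disable_sriov(), dfl_fpga_cdev_config_ports_pf()
 * and finally dfl_fpga_cdev_assign_port() for each port.
 */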
static irqreturn_t dfl_irq_handler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}
static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
			      int fd)
{
	struct platform_device *pdev = feature->dev;
	struct eventfd_ctx *trigger;
	int irq, ret;

	irq = feature->irq_ctx[idx].irq;

	if (feature->irq_ctx[idx].trigger) {
		free_irq(irq, feature->irq_ctx[idx].trigger);
		kfree(feature->irq_ctx[idx].name);
		eventfd_ctx_put(feature->irq_ctx[idx].trigger);
		feature->irq_ctx[idx].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	feature->irq_ctx[idx].name =
		kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%x)", idx,
			  dev_name(&pdev->dev), feature->id);
	if (!feature->irq_ctx[idx].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto free_name;
	}

	ret = request_irq(irq, dfl_irq_handler, 0,
			  feature->irq_ctx[idx].name, trigger);
	if (!ret) {
		feature->irq_ctx[idx].trigger = trigger;
		return ret;
	}

	eventfd_ctx_put(trigger);
free_name:
	kfree(feature->irq_ctx[idx].name);

	return ret;
}
/**
 * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
 *
 * @feature: dfl sub feature.
 * @start: start of irq index in this dfl sub feature.
 * @count: number of irqs.
 * @fds: eventfds to bind with irqs. unbind related irq if fds[n] is negative.
 *	 unbind "count" specified number of irqs if fds ptr is NULL.
 *
 * Bind given eventfds with irqs in this dfl sub feature. Unbind related irq if
 * fds[n] is negative. Unbind "count" specified number of irqs if fds ptr is
 * NULL.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
			      unsigned int count, int32_t *fds)
{
	unsigned int i;
	int ret = 0;

	/* overflow */
	if (unlikely(start + count < start))
		return -EINVAL;

	/* exceeds nr_irqs */
	if (start + count > feature->nr_irqs)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		int fd = fds ? fds[i] : -1;

		ret = do_set_irq_trigger(feature, start + i, fd);
		if (ret) {
			while (i--)
				do_set_irq_trigger(feature, start + i, -1);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers);
/**
 * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface.
 * @pdev: the feature device which has the sub feature
 * @feature: the dfl sub feature
 * @arg: ioctl argument
 *
 * Return: 0 on success, negative error code otherwise.
 */
long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
				    struct dfl_feature *feature,
				    unsigned long arg)
{
	return put_user(feature->nr_irqs, (__u32 __user *)arg);
}
EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);
/**
 * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface.
 * @pdev: the feature device which has the sub feature
 * @feature: the dfl sub feature
 * @arg: ioctl argument
 *
 * Return: 0 on success, negative error code otherwise.
 */
long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
			       struct dfl_feature *feature,
			       unsigned long arg)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fpga_irq_set hdr;
	s32 *fds;
	long ret;

	if (!feature->nr_irqs)
		return -ENOENT;

	if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
		return -EFAULT;

	if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
	    (hdr.start + hdr.count < hdr.start))
		return -EINVAL;

	fds = memdup_user((void __user *)(arg + sizeof(hdr)),
			  hdr.count * sizeof(s32));
	if (IS_ERR(fds))
		return PTR_ERR(fds);

	mutex_lock(&pdata->lock);
	ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
	mutex_unlock(&pdata->lock);

	kfree(fds);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);
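
/*
 * Illustrative sketch (the ioctl command names below are the UAPI ones from
 * include/uapi/linux/fpga-dfl.h, shown here only as an example): a sub
 * feature driver's ioctl handler typically dispatches to the two helpers
 * above,
 *
 *	switch (cmd) {
 *	case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
 *		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
 *	case DFL_FPGA_PORT_ERR_SET_IRQ:
 *		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
 *	default:
 *		return -ENODEV;
 *	}
 */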
static void __exit dfl_fpga_exit(void)
{
	dfl_chardev_uinit();
	dfl_ids_destroy();
	bus_unregister(&dfl_bus_type);
}

module_init(dfl_fpga_init);
module_exit(dfl_fpga_exit);
MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");