// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine (FME)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/units.h>
#include <linux/fpga-dfl.h>

#include "dfl.h"
#include "dfl-fme.h"

static ssize_t ports_num_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
}
static DEVICE_ATTR_RO(ports_num);

/*
 * Bitstream (static FPGA region) identifier number. It contains the
 * detailed version and other information of this static FPGA region.
 */
static ssize_t bitstream_id_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_BITSTREAM_ID);

        return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
}
static DEVICE_ATTR_RO(bitstream_id);

/*
 * Bitstream (static FPGA region) meta data. It contains the synthesis
 * date, seed and other information of this static FPGA region.
 */
static ssize_t bitstream_metadata_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_BITSTREAM_MD);

        return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
}
static DEVICE_ATTR_RO(bitstream_metadata);

static ssize_t cache_size_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
}
static DEVICE_ATTR_RO(cache_size);

static ssize_t fabric_version_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
}
static DEVICE_ATTR_RO(fabric_version);

static ssize_t socket_id_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
}
static DEVICE_ATTR_RO(socket_id);

static struct attribute *fme_hdr_attrs[] = {
        &dev_attr_ports_num.attr,
        &dev_attr_bitstream_id.attr,
        &dev_attr_bitstream_metadata.attr,
        &dev_attr_cache_size.attr,
        &dev_attr_fabric_version.attr,
        &dev_attr_socket_id.attr,
        NULL,
};

static const struct attribute_group fme_hdr_group = {
        .attrs = fme_hdr_attrs,
};

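/*
 * The attributes in fme_hdr_group are exported through fme_dev_groups at the
 * bottom of this file, so they show up in the FME platform device's sysfs
 * directory. Assuming the first FME instance is named "dfl-fme.0" (the exact
 * instance suffix is assigned at enumeration time), userspace reads e.g.
 * /sys/bus/platform/devices/dfl-fme.0/bitstream_id as plain text.
 */
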
static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
                                       unsigned long arg)
{
        struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
        int port_id;

        if (get_user(port_id, (int __user *)arg))
                return -EFAULT;

        return dfl_fpga_cdev_release_port(cdev, port_id);
}

static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
                                      unsigned long arg)
{
        struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
        int port_id;

        if (get_user(port_id, (int __user *)arg))
                return -EFAULT;

        return dfl_fpga_cdev_assign_port(cdev, port_id);
}

static long fme_hdr_ioctl(struct platform_device *pdev,
                          struct dfl_feature *feature,
                          unsigned int cmd, unsigned long arg)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

        switch (cmd) {
        case DFL_FPGA_FME_PORT_RELEASE:
                return fme_hdr_ioctl_release_port(pdata, arg);
        case DFL_FPGA_FME_PORT_ASSIGN:
                return fme_hdr_ioctl_assign_port(pdata, arg);
        }

        return -ENODEV;
}

static const struct dfl_feature_id fme_hdr_id_table[] = {
        {.id = FME_FEATURE_ID_HEADER,},
        {0,}
};

static const struct dfl_feature_ops fme_hdr_ops = {
        .ioctl = fme_hdr_ioctl,
};

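/*
 * Userspace usage sketch (not compiled into the driver): releasing a port
 * from the default driver, e.g. before assigning it to a VF. The device node
 * path "/dev/dfl-fme.0" is an assumption for illustration; the ioctl numbers
 * come from <linux/fpga-dfl.h>.
 *
 *      #include <fcntl.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/fpga-dfl.h>
 *
 *      int release_port0(void)
 *      {
 *              int port_id = 0;
 *              int fd = open("/dev/dfl-fme.0", O_RDWR);  // assumed node name
 *
 *              if (fd < 0)
 *                      return -1;
 *              // hand the port back later with DFL_FPGA_FME_PORT_ASSIGN
 *              return ioctl(fd, DFL_FPGA_FME_PORT_RELEASE, &port_id);
 *      }
 */
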
#define FME_THERM_THRESHOLD     0x8
#define TEMP_THRESHOLD1         GENMASK_ULL(6, 0)
#define TEMP_THRESHOLD1_EN      BIT_ULL(7)
#define TEMP_THRESHOLD2         GENMASK_ULL(14, 8)
#define TEMP_THRESHOLD2_EN      BIT_ULL(15)
#define TRIP_THRESHOLD          GENMASK_ULL(30, 24)
#define TEMP_THRESHOLD1_STATUS  BIT_ULL(32)             /* threshold1 reached */
#define TEMP_THRESHOLD2_STATUS  BIT_ULL(33)             /* threshold2 reached */
/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
#define TEMP_THRESHOLD1_POLICY  BIT_ULL(44)

#define FME_THERM_RDSENSOR_FMT1 0x10
#define FPGA_TEMPERATURE        GENMASK_ULL(6, 0)

#define FME_THERM_CAP           0x20
#define THERM_NO_THROTTLE       BIT_ULL(0)

static bool fme_thermal_throttle_support(void __iomem *base)
{
        u64 v = readq(base + FME_THERM_CAP);

        return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true;
}

static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
                                           enum hwmon_sensor_types type,
                                           u32 attr, int channel)
{
        const struct dfl_feature *feature = drvdata;

        /* temperature is always supported; check the hardware cap for the others */
        if (attr == hwmon_temp_input)
                return 0444;

        return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
}

static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
                              u32 attr, int channel, long *val)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        switch (attr) {
        case hwmon_temp_input:
                v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
                *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * MILLI);
                break;
        case hwmon_temp_max:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * MILLI);
                break;
        case hwmon_temp_crit:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * MILLI);
                break;
        case hwmon_temp_emergency:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * MILLI);
                break;
        case hwmon_temp_max_alarm:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
                break;
        case hwmon_temp_crit_alarm:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static const struct hwmon_ops thermal_hwmon_ops = {
        .is_visible = thermal_hwmon_attrs_visible,
        .read = thermal_hwmon_read,
};

static const struct hwmon_channel_info * const thermal_hwmon_info[] = {
        HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
                                 HWMON_T_MAX | HWMON_T_MAX_ALARM |
                                 HWMON_T_CRIT | HWMON_T_CRIT_ALARM),
        NULL
};

static const struct hwmon_chip_info thermal_hwmon_chip_info = {
        .ops = &thermal_hwmon_ops,
        .info = thermal_hwmon_info,
};

static ssize_t temp1_max_policy_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        v = readq(feature->ioaddr + FME_THERM_THRESHOLD);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
}

static DEVICE_ATTR_RO(temp1_max_policy);

static struct attribute *thermal_extra_attrs[] = {
        &dev_attr_temp1_max_policy.attr,
        NULL,
};

static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
                                           struct attribute *attr, int index)
{
        struct device *dev = kobj_to_dev(kobj);
        struct dfl_feature *feature = dev_get_drvdata(dev);

        return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
}

static const struct attribute_group thermal_extra_group = {
        .attrs = thermal_extra_attrs,
        .is_visible = thermal_extra_attrs_visible,
};
__ATTRIBUTE_GROUPS(thermal_extra);

static int fme_thermal_mgmt_init(struct platform_device *pdev,
                                 struct dfl_feature *feature)
{
        struct device *hwmon;

        /*
         * Create hwmon to allow userspace to monitor temperature and other
         * threshold information.
         *
         * temp1_input      -> FPGA device temperature
         * temp1_max        -> hardware threshold 1 -> 50% or 90% throttling
         * temp1_crit       -> hardware threshold 2 -> 100% throttling
         * temp1_emergency  -> hardware trip_threshold to shutdown FPGA
         * temp1_max_alarm  -> hardware threshold 1 alarm
         * temp1_crit_alarm -> hardware threshold 2 alarm
         *
         * Also create device specific sysfs interfaces, e.g. read
         * temp1_max_policy to understand the actual hardware throttling
         * action (50% vs 90%).
         *
         * If hardware doesn't support automatic throttling per thresholds,
         * then none of the above sysfs interfaces is visible except
         * temp1_input for temperature.
         */
        hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
                                                     "dfl_fme_thermal", feature,
                                                     &thermal_hwmon_chip_info,
                                                     thermal_extra_groups);
        if (IS_ERR(hwmon)) {
                dev_err(&pdev->dev, "Fail to register thermal hwmon\n");
                return PTR_ERR(hwmon);
        }

        return 0;
}

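/*
 * Note on units for the hwmon interface registered above: thermal_hwmon_read()
 * scales the raw 7-bit temperature and threshold fields by MILLI, so userspace
 * sees standard hwmon millidegree Celsius values, e.g. a raw reading of 45
 * from FME_THERM_RDSENSOR_FMT1 is reported as 45000 through temp1_input. The
 * hwmon device registers under the name "dfl_fme_thermal"; its hwmonN index
 * below /sys/class/hwmon/ is assigned at runtime.
 */
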
static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
        {.id = FME_FEATURE_ID_THERMAL_MGMT,},
        {0,}
};

static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
        .init = fme_thermal_mgmt_init,
};

#define FME_PWR_STATUS          0x8
#define FME_LATENCY_TOLERANCE   BIT_ULL(18)
#define PWR_CONSUMED            GENMASK_ULL(17, 0)

#define FME_PWR_THRESHOLD       0x10
#define PWR_THRESHOLD1          GENMASK_ULL(6, 0)       /* in Watts */
#define PWR_THRESHOLD2          GENMASK_ULL(14, 8)      /* in Watts */
#define PWR_THRESHOLD_MAX       0x7f                    /* in Watts */
#define PWR_THRESHOLD1_STATUS   BIT_ULL(16)
#define PWR_THRESHOLD2_STATUS   BIT_ULL(17)

#define FME_PWR_XEON_LIMIT      0x18
#define XEON_PWR_LIMIT          GENMASK_ULL(14, 0)      /* in 0.1 Watts */
#define XEON_PWR_EN             BIT_ULL(15)
#define FME_PWR_FPGA_LIMIT      0x20
#define FPGA_PWR_LIMIT          GENMASK_ULL(14, 0)      /* in 0.1 Watts */
#define FPGA_PWR_EN             BIT_ULL(15)

static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
                            u32 attr, int channel, long *val)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        switch (attr) {
        case hwmon_power_input:
                v = readq(feature->ioaddr + FME_PWR_STATUS);
                *val = (long)(FIELD_GET(PWR_CONSUMED, v) * MICRO);
                break;
        case hwmon_power_max:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * MICRO);
                break;
        case hwmon_power_crit:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * MICRO);
                break;
        case hwmon_power_max_alarm:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
                break;
        case hwmon_power_crit_alarm:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
                             u32 attr, int channel, long val)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
        struct dfl_feature *feature = dev_get_drvdata(dev);
        int ret = 0;
        u64 v;

        val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);

        mutex_lock(&pdata->lock);

        switch (attr) {
        case hwmon_power_max:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                v &= ~PWR_THRESHOLD1;
                v |= FIELD_PREP(PWR_THRESHOLD1, val);
                writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
                break;
        case hwmon_power_crit:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                v &= ~PWR_THRESHOLD2;
                v |= FIELD_PREP(PWR_THRESHOLD2, val);
                writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        mutex_unlock(&pdata->lock);

        return ret;
}

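/*
 * Note on units for power_hwmon_write() above: hwmon power values are in
 * microwatts, so an input of 30000000 (30 W) is divided by MICRO and clamped
 * to PWR_THRESHOLD_MAX (0x7f, i.e. 127 W) before being programmed into the
 * watt-granular PWR_THRESHOLD1/PWR_THRESHOLD2 fields. Updates to power1_max
 * and power1_crit are serialized against each other with pdata->lock.
 */
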
static umode_t power_hwmon_attrs_visible(const void *drvdata,
                                         enum hwmon_sensor_types type,
                                         u32 attr, int channel)
{
        switch (attr) {
        case hwmon_power_input:
        case hwmon_power_max_alarm:
        case hwmon_power_crit_alarm:
                return 0444;
        case hwmon_power_max:
        case hwmon_power_crit:
                return 0644;
        }

        return 0;
}

static const struct hwmon_ops power_hwmon_ops = {
        .is_visible = power_hwmon_attrs_visible,
        .read = power_hwmon_read,
        .write = power_hwmon_write,
};

static const struct hwmon_channel_info * const power_hwmon_info[] = {
        HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
                                  HWMON_P_MAX | HWMON_P_MAX_ALARM |
                                  HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
        NULL
};

static const struct hwmon_chip_info power_hwmon_chip_info = {
        .ops = &power_hwmon_ops,
        .info = power_hwmon_info,
};

static ssize_t power1_xeon_limit_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u16 xeon_limit = 0;
        u64 v;

        v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);

        if (FIELD_GET(XEON_PWR_EN, v))
                xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);

        return sprintf(buf, "%u\n", xeon_limit * 100000);
}

static ssize_t power1_fpga_limit_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u16 fpga_limit = 0;
        u64 v;

        v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);

        if (FIELD_GET(FPGA_PWR_EN, v))
                fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);

        return sprintf(buf, "%u\n", fpga_limit * 100000);
}

static ssize_t power1_ltr_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        v = readq(feature->ioaddr + FME_PWR_STATUS);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
}

static DEVICE_ATTR_RO(power1_xeon_limit);
static DEVICE_ATTR_RO(power1_fpga_limit);
static DEVICE_ATTR_RO(power1_ltr);

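/*
 * Note on units for the limit attributes above: XEON_PWR_LIMIT and
 * FPGA_PWR_LIMIT are in 0.1 Watt units, so the show() routines multiply by
 * 100000 to report microwatts in line with the other hwmon power attributes,
 * e.g. a raw limit of 750 (75.0 W) is reported as 75000000. A value of 0 is
 * reported while the corresponding *_PWR_EN bit is clear.
 */
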
static struct attribute *power_extra_attrs[] = {
        &dev_attr_power1_xeon_limit.attr,
        &dev_attr_power1_fpga_limit.attr,
        &dev_attr_power1_ltr.attr,
        NULL
};

ATTRIBUTE_GROUPS(power_extra);

static int fme_power_mgmt_init(struct platform_device *pdev,
                               struct dfl_feature *feature)
{
        struct device *hwmon;

        hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
                                                     "dfl_fme_power", feature,
                                                     &power_hwmon_chip_info,
                                                     power_extra_groups);
        if (IS_ERR(hwmon)) {
                dev_err(&pdev->dev, "Fail to register power hwmon\n");
                return PTR_ERR(hwmon);
        }

        return 0;
}

static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
        {.id = FME_FEATURE_ID_POWER_MGMT,},
        {0,}
};

static const struct dfl_feature_ops fme_power_mgmt_ops = {
        .init = fme_power_mgmt_init,
};

static struct dfl_feature_driver fme_feature_drvs[] = {
        {
                .id_table = fme_hdr_id_table,
                .ops = &fme_hdr_ops,
        },
        {
                .id_table = fme_pr_mgmt_id_table,
                .ops = &fme_pr_mgmt_ops,
        },
        {
                .id_table = fme_global_err_id_table,
                .ops = &fme_global_err_ops,
        },
        {
                .id_table = fme_thermal_mgmt_id_table,
                .ops = &fme_thermal_mgmt_ops,
        },
        {
                .id_table = fme_power_mgmt_id_table,
                .ops = &fme_power_mgmt_ops,
        },
        {
                .id_table = fme_perf_id_table,
                .ops = &fme_perf_ops,
        },
        {
                .ops = NULL,
        },
};

static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
                                      unsigned long arg)
{
        /* No extension support for now */
        return 0;
}

static int fme_open(struct inode *inode, struct file *filp)
{
        struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
        int ret;

        if (WARN_ON(!pdata))
                return -ENODEV;

        mutex_lock(&pdata->lock);
        ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
        if (!ret) {
                dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
                        dfl_feature_dev_use_count(pdata));
                filp->private_data = pdata;
        }
        mutex_unlock(&pdata->lock);

        return ret;
}

static int fme_release(struct inode *inode, struct file *filp)
{
        struct dfl_feature_platform_data *pdata = filp->private_data;
        struct platform_device *pdev = pdata->dev;
        struct dfl_feature *feature;

        dev_dbg(&pdev->dev, "Device File Release\n");

        mutex_lock(&pdata->lock);
        dfl_feature_dev_use_end(pdata);

        if (!dfl_feature_dev_use_count(pdata))
                dfl_fpga_dev_for_each_feature(pdata, feature)
                        dfl_fpga_set_irq_triggers(feature, 0,
                                                  feature->nr_irqs, NULL);
        mutex_unlock(&pdata->lock);

        return 0;
}

static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct dfl_feature_platform_data *pdata = filp->private_data;
        struct platform_device *pdev = pdata->dev;
        struct dfl_feature *f;
        long ret;

        dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

        switch (cmd) {
        case DFL_FPGA_GET_API_VERSION:
                return DFL_FPGA_API_VERSION;
        case DFL_FPGA_CHECK_EXTENSION:
                return fme_ioctl_check_extension(pdata, arg);
        default:
                /*
                 * Let the sub-feature's ioctl function handle the cmd.
                 * A sub-feature's ioctl returns -ENODEV when the cmd is not
                 * handled by that sub-feature, and returns 0 or another
                 * error code if the cmd is handled.
                 */
                dfl_fpga_dev_for_each_feature(pdata, f) {
                        if (f->ops && f->ops->ioctl) {
                                ret = f->ops->ioctl(pdev, f, cmd, arg);
                                if (ret != -ENODEV)
                                        return ret;
                        }
                }
        }

        return -EINVAL;
}

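/*
 * Userspace usage sketch (not compiled into the driver): querying the DFL API
 * version through the FME character device. The node path "/dev/dfl-fme.0" is
 * an assumption for illustration.
 *
 *      #include <fcntl.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/fpga-dfl.h>
 *
 *      int fme_api_version(void)
 *      {
 *              int fd = open("/dev/dfl-fme.0", O_RDONLY);
 *
 *              if (fd < 0)
 *                      return -1;
 *              // handled directly by fme_ioctl(), returns DFL_FPGA_API_VERSION
 *              return ioctl(fd, DFL_FPGA_GET_API_VERSION);
 *      }
 */
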
static int fme_dev_init(struct platform_device *pdev)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct dfl_fme *fme;

        fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
        if (!fme)
                return -ENOMEM;

        mutex_lock(&pdata->lock);
        dfl_fpga_pdata_set_private(pdata, fme);
        mutex_unlock(&pdata->lock);

        return 0;
}

static void fme_dev_destroy(struct platform_device *pdev)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

        mutex_lock(&pdata->lock);
        dfl_fpga_pdata_set_private(pdata, NULL);
        mutex_unlock(&pdata->lock);
}

static const struct file_operations fme_fops = {
        .owner          = THIS_MODULE,
        .open           = fme_open,
        .release        = fme_release,
        .unlocked_ioctl = fme_ioctl,
};

static int fme_probe(struct platform_device *pdev)
{
        int ret;

        ret = fme_dev_init(pdev);
        if (ret)
                goto exit;

        ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
        if (ret)
                goto dev_destroy;

        ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
        if (ret)
                goto feature_uinit;

        return 0;

feature_uinit:
        dfl_fpga_dev_feature_uinit(pdev);
dev_destroy:
        fme_dev_destroy(pdev);
exit:
        return ret;
}

static void fme_remove(struct platform_device *pdev)
{
        dfl_fpga_dev_ops_unregister(pdev);
        dfl_fpga_dev_feature_uinit(pdev);
        fme_dev_destroy(pdev);
}

static const struct attribute_group *fme_dev_groups[] = {
        &fme_hdr_group,
        &fme_global_err_group,
        NULL
};

static struct platform_driver fme_driver = {
        .driver = {
                .name       = DFL_FPGA_FEATURE_DEV_FME,
                .dev_groups = fme_dev_groups,
        },
        .probe  = fme_probe,
        .remove = fme_remove,
};

module_platform_driver(fme_driver);

MODULE_DESCRIPTION("FPGA Management Engine driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-fme");