Revert "tty: hvc: Fix data abort due to race in hvc_open"
[linux/fpc-iii.git] drivers/fpga/dfl-fme-pr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine (FME) Partial Reconfiguration
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Christopher Rauer <christopher.rauer@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-bridge.h>
#include <linux/fpga/fpga-region.h>
#include <linux/fpga-dfl.h>

#include "dfl.h"
#include "dfl-fme.h"
#include "dfl-fme-pr.h"

static struct dfl_fme_region *
dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
{
	struct dfl_fme_region *fme_region;

	list_for_each_entry(fme_region, &fme->region_list, node)
		if (fme_region->port_id == port_id)
			return fme_region;

	return NULL;
}

static int dfl_fme_region_match(struct device *dev, const void *data)
{
	return dev->parent == data;
}

static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
{
	struct dfl_fme_region *fme_region;
	struct fpga_region *region;

	fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
	if (!fme_region)
		return NULL;

	region = fpga_region_class_find(NULL, &fme_region->region->dev,
					dfl_fme_region_match);
	if (!region)
		return NULL;

	return region;
}

static int fme_pr(struct platform_device *pdev, unsigned long arg)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __user *argp = (void __user *)arg;
	struct dfl_fpga_fme_port_pr port_pr;
	struct fpga_image_info *info;
	struct fpga_region *region;
	void __iomem *fme_hdr;
	struct dfl_fme *fme;
	unsigned long minsz;
	void *buf = NULL;
	size_t length;
	int ret = 0;
	u64 v;

	minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);

	if (copy_from_user(&port_pr, argp, minsz))
		return -EFAULT;

	if (port_pr.argsz < minsz || port_pr.flags)
		return -EINVAL;

	/* get fme header region */
	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
					       FME_FEATURE_ID_HEADER);

	/* check port id */
	v = readq(fme_hdr + FME_HDR_CAP);
	if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
		dev_dbg(&pdev->dev, "port number more than maximum\n");
		return -EINVAL;
	}

	if (!access_ok((void __user *)(unsigned long)port_pr.buffer_address,
		       port_pr.buffer_size))
		return -EFAULT;

	/*
	 * align PR buffer per PR bandwidth, as HW ignores the extra padding
	 * data automatically.
	 */
	length = ALIGN(port_pr.buffer_size, 4);

	buf = vmalloc(length);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf,
			   (void __user *)(unsigned long)port_pr.buffer_address,
			   port_pr.buffer_size)) {
		ret = -EFAULT;
		goto free_exit;
	}

	/* prepare fpga_image_info for PR */
	info = fpga_image_info_alloc(&pdev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto free_exit;
	}

	info->flags |= FPGA_MGR_PARTIAL_RECONFIG;

	mutex_lock(&pdata->lock);
	fme = dfl_fpga_pdata_get_private(pdata);
	/* fme device has been unregistered. */
	if (!fme) {
		ret = -EINVAL;
		goto unlock_exit;
	}

	region = dfl_fme_region_find(fme, port_pr.port_id);
	if (!region) {
		ret = -EINVAL;
		goto unlock_exit;
	}

	fpga_image_info_free(region->info);

	info->buf = buf;
	info->count = length;
	info->region_id = port_pr.port_id;
	region->info = info;

	ret = fpga_region_program_fpga(region);

	/*
	 * This allows userspace to reset the PR region's logic by disabling
	 * and re-enabling the bridge to clear things out between acceleration
	 * runs, so there is no need to hold the bridges after partial
	 * reconfiguration.
	 */
	if (region->get_bridges)
		fpga_bridges_put(&region->bridge_list);

	put_device(&region->dev);
unlock_exit:
	mutex_unlock(&pdata->lock);
free_exit:
	vfree(buf);
	return ret;
}

/**
 * dfl_fme_create_mgr - create fpga mgr platform device as child device
 *
 * @pdata: fme platform_device's pdata
 * @feature: fme PR management sub feature
 *
 * Return: mgr platform device if successful, and error code otherwise.
 */
static struct platform_device *
dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
		   struct dfl_feature *feature)
{
	struct platform_device *mgr, *fme = pdata->dev;
	struct dfl_fme_mgr_pdata mgr_pdata;
	int ret = -ENOMEM;

	if (!feature->ioaddr)
		return ERR_PTR(-ENODEV);

	mgr_pdata.ioaddr = feature->ioaddr;

	/*
	 * Each FME has only one fpga-mgr, so allocate platform device using
	 * the same FME platform device id.
	 */
	mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
	if (!mgr)
		return ERR_PTR(ret);

	mgr->dev.parent = &fme->dev;

	ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
	if (ret)
		goto create_mgr_err;

	ret = platform_device_add(mgr);
	if (ret)
		goto create_mgr_err;

	return mgr;

create_mgr_err:
	platform_device_put(mgr);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_mgr - destroy fpga mgr platform device
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);

	platform_device_unregister(priv->mgr);
}

/**
 * dfl_fme_create_bridge - create fme fpga bridge platform device as child
 *
 * @pdata: fme platform device's pdata
 * @port_id: port id for the bridge to be created.
 *
 * Return: bridge platform device if successful, and error code otherwise.
 */
static struct dfl_fme_bridge *
dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
{
	struct device *dev = &pdata->dev->dev;
	struct dfl_fme_br_pdata br_pdata;
	struct dfl_fme_bridge *fme_br;
	int ret = -ENOMEM;

	fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
	if (!fme_br)
		return ERR_PTR(ret);

	br_pdata.cdev = pdata->dfl_cdev;
	br_pdata.port_id = port_id;

	fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
					   PLATFORM_DEVID_AUTO);
	if (!fme_br->br)
		return ERR_PTR(ret);

	fme_br->br->dev.parent = dev;

	ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
	if (ret)
		goto create_br_err;

	ret = platform_device_add(fme_br->br);
	if (ret)
		goto create_br_err;

	return fme_br;

create_br_err:
	platform_device_put(fme_br->br);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_bridge - destroy fpga bridge platform device
 * @fme_br: fme bridge to destroy
 */
static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
{
	platform_device_unregister(fme_br->br);
}

/**
 * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_bridge *fbridge, *tmp;

	list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
		list_del(&fbridge->node);
		dfl_fme_destroy_bridge(fbridge);
	}
}

/**
 * dfl_fme_create_region - create fpga region platform device as child
 *
 * @pdata: fme platform device's pdata
 * @mgr: mgr platform device needed for region
 * @br: br platform device needed for region
 * @port_id: port id
 *
 * Return: fme region if successful, and error code otherwise.
 */
static struct dfl_fme_region *
dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
		      struct platform_device *mgr,
		      struct platform_device *br, int port_id)
{
	struct dfl_fme_region_pdata region_pdata;
	struct device *dev = &pdata->dev->dev;
	struct dfl_fme_region *fme_region;
	int ret = -ENOMEM;

	fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
	if (!fme_region)
		return ERR_PTR(ret);

	region_pdata.mgr = mgr;
	region_pdata.br = br;

	/*
	 * Each FPGA device may have more than one port, so allocate platform
	 * device using the same port platform device id.
	 */
	fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
	if (!fme_region->region)
		return ERR_PTR(ret);

	fme_region->region->dev.parent = dev;

	ret = platform_device_add_data(fme_region->region, &region_pdata,
				       sizeof(region_pdata));
	if (ret)
		goto create_region_err;

	ret = platform_device_add(fme_region->region);
	if (ret)
		goto create_region_err;

	fme_region->port_id = port_id;

	return fme_region;

create_region_err:
	platform_device_put(fme_region->region);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_region - destroy fme region
 * @fme_region: fme region to destroy
 */
static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
{
	platform_device_unregister(fme_region->region);
}

/**
 * dfl_fme_destroy_regions - destroy all fme regions
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_region *fme_region, *tmp;

	list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
		list_del(&fme_region->node);
		dfl_fme_destroy_region(fme_region);
	}
}

static int pr_mgmt_init(struct platform_device *pdev,
			struct dfl_feature *feature)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fme_region *fme_region;
	struct dfl_fme_bridge *fme_br;
	struct platform_device *mgr;
	struct dfl_fme *priv;
	void __iomem *fme_hdr;
	int ret = -ENODEV, i = 0;
	u64 fme_cap, port_offset;

	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
					       FME_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	priv = dfl_fpga_pdata_get_private(pdata);

	/* Initialize the region and bridge sub device list */
	INIT_LIST_HEAD(&priv->region_list);
	INIT_LIST_HEAD(&priv->bridge_list);

	/* Create fpga mgr platform device */
	mgr = dfl_fme_create_mgr(pdata, feature);
	if (IS_ERR(mgr)) {
		dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
		goto unlock;
	}

	priv->mgr = mgr;

	/* Read capability register to check number of regions and bridges */
	fme_cap = readq(fme_hdr + FME_HDR_CAP);
	for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
		port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
		if (!(port_offset & FME_PORT_OFST_IMP))
			continue;

		/* Create bridge for each port */
		fme_br = dfl_fme_create_bridge(pdata, i);
		if (IS_ERR(fme_br)) {
			ret = PTR_ERR(fme_br);
			goto destroy_region;
		}

		list_add(&fme_br->node, &priv->bridge_list);

		/* Create region for each port */
		fme_region = dfl_fme_create_region(pdata, mgr,
						   fme_br->br, i);
		if (IS_ERR(fme_region)) {
			ret = PTR_ERR(fme_region);
			goto destroy_region;
		}

		list_add(&fme_region->node, &priv->region_list);
	}
	mutex_unlock(&pdata->lock);

	return 0;

destroy_region:
	dfl_fme_destroy_regions(pdata);
	dfl_fme_destroy_bridges(pdata);
	dfl_fme_destroy_mgr(pdata);
unlock:
	mutex_unlock(&pdata->lock);
	return ret;
}

static void pr_mgmt_uinit(struct platform_device *pdev,
			  struct dfl_feature *feature)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);

	dfl_fme_destroy_regions(pdata);
	dfl_fme_destroy_bridges(pdata);
	dfl_fme_destroy_mgr(pdata);
	mutex_unlock(&pdata->lock);
}

static long fme_pr_ioctl(struct platform_device *pdev,
			 struct dfl_feature *feature,
			 unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case DFL_FPGA_FME_PORT_PR:
		ret = fme_pr(pdev, arg);
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_PR_MGMT,},
	{0,}
};

const struct dfl_feature_ops fme_pr_mgmt_ops = {
	.init = pr_mgmt_init,
	.uinit = pr_mgmt_uinit,
	.ioctl = fme_pr_ioctl,
};
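
/*
 * Illustrative user-space sketch (not part of this driver): how a client
 * could submit a partial-reconfiguration bitstream through the
 * DFL_FPGA_FME_PORT_PR ioctl handled by fme_pr() above. The device node
 * path and the use of port 0 are assumptions for the example only; field
 * names match struct dfl_fpga_fme_port_pr as used in this file.
 */
#if 0	/* example only, never compiled into the kernel */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fpga-dfl.h>

static int fme_port_pr_example(const void *bitstream, size_t size)
{
	struct dfl_fpga_fme_port_pr port_pr;
	int fd, ret;

	fd = open("/dev/dfl-fme.0", O_RDWR);	/* FME char device (assumed node name) */
	if (fd < 0)
		return -1;

	memset(&port_pr, 0, sizeof(port_pr));
	port_pr.argsz = sizeof(port_pr);	/* must cover buffer_address, see minsz check */
	port_pr.flags = 0;			/* non-zero flags are rejected with -EINVAL */
	port_pr.port_id = 0;			/* target port, validated against FME_CAP_NUM_PORTS */
	port_pr.buffer_size = size;
	port_pr.buffer_address = (uint64_t)(uintptr_t)bitstream;

	ret = ioctl(fd, DFL_FPGA_FME_PORT_PR, &port_pr);
	close(fd);
	return ret;
}
#endif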