drivers/base/platform-msi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * MSI framework for platform devices
 *
 * Copyright (C) 2015 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>

#define DEV_ID_SHIFT	21
#define MAX_DEV_MSIS	(1 << (32 - DEV_ID_SHIFT))
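
/*
 * Layout of a platform MSI hwirq (see platform_msi_calc_hwirq() below):
 * the per-device devid occupies the upper DEV_ID_SHIFT (21) bits and the
 * MSI index within the device the remaining lower 11 bits, which is why
 * a single device is limited to MAX_DEV_MSIS (2048) vectors.
 */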

/*
 * Internal data structure containing a (made up, but unique) devid
 * and the callback to write the MSI message.
 */
struct platform_msi_priv_data {
	struct device		*dev;
	void			*host_data;
	msi_alloc_info_t	arg;
	irq_write_msi_msg_t	write_msg;
	int			devid;
};

/* The devid allocator */
static DEFINE_IDA(platform_msi_devid_ida);

#ifdef GENERIC_MSI_DOMAIN_OPS
/*
 * Convert an msi_desc to a globally unique identifier (per-device
 * devid + msi_desc position in the msi_list).
 */
static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
{
	u32 devid;

	devid = desc->platform.msi_priv_data->devid;

	return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
}

static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
	arg->hwirq = platform_msi_calc_hwirq(desc);
}

static int platform_msi_init(struct irq_domain *domain,
			     struct msi_domain_info *info,
			     unsigned int virq, irq_hw_number_t hwirq,
			     msi_alloc_info_t *arg)
{
	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     info->chip, info->chip_data);
}

static void platform_msi_set_proxy_dev(msi_alloc_info_t *arg)
{
	arg->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
}
#else
#define platform_msi_set_desc		NULL
#define platform_msi_init		NULL
#define platform_msi_set_proxy_dev(x)	do {} while(0)
#endif

static void platform_msi_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	BUG_ON(!ops);

	if (ops->msi_init == NULL)
		ops->msi_init = platform_msi_init;
	if (ops->set_desc == NULL)
		ops->set_desc = platform_msi_set_desc;
}

static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);
	struct platform_msi_priv_data *priv_data;

	priv_data = desc->platform.msi_priv_data;

	priv_data->write_msg(desc, msg);
}

static void platform_msi_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip);
	if (!chip->irq_mask)
		chip->irq_mask = irq_chip_mask_parent;
	if (!chip->irq_unmask)
		chip->irq_unmask = irq_chip_unmask_parent;
	if (!chip->irq_eoi)
		chip->irq_eoi = irq_chip_eoi_parent;
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
	if (!chip->irq_write_msi_msg)
		chip->irq_write_msi_msg = platform_msi_write_msg;
	if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		    !(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
		info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
}

static void platform_msi_free_descs(struct device *dev, int base, int nvec)
{
	struct msi_desc *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
		if (desc->platform.msi_index >= base &&
		    desc->platform.msi_index < (base + nvec)) {
			list_del(&desc->list);
			free_msi_entry(desc);
		}
	}
}

static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq,
					     int nvec,
					     struct platform_msi_priv_data *data)
{
	struct msi_desc *desc;
	int i, base = 0;

	if (!list_empty(dev_to_msi_list(dev))) {
		desc = list_last_entry(dev_to_msi_list(dev),
				       struct msi_desc, list);
		base = desc->platform.msi_index + 1;
	}

	for (i = 0; i < nvec; i++) {
		desc = alloc_msi_entry(dev, 1, NULL);
		if (!desc)
			break;

		desc->platform.msi_priv_data = data;
		desc->platform.msi_index = base + i;
		desc->irq = virq ? virq + i : 0;

		list_add_tail(&desc->list, dev_to_msi_list(dev));
	}

	if (i != nvec) {
		/* Clean up the mess */
		platform_msi_free_descs(dev, base, nvec);

		return -ENOMEM;
	}

	return 0;
}

static int platform_msi_alloc_descs(struct device *dev, int nvec,
				    struct platform_msi_priv_data *data)
{
	return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data);
}

/**
 * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Updates the domain and chip ops and creates a platform MSI
 * interrupt domain.
 *
 * Returns:
 * A domain pointer or NULL in case of failure.
 */
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		platform_msi_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		platform_msi_update_chip_ops(info);

	domain = msi_create_irq_domain(fwnode, info, parent);
	if (domain)
		irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);

	return domain;
}
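
/*
 * Minimal usage sketch (not part of this file; all "my_" names are
 * hypothetical): an MSI-capable interrupt controller driver fills in a
 * struct msi_domain_info and stacks a platform MSI domain on top of its
 * own parent domain. With the two USE_DEF flags set, the update helpers
 * above fill in every callback the driver leaves NULL: mask/unmask come
 * from the parent chip and irq_write_msi_msg defaults to
 * platform_msi_write_msg(), which invokes the per-device callback passed
 * to platform_msi_domain_alloc_irqs().
 *
 *	static struct irq_chip my_pmsi_chip = {
 *		.name	= "my-pMSI",
 *	};
 *
 *	static struct msi_domain_info my_pmsi_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.ops	= &my_pmsi_ops,
 *		.chip	= &my_pmsi_chip,
 *	};
 *
 *	domain = platform_msi_create_irq_domain(fwnode, &my_pmsi_info, parent);
 */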

static struct platform_msi_priv_data *
platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
			     irq_write_msi_msg_t write_msi_msg)
{
	struct platform_msi_priv_data *datap;
	/*
	 * Limit the number of interrupts to 2048 per device. Should we
	 * need to bump this up, DEV_ID_SHIFT should be adjusted
	 * accordingly (which would impact the max number of MSI
	 * capable devices).
	 */
	if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
		return ERR_PTR(-EINVAL);

	if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
		dev_err(dev, "Incompatible msi_domain, giving up\n");
		return ERR_PTR(-EINVAL);
	}

	/* Already had a helping of MSI? Greed... */
	if (!list_empty(dev_to_msi_list(dev)))
		return ERR_PTR(-EBUSY);

	datap = kzalloc(sizeof(*datap), GFP_KERNEL);
	if (!datap)
		return ERR_PTR(-ENOMEM);

	datap->devid = ida_simple_get(&platform_msi_devid_ida,
				      0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
	if (datap->devid < 0) {
		int err = datap->devid;
		kfree(datap);
		return ERR_PTR(err);
	}

	datap->write_msg = write_msi_msg;
	datap->dev = dev;

	return datap;
}

static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
{
	ida_simple_remove(&platform_msi_devid_ida, data->devid);
	kfree(data);
}

/**
 * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
 * @dev:		The device for which to allocate interrupts
 * @nvec:		The number of interrupts to allocate
 * @write_msi_msg:	Callback to write an interrupt message for @dev
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg)
{
	struct platform_msi_priv_data *priv_data;
	int err;

	priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
	if (IS_ERR(priv_data))
		return PTR_ERR(priv_data);

	err = platform_msi_alloc_descs(dev, nvec, priv_data);
	if (err)
		goto out_free_priv_data;

	err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
	if (err)
		goto out_free_desc;

	return 0;

out_free_desc:
	platform_msi_free_descs(dev, 0, nvec);
out_free_priv_data:
	platform_msi_free_priv_data(priv_data);

	return err;
}
EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
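
/*
 * Usage sketch from the device-driver side (not part of this file; the
 * "my_"/"MY_" names and registers are hypothetical): a driver whose device
 * sits below a platform MSI domain allocates its vectors, then walks the
 * resulting descriptors to request the Linux interrupts. The write_msi_msg
 * callback programs the doorbell address/data into the device.
 *
 *	static void my_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		struct my_dev *md = dev_get_drvdata(msi_desc_to_dev(desc));
 *
 *		writel_relaxed(msg->address_lo, md->base + MY_MSI_ADDR_LO);
 *		writel_relaxed(msg->address_hi, md->base + MY_MSI_ADDR_HI);
 *		writel_relaxed(msg->data, md->base + MY_MSI_DATA);
 *	}
 *
 *	err = platform_msi_domain_alloc_irqs(dev, nvec, my_write_msi_msg);
 *	if (err)
 *		return err;
 *
 *	for_each_msi_entry(desc, dev)
 *		err = request_irq(desc->irq, my_handler, 0, "my-dev", md);
 *
 * On teardown (or on any later error), platform_msi_domain_free_irqs(dev)
 * below releases everything allocated here.
 */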

/**
 * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
 * @dev:	The device for which to free interrupts
 */
void platform_msi_domain_free_irqs(struct device *dev)
{
	if (!list_empty(dev_to_msi_list(dev))) {
		struct msi_desc *desc;

		desc = first_msi_entry(dev);
		platform_msi_free_priv_data(desc->platform.msi_priv_data);
	}

	msi_domain_free_irqs(dev->msi_domain, dev);
	platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
}
EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);

/**
 * platform_msi_get_host_data - Query the private data associated with
 *				a platform-msi domain
 * @domain:	The platform-msi domain
 *
 * Returns the private data provided when calling
 * platform_msi_create_device_domain.
 */
void *platform_msi_get_host_data(struct irq_domain *domain)
{
	struct platform_msi_priv_data *data = domain->host_data;

	return data->host_data;
}

/**
 * __platform_msi_create_device_domain - Create a platform-msi device domain
 *
 * @dev:		The device generating the MSIs
 * @nvec:		The number of MSIs that need to be allocated
 * @is_tree:		Whether the resulting domain is a tree domain
 * @write_msi_msg:	Callback to write an interrupt message for @dev
 * @ops:		The hierarchy domain operations to use
 * @host_data:		Private data associated to this domain
 *
 * Returns an irqdomain for @nvec interrupts
 */
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				     unsigned int nvec,
				     bool is_tree,
				     irq_write_msi_msg_t write_msi_msg,
				     const struct irq_domain_ops *ops,
				     void *host_data)
{
	struct platform_msi_priv_data *data;
	struct irq_domain *domain;
	int err;

	data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
	if (IS_ERR(data))
		return NULL;

	data->host_data = host_data;
	domain = irq_domain_create_hierarchy(dev->msi_domain, 0,
					     is_tree ? 0 : nvec,
					     dev->fwnode, ops, data);
	if (!domain)
		goto free_priv;

	platform_msi_set_proxy_dev(&data->arg);
	err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
	if (err)
		goto free_domain;

	return domain;

free_domain:
	irq_domain_remove(domain);
free_priv:
	platform_msi_free_priv_data(data);
	return NULL;
}
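
/*
 * Sketch of one plausible device-domain setup (hypothetical "my_" names):
 * a bridge driver that turns wired interrupts into MSIs calls the
 * platform_msi_create_device_domain() wrapper from include/linux/msi.h
 * (which invokes the function above with is_tree = false), and its
 * irq_domain_ops .alloc callback uses platform_msi_domain_alloc() further
 * down in this file to populate the parent MSI domain, recovering its own
 * state through platform_msi_get_host_data().
 *
 *	static int my_domain_alloc(struct irq_domain *domain, unsigned int virq,
 *				   unsigned int nr_irqs, void *args)
 *	{
 *		struct my_bridge *br = platform_msi_get_host_data(domain);
 *
 *		// decode args, map the wired lines to hwirqs for br, then:
 *		return platform_msi_domain_alloc(domain, virq, nr_irqs);
 *	}
 *
 *	static const struct irq_domain_ops my_domain_ops = {
 *		.translate	= my_domain_translate,
 *		.alloc		= my_domain_alloc,
 *		.free		= platform_msi_domain_free,
 *	};
 *
 *	domain = platform_msi_create_device_domain(dev, nvec, my_write_msg,
 *						   &my_domain_ops, br);
 */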

/**
 * platform_msi_domain_free - Free interrupts associated with a platform-msi
 *			      domain
 *
 * @domain:	The platform-msi domain
 * @virq:	The base irq from which to perform the free operation
 * @nvec:	How many interrupts to free from @virq
 */
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec)
{
	struct platform_msi_priv_data *data = domain->host_data;
	struct msi_desc *desc, *tmp;
	for_each_msi_entry_safe(desc, tmp, data->dev) {
		if (WARN_ON(!desc->irq || desc->nvec_used != 1))
			return;
		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		irq_domain_free_irqs_common(domain, desc->irq, 1);
		list_del(&desc->list);
		free_msi_entry(desc);
	}
}

/**
 * platform_msi_domain_alloc - Allocate interrupts associated with
 *			       a platform-msi domain
 *
 * @domain:	The platform-msi domain
 * @virq:	The base irq from which to perform the allocate operation
 * @nr_irqs:	How many interrupts to allocate from @virq
 *
 * Return 0 on success, or an error code on failure. Must be called
 * with irq_domain_mutex held (which can only be done as part of a
 * top-level interrupt allocation).
 */
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs)
{
	struct platform_msi_priv_data *data = domain->host_data;
	int err;

	err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data);
	if (err)
		return err;

	err = msi_domain_populate_irqs(domain->parent, data->dev,
				       virq, nr_irqs, &data->arg);
	if (err)
		platform_msi_domain_free(domain, virq, nr_irqs);

	return err;
}