// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Endpoint *Controller* (EPC) library
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>

static const struct class pci_epc_class = {
	.name = "pci_epc",
};

static void devm_pci_epc_release(struct device *dev, void *res)
{
	struct pci_epc *epc = *(struct pci_epc **)res;

	pci_epc_destroy(epc);
}

static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
{
	struct pci_epc **epc = res;

	return *epc == match_data;
}

/**
 * pci_epc_put() - release the PCI endpoint controller
 * @epc: epc returned by pci_epc_get()
 *
 * release the refcount the caller obtained by invoking pci_epc_get()
 */
void pci_epc_put(struct pci_epc *epc)
{
	if (IS_ERR_OR_NULL(epc))
		return;

	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);

/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Invoke to get struct pci_epc * corresponding to the device name of the
 * endpoint controller
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, &pci_epc_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (strcmp(epc_name, dev_name(dev)))
			continue;

		epc = to_pci_epc(dev);
		if (!try_module_get(epc->ops->owner)) {
			ret = -EINVAL;
			goto err;
		}

		class_dev_iter_exit(&iter);
		get_device(&epc->dev);
		return epc;
	}

err:
	class_dev_iter_exit(&iter);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
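
/*
 * Usage sketch (illustrative only, not part of the driver): an endpoint
 * function driver that looks up its controller by name pairs pci_epc_get()
 * with pci_epc_put() to balance the module and device references taken
 * above.  The controller name is a made-up example.
 *
 *	struct pci_epc *epc;
 *
 *	epc = pci_epc_get("pcie-ep.0");		// hypothetical device name
 *	if (IS_ERR(epc))
 *		return PTR_ERR(epc);
 *	// ... use the controller ...
 *	pci_epc_put(epc);
 */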

/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 *
 * Invoke to get the first unreserved BAR that can be used by the endpoint
 * function.
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);

/**
 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 * @bar: the starting BAR number from where unreserved BAR should be searched
 *
 * Invoke to get the next unreserved BAR starting from @bar that can be used
 * for endpoint function.
 */
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
					 *epc_features, enum pci_barno bar)
{
	int i;

	if (!epc_features)
		return BAR_0;

	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
	if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
		bar++;

	for (i = bar; i < PCI_STD_NUM_BARS; i++) {
		/* If the BAR is not reserved, return it. */
		if (epc_features->bar[i].type != BAR_RESERVED)
			return i;
	}

	return NO_BAR;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
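
/*
 * Usage sketch (illustrative only): one plausible way for a function driver
 * to walk every BAR the controller leaves available, assuming @epc_features
 * was obtained from pci_epc_get_features() below.  Passing 'bar + 1' works
 * because pci_epc_get_next_free_bar() itself skips the upper half of a
 * 64-bit-only BAR.
 *
 *	enum pci_barno bar;
 *
 *	for (bar = pci_epc_get_first_free_bar(epc_features); bar != NO_BAR;
 *	     bar = pci_epc_get_next_free_bar(epc_features, bar + 1)) {
 *		// configure BAR 'bar' for this function
 *	}
 */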

static bool pci_epc_function_is_valid(struct pci_epc *epc,
				      u8 func_no, u8 vfunc_no)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return false;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return false;

	return true;
}

/**
 * pci_epc_get_features() - get the features supported by EPC
 * @epc: the features supported by *this* EPC device will be returned
 * @func_no: the features supported by the EPC device specific to the
 *	     endpoint function with func_no will be returned
 * @vfunc_no: the features supported by the EPC device specific to the
 *	      virtual endpoint function with vfunc_no will be returned
 *
 * Invoke to get the features provided by the EPC which may be
 * specific to an endpoint function. Returns pci_epc_features on success
 * and NULL for any failures.
 */
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
						    u8 func_no, u8 vfunc_no)
{
	const struct pci_epc_features *epc_features;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return NULL;

	if (!epc->ops->get_features)
		return NULL;

	mutex_lock(&epc->lock);
	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	return epc_features;
}
EXPORT_SYMBOL_GPL(pci_epc_get_features);
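
/*
 * Usage sketch (illustrative only): an EPF driver typically queries the
 * controller features once at bind time and adapts to them.  The
 * msi_capable member referenced here is assumed from struct
 * pci_epc_features in <linux/pci-epc.h>.
 *
 *	const struct pci_epc_features *feat;
 *
 *	feat = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
 *	if (!feat)
 *		return -EOPNOTSUPP;
 *	if (feat->msi_capable)
 *		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, 4);
 */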

/**
 * pci_epc_stop() - stop the PCI link
 * @epc: the link of the EPC device that has to be stopped
 *
 * Invoke to stop the PCI link
 */
void pci_epc_stop(struct pci_epc *epc)
{
	if (IS_ERR(epc) || !epc->ops->stop)
		return;

	mutex_lock(&epc->lock);
	epc->ops->stop(epc);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);

/**
 * pci_epc_start() - start the PCI link
 * @epc: the link of *this* EPC device has to be started
 *
 * Invoke to start the PCI link
 */
int pci_epc_start(struct pci_epc *epc)
{
	int ret;

	if (IS_ERR(epc))
		return -EINVAL;

	if (!epc->ops->start)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->start(epc);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_start);

/**
 * pci_epc_raise_irq() - interrupt the host system
 * @epc: the EPC device which has to interrupt the host
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @type: specify the type of interrupt; INTX, MSI or MSI-X
 * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
 *
 * Invoke to raise an INTX, MSI or MSI-X interrupt
 */
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		      unsigned int type, u16 interrupt_num)
{
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (!epc->ops->raise_irq)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
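
/*
 * Usage sketch (illustrative only): once the host has enabled MSI on this
 * function, the EPF driver can ring vector 1.  PCI_IRQ_MSI is assumed to be
 * the interrupt-type constant expected by @type on the caller's kernel
 * version.
 *
 *	ret = pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 *				PCI_IRQ_MSI, 1);
 *	if (ret)
 *		dev_err(&epf->dev, "failed to raise MSI\n");
 */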

/**
 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
 *			   MSI data
 * @epc: the EPC device which has the MSI capability
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: the physical address of the outbound region
 * @interrupt_num: the MSI interrupt number with range (1-N)
 * @entry_size: Size of Outbound address region for each interrupt
 * @msi_data: the data that should be written in order to raise MSI interrupt
 *	      with interrupt number as 'interrupt num'
 * @msi_addr_offset: Offset of MSI address from the aligned outbound address
 *		     to which the MSI address is mapped
 *
 * Invoke to map physical address to MSI address and return MSI data. The
 * physical address should be an address in the outbound region. This is
 * required to implement doorbell functionality of NTB wherein EPC on either
 * side of the interface (primary and secondary) can directly write to the
 * physical address (in outbound region) of the other interface to ring
 * doorbell.
 */
int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
			u32 *msi_data, u32 *msi_addr_offset)
{
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (!epc->ops->map_msi_irq)
		return -EINVAL;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
				    interrupt_num, entry_size, msi_data,
				    msi_addr_offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);

/**
 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
 * @epc: the EPC device to which MSI interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI interrupts allocated by the RC
 */
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return 0;

	if (!epc->ops->get_msi)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	interrupt = 1 << interrupt;

	return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);

/**
 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
 * @epc: the EPC device on which MSI has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI interrupts required by the EPF
 *
 * Invoke to set the required number of MSI interrupts.
 */
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
	int ret;
	u8 encode_int;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (interrupts < 1 || interrupts > 32)
		return -EINVAL;

	if (!epc->ops->set_msi)
		return 0;

	encode_int = order_base_2(interrupts);

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msi);
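
/*
 * Worked example (illustrative only) of the power-of-two encoding used
 * above: requesting 12 vectors gives encode_int = order_base_2(12) = 4, so
 * the controller advertises 2^4 = 16 vectors in the MSI Multiple Message
 * Capable field, and pci_epc_get_msi() decodes the allocated count back
 * with 1 << n.
 *
 *	ret = pci_epc_set_msi(epc, func_no, vfunc_no, 12);	// advertises 16
 *	...
 *	nr = pci_epc_get_msi(epc, func_no, vfunc_no);		// 0 if none
 */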

/**
 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
 * @epc: the EPC device to which MSI-X interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI-X interrupts allocated by the RC
 */
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return 0;

	if (!epc->ops->get_msix)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	return interrupt + 1;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);

/**
 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
 * @epc: the EPC device on which MSI-X has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI-X interrupts required by the EPF
 * @bir: BAR where the MSI-X table resides
 * @offset: Offset pointing to the start of MSI-X table
 *
 * Invoke to set the required number of MSI-X interrupts.
 */
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     u16 interrupts, enum pci_barno bir, u32 offset)
{
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (interrupts < 1 || interrupts > 2048)
		return -EINVAL;

	if (!epc->ops->set_msix)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
				 offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msix);
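
/*
 * Usage sketch (illustrative only): the MSI-X Table Size field encodes N-1,
 * which is why 'interrupts - 1' is passed to the controller above and
 * pci_epc_get_msix() adds 1 back.  Requesting 32 vectors with the table at
 * offset 0 of BAR 2 could look like:
 *
 *	ret = pci_epc_set_msix(epc, func_no, vfunc_no, 32, BAR_2, 0);
 */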

/**
 * pci_epc_unmap_addr() - unmap CPU address from PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 *
 * Invoke to unmap the CPU address from PCI address.
 */
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr)
{
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return;

	if (!epc->ops->unmap_addr)
		return;

	mutex_lock(&epc->lock);
	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);

/**
 * pci_epc_map_addr() - map CPU address to PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 * @pci_addr: PCI address to which the physical address should be mapped
 * @size: the size of the allocation
 *
 * Invoke to map CPU address with PCI address.
 */
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (!epc->ops->map_addr)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
				 size);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_addr);
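
/*
 * Usage sketch (illustrative only): the classic open-coded sequence that
 * pci_epc_mem_map() below wraps up.  A window of controller memory is
 * allocated, mapped to a host-side PCI address, used, then torn down.
 * 'host_addr' stands in for a bus address obtained from the host.
 *
 *	void __iomem *virt;
 *	phys_addr_t phys;
 *
 *	virt = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
 *	if (!virt)
 *		return -ENOMEM;
 *	ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys, host_addr, SZ_4K);
 *	if (ret)
 *		goto free;
 *	memcpy_toio(virt, buf, len);
 *	pci_epc_unmap_addr(epc, func_no, vfunc_no, phys);
 * free:
 *	pci_epc_mem_free_addr(epc, phys, virt, SZ_4K);
 */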

/**
 * pci_epc_mem_map() - allocate and map a PCI address to a CPU address
 * @epc: the EPC device on which the CPU address is to be allocated and mapped
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @pci_addr: PCI address to which the CPU address should be mapped
 * @pci_size: the number of bytes to map starting from @pci_addr
 * @map: where to return the mapping information
 *
 * Allocate a controller memory address region and map it to a RC PCI address
 * region, taking into account the controller physical address mapping
 * constraints using the controller operation align_addr(). If this operation is
 * not defined, we assume that there are no alignment constraints for the
 * mapping.
 *
 * The effective size of the PCI address range mapped from @pci_addr is
 * indicated by @map->pci_size. This size may be less than the requested
 * @pci_size. The local virtual CPU address for the mapping is indicated by
 * @map->virt_addr (@map->phys_addr indicates the physical address).
 * The size and CPU address of the controller memory allocated and mapped are
 * respectively indicated by @map->map_size and @map->virt_base (and
 * @map->phys_base for the physical address of @map->virt_base).
 *
 * Returns 0 on success and a negative error code in case of error.
 */
int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    u64 pci_addr, size_t pci_size, struct pci_epc_map *map)
{
	size_t map_size = pci_size;
	size_t map_offset = 0;
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (!pci_size || !map)
		return -EINVAL;

	/*
	 * Align the PCI address to map. If the controller defines the
	 * .align_addr() operation, use it to determine the PCI address to map
	 * and the size of the mapping. Otherwise, assume that the controller
	 * has no alignment constraint.
	 */
	memset(map, 0, sizeof(*map));
	map->pci_addr = pci_addr;
	if (epc->ops->align_addr)
		map->map_pci_addr =
			epc->ops->align_addr(epc, pci_addr,
					     &map_size, &map_offset);
	else
		map->map_pci_addr = pci_addr;
	map->map_size = map_size;
	if (map->map_pci_addr + map->map_size < pci_addr + pci_size)
		map->pci_size = map->map_pci_addr + map->map_size - pci_addr;
	else
		map->pci_size = pci_size;

	map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base,
						map->map_size);
	if (!map->virt_base)
		return -ENOMEM;

	map->phys_addr = map->phys_base + map_offset;
	map->virt_addr = map->virt_base + map_offset;

	ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base,
			       map->map_pci_addr, map->map_size);
	if (ret) {
		pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
				      map->map_size);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_map);
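
/*
 * Usage sketch (illustrative only): mapping a host buffer through the
 * alignment-aware helper above and accessing it via map.virt_addr.  Note
 * that map.pci_size may come back smaller than requested, in which case the
 * caller maps the remainder with further pci_epc_mem_map() calls.
 *
 *	struct pci_epc_map map;
 *
 *	ret = pci_epc_mem_map(epc, func_no, vfunc_no, host_addr, len, &map);
 *	if (ret)
 *		return ret;
 *	memcpy_fromio(buf, map.virt_addr, map.pci_size);
 *	pci_epc_mem_unmap(epc, func_no, vfunc_no, &map);
 */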

/**
 * pci_epc_mem_unmap() - unmap and free a CPU address region
 * @epc: the EPC device on which the CPU address is allocated and mapped
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @map: the mapping information
 *
 * Unmap and free a CPU address region that was allocated and mapped with
 * pci_epc_mem_map().
 */
void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epc_map *map)
{
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return;

	if (!map || !map->virt_base)
		return;

	pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base);
	pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
			      map->map_size);
}
EXPORT_SYMBOL_GPL(pci_epc_mem_unmap);

/**
 * pci_epc_clear_bar() - reset the BAR
 * @epc: the EPC device for which the BAR has to be cleared
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to reset the BAR of the endpoint device.
 */
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epf_bar *epf_bar)
{
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return;

	if (epf_bar->barno == BAR_5 &&
	    epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
		return;

	if (!epc->ops->clear_bar)
		return;

	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);

/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	int ret;
	int flags = epf_bar->flags;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
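
/*
 * Usage sketch (illustrative only): a function driver normally fills a
 * struct pci_epf_bar (typically via pci_epf_alloc_space()) and then asks
 * the controller to expose it.  The checks above reject a 64-bit BAR_5 and
 * BARs larger than 4 GiB that are not flagged 64-bit.
 *
 *	struct pci_epf_bar *epf_bar = &epf->bar[BAR_0];
 *
 *	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
 *	if (ret)
 *		dev_err(&epf->dev, "failed to set BAR0\n");
 *	...
 *	pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
 */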

/**
 * pci_epc_write_header() - write standard configuration header
 * @epc: the EPC device to which the configuration header should be written
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @header: standard configuration header fields
 *
 * Invoke to write the configuration header to the endpoint controller. Every
 * endpoint controller will have a dedicated location to which the standard
 * configuration header would be written. The callback function should write
 * the header fields to this dedicated location.
 */
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			 struct pci_epf_header *header)
{
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	/* Only Virtual Function #1 has deviceID */
	if (vfunc_no > 1)
		return -EINVAL;

	if (!epc->ops->write_header)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_write_header);
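
/*
 * Usage sketch (illustrative only): an EPF driver usually keeps a static
 * header and writes it once the controller reports itself initialized.
 * The vendor/device IDs below are placeholders, not real allocations.
 *
 *	static struct pci_epf_header header = {
 *		.vendorid	= 0x104c,	// placeholder vendor ID
 *		.deviceid	= 0xb500,	// placeholder device ID
 *		.baseclass_code	= PCI_CLASS_OTHERS,
 *		.interrupt_pin	= PCI_INTERRUPT_INTA,
 *	};
 *
 *	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, &header);
 */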

/**
 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
 * @epc: the EPC device to which the endpoint function should be added
 * @epf: the endpoint function to be added
 * @type: Identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * A PCI endpoint device can have one or more functions. In the case of PCIe,
 * the specification allows up to 8 PCIe endpoint functions. Invoke
 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
 */
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
		    enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no;
	int ret = 0;

	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
		return -EINVAL;

	if (type == PRIMARY_INTERFACE && epf->epc)
		return -EBUSY;

	if (type == SECONDARY_INTERFACE && epf->sec_epc)
		return -EBUSY;

	mutex_lock(&epc->list_lock);
	func_no = find_first_zero_bit(&epc->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		ret = -EINVAL;
		goto ret;
	}

	if (func_no > epc->max_functions - 1) {
		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
		ret = -EINVAL;
		goto ret;
	}

	set_bit(func_no, &epc->function_num_map);
	if (type == PRIMARY_INTERFACE) {
		epf->func_no = func_no;
		epf->epc = epc;
		list = &epf->list;
	} else {
		epf->sec_epc_func_no = func_no;
		epf->sec_epc = epc;
		list = &epf->sec_epc_list;
	}

	list_add_tail(list, &epc->pci_epf);
ret:
	mutex_unlock(&epc->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);

/**
 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
 * @epc: the EPC device from which the endpoint function should be removed
 * @epf: the endpoint function to be removed
 * @type: identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * Invoke to remove PCI endpoint function from the endpoint controller.
 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
			enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no = 0;

	if (IS_ERR_OR_NULL(epc) || !epf)
		return;

	mutex_lock(&epc->list_lock);
	if (type == PRIMARY_INTERFACE) {
		func_no = epf->func_no;
		list = &epf->list;
		epf->epc = NULL;
	} else {
		func_no = epf->sec_epc_func_no;
		list = &epf->sec_epc_list;
		epf->sec_epc = NULL;
	}
	clear_bit(func_no, &epc->function_num_map);
	list_del(list);
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);

/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 *		      connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);

/**
 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
 *			connection with the Root Complex.
 * @epc: the EPC device which has dropped the link with the host
 *
 * Invoke to notify the EPF device that the EPC device has dropped the
 * connection with the Root Complex.
 */
void pci_epc_linkdown(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_down)
			epf->event_ops->link_down(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkdown);

/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device initialization
 *			   is completed.
 * @epc: the EPC device whose initialization is completed
 *
 * Invoke to notify the EPF device that the EPC device's initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_init)
			epf->event_ops->epc_init(epf);
		mutex_unlock(&epf->lock);
	}
	epc->init_complete = true;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);

/**
 * pci_epc_notify_pending_init() - Notify the pending EPC device initialization
 *				   complete to the EPF device
 * @epc: the EPC device whose initialization is pending to be notified
 * @epf: the EPF device to be notified
 *
 * Invoke to notify the pending EPC device initialization complete to the EPF
 * device. This is used to deliver the notification if the EPC initialization
 * got completed before the EPF driver bind.
 */
void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
{
	if (epc->init_complete) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_init)
			epf->event_ops->epc_init(epf);
		mutex_unlock(&epf->lock);
	}
}
EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);

/**
 * pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
 * @epc: the EPC device whose deinitialization is completed
 *
 * Invoke to notify the EPF device that the EPC deinitialization is completed.
 */
void pci_epc_deinit_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_deinit)
			epf->event_ops->epc_deinit(epf);
		mutex_unlock(&epf->lock);
	}
	epc->init_complete = false;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);

/**
 * pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
 *					device has received the Bus Master
 *					Enable event from the Root complex
 * @epc: the EPC device that received the Bus Master Enable event
 *
 * Notify the EPF device that the EPC device has generated the Bus Master
 * Enable event due to the host setting the Bus Master Enable bit in the
 * Command register.
 */
void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->bus_master_enable)
			epf->event_ops->bus_master_enable(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);

/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	pci_ep_cfs_remove_epc_group(epc->group);
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr);
#endif
	device_unregister(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);

/**
 * devm_pci_epc_destroy() - destroy the EPC device
 * @dev: device that wants to destroy the EPC
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the devres associated with this
 * pci_epc and destroy the EPC device.
 */
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
{
	int r;

	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
			   epc);
	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
}
EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);

static void pci_epc_release(struct device *dev)
{
	kfree(to_pci_epc(dev));
}

/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	device_initialize(&epc->dev);
	epc->dev.class = &pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	epc->ops = ops;

#ifdef CONFIG_PCI_DOMAINS_GENERIC
	epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
#else
	/*
	 * TODO: If the architecture doesn't support generic PCI
	 * domains, then a custom implementation has to be used.
	 */
	WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
#endif

	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);

/**
 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 * While at that, it also associates the device with the pci_epc using devres.
 * On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		      struct module *owner)
{
	struct pci_epc **ptr, *epc;

	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	epc = __pci_epc_create(dev, ops, owner);
	if (!IS_ERR(epc)) {
		*ptr = epc;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return epc;
}
EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
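
/*
 * Usage sketch (illustrative only): a controller driver's probe path
 * normally goes through the devm_pci_epc_create() wrapper declared in
 * <linux/pci-epc.h>, which supplies THIS_MODULE as @owner.  The ops table
 * and its callbacks are placeholders.
 *
 *	static const struct pci_epc_ops my_epc_ops = {
 *		.write_header	= my_epc_write_header,
 *		.set_bar	= my_epc_set_bar,
 *		// ...
 *	};
 *
 *	epc = devm_pci_epc_create(&pdev->dev, &my_epc_ops);
 *	if (IS_ERR(epc))
 *		return PTR_ERR(epc);
 */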

static int __init pci_epc_init(void)
{
	return class_register(&pci_epc_class);
}
module_init(pci_epc_init);

static void __exit pci_epc_exit(void)
{
	class_unregister(&pci_epc_class);
}
module_exit(pci_epc_exit);

MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");