// SPDX-License-Identifier: GPL-2.0
/*
 * Handle device page faults
 *
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "iommu-priv.h"

/*
 * Return the fault parameter of a device if it exists. Otherwise, return NULL.
 * On a successful return, the caller takes a reference of this parameter and
 * should put it after use by calling iopf_put_dev_fault_param().
 */
static struct iommu_fault_param *iopf_get_dev_fault_param(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_param *fault_param;

	rcu_read_lock();
	fault_param = rcu_dereference(param->fault_param);
	if (fault_param && !refcount_inc_not_zero(&fault_param->users))
		fault_param = NULL;
	rcu_read_unlock();

	return fault_param;
}

/* Caller must hold a reference of the fault parameter. */
static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param)
{
	if (refcount_dec_and_test(&fault_param->users))
		kfree_rcu(fault_param, rcu);
}

static void __iopf_free_group(struct iopf_group *group)
{
	struct iopf_fault *iopf, *next;

	list_for_each_entry_safe(iopf, next, &group->faults, list) {
		if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
			kfree(iopf);
	}

	/* Pair with iommu_report_device_fault(). */
	iopf_put_dev_fault_param(group->fault_param);
}

void iopf_free_group(struct iopf_group *group)
{
	__iopf_free_group(group);
	kfree(group);
}
EXPORT_SYMBOL_GPL(iopf_free_group);

/* Non-last request of a group. Postpone until the last one. */
static int report_partial_fault(struct iommu_fault_param *fault_param,
				struct iommu_fault *fault)
{
	struct iopf_fault *iopf;

	iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
	if (!iopf)
		return -ENOMEM;

	iopf->fault = *fault;

	mutex_lock(&fault_param->lock);
	list_add(&iopf->list, &fault_param->partial);
	mutex_unlock(&fault_param->lock);

	return 0;
}

static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
					   struct iopf_fault *evt,
					   struct iopf_group *abort_group)
{
	struct iopf_fault *iopf, *next;
	struct iopf_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		/*
		 * We always need to construct the group as we need it to abort
		 * the request at the driver if it can't be handled.
		 */
		group = abort_group;
	}

	group->fault_param = iopf_param;
	group->last_fault.fault = evt->fault;
	INIT_LIST_HEAD(&group->faults);
	INIT_LIST_HEAD(&group->pending_node);
	list_add(&group->last_fault.list, &group->faults);

	/* See if we have partial faults for this group */
	mutex_lock(&iopf_param->lock);
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
		if (iopf->fault.prm.grpid == evt->fault.prm.grpid)
			/* Insert *before* the last fault */
			list_move(&iopf->list, &group->faults);
	}
	list_add(&group->pending_node, &iopf_param->faults);
	mutex_unlock(&iopf_param->lock);

	group->fault_count = list_count_nodes(&group->faults);

	return group;
}

static struct iommu_attach_handle *find_fault_handler(struct device *dev,
						      struct iopf_fault *evt)
{
	struct iommu_fault *fault = &evt->fault;
	struct iommu_attach_handle *attach_handle;

	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		attach_handle = iommu_attach_handle_get(dev->iommu_group,
				fault->prm.pasid, 0);
		if (IS_ERR(attach_handle)) {
			const struct iommu_ops *ops = dev_iommu_ops(dev);

			if (!ops->user_pasid_table)
				return NULL;
			/*
			 * The iommu driver for this device supports user-
			 * managed PASID table. Therefore page faults for
			 * any PASID should go through the NESTING domain
			 * attached to the device RID.
			 */
			attach_handle = iommu_attach_handle_get(
					dev->iommu_group, IOMMU_NO_PASID,
					IOMMU_DOMAIN_NESTED);
			if (IS_ERR(attach_handle))
				return NULL;
		}
	} else {
		attach_handle = iommu_attach_handle_get(dev->iommu_group,
				IOMMU_NO_PASID, 0);

		if (IS_ERR(attach_handle))
			return NULL;
	}

	if (!attach_handle->domain->iopf_handler)
		return NULL;

	return attach_handle;
}

static void iopf_error_response(struct device *dev, struct iopf_fault *evt)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_fault *fault = &evt->fault;
	struct iommu_page_response resp = {
		.pasid = fault->prm.pasid,
		.grpid = fault->prm.grpid,
		.code = IOMMU_PAGE_RESP_INVALID
	};

	ops->page_response(dev, evt, &resp);
}

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. If this function fails, then ops->page_response() was called to
 * complete evt if required.
 *
 * This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard
 * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
 * expect a response. It may be generated when disabling a PASID (issuing a
 * PASID stop request) by some PCI devices.
 *
 * The PASID stop request is issued by the device driver before unbind(). Once
 * it completes, no page request is generated for this PASID anymore and
 * outstanding ones have been pushed to the IOMMU (as per PCIe 4.0r1.0 - 6.20.1
 * and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some PCI devices will wait
 * for all outstanding page requests to come back with a response before
 * completing the PASID stop request. Others do not wait for page responses, and
 * instead issue this Stop Marker that tells us when the PASID can be
 * reallocated.
 *
 * It is safe to discard the Stop Marker because it is an optimization.
 * a. Page requests, which are posted requests, have been flushed to the IOMMU
 *    when the stop request completes.
 * b. The IOMMU driver flushes all fault queues on unbind() before freeing the
 *    PASID.
 *
 * So even though the Stop Marker might be issued by the device *after* the stop
 * request completes, outstanding faults will have been dealt with by the time
 * the PASID is freed.
 *
 * Any valid page fault is eventually routed to an iommu domain, and the page
 * fault handler installed there gets called. Users of this handling framework
 * must guarantee that the iommu domain is only freed after the device has
 * stopped generating page faults (or the iommu hardware has been set to block
 * them) and the pending page faults have been flushed. If no page fault
 * handler is attached or no iopf params are set up, ops->page_response() is
 * called to complete the evt.
 *
 * Return: 0 on success, or an error in case of a bad/failed iopf setup.
 */
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
	struct iommu_attach_handle *attach_handle;
	struct iommu_fault *fault = &evt->fault;
	struct iommu_fault_param *iopf_param;
	struct iopf_group abort_group = {};
	struct iopf_group *group;

	attach_handle = find_fault_handler(dev, evt);
	if (!attach_handle)
		goto err_bad_iopf;

	/*
	 * Something has gone wrong if a fault capable domain is attached but no
	 * iopf_param is setup
	 */
	iopf_param = iopf_get_dev_fault_param(dev);
	if (WARN_ON(!iopf_param))
		goto err_bad_iopf;

	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		int ret;

		ret = report_partial_fault(iopf_param, fault);
		iopf_put_dev_fault_param(iopf_param);
		/* A request that is not the last does not need to be ack'd */

		return ret;
	}

	/*
	 * This is the last page fault of a group. Allocate an iopf group and
	 * pass it to domain's page fault handler. The group holds a reference
	 * count of the fault parameter. It will be released after response or
	 * error path of this function. If an error is returned, the caller
	 * will send a response to the hardware. We need to clean up before
	 * leaving, otherwise partial faults will be stuck.
	 */
	group = iopf_group_alloc(iopf_param, evt, &abort_group);
	if (group == &abort_group)
		goto err_abort;

	group->attach_handle = attach_handle;

	/*
	 * On success iopf_handler must call iopf_group_response() and
	 * iopf_free_group()
	 */
	if (group->attach_handle->domain->iopf_handler(group))
		goto err_abort;

	return 0;

err_abort:
	dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n",
			     fault->prm.pasid);
	iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
	if (group == &abort_group)
		__iopf_free_group(group);
	else
		iopf_free_group(group);

	return 0;

err_bad_iopf:
	if (fault->type == IOMMU_FAULT_PAGE_REQ)
		iopf_error_response(dev, evt);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

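/*
 * Example (illustrative sketch, not part of this file): how a hypothetical
 * IOMMU driver might report page requests from its threaded IRQ handler.
 * All "example_*" names are assumptions; only iommu_report_device_fault()
 * is real.
 *
 *	static irqreturn_t example_prq_thread(int irq, void *cookie)
 *	{
 *		struct example_iommu *iommu = cookie;
 *		struct iopf_fault evt = {};
 *
 *		while (example_pop_page_request(iommu, &evt.fault)) {
 *			struct device *dev;
 *
 *			dev = example_prq_to_device(iommu, &evt.fault);
 *			if (!dev)
 *				continue;
 *			// On failure, the core has already completed evt via
 *			// ops->page_response() if a response was required.
 *			if (iommu_report_device_fault(dev, &evt))
 *				dev_dbg(dev, "page request dropped\n");
 *		}
 *
 *		return IRQ_HANDLED;
 *	}
 */
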
/**
 * iopf_queue_flush_dev - Ensure that all queued faults have been processed
 * @dev: the endpoint whose faults need to be flushed.
 *
 * The IOMMU driver calls this before releasing a PASID, to ensure that all
 * pending faults for this PASID have been handled, and won't hit the address
 * space of the next process that uses this PASID. The driver must make sure
 * that no new fault is added to the queue. In particular it must flush its
 * low-level queue before calling this function.
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_flush_dev(struct device *dev)
{
	struct iommu_fault_param *iopf_param;

	/*
	 * It's a driver bug to be here after iopf_queue_remove_device().
	 * Therefore, it's safe to dereference the fault parameter without
	 * holding the RCU read lock.
	 */
	iopf_param = rcu_dereference_check(dev->iommu->fault_param, true);
	if (WARN_ON(!iopf_param))
		return -ENODEV;

	flush_workqueue(iopf_param->queue->wq);

	return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);

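/*
 * Example (sketch, assumptions marked): PASID teardown in a hypothetical
 * driver. example_clear_pasid_entry() and example_flush_hw_prq() are made-up
 * hooks; the ordering follows the kernel-doc above.
 *
 *	static void example_unbind_pasid(struct device *dev, ioasid_t pasid)
 *	{
 *		// 1. Stop the hardware from generating new faults for this PASID.
 *		example_clear_pasid_entry(dev, pasid);
 *		// 2. Flush the low-level (hardware) page request queue.
 *		example_flush_hw_prq(dev);
 *		// 3. Wait until the already-reported faults have been handled.
 *		iopf_queue_flush_dev(dev);
 *	}
 */
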
/**
 * iopf_group_response - Respond a group of page faults
 * @group: the group of faults with the same group id
 * @status: the response code
 */
void iopf_group_response(struct iopf_group *group,
			 enum iommu_page_response_code status)
{
	struct iommu_fault_param *fault_param = group->fault_param;
	struct iopf_fault *iopf = &group->last_fault;
	struct device *dev = group->fault_param->dev;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_page_response resp = {
		.pasid = iopf->fault.prm.pasid,
		.grpid = iopf->fault.prm.grpid,
		.code = status,
	};

	/* Only send response if there is a fault report pending */
	mutex_lock(&fault_param->lock);
	if (!list_empty(&group->pending_node)) {
		ops->page_response(dev, &group->last_fault, &resp);
		list_del_init(&group->pending_node);
	}
	mutex_unlock(&fault_param->lock);
}
EXPORT_SYMBOL_GPL(iopf_group_response);

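/*
 * Example (minimal synchronous sketch): the shape of a domain->iopf_handler
 * that satisfies the contract above: on success it must eventually call
 * iopf_group_response() and iopf_free_group(). Real handlers usually bounce
 * the group to the IOPF workqueue; example_handle_fault() is an assumption.
 *
 *	static int example_iopf_handler(struct iopf_group *group)
 *	{
 *		enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
 *		struct iopf_fault *iopf;
 *
 *		list_for_each_entry(iopf, &group->faults, list) {
 *			if (example_handle_fault(group->attach_handle->domain,
 *						 &iopf->fault)) {
 *				status = IOMMU_PAGE_RESP_INVALID;
 *				break;
 *			}
 *		}
 *
 *		iopf_group_response(group, status);
 *		iopf_free_group(group);
 *
 *		return 0;
 *	}
 */
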
/**
 * iopf_queue_discard_partial - Remove all pending partial fault
 * @queue: the queue whose partial faults need to be discarded
 *
 * When the hardware queue overflows, last page faults in a group may have been
 * lost and the IOMMU driver calls this to discard all partial faults. The
 * driver shouldn't be adding new faults to this queue concurrently.
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	struct iopf_fault *iopf, *next;
	struct iommu_fault_param *iopf_param;

	if (!queue)
		return -EINVAL;

	mutex_lock(&queue->lock);
	list_for_each_entry(iopf_param, &queue->devices, queue_list) {
		mutex_lock(&iopf_param->lock);
		list_for_each_entry_safe(iopf, next, &iopf_param->partial,
					 list) {
			list_del(&iopf->list);
			kfree(iopf);
		}
		mutex_unlock(&iopf_param->lock);
	}
	mutex_unlock(&queue->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);

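/*
 * Example (sketch): a hypothetical driver's reaction to a hardware PRQ
 * overflow, per the kernel-doc above. The "example_*" helpers and the
 * iopf_queue member are assumptions.
 *
 *	static void example_handle_prq_overflow(struct example_iommu *iommu)
 *	{
 *		// The hardware queue wrapped: LAST requests may be lost, so
 *		// partial faults accumulated so far can never complete.
 *		example_reset_hw_prq(iommu);
 *		iopf_queue_discard_partial(iommu->iopf_queue);
 *	}
 */
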
/**
 * iopf_queue_add_device - Add producer to the fault queue
 * @queue: IOPF queue
 * @dev: device to add
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	int ret = 0;
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_param *fault_param;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (!ops->page_response)
		return -ENODEV;

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	if (rcu_dereference_check(param->fault_param,
				  lockdep_is_held(&param->lock))) {
		ret = -EBUSY;
		goto done_unlock;
	}

	fault_param = kzalloc(sizeof(*fault_param), GFP_KERNEL);
	if (!fault_param) {
		ret = -ENOMEM;
		goto done_unlock;
	}

	mutex_init(&fault_param->lock);
	INIT_LIST_HEAD(&fault_param->faults);
	INIT_LIST_HEAD(&fault_param->partial);
	fault_param->dev = dev;
	refcount_set(&fault_param->users, 1);
	list_add(&fault_param->queue_list, &queue->devices);
	fault_param->queue = queue;

	rcu_assign_pointer(param->fault_param, fault_param);

done_unlock:
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_add_device);

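/*
 * Example (sketch): enabling IOPF for one endpoint in a hypothetical driver,
 * assuming a queue was already allocated (see iopf_queue_alloc() below).
 * PRI/ATS enablement is device specific; example_enable_pri() is an
 * assumption.
 *
 *	static int example_enable_iopf(struct example_iommu *iommu,
 *				       struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = iopf_queue_add_device(iommu->iopf_queue, dev);
 *		if (ret)
 *			return ret;
 *
 *		ret = example_enable_pri(dev);
 *		if (ret)
 *			iopf_queue_remove_device(iommu->iopf_queue, dev);
 *
 *		return ret;
 *	}
 */
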
/**
 * iopf_queue_remove_device - Remove producer from fault queue
 * @queue: IOPF queue
 * @dev: device to remove
 *
 * Removing a device from an iopf_queue. It's recommended to follow these
 * steps when removing a device:
 *
 * - Disable new PRI reception: Turn off PRI generation in the IOMMU hardware
 *   and flush any hardware page request queues. This should be done before
 *   calling into this helper.
 * - Acknowledge all outstanding PRQs to the device: Respond to all outstanding
 *   page requests with IOMMU_PAGE_RESP_INVALID, indicating the device should
 *   not retry. This helper function handles this.
 * - Disable PRI on the device: After calling this helper, the caller could
 *   then disable PRI on the device.
 *
 * Calling iopf_queue_remove_device() essentially disassociates the device.
 * The fault_param might still exist, but iommu_page_response() will do
 * nothing. The device fault parameter reference count has been properly
 * passed from iommu_report_device_fault() to the fault handling work, and
 * will eventually be released after iommu_page_response().
 */
void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
	struct iopf_fault *partial_iopf;
	struct iopf_fault *next;
	struct iopf_group *group, *temp;
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_param *fault_param;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	fault_param = rcu_dereference_check(param->fault_param,
					    lockdep_is_held(&param->lock));

	if (WARN_ON(!fault_param || fault_param->queue != queue))
		goto unlock;

	mutex_lock(&fault_param->lock);
	list_for_each_entry_safe(partial_iopf, next, &fault_param->partial, list)
		kfree(partial_iopf);

	list_for_each_entry_safe(group, temp, &fault_param->faults, pending_node) {
		struct iopf_fault *iopf = &group->last_fault;
		struct iommu_page_response resp = {
			.pasid = iopf->fault.prm.pasid,
			.grpid = iopf->fault.prm.grpid,
			.code = IOMMU_PAGE_RESP_INVALID
		};

		ops->page_response(dev, iopf, &resp);
		list_del_init(&group->pending_node);
	}
	mutex_unlock(&fault_param->lock);

	list_del(&fault_param->queue_list);

	/* dec the ref owned by iopf_queue_add_device() */
	rcu_assign_pointer(param->fault_param, NULL);
	iopf_put_dev_fault_param(fault_param);
unlock:
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iopf_queue_remove_device);

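/*
 * Example (sketch): the teardown ordering recommended by the kernel-doc
 * above, in a hypothetical driver. The "example_*" helpers are assumptions.
 *
 *	static void example_disable_iopf(struct example_iommu *iommu,
 *					 struct device *dev)
 *	{
 *		// 1. Stop PRI generation in the IOMMU and drain its queues.
 *		example_stop_hw_pri(iommu, dev);
 *		// 2. Respond to outstanding requests and disassociate the device.
 *		iopf_queue_remove_device(iommu->iopf_queue, dev);
 *		// 3. Only now disable PRI on the endpoint itself.
 *		example_disable_device_pri(dev);
 *	}
 */
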
/**
 * iopf_queue_alloc - Allocate and initialize a fault queue
 * @name: a unique string identifying the queue (for workqueue)
 *
 * Return: the queue on success and NULL on error.
 */
struct iopf_queue *iopf_queue_alloc(const char *name)
{
	struct iopf_queue *queue;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;

	/*
	 * The WQ is unordered because the low-level handler enqueues faults by
	 * group. PRI requests within a group have to be ordered, but once
	 * that's dealt with, the high-level function can handle groups out of
	 * order.
	 */
	queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name);
	if (!queue->wq) {
		kfree(queue);
		return NULL;
	}

	INIT_LIST_HEAD(&queue->devices);
	mutex_init(&queue->lock);

	return queue;
}
EXPORT_SYMBOL_GPL(iopf_queue_alloc);

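/*
 * Example (sketch): typical queue lifetime, paired with iopf_queue_free()
 * below. One queue per IOMMU instance is a common choice, not a requirement;
 * the example_iommu struct and its members are assumptions.
 *
 *	static int example_init_iopf(struct example_iommu *iommu)
 *	{
 *		iommu->iopf_queue = iopf_queue_alloc(dev_name(iommu->dev));
 *		if (!iommu->iopf_queue)
 *			return -ENOMEM;
 *
 *		return 0;
 *	}
 *
 *	static void example_exit_iopf(struct example_iommu *iommu)
 *	{
 *		iopf_queue_free(iommu->iopf_queue);
 *		iommu->iopf_queue = NULL;
 *	}
 */
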
/**
 * iopf_queue_free - Free IOPF queue
 * @queue: queue to free
 *
 * Counterpart to iopf_queue_alloc(). The driver must not be queuing faults or
 * adding/removing devices on this queue anymore.
 */
void iopf_queue_free(struct iopf_queue *queue)
{
	struct iommu_fault_param *iopf_param, *next;

	if (!queue)
		return;

	list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list)
		iopf_queue_remove_device(queue, iopf_param->dev);

	destroy_workqueue(queue->wq);
	kfree(queue);
}
EXPORT_SYMBOL_GPL(iopf_queue_free);