/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_amdkfd.h"
/**
 * amdgpu_ih_ring_alloc - allocate memory for the IH ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller.
 * Returns 0 for success, errors for failure.
 */
static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
{
	int r;

	/* Allocate ring buffer */
	if (adev->irq.ih.ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->irq.ih.ring_obj,
					    &adev->irq.ih.gpu_addr,
					    (void **)&adev->irq.ih.ring);
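		/*
		 * Added commentary: on success this single call pins the
		 * ring in GTT and returns three views of one buffer: the BO
		 * handle (ring_obj), the GPU address the IH block is
		 * programmed with (gpu_addr), and the CPU mapping the driver
		 * reads entries through (ring).
		 */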
		if (r) {
			DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
/**
 * amdgpu_ih_ring_init - initialize the IH state
 *
 * @adev: amdgpu_device pointer
 * @ring_size: size of the ring buffer in bytes, rounded up to a power of two
 * @use_bus_addr: if true, allocate the ring via pci_alloc_consistent rather
 *                than as a GTT buffer object
 *
 * Initializes the IH state and allocates a buffer
 * for the IH ring buffer.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
			bool use_bus_addr)
{
	u32 rb_bufsz;
	int r;

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	adev->irq.ih.ring_size = ring_size;
	adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1;
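	/*
	 * Worked example (added commentary): a request for a 0x9000 byte
	 * ring holds 0x2400 dwords; order_base_2() rounds that up to 2^14,
	 * so the ring becomes (1 << 14) * 4 bytes = 64 KiB and ptr_mask is
	 * 0xffff.  The power-of-two size lets rptr/wptr wrap with a plain
	 * AND against ptr_mask.
	 */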
	adev->irq.ih.rptr = 0;
	adev->irq.ih.use_bus_addr = use_bus_addr;

	if (adev->irq.ih.use_bus_addr) {
		if (!adev->irq.ih.ring) {
			/* add 8 bytes for the rptr/wptr shadows and
			 * add them to the end of the ring allocation.
			 */
			adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
								 adev->irq.ih.ring_size + 8,
								 &adev->irq.ih.rb_dma_addr);
			if (adev->irq.ih.ring == NULL)
				return -ENOMEM;
			memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
			adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
			adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
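			/*
			 * Resulting layout (added commentary): the two
			 * shadows occupy the 8 extra bytes past the ring
			 * proper, addressed as dword offsets into the same
			 * mapping:
			 *
			 *   bytes [0, ring_size)  IH ring entries
			 *   byte  ring_size + 0   wptr shadow (dword ring_size/4)
			 *   byte  ring_size + 4   rptr shadow (dword ring_size/4 + 1)
			 */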
		}
		return 0;
	} else {
		r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs);
		if (r) {
			amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
			dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
			return r;
		}

		return amdgpu_ih_ring_alloc(adev);
	}
}
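/*
 * Usage sketch (added commentary): an IP-level sw_init chooses the ring
 * size and addressing mode, e.g.
 *
 *	r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
 *
 * The size is rounded to a power of two above; use_bus_addr selects the
 * PCI-consistent allocation with trailing rptr/wptr shadows instead of
 * a GTT buffer object with writeback slots.
 */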
/**
 * amdgpu_ih_ring_fini - tear down the IH state
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the IH state and frees buffer
 * used for the IH ring buffer.
 */
void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
{
	if (adev->irq.ih.use_bus_addr) {
		if (adev->irq.ih.ring) {
			/* add 8 bytes for the rptr/wptr shadows and
			 * add them to the end of the ring allocation.
			 */
			pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
					    (void *)adev->irq.ih.ring,
					    adev->irq.ih.rb_dma_addr);
			adev->irq.ih.ring = NULL;
		}
	} else {
		amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
				      &adev->irq.ih.gpu_addr,
				      (void **)&adev->irq.ih.ring);
		amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
		amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs);
	}
}
/**
 * amdgpu_ih_process - interrupt handler
 *
 * @adev: amdgpu_device pointer
 *
 * Interrupt handler (VI), walk the IH ring.
 * Returns irq process return code.
 */
int amdgpu_ih_process(struct amdgpu_device *adev)
{
	struct amdgpu_iv_entry entry;
	u32 wptr;

	if (!adev->irq.ih.enabled || adev->shutdown)
		return IRQ_NONE;

	wptr = amdgpu_ih_get_wptr(adev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&adev->irq.ih.lock, 1))
		return IRQ_NONE;

	DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	while (adev->irq.ih.rptr != wptr) {
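		/* rptr counts bytes; the ring is read as an array of dwords */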
		u32 ring_index = adev->irq.ih.rptr >> 2;

		/* Prescreening of high-frequency interrupts */
		if (!amdgpu_ih_prescreen_iv(adev)) {
			adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
			continue;
		}

		/* Before dispatching irq to IP blocks, send it to amdkfd */
		amdgpu_amdkfd_interrupt(adev,
				(const void *) &adev->irq.ih.ring[ring_index]);

		entry.iv_entry = (const uint32_t *)
			&adev->irq.ih.ring[ring_index];
		amdgpu_ih_decode_iv(adev, &entry);
		adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;

		amdgpu_irq_dispatch(adev, &entry);
	}
	amdgpu_ih_set_rptr(adev);
	atomic_set(&adev->irq.ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = amdgpu_ih_get_wptr(adev);
	if (wptr != adev->irq.ih.rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
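/*
 * Caller sketch (added commentary; the handler below lives in
 * amdgpu_irq.c and is paraphrased from memory, not quoted): the
 * top-level DRM interrupt handler simply forwards to amdgpu_ih_process()
 * and returns its IRQ_NONE/IRQ_HANDLED result to the core:
 *
 *	irqreturn_t amdgpu_irq_handler(int irq, void *arg)
 *	{
 *		struct drm_device *dev = (struct drm_device *) arg;
 *		struct amdgpu_device *adev = dev->dev_private;
 *
 *		return amdgpu_ih_process(adev);
 *	}
 */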
/**
 * amdgpu_ih_add_fault - Add a page fault record
 *
 * @adev: amdgpu device pointer
 * @key: 64-bit encoding of PASID and address
 *
 * This should be called when a retry page fault interrupt is
 * received. If this is a new page fault, it will be added to a hash
 * table. The return value indicates whether this is a new fault, or
 * a fault that was already known and is already being handled.
 *
 * If there are too many pending page faults, this will fail. Retry
 * interrupts should be ignored in this case until there is enough
 * free space.
 *
 * Returns 0 if the fault was added, 1 if the fault was already known,
 * -ENOSPC if there are too many pending faults.
 */
int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key)
{
	unsigned long flags;
	int r = -ENOSPC;

	if (WARN_ON_ONCE(!adev->irq.ih.faults))
		/* Should be allocated in <IP>_ih_sw_init on GPUs that
		 * support retry faults and require retry filtering.
		 */
		return r;

	spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);

	/* Only let the hash table fill up to 50% for best performance */
	if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
		goto unlock_out;

	r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL);
	if (!r)
		adev->irq.ih.faults->count++;

	/* chash_table_copy_in should never fail unless we're losing count */
	WARN_ON_ONCE(r < 0);

unlock_out:
	spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
	return r;
}
/**
 * amdgpu_ih_clear_fault - Remove a page fault record
 *
 * @adev: amdgpu device pointer
 * @key: 64-bit encoding of PASID and address
 *
 * This should be called when a page fault has been handled. Any
 * future interrupt with this key will be processed as a new
 * page fault.
 */
void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key)
{
	unsigned long flags;
	int r;

	if (!adev->irq.ih.faults)
		return;

	spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);

	r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL);
	if (!WARN_ON_ONCE(r < 0)) {
		adev->irq.ih.faults->count--;
		WARN_ON_ONCE(adev->irq.ih.faults->count < 0);
	}

	spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
}
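/*
 * Usage sketch (added commentary, with hypothetical names): a retry
 * fault handler pairs the two calls above.  make_fault_key() stands in
 * for however the caller packs PASID and page address into the 64-bit
 * key; it is not a function defined in this driver.
 *
 *	u64 key = make_fault_key(pasid, addr);
 *
 *	if (amdgpu_ih_add_fault(adev, key))
 *		return;	// known fault or table full: drop this retry
 *
 *	// ... service the fault (validate/migrate the page) ...
 *
 *	amdgpu_ih_clear_fault(adev, key);	// future faults on key are new
 */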