/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>

struct mm_struct;

#include "kfd_priv.h"
/*
 * Initial size for the array of queues.
 * The allocated size is doubled each time
 * it is exceeded up to MAX_PROCESS_QUEUES.
 */
#define INITIAL_QUEUE_ARRAY_SIZE 16
/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_STATIC_SRCU(kfd_processes_srcu);
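
/*
 * Lookups in kfd_processes_table run under srcu_read_lock(&kfd_processes_srcu),
 * while insertion and removal are serialized by kfd_processes_mutex. Removal
 * uses hash_del_rcu() followed by synchronize_srcu(), so a reader that found
 * an entry may keep using it until it drops its SRCU read lock.
 */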
static struct workqueue_struct *kfd_process_wq;

struct kfd_process_release_work {
	struct work_struct kfd_work;

	struct kfd_process *p;
};
static struct kfd_process *find_process(const struct task_struct *thread);
static struct kfd_process *create_process(const struct task_struct *thread);
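
/*
 * kfd_process_wq services the release work queued by
 * kfd_process_destroy_delayed(); this create/destroy pair is meant to be
 * called from the driver's module init and exit paths.
 */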
void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = create_workqueue("kfd_process_wq");
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		flush_workqueue(kfd_process_wq);
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}
struct kfd_process *kfd_create_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	BUG_ON(!kfd_process_wq);

	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/* Take mmap_sem because we call __mmu_notifier_register inside */
	down_write(&thread->mm->mmap_sem);

	/*
	 * Take the kfd processes mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("kfd: process already found\n");
	else
		process = create_process(thread);

	mutex_unlock(&kfd_processes_mutex);

	up_write(&thread->mm->mmap_sem);

	return process;
}
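
/*
 * Lookup-only variant of kfd_create_process(): returns the kfd_process
 * already associated with the thread's mm, without creating one.
 */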
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}
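
/* Caller must hold an SRCU read lock on kfd_processes_srcu. */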
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}
static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}
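
/*
 * Process teardown runs in three stages: kfd_process_notifier_release()
 * unhashes the process, kfd_process_destroy_delayed() runs as an SRCU
 * callback once all readers are done, and the actual freeing happens here,
 * on the process workqueue, where it is safe to sleep.
 */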
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process_release_work *my_work;
	struct kfd_process_device *pdd, *temp;
	struct kfd_process *p;

	my_work = (struct kfd_process_release_work *) work;

	p = my_work->p;

	mutex_lock(&p->mutex);

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
							per_device_list) {
		amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
		list_del(&pdd->per_device_list);
		kfree(pdd);
	}

	kfd_pasid_free(p->pasid);

	mutex_unlock(&p->mutex);

	mutex_destroy(&p->mutex);

	kfree(p->queues);

	kfree(p);

	kfree(work);
}
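
/*
 * Runs in SRCU callback context and therefore must not sleep; hence the
 * GFP_ATOMIC allocation below and the hand-off of the real cleanup to the
 * process workqueue.
 */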
static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process_release_work *work;
	struct kfd_process *p;

	BUG_ON(!kfd_process_wq);

	p = container_of(rcu, struct kfd_process, rcu);
	BUG_ON(atomic_read(&p->mm->mm_count) <= 0);

	mmdrop(p->mm);

	work = kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);

	if (work) {
		INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
		work->p = p;
		queue_work(kfd_process_wq, (struct work_struct *) work);
	}
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read-locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	BUG_ON(p->mm != mm);

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	mutex_lock(&p->mutex);

	/* In case our notifier is called before the IOMMU notifier */
	pqm_uninit(&p->pqm);

	mutex_unlock(&p->mutex);

	/*
	 * Because we drop mm_count inside kfd_process_destroy_delayed
	 * and because the mmu_notifier_unregister function also drops
	 * mm_count, we need to take an extra count here.
	 */
	atomic_inc(&p->mm->mm_count);
	mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};
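
/*
 * Caller must hold kfd_processes_mutex and the write side of thread->mm's
 * mmap_sem (the latter is required by __mmu_notifier_register()).
 */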
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
					sizeof(process->queues[0]), GFP_KERNEL);
	if (!process->queues)
		goto err_alloc_queues;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;

	process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;

	INIT_LIST_HEAD(&process->per_device_data);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = is_compat_task();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apretures;

	return process;

err_init_apretures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process->queues);
err_alloc_queues:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (pdd != NULL) {
		pdd->dev = dev;
		INIT_LIST_HEAD(&pdd->qpd.queues_list);
		INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
		pdd->qpd.dqm = dev->dqm;
		list_add(&pdd->per_device_list, &p->per_device_data);
	}

	return pdd;
}
/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (pdd == NULL) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (pdd->bound)
		return pdd;

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (err < 0)
		return ERR_PTR(err);

	pdd->bound = true;

	return pdd;
}
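
/*
 * Called by the amd-iommu driver when it tears down the pasid (see the
 * "because the IOMMU called us" note below), so the device is only marked
 * as unbound here rather than unbound again.
 */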
void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;
	int idx, i;

	BUG_ON(dev == NULL);

	idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
		if (p->pasid == pasid)
			break;

	srcu_read_unlock(&kfd_processes_srcu, idx);

	BUG_ON(p->pasid != pasid);

	mutex_lock(&p->mutex);

	pqm_uninit(&p->pqm);

	pdd = kfd_get_process_device_data(dev, p);

	/*
	 * Just mark pdd as unbound, because we still need it to call
	 * amd_iommu_unbind_pasid() when the process exits.
	 * We don't call amd_iommu_unbind_pasid() here
	 * because the IOMMU called us.
	 */
	if (pdd != NULL)
		pdd->bound = false;

	mutex_unlock(&p->mutex);
}
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}
struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}
bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}