/*
 * SN Platform GRU Driver
 *
 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/prefetch.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/* Return codes for vtop functions */
#define VTOP_SUCCESS		0
#define VTOP_INVALID		-1
#define VTOP_RETRY		-2

/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_sem locked for read and the GTS locked.
 *	- NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_read(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		up_read(&mm->mmap_sem);
	return gts;
}

static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = ERR_PTR(-EINVAL);

	down_write(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (!vma)
		goto err;

	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (IS_ERR(gts))
		goto err;
	mutex_lock(&gts->ts_ctxlock);
	downgrade_write(&mm->mmap_sem);
	return gts;

err:
	up_write(&mm->mmap_sem);
	return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	up_read(&current->mm->mmap_sem);
}

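/*
 * The user-facing entry points below (gru_handle_user_call_os(),
 * gru_get_exception_detail(), etc.) follow a common pattern:
 *
 *	gts = gru_find_lock_gts(vaddr);
 *	if (!gts)
 *		return -EINVAL;
 *	...	use the gts; mmap_sem is held for read, ts_ctxlock is held
 *	gru_unlock_gts(gts);
 */
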
/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
	if (cbk) {
		cbk->istatus = CBS_ACTIVE;
	}
}

/*
 * The GRU has an array of fault maps. A map is private to a cpu
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		imap->fault_bits[i] = k;
		k = tfm->done_bits[i];
		if (k)
			k = xchg(&tfm->done_bits[i], 0UL);
		dmap->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}

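/*
 * Note: the two maps returned above serve different purposes in gru_intr():
 * "imap" holds the CBRs that took a TLB miss and need a dropin attempt,
 * while "dmap" holds CBRs whose instructions completed and is only used to
 * wake waiters on the blade's async completion queue.
 */
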
/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:	0 - successful; < 0 - error code;
 *			1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}

/*
 * Convert a user virtual address to a physical address
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	pudp = pud_offset(pgdp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;

#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	return 1;
}

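/*
 * A return value of 1 from atomic_pte_lookup() means the PTE is not
 * available without faulting; gru_vtop() below either retries with
 * non_atomic_pte_lookup() or, when called from interrupt context, asks the
 * caller to switch the CB to user polling mode (VTOP_RETRY).
 */
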
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must/check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return VTOP_SUCCESS;

inval:
	return VTOP_INVALID;
upm:
	return VTOP_RETRY;
}

/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
	if (unlikely(cbe)) {
		cbe->cbrexecstatus = 0;		/* make CL dirty */
		gru_flush_cache(cbe);
	}
}

/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */
static void gru_preload_tlb(struct gru_state *gru,
			struct gru_thread_state *gts, int atomic,
			unsigned long fault_vaddr, int asid, int write,
			unsigned char tlb_preload_count,
			struct gru_tlb_fault_handle *tfh,
			struct gru_control_block_extended *cbe)
{
	unsigned long vaddr = 0, gpa;
	int ret, pageshift;

	if (cbe->opccpy != OP_BCOPY)
		return;

	if (fault_vaddr == cbe->cbe_baddr0)
		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
	else if (fault_vaddr == cbe->cbe_baddr1)
		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

	fault_vaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;
	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

	while (vaddr > fault_vaddr) {
		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
					  GRU_PAGESIZE(pageshift)))
			return;
		gru_dbg(grudev,
			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
			vaddr, asid, write, pageshift, gpa);
		vaddr -= PAGE_SIZE;
		STAT(tlb_preload_page);
	}
}

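/*
 * Note: the loop above walks backward, one page at a time, from the last
 * page of the BCOPY operand toward (but not including) the faulting page.
 * Any translation or dropin failure simply ends the preload; the preloaded
 * entries are an optimization only and correctness does not depend on them.
 */
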
/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 *	cb	Address of user CBR. Null if not running in user context
 * Return:	  0 = dropin, exception, or switch to UPM successful
 *		  1 = range invalidate active
 *		< 0 = error code
 */
static int gru_try_dropin(struct gru_state *gru,
			  struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{
	struct gru_control_block_extended *cbe = NULL;
	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
	unsigned long gpa = 0, vaddr = 0;

	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Prefetch the CBE if doing TLB preloading
	 */
	if (unlikely(tlb_preload_count)) {
		cbe = gru_tfh_to_cbe(tfh);
		prefetchw(cbe);
	}

	/*
	 * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
	 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
	 * is a transient state.
	 */
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		gru_flush_cache(tfh);
		sync_core();
		if (tfh->status != TFHSTATUS_EXCEPTION)
			goto failnoexception;
		STAT(tfh_stale_on_fault);
	}
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cbk)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	indexway = tfh->indexway;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == VTOP_INVALID)
		goto failinval;
	if (ret == VTOP_RETRY)
		goto failupm;

	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}

	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
		gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
		gru_flush_cache_cbe(cbe);
	}

	gru_cb_set_istatus_active(cbk);
	gts->ustats.tlbdropin++;
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	gru_dbg(grudev,
		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
		" rw %d, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
		indexway, write, pageshift, gpa);
	STAT(tlb_dropin);
	return 0;

failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	return -EAGAIN;

failupm:
	/* Atomic failure switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failnoexception:
	/* TFH status did not show exception pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_no_exception);
	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);
	return 0;

failidle:
	/* TFH state was idle - no miss pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with linux
 * interrupt handlers.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map imap, dmap;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	struct completion *cmp;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = &gru_base[blade]->bs_grus[chiplet];
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
			raw_smp_processor_id(), chiplet);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &imap, &dmap);
	gru_dbg(grudev,
		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
		smp_processor_id(), chiplet, gru->gs_gid,
		imap.fault_bits[0], imap.fault_bits[1],
		dmap.fault_bits[0], dmap.fault_bits[1]);

	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
		STAT(intr_cbr);
		cmp = gru->gs_blade->bs_async_wq;
		if (cmp)
			complete(cmp);
		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
			gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
	}

	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
		STAT(intr_tfh);
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is received.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/* Spurious interrupts can cause this. Ignore. */
		if (!gts) {
			STAT(intr_spurious);
			continue;
		}

		/*
		 * This is running in interrupt context. Trylock the mmap_sem.
		 * If it fails, retry the fault in user context.
		 */
		gts->ustats.fmm_tlbmiss++;
		if (!gts->ts_force_cch_reload &&
		    down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gru_try_dropin(gru, gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}

irqreturn_t gru0_intr(int irq, void *dev_id)
{
	return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
	return gru_intr(1, uv_numa_blade_id());
}

irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
	int blade;

	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		gru_intr(0, blade);
		gru_intr(1, blade);
	}
	return IRQ_HANDLED;
}

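/*
 * gru0_intr()/gru1_intr() service the two GRU chiplets on the local blade,
 * while gru_intr_mblade() handles blades that have GRUs but no possible
 * CPUs: it skips blades with CPUs and polls both chiplets of the rest.
 */
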
static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	gts->ustats.upm_tlbmiss++;
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */
		ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}

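/*
 * gru_user_dropin() keeps retrying for as long as gru_try_dropin() reports
 * that a range invalidate is active (return value 1), sleeping on the
 * ms_wait_queue between attempts; it returns 0 on success or a negative
 * errno that is passed back to the user.
 */
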
/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	void *cbk;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;
	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	gru_check_context_placement(gts);

	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gts->ts_force_cch_reload = 0;
		gru_update_cch(gts);
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
				gts->ts_ctxnum, ucbnum);
		ret = gru_user_dropin(gts, tfh, cbk);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		gru_flush_cache(cbe);	/* CBE not coherent */
		sync_core();		/* make sure we have current data */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		excdet.cbrstate = cbe->cbrstate;
		excdet.cbrexecstatus = cbe->cbrexecstatus;
		gru_flush_cache_cbe(cbe);
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev,
		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
		"exdet0 0x%lx, exdet1 0x%x\n",
		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
		excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}

int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;
	struct gru_mm_struct *gms;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gms = gts->ts_gms;
	gru_unlock_gts(gts);
	gru_flush_tlb_range(gms, req.vaddr, req.len);

	return 0;
}

/*
 * Fetch GSEG statistics
 */
long gru_get_gseg_statistics(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_get_gseg_statistics_req req;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/*
	 * The library creates arrays of contexts for threaded programs.
	 * If no gts exists in the array, the context has never been used & all
	 * statistics are implicitly 0.
	 */
	gts = gru_find_lock_gts(req.gseg);
	if (gts) {
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		gru_unlock_gts(gts);
	} else {
		memset(&req.stats, 0, sizeof(gts->ustats));
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;

	return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_context_option(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_set_context_option_req req;
	int ret = 0;

	STAT(set_context_option);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts) {
		gts = gru_alloc_locked_gts(req.gseg);
		if (IS_ERR(gts))
			return PTR_ERR(gts);
	}

	switch (req.op) {
	case sco_blade_chiplet:
		/* Select blade/chiplet for GRU context */
		if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
		    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
		    (req.val1 >= 0 && !gru_base[req.val1])) {
			ret = -EINVAL;
		} else {
			gts->ts_user_blade_id = req.val1;
			gts->ts_user_chiplet_id = req.val0;
			gru_check_context_placement(gts);
		}
		break;
	case sco_gseg_owner:
		/* Register the current task as the GSEG owner */
		gts->ts_tgid_owner = current->tgid;
		break;
	case sco_cch_req_slice:
		/* Set the CCH slice option */
		gts->ts_cch_req_slice = req.val1 & 3;