// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *              FILE OPERATIONS & DRIVER INITIALIZATION
 *
 * This file supports the user system call for file open, close, mmap, etc.
 * This also includes the driver initialization code.
 *
 *  (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 *  Copyright (c) 2008-2014 Silicon Graphics, Inc.  All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#ifdef CONFIG_X86_64
#include <asm/uv/uv_irq.h>
#endif
#include <asm/uv/uv.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"

#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>
struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
unsigned long gru_start_paddr __read_mostly;
void *gru_start_vaddr __read_mostly;
unsigned long gru_end_paddr __read_mostly;
unsigned int gru_max_gids __read_mostly;
struct gru_stats_s gru_stats;

/* Guaranteed user available resources on each node */
static int max_user_cbrs, max_user_dsr_bytes;

static struct miscdevice gru_miscdev;
static int gru_supported(void)
{
	return is_uv_system() &&
		(uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE);
}
/*
 * Called when unmapping a device mapping. Frees all gru resources
 * and tables belonging to the vma.
 */
static void gru_vma_close(struct vm_area_struct *vma)
{
	struct gru_vma_data *vdata;
	struct gru_thread_state *gts;
	struct list_head *entry, *next;

	if (!vma->vm_private_data)
		return;

	vdata = vma->vm_private_data;
	vma->vm_private_data = NULL;
	gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file,
				vdata);
	list_for_each_safe(entry, next, &vdata->vd_head) {
		gts =
		    list_entry(entry, struct gru_thread_state, ts_next);
		list_del(&gts->ts_next);
		mutex_lock(&gts->ts_ctxlock);
		if (gts->ts_gru)
			gru_unload_context(gts, 0);
		mutex_unlock(&gts->ts_ctxlock);
		gts_drop(gts);
	}
	kfree(vdata);
	STAT(vdata_free);
}
/*
 * Called when mmapping the device.  Initializes the vma with a fault handler
 * and private data structure necessary to allocate, track, and free the
 * underlying pages.
 */
static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
		return -EPERM;

	if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
				vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
			 VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = PAGE_SHARED;
	vma->vm_ops = &gru_vm_ops;

	vma->vm_private_data = gru_alloc_vma_data(vma, 0);
	if (!vma->vm_private_data)
		return -ENOMEM;

	gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n",
		file, vma->vm_start, vma, vma->vm_private_data);
	return 0;
}
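
/*
 * For orientation, a rough sketch of how a user-space client would be
 * expected to drive this mmap path: the mapping must be shared, writable,
 * and GRU_GSEG_PAGESIZE-aligned, or the checks above reject it.  The device
 * path and flags below are illustrative assumptions, not part of this file:
 *
 *	int fd = open("/dev/gru", O_RDWR);
 *	void *gseg = mmap(NULL, GRU_GSEG_PAGESIZE, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */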
/*
 * Create a new GRU context
 */
static int gru_create_new_context(unsigned long arg)
{
	struct gru_create_context_req req;
	struct vm_area_struct *vma;
	struct gru_vma_data *vdata;
	int ret = -EINVAL;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	if (req.data_segment_bytes > max_user_dsr_bytes)
		return -EINVAL;
	if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count)
		return -EINVAL;

	if (!(req.options & GRU_OPT_MISS_MASK))
		req.options |= GRU_OPT_MISS_FMM_INTR;

	mmap_write_lock(current->mm);
	vma = gru_find_vma(req.gseg);
	if (vma) {
		vdata = vma->vm_private_data;
		vdata->vd_user_options = req.options;
		vdata->vd_dsr_au_count =
			GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
		vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
		vdata->vd_tlb_preload_count = req.tlb_preload_count;
		ret = 0;
	}
	mmap_write_unlock(current->mm);

	return ret;
}
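
/*
 * A minimal sketch of the corresponding user-space call, assuming the
 * GRU_CREATE_CONTEXT ioctl takes a struct gru_create_context_req whose
 * gseg field names an already-mmap'ed GRU segment; the field values here
 * are purely illustrative, only the field names come from the code above:
 *
 *	struct gru_create_context_req req = {
 *		.gseg			= (unsigned long)gseg,
 *		.data_segment_bytes	= 8192,
 *		.control_blocks		= 16,
 *		.maximum_thread_count	= 1,
 *		.options		= 0,
 *	};
 *	ioctl(fd, GRU_CREATE_CONTEXT, (unsigned long)&req);
 */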
/*
 * Get GRU configuration info (temp - for emulator testing)
 */
static long gru_get_config_info(unsigned long arg)
{
	struct gru_config_info info;
	int nodesperblade;

	if (num_online_nodes() > 1 &&
			(uv_node_to_blade_id(1) == uv_node_to_blade_id(0)))
		nodesperblade = 2;
	else
		nodesperblade = 1;
	memset(&info, 0, sizeof(info));
	info.cpus = num_online_cpus();
	info.nodes = num_online_nodes();
	info.blades = info.nodes / nodesperblade;
	info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades;

	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
/*
 * gru_file_unlocked_ioctl
 *
 * Called to update file attributes via IOCTL calls.
 */
static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
				    unsigned long arg)
{
	int err = -EBADRQC;

	gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg);

	switch (req) {
	case GRU_CREATE_CONTEXT:
		err = gru_create_new_context(arg);
		break;
	case GRU_SET_CONTEXT_OPTION:
		err = gru_set_context_option(arg);
		break;
	case GRU_USER_GET_EXCEPTION_DETAIL:
		err = gru_get_exception_detail(arg);
		break;
	case GRU_USER_UNLOAD_CONTEXT:
		err = gru_user_unload_context(arg);
		break;
	case GRU_USER_FLUSH_TLB:
		err = gru_user_flush_tlb(arg);
		break;
	case GRU_USER_CALL_OS:
		err = gru_handle_user_call_os(arg);
		break;
	case GRU_GET_GSEG_STATISTICS:
		err = gru_get_gseg_statistics(arg);
		break;
	case GRU_KTEST:
		err = gru_ktest(arg);
		break;
	case GRU_GET_CONFIG_INFO:
		err = gru_get_config_info(arg);
		break;
	case GRU_DUMP_CHIPLET_STATE:
		err = gru_dump_chiplet_request(arg);
		break;
	}
	return err;
}
/*
 * Called at init time to build tables for all GRUs that are present in the
 * system.
 */
static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
			     void *vaddr, int blade_id, int chiplet_id)
{
	spin_lock_init(&gru->gs_lock);
	spin_lock_init(&gru->gs_asid_lock);
	gru->gs_gru_base_paddr = paddr;
	gru->gs_gru_base_vaddr = vaddr;
	gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
	gru->gs_blade = gru_base[blade_id];
	gru->gs_blade_id = blade_id;
	gru->gs_chiplet_id = chiplet_id;
	gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
	gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
	gru->gs_asid_limit = MAX_ASID;
	gru_tgh_flush_init(gru);
	if (gru->gs_gid >= gru_max_gids)
		gru_max_gids = gru->gs_gid + 1;
	gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
		blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
		gru->gs_gru_base_paddr);
}
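
/*
 * A small worked example of the gid numbering above, assuming
 * GRU_CHIPLETS_PER_BLADE is 2 (as the two-chiplet code paths elsewhere in
 * this file suggest): blade 0 owns gids 0 and 1, blade 1 owns gids 2 and 3,
 * so chiplet 1 on blade 3 gets gid 3 * 2 + 1 = 7, and gru_max_gids ends up
 * one past the largest gid seen.
 */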
static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
{
	int pnode, nid, bid, chip;
	int cbrs, dsrbytes, n;
	int order = get_order(sizeof(struct gru_blade_state));
	struct page *page;
	struct gru_state *gru;
	unsigned long paddr;
	void *vaddr;

	max_user_cbrs = GRU_NUM_CB;
	max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
	for_each_possible_blade(bid) {
		pnode = uv_blade_to_pnode(bid);
		nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */
		page = alloc_pages_node(nid, GFP_KERNEL, order);
		if (!page)
			goto fail;
		gru_base[bid] = page_address(page);
		memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
		gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
		spin_lock_init(&gru_base[bid]->bs_lock);
		init_rwsem(&gru_base[bid]->bs_kgts_sema);

		dsrbytes = 0;
		cbrs = 0;
		for (gru = gru_base[bid]->bs_grus, chip = 0;
				chip < GRU_CHIPLETS_PER_BLADE;
				chip++, gru++) {
			paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
			vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
			gru_init_chiplet(gru, paddr, vaddr, bid, chip);
			n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
			cbrs = max(cbrs, n);
			n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
			dsrbytes = max(dsrbytes, n);
		}
		max_user_cbrs = min(max_user_cbrs, cbrs);
		max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes);
	}

	return 0;

fail:
	for (bid--; bid >= 0; bid--)
		free_pages((unsigned long)gru_base[bid], order);
	return -ENOMEM;
}
static void gru_free_tables(void)
{
	int bid;
	int order = get_order(sizeof(struct gru_state) *
			      GRU_CHIPLETS_PER_BLADE);

	for (bid = 0; bid < GRU_MAX_BLADES; bid++)
		free_pages((unsigned long)gru_base[bid], order);
}
static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
{
	unsigned long mmr = 0;
	int core;

	/*
	 * We target the cores of a blade and not the hyperthreads themselves.
	 * There is a max of 8 cores per socket and 2 sockets per blade,
	 * making for a max total of 16 cores (i.e., 16 CPUs without
	 * hyperthreading and 32 CPUs with hyperthreading).
	 */
	core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
	if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
		return 0;

	if (chiplet == 0) {
		mmr = UVH_GR0_TLB_INT0_CONFIG +
		    core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
	} else if (chiplet == 1) {
		mmr = UVH_GR1_TLB_INT0_CONFIG +
		    core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
	} else {
		BUG();
	}

	*corep = core;
	return mmr;
}
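
/*
 * Worked example of the core numbering above, under the stated assumption
 * of 8 cores per socket (so UV_MAX_INT_CORES would be 8): core 3 on
 * socket 1 maps to 3 + 8 * 1 = 11, which clears the GRU_NUM_TFM ceiling of
 * 16 cores described in the comment, and its chiplet-0 interrupt MMR is
 * UVH_GR0_TLB_INT0_CONFIG plus 11 times the per-core register stride
 * (INT1 - INT0).  Hyperthread siblings (uv_cpu_ht_number(cpu) != 0)
 * deliberately get no MMR of their own.
 */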
#ifdef CONFIG_IA64

static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];

static void gru_noop(struct irq_data *d)
{
}

static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
	[0 ... GRU_CHIPLETS_PER_BLADE - 1] {
		.irq_mask	= gru_noop,
		.irq_unmask	= gru_noop,
		.irq_ack	= gru_noop
	}
};

static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
			irq_handler_t irq_handler, int cpu, int blade)
{
	unsigned long mmr;
	int irq = IRQ_GRU + chiplet;
	int ret, core;

	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
	if (mmr == 0)
		return 0;

	if (gru_irq_count[chiplet] == 0) {
		gru_chip[chiplet].name = irq_name;
		ret = irq_set_chip(irq, &gru_chip[chiplet]);
		if (ret) {
			printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
			       GRU_DRIVER_ID_STR, -ret);
			return ret;
		}

		ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
		if (ret) {
			printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
			       GRU_DRIVER_ID_STR, -ret);
			return ret;
		}
	}
	gru_irq_count[chiplet]++;

	return 0;
}

static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
	unsigned long mmr;
	int core, irq = IRQ_GRU + chiplet;

	if (gru_irq_count[chiplet] == 0)
		return;

	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
	if (mmr == 0)
		return;

	if (--gru_irq_count[chiplet] == 0)
		free_irq(irq, NULL);
}
404 #elif defined CONFIG_X86_64
static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
			irq_handler_t irq_handler, int cpu, int blade)
{
	unsigned long mmr;
	int irq, core;
	int ret;

	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
	if (mmr == 0)
		return 0;

	irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
	if (irq < 0) {
		printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
		       GRU_DRIVER_ID_STR, -irq);
		return irq;
	}

	ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
	if (ret) {
		uv_teardown_irq(irq);
		printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
		       GRU_DRIVER_ID_STR, -ret);
		return ret;
	}
	gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
	return 0;
}
static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
	int irq, core;
	unsigned long mmr;

	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
	if (mmr) {
		irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
		if (irq) {
			free_irq(irq, NULL);
			uv_teardown_irq(irq);
		}
	}
}

#endif
static void gru_teardown_tlb_irqs(void)
{
	int blade;
	int cpu;

	for_each_online_cpu(cpu) {
		blade = uv_cpu_to_blade_id(cpu);
		gru_chiplet_teardown_tlb_irq(0, cpu, blade);
		gru_chiplet_teardown_tlb_irq(1, cpu, blade);
	}
	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		gru_chiplet_teardown_tlb_irq(0, 0, blade);
		gru_chiplet_teardown_tlb_irq(1, 0, blade);
	}
}
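
/*
 * Note on the split loops above and below: blades that have online CPUs get
 * per-CPU TLB-miss interrupts (gru0_intr/gru1_intr), while blades with no
 * possible CPUs still need their two chiplet interrupts serviced, so those
 * are pointed at CPU 0 via the gru_intr_mblade handler.  This reading is
 * inferred from the call sites here, not from separate documentation.
 */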
static int gru_setup_tlb_irqs(void)
{
	int blade;
	int cpu;
	int ret;

	for_each_online_cpu(cpu) {
		blade = uv_cpu_to_blade_id(cpu);
		ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
		if (ret != 0)
			goto exit1;

		ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
		if (ret != 0)
			goto exit1;
	}
	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
		if (ret != 0)
			goto exit1;

		ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
		if (ret != 0)
			goto exit1;
	}

	return 0;

exit1:
	gru_teardown_tlb_irqs();
	return ret;
}
/*
 * Called at boot or module load time to initialize the GRUs.
 */
static int __init gru_init(void)
{
	int ret;

	if (!gru_supported())
		return 0;

#if defined CONFIG_IA64
	gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
#else
	gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG) &
				0x7fffffffffffUL;
#endif
	gru_start_vaddr = __va(gru_start_paddr);
	gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
	printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
	       gru_start_paddr, gru_end_paddr);
	ret = misc_register(&gru_miscdev);
	if (ret) {
		printk(KERN_ERR "%s: misc_register failed\n",
		       GRU_DRIVER_ID_STR);
		goto exit0;
	}

	ret = gru_proc_init();
	if (ret) {
		printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
		goto exit1;
	}

	ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
	if (ret) {
		printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
		goto exit2;
	}

	ret = gru_setup_tlb_irqs();
	if (ret != 0)
		goto exit3;

	gru_kservices_init();

	printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
	       GRU_DRIVER_VERSION_STR);
	return 0;

exit3:
	gru_free_tables();
exit2:
	gru_proc_exit();
exit1:
	misc_deregister(&gru_miscdev);
exit0:
	return ret;
}
static void __exit gru_exit(void)
{
	if (!gru_supported())
		return;

	gru_teardown_tlb_irqs();
	gru_kservices_exit();
	gru_free_tables();
	misc_deregister(&gru_miscdev);
	gru_proc_exit();
	mmu_notifier_synchronize();
}
static const struct file_operations gru_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= gru_file_unlocked_ioctl,
	.mmap		= gru_file_mmap,
	.llseek		= noop_llseek,
};
static struct miscdevice gru_miscdev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "gru",
	.fops		= &gru_fops,
};
const struct vm_operations_struct gru_vm_ops = {
	.close		= gru_vma_close,
	.fault		= gru_fault,
};
#ifndef MODULE
fs_initcall(gru_init);
#else
module_init(gru_init);
#endif
module_exit(gru_exit);
module_param(gru_options, ulong, 0644);
MODULE_PARM_DESC(gru_options, "Various debug options");

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR);
MODULE_VERSION(GRU_DRIVER_VERSION_STR);