/*
 * SN Platform GRU Driver
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10
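
/*
 * Copy a single GRU handle (GRU_HANDLE_BYTES) to user space and advance
 * the destination pointer past the bytes written.
 */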
static int gru_user_copy_handle(void __user **dp, void *s)
{
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	*dp += GRU_HANDLE_BYTES;
	return 0;
}
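
/*
 * Dump the resources of one GRU context: for each CBR allocated to the
 * context, copy its CBR, TFH, and CBE handles, then copy the context's
 * data segment.
 */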
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt,
			int flush_cbrs)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;
	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (flush_cbrs)
			gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}
	/*
	 * The destination is a user pointer, so the data segment must be
	 * copied with copy_to_user(), not memcpy().
	 */
	if (dsrcnt && copy_to_user(ubuf, gseg + GRU_DS_BASE,
				   dsrcnt * GRU_HANDLE_STRIDE))
		goto fail;
	return 0;

fail:
	return -EFAULT;
}
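
/*
 * Dump the chiplet's TLB fault map (TFM) handles. Returns the number of
 * bytes copied to the user buffer, or a negative errno on failure.
 */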
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i;

	if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			goto fail;
	}
	return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}
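
/*
 * Dump the chiplet's TLB global handles (TGH). Returns the number of
 * bytes copied to the user buffer, or a negative errno on failure.
 */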
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i;

	if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			goto fail;
	}
	return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}
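
/*
 * Dump a single GRU context: a dump header, the CCH, and, if the context
 * is active, its CBR/TFH/CBE handles and data segment. The CCH lock is
 * tried up to CCH_LOCK_ATTEMPTS times; if "lock_cch" is set, the context
 * resources are dumped only when the lock was actually obtained.
 */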
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch)) {
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	if (cch_locked)
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		/* each CBR contributes three handles: CBR, TFH, and CBE */
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	/* Fill in the header last, now that the counts are known. */
	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;
}
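
/*
 * Handler for the chiplet-dump user request: dumps the TFM and TGH
 * handles followed by the requested context(s). Returns the number of
 * contexts dumped, or a negative errno on failure.
 */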
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids)
		return -EINVAL;

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch,
						req.flush_cbrs);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}
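
/*
 * Usage sketch (illustrative, not part of this driver): user space is
 * assumed to reach gru_dump_chiplet_request() through an ioctl on the
 * GRU device node. The ioctl name GRU_DUMP_CHIPLET_STATE and the
 * "/dev/gru" path are assumptions taken from the surrounding driver;
 * DUMP_BUFLEN is a hypothetical caller-chosen buffer size. The request
 * fields match struct gru_dump_chiplet_state_req as used above.
 *
 *	struct gru_dump_chiplet_state_req req = {0};
 *	char *buf = malloc(DUMP_BUFLEN);
 *	int fd, cnt;
 *
 *	req.gid = 0;		// chiplet to dump
 *	req.ctxnum = -1;	// < 0 means "dump all contexts"
 *	req.data_opt = 1;	// also dump data segments (DSRs)
 *	req.lock_cch = 0;	// dump even if the CCH cannot be locked
 *	req.flush_cbrs = 0;	// no CBE cache flush before dumping
 *	req.buf = buf;
 *	req.buflen = DUMP_BUFLEN;
 *
 *	fd = open("/dev/gru", O_RDWR);
 *	cnt = ioctl(fd, GRU_DUMP_CHIPLET_STATE, &req);
 *	// cnt is the number of contexts dumped, or -1 with errno set
 */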