Linux 4.19.133
[linux/fpc-iii.git] / drivers / misc / sgi-gru / grukdump.c
blob 1540a7785e14743ae1b035aeb21d391af8516050
1 /*
2 * SN Platform GRU Driver
4 * Dump GRU State
6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #include <linux/kernel.h>
24 #include <linux/mm.h>
25 #include <linux/spinlock.h>
26 #include <linux/uaccess.h>
27 #include <linux/delay.h>
28 #include <linux/bitops.h>
29 #include <asm/uv/uv_hub.h>
31 #include <linux/nospec.h>
33 #include "gru.h"
34 #include "grutables.h"
35 #include "gruhandles.h"
36 #include "grulib.h"
38 #define CCH_LOCK_ATTEMPTS 10
40 static int gru_user_copy_handle(void __user **dp, void *s)
42 if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
43 return -1;
44 *dp += GRU_HANDLE_BYTES;
45 return 0;
48 static int gru_dump_context_data(void *grubase,
49 struct gru_context_configuration_handle *cch,
50 void __user *ubuf, int ctxnum, int dsrcnt,
51 int flush_cbrs)
53 void *cb, *cbe, *tfh, *gseg;
54 int i, scr;
56 gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
57 cb = gseg + GRU_CB_BASE;
58 cbe = grubase + GRU_CBE_BASE;
59 tfh = grubase + GRU_TFH_BASE;
61 for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
62 if (flush_cbrs)
63 gru_flush_cache(cb);
64 if (gru_user_copy_handle(&ubuf, cb))
65 goto fail;
66 if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
67 goto fail;
68 if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
69 goto fail;
70 cb += GRU_HANDLE_STRIDE;
72 if (dsrcnt)
73 memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
74 return 0;
76 fail:
77 return -EFAULT;
80 static int gru_dump_tfm(struct gru_state *gru,
81 void __user *ubuf, void __user *ubufend)
83 struct gru_tlb_fault_map *tfm;
84 int i;
86 if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
87 return -EFBIG;
89 for (i = 0; i < GRU_NUM_TFM; i++) {
90 tfm = get_tfm(gru->gs_gru_base_vaddr, i);
91 if (gru_user_copy_handle(&ubuf, tfm))
92 goto fail;
94 return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
96 fail:
97 return -EFAULT;
100 static int gru_dump_tgh(struct gru_state *gru,
101 void __user *ubuf, void __user *ubufend)
103 struct gru_tlb_global_handle *tgh;
104 int i;
106 if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
107 return -EFBIG;
109 for (i = 0; i < GRU_NUM_TGH; i++) {
110 tgh = get_tgh(gru->gs_gru_base_vaddr, i);
111 if (gru_user_copy_handle(&ubuf, tgh))
112 goto fail;
114 return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
116 fail:
117 return -EFAULT;
120 static int gru_dump_context(struct gru_state *gru, int ctxnum,
121 void __user *ubuf, void __user *ubufend, char data_opt,
122 char lock_cch, char flush_cbrs)
124 struct gru_dump_context_header hdr;
125 struct gru_dump_context_header __user *uhdr = ubuf;
126 struct gru_context_configuration_handle *cch, *ubufcch;
127 struct gru_thread_state *gts;
128 int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
129 void *grubase;
131 memset(&hdr, 0, sizeof(hdr));
132 grubase = gru->gs_gru_base_vaddr;
133 cch = get_cch(grubase, ctxnum);
134 for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
135 cch_locked = trylock_cch_handle(cch);
136 if (cch_locked)
137 break;
138 msleep(1);
141 ubuf += sizeof(hdr);
142 ubufcch = ubuf;
143 if (gru_user_copy_handle(&ubuf, cch)) {
144 if (cch_locked)
145 unlock_cch_handle(cch);
146 return -EFAULT;
148 if (cch_locked)
149 ubufcch->delresp = 0;
150 bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
152 if (cch_locked || !lock_cch) {
153 gts = gru->gs_gts[ctxnum];
154 if (gts && gts->ts_vma) {
155 hdr.pid = gts->ts_tgid_owner;
156 hdr.vaddr = gts->ts_vma->vm_start;
158 if (cch->state != CCHSTATE_INACTIVE) {
159 cbrcnt = hweight64(cch->cbr_allocation_map) *
160 GRU_CBR_AU_SIZE;
161 dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
162 GRU_DSR_AU_CL : 0;
164 bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
165 if (bytes > ubufend - ubuf)
166 ret = -EFBIG;
167 else
168 ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
169 dsrcnt, flush_cbrs);
171 if (cch_locked)
172 unlock_cch_handle(cch);
173 if (ret)
174 return ret;
176 hdr.magic = GRU_DUMP_MAGIC;
177 hdr.gid = gru->gs_gid;
178 hdr.ctxnum = ctxnum;
179 hdr.cbrcnt = cbrcnt;
180 hdr.dsrcnt = dsrcnt;
181 hdr.cch_locked = cch_locked;
182 if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
183 return -EFAULT;
185 return bytes;
188 int gru_dump_chiplet_request(unsigned long arg)
190 struct gru_state *gru;
191 struct gru_dump_chiplet_state_req req;
192 void __user *ubuf;
193 void __user *ubufend;
194 int ctxnum, ret, cnt = 0;
196 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
197 return -EFAULT;
199 /* Currently, only dump by gid is implemented */
200 if (req.gid >= gru_max_gids)
201 return -EINVAL;
202 req.gid = array_index_nospec(req.gid, gru_max_gids);
204 gru = GID_TO_GRU(req.gid);
205 ubuf = req.buf;
206 ubufend = req.buf + req.buflen;
208 ret = gru_dump_tfm(gru, ubuf, ubufend);
209 if (ret < 0)
210 goto fail;
211 ubuf += ret;
213 ret = gru_dump_tgh(gru, ubuf, ubufend);
214 if (ret < 0)
215 goto fail;
216 ubuf += ret;
218 for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
219 if (req.ctxnum == ctxnum || req.ctxnum < 0) {
220 ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
221 req.data_opt, req.lock_cch,
222 req.flush_cbrs);
223 if (ret < 0)
224 goto fail;
225 ubuf += ret;
226 cnt++;
230 if (copy_to_user((void __user *)arg, &req, sizeof(req)))
231 return -EFAULT;
232 return cnt;
234 fail:
235 return ret;