// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>

#include <linux/nospec.h>

#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10

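/*
 * Copy a single GRU handle (GRU_HANDLE_BYTES bytes) to user space and
 * advance the user destination pointer past it.
 */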
static int gru_user_copy_handle(void __user **dp, void *s)
{
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	*dp += GRU_HANDLE_BYTES;
	return 0;
}

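/*
 * Dump the resources of one context: for each CBR in the context's
 * allocation map, copy the CB, TFH and CBE handles to the user buffer,
 * then append the data segment if the caller requested context data.
 */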
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt,
			int flush_cbrs)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;

	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (flush_cbrs)
			gru_flush_cache(cb);
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}
	if (dsrcnt && copy_to_user(ubuf, gseg + GRU_DS_BASE,
				   dsrcnt * GRU_HANDLE_STRIDE))
		goto fail;
	return 0;

fail:
	return -EFAULT;
}

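/*
 * Copy all TLB fault map (TFM) handles of a chiplet to the user buffer.
 * Returns the number of bytes copied, or -EFBIG/-EFAULT on failure.
 */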
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i;

	if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			goto fail;
	}
	return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

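/*
 * Copy all TLB global handles (TGH) of a chiplet to the user buffer.
 * Returns the number of bytes copied, or -EFBIG/-EFAULT on failure.
 */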
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i;

	if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			goto fail;
	}
	return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

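/*
 * Dump a single context: a dump header, the CCH, and, when the context
 * state can be trusted, the CBR/TFH/CBE handles plus the optional data
 * segment. Returns the number of bytes written, or a negative errno.
 */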
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch;
	union {
		struct gru_context_configuration_handle cch;
		unsigned char bytes[GRU_HANDLE_BYTES];
	} cch_copy;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
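	/*
	 * Try briefly to lock the CCH so the context does not change
	 * underneath the dump; give up after CCH_LOCK_ATTEMPTS tries.
	 */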
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	/*
	 * Snapshot the CCH into a kernel buffer so the delresp bit can be
	 * cleared before the handle is copied out: user memory must not be
	 * written through a plain pointer dereference.
	 */
	memcpy(&cch_copy, cch, GRU_HANDLE_BYTES);
	if (cch_locked)
		cch_copy.cch.delresp = 0;

	ubuf += sizeof(hdr);
	if (gru_user_copy_handle(&ubuf, &cch_copy)) {
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

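	/*
	 * The context resources are dumped only if the CCH was locked or
	 * if the caller accepts an unlocked, possibly inconsistent, snapshot.
	 */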
	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
						    dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

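	/* Fill in the header last, once the actual resource counts are known */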
	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;
}

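/*
 * Dump the state of one GRU chiplet to a user buffer: the TLB fault map
 * and TLB global handles, followed by each requested context. Returns the
 * number of contexts dumped, or a negative errno.
 */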
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids)
		return -EINVAL;
	req.gid = array_index_nospec(req.gid, gru_max_gids);

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

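	/* Dump the chiplet-global TLB handles before any contexts */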
	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

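	/* Dump either the single requested context or all contexts */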
	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch,
						req.flush_cbrs);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}