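/*
 * Streams the GPU cmdstream (and optionally buffer contents) out through a
 * debugfs file, in a format consumed by the freedreno userspace decoding
 * tools.  Typical usage, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   cat /sys/kernel/debug/dri/<minor>/rd > logfile.rd
 *
 * while submitting the workload of interest.  A second node, "hangrd", is
 * fed from the GPU recovery path, so it captures only the offending submit.
 */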
#include <linux/circ_buf.h>
#include <linux/debugfs.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <drm/drm_file.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"

bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
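/*
 * rd_full can be toggled at runtime; with module_param_named() it typically
 * shows up as (assuming the driver is built as the "msm" module):
 *
 *   echo Y > /sys/module/msm/parameters/rd_full
 */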

#ifdef CONFIG_DEBUG_FS

enum rd_sect_type {
	RD_NONE,
	RD_TEST,
	RD_CMD,
	RD_GPUADDR,
	RD_CONTEXT,
	RD_CMDSTREAM,
	RD_CMDSTREAM_ADDR,
	RD_PARAM,
	RD_FLUSH,
	RD_PROGRAM,
	RD_VERT_SHADER,
	RD_FRAG_SHADER,
	RD_BUFFER_CONTENTS,
	RD_GPU_ID,
	RD_CHIP_ID,
};

#define BUF_SZ 512  /* must be a power of two (head/tail are masked with BUF_SZ - 1) */

/* space used in the fifo: */
#define circ_count(circ) \
	(CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
#define circ_count_to_end(circ) \
	(CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))

/* space available in the fifo: */
#define circ_space(circ) \
	(CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
#define circ_space_to_end(circ) \
	(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))

struct msm_rd_state {
	struct drm_device *dev;

	bool open;

	struct msm_gem_submit *submit;

	/* fifo writes are serialized by the producer (submit/hang-dump)
	 * side holding gpu->lock; read_lock serializes the debugfs reads
	 * on the consumer side.
	 */
	struct mutex read_lock;

	wait_queue_head_t fifo_event;
	struct circ_buf fifo;

	char buf[BUF_SZ];
};

static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
{
	struct circ_buf *fifo = &rd->fifo;
	const char *ptr = buf;

	while (sz > 0) {
		char *fptr = &fifo->buf[fifo->head];
		int n;

		wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
		if (!rd->open)
			return;

		/* Copy at most up to the end of the ring buffer; the outer
		 * loop takes care of wrap-around.
		 */
		n = min(sz, circ_space_to_end(&rd->fifo));
		memcpy(fptr, ptr, n);

		smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
		sz -= n;
		ptr += n;

		wake_up_all(&rd->fifo_event);
	}
}

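/*
 * Each section written to the fifo is framed as a 32-bit section type
 * (enum rd_sect_type), a 32-bit payload size in bytes, and then the
 * payload itself:
 *
 *   u32 type; u32 size; u8 payload[size];
 */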
static void rd_write_section(struct msm_rd_state *rd,
		enum rd_sect_type type, const void *buf, int sz)
{
	rd_write(rd, &type, 4);
	rd_write(rd, &sz, 4);
	rd_write(rd, buf, sz);
}

static ssize_t rd_read(struct file *file, char __user *buf,
		size_t sz, loff_t *ppos)
{
	struct msm_rd_state *rd = file->private_data;
	struct circ_buf *fifo = &rd->fifo;
	const char *fptr = &fifo->buf[fifo->tail];
	int n = 0, ret = 0;

	mutex_lock(&rd->read_lock);

	ret = wait_event_interruptible(rd->fifo_event,
			circ_count(&rd->fifo) > 0);
	if (ret)
		goto out;

	/* Copy at most up to the end of the ring buffer; a short read is
	 * fine, userspace simply calls read() again for the remainder.
	 */
	n = min_t(int, sz, circ_count_to_end(&rd->fifo));
	if (copy_to_user(buf, fptr, n)) {
		ret = -EFAULT;
		goto out;
	}

	smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
	*ppos += n;

	wake_up_all(&rd->fifo_event);

out:
	mutex_unlock(&rd->read_lock);
	if (ret)
		return ret;
	return n;
}

static int rd_open(struct inode *inode, struct file *file)
{
	struct msm_rd_state *rd = inode->i_private;
	struct drm_device *dev = rd->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	uint64_t val;
	uint32_t gpu_id;
	uint32_t zero = 0;
	int ret = 0;

	if (!gpu)
		return -ENODEV;

	mutex_lock(&gpu->lock);

	if (rd->open) {
		ret = -EBUSY;
		goto out;
	}

	file->private_data = rd;
	rd->open = true;

	/* Reset fifo to clear any previously unread data: */
	rd->fifo.head = rd->fifo.tail = 0;

	/* Start the stream with the GPU id and chip id, so the decoding
	 * tools know which GPU produced the dump.
	 */
	gpu->funcs->get_param(gpu, NULL, MSM_PARAM_GPU_ID, &val, &zero);
	gpu_id = val;

	rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));

	gpu->funcs->get_param(gpu, NULL, MSM_PARAM_CHIP_ID, &val, &zero);
	rd_write_section(rd, RD_CHIP_ID, &val, sizeof(val));

out:
	mutex_unlock(&gpu->lock);
	return ret;
}

static int rd_release(struct inode *inode, struct file *file)
{
	struct msm_rd_state *rd = inode->i_private;

	rd->open = false;
	wake_up_all(&rd->fifo_event);

	return 0;
}

static const struct file_operations rd_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = rd_open,
	.read = rd_read,
	.llseek = no_llseek,
	.release = rd_release,
};

static void rd_cleanup(struct msm_rd_state *rd)
{
	if (!rd)
		return;

	mutex_destroy(&rd->read_lock);
	kfree(rd);
}

static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
{
	struct msm_rd_state *rd;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return ERR_PTR(-ENOMEM);

	rd->dev = minor->dev;
	rd->fifo.buf = rd->buf;

	mutex_init(&rd->read_lock);

	init_waitqueue_head(&rd->fifo_event);

	debugfs_create_file(name, S_IFREG | S_IRUGO, minor->debugfs_root, rd,
			&rd_debugfs_fops);

	return rd;
}

int msm_rd_debugfs_init(struct drm_minor *minor)
{
	struct msm_drm_private *priv = minor->dev->dev_private;
	struct msm_rd_state *rd;
	int ret;

	/* only create once: */
	if (priv->rd)
		return 0;

	rd = rd_init(minor, "rd");
	if (IS_ERR(rd)) {
		ret = PTR_ERR(rd);
		goto fail;
	}

	priv->rd = rd;

	rd = rd_init(minor, "hangrd");
	if (IS_ERR(rd)) {
		ret = PTR_ERR(rd);
		goto fail;
	}

	priv->hangrd = rd;

	return 0;

fail:
	msm_rd_debugfs_cleanup(priv);
	return ret;
}

void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
{
	rd_cleanup(priv->rd);
	priv->rd = NULL;

	rd_cleanup(priv->hangrd);
	priv->hangrd = NULL;
}

static void snapshot_buf(struct msm_rd_state *rd,
		struct msm_gem_submit *submit, int idx,
		uint64_t iova, uint32_t size, bool full)
{
	struct msm_gem_object *obj = submit->bos[idx].obj;
	unsigned offset = 0;
	const char *buf;

	if (iova) {
		offset = iova - submit->bos[idx].iova;
	} else {
		iova = submit->bos[idx].iova;
		size = obj->base.size;
	}

	/* Always write the GPUADDR header, even if we don't dump the
	 * contents, so the decoder sees a complete list of the buffers
	 * referenced by the submit.
	 */
	rd_write_section(rd, RD_GPUADDR,
			(uint32_t[3]){ iova, size, iova >> 32 }, 12);

	if (!full)
		return;

	/* Only dump the contents of buffers marked as readable by the GPU: */
	if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
		return;

	msm_gem_lock(&obj->base);
	buf = msm_gem_get_vaddr_active(&obj->base);
	if (IS_ERR(buf))
		goto out_unlock;

	buf += offset;

	rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);

	msm_gem_put_vaddr_locked(&obj->base);

out_unlock:
	msm_gem_unlock(&obj->base);
}

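/*
 * Dump a submit into the rd (or hangrd) fifo.  Called with gpu->lock held
 * (checked by the WARN_ON below).  The resulting stream contains an
 * optional RD_CMD comment section, an RD_CMD section identifying the
 * submitting task and fence, an RD_GPUADDR (plus, when dumped,
 * RD_BUFFER_CONTENTS) section per buffer, and an RD_CMDSTREAM_ADDR section
 * per cmdstream buffer.
 */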
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
		const char *fmt, ...)
{
	struct task_struct *task;
	char msg[256];
	int i, n;

	if (!rd->open)
		return;

	/* writes into the fifo are serialized by the caller holding
	 * gpu->lock; rd->read_lock serializes the debugfs reads:
	 */
	WARN_ON(!mutex_is_locked(&submit->gpu->lock));

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		n = vscnprintf(msg, sizeof(msg), fmt, args);
		va_end(args);

		rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
	}

	rcu_read_lock();
	task = pid_task(submit->pid, PIDTYPE_PID);
	if (task) {
		n = scnprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
				TASK_COMM_LEN, task->comm,
				pid_nr(submit->pid), submit->seqno);
	} else {
		n = scnprintf(msg, sizeof(msg), "???/%d: fence=%u",
				pid_nr(submit->pid), submit->seqno);
	}
	rcu_read_unlock();

	rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));

	for (i = 0; i < submit->nr_bos; i++)
		snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i));

	for (i = 0; i < submit->nr_cmds; i++) {
		uint32_t szd = submit->cmd[i].size; /* in dwords */

		/* snapshot cmdstream bo's (if we haven't already): */
		if (!should_dump(submit, i)) {
			snapshot_buf(rd, submit, submit->cmd[i].idx,
					submit->cmd[i].iova, szd * 4, true);
		}
	}

	for (i = 0; i < submit->nr_cmds; i++) {
		uint64_t iova = submit->cmd[i].iova;
		uint32_t szd = submit->cmd[i].size; /* in dwords */

		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets; the buffer was already logged
			 * above, and the decoder can follow the IB from the
			 * logged cmdstream/gpuaddr:
			 */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
		case MSM_SUBMIT_CMD_BUF:
			rd_write_section(rd, RD_CMDSTREAM_ADDR,
					(uint32_t[3]){ iova, szd, iova >> 32 }, 12);
			break;
		}
	}
}

#endif