/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;
struct msm_file_private;

struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};
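
/*
 * Illustrative sketch (not part of this header): a GPU backend fills in a
 * config like the following and passes it to msm_gpu_init().  The
 * "kgsl_3d0_reg_memory" ioname matches what the adreno backend uses;
 * nr_rings depends on whether the target supports preemption.
 *
 *	struct msm_gpu_config config = {
 *		.ioname = "kgsl_3d0_reg_memory",
 *		.nr_rings = nr_rings,
 *	};
 */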

/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *    + z180_gpu
 */
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t *value, uint32_t *len);
	int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t value, uint32_t len);
	int (*hw_init)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	/* returns cumulative busy cycles, and the sample rate via out param: */
	u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	/* note: gpu_set_freq() can assume that we have been pm_resumed */
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
			     bool suspended);
	struct msm_gem_address_space *(*create_address_space)
			(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
			(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};
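
/*
 * Illustrative sketch (function names hypothetical): a backend wires up a
 * funcs table roughly like this and hands it to msm_gpu_init():
 *
 *	static const struct msm_gpu_funcs funcs = {
 *		.hw_init = my_hw_init,
 *		.pm_suspend = my_pm_suspend,
 *		.pm_resume = my_pm_resume,
 *		.submit = my_submit,
 *		.flush = my_flush,
 *		.irq = my_irq,
 *		.active_ring = my_active_ring,
 *		.destroy = my_destroy,
 *	};
 */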

/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};

/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** devfreq: devfreq instance */
	struct devfreq *devfreq;

	/** lock: lock for "suspended", "busy_cycles", and "time" */
	struct mutex lock;

	/**
	 * idle_freq:
	 *
	 * PM QoS max-frequency request used to clamp the GPU to its
	 * minimum frequency while it is idle.
	 */
	struct dev_pm_qos_request idle_freq;

	/**
	 * boost_freq:
	 *
	 * PM QoS min-frequency request used to temporarily boost the GPU
	 * clock (see msm_devfreq_boost()); dropped again by boost_work.
	 */
	struct dev_pm_qos_request boost_freq;

	/**
	 * busy_cycles: Last busy counter value, for calculating elapsed
	 * busy cycles since last sampling period.
	 */
	u64 busy_cycles;

	/** time: Time of last sampling period. */
	ktime_t time;

	/** idle_time: Time of last transition to idle state */
	ktime_t idle_time;

	/** average_status: moving average of the sampled device status */
	struct devfreq_dev_status average_status;

	/**
	 * idle_work:
	 *
	 * Used to delay clamping to idle freq on active->idle transition.
	 */
	struct msm_hrtimer_work idle_work;

	/**
	 * boost_work:
	 *
	 * Used to reset the boost_freq request after the boost period has
	 * elapsed.
	 */
	struct msm_hrtimer_work boost_work;

	/** suspended: tracks if we're suspended */
	bool suspended;
};
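
/*
 * Rough lifecycle, as implemented in msm_devfreq.c: msm_devfreq_active() and
 * msm_devfreq_idle() bracket periods of GPU activity; idle_work delays
 * applying the idle_freq clamp on the active->idle transition, and
 * boost_work removes the boost_freq request once the boost period expires.
 */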

struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters: */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;
	uint32_t last_cntrs[5];
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/**
	 * sysprof_active:
	 *
	 * The count of contexts that have enabled system profiling.
	 */
	refcount_t sysprof_active;

	/**
	 * cur_ctx_seqno:
	 *
	 * The ctx->seqno value of the last context to submit rendering,
	 * and the one with current pgtables installed (for generations
	 * that support per-context pgtables).  Tracked by seqno rather
	 * than pointer value to avoid dangling pointers, and cases where
	 * a ctx can be freed and a new one created with the same address.
	 */
	int cur_ctx_seqno;

	/*
	 * List of GEM active objects on this gpu.  Protected by
	 * msm_drm_private::mm_lock
	 */
	struct list_head active_list;

	/**
	 * lock:
	 *
	 * General lock for serializing all the gpu things.
	 *
	 * TODO move to per-ring locking where feasible (ie. submit/retire
	 * path, etc)
	 */
	struct mutex lock;

	/**
	 * active_submits:
	 *
	 * The number of submitted but not yet retired submits, used to
	 * determine transitions between active and idle.
	 *
	 * Protected by active_lock
	 */
	int active_submits;

	/** active_lock: protects active_submits and idle/active transitions */
	struct mutex active_lock;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/**
	 * global_faults: number of GPU hangs not attributed to a particular
	 * address space
	 */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;

	/* Hang and Inactivity Detection:
	 */
#define DRM_MSM_INACTIVE_PERIOD  66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
	struct timer_list hangcheck_timer;

	/* Fault info for most recent iova fault: */
	struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/* signalled when a submit is retired: */
	wait_queue_head_t retire_event;

	/* work for handling submit retirement: */
	struct kthread_work retire_work;

	/* worker that runs the retire/recover/fault works: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* True if we should clamp to minimum freq when the GPU is idle: */
	bool clamp_to_idle;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};

static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	/* the GPU platform device's drvdata is the embedded adreno_smmu_priv */
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);

	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
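
/*
 * For reference, with the sizes above this works out to
 * BUFSZ = ilog2(SZ_32K / 8) = ilog2(4096) = 12 and
 * BLKSZ = ilog2(32 / 8) = ilog2(4) = 2, i.e. the macro encodes both sizes
 * as log2 values expressed in 8-byte units.
 */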

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
			return true;
	}

	return false;
}

/* Perf-Counters:
 * A counter is programmed by writing select_val to select_reg, and then
 * sampled by reading sample_reg.  The name is used when reporting sampled
 * values to userspace.
 */
struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};
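
/*
 * Illustrative only (register and select names hypothetical): a target
 * would define its counter table roughly like
 *
 *	static const struct msm_gpu_perfcntr perfcntrs[] = {
 *		{ REG_CP_PERFCTR_SEL, REG_CP_PERFCTR_LO,
 *		  CP_ALWAYS_COUNT, "CP_ALWAYS_COUNT" },
 *	};
 */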

/*
 * The number of priority levels provided by drm gpu scheduler.  The
 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
 * cases, so we don't use it (no need for kernel generated jobs).
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)

/**
 * struct msm_file_private - per-drm_file context
 *
 * @queuelock:    synchronizes access to submitqueues list
 * @submitqueues: list of &msm_gpu_submitqueue created by userspace
 * @queueid:      counter incremented each time a submitqueue is created,
 *                used to assign &msm_gpu_submitqueue.id
 * @aspace:       the per-process GPU address-space
 * @ref:          reference count
 * @seqno:        unique per process seqno
 */
struct msm_file_private {
	rwlock_t queuelock;
	struct list_head submitqueues;
	int queueid;
	struct msm_gem_address_space *aspace;
	struct kref ref;
	int seqno;

	/**
	 * sysprof:
	 *
	 * The value of MSM_PARAM_SYSPROF set by userspace.  This is
	 * intended to be used by system profiling tools like Mesa's
	 * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
	 *
	 * Setting a value of 1 will preserve performance counters across
	 * context switches.  Setting a value of 2 will in addition
	 * suppress suspend.  (Performance counters lose state across
	 * power collapse, which is undesirable for profiling in some
	 * cases.)
	 *
	 * The value automatically reverts to zero when the drm device
	 * file is closed.
	 */
	int sysprof;

	/** comm: Overridden task comm, see MSM_PARAM_COMM */
	char *comm;

	/** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */
	char *cmdline;

	/**
	 * elapsed_ns:
	 *
	 * The total (cumulative) elapsed time GPU was busy with rendering
	 * from this context in ns.
	 */
	uint64_t elapsed_ns;

	/**
	 * cycles:
	 *
	 * The total (cumulative) GPU cycles elapsed attributed to this
	 * context.
	 */
	uint64_t cycles;

	/**
	 * entities:
	 *
	 * Table of per-priority-level sched entities used by submitqueues
	 * associated with this &drm_file.  Because some userspace apps
	 * make assumptions about rendering from multiple gl contexts
	 * (of the same priority) within the process happening in FIFO
	 * order without requiring any fencing beyond MakeCurrent(), we
	 * create at most one &drm_sched_entity per-process per-priority-
	 * level.
	 */
	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
};

/**
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
 * @gpu:        the gpu instance
 * @prio:       the userspace priority level
 * @ring_nr:    [out] the ringbuffer the userspace priority maps to
 * @sched_prio: [out] the gpu scheduler priority level which the userspace
 *              priority maps to
 *
 * With drm/scheduler providing its own level of prioritization, our total
 * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
 * Each ring is associated with its own scheduler instance.  However, our
 * UABI is that lower numerical values are higher priority.  So mapping the
 * single userspace priority level into ring_nr and sched_prio takes some
 * care.  The userspace provided priority (when a submitqueue is created)
 * is mapped to ring nr and scheduler priority as such:
 *
 *   ring_nr    = userspace_prio / NR_SCHED_PRIORITIES
 *   sched_prio = NR_SCHED_PRIORITIES -
 *                (userspace_prio % NR_SCHED_PRIORITIES) - 1
 *
 * This allows generations without preemption (nr_rings==1) to have some
 * amount of prioritization, and provides more priority levels for gens
 * that do have preemption.
 */
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* invert sched priority to map to higher-numeric-is-higher-
	 * priority convention
	 */
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;

	return 0;
}
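
/*
 * Worked example (illustrative): assuming NR_SCHED_PRIORITIES evaluates to 3
 * (its value with the current DRM_SCHED_PRIORITY_MIN/HIGH levels) and
 * nr_rings == 4, userspace priorities 0..11 map as:
 *
 *	prio  0 -> ring 0, sched_prio 2 (highest)
 *	prio  1 -> ring 0, sched_prio 1
 *	prio  2 -> ring 0, sched_prio 0
 *	prio  3 -> ring 1, sched_prio 2
 *	...
 *	prio 11 -> ring 3, sched_prio 0 (lowest)
 */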

/**
 * struct msm_gpu_submitqueue - Userspace created context.
 *
 * A submitqueue is associated with a gl context or vk queue (or equiv)
 * in userspace.
 *
 * @id:        userspace id for the submitqueue, unique within the drm_file
 * @flags:     userspace flags for the submitqueue, specified at creation
 *             (currently unused)
 * @ring_nr:   the ringbuffer used by this submitqueue, which is determined
 *             by the submitqueue's priority
 * @faults:    the number of GPU hangs associated with this submitqueue
 * @last_fence: the sequence number of the last allocated fence (for error
 *             checking)
 * @ctx:       the per-drm_file context associated with the submitqueue (ie.
 *             which set of pgtables do submits associated with the
 *             submitqueue use)
 * @node:      node in the context's list of submitqueues
 * @fence_idr: maps fence-id to dma_fence for userspace visible fence
 *             seqno, protected by submitqueue lock
 * @lock:      submitqueue lock
 * @ref:       reference count
 * @entity:    the submit job-queue
 */
struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 ring_nr;
	int faults;
	uint32_t last_fence;
	struct msm_file_private *ctx;
	struct list_head node;
	struct idr fence_idr;
	struct mutex lock;
	struct kref ref;
	struct drm_sched_entity *entity;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
	char name[32];
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;

	u32 rbbm_status;

	char *comm;
	char *cmd;

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

/* register offsets are in units of dwords, hence the << 2: */
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	u64 val;

	/*
	 * Why not a readq here? Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins. The longer a GPU family goes the higher the chance that
	 * we'll get burned.  We could do a series of validity checks if we
	 * wanted to, but really is a readq() that much better? Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is
	 * latched when the lo is read, so make sure to read the lo first
	 * to trigger that.
	 */
	val = (u64) msm_readl(gpu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here? Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 struct drm_printer *p);

int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id);
int msm_submitqueue_create(struct drm_device *drm,
		struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);

void msm_submitqueue_destroy(struct kref *kref);

int msm_file_private_set_sysprof(struct msm_file_private *ctx,
				 struct msm_gpu *gpu, int sysprof);
void __msm_file_private_destroy(struct kref *kref);

static inline void msm_file_private_put(struct msm_file_private *ctx)
{
	kref_put(&ctx->ref, __msm_file_private_destroy);
}

static inline struct msm_file_private *msm_file_private_get(
	struct msm_file_private *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->lock);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->lock);
}

/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
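
/*
 * Usage sketch (illustrative, modeled on how the ringbuffer code allocates
 * its kernel BOs):
 *
 *	msm_gem_kernel_new(gpu->dev, size,
 *			   check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
 *			   gpu->aspace, &bo, &iova);
 */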

#endif /* __MSM_GPU_H__ */