
0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
0003  */
0004 
0005 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
0006 
0007 #include <linux/debugfs.h>
0008 #include <linux/delay.h>
0009 
0010 #include "dpu_vbif.h"
0011 #include "dpu_hw_vbif.h"
0012 #include "dpu_trace.h"
0013 
0014 /**
0015  * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
0016  * @vbif:   Pointer to hardware vbif driver
0017  * @xin_id: Client interface identifier
0018  * @return: 0 if success; error code otherwise
0019  */
0020 static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
0021 {
0022     ktime_t timeout;
0023     bool status;
0024     int rc;
0025 
0026     if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
0027         DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
0028         return -EINVAL;
0029     }
0030 
0031     timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
0032     for (;;) {
0033         status = vbif->ops.get_halt_ctrl(vbif, xin_id);
0034         if (status)
0035             break;
0036         if (ktime_compare_safe(ktime_get(), timeout) > 0) {
0037             status = vbif->ops.get_halt_ctrl(vbif, xin_id);
0038             break;
0039         }
0040         usleep_range(501, 1000);
0041     }
0042 
0043     if (!status) {
0044         rc = -ETIMEDOUT;
0045         DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
0046                 vbif->idx - VBIF_0, xin_id);
0047     } else {
0048         rc = 0;
0049         DRM_DEBUG_ATOMIC("VBIF %d client %d is halted\n",
0050                 vbif->idx - VBIF_0, xin_id);
0051     }
0052 
0053     return rc;
0054 }
0055 
0056 /**
0057  * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
0058  * @vbif:   Pointer to hardware vbif driver
0059  * @ot_lim: Pointer to OT limit to be modified
0060  * @params: Pointer to usecase parameters
0061  */
0062 static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
0063         u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
0064 {
0065     u64 pps;
0066     const struct dpu_vbif_dynamic_ot_tbl *tbl;
0067     u32 i;
0068 
0069     if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
0070         return;
0071 
0072     /* Dynamic OT setting done only for WFD */
0073     if (!params->is_wfd)
0074         return;
0075 
0076     pps = params->frame_rate;
0077     pps *= params->width;
0078     pps *= params->height;
0079 
0080     tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
0081             &vbif->cap->dynamic_ot_wr_tbl;
0082 
0083     for (i = 0; i < tbl->count; i++) {
0084         if (pps <= tbl->cfg[i].pps) {
0085             *ot_lim = tbl->cfg[i].ot_limit;
0086             break;
0087         }
0088     }
0089 
0090     DRM_DEBUG_ATOMIC("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
0091             vbif->idx - VBIF_0, params->xin_id,
0092             params->width, params->height, params->frame_rate,
0093             pps, *ot_lim);
0094 }
0095 
0096 /**
0097  * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
0098  * @vbif:   Pointer to hardware vbif driver
0099  * @params: Pointer to usecase parameters
0100  * @return: OT limit
0101  */
0102 static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
0103     struct dpu_vbif_set_ot_params *params)
0104 {
0105     u32 ot_lim = 0;
0106     u32 val;
0107 
0108     if (!vbif || !vbif->cap) {
0109         DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
0110         return -EINVAL;
0111     }
0112 
0113     if (vbif->cap->default_ot_wr_limit && !params->rd)
0114         ot_lim = vbif->cap->default_ot_wr_limit;
0115     else if (vbif->cap->default_ot_rd_limit && params->rd)
0116         ot_lim = vbif->cap->default_ot_rd_limit;
0117 
0118     /*
0119      * If default ot is not set from dt/catalog,
0120      * then do not configure it.
0121      */
0122     if (ot_lim == 0)
0123         goto exit;
0124 
0125     /* Modify the limits if the target and the use case requires it */
0126     _dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
0127 
0128     if (vbif && vbif->ops.get_limit_conf) {
0129         val = vbif->ops.get_limit_conf(vbif,
0130                 params->xin_id, params->rd);
0131         if (val == ot_lim)
0132             ot_lim = 0;
0133     }
0134 
0135 exit:
0136     DRM_DEBUG_ATOMIC("vbif:%d xin:%d ot_lim:%d\n",
0137             vbif->idx - VBIF_0, params->xin_id, ot_lim);
0138     return ot_lim;
0139 }
0140 
0141 /**
0142  * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
0143  * @dpu_kms:    DPU handler
0144  * @params: Pointer to usecase parameters
0145  *
0146  * Note this function would block waiting for bus halt.
0147  */
0148 void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
0149         struct dpu_vbif_set_ot_params *params)
0150 {
0151     struct dpu_hw_vbif *vbif = NULL;
0152     struct dpu_hw_mdp *mdp;
0153     bool forced_on = false;
0154     u32 ot_lim;
0155     int ret, i;
0156 
0157     mdp = dpu_kms->hw_mdp;
0158 
0159     for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
0160         if (dpu_kms->hw_vbif[i] &&
0161                 dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
0162             vbif = dpu_kms->hw_vbif[i];
0163     }
0164 
0165     if (!vbif || !mdp) {
0166         DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
0167                 vbif != NULL, mdp != NULL);
0168         return;
0169     }
0170 
0171     if (!mdp->ops.setup_clk_force_ctrl ||
0172             !vbif->ops.set_limit_conf ||
0173             !vbif->ops.set_halt_ctrl)
0174         return;
0175 
0176     /* set write_gather_en for all write clients */
0177     if (vbif->ops.set_write_gather_en && !params->rd)
0178         vbif->ops.set_write_gather_en(vbif, params->xin_id);
0179 
0180     ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
0181 
0182     if (ot_lim == 0)
0183         return;
0184 
0185     trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
0186         params->vbif_idx);
0187 
0188     forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
0189 
0190     vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
0191 
0192     vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
0193 
0194     ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
0195     if (ret)
0196         trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
0197 
0198     vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
0199 
0200     if (forced_on)
0201         mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
0202 }
0203 
0204 void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
0205         struct dpu_vbif_set_qos_params *params)
0206 {
0207     struct dpu_hw_vbif *vbif = NULL;
0208     struct dpu_hw_mdp *mdp;
0209     bool forced_on = false;
0210     const struct dpu_vbif_qos_tbl *qos_tbl;
0211     int i;
0212 
0213     if (!params || !dpu_kms->hw_mdp) {
0214         DPU_ERROR("invalid arguments\n");
0215         return;
0216     }
0217     mdp = dpu_kms->hw_mdp;
0218 
0219     for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
0220         if (dpu_kms->hw_vbif[i] &&
0221                 dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
0222             vbif = dpu_kms->hw_vbif[i];
0223             break;
0224         }
0225     }
0226 
0227     if (!vbif || !vbif->cap) {
0228         DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
0229         return;
0230     }
0231 
0232     if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
0233         DRM_DEBUG_ATOMIC("qos remap not supported\n");
0234         return;
0235     }
0236 
0237     qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
0238             &vbif->cap->qos_nrt_tbl;
0239 
0240     if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
0241         DRM_DEBUG_ATOMIC("qos tbl not defined\n");
0242         return;
0243     }
0244 
0245     forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
0246 
0247     for (i = 0; i < qos_tbl->npriority_lvl; i++) {
0248         DRM_DEBUG_ATOMIC("vbif:%d xin:%d lvl:%d/%d\n",
0249                 params->vbif_idx, params->xin_id, i,
0250                 qos_tbl->priority_lvl[i]);
0251         vbif->ops.set_qos_remap(vbif, params->xin_id, i,
0252                 qos_tbl->priority_lvl[i]);
0253     }
0254 
0255     if (forced_on)
0256         mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
0257 }
0258 
0259 void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
0260 {
0261     struct dpu_hw_vbif *vbif;
0262     u32 i, pnd, src;
0263 
0264     for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
0265         vbif = dpu_kms->hw_vbif[i];
0266         if (vbif && vbif->ops.clear_errors) {
0267             vbif->ops.clear_errors(vbif, &pnd, &src);
0268             if (pnd || src) {
0269                 DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
0270                           vbif->idx - VBIF_0, pnd, src);
0271             }
0272         }
0273     }
0274 }
0275 
0276 void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
0277 {
0278     struct dpu_hw_vbif *vbif;
0279     int i, j;
0280 
0281     for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
0282         vbif = dpu_kms->hw_vbif[i];
0283         if (vbif && vbif->cap && vbif->ops.set_mem_type) {
0284             for (j = 0; j < vbif->cap->memtype_count; j++)
0285                 vbif->ops.set_mem_type(
0286                         vbif, j, vbif->cap->memtype[j]);
0287         }
0288     }
0289 }
0290 
0291 #ifdef CONFIG_DEBUG_FS
0292 
0293 void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
0294 {
0295     char vbif_name[32];
0296     struct dentry *entry, *debugfs_vbif;
0297     int i, j;
0298 
0299     entry = debugfs_create_dir("vbif", debugfs_root);
0300 
0301     for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
0302         const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
0303 
0304         snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
0305 
0306         debugfs_vbif = debugfs_create_dir(vbif_name, entry);
0307 
0308         debugfs_create_u32("features", 0600, debugfs_vbif,
0309             (u32 *)&vbif->features);
0310 
0311         debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
0312             (u32 *)&vbif->xin_halt_timeout);
0313 
0314         debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
0315             (u32 *)&vbif->default_ot_rd_limit);
0316 
0317         debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
0318             (u32 *)&vbif->default_ot_wr_limit);
0319 
0320         for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
0321             const struct dpu_vbif_dynamic_ot_cfg *cfg =
0322                     &vbif->dynamic_ot_rd_tbl.cfg[j];
0323 
0324             snprintf(vbif_name, sizeof(vbif_name),
0325                     "dynamic_ot_rd_%d_pps", j);
0326             debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
0327                     (u64 *)&cfg->pps);
0328             snprintf(vbif_name, sizeof(vbif_name),
0329                     "dynamic_ot_rd_%d_ot_limit", j);
0330             debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
0331                     (u32 *)&cfg->ot_limit);
0332         }
0333 
0334         for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
0335             const struct dpu_vbif_dynamic_ot_cfg *cfg =
0336                     &vbif->dynamic_ot_wr_tbl.cfg[j];
0337 
0338             snprintf(vbif_name, sizeof(vbif_name),
0339                     "dynamic_ot_wr_%d_pps", j);
0340             debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
0341                     (u64 *)&cfg->pps);
0342             snprintf(vbif_name, sizeof(vbif_name),
0343                     "dynamic_ot_wr_%d_ot_limit", j);
0344             debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
0345                     (u32 *)&cfg->ot_limit);
0346         }
0347     }
0348 }
0349 #endif