0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022 #define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base)
0023 #include "priv.h"
0024
0025 #include <subdev/clk.h>
0026 #include <subdev/timer.h>
0027 #include <subdev/volt.h>
0028
0029 #define BUSY_SLOT 0
0030 #define CLK_SLOT 7
0031
/* Tunables and state for the PMU-counter-driven DVFS governor. */
struct gk20a_pmu_dvfs_data {
	int p_load_target;	/* target load (%) the governor steers toward */
	int p_load_max;		/* load (%) above which we jump several levels up */
	int p_smooth;		/* smoothing weight for the running load average */
	unsigned int avg_load;	/* exponentially smoothed load (%), updated each tick */
};
0038
/* GK20A PMU instance: common nvkm_pmu plus the periodic DVFS machinery. */
struct gk20a_pmu {
	struct nvkm_pmu base;		/* must be first: gk20a_pmu() container_of's it */
	struct nvkm_alarm alarm;	/* periodic timer driving gk20a_pmu_dvfs_work() */
	struct gk20a_pmu_dvfs_data *data;	/* governor tunables (points at gk20a_dvfs_data) */
};
0044
/* One sample of the PMU idle counters: busy cycles vs. total cycles. */
struct gk20a_pmu_dvfs_dev_status {
	u32 total;	/* total-cycle counter (CLK_SLOT) */
	u32 busy;	/* busy-cycle counter (BUSY_SLOT) */
};
0049
0050 static int
0051 gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
0052 {
0053 struct nvkm_clk *clk = pmu->base.subdev.device->clk;
0054
0055 return nvkm_clk_astate(clk, *state, 0, false);
0056 }
0057
0058 static void
0059 gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
0060 {
0061 struct nvkm_clk *clk = pmu->base.subdev.device->clk;
0062
0063 *state = clk->pstate;
0064 }
0065
0066 static int
0067 gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
0068 int *state, int load)
0069 {
0070 struct gk20a_pmu_dvfs_data *data = pmu->data;
0071 struct nvkm_clk *clk = pmu->base.subdev.device->clk;
0072 int cur_level, level;
0073
0074
0075 level = cur_level = clk->pstate;
0076
0077 if (load > data->p_load_max) {
0078 level = min(clk->state_nr - 1, level + (clk->state_nr / 3));
0079 } else {
0080 level += ((load - data->p_load_target) * 10 /
0081 data->p_load_target) / 2;
0082 level = max(0, level);
0083 level = min(clk->state_nr - 1, level);
0084 }
0085
0086 nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
0087 cur_level, level);
0088
0089 *state = level;
0090
0091 return (level != cur_level);
0092 }
0093
0094 static void
0095 gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
0096 struct gk20a_pmu_dvfs_dev_status *status)
0097 {
0098 struct nvkm_falcon *falcon = &pmu->base.falcon;
0099
0100 status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10));
0101 status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10));
0102 }
0103
0104 static void
0105 gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
0106 {
0107 struct nvkm_falcon *falcon = &pmu->base.falcon;
0108
0109 nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000);
0110 nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000);
0111 }
0112
/*
 * Periodic DVFS tick, run from the nvkm timer alarm.
 *
 * Samples the PMU idle counters, folds the instantaneous utilization into
 * a smoothed running average, asks the governor for a target pstate, and
 * requests a clock change when the target differs from the current state.
 * Always resets the counters and re-arms itself before returning.
 */
static void
gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
{
	struct gk20a_pmu *pmu =
		container_of(alarm, struct gk20a_pmu, alarm);
	struct gk20a_pmu_dvfs_data *data = pmu->data;
	struct gk20a_pmu_dvfs_dev_status status;
	struct nvkm_subdev *subdev = &pmu->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_clk *clk = device->clk;
	struct nvkm_timer *tmr = device->timer;
	struct nvkm_volt *volt = device->volt;
	u32 utilization = 0;
	int state;

	/* DVFS needs both clock and voltage control; without them just re-arm */
	if (!clk || !volt)
		goto resched;

	gk20a_pmu_dvfs_get_dev_status(pmu, &status);

	/* guard against a zero total counter (e.g. freshly reset) */
	if (status.total)
		utilization = div_u64((u64)status.busy * 100, status.total);

	/* exponential smoothing: avg = (p_smooth*avg + sample) / (p_smooth + 1) */
	data->avg_load = (data->p_smooth * data->avg_load) + utilization;
	data->avg_load /= data->p_smooth + 1;
	nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
		   utilization, data->avg_load);

	gk20a_pmu_dvfs_get_cur_state(pmu, &state);

	if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
		nvkm_trace(subdev, "set new state to %d\n", state);
		gk20a_pmu_dvfs_target(pmu, &state);
	}

resched:
	/* start a fresh counting interval and re-arm the alarm
	 * (100000000 — presumably 100 ms in ns; confirm nvkm timer units) */
	gk20a_pmu_dvfs_reset_dev_status(pmu);
	nvkm_timer_alarm(tmr, 100000000, alarm);
}
0156
0157 static void
0158 gk20a_pmu_fini(struct nvkm_pmu *pmu)
0159 {
0160 struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
0161 nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm);
0162
0163 nvkm_falcon_put(&pmu->falcon, &pmu->subdev);
0164 }
0165
/*
 * Bring up the GK20A PMU: acquire the falcon, program the idle counters
 * used for load sampling, and arm the first DVFS tick.
 *
 * Returns 0 on success or a negative error code if the falcon cannot be
 * acquired.
 */
static int
gk20a_pmu_init(struct nvkm_pmu *pmu)
{
	struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = pmu->subdev.device;
	struct nvkm_falcon *falcon = &pmu->falcon;
	int ret;

	ret = nvkm_falcon_get(falcon, subdev);
	if (ret) {
		nvkm_error(subdev, "cannot acquire %s falcon!\n", falcon->name);
		return ret;
	}

	/* configure the idle counter slots (0x10-byte register stride);
	 * NOTE(review): 0x504/0x50c values presumably select the signal each
	 * slot counts and its mode — confirm against the falcon counter spec */
	nvkm_falcon_wr32(falcon, 0x504 + (BUSY_SLOT * 0x10), 0x00200001);
	nvkm_falcon_wr32(falcon, 0x50c + (BUSY_SLOT * 0x10), 0x00000002);
	nvkm_falcon_wr32(falcon, 0x50c + (CLK_SLOT * 0x10), 0x00000003);

	/* first DVFS sample after 2000000000 — presumably 2 s in ns;
	 * subsequent ticks re-arm at a shorter interval in gk20a_pmu_dvfs_work() */
	nvkm_timer_alarm(device->timer, 2000000000, &gpmu->alarm);
	return 0;
}
0189
/* Default governor tunables: aim for 70% load, jump up past 90%,
 * smooth over two samples (p_smooth + 1). avg_load starts at 0. */
static struct gk20a_pmu_dvfs_data
gk20a_dvfs_data= {
	.p_load_target = 70,
	.p_load_max = 90,
	.p_smooth = 1,
};
0196
0197 static const struct nvkm_pmu_func
0198 gk20a_pmu = {
0199 .flcn = >215_pmu_flcn,
0200 .enabled = gf100_pmu_enabled,
0201 .init = gk20a_pmu_init,
0202 .fini = gk20a_pmu_fini,
0203 .reset = gf100_pmu_reset,
0204 };
0205
/* Firmware interface table: GK20A runs without loaded PMU firmware,
 * so the single entry (-1) selects the no-firmware path. */
static const struct nvkm_pmu_fwif
gk20a_pmu_fwif[] = {
	{ -1, gf100_pmu_nofw, &gk20a_pmu },
	{}
};
0211
/*
 * Allocate and construct the GK20A PMU subdev.
 *
 * On success *ppmu points at the new PMU and 0 is returned; on failure a
 * negative error code is returned. *ppmu is set before nvkm_pmu_ctor() so
 * that — presumably — the nvkm core can destroy the partially constructed
 * subdev on the error path (confirm against the subdev teardown convention).
 */
int
gk20a_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_pmu **ppmu)
{
	struct gk20a_pmu *pmu;
	int ret;

	if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
		return -ENOMEM;
	*ppmu = &pmu->base;

	ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, type, inst, &pmu->base);
	if (ret)
		return ret;

	/* wire up the DVFS governor; the alarm fires gk20a_pmu_dvfs_work() */
	pmu->data = &gk20a_dvfs_data;
	nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
	return 0;
}