/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't care what the connector is.
 */
#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

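/* The HVS has three output FIFOs (channels) on both VC4 and VC5. */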
#define HVS_NUM_CHANNELS 3

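/*
 * Private state for the color transformation matrix (CTM): the matrix
 * itself, and the FIFO it is enabled on (1-based, 0 means disabled).
 */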
struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *
to_vc4_ctm_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

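/*
 * Global HVS state: which FIFOs are in use, the load on each of them,
 * and the core clock rate the whole configuration requires.
 */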
struct vc4_hvs_state {
	struct drm_private_state base;
	unsigned long core_clock_rate;

	struct {
		unsigned in_use: 1;
		unsigned long fifo_load;
		struct drm_crtc_commit *pending_commit;
	} fifo_state[HVS_NUM_CHANNELS];
};

static struct vc4_hvs_state *
to_vc4_hvs_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}

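/* Aggregate load generated by all the planes on the HVS and the memory bus. */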
struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

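/*
 * Returns the CTM private state, taking the CTM modeset lock first so
 * that concurrent commits updating the CTM are serialized.
 */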
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

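/* Writes the CTM coefficients, and the FIFO they apply to, into the HVS. */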
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

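/* Helpers fetching our HVS channels private state out of a drm_atomic_state. */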
static struct vc4_hvs_state *
vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	/* The new/old private obj helpers return NULL, not an ERR_PTR,
	 * when the object isn't part of the state.
	 */
	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
	if (!priv_state)
		return ERR_PTR(-EINVAL);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
	if (!priv_state)
		return ERR_PTR(-EINVAL);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

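/*
 * Commits the FIFO/pixelvalve muxing on VC4 (BCM2835-7): only DSP3 is
 * configurable, connecting FIFO 2 to pixelvalve 1 unless the TXP is
 * using that FIFO.
 */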
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
		 * FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_crtc->feeds_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}

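/*
 * Commits the FIFO/output muxing on VC5 (BCM2711), where outputs DSP2
 * to DSP5 each have a configurable mux.
 */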
static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int channel = vc4_state->assigned_channel;

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			drm_WARN_ON(&vc4->base,
				    VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL),
						  SCALER_DISPCTRL_DSP3_MUX) == channel);

			mux = (channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));
			break;

		case 5:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}

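/*
 * Commit tail: waits for the pending commit of each FIFO we touch,
 * temporarily raises the core clock around the modeset on BCM2711, and
 * runs the usual atomic-helper sequence with our CTM and muxing hooks.
 */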
static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *new_crtc_state;
	struct vc4_hvs_state *new_hvs_state;
	struct drm_crtc *crtc;
	struct vc4_hvs_state *old_hvs_state;
	unsigned int channel;
	int i;

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (WARN_ON(IS_ERR(old_hvs_state)))
		return;

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(new_hvs_state)))
		return;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

		if (!new_crtc_state->commit)
			continue;

		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
		vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
	}

	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
		struct drm_crtc_commit *commit;
		int ret;

		if (!old_hvs_state->fifo_state[channel].in_use)
			continue;

		commit = old_hvs_state->fifo_state[channel].pending_commit;
		if (!commit)
			continue;

		ret = drm_crtc_commit_wait(commit);
		if (ret)
			drm_err(dev, "Timed out waiting for commit\n");

		drm_crtc_commit_put(commit);
		old_hvs_state->fifo_state[channel].pending_commit = NULL;
	}

	if (vc4->is_vc5) {
		unsigned long state_rate = max(old_hvs_state->core_clock_rate,
					       new_hvs_state->core_clock_rate);
		unsigned long core_rate = max_t(unsigned long,
						500000000, state_rate);

		drm_dbg(dev, "Raising the core clock to %lu Hz\n", core_rate);

		/*
		 * Do a temporary request on the core clock during the
		 * modeset.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
	}

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	if (vc4->is_vc5)
		vc5_hvs_pv_muxing_commit(vc4, state);
	else
		vc4_hvs_pv_muxing_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	if (vc4->is_vc5) {
		drm_dbg(dev, "Running the core clock at %lu Hz\n",
			new_hvs_state->core_clock_rate);

		/*
		 * Request a clock rate based on the current HVS
		 * requirements.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate));

		drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
			clk_get_rate(hvs->core_clk));
	}
}

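/*
 * Commit setup hook: stores a reference to this commit in the state of
 * every FIFO it uses, so that the next commit tail can wait for it.
 */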
static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct vc4_hvs_state *hvs_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(hvs_state)))
		return PTR_ERR(hvs_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		unsigned int channel =
			vc4_crtc_state->assigned_channel;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!hvs_state->fifo_state[channel].in_use)
			continue;

		hvs_state->fifo_state[channel].pending_commit =
			drm_crtc_commit_get(crtc_state->commit);
	}

	return 0;
}

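/*
 * Derives the framebuffer modifier from the BO's vc4_set_tiling state
 * when userspace didn't pass one explicitly.
 */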
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_mode_fb_cmd2 mode_cmd_local;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return ERR_PTR(-ENODEV);

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/*
 * The HVS has a single CTM block shared by all FIFOs, so the color
 * transformation matrix can only be enabled on one CRTC at a time.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i, j;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTM configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the matrix: every
			 * coefficient must be at most 1.0 in absolute value,
			 * i.e. at most 2^32 in our S31.32 fixed-point
			 * representation.
			 */
			ctm = new_crtc_state->ctm->data;
			/* Use a dedicated index here: reusing 'i' would
			 * clobber the CRTC iterator and end the outer loop
			 * early.
			 */
			for (j = 0; j < ARRAY_SIZE(ctm->matrix); j++) {
				u64 val = ctm->matrix[j];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

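/*
 * Checks that the memory bus and HVS load generated by all the planes
 * stays within what the hardware can sustain.
 */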
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run @ 250MHz, let's take a margin and
	 * pick 240MHz.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;
	unsigned int i;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
	}

	state->core_clock_rate = old_state->core_clock_rate;

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_state->fifo_state[i].pending_commit)
			continue;

		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
	}

	kfree(hvs_state);
}

static void vc4_hvs_channels_print_state(struct drm_printer *p,
					 const struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	drm_printf(p, "HVS State\n");
	drm_printf(p, "\tCore Clock Rate: %lu\n", hvs_state->core_clock_rate);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		drm_printf(p, "\tChannel %d\n", i);
		drm_printf(p, "\t\tin use=%d\n", hvs_state->fifo_state[i].in_use);
		drm_printf(p, "\t\tload=%lu\n", hvs_state->fifo_state[i].fifo_load);
	}
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
	.atomic_print_state = vc4_hvs_channels_print_state,
};

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the pool of available FIFOs. However, there are a few corner
 * cases that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 *   enabled CRTCs to pull their CRTC state into the global state, since
 *   a page flip would start considering their vblank to complete. Since
 *   we don't have a guarantee that they are actually active, that
 *   vblank might never happen, and shouldn't even be considered if we
 *   want to do a page flip on a single CRTC.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int unassigned_channels = 0;
	unsigned int i;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
		if (!hvs_new_state->fifo_state[i].in_use)
			unassigned_channels |= BIT(i);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct vc4_crtc_state *old_vc4_crtc_state =
			to_vc4_crtc_state(old_crtc_state);
		struct vc4_crtc_state *new_vc4_crtc_state =
			to_vc4_crtc_state(new_crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int matching_channels;
		unsigned int channel;

		drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable) {
			if (new_crtc_state->enable)
				drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n",
					crtc->name, new_vc4_crtc_state->assigned_channel);
			else
				drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name);

			continue;
		}

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			channel = old_vc4_crtc_state->assigned_channel;

			drm_dbg(dev, "%s: Disabling, freeing channel %d\n",
				crtc->name, channel);

			hvs_new_state->fifo_state[channel].in_use = false;
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		/*
		 * The problem we have to solve here is that we have
		 * up to 7 encoders, connected to up to 6 CRTCs.
		 *
		 * Those CRTCs, depending on the instance, can be
		 * routed to 1, 2 or 3 HVS FIFOs, and we need to set
		 * the muxing between FIFOs and outputs in the HVS
		 * accordingly.
		 *
		 * It would be pretty hard to come up with an
		 * algorithm that would generically solve
		 * this. However, the current routing trees we support
		 * allow us to simplify the problem a bit.
		 *
		 * Indeed, with the currently supported layouts, if we
		 * try to assign the FIFOs in ascending CRTC index
		 * order, we can't fall into the situation where an
		 * earlier CRTC that had multiple routes is assigned
		 * one that was the only option for a later CRTC.
		 *
		 * If the layout changes and doesn't give us that in
		 * the future, we will need to have something smarter,
		 * but it works so far.
		 */
		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (!matching_channels)
			return -EINVAL;

		channel = ffs(matching_channels) - 1;

		drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name);
		new_vc4_crtc_state->assigned_channel = channel;
		unassigned_channels &= ~BIT(channel);
		hvs_new_state->fifo_state[channel].in_use = true;
	}

	return 0;
}

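/*
 * Computes the core clock rate the new state requires: the maximum of
 * the per-FIFO (COB) loads and of the aggregate HVS pixel rate, scaled
 * depending on the number of active outputs.
 */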
static int
vc4_core_clock_atomic_check(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;
	struct vc4_hvs_state *hvs_new_state;
	struct vc4_load_tracker_state *load_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int num_outputs;
	unsigned long pixel_rate;
	unsigned long cob_rate;
	unsigned int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for_each_oldnew_crtc_in_state(state, crtc,
				      old_crtc_state,
				      new_crtc_state,
				      i) {
		if (old_crtc_state->active) {
			struct vc4_crtc_state *old_vc4_state =
				to_vc4_crtc_state(old_crtc_state);
			unsigned int channel = old_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load = 0;
		}

		if (new_crtc_state->active) {
			struct vc4_crtc_state *new_vc4_state =
				to_vc4_crtc_state(new_crtc_state);
			unsigned int channel = new_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load =
				new_vc4_state->hvs_load;
		}
	}

	cob_rate = 0;
	num_outputs = 0;
	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_new_state->fifo_state[i].in_use)
			continue;

		num_outputs++;
		cob_rate = max_t(unsigned long,
				 hvs_new_state->fifo_state[i].fifo_load,
				 cob_rate);
	}

	pixel_rate = load_state->hvs_load;
	if (num_outputs > 1) {
		pixel_rate = (pixel_rate * 40) / 100;
	} else {
		pixel_rate = (pixel_rate * 60) / 100;
	}

	hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);

	return 0;
}

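/*
 * Check ordering matters here: the muxing check assigns the channels
 * that the CTM check relies on, both before the generic helper check,
 * while the load tracker and core clock checks use plane state the
 * helpers compute.
 */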
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_load_tracker_atomic_check(state);
	if (ret)
		return ret;

	return vc4_core_clock_atomic_check(state);
}

static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
	.atomic_commit_setup = vc4_atomic_commit_setup,
	.atomic_commit_tail = vc4_atomic_commit_tail,
};

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};

static const struct drm_mode_config_funcs vc5_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = drm_gem_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	/*
	 * The limits enforced by the load tracker aren't relevant for
	 * the BCM2711, but the load tracker computations are used for
	 * the core clock rate calculation.
	 */
	if (!vc4->is_vc5) {
		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	if (vc4->is_vc5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
	dev->mode_config.helper_private = &vc4_mode_config_helpers;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}