#define pr_fmt(fmt) "[drm:%s] " fmt, __func__

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"
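
/*
 * A resource is "reserved by other" when it is already assigned to an
 * encoder id different from the one asking for it.
 */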
static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}
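
/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology: selected topology for the display
 */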
struct dpu_rm_requirements {
	struct msm_display_topology topology;
};

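/**
 * dpu_rm_destroy - free all the HW block objects created by dpu_rm_init
 * @rm: dpu resource manager handle
 * Return: 0 on success (currently cannot fail)
 */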
int dpu_rm_destroy(struct dpu_rm *rm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
		struct dpu_hw_dspp *hw;

		if (rm->dspp_blks[i]) {
			hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
			dpu_hw_dspp_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
		struct dpu_hw_pingpong *hw;

		if (rm->pingpong_blks[i]) {
			hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
			dpu_hw_pingpong_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
		struct dpu_hw_merge_3d *hw;

		if (rm->merge_3d_blks[i]) {
			hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
			dpu_hw_merge_3d_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
		struct dpu_hw_mixer *hw;

		if (rm->mixer_blks[i]) {
			hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
			dpu_hw_lm_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
		struct dpu_hw_ctl *hw;

		if (rm->ctl_blks[i]) {
			hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
			dpu_hw_ctl_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
		dpu_hw_intf_destroy(rm->hw_intf[i]);

	for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
		struct dpu_hw_dsc *hw;

		if (rm->dsc_blks[i]) {
			hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
			dpu_hw_dsc_destroy(hw);
		}
	}

	for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
		dpu_hw_wb_destroy(rm->hw_wb[i]);

	return 0;
}

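/**
 * dpu_rm_init - initialize the resource manager by creating one HW block
 *	object per block listed in the hardware catalog
 * @rm: dpu resource manager handle to be initialized
 * @cat: pointer to the hardware catalog describing the blocks on this SoC
 * @mmio: mapped register io address of MDP
 * Return: 0 on success, or -ERROR on failure (all blocks created so far
 *	are destroyed before returning)
 */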
int dpu_rm_init(struct dpu_rm *rm,
		const struct dpu_mdss_cfg *cat,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		if (lm->id < LM_0 || lm->id >= LM_MAX) {
			DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
			continue;
		}
		hw = dpu_hw_lm_init(lm->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		if (merge_3d->id < MERGE_3D_0 || merge_3d->id >= MERGE_3D_MAX) {
			DPU_ERROR("skip merge_3d %d with invalid id\n",
				  merge_3d->id);
			continue;
		}
		hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				  rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
			DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
			continue;
		}
		hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				  rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		if (intf->type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}
		if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
			DPU_ERROR("skip intf %d with invalid id\n", intf->id);
			continue;
		}
		hw = dpu_hw_intf_init(intf->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_intf[intf->id - INTF_0] = hw;
	}

	for (i = 0; i < cat->wb_count; i++) {
		struct dpu_hw_wb *hw;
		const struct dpu_wb_cfg *wb = &cat->wb[i];

		if (wb->id < WB_0 || wb->id >= WB_MAX) {
			DPU_ERROR("skip wb %d with invalid id\n", wb->id);
			continue;
		}

		hw = dpu_hw_wb_init(wb->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed wb object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_wb[wb->id - WB_0] = hw;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
			DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
			continue;
		}
		hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
			DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
			continue;
		}
		hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	for (i = 0; i < cat->dsc_count; i++) {
		struct dpu_hw_dsc *hw;
		const struct dpu_dsc_cfg *dsc = &cat->dsc[i];

		hw = dpu_hw_dsc_init(dsc->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dsc object creation: err %d\n", rc);
			goto fail;
		}
		rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc ? rc : -EFAULT;
}

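/*
 * A topology that drives more than one interface (e.g. dual-DSI panels)
 * needs the CTL blocks configured for split display.
 */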
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}
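
/**
 * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 * @peer_idx: index of other mixer in rm->mixer_blks[]
 * Return: true if rm->mixer_blks[peer_idx] is a peer of
 *	rm->mixer_blks[primary_idx]
 */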
static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
				  int peer_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;
	const struct dpu_lm_cfg *peer_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
	peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;

	if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
		DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
			  prim_lm_cfg->id);
		return false;
	}
	return true;
}
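
/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if the proposed layer mixer
 *	meets the use case requirements, incl. hardwired dependent blocks
 *	like pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the allocation
 * @lm_idx: index of the proposed layer mixer in rm->mixer_blks[]; the
 *	function checks if the lm, and all hardwired blocks connected to
 *	it (pp, dspp), are available and appropriate
 * @pp_idx: output parameter, index of the pingpong block attached to the
 *	layer mixer in rm->pingpong_blks[]
 * @dspp_idx: output parameter, index of the dspp block attached to the
 *	layer mixer in rm->dspp_blks[]
 * @reqs: input parameter, rm requirements for the HW blocks needed in the
 *	datapath
 * Return: true if the lm matches all requirements, false otherwise
 */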
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct dpu_rm_requirements *reqs)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
			  lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!reqs->topology.num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
			  lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

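/*
 * Find a primary mixer with a free pingpong (and dspp, when the topology
 * asks for one), then pull in peer mixers until topology.num_lm mixers
 * have been collected.
 */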
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, j, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], reqs)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
				lm_count < reqs->topology.num_lm; j++) {
			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_peer(rm, i, j)) {
				DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
					  LM_0 + i);
				continue;
			}

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					reqs)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
		global_state->dspp_to_enc_id[dspp_idx[i]] =
			reqs->topology.num_dspp ? enc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}

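/*
 * Reserve one CTL per interface; dual-interface displays need CTLs that
 * advertise DPU_CTL_SPLIT_DISPLAY, single-interface displays need CTLs
 * that don't.
 */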
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
	}

	return 0;
}

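/*
 * DSC blocks are allocated as a contiguous run starting at index 0, so a
 * conflict with another encoder on any slot fails the whole reservation.
 */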
static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       struct drm_encoder *enc,
			       const struct msm_display_topology *top)
{
	int num_dsc = top->num_dsc;
	int i;

	/* check that the requested DSC blocks are not already allocated */
	for (i = 0; i < num_dsc; i++) {
		if (global_state->dsc_to_enc_id[i]) {
			DPU_ERROR("DSC %d is already allocated\n", i);
			return -EIO;
		}
	}

	for (i = 0; i < num_dsc; i++)
		global_state->dsc_to_enc_id[i] = enc->base.id;

	return 0;
}

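/*
 * Reserve the mixers (with their pingpongs/dspps), the CTLs and the DSCs
 * required by the topology, in that order; the first failure aborts the
 * reservation.
 */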
static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
				   &reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	return _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
}

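/* Copy the requested topology into the reservation requirements bundle. */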
static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_enc,
		      reqs->topology.num_intf);

	return 0;
}

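/* Free every slot in @res_mapping currently owned by @enc_id. */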
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}

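/**
 * dpu_rm_release - release the HW blocks reserved by an encoder, marking
 *	every mapping owned by it as free again
 * @global_state: resources shared across multiple kms objects
 * @enc: DRM encoder handle whose reservations are released
 */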
void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
		ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
		ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
}

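/**
 * dpu_rm_reserve - given the encoder and the topology requested by the
 *	display, check availability and reserve the HW blocks needed for
 *	this display chain, writing the assignment into @global_state
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc: DRM encoder handle
 * @crtc_state: proposed atomic DRM CRTC state
 * @topology: requested display topology
 * Return: 0 on success, or -ERROR if the required blocks are unavailable
 */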
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}

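/**
 * dpu_rm_get_assigned_resources - get the HW blocks of @type currently
 *	assigned to the encoder
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the blocks
 * @type: resource manager block type (PINGPONG, LM, CTL, DSPP or DSC)
 * @blks: output array to fill with the assigned struct dpu_hw_blk pointers
 * @blks_size: capacity of @blks
 * Return: number of blocks written to @blks
 *
 * Typical caller pattern (illustrative sketch only; program_ctl() is a
 * hypothetical helper standing in for the caller's own programming code):
 *
 *	struct dpu_hw_blk *hw_ctls[8];
 *	int i, num_ctls;
 *
 *	num_ctls = dpu_rm_get_assigned_resources(rm, global_state,
 *			drm_enc->base.id, DPU_HW_BLK_CTL,
 *			hw_ctls, ARRAY_SIZE(hw_ctls));
 *	for (i = 0; i < num_ctls; i++)
 *		program_ctl(to_dpu_hw_ctl(hw_ctls[i]));
 */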
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_enc_id = global_state->dspp_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	case DPU_HW_BLK_DSC:
		hw_blks = rm->dsc_blks;
		hw_to_enc_id = global_state->dsc_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dsc_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}