// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_pm.h"

#define CC_MAX_POLL_ITER 10

#define CC_MAX_DESC_SEQ_LEN 23

struct cc_req_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	u32 req_queue_head;
	u32 req_queue_tail;
	u32 axi_completed;
	u32 q_free_slots;
	/* This lock protects access to HW register
	 * that must be single request at a time
	 */
	spinlock_t hw_lock;
	struct cc_hw_desc compl_desc;
	u8 *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;

	/* backlog queue */
	struct list_head backlog;
	unsigned int bl_len;
	spinlock_t bl_lock; /* protect backlog queue */

#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
};

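/*
 * struct cc_bl_item - A backlogged request
 *
 * Holds a copy of the request and its descriptor sequence while it waits on
 * the backlog list for room in the HW descriptor queue.
 */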
struct cc_bl_item {
	struct cc_crypto_req creq;
	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
	unsigned int len;
	struct list_head list;
	bool notif;
};

static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
};

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif

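/*
 * cc_cpp_int_mask() - Look up the "REE op aborted" interrupt bit for a given
 * CPP algorithm and slot. Both indices are sanitized with
 * array_index_nospec() to avoid speculative out-of-bounds reads.
 */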
static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
{
	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);

	return cc_cpp_int_masks[alg][slot];
}

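/**
 * cc_req_mgr_fini() - Release request manager resources
 * @drvdata: Associated device driver context
 *
 * Frees the dummy completion buffer, stops the completion workqueue or
 * tasklet and releases the request manager handle.
 */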
void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!req_mgr_h)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma) {
		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						req_mgr_h->min_free_hw_slots));
	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	kfree_sensitive(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}

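/**
 * cc_req_mgr_init() - Allocate and initialize the request manager
 * @drvdata: Associated device driver context
 *
 * Sets up the SW request queue and completion workqueue/tasklet, reads the
 * HW queue size and prepares the "dummy" completion descriptor used to mark
 * the end of a sequence.
 *
 * Return: 0 on success, negative error code otherwise.
 */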
int cc_req_mgr_init(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
	if (!req_mgr_h) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
	spin_lock_init(&req_mgr_h->bl_lock);
	INIT_LIST_HEAD(&req_mgr_h->backlog);

#ifdef COMP_IN_WQ
	dev_dbg(dev, "Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("ccree");
	if (!req_mgr_h->workq) {
		dev_err(dev, "Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	dev_dbg(dev, "Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler,
		     (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff =
		dma_alloc_coherent(dev, sizeof(u32),
				   &req_mgr_h->dummy_comp_buff_dma,
				   GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
			sizeof(u32));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	/* Init. "dummy" completion descriptor */
	hw_desc_init(&req_mgr_h->compl_desc);
	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
		      sizeof(u32), NS_BIT, 1);
	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);

	return 0;

req_mgr_init_err:
	cc_req_mgr_fini(drvdata);
	return rc;
}

static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
			unsigned int seq_len)
{
	int i, w;
	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
	struct device *dev = drvdata_to_dev(drvdata);

	/*
	 * We do indeed write all 6 command words to the same
	 * register. The HW supports this.
	 */

	for (i = 0; i < seq_len; i++) {
		for (w = 0; w <= 5; w++)
			writel_relaxed(seq[i].word[w], reg);

		if (cc_dump_desc)
			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
				i, seq[i].word[0], seq[i].word[1],
				seq[i].word[2], seq[i].word[3],
				seq[i].word[4], seq[i].word[5]);
	}
}

/**
 * request_mgr_complete() - Completion will take place if and only if user
 * requested completion by cc_send_sync_request().
 *
 * @dev: Device pointer
 * @dx_compl_h: The completion event to signal
 * @dummy: unused error code
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
				 int dummy)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}

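/*
 * cc_queues_status() - Check that there is room for a new request: a free
 * slot in the SW request queue and at least total_seq_len free slots in the
 * HW descriptor queue (polled up to CC_MAX_POLL_ITER times).
 *
 * Return: 0 if the request fits, -ENOSPC otherwise.
 */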
static int cc_queues_status(struct cc_drvdata *drvdata,
			    struct cc_req_mgr_handle *req_mgr_h,
			    unsigned int total_seq_len)
{
	unsigned long poll_queue;
	struct device *dev = drvdata_to_dev(drvdata);

	/* SW queue is checked only once as it will not
	 * be changed during the poll because the spinlock_bh
	 * is held by the thread
	 */
	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
	    req_mgr_h->req_queue_tail) {
		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -ENOSPC;
	}

	if (req_mgr_h->q_free_slots >= total_seq_len)
		return 0;

	/* Wait for space in HW queue. Poll constant num of iterations */
	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;

		if (req_mgr_h->q_free_slots >= total_seq_len) {
			/* If there is enough place return */
			return 0;
		}

		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue, try again later */
	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
		req_mgr_h->q_free_slots, total_seq_len);
	return -ENOSPC;
}

/**
 * cc_do_send_request() - Enqueue caller request to crypto hardware.
 * Need to be called with HW lock held and PM running
 *
 * @drvdata: Associated device driver context
 * @cc_req: The request to enqueue
 * @desc: The crypto sequence
 * @len: The crypto sequence length
 * @add_comp: If "true": add an artificial dout DMA to mark completion
 */
static void cc_do_send_request(struct cc_drvdata *drvdata,
			       struct cc_crypto_req *cc_req,
			       struct cc_hw_desc *desc, unsigned int len,
			       bool add_comp)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int total_seq_len = len;
	struct device *dev = drvdata_to_dev(drvdata);

	used_sw_slots = ((req_mgr_h->req_queue_head -
			  req_mgr_h->req_queue_tail) &
			 (MAX_REQUEST_QUEUE_SIZE - 1));
	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
		req_mgr_h->max_used_sw_slots = used_sw_slots;

	/* Enqueue request - must be locked with HW lock */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
				    (MAX_REQUEST_QUEUE_SIZE - 1);

	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);

	/*
	 * We are about to push command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes
	 */
	wmb();

	/* Push sequence */

	enqueue_seq(drvdata, desc, len);

	if (add_comp) {
		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
		total_seq_len++;
	}

	if (req_mgr_h->q_free_slots < total_seq_len) {
		/* This situation should never occur. Maybe indicating problem
		 * with resuming power. Set the free slot count to 0 and hope
		 * for the best.
		 */
		dev_err(dev, "HW free slot count mismatch.");
		req_mgr_h->q_free_slots = 0;
	} else {
		/* Update the free slots in HW queue */
		req_mgr_h->q_free_slots -= total_seq_len;
	}
}

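/*
 * cc_enqueue_backlog() - Add a request to the tail of the backlog queue and
 * kick the completion tasklet, which will push it to HW once space frees up.
 */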
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
			       struct cc_bl_item *bli)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	spin_lock_bh(&mgr->bl_lock);
	list_add_tail(&bli->list, &mgr->backlog);
	++mgr->bl_len;
	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
	spin_unlock_bh(&mgr->bl_lock);
	tasklet_schedule(&mgr->comptask);
}

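/*
 * cc_proc_backlog() - Drain the backlog queue: for each entry, notify the
 * caller with -EINPROGRESS (once) and push the request to HW as soon as the
 * queues have room. Called from the completion handler.
 */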
static void cc_proc_backlog(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct cc_bl_item *bli;
	struct cc_crypto_req *creq;
	void *req;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	spin_lock(&mgr->bl_lock);

	while (mgr->bl_len) {
		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);

		spin_unlock(&mgr->bl_lock);

		creq = &bli->creq;
		req = creq->user_arg;

		/*
		 * Notify the request we're moving out of the backlog
		 * but only if we haven't done so already.
		 */
		if (!bli->notif) {
			creq->user_cb(dev, req, -EINPROGRESS);
			bli->notif = true;
		}

		spin_lock(&mgr->hw_lock);

		rc = cc_queues_status(drvdata, mgr, bli->len);
		if (rc) {
			/*
			 * There is still no room in the FIFO for
			 * this request. Bail out. We'll return here
			 * on the next completion irq.
			 */
			spin_unlock(&mgr->hw_lock);
			return;
		}

		cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
				   false);
		spin_unlock(&mgr->hw_lock);

		/* Remove ourselves from the backlog list */
		spin_lock(&mgr->bl_lock);
		list_del(&bli->list);
		--mgr->bl_len;
		kfree(bli);
	}

	spin_unlock(&mgr->bl_lock);
}

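/*
 * cc_send_request() - Enqueue an asynchronous request. If the queues are full
 * and the caller allows backlogging (CRYPTO_TFM_REQ_MAY_BACKLOG), the request
 * is copied to the backlog and -EBUSY is returned; otherwise returns
 * -EINPROGRESS on successful enqueue or an error code.
 */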
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
		    struct cc_hw_desc *desc, unsigned int len,
		    struct crypto_async_request *req)
{
	int rc;
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
	gfp_t flags = cc_gfp_flags(req);
	struct cc_bl_item *bli;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_pm_get returned %x\n", rc);
		return rc;
	}

	spin_lock_bh(&mgr->hw_lock);
	rc = cc_queues_status(drvdata, mgr, len);

#ifdef CC_DEBUG_FORCE_BACKLOG
	if (backlog_ok)
		rc = -ENOSPC;
#endif

	if (rc == -ENOSPC && backlog_ok) {
		spin_unlock_bh(&mgr->hw_lock);

		bli = kmalloc(sizeof(*bli), flags);
		if (!bli) {
			cc_pm_put_suspend(dev);
			return -ENOMEM;
		}

		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
		memcpy(&bli->desc, desc, len * sizeof(*desc));
		bli->len = len;
		bli->notif = false;
		cc_enqueue_backlog(drvdata, bli);
		return -EBUSY;
	}

	if (!rc) {
		cc_do_send_request(drvdata, cc_req, desc, len, false);
		rc = -EINPROGRESS;
	}

	spin_unlock_bh(&mgr->hw_lock);
	return rc;
}

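/*
 * cc_send_sync_request() - Enqueue a request together with the artificial
 * completion descriptor and block until the sequence completes. Waits
 * (interruptibly) for HW queue space to become available if needed.
 */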
int cc_send_sync_request(struct cc_drvdata *drvdata,
			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
			 unsigned int len)
{
	int rc;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	init_completion(&cc_req->seq_compl);
	cc_req->user_cb = request_mgr_complete;
	cc_req->user_arg = &cc_req->seq_compl;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_pm_get returned %x\n", rc);
		return rc;
	}

	while (true) {
		spin_lock_bh(&mgr->hw_lock);
		rc = cc_queues_status(drvdata, mgr, len + 1);

		if (!rc)
			break;

		spin_unlock_bh(&mgr->hw_lock);
		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
		reinit_completion(&drvdata->hw_queue_avail);
	}

	cc_do_send_request(drvdata, cc_req, desc, len, true);
	spin_unlock_bh(&mgr->hw_lock);
	wait_for_completion(&cc_req->seq_compl);
	return 0;
}

/**
 * send_request_init() - Enqueue caller request to crypto hardware during init
 * process.
 * Assume this function is not called in the middle of a flow,
 * since we set QUEUE_LAST_IND flag in the last descriptor.
 *
 * @drvdata: Associated device driver context
 * @desc: The crypto sequence
 * @len: The crypto sequence length
 *
 * Return:
 * Returns "0" upon success
 */
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
		      unsigned int len)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /* initial sequence length */
	int rc = 0;

	/* Wait for space in the HW and SW FIFOs */
	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
	if (rc)
		return rc;

	set_queue_last_ind(drvdata, &desc[(len - 1)]);

	/*
	 * We are about to push command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes
	 */
	wmb();
	enqueue_seq(drvdata, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots =
		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));

	return 0;
}

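/*
 * complete_request() - Kick off deferred completion processing: wake any
 * synchronous waiter blocked on HW queue space and schedule the completion
 * workqueue or tasklet.
 */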
void complete_request(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	complete(&drvdata->hw_queue_avail);
#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq,
			   &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct cc_drvdata *drvdata =
		container_of(work, struct cc_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif

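/*
 * proc_completions() - Dequeue and complete requests from the SW request
 * queue, one per counted AXI completion. For CPP requests, report -EPERM if
 * the matching "op aborted" interrupt bit is set.
 */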
static void proc_completions(struct cc_drvdata *drvdata)
{
	struct cc_crypto_req *cc_req;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	unsigned int *tail = &request_mgr_handle->req_queue_tail;
	unsigned int *head = &request_mgr_handle->req_queue_head;
	int rc;
	u32 mask;

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (*head == *tail) {
			/* We are supposed to handle a completion but our
			 * queue is empty. This is not normal. Return and
			 * hope for the best.
			 */
			dev_err(dev, "Request queue is empty head == tail %u\n",
				*head);
			break;
		}

		cc_req = &request_mgr_handle->req_queue[*tail];

		if (cc_req->cpp.is_cpp) {

			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
				cc_req->cpp.slot, cc_req->cpp.alg);
			mask = cc_cpp_int_mask(cc_req->cpp.alg,
					       cc_req->cpp.slot);
			rc = (drvdata->irq & mask ? -EPERM : 0);
			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
				drvdata->irq, rc);
		} else {
			dev_dbg(dev, "Non-CPP request completion\n");
			rc = 0;
		}

		if (cc_req->user_cb)
			cc_req->user_cb(dev, cc_req->user_arg, rc);
		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
		dev_dbg(dev, "Request completed. axi_completed=%d\n",
			request_mgr_handle->axi_completed);
		cc_pm_put_suspend(dev);
	}
}

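/* Read the number of completed AXI transactions from the AXIM monitor */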
static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
{
	return FIELD_GET(AXIM_MON_COMP_VALUE,
			 cc_ioread(drvdata, drvdata->axim_mon_offset));
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irq;

	dev_dbg(dev, "Completion handler called!\n");
	irq = (drvdata->irq & drvdata->comp_mask);

	/* To avoid the interrupt from firing as we unmask it,
	 * we clear it now
	 */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

	/* Avoid race with above clear: Test completion counter once more */

	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);

	dev_dbg(dev, "AXI completion after updated: %d\n",
		request_mgr_handle->axi_completed);

	while (request_mgr_handle->axi_completed) {
		do {
			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
			irq = (drvdata->irq & drvdata->comp_mask);
			proc_completions(drvdata);

			/* At this point (after proc_completions()),
			 * request_mgr_handle->axi_completed is 0.
			 */
			request_mgr_handle->axi_completed +=
						cc_axi_comp_count(drvdata);
		} while (request_mgr_handle->axi_completed > 0);

		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
	}

	/* After verifying that there is nothing to do,
	 * unmask AXI completion interrupt
	 */
	cc_iowrite(drvdata, CC_REG(HOST_IMR),
		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);

	cc_proc_backlog(drvdata);
	dev_dbg(dev, "Comp. handler done.\n");
}