// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;

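/*
 * Return the current queue page and advance the iterator by one
 * hardware page. Returns NULL once the end of the queue is reached,
 * or if the iterator turns out not to sit on an EHEA page boundary.
 */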
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
    void *retvalue = hw_qeit_get(queue);

    queue->current_q_offset += queue->pagesize;
    if (queue->current_q_offset > queue->queue_length) {
        queue->current_q_offset -= queue->pagesize;
        retvalue = NULL;
    } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
        pr_err("not on pageboundary\n");
        retvalue = NULL;
    }
    return retvalue;
}

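/*
 * Allocate the backing store for a hardware queue: nr_of_pages queue
 * pages of pagesize bytes each, carved out of zeroed kernel pages.
 * Queue pages may be smaller than kernel pages, so one kernel page
 * can back several consecutive queue pages.
 */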
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
              const u32 pagesize, const u32 qe_size)
{
    int pages_per_kpage = PAGE_SIZE / pagesize;
    int i, k;

    if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
        pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
               (int)PAGE_SIZE, (int)pagesize);
        return -EINVAL;
    }

    queue->queue_length = nr_of_pages * pagesize;
    /* zeroed so the out_nomem path can tell used from unused slots */
    queue->queue_pages = kcalloc(nr_of_pages, sizeof(void *), GFP_KERNEL);
    if (!queue->queue_pages)
        return -ENOMEM;

    /*
     * allocate pages for queue:
     * outer loop allocates whole kernel pages (page aligned) and
     * inner loop divides a kernel page into smaller hea queue pages
     */
    i = 0;
    while (i < nr_of_pages) {
        u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
        if (!kpage)
            goto out_nomem;
        for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
            (queue->queue_pages)[i] = (struct ehea_page *)kpage;
            kpage += pagesize;
            i++;
        }
    }

    queue->current_q_offset = 0;
    queue->qe_size = qe_size;
    queue->pagesize = pagesize;
    queue->toggle_state = 1;

    return 0;
out_nomem:
    for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
        if (!(queue->queue_pages)[i])
            break;
        free_page((unsigned long)(queue->queue_pages)[i]);
    }
    return -ENOMEM;
}

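/*
 * Undo hw_queue_ctor(): free the kernel pages backing the queue (one
 * per pages_per_kpage queue pages) and the page pointer array itself.
 */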
static void hw_queue_dtor(struct hw_queue *queue)
{
    int pages_per_kpage;
    int i, nr_pages;

    if (!queue || !queue->queue_pages)
        return;

    pages_per_kpage = PAGE_SIZE / queue->pagesize;

    nr_pages = queue->queue_length / queue->pagesize;

    for (i = 0; i < nr_pages; i += pages_per_kpage)
        free_page((unsigned long)(queue->queue_pages)[i]);

    kfree(queue->queue_pages);
}

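/*
 * Allocate a completion queue from the hypervisor and register its
 * pages. The firmware is expected to answer H_PAGE_REGISTERED while
 * more pages are outstanding and H_SUCCESS for the final page; any
 * other outcome tears the CQ down again. Returns NULL on failure.
 */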
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
                   int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
    struct ehea_cq *cq;
    u64 hret, rpage;
    u32 counter;
    int ret;
    void *vpage;

    cq = kzalloc(sizeof(*cq), GFP_KERNEL);
    if (!cq)
        goto out_nomem;

    cq->attr.max_nr_of_cqes = nr_of_cqe;
    cq->attr.cq_token = cq_token;
    cq->attr.eq_handle = eq_handle;

    cq->adapter = adapter;

    hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
                    &cq->fw_handle, &cq->epas);
    if (hret != H_SUCCESS) {
        pr_err("alloc_resource_cq failed\n");
        goto out_freemem;
    }

    ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
                EHEA_PAGESIZE, sizeof(struct ehea_cqe));
    if (ret)
        goto out_freeres;

    for (counter = 0; counter < cq->attr.nr_pages; counter++) {
        vpage = hw_qpageit_get_inc(&cq->hw_queue);
        if (!vpage) {
            pr_err("hw_qpageit_get_inc failed\n");
            goto out_kill_hwq;
        }

        rpage = __pa(vpage);
        hret = ehea_h_register_rpage(adapter->handle,
                         0, EHEA_CQ_REGISTER_ORIG,
                         cq->fw_handle, rpage, 1);
        if (hret < H_SUCCESS) {
            pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
                   cq, hret, counter, cq->attr.nr_pages);
            goto out_kill_hwq;
        }

        if (counter == (cq->attr.nr_pages - 1)) {
            vpage = hw_qpageit_get_inc(&cq->hw_queue);

            if ((hret != H_SUCCESS) || (vpage)) {
                pr_err("registration of pages not complete hret=%llx\n",
                       hret);
                goto out_kill_hwq;
            }
        } else {
            if (hret != H_PAGE_REGISTERED) {
                pr_err("CQ: registration of page failed hret=%llx\n",
                       hret);
                goto out_kill_hwq;
            }
        }
    }

    hw_qeit_reset(&cq->hw_queue);
    ehea_reset_cq_ep(cq);
    ehea_reset_cq_n1(cq);

    return cq;

out_kill_hwq:
    hw_queue_dtor(&cq->hw_queue);

out_freeres:
    ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
    kfree(cq);

out_nomem:
    return NULL;
}

static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
    u64 hret;
    u64 adapter_handle = cq->adapter->handle;

    /* deregister all previously registered pages */
    hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
    if (hret != H_SUCCESS)
        return hret;

    hw_queue_dtor(&cq->hw_queue);
    kfree(cq);

    return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
    u64 hret, aer, aerr;

    if (!cq)
        return 0;

    hcp_epas_dtor(&cq->epas);
    hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
    if (hret == H_R_STATE) {
        ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
        hret = ehea_destroy_cq_res(cq, FORCE_FREE);
    }

    if (hret != H_SUCCESS) {
        pr_err("destroy CQ failed\n");
        return -EIO;
    }

    return 0;
}

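/*
 * Allocate an event queue and register its pages with the hypervisor,
 * mirroring ehea_create_cq(): intermediate pages must come back as
 * H_PAGE_REGISTERED, the last one as H_SUCCESS. Returns NULL on
 * failure.
 */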
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                   const enum ehea_eq_type type,
                   const u32 max_nr_of_eqes, const u8 eqe_gen)
{
    int ret, i;
    u64 hret, rpage;
    void *vpage;
    struct ehea_eq *eq;

    eq = kzalloc(sizeof(*eq), GFP_KERNEL);
    if (!eq)
        return NULL;

    eq->adapter = adapter;
    eq->attr.type = type;
    eq->attr.max_nr_of_eqes = max_nr_of_eqes;
    eq->attr.eqe_gen = eqe_gen;
    spin_lock_init(&eq->spinlock);

    hret = ehea_h_alloc_resource_eq(adapter->handle,
                    &eq->attr, &eq->fw_handle);
    if (hret != H_SUCCESS) {
        pr_err("alloc_resource_eq failed\n");
        goto out_freemem;
    }

    ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
                EHEA_PAGESIZE, sizeof(struct ehea_eqe));
    if (ret) {
        pr_err("can't allocate eq pages\n");
        goto out_freeres;
    }

    for (i = 0; i < eq->attr.nr_pages; i++) {
        vpage = hw_qpageit_get_inc(&eq->hw_queue);
        if (!vpage) {
            pr_err("hw_qpageit_get_inc failed\n");
            hret = H_RESOURCE;
            goto out_kill_hwq;
        }

        rpage = __pa(vpage);

        hret = ehea_h_register_rpage(adapter->handle, 0,
                         EHEA_EQ_REGISTER_ORIG,
                         eq->fw_handle, rpage, 1);

        if (i == (eq->attr.nr_pages - 1)) {
            /* last page */
            vpage = hw_qpageit_get_inc(&eq->hw_queue);
            if ((hret != H_SUCCESS) || (vpage))
                goto out_kill_hwq;
        } else {
            if (hret != H_PAGE_REGISTERED)
                goto out_kill_hwq;
        }
    }

    hw_qeit_reset(&eq->hw_queue);
    return eq;

out_kill_hwq:
    hw_queue_dtor(&eq->hw_queue);

out_freeres:
    ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
    kfree(eq);
    return NULL;
}

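/*
 * Fetch the next valid EQ entry, if any. The spinlock serializes
 * concurrent pollers against each other and against EQ destruction.
 */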
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
    struct ehea_eqe *eqe;
    unsigned long flags;

    spin_lock_irqsave(&eq->spinlock, flags);
    eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
    spin_unlock_irqrestore(&eq->spinlock, flags);

    return eqe;
}

static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
    u64 hret;
    unsigned long flags;

    spin_lock_irqsave(&eq->spinlock, flags);

    hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
    spin_unlock_irqrestore(&eq->spinlock, flags);

    if (hret != H_SUCCESS)
        return hret;

    hw_queue_dtor(&eq->hw_queue);
    kfree(eq);

    return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
    u64 hret, aer, aerr;

    if (!eq)
        return 0;

    hcp_epas_dtor(&eq->epas);

    hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
    if (hret == H_R_STATE) {
        ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
        hret = ehea_destroy_eq_res(eq, FORCE_FREE);
    }

    if (hret != H_SUCCESS) {
        pr_err("destroy EQ failed\n");
        return -EIO;
    }

    return 0;
}

/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
               int nr_pages, int wqe_size, int act_nr_sges,
               struct ehea_adapter *adapter, int h_call_q_selector)
{
    u64 hret, rpage;
    int ret, cnt;
    void *vpage;

    ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
    if (ret)
        return ret;

    for (cnt = 0; cnt < nr_pages; cnt++) {
        vpage = hw_qpageit_get_inc(hw_queue);
        if (!vpage) {
            pr_err("hw_qpageit_get_inc failed\n");
            goto out_kill_hwq;
        }
        rpage = __pa(vpage);
        hret = ehea_h_register_rpage(adapter->handle,
                         0, h_call_q_selector,
                         qp->fw_handle, rpage, 1);
        if (hret < H_SUCCESS) {
            pr_err("register_rpage_qp failed\n");
            goto out_kill_hwq;
        }
    }
    hw_qeit_reset(hw_queue);
    return 0;

out_kill_hwq:
    hw_queue_dtor(hw_queue);
    return -EIO;
}

static inline u32 map_wqe_size(u8 wqe_enc_size)
{
    return 128 << wqe_enc_size;
}

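/*
 * Allocate a queue pair and register its send queue and up to three
 * receive queues. The hypervisor reports WQE sizes as encoded values,
 * expanded via map_wqe_size() (128 << enc). Returns NULL on failure.
 */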
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
                   u32 pd, struct ehea_qp_init_attr *init_attr)
{
    int ret;
    u64 hret;
    struct ehea_qp *qp;
    u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
    u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

    qp = kzalloc(sizeof(*qp), GFP_KERNEL);
    if (!qp)
        return NULL;

    qp->adapter = adapter;

    hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
                    &qp->fw_handle, &qp->epas);
    if (hret != H_SUCCESS) {
        pr_err("ehea_h_alloc_resource_qp failed\n");
        goto out_freemem;
    }

    wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
    wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
    wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
    wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

    ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
                     wqe_size_in_bytes_sq,
                     init_attr->act_wqe_size_enc_sq, adapter,
                     0);
    if (ret) {
        pr_err("can't register for sq ret=%x\n", ret);
        goto out_freeres;
    }

    ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
                     init_attr->nr_rq1_pages,
                     wqe_size_in_bytes_rq1,
                     init_attr->act_wqe_size_enc_rq1,
                     adapter, 1);
    if (ret) {
        pr_err("can't register for rq1 ret=%x\n", ret);
        goto out_kill_hwsq;
    }

    if (init_attr->rq_count > 1) {
        ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
                         init_attr->nr_rq2_pages,
                         wqe_size_in_bytes_rq2,
                         init_attr->act_wqe_size_enc_rq2,
                         adapter, 2);
        if (ret) {
            pr_err("can't register for rq2 ret=%x\n", ret);
            goto out_kill_hwr1q;
        }
    }

    if (init_attr->rq_count > 2) {
        ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
                         init_attr->nr_rq3_pages,
                         wqe_size_in_bytes_rq3,
                         init_attr->act_wqe_size_enc_rq3,
                         adapter, 3);
        if (ret) {
            pr_err("can't register for rq3 ret=%x\n", ret);
            goto out_kill_hwr2q;
        }
    }

    qp->init_attr = *init_attr;

    return qp;

out_kill_hwr2q:
    hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
    hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
    hw_queue_dtor(&qp->hw_squeue);

out_freeres:
    ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
    ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
    kfree(qp);
    return NULL;
}

static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
    u64 hret;
    struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

    ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
    hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
    if (hret != H_SUCCESS)
        return hret;

    hw_queue_dtor(&qp->hw_squeue);
    hw_queue_dtor(&qp->hw_rqueue1);

    if (qp_attr->rq_count > 1)
        hw_queue_dtor(&qp->hw_rqueue2);
    if (qp_attr->rq_count > 2)
        hw_queue_dtor(&qp->hw_rqueue3);
    kfree(qp);

    return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
    u64 hret, aer, aerr;

    if (!qp)
        return 0;

    hcp_epas_dtor(&qp->epas);

    hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
    if (hret == H_R_STATE) {
        ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
        hret = ehea_destroy_qp_res(qp, FORCE_FREE);
    }

    if (hret != H_SUCCESS) {
        pr_err("destroy QP failed\n");
        return -EIO;
    }

    return 0;
}

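/*
 * The busmap translates kernel addresses into the contiguous bus
 * address range covered by the kernel memory region. It is a
 * three-level table (top/dir/idx) indexed by memory section number;
 * ehea_calc_index() extracts one level's index from a section number.
 */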
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
    return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
                     int dir)
{
    if (!ehea_top_bmap->dir[dir]) {
        ehea_top_bmap->dir[dir] =
            kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
        if (!ehea_top_bmap->dir[dir])
            return -ENOMEM;
    }
    return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
    if (!ehea_bmap->top[top]) {
        ehea_bmap->top[top] =
            kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
        if (!ehea_bmap->top[top])
            return -ENOMEM;
    }
    return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0

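/*
 * Walk the whole busmap and hand out ascending, EHEA_SECTSIZE-spaced
 * bus addresses to every valid entry, so the mapped sections form one
 * contiguous bus address range starting at EHEA_BUSMAP_START. Dir
 * tables left without any valid entries are freed along the way.
 */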
static void ehea_rebuild_busmap(void)
{
    u64 vaddr = EHEA_BUSMAP_START;
    int top, dir, idx;

    for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
        struct ehea_top_bmap *ehea_top;
        int valid_dir_entries = 0;

        if (!ehea_bmap->top[top])
            continue;
        ehea_top = ehea_bmap->top[top];
        for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
            struct ehea_dir_bmap *ehea_dir;
            int valid_entries = 0;

            if (!ehea_top->dir[dir])
                continue;
            valid_dir_entries++;
            ehea_dir = ehea_top->dir[dir];
            for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
                if (!ehea_dir->ent[idx])
                    continue;
                valid_entries++;
                ehea_dir->ent[idx] = vaddr;
                vaddr += EHEA_SECTSIZE;
            }
            if (!valid_entries) {
                ehea_top->dir[dir] = NULL;
                kfree(ehea_dir);
            }
        }
        if (!valid_dir_entries) {
            ehea_bmap->top[top] = NULL;
            kfree(ehea_top);
        }
    }
}

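/*
 * Mark the memory sections covering [pfn, pfn + nr_pages) as valid
 * (add) or invalid (remove) in the busmap, allocating intermediate
 * tables as needed, then rebuild the contiguous bus addresses.
 * Caller holds ehea_busmap_mutex.
 */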
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
    unsigned long i, start_section, end_section;

    if (!nr_pages)
        return 0;

    if (!ehea_bmap) {
        ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
        if (!ehea_bmap)
            return -ENOMEM;
    }

    start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
    end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
    /* Mark entries as valid or invalid only; address is assigned later */
    for (i = start_section; i < end_section; i++) {
        u64 flag;
        int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
        int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
        int idx = i & EHEA_INDEX_MASK;

        if (add) {
            int ret = ehea_init_bmap(ehea_bmap, top, dir);

            if (ret)
                return ret;
            flag = 1; /* valid */
            ehea_mr_len += EHEA_SECTSIZE;
        } else {
            if (!ehea_bmap->top[top])
                continue;
            if (!ehea_bmap->top[top]->dir[dir])
                continue;
            flag = 0; /* invalid */
            ehea_mr_len -= EHEA_SECTSIZE;
        }

        ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
    }
    ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
    return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
    int ret;

    mutex_lock(&ehea_busmap_mutex);
    ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
    mutex_unlock(&ehea_busmap_mutex);
    return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
    int ret;

    mutex_lock(&ehea_busmap_mutex);
    ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
    mutex_unlock(&ehea_busmap_mutex);
    return ret;
}

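/*
 * A pfn counts as the start of a hugepage if it is aligned to the
 * hugepage size and the backing compound page's shift, as reported by
 * page_shift(), matches EHEA_HUGEPAGESHIFT (16GB hugepages).
 */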
static int ehea_is_hugepage(unsigned long pfn)
{
    if (pfn & EHEA_HUGEPAGE_PFN_MASK)
        return 0;

    if (page_shift(pfn_to_page(pfn)) != EHEA_HUGEPAGESHIFT)
        return 0;

    return 1;
}

static int ehea_create_busmap_callback(unsigned long initial_pfn,
                       unsigned long total_nr_pages, void *arg)
{
    int ret;
    unsigned long pfn, start_pfn, end_pfn, nr_pages;

    if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
        return ehea_update_busmap(initial_pfn, total_nr_pages,
                      EHEA_BUSMAP_ADD_SECT);

    /* Given chunk is >= 16GB -> check for hugepages */
    start_pfn = initial_pfn;
    end_pfn = initial_pfn + total_nr_pages;
    pfn = start_pfn;

    while (pfn < end_pfn) {
        if (ehea_is_hugepage(pfn)) {
            /* Add mem found in front of the hugepage */
            nr_pages = pfn - start_pfn;
            ret = ehea_update_busmap(start_pfn, nr_pages,
                         EHEA_BUSMAP_ADD_SECT);
            if (ret)
                return ret;

            /* Skip the hugepage */
            pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
            start_pfn = pfn;
        } else
            pfn += (EHEA_SECTSIZE / PAGE_SIZE);
    }

    /* Add mem found behind the hugepage(s) */
    nr_pages = pfn - start_pfn;
    return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

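/*
 * Populate the busmap from all system RAM. RAM chunks of at least
 * 16GB (EHEA_HUGEPAGE_SIZE) are scanned for hugepages, which are left
 * out of the map; everything else is added section by section.
 */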
int ehea_create_busmap(void)
{
    int ret;

    mutex_lock(&ehea_busmap_mutex);
    ehea_mr_len = 0;
    ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
                   ehea_create_busmap_callback);
    mutex_unlock(&ehea_busmap_mutex);
    return ret;
}

void ehea_destroy_busmap(void)
{
    int top, dir;

    mutex_lock(&ehea_busmap_mutex);
    if (!ehea_bmap)
        goto out_destroy;

    for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
        if (!ehea_bmap->top[top])
            continue;

        for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
            if (!ehea_bmap->top[top]->dir[dir])
                continue;

            kfree(ehea_bmap->top[top]->dir[dir]);
        }

        kfree(ehea_bmap->top[top]);
    }

    kfree(ehea_bmap);
    ehea_bmap = NULL;
out_destroy:
    mutex_unlock(&ehea_busmap_mutex);
}

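/*
 * Translate a kernel address into the HEA bus address space set up by
 * ehea_create_busmap(), or return EHEA_INVAL_ADDR if the section
 * holding the address was never mapped.
 */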
u64 ehea_map_vaddr(void *caddr)
{
    int top, dir, idx;
    unsigned long index, offset;

    if (!ehea_bmap)
        return EHEA_INVAL_ADDR;

    index = __pa(caddr) >> SECTION_SIZE_BITS;
    top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
    if (!ehea_bmap->top[top])
        return EHEA_INVAL_ADDR;

    dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
    if (!ehea_bmap->top[top]->dir[dir])
        return EHEA_INVAL_ADDR;

    idx = index & EHEA_INDEX_MASK;
    if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
        return EHEA_INVAL_ADDR;

    offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
    return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
    unsigned long ret = idx;

    ret |= dir << EHEA_DIR_INDEX_SHIFT;
    ret |= top << EHEA_TOP_INDEX_SHIFT;
    return __va(ret << SECTION_SIZE_BITS);
}

static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
                   struct ehea_adapter *adapter,
                   struct ehea_mr *mr)
{
    void *pg;
    u64 j, m, hret;
    unsigned long k = 0;
    u64 pt_abs = __pa(pt);

    void *sectbase = ehea_calc_sectbase(top, dir, idx);

    for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
        for (m = 0; m < EHEA_MAX_RPAGE; m++) {
            pg = sectbase + ((k++) * EHEA_PAGESIZE);
            pt[m] = __pa(pg);
        }
        hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
                        0, pt_abs, EHEA_MAX_RPAGE);

        if ((hret != H_SUCCESS) &&
            (hret != H_PAGE_REGISTERED)) {
            ehea_h_free_resource(adapter->handle, mr->handle,
                         FORCE_FREE);
            pr_err("register_rpage_mr failed\n");
            return hret;
        }
    }
    return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
                struct ehea_adapter *adapter,
                struct ehea_mr *mr)
{
    u64 hret = H_SUCCESS;
    int idx;

    for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
        if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
            continue;

        hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
        if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
            return hret;
    }
    return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
                    struct ehea_adapter *adapter,
                    struct ehea_mr *mr)
{
    u64 hret = H_SUCCESS;
    int dir;

    for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
        if (!ehea_bmap->top[top]->dir[dir])
            continue;

        hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
        if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
            return hret;
    }
    return hret;
}

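/*
 * Register one memory region spanning all busmap sections with the
 * hypervisor. A zeroed kernel page serves as scratch space for the
 * rpage lists handed to ehea_h_register_rpage_mr(); each valid
 * section is registered in batches of EHEA_MAX_RPAGE pages.
 */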
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
    int ret;
    u64 *pt;
    u64 hret;
    u32 acc_ctrl = EHEA_MR_ACC_CTRL;
    unsigned long top;

    pt = (void *)get_zeroed_page(GFP_KERNEL);
    if (!pt) {
        pr_err("no mem\n");
        ret = -ENOMEM;
        goto out;
    }

    hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
                    ehea_mr_len, acc_ctrl, adapter->pd,
                    &mr->handle, &mr->lkey);

    if (hret != H_SUCCESS) {
        pr_err("alloc_resource_mr failed\n");
        ret = -EIO;
        goto out;
    }

    if (!ehea_bmap) {
        ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
        pr_err("no busmap available\n");
        ret = -EIO;
        goto out;
    }

    for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
        if (!ehea_bmap->top[top])
            continue;

        hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
        if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
            break;
    }

    if (hret != H_SUCCESS) {
        ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
        pr_err("registering mr failed\n");
        ret = -EIO;
        goto out;
    }

    mr->vaddr = EHEA_BUSMAP_START;
    mr->adapter = adapter;
    ret = 0;
out:
    free_page((unsigned long)pt);
    return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
    u64 hret;

    if (!mr || !mr->adapter)
        return -EINVAL;

    hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
                    FORCE_FREE);
    if (hret != H_SUCCESS) {
        pr_err("destroy MR failed\n");
        return -EIO;
    }

    return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
         struct ehea_mr *shared_mr)
{
    u64 hret;

    hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
                   old_mr->vaddr, EHEA_MR_ACC_CTRL,
                   adapter->pd, shared_mr);
    if (hret != H_SUCCESS)
        return -EIO;

    shared_mr->adapter = adapter;

    return 0;
}

static void print_error_data(u64 *data)
{
    int length;
    u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
    u64 resource = data[1];

    length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

    if (length > EHEA_PAGESIZE)
        length = EHEA_PAGESIZE;

    if (type == EHEA_AER_RESTYPE_QP)
        pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
               resource, data[6], data[12], data[22]);
    else if (type == EHEA_AER_RESTYPE_CQ)
        pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
               resource, data[6]);
    else if (type == EHEA_AER_RESTYPE_EQ)
        pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
               resource, data[6]);

    ehea_dump(data, length, "error data");
}

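/*
 * Fetch and dump the error data block for a resource handle. On
 * H_SUCCESS the AER/AERR words are copied out for the caller. Returns
 * the resource type from the block, or 0 if nothing could be fetched.
 */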
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
            u64 *aer, u64 *aerr)
{
    unsigned long ret;
    u64 *rblock;
    u64 type = 0;

    rblock = (void *)get_zeroed_page(GFP_KERNEL);
    if (!rblock) {
        pr_err("Cannot allocate rblock memory\n");
        goto out;
    }

    ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

    if (ret == H_SUCCESS) {
        type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
        *aer = rblock[6];
        *aerr = rblock[12];
        print_error_data(rblock);
    } else if (ret == H_R_STATE) {
        pr_err("No error data available: %llX\n", res_handle);
    } else {
        pr_err("Error data could not be fetched: %llX\n", res_handle);
    }

    free_page((unsigned long)rblock);
out:
    return type;
}