// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015-2018 Intel Corporation.
 */

#include <linux/delay.h>
#include "hfi.h"
#include "qp.h"
#include "trace.h"

#define SC(name) SEND_CTXT_##name
/*
 * Send Context functions
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause);

/*
 * Set the CM reset bit and wait for it to clear.  Use the provided
 * sendctrl register.  This routine has no locking.
 */
void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
{
    write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
    while (1) {
        udelay(1);
        sendctrl = read_csr(dd, SEND_CTRL);
        if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
            break;
    }
}

/* global control of PIO send */
void pio_send_control(struct hfi1_devdata *dd, int op)
{
    u64 reg, mask;
    unsigned long flags;
    int write = 1;  /* write sendctrl back */
    int flush = 0;  /* re-read sendctrl to make sure it is flushed */
    int i;

    spin_lock_irqsave(&dd->sendctrl_lock, flags);

    reg = read_csr(dd, SEND_CTRL);
    switch (op) {
    case PSC_GLOBAL_ENABLE:
        reg |= SEND_CTRL_SEND_ENABLE_SMASK;
        fallthrough;
    case PSC_DATA_VL_ENABLE:
        mask = 0;
        for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
            if (!dd->vld[i].mtu)
                mask |= BIT_ULL(i);
        /* Disallow sending on VLs not enabled */
        mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
            SEND_CTRL_UNSUPPORTED_VL_SHIFT;
        reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
        break;
    case PSC_GLOBAL_DISABLE:
        reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
        break;
    case PSC_GLOBAL_VLARB_ENABLE:
        reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
        break;
    case PSC_GLOBAL_VLARB_DISABLE:
        reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
        break;
    case PSC_CM_RESET:
        __cm_reset(dd, reg);
        write = 0; /* CSR already written (and flushed) */
        break;
    case PSC_DATA_VL_DISABLE:
        reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
        flush = 1;
        break;
    default:
        dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
        break;
    }

    if (write) {
        write_csr(dd, SEND_CTRL, reg);
        if (flush)
            (void)read_csr(dd, SEND_CTRL); /* flush write */
    }

    spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}

/* number of send context memory pools */
#define NUM_SC_POOLS 2

/* Send Context Size (SCS) wildcards */
#define SCS_POOL_0 -1
#define SCS_POOL_1 -2

/* Send Context Count (SCC) wildcards */
#define SCC_PER_VL -1
#define SCC_PER_CPU  -2
#define SCC_PER_KRCVQ  -3

/* Send Context Size (SCS) constants */
#define SCS_ACK_CREDITS  32
#define SCS_VL15_CREDITS 102    /* 3 pkts of 2048B data + 128B header */
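/* i.e. 3 * (2048 + 128) bytes / 64-byte PIO blocks = 102 credits */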

#define PIO_THRESHOLD_CEILING 4096

#define PIO_WAIT_BATCH_SIZE 5

/* default send context sizes */
static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
    [SC_KERNEL] = { .size  = SCS_POOL_0,    /* even divide, pool 0 */
            .count = SCC_PER_VL },  /* one per NUMA */
    [SC_ACK]    = { .size  = SCS_ACK_CREDITS,
            .count = SCC_PER_KRCVQ },
    [SC_USER]   = { .size  = SCS_POOL_0,    /* even divide, pool 0 */
            .count = SCC_PER_CPU }, /* one per CPU */
    [SC_VL15]   = { .size  = SCS_VL15_CREDITS,
            .count = 1 },

};

/* send context memory pool configuration */
struct mem_pool_config {
    int centipercent;   /* % of memory, in 100ths of 1% */
    int absolute_blocks;    /* absolute block count */
};

/* default memory pool configuration: 100% in pool 0 */
static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
    /* centi%, abs blocks */
    {  10000,     -1 },     /* pool 0 */
    {      0,     -1 },     /* pool 1 */
};

/* memory pool information, used when calculating final sizes */
struct mem_pool_info {
    int centipercent;   /*
                 * 100th of 1% of memory to use, -1 if blocks
                 * already set
                 */
    int count;      /* count of contexts in the pool */
    int blocks;     /* block size of the pool */
    int size;       /* context size, in blocks */
};

/*
 * Convert a pool wildcard to a valid pool index.  The wildcards
 * start at -1 and increase negatively.  Map them as:
 *  -1 => 0
 *  -2 => 1
 *  etc.
 *
 * Return -1 on non-wildcard input, otherwise convert to a pool number.
 */
static int wildcard_to_pool(int wc)
{
    if (wc >= 0)
        return -1;  /* non-wildcard */
    return -wc - 1;
}
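
/*
 * Example (from the wildcards above): SCS_POOL_0 (-1) maps to pool 0 and
 * SCS_POOL_1 (-2) maps to pool 1, while a fixed size such as
 * SCS_ACK_CREDITS (32) is non-negative and returns -1 (non-wildcard).
 */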

static const char *sc_type_names[SC_MAX] = {
    "kernel",
    "ack",
    "user",
    "vl15"
};

static const char *sc_type_name(int index)
{
    if (index < 0 || index >= SC_MAX)
        return "unknown";
    return sc_type_names[index];
}

/*
 * Read the send context memory pool configuration and send context
 * size configuration.  Replace any wildcards and come up with final
 * counts and sizes for the send context types.
 */
int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
{
    struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
    int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1;
    int total_contexts = 0;
    int fixed_blocks;
    int pool_blocks;
    int used_blocks;
    int cp_total;       /* centipercent total */
    int ab_total;       /* absolute block total */
    int extra;
    int i;

    /*
     * When SDMA is enabled, kernel context pio packet size is capped by
     * "piothreshold". Reduce pio buffer allocation for kernel context by
     * setting it to a fixed size. The allocation allows 3-deep buffering
     * of the largest pio packets plus up to 128 bytes header, sufficient
     * to maintain verbs performance.
     *
     * When SDMA is disabled, keep the default pooling allocation.
     */
    if (HFI1_CAP_IS_KSET(SDMA)) {
        u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
                     piothreshold : PIO_THRESHOLD_CEILING;
        sc_config_sizes[SC_KERNEL].size =
            3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
    }
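    /*
     * Worked example (illustrative numbers): if piothreshold sits at the
     * PIO_THRESHOLD_CEILING of 4096 bytes and PIO_BLOCK_SIZE is 64 bytes,
     * the kernel context size becomes 3 * (4096 + 128) / 64 = 198 blocks.
     */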

    /*
     * Step 0:
     *  - copy the centipercents/absolute sizes from the pool config
     *  - sanity check these values
     *  - add up centipercents, then later check for full value
     *  - add up absolute blocks, then later check for over-commit
     */
    cp_total = 0;
    ab_total = 0;
    for (i = 0; i < NUM_SC_POOLS; i++) {
        int cp = sc_mem_pool_config[i].centipercent;
        int ab = sc_mem_pool_config[i].absolute_blocks;

        /*
         * A negative value is "unused" or "invalid".  Both *can*
         * be valid, but centipercent wins, so check that first
         */
        if (cp >= 0) {          /* centipercent valid */
            cp_total += cp;
        } else if (ab >= 0) {       /* absolute blocks valid */
            ab_total += ab;
        } else {            /* neither valid */
            dd_dev_err(
                dd,
                "Send context memory pool %d: both the block count and centipercent are invalid\n",
                i);
            return -EINVAL;
        }

        mem_pool_info[i].centipercent = cp;
        mem_pool_info[i].blocks = ab;
    }

    /* do not use both % and absolute blocks for different pools */
    if (cp_total != 0 && ab_total != 0) {
        dd_dev_err(
            dd,
            "All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
        return -EINVAL;
    }

    /* if any percentages are present, they must add up to 100% x 100 */
    if (cp_total != 0 && cp_total != 10000) {
        dd_dev_err(
            dd,
            "Send context memory pool centipercent is %d, expecting 10000\n",
            cp_total);
        return -EINVAL;
    }

    /* the absolute pool total cannot be more than the mem total */
    if (ab_total > total_blocks) {
        dd_dev_err(
            dd,
            "Send context memory pool absolute block count %d is larger than the memory size %d\n",
            ab_total, total_blocks);
        return -EINVAL;
    }

    /*
     * Step 2:
     *  - copy from the context size config
     *  - replace context type wildcard counts with real values
     *  - add up non-memory pool block sizes
     *  - add up memory pool user counts
     */
    fixed_blocks = 0;
    for (i = 0; i < SC_MAX; i++) {
        int count = sc_config_sizes[i].count;
        int size = sc_config_sizes[i].size;
        int pool;

        /*
         * Sanity check count: Either a positive value or
         * one of the expected wildcards is valid.  The positive
         * value is checked later when we compare against total
         * memory available.
         */
        if (i == SC_ACK) {
            count = dd->n_krcv_queues;
        } else if (i == SC_KERNEL) {
            count = INIT_SC_PER_VL * num_vls;
        } else if (count == SCC_PER_CPU) {
            count = dd->num_rcv_contexts - dd->n_krcv_queues;
        } else if (count < 0) {
            dd_dev_err(
                dd,
                "%s send context invalid count wildcard %d\n",
                sc_type_name(i), count);
            return -EINVAL;
        }
        if (total_contexts + count > chip_send_contexts(dd))
            count = chip_send_contexts(dd) - total_contexts;

        total_contexts += count;

        /*
         * Sanity check pool: The conversion will return a pool
         * number or -1 if a fixed (non-negative) value.  The fixed
         * value is checked later when we compare against
         * total memory available.
         */
        pool = wildcard_to_pool(size);
        if (pool == -1) {           /* non-wildcard */
            fixed_blocks += size * count;
        } else if (pool < NUM_SC_POOLS) {   /* valid wildcard */
            mem_pool_info[pool].count += count;
        } else {                /* invalid wildcard */
            dd_dev_err(
                dd,
                "%s send context invalid pool wildcard %d\n",
                sc_type_name(i), size);
            return -EINVAL;
        }

        dd->sc_sizes[i].count = count;
        dd->sc_sizes[i].size = size;
    }
    if (fixed_blocks > total_blocks) {
        dd_dev_err(
            dd,
            "Send context fixed block count, %u, larger than total block count %u\n",
            fixed_blocks, total_blocks);
        return -EINVAL;
    }

    /* step 3: calculate the blocks in the pools, and pool context sizes */
    pool_blocks = total_blocks - fixed_blocks;
    if (ab_total > pool_blocks) {
        dd_dev_err(
            dd,
            "Send context fixed pool sizes, %u, larger than pool block count %u\n",
            ab_total, pool_blocks);
        return -EINVAL;
    }
    /* subtract off the fixed pool blocks */
    pool_blocks -= ab_total;

    for (i = 0; i < NUM_SC_POOLS; i++) {
        struct mem_pool_info *pi = &mem_pool_info[i];

        /* % beats absolute blocks */
        if (pi->centipercent >= 0)
            pi->blocks = (pool_blocks * pi->centipercent) / 10000;

        if (pi->blocks == 0 && pi->count != 0) {
            dd_dev_err(
                dd,
                "Send context memory pool %d has %u contexts, but no blocks\n",
                i, pi->count);
            return -EINVAL;
        }
        if (pi->count == 0) {
            /* warn about wasted blocks */
            if (pi->blocks != 0)
                dd_dev_err(
                    dd,
                    "Send context memory pool %d has %u blocks, but zero contexts\n",
                    i, pi->blocks);
            pi->size = 0;
        } else {
            pi->size = pi->blocks / pi->count;
        }
    }

    /* step 4: fill in the context type sizes from the pool sizes */
    used_blocks = 0;
    for (i = 0; i < SC_MAX; i++) {
        if (dd->sc_sizes[i].size < 0) {
            unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);

            WARN_ON_ONCE(pool >= NUM_SC_POOLS);
            dd->sc_sizes[i].size = mem_pool_info[pool].size;
        }
        /* make sure we are not larger than what is allowed by the HW */
#define PIO_MAX_BLOCKS 1024
        if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
            dd->sc_sizes[i].size = PIO_MAX_BLOCKS;

        /* calculate our total usage */
        used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
    }
    extra = total_blocks - used_blocks;
    if (extra != 0)
        dd_dev_info(dd, "unused send context blocks: %d\n", extra);

    return total_contexts;
}

int init_send_contexts(struct hfi1_devdata *dd)
{
    u16 base;
    int ret, i, j, context;

    ret = init_credit_return(dd);
    if (ret)
        return ret;

    dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
                    GFP_KERNEL);
    dd->send_contexts = kcalloc(dd->num_send_contexts,
                    sizeof(struct send_context_info),
                    GFP_KERNEL);
    if (!dd->send_contexts || !dd->hw_to_sw) {
        kfree(dd->hw_to_sw);
        kfree(dd->send_contexts);
        free_credit_return(dd);
        return -ENOMEM;
    }

    /* hardware context map starts with invalid send context indices */
    for (i = 0; i < TXE_NUM_CONTEXTS; i++)
        dd->hw_to_sw[i] = INVALID_SCI;

    /*
     * All send contexts have their credit sizes.  Allocate credits
     * for each context one after another from the global space.
     */
    context = 0;
    base = 1;
    for (i = 0; i < SC_MAX; i++) {
        struct sc_config_sizes *scs = &dd->sc_sizes[i];

        for (j = 0; j < scs->count; j++) {
            struct send_context_info *sci =
                        &dd->send_contexts[context];
            sci->type = i;
            sci->base = base;
            sci->credits = scs->size;

            context++;
            base += scs->size;
        }
    }

    return 0;
}

/*
 * Allocate a software index and hardware context of the given type.
 *
 * Must be called with dd->sc_lock held.
 */
static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
               u32 *hw_context)
{
    struct send_context_info *sci;
    u32 index;
    u32 context;

    for (index = 0, sci = &dd->send_contexts[0];
            index < dd->num_send_contexts; index++, sci++) {
        if (sci->type == type && sci->allocated == 0) {
            sci->allocated = 1;
            /* use a 1:1 mapping, but make them non-equal */
            context = chip_send_contexts(dd) - index - 1;
            dd->hw_to_sw[context] = index;
            *sw_index = index;
            *hw_context = context;
            return 0; /* success */
        }
    }
    dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
    return -ENOSPC;
}

/*
 * Free the send context given by its software index.
 *
 * Must be called with dd->sc_lock held.
 */
static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
{
    struct send_context_info *sci;

    sci = &dd->send_contexts[sw_index];
    if (!sci->allocated) {
        dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
               __func__, sw_index, hw_context);
    }
    sci->allocated = 0;
    dd->hw_to_sw[hw_context] = INVALID_SCI;
}

/* return the base context of a context in a group */
static inline u32 group_context(u32 context, u32 group)
{
    return (context >> group) << group;
}

/* return the size of a group */
static inline u32 group_size(u32 group)
{
    return 1 << group;
}
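
/*
 * Example: with group == 3, group_size() is 1 << 3 = 8 contexts and
 * group_context(13, 3) is (13 >> 3) << 3 = 8, i.e. contexts 8-15 all
 * share base context 8.
 */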

/*
 * Obtain the credit return addresses, kernel virtual and bus, for the
 * given sc.
 *
 * To understand this routine:
 * o va and dma are arrays of struct credit_return.  One for each physical
 *   send context, per NUMA.
 * o Each send context always looks in its relative location in a struct
 *   credit_return for its credit return.
 * o Each send context in a group must have its return address CSR programmed
 *   with the same value.  Use the address of the first send context in the
 *   group.
 */
static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
{
    u32 gc = group_context(sc->hw_context, sc->group);
    u32 index = sc->hw_context & 0x7;

    sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
    *dma = (unsigned long)
           &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
}

/*
 * Work queue function triggered in error interrupt routine for
 * kernel contexts.
 */
static void sc_halted(struct work_struct *work)
{
    struct send_context *sc;

    sc = container_of(work, struct send_context, halt_work);
    sc_restart(sc);
}

/*
 * Calculate PIO block threshold for this send context using the given MTU.
 * Trigger a return when one MTU plus optional header of credits remain.
 *
 * Parameter mtu is in bytes.
 * Parameter hdrqentsize is in DWORDs.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
{
    u32 release_credits;
    u32 threshold;

    /* add in the header size, then divide by the PIO block size */
    mtu += hdrqentsize << 2;
    release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);

    /* check against this context's credits */
    if (sc->credits <= release_credits)
        threshold = 1;
    else
        threshold = sc->credits - release_credits;

    return threshold;
}
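
/*
 * Worked example (illustrative numbers): mtu = 4096 bytes and
 * hdrqentsize = 32 DWORDs (128 bytes) give 4224 bytes, or
 * DIV_ROUND_UP(4224, 64) = 66 release credits with 64-byte PIO blocks.
 * A context with 1024 credits would then use a threshold of
 * 1024 - 66 = 958.
 */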

/*
 * Calculate credit threshold in terms of percent of the allocated credits.
 * Trigger when unreturned credits equal or exceed the percentage of the whole.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
{
    return (sc->credits * percent) / 100;
}

/*
 * Set the credit return threshold.
 */
void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
{
    unsigned long flags;
    u32 old_threshold;
    int force_return = 0;

    spin_lock_irqsave(&sc->credit_ctrl_lock, flags);

    old_threshold = (sc->credit_ctrl >>
                SC(CREDIT_CTRL_THRESHOLD_SHIFT))
             & SC(CREDIT_CTRL_THRESHOLD_MASK);

    if (new_threshold != old_threshold) {
        sc->credit_ctrl =
            (sc->credit_ctrl
                & ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
            | ((new_threshold
                & SC(CREDIT_CTRL_THRESHOLD_MASK))
               << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
        write_kctxt_csr(sc->dd, sc->hw_context,
                SC(CREDIT_CTRL), sc->credit_ctrl);

        /* force a credit return on change to avoid a possible stall */
        force_return = 1;
    }

    spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);

    if (force_return)
        sc_return_credits(sc);
}

/*
 * set_pio_integrity
 *
 * Set the CHECK_ENABLE register for the send context 'sc'.
 */
void set_pio_integrity(struct send_context *sc)
{
    struct hfi1_devdata *dd = sc->dd;
    u32 hw_context = sc->hw_context;
    int type = sc->type;

    write_kctxt_csr(dd, hw_context,
            SC(CHECK_ENABLE),
            hfi1_pkt_default_send_ctxt_mask(dd, type));
}

static u32 get_buffers_allocated(struct send_context *sc)
{
    int cpu;
    u32 ret = 0;

    for_each_possible_cpu(cpu)
        ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
    return ret;
}

static void reset_buffers_allocated(struct send_context *sc)
{
    int cpu;

    for_each_possible_cpu(cpu)
        (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
}

/*
 * Allocate a NUMA relative send context structure of the given type along
 * with a HW context.
 */
struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
                  uint hdrqentsize, int numa)
{
    struct send_context_info *sci;
    struct send_context *sc = NULL;
    dma_addr_t dma;
    unsigned long flags;
    u64 reg;
    u32 thresh;
    u32 sw_index;
    u32 hw_context;
    int ret;
    u8 opval, opmask;

    /* do not allocate while frozen */
    if (dd->flags & HFI1_FROZEN)
        return NULL;

    sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
    if (!sc)
        return NULL;

    sc->buffers_allocated = alloc_percpu(u32);
    if (!sc->buffers_allocated) {
        kfree(sc);
        dd_dev_err(dd,
               "Cannot allocate buffers_allocated per cpu counters\n"
              );
        return NULL;
    }

    spin_lock_irqsave(&dd->sc_lock, flags);
    ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
    if (ret) {
        spin_unlock_irqrestore(&dd->sc_lock, flags);
        free_percpu(sc->buffers_allocated);
        kfree(sc);
        return NULL;
    }

    sci = &dd->send_contexts[sw_index];
    sci->sc = sc;

    sc->dd = dd;
    sc->node = numa;
    sc->type = type;
    spin_lock_init(&sc->alloc_lock);
    spin_lock_init(&sc->release_lock);
    spin_lock_init(&sc->credit_ctrl_lock);
    seqlock_init(&sc->waitlock);
    INIT_LIST_HEAD(&sc->piowait);
    INIT_WORK(&sc->halt_work, sc_halted);
    init_waitqueue_head(&sc->halt_wait);

    /* grouping is always single context for now */
    sc->group = 0;

    sc->sw_index = sw_index;
    sc->hw_context = hw_context;
    cr_group_addresses(sc, &dma);
    sc->credits = sci->credits;
    sc->size = sc->credits * PIO_BLOCK_SIZE;

/* PIO Send Memory Address details */
#define PIO_ADDR_CONTEXT_MASK 0xfful
#define PIO_ADDR_CONTEXT_SHIFT 16
    sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
                    << PIO_ADDR_CONTEXT_SHIFT);

    /* set base and credits */
    reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
                    << SC(CTRL_CTXT_DEPTH_SHIFT))
        | ((sci->base & SC(CTRL_CTXT_BASE_MASK))
                    << SC(CTRL_CTXT_BASE_SHIFT));
    write_kctxt_csr(dd, hw_context, SC(CTRL), reg);

    set_pio_integrity(sc);

    /* unmask all errors */
    write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);

    /* set the default partition key */
    write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
            (SC(CHECK_PARTITION_KEY_VALUE_MASK) &
             DEFAULT_PKEY) <<
            SC(CHECK_PARTITION_KEY_VALUE_SHIFT));

    /* per context type checks */
    if (type == SC_USER) {
        opval = USER_OPCODE_CHECK_VAL;
        opmask = USER_OPCODE_CHECK_MASK;
    } else {
        opval = OPCODE_CHECK_VAL_DISABLED;
        opmask = OPCODE_CHECK_MASK_DISABLED;
    }

    /* set the send context check opcode mask and value */
    write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
            ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
            ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));

    /* set up credit return */
    reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
    write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);

    /*
     * Calculate the initial credit return threshold.
     *
     * For Ack contexts, set a threshold for half the credits.
     * For User contexts use the given percentage.  This has been
     * sanitized on driver start-up.
     * For Kernel contexts, use the default MTU plus a header
     * or half the credits, whichever is smaller. This should
     * work for both the 3-deep buffering allocation and the
     * pooling allocation.
     */
    if (type == SC_ACK) {
        thresh = sc_percent_to_threshold(sc, 50);
    } else if (type == SC_USER) {
        thresh = sc_percent_to_threshold(sc,
                         user_credit_return_threshold);
    } else { /* kernel */
        thresh = min(sc_percent_to_threshold(sc, 50),
                 sc_mtu_to_threshold(sc, hfi1_max_mtu,
                         hdrqentsize));
    }
    reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
    /* add in early return */
    if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
        reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
    else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
        reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);

    /* set up write-through credit_ctrl */
    sc->credit_ctrl = reg;
    write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);

    /* User send contexts should not allow sending on VL15 */
    if (type == SC_USER) {
        reg = 1ULL << 15;
        write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
    }

    spin_unlock_irqrestore(&dd->sc_lock, flags);

    /*
     * Allocate shadow ring to track outstanding PIO buffers _after_
     * unlocking.  We don't know the size until the lock is held and
     * we can't allocate while the lock is held.  No one is using
     * the context yet, so allocate it now.
     *
     * User contexts do not get a shadow ring.
     */
    if (type != SC_USER) {
        /*
         * Size the shadow ring 1 larger than the number of credits
         * so head == tail can mean empty.
         */
        sc->sr_size = sci->credits + 1;
        sc->sr = kcalloc_node(sc->sr_size,
                      sizeof(union pio_shadow_ring),
                      GFP_KERNEL, numa);
        if (!sc->sr) {
            sc_free(sc);
            return NULL;
        }
    }

    hfi1_cdbg(PIO,
          "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
          sw_index,
          hw_context,
          sc_type_name(type),
          sc->group,
          sc->credits,
          sc->credit_ctrl,
          thresh);

    return sc;
}

/* free a per-NUMA send context structure */
void sc_free(struct send_context *sc)
{
    struct hfi1_devdata *dd;
    unsigned long flags;
    u32 sw_index;
    u32 hw_context;

    if (!sc)
        return;

    sc->flags |= SCF_IN_FREE;   /* ensure no restarts */
    dd = sc->dd;
    if (!list_empty(&sc->piowait))
        dd_dev_err(dd, "piowait list not empty!\n");
    sw_index = sc->sw_index;
    hw_context = sc->hw_context;
    sc_disable(sc); /* make sure the HW is disabled */
    flush_work(&sc->halt_work);

    spin_lock_irqsave(&dd->sc_lock, flags);
    dd->send_contexts[sw_index].sc = NULL;

    /* clear/disable all registers set in sc_alloc */
    write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
    write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
    write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
    write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
    write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
    write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
    write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);

    /* release the index and context for re-use */
    sc_hw_free(dd, sw_index, hw_context);
    spin_unlock_irqrestore(&dd->sc_lock, flags);

    kfree(sc->sr);
    free_percpu(sc->buffers_allocated);
    kfree(sc);
}

/* disable the context */
void sc_disable(struct send_context *sc)
{
    u64 reg;
    struct pio_buf *pbuf;
    LIST_HEAD(wake_list);

    if (!sc)
        return;

    /* do all steps, even if already disabled */
    spin_lock_irq(&sc->alloc_lock);
    reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
    reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
    sc->flags &= ~SCF_ENABLED;
    sc_wait_for_packet_egress(sc, 1);
    write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);

    /*
     * Flush any waiters.  Once the context is disabled,
     * credit return interrupts are stopped (although there
     * could be one in-process when the context is disabled).
     * Wait one microsecond for any lingering interrupts, then
     * proceed with the flush.
     */
    udelay(1);
    spin_lock(&sc->release_lock);
    if (sc->sr) {   /* this context has a shadow ring */
        while (sc->sr_tail != sc->sr_head) {
            pbuf = &sc->sr[sc->sr_tail].pbuf;
            if (pbuf->cb)
                (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
            sc->sr_tail++;
            if (sc->sr_tail >= sc->sr_size)
                sc->sr_tail = 0;
        }
    }
    spin_unlock(&sc->release_lock);

    write_seqlock(&sc->waitlock);
    if (!list_empty(&sc->piowait))
        list_move(&sc->piowait, &wake_list);
    write_sequnlock(&sc->waitlock);
    while (!list_empty(&wake_list)) {
        struct iowait *wait;
        struct rvt_qp *qp;
        struct hfi1_qp_priv *priv;

        wait = list_first_entry(&wake_list, struct iowait, list);
        qp = iowait_to_qp(wait);
        priv = qp->priv;
        list_del_init(&priv->s_iowait.list);
        priv->s_iowait.lock = NULL;
        hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
    }

    spin_unlock_irq(&sc->alloc_lock);
}

/* return SendEgressCtxtStatus.PacketOccupancy */
static u64 packet_occupancy(u64 reg)
{
    return (reg &
        SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
        >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
}

/* is egress halted on the context? */
static bool egress_halted(u64 reg)
{
    return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
}

/* is the send context halted? */
static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
{
    return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
          SC(STATUS_CTXT_HALTED_SMASK));
}

/**
 * sc_wait_for_packet_egress - wait for packet
 * @sc: valid send context
 * @pause: wait for credit return
 *
 * Wait for packet egress, optionally pause for credit return
 *
 * Egress halt and Context halt are not necessarily the same thing, so
 * check for both.
 *
 * NOTE: The context halt bit may not be set immediately.  Because of this,
 * it is necessary to check the SW SCF_HALTED bit (set in the IRQ) and the HW
 * context halted bit to determine if the context is halted.
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{
    struct hfi1_devdata *dd = sc->dd;
    u64 reg = 0;
    u64 reg_prev;
    u32 loop = 0;

    while (1) {
        reg_prev = reg;
        reg = read_csr(dd, sc->hw_context * 8 +
                   SEND_EGRESS_CTXT_STATUS);
        /* done if any halt bits, SW or HW are set */
        if (sc->flags & SCF_HALTED ||
            is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
            break;
        reg = packet_occupancy(reg);
        if (reg == 0)
            break;
        /* counter is reset if occupancy count changes */
        if (reg != reg_prev)
            loop = 0;
        if (loop > 50000) {
            /* timed out - bounce the link */
            dd_dev_err(dd,
                   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
                   __func__, sc->sw_index,
                   sc->hw_context, (u32)reg);
            queue_work(dd->pport->link_wq,
                   &dd->pport->link_bounce_work);
            break;
        }
        loop++;
        udelay(1);
    }

    if (pause)
        /* Add additional delay to ensure chip returns all credits */
        pause_for_credit_return(dd);
}

void sc_wait(struct hfi1_devdata *dd)
{
    int i;

    for (i = 0; i < dd->num_send_contexts; i++) {
        struct send_context *sc = dd->send_contexts[i].sc;

        if (!sc)
            continue;
        sc_wait_for_packet_egress(sc, 0);
    }
}

/*
 * Restart a context after it has been halted due to error.
 *
 * If the first step (waiting for the halt to be asserted) fails, return
 * early.  Otherwise complain about timeouts but keep going.
 *
 * It is expected that allocations (enabled flag bit) have been shut off
 * already (only applies to kernel contexts).
 */
int sc_restart(struct send_context *sc)
{
    struct hfi1_devdata *dd = sc->dd;
    u64 reg;
    u32 loop;
    int count;

    /* bounce off if not halted, or being freed */
    if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
        return -EINVAL;

    dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
            sc->hw_context);

    /*
     * Step 1: Wait for the context to actually halt.
     *
     * The error interrupt is asynchronous to actually setting halt
     * on the context.
     */
    loop = 0;
    while (1) {
        reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
        if (reg & SC(STATUS_CTXT_HALTED_SMASK))
            break;
        if (loop > 100) {
            dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
                   __func__, sc->sw_index, sc->hw_context);
            return -ETIME;
        }
        loop++;
        udelay(1);
    }

    /*
     * Step 2: Ensure no users are still trying to write to PIO.
     *
     * For kernel contexts, we have already turned off buffer allocation.
     * Now wait for the buffer count to go to zero.
     *
     * For user contexts, the user handling code has cut off write access
     * to the context's PIO pages before calling this routine and will
     * restore write access after this routine returns.
     */
    if (sc->type != SC_USER) {
        /* kernel context */
        loop = 0;
        while (1) {
            count = get_buffers_allocated(sc);
            if (count == 0)
                break;
            if (loop > 100) {
                dd_dev_err(dd,
                       "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
                       __func__, sc->sw_index,
                       sc->hw_context, count);
            }
            loop++;
            udelay(1);
        }
    }

    /*
     * Step 3: Wait for all packets to egress.
     * This is done while disabling the send context
     *
     * Step 4: Disable the context
     *
     * This is a superset of the halt.  After the disable, the
     * errors can be cleared.
     */
    sc_disable(sc);

    /*
     * Step 5: Enable the context
     *
     * This enable will clear the halted flag and per-send context
     * error flags.
     */
    return sc_enable(sc);
}

/*
 * PIO freeze processing.  To be called after the TXE block is fully frozen.
 * Go through all frozen send contexts and disable them.  The contexts are
 * already stopped by the freeze.
 */
void pio_freeze(struct hfi1_devdata *dd)
{
    struct send_context *sc;
    int i;

    for (i = 0; i < dd->num_send_contexts; i++) {
        sc = dd->send_contexts[i].sc;
        /*
         * Don't disable unallocated, unfrozen, or user send contexts.
         * User send contexts will be disabled when the process
         * calls into the driver to reset its context.
         */
        if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
            continue;

        /* only need to disable, the context is already stopped */
        sc_disable(sc);
    }
}

/*
 * Unfreeze PIO for kernel send contexts.  The precondition for calling this
 * is that all PIO send contexts have been disabled and the SPC freeze has
 * been cleared.  Now perform the last step and re-enable each kernel context.
 * User (PSM) processing will occur when PSM calls into the kernel to
 * acknowledge the freeze.
 */
void pio_kernel_unfreeze(struct hfi1_devdata *dd)
{
    struct send_context *sc;
    int i;

    for (i = 0; i < dd->num_send_contexts; i++) {
        sc = dd->send_contexts[i].sc;
        if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
            continue;
        if (sc->flags & SCF_LINK_DOWN)
            continue;

        sc_enable(sc);  /* will clear the sc frozen flag */
    }
}

/**
 * pio_kernel_linkup() - Re-enable send contexts after linkup event
 * @dd: valid device data
 *
 * When the link goes down, the freeze path is taken.  However, a link down
 * event is different from a freeze because if the send context is re-enabled
 * whoever is sending data will start sending again, which will hang any QP
 * that is sending data.
 *
 * The freeze path now looks at the type of event that occurs and takes this
 * path for a link down event.
 */
void pio_kernel_linkup(struct hfi1_devdata *dd)
{
    struct send_context *sc;
    int i;

    for (i = 0; i < dd->num_send_contexts; i++) {
        sc = dd->send_contexts[i].sc;
        if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
            continue;

        sc_enable(sc);  /* will clear the sc link down flag */
    }
}

/*
 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
 * Returns:
 *  -ETIMEDOUT - if we wait too long
 *  -EIO       - if there was an error
 */
static int pio_init_wait_progress(struct hfi1_devdata *dd)
{
    u64 reg;
    int max, count = 0;

    /* max is the longest possible HW init time / delay */
    max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
    while (1) {
        reg = read_csr(dd, SEND_PIO_INIT_CTXT);
        if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
            break;
        if (count >= max)
            return -ETIMEDOUT;
        udelay(5);
        count++;
    }

    return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
}
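
/*
 * With the 5us delay above, this polls for roughly 25us on silicon
 * (max = 5) and 600us on FPGA emulation (max = 120) before timing out.
 */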

/*
 * Reset all of the send contexts to their power-on state.  Used
 * only during manual init - no lock against sc_enable needed.
 */
void pio_reset_all(struct hfi1_devdata *dd)
{
    int ret;

    /* make sure the init engine is not busy */
    ret = pio_init_wait_progress(dd);
    /* ignore any timeout */
    if (ret == -EIO) {
        /* clear the error */
        write_csr(dd, SEND_PIO_ERR_CLEAR,
              SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
    }

    /* reset init all */
    write_csr(dd, SEND_PIO_INIT_CTXT,
          SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
    udelay(2);
    ret = pio_init_wait_progress(dd);
    if (ret < 0) {
        dd_dev_err(dd,
               "PIO send context init %s while initializing all PIO blocks\n",
               ret == -ETIMEDOUT ? "is stuck" : "had an error");
    }
}

/* enable the context */
int sc_enable(struct send_context *sc)
{
    u64 sc_ctrl, reg, pio;
    struct hfi1_devdata *dd;
    unsigned long flags;
    int ret = 0;

    if (!sc)
        return -EINVAL;
    dd = sc->dd;

    /*
     * Obtain the allocator lock to guard against any allocation
     * attempts (which should not happen prior to context being
     * enabled). On the release/disable side we don't need to
     * worry about locking since the releaser will not do anything
     * if the context accounting values have not changed.
     */
    spin_lock_irqsave(&sc->alloc_lock, flags);
    sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
    if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
        goto unlock; /* already enabled */

    /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */

    *sc->hw_free = 0;
    sc->free = 0;
    sc->alloc_free = 0;
    sc->fill = 0;
    sc->fill_wrap = 0;
    sc->sr_head = 0;
    sc->sr_tail = 0;
    sc->flags = 0;
    /* the alloc lock ensures no fast path allocation */
    reset_buffers_allocated(sc);

    /*
     * Clear all per-context errors.  Some of these will be set when
     * we are re-enabling after a context halt.  Now that the context
     * is disabled, the halt will not clear until after the PIO init
     * engine runs below.
     */
    reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
    if (reg)
        write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);

    /*
     * The HW PIO initialization engine can handle only one init
     * request at a time. Serialize access to each device's engine.
     */
    spin_lock(&dd->sc_init_lock);
    /*
     * Since access to this code block is serialized and
     * each access waits for the initialization to complete
     * before releasing the lock, the PIO initialization engine
     * should not be in use, so we don't have to wait for the
     * InProgress bit to go down.
     */
    pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
           SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
        SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
    write_csr(dd, SEND_PIO_INIT_CTXT, pio);
    /*
     * Wait until the engine is done.  Give the chip the required time
     * so, hopefully, we read the register just once.
     */
    udelay(2);
    ret = pio_init_wait_progress(dd);
    spin_unlock(&dd->sc_init_lock);
    if (ret) {
        dd_dev_err(dd,
               "sctxt%u(%u): Context not enabled due to init failure %d\n",
               sc->sw_index, sc->hw_context, ret);
        goto unlock;
    }

    /*
     * All is well. Enable the context.
     */
    sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
    write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
    /*
     * Read SendCtxtCtrl to force the write out and prevent a timing
     * hazard where a PIO write may reach the context before the enable.
     */
    read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
    sc->flags |= SCF_ENABLED;

unlock:
    spin_unlock_irqrestore(&sc->alloc_lock, flags);

    return ret;
}

/* force a credit return on the context */
void sc_return_credits(struct send_context *sc)
{
    if (!sc)
        return;

    /* a 0->1 transition schedules a credit return */
    write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
            SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
    /*
     * Ensure that the write is flushed and the credit return is
     * scheduled. We care more about the 0 -> 1 transition.
     */
    read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
    /* set back to 0 for next time */
    write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
}

/* allow all in-flight packets to drain on the context */
void sc_flush(struct send_context *sc)
{
    if (!sc)
        return;

    sc_wait_for_packet_egress(sc, 1);
}

/* drop all packets on the context, no waiting until they are sent */
void sc_drop(struct send_context *sc)
{
    if (!sc)
        return;

    dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
            __func__, sc->sw_index, sc->hw_context);
}

/*
 * Start the software reaction to a context halt or SPC freeze:
 *  - mark the context as halted or frozen
 *  - stop buffer allocations
 *
 * Called from the error interrupt.  Other work is deferred until
 * out of the interrupt.
 */
void sc_stop(struct send_context *sc, int flag)
{
    unsigned long flags;

    /* stop buffer allocations */
    spin_lock_irqsave(&sc->alloc_lock, flags);
    /* mark the context */
    sc->flags |= flag;
    sc->flags &= ~SCF_ENABLED;
    spin_unlock_irqrestore(&sc->alloc_lock, flags);
    wake_up(&sc->halt_wait);
}

#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
#define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
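
/*
 * Example (assuming 64-byte PIO blocks): BLOCK_DWORDS is 64 / 4 = 16, so
 * a 40-dword packet needs dwords_to_blocks(40) = DIV_ROUND_UP(40, 16) = 3
 * PIO blocks.
 */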

/*
 * The send context buffer "allocator".
 *
 * @sc: the PIO send context we are allocating from
 * @dw_len: length of whole packet - including PBC - in dwords
 * @cb: optional callback to call when the buffer is finished sending
 * @arg: argument for cb
 *
 * Return a pointer to a PIO buffer, NULL if not enough room, or
 * ERR_PTR(-ECOMM) when the link is down.
 */
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
                pio_release_cb cb, void *arg)
{
    struct pio_buf *pbuf = NULL;
    unsigned long flags;
    unsigned long avail;
    unsigned long blocks = dwords_to_blocks(dw_len);
    u32 fill_wrap;
    int trycount = 0;
    u32 head, next;

    spin_lock_irqsave(&sc->alloc_lock, flags);
    if (!(sc->flags & SCF_ENABLED)) {
        spin_unlock_irqrestore(&sc->alloc_lock, flags);
        return ERR_PTR(-ECOMM);
    }

retry:
    avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
    if (blocks > avail) {
        /* not enough room */
        if (unlikely(trycount)) { /* already tried to get more room */
            spin_unlock_irqrestore(&sc->alloc_lock, flags);
            goto done;
        }
        /* copy from receiver cache line and recalculate */
        sc->alloc_free = READ_ONCE(sc->free);
        avail =
            (unsigned long)sc->credits -
            (sc->fill - sc->alloc_free);
        if (blocks > avail) {
            /* still no room, actively update */
            sc_release_update(sc);
            sc->alloc_free = READ_ONCE(sc->free);
            trycount++;
            goto retry;
        }
    }

    /* there is enough room */

    preempt_disable();
    this_cpu_inc(*sc->buffers_allocated);

    /* read this once */
    head = sc->sr_head;

    /* "allocate" the buffer */
    sc->fill += blocks;
    fill_wrap = sc->fill_wrap;
    sc->fill_wrap += blocks;
    if (sc->fill_wrap >= sc->credits)
        sc->fill_wrap = sc->fill_wrap - sc->credits;

    /*
     * Fill the parts that the releaser looks at before moving the head.
     * The only necessary piece is the sent_at field.  The credits
     * we have just allocated cannot have been returned yet, so the
     * cb and arg will not be looked at for a "while".  Put them
     * on this side of the memory barrier anyway.
     */
    pbuf = &sc->sr[head].pbuf;
    pbuf->sent_at = sc->fill;
    pbuf->cb = cb;
    pbuf->arg = arg;
    pbuf->sc = sc;  /* could be filled in at sc->sr init time */
    /* make sure this is in memory before updating the head */

    /* calculate next head index, do not store */
    next = head + 1;
    if (next >= sc->sr_size)
        next = 0;
    /*
     * update the head - must be last! - the releaser can look at fields
     * in pbuf once we move the head
     */
    smp_wmb();
    sc->sr_head = next;
    spin_unlock_irqrestore(&sc->alloc_lock, flags);

    /* finish filling in the buffer outside the lock */
    pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
    pbuf->end = sc->base_addr + sc->size;
    pbuf->qw_written = 0;
    pbuf->carry_bytes = 0;
    pbuf->carry.val64 = 0;
done:
    return pbuf;
}
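
/*
 * Worked example (illustrative numbers) of the availability check in
 * sc_buffer_alloc(): with credits = 64, fill = 100, and alloc_free = 70,
 * avail = 64 - (100 - 70) = 34 blocks.  fill and alloc_free are
 * free-running counters, so unsigned subtraction keeps the difference
 * correct even after the counters wrap.
 */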

/*
 * There are at least two entities that can turn on credit return
 * interrupts and they can overlap.  Avoid problems by implementing
 * a count scheme that is enforced by a lock.  The lock is needed because
 * the count and CSR write must be paired.
 */

/*
 * Start credit return interrupts.  This is managed by a count.  If already
 * on, just increment the count.
 */
void sc_add_credit_return_intr(struct send_context *sc)
{
    unsigned long flags;

    /* lock must surround both the count change and the CSR update */
    spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
    if (sc->credit_intr_count == 0) {
        sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
        write_kctxt_csr(sc->dd, sc->hw_context,
                SC(CREDIT_CTRL), sc->credit_ctrl);
    }
    sc->credit_intr_count++;
    spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}

/*
 * Stop credit return interrupts.  This is managed by a count.  Decrement
 * the count; if this is the last user, turn the credit return interrupts
 * off.
 */
void sc_del_credit_return_intr(struct send_context *sc)
{
    unsigned long flags;

    WARN_ON(sc->credit_intr_count == 0);

    /* lock must surround both the count change and the CSR update */
    spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
    sc->credit_intr_count--;
    if (sc->credit_intr_count == 0) {
        sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
        write_kctxt_csr(sc->dd, sc->hw_context,
                SC(CREDIT_CTRL), sc->credit_ctrl);
    }
    spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}

/*
 * The caller must be careful when calling this.  Every call with needint
 * set must be paired with a matching call with needint clear.
 */
void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
{
    if (needint)
        sc_add_credit_return_intr(sc);
    else
        sc_del_credit_return_intr(sc);
    trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
    if (needint)
        sc_return_credits(sc);
}

/**
 * sc_piobufavail - callback when a PIO buffer is available
 * @sc: the send context
 *
 * This is called from the interrupt handler when a PIO buffer is
 * available after hfi1_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
static void sc_piobufavail(struct send_context *sc)
{
    struct hfi1_devdata *dd = sc->dd;
    struct list_head *list;
    struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
    struct rvt_qp *qp;
    struct hfi1_qp_priv *priv;
    unsigned long flags;
    uint i, n = 0, top_idx = 0;

    if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
        dd->send_contexts[sc->sw_index].type != SC_VL15)
        return;
    list = &sc->piowait;
    /*
     * Note: checking that the piowait list is empty and clearing
     * the buffer available interrupt needs to be atomic or we
     * could end up with QPs on the wait list with the interrupt
     * disabled.
     */
    write_seqlock_irqsave(&sc->waitlock, flags);
    while (!list_empty(list)) {
        struct iowait *wait;

        if (n == ARRAY_SIZE(qps))
            break;
        wait = list_first_entry(list, struct iowait, list);
        iowait_get_priority(wait);
        qp = iowait_to_qp(wait);
        priv = qp->priv;
        list_del_init(&priv->s_iowait.list);
        priv->s_iowait.lock = NULL;
        if (n) {
            priv = qps[top_idx]->priv;
            top_idx = iowait_priority_update_top(wait,
                                 &priv->s_iowait,
                                 n, top_idx);
        }

        /* refcount held until actual wake up */
        qps[n++] = qp;
    }
1612     /*
1613      * If there were waiters and more remain on the list, re-arm the
1614      * interrupt (forcing a credit return) to avoid a potential hang.
1615      */
1616     if (n) {
1617         hfi1_sc_wantpiobuf_intr(sc, 0);
1618         if (!list_empty(list))
1619             hfi1_sc_wantpiobuf_intr(sc, 1);
1620     }
1621     write_sequnlock_irqrestore(&sc->waitlock, flags);
1622 
1623     /* Wake up the top-priority one first */
1624     if (n)
1625         hfi1_qp_wakeup(qps[top_idx],
1626                    RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
1627     for (i = 0; i < n; i++)
1628         if (i != top_idx)
1629             hfi1_qp_wakeup(qps[i],
1630                        RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
1631 }
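
/*
 * Note (informational): at most PIO_WAIT_BATCH_SIZE QPs are dequeued
 * per invocation; the highest-priority entry (tracked via top_idx) is
 * woken first and the rest of the batch follows in list order.
 */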
1632 
1633 /* translate a send credit update to a bit code of reasons */
1634 static inline int fill_code(u64 hw_free)
1635 {
1636     int code = 0;
1637 
1638     if (hw_free & CR_STATUS_SMASK)
1639         code |= PRC_STATUS_ERR;
1640     if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
1641         code |= PRC_PBC;
1642     if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
1643         code |= PRC_THRESHOLD;
1644     if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
1645         code |= PRC_FILL_ERR;
1646     if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
1647         code |= PRC_SC_DISABLE;
1648     return code;
1649 }
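
/*
 * Worked example (illustrative): a credit return posted because the
 * threshold was crossed while a PBC-requested return was also pending
 * has both ..._DUE_TO_THRESHOLD and ..._DUE_TO_PBC set, so fill_code()
 * returns PRC_THRESHOLD | PRC_PBC.
 */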
1650 
1651 /* use the jiffies compare to get the wrap right */
1652 #define sent_before(a, b) time_before(a, b) /* a < b */
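/*
 * Example of the wrap handling (illustrative, assuming 32-bit
 * counters): sent_before(0xfffffffe, 0x00000001) is true because
 * time_before() tests the signed difference, and 0xfffffffe -
 * 0x00000001 interpreted as a signed value is -3, i.e. "a" sorts
 * before "b" even though it is numerically larger.
 */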
1653 
1654 /*
1655  * The send context buffer "releaser".
1656  */
1657 void sc_release_update(struct send_context *sc)
1658 {
1659     struct pio_buf *pbuf;
1660     u64 hw_free;
1661     u32 head, tail;
1662     unsigned long old_free;
1663     unsigned long free;
1664     unsigned long extra;
1665     unsigned long flags;
1666     int code;
1667 
1668     if (!sc)
1669         return;
1670 
1671     spin_lock_irqsave(&sc->release_lock, flags);
1672     /* update free */
1673     hw_free = le64_to_cpu(*sc->hw_free);        /* volatile read */
1674     old_free = sc->free;
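    /*
     * The hardware counter is narrower than sc->free; the masked
     * subtraction below recovers how far it advanced modulo the
     * counter width.  Purely illustrative: if the field were 16 bits,
     * old_free == 0x1fffe and a hardware count of 0x0001 would give
     * extra == (0x0001 - 0xfffe) & 0xffff == 3, advancing free to
     * 0x20001.
     */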
1675     extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
1676             - (old_free & CR_COUNTER_MASK))
1677                 & CR_COUNTER_MASK;
1678     free = old_free + extra;
1679     trace_hfi1_piofree(sc, extra);
1680 
1681     /* call sent buffer callbacks */
1682     code = -1;              /* code not yet set */
1683     head = READ_ONCE(sc->sr_head);  /* snapshot the head */
1684     tail = sc->sr_tail;
1685     while (head != tail) {
1686         pbuf = &sc->sr[tail].pbuf;
1687 
1688         if (sent_before(free, pbuf->sent_at)) {
1689             /* not sent yet */
1690             break;
1691         }
1692         if (pbuf->cb) {
1693             if (code < 0) /* fill in code on first user */
1694                 code = fill_code(hw_free);
1695             (*pbuf->cb)(pbuf->arg, code);
1696         }
1697 
1698         tail++;
1699         if (tail >= sc->sr_size)
1700             tail = 0;
1701     }
1702     sc->sr_tail = tail;
1703     /* make sure tail is updated before free */
1704     smp_wmb();
1705     sc->free = free;
1706     spin_unlock_irqrestore(&sc->release_lock, flags);
1707     sc_piobufavail(sc);
1708 }
1709 
1710 /*
1711  * Send context group releaser.  The argument is the hardware context that
1712  * caused the interrupt.  Called from the send context interrupt handler.
1713  *
1714  * Call release on all contexts in the group.
1715  *
1716  * This routine takes the sc_lock without an irqsave because it is only
1717  * called from an interrupt handler.  Adjust if that changes.
1718  */
1719 void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
1720 {
1721     struct send_context *sc;
1722     u32 sw_index;
1723     u32 gc, gc_end;
1724 
1725     spin_lock(&dd->sc_lock);
1726     sw_index = dd->hw_to_sw[hw_context];
1727     if (unlikely(sw_index >= dd->num_send_contexts)) {
1728         dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
1729                __func__, hw_context, sw_index);
1730         goto done;
1731     }
1732     sc = dd->send_contexts[sw_index].sc;
1733     if (unlikely(!sc))
1734         goto done;
1735 
1736     gc = group_context(hw_context, sc->group);
1737     gc_end = gc + group_size(sc->group);
1738     for (; gc < gc_end; gc++) {
1739         sw_index = dd->hw_to_sw[gc];
1740         if (unlikely(sw_index >= dd->num_send_contexts)) {
1741             dd_dev_err(dd,
1742                    "%s: invalid hw (%u) to sw (%u) mapping\n",
1743                    __func__, gc, sw_index);
1744             continue;
1745         }
1746         sc_release_update(dd->send_contexts[sw_index].sc);
1747     }
1748 done:
1749     spin_unlock(&dd->sc_lock);
1750 }
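
/*
 * Note (informational, based on the group_context()/group_size()
 * semantics): hardware contexts in a group are contiguous and
 * naturally aligned on a power-of-two boundary, so for a group of
 * size 4 an interrupt from hw_context 6 walks gc == 4 through
 * gc_end == 8, giving contexts 4-7 a release update.
 */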
1751 
1752 /*
1753  * pio_select_send_context_vl() - select send context
1754  * @dd: devdata
1755  * @selector: a spreading factor
1756  * @vl: this vl
1757  *
1758  * This function returns a send context based on the selector and a vl.
1759  * The mapping fields are protected by RCU
1760  */
1761 struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
1762                         u32 selector, u8 vl)
1763 {
1764     struct pio_vl_map *m;
1765     struct pio_map_elem *e;
1766     struct send_context *rval;
1767 
1768     /*
1769      * NOTE: This should only happen if the SC->VL mapping changed
1770      * after the initial checks on the QP/AH.
1771      * The default below is to return VL0's send context.
1772      */
1773     if (unlikely(vl >= num_vls)) {
1774         rval = NULL;
1775         goto done;
1776     }
1777 
1778     rcu_read_lock();
1779     m = rcu_dereference(dd->pio_map);
1780     if (unlikely(!m)) {
1781         rcu_read_unlock();
1782         return dd->vld[0].sc;
1783     }
1784     e = m->map[vl & m->mask];
1785     rval = e->ksc[selector & e->mask];
1786     rcu_read_unlock();
1787 
1788 done:
1789     rval = !rval ? dd->vld[0].sc : rval;
1790     return rval;
1791 }
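
/*
 * Illustrative lookup (a sketch): with 4 actual VLs (m->mask == 3) and
 * 2 kernel send contexts assigned to VL1 (e->mask == 1), a call with
 * vl == 1 and selector == 5 resolves to m->map[1]->ksc[5 & 1], i.e.
 * ksc[1].
 */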
1792 
1793 /*
1794  * pio_select_send_context_sc() - select send context
1795  * @dd: devdata
1796  * @selector: a spreading factor
1797  * @sc5: the 5 bit sc
1798  *
1799  * This function returns a send context based on the selector and an SC.
1800  */
1801 struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
1802                         u32 selector, u8 sc5)
1803 {
1804     u8 vl = sc_to_vlt(dd, sc5);
1805 
1806     return pio_select_send_context_vl(dd, selector, vl);
1807 }
1808 
1809 /*
1810  * Free the indicated map struct
1811  */
1812 static void pio_map_free(struct pio_vl_map *m)
1813 {
1814     int i;
1815 
1816     for (i = 0; m && i < m->actual_vls; i++)
1817         kfree(m->map[i]);
1818     kfree(m);
1819 }
1820 
1821 /*
1822  * Handle RCU callback
1823  */
1824 static void pio_map_rcu_callback(struct rcu_head *list)
1825 {
1826     struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);
1827 
1828     pio_map_free(m);
1829 }
1830 
1831 /*
1832  * Set credit return threshold for the kernel send context
1833  */
1834 static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
1835 {
1836     u32 thres;
1837 
1838     thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
1839                         50),
1840             sc_mtu_to_threshold(dd->kernel_send_context[scontext],
1841                     dd->vld[i].mtu,
1842                     dd->rcd[0]->rcvhdrqentsize));
1843     sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
1844 }
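
/*
 * Note (informational): the value programmed above is the smaller of
 * 50% of the context's credits and an MTU-based threshold derived from
 * the VL's MTU and the receive header queue entry size.
 */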
1845 
1846 /*
1847  * pio_map_init - called when #vls change
1848  * @dd: hfi1_devdata
1849  * @port: port number
1850  * @num_vls: number of vls
1851  * @vl_scontexts: per vl send context mapping (optional)
1852  *
1853  * This routine changes the mapping based on the number of vls.
1854  *
1855  * vl_scontexts is used to specify a non-uniform vl/send context
1856  * loading. NULL implies auto computing the loading and giving each
1857  * VL a uniform distribution of send contexts per VL.
1858  *
1859  * The auto algorithm computes the sc_per_vl and the number of extra
1860  * send contexts. Any extra send contexts are added from the last VL
1861  * on down.
1862  *
1863  * rcu locking is used here to control access to the mapping fields.
1864  *
1865  * If either the num_vls or num_send_contexts are non-power of 2, the
1866  * array sizes in the struct pio_vl_map and the struct pio_map_elem are
1867  * rounded up to the next highest power of 2 and the first entry is
1868  * reused in a round robin fashion.
1869  *
1870  * If an error occurs, the map change is not done and the existing
1871  * mapping is left unchanged.
1872  *
1873  */
1874 int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
1875 {
1876     int i, j;
1877     int extra, sc_per_vl;
1878     int scontext = 1;
1879     int num_kernel_send_contexts = 0;
1880     u8 lvl_scontexts[OPA_MAX_VLS];
1881     struct pio_vl_map *oldmap, *newmap;
1882 
1883     if (!vl_scontexts) {
1884         for (i = 0; i < dd->num_send_contexts; i++)
1885             if (dd->send_contexts[i].type == SC_KERNEL)
1886                 num_kernel_send_contexts++;
1887         /* truncate divide */
1888         sc_per_vl = num_kernel_send_contexts / num_vls;
1889         /* extras */
1890         extra = num_kernel_send_contexts % num_vls;
1891         vl_scontexts = lvl_scontexts;
1892         /* add extras from last vl down */
1893         for (i = num_vls - 1; i >= 0; i--, extra--)
1894             vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
1895     }
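    /*
     * Worked example of the auto distribution (illustrative): 18
     * kernel send contexts and num_vls == 4 give sc_per_vl == 4 and
     * extra == 2, so filling from the last VL down yields a loading of
     * { 4, 4, 5, 5 }.
     */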
1896     /* build new map */
1897     newmap = kzalloc(sizeof(*newmap) +
1898              roundup_pow_of_two(num_vls) *
1899              sizeof(struct pio_map_elem *),
1900              GFP_KERNEL);
1901     if (!newmap)
1902         goto bail;
1903     newmap->actual_vls = num_vls;
1904     newmap->vls = roundup_pow_of_two(num_vls);
1905     newmap->mask = (1 << ilog2(newmap->vls)) - 1;
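    /* e.g. num_vls == 5 (illustrative) rounds vls up to 8, mask == 0x7 */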
1906     for (i = 0; i < newmap->vls; i++) {
1907         /* save for wrap around */
1908         int first_scontext = scontext;
1909 
1910         if (i < newmap->actual_vls) {
1911             int sz = roundup_pow_of_two(vl_scontexts[i]);
1912 
1913             /* only allocate once */
1914             newmap->map[i] =
1915                 kzalloc(sizeof(*newmap->map[i]) +
1916                     sz * sizeof(struct send_context *),
1917                     GFP_KERNEL);
1918             if (!newmap->map[i])
1919                 goto bail;
1920             newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1921             /*
1922              * assign send contexts and
1923              * adjust credit return threshold
1924              */
1925             for (j = 0; j < sz; j++) {
1926                 if (dd->kernel_send_context[scontext]) {
1927                     newmap->map[i]->ksc[j] =
1928                     dd->kernel_send_context[scontext];
1929                     set_threshold(dd, scontext, i);
1930                 }
1931                 if (++scontext >= first_scontext +
1932                           vl_scontexts[i])
1933                     /* wrap back to first send context */
1934                     scontext = first_scontext;
1935             }
1936         } else {
1937             /* just re-use entry without allocating */
1938             newmap->map[i] = newmap->map[i % num_vls];
1939         }
1940         scontext = first_scontext + vl_scontexts[i];
1941     }
1942     /* newmap in hand, save old map */
1943     spin_lock_irq(&dd->pio_map_lock);
1944     oldmap = rcu_dereference_protected(dd->pio_map,
1945                        lockdep_is_held(&dd->pio_map_lock));
1946 
1947     /* publish newmap */
1948     rcu_assign_pointer(dd->pio_map, newmap);
1949 
1950     spin_unlock_irq(&dd->pio_map_lock);
1951     /* success, free any old map after grace period */
1952     if (oldmap)
1953         call_rcu(&oldmap->list, pio_map_rcu_callback);
1954     return 0;
1955 bail:
1956     /* free any partial allocation */
1957     pio_map_free(newmap);
1958     return -ENOMEM;
1959 }
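
/*
 * Reader/writer contract (informational): readers in
 * pio_select_send_context_vl() use rcu_read_lock()/rcu_dereference();
 * the writer above publishes with rcu_assign_pointer() under
 * pio_map_lock and frees the old map only after a grace period via
 * call_rcu().
 */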
1960 
1961 void free_pio_map(struct hfi1_devdata *dd)
1962 {
1963     /* Free PIO map if allocated */
1964     if (rcu_access_pointer(dd->pio_map)) {
1965         spin_lock_irq(&dd->pio_map_lock);
1966         pio_map_free(rcu_access_pointer(dd->pio_map));
1967         RCU_INIT_POINTER(dd->pio_map, NULL);
1968         spin_unlock_irq(&dd->pio_map_lock);
1969         synchronize_rcu();
1970     }
1971     kfree(dd->kernel_send_context);
1972     dd->kernel_send_context = NULL;
1973 }
1974 
1975 int init_pervl_scs(struct hfi1_devdata *dd)
1976 {
1977     int i;
1978     u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
1979     u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
1980     u32 ctxt;
1981     struct hfi1_pportdata *ppd = dd->pport;
1982 
1983     dd->vld[15].sc = sc_alloc(dd, SC_VL15,
1984                   dd->rcd[0]->rcvhdrqentsize, dd->node);
1985     if (!dd->vld[15].sc)
1986         return -ENOMEM;
1987 
1988     hfi1_init_ctxt(dd->vld[15].sc);
1989     dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
1990 
1991     dd->kernel_send_context = kcalloc_node(dd->num_send_contexts,
1992                            sizeof(struct send_context *),
1993                            GFP_KERNEL, dd->node);
1994     if (!dd->kernel_send_context)
1995         goto freesc15;
1996 
1997     dd->kernel_send_context[0] = dd->vld[15].sc;
1998 
1999     for (i = 0; i < num_vls; i++) {
2000         /*
2001          * Since this function does not deal with a specific
2002          * receive context but we need the RcvHdrQ entry size,
2003          * use the size from rcd[0]. It is guaranteed to be
2004          * valid at this point and will remain the same for all
2005          * receive contexts.
2006          */
2007         dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
2008                      dd->rcd[0]->rcvhdrqentsize, dd->node);
2009         if (!dd->vld[i].sc)
2010             goto nomem;
2011         dd->kernel_send_context[i + 1] = dd->vld[i].sc;
2012         hfi1_init_ctxt(dd->vld[i].sc);
2013         /* non VL15 start with the max MTU */
2014         dd->vld[i].mtu = hfi1_max_mtu;
2015     }
2016     for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
2017         dd->kernel_send_context[i + 1] =
2018         sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
2019         if (!dd->kernel_send_context[i + 1])
2020             goto nomem;
2021         hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
2022     }
2023 
2024     sc_enable(dd->vld[15].sc);
2025     ctxt = dd->vld[15].sc->hw_context;
2026     mask = all_vl_mask & ~(1LL << 15);
2027     write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2028     dd_dev_info(dd,
2029             "Using send context %u(%u) for VL15\n",
2030             dd->vld[15].sc->sw_index, ctxt);
2031 
2032     for (i = 0; i < num_vls; i++) {
2033         sc_enable(dd->vld[i].sc);
2034         ctxt = dd->vld[i].sc->hw_context;
2035         mask = all_vl_mask & ~(data_vls_mask);
2036         write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2037     }
2038     for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
2039         sc_enable(dd->kernel_send_context[i + 1]);
2040         ctxt = dd->kernel_send_context[i + 1]->hw_context;
2041         mask = all_vl_mask & ~(data_vls_mask);
2042         write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2043     }
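
    /*
     * Mask arithmetic above (illustrative): all_vl_mask == 0x80ff, so
     * the VL15 context was programmed with 0x80ff & ~(1 << 15) ==
     * 0x00ff and each data context with 0x80ff & ~0x00ff == 0x8000,
     * keeping the VL15 and data VL check sets disjoint.
     */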
2044 
2045     if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
2046         goto nomem;
2047     return 0;
2048 
2049 nomem:
2050     for (i = 0; i < num_vls; i++) {
2051         sc_free(dd->vld[i].sc);
2052         dd->vld[i].sc = NULL;
2053     }
2054 
2055     for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
2056         sc_free(dd->kernel_send_context[i + 1]);
2057 
2058     kfree(dd->kernel_send_context);
2059     dd->kernel_send_context = NULL;
2060 
2061 freesc15:
2062     sc_free(dd->vld[15].sc);
2063     return -ENOMEM;
2064 }
2065 
2066 int init_credit_return(struct hfi1_devdata *dd)
2067 {
2068     int ret;
2069     int i;
2070 
2071     dd->cr_base = kcalloc(
2072         node_affinity.num_possible_nodes,
2073         sizeof(struct credit_return_base),
2074         GFP_KERNEL);
2075     if (!dd->cr_base) {
2076         ret = -ENOMEM;
2077         goto done;
2078     }
2079     for_each_node_with_cpus(i) {
2080         int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
2081 
2082         set_dev_node(&dd->pcidev->dev, i);
2083         dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
2084                                bytes,
2085                                &dd->cr_base[i].dma,
2086                                GFP_KERNEL);
2087         if (!dd->cr_base[i].va) {
2088             set_dev_node(&dd->pcidev->dev, dd->node);
2089             dd_dev_err(dd,
2090                    "Unable to allocate credit return DMA range for NUMA %d\n",
2091                    i);
2092             ret = -ENOMEM;
2093             goto done;
2094         }
2095     }
2096     set_dev_node(&dd->pcidev->dev, dd->node);
2097 
2098     ret = 0;
2099 done:
2100     return ret;
2101 }
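
/*
 * Note (informational): credit return memory is allocated per NUMA
 * node (with the device node temporarily overridden via
 * set_dev_node()) so each send context can poll a credit return
 * location local to the node it is assigned to.
 */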
2102 
2103 void free_credit_return(struct hfi1_devdata *dd)
2104 {
2105     int i;
2106 
2107     if (!dd->cr_base)
2108         return;
2109     for (i = 0; i < node_affinity.num_possible_nodes; i++) {
2110         if (dd->cr_base[i].va) {
2111             dma_free_coherent(&dd->pcidev->dev,
2112                       TXE_NUM_CONTEXTS *
2113                       sizeof(struct credit_return),
2114                       dd->cr_base[i].va,
2115                       dd->cr_base[i].dma);
2116         }
2117     }
2118     kfree(dd->cr_base);
2119     dd->cr_base = NULL;
2120 }
2121 
2122 void seqfile_dump_sci(struct seq_file *s, u32 i,
2123               struct send_context_info *sci)
2124 {
2125     struct send_context *sc = sci->sc;
2126     u64 reg;
2127 
2128     seq_printf(s, "SCI %u: type %u base %u credits %u\n",
2129            i, sci->type, sci->base, sci->credits);
2130     seq_printf(s, "  flags 0x%x sw_inx %u hw_ctxt %u grp %u\n",
2131            sc->flags,  sc->sw_index, sc->hw_context, sc->group);
2132     seq_printf(s, "  sr_size %u credits %u sr_head %u sr_tail %u\n",
2133            sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail);
2134     seq_printf(s, "  fill %lu free %lu fill_wrap %u alloc_free %lu\n",
2135            sc->fill, sc->free, sc->fill_wrap, sc->alloc_free);
2136     seq_printf(s, "  credit_intr_count %u credit_ctrl 0x%llx\n",
2137            sc->credit_intr_count, sc->credit_ctrl);
2138     reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS));
2139     seq_printf(s, "  *hw_free %llu CurrentFree %llu LastReturned %llu\n",
2140            (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >>
2141             CR_COUNTER_SHIFT,
2142            (reg >> SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT)) &
2143             SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK),
2144            reg & SC(CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK));
2145 }