0001 // SPDX-License-Identifier: GPL-2.0
0002 
0003 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
0004  * Copyright (C) 2019-2021 Linaro Ltd.
0005  */
0006 
0007 #include <linux/types.h>
0008 #include <linux/device.h>
0009 #include <linux/slab.h>
0010 #include <linux/bitfield.h>
0011 #include <linux/if_rmnet.h>
0012 #include <linux/dma-direction.h>
0013 
0014 #include "gsi.h"
0015 #include "gsi_trans.h"
0016 #include "ipa.h"
0017 #include "ipa_data.h"
0018 #include "ipa_endpoint.h"
0019 #include "ipa_cmd.h"
0020 #include "ipa_mem.h"
0021 #include "ipa_modem.h"
0022 #include "ipa_table.h"
0023 #include "ipa_gsi.h"
0024 #include "ipa_power.h"
0025 
0026 #define atomic_dec_not_zero(v)  atomic_add_unless((v), -1, 0)
0027 
0028 /* Hardware is told about receive buffers once a "batch" has been queued */
0029 #define IPA_REPLENISH_BATCH 16      /* Must be non-zero */
0030 
0031 /* The amount of RX buffer space consumed by standard skb overhead */
0032 #define IPA_RX_BUFFER_OVERHEAD  (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
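
     /* Assuming the usual <linux/skbuff.h> definitions, where
      * SKB_MAX_ORDER(X, 0) expands to SKB_WITH_OVERHEAD(PAGE_SIZE - (X)),
      * this overhead works out to NET_SKB_PAD plus the aligned size of
      * struct skb_shared_info (that is, the headroom and shared-info
      * space an skb built over the buffer cannot use for packet data).
      */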
0033 
0034 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
0035 #define IPA_ENDPOINT_QMAP_METADATA_MASK     0x000000ff /* host byte order */
0036 
0037 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX   3
0038 
0039 /** enum ipa_status_opcode - status element opcode hardware values */
0040 enum ipa_status_opcode {
0041     IPA_STATUS_OPCODE_PACKET        = 0x01,
0042     IPA_STATUS_OPCODE_DROPPED_PACKET    = 0x04,
0043     IPA_STATUS_OPCODE_SUSPENDED_PACKET  = 0x08,
0044     IPA_STATUS_OPCODE_PACKET_2ND_PASS   = 0x40,
0045 };
0046 
0047 /** enum ipa_status_exception - status element exception type */
0048 enum ipa_status_exception {
0049     /* 0 means no exception */
0050     IPA_STATUS_EXCEPTION_DEAGGR     = 0x01,
0051 };
0052 
0053 /* Status element provided by hardware */
0054 struct ipa_status {
0055     u8 opcode;      /* enum ipa_status_opcode */
0056     u8 exception;       /* enum ipa_status_exception */
0057     __le16 mask;
0058     __le16 pkt_len;
0059     u8 endp_src_idx;
0060     u8 endp_dst_idx;
0061     __le32 metadata;
0062     __le32 flags1;
0063     __le64 flags2;
0064     __le32 flags3;
0065     __le32 flags4;
0066 };
0067 
0068 /* Field masks for struct ipa_status structure fields */
0069 #define IPA_STATUS_MASK_TAG_VALID_FMASK     GENMASK(4, 4)
0070 #define IPA_STATUS_SRC_IDX_FMASK        GENMASK(4, 0)
0071 #define IPA_STATUS_DST_IDX_FMASK        GENMASK(4, 0)
0072 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK  GENMASK(31, 22)
0073 #define IPA_STATUS_FLAGS2_TAG_FMASK     GENMASK_ULL(63, 16)
0074 
0075 static u32 aggr_byte_limit_max(enum ipa_version version)
0076 {
0077     if (version < IPA_VERSION_4_5)
0078         return field_max(aggr_byte_limit_fmask(true));
0079 
0080     return field_max(aggr_byte_limit_fmask(false));
0081 }
0082 
0083 /* Compute the aggregation size value to use for a given buffer size */
0084 static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
0085 {
0086     /* A hard aggregation limit will not be crossed; aggregation closes
0087      * if saving incoming data would cross the hard byte limit boundary.
0088      *
0089      * With a soft limit, aggregation closes *after* the size boundary
0090      * has been crossed.  In that case the limit must leave enough space
0091      * after that limit to receive a full MTU of data plus overhead.
0092      */
0093     if (!aggr_hard_limit)
0094         rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
0095 
0096     /* The byte limit is encoded as a number of kilobytes */
0097 
0098     return rx_buffer_size / SZ_1K;
0099 }
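
     /* A worked example with illustrative numbers (assumed here, not
      * taken from the hardware documentation): for an rx_buffer_size of
      * 8192, with IPA_MTU == 1500 and IPA_RX_BUFFER_OVERHEAD == 384, a
      * soft limit yields (8192 - 1500 - 384) / SZ_1K == 6 KB, while a
      * hard limit yields 8192 / SZ_1K == 8 KB.
      */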
0100 
0101 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
0102                 const struct ipa_gsi_endpoint_data *all_data,
0103                 const struct ipa_gsi_endpoint_data *data)
0104 {
0105     const struct ipa_gsi_endpoint_data *other_data;
0106     struct device *dev = &ipa->pdev->dev;
0107     enum ipa_endpoint_name other_name;
0108 
0109     if (ipa_gsi_endpoint_data_empty(data))
0110         return true;
0111 
0112     if (!data->toward_ipa) {
0113         const struct ipa_endpoint_rx *rx_config;
0114         u32 buffer_size;
0115         u32 aggr_size;
0116         u32 limit;
0117 
0118         if (data->endpoint.filter_support) {
0119             dev_err(dev, "filtering not supported for "
0120                     "RX endpoint %u\n",
0121                 data->endpoint_id);
0122             return false;
0123         }
0124 
0125         /* Nothing more to check for non-AP RX */
0126         if (data->ee_id != GSI_EE_AP)
0127             return true;
0128 
0129         rx_config = &data->endpoint.config.rx;
0130 
0131         /* The buffer size must hold an MTU plus overhead */
0132         buffer_size = rx_config->buffer_size;
0133         limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
0134         if (buffer_size < limit) {
0135             dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
0136                 data->endpoint_id, buffer_size, limit);
0137             return false;
0138         }
0139 
0140         if (!data->endpoint.config.aggregation) {
0141             bool result = true;
0142 
0143             /* No aggregation; check for bogus aggregation data */
0144             if (rx_config->aggr_time_limit) {
0145                 dev_err(dev,
0146                     "time limit with no aggregation for RX endpoint %u\n",
0147                     data->endpoint_id);
0148                 result = false;
0149             }
0150 
0151             if (rx_config->aggr_hard_limit) {
0152                 dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
0153                     data->endpoint_id);
0154                 result = false;
0155             }
0156 
0157             if (rx_config->aggr_close_eof) {
0158                 dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
0159                     data->endpoint_id);
0160                 result = false;
0161             }
0162 
0163             return result;  /* Nothing more to check */
0164         }
0165 
0166         /* For an endpoint supporting receive aggregation, the byte
0167          * limit defines the point at which aggregation closes.  This
0168          * check ensures the receive buffer size doesn't result in a
0169          * limit that exceeds what's representable in the aggregation
0170          * byte limit field.
0171          */
0172         aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
0173                          rx_config->aggr_hard_limit);
0174         limit = aggr_byte_limit_max(ipa->version);
0175         if (aggr_size > limit) {
0176             dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
0177                 data->endpoint_id, aggr_size, limit);
0178 
0179             return false;
0180         }
0181 
0182         return true;    /* Nothing more to check for RX */
0183     }
0184 
0185     if (data->endpoint.config.status_enable) {
0186         other_name = data->endpoint.config.tx.status_endpoint;
0187         if (other_name >= count) {
0188             dev_err(dev, "status endpoint name %u out of range "
0189                     "for endpoint %u\n",
0190                 other_name, data->endpoint_id);
0191             return false;
0192         }
0193 
0194         /* Status endpoint must be defined... */
0195         other_data = &all_data[other_name];
0196         if (ipa_gsi_endpoint_data_empty(other_data)) {
0197             dev_err(dev, "status endpoint name %u undefined "
0198                     "for endpoint %u\n",
0199                 other_name, data->endpoint_id);
0200             return false;
0201         }
0202 
0203         /* ...and has to be an RX endpoint... */
0204         if (other_data->toward_ipa) {
0205             dev_err(dev,
0206                 "status endpoint for endpoint %u not RX\n",
0207                 data->endpoint_id);
0208             return false;
0209         }
0210 
0211         /* ...and if it's to be an AP endpoint... */
0212         if (other_data->ee_id == GSI_EE_AP) {
0213             /* ...make sure it has status enabled. */
0214             if (!other_data->endpoint.config.status_enable) {
0215                 dev_err(dev,
0216                     "status not enabled for endpoint %u\n",
0217                     other_data->endpoint_id);
0218                 return false;
0219             }
0220         }
0221     }
0222 
0223     if (data->endpoint.config.dma_mode) {
0224         other_name = data->endpoint.config.dma_endpoint;
0225         if (other_name >= count) {
0226             dev_err(dev, "DMA endpoint name %u out of range "
0227                     "for endpoint %u\n",
0228                 other_name, data->endpoint_id);
0229             return false;
0230         }
0231 
0232         other_data = &all_data[other_name];
0233         if (ipa_gsi_endpoint_data_empty(other_data)) {
0234             dev_err(dev, "DMA endpoint name %u undefined "
0235                     "for endpoint %u\n",
0236                 other_name, data->endpoint_id);
0237             return false;
0238         }
0239     }
0240 
0241     return true;
0242 }
0243 
0244 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
0245                     const struct ipa_gsi_endpoint_data *data)
0246 {
0247     const struct ipa_gsi_endpoint_data *dp = data;
0248     struct device *dev = &ipa->pdev->dev;
0249     enum ipa_endpoint_name name;
0250 
0251     if (count > IPA_ENDPOINT_COUNT) {
0252         dev_err(dev, "too many endpoints specified (%u > %u)\n",
0253             count, IPA_ENDPOINT_COUNT);
0254         return false;
0255     }
0256 
0257     /* Make sure needed endpoints have defined data */
0258     if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
0259         dev_err(dev, "command TX endpoint not defined\n");
0260         return false;
0261     }
0262     if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
0263         dev_err(dev, "LAN RX endpoint not defined\n");
0264         return false;
0265     }
0266     if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
0267         dev_err(dev, "AP->modem TX endpoint not defined\n");
0268         return false;
0269     }
0270     if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
0271         dev_err(dev, "AP<-modem RX endpoint not defined\n");
0272         return false;
0273     }
0274 
0275     for (name = 0; name < count; name++, dp++)
0276         if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
0277             return false;
0278 
0279     return true;
0280 }
0281 
0282 /* Allocate a transaction to use on a non-command endpoint */
0283 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
0284                           u32 tre_count)
0285 {
0286     struct gsi *gsi = &endpoint->ipa->gsi;
0287     u32 channel_id = endpoint->channel_id;
0288     enum dma_data_direction direction;
0289 
0290     direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
0291 
0292     return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
0293 }
0294 
0295 /* suspend_delay represents suspend for RX, delay for TX endpoints.
0296  * Note that suspend is not supported starting with IPA v4.0, and
0297  * delay mode should not be used starting with IPA v4.2.
0298  */
0299 static bool
0300 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
0301 {
0302     u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
0303     struct ipa *ipa = endpoint->ipa;
0304     bool state;
0305     u32 mask;
0306     u32 val;
0307 
0308     if (endpoint->toward_ipa)
0309         WARN_ON(ipa->version >= IPA_VERSION_4_2);
0310     else
0311         WARN_ON(ipa->version >= IPA_VERSION_4_0);
0312 
0313     mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
0314 
0315     val = ioread32(ipa->reg_virt + offset);
0316     state = !!(val & mask);
0317 
0318     /* Don't bother if it's already in the requested state */
0319     if (suspend_delay != state) {
0320         val ^= mask;
0321         iowrite32(val, ipa->reg_virt + offset);
0322     }
0323 
0324     return state;
0325 }
0326 
0327 /* We don't care what the previous state was for delay mode */
0328 static void
0329 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
0330 {
0331     /* Delay mode should not be used for IPA v4.2+ */
0332     WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
0333     WARN_ON(!endpoint->toward_ipa);
0334 
0335     (void)ipa_endpoint_init_ctrl(endpoint, enable);
0336 }
0337 
0338 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
0339 {
0340     u32 mask = BIT(endpoint->endpoint_id);
0341     struct ipa *ipa = endpoint->ipa;
0342     u32 offset;
0343     u32 val;
0344 
0345     WARN_ON(!(mask & ipa->available));
0346 
0347     offset = ipa_reg_state_aggr_active_offset(ipa->version);
0348     val = ioread32(ipa->reg_virt + offset);
0349 
0350     return !!(val & mask);
0351 }
0352 
0353 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
0354 {
0355     u32 mask = BIT(endpoint->endpoint_id);
0356     struct ipa *ipa = endpoint->ipa;
0357 
0358     WARN_ON(!(mask & ipa->available));
0359 
0360     iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
0361 }
0362 
0363 /**
0364  * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
0365  * @endpoint:   Endpoint on which to emulate a suspend
0366  *
0367  *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
0368  *  with an open aggregation frame.  This is to work around a hardware
0369  *  issue in IPA version 3.5.1 where the suspend interrupt will not be
0370  *  generated when it should be.
0371  */
0372 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
0373 {
0374     struct ipa *ipa = endpoint->ipa;
0375 
0376     if (!endpoint->config.aggregation)
0377         return;
0378 
0379     /* Nothing to do if the endpoint doesn't have aggregation open */
0380     if (!ipa_endpoint_aggr_active(endpoint))
0381         return;
0382 
0383     /* Force close aggregation */
0384     ipa_endpoint_force_close(endpoint);
0385 
0386     ipa_interrupt_simulate_suspend(ipa->interrupt);
0387 }
0388 
0389 /* Returns previous suspend state (true means suspend was enabled) */
0390 static bool
0391 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
0392 {
0393     bool suspended;
0394 
0395     if (endpoint->ipa->version >= IPA_VERSION_4_0)
0396         return enable;  /* For IPA v4.0+, no change made */
0397 
0398     WARN_ON(endpoint->toward_ipa);
0399 
0400     suspended = ipa_endpoint_init_ctrl(endpoint, enable);
0401 
0402     /* A client suspended with an open aggregation frame will not
0403      * generate a SUSPEND IPA interrupt.  If enabling suspend, have
0404      * ipa_endpoint_suspend_aggr() handle this.
0405      */
0406     if (enable && !suspended)
0407         ipa_endpoint_suspend_aggr(endpoint);
0408 
0409     return suspended;
0410 }
0411 
0412 /* Put all modem RX endpoints into suspend mode, and stop transmission
0413  * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
0414  * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
0415  * control instead.
0416  */
0417 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
0418 {
0419     u32 endpoint_id;
0420 
0421     for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
0422         struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
0423 
0424         if (endpoint->ee_id != GSI_EE_MODEM)
0425             continue;
0426 
0427         if (!endpoint->toward_ipa)
0428             (void)ipa_endpoint_program_suspend(endpoint, enable);
0429         else if (ipa->version < IPA_VERSION_4_2)
0430             ipa_endpoint_program_delay(endpoint, enable);
0431         else
0432             gsi_modem_channel_flow_control(&ipa->gsi,
0433                                endpoint->channel_id,
0434                                enable);
0435     }
0436 }
0437 
0438 /* Reset all modem endpoints to use the default exception endpoint */
0439 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
0440 {
0441     u32 initialized = ipa->initialized;
0442     struct gsi_trans *trans;
0443     u32 count;
0444 
0445     /* We need one command per modem TX endpoint, plus the commands
0446      * that clear the pipeline.
0447      */
0448     count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
0449     trans = ipa_cmd_trans_alloc(ipa, count);
0450     if (!trans) {
0451         dev_err(&ipa->pdev->dev,
0452             "no transaction to reset modem exception endpoints\n");
0453         return -EBUSY;
0454     }
0455 
0456     while (initialized) {
0457         u32 endpoint_id = __ffs(initialized);
0458         struct ipa_endpoint *endpoint;
0459         u32 offset;
0460 
0461         initialized ^= BIT(endpoint_id);
0462 
0463         /* We only reset modem TX endpoints */
0464         endpoint = &ipa->endpoint[endpoint_id];
0465         if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
0466             continue;
0467 
0468         offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
0469 
0470         /* Value written is 0, and all bits are updated.  That
0471          * means status is disabled on the endpoint, and as a
0472          * result all other fields in the register are ignored.
0473          */
0474         ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
0475     }
0476 
0477     ipa_cmd_pipeline_clear_add(trans);
0478 
0479     gsi_trans_commit_wait(trans);
0480 
0481     ipa_cmd_pipeline_clear_wait(ipa);
0482 
0483     return 0;
0484 }
0485 
0486 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
0487 {
0488     u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
0489     enum ipa_cs_offload_en enabled;
0490     u32 val = 0;
0491 
0492     /* FRAG_OFFLOAD_EN is 0 */
0493     if (endpoint->config.checksum) {
0494         enum ipa_version version = endpoint->ipa->version;
0495 
0496         if (endpoint->toward_ipa) {
0497             u32 checksum_offset;
0498 
0499             /* Checksum header offset is in 4-byte units */
0500             checksum_offset = sizeof(struct rmnet_map_header);
0501             checksum_offset /= sizeof(u32);
0502             val |= u32_encode_bits(checksum_offset,
0503                            CS_METADATA_HDR_OFFSET_FMASK);
0504 
0505             enabled = version < IPA_VERSION_4_5
0506                     ? IPA_CS_OFFLOAD_UL
0507                     : IPA_CS_OFFLOAD_INLINE;
0508         } else {
0509             enabled = version < IPA_VERSION_4_5
0510                     ? IPA_CS_OFFLOAD_DL
0511                     : IPA_CS_OFFLOAD_INLINE;
0512         }
0513     } else {
0514         enabled = IPA_CS_OFFLOAD_NONE;
0515     }
0516     val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
0517     /* CS_GEN_QMB_MASTER_SEL is 0 */
0518 
0519     iowrite32(val, endpoint->ipa->reg_virt + offset);
0520 }
0521 
0522 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
0523 {
0524     u32 offset;
0525     u32 val;
0526 
0527     if (!endpoint->toward_ipa)
0528         return;
0529 
0530     offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
0531     val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);
0532 
0533     iowrite32(val, endpoint->ipa->reg_virt + offset);
0534 }
0535 
0536 static u32
0537 ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
0538 {
0539     u32 header_size = sizeof(struct rmnet_map_header);
0540 
0541     /* Without checksum offload, we just have the MAP header */
0542     if (!endpoint->config.checksum)
0543         return header_size;
0544 
0545     if (version < IPA_VERSION_4_5) {
0546         /* Checksum header inserted for AP TX endpoints only */
0547         if (endpoint->toward_ipa)
0548             header_size += sizeof(struct rmnet_map_ul_csum_header);
0549     } else {
0550         /* Checksum header is used in both directions */
0551         header_size += sizeof(struct rmnet_map_v5_csum_header);
0552     }
0553 
0554     return header_size;
0555 }
0556 
0557 /**
0558  * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
0559  * @endpoint:   Endpoint pointer
0560  *
0561  * We program QMAP endpoints so each packet received is preceded by a QMAP
0562  * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
0563  * packet size field, and we have the IPA hardware populate both for each
0564  * received packet.  The header is configured (in the HDR_EXT register)
0565  * to use big endian format.
0566  *
0567  * The packet size is written into the QMAP header's pkt_len field.  That
0568  * location is defined here using the HDR_OFST_PKT_SIZE field.
0569  *
0570  * The mux_id comes from a 4-byte metadata value supplied with each packet
0571  * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
0572  * value that we want, in its low-order byte.  A bitmask defined in the
0573  * endpoint's METADATA_MASK register defines which byte within the modem
0574  * metadata contains the mux_id.  And the OFST_METADATA field programmed
0575  * here indicates where the extracted byte should be placed within the QMAP
0576  * header.
0577  */
0578 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
0579 {
0580     u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
0581     struct ipa *ipa = endpoint->ipa;
0582     u32 val = 0;
0583 
0584     if (endpoint->config.qmap) {
0585         enum ipa_version version = ipa->version;
0586         size_t header_size;
0587 
0588         header_size = ipa_qmap_header_size(version, endpoint);
0589         val = ipa_header_size_encoded(version, header_size);
0590 
0591         /* Define how to fill fields in a received QMAP header */
0592         if (!endpoint->toward_ipa) {
0593             u32 offset; /* Field offset within header */
0594 
0595             /* Where IPA will write the metadata value */
0596             offset = offsetof(struct rmnet_map_header, mux_id);
0597             val |= ipa_metadata_offset_encoded(version, offset);
0598 
0599             /* Where IPA will write the length */
0600             offset = offsetof(struct rmnet_map_header, pkt_len);
0601             /* Upper bits are stored in HDR_EXT with IPA v4.5 */
0602             if (version >= IPA_VERSION_4_5)
0603                 offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
0604 
0605             val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
0606             val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
0607         }
0608         /* For QMAP TX, metadata offset is 0 (modem assumes this) */
0609         val |= HDR_OFST_METADATA_VALID_FMASK;
0610 
0611         /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
0612         /* HDR_A5_MUX is 0 */
0613         /* HDR_LEN_INC_DEAGG_HDR is 0 */
0614         /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
0615     }
0616 
0617     iowrite32(val, ipa->reg_virt + offset);
0618 }
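
     /* For reference, struct rmnet_map_header (<linux/if_rmnet.h>) is a
      * four-byte header with mux_id at offset 1 and a big-endian
      * pkt_len at offset 2 (per current kernels; the header file is
      * authoritative).  With IPA_ENDPOINT_QMAP_METADATA_MASK set to
      * 0x000000ff, the low-order byte of the modem-supplied metadata is
      * what gets written into the QMAP header's mux_id field on RX.
      */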
0619 
0620 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
0621 {
0622     u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
0623     u32 pad_align = endpoint->config.rx.pad_align;
0624     struct ipa *ipa = endpoint->ipa;
0625     u32 val = 0;
0626 
0627     if (endpoint->config.qmap) {
0628         /* We have a header, so we must specify its endianness */
0629         val |= HDR_ENDIANNESS_FMASK;    /* big endian */
0630 
0631         /* A QMAP header contains a 6 bit pad field at offset 0.
0632          * The RMNet driver assumes this field is meaningful in
0633          * packets it receives, and assumes the header's payload
0634          * length includes that padding.  The RMNet driver does
0635          * *not* pad packets it sends, however, so the pad field
0636          * (although 0) should be ignored.
0637          */
0638         if (!endpoint->toward_ipa) {
0639             val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
0640             /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
0641             val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
0642             /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
0643         }
0644     }
0645 
0646     /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
0647     if (!endpoint->toward_ipa)
0648         val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
0649 
0650     /* IPA v4.5 adds some most-significant bits to a few fields,
0651      * two of which are defined in the HDR (not HDR_EXT) register.
0652      */
0653     if (ipa->version >= IPA_VERSION_4_5) {
0654         /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
0655         if (endpoint->config.qmap && !endpoint->toward_ipa) {
0656             u32 offset;
0657 
0658             offset = offsetof(struct rmnet_map_header, pkt_len);
0659             offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
0660             val |= u32_encode_bits(offset,
0661                            HDR_OFST_PKT_SIZE_MSB_FMASK);
0662             /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
0663         }
0664     }
0665     iowrite32(val, ipa->reg_virt + offset);
0666 }
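
     /* To illustrate the offset split: pkt_len sits at offset 2 in the
      * QMAP header, so assuming (for illustration) a 6-bit
      * HDR_OFST_PKT_SIZE field, the HDR register's low-order bits
      * encode 2 and the MSB value computed above (2 >> 6) is simply 0.
      * The MSB field only matters for offsets too large for the
      * original field.
      */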
0667 
0668 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
0669 {
0670     u32 endpoint_id = endpoint->endpoint_id;
0671     u32 val = 0;
0672     u32 offset;
0673 
0674     if (endpoint->toward_ipa)
0675         return;     /* Register not valid for TX endpoints */
0676 
0677     offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
0678 
0679     /* Note that HDR_ENDIANNESS indicates big endian header fields */
0680     if (endpoint->config.qmap)
0681         val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
0682 
0683     iowrite32(val, endpoint->ipa->reg_virt + offset);
0684 }
0685 
0686 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
0687 {
0688     u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
0689     u32 val;
0690 
0691     if (!endpoint->toward_ipa)
0692         return;     /* Register not valid for RX endpoints */
0693 
0694     if (endpoint->config.dma_mode) {
0695         enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
0696         u32 dma_endpoint_id;
0697 
0698         dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
0699 
0700         val = u32_encode_bits(IPA_DMA, MODE_FMASK);
0701         val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
0702     } else {
0703         val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
0704     }
0705     /* All other bits unspecified (and 0) */
0706 
0707     iowrite32(val, endpoint->ipa->reg_virt + offset);
0708 }
0709 
0710 /* Encoded values for AGGR endpoint register fields */
0711 static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
0712 {
0713     if (version < IPA_VERSION_4_5)
0714         return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
0715 
0716     return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
0717 }
0718 
0719 /* Encode the aggregation timer limit (microseconds) based on IPA version */
0720 static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
0721 {
0722     u32 gran_sel;
0723     u32 fmask;
0724     u32 val;
0725 
0726     if (version < IPA_VERSION_4_5) {
0727         /* We set aggregation granularity in ipa_hardware_config() */
0728         fmask = aggr_time_limit_fmask(true);
0729         val = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
0730         WARN(val > field_max(fmask),
0731              "aggr_time_limit too large (%u > %u usec)\n",
0732              val, field_max(fmask) * IPA_AGGR_GRANULARITY);
0733 
0734         return u32_encode_bits(val, fmask);
0735     }
0736 
0737     /* IPA v4.5 expresses the time limit using Qtime.  The AP has
0738      * pulse generators 0 and 1 available, which were configured
0739      * in ipa_qtime_config() to have granularity 100 usec and
0740      * 1 msec, respectively.  Use pulse generator 0 if possible,
0741      * otherwise fall back to pulse generator 1.
0742      */
0743     fmask = aggr_time_limit_fmask(false);
0744     val = DIV_ROUND_CLOSEST(limit, 100);
0745     if (val > field_max(fmask)) {
0746         /* Have to use pulse generator 1 (millisecond granularity) */
0747         gran_sel = AGGR_GRAN_SEL_FMASK;
0748         val = DIV_ROUND_CLOSEST(limit, 1000);
0749         WARN(val > field_max(fmask),
0750              "aggr_time_limit too large (%u > %u usec)\n",
0751              limit, field_max(fmask) * 1000);
0752     } else {
0753         /* We can use pulse generator 0 (100 usec granularity) */
0754         gran_sel = 0;
0755     }
0756 
0757     return gran_sel | u32_encode_bits(val, fmask);
0758 }
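
     /* Worked example for the IPA v4.5+ path, assuming (for
      * illustration only) a 5-bit time limit field (field_max() == 31):
      * a 500 usec limit encodes as 5 using pulse generator 0 (100 usec
      * granularity), while a 10000 usec limit (100 units of 100 usec)
      * overflows the field and falls back to pulse generator 1,
      * encoding as 10 with AGGR_GRAN_SEL_FMASK set.
      */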
0759 
0760 static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
0761 {
0762     u32 val = enabled ? 1 : 0;
0763 
0764     if (version < IPA_VERSION_4_5)
0765         return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
0766 
0767     return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
0768 }
0769 
0770 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
0771 {
0772     u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
0773     enum ipa_version version = endpoint->ipa->version;
0774     u32 val = 0;
0775 
0776     if (endpoint->config.aggregation) {
0777         if (!endpoint->toward_ipa) {
0778             const struct ipa_endpoint_rx *rx_config;
0779             u32 buffer_size;
0780             bool close_eof;
0781             u32 limit;
0782 
0783             rx_config = &endpoint->config.rx;
0784             val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
0785             val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
0786 
0787             buffer_size = rx_config->buffer_size;
0788             limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
0789                          rx_config->aggr_hard_limit);
0790             val |= aggr_byte_limit_encoded(version, limit);
0791 
0792             limit = rx_config->aggr_time_limit;
0793             val |= aggr_time_limit_encoded(version, limit);
0794 
0795             /* AGGR_PKT_LIMIT is 0 (unlimited) */
0796 
0797             close_eof = rx_config->aggr_close_eof;
0798             val |= aggr_sw_eof_active_encoded(version, close_eof);
0799         } else {
0800             val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
0801                            AGGR_EN_FMASK);
0802             val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
0803             /* other fields ignored */
0804         }
0805         /* AGGR_FORCE_CLOSE is 0 */
0806         /* AGGR_GRAN_SEL is 0 for IPA v4.5 */
0807     } else {
0808         val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
0809         /* other fields ignored */
0810     }
0811 
0812     iowrite32(val, endpoint->ipa->reg_virt + offset);
0813 }
0814 
0815 /* Return the Qtime-based head-of-line blocking timer value that
0816  * represents the given number of microseconds.  The result
0817  * includes both the timer value and the selected timer granularity.
0818  */
0819 static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
0820 {
0821     u32 gran_sel;
0822     u32 val;
0823 
0824     /* IPA v4.5 expresses time limits using Qtime.  The AP has
0825      * pulse generators 0 and 1 available, which were configured
0826      * in ipa_qtime_config() to have granularity 100 usec and
0827      * 1 msec, respectively.  Use pulse generator 0 if possible,
0828      * otherwise fall back to pulse generator 1.
0829      */
0830     val = DIV_ROUND_CLOSEST(microseconds, 100);
0831     if (val > field_max(TIME_LIMIT_FMASK)) {
0832         /* Have to use pulse generator 1 (millisecond granularity) */
0833         gran_sel = GRAN_SEL_FMASK;
0834         val = DIV_ROUND_CLOSEST(microseconds, 1000);
0835     } else {
0836         /* We can use pulse generator 0 (100 usec granularity) */
0837         gran_sel = 0;
0838     }
0839 
0840     return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
0841 }
0842 
0843 /* The head-of-line blocking timer is defined as a tick count.  For
0844  * IPA version 4.5 the tick count is based on the Qtimer, which is
0845  * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
0846  * each tick represents 128 cycles of the IPA core clock.
0847  *
0848  * Return the encoded value to be written to the head-of-line
0849  * blocking timer register, representing the timeout period
0850  * provided.  For IPA v4.2 this encodes a base and scale value,
0851  * while for earlier versions the value is a simple tick count.
0852  */
0853 static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
0854 {
0855     u32 width;
0856     u32 scale;
0857     u64 ticks;
0858     u64 rate;
0859     u32 high;
0860     u32 val;
0861 
0862     if (!microseconds)
0863         return 0;   /* Nothing to compute if timer period is 0 */
0864 
0865     if (ipa->version >= IPA_VERSION_4_5)
0866         return hol_block_timer_qtime_val(ipa, microseconds);
0867 
0868     /* Use 64 bit arithmetic to avoid overflow... */
0869     rate = ipa_core_clock_rate(ipa);
0870     ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
0871     /* ...but we still need to fit into a 32-bit register */
0872     WARN_ON(ticks > U32_MAX);
0873 
0874     /* IPA v3.5.1 through v4.1 just record the tick count */
0875     if (ipa->version < IPA_VERSION_4_2)
0876         return (u32)ticks;
0877 
0878     /* For IPA v4.2, the tick count is represented by base and
0879      * scale fields within the 32-bit timer register, where:
0880      *     ticks = base << scale;
0881      * The best precision is achieved when the base value is as
0882      * large as possible.  Find the highest set bit in the tick
0883      * count, and extract the number of bits in the base field
0884      * such that high bit is included.
0885      */
0886     high = fls(ticks);      /* 1..32 */
0887     width = HWEIGHT32(BASE_VALUE_FMASK);
0888     scale = high > width ? high - width : 0;
0889     if (scale) {
0890         /* If we're scaling, round up to get a closer result */
0891         ticks += 1 << (scale - 1);
0892         /* High bit was set, so rounding might have affected it */
0893         if (fls(ticks) != high)
0894             scale++;
0895     }
0896 
0897     val = u32_encode_bits(scale, SCALE_FMASK);
0898     val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
0899 
0900     return val;
0901 }
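
     /* Worked example of the IPA v4.2 base/scale encoding, assuming
      * (for illustration only) a 5-bit BASE_VALUE field: for ticks ==
      * 10000, fls() returns 14, so scale == 14 - 5 == 9.  Rounding adds
      * 1 << 8 == 256 (fls() is still 14), and base == 10256 >> 9 == 20.
      * The hardware then counts 20 << 9 == 10240 ticks, close to the
      * requested 10000.
      */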
0902 
0903 /* If microseconds is 0, timeout is immediate */
0904 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
0905                           u32 microseconds)
0906 {
0907     u32 endpoint_id = endpoint->endpoint_id;
0908     struct ipa *ipa = endpoint->ipa;
0909     u32 offset;
0910     u32 val;
0911 
0912     /* This should only be changed when HOL_BLOCK_EN is disabled */
0913     offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
0914     val = hol_block_timer_val(ipa, microseconds);
0915     iowrite32(val, ipa->reg_virt + offset);
0916 }
0917 
0918 static void
0919 ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
0920 {
0921     u32 endpoint_id = endpoint->endpoint_id;
0922     u32 offset;
0923     u32 val;
0924 
0925     val = enable ? HOL_BLOCK_EN_FMASK : 0;
0926     offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
0927     iowrite32(val, endpoint->ipa->reg_virt + offset);
0928     /* When enabling, the register must be written twice for IPA v4.5+ */
0929     if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
0930         iowrite32(val, endpoint->ipa->reg_virt + offset);
0931 }
0932 
0933 /* Assumes HOL_BLOCK is in disabled state */
0934 static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
0935                            u32 microseconds)
0936 {
0937     ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
0938     ipa_endpoint_init_hol_block_en(endpoint, true);
0939 }
0940 
0941 static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
0942 {
0943     ipa_endpoint_init_hol_block_en(endpoint, false);
0944 }
0945 
0946 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
0947 {
0948     u32 i;
0949 
0950     for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
0951         struct ipa_endpoint *endpoint = &ipa->endpoint[i];
0952 
0953         if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
0954             continue;
0955 
0956         ipa_endpoint_init_hol_block_disable(endpoint);
0957         ipa_endpoint_init_hol_block_enable(endpoint, 0);
0958     }
0959 }
0960 
0961 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
0962 {
0963     u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
0964     u32 val = 0;
0965 
0966     if (!endpoint->toward_ipa)
0967         return;     /* Register not valid for RX endpoints */
0968 
0969     /* DEAGGR_HDR_LEN is 0 */
0970     /* PACKET_OFFSET_VALID is 0 */
0971     /* PACKET_OFFSET_LOCATION is ignored (not valid) */
0972     /* MAX_PACKET_LEN is 0 (not enforced) */
0973 
0974     iowrite32(val, endpoint->ipa->reg_virt + offset);
0975 }
0976 
0977 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
0978 {
0979     u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
0980     struct ipa *ipa = endpoint->ipa;
0981     u32 val;
0982 
0983     val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
0984     iowrite32(val, ipa->reg_virt + offset);
0985 }
0986 
0987 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
0988 {
0989     u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
0990     u32 val = 0;
0991 
0992     if (!endpoint->toward_ipa)
0993         return;     /* Register not valid for RX endpoints */
0994 
0995     /* Low-order byte configures primary packet processing */
0996     val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);
0997 
0998     /* Second byte configures replicated packet processing */
0999     val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
1000                    SEQ_REP_TYPE_FMASK);
1001 
1002     iowrite32(val, endpoint->ipa->reg_virt + offset);
1003 }
1004 
1005 /**
1006  * ipa_endpoint_skb_tx() - Transmit a socket buffer
1007  * @endpoint:   Endpoint pointer
1008  * @skb:    Socket buffer to send
1009  *
1010  * Returns: 0 if successful, or a negative error code
1011  */
1012 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
1013 {
1014     struct gsi_trans *trans;
1015     u32 nr_frags;
1016     int ret;
1017 
1018     /* Make sure source endpoint's TLV FIFO has enough entries to
1019      * hold the linear portion of the skb and all its fragments.
1020      * If not, see if we can linearize it before giving up.
1021      */
1022     nr_frags = skb_shinfo(skb)->nr_frags;
1023     if (nr_frags > endpoint->skb_frag_max) {
1024         if (skb_linearize(skb))
1025             return -E2BIG;
1026         nr_frags = 0;
1027     }
1028 
1029     trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
1030     if (!trans)
1031         return -EBUSY;
1032 
1033     ret = gsi_trans_skb_add(trans, skb);
1034     if (ret)
1035         goto err_trans_free;
1036     trans->data = skb;  /* transaction owns skb now */
1037 
1038     gsi_trans_commit(trans, !netdev_xmit_more());
1039 
1040     return 0;
1041 
1042 err_trans_free:
1043     gsi_trans_free(trans);
1044 
1045     return -ENOMEM;
1046 }
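
     /* In other words, a transaction uses one TRE for the skb's linear
      * data plus one per page fragment.  If, say, skb_frag_max were 3
      * and an skb arrived with 5 fragments, skb_linearize() collapses
      * it into a single linear buffer so only one TRE is needed.
      */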
1047 
1048 static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
1049 {
1050     u32 endpoint_id = endpoint->endpoint_id;
1051     struct ipa *ipa = endpoint->ipa;
1052     u32 val = 0;
1053     u32 offset;
1054 
1055     offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
1056 
1057     if (endpoint->config.status_enable) {
1058         val |= STATUS_EN_FMASK;
1059         if (endpoint->toward_ipa) {
1060             enum ipa_endpoint_name name;
1061             u32 status_endpoint_id;
1062 
1063             name = endpoint->config.tx.status_endpoint;
1064             status_endpoint_id = ipa->name_map[name]->endpoint_id;
1065 
1066             val |= u32_encode_bits(status_endpoint_id,
1067                            STATUS_ENDP_FMASK);
1068         }
1069         /* STATUS_LOCATION is 0, meaning status element precedes
1070          * packet (not present for IPA v4.5)
1071          */
1072         /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
1073     }
1074 
1075     iowrite32(val, ipa->reg_virt + offset);
1076 }
1077 
1078 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
1079                       struct gsi_trans *trans)
1080 {
1081     struct page *page;
1082     u32 buffer_size;
1083     u32 offset;
1084     u32 len;
1085     int ret;
1086 
1087     buffer_size = endpoint->config.rx.buffer_size;
1088     page = dev_alloc_pages(get_order(buffer_size));
1089     if (!page)
1090         return -ENOMEM;
1091 
1092     /* Offset the buffer to make space for skb headroom */
1093     offset = NET_SKB_PAD;
1094     len = buffer_size - offset;
1095 
1096     ret = gsi_trans_page_add(trans, page, len, offset);
1097     if (ret)
1098         put_page(page);
1099     else
1100         trans->data = page; /* transaction owns page now */
1101 
1102     return ret;
1103 }
1104 
1105 /**
1106  * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1107  * @endpoint:   Endpoint to be replenished
1108  *
1109  * The IPA hardware can hold a fixed number of receive buffers for an RX
1110  * endpoint, based on the number of entries in the underlying channel ring
1111  * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
1112  * more receive buffers can be supplied to the hardware.  Replenishing for
1113  * an endpoint can be disabled, in which case buffers are not queued to
1114  * the hardware.
1115  */
1116 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
1117 {
1118     struct gsi_trans *trans;
1119 
1120     if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
1121         return;
1122 
1123     /* Skip it if it's already active */
1124     if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
1125         return;
1126 
1127     while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
1128         bool doorbell;
1129 
1130         if (ipa_endpoint_replenish_one(endpoint, trans))
1131             goto try_again_later;
1132 
1133 
1134         /* Ring the doorbell if we've got a full batch */
1135         doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
1136         gsi_trans_commit(trans, doorbell);
1137     }
1138 
1139     clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1140 
1141     return;
1142 
1143 try_again_later:
1144     gsi_trans_free(trans);
1145     clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1146 
1147     /* Whenever a receive buffer transaction completes we'll try to
1148      * replenish again.  It's unlikely, but if we fail to supply even
1149      * one buffer, nothing will trigger another replenish attempt.
1150      * If the hardware has no receive buffers queued, schedule work to
1151      * try replenishing again.
1152      */
1153     if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1154         schedule_delayed_work(&endpoint->replenish_work,
1155                       msecs_to_jiffies(1));
1156 }
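
     /* Note that with IPA_REPLENISH_BATCH == 16 the doorbell is rung on
      * every 16th buffer committed here; transactions committed in
      * between are queued and become visible to the hardware with that
      * batched doorbell ring.
      */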
1157 
1158 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1159 {
1160     set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1161 
1162     /* Start replenishing if hardware currently has no buffers */
1163     if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1164         ipa_endpoint_replenish(endpoint);
1165 }
1166 
1167 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1168 {
1169     clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1170 }
1171 
1172 static void ipa_endpoint_replenish_work(struct work_struct *work)
1173 {
1174     struct delayed_work *dwork = to_delayed_work(work);
1175     struct ipa_endpoint *endpoint;
1176 
1177     endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1178 
1179     ipa_endpoint_replenish(endpoint);
1180 }
1181 
1182 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1183                   void *data, u32 len, u32 extra)
1184 {
1185     struct sk_buff *skb;
1186 
1187     if (!endpoint->netdev)
1188         return;
1189 
1190     skb = __dev_alloc_skb(len, GFP_ATOMIC);
1191     if (skb) {
1192         /* Copy the data into the socket buffer and receive it */
1193         skb_put(skb, len);
1194         memcpy(skb->data, data, len);
1195         skb->truesize += extra;
1196     }
1197 
1198     ipa_modem_skb_rx(endpoint->netdev, skb);
1199 }
1200 
1201 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1202                    struct page *page, u32 len)
1203 {
1204     u32 buffer_size = endpoint->config.rx.buffer_size;
1205     struct sk_buff *skb;
1206 
1207     /* Nothing to do if there's no netdev */
1208     if (!endpoint->netdev)
1209         return false;
1210 
1211     WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
1212 
1213     skb = build_skb(page_address(page), buffer_size);
1214     if (skb) {
1215         /* Reserve the headroom and account for the data */
1216         skb_reserve(skb, NET_SKB_PAD);
1217         skb_put(skb, len);
1218     }
1219 
1220     /* Receive the buffer (or record drop if unable to build it) */
1221     ipa_modem_skb_rx(endpoint->netdev, skb);
1222 
1223     return skb != NULL;
1224 }
1225 
1226 /* The format of a packet status element is the same for several status
1227  * types (opcodes).  Other types aren't currently supported.
1228  */
1229 static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1230 {
1231     switch (opcode) {
1232     case IPA_STATUS_OPCODE_PACKET:
1233     case IPA_STATUS_OPCODE_DROPPED_PACKET:
1234     case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1235     case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1236         return true;
1237     default:
1238         return false;
1239     }
1240 }
1241 
1242 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
1243                      const struct ipa_status *status)
1244 {
1245     u32 endpoint_id;
1246 
1247     if (!ipa_status_format_packet(status->opcode))
1248         return true;
1249     if (!status->pkt_len)
1250         return true;
1251     endpoint_id = u8_get_bits(status->endp_dst_idx,
1252                   IPA_STATUS_DST_IDX_FMASK);
1253     if (endpoint_id != endpoint->endpoint_id)
1254         return true;
1255 
1256     return false;   /* Don't skip this packet, process it */
1257 }
1258 
1259 static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
1260                     const struct ipa_status *status)
1261 {
1262     struct ipa_endpoint *command_endpoint;
1263     struct ipa *ipa = endpoint->ipa;
1264     u32 endpoint_id;
1265 
1266     if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
1267         return false;   /* No valid tag */
1268 
1269     /* The status contains a valid tag.  We know the packet was sent to
1270      * this endpoint (already verified by ipa_endpoint_status_skip()).
1271      * If the packet came from the AP->command TX endpoint we know
1272      * this packet was sent as part of the pipeline clear process.
1273      */
1274     endpoint_id = u8_get_bits(status->endp_src_idx,
1275                   IPA_STATUS_SRC_IDX_FMASK);
1276     command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1277     if (endpoint_id == command_endpoint->endpoint_id) {
1278         complete(&ipa->completion);
1279     } else {
1280         dev_err(&ipa->pdev->dev,
1281             "unexpected tagged packet from endpoint %u\n",
1282             endpoint_id);
1283     }
1284 
1285     return true;
1286 }
1287 
1288 /* Return whether the status indicates the packet should be dropped */
1289 static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
1290                      const struct ipa_status *status)
1291 {
1292     u32 val;
1293 
1294     /* If the status indicates a tagged transfer, we'll drop the packet */
1295     if (ipa_endpoint_status_tag(endpoint, status))
1296         return true;
1297 
1298     /* Deaggregation exceptions we drop; all other types we consume */
1299     if (status->exception)
1300         return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
1301 
1302     /* Drop the packet if it fails to match a routing rule; otherwise no */
1303     val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1304 
1305     return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1306 }
1307 
1308 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1309                       struct page *page, u32 total_len)
1310 {
1311     u32 buffer_size = endpoint->config.rx.buffer_size;
1312     void *data = page_address(page) + NET_SKB_PAD;
1313     u32 unused = buffer_size - total_len;
1314     u32 resid = total_len;
1315 
1316     while (resid) {
1317         const struct ipa_status *status = data;
1318         u32 align;
1319         u32 len;
1320 
1321         if (resid < sizeof(*status)) {
1322             dev_err(&endpoint->ipa->pdev->dev,
1323                 "short message (%u bytes < %zu byte status)\n",
1324                 resid, sizeof(*status));
1325             break;
1326         }
1327 
1328         /* Skip over status packets that lack packet data */
1329         if (ipa_endpoint_status_skip(endpoint, status)) {
1330             data += sizeof(*status);
1331             resid -= sizeof(*status);
1332             continue;
1333         }
1334 
1335         /* Compute the amount of buffer space consumed by the packet,
1336          * including the status element.  If the hardware is configured
1337          * to pad packet data to an aligned boundary, account for that.
1338          * And if checksum offload is enabled a trailer containing
1339          * computed checksum information will be appended.
1340          */
1341         align = endpoint->config.rx.pad_align ? : 1;
1342         len = le16_to_cpu(status->pkt_len);
1343         len = sizeof(*status) + ALIGN(len, align);
1344         if (endpoint->config.checksum)
1345             len += sizeof(struct rmnet_map_dl_csum_trailer);
1346 
1347         if (!ipa_endpoint_status_drop(endpoint, status)) {
1348             void *data2;
1349             u32 extra;
1350             u32 len2;
1351 
1352             /* Client receives only packet data (no status) */
1353             data2 = data + sizeof(*status);
1354             len2 = le16_to_cpu(status->pkt_len);
1355 
1356             /* Have the true size reflect the extra unused space in
1357              * the original receive buffer.  Distribute the "cost"
1358              * proportionately across all aggregated packets in the
1359              * buffer.
1360              */
1361             extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1362             ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1363         }
1364 
1365         /* Consume status and the full packet it describes */
1366         data += len;
1367         resid -= len;
1368     }
1369 }
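
     /* Example of the length computation above: struct ipa_status as
      * defined in this file is 32 bytes, so a packet whose status
      * reports pkt_len == 1000 on an endpoint with no pad_align
      * consumes 32 + 1000 == 1032 bytes of the buffer, plus the
      * rmnet_map_dl_csum_trailer (8 bytes on current kernels) when
      * checksum offload is enabled.
      */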
1370 
1371 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1372                  struct gsi_trans *trans)
1373 {
1374     struct page *page;
1375 
1376     if (endpoint->toward_ipa)
1377         return;
1378 
1379     if (trans->cancelled)
1380         goto done;
1381 
1382     /* Parse or build a socket buffer using the actual received length */
1383     page = trans->data;
1384     if (endpoint->config.status_enable)
1385         ipa_endpoint_status_parse(endpoint, page, trans->len);
1386     else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1387         trans->data = NULL; /* Pages have been consumed */
1388 done:
1389     ipa_endpoint_replenish(endpoint);
1390 }
1391 
1392 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1393                 struct gsi_trans *trans)
1394 {
1395     if (endpoint->toward_ipa) {
1396         struct ipa *ipa = endpoint->ipa;
1397 
1398         /* Nothing to do for command transactions */
1399         if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1400             struct sk_buff *skb = trans->data;
1401 
1402             if (skb)
1403                 dev_kfree_skb_any(skb);
1404         }
1405     } else {
1406         struct page *page = trans->data;
1407 
1408         if (page)
1409             put_page(page);
1410     }
1411 }
1412 
1413 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1414 {
1415     u32 val;
1416 
1417     /* ROUTE_DIS is 0 */
1418     val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
1419     val |= ROUTE_DEF_HDR_TABLE_FMASK;
1420     val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
1421     val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
1422     val |= ROUTE_DEF_RETAIN_HDR_FMASK;
1423 
1424     iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
1425 }
1426 
1427 void ipa_endpoint_default_route_clear(struct ipa *ipa)
1428 {
1429     ipa_endpoint_default_route_set(ipa, 0);
1430 }
1431 
1432 /**
1433  * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1434  * @endpoint:   Endpoint to be reset
1435  *
1436  * If aggregation is active on an RX endpoint when a reset is performed
1437  * on its underlying GSI channel, a special sequence of actions must be
1438  * taken to ensure the IPA pipeline is properly cleared.
1439  *
1440  * Return:  0 if successful, or a negative error code
1441  */
1442 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1443 {
1444     struct device *dev = &endpoint->ipa->pdev->dev;
1445     struct ipa *ipa = endpoint->ipa;
1446     struct gsi *gsi = &ipa->gsi;
1447     bool suspended = false;
1448     dma_addr_t addr;
1449     u32 retries;
1450     u32 len = 1;
1451     void *virt;
1452     int ret;
1453 
1454     virt = kzalloc(len, GFP_KERNEL);
1455     if (!virt)
1456         return -ENOMEM;
1457 
1458     addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1459     if (dma_mapping_error(dev, addr)) {
1460         ret = -ENOMEM;
1461         goto out_kfree;
1462     }
1463 
1464     /* Force close aggregation before issuing the reset */
1465     ipa_endpoint_force_close(endpoint);
1466 
1467     /* Reset and reconfigure the channel with the doorbell engine
1468      * disabled.  Then poll until we know aggregation is no longer
1469      * active.  We'll re-enable the doorbell (if appropriate) when
1470      * we reset again below.
1471      */
1472     gsi_channel_reset(gsi, endpoint->channel_id, false);
1473 
1474     /* Make sure the channel isn't suspended */
1475     suspended = ipa_endpoint_program_suspend(endpoint, false);
1476 
1477     /* Start channel and do a 1 byte read */
1478     ret = gsi_channel_start(gsi, endpoint->channel_id);
1479     if (ret)
1480         goto out_suspend_again;
1481 
1482     ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1483     if (ret)
1484         goto err_endpoint_stop;
1485 
1486     /* Wait for aggregation to be closed on the channel */
1487     retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1488     do {
1489         if (!ipa_endpoint_aggr_active(endpoint))
1490             break;
1491         usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1492     } while (retries--);
1493 
1494     /* Check one last time */
1495     if (ipa_endpoint_aggr_active(endpoint))
1496         dev_err(dev, "endpoint %u still active during reset\n",
1497             endpoint->endpoint_id);
1498 
1499     gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1500 
1501     ret = gsi_channel_stop(gsi, endpoint->channel_id);
1502     if (ret)
1503         goto out_suspend_again;
1504 
1505     /* Finally, reset and reconfigure the channel again (re-enabling
1506      * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1507      * complete the channel reset sequence.  Finish by suspending the
1508      * channel again (if necessary).
1509      */
1510     gsi_channel_reset(gsi, endpoint->channel_id, true);
1511 
1512     usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1513 
1514     goto out_suspend_again;
1515 
1516 err_endpoint_stop:
1517     (void)gsi_channel_stop(gsi, endpoint->channel_id);
1518 out_suspend_again:
1519     if (suspended)
1520         (void)ipa_endpoint_program_suspend(endpoint, true);
1521     dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1522 out_kfree:
1523     kfree(virt);
1524 
1525     return ret;
1526 }
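/* Summary of the RX-aggregation reset workaround above (a reading of the
 * code, not a hardware specification): force-close aggregation; reset the
 * channel with the doorbell engine disabled; make sure the endpoint is not
 * suspended; start the channel and post a one-byte read to flush the
 * pipeline; poll up to IPA_ENDPOINT_RESET_AGGR_RETRY_MAX times for
 * aggregation to close; stop the channel; reset it again with the doorbell
 * re-enabled; and finally restore the suspend state if it was changed.
 */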
1527 
1528 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1529 {
1530     u32 channel_id = endpoint->channel_id;
1531     struct ipa *ipa = endpoint->ipa;
1532     bool special;
1533     int ret = 0;
1534 
1535     /* Prior to IPA v4.0 (e.g. IPA v3.5.1), if an RX endpoint is reset
1536      * while aggregation is active, we need to handle things specially
1537      * to recover.  All other cases just need to reset the underlying
1538      * GSI channel.
1539      */
1539     special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1540             endpoint->config.aggregation;
1541     if (special && ipa_endpoint_aggr_active(endpoint))
1542         ret = ipa_endpoint_reset_rx_aggr(endpoint);
1543     else
1544         gsi_channel_reset(&ipa->gsi, channel_id, true);
1545 
1546     if (ret)
1547         dev_err(&ipa->pdev->dev,
1548             "error %d resetting channel %u for endpoint %u\n",
1549             ret, endpoint->channel_id, endpoint->endpoint_id);
1550 }
1551 
1552 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1553 {
1554     if (endpoint->toward_ipa) {
1555         /* Newer versions of IPA use GSI channel flow control
1556          * instead of endpoint DELAY mode to prevent sending data.
1557          * Flow control is disabled for newly-allocated channels,
1558          * and we can assume flow control is not (ever) enabled
1559          * for AP TX channels.
1560          */
1561         if (endpoint->ipa->version < IPA_VERSION_4_2)
1562             ipa_endpoint_program_delay(endpoint, false);
1563     } else {
1564         /* Ensure suspend mode is off on all AP RX endpoints */
1565         (void)ipa_endpoint_program_suspend(endpoint, false);
1566     }
1567     ipa_endpoint_init_cfg(endpoint);
1568     ipa_endpoint_init_nat(endpoint);
1569     ipa_endpoint_init_hdr(endpoint);
1570     ipa_endpoint_init_hdr_ext(endpoint);
1571     ipa_endpoint_init_hdr_metadata_mask(endpoint);
1572     ipa_endpoint_init_mode(endpoint);
1573     ipa_endpoint_init_aggr(endpoint);
1574     if (!endpoint->toward_ipa) {
1575         if (endpoint->config.rx.holb_drop)
1576             ipa_endpoint_init_hol_block_enable(endpoint, 0);
1577         else
1578             ipa_endpoint_init_hol_block_disable(endpoint);
1579     }
1580     ipa_endpoint_init_deaggr(endpoint);
1581     ipa_endpoint_init_rsrc_grp(endpoint);
1582     ipa_endpoint_init_seq(endpoint);
1583     ipa_endpoint_status(endpoint);
1584 }
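/* The per-endpoint registers above are written in a fixed order: CFG, NAT,
 * header, extended header, metadata mask, mode, aggregation, head-of-line
 * blocking (RX only), deaggregation, resource group, sequencer type, and
 * finally status.  Whether the hardware requires this exact ordering is
 * not stated here; the driver simply reprograms every register each time
 * an endpoint is set up.
 */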
1585 
1586 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1587 {
1588     struct ipa *ipa = endpoint->ipa;
1589     struct gsi *gsi = &ipa->gsi;
1590     int ret;
1591 
1592     ret = gsi_channel_start(gsi, endpoint->channel_id);
1593     if (ret) {
1594         dev_err(&ipa->pdev->dev,
1595             "error %d starting %cX channel %u for endpoint %u\n",
1596             ret, endpoint->toward_ipa ? 'T' : 'R',
1597             endpoint->channel_id, endpoint->endpoint_id);
1598         return ret;
1599     }
1600 
1601     if (!endpoint->toward_ipa) {
1602         ipa_interrupt_suspend_enable(ipa->interrupt,
1603                          endpoint->endpoint_id);
1604         ipa_endpoint_replenish_enable(endpoint);
1605     }
1606 
1607     ipa->enabled |= BIT(endpoint->endpoint_id);
1608 
1609     return 0;
1610 }
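/* Illustrative usage (a sketch assuming the caller holds a power reference
 * and has already set up the endpoint; the unwind label is hypothetical):
 *
 *	ret = ipa_endpoint_enable_one(endpoint);
 *	if (ret)
 *		goto err_undo;			// hypothetical unwind label
 *	...
 *	ipa_endpoint_disable_one(endpoint);	// mirror call on teardown
 *
 * Note that enable_one() records the endpoint in ipa->enabled, and
 * disable_one() quietly returns if that bit was never set.
 */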
1611 
1612 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1613 {
1614     u32 mask = BIT(endpoint->endpoint_id);
1615     struct ipa *ipa = endpoint->ipa;
1616     struct gsi *gsi = &ipa->gsi;
1617     int ret;
1618 
1619     if (!(ipa->enabled & mask))
1620         return;
1621 
1622     ipa->enabled ^= mask;
1623 
1624     if (!endpoint->toward_ipa) {
1625         ipa_endpoint_replenish_disable(endpoint);
1626         ipa_interrupt_suspend_disable(ipa->interrupt,
1627                           endpoint->endpoint_id);
1628     }
1629 
1630     /* Note that if stop fails, the channel's state is not well-defined */
1631     ret = gsi_channel_stop(gsi, endpoint->channel_id);
1632     if (ret)
1633         dev_err(&ipa->pdev->dev,
1634             "error %d attempting to stop endpoint %u\n", ret,
1635             endpoint->endpoint_id);
1636 }
1637 
1638 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1639 {
1640     struct device *dev = &endpoint->ipa->pdev->dev;
1641     struct gsi *gsi = &endpoint->ipa->gsi;
1642     int ret;
1643 
1644     if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1645         return;
1646 
1647     if (!endpoint->toward_ipa) {
1648         ipa_endpoint_replenish_disable(endpoint);
1649         (void)ipa_endpoint_program_suspend(endpoint, true);
1650     }
1651 
1652     ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1653     if (ret)
1654         dev_err(dev, "error %d suspending channel %u\n", ret,
1655             endpoint->channel_id);
1656 }
1657 
1658 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1659 {
1660     struct device *dev = &endpoint->ipa->pdev->dev;
1661     struct gsi *gsi = &endpoint->ipa->gsi;
1662     int ret;
1663 
1664     if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1665         return;
1666 
1667     if (!endpoint->toward_ipa)
1668         (void)ipa_endpoint_program_suspend(endpoint, false);
1669 
1670     ret = gsi_channel_resume(gsi, endpoint->channel_id);
1671     if (ret)
1672         dev_err(dev, "error %d resuming channel %u\n", ret,
1673             endpoint->channel_id);
1674     else if (!endpoint->toward_ipa)
1675         ipa_endpoint_replenish_enable(endpoint);
1676 }
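/* Suspend and resume are mirror images of each other: suspend_one() stops
 * replenishing and programs SUSPEND mode (RX only) before suspending the
 * GSI channel, while resume_one() clears SUSPEND mode first and only
 * re-enables replenishing once the channel resume has succeeded.
 */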
1677 
1678 void ipa_endpoint_suspend(struct ipa *ipa)
1679 {
1680     if (!ipa->setup_complete)
1681         return;
1682 
1683     if (ipa->modem_netdev)
1684         ipa_modem_suspend(ipa->modem_netdev);
1685 
1686     ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1687     ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1688 }
1689 
1690 void ipa_endpoint_resume(struct ipa *ipa)
1691 {
1692     if (!ipa->setup_complete)
1693         return;
1694 
1695     ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1696     ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1697 
1698     if (ipa->modem_netdev)
1699         ipa_modem_resume(ipa->modem_netdev);
1700 }
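/* The system-wide paths are likewise symmetric: ipa_endpoint_suspend()
 * quiesces the modem netdev first, then the AP LAN RX and command TX
 * endpoints, while ipa_endpoint_resume() brings those endpoints back in
 * the reverse order before resuming the modem netdev.
 */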
1701 
1702 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1703 {
1704     struct gsi *gsi = &endpoint->ipa->gsi;
1705     u32 channel_id = endpoint->channel_id;
1706 
1707     /* Only AP endpoints get set up */
1708     if (endpoint->ee_id != GSI_EE_AP)
1709         return;
1710 
1711     endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
1712     if (!endpoint->toward_ipa) {
1713         /* RX transactions require a single TRE each, so the maximum
1714          * number of outstanding receive buffers is the channel's TRE limit.
1715          */
1716         clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1717         clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1718         INIT_DELAYED_WORK(&endpoint->replenish_work,
1719                   ipa_endpoint_replenish_work);
1720     }
1721 
1722     ipa_endpoint_program(endpoint);
1723 
1724     endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
1725 }
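/* skb_frag_max is set to one less than the channel's per-transaction TRE
 * limit; presumably the remaining TRE is reserved for the socket buffer's
 * linear data, though that rationale is not spelled out here.
 */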
1726 
1727 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1728 {
1729     endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
1730 
1731     if (!endpoint->toward_ipa)
1732         cancel_delayed_work_sync(&endpoint->replenish_work);
1733 
1734     ipa_endpoint_reset(endpoint);
1735 }
1736 
1737 void ipa_endpoint_setup(struct ipa *ipa)
1738 {
1739     u32 initialized = ipa->initialized;
1740 
1741     ipa->set_up = 0;
1742     while (initialized) {
1743         u32 endpoint_id = __ffs(initialized);
1744 
1745         initialized ^= BIT(endpoint_id);
1746 
1747         ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1748     }
1749 }
1750 
1751 void ipa_endpoint_teardown(struct ipa *ipa)
1752 {
1753     u32 set_up = ipa->set_up;
1754 
1755     while (set_up) {
1756         u32 endpoint_id = __fls(set_up);
1757 
1758         set_up ^= BIT(endpoint_id);
1759 
1760         ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1761     }
1762     ipa->set_up = 0;
1763 }
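/* Both loops above walk a bitmask one endpoint at a time, clearing each
 * bit from a local copy as they go.  As a worked example (values chosen
 * for illustration only): if the mask is 0x2c (endpoints 2, 3 and 5),
 * ipa_endpoint_setup() visits them in ascending order via __ffs(), while
 * ipa_endpoint_teardown() uses __fls() and unwinds 5, 3, 2 in the
 * opposite order.
 */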
1764 
1765 int ipa_endpoint_config(struct ipa *ipa)
1766 {
1767     struct device *dev = &ipa->pdev->dev;
1768     u32 initialized;
1769     u32 rx_base;
1770     u32 rx_mask;
1771     u32 tx_mask;
1772     int ret = 0;
1773     u32 max;
1774     u32 val;
1775 
1776     /* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
1777      * Furthermore, the endpoints were not grouped such that TX
1778      * endpoint numbers started with 0 and RX endpoints had numbers
1779      * higher than all TX endpoints, so we can't do the simple
1780      * direction check used for newer hardware below.
1781      *
1782      * For hardware that doesn't support the FLAVOR_0 register,
1783      * just set the available mask to support any endpoint, and
1784      * assume the configuration is valid.
1785      */
1786     if (ipa->version < IPA_VERSION_3_5) {
1787         ipa->available = ~0;
1788         return 0;
1789     }
1790 
1791     /* Find out about the endpoints supplied by the hardware, and ensure
1792      * the highest one doesn't exceed the number we support.
1793      */
1794     val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
1795 
1796     /* Our RX is an IPA producer */
1797     rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
1798     max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
1799     if (max > IPA_ENDPOINT_MAX) {
1800         dev_err(dev, "too many endpoints (%u > %u)\n",
1801             max, IPA_ENDPOINT_MAX);
1802         return -EINVAL;
1803     }
1804     rx_mask = GENMASK(max - 1, rx_base);
1805 
1806     /* Our TX is an IPA consumer */
1807     max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
1808     tx_mask = GENMASK(max - 1, 0);
1809 
1810     ipa->available = rx_mask | tx_mask;
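    /* Worked example (illustrative numbers, not from any particular SoC):
     * if FLAVOR_0 reports the lowest producer pipe as 12 with 8 producer
     * pipes and 12 consumer pipes, then rx_base = 12, max = 20,
     * rx_mask = GENMASK(19, 12) = 0x000ff000, tx_mask = GENMASK(11, 0) =
     * 0x00000fff, and "available" covers endpoint IDs 0 through 19.
     */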
1811 
1812     /* Check for initialized endpoints not supported by the hardware */
1813     if (ipa->initialized & ~ipa->available) {
1814         dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
1815             ipa->initialized & ~ipa->available);
1816         ret = -EINVAL;      /* Continue, to report any other errors too */
1817     }
1818 
1819     initialized = ipa->initialized;
1820     while (initialized) {
1821         u32 endpoint_id = __ffs(initialized);
1822         struct ipa_endpoint *endpoint;
1823 
1824         initialized ^= BIT(endpoint_id);
1825 
1826         /* Make sure it's pointing in the right direction */
1827         endpoint = &ipa->endpoint[endpoint_id];
1828         if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
1829             dev_err(dev, "endpoint id %u wrong direction\n",
1830                 endpoint_id);
1831             ret = -EINVAL;
1832         }
1833     }
1834 
1835     return ret;
1836 }
1837 
1838 void ipa_endpoint_deconfig(struct ipa *ipa)
1839 {
1840     ipa->available = 0; /* Nothing more to do */
1841 }
1842 
1843 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1844                   const struct ipa_gsi_endpoint_data *data)
1845 {
1846     struct ipa_endpoint *endpoint;
1847 
1848     endpoint = &ipa->endpoint[data->endpoint_id];
1849 
1850     if (data->ee_id == GSI_EE_AP)
1851         ipa->channel_map[data->channel_id] = endpoint;
1852     ipa->name_map[name] = endpoint;
1853 
1854     endpoint->ipa = ipa;
1855     endpoint->ee_id = data->ee_id;
1856     endpoint->channel_id = data->channel_id;
1857     endpoint->endpoint_id = data->endpoint_id;
1858     endpoint->toward_ipa = data->toward_ipa;
1859     endpoint->config = data->endpoint.config;
1860 
1861     ipa->initialized |= BIT(endpoint->endpoint_id);
1862 }
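/* Bookkeeping note: every endpoint gets a name_map[] entry, but only
 * AP-owned channels are recorded in channel_map[], presumably because
 * channel IDs are scoped to the AP execution environment and modem-owned
 * channels are not driven from this side.
 */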
1863 
1864 static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1865 {
1866     endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
1867 
1868     memset(endpoint, 0, sizeof(*endpoint));
1869 }
1870 
1871 void ipa_endpoint_exit(struct ipa *ipa)
1872 {
1873     u32 initialized = ipa->initialized;
1874 
1875     while (initialized) {
1876         u32 endpoint_id = __fls(initialized);
1877 
1878         initialized ^= BIT(endpoint_id);
1879 
1880         ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1881     }
1882     memset(ipa->name_map, 0, sizeof(ipa->name_map));
1883     memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1884 }
1885 
1886 /* Returns a bitmask of endpoints that support filtering, or 0 on error */
1887 u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
1888               const struct ipa_gsi_endpoint_data *data)
1889 {
1890     enum ipa_endpoint_name name;
1891     u32 filter_map;
1892 
1893     BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
1894 
1895     if (!ipa_endpoint_data_valid(ipa, count, data))
1896         return 0;   /* Error */
1897 
1898     ipa->initialized = 0;
1899 
1900     filter_map = 0;
1901     for (name = 0; name < count; name++, data++) {
1902         if (ipa_gsi_endpoint_data_empty(data))
1903             continue;   /* Skip over empty slots */
1904 
1905         ipa_endpoint_init_one(ipa, name, data);
1906 
1907         if (data->endpoint.filter_support)
1908             filter_map |= BIT(data->endpoint_id);
1909         if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
1910             ipa->modem_tx_count++;
1911     }
1912 
1913     if (!ipa_filter_map_valid(ipa, filter_map))
1914         goto err_endpoint_exit;
1915 
1916     return filter_map;  /* Non-zero bitmask */
1917 
1918 err_endpoint_exit:
1919     ipa_endpoint_exit(ipa);
1920 
1921     return 0;   /* Error */
1922 }
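/* For illustration (numbers invented): if only endpoints 0 and 5 have
 * filter_support set in the configuration data, this returns
 * filter_map = BIT(0) | BIT(5) = 0x21, meaning filtering is supported on
 * endpoints 0 and 5; a return of 0 indicates an error (invalid endpoint
 * data or filter map).
 */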