#include <linux/mhi_ep.h>
#include "internal.h"

size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
{
	return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
}

static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
{
	__le64 rlen;

	memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));

	return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
}

void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
{
	ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
}

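/*
 * Copy the ring elements produced by the host into the local ring cache,
 * from the currently cached write offset up to @end. The copy may wrap
 * around the end of the ring, in which case it is split into two
 * read_from_host() transfers. Event rings are skipped here since their
 * elements are produced by the device, not the host.
 */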
static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	size_t start, copy_size;
	int ret;

	/* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
	if (ring->type == RING_TYPE_ER)
		return 0;

	/* No need to cache the ring if the write pointer is unmodified */
	if (ring->wr_offset == end)
		return 0;

	start = ring->wr_offset;
	if (start < end) {
		copy_size = (end - start) * sizeof(struct mhi_ring_element);
		ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
						(start * sizeof(struct mhi_ring_element)),
						&ring->ring_cache[start], copy_size);
		if (ret < 0)
			return ret;
	} else {
		/* Copy till the end of the ring, then wrap around to the ring base */
		copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
		ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
						(start * sizeof(struct mhi_ring_element)),
						&ring->ring_cache[start], copy_size);
		if (ret < 0)
			return ret;

		if (end) {
			ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
							&ring->ring_cache[0],
							end * sizeof(struct mhi_ring_element));
			if (ret < 0)
				return ret;
		}
	}

	dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);

	return 0;
}

static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
{
	size_t wr_offset;
	int ret;

	wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);

	/* Cache the host ring till the write offset */
	ret = __mhi_ep_cache_ring(ring, wr_offset);
	if (ret)
		return ret;

	ring->wr_offset = wr_offset;

	return 0;
}

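/*
 * Read the write pointer rung by the host through the doorbell register and
 * cache any ring elements added since the last update.
 */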
int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
{
	u64 wr_ptr;

	wr_ptr = mhi_ep_mmio_get_db(ring);

	return mhi_ep_cache_ring(ring, wr_ptr);
}

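/*
 * Add a single element to the host ring at the device's current read offset,
 * advance the read pointer in the ring context and copy the element out to
 * the host memory backing the ring. Used when the device produces elements
 * for the host (e.g. events); fails with -ENOSPC if the host has not yet
 * made room in the ring.
 */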
int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	size_t old_offset = 0;
	u32 num_free_elem;
	__le64 rp;
	int ret;

	ret = mhi_ep_update_wr_offset(ring);
	if (ret) {
		dev_err(dev, "Error updating write pointer\n");
		return ret;
	}

	if (ring->rd_offset < ring->wr_offset)
		num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
	else
		num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;

	/* Check if there is space in the ring for adding at least one element */
	if (!num_free_elem) {
		dev_err(dev, "No space left in the ring\n");
		return -ENOSPC;
	}

	old_offset = ring->rd_offset;
	mhi_ep_ring_inc_index(ring);

	dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);

	/* Update rp in the ring context */
	rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
	memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));

	ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
				       sizeof(*el));
	if (ret < 0)
		return ret;

	return 0;
}

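/* Set the ring type and select the doorbell register offsets for the given ring ID */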
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
{
	ring->type = type;
	if (ring->type == RING_TYPE_CMD) {
		ring->db_offset_h = EP_CRDB_HIGHER;
		ring->db_offset_l = EP_CRDB_LOWER;
	} else if (ring->type == RING_TYPE_CH) {
		ring->db_offset_h = CHDB_HIGHER_n(id);
		ring->db_offset_l = CHDB_LOWER_n(id);
		ring->ch_id = id;
	} else {
		ring->db_offset_h = ERDB_HIGHER_n(id);
		ring->db_offset_l = ERDB_LOWER_n(id);
	}
}

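/*
 * Start a ring by reading its context (base address, read/write pointers
 * and, depending on the ring type, the event ring index or MSI vector) from
 * the host, allocating the local element cache and caching any elements
 * already present in the host ring.
 */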
int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
		      union mhi_ep_ring_ctx *ctx)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	__le64 val;
	int ret;

	ring->mhi_cntrl = mhi_cntrl;
	ring->ring_ctx = ctx;
	ring->ring_size = mhi_ep_ring_num_elems(ring);
	memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
	ring->rbase = le64_to_cpu(val);

	if (ring->type == RING_TYPE_CH)
		ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);

	if (ring->type == RING_TYPE_ER)
		ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);

	/* During ring init, both rp and wp are equal */
	memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
	ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
	ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));

	/* Allocate ring cache memory for holding the copy of the host ring */
	ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
	if (!ring->ring_cache)
		return -ENOMEM;

	memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
	ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
	if (ret) {
		dev_err(dev, "Failed to cache ring\n");
		kfree(ring->ring_cache);
		return ret;
	}

	ring->started = true;

	return 0;
}

void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
{
	ring->started = false;
	kfree(ring->ring_cache);
	ring->ring_cache = NULL;
}