0001
0002
0003
0004
0005
0006
0007
0008 #include "xgene_enet_main.h"
0009 #include "xgene_enet_hw.h"
0010 #include "xgene_enet_ring2.h"
0011
0012 static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
0013 {
0014 u32 *ring_cfg = ring->state;
0015 u64 addr = ring->dma;
0016
0017 if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
0018 ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
0019 ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
0020 }
0021 ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2);
0022
0023 addr >>= 8;
0024 ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
0025
0026 addr >>= 27;
0027 ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
0028 | ACCEPTLERR
0029 | SET_VAL(RINGADDRH, addr);
0030 ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1);
0031 ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM);
0032 }
0033
0034 static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
0035 {
0036 u32 *ring_cfg = ring->state;
0037 bool is_bufpool;
0038 u32 val;
0039
0040 is_bufpool = xgene_enet_is_bufpool(ring->id);
0041 val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
0042 ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val);
0043 if (is_bufpool)
0044 ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE);
0045 }
0046
0047 static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
0048 {
0049 u32 *ring_cfg = ring->state;
0050
0051 ring_cfg[3] |= RECOMBBUF;
0052 ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7);
0053 }
0054
0055 static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
0056 u32 offset, u32 data)
0057 {
0058 struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
0059
0060 iowrite32(data, pdata->ring_csr_addr + offset);
0061 }
0062
0063 static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
0064 {
0065 struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
0066 int i;
0067
0068 xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
0069 for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
0070 xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
0071 ring->state[i]);
0072 }
0073 }
0074
/* Zero the cached ring state and push the cleared words to hardware. */
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}
0080
0081 static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
0082 {
0083 enum xgene_ring_owner owner;
0084
0085 xgene_enet_ring_set_type(ring);
0086
0087 owner = xgene_enet_ring_owner(ring->id);
0088 if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)
0089 xgene_enet_ring_set_recombbuf(ring);
0090
0091 xgene_enet_ring_init(ring);
0092 xgene_enet_write_ring_state(ring);
0093 }
0094
0095 static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
0096 {
0097 u32 ring_id_val, ring_id_buf;
0098 bool is_bufpool;
0099
0100 if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
0101 return;
0102
0103 is_bufpool = xgene_enet_is_bufpool(ring->id);
0104
0105 ring_id_val = ring->id & GENMASK(9, 0);
0106 ring_id_val |= OVERWRITE;
0107
0108 ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
0109 ring_id_buf |= PREFETCH_BUF_EN;
0110
0111 if (is_bufpool)
0112 ring_id_buf |= IS_BUFFER_POOL;
0113
0114 xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
0115 xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
0116 }
0117
0118 static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
0119 {
0120 u32 ring_id;
0121
0122 ring_id = ring->id | OVERWRITE;
0123 xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
0124 xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
0125 }
0126
0127 static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
0128 struct xgene_enet_desc_ring *ring)
0129 {
0130 bool is_bufpool;
0131 u32 addr, i;
0132
0133 xgene_enet_clr_ring_state(ring);
0134 xgene_enet_set_ring_state(ring);
0135 xgene_enet_set_ring_id(ring);
0136
0137 ring->slots = xgene_enet_get_numslots(ring->id, ring->size);
0138
0139 is_bufpool = xgene_enet_is_bufpool(ring->id);
0140 if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
0141 return ring;
0142
0143 addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
0144 xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);
0145
0146 for (i = 0; i < ring->slots; i++)
0147 xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
0148
0149 return ring;
0150 }
0151
/* Tear down @ring: clear its hardware ID first, then its state words. */
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}
0157
0158 static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
0159 {
0160 u32 data = 0;
0161
0162 if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
0163 data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
0164 INTR_CLEAR;
0165 }
0166 data |= (count & GENMASK(16, 0));
0167
0168 iowrite32(data, ring->cmd);
0169 }
0170
0171 static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
0172 {
0173 u32 __iomem *cmd_base = ring->cmd_base;
0174 u32 ring_state, num_msgs;
0175
0176 ring_state = ioread32(&cmd_base[1]);
0177 num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);
0178
0179 return num_msgs;
0180 }
0181
/* Program interrupt-coalescing parameters for the v2 ring hardware.
 * The magic values are hardware tuning constants; the write sequence is
 * kept as-is since register programming order may matter to the PBM.
 */
static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
{
	u32 data = 0x77777777;	/* tick selector pattern for all PBM groups */

	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
	/* Same tick pattern across all four coalescing-tick registers */
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
}
0194
/* Ring operations table for v2 (X2) hardware, consumed by the main driver. */
struct xgene_ring_ops xgene_ring2_ops = {
	.num_ring_config = X2_NUM_RING_CONFIG,
	.num_ring_id_shift = 13,	/* ring-id field shift used by callers */
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
	.coalesce = xgene_enet_setup_coalescing,
};