// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"

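/* Allocate a coherent DMA buffer for the control BD ring, verify the 128-byte
 * alignment the hardware requires, record the producer/consumer register
 * addresses, then program the ring base, length and indices into the station
 * interface registers and enable the ring.
 *
 * Illustrative call only - the caller and the ring size macro used here are
 * assumptions, not part of this file:
 *
 *     err = enetc_setup_cbdr(priv->dev, &si->hw, ENETC_CBDR_SIZE,
 *                            &si->cbd_ring);
 */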
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
             struct enetc_cbdr *cbdr)
{
    int size = bd_count * sizeof(struct enetc_cbd);

    cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
                       GFP_KERNEL);
    if (!cbdr->bd_base)
        return -ENOMEM;

    /* h/w requires 128B alignment */
    if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
        dma_free_coherent(dev, size, cbdr->bd_base,
                  cbdr->bd_dma_base);
        return -EINVAL;
    }

    cbdr->next_to_clean = 0;
    cbdr->next_to_use = 0;
    cbdr->dma_dev = dev;
    cbdr->bd_count = bd_count;

    cbdr->pir = hw->reg + ENETC_SICBDRPIR;
    cbdr->cir = hw->reg + ENETC_SICBDRCIR;
    cbdr->mr = hw->reg + ENETC_SICBDRMR;

    /* set CBDR cache attributes */
    enetc_wr(hw, ENETC_SICAR2,
         ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

    enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
    enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
    enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));

    enetc_wr_reg(cbdr->pir, cbdr->next_to_clean);
    enetc_wr_reg(cbdr->cir, cbdr->next_to_use);
    /* enable ring */
    enetc_wr_reg(cbdr->mr, BIT(31));

    return 0;
}

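/* Undo enetc_setup_cbdr(): disable the ring in hardware and free the
 * coherent DMA buffer that backs it.
 */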
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
    int size = cbdr->bd_count * sizeof(struct enetc_cbd);

    /* disable ring */
    enetc_wr_reg(cbdr->mr, 0);

    dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base,
              cbdr->bd_dma_base);
    cbdr->bd_base = NULL;
    cbdr->dma_dev = NULL;
}

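/* Reclaim command BDs the hardware has already consumed: walk from
 * next_to_clean up to the hardware consumer index, warn about any non-zero
 * completion status and zero each completed descriptor.
 */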
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
    struct enetc_cbd *dest_cbd;
    int i, status;

    i = ring->next_to_clean;

    while (enetc_rd_reg(ring->cir) != i) {
        dest_cbd = ENETC_CBD(*ring, i);
        status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
        if (status)
            dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n",
                 status, dest_cbd->cmd);

        memset(dest_cbd, 0, sizeof(*dest_cbd));

        i = (i + 1) % ring->bd_count;
    }

    ring->next_to_clean = i;
}

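/* Number of free slots in the ring; one BD is always left unused so a full
 * ring can be told apart from an empty one.
 */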
static int enetc_cbd_unused(struct enetc_cbdr *r)
{
    return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
        r->bd_count;
}

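/* Queue a single command BD and busy-wait for the hardware to consume it.
 * Polling uses udelay() because callers hold rtnl_lock() and must not sleep.
 * On success the descriptor is copied back to the caller, since the hardware
 * may have written response data into it.
 */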
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
    struct enetc_cbdr *ring = &si->cbd_ring;
    int timeout = ENETC_CBDR_TIMEOUT;
    struct enetc_cbd *dest_cbd;
    int i;

    if (unlikely(!ring->bd_base))
        return -EIO;

    if (unlikely(!enetc_cbd_unused(ring)))
        enetc_clean_cbdr(ring);

    i = ring->next_to_use;
    dest_cbd = ENETC_CBD(*ring, i);

    /* copy command to the ring */
    *dest_cbd = *cbd;
    i = (i + 1) % ring->bd_count;

    ring->next_to_use = i;
    /* let H/W know BD ring has been updated */
    enetc_wr_reg(ring->pir, i);

    do {
        if (enetc_rd_reg(ring->cir) == i)
            break;
        udelay(10); /* cannot sleep, rtnl_lock() */
        timeout -= 10;
    } while (timeout);

    if (!timeout)
        return -EBUSY;

    /* CBD may writeback data, feedback up level */
    *cbd = *dest_cbd;

    enetc_clean_cbdr(ring);

    return 0;
}

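/* Clear a MAC filter table entry by issuing the class 1 "set" command with an
 * all-zero payload, i.e. with the enable bit in opt[0] left unset.
 */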
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
    struct enetc_cbd cbd;

    memset(&cbd, 0, sizeof(cbd));

    cbd.cls = 1;
    cbd.status_flags = ENETC_CBD_FLAGS_SF;
    cbd.index = cpu_to_le16(index);

    return enetc_send_cmd(si, &cbd);
}

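/* Program a MAC filter table entry: the 6-byte address is split into a 32-bit
 * and a 16-bit word, opt[3] carries the SI bitmap the entry applies to, and
 * BIT(31) in opt[0] marks the entry as enabled.
 */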
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
                char *mac_addr, int si_map)
{
    struct enetc_cbd cbd;
    u32 upper;
    u16 lower;

    memset(&cbd, 0, sizeof(cbd));

    /* fill up the "set" descriptor */
    cbd.cls = 1;
    cbd.status_flags = ENETC_CBD_FLAGS_SF;
    cbd.index = cpu_to_le16(index);
    cbd.opt[3] = cpu_to_le32(si_map);
    /* enable entry */
    cbd.opt[0] = cpu_to_le32(BIT(31));

    upper = *(const u32 *)mac_addr;
    lower = *(const u16 *)(mac_addr + 4);
    cbd.addr[0] = cpu_to_le32(upper);
    cbd.addr[1] = cpu_to_le32(lower);

    return enetc_send_cmd(si, &cbd);
}

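/* The RFS entry is too large for the BD itself, so it is staged in a DMA data
 * buffer that enetc_cbd_alloc_data_mem() attaches to the descriptor (the
 * helper lives elsewhere in the driver; its exact behavior is assumed here).
 */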
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
               int index)
{
    struct enetc_cbdr *ring = &si->cbd_ring;
    struct enetc_cbd cbd = {.cmd = 0};
    void *tmp, *tmp_align;
    dma_addr_t dma;
    int err;

    /* fill up the "set" descriptor */
    cbd.cmd = 0;
    cbd.cls = 4;
    cbd.index = cpu_to_le16(index);
    cbd.opt[3] = cpu_to_le32(0); /* SI */

    tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse),
                       &dma, &tmp_align);
    if (!tmp)
        return -ENOMEM;

    memcpy(tmp_align, rfse, sizeof(*rfse));

    err = enetc_send_cmd(si, &cbd);
    if (err)
        dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);

    enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma);

    return err;
}

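/* Read or write the RSS indirection table with a class 3 command.  The table
 * is transferred through a DMA data buffer, one byte per entry, and the
 * hardware only accepts a full table, hence the minimum 'count' check below.
 */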
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
                   bool read)
{
    struct enetc_cbdr *ring = &si->cbd_ring;
    struct enetc_cbd cbd = {.cmd = 0};
    u8 *tmp, *tmp_align;
    dma_addr_t dma;
    int err, i;

    if (count < ENETC_CBD_DATA_MEM_ALIGN)
        /* HW only takes in a full 64 entry table */
        return -EINVAL;

    tmp = enetc_cbd_alloc_data_mem(si, &cbd, count,
                       &dma, (void *)&tmp_align);
    if (!tmp)
        return -ENOMEM;

    if (!read)
        for (i = 0; i < count; i++)
            tmp_align[i] = (u8)(table[i]);

    /* fill up the descriptor */
    cbd.cmd = read ? 2 : 1;
    cbd.cls = 3;

    err = enetc_send_cmd(si, &cbd);
    if (err)
        dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err);

    if (read)
        for (i = 0; i < count; i++)
            table[i] = tmp_align[i];

    enetc_cbd_free_data_mem(si, count, tmp, &dma);

    return err;
}

/* Get RSS table */
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
    return enetc_cmd_rss_table(si, table, count, true);
}

/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
    return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}