0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/module.h>
0010 #include <linux/phy.h>
0011 #include <linux/netdevice.h>
0012 #include <linux/bitfield.h>
0013 #include <linux/regmap.h>
0014 #include <net/dsa.h>
0015 #include <linux/of_net.h>
0016 #include <linux/of_mdio.h>
0017 #include <linux/of_platform.h>
0018 #include <linux/mdio.h>
0019 #include <linux/phylink.h>
0020 #include <linux/gpio/consumer.h>
0021 #include <linux/etherdevice.h>
0022 #include <linux/dsa/tag_qca.h>
0023
0024 #include "qca8k.h"
0025
0026 static void
0027 qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
0028 {
0029 regaddr >>= 1;
0030 *r1 = regaddr & 0x1e;
0031
0032 regaddr >>= 5;
0033 *r2 = regaddr & 0x7;
0034
0035 regaddr >>= 3;
0036 *page = regaddr & 0x3ff;
0037 }
0038
0039 static int
0040 qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
0041 {
0042 u16 *cached_lo = &priv->mdio_cache.lo;
0043 struct mii_bus *bus = priv->bus;
0044 int ret;
0045
0046 if (lo == *cached_lo)
0047 return 0;
0048
0049 ret = bus->write(bus, phy_id, regnum, lo);
0050 if (ret < 0)
0051 dev_err_ratelimited(&bus->dev,
0052 "failed to write qca8k 32bit lo register\n");
0053
0054 *cached_lo = lo;
0055 return 0;
0056 }
0057
0058 static int
0059 qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
0060 {
0061 u16 *cached_hi = &priv->mdio_cache.hi;
0062 struct mii_bus *bus = priv->bus;
0063 int ret;
0064
0065 if (hi == *cached_hi)
0066 return 0;
0067
0068 ret = bus->write(bus, phy_id, regnum, hi);
0069 if (ret < 0)
0070 dev_err_ratelimited(&bus->dev,
0071 "failed to write qca8k 32bit hi register\n");
0072
0073 *cached_hi = hi;
0074 return 0;
0075 }
0076
0077 static int
0078 qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
0079 {
0080 int ret;
0081
0082 ret = bus->read(bus, phy_id, regnum);
0083 if (ret >= 0) {
0084 *val = ret;
0085 ret = bus->read(bus, phy_id, regnum + 1);
0086 *val |= ret << 16;
0087 }
0088
0089 if (ret < 0) {
0090 dev_err_ratelimited(&bus->dev,
0091 "failed to read qca8k 32bit register\n");
0092 *val = 0;
0093 return ret;
0094 }
0095
0096 return 0;
0097 }
0098
0099 static void
0100 qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
0101 {
0102 u16 lo, hi;
0103 int ret;
0104
0105 lo = val & 0xffff;
0106 hi = (u16)(val >> 16);
0107
0108 ret = qca8k_set_lo(priv, phy_id, regnum, lo);
0109 if (ret >= 0)
0110 ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
0111 }
0112
0113 static int
0114 qca8k_set_page(struct qca8k_priv *priv, u16 page)
0115 {
0116 u16 *cached_page = &priv->mdio_cache.page;
0117 struct mii_bus *bus = priv->bus;
0118 int ret;
0119
0120 if (page == *cached_page)
0121 return 0;
0122
0123 ret = bus->write(bus, 0x18, 0, page);
0124 if (ret < 0) {
0125 dev_err_ratelimited(&bus->dev,
0126 "failed to set qca8k page\n");
0127 return ret;
0128 }
0129
0130 *cached_page = page;
0131 usleep_range(1000, 2000);
0132 return 0;
0133 }
0134
/* Handler for the Ethernet mgmt reply frame delivered by the qca tagger.
 * Copies any read data into mgmt_eth_data and completes the pending
 * read/write started in qca8k_read_eth()/qca8k_write_eth().
 */
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	struct qca8k_priv *priv = ds->priv;
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	u8 len, cmd;

	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
	mgmt_eth_data = &priv->mgmt_eth_data;

	cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
	len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);

	/* Only flag the request as ACKed if the sequence number matches
	 * the one of the request currently waiting on rw_done.
	 */
	if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
		mgmt_eth_data->ack = true;

	if (cmd == MDIO_READ) {
		/* The first data word travels inside the mgmt header */
		mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;

		/* Any words beyond the first are carried in the packet
		 * payload immediately after the mgmt header.
		 */
		if (len > QCA_HDR_MGMT_DATA1_LEN)
			memcpy(mgmt_eth_data->data + 1, skb->data,
			       QCA_HDR_MGMT_DATA2_LEN);
	}

	complete(&mgmt_eth_data->rw_done);
}
0165
/* Build an Ethernet mgmt frame describing a register read/write of up
 * to 16 bytes. For MDIO_WRITE the first word goes in the mgmt header
 * and any remainder in the payload. Returns NULL on allocation failure.
 */
static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
					       int priority, unsigned int len)
{
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	unsigned int real_len;
	struct sk_buff *skb;
	u32 *data2;
	u16 hdr;

	skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
	if (!skb)
		return NULL;

	/* The length field can't encode 16: its max value is 15 (0xf)
	 * while a full transfer is 16 bytes, so 16 is clamped to 15.
	 * NOTE(review): presumably the hardware treats 15 as "full 16
	 * byte transfer" - confirm against the QCA8K mgmt header spec.
	 */
	if (len == 16)
		real_len = 15;
	else
		real_len = len;

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);

	mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);

	hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
	hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
	hdr |= QCA_HDR_XMIT_FROM_CPU;
	hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
	hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);

	mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
	mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
	mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
	mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
					   QCA_HDR_MGMT_CHECK_CODE_VAL);

	/* First data word is embedded in the mgmt header itself */
	if (cmd == MDIO_WRITE)
		mgmt_ethhdr->mdio_data = *val;

	mgmt_ethhdr->hdr = htons(hdr);

	/* Remaining words (if any) go in the zero-padded payload */
	data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
	if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
		memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);

	return skb;
}
0219
0220 static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
0221 {
0222 struct qca_mgmt_ethhdr *mgmt_ethhdr;
0223
0224 mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
0225 mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
0226 }
0227
/* Read @len bytes of switch registers starting at @reg over the
 * Ethernet mgmt channel.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL if no
 * mgmt master is available or the reply was not ACKed, -ETIMEDOUT if
 * no reply arrived in time.
 */
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* The mgmt master may have gone away since the skb was built */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Bump the seq number first so the ACK handler can match it */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	/* Copy the reply while still holding the mutex; whether it is
	 * valid is decided below via the timeout/ack results.
	 */
	*val = mgmt_eth_data->data[0];
	if (len > QCA_HDR_MGMT_DATA1_LEN)
		memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
0279
/* Write @len bytes of switch registers starting at @reg over the
 * Ethernet mgmt channel.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL if no
 * mgmt master is available or the write was not ACKed, -ETIMEDOUT if
 * no reply arrived in time.
 */
static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* The mgmt master may have gone away since the skb was built */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Bump the seq number first so the ACK handler can match it */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
0327
0328 static int
0329 qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
0330 {
0331 u32 val = 0;
0332 int ret;
0333
0334 ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
0335 if (ret)
0336 return ret;
0337
0338 val &= ~mask;
0339 val |= write_val;
0340
0341 return qca8k_write_eth(priv, reg, &val, sizeof(val));
0342 }
0343
0344 static int
0345 qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
0346 {
0347 struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
0348 struct mii_bus *bus = priv->bus;
0349 u16 r1, r2, page;
0350 int ret;
0351
0352 if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
0353 return 0;
0354
0355 qca8k_split_addr(reg, &r1, &r2, &page);
0356
0357 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
0358
0359 ret = qca8k_set_page(priv, page);
0360 if (ret < 0)
0361 goto exit;
0362
0363 ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
0364
0365 exit:
0366 mutex_unlock(&bus->mdio_lock);
0367 return ret;
0368 }
0369
0370 static int
0371 qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
0372 {
0373 struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
0374 struct mii_bus *bus = priv->bus;
0375 u16 r1, r2, page;
0376 int ret;
0377
0378 if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
0379 return 0;
0380
0381 qca8k_split_addr(reg, &r1, &r2, &page);
0382
0383 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
0384
0385 ret = qca8k_set_page(priv, page);
0386 if (ret < 0)
0387 goto exit;
0388
0389 qca8k_mii_write32(priv, 0x10 | r2, r1, val);
0390
0391 exit:
0392 mutex_unlock(&bus->mdio_lock);
0393 return ret;
0394 }
0395
0396 static int
0397 qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
0398 {
0399 struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
0400 struct mii_bus *bus = priv->bus;
0401 u16 r1, r2, page;
0402 u32 val;
0403 int ret;
0404
0405 if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
0406 return 0;
0407
0408 qca8k_split_addr(reg, &r1, &r2, &page);
0409
0410 mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
0411
0412 ret = qca8k_set_page(priv, page);
0413 if (ret < 0)
0414 goto exit;
0415
0416 ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
0417 if (ret < 0)
0418 goto exit;
0419
0420 val &= ~mask;
0421 val |= write_val;
0422 qca8k_mii_write32(priv, 0x10 | r2, r1, val);
0423
0424 exit:
0425 mutex_unlock(&bus->mdio_lock);
0426
0427 return ret;
0428 }
0429
/* regmap over the 32-bit switch register space. Locking is disabled
 * here because serialization is done by the accessors themselves
 * (mgmt_eth_data->mutex and the MDIO bus lock).
 */
static struct regmap_config qca8k_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x16ac, /* end of the readable register range */
	.reg_read = qca8k_regmap_read,
	.reg_write = qca8k_regmap_write,
	.reg_update_bits = qca8k_regmap_update_bits,
	.rd_table = &qca8k_readable_table,
	.disable_locking = true, /* locking handled by the read/write callbacks */
	.cache_type = REGCACHE_NONE, /* explicitly disable caching */
};
0442
0443 static int
0444 qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
0445 struct sk_buff *read_skb, u32 *val)
0446 {
0447 struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
0448 bool ack;
0449 int ret;
0450
0451 reinit_completion(&mgmt_eth_data->rw_done);
0452
0453
0454 mgmt_eth_data->seq++;
0455 qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
0456 mgmt_eth_data->ack = false;
0457
0458 dev_queue_xmit(skb);
0459
0460 ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
0461 QCA8K_ETHERNET_TIMEOUT);
0462
0463 ack = mgmt_eth_data->ack;
0464
0465 if (ret <= 0)
0466 return -ETIMEDOUT;
0467
0468 if (!ack)
0469 return -EINVAL;
0470
0471 *val = mgmt_eth_data->data[0];
0472
0473 return 0;
0474 }
0475
0476 static int
0477 qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
0478 int regnum, u16 data)
0479 {
0480 struct sk_buff *write_skb, *clear_skb, *read_skb;
0481 struct qca8k_mgmt_eth_data *mgmt_eth_data;
0482 u32 write_val, clear_val = 0, val;
0483 struct net_device *mgmt_master;
0484 int ret, ret1;
0485 bool ack;
0486
0487 if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
0488 return -EINVAL;
0489
0490 mgmt_eth_data = &priv->mgmt_eth_data;
0491
0492 write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
0493 QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
0494 QCA8K_MDIO_MASTER_REG_ADDR(regnum);
0495
0496 if (read) {
0497 write_val |= QCA8K_MDIO_MASTER_READ;
0498 } else {
0499 write_val |= QCA8K_MDIO_MASTER_WRITE;
0500 write_val |= QCA8K_MDIO_MASTER_DATA(data);
0501 }
0502
0503
0504 write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
0505 QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
0506 if (!write_skb)
0507 return -ENOMEM;
0508
0509 clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
0510 QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
0511 if (!clear_skb) {
0512 ret = -ENOMEM;
0513 goto err_clear_skb;
0514 }
0515
0516 read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
0517 QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
0518 if (!read_skb) {
0519 ret = -ENOMEM;
0520 goto err_read_skb;
0521 }
0522
0523
0524
0525
0526
0527
0528
0529 mutex_lock(&mgmt_eth_data->mutex);
0530
0531
0532 mgmt_master = priv->mgmt_master;
0533 if (!mgmt_master) {
0534 mutex_unlock(&mgmt_eth_data->mutex);
0535 ret = -EINVAL;
0536 goto err_mgmt_master;
0537 }
0538
0539 read_skb->dev = mgmt_master;
0540 clear_skb->dev = mgmt_master;
0541 write_skb->dev = mgmt_master;
0542
0543 reinit_completion(&mgmt_eth_data->rw_done);
0544
0545
0546 mgmt_eth_data->seq++;
0547 qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
0548 mgmt_eth_data->ack = false;
0549
0550 dev_queue_xmit(write_skb);
0551
0552 ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
0553 QCA8K_ETHERNET_TIMEOUT);
0554
0555 ack = mgmt_eth_data->ack;
0556
0557 if (ret <= 0) {
0558 ret = -ETIMEDOUT;
0559 kfree_skb(read_skb);
0560 goto exit;
0561 }
0562
0563 if (!ack) {
0564 ret = -EINVAL;
0565 kfree_skb(read_skb);
0566 goto exit;
0567 }
0568
0569 ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
0570 !(val & QCA8K_MDIO_MASTER_BUSY), 0,
0571 QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
0572 mgmt_eth_data, read_skb, &val);
0573
0574 if (ret < 0 && ret1 < 0) {
0575 ret = ret1;
0576 goto exit;
0577 }
0578
0579 if (read) {
0580 reinit_completion(&mgmt_eth_data->rw_done);
0581
0582
0583 mgmt_eth_data->seq++;
0584 qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
0585 mgmt_eth_data->ack = false;
0586
0587 dev_queue_xmit(read_skb);
0588
0589 ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
0590 QCA8K_ETHERNET_TIMEOUT);
0591
0592 ack = mgmt_eth_data->ack;
0593
0594 if (ret <= 0) {
0595 ret = -ETIMEDOUT;
0596 goto exit;
0597 }
0598
0599 if (!ack) {
0600 ret = -EINVAL;
0601 goto exit;
0602 }
0603
0604 ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
0605 } else {
0606 kfree_skb(read_skb);
0607 }
0608 exit:
0609 reinit_completion(&mgmt_eth_data->rw_done);
0610
0611
0612 mgmt_eth_data->seq++;
0613 qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
0614 mgmt_eth_data->ack = false;
0615
0616 dev_queue_xmit(clear_skb);
0617
0618 wait_for_completion_timeout(&mgmt_eth_data->rw_done,
0619 QCA8K_ETHERNET_TIMEOUT);
0620
0621 mutex_unlock(&mgmt_eth_data->mutex);
0622
0623 return ret;
0624
0625
0626 err_mgmt_master:
0627 kfree_skb(read_skb);
0628 err_read_skb:
0629 kfree_skb(clear_skb);
0630 err_clear_skb:
0631 kfree_skb(write_skb);
0632
0633 return ret;
0634 }
0635
/* Map a switch user port number to its internal PHY address.
 * User ports start at 1 while the internal PHYs start at 0, so
 * port 1 -> PHY 0, port 2 -> PHY 1 and so on.
 */
static u32
qca8k_port_to_phy(int port)
{
	return port - 1;
}
0650
/* Poll a switch register over MDIO until the bits in @mask clear or
 * QCA8K_BUSY_WAIT_TIMEOUT expires.
 */
static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
	u16 r1, r2, page;
	u32 val;
	int ret, ret1;

	qca8k_split_addr(reg, &r1, &r2, &page);

	ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				bus, 0x10 | r2, r1, &val);

	/* Check if qca8k_mii_read32 has failed for an impossible condition:
	 * only report the read error (ret1) when the poll also timed out,
	 * otherwise a transient read failure on the last poll would mask
	 * a successful busy-clear.
	 */
	if (ret < 0 && ret1 < 0)
		return ret1;

	return ret;
}
0672
/* Write a PHY register through the switch internal MDIO master over the
 * paged MDIO interface. Returns 0 on success or a negative error.
 */
static int
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
	      QCA8K_MDIO_MASTER_DATA(data);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	/* Kick off the transaction and wait for the BUSY bit to clear */
	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	return ret;
}
0710
/* Read a PHY register through the switch internal MDIO master over the
 * paged MDIO interface. Returns the 16-bit register value or a
 * negative error.
 */
static int
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	/* Kick off the transaction and wait for the BUSY bit to clear */
	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);
	if (ret)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	if (ret >= 0)
		ret = val & QCA8K_MDIO_MASTER_DATA_MASK;

	return ret;
}
0754
0755 static int
0756 qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
0757 {
0758 struct qca8k_priv *priv = slave_bus->priv;
0759 int ret;
0760
0761
0762 ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
0763 if (!ret)
0764 return 0;
0765
0766 return qca8k_mdio_write(priv, phy, regnum, data);
0767 }
0768
0769 static int
0770 qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
0771 {
0772 struct qca8k_priv *priv = slave_bus->priv;
0773 int ret;
0774
0775
0776 ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
0777 if (ret >= 0)
0778 return ret;
0779
0780 ret = qca8k_mdio_read(priv, phy, regnum);
0781
0782 if (ret < 0)
0783 return 0xffff;
0784
0785 return ret;
0786 }
0787
0788 static int
0789 qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
0790 {
0791 port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
0792
0793 return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
0794 }
0795
0796 static int
0797 qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
0798 {
0799 port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
0800
0801 return qca8k_internal_mdio_read(slave_bus, port, regnum);
0802 }
0803
0804 static int
0805 qca8k_mdio_register(struct qca8k_priv *priv)
0806 {
0807 struct dsa_switch *ds = priv->ds;
0808 struct device_node *mdio;
0809 struct mii_bus *bus;
0810
0811 bus = devm_mdiobus_alloc(ds->dev);
0812 if (!bus)
0813 return -ENOMEM;
0814
0815 bus->priv = (void *)priv;
0816 snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
0817 ds->dst->index, ds->index);
0818 bus->parent = ds->dev;
0819 bus->phy_mask = ~ds->phys_mii_mask;
0820 ds->slave_mii_bus = bus;
0821
0822
0823 mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
0824 if (of_device_is_available(mdio)) {
0825 bus->name = "qca8k slave mii";
0826 bus->read = qca8k_internal_mdio_read;
0827 bus->write = qca8k_internal_mdio_write;
0828 return devm_of_mdiobus_register(priv->dev, bus, mdio);
0829 }
0830
0831
0832
0833
0834 bus->name = "qca8k-legacy slave mii";
0835 bus->read = qca8k_legacy_mdio_read;
0836 bus->write = qca8k_legacy_mdio_write;
0837 return devm_mdiobus_register(priv->dev, bus);
0838 }
0839
/* Inspect the port nodes to decide whether the user-port PHYs are
 * reached through the switch internal MDIO master or an external MDIO
 * bus, then configure accordingly.
 */
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
	u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
	struct device_node *ports, *port;
	phy_interface_t mode;
	int err;

	ports = of_get_child_by_name(priv->dev->of_node, "ports");
	if (!ports)
		ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");

	if (!ports)
		return -EINVAL;

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			of_node_put(ports);
			return err;
		}

		if (!dsa_is_user_port(priv->ds, reg))
			continue;

		of_get_phy_mode(port, &mode);

		/* A phy-handle with a non-internal phy-mode means the
		 * PHY sits on an external MDIO bus.
		 */
		if (of_property_read_bool(port, "phy-handle") &&
		    mode != PHY_INTERFACE_MODE_INTERNAL)
			external_mdio_mask |= BIT(reg);
		else
			internal_mdio_mask |= BIT(reg);
	}

	of_node_put(ports);
	if (!external_mdio_mask && !internal_mdio_mask) {
		dev_err(priv->dev, "no PHYs are defined.\n");
		return -EINVAL;
	}

	/* Mixing internal and external PHYs is not supported: the MDIO
	 * master enable below is switch-wide, so only one bus
	 * configuration can be active at a time.
	 */
	if (!!external_mdio_mask && !!internal_mdio_mask) {
		dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
		return -EINVAL;
	}

	if (external_mdio_mask) {
		/* Make sure to disable the internal mdio bus in cases
		 * a dt-overlay and driver reload changed the configuration
		 */
		return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
					 QCA8K_MDIO_MASTER_EN);
	}

	return qca8k_mdio_register(priv);
}
0908
0909 static int
0910 qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
0911 {
0912 u32 mask = 0;
0913 int ret = 0;
0914
0915
0916
0917
0918
0919 if (of_machine_is_compatible("qcom,ipq8064"))
0920 mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
0921
0922
0923 if (of_machine_is_compatible("qcom,ipq8065"))
0924 mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
0925
0926 if (mask) {
0927 ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
0928 QCA8K_MAC_PWR_RGMII0_1_8V |
0929 QCA8K_MAC_PWR_RGMII1_1_8V,
0930 mask);
0931 }
0932
0933 return ret;
0934 }
0935
/* Return the CPU port index. On this switch only ports 0 and 6 can be
 * connected to the CPU; -EINVAL if neither is configured as such.
 */
static int qca8k_find_cpu_port(struct dsa_switch *ds)
{
	struct qca8k_priv *priv = ds->priv;

	/* Check if the CPU port is port 0 */
	if (dsa_is_cpu_port(ds, 0))
		return 0;

	dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");

	if (dsa_is_cpu_port(ds, 6))
		return 6;

	return -EINVAL;
}
0951
/* Configure the power-on-strap (PWS) register from devicetree
 * properties.
 */
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
	const struct qca8k_match_data *data = priv->info;
	struct device_node *node = priv->dev->of_node;
	u32 val = 0;
	int ret;

	/* QCA8327 comes in a normal and a reduced package; tell the
	 * switch which one it actually is via the PACKAGE148 strap bit.
	 * NOTE(review): "148" presumably refers to the 148-pin reduced
	 * package variant - confirm against the QCA8327 datasheet.
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		if (data->reduced_package)
			val |= QCA8327_PWS_PACKAGE148_EN;

		ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
				val);
		if (ret)
			return ret;
	}

	if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
		val |= QCA8K_PWS_POWER_ON_SEL;

	/* Open-drain LEDs only make sense when the power-on strapping
	 * is overridden, hence the dependency check.
	 */
	if (of_property_read_bool(node, "qca,led-open-drain")) {
		if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
			dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
			return -EINVAL;
		}

		val |= QCA8K_PWS_LED_OPEN_EN_CSR;
	}

	return qca8k_rmw(priv, QCA8K_REG_PWS,
			 QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
			 val);
}
0991
/* Parse per-CPU-port pad configuration from the devicetree: RGMII
 * tx/rx internal delays and SGMII clock-edge/PLL tweaks. Results are
 * stored in priv->ports_config for later use in mac_config.
 */
static int
qca8k_parse_port_config(struct qca8k_priv *priv)
{
	int port, cpu_port_index = -1, ret;
	struct device_node *port_dn;
	phy_interface_t mode;
	struct dsa_port *dp;
	u32 delay;

	/* Only ports 0 and 6 can be CPU ports; iterate just those */
	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Skip every other port */
		if (port != 0 && port != 6)
			continue;

		dp = dsa_to_port(priv->ds, port);
		port_dn = dp->dn;
		cpu_port_index++;

		if (!of_device_is_available(port_dn))
			continue;

		ret = of_get_phy_mode(port_dn, &mode);
		if (ret)
			continue;

		switch (mode) {
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_SGMII:
			delay = 0;

			if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
				/* DT value is in ps, the register takes ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_TXID)
				delay = 1;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;

			delay = 0;

			if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
				/* DT value is in ps, the register takes ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_RXID)
				delay = 2;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;

			/* Skip the SGMII-only properties for rgmii* modes */
			if (mode == PHY_INTERFACE_MODE_RGMII ||
			    mode == PHY_INTERFACE_MODE_RGMII_ID ||
			    mode == PHY_INTERFACE_MODE_RGMII_TXID ||
			    mode == PHY_INTERFACE_MODE_RGMII_RXID)
				break;

			if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
				priv->ports_config.sgmii_tx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
				priv->ports_config.sgmii_rx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
				priv->ports_config.sgmii_enable_pll = true;

				/* The PLL is rejected on qca8327 and only
				 * warned about on early qca8337 revisions.
				 */
				if (priv->switch_id == QCA8K_ID_QCA8327) {
					dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
					priv->ports_config.sgmii_enable_pll = false;
				}

				if (priv->switch_revision < 2)
					dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
			}

			break;
		default:
			continue;
		}
	}

	return 0;
}
1089
/* Program the RGMII tx/rx internal delays parsed by
 * qca8k_parse_port_config() into the CPU port pad-control register
 * @reg. A zero delay leaves the corresponding enable bit cleared.
 */
static void
qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
				      u32 reg)
{
	u32 delay, val = 0;
	int ret;

	/* A non-zero parsed delay means one of the delay-request
	 * mechanisms (explicit *-internal-delay-ps property or an
	 * rgmii-id/txid/rxid phy-mode) was present; otherwise the delay
	 * stays disabled.
	 */
	if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
		delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];

		val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
		       QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
	}

	if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
		delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];

		val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
		       QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
	}

	/* Set RGMII delay based on the selected values */
	ret = qca8k_rmw(priv, reg,
			QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
			val);
	if (ret)
		dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
			cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
}
1130
1131 static struct phylink_pcs *
1132 qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
1133 phy_interface_t interface)
1134 {
1135 struct qca8k_priv *priv = ds->priv;
1136 struct phylink_pcs *pcs = NULL;
1137
1138 switch (interface) {
1139 case PHY_INTERFACE_MODE_SGMII:
1140 case PHY_INTERFACE_MODE_1000BASEX:
1141 switch (port) {
1142 case 0:
1143 pcs = &priv->pcs_port_0.pcs;
1144 break;
1145
1146 case 6:
1147 pcs = &priv->pcs_port_6.pcs;
1148 break;
1149 }
1150 break;
1151
1152 default:
1153 break;
1154 }
1155
1156 return pcs;
1157 }
1158
/* phylink mac_config: program the pad-control register of the CPU port
 * (0 or 6) for the requested interface mode. User ports 1-5 are
 * internal PHYs with nothing to configure.
 */
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
			 const struct phylink_link_state *state)
{
	struct qca8k_priv *priv = ds->priv;
	int cpu_port_index;
	u32 reg;

	switch (port) {
	case 0:
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII)
			return;

		reg = QCA8K_REG_PORT0_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT0;
		break;
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
		/* Internal PHY, nothing to do */
		return;
	case 6:
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII &&
		    state->interface != PHY_INTERFACE_MODE_1000BASEX)
			return;

		reg = QCA8K_REG_PORT6_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT6;
		break;
	default:
		dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
		return;
	}

	/* In-band negotiation is only supported on port 6 */
	if (port != 6 && phylink_autoneg_inband(mode)) {
		dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
			__func__);
		return;
	}

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);

		/* Configure rgmii delay */
		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);

		/* On qca8337 the RX delay for port 5 is additionally
		 * enabled through PORT5_PAD_CTRL.
		 * NOTE(review): presumably a qca8337-specific pad quirk
		 * from the datasheet - confirm before changing.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337)
			qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
				    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		/* Enable SGMII on the port */
		qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
		break;
	default:
		dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
			phy_modes(state->interface), port);
		return;
	}
}
1238
/* phylink get_caps: report the supported interfaces per port.
 * Ports 0 and 6 are the CPU-capable ports (RGMII/SGMII, port 6 also
 * 1000BASE-X); ports 1-5 are the internal PHYs.
 */
static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
				   struct phylink_config *config)
{
	switch (port) {
	case 0: /* 1st CPU port */
		phy_interface_set_rgmii(config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  config->supported_interfaces);
		break;

	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
		/* Internal PHY */
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  config->supported_interfaces);
		break;

	case 6: /* 2nd CPU port / external PHY */
		phy_interface_set_rgmii(config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  config->supported_interfaces);
		break;
	}

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
		MAC_10 | MAC_100 | MAC_1000FD;

	config->legacy_pre_march2020 = false;
}
1275
/* phylink mac_link_down: disable the port MAC when the link drops */
static void
qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
			    phy_interface_t interface)
{
	struct qca8k_priv *priv = ds->priv;

	qca8k_port_set_status(priv, port, 0);
}
1284
1285 static void
1286 qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
1287 phy_interface_t interface, struct phy_device *phydev,
1288 int speed, int duplex, bool tx_pause, bool rx_pause)
1289 {
1290 struct qca8k_priv *priv = ds->priv;
1291 u32 reg;
1292
1293 if (phylink_autoneg_inband(mode)) {
1294 reg = QCA8K_PORT_STATUS_LINK_AUTO;
1295 } else {
1296 switch (speed) {
1297 case SPEED_10:
1298 reg = QCA8K_PORT_STATUS_SPEED_10;
1299 break;
1300 case SPEED_100:
1301 reg = QCA8K_PORT_STATUS_SPEED_100;
1302 break;
1303 case SPEED_1000:
1304 reg = QCA8K_PORT_STATUS_SPEED_1000;
1305 break;
1306 default:
1307 reg = QCA8K_PORT_STATUS_LINK_AUTO;
1308 break;
1309 }
1310
1311 if (duplex == DUPLEX_FULL)
1312 reg |= QCA8K_PORT_STATUS_DUPLEX;
1313
1314 if (rx_pause || dsa_is_cpu_port(ds, port))
1315 reg |= QCA8K_PORT_STATUS_RXFLOW;
1316
1317 if (tx_pause || dsa_is_cpu_port(ds, port))
1318 reg |= QCA8K_PORT_STATUS_TXFLOW;
1319 }
1320
1321 reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1322
1323 qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
1324 }
1325
/* Recover the driver's qca8k_pcs wrapper from its embedded phylink_pcs. */
static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct qca8k_pcs, pcs);
}
1330
1331 static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
1332 struct phylink_link_state *state)
1333 {
1334 struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1335 int port = pcs_to_qca8k_pcs(pcs)->port;
1336 u32 reg;
1337 int ret;
1338
1339 ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®);
1340 if (ret < 0) {
1341 state->link = false;
1342 return;
1343 }
1344
1345 state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
1346 state->an_complete = state->link;
1347 state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
1348 state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
1349 DUPLEX_HALF;
1350
1351 switch (reg & QCA8K_PORT_STATUS_SPEED) {
1352 case QCA8K_PORT_STATUS_SPEED_10:
1353 state->speed = SPEED_10;
1354 break;
1355 case QCA8K_PORT_STATUS_SPEED_100:
1356 state->speed = SPEED_100;
1357 break;
1358 case QCA8K_PORT_STATUS_SPEED_1000:
1359 state->speed = SPEED_1000;
1360 break;
1361 default:
1362 state->speed = SPEED_UNKNOWN;
1363 break;
1364 }
1365
1366 if (reg & QCA8K_PORT_STATUS_RXFLOW)
1367 state->pause |= MLO_PAUSE_RX;
1368 if (reg & QCA8K_PORT_STATUS_TXFLOW)
1369 state->pause |= MLO_PAUSE_TX;
1370 }
1371
1372 static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
1373 phy_interface_t interface,
1374 const unsigned long *advertising,
1375 bool permit_pause_to_mac)
1376 {
1377 struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1378 int cpu_port_index, ret, port;
1379 u32 reg, val;
1380
1381 port = pcs_to_qca8k_pcs(pcs)->port;
1382 switch (port) {
1383 case 0:
1384 reg = QCA8K_REG_PORT0_PAD_CTRL;
1385 cpu_port_index = QCA8K_CPU_PORT0;
1386 break;
1387
1388 case 6:
1389 reg = QCA8K_REG_PORT6_PAD_CTRL;
1390 cpu_port_index = QCA8K_CPU_PORT6;
1391 break;
1392
1393 default:
1394 WARN_ON(1);
1395 return -EINVAL;
1396 }
1397
1398
1399 ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
1400 if (ret)
1401 return ret;
1402 if (phylink_autoneg_inband(mode))
1403 val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1404 else
1405 val |= QCA8K_PWS_SERDES_AEN_DIS;
1406 qca8k_write(priv, QCA8K_REG_PWS, val);
1407
1408
1409 ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1410 if (ret)
1411 return ret;
1412
1413 val |= QCA8K_SGMII_EN_SD;
1414
1415 if (priv->ports_config.sgmii_enable_pll)
1416 val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
1417 QCA8K_SGMII_EN_TX;
1418
1419 if (dsa_is_cpu_port(priv->ds, port)) {
1420
1421 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1422 val |= QCA8K_SGMII_MODE_CTRL_PHY;
1423 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1424 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1425 val |= QCA8K_SGMII_MODE_CTRL_MAC;
1426 } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
1427 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1428 val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1429 }
1430
1431 qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
1432
1433
1434
1435
1436 if (interface == PHY_INTERFACE_MODE_SGMII)
1437 qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1438
1439
1440
1441 if (priv->switch_id == QCA8K_ID_QCA8327 ||
1442 priv->switch_id == QCA8K_ID_QCA8337)
1443 reg = QCA8K_REG_PORT0_PAD_CTRL;
1444
1445 val = 0;
1446
1447
1448 if (priv->ports_config.sgmii_rx_clk_falling_edge)
1449 val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
1450
1451 if (priv->ports_config.sgmii_tx_clk_falling_edge)
1452 val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
1453
1454 if (val)
1455 ret = qca8k_rmw(priv, reg,
1456 QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
1457 QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
1458 val);
1459
1460 return 0;
1461 }
1462
/* Intentionally empty: the callback must exist for phylink_pcs_ops.
 * NOTE(review): presumably the hardware restarts serdes autoneg on its
 * own (or it is not supported) - confirm against the datasheet.
 */
static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
}
1466
/* phylink PCS operations shared by the port 0 and port 6 PCS instances */
static const struct phylink_pcs_ops qca8k_pcs_ops = {
	.pcs_get_state = qca8k_pcs_get_state,
	.pcs_config = qca8k_pcs_config,
	.pcs_an_restart = qca8k_pcs_an_restart,
};
1472
/* Initialize one qca8k_pcs instance, binding it to @port. */
static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
			    int port)
{
	qpcs->pcs.ops = &qca8k_pcs_ops;

	/* Poll for link state changes; no link-change interrupt is wired
	 * up to phylink here.
	 */
	qpcs->pcs.poll = true;
	qpcs->priv = priv;
	qpcs->port = port;
}
1483
/* Tagger callback: parse a MIB autocast packet sent by the switch and
 * copy the counters into the buffer a qca8k_get_ethtool_stats_eth()
 * caller is waiting on, then signal completion once every port's packet
 * has been accounted for.
 */
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	struct mib_ethhdr *mib_ethhdr;
	int i, mib_len, offset = 0;
	u64 *data;
	u8 port;

	mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
	mib_eth_data = &priv->mib_eth_data;

	/* The switch autocasts one packet per port; only the packet for
	 * the requested port carries the data we want, but every packet
	 * still decrements port_parsed below.
	 */
	port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
	if (port != mib_eth_data->req_port)
		goto exit;

	data = mib_eth_data->data;

	for (i = 0; i < priv->info->mib_count; i++) {
		mib = &ar8327_mib[i];

		/* The first 3 counters are carried inside the header itself */
		if (i < 3) {
			data[i] = mib_ethhdr->data[i];
			continue;
		}

		mib_len = sizeof(uint32_t);

		/* Double-word counters (mib->size == 2) occupy 64 bits */
		if (mib->size == 2)
			mib_len = sizeof(uint64_t);

		/* Copy the raw counter bytes from the payload.
		 * NOTE(review): assumes payload byte order matches host
		 * expectations - confirm on big-endian targets.
		 */
		memcpy(data + i, skb->data + offset, mib_len);

		/* Advance to the next counter in the payload */
		offset += mib_len;
	}

exit:
	/* Wake the waiter once all QCA8K_NUM_PORTS packets were seen */
	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
		complete(&mib_eth_data->rw_done);
}
1533
/* Fetch ethtool statistics via the Ethernet MIB autocast mechanism.
 *
 * Arms priv->mib_eth_data for @port, triggers a MIB CAST on the switch
 * and waits for qca8k_mib_autocast_handler() to fill @data.
 *
 * Returns a negative errno on register-write failure; otherwise the
 * (non-negative) result of wait_for_completion_timeout(), i.e. 0 on
 * timeout and remaining jiffies on success.
 */
static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	mib_eth_data = &priv->mib_eth_data;

	/* Serialize MIB-over-Ethernet requests */
	mutex_lock(&mib_eth_data->mutex);

	reinit_completion(&mib_eth_data->rw_done);

	/* Tell the handler which port to parse and where to store data;
	 * the switch replies with one packet per port.
	 */
	mib_eth_data->req_port = dp->index;
	mib_eth_data->data = data;
	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);

	mutex_lock(&priv->reg_mutex);

	/* Kick off the MIB autocast operation in the switch */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
				 QCA8K_MIB_BUSY);

	mutex_unlock(&priv->reg_mutex);

	if (ret)
		goto exit;

	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);

exit:
	mutex_unlock(&mib_eth_data->mutex);

	return ret;
}
1572
1573 static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
1574 {
1575 struct qca8k_priv *priv = ds->priv;
1576
1577
1578
1579
1580
1581
1582
1583 if (port > 0 && port < 6)
1584 return priv->switch_revision;
1585
1586 return 0;
1587 }
1588
/* All ports use the QCA tagging protocol, regardless of @port or @mp. */
static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
		       enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_QCA;
}
1595
/* DSA callback: track whether the master netdev is operational so the
 * Ethernet-based mgmt/MIB register access paths know when they may run.
 * Both mutexes are held so no Ethernet transaction is in flight while
 * the master pointer changes.
 */
static void
qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
		    bool operational)
{
	struct dsa_port *dp = master->dsa_ptr;
	struct qca8k_priv *priv = ds->priv;

	/* Ethernet mgmt/MIB traffic is only exchanged via the first
	 * CPU port (index 0); ignore changes on any other master.
	 */
	if (dp->index != 0)
		return;

	mutex_lock(&priv->mgmt_eth_data.mutex);
	mutex_lock(&priv->mib_eth_data.mutex);

	priv->mgmt_master = operational ? (struct net_device *)master : NULL;

	mutex_unlock(&priv->mib_eth_data.mutex);
	mutex_unlock(&priv->mgmt_eth_data.mutex);
}
1615
1616 static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
1617 enum dsa_tag_protocol proto)
1618 {
1619 struct qca_tagger_data *tagger_data;
1620
1621 switch (proto) {
1622 case DSA_TAG_PROTO_QCA:
1623 tagger_data = ds->tagger_data;
1624
1625 tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
1626 tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
1627
1628 break;
1629 default:
1630 return -EOPNOTSUPP;
1631 }
1632
1633 return 0;
1634 }
1635
/* Main DSA setup callback: bring the switch into its initial state.
 *
 * The sequence is order-sensitive: CPU port discovery and pad/power
 * configuration first, then PCS setup, CPU port enable, MIB engine,
 * per-port isolation/VLAN defaults, and finally global flow-control
 * and frame-size settings.
 */
static int
qca8k_setup(struct dsa_switch *ds)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	int cpu_port, ret, i;
	u32 mask;

	cpu_port = qca8k_find_cpu_port(ds);
	if (cpu_port < 0) {
		dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
		return cpu_port;
	}

	/* Parse CPU port config to be later used in mac_config */
	ret = qca8k_parse_port_config(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mdio_bus(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_of_pws_reg(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mac_pwr_sel(priv);
	if (ret)
		return ret;

	/* Set up the PCS instances for the two CPU-capable ports */
	qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
	qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);

	/* Make sure MAC06 exchange is disabled */
	ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
				QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
	if (ret) {
		dev_err(priv->dev, "failed disabling MAC06 exchange");
		return ret;
	}

	/* Enable CPU Port */
	ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
			      QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
	if (ret) {
		dev_err(priv->dev, "failed enabling CPU port");
		return ret;
	}

	/* Enable MIB counters; non-fatal, only warn on failure */
	ret = qca8k_mib_init(priv);
	if (ret)
		dev_warn(priv->dev, "mib init failed");

	/* Initial setup of all ports */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* Disable forwarding by default on all ports */
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
				QCA8K_PORT_LOOKUP_MEMBER, 0);
		if (ret)
			return ret;

		/* Enable QCA header mode on all CPU ports */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
			if (ret) {
				dev_err(priv->dev, "failed enabling QCA header mode");
				return ret;
			}
		}

		/* Disable MAC by default on all user ports */
		if (dsa_is_user_port(ds, i))
			qca8k_port_set_status(priv, i, 0);
	}

	/* Forward all unknown IGMP/broadcast/multicast/unicast frames
	 * to the CPU port for Linux to process.
	 */
	ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
	if (ret)
		return ret;

	/* Set up port isolation: the CPU port sees all user ports, each
	 * user port only the CPU port.
	 */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* CPU port gets connected to all user ports of the switch */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
			if (ret)
				return ret;
		}

		/* Individual user ports get connected to CPU port only */
		if (dsa_is_user_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER,
					BIT(cpu_port));
			if (ret)
				return ret;

			/* Enable ARL (address) learning on the port */
			ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
					      QCA8K_PORT_LOOKUP_LEARN);
			if (ret)
				return ret;

			/* For port based vlans to work we need to set the
			 * default egress vid and port VLAN IDs
			 */
			ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
					QCA8K_EGREES_VLAN_PORT_MASK(i),
					QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;

			ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
					  QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
					  QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;
		}

		/* QCA8337-only egress buffer/priority tuning.
		 * NOTE(review): the specific values appear to come from the
		 * vendor driver to improve stability under flood conditions
		 * - confirm against vendor documentation.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337) {
			switch (i) {
			/* The 2 CPU-capable ports and port 5 get larger
			 * per-priority egress buffers.
			 */
			case 0:
			case 5:
			case 6:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
				break;
			default:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
			}
			qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);

			mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
			QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
			QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
			QCA8K_PORT_HOL_CTRL1_WRED_EN;
			qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
				  QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
				  QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_WRED_EN,
				  mask);
		}
	}

	/* Set initial MTU for every port.
	 * QCA8327-specific global flow-control thresholds.
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
		       QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
		qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
			  QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
			  QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
			  mask);
	}

	/* Default max frame size: standard Ethernet frame plus FCS */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		dev_warn(priv->dev, "failed setting MTU settings");

	/* Flush the FDB table */
	qca8k_fdb_flush(priv);

	/* Set min a max ageing value supported by the hardware */
	ds->ageing_time_min = 7000;
	ds->ageing_time_max = 458745000;

	/* Set max number of LAGs supported */
	ds->num_lag_ids = QCA8K_NUM_LAGS;

	return 0;
}
1839
/* DSA switch operations: ethtool stats, port/bridge/VLAN/FDB/MDB
 * management, phylink integration, LAG and tagger hookup.
 */
static const struct dsa_switch_ops qca8k_switch_ops = {
	.get_tag_protocol = qca8k_get_tag_protocol,
	.setup = qca8k_setup,
	.get_strings = qca8k_get_strings,
	.get_ethtool_stats = qca8k_get_ethtool_stats,
	.get_sset_count = qca8k_get_sset_count,
	.set_ageing_time = qca8k_set_ageing_time,
	.get_mac_eee = qca8k_get_mac_eee,
	.set_mac_eee = qca8k_set_mac_eee,
	.port_enable = qca8k_port_enable,
	.port_disable = qca8k_port_disable,
	.port_change_mtu = qca8k_port_change_mtu,
	.port_max_mtu = qca8k_port_max_mtu,
	.port_stp_state_set = qca8k_port_stp_state_set,
	.port_bridge_join = qca8k_port_bridge_join,
	.port_bridge_leave = qca8k_port_bridge_leave,
	.port_fast_age = qca8k_port_fast_age,
	.port_fdb_add = qca8k_port_fdb_add,
	.port_fdb_del = qca8k_port_fdb_del,
	.port_fdb_dump = qca8k_port_fdb_dump,
	.port_mdb_add = qca8k_port_mdb_add,
	.port_mdb_del = qca8k_port_mdb_del,
	.port_mirror_add = qca8k_port_mirror_add,
	.port_mirror_del = qca8k_port_mirror_del,
	.port_vlan_filtering = qca8k_port_vlan_filtering,
	.port_vlan_add = qca8k_port_vlan_add,
	.port_vlan_del = qca8k_port_vlan_del,
	.phylink_get_caps = qca8k_phylink_get_caps,
	.phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
	.phylink_mac_config = qca8k_phylink_mac_config,
	.phylink_mac_link_down = qca8k_phylink_mac_link_down,
	.phylink_mac_link_up = qca8k_phylink_mac_link_up,
	.get_phy_flags = qca8k_get_phy_flags,
	.port_lag_join = qca8k_port_lag_join,
	.port_lag_leave = qca8k_port_lag_leave,
	.master_state_change = qca8k_master_change,
	.connect_tag_protocol = qca8k_connect_tag_protocol,
};
1878
/* MDIO probe: allocate private state, hardware-reset the switch via the
 * optional reset GPIO, set up regmap access, read the switch ID and
 * register the DSA switch.
 */
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv;
	int ret;

	/* Allocate the private data struct so that we can probe the
	 * switch's ID register before committing to anything else.
	 */
	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bus = mdiodev->bus;
	priv->dev = &mdiodev->dev;
	priv->info = of_device_get_match_data(priv->dev);

	priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
						   GPIOD_ASIS);
	if (IS_ERR(priv->reset_gpio))
		return PTR_ERR(priv->reset_gpio);

	if (priv->reset_gpio) {
		/* Assert reset, hold it briefly, then release.
		 * NOTE(review): 20 ms hold time presumably matches the
		 * hardware's minimum reset pulse - confirm in datasheet.
		 */
		gpiod_set_value_cansleep(priv->reset_gpio, 1);
		msleep(20);
		gpiod_set_value_cansleep(priv->reset_gpio, 0);
	}

	/* Start by setting up the register mapping */
	priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
					&qca8k_regmap_config);
	if (IS_ERR(priv->regmap)) {
		dev_err(priv->dev, "regmap initialization failed");
		return PTR_ERR(priv->regmap);
	}

	/* Seed the mdio split-register cache with an impossible value so
	 * the first access always writes the real page/lo/hi registers.
	 */
	priv->mdio_cache.page = 0xffff;
	priv->mdio_cache.lo = 0xffff;
	priv->mdio_cache.hi = 0xffff;

	/* Check the detected switch id */
	ret = qca8k_read_switch_id(priv);
	if (ret)
		return ret;

	priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	mutex_init(&priv->mgmt_eth_data.mutex);
	init_completion(&priv->mgmt_eth_data.rw_done);

	mutex_init(&priv->mib_eth_data.mutex);
	init_completion(&priv->mib_eth_data.rw_done);

	priv->ds->dev = &mdiodev->dev;
	priv->ds->num_ports = QCA8K_NUM_PORTS;
	priv->ds->priv = priv;
	priv->ds->ops = &qca8k_switch_ops;
	mutex_init(&priv->reg_mutex);
	dev_set_drvdata(&mdiodev->dev, priv);

	return dsa_register_switch(priv->ds);
}
1946
/* MDIO remove: disable every port's MAC, then unregister the switch. */
static void
qca8k_sw_remove(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
	int i;

	/* drvdata may already be NULL after shutdown */
	if (!priv)
		return;

	for (i = 0; i < QCA8K_NUM_PORTS; i++)
		qca8k_port_set_status(priv, i, 0);

	dsa_unregister_switch(priv->ds);

	dev_set_drvdata(&mdiodev->dev, NULL);
}
1963
/* MDIO shutdown: mark the switch as shut down so the DSA core stops
 * using it, and clear drvdata so a subsequent remove() becomes a no-op.
 */
static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);

	if (!priv)
		return;

	dsa_switch_shutdown(priv->ds);

	dev_set_drvdata(&mdiodev->dev, NULL);
}
1975
1976 #ifdef CONFIG_PM_SLEEP
1977 static void
1978 qca8k_set_pm(struct qca8k_priv *priv, int enable)
1979 {
1980 int port;
1981
1982 for (port = 0; port < QCA8K_NUM_PORTS; port++) {
1983
1984
1985
1986 if (!(priv->port_enabled_map & BIT(port)))
1987 continue;
1988
1989 qca8k_port_set_status(priv, port, enable);
1990 }
1991 }
1992
/* PM suspend: disable enabled port MACs, then suspend the DSA switch. */
static int qca8k_suspend(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 0);

	return dsa_switch_suspend(priv->ds);
}
2001
/* PM resume: re-enable previously-enabled port MACs and resume DSA. */
static int qca8k_resume(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 1);

	return dsa_switch_resume(priv->ds);
}
2010 #endif
2011
/* PM ops; SIMPLE_DEV_PM_OPS compiles the hooks out without CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
			 qca8k_suspend, qca8k_resume);
2014
/* Info ops shared by all supported variants: Ethernet-based register
 * access and MIB autocast.
 */
static const struct qca8k_info_ops qca8xxx_ops = {
	.autocast_mib = qca8k_get_ethtool_stats_eth,
	.read_eth = qca8k_read_eth,
	.write_eth = qca8k_write_eth,
};
2020
/* QCA8327: reduced-package variant of the QCA8327 silicon */
static const struct qca8k_match_data qca8327 = {
	.id = QCA8K_ID_QCA8327,
	.reduced_package = true,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};
2027
/* QCA8328: full-package variant; same silicon ID as QCA8327 */
static const struct qca8k_match_data qca8328 = {
	.id = QCA8K_ID_QCA8327,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};
2033
/* QCA8334/QCA8337: share the QCA8337 ID and MIB layout */
static const struct qca8k_match_data qca833x = {
	.id = QCA8K_ID_QCA8337,
	.mib_count = QCA8K_QCA833X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};
2039
/* Device-tree compatibles mapped to the per-variant match data */
static const struct of_device_id qca8k_of_match[] = {
	{ .compatible = "qca,qca8327", .data = &qca8327 },
	{ .compatible = "qca,qca8328", .data = &qca8328 },
	{ .compatible = "qca,qca8334", .data = &qca833x },
	{ .compatible = "qca,qca8337", .data = &qca833x },
	{ /* sentinel */ },
};
2047
/* MDIO driver glue: probe/remove/shutdown plus OF matching and PM ops */
static struct mdio_driver qca8kmdio_driver = {
	.probe = qca8k_sw_probe,
	.remove = qca8k_sw_remove,
	.shutdown = qca8k_sw_shutdown,
	.mdiodrv.driver = {
		.name = "qca8k",
		.of_match_table = qca8k_of_match,
		.pm = &qca8k_pm_ops,
	},
};
2058
/* Standard module registration boilerplate for an MDIO-attached driver */
mdio_module_driver(qca8kmdio_driver);

MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qca8k");