/*
 * Copyright (c) 2003 VIA Networking Technologies, Inc.
 *
 * Driver entry points for the VIA Networking Solomon-A/B/G (VT6655)
 * wireless LAN adapter: PCI probe/remove, descriptor ring setup,
 * interrupt service and mac80211 callbacks.
 */
#include <linux/file.h>
#include "device.h"
#include "card.h"
#include "channel.h"
#include "baseband.h"
#include "mac.h"
#include "power.h"
#include "rxtx.h"
#include "dpc.h"
#include "rf.h"
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>

MODULE_AUTHOR("VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Solomon-A/B/G Wireless LAN Adapter Driver");

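/*
 * DEVICE_PARAM() expands to nothing; the entries below only document the
 * tunables (and their MIN/MAX/DEF bounds) that device_get_options() applies
 * as fixed defaults.
 */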
#define DEVICE_PARAM(N, D)

#define RX_DESC_MIN0 16
#define RX_DESC_MAX0 128
#define RX_DESC_DEF0 32
DEVICE_PARAM(RxDescriptors0, "Number of receive descriptors0");

#define RX_DESC_MIN1 16
#define RX_DESC_MAX1 128
#define RX_DESC_DEF1 32
DEVICE_PARAM(RxDescriptors1, "Number of receive descriptors1");

#define TX_DESC_MIN0 16
#define TX_DESC_MAX0 128
#define TX_DESC_DEF0 32
DEVICE_PARAM(TxDescriptors0, "Number of transmit descriptors0");

#define TX_DESC_MIN1 16
#define TX_DESC_MAX1 128
#define TX_DESC_DEF1 64
DEVICE_PARAM(TxDescriptors1, "Number of transmit descriptors1");

#define INT_WORKS_DEF 20
#define INT_WORKS_MIN 10
#define INT_WORKS_MAX 64

DEVICE_PARAM(int_works, "Number of packets handled per interrupt service");

#define RTS_THRESH_DEF 2347

#define FRAG_THRESH_DEF 2346

#define SHORT_RETRY_MIN 0
#define SHORT_RETRY_MAX 31
#define SHORT_RETRY_DEF 8

DEVICE_PARAM(ShortRetryLimit, "Short frame retry limit");

#define LONG_RETRY_MIN 0
#define LONG_RETRY_MAX 15
#define LONG_RETRY_DEF 4

DEVICE_PARAM(LongRetryLimit, "Long frame retry limit");

#define BBP_TYPE_MIN 0
#define BBP_TYPE_MAX 2
#define BBP_TYPE_DEF 2

DEVICE_PARAM(BasebandType, "Baseband type");

static const struct pci_device_id vt6655_pci_id_table[] = {
        { PCI_VDEVICE(VIA, 0x3253) },
        { 0, }
};

static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent);
static void device_free_info(struct vnt_private *priv);
static void device_print_info(struct vnt_private *priv);

static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr);
static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr);

static int device_init_rd0_ring(struct vnt_private *priv);
static int device_init_rd1_ring(struct vnt_private *priv);
static int device_init_td0_ring(struct vnt_private *priv);
static int device_init_td1_ring(struct vnt_private *priv);

static int device_rx_srv(struct vnt_private *priv, unsigned int idx);
static int device_tx_srv(struct vnt_private *priv, unsigned int idx);
static bool device_alloc_rx_buf(struct vnt_private *priv, struct vnt_rx_desc *rd);
static void device_free_rx_buf(struct vnt_private *priv,
                               struct vnt_rx_desc *rd);
static void device_init_registers(struct vnt_private *priv);
static void device_free_tx_buf(struct vnt_private *priv, struct vnt_tx_desc *desc);
static void device_free_td0_ring(struct vnt_private *priv);
static void device_free_td1_ring(struct vnt_private *priv);
static void device_free_rd0_ring(struct vnt_private *priv);
static void device_free_rd1_ring(struct vnt_private *priv);
static void device_free_rings(struct vnt_private *priv);

static void vt6655_remove(struct pci_dev *pcid)
{
        struct vnt_private *priv = pci_get_drvdata(pcid);

        if (!priv)
                return;
        device_free_info(priv);
}

static void device_get_options(struct vnt_private *priv)
{
        struct vnt_options *opts = &priv->opts;

        opts->rx_descs0 = RX_DESC_DEF0;
        opts->rx_descs1 = RX_DESC_DEF1;
        opts->tx_descs[0] = TX_DESC_DEF0;
        opts->tx_descs[1] = TX_DESC_DEF1;
        opts->int_works = INT_WORKS_DEF;

        opts->short_retry = SHORT_RETRY_DEF;
        opts->long_retry = LONG_RETRY_DEF;
        opts->bbp_type = BBP_TYPE_DEF;
}

static void
device_set_options(struct vnt_private *priv)
{
        priv->byShortRetryLimit = priv->opts.short_retry;
        priv->byLongRetryLimit = priv->opts.long_retry;
        priv->byBBType = priv->opts.bbp_type;
        priv->byPacketType = priv->byBBType;
        priv->byAutoFBCtrl = AUTO_FB_0;
        priv->bUpdateBBVGA = true;
        priv->preamble_type = 0;

        pr_debug(" byShortRetryLimit= %d\n", (int)priv->byShortRetryLimit);
        pr_debug(" byLongRetryLimit= %d\n", (int)priv->byLongRetryLimit);
        pr_debug(" preamble_type= %d\n", (int)priv->preamble_type);
        pr_debug(" byShortPreamble= %d\n", (int)priv->byShortPreamble);
        pr_debug(" byBBType= %d\n", (int)priv->byBBType);
}

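/*
 * The BSSID and current MAC address registers live on MAC register page 1,
 * so both accessors switch to page 1 around the byte-wise copy and back to
 * page 0 afterwards.
 */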
static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr)
{
        iowrite8(1, iobase + MAC_REG_PAGE1SEL);
        for (int i = 0; i < 6; i++)
                iowrite8(mac_addr[i], iobase + MAC_REG_BSSID0 + i);
        iowrite8(0, iobase + MAC_REG_PAGE1SEL);
}

static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr)
{
        iowrite8(1, iobase + MAC_REG_PAGE1SEL);
        for (int i = 0; i < 6; i++)
                mac_addr[i] = ioread8(iobase + MAC_REG_PAR0 + i);
        iowrite8(0, iobase + MAC_REG_PAGE1SEL);
}

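/*
 * device_init_registers - bring the MAC and baseband to a known state.
 *
 * Resets the hardware, reads the EEPROM contents, configures antennas and
 * the per-channel transmit power tables, honours a hardware radio switch if
 * one is present, and finally enables the receive DMA queues and the
 * MAC/RX/TX units.
 */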
static void device_init_registers(struct vnt_private *priv)
{
        unsigned long flags;
        unsigned int ii;
        unsigned char byValue;
        unsigned char byCCKPwrdBm = 0;
        unsigned char byOFDMPwrdBm = 0;

        MACbShutdown(priv);
        bb_software_reset(priv);

        MACbSoftwareReset(priv);

        priv->bAES = false;

        priv->bProtectMode = false;

        priv->bNonERPPresent = false;
        priv->bBarkerPreambleMd = false;
        priv->wCurrentRate = RATE_1M;
        priv->byTopOFDMBasicRate = RATE_24M;
        priv->byTopCCKBasicRate = RATE_1M;

        MACvInitialize(priv);

        priv->local_id = ioread8(priv->port_offset + MAC_REG_LOCALID);

        spin_lock_irqsave(&priv->lock, flags);

        SROMvReadAllContents(priv->port_offset, priv->abyEEPROM);

        spin_unlock_irqrestore(&priv->lock, flags);

        priv->byMinChannel = 1;
        priv->byMaxChannel = CB_MAX_CHANNEL;

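        /*
         * Antenna configuration comes from the EEPROM: either a single
         * antenna (A or B) or both, optionally with TX and RX swapped
         * (EEP_ANTINV).  An empty field is treated as "both antennas".
         */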
        byValue = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_ANTENNA);
        if (byValue & EEP_ANTINV)
                priv->bTxRxAntInv = true;
        else
                priv->bTxRxAntInv = false;

        byValue &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);

        if (byValue == 0)
                byValue = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);

        if (byValue == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
                priv->byAntennaCount = 2;
                priv->byTxAntennaMode = ANT_B;
                priv->dwTxAntennaSel = 1;
                priv->dwRxAntennaSel = 1;

                if (priv->bTxRxAntInv)
                        priv->byRxAntennaMode = ANT_A;
                else
                        priv->byRxAntennaMode = ANT_B;
        } else {
                priv->byAntennaCount = 1;
                priv->dwTxAntennaSel = 0;
                priv->dwRxAntennaSel = 0;

                if (byValue & EEP_ANTENNA_AUX) {
                        priv->byTxAntennaMode = ANT_A;

                        if (priv->bTxRxAntInv)
                                priv->byRxAntennaMode = ANT_B;
                        else
                                priv->byRxAntennaMode = ANT_A;
                } else {
                        priv->byTxAntennaMode = ANT_B;

                        if (priv->bTxRxAntInv)
                                priv->byRxAntennaMode = ANT_A;
                        else
                                priv->byRxAntennaMode = ANT_B;
                }
        }

        bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);
        bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);

        priv->byOriginalZonetype = priv->abyEEPROM[EEP_OFS_ZONETYPE];

        if (!priv->bZoneRegExist)
                priv->byZoneType = priv->abyEEPROM[EEP_OFS_ZONETYPE];

        pr_debug("priv->byZoneType = %x\n", priv->byZoneType);

        RFbInit(priv);

        priv->byCurPwr = 0xFF;
        priv->byCCKPwr = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_PWR_CCK);
        priv->byOFDMPwrG = SROMbyReadEmbedded(priv->port_offset,
                                              EEP_OFS_PWR_OFDMG);

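        /*
         * The per-channel TX power tables are indexed by channel number
         * (entry 0 is unused).  A zero EEPROM entry falls back to the
         * global CCK/OFDM power value read above.
         */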
        for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
                priv->abyCCKPwrTbl[ii + 1] =
                        SROMbyReadEmbedded(priv->port_offset,
                                           (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
                if (priv->abyCCKPwrTbl[ii + 1] == 0)
                        priv->abyCCKPwrTbl[ii + 1] = priv->byCCKPwr;

                priv->abyOFDMPwrTbl[ii + 1] =
                        SROMbyReadEmbedded(priv->port_offset,
                                           (unsigned char)(ii + EEP_OFS_OFDM_PWR_TBL));
                if (priv->abyOFDMPwrTbl[ii + 1] == 0)
                        priv->abyOFDMPwrTbl[ii + 1] = priv->byOFDMPwrG;

                priv->abyCCKDefaultPwr[ii + 1] = byCCKPwrdBm;
                priv->abyOFDMDefaultPwr[ii + 1] = byOFDMPwrdBm;
        }

        for (ii = 11; ii < 14; ii++) {
                priv->abyCCKPwrTbl[ii] = priv->abyCCKPwrTbl[10];
                priv->abyOFDMPwrTbl[ii] = priv->abyOFDMPwrTbl[10];
        }

        for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
                priv->abyOFDMPwrTbl[ii + CB_MAX_CHANNEL_24G + 1] =
                        SROMbyReadEmbedded(priv->port_offset,
                                           (unsigned char)(ii + EEP_OFS_OFDMA_PWR_TBL));

                priv->abyOFDMDefaultPwr[ii + CB_MAX_CHANNEL_24G + 1] =
                        SROMbyReadEmbedded(priv->port_offset,
                                           (unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
        }

        if (priv->local_id > REV_ID_VT3253_B1) {
                MACvSelectPage1(priv->port_offset);

                iowrite8(MSRCTL1_TXPWR | MSRCTL1_CSAPAREN, priv->port_offset + MAC_REG_MSRCTL + 1);

                MACvSelectPage0(priv->port_offset);
        }

        vt6655_mac_word_reg_bits_on(priv->port_offset, MAC_REG_CFG,
                                    (CFG_TKIPOPT | CFG_NOTXTIMEOUT));

        MACvSetShortRetryLimit(priv, priv->byShortRetryLimit);
        MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);

        iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);

        iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);

        bb_vt3253_init(priv);

        if (priv->bUpdateBBVGA) {
                priv->byBBVGACurrent = priv->abyBBVGA[0];
                priv->byBBVGANew = priv->byBBVGACurrent;
                bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
        }

        bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
        bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);

        priv->wCurrentRate = RATE_54M;

        priv->radio_off = false;

        priv->byRadioCtl = SROMbyReadEmbedded(priv->port_offset,
                                              EEP_OFS_RADIOCTL);
        priv->hw_radio_off = false;

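        /*
         * The EEPROM can declare a hardware radio on/off switch on GPIO0;
         * EEP_RADIOCTL_INV flips the polarity of that line.
         */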
        if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
                priv->byGPIO = ioread8(priv->port_offset + MAC_REG_GPIOCTL1);

                if (((priv->byGPIO & GPIO0_DATA) &&
                     !(priv->byRadioCtl & EEP_RADIOCTL_INV)) ||
                    (!(priv->byGPIO & GPIO0_DATA) &&
                     (priv->byRadioCtl & EEP_RADIOCTL_INV)))
                        priv->hw_radio_off = true;
        }

        if (priv->hw_radio_off || priv->bRadioControlOff)
                CARDbRadioPowerOff(priv);

        SROMvReadEtherAddress(priv->port_offset, priv->abyCurrentNetAddr);
        pr_debug("Network address = %pM\n", priv->abyCurrentNetAddr);

        CARDvSafeResetRx(priv);

        CARDvSafeResetTx(priv);

        if (priv->local_id <= REV_ID_VT3253_A1)
                vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);

        MACvReceive0(priv->port_offset);
        MACvReceive1(priv->port_offset);

        iowrite8(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON, priv->port_offset + MAC_REG_HOSTCR);
}

static void device_print_info(struct vnt_private *priv)
{
        dev_info(&priv->pcid->dev, "MAC=%pM IO=0x%lx Mem=0x%lx IRQ=%d\n",
                 priv->abyCurrentNetAddr, (unsigned long)priv->ioaddr,
                 (unsigned long)priv->port_offset, priv->pcid->irq);
}

static void device_free_info(struct vnt_private *priv)
{
        if (!priv)
                return;

        if (priv->mac_hw)
                ieee80211_unregister_hw(priv->hw);

        if (priv->port_offset)
                iounmap(priv->port_offset);

        if (priv->pcid)
                pci_release_regions(priv->pcid);

        if (priv->hw)
                ieee80211_free_hw(priv->hw);
}

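/*
 * All descriptors live in one coherent DMA pool laid out as
 * RX0 ring | RX1 ring | TX0 ring | TX1 ring.  A second pool holds the TX
 * bounce buffers, the beacon buffer and a scratch buffer; the virtual and
 * DMA addresses of each region are derived from the same offsets.
 */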
static bool device_init_rings(struct vnt_private *priv)
{
        void *vir_pool;

        vir_pool = dma_alloc_coherent(&priv->pcid->dev,
                                      priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
                                      priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
                                      priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
                                      priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
                                      &priv->pool_dma, GFP_ATOMIC);
        if (!vir_pool) {
                dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
                return false;
        }

        priv->aRD0Ring = vir_pool;
        priv->aRD1Ring = vir_pool +
                priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);

        priv->rd0_pool_dma = priv->pool_dma;
        priv->rd1_pool_dma = priv->rd0_pool_dma +
                priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);

        priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev,
                                            priv->opts.tx_descs[0] * PKT_BUF_SZ +
                                            priv->opts.tx_descs[1] * PKT_BUF_SZ +
                                            CB_BEACON_BUF_SIZE +
                                            CB_MAX_BUF_SIZE,
                                            &priv->tx_bufs_dma0, GFP_ATOMIC);
        if (!priv->tx0_bufs) {
                dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");

                dma_free_coherent(&priv->pcid->dev,
                                  priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
                                  priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
                                  priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
                                  priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
                                  vir_pool, priv->pool_dma);
                return false;
        }

        priv->td0_pool_dma = priv->rd1_pool_dma +
                priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);

        priv->td1_pool_dma = priv->td0_pool_dma +
                priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);

        priv->apTD0Rings = vir_pool
                + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
                + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);

        priv->apTD1Rings = vir_pool
                + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
                + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc)
                + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);

        priv->tx1_bufs = priv->tx0_bufs +
                priv->opts.tx_descs[0] * PKT_BUF_SZ;

        priv->tx_beacon_bufs = priv->tx1_bufs +
                priv->opts.tx_descs[1] * PKT_BUF_SZ;

        priv->pbyTmpBuff = priv->tx_beacon_bufs +
                CB_BEACON_BUF_SIZE;

        priv->tx_bufs_dma1 = priv->tx_bufs_dma0 +
                priv->opts.tx_descs[0] * PKT_BUF_SZ;

        priv->tx_beacon_dma = priv->tx_bufs_dma1 +
                priv->opts.tx_descs[1] * PKT_BUF_SZ;

        return true;
}

static void device_free_rings(struct vnt_private *priv)
{
        dma_free_coherent(&priv->pcid->dev,
                          priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
                          priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
                          priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
                          priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
                          priv->aRD0Ring, priv->pool_dma);

        if (priv->tx0_bufs)
                dma_free_coherent(&priv->pcid->dev,
                                  priv->opts.tx_descs[0] * PKT_BUF_SZ +
                                  priv->opts.tx_descs[1] * PKT_BUF_SZ +
                                  CB_BEACON_BUF_SIZE +
                                  CB_MAX_BUF_SIZE,
                                  priv->tx0_bufs, priv->tx_bufs_dma0);
}

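/*
 * Each RX descriptor is linked to its successor both by virtual pointer
 * (->next) and by bus address (->next_desc); the last entry points back to
 * the first so the hardware sees a circular ring.  On failure the error
 * path unwinds every descriptor initialised so far.
 */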
static int device_init_rd0_ring(struct vnt_private *priv)
{
        int i;
        dma_addr_t curr = priv->rd0_pool_dma;
        struct vnt_rx_desc *desc;
        int ret;

        for (i = 0; i < priv->opts.rx_descs0;
             i++, curr += sizeof(struct vnt_rx_desc)) {
                desc = &priv->aRD0Ring[i];
                desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
                if (!desc->rd_info) {
                        ret = -ENOMEM;
                        goto err_free_desc;
                }

                if (!device_alloc_rx_buf(priv, desc)) {
                        dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
                        ret = -ENOMEM;
                        goto err_free_rd;
                }

                desc->next = &priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0];
                desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
        }

        if (i > 0)
                priv->aRD0Ring[i - 1].next_desc = cpu_to_le32(priv->rd0_pool_dma);
        priv->pCurrRD[0] = &priv->aRD0Ring[0];

        return 0;

err_free_rd:
        kfree(desc->rd_info);

err_free_desc:
        while (i--) {
                desc = &priv->aRD0Ring[i];
                device_free_rx_buf(priv, desc);
                kfree(desc->rd_info);
        }

        return ret;
}

static int device_init_rd1_ring(struct vnt_private *priv)
{
        int i;
        dma_addr_t curr = priv->rd1_pool_dma;
        struct vnt_rx_desc *desc;
        int ret;

        for (i = 0; i < priv->opts.rx_descs1;
             i++, curr += sizeof(struct vnt_rx_desc)) {
                desc = &priv->aRD1Ring[i];
                desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
                if (!desc->rd_info) {
                        ret = -ENOMEM;
                        goto err_free_desc;
                }

                if (!device_alloc_rx_buf(priv, desc)) {
                        dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
                        ret = -ENOMEM;
                        goto err_free_rd;
                }

                desc->next = &priv->aRD1Ring[(i + 1) % priv->opts.rx_descs1];
                desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
        }

        if (i > 0)
                priv->aRD1Ring[i - 1].next_desc = cpu_to_le32(priv->rd1_pool_dma);
        priv->pCurrRD[1] = &priv->aRD1Ring[0];

        return 0;

err_free_rd:
        kfree(desc->rd_info);

err_free_desc:
        while (i--) {
                desc = &priv->aRD1Ring[i];
                device_free_rx_buf(priv, desc);
                kfree(desc->rd_info);
        }

        return ret;
}

static void device_free_rd0_ring(struct vnt_private *priv)
{
        int i;

        for (i = 0; i < priv->opts.rx_descs0; i++) {
                struct vnt_rx_desc *desc = &priv->aRD0Ring[i];

                device_free_rx_buf(priv, desc);
                kfree(desc->rd_info);
        }
}

static void device_free_rd1_ring(struct vnt_private *priv)
{
        int i;

        for (i = 0; i < priv->opts.rx_descs1; i++) {
                struct vnt_rx_desc *desc = &priv->aRD1Ring[i];

                device_free_rx_buf(priv, desc);
                kfree(desc->rd_info);
        }
}

static int device_init_td0_ring(struct vnt_private *priv)
{
        int i;
        dma_addr_t curr;
        struct vnt_tx_desc *desc;
        int ret;

        curr = priv->td0_pool_dma;
        for (i = 0; i < priv->opts.tx_descs[0];
             i++, curr += sizeof(struct vnt_tx_desc)) {
                desc = &priv->apTD0Rings[i];
                desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
                if (!desc->td_info) {
                        ret = -ENOMEM;
                        goto err_free_desc;
                }

                desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
                desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;

                desc->next = &(priv->apTD0Rings[(i + 1) % priv->opts.tx_descs[0]]);
                desc->next_desc = cpu_to_le32(curr +
                                              sizeof(struct vnt_tx_desc));
        }

        if (i > 0)
                priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
        priv->apTailTD[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];

        return 0;

err_free_desc:
        while (i--) {
                desc = &priv->apTD0Rings[i];
                kfree(desc->td_info);
        }

        return ret;
}

static int device_init_td1_ring(struct vnt_private *priv)
{
        int i;
        dma_addr_t curr;
        struct vnt_tx_desc *desc;
        int ret;

        curr = priv->td1_pool_dma;
        for (i = 0; i < priv->opts.tx_descs[1];
             i++, curr += sizeof(struct vnt_tx_desc)) {
                desc = &priv->apTD1Rings[i];
                desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
                if (!desc->td_info) {
                        ret = -ENOMEM;
                        goto err_free_desc;
                }

                desc->td_info->buf = priv->tx1_bufs + i * PKT_BUF_SZ;
                desc->td_info->buf_dma = priv->tx_bufs_dma1 + i * PKT_BUF_SZ;

                desc->next = &(priv->apTD1Rings[(i + 1) % priv->opts.tx_descs[1]]);
                desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
        }

        if (i > 0)
                priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
        priv->apTailTD[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];

        return 0;

err_free_desc:
        while (i--) {
                desc = &priv->apTD1Rings[i];
                kfree(desc->td_info);
        }

        return ret;
}

static void device_free_td0_ring(struct vnt_private *priv)
{
        int i;

        for (i = 0; i < priv->opts.tx_descs[0]; i++) {
                struct vnt_tx_desc *desc = &priv->apTD0Rings[i];
                struct vnt_td_info *td_info = desc->td_info;

                dev_kfree_skb(td_info->skb);
                kfree(desc->td_info);
        }
}

static void device_free_td1_ring(struct vnt_private *priv)
{
        int i;

        for (i = 0; i < priv->opts.tx_descs[1]; i++) {
                struct vnt_tx_desc *desc = &priv->apTD1Rings[i];
                struct vnt_td_info *td_info = desc->td_info;

                dev_kfree_skb(td_info->skb);
                kfree(desc->td_info);
        }
}

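/*
 * device_rx_srv - service one receive ring.
 *
 * Walks descriptors owned by the host (at most 16 per call), passes each
 * frame to vnt_receive_frame(), replaces the skb that was handed up, and
 * returns ownership of the descriptor to the NIC.  Returns the number of
 * descriptors processed.
 */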
static int device_rx_srv(struct vnt_private *priv, unsigned int idx)
{
        struct vnt_rx_desc *rd;
        int works = 0;

        for (rd = priv->pCurrRD[idx];
             rd->rd0.owner == OWNED_BY_HOST;
             rd = rd->next) {
                if (works++ > 15)
                        break;

                if (!rd->rd_info->skb)
                        break;

                if (vnt_receive_frame(priv, rd)) {
                        if (!device_alloc_rx_buf(priv, rd)) {
                                dev_err(&priv->pcid->dev,
                                        "can not allocate rx buf\n");
                                break;
                        }
                }
                rd->rd0.owner = OWNED_BY_NIC;
        }

        priv->pCurrRD[idx] = rd;

        return works;
}

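/*
 * Allocate a fresh receive skb, map it for DMA and hand the descriptor
 * back to the NIC with the full buffer size as the requested count.
 */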
static bool device_alloc_rx_buf(struct vnt_private *priv,
                                struct vnt_rx_desc *rd)
{
        struct vnt_rd_info *rd_info = rd->rd_info;

        rd_info->skb = dev_alloc_skb((int)priv->rx_buf_sz);
        if (!rd_info->skb)
                return false;

        rd_info->skb_dma =
                dma_map_single(&priv->pcid->dev,
                               skb_put(rd_info->skb, skb_tailroom(rd_info->skb)),
                               priv->rx_buf_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(&priv->pcid->dev, rd_info->skb_dma)) {
                dev_kfree_skb(rd_info->skb);
                rd_info->skb = NULL;
                return false;
        }

        *((unsigned int *)&rd->rd0) = 0;

        rd->rd0.res_count = cpu_to_le16(priv->rx_buf_sz);
        rd->rd0.owner = OWNED_BY_NIC;
        rd->rd1.req_count = cpu_to_le16(priv->rx_buf_sz);
        rd->buff_addr = cpu_to_le32(rd_info->skb_dma);

        return true;
}

static void device_free_rx_buf(struct vnt_private *priv,
                               struct vnt_rx_desc *rd)
{
        struct vnt_rd_info *rd_info = rd->rd_info;

        dma_unmap_single(&priv->pcid->dev, rd_info->skb_dma,
                         priv->rx_buf_sz, DMA_FROM_DEVICE);
        dev_kfree_skb(rd_info->skb);
}

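/*
 * Automatic fallback tables: the row is selected by the original rate
 * (RATE_18M..RATE_54M) and the column by the retry count (clamped to 4),
 * giving the rate the hardware actually fell back to.  fallback_rate0 and
 * fallback_rate1 correspond to the AUTO_FB_0 and AUTO_FB_1 schemes selected
 * in the TX FIFO control word.
 */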
static const u8 fallback_rate0[5][5] = {
        {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
        {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
        {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
        {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
        {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
};

static const u8 fallback_rate1[5][5] = {
        {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
        {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
        {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
        {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
        {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
};

static int vnt_int_report_rate(struct vnt_private *priv,
                               struct vnt_td_info *context, u8 tsr0, u8 tsr1)
{
        struct vnt_tx_fifo_head *fifo_head;
        struct ieee80211_tx_info *info;
        struct ieee80211_rate *rate;
        u16 fb_option;
        u8 tx_retry = (tsr0 & TSR0_NCR);
        s8 idx;

        if (!context)
                return -ENOMEM;

        if (!context->skb)
                return -EINVAL;

        fifo_head = (struct vnt_tx_fifo_head *)context->buf;
        fb_option = (le16_to_cpu(fifo_head->fifo_ctl) &
                     (FIFOCTL_AUTO_FB_0 | FIFOCTL_AUTO_FB_1));

        info = IEEE80211_SKB_CB(context->skb);
        idx = info->control.rates[0].idx;

        if (fb_option && !(tsr1 & TSR1_TERR)) {
                u8 tx_rate;
                u8 retry = tx_retry;

                rate = ieee80211_get_tx_rate(priv->hw, info);
                tx_rate = rate->hw_value - RATE_18M;

                if (retry > 4)
                        retry = 4;

                if (fb_option & FIFOCTL_AUTO_FB_0)
                        tx_rate = fallback_rate0[tx_rate][retry];
                else if (fb_option & FIFOCTL_AUTO_FB_1)
                        tx_rate = fallback_rate1[tx_rate][retry];

                if (info->band == NL80211_BAND_5GHZ)
                        idx = tx_rate - RATE_6M;
                else
                        idx = tx_rate;
        }

        ieee80211_tx_info_clear_status(info);

        info->status.rates[0].count = tx_retry;

        if (!(tsr1 & TSR1_TERR)) {
                info->status.rates[0].idx = idx;

                if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                        info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
                else
                        info->flags |= IEEE80211_TX_STAT_ACK;
        }

        return 0;
}

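/*
 * device_tx_srv - reclaim completed transmit descriptors on one ring.
 *
 * For every descriptor the NIC has returned, report the transmit status and
 * retry/fallback rate to mac80211, release the skb and make the slot
 * available again.  Returns the number of descriptors serviced.
 */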
static int device_tx_srv(struct vnt_private *priv, unsigned int idx)
{
        struct vnt_tx_desc *desc;
        int works = 0;
        unsigned char byTsr0;
        unsigned char byTsr1;

        for (desc = priv->apTailTD[idx]; priv->iTDUsed[idx] > 0; desc = desc->next) {
                if (desc->td0.owner == OWNED_BY_NIC)
                        break;
                if (works++ > 15)
                        break;

                byTsr0 = desc->td0.tsr0;
                byTsr1 = desc->td0.tsr1;

                if (desc->td1.tcr & TCR_STP) {
                        if ((desc->td_info->flags & TD_FLAGS_NETIF_SKB) != 0) {
                                if (!(byTsr1 & TSR1_TERR)) {
                                        if (byTsr0 != 0) {
                                                pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
                                                         (int)idx, byTsr1,
                                                         byTsr0);
                                        }
                                } else {
                                        pr_debug(" Tx[%d] dropped & tsr1[%02X] tsr0[%02X]\n",
                                                 (int)idx, byTsr1, byTsr0);
                                }
                        }

                        if (byTsr1 & TSR1_TERR) {
                                if ((desc->td_info->flags & TD_FLAGS_PRIV_SKB) != 0) {
                                        pr_debug(" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X]\n",
                                                 (int)idx, byTsr1, byTsr0);
                                }
                        }

                        vnt_int_report_rate(priv, desc->td_info, byTsr0, byTsr1);

                        device_free_tx_buf(priv, desc);
                        priv->iTDUsed[idx]--;
                }
        }

        priv->apTailTD[idx] = desc;

        return works;
}

static void device_error(struct vnt_private *priv, unsigned short status)
{
        if (status & ISR_FETALERR) {
                dev_err(&priv->pcid->dev, "Hardware fatal error\n");

                MACbShutdown(priv);
                return;
        }
}

static void device_free_tx_buf(struct vnt_private *priv,
                               struct vnt_tx_desc *desc)
{
        struct vnt_td_info *td_info = desc->td_info;
        struct sk_buff *skb = td_info->skb;

        if (skb)
                ieee80211_tx_status_irqsafe(priv->hw, skb);

        td_info->skb = NULL;
        td_info->flags = 0;
}

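/*
 * Pick a baseband VGA gain from abyBBVGA[] based on the current RSSI in
 * dBm and apply it when it differs from the gain in use; the change is
 * re-applied once the difference has persisted for BB_VGA_CHANGE_THRESHOLD
 * consecutive checks.
 */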
static void vnt_check_bb_vga(struct vnt_private *priv)
{
        long dbm;
        int i;

        if (!priv->bUpdateBBVGA)
                return;

        if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
                return;

        if (!(priv->vif->cfg.assoc && priv->current_rssi))
                return;

        RFvRSSITodBm(priv, (u8)priv->current_rssi, &dbm);

        for (i = 0; i < BB_VGA_LEVEL; i++) {
                if (dbm < priv->dbm_threshold[i]) {
                        priv->byBBVGANew = priv->abyBBVGA[i];
                        break;
                }
        }

        if (priv->byBBVGANew == priv->byBBVGACurrent) {
                priv->uBBVGADiffCount = 1;
                return;
        }

        priv->uBBVGADiffCount++;

        if (priv->uBBVGADiffCount == 1) {
                bb_set_vga_gain_offset(priv, priv->byBBVGANew);

                dev_dbg(&priv->pcid->dev,
                        "First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
                        (int)dbm, priv->byBBVGANew,
                        priv->byBBVGACurrent,
                        (int)priv->uBBVGADiffCount);
        }

        if (priv->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD) {
                dev_dbg(&priv->pcid->dev,
                        "RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
                        (int)dbm, priv->byBBVGANew,
                        priv->byBBVGACurrent,
                        (int)priv->uBBVGADiffCount);

                bb_set_vga_gain_offset(priv, priv->byBBVGANew);
        }
}

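/*
 * vnt_interrupt_process - bottom half of the interrupt handler.
 *
 * Reads and acknowledges the interrupt status register, folds the MIB
 * counters into the low-level statistics, then services beacon, receive
 * and transmit events until either the status register is clear or
 * opts.int_works descriptors have been handled.
 */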
static void vnt_interrupt_process(struct vnt_private *priv)
{
        struct ieee80211_low_level_stats *low_stats = &priv->low_stats;
        int max_count = 0;
        u32 mib_counter;
        u32 isr;
        unsigned long flags;

        isr = ioread32(priv->port_offset + MAC_REG_ISR);

        if (isr == 0)
                return;

        if (isr == 0xffffffff) {
                pr_debug("isr = 0xffffffff\n");
                return;
        }

        spin_lock_irqsave(&priv->lock, flags);

        mib_counter = ioread32(priv->port_offset + MAC_REG_MIBCNTR);

        low_stats->dot11RTSSuccessCount += mib_counter & 0xff;
        low_stats->dot11RTSFailureCount += (mib_counter >> 8) & 0xff;
        low_stats->dot11ACKFailureCount += (mib_counter >> 16) & 0xff;
        low_stats->dot11FCSErrorCount += (mib_counter >> 24) & 0xff;

        while (isr && priv->vif) {
                iowrite32(isr, priv->port_offset + MAC_REG_ISR);

                if (isr & ISR_FETALERR) {
                        pr_debug(" ISR_FETALERR\n");
                        iowrite8(0, priv->port_offset + MAC_REG_SOFTPWRCTL);
                        iowrite16(SOFTPWRCTL_SWPECTI, priv->port_offset + MAC_REG_SOFTPWRCTL);
                        device_error(priv, isr);
                }

                if (isr & ISR_TBTT) {
                        if (priv->op_mode != NL80211_IFTYPE_ADHOC)
                                vnt_check_bb_vga(priv);

                        priv->bBeaconSent = false;
                        if (priv->bEnablePSMode)
                                PSbIsNextTBTTWakeUp((void *)priv);

                        if ((priv->op_mode == NL80211_IFTYPE_AP ||
                             priv->op_mode == NL80211_IFTYPE_ADHOC) &&
                            priv->vif->bss_conf.enable_beacon)
                                MACvOneShotTimer1MicroSec(priv,
                                                          (priv->vif->bss_conf.beacon_int -
                                                           MAKE_BEACON_RESERVED) << 10);
                }

                if (isr & ISR_BNTX) {
                        if (priv->op_mode == NL80211_IFTYPE_ADHOC) {
                                priv->bIsBeaconBufReadySet = false;
                                priv->cbBeaconBufReadySetCnt = 0;
                        }

                        priv->bBeaconSent = true;
                }

                if (isr & ISR_RXDMA0)
                        max_count += device_rx_srv(priv, TYPE_RXDMA0);

                if (isr & ISR_RXDMA1)
                        max_count += device_rx_srv(priv, TYPE_RXDMA1);

                if (isr & ISR_TXDMA0)
                        max_count += device_tx_srv(priv, TYPE_TXDMA0);

                if (isr & ISR_AC0DMA)
                        max_count += device_tx_srv(priv, TYPE_AC0DMA);

                if (isr & ISR_SOFTTIMER1) {
                        if (priv->vif->bss_conf.enable_beacon)
                                vnt_beacon_make(priv, priv->vif);
                }

                if (AVAIL_TD(priv, TYPE_TXDMA0) &&
                    AVAIL_TD(priv, TYPE_AC0DMA) &&
                    ieee80211_queue_stopped(priv->hw, 0))
                        ieee80211_wake_queues(priv->hw);

                isr = ioread32(priv->port_offset + MAC_REG_ISR);

                MACvReceive0(priv->port_offset);
                MACvReceive1(priv->port_offset);

                if (max_count > priv->opts.int_works)
                        break;
        }

        spin_unlock_irqrestore(&priv->lock, flags);
}
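
/*
 * The hard interrupt handler only masks further interrupts and schedules
 * interrupt_work; the work item does the actual servicing and re-enables
 * the interrupt mask when it is done.
 */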
static void vnt_interrupt_work(struct work_struct *work)
{
        struct vnt_private *priv =
                container_of(work, struct vnt_private, interrupt_work);

        if (priv->vif)
                vnt_interrupt_process(priv);

        iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);
}

static irqreturn_t vnt_interrupt(int irq, void *arg)
{
        struct vnt_private *priv = arg;

        schedule_work(&priv->interrupt_work);

        iowrite32(0, priv->port_offset + MAC_REG_IMR);

        return IRQ_HANDLED;
}

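/*
 * vnt_tx_packet - queue one frame for transmission.
 *
 * Data frames go to the AC0 DMA ring, everything else to TXDMA0.  If the
 * chosen ring is full the mac80211 queues are stopped and the frame is
 * rejected.  Otherwise the FIFO header is built in the descriptor's bounce
 * buffer and the descriptor is handed to the NIC, with memory barriers
 * ordering the descriptor writes against the owner bit.
 */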
static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct vnt_tx_desc *head_td;
        u32 dma_idx;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        if (ieee80211_is_data(hdr->frame_control))
                dma_idx = TYPE_AC0DMA;
        else
                dma_idx = TYPE_TXDMA0;

        if (AVAIL_TD(priv, dma_idx) < 1) {
                spin_unlock_irqrestore(&priv->lock, flags);
                ieee80211_stop_queues(priv->hw);
                return -ENOMEM;
        }

        head_td = priv->apCurrTD[dma_idx];

        head_td->td1.tcr = 0;

        head_td->td_info->skb = skb;

        if (dma_idx == TYPE_AC0DMA)
                head_td->td_info->flags = TD_FLAGS_NETIF_SKB;

        priv->apCurrTD[dma_idx] = head_td->next;

        spin_unlock_irqrestore(&priv->lock, flags);

        vnt_generate_fifo_header(priv, dma_idx, head_td, skb);

        spin_lock_irqsave(&priv->lock, flags);

        priv->bPWBitOn = false;

        head_td->td1.tcr |= (TCR_STP | TCR_EDP | EDMSDU);
        head_td->td1.req_count = cpu_to_le16(head_td->td_info->req_count);

        head_td->buff_addr = cpu_to_le32(head_td->td_info->buf_dma);

        wmb();
        head_td->td0.owner = OWNED_BY_NIC;
        wmb();

        if (head_td->td_info->flags & TD_FLAGS_NETIF_SKB)
                MACvTransmitAC0(priv->port_offset);
        else
                MACvTransmit0(priv->port_offset);

        priv->iTDUsed[dma_idx]++;

        spin_unlock_irqrestore(&priv->lock, flags);

        return 0;
}

static void vnt_tx_80211(struct ieee80211_hw *hw,
                         struct ieee80211_tx_control *control,
                         struct sk_buff *skb)
{
        struct vnt_private *priv = hw->priv;

        if (vnt_tx_packet(priv, skb))
                ieee80211_free_txskb(hw, skb);
}

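/*
 * vnt_start - mac80211 start callback.
 *
 * Allocates the DMA pools and descriptor rings, requests the (shared)
 * interrupt line, programs the hardware registers and unmasks the MAC
 * interrupts before waking the transmit queues.  Each error path unwinds
 * exactly what was set up before it.
 */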
static int vnt_start(struct ieee80211_hw *hw)
{
        struct vnt_private *priv = hw->priv;
        int ret;

        priv->rx_buf_sz = PKT_BUF_SZ;
        if (!device_init_rings(priv))
                return -ENOMEM;

        ret = request_irq(priv->pcid->irq, vnt_interrupt,
                          IRQF_SHARED, "vt6655", priv);
        if (ret) {
                dev_dbg(&priv->pcid->dev, "failed to start irq\n");
                goto err_free_rings;
        }

        dev_dbg(&priv->pcid->dev, "call device init rd0 ring\n");
        ret = device_init_rd0_ring(priv);
        if (ret)
                goto err_free_irq;
        ret = device_init_rd1_ring(priv);
        if (ret)
                goto err_free_rd0_ring;
        ret = device_init_td0_ring(priv);
        if (ret)
                goto err_free_rd1_ring;
        ret = device_init_td1_ring(priv);
        if (ret)
                goto err_free_td0_ring;

        device_init_registers(priv);

        dev_dbg(&priv->pcid->dev, "enable MAC interrupt\n");
        iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);

        ieee80211_wake_queues(hw);

        return 0;

err_free_td0_ring:
        device_free_td0_ring(priv);
err_free_rd1_ring:
        device_free_rd1_ring(priv);
err_free_rd0_ring:
        device_free_rd0_ring(priv);
err_free_irq:
        free_irq(priv->pcid->irq, priv);
err_free_rings:
        device_free_rings(priv);
        return ret;
}

static void vnt_stop(struct ieee80211_hw *hw)
{
        struct vnt_private *priv = hw->priv;

        ieee80211_stop_queues(hw);

        cancel_work_sync(&priv->interrupt_work);

        MACbShutdown(priv);
        MACbSoftwareReset(priv);
        CARDbRadioPowerOff(priv);

        device_free_td0_ring(priv);
        device_free_td1_ring(priv);
        device_free_rd0_ring(priv);
        device_free_rd1_ring(priv);
        device_free_rings(priv);

        free_irq(priv->pcid->irq, priv);
}

static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
        struct vnt_private *priv = hw->priv;

        priv->vif = vif;

        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
                break;
        case NL80211_IFTYPE_ADHOC:
                vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);

                vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);

                break;
        case NL80211_IFTYPE_AP:
                vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);

                vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);

                break;
        default:
                return -EOPNOTSUPP;
        }

        priv->op_mode = vif->type;

        return 0;
}

static void vnt_remove_interface(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif)
{
        struct vnt_private *priv = hw->priv;

        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
                break;
        case NL80211_IFTYPE_ADHOC:
                vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
                vt6655_mac_reg_bits_off(priv->port_offset,
                                        MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
                vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
                break;
        case NL80211_IFTYPE_AP:
                vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
                vt6655_mac_reg_bits_off(priv->port_offset,
                                        MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
                vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
                break;
        default:
                break;
        }

        priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
}

static int vnt_config(struct ieee80211_hw *hw, u32 changed)
{
        struct vnt_private *priv = hw->priv;
        struct ieee80211_conf *conf = &hw->conf;
        u8 bb_type;

        if (changed & IEEE80211_CONF_CHANGE_PS) {
                if (conf->flags & IEEE80211_CONF_PS)
                        PSvEnablePowerSaving(priv, conf->listen_interval);
                else
                        PSvDisablePowerSaving(priv);
        }

        if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
            (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
                set_channel(priv, conf->chandef.chan);

                if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
                        bb_type = BB_TYPE_11A;
                else
                        bb_type = BB_TYPE_11G;

                if (priv->byBBType != bb_type) {
                        priv->byBBType = bb_type;

                        CARDbSetPhyParameter(priv, priv->byBBType);
                }
        }

        if (changed & IEEE80211_CONF_CHANGE_POWER) {
                if (priv->byBBType == BB_TYPE_11B)
                        priv->wCurrentRate = RATE_1M;
                else
                        priv->wCurrentRate = RATE_54M;

                RFbSetPower(priv, priv->wCurrentRate,
                            conf->chandef.chan->hw_value);
        }

        return 0;
}

static void vnt_bss_info_changed(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_bss_conf *conf, u64 changed)
{
        struct vnt_private *priv = hw->priv;

        priv->current_aid = vif->cfg.aid;

        if (changed & BSS_CHANGED_BSSID && conf->bssid) {
                unsigned long flags;

                spin_lock_irqsave(&priv->lock, flags);

                vt6655_mac_write_bssid_addr(priv->port_offset, conf->bssid);

                spin_unlock_irqrestore(&priv->lock, flags);
        }

        if (changed & BSS_CHANGED_BASIC_RATES) {
                priv->basic_rates = conf->basic_rates;

                CARDvUpdateBasicTopRate(priv);

                dev_dbg(&priv->pcid->dev,
                        "basic rates %x\n", conf->basic_rates);
        }

        if (changed & BSS_CHANGED_ERP_PREAMBLE) {
                if (conf->use_short_preamble) {
                        MACvEnableBarkerPreambleMd(priv->port_offset);
                        priv->preamble_type = true;
                } else {
                        MACvDisableBarkerPreambleMd(priv->port_offset);
                        priv->preamble_type = false;
                }
        }

        if (changed & BSS_CHANGED_ERP_CTS_PROT) {
                if (conf->use_cts_prot)
                        MACvEnableProtectMD(priv->port_offset);
                else
                        MACvDisableProtectMD(priv->port_offset);
        }

        if (changed & BSS_CHANGED_ERP_SLOT) {
                if (conf->use_short_slot)
                        priv->short_slot_time = true;
                else
                        priv->short_slot_time = false;

                CARDbSetPhyParameter(priv, priv->byBBType);
                bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
        }

        if (changed & BSS_CHANGED_TXPOWER)
                RFbSetPower(priv, priv->wCurrentRate,
                            conf->chandef.chan->hw_value);

        if (changed & BSS_CHANGED_BEACON_ENABLED) {
                dev_dbg(&priv->pcid->dev,
                        "Beacon enable %d\n", conf->enable_beacon);

                if (conf->enable_beacon) {
                        vnt_beacon_enable(priv, vif, conf);

                        vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
                } else {
                        vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR,
                                                TCR_AUTOBCNTX);
                }
        }

        if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
            priv->op_mode != NL80211_IFTYPE_AP) {
                if (vif->cfg.assoc && conf->beacon_rate) {
                        CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
                                       conf->sync_tsf);

                        CARDbSetBeaconPeriod(priv, conf->beacon_int);

                        CARDvSetFirstNextTBTT(priv, conf->beacon_int);
                } else {
                        iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
                        iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
                }
        }
}

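/*
 * Build the 64-bit multicast hash filter: the top six bits of the CRC-32 of
 * each address select one bit in the 64-bit multicast address register
 * written in vnt_configure().
 */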
static u64 vnt_prepare_multicast(struct ieee80211_hw *hw,
                                 struct netdev_hw_addr_list *mc_list)
{
        struct vnt_private *priv = hw->priv;
        struct netdev_hw_addr *ha;
        u64 mc_filter = 0;
        u32 bit_nr = 0;

        netdev_hw_addr_list_for_each(ha, mc_list) {
                bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

                mc_filter |= 1ULL << (bit_nr & 0x3f);
        }

        priv->mc_list_count = mc_list->count;

        return mc_filter;
}

static void vnt_configure(struct ieee80211_hw *hw,
                          unsigned int changed_flags,
                          unsigned int *total_flags, u64 multicast)
{
        struct vnt_private *priv = hw->priv;
        u8 rx_mode = 0;

        *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;

        rx_mode = ioread8(priv->port_offset + MAC_REG_RCR);

        dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);

        if (changed_flags & FIF_ALLMULTI) {
                if (*total_flags & FIF_ALLMULTI) {
                        unsigned long flags;

                        spin_lock_irqsave(&priv->lock, flags);

                        if (priv->mc_list_count > 2) {
                                MACvSelectPage1(priv->port_offset);

                                iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0);
                                iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0 + 4);

                                MACvSelectPage0(priv->port_offset);
                        } else {
                                MACvSelectPage1(priv->port_offset);

                                multicast = le64_to_cpu(multicast);
                                iowrite32((u32)multicast, priv->port_offset + MAC_REG_MAR0);
                                iowrite32((u32)(multicast >> 32),
                                          priv->port_offset + MAC_REG_MAR0 + 4);

                                MACvSelectPage0(priv->port_offset);
                        }

                        spin_unlock_irqrestore(&priv->lock, flags);

                        rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
                } else {
                        rx_mode &= ~(RCR_MULTICAST | RCR_BROADCAST);
                }
        }

        if (changed_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)) {
                rx_mode |= RCR_MULTICAST | RCR_BROADCAST;

                if (*total_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC))
                        rx_mode &= ~RCR_BSSID;
                else
                        rx_mode |= RCR_BSSID;
        }

        iowrite8(rx_mode, priv->port_offset + MAC_REG_RCR);

        dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
}

static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
                       struct ieee80211_key_conf *key)
{
        struct vnt_private *priv = hw->priv;

        switch (cmd) {
        case SET_KEY:
                if (vnt_set_keys(hw, sta, vif, key))
                        return -EOPNOTSUPP;
                break;
        case DISABLE_KEY:
                if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
                        clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
                break;
        default:
                break;
        }

        return 0;
}

static int vnt_get_stats(struct ieee80211_hw *hw,
                         struct ieee80211_low_level_stats *stats)
{
        struct vnt_private *priv = hw->priv;

        memcpy(stats, &priv->low_stats, sizeof(*stats));

        return 0;
}

static u64 vnt_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
        struct vnt_private *priv = hw->priv;
        u64 tsf;

        tsf = vt6655_get_current_tsf(priv);

        return tsf;
}

static void vnt_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        u64 tsf)
{
        struct vnt_private *priv = hw->priv;

        CARDvUpdateNextTBTT(priv, tsf, vif->bss_conf.beacon_int);
}

static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
        struct vnt_private *priv = hw->priv;

        iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
}

static const struct ieee80211_ops vnt_mac_ops = {
        .tx = vnt_tx_80211,
        .start = vnt_start,
        .stop = vnt_stop,
        .add_interface = vnt_add_interface,
        .remove_interface = vnt_remove_interface,
        .config = vnt_config,
        .bss_info_changed = vnt_bss_info_changed,
        .prepare_multicast = vnt_prepare_multicast,
        .configure_filter = vnt_configure,
        .set_key = vnt_set_key,
        .get_stats = vnt_get_stats,
        .get_tsf = vnt_get_tsf,
        .set_tsf = vnt_set_tsf,
        .reset_tsf = vnt_reset_tsf,
};

static int vnt_init(struct vnt_private *priv)
{
        SET_IEEE80211_PERM_ADDR(priv->hw, priv->abyCurrentNetAddr);

        vnt_init_bands(priv);

        if (ieee80211_register_hw(priv->hw))
                return -ENODEV;

        priv->mac_hw = true;

        CARDbRadioPowerOff(priv);

        return 0;
}

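/*
 * vt6655_probe - PCI probe callback.
 *
 * Allocates the ieee80211_hw, maps the memory-mapped registers, resets and
 * initialises the MAC, reads the permanent MAC address and RF type from the
 * EEPROM, fills in the wiphy capabilities and finally registers the device
 * with mac80211.
 */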
static int
vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
{
        struct vnt_private *priv;
        struct ieee80211_hw *hw;
        struct wiphy *wiphy;
        int rc;

        dev_notice(&pcid->dev,
                   "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);

        dev_notice(&pcid->dev,
                   "Copyright (c) 2003 VIA Networking Technologies, Inc.\n");

        hw = ieee80211_alloc_hw(sizeof(*priv), &vnt_mac_ops);
        if (!hw) {
                dev_err(&pcid->dev, "could not allocate ieee80211_hw\n");
                return -ENOMEM;
        }

        priv = hw->priv;
        priv->pcid = pcid;

        spin_lock_init(&priv->lock);

        priv->hw = hw;

        SET_IEEE80211_DEV(priv->hw, &pcid->dev);

        if (pci_enable_device(pcid)) {
                device_free_info(priv);
                return -ENODEV;
        }

        dev_dbg(&pcid->dev,
                "Before get pci_info memaddr is %x\n", priv->memaddr);

        pci_set_master(pcid);

        priv->memaddr = pci_resource_start(pcid, 0);
        priv->ioaddr = pci_resource_start(pcid, 1);
        priv->port_offset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
                                    256);
        if (!priv->port_offset) {
                dev_err(&pcid->dev, ": Failed to remap I/O memory\n");
                device_free_info(priv);
                return -ENODEV;
        }

        rc = pci_request_regions(pcid, DEVICE_NAME);
        if (rc) {
                dev_err(&pcid->dev, ": Failed to request PCI regions\n");
                device_free_info(priv);
                return -ENODEV;
        }

        if (dma_set_mask(&pcid->dev, DMA_BIT_MASK(32))) {
                dev_err(&pcid->dev, ": Failed to set dma 32 bit mask\n");
                device_free_info(priv);
                return -ENODEV;
        }

        INIT_WORK(&priv->interrupt_work, vnt_interrupt_work);

        if (!MACbSoftwareReset(priv)) {
                dev_err(&pcid->dev, ": Failed to access MAC hardware..\n");
                device_free_info(priv);
                return -ENODEV;
        }

        MACvInitialize(priv);
        vt6655_mac_read_ether_addr(priv->port_offset, priv->abyCurrentNetAddr);

        priv->byRFType = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_RFTYPE);
        priv->byRFType &= RF_MASK;

        dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);

        device_get_options(priv);
        device_set_options(priv);

        wiphy = priv->hw->wiphy;

        wiphy->frag_threshold = FRAG_THRESH_DEF;
        wiphy->rts_threshold = RTS_THRESH_DEF;
        wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);

        ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
        ieee80211_hw_set(priv->hw, SIGNAL_DBM);
        ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
        ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
        ieee80211_hw_set(priv->hw, SUPPORTS_PS);

        priv->hw->max_signal = 100;

        if (vnt_init(priv)) {
                device_free_info(priv);
                return -ENODEV;
        }

        device_print_info(priv);
        pci_set_drvdata(pcid, priv);

        return 0;
}

static int __maybe_unused vt6655_suspend(struct device *dev_d)
{
        struct vnt_private *priv = dev_get_drvdata(dev_d);
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        MACbShutdown(priv);

        spin_unlock_irqrestore(&priv->lock, flags);

        return 0;
}

static int __maybe_unused vt6655_resume(struct device *dev_d)
{
        device_wakeup_disable(dev_d);

        return 0;
}

MODULE_DEVICE_TABLE(pci, vt6655_pci_id_table);

static SIMPLE_DEV_PM_OPS(vt6655_pm_ops, vt6655_suspend, vt6655_resume);

static struct pci_driver device_driver = {
        .name = DEVICE_NAME,
        .id_table = vt6655_pci_id_table,
        .probe = vt6655_probe,
        .remove = vt6655_remove,
        .driver.pm = &vt6655_pm_ops,
};

module_pci_driver(device_driver);