/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
 * IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>

#include <brcmu_utils.h>
#include <aiutils.h>
#include "types.h"
#include "main.h"
#include "dma.h"
#include "soc.h"
#include "scb.h"
#include "ampdu.h"
#include "debug.h"
#include "brcms_trace_events.h"

/*
 * dma register field offset calculation
 */
#define DMA64REGOFFS(field)		offsetof(struct dma64regs, field)
#define DMA64TXREGOFFS(di, field)	(di->d64txregbase + DMA64REGOFFS(field))
#define DMA64RXREGOFFS(di, field)	(di->d64rxregbase + DMA64REGOFFS(field))

/*
 * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
 * a contiguous 8kB physical address range.
 */
#define D64RINGALIGN_BITS	13
#define	D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
#define	D64RINGALIGN		(1 << D64RINGALIGN_BITS)

#define	D64MAXDD	(D64MAXRINGSZ / sizeof(struct dma64desc))

/* transmit channel control */
#define	D64_XC_XE		0x00000001	/* transmit enable */
#define	D64_XC_SE		0x00000002	/* transmit suspend request */
#define	D64_XC_LE		0x00000004	/* loopback enable */
#define	D64_XC_FL		0x00000010	/* flush request */
#define	D64_XC_PD		0x00000800	/* parity check disable */
#define	D64_XC_AE		0x00030000	/* address extension bits */
#define	D64_XC_AE_SHIFT		16

/* transmit descriptor table pointer */
#define	D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */

/* transmit channel status */
#define	D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define	D64_XS0_XS_MASK		0xf0000000	/* transmit state */
#define	D64_XS0_XS_SHIFT	28
#define	D64_XS0_XS_DISABLED	0x00000000	/* disabled */
#define	D64_XS0_XS_ACTIVE	0x10000000	/* active */
#define	D64_XS0_XS_IDLE		0x20000000	/* idle wait */
#define	D64_XS0_XS_STOPPED	0x30000000	/* stopped */
#define	D64_XS0_XS_SUSP		0x40000000	/* suspend pending */

#define	D64_XS1_AD_MASK		0x00001fff	/* active descriptor */
#define	D64_XS1_XE_MASK		0xf0000000	/* transmit errors */
#define	D64_XS1_XE_SHIFT	28
#define	D64_XS1_XE_NOERR	0x00000000	/* no error */
#define	D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
#define	D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
#define	D64_XS1_XE_DTE		0x30000000	/* data transfer error */
#define	D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
#define	D64_XS1_XE_COREE	0x50000000	/* core error */

/*
 * receive channel control
 */
/* receive enable */
#define	D64_RC_RE		0x00000001
/* receive frame offset */
#define	D64_RC_RO_MASK		0x000000fe
#define	D64_RC_RO_SHIFT		1
/* direct fifo receive (pio) mode */
#define	D64_RC_FM		0x00000100
/* separate rx header descriptor enable */
#define	D64_RC_SH		0x00000200
/* overflow continue */
#define	D64_RC_OC		0x00000400
/* parity check disable */
#define	D64_RC_PD		0x00000800
/* address extension bits */
#define	D64_RC_AE		0x00030000
#define	D64_RC_AE_SHIFT		16

/* flags for dma controller */
/* parity enable */
#define DMA_CTRL_PEN		(1 << 0)
/* rx overflow continue */
#define DMA_CTRL_ROC		(1 << 1)
/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_RXMULTI	(1 << 2)
/* Unframed Rx/Tx data */
#define DMA_CTRL_UNFRAMED	(1 << 3)

/* receive descriptor table pointer */
#define	D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */

/* receive channel status */
#define	D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define	D64_RS0_RS_MASK		0xf0000000	/* receive state */
#define	D64_RS0_RS_SHIFT	28
#define	D64_RS0_RS_DISABLED	0x00000000	/* disabled */
#define	D64_RS0_RS_ACTIVE	0x10000000	/* active */
#define	D64_RS0_RS_IDLE		0x20000000	/* idle wait */
#define	D64_RS0_RS_STOPPED	0x30000000	/* stopped */
#define	D64_RS0_RS_SUSP		0x40000000	/* suspend pending */

#define	D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
#define	D64_RS1_RE_MASK		0xf0000000	/* receive errors */
#define	D64_RS1_RE_SHIFT	28
#define	D64_RS1_RE_NOERR	0x00000000	/* no error */
#define	D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
#define	D64_RS1_RE_DFU		0x20000000	/* data fifo underrun */
#define	D64_RS1_RE_DTE		0x30000000	/* data transfer error */
#define	D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
#define	D64_RS1_RE_COREE	0x50000000	/* core error */

/* fifoaddr */
#define	D64_FA_OFF_MASK		0xffff	/* offset */
#define	D64_FA_SEL_MASK		0xf0000	/* select */
#define	D64_FA_SEL_SHIFT	16
#define	D64_FA_SEL_XDD		0x00000	/* transmit dma data */
#define	D64_FA_SEL_XDP		0x10000	/* transmit dma pointers */
#define	D64_FA_SEL_RDD		0x40000	/* receive dma data */
#define	D64_FA_SEL_RDP		0x50000	/* receive dma pointers */
#define	D64_FA_SEL_XFD		0x80000	/* transmit fifo data */
#define	D64_FA_SEL_XFP		0x90000	/* transmit fifo pointers */
#define	D64_FA_SEL_RFD		0xc0000	/* receive fifo data */
#define	D64_FA_SEL_RFP		0xd0000	/* receive fifo pointers */
#define	D64_FA_SEL_RSD		0xe0000	/* receive frame status data */
#define	D64_FA_SEL_RSP		0xf0000	/* receive frame status pointers */

/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
#define	D64_CTRL1_EOT		((u32)1 << 28)	/* end of descriptor table */
#define	D64_CTRL1_IOC		((u32)1 << 29)	/* interrupt on completion */
#define	D64_CTRL1_EOF		((u32)1 << 30)	/* end of frame */
#define	D64_CTRL1_SOF		((u32)1 << 31)	/* start of frame */

/* descriptor control flags 2 */
/* buffer byte count. real data len must <= 16KB */
#define	D64_CTRL2_BC_MASK	0x00007fff
/* address extension bits */
#define	D64_CTRL2_AE		0x00030000
#define	D64_CTRL2_AE_SHIFT	16
/* parity bit */
#define D64_CTRL2_PARITY	0x00040000

/* control flags in the range [27:20] are core-specific, not defined here */
#define	D64_CTRL_CORE_MASK	0x0ff00000

#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1 */
#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */

/*
 * packet headroom necessary to accommodate the largest header
 * in the system (i.e. TXOFF). By doing so we avoid the need to
 * allocate an extra buffer for the header when bridging to WL.
 * There is a compile time check in wlc.c which ensures that this
 * value is at least as big as TXOFF. This value is used in
 * dma_rxfill().
 */

#define BCMEXTRAHDROOM 172

#define MAXNAMEL	8	/* 8 char names */

/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))

#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30	/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31	/* address[63] */

/*
 * DMA Descriptor
 * Descriptors are only read by the hardware, never written back.
 */
struct dma64desc {
	__le32 ctrl1;	/* misc control bits */
	__le32 ctrl2;	/* buffer count and address extension */
	__le32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	__le32 addrhigh; /* memory address of the data buffer, bits 63:32 */
};

/* dma engine software state */
struct dma_info {
	struct dma_pub dma;	/* exported structure */
	char name[MAXNAMEL];	/* callers name for diag msgs */

	struct bcma_device *core;
	struct device *dmadev;

	/* session information for AMPDU */
	struct brcms_ampdu_session ampdu_session;

	bool dma64;	/* this dma engine is operating in 64-bit mode */
	bool addrext;	/* this dma engine supports DmaExtendedAddrChanges */

	/* 64-bit dma tx engine registers */
	uint d64txregbase;
	/* 64-bit dma rx engine registers */
	uint d64rxregbase;
	/* pointer to dma64 tx descriptor ring */
	struct dma64desc *txd64;
	/* pointer to dma64 rx descriptor ring */
	struct dma64desc *rxd64;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;		/* # tx descriptors tunable */
	u16 txin;		/* index of next descriptor to reclaim */
	u16 txout;		/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **txp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t txdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t txdpaorig;
	u16 txdalign;	/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;	/* #bytes allocated for the ring */
	u32 xmtptrbase;	/* When using unaligned descriptors, the ptr register
			 * is not just an index, it needs all 13 bits to be
			 * an offset from the addr register.
			 */

	u16 nrxd;	/* # rx descriptors tunable */
	u16 rxin;	/* index of next descriptor to reclaim */
	u16 rxout;	/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **rxp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t rxdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t rxdpaorig;
	u16 rxdalign;	/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;	/* #bytes allocated for the ring */
	u32 rcvptrbase;	/* Base for ptr reg when using unaligned descriptors */

	/* tunables */
	/* rx buffer size in bytes, not including the extra headroom */
	unsigned int rxbufsize;
	/* extra rx headroom, reserved to assist upper stack, e.g. some rx
	 * pkt buffers will be bridged to the tx side without byte copying.
	 * The extra headroom does not participate in rx descriptor posting.
	 */
	uint rxextrahdrroom;

	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsetlow;
	/*   high 32 bits */
	uint ddoffsethigh;
	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsetlow;
	/*   high 32 bits */
	uint dataoffsethigh;
	/* is the descriptor base required to be aligned or not */
	bool aligndesc_4k;
};

/* Check for odd number of 1's */
static u32 parity32(__le32 data)
{
	/* no swap needed for counting 1's */
	u32 par_data = *(u32 *)&data;

	par_data ^= par_data >> 16;
	par_data ^= par_data >> 8;
	par_data ^= par_data >> 4;
	par_data ^= par_data >> 2;
	par_data ^= par_data >> 1;

	return par_data & 1;
}

static bool dma64_dd_parity(struct dma64desc *dd)
{
	return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2);
}
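
/*
 * A quick sanity check of the XOR-fold above: each shift-and-XOR halves
 * the span that holds the running XOR, so after five steps bit 0 is the
 * XOR of all 32 bits. E.g. a word with only bits 0 and 1 set returns 0
 * (even number of ones), while a single set bit returns 1. When parity
 * is enabled (D64_CTRL2_PARITY), the hardware checks this combined
 * parity across all four descriptor words.
 */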

/* descriptor bumping functions */

static uint xxd(uint x, uint n)
{
	return x & (n - 1);	/* faster than %, but n must be power of 2 */
}

static uint txd(struct dma_info *di, uint x)
{
	return xxd(x, di->ntxd);
}

static uint rxd(struct dma_info *di, uint x)
{
	return xxd(x, di->nrxd);
}

static uint nexttxd(struct dma_info *di, uint i)
{
	return txd(di, i + 1);
}

static uint prevtxd(struct dma_info *di, uint i)
{
	return txd(di, i - 1);
}

static uint nextrxd(struct dma_info *di, uint i)
{
	return rxd(di, i + 1);
}

static uint ntxdactive(struct dma_info *di, uint h, uint t)
{
	return txd(di, t - h);
}

static uint nrxdactive(struct dma_info *di, uint h, uint t)
{
	return rxd(di, t - h);
}
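
/*
 * Worked example of the ring arithmetic above, assuming ntxd = 64 (the
 * descriptor counts must be powers of two for the "& (n - 1)" mask to
 * act as a modulo): nexttxd() of index 63 wraps to 0, and
 * ntxdactive(di, 60, 2) evaluates (2 - 60) & 63 = 6, correctly counting
 * the six descriptors outstanding across the wrap point.
 */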

static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
{
	uint dmactrlflags;

	if (di == NULL)
		return 0;

	dmactrlflags = di->dma.dmactrlflags;
	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
		bcma_write32(di->core, DMA64TXREGOFFS(di, control),
			     control | D64_XC_PD);
		if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
		    D64_XC_PD)
			/* We *can* disable it so it is supported,
			 * restore control register
			 */
			bcma_write32(di->core, DMA64TXREGOFFS(di, control),
				     control);
		else
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
	}

	di->dma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}

static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
{
	u32 w;
	bcma_set32(di->core, ctrl_offset, D64_XC_AE);
	w = bcma_read32(di->core, ctrl_offset);
	bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}

/*
 * return true if this dma engine supports DmaExtendedAddrChanges,
 * otherwise false
 */
static bool _dma_isaddrext(struct dma_info *di)
{
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channels are available */
	if (di->d64txregbase != 0) {
		if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
			brcms_dbg_dma(di->core,
				      "%s: DMA64 tx doesn't have AE set\n",
				      di->name);
		return true;
	} else if (di->d64rxregbase != 0) {
		if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
			brcms_dbg_dma(di->core,
				      "%s: DMA64 rx doesn't have AE set\n",
				      di->name);
		return true;
	}

	return false;
}

static bool _dma_descriptor_align(struct dma_info *di)
{
	u32 addrl;

	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
	if (di->d64txregbase != 0) {
		bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
		addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
		if (addrl != 0)
			return false;
	} else if (di->d64rxregbase != 0) {
		bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
		addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
		if (addrl != 0)
			return false;
	}
	return true;
}

/*
 * Descriptor table must start at the DMA hardware dictated alignment, so
 * allocated memory must be large enough to support this requirement.
 */
static void *dma_alloc_consistent(struct dma_info *di, uint size,
				  u16 align_bits, uint *alloced,
				  dma_addr_t *pap)
{
	if (align_bits) {
		u16 align = (1 << align_bits);
		if (!IS_ALIGNED(PAGE_SIZE, align))
			size += align;
		*alloced = size;
	}
	return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
}

static u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;
	while (size >>= 1)
		bitpos++;
	return bitpos;
}
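
/*
 * dma_align_sizetobits() is effectively ilog2(): e.g. an 8192-byte ring
 * yields 13, matching D64RINGALIGN_BITS. Ring sizes here are always a
 * power of two (a power-of-2 descriptor count times the 16-byte
 * struct dma64desc), so nothing is lost by the truncation.
 */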

/* This function ensures that the DMA descriptor ring will not get allocated
 * across a page boundary. If the allocation crosses the page boundary on the
 * first attempt, the memory is freed and the allocation is retried at a
 * descriptor-ring-size aligned location, which guarantees the ring will not
 * cross the boundary.
 */
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dma_addr_t *descpa)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);

	if (NULL == va)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		dma_free_coherent(di->dmadev, size, va, *descpa);
		va = dma_alloc_consistent(di, size, *alignbits,
					  alloced, descpa);
	}
	return va;
}

static bool dma64_alloc(struct dma_info *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(struct dma64desc);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		if (va == NULL) {
			brcms_dbg_dma(di->core,
				      "%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
				      di->name);
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (struct dma64desc *)
		    roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		di->txdpa = di->txdpaorig + di->txdalign;
		di->txdalloc = alloced;
	} else {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		if (va == NULL) {
			brcms_dbg_dma(di->core,
				      "%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
				      di->name);
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (struct dma64desc *)
		    roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		di->rxdpa = di->rxdpaorig + di->rxdalign;
		di->rxdalloc = alloced;
	}

	return true;
}

static bool _dma_alloc(struct dma_info *di, uint direction)
{
	return dma64_alloc(di, direction);
}

struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
			   uint txregbase, uint rxregbase, uint ntxd,
			   uint nrxd, uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset)
{
	struct si_pub *sih = wlc->hw->sih;
	struct bcma_device *core = wlc->hw->d11core;
	struct dma_info *di;
	u8 rev = core->id.rev;
	uint size;
	struct si_info *sii = container_of(sih, struct si_info, pub);

	/* allocate private info structure */
	di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
	if (di == NULL)
		return NULL;

	di->dma64 =
	    ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg info */
	di->core = core;
	di->d64txregbase = txregbase;
	di->d64rxregbase = rxregbase;

	/*
	 * Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): For backwards compatibility
	 * both Rx Overflow Continue and Parity are DISABLED.
	 */
	_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	brcms_dbg_dma(di->core, "%s: %s flags 0x%x ntxd %d nrxd %d "
		      "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		      "txregbase %u rxregbase %u\n", name, "DMA64",
		      di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
		      rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->dmadev = core->dma_dev;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero-based
	 *     memory, need offset
	 *     Other bus: use zero; SI_BUS BIGENDIAN kludge: use sdram
	 *     swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) {
		/* add offset for pcie with DMA64 bus */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
	}
	di->dataoffsetlow = di->ddoffsetlow;
	di->dataoffsethigh = di->ddoffsethigh;

	/* WAR64450: DMACtl.Addr ext fields are not supported in SDIOD core */
	if ((core->id.id == BCMA_CORE_SDIO_DEV)
	    && ((rev > 0) && (rev <= 2)))
		di->addrext = false;
	else if ((core->id.id == BCMA_CORE_I2S) &&
		 ((rev == 0) || (rev == 1)))
		di->addrext = false;
	else
		di->addrext = _dma_isaddrext(di);

	/* does the descriptor need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
			/* for smaller dd table, HW relaxes alignment reqmnt */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
	} else {
		di->dmadesc_align = 4;	/* 16 byte alignment */
	}

	brcms_dbg_dma(di->core, "DMA descriptor align_needed %d, align %d\n",
		      di->aligndesc_4k, di->dmadesc_align);

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL)
			goto fail;
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL)
			goto fail;
	}

	/*
	 * allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned
	 */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/*
	 * allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned
	 */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (di->txdpa > SI_PCI_DMA_SZ) {
			brcms_dbg_dma(di->core,
				      "%s: txdpa 0x%x: addrext not supported\n",
				      di->name, (u32)di->txdpa);
			goto fail;
		}
		if (di->rxdpa > SI_PCI_DMA_SZ) {
			brcms_dbg_dma(di->core,
				      "%s: rxdpa 0x%x: addrext not supported\n",
				      di->name, (u32)di->rxdpa);
			goto fail;
		}
	}

	/* Initialize AMPDU session */
	brcms_c_ampdu_reset_session(&di->ampdu_session, wlc);

	brcms_dbg_dma(di->core,
		      "ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
		      di->ddoffsetlow, di->ddoffsethigh,
		      di->dataoffsetlow, di->dataoffsethigh,
		      di->addrext);

	return (struct dma_pub *) di;

 fail:
	dma_detach((struct dma_pub *)di);
	return NULL;
}

static inline void
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
	     dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
	if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;

		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	}
	if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
		if (dma64_dd_parity(&ddring[outidx]))
			ddring[outidx].ctrl2 =
			     cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
	}
}
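
/*
 * Address-extension example for the second branch above: given a 32-bit
 * physical address pa = 0xc0001000, bits 31:30 (PCI32ADDR_HIGH) move
 * into the descriptor's AE field, so ae = 0x3, pa becomes 0x00001000,
 * and ctrl2 gains 0x3 << D64_CTRL2_AE_SHIFT. The DMA engine recombines
 * the two when it masters the bus access.
 */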

/* !! may be called with core in reset */
void dma_detach(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	/* free dma descriptor rings */
	if (di->txd64)
		dma_free_coherent(di->dmadev, di->txdalloc,
				  ((s8 *)di->txd64 - di->txdalign),
				  (di->txdpaorig));
	if (di->rxd64)
		dma_free_coherent(di->dmadev, di->rxdalloc,
				  ((s8 *)di->rxd64 - di->rxdalign),
				  (di->rxdpaorig));

	/* free packet pointer vectors */
	kfree(di->txp);
	kfree(di->rxp);

	/* free our private info structure */
	kfree(di);

}

/* initialize descriptor table base address */
static void
_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
{
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)
			di->xmtptrbase = pa;
		else
			di->rcvptrbase = pa;
	}

	if ((di->ddoffsetlow == 0)
	    || !(pa & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
		} else {
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
		}
	} else {
		/* DMA64 32-bit address extension */
		u32 ae;

		/* shift the high bit(s) from pa to ae */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
			bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
				       D64_XC_AE, (ae << D64_XC_AE_SHIFT));
		} else {
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
			bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
				       D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
}

static void _dma_rxenable(struct dma_info *di)
{
	uint dmactrlflags = di->dma.dmactrlflags;
	u32 control;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	control = D64_RC_RE | (bcma_read32(di->core,
					   DMA64RXREGOFFS(di, control)) &
			       D64_RC_AE);

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	bcma_write32(di->core, DMA64RXREGOFFS(di, control),
		     ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}

void dma_rxinit(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));

	/* DMA engine without alignment requirement requires table to be
	 * initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}

static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
{
	uint i, curr;
	struct sk_buff *rxp;
	dma_addr_t pa;

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((bcma_read32(di->core,
			      DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	di->rxp[i] = NULL;

	pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;

	/* clear this packet from the descriptor ring */
	dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);

	di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
	di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

	di->rxin = nextrxd(di, i);

	return rxp;
}
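
/*
 * Note on the 'curr' computation above: the CD field of status0 holds a
 * byte offset from the ring base (rcvptrbase), so subtracting the base
 * and converting bytes to an index (B2I) yields the descriptor the
 * hardware is currently working on. Buffers at indices [rxin, curr)
 * are complete and safe to reclaim.
 */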

static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}

/*
 * !! rx entry routine
 * returns the number of packets in the next frame, or 0 if there are no more
 *   if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
 *   supported with a pkt chain;
 *   otherwise, it's treated as a giant pkt and will be tossed.
 *   The DMA scattering starts with a normal DMA header, followed by the first
 *   buffer data. After it reaches the max size of a buffer, the data continues
 *   in the next DMA descriptor buffer WITHOUT a DMA header
 */
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct sk_buff_head dma_frames;
	struct sk_buff *p, *next;
	uint len;
	uint pkt_len;
	int resid = 0;
	int pktcnt = 1;

	skb_queue_head_init(&dma_frames);
 next_frame:
	p = _dma_getnextrxp(di, false);
	if (p == NULL)
		return 0;

	len = le16_to_cpu(*(__le16 *) (p->data));
	brcms_dbg_dma(di->core, "%s: dma_rx len %d\n", di->name, len);
	dma_spin_for_len(len, p);

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(p, pkt_len);
	skb_queue_tail(&dma_frames, p);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			pkt_len = min_t(uint, resid, di->rxbufsize);
			__skb_trim(p, pkt_len);
			skb_queue_tail(&dma_frames, p);
			resid -= di->rxbufsize;
			pktcnt++;
		}

#ifdef DEBUG
		if (resid > 0) {
			uint cur;
			cur =
			    B2I(((bcma_read32(di->core,
					      DMA64RXREGOFFS(di, status0)) &
				  D64_RS0_CD_MASK) - di->rcvptrbase) &
				D64_RS0_CD_MASK, struct dma64desc);
			brcms_dbg_dma(di->core,
				      "rxin %d rxout %d, hw_curr %d\n",
				      di->rxin, di->rxout, cur);
		}
#endif				/* DEBUG */

		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			brcms_dbg_dma(di->core, "%s: bad frame length (%d)\n",
				      di->name, len);
			skb_queue_walk_safe(&dma_frames, p, next) {
				skb_unlink(p, &dma_frames);
				brcmu_pkt_buf_free_skb(p);
			}
			di->dma.rxgiants++;
			pktcnt = 1;
			goto next_frame;
		}
	}

	skb_queue_splice_tail(&dma_frames, skb_list);
	return pktcnt;
}

static bool dma64_rxidle(struct dma_info *di)
{
	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->nrxd == 0)
		return true;

	return ((bcma_read32(di->core,
			     DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
		(bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
		 D64_RS0_CD_MASK));
}

static bool dma64_txidle(struct dma_info *di)
{
	if (di->ntxd == 0)
		return true;

	return ((bcma_read32(di->core,
			     DMA64TXREGOFFS(di, status0)) & D64_XS0_CD_MASK) ==
		(bcma_read32(di->core, DMA64TXREGOFFS(di, ptr)) &
		 D64_XS0_CD_MASK));
}
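
/*
 * Both idle checks above compare the hardware's current-descriptor byte
 * offset (the CD field of status0) with the last offset software posted
 * in the ptr register: when the two match, the engine has consumed
 * everything posted and is idle.
 */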

/*
 * post receive buffers
 *  Return false if refill failed completely or dma mapping failed. The ring
 *  is empty, which will stall the rx dma and user might want to call rxfill
 *  again asap. This is unlikely to happen on a memory-rich NIC, but often on
 *  memory-constrained dongle.
 */
bool dma_rxfill(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dma_addr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx buffer index.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - nrxdactive(di, rxin, rxout);

	brcms_dbg_dma(di->core, "%s: post %d\n", di->name, n);

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/*
		 * the di->rxbufsize doesn't include the extra headroom,
		 * we need to add it to the size to be allocated
		 */
		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);

		if (p == NULL) {
			brcms_dbg_dma(di->core, "%s: out of rxbufs\n",
				      di->name);
			if (i == 0 && dma64_rxidle(di)) {
				brcms_dbg_dma(di->core, "%s: ring is empty !\n",
					      di->name);
				ring_empty = true;
			}
			di->dma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(di->dmadev, pa)) {
			brcmu_pkt_buf_free_skb(p);
			return false;
		}

		/* save the free packet pointer */
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		rxout = nextrxd(di, rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
		     di->rcvptrbase + I2B(rxout, struct dma64desc));

	return ring_empty;
}

void dma_rxreclaim(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct sk_buff *p;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	while ((p = _dma_getnextrxp(di, true)))
		brcmu_pkt_buf_free_skb(p);
}

void dma_counterreset(struct dma_pub *pub)
{
	/* reset all software counters */
	pub->rxgiants = 0;
	pub->rxnobuf = 0;
	pub->txnobuf = 0;
}

/* get the address of the var in order to change later */
unsigned long dma_getvar(struct dma_pub *pub, const char *name)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->dma.txavail);
	return 0;
}

/* 64-bit DMA functions */

void dma_txinit(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	u32 control = D64_XC_XE;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->dma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));

	/* DMA engine without alignment requirement requires table to be
	 * initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);

	/* DMA engine with alignment requirement requires table to be
	 * initialized after enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}

void dma_txsuspend(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
}

void dma_txresume(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
}

bool dma_txsuspended(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);

	return (di->ntxd == 0) ||
	       ((bcma_read32(di->core,
			     DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
		D64_XC_SE);
}

void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct sk_buff *p;

	brcms_dbg_dma(di->core, "%s: %s\n",
		      di->name,
		      range == DMA_RANGE_ALL ? "all" :
		      range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		      "transferred");

	if (di->txin == di->txout)
		return;

	while ((p = dma_getnexttxp(pub, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
			brcmu_pkt_buf_free_skb(p);
	}
}

bool dma_txreset(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
		 (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
		 10000);

	bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}

bool dma_rxreset(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	u32 status;

	if (di->nrxd == 0)
		return true;

	bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
		    D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}

static void dma_txenq(struct dma_info *di, struct sk_buff *p)
{
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dma_addr_t pa;

	txout = di->txout;

	if (WARN_ON(nexttxd(di, txout) == di->txin))
		return;

	/*
	 * obtain and initialize transmit descriptor entry.
	 */
	data = p->data;
	len = p->len;

	/* get physical address of buffer start */
	pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
	/* if mapping failed, free skb */
	if (dma_mapping_error(di->dmadev, pa)) {
		brcmu_pkt_buf_free_skb(p);
		return;
	}

	/* each frame occupies a single descriptor: mark it as both start
	 * and end of frame, and request an interrupt on completion
	 */
	flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);

	txout = nexttxd(di, txout);

	/* save the packet */
	di->txp[prevtxd(di, txout)] = p;

	/* bump the tx descriptor index */
	di->txout = txout;
}

static void ampdu_finalize(struct dma_info *di)
{
	struct brcms_ampdu_session *session = &di->ampdu_session;
	struct sk_buff *p;

	trace_brcms_ampdu_session(&session->wlc->hw->d11core->dev,
				  session->max_ampdu_len,
				  session->max_ampdu_frames,
				  session->ampdu_len,
				  skb_queue_len(&session->skb_list),
				  session->dma_len);

	if (WARN_ON(skb_queue_empty(&session->skb_list)))
		return;

	brcms_c_ampdu_finalize(session);

	while (!skb_queue_empty(&session->skb_list)) {
		p = skb_dequeue(&session->skb_list);
		dma_txenq(di, p);
	}

	bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
		     di->xmtptrbase + I2B(di->txout, struct dma64desc));
	brcms_c_ampdu_reset_session(session, session->wlc);
}

static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
{
	struct brcms_ampdu_session *session = &di->ampdu_session;
	int ret;

	ret = brcms_c_ampdu_add_frame(session, p);
	if (ret == -ENOSPC) {
		/*
		 * AMPDU cannot accommodate this frame. Close out the in-
		 * progress AMPDU session and start a new one.
		 */
		ampdu_finalize(di);
		ret = brcms_c_ampdu_add_frame(session, p);
	}

	WARN_ON(ret);
}

/* Update count of available tx descriptors based on current DMA state */
static void dma_update_txavail(struct dma_info *di)
{
	/*
	 * Available space is number of descriptors less the number of
	 * active descriptors and the number of queued AMPDU frames.
	 */
	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) -
			  skb_queue_len(&di->ampdu_session.skb_list) - 1;
}
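
/*
 * Example of the accounting above, assuming ntxd = 64, txin = 10,
 * txout = 14 and two frames still queued in the AMPDU session:
 * ntxdactive() = (14 - 10) & 63 = 4, so txavail = 64 - 4 - 2 - 1 = 57.
 * The trailing "- 1" keeps one descriptor permanently unused so that a
 * full ring (txout just behind txin) remains distinguishable from an
 * empty one (txout == txin).
 */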

/*
 * !! tx entry routine
 * WARNING: call must check the return value for error.
 *   the error (tossed frames) could be fatal and cause many subsequent hard
 *   to debug problems
 */
int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
	       struct sk_buff *p)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct brcms_ampdu_session *session = &di->ampdu_session;
	struct ieee80211_tx_info *tx_info;
	bool is_ampdu;

	/* no use to transmit a zero length packet */
	if (p->len == 0)
		return 0;

	/* return nonzero if out of tx descriptors */
	if (di->dma.txavail == 0 || nexttxd(di, di->txout) == di->txin)
		goto outoftxd;

	tx_info = IEEE80211_SKB_CB(p);
	is_ampdu = tx_info->flags & IEEE80211_TX_CTL_AMPDU;
	if (is_ampdu)
		prep_ampdu_frame(di, p);
	else
		dma_txenq(di, p);

	/* tx flow control */
	dma_update_txavail(di);

	/* kick the chip */
	if (is_ampdu) {
		/*
		 * Start sending data frames whenever the threshold defined by
		 * max_ampdu_frames is reached or aggregation is not possible
		 * (i.e. no more tx descriptors or DMA is idle).
		 */
		if (skb_queue_len(&session->skb_list) ==
		    session->max_ampdu_frames ||
		    di->dma.txavail == 0 || dma64_txidle(di))
			ampdu_finalize(di);
	} else {
		bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
			     di->xmtptrbase + I2B(di->txout,
						  struct dma64desc));
	}

	return 0;

 outoftxd:
	brcms_dbg_dma(di->core, "%s: out of txds !!!\n", di->name);
	brcmu_pkt_buf_free_skb(p);
	di->dma.txavail = 0;
	di->dma.txnobuf++;
	return -ENOSPC;
}

void dma_txflush(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct brcms_ampdu_session *session = &di->ampdu_session;

	if (!skb_queue_empty(&session->skb_list))
		ampdu_finalize(di);
}

int dma_txpending(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	return ntxdactive(di, di->txin, di->txout);
}

/*
 * If we have an active AMPDU session and are not transmitting,
 * this function will force tx to start.
 */
void dma_kick_tx(struct dma_pub *pub)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	struct brcms_ampdu_session *session = &di->ampdu_session;

	if (!skb_queue_empty(&session->skb_list) && dma64_txidle(di))
		ampdu_finalize(di);
}

/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = container_of(pub, struct dma_info, dma);
	u16 start, end, i;
	u16 active_desc;
	struct sk_buff *txp;

	brcms_dbg_dma(di->core, "%s: %s\n",
		      di->name,
		      range == DMA_RANGE_ALL ? "all" :
		      range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		      "transferred");

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == DMA_RANGE_ALL)
		end = di->txout;
	else {
		end = (u16) (B2I(((bcma_read32(di->core,
					       DMA64TXREGOFFS(di, status0)) &
				   D64_XS0_CD_MASK) - di->xmtptrbase) &
				 D64_XS0_CD_MASK, struct dma64desc));

		if (range == DMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16)(bcma_read32(di->core,
					      DMA64TXREGOFFS(di, status1)) &
				  D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, struct dma64desc);
			if (end != active_desc)
				end = prevtxd(di, active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = nexttxd(di, i)) {
		dma_addr_t pa;
		uint size;

		pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;

		size =
		    (le32_to_cpu(di->txd64[i].ctrl2) &
		     D64_CTRL2_BC_MASK);

		di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef);
		di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

		txp = di->txp[i];
		di->txp[i] = NULL;

		dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
	}

	di->txin = i;

	/* tx flow control */
	dma_update_txavail(di);

	return txp;

 bogus:
	brcms_dbg_dma(di->core, "bogus curr: start %d end %d txout %d\n",
		      start, end, di->txout);
	return NULL;
}

/*
 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the
 * DMA engine. This function calls a caller-supplied function for each packet
 * in the caller specified dma chain.
 */
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a)
{
	struct dma_info *di = container_of(dmah, struct dma_info, dma);
	uint i = di->txin;
	uint end = di->txout;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	while (i != end) {
		skb = di->txp[i];
		if (skb != NULL) {
			tx_info = (struct ieee80211_tx_info *)skb->cb;
			(callback_fnc)(tx_info, arg_a);
		}
		i = nexttxd(di, i);
	}
}