0043 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0044
0045 #include <linux/module.h>
0046 #include <linux/delay.h>
0047 #include <linux/dma-mapping.h>
0048 #include <linux/hardirq.h>
0049 #include <linux/if.h>
0050 #include <linux/io.h>
0051 #include <linux/netdevice.h>
0052 #include <linux/cache.h>
0053 #include <linux/ethtool.h>
0054 #include <linux/uaccess.h>
0055 #include <linux/slab.h>
0056 #include <linux/etherdevice.h>
0057 #include <linux/nl80211.h>
0058
0059 #include <net/cfg80211.h>
0060 #include <net/ieee80211_radiotap.h>
0061
0062 #include <asm/unaligned.h>
0063
0064 #include <net/mac80211.h>
0065 #include "base.h"
0066 #include "reg.h"
0067 #include "debug.h"
0068 #include "ani.h"
0069 #include "ath5k.h"
0070 #include "../regd.h"
0071
0072 #define CREATE_TRACE_POINTS
0073 #include "trace.h"
0074
0075 bool ath5k_modparam_nohwcrypt;
0076 module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, 0444);
0077 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
0078
0079 static bool modparam_fastchanswitch;
0080 module_param_named(fastchanswitch, modparam_fastchanswitch, bool, 0444);
0081 MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
0082
0083 static bool ath5k_modparam_no_hw_rfkill_switch;
0084 module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
0085 bool, 0444);
0086 MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
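/*
 * Example (hypothetical invocation, assuming the driver is built as the
 * "ath5k" module): "modprobe ath5k nohwcrypt=1 no_hw_rfkill_switch=1"
 * disables hardware crypto and ignores the GPIO rfkill switch. All three
 * parameters are read-only at runtime (mode 0444).
 */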
0087
0088
0089
0090 MODULE_AUTHOR("Jiri Slaby");
0091 MODULE_AUTHOR("Nick Kossifidis");
0092 MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
0093 MODULE_LICENSE("Dual BSD/GPL");
0094
0095 static int ath5k_init(struct ieee80211_hw *hw);
0096 static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
0097 bool skip_pcu);
0098
0099
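/*
 * Known silicon revisions, used by ath5k_chip_name() to map a MAC or
 * radio (PHY) revision value to a printable chip name.
 */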
0100 static const struct ath5k_srev_name srev_names[] = {
0101 #ifdef CONFIG_ATH5K_AHB
0102 { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 },
0103 { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 },
0104 { "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 },
0105 { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 },
0106 { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 },
0107 { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 },
0108 { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 },
0109 #else
0110 { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
0111 { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
0112 { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
0113 { "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B },
0114 { "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 },
0115 { "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 },
0116 { "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 },
0117 { "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A },
0118 { "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 },
0119 { "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 },
0120 { "5424", AR5K_VERSION_MAC, AR5K_SREV_AR5424 },
0121 { "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 },
0122 { "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 },
0123 { "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 },
0124 { "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 },
0125 { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
0126 { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
0127 { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
0128 #endif
0129 { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
0130 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
0131 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
0132 { "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A },
0133 { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 },
0134 { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 },
0135 { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A },
0136 { "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B },
0137 { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 },
0138 { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A },
0139 { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B },
0140 { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 },
0141 { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
0142 { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
0143 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
0144 #ifdef CONFIG_ATH5K_AHB
0145 { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
0146 { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
0147 #endif
0148 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
0149 };
0150
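/*
 * Rate table shared by both bands. bitrate is in units of 100 kbps;
 * hw_value/hw_value_short are the AR5K rate codes for long and short
 * preamble. The first four entries are the CCK (802.11b) rates, the
 * remaining OFDM rates may also be used on 5/10 MHz channels.
 */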
0151 static const struct ieee80211_rate ath5k_rates[] = {
0152 { .bitrate = 10,
0153 .hw_value = ATH5K_RATE_CODE_1M, },
0154 { .bitrate = 20,
0155 .hw_value = ATH5K_RATE_CODE_2M,
0156 .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
0157 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
0158 { .bitrate = 55,
0159 .hw_value = ATH5K_RATE_CODE_5_5M,
0160 .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
0161 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
0162 { .bitrate = 110,
0163 .hw_value = ATH5K_RATE_CODE_11M,
0164 .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
0165 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
0166 { .bitrate = 60,
0167 .hw_value = ATH5K_RATE_CODE_6M,
0168 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
0169 IEEE80211_RATE_SUPPORTS_10MHZ },
0170 { .bitrate = 90,
0171 .hw_value = ATH5K_RATE_CODE_9M,
0172 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
0173 IEEE80211_RATE_SUPPORTS_10MHZ },
0174 { .bitrate = 120,
0175 .hw_value = ATH5K_RATE_CODE_12M,
0176 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
0177 IEEE80211_RATE_SUPPORTS_10MHZ },
0178 { .bitrate = 180,
0179 .hw_value = ATH5K_RATE_CODE_18M,
0180 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
0181 IEEE80211_RATE_SUPPORTS_10MHZ },
0182 { .bitrate = 240,
0183 .hw_value = ATH5K_RATE_CODE_24M,
0184 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
0185 IEEE80211_RATE_SUPPORTS_10MHZ },
0186 { .bitrate = 360,
0187 .hw_value = ATH5K_RATE_CODE_36M,
0188 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
0189 IEEE80211_RATE_SUPPORTS_10MHZ },
0190 { .bitrate = 480,
0191 .hw_value = ATH5K_RATE_CODE_48M,
0192 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
0193 IEEE80211_RATE_SUPPORTS_10MHZ },
0194 { .bitrate = 540,
0195 .hw_value = ATH5K_RATE_CODE_54M,
0196 .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
0197 IEEE80211_RATE_SUPPORTS_10MHZ },
0198 };
0199
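/*
 * The hardware reports only the low 15 bits of the RX timestamp, so
 * extend it using the current 64-bit TSF, stepping back one 0x8000
 * period if the TSF has already wrapped past the timestamp.
 * Illustrative example: tsf = 0x100000005 and rstamp = 0x7ff0 yield
 * 0xfffffff0.
 */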
0200 static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
0201 {
0202 u64 tsf = ath5k_hw_get_tsf64(ah);
0203
0204 if ((tsf & 0x7fff) < rstamp)
0205 tsf -= 0x8000;
0206
0207 return (tsf & ~0x7fff) | rstamp;
0208 }
0209
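/*
 * Return a printable name for a MAC or radio revision: an exact match on
 * the full revision wins, otherwise a match on the major revision is
 * used, and "xxxxx" if the revision is unknown.
 */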
0210 const char *
0211 ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
0212 {
0213 const char *name = "xxxxx";
0214 unsigned int i;
0215
0216 for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
0217 if (srev_names[i].sr_type != type)
0218 continue;
0219
0220 if ((val & 0xf0) == srev_names[i].sr_val)
0221 name = srev_names[i].sr_name;
0222
0223 if ((val & 0xff) == srev_names[i].sr_val) {
0224 name = srev_names[i].sr_name;
0225 break;
0226 }
0227 }
0228
0229 return name;
0230 }
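
/* Register access callbacks used by the shared ath layer (struct ath_ops). */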
0231 static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
0232 {
0233 struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
0234 return ath5k_hw_reg_read(ah, reg_offset);
0235 }
0236
0237 static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
0238 {
0239 struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
0240 ath5k_hw_reg_write(ah, val, reg_offset);
0241 }
0242
0243 static const struct ath_ops ath5k_common_ops = {
0244 .read = ath5k_ioread32,
0245 .write = ath5k_iowrite32,
0246 };
0247
0248
0249
0250
0251
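/*
 * cfg80211 regulatory notifier: hand regulatory domain changes over to
 * the shared ath regulatory code.
 */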
0252 static void ath5k_reg_notifier(struct wiphy *wiphy,
0253 struct regulatory_request *request)
0254 {
0255 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
0256 struct ath5k_hw *ah = hw->priv;
0257 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
0258
0259 ath_reg_notifier_apply(wiphy, request, regulatory);
0260 }
0261
0262
0263
0264
0265
0266
0267
0268
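/*
 * With CONFIG_ATH5K_TEST_CHANNELS every channel the hardware can tune to
 * is exposed; otherwise only standard 2.4 GHz, 5 GHz and 802.11j
 * channels are reported.
 */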
0269 #ifdef CONFIG_ATH5K_TEST_CHANNELS
0270 static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
0271 {
0272 return true;
0273 }
0274
0275 #else
0276 static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
0277 {
0278 if (band == NL80211_BAND_2GHZ && chan <= 14)
0279 return true;
0280
	return
	/* 5 GHz: channels 36..64 (multiples of 4) */
	(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
	/* 5 GHz: channels 100..140 (multiples of 4) */
	((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
	/* 5 GHz: channels 149..165 */
	((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
	/* 802.11j: channels 8, 12, 16 (5.04-5.08 GHz) */
	(chan == 8 || chan == 12 || chan == 16) ||
	/* 802.11j: channels 184..196 (4.9 GHz) */
	(chan == 184 || chan == 188 || chan == 192 || chan == 196));
0291 }
0292 #endif
0293
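/*
 * Fill the channel array for the given mode with all usable channels,
 * skipping frequencies the hardware cannot tune to as well as
 * non-standard channel numbers. Returns the number of channels written
 * (at most max).
 */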
0294 static unsigned int
0295 ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
0296 unsigned int mode, unsigned int max)
0297 {
0298 unsigned int count, size, freq, ch;
0299 enum nl80211_band band;
0300
0301 switch (mode) {
0302 case AR5K_MODE_11A:
0303
0304 size = 220;
0305 band = NL80211_BAND_5GHZ;
0306 break;
0307 case AR5K_MODE_11B:
0308 case AR5K_MODE_11G:
0309 size = 26;
0310 band = NL80211_BAND_2GHZ;
0311 break;
0312 default:
0313 ATH5K_WARN(ah, "bad mode, not copying channels\n");
0314 return 0;
0315 }
0316
0317 count = 0;
0318 for (ch = 1; ch <= size && count < max; ch++) {
0319 freq = ieee80211_channel_to_frequency(ch, band);
0320
0321 if (freq == 0)
0322 continue;
0323
0324
0325 channels[count].center_freq = freq;
0326 channels[count].band = band;
0327 channels[count].hw_value = mode;
0328
0329
0330 if (!ath5k_channel_ok(ah, &channels[count]))
0331 continue;
0332
0333 if (!ath5k_is_standard_channel(ch, band))
0334 continue;
0335
0336 count++;
0337 }
0338
0339 return count;
0340 }
0341
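/*
 * Build the reverse mapping from AR5K hardware rate codes to indices in
 * the band's bitrate table; used on RX to report the rate to mac80211.
 */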
0342 static void
0343 ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
0344 {
0345 u8 i;
0346
0347 for (i = 0; i < AR5K_MAX_RATES; i++)
0348 ah->rate_idx[b->band][i] = -1;
0349
0350 for (i = 0; i < b->n_bitrates; i++) {
0351 ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
0352 if (b->bitrates[i].hw_value_short)
0353 ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
0354 }
0355 }
0356
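/*
 * Register the supported bands with mac80211: 2 GHz with G or B rates
 * depending on hardware capabilities and, if supported, 5 GHz with the
 * OFDM (A) rates.
 */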
0357 static int
0358 ath5k_setup_bands(struct ieee80211_hw *hw)
0359 {
0360 struct ath5k_hw *ah = hw->priv;
0361 struct ieee80211_supported_band *sband;
0362 int max_c, count_c = 0;
0363 int i;
0364
0365 BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < NUM_NL80211_BANDS);
0366 max_c = ARRAY_SIZE(ah->channels);
0367
0368
0369 sband = &ah->sbands[NL80211_BAND_2GHZ];
0370 sband->band = NL80211_BAND_2GHZ;
0371 sband->bitrates = &ah->rates[NL80211_BAND_2GHZ][0];
0372
0373 if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
0374
0375 memcpy(sband->bitrates, &ath5k_rates[0],
0376 sizeof(struct ieee80211_rate) * 12);
0377 sband->n_bitrates = 12;
0378
0379 sband->channels = ah->channels;
0380 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
0381 AR5K_MODE_11G, max_c);
0382
0383 hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
0384 count_c = sband->n_channels;
0385 max_c -= count_c;
0386 } else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
0387
0388 memcpy(sband->bitrates, &ath5k_rates[0],
0389 sizeof(struct ieee80211_rate) * 4);
0390 sband->n_bitrates = 4;
0391
		/*
		 * The AR5211 supports only the B rates and uses 4-bit
		 * rate codes, so mask off the short-preamble flag bits
		 * from the hw values.
		 */
0396 if (ah->ah_version == AR5K_AR5211) {
0397 for (i = 0; i < 4; i++) {
0398 sband->bitrates[i].hw_value =
0399 sband->bitrates[i].hw_value & 0xF;
0400 sband->bitrates[i].hw_value_short =
0401 sband->bitrates[i].hw_value_short & 0xF;
0402 }
0403 }
0404
0405 sband->channels = ah->channels;
0406 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
0407 AR5K_MODE_11B, max_c);
0408
0409 hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
0410 count_c = sband->n_channels;
0411 max_c -= count_c;
0412 }
0413 ath5k_setup_rate_idx(ah, sband);
0414
0415
0416 if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
0417 sband = &ah->sbands[NL80211_BAND_5GHZ];
0418 sband->band = NL80211_BAND_5GHZ;
0419 sband->bitrates = &ah->rates[NL80211_BAND_5GHZ][0];
0420
0421 memcpy(sband->bitrates, &ath5k_rates[4],
0422 sizeof(struct ieee80211_rate) * 8);
0423 sband->n_bitrates = 8;
0424
0425 sband->channels = &ah->channels[count_c];
0426 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
0427 AR5K_MODE_11A, max_c);
0428
0429 hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
0430 }
0431 ath5k_setup_rate_idx(ah, sband);
0432
0433 ath5k_debug_dump_bands(ah);
0434
0435 return 0;
0436 }
0437
0438
0439
0440
0441
0442
0443
0444
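/*
 * Switch to a new channel: record the requested bandwidth mode
 * (20/10/5 MHz) and reset the hardware on the target channel.
 */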
0445 int
0446 ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef)
0447 {
0448 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
0449 "channel set, resetting (%u -> %u MHz)\n",
0450 ah->curchan->center_freq, chandef->chan->center_freq);
0451
0452 switch (chandef->width) {
0453 case NL80211_CHAN_WIDTH_20:
0454 case NL80211_CHAN_WIDTH_20_NOHT:
0455 ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
0456 break;
0457 case NL80211_CHAN_WIDTH_5:
0458 ah->ah_bwmode = AR5K_BWMODE_5MHZ;
0459 break;
0460 case NL80211_CHAN_WIDTH_10:
0461 ah->ah_bwmode = AR5K_BWMODE_10MHZ;
0462 break;
0463 default:
0464 WARN_ON(1);
0465 return -EINVAL;
0466 }
	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
0474 return ath5k_reset(ah, chandef->chan, true);
0475 }
0476
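/*
 * Interface iterator: accumulate the BSSID mask, remember one active MAC
 * address, count station interfaces and derive the combined operating
 * mode over all active interfaces.
 */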
0477 void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
0478 {
0479 struct ath5k_vif_iter_data *iter_data = data;
0480 int i;
0481 struct ath5k_vif *avf = (void *)vif->drv_priv;
0482
0483 if (iter_data->hw_macaddr)
0484 for (i = 0; i < ETH_ALEN; i++)
0485 iter_data->mask[i] &=
0486 ~(iter_data->hw_macaddr[i] ^ mac[i]);
0487
0488 if (!iter_data->found_active) {
0489 iter_data->found_active = true;
0490 memcpy(iter_data->active_mac, mac, ETH_ALEN);
0491 }
0492
0493 if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
0494 if (ether_addr_equal(iter_data->hw_macaddr, mac))
0495 iter_data->need_set_hw_addr = false;
0496
0497 if (!iter_data->any_assoc) {
0498 if (avf->assoc)
0499 iter_data->any_assoc = true;
0500 }
0501
0502
0503
0504
0505
0506
0507 if (avf->opmode == NL80211_IFTYPE_AP)
0508 iter_data->opmode = NL80211_IFTYPE_AP;
0509 else {
0510 if (avf->opmode == NL80211_IFTYPE_STATION)
0511 iter_data->n_stas++;
0512 if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
0513 iter_data->opmode = avf->opmode;
0514 }
0515 }
0516
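/*
 * Recompute and program BSSID mask, hardware MAC address, operating mode
 * and RX filter whenever the set of active interfaces changes.
 */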
0517 void
0518 ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
0519 struct ieee80211_vif *vif)
0520 {
0521 struct ath_common *common = ath5k_hw_common(ah);
0522 struct ath5k_vif_iter_data iter_data;
0523 u32 rfilt;
0524
0525
0526
0527
0528
0529 iter_data.hw_macaddr = common->macaddr;
0530 eth_broadcast_addr(iter_data.mask);
0531 iter_data.found_active = false;
0532 iter_data.need_set_hw_addr = true;
0533 iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
0534 iter_data.n_stas = 0;
0535
0536 if (vif)
0537 ath5k_vif_iter(&iter_data, vif->addr, vif);
0538
0539
0540 ieee80211_iterate_active_interfaces_atomic(
0541 ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
0542 ath5k_vif_iter, &iter_data);
0543 memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);
0544
0545 ah->opmode = iter_data.opmode;
0546 if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
0547
0548 ah->opmode = NL80211_IFTYPE_STATION;
0549
0550 ath5k_hw_set_opmode(ah, ah->opmode);
0551 ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
0552 ah->opmode, ath_opmode_to_string(ah->opmode));
0553
0554 if (iter_data.need_set_hw_addr && iter_data.found_active)
0555 ath5k_hw_set_lladdr(ah, iter_data.active_mac);
0556
0557 if (ath5k_hw_hasbssidmask(ah))
0558 ath5k_hw_set_bssid_mask(ah, ah->bssidmask);
0559
0560
0561 if (iter_data.n_stas > 1) {
		/*
		 * With several station interfaces active not all frames
		 * of interest make it through the hardware filters, so
		 * fall back to promiscuous RX and let mac80211 do the
		 * filtering.
		 */
0566 ah->filter_flags |= AR5K_RX_FILTER_PROM;
0567 }
0568
0569 rfilt = ah->filter_flags;
0570 ath5k_hw_set_rx_filter(ah, rfilt);
0571 ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
0572 }
0573
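/*
 * Convert an AR5K hardware rate code to a mac80211 rate index for the
 * current band; falls back to index 0 on out-of-range input.
 */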
0574 static inline int
0575 ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
0576 {
0577 int rix;
0578
0579
0580 if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
0581 "hw_rix out of bounds: %x\n", hw_rix))
0582 return 0;
0583
0584 rix = ah->rate_idx[ah->curchan->band][hw_rix];
0585 if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
0586 rix = 0;
0587
0588 return rix;
0589 }
0590
0591
0592
0593
0594
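/*
 * Allocate and DMA-map a receive skb; ath_rxbuf_alloc() takes care of
 * cache-line aligning the buffer.
 */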
0595 static
0596 struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
0597 {
0598 struct ath_common *common = ath5k_hw_common(ah);
0599 struct sk_buff *skb;
0600
0601
0602
0603
0604
0605 skb = ath_rxbuf_alloc(common,
0606 common->rx_bufsize,
0607 GFP_ATOMIC);
0608
0609 if (!skb) {
0610 ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
0611 common->rx_bufsize);
0612 return NULL;
0613 }
0614
0615 *skb_addr = dma_map_single(ah->dev,
0616 skb->data, common->rx_bufsize,
0617 DMA_FROM_DEVICE);
0618
0619 if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
0620 ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
0621 dev_kfree_skb(skb);
0622 return NULL;
0623 }
0624 return skb;
0625 }
0626
0627 static int
0628 ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
0629 {
0630 struct sk_buff *skb = bf->skb;
0631 struct ath5k_desc *ds;
0632 int ret;
0633
0634 if (!skb) {
0635 skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
0636 if (!skb)
0637 return -ENOMEM;
0638 bf->skb = skb;
0639 }
	/*
	 * Set up the descriptor.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * Each descriptor is created self-linked and added to the
	 * end of the list; adding the next descriptor then "fixes"
	 * the previous self-link, so the last entry always points
	 * to itself even while DMA is running.
	 */
0656 ds = bf->desc;
0657 ds->ds_link = bf->daddr;
0658 ds->ds_data = bf->skbaddr;
0659 ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
0660 if (ret) {
0661 ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
0662 return ret;
0663 }
0664
0665 if (ah->rxlink != NULL)
0666 *ah->rxlink = bf->daddr;
0667 ah->rxlink = &ds->ds_link;
0668 return 0;
0669 }
0670
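/*
 * Map the frame's 802.11 type/subtype to the packet type field used in
 * the TX descriptor.
 */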
0671 static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
0672 {
0673 struct ieee80211_hdr *hdr;
0674 enum ath5k_pkt_type htype;
0675 __le16 fc;
0676
0677 hdr = (struct ieee80211_hdr *)skb->data;
0678 fc = hdr->frame_control;
0679
0680 if (ieee80211_is_beacon(fc))
0681 htype = AR5K_PKT_TYPE_BEACON;
0682 else if (ieee80211_is_probe_resp(fc))
0683 htype = AR5K_PKT_TYPE_PROBE_RESP;
0684 else if (ieee80211_is_atim(fc))
0685 htype = AR5K_PKT_TYPE_ATIM;
0686 else if (ieee80211_is_pspoll(fc))
0687 htype = AR5K_PKT_TYPE_PSPOLL;
0688 else
0689 htype = AR5K_PKT_TYPE_NORMAL;
0690
0691 return htype;
0692 }
0693
0694 static struct ieee80211_rate *
0695 ath5k_get_rate(const struct ieee80211_hw *hw,
0696 const struct ieee80211_tx_info *info,
0697 struct ath5k_buf *bf, int idx)
0698 {
0699
0700
0701
0702
0703 if (bf->rates[idx].idx < 0) {
0704 return NULL;
0705 }
0706
0707 return &hw->wiphy->bands[info->band]->bitrates[ bf->rates[idx].idx ];
0708 }
0709
0710 static u16
0711 ath5k_get_rate_hw_value(const struct ieee80211_hw *hw,
0712 const struct ieee80211_tx_info *info,
0713 struct ath5k_buf *bf, int idx)
0714 {
0715 struct ieee80211_rate *rate;
0716 u16 hw_rate;
0717 u8 rc_flags;
0718
0719 rate = ath5k_get_rate(hw, info, bf, idx);
0720 if (!rate)
0721 return 0;
0722
0723 rc_flags = bf->rates[idx].flags;
0724 hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
0725 rate->hw_value_short : rate->hw_value;
0726
0727 return hw_rate;
0728 }
0729
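/*
 * Fill bf->rates from the station's rate table (sta->rates), keeping an
 * explicitly selected first rate from the tx_info if one is set. Returns
 * false when no rate table is available, so the caller falls back to
 * ieee80211_get_tx_rates().
 */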
0730 static bool ath5k_merge_ratetbl(struct ieee80211_sta *sta,
0731 struct ath5k_buf *bf,
0732 struct ieee80211_tx_info *tx_info)
0733 {
0734 struct ieee80211_sta_rates *ratetbl;
0735 u8 i;
0736
0737 if (!sta)
0738 return false;
0739
0740 ratetbl = rcu_dereference(sta->rates);
0741 if (!ratetbl)
0742 return false;
0743
0744 if (tx_info->control.rates[0].idx < 0 ||
0745 tx_info->control.rates[0].count == 0)
0746 {
0747 i = 0;
0748 } else {
0749 bf->rates[0] = tx_info->control.rates[0];
0750 i = 1;
0751 }
0752
0753 for ( ; i < IEEE80211_TX_MAX_RATES; i++) {
0754 bf->rates[i].idx = ratetbl->rate[i].idx;
0755 bf->rates[i].flags = ratetbl->rate[i].flags;
0756 if (tx_info->control.use_rts)
0757 bf->rates[i].count = ratetbl->rate[i].count_rts;
0758 else if (tx_info->control.use_cts_prot)
0759 bf->rates[i].count = ratetbl->rate[i].count_cts;
0760 else
0761 bf->rates[i].count = ratetbl->rate[i].count;
0762 }
0763
0764 return true;
0765 }
0766
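/*
 * Fill and queue a TX descriptor for an outgoing frame: DMA-map the skb,
 * pick the rate series (station rate table or mac80211 rate control),
 * set up RTS/CTS protection and multi-rate retries, then link the buffer
 * onto the hardware queue and start TX DMA.
 */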
0767 static int
0768 ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
0769 struct ath5k_txq *txq, int padsize,
0770 struct ieee80211_tx_control *control)
0771 {
0772 struct ath5k_desc *ds = bf->desc;
0773 struct sk_buff *skb = bf->skb;
0774 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
0775 unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
0776 struct ieee80211_rate *rate;
0777 struct ieee80211_sta *sta;
0778 unsigned int mrr_rate[3], mrr_tries[3];
0779 int i, ret;
0780 u16 hw_rate;
0781 u16 cts_rate = 0;
0782 u16 duration = 0;
0783 u8 rc_flags;
0784
0785 flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
0786
0787
0788 bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
0789 DMA_TO_DEVICE);
0790
0791 if (dma_mapping_error(ah->dev, bf->skbaddr))
0792 return -ENOSPC;
0793
0794 if (control)
0795 sta = control->sta;
0796 else
0797 sta = NULL;
0798
0799 if (!ath5k_merge_ratetbl(sta, bf, info)) {
0800 ieee80211_get_tx_rates(info->control.vif,
0801 sta, skb, bf->rates,
0802 ARRAY_SIZE(bf->rates));
0803 }
0804
0805 rate = ath5k_get_rate(ah->hw, info, bf, 0);
0806
0807 if (!rate) {
0808 ret = -EINVAL;
0809 goto err_unmap;
0810 }
0811
0812 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
0813 flags |= AR5K_TXDESC_NOACK;
0814
0815 rc_flags = bf->rates[0].flags;
0816
0817 hw_rate = ath5k_get_rate_hw_value(ah->hw, info, bf, 0);
0818
0819 pktlen = skb->len;
0820
0821
0822
0823
0824 if (info->control.hw_key) {
0825 keyidx = info->control.hw_key->hw_key_idx;
0826 pktlen += info->control.hw_key->icv_len;
0827 }
0828 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
0829 flags |= AR5K_TXDESC_RTSENA;
0830 cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
0831 duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
0832 info->control.vif, pktlen, info));
0833 }
0834 if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
0835 flags |= AR5K_TXDESC_CTSENA;
0836 cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
0837 duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
0838 info->control.vif, pktlen, info));
0839 }
0840
0841 ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
0842 ieee80211_get_hdrlen_from_skb(skb), padsize,
0843 get_hw_packet_type(skb),
0844 (ah->ah_txpower.txp_requested * 2),
0845 hw_rate,
0846 bf->rates[0].count, keyidx, ah->ah_tx_ant, flags,
0847 cts_rate, duration);
0848 if (ret)
0849 goto err_unmap;
0850
0851
0852 if (ah->ah_capabilities.cap_has_mrr_support) {
0853 memset(mrr_rate, 0, sizeof(mrr_rate));
0854 memset(mrr_tries, 0, sizeof(mrr_tries));
0855
0856 for (i = 0; i < 3; i++) {
0857
0858 rate = ath5k_get_rate(ah->hw, info, bf, i);
0859 if (!rate)
0860 break;
0861
0862 mrr_rate[i] = ath5k_get_rate_hw_value(ah->hw, info, bf, i);
0863 mrr_tries[i] = bf->rates[i].count;
0864 }
0865
0866 ath5k_hw_setup_mrr_tx_desc(ah, ds,
0867 mrr_rate[0], mrr_tries[0],
0868 mrr_rate[1], mrr_tries[1],
0869 mrr_rate[2], mrr_tries[2]);
0870 }
0871
0872 ds->ds_link = 0;
0873 ds->ds_data = bf->skbaddr;
0874
0875 spin_lock_bh(&txq->lock);
0876 list_add_tail(&bf->list, &txq->q);
0877 txq->txq_len++;
0878 if (txq->link == NULL)
0879 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
0880 else
0881 *txq->link = bf->daddr;
0882
0883 txq->link = &ds->ds_link;
0884 ath5k_hw_start_tx_dma(ah, txq->qnum);
0885 spin_unlock_bh(&txq->lock);
0886
0887 return 0;
0888 err_unmap:
0889 dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
0890 return ret;
0891 }
0892
0893
0894
0895
0896
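/*
 * Allocate one coherent DMA block for all TX, RX and beacon descriptors
 * and carve it into ath5k_buf entries on the rxbuf, txbuf and bcbuf
 * lists.
 */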
0897 static int
0898 ath5k_desc_alloc(struct ath5k_hw *ah)
0899 {
0900 struct ath5k_desc *ds;
0901 struct ath5k_buf *bf;
0902 dma_addr_t da;
0903 unsigned int i;
0904 int ret;
0905
0906
0907 ah->desc_len = sizeof(struct ath5k_desc) *
0908 (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
0909
0910 ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
0911 &ah->desc_daddr, GFP_KERNEL);
0912 if (ah->desc == NULL) {
0913 ATH5K_ERR(ah, "can't allocate descriptors\n");
0914 ret = -ENOMEM;
0915 goto err;
0916 }
0917 ds = ah->desc;
0918 da = ah->desc_daddr;
0919 ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
0920 ds, ah->desc_len, (unsigned long long)ah->desc_daddr);
0921
0922 bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
0923 sizeof(struct ath5k_buf), GFP_KERNEL);
0924 if (bf == NULL) {
0925 ATH5K_ERR(ah, "can't allocate bufptr\n");
0926 ret = -ENOMEM;
0927 goto err_free;
0928 }
0929 ah->bufptr = bf;
0930
0931 INIT_LIST_HEAD(&ah->rxbuf);
0932 for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
0933 bf->desc = ds;
0934 bf->daddr = da;
0935 list_add_tail(&bf->list, &ah->rxbuf);
0936 }
0937
0938 INIT_LIST_HEAD(&ah->txbuf);
0939 ah->txbuf_len = ATH_TXBUF;
0940 for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
0941 bf->desc = ds;
0942 bf->daddr = da;
0943 list_add_tail(&bf->list, &ah->txbuf);
0944 }
0945
0946
0947 INIT_LIST_HEAD(&ah->bcbuf);
0948 for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
0949 bf->desc = ds;
0950 bf->daddr = da;
0951 list_add_tail(&bf->list, &ah->bcbuf);
0952 }
0953
0954 return 0;
0955 err_free:
0956 dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
0957 err:
0958 ah->desc = NULL;
0959 return ret;
0960 }
0961
0962 void
0963 ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
0964 {
0965 BUG_ON(!bf);
0966 if (!bf->skb)
0967 return;
0968 dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
0969 DMA_TO_DEVICE);
0970 ieee80211_free_txskb(ah->hw, bf->skb);
0971 bf->skb = NULL;
0972 bf->skbaddr = 0;
0973 bf->desc->ds_data = 0;
0974 }
0975
0976 void
0977 ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
0978 {
0979 struct ath_common *common = ath5k_hw_common(ah);
0980
0981 BUG_ON(!bf);
0982 if (!bf->skb)
0983 return;
0984 dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
0985 DMA_FROM_DEVICE);
0986 dev_kfree_skb_any(bf->skb);
0987 bf->skb = NULL;
0988 bf->skbaddr = 0;
0989 bf->desc->ds_data = 0;
0990 }
0991
0992 static void
0993 ath5k_desc_free(struct ath5k_hw *ah)
0994 {
0995 struct ath5k_buf *bf;
0996
0997 list_for_each_entry(bf, &ah->txbuf, list)
0998 ath5k_txbuf_free_skb(ah, bf);
0999 list_for_each_entry(bf, &ah->rxbuf, list)
1000 ath5k_rxbuf_free_skb(ah, bf);
1001 list_for_each_entry(bf, &ah->bcbuf, list)
1002 ath5k_txbuf_free_skb(ah, bf);
1003
1004
1005 dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
1006 ah->desc = NULL;
1007 ah->desc_daddr = 0;
1008
1009 kfree(ah->bufptr);
1010 ah->bufptr = NULL;
1011 }
1012
1013
1014
1015
1016
1017
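/*
 * Create a hardware TX queue of the given type and initialize the
 * corresponding ath5k_txq state; interrupts are requested for descriptor
 * completion and end-of-list (EOL) conditions.
 */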
1018 static struct ath5k_txq *
1019 ath5k_txq_setup(struct ath5k_hw *ah,
1020 int qtype, int subtype)
1021 {
1022 struct ath5k_txq *txq;
1023 struct ath5k_txq_info qi = {
1024 .tqi_subtype = subtype,
1025
1026
1027 .tqi_aifs = AR5K_TUNE_AIFS,
1028 .tqi_cw_min = AR5K_TUNE_CWMIN,
1029 .tqi_cw_max = AR5K_TUNE_CWMAX
1030 };
1031 int qnum;
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045 qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
1046 AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
1047 qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
1048 if (qnum < 0) {
1049
1050
1051
1052
1053 return ERR_PTR(qnum);
1054 }
1055 txq = &ah->txqs[qnum];
1056 if (!txq->setup) {
1057 txq->qnum = qnum;
1058 txq->link = NULL;
1059 INIT_LIST_HEAD(&txq->q);
1060 spin_lock_init(&txq->lock);
1061 txq->setup = true;
1062 txq->txq_len = 0;
1063 txq->txq_max = ATH5K_TXQ_LEN_MAX;
1064 txq->txq_poll_mark = false;
1065 txq->txq_stuck = 0;
1066 }
1067 return &ah->txqs[qnum];
1068 }
1069
1070 static int
1071 ath5k_beaconq_setup(struct ath5k_hw *ah)
1072 {
1073 struct ath5k_txq_info qi = {
1074
1075
1076 .tqi_aifs = AR5K_TUNE_AIFS,
1077 .tqi_cw_min = AR5K_TUNE_CWMIN,
1078 .tqi_cw_max = AR5K_TUNE_CWMAX,
1079
1080 .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
1081 };
1082
1083 return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
1084 }
1085
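/*
 * (Re)configure the beacon hardware queue and the CAB queue: AP and mesh
 * interfaces send beacons without backoff (AIFS/CW forced to 0), IBSS
 * backs off up to 2 * CWmin, and the CAB queue ready time is set to 80%
 * of the beacon interval.
 */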
1086 static int
1087 ath5k_beaconq_config(struct ath5k_hw *ah)
1088 {
1089 struct ath5k_txq_info qi;
1090 int ret;
1091
1092 ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
1093 if (ret)
1094 goto err;
1095
1096 if (ah->opmode == NL80211_IFTYPE_AP ||
1097 ah->opmode == NL80211_IFTYPE_MESH_POINT) {
1098
1099
1100
1101
1102 qi.tqi_aifs = 0;
1103 qi.tqi_cw_min = 0;
1104 qi.tqi_cw_max = 0;
1105 } else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
1106
1107
1108
1109 qi.tqi_aifs = 0;
1110 qi.tqi_cw_min = 0;
1111 qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
1112 }
1113
1114 ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
1115 "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
1116 qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
1117
1118 ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
1119 if (ret) {
1120 ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
1121 "hardware queue!\n", __func__);
1122 goto err;
1123 }
1124 ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq);
1125 if (ret)
1126 goto err;
1127
1128
1129 ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1130 if (ret)
1131 goto err;
1132
1133 qi.tqi_ready_time = (ah->bintval * 80) / 100;
1134 ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1135 if (ret)
1136 goto err;
1137
1138 ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
1139 err:
1140 return ret;
1141 }
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
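/*
 * Return all pending TX buffers from every set-up queue back to the free
 * list; used when the TX path is being drained (e.g. around a reset).
 */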
1154 static void
1155 ath5k_drain_tx_buffs(struct ath5k_hw *ah)
1156 {
1157 struct ath5k_txq *txq;
1158 struct ath5k_buf *bf, *bf0;
1159 int i;
1160
1161 for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
1162 if (ah->txqs[i].setup) {
1163 txq = &ah->txqs[i];
1164 spin_lock_bh(&txq->lock);
1165 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1166 ath5k_debug_printtxbuf(ah, bf);
1167
1168 ath5k_txbuf_free_skb(ah, bf);
1169
1170 spin_lock(&ah->txbuflock);
1171 list_move_tail(&bf->list, &ah->txbuf);
1172 ah->txbuf_len++;
1173 txq->txq_len--;
1174 spin_unlock(&ah->txbuflock);
1175 }
1176 txq->link = NULL;
1177 txq->txq_poll_mark = false;
1178 spin_unlock_bh(&txq->lock);
1179 }
1180 }
1181 }
1182
1183 static void
1184 ath5k_txq_release(struct ath5k_hw *ah)
1185 {
1186 struct ath5k_txq *txq = ah->txqs;
1187 unsigned int i;
1188
1189 for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
1190 if (txq->setup) {
1191 ath5k_hw_release_tx_queue(ah, txq->qnum);
1192 txq->setup = false;
1193 }
1194 }
1195
1196
1197
1198
1199
1200
1201
1202
1203
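/*
 * Enable the receive path: set up all RX buffers and descriptors,
 * program the RX descriptor pointer, then start RX DMA and the PCU.
 */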
1204 static int
1205 ath5k_rx_start(struct ath5k_hw *ah)
1206 {
1207 struct ath_common *common = ath5k_hw_common(ah);
1208 struct ath5k_buf *bf;
1209 int ret;
1210
1211 common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);
1212
1213 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
1214 common->cachelsz, common->rx_bufsize);
1215
1216 spin_lock_bh(&ah->rxbuflock);
1217 ah->rxlink = NULL;
1218 list_for_each_entry(bf, &ah->rxbuf, list) {
1219 ret = ath5k_rxbuf_setup(ah, bf);
1220 if (ret != 0) {
1221 spin_unlock_bh(&ah->rxbuflock);
1222 goto err;
1223 }
1224 }
1225 bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
1226 ath5k_hw_set_rxdp(ah, bf->daddr);
1227 spin_unlock_bh(&ah->rxbuflock);
1228
1229 ath5k_hw_start_rx_dma(ah);
1230 ath5k_update_bssid_mask_and_opmode(ah, NULL);
1231 ath5k_hw_start_rx_pcu(ah);
1232
1233 return 0;
1234 err:
1235 return ret;
1236 }
1237
1238
1239
1240
1241
1242
1243
1244
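/*
 * Disable the receive logic: clear the RX filter and stop the PCU.
 * Note that this does not stop RX DMA, which is handled separately.
 */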
1245 static void
1246 ath5k_rx_stop(struct ath5k_hw *ah)
1247 {
1248
1249 ath5k_hw_set_rx_filter(ah, 0);
1250 ath5k_hw_stop_rx_pcu(ah);
1251
1252 ath5k_debug_printrxbuffs(ah);
1253 }
1254
1255 static unsigned int
1256 ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
1257 struct ath5k_rx_status *rs)
1258 {
1259 struct ath_common *common = ath5k_hw_common(ah);
1260 struct ieee80211_hdr *hdr = (void *)skb->data;
1261 unsigned int keyix, hlen;
1262
1263 if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
1264 rs->rs_keyix != AR5K_RXKEYIX_INVALID)
1265 return RX_FLAG_DECRYPTED;
	/*
	 * Apparently when a default key is used to decrypt the packet
	 * the hardware does not set the index used to decrypt.  In
	 * such cases get the index from the packet itself.
	 */
1270 hlen = ieee80211_hdrlen(hdr->frame_control);
1271 if (ieee80211_has_protected(hdr->frame_control) &&
1272 !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
1273 skb->len >= hlen + 4) {
1274 keyix = skb->data[hlen + 3] >> 6;
1275
1276 if (test_bit(keyix, common->keymap))
1277 return RX_FLAG_DECRYPTED;
1278 }
1279
1280 return 0;
1281 }
1282
1283
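/*
 * IBSS beacons carry the sender's TSF. If a beacon from our own IBSS has
 * a newer timestamp than our reported mactime, adopt the hardware TSF as
 * mactime and, when needed, re-sync the beacon timers (hardware TSF
 * merge).
 */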
1284 static void
1285 ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
1286 struct ieee80211_rx_status *rxs)
1287 {
1288 u64 tsf, bc_tstamp;
1289 u32 hw_tu;
1290 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1291
1292 if (le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS) {
1293
1294
1295
1296
1297
1298 tsf = ath5k_hw_get_tsf64(ah);
1299 bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
1300 hw_tu = TSF_TO_TU(tsf);
1301
1302 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
1303 "beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
1304 (unsigned long long)bc_tstamp,
1305 (unsigned long long)rxs->mactime,
1306 (unsigned long long)(rxs->mactime - bc_tstamp),
1307 (unsigned long long)tsf);
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320 if (bc_tstamp > rxs->mactime) {
1321 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
1322 "fixing mactime from %llx to %llx\n",
1323 (unsigned long long)rxs->mactime,
1324 (unsigned long long)tsf);
1325 rxs->mactime = tsf;
1326 }
1327
1328
1329
1330
1331
1332
1333
1334 if (hw_tu >= ah->nexttbtt)
1335 ath5k_beacon_update_timers(ah, bc_tstamp);
1336
1337
1338
1339
1340 if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
1341 ath5k_beacon_update_timers(ah, bc_tstamp);
1342 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
1343 "fixed beacon timers after beacon receive\n");
1344 }
1345 }
1346 }
1347
1348
1349
1350
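/*
 * Position up to which the 802.11 header runs: 24 bytes for a basic
 * header, plus 6 for a 4-address frame and 2 for the QoS control field.
 * Padding to align the payload is inserted at this point.
 */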
1351 static int ath5k_common_padpos(struct sk_buff *skb)
1352 {
1353 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1354 __le16 frame_control = hdr->frame_control;
1355 int padpos = 24;
1356
1357 if (ieee80211_has_a4(frame_control))
1358 padpos += ETH_ALEN;
1359
1360 if (ieee80211_is_data_qos(frame_control))
1361 padpos += IEEE80211_QOS_CTL_LEN;
1362
1363 return padpos;
1364 }
1365
1366
1367
1368
1369
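/*
 * Pad the 802.11 header to a 4-byte boundary as the hardware expects.
 * Returns the number of pad bytes added, or -1 if the skb lacks the
 * headroom. Example: a QoS data frame has a 26-byte header, so padpos is
 * 26 and 2 bytes of padding are inserted.
 */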
1370 static int ath5k_add_padding(struct sk_buff *skb)
1371 {
1372 int padpos = ath5k_common_padpos(skb);
1373 int padsize = padpos & 3;
1374
1375 if (padsize && skb->len > padpos) {
1376
1377 if (skb_headroom(skb) < padsize)
1378 return -1;
1379
1380 skb_push(skb, padsize);
1381 memmove(skb->data, skb->data + padsize, padpos);
1382 return padsize;
1383 }
1384
1385 return 0;
1386 }
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
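/*
 * Undo the header padding before a frame is handed back to mac80211
 * (RX delivery or TX status). Returns the number of bytes removed.
 */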
1401 static int ath5k_remove_padding(struct sk_buff *skb)
1402 {
1403 int padpos = ath5k_common_padpos(skb);
1404 int padsize = padpos & 3;
1405
1406 if (padsize && skb->len >= padpos + padsize) {
1407 memmove(skb->data + padsize, skb->data, padpos);
1408 skb_pull(skb, padsize);
1409 return padsize;
1410 }
1411
1412 return 0;
1413 }
1414
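/*
 * Fill in the mac80211 RX status (timestamp, frequency/band, signal,
 * antenna, rate, decryption and bandwidth flags) and deliver the frame.
 * Beacons from our BSS also update the RSSI average and, in IBSS mode,
 * the TSF bookkeeping.
 */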
1415 static void
1416 ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
1417 struct ath5k_rx_status *rs)
1418 {
1419 struct ieee80211_rx_status *rxs;
1420 struct ath_common *common = ath5k_hw_common(ah);
1421
1422 ath5k_remove_padding(skb);
1423
1424 rxs = IEEE80211_SKB_RXCB(skb);
1425
1426 rxs->flag = 0;
1427 if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
1428 rxs->flag |= RX_FLAG_MMIC_ERROR;
1429 if (unlikely(rs->rs_status & AR5K_RXERR_CRC))
1430 rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442 rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
1443 rxs->flag |= RX_FLAG_MACTIME_END;
1444
1445 rxs->freq = ah->curchan->center_freq;
1446 rxs->band = ah->curchan->band;
1447
1448 rxs->signal = ah->ah_noise_floor + rs->rs_rssi;
1449
1450 rxs->antenna = rs->rs_antenna;
1451
1452 if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
1453 ah->stats.antenna_rx[rs->rs_antenna]++;
1454 else
1455 ah->stats.antenna_rx[0]++;
1456
1457 rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
1458 rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
1459 switch (ah->ah_bwmode) {
1460 case AR5K_BWMODE_5MHZ:
1461 rxs->bw = RATE_INFO_BW_5;
1462 break;
1463 case AR5K_BWMODE_10MHZ:
1464 rxs->bw = RATE_INFO_BW_10;
1465 break;
1466 default:
1467 break;
1468 }
1469
1470 if (rs->rs_rate ==
1471 ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
1472 rxs->enc_flags |= RX_ENC_FLAG_SHORTPRE;
1473
1474 trace_ath5k_rx(ah, skb);
1475
1476 if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
1477 ewma_beacon_rssi_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
1478
1479
1480 if (ah->opmode == NL80211_IFTYPE_ADHOC)
1481 ath5k_check_ibss_tsf(ah, skb, rxs);
1482 }
1483
1484 ieee80211_rx(ah->hw, skb);
1485 }
1486
1487
1488
1489
1490
1491
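/*
 * Update RX statistics and decide whether a received frame should be
 * passed to mac80211; returns false for frames that must be dropped
 * (unfiltered CRC/PHY errors, "jumbo" multi-descriptor frames, ...).
 */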
1492 static bool
1493 ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
1494 {
1495 ah->stats.rx_all_count++;
1496 ah->stats.rx_bytes_count += rs->rs_datalen;
1497
1498 if (unlikely(rs->rs_status)) {
1499 unsigned int filters;
1500
1501 if (rs->rs_status & AR5K_RXERR_CRC)
1502 ah->stats.rxerr_crc++;
1503 if (rs->rs_status & AR5K_RXERR_FIFO)
1504 ah->stats.rxerr_fifo++;
1505 if (rs->rs_status & AR5K_RXERR_PHY) {
1506 ah->stats.rxerr_phy++;
1507 if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
1508 ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
1509
1510
1511
1512
1513
1514
1515
1516 if (rs->rs_phyerr == AR5K_RX_PHY_ERROR_OFDM_RESTART ||
1517 rs->rs_phyerr == AR5K_RX_PHY_ERROR_CCK_RESTART) {
1518 rs->rs_status |= AR5K_RXERR_CRC;
1519 rs->rs_status &= ~AR5K_RXERR_PHY;
1520 } else {
1521 return false;
1522 }
1523 }
1524 if (rs->rs_status & AR5K_RXERR_DECRYPT) {
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535 ah->stats.rxerr_decrypt++;
1536 if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
1537 !(rs->rs_status & AR5K_RXERR_CRC))
1538 return true;
1539 }
1540 if (rs->rs_status & AR5K_RXERR_MIC) {
1541 ah->stats.rxerr_mic++;
1542 return true;
1543 }
1544
1545
1546
1547
1548
1549 filters = AR5K_RXERR_DECRYPT;
1550 if (ah->fif_filter_flags & FIF_FCSFAIL)
1551 filters |= AR5K_RXERR_CRC;
1552
1553 if (rs->rs_status & ~filters)
1554 return false;
1555 }
1556
1557 if (unlikely(rs->rs_more)) {
1558 ah->stats.rxerr_jumbo++;
1559 return false;
1560 }
1561 return true;
1562 }
1563
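/*
 * Program the interrupt mask, keeping RX/TX interrupts masked while the
 * corresponding tasklet still has work pending so it is not re-triggered
 * while running.
 */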
1564 static void
1565 ath5k_set_current_imask(struct ath5k_hw *ah)
1566 {
1567 enum ath5k_int imask;
1568 unsigned long flags;
1569
1570 if (test_bit(ATH_STAT_RESET, ah->status))
1571 return;
1572
1573 spin_lock_irqsave(&ah->irqlock, flags);
1574 imask = ah->imask;
1575 if (ah->rx_pending)
1576 imask &= ~AR5K_INT_RX_ALL;
1577 if (ah->tx_pending)
1578 imask &= ~AR5K_INT_TX_ALL;
1579 ath5k_hw_set_imr(ah, imask);
1580 spin_unlock_irqrestore(&ah->irqlock, flags);
1581 }
1582
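/*
 * RX tasklet: walk the receive descriptor list, hand good frames to
 * mac80211 and re-arm each buffer with a freshly allocated skb.
 */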
1583 static void
1584 ath5k_tasklet_rx(struct tasklet_struct *t)
1585 {
1586 struct ath5k_rx_status rs = {};
1587 struct sk_buff *skb, *next_skb;
1588 dma_addr_t next_skb_addr;
1589 struct ath5k_hw *ah = from_tasklet(ah, t, rxtq);
1590 struct ath_common *common = ath5k_hw_common(ah);
1591 struct ath5k_buf *bf;
1592 struct ath5k_desc *ds;
1593 int ret;
1594
1595 spin_lock(&ah->rxbuflock);
1596 if (list_empty(&ah->rxbuf)) {
1597 ATH5K_WARN(ah, "empty rx buf pool\n");
1598 goto unlock;
1599 }
1600 do {
1601 bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
1602 BUG_ON(bf->skb == NULL);
1603 skb = bf->skb;
1604 ds = bf->desc;
1605
1606
1607 if (ath5k_hw_get_rxdp(ah) == bf->daddr)
1608 break;
1609
1610 ret = ah->ah_proc_rx_desc(ah, ds, &rs);
1611 if (unlikely(ret == -EINPROGRESS))
1612 break;
1613 else if (unlikely(ret)) {
1614 ATH5K_ERR(ah, "error in processing rx descriptor\n");
1615 ah->stats.rxerr_proc++;
1616 break;
1617 }
1618
1619 if (ath5k_receive_frame_ok(ah, &rs)) {
1620 next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);
1621
1622
1623
1624
1625
1626 if (!next_skb)
1627 goto next;
1628
1629 dma_unmap_single(ah->dev, bf->skbaddr,
1630 common->rx_bufsize,
1631 DMA_FROM_DEVICE);
1632
1633 skb_put(skb, rs.rs_datalen);
1634
1635 ath5k_receive_frame(ah, skb, &rs);
1636
1637 bf->skb = next_skb;
1638 bf->skbaddr = next_skb_addr;
1639 }
1640 next:
1641 list_move_tail(&bf->list, &ah->rxbuf);
1642 } while (ath5k_rxbuf_setup(ah, bf) == 0);
1643 unlock:
1644 spin_unlock(&ah->rxbuflock);
1645 ah->rx_pending = false;
1646 ath5k_set_current_imask(ah);
1647 }
1648
1649
1650
1651
1652
1653
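/*
 * Queue a frame for transmission: add header padding, grab a free TX
 * buffer and set up its descriptor. The mac80211 queue is stopped when
 * the hardware queue fills up or no buffers are left.
 */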
1654 void
1655 ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1656 struct ath5k_txq *txq, struct ieee80211_tx_control *control)
1657 {
1658 struct ath5k_hw *ah = hw->priv;
1659 struct ath5k_buf *bf;
1660 unsigned long flags;
1661 int padsize;
1662
1663 trace_ath5k_tx(ah, skb, txq);
1664
1665
1666
1667
1668
1669 padsize = ath5k_add_padding(skb);
1670 if (padsize < 0) {
1671 ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
1672 " headroom to pad");
1673 goto drop_packet;
1674 }
1675
1676 if (txq->txq_len >= txq->txq_max &&
1677 txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
1678 ieee80211_stop_queue(hw, txq->qnum);
1679
1680 spin_lock_irqsave(&ah->txbuflock, flags);
1681 if (list_empty(&ah->txbuf)) {
1682 ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
1683 spin_unlock_irqrestore(&ah->txbuflock, flags);
1684 ieee80211_stop_queues(hw);
1685 goto drop_packet;
1686 }
1687 bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
1688 list_del(&bf->list);
1689 ah->txbuf_len--;
1690 if (list_empty(&ah->txbuf))
1691 ieee80211_stop_queues(hw);
1692 spin_unlock_irqrestore(&ah->txbuflock, flags);
1693
1694 bf->skb = skb;
1695
1696 if (ath5k_txbuf_setup(ah, bf, txq, padsize, control)) {
1697 bf->skb = NULL;
1698 spin_lock_irqsave(&ah->txbuflock, flags);
1699 list_add_tail(&bf->list, &ah->txbuf);
1700 ah->txbuf_len++;
1701 spin_unlock_irqrestore(&ah->txbuflock, flags);
1702 goto drop_packet;
1703 }
1704 return;
1705
1706 drop_packet:
1707 ieee80211_free_txskb(hw, skb);
1708 }
1709
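/*
 * Report TX status to mac80211: restore the per-rate retry counts,
 * translate the hardware status (filtered, excessive retries, FIFO
 * underrun, ACK), strip the header padding and update antenna stats.
 */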
1710 static void
1711 ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
1712 struct ath5k_txq *txq, struct ath5k_tx_status *ts,
1713 struct ath5k_buf *bf)
1714 {
1715 struct ieee80211_tx_info *info;
1716 u8 tries[3];
1717 int i;
1718 int size = 0;
1719
1720 ah->stats.tx_all_count++;
1721 ah->stats.tx_bytes_count += skb->len;
1722 info = IEEE80211_SKB_CB(skb);
1723
1724 size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
1725 memcpy(info->status.rates, bf->rates, size);
1726
1727 tries[0] = info->status.rates[0].count;
1728 tries[1] = info->status.rates[1].count;
1729 tries[2] = info->status.rates[2].count;
1730
1731 ieee80211_tx_info_clear_status(info);
1732
1733 for (i = 0; i < ts->ts_final_idx; i++) {
1734 struct ieee80211_tx_rate *r =
1735 &info->status.rates[i];
1736
1737 r->count = tries[i];
1738 }
1739
1740 info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
1741 info->status.rates[ts->ts_final_idx + 1].idx = -1;
1742
1743 if (unlikely(ts->ts_status)) {
1744 ah->stats.ack_fail++;
1745 if (ts->ts_status & AR5K_TXERR_FILT) {
1746 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1747 ah->stats.txerr_filt++;
1748 }
1749 if (ts->ts_status & AR5K_TXERR_XRETRY)
1750 ah->stats.txerr_retry++;
1751 if (ts->ts_status & AR5K_TXERR_FIFO)
1752 ah->stats.txerr_fifo++;
1753 } else {
1754 info->flags |= IEEE80211_TX_STAT_ACK;
1755 info->status.ack_signal = ts->ts_rssi;
1756
1757
1758 info->status.rates[ts->ts_final_idx].count++;
1759 }
1760
1761
1762
1763
1764
1765 ath5k_remove_padding(skb);
1766
1767 if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
1768 ah->stats.antenna_tx[ts->ts_antenna]++;
1769 else
1770 ah->stats.antenna_tx[0]++;
1771
1772 trace_ath5k_tx_complete(ah, skb, txq, ts);
1773 ieee80211_tx_status(ah->hw, skb);
1774 }
1775
1776 static void
1777 ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
1778 {
1779 struct ath5k_tx_status ts = {};
1780 struct ath5k_buf *bf, *bf0;
1781 struct ath5k_desc *ds;
1782 struct sk_buff *skb;
1783 int ret;
1784
1785 spin_lock(&txq->lock);
1786 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1787
1788 txq->txq_poll_mark = false;
1789
1790
1791 if (bf->skb != NULL) {
1792 ds = bf->desc;
1793
1794 ret = ah->ah_proc_tx_desc(ah, ds, &ts);
1795 if (unlikely(ret == -EINPROGRESS))
1796 break;
1797 else if (unlikely(ret)) {
1798 ATH5K_ERR(ah,
1799 "error %d while processing "
1800 "queue %u\n", ret, txq->qnum);
1801 break;
1802 }
1803
1804 skb = bf->skb;
1805 bf->skb = NULL;
1806
1807 dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
1808 DMA_TO_DEVICE);
1809 ath5k_tx_frame_completed(ah, skb, txq, &ts, bf);
1810 }
1811
1812
1813
1814
1815
1816
1817
1818 if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
1819 spin_lock(&ah->txbuflock);
1820 list_move_tail(&bf->list, &ah->txbuf);
1821 ah->txbuf_len++;
1822 txq->txq_len--;
1823 spin_unlock(&ah->txbuflock);
1824 }
1825 }
1826 spin_unlock(&txq->lock);
1827 if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
1828 ieee80211_wake_queue(ah->hw, txq->qnum);
1829 }
1830
1831 static void
1832 ath5k_tasklet_tx(struct tasklet_struct *t)
1833 {
1834 int i;
1835 struct ath5k_hw *ah = from_tasklet(ah, t, txtq);
1836
1837 for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
1838 if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
1839 ath5k_tx_processq(ah, &ah->txqs[i]);
1840
1841 ah->tx_pending = false;
1842 ath5k_set_current_imask(ah);
1843 }
1844
1845
1846
1847
1848
1849
1850
1851
1852
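/*
 * Prepare the beacon frame for transmission: DMA-map the skb and fill
 * the TX descriptor. In IBSS mode with VEOL support the descriptor is
 * self-linked so the hardware keeps re-sending the beacon on its own.
 */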
1853 static int
1854 ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
1855 {
1856 struct sk_buff *skb = bf->skb;
1857 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1858 struct ath5k_desc *ds;
1859 int ret = 0;
1860 u8 antenna;
1861 u32 flags;
1862 const int padsize = 0;
1863
1864 bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
1865 DMA_TO_DEVICE);
1866 ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
1867 "skbaddr %llx\n", skb, skb->data, skb->len,
1868 (unsigned long long)bf->skbaddr);
1869
1870 if (dma_mapping_error(ah->dev, bf->skbaddr)) {
1871 ATH5K_ERR(ah, "beacon DMA mapping failed\n");
1872 dev_kfree_skb_any(skb);
1873 bf->skb = NULL;
1874 return -EIO;
1875 }
1876
1877 ds = bf->desc;
1878 antenna = ah->ah_tx_ant;
1879
1880 flags = AR5K_TXDESC_NOACK;
1881 if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
1882 ds->ds_link = bf->daddr;
1883 flags |= AR5K_TXDESC_VEOL;
1884 } else
1885 ds->ds_link = 0;
1886
	/*
	 * If we use multiple antennas on an AP in the sectored antenna
	 * scenario, switch the beacon antenna every 4 beacons so that
	 * every sector gets to hear our AP.  For unicast traffic the
	 * hardware tracks the antenna to use per client automatically,
	 * based on ACKed packets.
	 *
	 * Note: the AP still listens and transmits RTS on the default
	 * antenna, which is supposed to be an omni.
	 */
1905 if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
1906 antenna = ah->bsent & 4 ? 2 : 1;
1907
1908
1909
1910
1911
1912 ds->ds_data = bf->skbaddr;
1913 ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
1914 ieee80211_get_hdrlen_from_skb(skb), padsize,
1915 AR5K_PKT_TYPE_BEACON,
1916 (ah->ah_txpower.txp_requested * 2),
1917 ieee80211_get_tx_rate(ah->hw, info)->hw_value,
1918 1, AR5K_TXKEYIX_INVALID,
1919 antenna, flags, 0, 0);
1920 if (ret)
1921 goto err_unmap;
1922
1923 return 0;
1924 err_unmap:
1925 dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
1926 return ret;
1927 }
1928
1929
1930
1931
1932
1933
1934
1935
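/*
 * Update the beacon for the given interface: free the old beacon skb,
 * fetch a new one from mac80211 and set up its descriptor.
 */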
1936 int
1937 ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1938 {
1939 int ret;
1940 struct ath5k_hw *ah = hw->priv;
1941 struct ath5k_vif *avf;
1942 struct sk_buff *skb;
1943
1944 if (WARN_ON(!vif)) {
1945 ret = -EINVAL;
1946 goto out;
1947 }
1948
1949 skb = ieee80211_beacon_get(hw, vif, 0);
1950
1951 if (!skb) {
1952 ret = -ENOMEM;
1953 goto out;
1954 }
1955
1956 avf = (void *)vif->drv_priv;
1957 ath5k_txbuf_free_skb(ah, avf->bbuf);
1958 avf->bbuf->skb = skb;
1959 ret = ath5k_beacon_setup(ah, avf->bbuf);
1960 out:
1961 return ret;
1962 }
1963
1964
1965
1966
1967
1968
1969
1970
1971
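/*
 * Transmit a beacon at SWBA (software beacon alert): detect stuck
 * beacons, pick the beacon slot when several AP/mesh interfaces share
 * the interval, refresh the beacon contents for AP/mesh, kick the beacon
 * queue and finally drain buffered multicast frames into the CAB queue.
 */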
1972 static void
1973 ath5k_beacon_send(struct ath5k_hw *ah)
1974 {
1975 struct ieee80211_vif *vif;
1976 struct ath5k_vif *avf;
1977 struct ath5k_buf *bf;
1978 struct sk_buff *skb;
1979 int err;
1980
1981 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");
1982
1983
1984
1985
1986
1987
1988
1989
1990 if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
1991 ah->bmisscount++;
1992 ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
1993 "missed %u consecutive beacons\n", ah->bmisscount);
1994 if (ah->bmisscount > 10) {
1995 ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
1996 "stuck beacon time (%u missed)\n",
1997 ah->bmisscount);
1998 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
1999 "stuck beacon, resetting\n");
2000 ieee80211_queue_work(ah->hw, &ah->reset_work);
2001 }
2002 return;
2003 }
2004 if (unlikely(ah->bmisscount != 0)) {
2005 ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
2006 "resume beacon xmit after %u misses\n",
2007 ah->bmisscount);
2008 ah->bmisscount = 0;
2009 }
2010
2011 if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs +
2012 ah->num_mesh_vifs > 1) ||
2013 ah->opmode == NL80211_IFTYPE_MESH_POINT) {
2014 u64 tsf = ath5k_hw_get_tsf64(ah);
2015 u32 tsftu = TSF_TO_TU(tsf);
2016 int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
2017 vif = ah->bslot[(slot + 1) % ATH_BCBUF];
2018 ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
2019 "tsf %llx tsftu %x intval %u slot %u vif %p\n",
2020 (unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
2021 } else
2022 vif = ah->bslot[0];
2023
2024 if (!vif)
2025 return;
2026
2027 avf = (void *)vif->drv_priv;
2028 bf = avf->bbuf;
2029
2030
2031
2032
2033
2034
2035 if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
2036 ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
2037
2038 }
2039
2040
2041 if (ah->opmode == NL80211_IFTYPE_AP ||
2042 ah->opmode == NL80211_IFTYPE_MESH_POINT) {
2043 err = ath5k_beacon_update(ah->hw, vif);
2044 if (err)
2045 return;
2046 }
2047
2048 if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
2049 ah->opmode == NL80211_IFTYPE_MONITOR)) {
2050 ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
2051 return;
2052 }
2053
2054 trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);
2055
2056 ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
2057 ath5k_hw_start_tx_dma(ah, ah->bhalq);
2058 ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
2059 ah->bhalq, (unsigned long long)bf->daddr, bf->desc);
2060
2061 skb = ieee80211_get_buffered_bc(ah->hw, vif);
2062 while (skb) {
2063 ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL);
2064
2065 if (ah->cabq->txq_len >= ah->cabq->txq_max)
2066 break;
2067
2068 skb = ieee80211_get_buffered_bc(ah->hw, vif);
2069 }
2070
2071 ah->bsent++;
2072 }
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
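/*
 * Calculate the next target beacon transmit time (TBTT) and program the
 * beacon timer registers. bc_tsf selects the mode: -1 bases the timers
 * on the current hardware TSF, 0 resets the TSF, and any other value is
 * taken as the timestamp of a received beacon to synchronize to.
 */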
2090 void
2091 ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
2092 {
2093 u32 nexttbtt, intval, hw_tu, bc_tu;
2094 u64 hw_tsf;
2095
2096 intval = ah->bintval & AR5K_BEACON_PERIOD;
2097 if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs
2098 + ah->num_mesh_vifs > 1) {
2099 intval /= ATH_BCBUF;
2100 if (intval < 15)
2101 ATH5K_WARN(ah, "intval %u is too low, min 15\n",
2102 intval);
2103 }
2104 if (WARN_ON(!intval))
2105 return;
2106
2107
2108 bc_tu = TSF_TO_TU(bc_tsf);
2109
2110
2111 hw_tsf = ath5k_hw_get_tsf64(ah);
2112 hw_tu = TSF_TO_TU(hw_tsf);
2113
2114 #define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
2115
2116
2117
2118
2119 if (bc_tsf == -1) {
2120
2121
2122
2123
2124 nexttbtt = roundup(hw_tu + FUDGE, intval);
2125 } else if (bc_tsf == 0) {
2126
2127
2128
2129
2130 nexttbtt = intval;
2131 intval |= AR5K_BEACON_RESET_TSF;
2132 } else if (bc_tsf > hw_tsf) {
2133
2134
2135
2136
2137
2138
2139
2140 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
2141 "need to wait for HW TSF sync\n");
2142 return;
2143 } else {
2144
2145
2146
2147
2148
2149
2150
2151 nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
2152 }
2153 #undef FUDGE
2154
2155 ah->nexttbtt = nexttbtt;
2156
2157 intval |= AR5K_BEACON_ENA;
2158 ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
2159
2160
2161
2162
2163
2164 if (bc_tsf == -1)
2165 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
2166 "reconfigured timers based on HW TSF\n");
2167 else if (bc_tsf == 0)
2168 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
2169 "reset HW TSF and timers\n");
2170 else
2171 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
2172 "updated timers based on beacon TSF\n");
2173
2174 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
2175 "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
2176 (unsigned long long) bc_tsf,
2177 (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
2178 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
2179 intval & AR5K_BEACON_PERIOD,
2180 intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
2181 intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
2182 }
2183
2184
2185
2186
2187
2188
2189
2190
2191
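/*
 * Configure beacon transmission and the related interrupts for the
 * current operating mode. On VEOL-capable hardware in IBSS mode the
 * self-linked beacon is kicked off once here instead of arming timers.
 */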
2192 void
2193 ath5k_beacon_config(struct ath5k_hw *ah)
2194 {
2195 spin_lock_bh(&ah->block);
2196 ah->bmisscount = 0;
2197 ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
2198
2199 if (ah->enable_beacon) {
2200
2201
2202
2203
2204
2205
2206
2207 ath5k_beaconq_config(ah);
2208
2209 ah->imask |= AR5K_INT_SWBA;
2210
2211 if (ah->opmode == NL80211_IFTYPE_ADHOC) {
2212 if (ath5k_hw_hasveol(ah))
2213 ath5k_beacon_send(ah);
2214 } else
2215 ath5k_beacon_update_timers(ah, -1);
2216 } else {
2217 ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
2218 }
2219
2220 ath5k_hw_set_imr(ah, ah->imask);
2221 spin_unlock_bh(&ah->block);
2222 }
2223
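/*
 * Beacon tasklet, scheduled from the SWBA interrupt. In IBSS mode (where
 * a self-linked beacon descriptor is used) it only tracks the next TBTT;
 * otherwise the beacon is sent from here under the beacon lock.
 */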
2224 static void ath5k_tasklet_beacon(struct tasklet_struct *t)
2225 {
2226 struct ath5k_hw *ah = from_tasklet(ah, t, beacontq);
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236 if (ah->opmode == NL80211_IFTYPE_ADHOC) {
2237
2238 u64 tsf = ath5k_hw_get_tsf64(ah);
2239 ah->nexttbtt += ah->bintval;
2240 ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
2241 "SWBA nexttbtt: %x hw_tu: %x "
2242 "TSF: %llx\n",
2243 ah->nexttbtt,
2244 TSF_TO_TU(tsf),
2245 (unsigned long long) tsf);
2246 } else {
2247 spin_lock(&ah->block);
2248 ath5k_beacon_send(ah);
2249 spin_unlock(&ah->block);
2250 }
2251 }
2252
2253
2254
2255
2256
2257
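/*
 * Called from the interrupt path: schedule ANI calibration or the
 * short/full calibration work once their intervals have expired and no
 * other calibration is currently in progress.
 */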
2258 static void
2259 ath5k_intr_calibration_poll(struct ath5k_hw *ah)
2260 {
2261 if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
2262 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
2263 !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
2264
2265
2266
2267 ah->ah_cal_next_ani = jiffies +
2268 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
2269 tasklet_schedule(&ah->ani_tasklet);
2270
2271 } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
2272 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
2273 !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
2274
2275
2276
2277
2278
2279
2280
2281
2282 ah->ah_cal_next_short = jiffies +
2283 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
2284 ieee80211_queue_work(ah->hw, &ah->calib_work);
2285 }
2286
2287
2288
2289 }
2290
2291 static void
2292 ath5k_schedule_rx(struct ath5k_hw *ah)
2293 {
2294 ah->rx_pending = true;
2295 tasklet_schedule(&ah->rxtq);
2296 }
2297
2298 static void
2299 ath5k_schedule_tx(struct ath5k_hw *ah)
2300 {
2301 ah->tx_pending = true;
2302 tasklet_schedule(&ah->txtq);
2303 }
2304
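/*
 * Main interrupt handler: read the interrupt status and dispatch it.
 * Fatal errors (and RX overruns on older MACs) queue the reset work;
 * RX, TX, beacon and rfkill events schedule their tasklets; TX underruns
 * raise the trigger level and MIB interrupts update the statistics.
 */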
2305 static irqreturn_t
2306 ath5k_intr(int irq, void *dev_id)
2307 {
2308 struct ath5k_hw *ah = dev_id;
2309 enum ath5k_int status;
2310 unsigned int counter = 1000;
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323 if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
2324 ((ath5k_get_bus_type(ah) != ATH_AHB) &&
2325 !ath5k_hw_is_intr_pending(ah))))
2326 return IRQ_NONE;
2327
2328
2329 do {
2330 ath5k_hw_get_isr(ah, &status);
2331
2332 ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
2333 status, ah->imask);
2334
2335
2336
2337
2338
2339
2340
2341
2342 if (unlikely(status & AR5K_INT_FATAL)) {
2343
2344 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2345 "fatal int, resetting\n");
2346 ieee80211_queue_work(ah->hw, &ah->reset_work);
2347
2348
2349
2350
2351
2352
2353
2354
2355 } else if (unlikely(status & AR5K_INT_RXORN)) {
2356
2357
2358
2359
2360
2361
2362
2363 ah->stats.rxorn_intr++;
2364
2365 if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
2366 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2367 "rx overrun, resetting\n");
2368 ieee80211_queue_work(ah->hw, &ah->reset_work);
2369 } else
2370 ath5k_schedule_rx(ah);
2371
2372 } else {
2373
2374
2375 if (status & AR5K_INT_SWBA)
2376 tasklet_hi_schedule(&ah->beacontq);
2377
2378
2379
2380
2381
2382
2383
2384
2385 if (status & AR5K_INT_RXEOL)
2386 ah->stats.rxeol_intr++;
2387
2388
2389
2390 if (status & AR5K_INT_TXURN)
2391 ath5k_hw_update_tx_triglevel(ah, true);
2392
2393
2394 if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
2395 ath5k_schedule_rx(ah);
2396
2397
2398 if (status & (AR5K_INT_TXOK
2399 | AR5K_INT_TXDESC
2400 | AR5K_INT_TXERR
2401 | AR5K_INT_TXEOL))
2402 ath5k_schedule_tx(ah);
2403
2404
2405
2406
2407
2408
2409 if (status & AR5K_INT_MIB) {
2410 ah->stats.mib_intr++;
2411 ath5k_hw_update_mib_counters(ah);
2412 ath5k_ani_mib_intr(ah);
2413 }
2414
2415
2416 if (status & AR5K_INT_GPIO)
2417 tasklet_schedule(&ah->rf_kill.toggleq);
2418
2419 }
2420
2421 if (ath5k_get_bus_type(ah) == ATH_AHB)
2422 break;
2423
2424 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
2425

/*
 * Keep the RX/TX interrupts masked until the corresponding
 * tasklets have run (rx_pending/tx_pending are cleared there).
 */
2432 if (ah->rx_pending || ah->tx_pending)
2433 ath5k_set_current_imask(ah);
2434
2435 if (unlikely(!counter))
2436 ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
2437
2438
2439 ath5k_intr_calibration_poll(ah);
2440
2441 return IRQ_HANDLED;
2442 }
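
/*
 * Note on the interrupt handler above: it does as little as possible in
 * hard-IRQ context. Fatal errors (and RX overruns on pre-5212 MACs)
 * schedule the reset work, RX/TX/beacon handling is deferred to the
 * tasklets, and only the MIB counters and the TX trigger level are
 * updated inline. The ISR is re-read while interrupts are pending
 * (bounded by 'counter') on PCI parts; AHB parts leave after one pass.
 */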
2443

/* Periodic calibration work: runs a full PHY calibration when its
 * interval has elapsed, otherwise a short one, and schedules a reset
 * if new RF gain values are needed. */

2448 static void
2449 ath5k_calibrate_work(struct work_struct *work)
2450 {
2451 struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
2452 calib_work);
2453
2454
2455 if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
2456
2457 ah->ah_cal_next_full = jiffies +
2458 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
2459 ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
2460
2461 ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
2462 "running full calibration\n");
2463
2464 if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {

/* The RF gain is out of bounds: reset the chip so the new gain
 * values get loaded. */

2469 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2470 "got new rfgain, resetting\n");
2471 ieee80211_queue_work(ah->hw, &ah->reset_work);
2472 }
2473 } else
2474 ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
2475
2476
2477 ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
2478 ieee80211_frequency_to_channel(ah->curchan->center_freq),
2479 ah->curchan->hw_value);
2480
2481 if (ath5k_hw_phy_calibrate(ah, ah->curchan))
2482 ATH5K_ERR(ah, "calibration of channel %u failed\n",
2483 ieee80211_frequency_to_channel(
2484 ah->curchan->center_freq));
2485
2486
2487 if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL)
2488 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
2489 else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
2490 ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
2491 }
2492
2493
2494 static void
2495 ath5k_tasklet_ani(struct tasklet_struct *t)
2496 {
2497 struct ath5k_hw *ah = from_tasklet(ah, t, ani_tasklet);
2498
2499 ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
2500 ath5k_ani_calibration(ah);
2501 ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
2502 }
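
/*
 * ANI runs from tasklet context while full/short calibration runs from
 * process context (calib_work); ath5k_intr_calibration_poll() consults
 * the ah_cal_mask bits so that ANI and the calibration work are not
 * scheduled while a full or short calibration is still in progress.
 */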
2503
2504
2505 static void
2506 ath5k_tx_complete_poll_work(struct work_struct *work)
2507 {
2508 struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
2509 tx_complete_work.work);
2510 struct ath5k_txq *txq;
2511 int i;
2512 bool needreset = false;
2513
2514 if (!test_bit(ATH_STAT_STARTED, ah->status))
2515 return;
2516
2517 mutex_lock(&ah->lock);
2518
2519 for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
2520 if (ah->txqs[i].setup) {
2521 txq = &ah->txqs[i];
2522 spin_lock_bh(&txq->lock);
2523 if (txq->txq_len > 1) {
2524 if (txq->txq_poll_mark) {
2525 ATH5K_DBG(ah, ATH5K_DEBUG_XMIT,
2526 "TX queue stuck %d\n",
2527 txq->qnum);
2528 needreset = true;
2529 txq->txq_stuck++;
2530 spin_unlock_bh(&txq->lock);
2531 break;
2532 } else {
2533 txq->txq_poll_mark = true;
2534 }
2535 }
2536 spin_unlock_bh(&txq->lock);
2537 }
2538 }
2539
2540 if (needreset) {
2541 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2542 "TX queues stuck, resetting\n");
2543 ath5k_reset(ah, NULL, true);
2544 }
2545
2546 mutex_unlock(&ah->lock);
2547
2548 ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
2549 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2550 }
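
/*
 * Stuck-queue detection above works in two passes: a queue holding more
 * than one frame is marked via txq_poll_mark on one run, and only if it
 * is still marked (and still busy) on the next run is it considered
 * stuck and the hardware reset.
 */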
2551
2552
2553
2554
2555
2556
2557 static const struct ieee80211_iface_limit if_limits[] = {
2558 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
2559 { .max = 4, .types =
2560 #ifdef CONFIG_MAC80211_MESH
2561 BIT(NL80211_IFTYPE_MESH_POINT) |
2562 #endif
2563 BIT(NL80211_IFTYPE_AP) },
2564 };
2565
2566 static const struct ieee80211_iface_combination if_comb = {
2567 .limits = if_limits,
2568 .n_limits = ARRAY_SIZE(if_limits),
2569 .max_interfaces = 2048,
2570 .num_different_channels = 1,
2571 };
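
/*
 * All interfaces share a single channel: up to 2048 station interfaces
 * may coexist, but at most four AP (or, when mesh support is built in,
 * mesh point) interfaces.
 */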
2572
2573 int
2574 ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2575 {
2576 struct ieee80211_hw *hw = ah->hw;
2577 struct ath_common *common;
2578 int ret;
2579 int csz;
2580
2581
2582 SET_IEEE80211_DEV(hw, ah->dev);
2583 ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
2584 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
2585 ieee80211_hw_set(hw, MFP_CAPABLE);
2586 ieee80211_hw_set(hw, SIGNAL_DBM);
2587 ieee80211_hw_set(hw, RX_INCLUDES_FCS);
2588 ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
2589
2590 hw->wiphy->interface_modes =
2591 BIT(NL80211_IFTYPE_AP) |
2592 BIT(NL80211_IFTYPE_STATION) |
2593 BIT(NL80211_IFTYPE_ADHOC) |
2594 BIT(NL80211_IFTYPE_MESH_POINT);
2595
2596 hw->wiphy->iface_combinations = &if_comb;
2597 hw->wiphy->n_iface_combinations = 1;
2598
2599
2600 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
2601
2602 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;

/* Both antennas can be configured for RX and TX */
2605 hw->wiphy->available_antennas_tx = 0x3;
2606 hw->wiphy->available_antennas_rx = 0x3;
2607
2608 hw->extra_tx_headroom = 2;
2609
2610 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
2611

/* Mark the device as detached (invalid) to avoid processing
 * interrupts until setup is complete. */

2616 __set_bit(ATH_STAT_INVALID, ah->status);
2617
2618 ah->opmode = NL80211_IFTYPE_STATION;
2619 ah->bintval = 1000;
2620 mutex_init(&ah->lock);
2621 spin_lock_init(&ah->rxbuflock);
2622 spin_lock_init(&ah->txbuflock);
2623 spin_lock_init(&ah->block);
2624 spin_lock_init(&ah->irqlock);
2625
2626
2627 ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah);
2628 if (ret) {
2629 ATH5K_ERR(ah, "request_irq failed\n");
2630 goto err;
2631 }
2632
2633 common = ath5k_hw_common(ah);
2634 common->ops = &ath5k_common_ops;
2635 common->bus_ops = bus_ops;
2636 common->ah = ah;
2637 common->hw = hw;
2638 common->priv = ah;
2639 common->clockrate = 40;
2640

/* The cache line size is used to size and align various structures
 * used to communicate with the hardware. */

2645 ath5k_read_cachesize(common, &csz);
2646 common->cachelsz = csz << 2;
2647
2648 spin_lock_init(&common->cc_lock);
2649
2650
2651 ret = ath5k_hw_init(ah);
2652 if (ret)
2653 goto err_irq;
2654
2655
2656 if (ah->ah_capabilities.cap_has_mrr_support) {
2657 hw->max_rates = 4;
2658 hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
2659 AR5K_INIT_RETRY_LONG);
2660 }
2661
2662 hw->vif_data_size = sizeof(struct ath5k_vif);
2663
2664
2665 ret = ath5k_init(hw);
2666 if (ret)
2667 goto err_ah;
2668
2669 ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
2670 ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev),
2671 ah->ah_mac_srev,
2672 ah->ah_phy_revision);
2673
2674 if (!ah->ah_single_chip) {
2675
2676 if (ah->ah_radio_5ghz_revision &&
2677 !ah->ah_radio_2ghz_revision) {
/* No 5GHz support -> report a 2GHz radio */
2679 if (!test_bit(AR5K_MODE_11A,
2680 ah->ah_capabilities.cap_mode)) {
2681 ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
2682 ath5k_chip_name(AR5K_VERSION_RAD,
2683 ah->ah_radio_5ghz_revision),
2684 ah->ah_radio_5ghz_revision);

/* No 2GHz support -> report a 5GHz radio */
2687 } else if (!test_bit(AR5K_MODE_11B,
2688 ah->ah_capabilities.cap_mode)) {
2689 ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
2690 ath5k_chip_name(AR5K_VERSION_RAD,
2691 ah->ah_radio_5ghz_revision),
2692 ah->ah_radio_5ghz_revision);
2693
2694 } else {
2695 ATH5K_INFO(ah, "RF%s multiband radio found"
2696 " (0x%x)\n",
2697 ath5k_chip_name(AR5K_VERSION_RAD,
2698 ah->ah_radio_5ghz_revision),
2699 ah->ah_radio_5ghz_revision);
2700 }
2701 }

/* Separate 5GHz and 2GHz radio chips -> report both */
2704 else if (ah->ah_radio_5ghz_revision &&
2705 ah->ah_radio_2ghz_revision) {
2706 ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
2707 ath5k_chip_name(AR5K_VERSION_RAD,
2708 ah->ah_radio_5ghz_revision),
2709 ah->ah_radio_5ghz_revision);
2710 ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
2711 ath5k_chip_name(AR5K_VERSION_RAD,
2712 ah->ah_radio_2ghz_revision),
2713 ah->ah_radio_2ghz_revision);
2714 }
2715 }
2716
2717 ath5k_debug_init_device(ah);
2718
2719
2720 __clear_bit(ATH_STAT_INVALID, ah->status);
2721
2722 return 0;
2723 err_ah:
2724 ath5k_hw_deinit(ah);
2725 err_irq:
2726 free_irq(ah->irq, ah);
2727 err:
2728 return ret;
2729 }
2730
2731 static int
2732 ath5k_stop_locked(struct ath5k_hw *ah)
2733 {
2734
2735 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n",
2736 test_bit(ATH_STAT_INVALID, ah->status));

/*
 * Shut down the hardware and driver:
 *    stop output from above
 *    disable interrupts
 *    turn off the LED
 *    stop receive and transmit DMA
 *    drain the transmit buffers
 *    disable the PHY
 *
 * Note that some of this work is not possible if the
 * hardware is gone (invalid).
 */

2753 ieee80211_stop_queues(ah->hw);
2754
2755 if (!test_bit(ATH_STAT_INVALID, ah->status)) {
2756 ath5k_led_off(ah);
2757 ath5k_hw_set_imr(ah, 0);
2758 synchronize_irq(ah->irq);
2759 ath5k_rx_stop(ah);
2760 ath5k_hw_dma_stop(ah);
2761 ath5k_drain_tx_buffs(ah);
2762 ath5k_hw_phy_disable(ah);
2763 }
2764
2765 return 0;
2766 }
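
/*
 * Note that ath5k_stop_locked() only quiesces the hardware (interrupts,
 * DMA, PHY); putting the chip on hold or powering it down is left to
 * the caller, see ath5k_stop() below.
 */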
2767
2768 int ath5k_start(struct ieee80211_hw *hw)
2769 {
2770 struct ath5k_hw *ah = hw->priv;
2771 struct ath_common *common = ath5k_hw_common(ah);
2772 int ret, i;
2773
2774 mutex_lock(&ah->lock);
2775
2776 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode);
2777

/* Stop anything previously set up. This is safe whether or not
 * this is the first time through. */

2782 ath5k_stop_locked(ah);
2783

/*
 * The basic interface for putting the hardware into a known good
 * state is reset: on return the hardware is powered up with
 * interrupts disabled. So set up the channel and the interrupt
 * mask first and then perform the reset.
 */

2791 ah->curchan = ah->hw->conf.chandef.chan;
2792 ah->imask = AR5K_INT_RXOK
2793 | AR5K_INT_RXERR
2794 | AR5K_INT_RXEOL
2795 | AR5K_INT_RXORN
2796 | AR5K_INT_TXDESC
2797 | AR5K_INT_TXEOL
2798 | AR5K_INT_FATAL
2799 | AR5K_INT_GLOBAL
2800 | AR5K_INT_MIB;
2801
2802 ret = ath5k_reset(ah, NULL, false);
2803 if (ret)
2804 goto done;
2805
2806 if (!ath5k_modparam_no_hw_rfkill_switch)
2807 ath5k_rfkill_hw_start(ah);
2808

/* Reset the key cache: some parts do not reset its contents on
 * initial power up or resume from suspend. */

2813 for (i = 0; i < common->keymax; i++)
2814 ath_hw_keyreset(common, (u16) i);
2815

/* Use higher bitrates for ACKs instead of the base rate */
2818 ah->ah_ack_bitrate_high = true;
2819
2820 for (i = 0; i < ARRAY_SIZE(ah->bslot); i++)
2821 ah->bslot[i] = NULL;
2822
2823 ret = 0;
2824 done:
2825 mutex_unlock(&ah->lock);
2826
2827 set_bit(ATH_STAT_STARTED, ah->status);
2828 ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
2829 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2830
2831 return ret;
2832 }
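
/*
 * The STARTED bit and the TX completion poll are set up after ah->lock
 * has been dropped; the poll work itself bails out early whenever
 * STARTED is not set.
 */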
2833
2834 static void ath5k_stop_tasklets(struct ath5k_hw *ah)
2835 {
2836 ah->rx_pending = false;
2837 ah->tx_pending = false;
2838 tasklet_kill(&ah->rxtq);
2839 tasklet_kill(&ah->txtq);
2840 tasklet_kill(&ah->beacontq);
2841 tasklet_kill(&ah->ani_tasklet);
2842 }
2843

/*
 * Stop the device, grabbing the top-level lock to protect against
 * concurrent entry through ath5k_init (which can happen if another
 * thread does a system call and the thread doing the stop is
 * preempted).
 */
2850 void ath5k_stop(struct ieee80211_hw *hw)
2851 {
2852 struct ath5k_hw *ah = hw->priv;
2853 int ret;
2854
2855 mutex_lock(&ah->lock);
2856 ret = ath5k_stop_locked(ah);
2857 if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) {

/*
 * Don't put the chip into full sleep here: waking up from full
 * sleep has proven unreliable on a number of MAC revisions.
 * Instead put the hardware on hold (warm reset with MAC/BB still
 * powered), the same state as right after attach.
 */

2878 ret = ath5k_hw_on_hold(ah);
2879
2880 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2881 "putting device to sleep\n");
2882 }
2883
2884 mutex_unlock(&ah->lock);
2885
2886 ath5k_stop_tasklets(ah);
2887
2888 clear_bit(ATH_STAT_STARTED, ah->status);
2889 cancel_delayed_work_sync(&ah->tx_complete_work);
2890
2891 if (!ath5k_modparam_no_hw_rfkill_switch)
2892 ath5k_rfkill_hw_stop(ah);
2893 }
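
/*
 * Tear-down order in ath5k_stop(): quiesce and hold the hardware under
 * ah->lock, then kill the tasklets, clear STARTED, cancel the TX
 * completion poll and finally stop the hardware RF kill handling.
 */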
2894

/*
 * Reset the hardware. If chan is not NULL, also pause RX/TX and
 * switch to the given channel.
 *
 * This should be called with ah->lock held.
 */
2901 static int
2902 ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
2903 bool skip_pcu)
2904 {
2905 struct ath_common *common = ath5k_hw_common(ah);
2906 int ret, ani_mode;
bool fast = chan && modparam_fastchanswitch;
2908
2909 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");
2910
2911 __set_bit(ATH_STAT_RESET, ah->status);
2912
2913 ath5k_hw_set_imr(ah, 0);
2914 synchronize_irq(ah->irq);
2915 ath5k_stop_tasklets(ah);
2916

/* Save the ANI mode and disable ANI during the reset so that it
 * does not interfere. */
2920 ani_mode = ah->ani_state.ani_mode;
2921 ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
2922

/* We are going to empty the hardware queues, so also free any
 * remaining TX buffers */
2926 ath5k_drain_tx_buffs(ah);
2927
2928
2929 ath5k_hw_stop_rx_pcu(ah);
2930

/* Stop RX/TX DMA. If the DMA engines do not stop, a fast channel
 * switch is not safe and we fall back to a full reset below. */

2936 ret = ath5k_hw_dma_stop(ah);
2937
2938
2939
2940
2941 if (ret && fast) {
2942 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2943 "DMA didn't stop, falling back to normal reset\n");
2944 fast = false;
2945 }
2946
2947 if (chan)
2948 ah->curchan = chan;
2949
2950 ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
2951 if (ret) {
2952 ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
2953 goto err;
2954 }
2955
2956 ret = ath5k_rx_start(ah);
2957 if (ret) {
2958 ATH5K_ERR(ah, "can't start recv logic\n");
2959 goto err;
2960 }
2961
2962 ath5k_ani_init(ah, ani_mode);
2963

/*
 * Re-arm the calibration timers.
 *
 * There is no need to run a calibration right away: some initial
 * calibration is already done as part of the reset, and these
 * timers get re-armed again on every reset (e.g. while scanning).
 */

2975 ah->ah_cal_next_full = jiffies +
2976 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
2977 ah->ah_cal_next_ani = jiffies +
2978 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
2979 ah->ah_cal_next_short = jiffies +
2980 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
2981
2982 ewma_beacon_rssi_init(&ah->ah_beacon_rssi_avg);
2983
2984
2985 memset(&ah->survey, 0, sizeof(ah->survey));
2986 spin_lock_bh(&common->cc_lock);
2987 ath_hw_cycle_counters_update(common);
2988 memset(&common->cc_survey, 0, sizeof(common->cc_survey));
2989 memset(&common->cc_ani, 0, sizeof(common->cc_ani));
2990 spin_unlock_bh(&common->cc_lock);
2991

/* The reset is done: re-enable beaconing and wake the TX queues */

3003 __clear_bit(ATH_STAT_RESET, ah->status);
3004
3005 ath5k_beacon_config(ah);
3006
3007
3008 ieee80211_wake_queues(ah->hw);
3009
3010 return 0;
3011 err:
3012 return ret;
3013 }
3014
3015 static void ath5k_reset_work(struct work_struct *work)
3016 {
3017 struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
3018 reset_work);
3019
3020 mutex_lock(&ah->lock);
3021 ath5k_reset(ah, NULL, true);
3022 mutex_unlock(&ah->lock);
3023 }
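
/*
 * ath5k_reset_work() re-runs ath5k_reset() under ah->lock without a
 * channel change; it is queued from the interrupt handler (fatal
 * errors, RX overruns on older MACs) and from the calibration code
 * when new RF gain settings require a reset.
 */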
3024
3025 static int
3026 ath5k_init(struct ieee80211_hw *hw)
3027 {
3028
3029 struct ath5k_hw *ah = hw->priv;
3030 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
3031 struct ath5k_txq *txq;
3032 u8 mac[ETH_ALEN] = {};
3033 int ret;
3034

/*
 * Collect the channel list. The 802.11 layer is responsible for
 * filtering this list based on settings like the phy mode and
 * regulatory domain restrictions.
 */

3042 ret = ath5k_setup_bands(hw);
3043 if (ret) {
3044 ATH5K_ERR(ah, "can't get channels\n");
3045 goto err;
3046 }
3047

/* Allocate the TX/RX descriptors */

3051 ret = ath5k_desc_alloc(ah);
3052 if (ret) {
3053 ATH5K_ERR(ah, "can't allocate descriptors\n");
3054 goto err;
3055 }
3056

/*
 * Allocate hardware transmit queues: one queue for beacon frames
 * and one data queue for each QoS priority. The hardware functions
 * handle resetting these queues at the appropriate time.
 */
3063 ret = ath5k_beaconq_setup(ah);
3064 if (ret < 0) {
3065 ATH5K_ERR(ah, "can't setup a beacon xmit queue\n");
3066 goto err_desc;
3067 }
3068 ah->bhalq = ret;
3069 ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0);
3070 if (IS_ERR(ah->cabq)) {
3071 ATH5K_ERR(ah, "can't setup cab queue\n");
3072 ret = PTR_ERR(ah->cabq);
3073 goto err_bhal;
3074 }
3075
3076
3077
3078 if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
3079
3080
3081 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
3082 if (IS_ERR(txq)) {
3083 ATH5K_ERR(ah, "can't setup xmit queue\n");
3084 ret = PTR_ERR(txq);
3085 goto err_queues;
3086 }
3087 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
3088 if (IS_ERR(txq)) {
3089 ATH5K_ERR(ah, "can't setup xmit queue\n");
3090 ret = PTR_ERR(txq);
3091 goto err_queues;
3092 }
3093 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
3094 if (IS_ERR(txq)) {
3095 ATH5K_ERR(ah, "can't setup xmit queue\n");
3096 ret = PTR_ERR(txq);
3097 goto err_queues;
3098 }
3099 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
3100 if (IS_ERR(txq)) {
3101 ATH5K_ERR(ah, "can't setup xmit queue\n");
3102 ret = PTR_ERR(txq);
3103 goto err_queues;
3104 }
3105 hw->queues = 4;
3106 } else {
3107
3108 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
3109 if (IS_ERR(txq)) {
3110 ATH5K_ERR(ah, "can't setup xmit queue\n");
3111 ret = PTR_ERR(txq);
3112 goto err_queues;
3113 }
3114 hw->queues = 1;
3115 }
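
/*
 * hw->queues tells mac80211 how many data queues it may use: four (one
 * per WME access category) when the hardware has enough TX queues,
 * otherwise a single best-effort queue.
 */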
3116
3117 tasklet_setup(&ah->rxtq, ath5k_tasklet_rx);
3118 tasklet_setup(&ah->txtq, ath5k_tasklet_tx);
3119 tasklet_setup(&ah->beacontq, ath5k_tasklet_beacon);
3120 tasklet_setup(&ah->ani_tasklet, ath5k_tasklet_ani);
3121
3122 INIT_WORK(&ah->reset_work, ath5k_reset_work);
3123 INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
3124 INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
3125
3126 ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
3127 if (ret) {
3128 ATH5K_ERR(ah, "unable to read address from EEPROM\n");
3129 goto err_queues;
3130 }
3131
3132 SET_IEEE80211_PERM_ADDR(hw, mac);
3133
3134 ath5k_update_bssid_mask_and_opmode(ah, NULL);
3135
3136 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
3137 ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
3138 if (ret) {
3139 ATH5K_ERR(ah, "can't initialize regulatory system\n");
3140 goto err_queues;
3141 }
3142
3143 ret = ieee80211_register_hw(hw);
3144 if (ret) {
3145 ATH5K_ERR(ah, "can't register ieee80211 hw\n");
3146 goto err_queues;
3147 }
3148
3149 if (!ath_is_world_regd(regulatory))
3150 regulatory_hint(hw->wiphy, regulatory->alpha2);
3151
3152 ath5k_init_leds(ah);
3153
3154 ath5k_sysfs_register(ah);
3155
3156 return 0;
3157 err_queues:
3158 ath5k_txq_release(ah);
3159 err_bhal:
3160 ath5k_hw_release_tx_queue(ah, ah->bhalq);
3161 err_desc:
3162 ath5k_desc_free(ah);
3163 err:
3164 return ret;
3165 }
3166
3167 void
3168 ath5k_deinit_ah(struct ath5k_hw *ah)
3169 {
3170 struct ieee80211_hw *hw = ah->hw;
3171

/*
 * The order matters here:
 * o unregister from the 802.11 layer before detaching ath5k_hw, so
 *   that callbacks into the driver (e.g. for deleting key cache
 *   entries) can still be handled
 * o reclaim the TX queue data structures only after the 802.11
 *   layer has been unregistered, since it may call back into the
 *   driver while detaching
 * o the hardware layer is needed to clean up the TX queues, so
 *   detach it (and free the IRQ) last
 */

3185 ieee80211_unregister_hw(hw);
3186 ath5k_desc_free(ah);
3187 ath5k_txq_release(ah);
3188 ath5k_hw_release_tx_queue(ah, ah->bhalq);
3189 ath5k_unregister_leds(ah);
3190
3191 ath5k_sysfs_unregister(ah);
3192
3193
3194
3195
3196
3197 ath5k_hw_deinit(ah);
3198 free_irq(ah->irq, ah);
3199 }
3200
3201 bool
3202 ath5k_any_vif_assoc(struct ath5k_hw *ah)
3203 {
3204 struct ath5k_vif_iter_data iter_data;
3205 iter_data.hw_macaddr = NULL;
3206 iter_data.any_assoc = false;
3207 iter_data.need_set_hw_addr = false;
3208 iter_data.found_active = true;
3209
3210 ieee80211_iterate_active_interfaces_atomic(
3211 ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
3212 ath5k_vif_iter, &iter_data);
3213 return iter_data.any_assoc;
3214 }
3215
3216 void
3217 ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3218 {
3219 struct ath5k_hw *ah = hw->priv;
3220 u32 rfilt;
3221 rfilt = ath5k_hw_get_rx_filter(ah);
3222 if (enable)
3223 rfilt |= AR5K_RX_FILTER_BEACON;
3224 else
3225 rfilt &= ~AR5K_RX_FILTER_BEACON;
3226 ath5k_hw_set_rx_filter(ah, rfilt);
3227 ah->filter_flags = rfilt;
3228 }
3229
3230 void _ath5k_printk(const struct ath5k_hw *ah, const char *level,
3231 const char *fmt, ...)
3232 {
3233 struct va_format vaf;
3234 va_list args;
3235
3236 va_start(args, fmt);
3237
3238 vaf.fmt = fmt;
3239 vaf.va = &args;
3240
3241 if (ah && ah->hw)
3242 printk("%s" pr_fmt("%s: %pV"),
3243 level, wiphy_name(ah->hw->wiphy), &vaf);
3244 else
3245 printk("%s" pr_fmt("%pV"), level, &vaf);
3246
3247 va_end(args);
3248 }