0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #include "rsi_mgmt.h"
0018 #include "rsi_common.h"
0019 #include "rsi_hal.h"
0020 #include "rsi_coex.h"
0021
0022
0023
0024
0025
0026
0027
0028
0029 static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
0030 {
0031 struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
0032 u32 q_len = 0;
0033 u8 ii = 0;
0034
0035 for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
0036 q_len = skb_queue_len(&common->tx_queue[ii]);
0037 if ((tx_qinfo[ii].pkt_contended) && q_len) {
0038 common->min_weight = tx_qinfo[ii].weight;
0039 break;
0040 }
0041 }
0042 return ii;
0043 }
0044
0045
0046
0047
0048
0049
0050
0051
0052 static bool rsi_recalculate_weights(struct rsi_common *common)
0053 {
0054 struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
0055 bool recontend_queue = false;
0056 u8 ii = 0;
0057 u32 q_len = 0;
0058
0059 for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
0060 q_len = skb_queue_len(&common->tx_queue[ii]);
0061
0062 if (q_len) {
0063 if (tx_qinfo[ii].pkt_contended) {
0064 tx_qinfo[ii].weight =
0065 ((tx_qinfo[ii].weight > common->min_weight) ?
0066 tx_qinfo[ii].weight - common->min_weight : 0);
0067 } else {
0068 tx_qinfo[ii].pkt_contended = 1;
0069 tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
0070 recontend_queue = true;
0071 }
0072 } else {
0073 tx_qinfo[ii].weight = 0;
0074 tx_qinfo[ii].pkt_contended = 0;
0075 }
0076 }
0077
0078 return recontend_queue;
0079 }
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
/**
 * rsi_get_num_pkts_dequeue() - Estimate how many queued packets fit within
 *				the queue's TXOP airtime budget.
 * @common: Pointer to the driver private structure.
 * @q_num: Software queue number to inspect.
 *
 * Walks the queue in place (without dequeuing) and subtracts each frame's
 * estimated on-air duration from the TXOP budget until it is exhausted or
 * the queue ends.
 *
 * Return: Number of packets that fit the budget; at least 1 when the queue
 *	   is non-empty, 0 when it is empty.
 */
static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	u32 pkt_cnt = 0;
	/* Configured txop is scaled by 32 here; presumably 32us units —
	 * TODO confirm against the WMM parameter encoding.
	 */
	s16 txop = common->tx_qinfo[q_num].txop * 32;
	__le16 r_txop;
	struct ieee80211_rate rate;
	struct ieee80211_hdr *wh;
	struct ieee80211_vif *vif;

	/* Worst-case duration estimate: assume every frame goes out at the
	 * MCS0 basic rate (bitrate field is in units of 100 kbps).
	 */
	rate.bitrate = RSI_RATE_MCS0 * 5 * 10;
	if (q_num == VI_Q)
		/* NOTE(review): extra (txop << 5) / 80 scaling applied only
		 * to the VI queue — rationale not visible here; confirm.
		 */
		txop = ((txop << 5) / 80);

	if (skb_queue_len(&common->tx_queue[q_num]))
		skb = skb_peek(&common->tx_queue[q_num]);
	else
		return 0;

	do {
		wh = (struct ieee80211_hdr *)skb->data;
		vif = rsi_get_vif(adapter, wh->addr2);
		/* Estimated airtime of this frame at the assumed rate. */
		r_txop = ieee80211_generic_frame_duration(adapter->hw,
							  vif,
							  common->band,
							  skb->len, &rate);
		txop -= le16_to_cpu(r_txop);
		pkt_cnt += 1;

		/* Advance through the queue; stop at the last packet. */
		if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
			skb = skb->next;
		else
			break;

	} while (txop > 0);

	return pkt_cnt;
}
0130
0131
0132
0133
0134
0135
0136
0137
/**
 * rsi_core_determine_hal_queue() - Arbitrate which software queue the TX
 *				    thread should service next.
 * @common: Pointer to the driver private structure.
 *
 * Priority order: beacon queue first, then the management queue (unless
 * blocked), then the EDCA data queues selected by a weighted contention
 * scheme. VO/VI winners are granted a multi-packet burst sized by their
 * TXOP budget.
 *
 * Return: The queue number to service, or INVALID_QUEUE when nothing is
 *	   eligible for transmission.
 */
static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
{
	bool recontend_queue = false;
	u32 q_len = 0;
	u8 q_num = INVALID_QUEUE;
	u8 ii = 0;

	/* Beacons always pre-empt everything else. */
	if (skb_queue_len(&common->tx_queue[MGMT_BEACON_Q])) {
		q_num = MGMT_BEACON_Q;
		return q_num;
	}
	/* Pending management frames go next; if the management queue is
	 * blocked, INVALID_QUEUE is returned rather than falling through
	 * to the data queues.
	 */
	if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
		if (!common->mgmt_q_block)
			q_num = MGMT_SOFT_Q;
		return q_num;
	}

	if (common->hw_data_qs_blocked)
		return q_num;

	/* A previous arbitration granted a burst from selected_qnum;
	 * keep draining it until the burst count is used up.
	 */
	if (common->pkt_cnt != 0) {
		--common->pkt_cnt;
		return common->selected_qnum;
	}

get_queue_num:
	recontend_queue = false;

	q_num = rsi_determine_min_weight_queue(common);

	ii = q_num;

	/* Among the remaining contended, backlogged queues, pick the one
	 * with the smallest weight (highest effective priority).
	 */
	for (; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		if (((common->tx_qinfo[ii].pkt_contended) &&
		     (common->tx_qinfo[ii].weight < common->min_weight)) &&
		    q_len) {
			common->min_weight = common->tx_qinfo[ii].weight;
			q_num = ii;
		}
	}

	/* The winner leaves contention for this round. */
	if (q_num < NUM_EDCA_QUEUES)
		common->tx_qinfo[q_num].pkt_contended = 0;

	/* Age the losers' weights; newly backlogged queues are re-armed. */
	recontend_queue = rsi_recalculate_weights(common);

	q_len = skb_queue_len(&common->tx_queue[q_num]);
	if (!q_len) {
		/* If any queue was (re)armed, arbitrate again; otherwise
		 * there is nothing to transmit.
		 */
		if (recontend_queue)
			goto get_queue_num;

		return INVALID_QUEUE;
	}

	common->selected_qnum = q_num;
	q_len = skb_queue_len(&common->tx_queue[q_num]);

	/* Voice/video winners get a TXOP-sized burst; pkt_cnt counts the
	 * extra packets beyond the one granted by this call.
	 */
	if (q_num == VO_Q || q_num == VI_Q) {
		common->pkt_cnt = rsi_get_num_pkts_dequeue(common, q_num);
		common->pkt_cnt -= 1;
	}

	return q_num;
}
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218 static void rsi_core_queue_pkt(struct rsi_common *common,
0219 struct sk_buff *skb)
0220 {
0221 u8 q_num = skb->priority;
0222 if (q_num >= NUM_SOFT_QUEUES) {
0223 rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
0224 __func__, q_num);
0225 dev_kfree_skb(skb);
0226 return;
0227 }
0228
0229 skb_queue_tail(&common->tx_queue[q_num], skb);
0230 }
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240 static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
0241 u8 q_num)
0242 {
0243 if (q_num >= NUM_SOFT_QUEUES) {
0244 rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
0245 __func__, q_num);
0246 return NULL;
0247 }
0248
0249 return skb_dequeue(&common->tx_queue[q_num]);
0250 }
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
/**
 * rsi_core_qos_processor() - TX thread worker: drain the software queues.
 * @common: Pointer to the driver private structure.
 *
 * Repeatedly arbitrates a queue, dequeues one packet and hands it to the
 * bus (beacons), the coex mux, or the mgmt/data send paths. Stops when no
 * queue is eligible, the hardware reports no room, a send fails, or a
 * hibernate-resume is in progress.
 */
void rsi_core_qos_processor(struct rsi_common *common)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	unsigned long tstamp_1, tstamp_2;
	u8 q_num;
	int status;

	tstamp_1 = jiffies;
	while (1) {
		q_num = rsi_core_determine_hal_queue(common);
		rsi_dbg(DATA_TX_ZONE,
			"%s: Queue number = %d\n", __func__, q_num);

		if (q_num == INVALID_QUEUE) {
			rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
			break;
		}
		/* Do not touch the hardware while resuming from hibernate. */
		if (common->hibernate_resume)
			break;

		mutex_lock(&common->tx_lock);

		/* Stop when the hardware has no buffer space for this queue. */
		status = adapter->check_hw_queue_status(adapter, q_num);
		if ((status <= 0)) {
			mutex_unlock(&common->tx_lock);
			break;
		}

		/* Data queue drained below the low-water mark: let mac80211
		 * feed us again.
		 */
		if ((q_num < MGMT_SOFT_Q) &&
		    ((skb_queue_len(&common->tx_queue[q_num])) <=
		     MIN_DATA_QUEUE_WATER_MARK)) {
			if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
				ieee80211_wake_queue(adapter->hw,
						     WME_AC(q_num));
		}

		skb = rsi_core_dequeue_pkt(common, q_num);
		if (skb == NULL) {
			rsi_dbg(ERR_ZONE, "skb null\n");
			mutex_unlock(&common->tx_lock);
			break;
		}
		/* Beacons go straight to the bus; other traffic goes via the
		 * coex mux when enabled, else the mgmt/data send paths.
		 */
		if (q_num == MGMT_BEACON_Q) {
			status = rsi_send_pkt_to_bus(common, skb);
			dev_kfree_skb(skb);
		} else {
#ifdef CONFIG_RSI_COEX
			if (common->coex_mode > 1) {
				status = rsi_coex_send_pkt(common, skb,
							   RSI_WLAN_Q);
			} else {
#endif
				if (q_num == MGMT_SOFT_Q)
					status = rsi_send_mgmt_pkt(common, skb);
				else
					status = rsi_send_data_pkt(common, skb);
#ifdef CONFIG_RSI_COEX
			}
#endif
		}

		if (status) {
			mutex_unlock(&common->tx_lock);
			break;
		}

		common->tx_stats.total_tx_pkt_send[q_num]++;

		tstamp_2 = jiffies;
		mutex_unlock(&common->tx_lock);

		/* Yield the CPU after roughly 300 ms of continuous TX work. */
		if (time_after(tstamp_2, tstamp_1 + (300 * HZ) / 1000))
			schedule();
	}
}
0337
0338 struct rsi_sta *rsi_find_sta(struct rsi_common *common, u8 *mac_addr)
0339 {
0340 int i;
0341
0342 for (i = 0; i < common->max_stations; i++) {
0343 if (!common->stations[i].sta)
0344 continue;
0345 if (!(memcmp(common->stations[i].sta->addr,
0346 mac_addr, ETH_ALEN)))
0347 return &common->stations[i];
0348 }
0349 return NULL;
0350 }
0351
0352 struct ieee80211_vif *rsi_get_vif(struct rsi_hw *adapter, u8 *mac)
0353 {
0354 struct ieee80211_vif *vif;
0355 int i;
0356
0357 for (i = 0; i < RSI_MAX_VIFS; i++) {
0358 vif = adapter->vifs[i];
0359 if (!vif)
0360 continue;
0361 if (!memcmp(vif->addr, mac, ETH_ALEN))
0362 return vif;
0363 }
0364 return NULL;
0365 }
0366
0367
0368
0369
0370
0371
0372
0373
/**
 * rsi_core_xmit() - Driver TX entry point: classify a frame, prepare its
 *		     descriptor and push it to a software queue.
 * @common: Pointer to the driver private structure.
 * @skb: Frame handed down from mac80211.
 *
 * Management/control frames go to MGMT_SOFT_Q; data frames go to the EDCA
 * queue derived from their QoS TID (EAPOL frames are redirected to the
 * management queue). On any failure the skb is freed via mac80211.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_tx_info *info;
	struct skb_info *tx_params;
	struct ieee80211_hdr *wh = NULL;
	struct ieee80211_vif *vif;
	u8 q_num, tid = 0;
	struct rsi_sta *rsta = NULL;

	if ((!skb) || (!skb->len)) {
		rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
			__func__);
		goto xmit_fail;
	}
	/* No TX until the MAC is fully initialised. */
	if (common->fsm_state != FSM_MAC_INIT_DONE) {
		rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
		goto xmit_fail;
	}
	/* Firmware owns the link while WoWLAN is armed. */
	if (common->wow_flags & RSI_WOW_ENABLED) {
		rsi_dbg(ERR_ZONE,
			"%s: Blocking Tx_packets when WOWLAN is enabled\n",
			__func__);
		goto xmit_fail;
	}

	info = IEEE80211_SKB_CB(skb);
	tx_params = (struct skb_info *)info->driver_data;

	/* Record whether mac80211 attached a hardware key to this frame. */
	tx_params->have_key = !!info->control.hw_key;
	wh = (struct ieee80211_hdr *)&skb->data[0];
	tx_params->sta_id = 0;

	/* Resolve the owning vif from the transmitter address (addr2). */
	vif = rsi_get_vif(adapter, wh->addr2);
	if (!vif)
		goto xmit_fail;
	tx_params->vif = vif;
	tx_params->vap_id = ((struct vif_priv *)vif->drv_priv)->vap_id;
	if ((ieee80211_is_mgmt(wh->frame_control)) ||
	    (ieee80211_is_ctl(wh->frame_control)) ||
	    (ieee80211_is_qos_nullfunc(wh->frame_control))) {
		/* On (re)association, tell the firmware about the pending
		 * connection before the request goes out.
		 */
		if (ieee80211_is_assoc_req(wh->frame_control) ||
		    ieee80211_is_reassoc_req(wh->frame_control)) {
			struct ieee80211_bss_conf *bss = &vif->bss_conf;

			common->eapol4_confirm = false;
			rsi_hal_send_sta_notify_frame(common,
						      RSI_IFTYPE_STATION,
						      STA_CONNECTED, bss->bssid,
						      bss->qos, vif->cfg.aid,
						      0,
						      vif);
		}

		q_num = MGMT_SOFT_Q;
		skb->priority = q_num;

		if (rsi_prepare_mgmt_desc(common, skb)) {
			rsi_dbg(ERR_ZONE, "Failed to prepare desc\n");
			goto xmit_fail;
		}
	} else {
		/* Data path: map the QoS TID to an EDCA access category;
		 * non-QoS data defaults to best effort.
		 */
		if (ieee80211_is_data_qos(wh->frame_control)) {
			u8 *qos = ieee80211_get_qos_ctl(wh);

			tid = *qos & IEEE80211_QOS_CTL_TID_MASK;
			skb->priority = TID_TO_WME_AC(tid);
		} else {
			tid = IEEE80211_NONQOS_TID;
			skb->priority = BE_Q;
		}

		q_num = skb->priority;
		tx_params->tid = tid;

		/* In AP/GO mode, unicast frames need the destination
		 * station's firmware id; drop if the station is unknown.
		 */
		if (((vif->type == NL80211_IFTYPE_AP) ||
		     (vif->type == NL80211_IFTYPE_P2P_GO)) &&
		    (!is_broadcast_ether_addr(wh->addr1)) &&
		    (!is_multicast_ether_addr(wh->addr1))) {
			rsta = rsi_find_sta(common, wh->addr1);
			if (!rsta)
				goto xmit_fail;
			tx_params->sta_id = rsta->sta_id;
		} else {
			tx_params->sta_id = 0;
		}

		if (rsta) {
			/* Kick off a BlockAck session the first time a TID
			 * is used towards this station.
			 */
			if (!rsta->start_tx_aggr[tid]) {
				rsta->start_tx_aggr[tid] = true;
				ieee80211_start_tx_ba_session(rsta->sta,
							      tid, 0);
			}
		}
		/* EAPOL frames bypass the data queues so the 4-way
		 * handshake is not starved by data traffic.
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
			q_num = MGMT_SOFT_Q;
			skb->priority = q_num;
		}
		if (rsi_prepare_data_desc(common, skb)) {
			rsi_dbg(ERR_ZONE, "Failed to prepare data desc\n");
			goto xmit_fail;
		}
	}

	/* Backpressure: stop the mac80211 queue when the software queue
	 * reaches the high-water mark, and drop this frame.
	 */
	if ((q_num < MGMT_SOFT_Q) &&
	    ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
	     DATA_QUEUE_WATER_MARK)) {
		rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
		if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
			ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
		rsi_set_event(&common->tx_thread.event);
		goto xmit_fail;
	}

	rsi_core_queue_pkt(common, skb);
	rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
	/* Wake the TX thread to service the queues. */
	rsi_set_event(&common->tx_thread.event);

	return;

xmit_fail:
	rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
	/* Free through mac80211 so TX status accounting stays consistent. */
	ieee80211_free_txskb(common->priv->hw, skb);
}