// SPDX-License-Identifier: GPL-2.0-only
/*
 * mac80211 mesh path selection - HWMP (Hybrid Wireless Mesh Protocol)
 *
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 */
0008 #include <linux/slab.h>
0009 #include <linux/etherdevice.h>
0010 #include <asm/unaligned.h>
0011 #include "wme.h"
0012 #include "mesh.h"
0013
0014 #define TEST_FRAME_LEN 8192
0015 #define MAX_METRIC 0xffffffff
0016 #define ARITH_SHIFT 8
0017 #define LINK_FAIL_THRESH 95
0018
0019 #define MAX_PREQ_QUEUE_LEN 64
0020
0021 static void mesh_queue_preq(struct mesh_path *, u8);
0022
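/*
 * Read little-endian PREQ/PREP/PERR fields. When the Address Extension (AE)
 * flag is set, a 6-byte external address precedes the field, so the offset
 * is shifted accordingly.
 */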
0023 static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
0024 {
0025 if (ae)
0026 offset += 6;
0027 return get_unaligned_le32(preq_elem + offset);
0028 }
0029
0030 static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
0031 {
0032 if (ae)
0033 offset += 6;
0034 return get_unaligned_le16(preq_elem + offset);
0035 }
0036
0037
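/*
 * Accessors for the fixed-offset fields of the HWMP information elements.
 * AE_F_SET() tests the Address Extension bit in the element flags; accessors
 * for fields located after the optional external address pass it on so the
 * 6-byte shift is applied when needed.
 */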
0038 #define AE_F (1<<6)
#define AE_F_SET(x) (*(x) & AE_F)
0040 #define PREQ_IE_FLAGS(x) (*(x))
0041 #define PREQ_IE_HOPCOUNT(x) (*(x + 1))
0042 #define PREQ_IE_TTL(x) (*(x + 2))
0043 #define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0)
0044 #define PREQ_IE_ORIG_ADDR(x) (x + 7)
0045 #define PREQ_IE_ORIG_SN(x) u32_field_get(x, 13, 0)
0046 #define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x))
0047 #define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x))
0048 #define PREQ_IE_TARGET_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26))
0049 #define PREQ_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27)
0050 #define PREQ_IE_TARGET_SN(x) u32_field_get(x, 33, AE_F_SET(x))
0051
0052
0053 #define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x)
0054 #define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x)
0055 #define PREP_IE_TTL(x) PREQ_IE_TTL(x)
0056 #define PREP_IE_ORIG_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
0057 #define PREP_IE_ORIG_SN(x) u32_field_get(x, 27, AE_F_SET(x))
0058 #define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x))
0059 #define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x))
0060 #define PREP_IE_TARGET_ADDR(x) (x + 3)
0061 #define PREP_IE_TARGET_SN(x) u32_field_get(x, 9, 0)
0062
0063 #define PERR_IE_TTL(x) (*(x))
0064 #define PERR_IE_TARGET_FLAGS(x) (*(x + 2))
0065 #define PERR_IE_TARGET_ADDR(x) (x + 3)
0066 #define PERR_IE_TARGET_SN(x) u32_field_get(x, 9, 0)
0067 #define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0)
0068
#define MSEC_TO_TU(x) ((x) * 1000 / 1024)
/* serial number comparisons are done modulo 2^32 */
#define SN_GT(x, y) ((s32)((y) - (x)) < 0)
#define SN_LT(x, y) ((s32)((x) - (y)) < 0)
#define MAX_SANE_SN_DELTA 32
0073
0074 static inline u32 SN_DELTA(u32 x, u32 y)
0075 {
0076 return x >= y ? x - y : y - x;
0077 }
0078
#define net_traversal_jiffies(s) \
msecs_to_jiffies((s)->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
MSEC_TO_TU((s)->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
(msecs_to_jiffies((s)->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) ((s)->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
msecs_to_jiffies((s)->u.mesh.mshcfg.min_discovery_timeout)
#define root_path_confirmation_jiffies(s) \
msecs_to_jiffies((s)->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)
0090
0091 enum mpath_frame_type {
0092 MPATH_PREQ = 0,
0093 MPATH_PREP,
0094 MPATH_PERR,
0095 MPATH_RANN
0096 };
0097
0098 static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
0099
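/*
 * mesh_path_sel_frame_tx - build and transmit a HWMP action frame
 *
 * Depending on @action this encodes a PREQ, PREP or RANN element into a
 * mesh action frame addressed to @da and hands it to the TX path.
 */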
0100 static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
0101 const u8 *orig_addr, u32 orig_sn,
0102 u8 target_flags, const u8 *target,
0103 u32 target_sn, const u8 *da,
0104 u8 hop_count, u8 ttl,
0105 u32 lifetime, u32 metric, u32 preq_id,
0106 struct ieee80211_sub_if_data *sdata)
0107 {
0108 struct ieee80211_local *local = sdata->local;
0109 struct sk_buff *skb;
0110 struct ieee80211_mgmt *mgmt;
0111 u8 *pos, ie_len;
0112 int hdr_len = offsetofend(struct ieee80211_mgmt,
0113 u.action.u.mesh_action);
0114
0115 skb = dev_alloc_skb(local->tx_headroom +
0116 hdr_len +
2 + 37); /* longest HWMP IE we build (PREQ) */
0118 if (!skb)
0119 return -1;
0120 skb_reserve(skb, local->tx_headroom);
0121 mgmt = skb_put_zero(skb, hdr_len);
0122 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
0123 IEEE80211_STYPE_ACTION);
0124
0125 memcpy(mgmt->da, da, ETH_ALEN);
0126 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
0128 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
0129 mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
0130 mgmt->u.action.u.mesh_action.action_code =
0131 WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
0132
0133 switch (action) {
0134 case MPATH_PREQ:
0135 mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
0136 ie_len = 37;
0137 pos = skb_put(skb, 2 + ie_len);
0138 *pos++ = WLAN_EID_PREQ;
0139 break;
0140 case MPATH_PREP:
0141 mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
0142 ie_len = 31;
0143 pos = skb_put(skb, 2 + ie_len);
0144 *pos++ = WLAN_EID_PREP;
0145 break;
0146 case MPATH_RANN:
0147 mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
0148 ie_len = sizeof(struct ieee80211_rann_ie);
0149 pos = skb_put(skb, 2 + ie_len);
0150 *pos++ = WLAN_EID_RANN;
0151 break;
0152 default:
0153 kfree_skb(skb);
0154 return -ENOTSUPP;
0155 }
0156 *pos++ = ie_len;
0157 *pos++ = flags;
0158 *pos++ = hop_count;
0159 *pos++ = ttl;
0160 if (action == MPATH_PREP) {
0161 memcpy(pos, target, ETH_ALEN);
0162 pos += ETH_ALEN;
0163 put_unaligned_le32(target_sn, pos);
0164 pos += 4;
0165 } else {
0166 if (action == MPATH_PREQ) {
0167 put_unaligned_le32(preq_id, pos);
0168 pos += 4;
0169 }
0170 memcpy(pos, orig_addr, ETH_ALEN);
0171 pos += ETH_ALEN;
0172 put_unaligned_le32(orig_sn, pos);
0173 pos += 4;
0174 }
0175 put_unaligned_le32(lifetime, pos);
0176 pos += 4;
0177 put_unaligned_le32(metric, pos);
0178 pos += 4;
0179 if (action == MPATH_PREQ) {
*pos++ = 1; /* one target */
0181 *pos++ = target_flags;
0182 memcpy(pos, target, ETH_ALEN);
0183 pos += ETH_ALEN;
0184 put_unaligned_le32(target_sn, pos);
0185 pos += 4;
0186 } else if (action == MPATH_PREP) {
0187 memcpy(pos, orig_addr, ETH_ALEN);
0188 pos += ETH_ALEN;
0189 put_unaligned_le32(orig_sn, pos);
0190 pos += 4;
0191 }
0192
0193 ieee80211_tx_skb(sdata, skb);
0194 return 0;
0195 }
0196
0197
0198
0199
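/*
 * Prepare a locally generated management frame for deferred transmission.
 * Headroom is not adjusted here; the caller must have reserved it already.
 */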
0200 static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
0201 struct sk_buff *skb)
0202 {
0203 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
0204 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
0205
0206 skb_reset_mac_header(skb);
0207 skb_reset_network_header(skb);
0208 skb_reset_transport_header(skb);
0209
/* send internally generated management frames on the VO queue (TID 7) */
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
skb->priority = 7;
0213
0214 info->control.vif = &sdata->vif;
0215 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
0216 ieee80211_set_qos_hdr(sdata, skb);
0217 ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
0218 }
/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @sdata: local mesh subif
 * @ttl: allowed remaining hops
 * @target: broken destination
 * @target_sn: SN of the broken destination
 * @target_rcode: reason code for this PERR
 * @ra: node this frame is addressed to
 *
 * The frame is not transmitted directly: it is prepared for deferred
 * transmission and placed on the pending queue, and PERR generation is
 * rate-limited by dot11MeshHWMPperrMinInterval.
 */
0234 int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
0235 u8 ttl, const u8 *target, u32 target_sn,
0236 u16 target_rcode, const u8 *ra)
0237 {
0238 struct ieee80211_local *local = sdata->local;
0239 struct sk_buff *skb;
0240 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
0241 struct ieee80211_mgmt *mgmt;
0242 u8 *pos, ie_len;
0243 int hdr_len = offsetofend(struct ieee80211_mgmt,
0244 u.action.u.mesh_action);
0245
0246 if (time_before(jiffies, ifmsh->next_perr))
0247 return -EAGAIN;
0248
0249 skb = dev_alloc_skb(local->tx_headroom +
0250 IEEE80211_ENCRYPT_HEADROOM +
0251 IEEE80211_ENCRYPT_TAILROOM +
0252 hdr_len +
2 + 15); /* PERR IE */
0254 if (!skb)
0255 return -1;
0256 skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
0257 mgmt = skb_put_zero(skb, hdr_len);
0258 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
0259 IEEE80211_STYPE_ACTION);
0260
0261 memcpy(mgmt->da, ra, ETH_ALEN);
0262 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
0264 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
0265 mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
0266 mgmt->u.action.u.mesh_action.action_code =
0267 WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
0268 ie_len = 15;
0269 pos = skb_put(skb, 2 + ie_len);
0270 *pos++ = WLAN_EID_PERR;
0271 *pos++ = ie_len;
0272
0273 *pos++ = ttl;
/* number of targets */
0275 *pos++ = 1;
/*
 * per-target flags: only the Address Extension bit is defined here and we
 * never include an external address, so leave all bits clear
 */
0279 *pos = 0;
0280 pos++;
0281 memcpy(pos, target, ETH_ALEN);
0282 pos += ETH_ALEN;
0283 put_unaligned_le32(target_sn, pos);
0284 pos += 4;
0285 put_unaligned_le16(target_rcode, pos);
0286
0287
0288 prepare_frame_for_deferred_tx(sdata, skb);
0289 ifmsh->next_perr = TU_TO_EXP_TIME(
0290 ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
0291 ieee80211_add_pending_skb(local, skb);
0292 return 0;
0293 }
0294
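/*
 * Update the per-peer failure and TX-rate averages from a TX status, and
 * tear down the peer link if the failure average exceeds LINK_FAIL_THRESH.
 */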
0295 void ieee80211s_update_metric(struct ieee80211_local *local,
0296 struct sta_info *sta,
0297 struct ieee80211_tx_status *st)
0298 {
0299 struct ieee80211_tx_info *txinfo = st->info;
0300 int failed;
0301 struct rate_info rinfo;
0302
0303 failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
0304
/*
 * moving failure average scaled to a percentage: feed 100 for a frame
 * that was not acked and 0 for a successful one
 */
0308 ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, failed * 100);
0309 if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) >
0310 LINK_FAIL_THRESH)
0311 mesh_plink_broken(sta);
0312
/* use the rate info reported by the driver, if present */
0314 if (st->n_rates)
0315 rinfo = sta->deflink.tx_stats.last_rate_info;
0316 else
0317 sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, &rinfo);
0318
0319 ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg,
0320 cfg80211_calculate_bitrate(&rinfo));
0321 }
0322
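/*
 * Compute the 802.11s airtime link metric towards @sta:
 *
 *   metric = (O + Bt / r) * 1 / (1 - ef)
 *
 * where O is a channel access overhead constant, Bt the test frame length,
 * r the estimated rate and ef the frame error rate. All terms are handled
 * in fixed point with ARITH_SHIFT fractional bits, and MAX_METRIC is
 * returned for links that are not established or too lossy to be usable.
 */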
0323 u32 airtime_link_metric_get(struct ieee80211_local *local,
0324 struct sta_info *sta)
0325 {
/* fixed-point values with ARITH_SHIFT fractional bits */
0327 int device_constant = 1 << ARITH_SHIFT;
0328 int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
0329 int s_unit = 1 << ARITH_SHIFT;
0330 int rate, err;
0331 u32 tx_time, estimated_retx;
0332 u64 result;
0333 unsigned long fail_avg =
0334 ewma_mesh_fail_avg_read(&sta->mesh->fail_avg);
0335
0336 if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
0337 return MAX_METRIC;
0338
/*
 * Try to get a rate from the rate-control algorithm's expected
 * throughput, which is reported in Kbps; convert to units of 100 Kbps
 * and round up so a usable link never yields rate == 0.
 */
0344 rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100);
0345
0346 if (rate) {
0347 err = 0;
0348 } else {
0349 if (fail_avg > LINK_FAIL_THRESH)
0350 return MAX_METRIC;
0351
0352 rate = ewma_mesh_tx_rate_avg_read(&sta->mesh->tx_rate_avg);
0353 if (WARN_ON(!rate))
0354 return MAX_METRIC;
0355
0356 err = (fail_avg << ARITH_SHIFT) / 100;
0357 }
0358
/*
 * rate is in units of 100 Kbps; the factor of 10 in tx_time converts
 * the division into an effective rate in Mbps
 */
0362 tx_time = (device_constant + 10 * test_frame_len / rate);
0363 estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
0364 result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
0365 return (u32)result;
0366 }

/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @sdata: local mesh subif
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 * @action: type of hwmp ie
 *
 * This function updates the path routing information to the originator and
 * the transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be
 * processed anymore. The non-zero metric is used for downstream path
 * selection.
 */
0385 static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
0386 struct ieee80211_mgmt *mgmt,
0387 const u8 *hwmp_ie, enum mpath_frame_type action)
0388 {
0389 struct ieee80211_local *local = sdata->local;
0390 struct mesh_path *mpath;
0391 struct sta_info *sta;
0392 bool fresh_info;
0393 const u8 *orig_addr, *ta;
0394 u32 orig_sn, orig_metric;
0395 unsigned long orig_lifetime, exp_time;
0396 u32 last_hop_metric, new_metric;
0397 bool process = true;
0398 u8 hopcount;
0399
0400 rcu_read_lock();
0401 sta = sta_info_get(sdata, mgmt->sa);
0402 if (!sta) {
0403 rcu_read_unlock();
0404 return 0;
0405 }
0406
0407 last_hop_metric = airtime_link_metric_get(local, sta);
0408
0409 fresh_info = true;
0410
0411 switch (action) {
0412 case MPATH_PREQ:
0413 orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
0414 orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
0415 orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
0416 orig_metric = PREQ_IE_METRIC(hwmp_ie);
0417 hopcount = PREQ_IE_HOPCOUNT(hwmp_ie) + 1;
0418 break;
0419 case MPATH_PREP:
/*
 * "Originator" here refers to the MP that was the target of the PREQ
 * this PREP answers; reusing the PREQ naming lets one function gather
 * path information from both PREQ and PREP frames.
 */
0425 orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
0426 orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
0427 orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
0428 orig_metric = PREP_IE_METRIC(hwmp_ie);
0429 hopcount = PREP_IE_HOPCOUNT(hwmp_ie) + 1;
0430 break;
0431 default:
0432 rcu_read_unlock();
0433 return 0;
0434 }
0435 new_metric = orig_metric + last_hop_metric;
0436 if (new_metric < orig_metric)
0437 new_metric = MAX_METRIC;
0438 exp_time = TU_TO_EXP_TIME(orig_lifetime);
0439
0440 if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
/*
 * We are the originator of this path information, so the frame carries
 * nothing new for us beyond the transmitter update done further below.
 */
0444 process = false;
0445 fresh_info = false;
0446 } else {
0447 mpath = mesh_path_lookup(sdata, orig_addr);
0448 if (mpath) {
0449 spin_lock_bh(&mpath->state_lock);
0450 if (mpath->flags & MESH_PATH_FIXED)
0451 fresh_info = false;
0452 else if ((mpath->flags & MESH_PATH_ACTIVE) &&
0453 (mpath->flags & MESH_PATH_SN_VALID)) {
0454 if (SN_GT(mpath->sn, orig_sn) ||
0455 (mpath->sn == orig_sn &&
0456 (rcu_access_pointer(mpath->next_hop) !=
0457 sta ?
0458 mult_frac(new_metric, 10, 9) :
0459 new_metric) >= mpath->metric)) {
0460 process = false;
0461 fresh_info = false;
0462 }
0463 } else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
0464 bool have_sn, newer_sn, bounced;
0465
0466 have_sn = mpath->flags & MESH_PATH_SN_VALID;
0467 newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
0468 bounced = have_sn &&
0469 (SN_DELTA(orig_sn, mpath->sn) >
0470 MAX_SANE_SN_DELTA);
0471
0472 if (!have_sn || newer_sn) {
/* no valid SN yet, or the received SN is newer: take it */
0474 ;
0475 } else if (bounced) {
/* SN differs by more than MAX_SANE_SN_DELTA: assume the
 * originator rebooted or its SN was reset, accept the new info */
0478 ;
0479 } else {
0480 process = false;
0481 fresh_info = false;
0482 }
0483 }
0484 } else {
0485 mpath = mesh_path_add(sdata, orig_addr);
0486 if (IS_ERR(mpath)) {
0487 rcu_read_unlock();
0488 return 0;
0489 }
0490 spin_lock_bh(&mpath->state_lock);
0491 }
0492
0493 if (fresh_info) {
0494 if (rcu_access_pointer(mpath->next_hop) != sta)
0495 mpath->path_change_count++;
0496 mesh_path_assign_nexthop(mpath, sta);
0497 mpath->flags |= MESH_PATH_SN_VALID;
0498 mpath->metric = new_metric;
0499 mpath->sn = orig_sn;
0500 mpath->exp_time = time_after(mpath->exp_time, exp_time)
0501 ? mpath->exp_time : exp_time;
0502 mpath->hop_count = hopcount;
0503 mesh_path_activate(mpath);
0504 spin_unlock_bh(&mpath->state_lock);
0505 ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
/* init the average at a low value; starting from 0 is tricky */
0507 ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
0508 mesh_path_tx_pending(mpath);
/*
 * the PREQ ID is not recorded; duplicate PREQs are effectively
 * filtered by the SN/metric comparison above
 */
0512 } else
0513 spin_unlock_bh(&mpath->state_lock);
0514 }
0515
/* Update mesh path info for the frame's transmitter */
0517 ta = mgmt->sa;
0518 if (ether_addr_equal(orig_addr, ta))
0519 fresh_info = false;
0520 else {
0521 fresh_info = true;
0522
0523 mpath = mesh_path_lookup(sdata, ta);
0524 if (mpath) {
0525 spin_lock_bh(&mpath->state_lock);
0526 if ((mpath->flags & MESH_PATH_FIXED) ||
0527 ((mpath->flags & MESH_PATH_ACTIVE) &&
0528 ((rcu_access_pointer(mpath->next_hop) != sta ?
0529 mult_frac(last_hop_metric, 10, 9) :
0530 last_hop_metric) > mpath->metric)))
0531 fresh_info = false;
0532 } else {
0533 mpath = mesh_path_add(sdata, ta);
0534 if (IS_ERR(mpath)) {
0535 rcu_read_unlock();
0536 return 0;
0537 }
0538 spin_lock_bh(&mpath->state_lock);
0539 }
0540
0541 if (fresh_info) {
0542 if (rcu_access_pointer(mpath->next_hop) != sta)
0543 mpath->path_change_count++;
0544 mesh_path_assign_nexthop(mpath, sta);
0545 mpath->metric = last_hop_metric;
0546 mpath->exp_time = time_after(mpath->exp_time, exp_time)
0547 ? mpath->exp_time : exp_time;
0548 mpath->hop_count = 1;
0549 mesh_path_activate(mpath);
0550 spin_unlock_bh(&mpath->state_lock);
0551 ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
/* init the average at a low value; starting from 0 is tricky */
0553 ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
0554 mesh_path_tx_pending(mpath);
0555 } else
0556 spin_unlock_bh(&mpath->state_lock);
0557 }
0558
0559 rcu_read_unlock();
0560
0561 return process ? new_metric : 0;
0562 }
0563
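/*
 * Process a received PREQ: reply with a PREP if we are the target (or may
 * answer on its behalf), and re-broadcast the PREQ with updated metric,
 * hop count and TTL when forwarding is enabled.
 */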
0564 static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
0565 struct ieee80211_mgmt *mgmt,
0566 const u8 *preq_elem, u32 orig_metric)
0567 {
0568 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
0569 struct mesh_path *mpath = NULL;
0570 const u8 *target_addr, *orig_addr;
0571 const u8 *da;
0572 u8 target_flags, ttl, flags;
0573 u32 orig_sn, target_sn, lifetime, target_metric = 0;
0574 bool reply = false;
0575 bool forward = true;
0576 bool root_is_gate;
0577
0578
0579 target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
0580 orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
0581 target_sn = PREQ_IE_TARGET_SN(preq_elem);
0582 orig_sn = PREQ_IE_ORIG_SN(preq_elem);
0583 target_flags = PREQ_IE_TARGET_F(preq_elem);
0584
0585 flags = PREQ_IE_FLAGS(preq_elem);
0586 root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
0587
0588 mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);
0589
0590 if (ether_addr_equal(target_addr, sdata->vif.addr)) {
0591 mhwmp_dbg(sdata, "PREQ is for us\n");
0592 forward = false;
0593 reply = true;
0594 target_metric = 0;
0595
0596 if (SN_GT(target_sn, ifmsh->sn))
0597 ifmsh->sn = target_sn;
0598
0599 if (time_after(jiffies, ifmsh->last_sn_update +
0600 net_traversal_jiffies(sdata)) ||
0601 time_before(jiffies, ifmsh->last_sn_update)) {
0602 ++ifmsh->sn;
0603 ifmsh->last_sn_update = jiffies;
0604 }
0605 target_sn = ifmsh->sn;
0606 } else if (is_broadcast_ether_addr(target_addr) &&
0607 (target_flags & IEEE80211_PREQ_TO_FLAG)) {
0608 rcu_read_lock();
0609 mpath = mesh_path_lookup(sdata, orig_addr);
0610 if (mpath) {
0611 if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
0612 reply = true;
0613 target_addr = sdata->vif.addr;
0614 target_sn = ++ifmsh->sn;
0615 target_metric = 0;
0616 ifmsh->last_sn_update = jiffies;
0617 }
0618 if (root_is_gate)
0619 mesh_path_add_gate(mpath);
0620 }
0621 rcu_read_unlock();
0622 } else {
0623 rcu_read_lock();
0624 mpath = mesh_path_lookup(sdata, target_addr);
0625 if (mpath) {
0626 if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
0627 SN_LT(mpath->sn, target_sn)) {
0628 mpath->sn = target_sn;
0629 mpath->flags |= MESH_PATH_SN_VALID;
0630 } else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
0631 (mpath->flags & MESH_PATH_ACTIVE)) {
0632 reply = true;
0633 target_metric = mpath->metric;
0634 target_sn = mpath->sn;
/* we replied as intermediate node: set Target Only in the forwarded
 * PREQ so only the target itself replies further */
0636 target_flags |= IEEE80211_PREQ_TO_FLAG;
0637 }
0638 }
0639 rcu_read_unlock();
0640 }
0641
0642 if (reply) {
0643 lifetime = PREQ_IE_LIFETIME(preq_elem);
0644 ttl = ifmsh->mshcfg.element_ttl;
0645 if (ttl != 0) {
0646 mhwmp_dbg(sdata, "replying to the PREQ\n");
0647 mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
0648 orig_sn, 0, target_addr,
0649 target_sn, mgmt->sa, 0, ttl,
0650 lifetime, target_metric, 0,
0651 sdata);
0652 } else {
0653 ifmsh->mshstats.dropped_frames_ttl++;
0654 }
0655 }
0656
0657 if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
0658 u32 preq_id;
0659 u8 hopcount;
0660
0661 ttl = PREQ_IE_TTL(preq_elem);
0662 lifetime = PREQ_IE_LIFETIME(preq_elem);
0663 if (ttl <= 1) {
0664 ifmsh->mshstats.dropped_frames_ttl++;
0665 return;
0666 }
0667 mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
0668 --ttl;
0669 preq_id = PREQ_IE_PREQ_ID(preq_elem);
0670 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
0671 da = (mpath && mpath->is_root) ?
0672 mpath->rann_snd_addr : broadcast_addr;
0673
0674 if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
0675 target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
0676 target_sn = PREQ_IE_TARGET_SN(preq_elem);
0677 }
0678
0679 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
0680 orig_sn, target_flags, target_addr,
0681 target_sn, da, hopcount, ttl, lifetime,
0682 orig_metric, preq_id, sdata);
0683 if (!is_multicast_ether_addr(da))
0684 ifmsh->mshstats.fwded_unicast++;
0685 else
0686 ifmsh->mshstats.fwded_mcast++;
0687 ifmsh->mshstats.fwded_frames++;
0688 }
0689 }
0690
0691
0692 static inline struct sta_info *
0693 next_hop_deref_protected(struct mesh_path *mpath)
0694 {
0695 return rcu_dereference_protected(mpath->next_hop,
0696 lockdep_is_held(&mpath->state_lock));
0697 }
0698
0699
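/*
 * Process a received PREP: if we are not the PREQ originator, forward the
 * PREP one hop towards the originator using the next hop stored for that
 * path.
 */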
0700 static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
0701 struct ieee80211_mgmt *mgmt,
0702 const u8 *prep_elem, u32 metric)
0703 {
0704 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
0705 struct mesh_path *mpath;
0706 const u8 *target_addr, *orig_addr;
0707 u8 ttl, hopcount, flags;
0708 u8 next_hop[ETH_ALEN];
0709 u32 target_sn, orig_sn, lifetime;
0710
0711 mhwmp_dbg(sdata, "received PREP from %pM\n",
0712 PREP_IE_TARGET_ADDR(prep_elem));
0713
0714 orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
0715 if (ether_addr_equal(orig_addr, sdata->vif.addr))
/* the PREP is for us (we originated the PREQ); nothing to forward */
0717 return;
0718
0719 if (!ifmsh->mshcfg.dot11MeshForwarding)
0720 return;
0721
0722 ttl = PREP_IE_TTL(prep_elem);
0723 if (ttl <= 1) {
0724 sdata->u.mesh.mshstats.dropped_frames_ttl++;
0725 return;
0726 }
0727
0728 rcu_read_lock();
0729 mpath = mesh_path_lookup(sdata, orig_addr);
0730 if (mpath)
0731 spin_lock_bh(&mpath->state_lock);
0732 else
0733 goto fail;
0734 if (!(mpath->flags & MESH_PATH_ACTIVE)) {
0735 spin_unlock_bh(&mpath->state_lock);
0736 goto fail;
0737 }
0738 memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
0739 spin_unlock_bh(&mpath->state_lock);
0740 --ttl;
0741 flags = PREP_IE_FLAGS(prep_elem);
0742 lifetime = PREP_IE_LIFETIME(prep_elem);
0743 hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
0744 target_addr = PREP_IE_TARGET_ADDR(prep_elem);
0745 target_sn = PREP_IE_TARGET_SN(prep_elem);
0746 orig_sn = PREP_IE_ORIG_SN(prep_elem);
0747
0748 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0,
0749 target_addr, target_sn, next_hop, hopcount,
0750 ttl, lifetime, metric, 0, sdata);
0751 rcu_read_unlock();
0752
0753 sdata->u.mesh.mshstats.fwded_unicast++;
0754 sdata->u.mesh.mshstats.fwded_frames++;
0755 return;
0756
0757 fail:
0758 rcu_read_unlock();
0759 sdata->u.mesh.mshstats.dropped_frames_no_route++;
0760 }
0761
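/*
 * Process a received PERR: deactivate the affected path if the PERR came
 * from our current next hop for it, and propagate the error unless
 * forwarding is disabled.
 */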
0762 static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
0763 struct ieee80211_mgmt *mgmt,
0764 const u8 *perr_elem)
0765 {
0766 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
0767 struct mesh_path *mpath;
0768 u8 ttl;
0769 const u8 *ta, *target_addr;
0770 u32 target_sn;
0771 u16 target_rcode;
0772
0773 ta = mgmt->sa;
0774 ttl = PERR_IE_TTL(perr_elem);
0775 if (ttl <= 1) {
0776 ifmsh->mshstats.dropped_frames_ttl++;
0777 return;
0778 }
0779 ttl--;
0780 target_addr = PERR_IE_TARGET_ADDR(perr_elem);
0781 target_sn = PERR_IE_TARGET_SN(perr_elem);
0782 target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
0783
0784 rcu_read_lock();
0785 mpath = mesh_path_lookup(sdata, target_addr);
0786 if (mpath) {
0787 struct sta_info *sta;
0788
0789 spin_lock_bh(&mpath->state_lock);
0790 sta = next_hop_deref_protected(mpath);
0791 if (mpath->flags & MESH_PATH_ACTIVE &&
0792 ether_addr_equal(ta, sta->sta.addr) &&
0793 !(mpath->flags & MESH_PATH_FIXED) &&
0794 (!(mpath->flags & MESH_PATH_SN_VALID) ||
0795 SN_GT(target_sn, mpath->sn) || target_sn == 0)) {
0796 mpath->flags &= ~MESH_PATH_ACTIVE;
0797 if (target_sn != 0)
0798 mpath->sn = target_sn;
0799 else
0800 mpath->sn += 1;
0801 spin_unlock_bh(&mpath->state_lock);
0802 if (!ifmsh->mshcfg.dot11MeshForwarding)
0803 goto endperr;
0804 mesh_path_error_tx(sdata, ttl, target_addr,
0805 target_sn, target_rcode,
0806 broadcast_addr);
0807 } else
0808 spin_unlock_bh(&mpath->state_lock);
0809 }
0810 endperr:
0811 rcu_read_unlock();
0812 }
0813
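/*
 * Process a root announcement (RANN): record the root and the neighbour we
 * heard it from, trigger a PREQ towards the root when the path needs to be
 * (re)confirmed, and re-broadcast the RANN with an updated metric.
 */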
0814 static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
0815 struct ieee80211_mgmt *mgmt,
0816 const struct ieee80211_rann_ie *rann)
0817 {
0818 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
0819 struct ieee80211_local *local = sdata->local;
0820 struct sta_info *sta;
0821 struct mesh_path *mpath;
0822 u8 ttl, flags, hopcount;
0823 const u8 *orig_addr;
0824 u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
0825 bool root_is_gate;
0826
0827 ttl = rann->rann_ttl;
0828 flags = rann->rann_flags;
0829 root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
0830 orig_addr = rann->rann_addr;
0831 orig_sn = le32_to_cpu(rann->rann_seq);
0832 interval = le32_to_cpu(rann->rann_interval);
0833 hopcount = rann->rann_hopcount;
0834 hopcount++;
0835 orig_metric = le32_to_cpu(rann->rann_metric);
0836
/* Ignore our own RANNs */
0838 if (ether_addr_equal(orig_addr, sdata->vif.addr))
0839 return;
0840
0841 mhwmp_dbg(sdata,
0842 "received RANN from %pM via neighbour %pM (is_gate=%d)\n",
0843 orig_addr, mgmt->sa, root_is_gate);
0844
0845 rcu_read_lock();
0846 sta = sta_info_get(sdata, mgmt->sa);
0847 if (!sta) {
0848 rcu_read_unlock();
0849 return;
0850 }
0851
0852 last_hop_metric = airtime_link_metric_get(local, sta);
0853 new_metric = orig_metric + last_hop_metric;
0854 if (new_metric < orig_metric)
0855 new_metric = MAX_METRIC;
0856
0857 mpath = mesh_path_lookup(sdata, orig_addr);
0858 if (!mpath) {
0859 mpath = mesh_path_add(sdata, orig_addr);
0860 if (IS_ERR(mpath)) {
0861 rcu_read_unlock();
0862 sdata->u.mesh.mshstats.dropped_frames_no_route++;
0863 return;
0864 }
0865 }
0866
0867 if (!(SN_LT(mpath->sn, orig_sn)) &&
0868 !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
0869 rcu_read_unlock();
0870 return;
0871 }
0872
0873 if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
0874 (time_after(jiffies, mpath->last_preq_to_root +
0875 root_path_confirmation_jiffies(sdata)) ||
0876 time_before(jiffies, mpath->last_preq_to_root))) &&
0877 !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
0878 mhwmp_dbg(sdata,
0879 "time to refresh root mpath %pM\n",
0880 orig_addr);
0881 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
0882 mpath->last_preq_to_root = jiffies;
0883 }
0884
0885 mpath->sn = orig_sn;
0886 mpath->rann_metric = new_metric;
0887 mpath->is_root = true;
0888
/* remember which neighbour the RANN came from so unicast PREQs towards
 * the root can be sent via it */
0890 memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
0891
0892 if (root_is_gate)
0893 mesh_path_add_gate(mpath);
0894
0895 if (ttl <= 1) {
0896 ifmsh->mshstats.dropped_frames_ttl++;
0897 rcu_read_unlock();
0898 return;
0899 }
0900 ttl--;
0901
0902 if (ifmsh->mshcfg.dot11MeshForwarding) {
0903 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
0904 orig_sn, 0, NULL, 0, broadcast_addr,
0905 hopcount, ttl, interval,
0906 new_metric, 0, sdata);
0907 }
0908
0909 rcu_read_unlock();
0910 }
0911
0912
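/*
 * Entry point for received HWMP action frames: parse the elements and
 * dispatch PREQ/PREP/PERR/RANN processing, but only for frames coming from
 * an established peer.
 */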
0913 void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
0914 struct ieee80211_mgmt *mgmt, size_t len)
0915 {
0916 struct ieee802_11_elems *elems;
0917 size_t baselen;
0918 u32 path_metric;
0919 struct sta_info *sta;
0920
/* frame must be long enough to hold the mesh action code */
0922 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
0923 return;
0924
0925 rcu_read_lock();
0926 sta = sta_info_get(sdata, mgmt->sa);
0927 if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
0928 rcu_read_unlock();
0929 return;
0930 }
0931 rcu_read_unlock();
0932
0933 baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
0934 elems = ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
0935 len - baselen, false, NULL);
0936 if (!elems)
0937 return;
0938
0939 if (elems->preq) {
0940 if (elems->preq_len != 37)
/* we only handle PREQs with a single target and no AE */
0942 goto free;
0943 path_metric = hwmp_route_info_get(sdata, mgmt, elems->preq,
0944 MPATH_PREQ);
0945 if (path_metric)
0946 hwmp_preq_frame_process(sdata, mgmt, elems->preq,
0947 path_metric);
0948 }
0949 if (elems->prep) {
0950 if (elems->prep_len != 31)
/* we only handle PREPs without the external address (AE) */
0952 goto free;
0953 path_metric = hwmp_route_info_get(sdata, mgmt, elems->prep,
0954 MPATH_PREP);
0955 if (path_metric)
0956 hwmp_prep_frame_process(sdata, mgmt, elems->prep,
0957 path_metric);
0958 }
0959 if (elems->perr) {
0960 if (elems->perr_len != 15)
/* we only handle PERRs with a single target */
0962 goto free;
0963 hwmp_perr_frame_process(sdata, mgmt, elems->perr);
0964 }
0965 if (elems->rann)
0966 hwmp_rann_frame_process(sdata, mgmt, elems->rann);
0967 free:
0968 kfree(elems);
0969 }
0970
/**
 * mesh_queue_preq - queue a PREQ towards a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * The PREQ itself is sent later by mesh_path_start_discovery(), run from
 * the interface work (possibly delayed by the mesh path timer).
 */
0980 static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
0981 {
0982 struct ieee80211_sub_if_data *sdata = mpath->sdata;
0983 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
0984 struct mesh_preq_queue *preq_node;
0985
0986 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
0987 if (!preq_node) {
0988 mhwmp_dbg(sdata, "could not allocate PREQ node\n");
0989 return;
0990 }
0991
0992 spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
0993 if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
0994 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
0995 kfree(preq_node);
0996 if (printk_ratelimit())
0997 mhwmp_dbg(sdata, "PREQ node queue full\n");
0998 return;
0999 }
1000
1001 spin_lock(&mpath->state_lock);
1002 if (mpath->flags & MESH_PATH_REQ_QUEUED) {
1003 spin_unlock(&mpath->state_lock);
1004 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1005 kfree(preq_node);
1006 return;
1007 }
1008
1009 memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
1010 preq_node->flags = flags;
1011
1012 mpath->flags |= MESH_PATH_REQ_QUEUED;
1013 spin_unlock(&mpath->state_lock);
1014
1015 list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
1016 ++ifmsh->preq_queue_len;
1017 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1018
1019 if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
1020 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
1021
1022 else if (time_before(jiffies, ifmsh->last_preq)) {
/*
 * jiffies wrapped around since the last PREQ; rebase last_preq so we
 * do not stall path discovery for a very long time
 */
1026 ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
1027 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
1028 } else
1029 mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
1030 min_preq_int_jiff(sdata));
1031 }
1032
/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @sdata: local mesh subif
 */
1038 void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
1039 {
1040 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1041 struct mesh_preq_queue *preq_node;
1042 struct mesh_path *mpath;
1043 u8 ttl, target_flags = 0;
1044 const u8 *da;
1045 u32 lifetime;
1046
1047 spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
1048 if (!ifmsh->preq_queue_len ||
1049 time_before(jiffies, ifmsh->last_preq +
1050 min_preq_int_jiff(sdata))) {
1051 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1052 return;
1053 }
1054
1055 preq_node = list_first_entry(&ifmsh->preq_queue.list,
1056 struct mesh_preq_queue, list);
1057 list_del(&preq_node->list);
1058 --ifmsh->preq_queue_len;
1059 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1060
1061 rcu_read_lock();
1062 mpath = mesh_path_lookup(sdata, preq_node->dst);
1063 if (!mpath)
1064 goto enddiscovery;
1065
1066 spin_lock_bh(&mpath->state_lock);
1067 if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
1068 spin_unlock_bh(&mpath->state_lock);
1069 goto enddiscovery;
1070 }
1071 mpath->flags &= ~MESH_PATH_REQ_QUEUED;
1072 if (preq_node->flags & PREQ_Q_F_START) {
1073 if (mpath->flags & MESH_PATH_RESOLVING) {
1074 spin_unlock_bh(&mpath->state_lock);
1075 goto enddiscovery;
1076 } else {
1077 mpath->flags &= ~MESH_PATH_RESOLVED;
1078 mpath->flags |= MESH_PATH_RESOLVING;
1079 mpath->discovery_retries = 0;
1080 mpath->discovery_timeout = disc_timeout_jiff(sdata);
1081 }
1082 } else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
1083 mpath->flags & MESH_PATH_RESOLVED) {
1084 mpath->flags &= ~MESH_PATH_RESOLVING;
1085 spin_unlock_bh(&mpath->state_lock);
1086 goto enddiscovery;
1087 }
1088
1089 ifmsh->last_preq = jiffies;
1090
1091 if (time_after(jiffies, ifmsh->last_sn_update +
1092 net_traversal_jiffies(sdata)) ||
1093 time_before(jiffies, ifmsh->last_sn_update)) {
1094 ++ifmsh->sn;
1095 sdata->u.mesh.last_sn_update = jiffies;
1096 }
1097 lifetime = default_lifetime(sdata);
1098 ttl = sdata->u.mesh.mshcfg.element_ttl;
1099 if (ttl == 0) {
1100 sdata->u.mesh.mshstats.dropped_frames_ttl++;
1101 spin_unlock_bh(&mpath->state_lock);
1102 goto enddiscovery;
1103 }
1104
1105 if (preq_node->flags & PREQ_Q_F_REFRESH)
1106 target_flags |= IEEE80211_PREQ_TO_FLAG;
1107 else
1108 target_flags &= ~IEEE80211_PREQ_TO_FLAG;
1109
1110 spin_unlock_bh(&mpath->state_lock);
1111 da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
1112 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
1113 target_flags, mpath->dst, mpath->sn, da, 0,
1114 ttl, lifetime, 0, ifmsh->preq_id++, sdata);
1115
1116 spin_lock_bh(&mpath->state_lock);
1117 if (!(mpath->flags & MESH_PATH_DELETED))
1118 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
1119 spin_unlock_bh(&mpath->state_lock);
1120
1121 enddiscovery:
1122 rcu_read_unlock();
1123 kfree(preq_node);
1124 }
1125
1126
/**
 * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
 *
 * @sdata: network subif the frame will be sent through
 * @skb: 802.11 frame to be sent
 *
 * Lookup the next hop for the given skb and start path discovery if no
 * forwarding information is found.
 *
 * Returns: 0 if the next hop was found. Otherwise the frame is queued on
 * the path's frame queue (or discarded if no path could be allocated) and
 * an error is returned.
 */
1138 int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
1139 struct sk_buff *skb)
1140 {
1141 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1142 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1143 struct mesh_path *mpath;
1144 struct sk_buff *skb_to_free = NULL;
1145 u8 *target_addr = hdr->addr3;
1146
/* QoS Null frames go to direct peers for powersave and are already addressed */
1148 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1149 return 0;
1150
/* frames flagged to skip the mesh path lookup go out as addressed */
1152 if (info->control.flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP)
1153 return 0;
1154
1155 if (!mesh_nexthop_lookup(sdata, skb))
1156 return 0;
1157
/* no next hop found, start resolving the path */
1159 mpath = mesh_path_lookup(sdata, target_addr);
1160 if (!mpath) {
1161 mpath = mesh_path_add(sdata, target_addr);
1162 if (IS_ERR(mpath)) {
1163 mesh_path_discard_frame(sdata, skb);
1164 return PTR_ERR(mpath);
1165 }
1166 }
1167
1168 if (!(mpath->flags & MESH_PATH_RESOLVING) &&
1169 mesh_path_sel_is_hwmp(sdata))
1170 mesh_queue_preq(mpath, PREQ_Q_F_START);
1171
1172 if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
1173 skb_to_free = skb_dequeue(&mpath->frame_queue);
1174
1175 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
1176 ieee80211_set_qos_hdr(sdata, skb);
1177 skb_queue_tail(&mpath->frame_queue, skb);
1178 if (skb_to_free)
1179 mesh_path_discard_frame(sdata, skb_to_free);
1180
1181 return -ENOENT;
1182 }
1183
1184
/**
 * mesh_nexthop_lookup_nolearn - try to set the next hop without path discovery
 *
 * @sdata: local mesh subif
 * @skb: 802.11 frame to be sent
 *
 * If the mesh DA (addr3) of a unicast frame is a direct, established peer,
 * address the frame to it (addr1) so it is transmitted directly, avoiding
 * PREQ/PREP path discovery.
 *
 * Returns: 0 if the next hop was set and -ENOENT otherwise.
 */
1195 static int mesh_nexthop_lookup_nolearn(struct ieee80211_sub_if_data *sdata,
1196 struct sk_buff *skb)
1197 {
1198 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1199 struct sta_info *sta;
1200
1201 if (is_multicast_ether_addr(hdr->addr1))
1202 return -ENOENT;
1203
1204 rcu_read_lock();
1205 sta = sta_info_get(sdata, hdr->addr3);
1206
1207 if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
1208 rcu_read_unlock();
1209 return -ENOENT;
1210 }
1211 rcu_read_unlock();
1212
1213 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
1214 memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
1215 return 0;
1216 }
1217
1218
/**
 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
 *
 * @sdata: network subif the frame will be sent through
 * @skb: 802.11 frame to be sent
 *
 * Calling this function counts as "using" the path, so a refresh PREQ is
 * queued if the path is about to expire.
 *
 * Returns: 0 if the next hop was found, -ENOENT otherwise.
 */
1228 int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata,
1229 struct sk_buff *skb)
1230 {
1231 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1232 struct mesh_path *mpath;
1233 struct sta_info *next_hop;
1234 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1235 u8 *target_addr = hdr->addr3;
1236
1237 if (ifmsh->mshcfg.dot11MeshNolearn &&
1238 !mesh_nexthop_lookup_nolearn(sdata, skb))
1239 return 0;
1240
1241 mpath = mesh_path_lookup(sdata, target_addr);
1242 if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
1243 return -ENOENT;
1244
1245 if (time_after(jiffies,
1246 mpath->exp_time -
1247 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
1248 ether_addr_equal(sdata->vif.addr, hdr->addr4) &&
1249 !(mpath->flags & MESH_PATH_RESOLVING) &&
1250 !(mpath->flags & MESH_PATH_FIXED))
1251 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
1252
1253 next_hop = rcu_dereference(mpath->next_hop);
1254 if (next_hop) {
1255 memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
1256 memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
1257 ieee80211_mps_set_frame_flags(sdata, next_hop, hdr);
1258 return 0;
1259 }
1260
1261 return -ENOENT;
1262 }
1263
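/*
 * Path discovery timer: retry the PREQ with exponential backoff until
 * dot11MeshHWMPmaxPREQretries is reached, then give up and either redirect
 * queued frames to a known mesh gate or flush them.
 */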
1264 void mesh_path_timer(struct timer_list *t)
1265 {
1266 struct mesh_path *mpath = from_timer(mpath, t, timer);
1267 struct ieee80211_sub_if_data *sdata = mpath->sdata;
1268 int ret;
1269
1270 if (sdata->local->quiescing)
1271 return;
1272
1273 spin_lock_bh(&mpath->state_lock);
1274 if (mpath->flags & MESH_PATH_RESOLVED ||
1275 (!(mpath->flags & MESH_PATH_RESOLVING))) {
1276 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
1277 spin_unlock_bh(&mpath->state_lock);
1278 } else if (mpath->discovery_retries < max_preq_retries(sdata)) {
1279 ++mpath->discovery_retries;
1280 mpath->discovery_timeout *= 2;
1281 mpath->flags &= ~MESH_PATH_REQ_QUEUED;
1282 spin_unlock_bh(&mpath->state_lock);
1283 mesh_queue_preq(mpath, 0);
1284 } else {
1285 mpath->flags &= ~(MESH_PATH_RESOLVING |
1286 MESH_PATH_RESOLVED |
1287 MESH_PATH_REQ_QUEUED);
1288 mpath->exp_time = jiffies;
1289 spin_unlock_bh(&mpath->state_lock);
1290 if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
1291 ret = mesh_path_send_to_gates(mpath);
1292 if (ret)
1293 mhwmp_dbg(sdata, "no gate was reachable\n");
1294 } else
1295 mesh_path_flush_pending(mpath);
1296 }
1297 }
1298
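/*
 * Periodic proactive root frames: depending on dot11MeshHWMPRootMode this
 * sends either a RANN or a proactive (broadcast-target) PREQ, optionally
 * requesting proactive PREPs.
 */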
1299 void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1300 {
1301 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1302 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
1303 u8 flags, target_flags = 0;
1304
1305 flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
1306 ? RANN_FLAG_IS_GATE : 0;
1307
1308 switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
1309 case IEEE80211_PROACTIVE_RANN:
1310 mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
1311 ++ifmsh->sn, 0, NULL, 0, broadcast_addr,
1312 0, ifmsh->mshcfg.element_ttl,
1313 interval, 0, 0, sdata);
1314 break;
1315 case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
1316 flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
1317 fallthrough;
1318 case IEEE80211_PROACTIVE_PREQ_NO_PREP:
1319 interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
1320 target_flags |= IEEE80211_PREQ_TO_FLAG |
1321 IEEE80211_PREQ_USN_FLAG;
1322 mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
1323 ++ifmsh->sn, target_flags,
1324 (u8 *) broadcast_addr, 0, broadcast_addr,
1325 0, ifmsh->mshcfg.element_ttl, interval,
1326 0, ifmsh->preq_id++, sdata);
1327 break;
1328 default:
1329 mhwmp_dbg(sdata, "Proactive mechanism not supported\n");
1330 return;
1331 }
1332 }