0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #include <linux/slab.h>
0018 #include <linux/spinlock.h>
0019
0020 #include "ath.h"
0021 #include "dfs_pattern_detector.h"
0022 #include "dfs_pri_detector.h"
0023
/* Pool usage statistics, shared by all detector instances. */
struct ath_dfs_pool_stats global_dfs_pool_stats = {};

/* Bump/drop one counter in the shared pool statistics. */
#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
/*
 * If the spec's PRI window [MIN, MAX] collapses to a single value once
 * PRI_TOLERANCE is applied to both ends, use that fixed value; otherwise
 * use the PRI measured at runtime.
 */
#define GET_PRI_TO_USE(MIN, MAX, RUNTIME) \
	(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
	MIN + PRI_TOLERANCE : RUNTIME)
0031
0032
0033
0034
/**
 * struct pulse_elem - element in a detector's pulse queue
 * @head: list linkage within pri_detector::pulses
 * @ts: time stamp of the pulse
 */
struct pulse_elem {
	struct list_head head;
	u64 ts;
};
0039
0040
0041
0042
0043
0044 static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
0045 {
0046 u32 remainder;
0047 u32 factor;
0048 u32 delta;
0049
0050 if (fraction == 0)
0051 return 0;
0052
0053 delta = (val < fraction) ? (fraction - val) : (val - fraction);
0054
0055 if (delta <= tolerance)
0056
0057 return 1;
0058
0059 factor = val / fraction;
0060 remainder = val % fraction;
0061 if (remainder > tolerance) {
0062
0063 if ((fraction - remainder) <= tolerance)
0064
0065 factor++;
0066 else
0067 factor = 0;
0068 }
0069 return factor;
0070 }
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
/* Singleton element pools, shared by all pri_detector instances. */
static u32 singleton_pool_references;	/* number of live detector instances */
static LIST_HEAD(pulse_pool);		/* recycled struct pulse_elem entries */
static LIST_HEAD(pseq_pool);		/* recycled struct pri_sequence entries */
static DEFINE_SPINLOCK(pool_lock);	/* protects the pools and refcount above */
0085
0086 static void pool_register_ref(void)
0087 {
0088 spin_lock_bh(&pool_lock);
0089 singleton_pool_references++;
0090 DFS_POOL_STAT_INC(pool_reference);
0091 spin_unlock_bh(&pool_lock);
0092 }
0093
/*
 * Drop one reference on the shared singleton pools. When the last
 * reference is gone, free all recycled pulse and sequence elements.
 */
static void pool_deregister_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references--;
	DFS_POOL_STAT_DEC(pool_reference);
	if (singleton_pool_references == 0) {
		/* last user gone: release all pooled memory */
		struct pri_sequence *ps, *ps0;
		struct pulse_elem *p, *p0;

		list_for_each_entry_safe(p, p0, &pulse_pool, head) {
			list_del(&p->head);
			DFS_POOL_STAT_DEC(pulse_allocated);
			kfree(p);
		}
		list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
			list_del(&ps->head);
			DFS_POOL_STAT_DEC(pseq_allocated);
			kfree(ps);
		}
	}
	spin_unlock_bh(&pool_lock);
}
0117
0118 static void pool_put_pulse_elem(struct pulse_elem *pe)
0119 {
0120 spin_lock_bh(&pool_lock);
0121 list_add(&pe->head, &pulse_pool);
0122 DFS_POOL_STAT_DEC(pulse_used);
0123 spin_unlock_bh(&pool_lock);
0124 }
0125
0126 static void pool_put_pseq_elem(struct pri_sequence *pse)
0127 {
0128 spin_lock_bh(&pool_lock);
0129 list_add(&pse->head, &pseq_pool);
0130 DFS_POOL_STAT_DEC(pseq_used);
0131 spin_unlock_bh(&pool_lock);
0132 }
0133
0134 static struct pri_sequence *pool_get_pseq_elem(void)
0135 {
0136 struct pri_sequence *pse = NULL;
0137 spin_lock_bh(&pool_lock);
0138 if (!list_empty(&pseq_pool)) {
0139 pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
0140 list_del(&pse->head);
0141 DFS_POOL_STAT_INC(pseq_used);
0142 }
0143 spin_unlock_bh(&pool_lock);
0144 return pse;
0145 }
0146
0147 static struct pulse_elem *pool_get_pulse_elem(void)
0148 {
0149 struct pulse_elem *pe = NULL;
0150 spin_lock_bh(&pool_lock);
0151 if (!list_empty(&pulse_pool)) {
0152 pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
0153 list_del(&pe->head);
0154 DFS_POOL_STAT_INC(pulse_used);
0155 }
0156 spin_unlock_bh(&pool_lock);
0157 return pe;
0158 }
0159
0160 static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
0161 {
0162 struct list_head *l = &pde->pulses;
0163 if (list_empty(l))
0164 return NULL;
0165 return list_entry(l->prev, struct pulse_elem, head);
0166 }
0167
0168 static bool pulse_queue_dequeue(struct pri_detector *pde)
0169 {
0170 struct pulse_elem *p = pulse_queue_get_tail(pde);
0171 if (p != NULL) {
0172 list_del_init(&p->head);
0173 pde->count--;
0174
0175 pool_put_pulse_elem(p);
0176 }
0177 return (pde->count > 0);
0178 }
0179
0180
0181 static void pulse_queue_check_window(struct pri_detector *pde)
0182 {
0183 u64 min_valid_ts;
0184 struct pulse_elem *p;
0185
0186
0187 if (pde->count < 2)
0188 return;
0189
0190 if (pde->last_ts <= pde->window_size)
0191 return;
0192
0193 min_valid_ts = pde->last_ts - pde->window_size;
0194 while ((p = pulse_queue_get_tail(pde)) != NULL) {
0195 if (p->ts >= min_valid_ts)
0196 return;
0197 pulse_queue_dequeue(pde);
0198 }
0199 }
0200
0201 static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
0202 {
0203 struct pulse_elem *p = pool_get_pulse_elem();
0204 if (p == NULL) {
0205 p = kmalloc(sizeof(*p), GFP_ATOMIC);
0206 if (p == NULL) {
0207 DFS_POOL_STAT_INC(pulse_alloc_error);
0208 return false;
0209 }
0210 DFS_POOL_STAT_INC(pulse_allocated);
0211 DFS_POOL_STAT_INC(pulse_used);
0212 }
0213 INIT_LIST_HEAD(&p->head);
0214 p->ts = ts;
0215 list_add(&p->head, &pde->pulses);
0216 pde->count++;
0217 pde->last_ts = ts;
0218 pulse_queue_check_window(pde);
0219 if (pde->count >= pde->max_count)
0220 pulse_queue_dequeue(pde);
0221 return true;
0222 }
0223
/*
 * For each queued pulse, try to start a new PRI sequence that ends at the
 * new pulse @ts, scanning older pulses for ones that fit the candidate PRI
 * grid. Sequences that collect more than @min_count pulses are added to
 * pde->sequences. Returns false only on allocation failure.
 */
static bool pseq_handler_create_sequences(struct pri_detector *pde,
					  u64 ts, u32 min_count)
{
	struct pulse_elem *p;
	list_for_each_entry(p, &pde->pulses, head) {
		struct pri_sequence ps, *new_ps;
		struct pulse_elem *p2;
		u32 tmp_false_count;
		u64 min_valid_ts;
		u32 delta_ts = ts - p->ts;

		if (delta_ts < pde->rs->pri_min)
			/* ignore too small pri */
			continue;

		if (delta_ts > pde->rs->pri_max)
			/* stop on too large pri (sorted list) */
			break;

		/* build a new sequence with new potential pulse */
		ps.count = 2;
		ps.count_falses = 0;
		ps.first_ts = p->ts;
		ps.last_ts = ts;
		ps.pri = GET_PRI_TO_USE(pde->rs->pri_min,
			pde->rs->pri_max, ts - p->ts);
		ps.dur = ps.pri * (pde->rs->ppb - 1)
				+ 2 * pde->rs->max_pri_tolerance;

		p2 = p;
		tmp_false_count = 0;
		min_valid_ts = ts - ps.dur;
		/* check which past pulses are candidates for new sequence */
		list_for_each_entry_continue(p2, &pde->pulses, head) {
			u32 factor;
			if (p2->ts < min_valid_ts)
				/* stop on crossing window border */
				break;
			/* check if pulse match (multi)PRI */
			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
						  pde->rs->max_pri_tolerance);
			if (factor > 0) {
				ps.count++;
				ps.first_ts = p2->ts;
				/*
				 * on match, add the intermediate false
				 * pulses to the sequence and reset the
				 * running false counter
				 */
				ps.count_falses += tmp_false_count;
				tmp_false_count = 0;
			} else {
				/* this is a potential false one */
				tmp_false_count++;
			}
		}
		if (ps.count <= min_count)
			/* did not reach minimum count, drop this sequence */
			continue;

		/* this is a valid one, add it */
		ps.deadline_ts = ps.first_ts + ps.dur;
		new_ps = pool_get_pseq_elem();
		if (new_ps == NULL) {
			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
			if (new_ps == NULL) {
				DFS_POOL_STAT_INC(pseq_alloc_error);
				return false;
			}
			DFS_POOL_STAT_INC(pseq_allocated);
			DFS_POOL_STAT_INC(pseq_used);
		}
		memcpy(new_ps, &ps, sizeof(ps));
		INIT_LIST_HEAD(&new_ps->head);
		list_add(&new_ps->head, &pde->sequences);
	}
	return true;
}
0301
0302
0303 static u32
0304 pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
0305 {
0306 u32 max_count = 0;
0307 struct pri_sequence *ps, *ps2;
0308 list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
0309 u32 delta_ts;
0310 u32 factor;
0311
0312
0313 if (ts > ps->deadline_ts) {
0314 list_del_init(&ps->head);
0315 pool_put_pseq_elem(ps);
0316 continue;
0317 }
0318
0319 delta_ts = ts - ps->last_ts;
0320 factor = pde_get_multiple(delta_ts, ps->pri,
0321 pde->rs->max_pri_tolerance);
0322 if (factor > 0) {
0323 ps->last_ts = ts;
0324 ps->count++;
0325
0326 if (max_count < ps->count)
0327 max_count = ps->count;
0328 } else {
0329 ps->count_falses++;
0330 }
0331 }
0332 return max_count;
0333 }
0334
0335 static struct pri_sequence *
0336 pseq_handler_check_detection(struct pri_detector *pde)
0337 {
0338 struct pri_sequence *ps;
0339
0340 if (list_empty(&pde->sequences))
0341 return NULL;
0342
0343 list_for_each_entry(ps, &pde->sequences, head) {
0344
0345
0346
0347
0348
0349 if ((ps->count >= pde->rs->ppb_thresh) &&
0350 (ps->count * pde->rs->num_pri >= ps->count_falses))
0351 return ps;
0352 }
0353 return NULL;
0354 }
0355
0356
0357
0358 static void pri_detector_reset(struct pri_detector *pde, u64 ts)
0359 {
0360 struct pri_sequence *ps, *ps0;
0361 struct pulse_elem *p, *p0;
0362 list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
0363 list_del_init(&ps->head);
0364 pool_put_pseq_elem(ps);
0365 }
0366 list_for_each_entry_safe(p, p0, &pde->pulses, head) {
0367 list_del_init(&p->head);
0368 pool_put_pulse_elem(p);
0369 }
0370 pde->count = 0;
0371 pde->last_ts = ts;
0372 }
0373
/*
 * Destructor (pde->exit callback): return this detector's elements to the
 * pools, drop the pool reference and free the detector itself.
 */
static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
0380
/*
 * Process one pulse event (pde->add_pulse callback). Returns the matching
 * sequence when a radar pattern is detected, NULL otherwise.
 */
static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
						   struct pulse_event *event)
{
	u32 max_updated_seq;
	struct pri_sequence *ps;
	u64 ts = event->ts;
	const struct radar_detector_specs *rs = de->rs;

	/* ignore pulses not within width range */
	if ((rs->width_min > event->width) || (rs->width_max < event->width))
		return NULL;

	if ((ts - de->last_ts) < rs->max_pri_tolerance)
		/* if delta to last pulse is too short, don't use this pulse */
		return NULL;
	/* detector spec requires a chirping pulse, but none was detected */
	if (rs->chirp && rs->chirp != event->chirp)
		return NULL;

	de->last_ts = ts;

	max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);

	if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
		/* allocation failure: start over with a clean detector */
		pri_detector_reset(de, ts);
		return NULL;
	}

	ps = pseq_handler_check_detection(de);

	/* queue the pulse only while no pattern has been detected yet */
	if (ps == NULL)
		pulse_queue_enqueue(de, ts);

	return ps;
}
0416
0417 struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
0418 {
0419 struct pri_detector *de;
0420
0421 de = kzalloc(sizeof(*de), GFP_ATOMIC);
0422 if (de == NULL)
0423 return NULL;
0424 de->exit = pri_detector_exit;
0425 de->add_pulse = pri_detector_add_pulse;
0426 de->reset = pri_detector_reset;
0427
0428 INIT_LIST_HEAD(&de->sequences);
0429 INIT_LIST_HEAD(&de->pulses);
0430 de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
0431 de->max_count = rs->ppb * 2;
0432 de->rs = rs;
0433
0434 pool_register_ref();
0435 return de;
0436 }