/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into the includer */

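/*
 * Account for @packets/@bytes/@truesize leaving @flow and propagate the
 * change to the owning tin and the fq instance. Once the flow's backlog
 * drains to zero it is retired from the drop-candidate bookkeeping: a
 * tin's default flow is unlinked from the tin backlog list, any other
 * flow has its bit cleared in the flows bitmap.
 */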
static void
__fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets,
            unsigned int bytes, unsigned int truesize)
{
    struct fq_tin *tin = flow->tin;
    int idx;

    tin->backlog_bytes -= bytes;
    tin->backlog_packets -= packets;
    flow->backlog -= bytes;
    fq->backlog -= packets;
    fq->memory_usage -= truesize;

    if (flow->backlog)
        return;

    if (flow == &tin->default_flow) {
        list_del_init(&tin->tin_list);
        return;
    }

    idx = flow - fq->flows;
    __clear_bit(idx, fq->flows_bitmap);
}

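/* Single-skb convenience wrapper around __fq_adjust_removal(). */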
static void fq_adjust_removal(struct fq *fq,
                  struct fq_flow *flow,
                  struct sk_buff *skb)
{
    __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize);
}

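/* Pop the head skb of @flow and fix up the backlog accounting. */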
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
                       struct fq_flow *flow)
{
    struct sk_buff *skb;

    lockdep_assert_held(&fq->lock);

    skb = __skb_dequeue(&flow->queue);
    if (!skb)
        return NULL;

    fq_adjust_removal(fq, flow, skb);

    return skb;
}

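/*
 * Drop up to half of @flow's queue, capped at 32 packets, handing each
 * skb to @free_func, then settle the accounting once for the whole
 * batch. Returns the number of packets dropped.
 */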
static int fq_flow_drop(struct fq *fq, struct fq_flow *flow,
            fq_skb_free_t free_func)
{
    unsigned int packets = 0, bytes = 0, truesize = 0;
    struct fq_tin *tin = flow->tin;
    struct sk_buff *skb;
    int pending;

    lockdep_assert_held(&fq->lock);

    pending = min_t(int, 32, skb_queue_len(&flow->queue) / 2);
    do {
        skb = __skb_dequeue(&flow->queue);
        if (!skb)
            break;

        packets++;
        bytes += skb->len;
        truesize += skb->truesize;
        free_func(fq, tin, flow, skb);
    } while (packets < pending);

    __fq_adjust_removal(fq, flow, packets, bytes, truesize);

    return packets;
}

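/*
 * Deficit round-robin dequeue for one tin. New flows are serviced ahead
 * of old ones; a flow that has exhausted its deficit is topped up by one
 * quantum and rotated to the tail of old_flows. A flow that turns out to
 * be empty is either rotated into old_flows or unlinked entirely.
 * Returns NULL once the tin has nothing left to send.
 */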
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
                      struct fq_tin *tin,
                      fq_tin_dequeue_t dequeue_func)
{
    struct fq_flow *flow;
    struct list_head *head;
    struct sk_buff *skb;

    lockdep_assert_held(&fq->lock);

begin:
    head = &tin->new_flows;
    if (list_empty(head)) {
        head = &tin->old_flows;
        if (list_empty(head))
            return NULL;
    }

    flow = list_first_entry(head, struct fq_flow, flowchain);

    if (flow->deficit <= 0) {
        flow->deficit += fq->quantum;
        list_move_tail(&flow->flowchain,
                   &tin->old_flows);
        goto begin;
    }

    skb = dequeue_func(fq, tin, flow);
    if (!skb) {
        /* force a pass through old_flows to prevent starvation */
        if ((head == &tin->new_flows) &&
            !list_empty(&tin->old_flows)) {
            list_move_tail(&flow->flowchain, &tin->old_flows);
        } else {
            list_del_init(&flow->flowchain);
            flow->tin = NULL;
        }
        goto begin;
    }

    flow->deficit -= skb->len;
    tin->tx_bytes += skb->len;
    tin->tx_packets++;

    return skb;
}

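/* Map the skb's flow hash onto [0, flows_cnt) without a modulo. */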
static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
{
    u32 hash = skb_get_hash(skb);

    return reciprocal_scale(hash, fq->flows_cnt);
}

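/*
 * Look up the flow for a bucket index. If the bucket is already owned by
 * a different tin, fall back to this tin's default flow and count the
 * collision instead of mixing traffic from two tins in one queue.
 */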
static struct fq_flow *fq_flow_classify(struct fq *fq,
                    struct fq_tin *tin, u32 idx,
                    struct sk_buff *skb)
{
    struct fq_flow *flow;

    lockdep_assert_held(&fq->lock);

    flow = &fq->flows[idx];
    if (flow->tin && flow->tin != tin) {
        flow = &tin->default_flow;
        tin->collisions++;
        fq->collisions++;
    }

    if (!flow->tin)
        tin->flows++;

    return flow;
}

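/*
 * Find the flow with the largest byte backlog, scanning both the hashed
 * flows (via the flows bitmap) and the per-tin default flows (via the
 * tin backlog list). Used to pick the next drop victim.
 */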
static struct fq_flow *fq_find_fattest_flow(struct fq *fq)
{
    struct fq_tin *tin;
    struct fq_flow *flow = NULL;
    u32 len = 0;
    int i;

    for_each_set_bit(i, fq->flows_bitmap, fq->flows_cnt) {
        struct fq_flow *cur = &fq->flows[i];
        unsigned int cur_len;

        cur_len = cur->backlog;
        if (cur_len <= len)
            continue;

        flow = cur;
        len = cur_len;
    }

    list_for_each_entry(tin, &fq->tin_backlog, tin_list) {
        unsigned int cur_len = tin->default_flow.backlog;

        if (cur_len <= len)
            continue;

        flow = &tin->default_flow;
        len = cur_len;
    }

    return flow;
}

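/*
 * Enqueue @skb on the flow selected for @idx. A flow going from empty to
 * backlogged is re-registered in the flows bitmap or tin backlog list,
 * and is put on new_flows with a fresh deficit if it is not already
 * queued. If the packet or memory limit is then exceeded, packets are
 * dropped from the fattest flow until the fq is back under its limits.
 */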
static void fq_tin_enqueue(struct fq *fq,
               struct fq_tin *tin, u32 idx,
               struct sk_buff *skb,
               fq_skb_free_t free_func)
{
    struct fq_flow *flow;
    bool oom;

    lockdep_assert_held(&fq->lock);

    flow = fq_flow_classify(fq, tin, idx, skb);

    if (!flow->backlog) {
        if (flow != &tin->default_flow)
            __set_bit(idx, fq->flows_bitmap);
        else if (list_empty(&tin->tin_list))
            list_add(&tin->tin_list, &fq->tin_backlog);
    }

    flow->tin = tin;
    flow->backlog += skb->len;
    tin->backlog_bytes += skb->len;
    tin->backlog_packets++;
    fq->memory_usage += skb->truesize;
    fq->backlog++;

    if (list_empty(&flow->flowchain)) {
        flow->deficit = fq->quantum;
        list_add_tail(&flow->flowchain,
                  &tin->new_flows);
    }

    __skb_queue_tail(&flow->queue, skb);
    oom = (fq->memory_usage > fq->memory_limit);
    while (fq->backlog > fq->limit || oom) {
        flow = fq_find_fattest_flow(fq);
        if (!flow)
            return;

        if (!fq_flow_drop(fq, flow, free_func))
            return;

        flow->tin->overlimit++;
        fq->overlimit++;
        if (oom) {
            fq->overmemory++;
            oom = (fq->memory_usage > fq->memory_limit);
        }
    }
}

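/*
 * Free every skb in @flow's queue for which @filter_func returns true,
 * fixing up the accounting as each one is unlinked.
 */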
static void fq_flow_filter(struct fq *fq,
               struct fq_flow *flow,
               fq_skb_filter_t filter_func,
               void *filter_data,
               fq_skb_free_t free_func)
{
    struct fq_tin *tin = flow->tin;
    struct sk_buff *skb, *tmp;

    lockdep_assert_held(&fq->lock);

    skb_queue_walk_safe(&flow->queue, skb, tmp) {
        if (!filter_func(fq, tin, flow, skb, filter_data))
            continue;

        __skb_unlink(skb, &flow->queue);
        fq_adjust_removal(fq, flow, skb);
        free_func(fq, tin, flow, skb);
    }
}

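/* Apply fq_flow_filter() to every flow currently linked into @tin. */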
static void fq_tin_filter(struct fq *fq,
              struct fq_tin *tin,
              fq_skb_filter_t filter_func,
              void *filter_data,
              fq_skb_free_t free_func)
{
    struct fq_flow *flow;

    lockdep_assert_held(&fq->lock);

    list_for_each_entry(flow, &tin->new_flows, flowchain)
        fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
    list_for_each_entry(flow, &tin->old_flows, flowchain)
        fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
}

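/*
 * Drain @flow completely, unlink it from its flowchain and detach it
 * from its tin; the tin is dropped from the fq backlog list once its
 * last flow is gone.
 */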
static void fq_flow_reset(struct fq *fq,
              struct fq_flow *flow,
              fq_skb_free_t free_func)
{
    struct fq_tin *tin = flow->tin;
    struct sk_buff *skb;

    while ((skb = fq_flow_dequeue(fq, flow)))
        free_func(fq, tin, flow, skb);

    if (!list_empty(&flow->flowchain)) {
        list_del_init(&flow->flowchain);
        if (list_empty(&tin->new_flows) &&
            list_empty(&tin->old_flows))
            list_del_init(&tin->tin_list);
    }

    flow->tin = NULL;

    WARN_ON_ONCE(flow->backlog);
}

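/* Reset every flow linked into @tin until both flow lists are empty. */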
static void fq_tin_reset(struct fq *fq,
             struct fq_tin *tin,
             fq_skb_free_t free_func)
{
    struct list_head *head;
    struct fq_flow *flow;

    for (;;) {
        head = &tin->new_flows;
        if (list_empty(head)) {
            head = &tin->old_flows;
            if (list_empty(head))
                break;
        }

        flow = list_first_entry(head, struct fq_flow, flowchain);
        fq_flow_reset(fq, flow, free_func);
    }

    WARN_ON_ONCE(!list_empty(&tin->tin_list));
    WARN_ON_ONCE(tin->backlog_bytes);
    WARN_ON_ONCE(tin->backlog_packets);
}

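/* One-time initializers for the embedded flow and tin structures. */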
static void fq_flow_init(struct fq_flow *flow)
{
    INIT_LIST_HEAD(&flow->flowchain);
    __skb_queue_head_init(&flow->queue);
}

static void fq_tin_init(struct fq_tin *tin)
{
    INIT_LIST_HEAD(&tin->new_flows);
    INIT_LIST_HEAD(&tin->old_flows);
    INIT_LIST_HEAD(&tin->tin_list);
    fq_flow_init(&tin->default_flow);
}

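/*
 * Initialize an fq instance: zero it, apply the default knobs (300-byte
 * quantum, 8192-packet limit, 16 MiB memory limit) and allocate the flow
 * array plus its backing bitmap. Returns 0 on success or -ENOMEM.
 */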
static int fq_init(struct fq *fq, int flows_cnt)
{
    int i;

    memset(fq, 0, sizeof(fq[0]));
    spin_lock_init(&fq->lock);
    INIT_LIST_HEAD(&fq->tin_backlog);
    fq->flows_cnt = max_t(u32, flows_cnt, 1);
    fq->quantum = 300;
    fq->limit = 8192;
    fq->memory_limit = 16 << 20; /* 16 MBytes */

    fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
    if (!fq->flows)
        return -ENOMEM;

    fq->flows_bitmap = bitmap_zalloc(fq->flows_cnt, GFP_KERNEL);
    if (!fq->flows_bitmap) {
        kvfree(fq->flows);
        fq->flows = NULL;
        return -ENOMEM;
    }

    for (i = 0; i < fq->flows_cnt; i++)
        fq_flow_init(&fq->flows[i]);

    return 0;
}

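/* Drain all flows and release the memory allocated by fq_init(). */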
static void fq_reset(struct fq *fq,
             fq_skb_free_t free_func)
{
    int i;

    for (i = 0; i < fq->flows_cnt; i++)
        fq_flow_reset(fq, &fq->flows[i], free_func);

    kvfree(fq->flows);
    fq->flows = NULL;

    bitmap_free(fq->flows_bitmap);
    fq->flows_bitmap = NULL;
}

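/*
 * Illustrative usage sketch, not part of this header: an includer embeds
 * struct fq and struct fq_tin in its own state, supplies the free and
 * dequeue callbacks, and drives everything under fq->lock. The drv_*
 * names and struct below are hypothetical.
 *
 *    static void drv_skb_free(struct fq *fq, struct fq_tin *tin,
 *                 struct fq_flow *flow, struct sk_buff *skb)
 *    {
 *        dev_kfree_skb(skb);    (drop policy is up to the includer)
 *    }
 *
 *    static struct sk_buff *drv_dequeue(struct fq *fq, struct fq_tin *tin,
 *                       struct fq_flow *flow)
 *    {
 *        return fq_flow_dequeue(fq, flow);
 *    }
 *
 *    Enqueue and dequeue paths, both under fq->lock:
 *
 *    spin_lock_bh(&drv->fq.lock);
 *    fq_tin_enqueue(&drv->fq, &drv->tin, fq_flow_idx(&drv->fq, skb),
 *               skb, drv_skb_free);
 *    out = fq_tin_dequeue(&drv->fq, &drv->tin, drv_dequeue);
 *    spin_unlock_bh(&drv->fq.lock);
 */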
#endif /* __NET_SCHED_FQ_IMPL_H */