// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
0004 #include "percpu_freelist.h"
0005
0006 int pcpu_freelist_init(struct pcpu_freelist *s)
0007 {
0008 int cpu;
0009
0010 s->freelist = alloc_percpu(struct pcpu_freelist_head);
0011 if (!s->freelist)
0012 return -ENOMEM;
0013
0014 for_each_possible_cpu(cpu) {
0015 struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);
0016
0017 raw_spin_lock_init(&head->lock);
0018 head->first = NULL;
0019 }
0020 raw_spin_lock_init(&s->extralist.lock);
0021 s->extralist.first = NULL;
0022 return 0;
0023 }
0024
/* Free the per-CPU list heads allocated by pcpu_freelist_init().
 * Does not free the list elements themselves; their memory is owned by the
 * caller (see pcpu_freelist_populate(), which links caller-provided buffers).
 */
void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}
0029
/* Link @node at the front of @head. No locking here: callers either hold
 * head->lock (see ___pcpu_freelist_push()) or run single-threaded
 * (pcpu_freelist_populate()). The WRITE_ONCE() pairs with the lockless
 * READ_ONCE(head->first) empty-checks in the pop paths.
 */
static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
					   struct pcpu_freelist_node *node)
{
	node->next = head->first;
	WRITE_ONCE(head->first, node);
}
0036
/* Push @node onto @head under head->lock.
 * Caller must have IRQs disabled (see pcpu_freelist_push()) since the same
 * raw spinlock is taken from the pop path.
 */
static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
					 struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	pcpu_freelist_push_node(head, node);
	raw_spin_unlock(&head->lock);
}
0044
0045 static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
0046 struct pcpu_freelist_node *node)
0047 {
0048 if (!raw_spin_trylock(&s->extralist.lock))
0049 return false;
0050
0051 pcpu_freelist_push_node(&s->extralist, node);
0052 raw_spin_unlock(&s->extralist.lock);
0053 return true;
0054 }
0055
/* NMI-safe push: we may have interrupted a holder of a per-CPU list lock,
 * so a plain raw_spin_lock() could deadlock. Instead, rotate through the
 * possible CPUs with trylock until one succeeds. Each time the scan wraps
 * back around to the starting CPU, additionally attempt the shared
 * extralist (also via trylock) as an escape hatch.
 *
 * NOTE(review): the loop has no termination path other than a successful
 * trylock on some list — presumably acceptable because a lock holder on
 * another CPU will eventually release; confirm against callers.
 */
static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
					     struct pcpu_freelist_node *node)
{
	int cpu, orig_cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		struct pcpu_freelist_head *head;

		head = per_cpu_ptr(s->freelist, cpu);
		if (raw_spin_trylock(&head->lock)) {
			pcpu_freelist_push_node(head, node);
			raw_spin_unlock(&head->lock);
			return;
		}
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;

		/* cannot lock any per-CPU lock, try extralist */
		if (cpu == orig_cpu &&
		    pcpu_freelist_try_push_extra(s, node))
			return;
	}
}
0081
0082 void __pcpu_freelist_push(struct pcpu_freelist *s,
0083 struct pcpu_freelist_node *node)
0084 {
0085 if (in_nmi())
0086 ___pcpu_freelist_push_nmi(s, node);
0087 else
0088 ___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
0089 }
0090
/* IRQ-safe wrapper around __pcpu_freelist_push(): disables local
 * interrupts for the duration of the push so the raw spinlocks cannot be
 * re-entered from an interrupt on this CPU.
 */
void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	__pcpu_freelist_push(s, node);
	local_irq_restore(irq_flags);
}
0100
/* Carve @buf into @nr_elems elements of @elem_size bytes each and
 * distribute them across the per-CPU lists, roughly pcpu_entries
 * (nr_elems / num_possible_cpus() + 1) elements per CPU.
 *
 * Pushes are done without taking the list locks — presumably this runs at
 * init time, before the freelist is visible to concurrent users; confirm
 * against callers.
 *
 * The "again" goto keeps pushing onto the current CPU's list until the
 * running count @i reaches a multiple of pcpu_entries, then the outer
 * for_each_possible_cpu() advances to the next CPU.
 */
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	int i, cpu, pcpu_entries;

	pcpu_entries = nr_elems / num_possible_cpus() + 1;
	i = 0;

	for_each_possible_cpu(cpu) {
again:
		head = per_cpu_ptr(s->freelist, cpu);
		/* No locking, see comment above. */
		pcpu_freelist_push_node(head, buf);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
	}
}
0123
/* Pop one node, scanning the per-CPU lists starting at the current CPU
 * and wrapping around; fall back to the shared extralist when every
 * per-CPU list is empty. Returns NULL if all lists are empty.
 * Caller must have IRQs disabled (see pcpu_freelist_pop()).
 */
static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int orig_cpu, cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		/* Lockless peek to skip empty lists without contending on
		 * the lock; pairs with WRITE_ONCE() in the push paths. The
		 * list may still turn out empty under the lock (node NULL),
		 * hence the recheck below.
		 */
		if (!READ_ONCE(head->first))
			goto next_cpu;
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			WRITE_ONCE(head->first, node->next);
			raw_spin_unlock(&head->lock);
			return node;
		}
		raw_spin_unlock(&head->lock);
next_cpu:
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu)
			break;
	}

	/* per-CPU lists are all empty, try the shared extralist */
	if (!READ_ONCE(s->extralist.first))
		return NULL;
	raw_spin_lock(&s->extralist.lock);
	node = s->extralist.first;
	if (node)
		WRITE_ONCE(s->extralist.first, node->next);
	raw_spin_unlock(&s->extralist.lock);
	return node;
}
0161
/* NMI-safe variant of ___pcpu_freelist_pop(): identical scan order, but
 * every lock acquisition uses trylock, since the NMI may have interrupted
 * a holder of one of these locks on this CPU. A list whose lock cannot be
 * taken is simply skipped; returns NULL if nothing could be popped.
 */
static struct pcpu_freelist_node *
___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int orig_cpu, cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		/* Lockless peek; pairs with WRITE_ONCE() in the push paths. */
		if (!READ_ONCE(head->first))
			goto next_cpu;
		if (raw_spin_trylock(&head->lock)) {
			node = head->first;
			if (node) {
				WRITE_ONCE(head->first, node->next);
				raw_spin_unlock(&head->lock);
				return node;
			}
			raw_spin_unlock(&head->lock);
		}
next_cpu:
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu)
			break;
	}

	/* cannot pop from per-CPU lists, try the shared extralist */
	if (!READ_ONCE(s->extralist.first) || !raw_spin_trylock(&s->extralist.lock))
		return NULL;
	node = s->extralist.first;
	if (node)
		WRITE_ONCE(s->extralist.first, node->next);
	raw_spin_unlock(&s->extralist.lock);
	return node;
}
0200
/* Pop one node, dispatching on context: the trylock-based variant in NMI,
 * the plain locked pop otherwise. Returns NULL when all lists are empty
 * (or, in NMI, when no list lock could be taken).
 * Caller must have IRQs disabled (see pcpu_freelist_pop()).
 */
struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
	if (!in_nmi())
		return ___pcpu_freelist_pop(s);

	return ___pcpu_freelist_pop_nmi(s);
}
0207
/* IRQ-safe wrapper around __pcpu_freelist_pop(): disables local
 * interrupts so the raw spinlocks cannot be re-entered from an interrupt
 * on this CPU. Returns the popped node, or NULL if none was available.
 */
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_node *node;
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	node = __pcpu_freelist_pop(s);
	local_irq_restore(irq_flags);

	return node;
}