0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014 #include <linux/vmalloc.h>
0015 #include <linux/jhash.h>
0016 #include <linux/slab.h>
0017 #include <linux/sort.h>
0018 #include <linux/kmemleak.h>
0019
0020 #include "tracing_map.h"
0021 #include "trace.h"
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
/**
 * tracing_map_update_sum - Atomically add a value to an element's sum field
 * @elt: The tracing_map_elt
 * @i: The index of the sum field within the element
 * @n: The value to add
 *
 * Adds @n to sum @i of @elt using an atomic64 add, so concurrent
 * updaters don't lose counts.
 */
void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n)
{
	atomic64_add(n, &elt->fields[i].sum);
}
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
/**
 * tracing_map_read_sum - Return the value of an element's sum field
 * @elt: The tracing_map_elt
 * @i: The index of the sum field within the element
 *
 * Return: The current value of sum @i, read atomically.
 */
u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i)
{
	return (u64)atomic64_read(&elt->fields[i].sum);
}
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
/**
 * tracing_map_set_var - Assign a value to an element's variable field
 * @elt: The tracing_map_elt
 * @i: The index of the variable within the element
 * @n: The value to assign
 *
 * Atomically stores @n into variable @i and marks the variable as set
 * (see tracing_map_var_set()/tracing_map_read_var_once()).
 */
void tracing_map_set_var(struct tracing_map_elt *elt, unsigned int i, u64 n)
{
	atomic64_set(&elt->vars[i], n);
	elt->var_set[i] = true;
}
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
/**
 * tracing_map_var_set - Return whether an element's variable has been set
 * @elt: The tracing_map_elt
 * @i: The index of the variable within the element
 *
 * Return: true if tracing_map_set_var() has set variable @i and it has
 * not since been consumed by tracing_map_read_var_once().
 */
bool tracing_map_var_set(struct tracing_map_elt *elt, unsigned int i)
{
	return elt->var_set[i];
}
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
/**
 * tracing_map_read_var - Return the value of an element's variable field
 * @elt: The tracing_map_elt
 * @i: The index of the variable within the element
 *
 * Reads variable @i without clearing its "set" flag; use
 * tracing_map_read_var_once() for consume-once semantics.
 *
 * Return: The current value of variable @i, read atomically.
 */
u64 tracing_map_read_var(struct tracing_map_elt *elt, unsigned int i)
{
	return (u64)atomic64_read(&elt->vars[i]);
}
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
/**
 * tracing_map_read_var_once - Read and "consume" an element's variable
 * @elt: The tracing_map_elt
 * @i: The index of the variable within the element
 *
 * Clears the variable's "set" flag before reading it, so a subsequent
 * tracing_map_var_set() is required for tracing_map_var_set() to report
 * true again.
 *
 * Return: The current value of variable @i, read atomically.
 */
u64 tracing_map_read_var_once(struct tracing_map_elt *elt, unsigned int i)
{
	elt->var_set[i] = false;
	return (u64)atomic64_read(&elt->vars[i]);
}
0127
/* Key-field comparator for NUL-terminated string keys: plain strcmp(). */
int tracing_map_cmp_string(void *val_a, void *val_b)
{
	return strcmp((char *)val_a, (char *)val_b);
}
0135
/* Comparator for fields with no defined ordering: everything compares equal. */
int tracing_map_cmp_none(void *val_a, void *val_b)
{
	(void)val_a;
	(void)val_b;

	return 0;
}
0140
/*
 * Comparator for sum fields.  Both operands point at atomic64_t sums;
 * they are read atomically and compared as unsigned 64-bit values.
 */
static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
{
	u64 a = atomic64_read((atomic64_t *)val_a);
	u64 b = atomic64_read((atomic64_t *)val_b);

	return (a > b) ? 1 : ((a < b) ? -1 : 0);
}
0148
/*
 * Generate a three-way comparator for a given integer type.  The raw
 * field bytes are read as a u64 and then truncated/reinterpreted as the
 * target type, so the comparison respects that type's width and
 * signedness.
 */
#define DEFINE_TRACING_MAP_CMP_FN(type)					\
static int tracing_map_cmp_##type(void *val_a, void *val_b)		\
{									\
	type a = (type)(*(u64 *)val_a);					\
	type b = (type)(*(u64 *)val_b);					\
									\
	return (a > b) ? 1 : ((a < b) ? -1 : 0);			\
}

DEFINE_TRACING_MAP_CMP_FN(s64);
DEFINE_TRACING_MAP_CMP_FN(u64);
DEFINE_TRACING_MAP_CMP_FN(s32);
DEFINE_TRACING_MAP_CMP_FN(u32);
DEFINE_TRACING_MAP_CMP_FN(s16);
DEFINE_TRACING_MAP_CMP_FN(u16);
DEFINE_TRACING_MAP_CMP_FN(s8);
DEFINE_TRACING_MAP_CMP_FN(u8);
0166
0167 tracing_map_cmp_fn_t tracing_map_cmp_num(int field_size,
0168 int field_is_signed)
0169 {
0170 tracing_map_cmp_fn_t fn = tracing_map_cmp_none;
0171
0172 switch (field_size) {
0173 case 8:
0174 if (field_is_signed)
0175 fn = tracing_map_cmp_s64;
0176 else
0177 fn = tracing_map_cmp_u64;
0178 break;
0179 case 4:
0180 if (field_is_signed)
0181 fn = tracing_map_cmp_s32;
0182 else
0183 fn = tracing_map_cmp_u32;
0184 break;
0185 case 2:
0186 if (field_is_signed)
0187 fn = tracing_map_cmp_s16;
0188 else
0189 fn = tracing_map_cmp_u16;
0190 break;
0191 case 1:
0192 if (field_is_signed)
0193 fn = tracing_map_cmp_s8;
0194 else
0195 fn = tracing_map_cmp_u8;
0196 break;
0197 }
0198
0199 return fn;
0200 }
0201
0202 static int tracing_map_add_field(struct tracing_map *map,
0203 tracing_map_cmp_fn_t cmp_fn)
0204 {
0205 int ret = -EINVAL;
0206
0207 if (map->n_fields < TRACING_MAP_FIELDS_MAX) {
0208 ret = map->n_fields;
0209 map->fields[map->n_fields++].cmp_fn = cmp_fn;
0210 }
0211
0212 return ret;
0213 }
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
/**
 * tracing_map_add_sum_field - Add a sum field to a tracing_map
 * @map: The tracing_map
 *
 * Adds a field using the atomic64 comparator, which is what marks the
 * field as a sum throughout this file.
 *
 * Return: The new field's index, or -EINVAL if the map is full.
 */
int tracing_map_add_sum_field(struct tracing_map *map)
{
	return tracing_map_add_field(map, tracing_map_cmp_atomic64);
}
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244 int tracing_map_add_var(struct tracing_map *map)
0245 {
0246 int ret = -EINVAL;
0247
0248 if (map->n_vars < TRACING_MAP_VARS_MAX)
0249 ret = map->n_vars++;
0250
0251 return ret;
0252 }
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270 int tracing_map_add_key_field(struct tracing_map *map,
0271 unsigned int offset,
0272 tracing_map_cmp_fn_t cmp_fn)
0273
0274 {
0275 int idx = tracing_map_add_field(map, cmp_fn);
0276
0277 if (idx < 0)
0278 return idx;
0279
0280 map->fields[idx].offset = offset;
0281
0282 map->key_idx[map->n_keys++] = idx;
0283
0284 return idx;
0285 }
0286
0287 static void tracing_map_array_clear(struct tracing_map_array *a)
0288 {
0289 unsigned int i;
0290
0291 if (!a->pages)
0292 return;
0293
0294 for (i = 0; i < a->n_pages; i++)
0295 memset(a->pages[i], 0, PAGE_SIZE);
0296 }
0297
0298 static void tracing_map_array_free(struct tracing_map_array *a)
0299 {
0300 unsigned int i;
0301
0302 if (!a)
0303 return;
0304
0305 if (!a->pages)
0306 goto free;
0307
0308 for (i = 0; i < a->n_pages; i++) {
0309 if (!a->pages[i])
0310 break;
0311 kmemleak_free(a->pages[i]);
0312 free_page((unsigned long)a->pages[i]);
0313 }
0314
0315 kfree(a->pages);
0316
0317 free:
0318 kfree(a);
0319 }
0320
0321 static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
0322 unsigned int entry_size)
0323 {
0324 struct tracing_map_array *a;
0325 unsigned int i;
0326
0327 a = kzalloc(sizeof(*a), GFP_KERNEL);
0328 if (!a)
0329 return NULL;
0330
0331 a->entry_size_shift = fls(roundup_pow_of_two(entry_size) - 1);
0332 a->entries_per_page = PAGE_SIZE / (1 << a->entry_size_shift);
0333 a->n_pages = n_elts / a->entries_per_page;
0334 if (!a->n_pages)
0335 a->n_pages = 1;
0336 a->entry_shift = fls(a->entries_per_page) - 1;
0337 a->entry_mask = (1 << a->entry_shift) - 1;
0338
0339 a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL);
0340 if (!a->pages)
0341 goto free;
0342
0343 for (i = 0; i < a->n_pages; i++) {
0344 a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
0345 if (!a->pages[i])
0346 goto free;
0347 kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
0348 }
0349 out:
0350 return a;
0351 free:
0352 tracing_map_array_free(a);
0353 a = NULL;
0354
0355 goto out;
0356 }
0357
0358 static void tracing_map_elt_clear(struct tracing_map_elt *elt)
0359 {
0360 unsigned i;
0361
0362 for (i = 0; i < elt->map->n_fields; i++)
0363 if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64)
0364 atomic64_set(&elt->fields[i].sum, 0);
0365
0366 for (i = 0; i < elt->map->n_vars; i++) {
0367 atomic64_set(&elt->vars[i], 0);
0368 elt->var_set[i] = false;
0369 }
0370
0371 if (elt->map->ops && elt->map->ops->elt_clear)
0372 elt->map->ops->elt_clear(elt);
0373 }
0374
0375 static void tracing_map_elt_init_fields(struct tracing_map_elt *elt)
0376 {
0377 unsigned int i;
0378
0379 tracing_map_elt_clear(elt);
0380
0381 for (i = 0; i < elt->map->n_fields; i++) {
0382 elt->fields[i].cmp_fn = elt->map->fields[i].cmp_fn;
0383
0384 if (elt->fields[i].cmp_fn != tracing_map_cmp_atomic64)
0385 elt->fields[i].offset = elt->map->fields[i].offset;
0386 }
0387 }
0388
/*
 * Free an element and everything it owns.  Client-owned private data is
 * released first via the elt_free op (which may look at elt->map), then
 * the element's own allocations.  Safe to call on NULL or on a
 * partially constructed element (kfree(NULL) is a no-op).
 */
static void tracing_map_elt_free(struct tracing_map_elt *elt)
{
	if (!elt)
		return;

	if (elt->map->ops && elt->map->ops->elt_free)
		elt->map->ops->elt_free(elt);
	kfree(elt->fields);
	kfree(elt->vars);
	kfree(elt->var_set);
	kfree(elt->key);
	kfree(elt);
}
0402
0403 static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map)
0404 {
0405 struct tracing_map_elt *elt;
0406 int err = 0;
0407
0408 elt = kzalloc(sizeof(*elt), GFP_KERNEL);
0409 if (!elt)
0410 return ERR_PTR(-ENOMEM);
0411
0412 elt->map = map;
0413
0414 elt->key = kzalloc(map->key_size, GFP_KERNEL);
0415 if (!elt->key) {
0416 err = -ENOMEM;
0417 goto free;
0418 }
0419
0420 elt->fields = kcalloc(map->n_fields, sizeof(*elt->fields), GFP_KERNEL);
0421 if (!elt->fields) {
0422 err = -ENOMEM;
0423 goto free;
0424 }
0425
0426 elt->vars = kcalloc(map->n_vars, sizeof(*elt->vars), GFP_KERNEL);
0427 if (!elt->vars) {
0428 err = -ENOMEM;
0429 goto free;
0430 }
0431
0432 elt->var_set = kcalloc(map->n_vars, sizeof(*elt->var_set), GFP_KERNEL);
0433 if (!elt->var_set) {
0434 err = -ENOMEM;
0435 goto free;
0436 }
0437
0438 tracing_map_elt_init_fields(elt);
0439
0440 if (map->ops && map->ops->elt_alloc) {
0441 err = map->ops->elt_alloc(elt);
0442 if (err)
0443 goto free;
0444 }
0445 return elt;
0446 free:
0447 tracing_map_elt_free(elt);
0448
0449 return ERR_PTR(err);
0450 }
0451
0452 static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
0453 {
0454 struct tracing_map_elt *elt = NULL;
0455 int idx;
0456
0457 idx = atomic_inc_return(&map->next_elt);
0458 if (idx < map->max_elts) {
0459 elt = *(TRACING_MAP_ELT(map->elts, idx));
0460 if (map->ops && map->ops->elt_init)
0461 map->ops->elt_init(elt);
0462 }
0463
0464 return elt;
0465 }
0466
0467 static void tracing_map_free_elts(struct tracing_map *map)
0468 {
0469 unsigned int i;
0470
0471 if (!map->elts)
0472 return;
0473
0474 for (i = 0; i < map->max_elts; i++) {
0475 tracing_map_elt_free(*(TRACING_MAP_ELT(map->elts, i)));
0476 *(TRACING_MAP_ELT(map->elts, i)) = NULL;
0477 }
0478
0479 tracing_map_array_free(map->elts);
0480 map->elts = NULL;
0481 }
0482
0483 static int tracing_map_alloc_elts(struct tracing_map *map)
0484 {
0485 unsigned int i;
0486
0487 map->elts = tracing_map_array_alloc(map->max_elts,
0488 sizeof(struct tracing_map_elt *));
0489 if (!map->elts)
0490 return -ENOMEM;
0491
0492 for (i = 0; i < map->max_elts; i++) {
0493 *(TRACING_MAP_ELT(map->elts, i)) = tracing_map_elt_alloc(map);
0494 if (IS_ERR(*(TRACING_MAP_ELT(map->elts, i)))) {
0495 *(TRACING_MAP_ELT(map->elts, i)) = NULL;
0496 tracing_map_free_elts(map);
0497
0498 return -ENOMEM;
0499 }
0500 }
0501
0502 return 0;
0503 }
0504
/* Byte-wise equality of two keys of the same size. */
static inline bool keys_match(void *key, void *test_key, unsigned key_size)
{
	return !memcmp(key, test_key, key_size);
}
0514
/*
 * Find @key in the map, inserting it if @lookup_only is false.
 *
 * The map is an open-addressed hash table probed linearly from the
 * hashed slot.  Each entry stores the 32-bit key hash and a pointer to
 * the element holding the full key and its values.  Empty slots are
 * claimed lock-free with cmpxchg() on the hash, so concurrent inserters
 * need no lock.  Returns the element, or NULL on drop / failed lookup.
 */
static inline struct tracing_map_elt *
__tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
{
	u32 idx, key_hash, test_key;
	int dup_try = 0;
	struct tracing_map_entry *entry;
	struct tracing_map_elt *val;

	key_hash = jhash(key, map->key_size, 0);
	if (key_hash == 0)
		key_hash = 1;	/* hash 0 means "empty slot", never use it */
	idx = key_hash >> (32 - (map->map_bits + 1));

	while (1) {
		idx &= (map->map_size - 1);
		entry = TRACING_MAP_ENTRY(map->map, idx);
		test_key = entry->key;

		if (test_key && test_key == key_hash) {
			val = READ_ONCE(entry->val);
			if (val &&
			    keys_match(key, val->key, map->key_size)) {
				if (!lookup_only)
					atomic64_inc(&map->hits);
				return val;
			} else if (unlikely(!val)) {
				/*
				 * The hash matches but entry->val is still
				 * NULL: another CPU has claimed this slot
				 * (set entry->key) but hasn't finished
				 * publishing the element yet.  Re-examine
				 * the same slot rather than move on, so we
				 * don't insert a duplicate of a key that is
				 * mid-insert.  Give up and count a drop if
				 * it never resolves (e.g. the inserter was
				 * interrupted mid-insert — NOTE(review):
				 * presumed cause; the bound exists so we
				 * can't spin forever either way).
				 */
				dup_try++;
				if (dup_try > map->map_size) {
					atomic64_inc(&map->drops);
					break;
				}
				continue;
			}
		}

		if (!test_key) {
			if (lookup_only)
				break;

			if (!cmpxchg(&entry->key, 0, key_hash)) {
				struct tracing_map_elt *elt;

				elt = get_free_elt(map);
				if (!elt) {
					/* map full: release the claimed slot */
					atomic64_inc(&map->drops);
					entry->key = 0;
					break;
				}

				memcpy(elt->key, key, map->key_size);
				entry->val = elt;
				atomic64_inc(&map->hits);

				return entry->val;
			} else {
				/*
				 * cmpxchg() lost the race: someone else
				 * claimed this slot first.  Re-examine the
				 * same slot — it may now hold our key.
				 */
				dup_try++;
				continue;
			}
		}

		idx++;
	}

	return NULL;
}
0596
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
/**
 * tracing_map_insert - Look up @key, inserting it if not present
 * @map: The tracing_map
 * @key: The key to look up or insert
 *
 * Return: The element for @key, or NULL if the insertion was dropped
 * (see __tracing_map_insert()).
 */
struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, false);
}
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
/**
 * tracing_map_lookup - Look up @key without inserting it
 * @map: The tracing_map
 * @key: The key to look up
 *
 * Return: The element for @key, or NULL if it isn't in the map.
 */
struct tracing_map_elt *tracing_map_lookup(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, true);
}
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669
0670
/**
 * tracing_map_destroy - Destroy a tracing_map
 * @map: The tracing_map to destroy
 *
 * Frees the pre-allocated elements (and their client data), the entry
 * table, and the map itself.  Safe to call with NULL.
 */
void tracing_map_destroy(struct tracing_map *map)
{
	if (!map)
		return;

	tracing_map_free_elts(map);

	tracing_map_array_free(map->map);
	kfree(map);
}
0681
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691
0692
/**
 * tracing_map_clear - Reset a tracing_map to its freshly-created state
 * @map: The tracing_map to clear
 *
 * Resets the hit/drop counters and the element-allocation cursor
 * (next_elt starts at -1 so the first claimed index is 0 — see
 * get_free_elt()), zeroes the entry table, and clears every
 * pre-allocated element's sums, variables and client data.
 */
void tracing_map_clear(struct tracing_map *map)
{
	unsigned int i;

	atomic_set(&map->next_elt, -1);
	atomic64_set(&map->hits, 0);
	atomic64_set(&map->drops, 0);

	tracing_map_array_clear(map->map);

	for (i = 0; i < map->max_elts; i++)
		tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i)));
}
0706
/*
 * Publish the sort key on the map; the cmp_entries_sum()/cmp_entries_key()
 * comparators read it from there during sort().
 */
static void set_sort_key(struct tracing_map *map,
			 struct tracing_map_sort_key *sort_key)
{
	map->sort_key = *sort_key;
}
0712
0713
0714
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728
0729
0730
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740
0741
0742
0743
0744
0745
0746
0747
0748
0749
0750
0751
0752
0753
0754
0755
0756
0757
0758
0759
0760
0761
0762
0763 struct tracing_map *tracing_map_create(unsigned int map_bits,
0764 unsigned int key_size,
0765 const struct tracing_map_ops *ops,
0766 void *private_data)
0767 {
0768 struct tracing_map *map;
0769 unsigned int i;
0770
0771 if (map_bits < TRACING_MAP_BITS_MIN ||
0772 map_bits > TRACING_MAP_BITS_MAX)
0773 return ERR_PTR(-EINVAL);
0774
0775 map = kzalloc(sizeof(*map), GFP_KERNEL);
0776 if (!map)
0777 return ERR_PTR(-ENOMEM);
0778
0779 map->map_bits = map_bits;
0780 map->max_elts = (1 << map_bits);
0781 atomic_set(&map->next_elt, -1);
0782
0783 map->map_size = (1 << (map_bits + 1));
0784 map->ops = ops;
0785
0786 map->private_data = private_data;
0787
0788 map->map = tracing_map_array_alloc(map->map_size,
0789 sizeof(struct tracing_map_entry));
0790 if (!map->map)
0791 goto free;
0792
0793 map->key_size = key_size;
0794 for (i = 0; i < TRACING_MAP_KEYS_MAX; i++)
0795 map->key_idx[i] = -1;
0796 out:
0797 return map;
0798 free:
0799 tracing_map_destroy(map);
0800 map = ERR_PTR(-ENOMEM);
0801
0802 goto out;
0803 }
0804
0805
0806
0807
0808
0809
0810
0811
0812
0813
0814
0815
0816
0817
0818
0819
0820
0821
0822
0823
0824 int tracing_map_init(struct tracing_map *map)
0825 {
0826 int err;
0827
0828 if (map->n_fields < 2)
0829 return -EINVAL;
0830
0831 err = tracing_map_alloc_elts(map);
0832 if (err)
0833 return err;
0834
0835 tracing_map_clear(map);
0836
0837 return err;
0838 }
0839
0840 static int cmp_entries_dup(const void *A, const void *B)
0841 {
0842 const struct tracing_map_sort_entry *a, *b;
0843 int ret = 0;
0844
0845 a = *(const struct tracing_map_sort_entry **)A;
0846 b = *(const struct tracing_map_sort_entry **)B;
0847
0848 if (memcmp(a->key, b->key, a->elt->map->key_size))
0849 ret = 1;
0850
0851 return ret;
0852 }
0853
0854 static int cmp_entries_sum(const void *A, const void *B)
0855 {
0856 const struct tracing_map_elt *elt_a, *elt_b;
0857 const struct tracing_map_sort_entry *a, *b;
0858 struct tracing_map_sort_key *sort_key;
0859 struct tracing_map_field *field;
0860 tracing_map_cmp_fn_t cmp_fn;
0861 void *val_a, *val_b;
0862 int ret = 0;
0863
0864 a = *(const struct tracing_map_sort_entry **)A;
0865 b = *(const struct tracing_map_sort_entry **)B;
0866
0867 elt_a = a->elt;
0868 elt_b = b->elt;
0869
0870 sort_key = &elt_a->map->sort_key;
0871
0872 field = &elt_a->fields[sort_key->field_idx];
0873 cmp_fn = field->cmp_fn;
0874
0875 val_a = &elt_a->fields[sort_key->field_idx].sum;
0876 val_b = &elt_b->fields[sort_key->field_idx].sum;
0877
0878 ret = cmp_fn(val_a, val_b);
0879 if (sort_key->descending)
0880 ret = -ret;
0881
0882 return ret;
0883 }
0884
0885 static int cmp_entries_key(const void *A, const void *B)
0886 {
0887 const struct tracing_map_elt *elt_a, *elt_b;
0888 const struct tracing_map_sort_entry *a, *b;
0889 struct tracing_map_sort_key *sort_key;
0890 struct tracing_map_field *field;
0891 tracing_map_cmp_fn_t cmp_fn;
0892 void *val_a, *val_b;
0893 int ret = 0;
0894
0895 a = *(const struct tracing_map_sort_entry **)A;
0896 b = *(const struct tracing_map_sort_entry **)B;
0897
0898 elt_a = a->elt;
0899 elt_b = b->elt;
0900
0901 sort_key = &elt_a->map->sort_key;
0902
0903 field = &elt_a->fields[sort_key->field_idx];
0904
0905 cmp_fn = field->cmp_fn;
0906
0907 val_a = elt_a->key + field->offset;
0908 val_b = elt_b->key + field->offset;
0909
0910 ret = cmp_fn(val_a, val_b);
0911 if (sort_key->descending)
0912 ret = -ret;
0913
0914 return ret;
0915 }
0916
/*
 * Free one sort entry.  If elt_copied is set, the entry owns a private
 * copy of the element and must free it too; otherwise the element
 * still belongs to the map.  Safe to call with NULL.
 */
static void destroy_sort_entry(struct tracing_map_sort_entry *entry)
{
	if (!entry)
		return;

	if (entry->elt_copied)
		tracing_map_elt_free(entry->elt);

	kfree(entry);
}
0927
0928
0929
0930
0931
0932
0933
0934
/**
 * tracing_map_destroy_sort_entries - Destroy an array of sort entries
 * @entries: The entries to destroy, as returned by
 *	tracing_map_sort_entries()
 * @n_entries: The number of entries in the array
 *
 * Frees each entry and then the vmalloc'ed array itself.
 */
void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
				      unsigned int n_entries)
{
	unsigned int i;

	for (i = 0; i < n_entries; i++)
		destroy_sort_entry(entries[i]);

	vfree(entries);
}
0945
/*
 * Allocate one sort entry referencing (not copying) @key and @elt;
 * elt_copied is left false, so destroy_sort_entry() won't free the
 * element.  Returns NULL on allocation failure.
 */
static struct tracing_map_sort_entry *
create_sort_entry(void *key, struct tracing_map_elt *elt)
{
	struct tracing_map_sort_entry *sort_entry;

	sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL);
	if (!sort_entry)
		return NULL;

	sort_entry->key = key;
	sort_entry->elt = elt;

	return sort_entry;
}
0960
0961 static void detect_dups(struct tracing_map_sort_entry **sort_entries,
0962 int n_entries, unsigned int key_size)
0963 {
0964 unsigned int dups = 0, total_dups = 0;
0965 int i;
0966 void *key;
0967
0968 if (n_entries < 2)
0969 return;
0970
0971 sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
0972 (int (*)(const void *, const void *))cmp_entries_dup, NULL);
0973
0974 key = sort_entries[0]->key;
0975 for (i = 1; i < n_entries; i++) {
0976 if (!memcmp(sort_entries[i]->key, key, key_size)) {
0977 dups++; total_dups++;
0978 continue;
0979 }
0980 key = sort_entries[i]->key;
0981 dups = 0;
0982 }
0983
0984 WARN_ONCE(total_dups > 0,
0985 "Duplicates detected: %d\n", total_dups);
0986 }
0987
0988 static bool is_key(struct tracing_map *map, unsigned int field_idx)
0989 {
0990 unsigned int i;
0991
0992 for (i = 0; i < map->n_keys; i++)
0993 if (map->key_idx[i] == field_idx)
0994 return true;
0995 return false;
0996 }
0997
/*
 * Sub-sort runs of entries whose primary sort field compares equal,
 * using the secondary sort key.  @entries must already be sorted by
 * @primary_key.  A run is tracked as (start, n_sub); when the run ends
 * (or the array does) and holds at least two entries, it is sorted in
 * place with the secondary comparator.  map->sort_key is temporarily
 * switched to the secondary key around each sub-sort because the
 * comparators read the sort key from the map, then restored.
 */
static void sort_secondary(struct tracing_map *map,
			   const struct tracing_map_sort_entry **entries,
			   unsigned int n_entries,
			   struct tracing_map_sort_key *primary_key,
			   struct tracing_map_sort_key *secondary_key)
{
	int (*primary_fn)(const void *, const void *);
	int (*secondary_fn)(const void *, const void *);
	unsigned i, start = 0, n_sub = 1;

	/* key fields compare raw key bytes; all others compare sums */
	if (is_key(map, primary_key->field_idx))
		primary_fn = cmp_entries_key;
	else
		primary_fn = cmp_entries_sum;

	if (is_key(map, secondary_key->field_idx))
		secondary_fn = cmp_entries_key;
	else
		secondary_fn = cmp_entries_sum;

	for (i = 0; i < n_entries - 1; i++) {
		const struct tracing_map_sort_entry **a = &entries[i];
		const struct tracing_map_sort_entry **b = &entries[i + 1];

		if (primary_fn(a, b) == 0) {
			n_sub++;
			/* keep extending the run unless it ends at the array end */
			if (i < n_entries - 2)
				continue;
		}

		if (n_sub < 2) {
			/* run of one — nothing to sub-sort */
			start = i + 1;
			n_sub = 1;
			continue;
		}

		set_sort_key(map, secondary_key);
		sort(&entries[start], n_sub,
		     sizeof(struct tracing_map_sort_entry *),
		     (int (*)(const void *, const void *))secondary_fn, NULL);
		set_sort_key(map, primary_key);

		start = i + 1;
		n_sub = 1;
	}
}
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
/**
 * tracing_map_sort_entries - Sort the current set of map entries
 * @map: The tracing_map
 * @sort_keys: The sort key(s) to use: primary, then optional secondary
 * @n_sort_keys: The number of sort keys (1 or 2)
 * @sort_entries: outval: the allocated, sorted array of entries
 *
 * Snapshots every populated entry of @map into a vmalloc'ed array,
 * runs a duplicate-key sanity check, sorts by the primary key, and —
 * if @n_sort_keys > 1 — sub-sorts primary-equal runs by the secondary
 * key.  On success the array is returned to the caller through
 * @sort_entries and must be freed with
 * tracing_map_destroy_sort_entries().
 *
 * Return: The number of entries in the array (>= 0) on success, or a
 * negative errno on failure.
 */
int tracing_map_sort_entries(struct tracing_map *map,
			     struct tracing_map_sort_key *sort_keys,
			     unsigned int n_sort_keys,
			     struct tracing_map_sort_entry ***sort_entries)
{
	int (*cmp_entries_fn)(const void *, const void *);
	struct tracing_map_sort_entry *sort_entry, **entries;
	int i, n_entries, ret;

	/* worst case: every pre-allocated element is in use */
	entries = vmalloc(array_size(sizeof(sort_entry), map->max_elts));
	if (!entries)
		return -ENOMEM;

	for (i = 0, n_entries = 0; i < map->map_size; i++) {
		struct tracing_map_entry *entry;

		entry = TRACING_MAP_ENTRY(map->map, i);

		/* skip empty slots and claims that never completed */
		if (!entry->key || !entry->val)
			continue;

		entries[n_entries] = create_sort_entry(entry->val->key,
						       entry->val);
		if (!entries[n_entries++]) {
			ret = -ENOMEM;
			goto free;
		}
	}

	if (n_entries == 0) {
		ret = 0;
		goto free;
	}

	if (n_entries == 1) {
		/* nothing to sort */
		*sort_entries = entries;
		return 1;
	}

	detect_dups(entries, n_entries, map->key_size);

	if (is_key(map, sort_keys[0].field_idx))
		cmp_entries_fn = cmp_entries_key;
	else
		cmp_entries_fn = cmp_entries_sum;

	/* comparators read the sort key from the map */
	set_sort_key(map, &sort_keys[0]);

	sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *),
	     (int (*)(const void *, const void *))cmp_entries_fn, NULL);

	if (n_sort_keys > 1)
		sort_secondary(map,
			       (const struct tracing_map_sort_entry **)entries,
			       n_entries,
			       &sort_keys[0],
			       &sort_keys[1]);

	*sort_entries = entries;

	return n_entries;
 free:
	/* n_entries includes the slot whose allocation failed (NULL is OK) */
	tracing_map_destroy_sort_entries(entries, n_entries);

	return ret;
}