#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the set of CPUs in a
 * system, one bit position per CPU number.  In general, only nr_cpu_ids
 * (<= NR_CPUS) bits are valid.
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/gfp_types.h>
#include <linux/numa.h>

/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

/*
 * cpumask_bits - get the bits in a cpumask.
 * Only nr_cpu_ids bits of this mask should be assumed valid.  This is a
 * macro so it's const-safe.
 */
#define cpumask_bits(maskp) ((maskp)->bits)

/*
 * cpumask_pr_args - printf args to output a cpumask.
 * Provides the arguments for the '%*pb[l]' format when printing a cpumask.
 */
#define cpumask_pr_args(maskp)		nr_cpu_ids, cpumask_bits(maskp)

#if NR_CPUS == 1
#define nr_cpu_ids		1U
#else
extern unsigned int nr_cpu_ids;
#endif

#ifdef CONFIG_CPUMASK_OFFSTACK
/*
 * Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
 * not all bits may be allocated.
 */
#define nr_cpumask_bits	nr_cpu_ids
#else
#define nr_cpumask_bits	((unsigned int)NR_CPUS)
#endif

/*
 * The following particular system cpumasks and operations manage
 * possible, present, active and online cpus.
 *
 *     cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
 *     cpu_present_mask - has bit 'cpu' set iff cpu is populated
 *     cpu_online_mask  - has bit 'cpu' set iff cpu available to scheduler
 *     cpu_active_mask  - has bit 'cpu' set iff cpu available to migration
 *     cpu_dying_mask   - has bit 'cpu' set iff cpu is being torn down
 *
 *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
 *
 *  The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
 *  that could ever be plugged in during the life of that system boot.
 *  The cpu_present_mask tracks which CPUs are currently plugged in, and
 *  cpu_online_mask is the dynamic subset of the present CPUs that the
 *  scheduler may use.
 */
extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)
#define cpu_dying_mask    ((const struct cpumask *)&__cpu_dying_mask)

extern atomic_t __num_online_cpus;

extern cpumask_t cpus_booted_once_mask;

static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	WARN_ON_ONCE(cpu >= bits);
#endif
}

/* verify cpu argument to cpumask_* operators */
static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
	cpu_max_bits_warn(cpu, nr_cpumask_bits);
	return cpu;
}

/*
 * cpumask_first - get the first cpu in a cpumask.
 * Returns >= nr_cpu_ids if no cpus are set.
 */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

/*
 * cpumask_first_zero - get the first unset cpu in a cpumask.
 * Returns >= nr_cpu_ids if all cpus are set.
 */
static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
	return find_first_zero_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

/*
 * cpumask_first_and - return the first cpu in *srcp1 & *srcp2.
 * Returns >= nr_cpu_ids if no cpu is set in both masks.
 */
static inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
	return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), nr_cpumask_bits);
}

/*
 * cpumask_last - get the last CPU in a cpumask.
 * Returns >= nr_cpumask_bits if no CPUs are set.
 */
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
	return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

/*
 * cpumask_next - get the next cpu in a cpumask after @n.
 * Returns >= nr_cpu_ids if no further cpus are set.
 */
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}

/*
 * cpumask_next_zero - get the next unset cpu in a cpumask after @n.
 * Returns >= nr_cpu_ids if no further cpus are unset.
 */
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}

#if NR_CPUS == 1
/* Uniprocessor: the only valid CPU is 0, so these all collapse trivially. */
static inline unsigned int cpumask_local_spread(unsigned int i, int node)
{
	return 0;
}

static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
						      const struct cpumask *src2p)
{
	return cpumask_first_and(src1p, src2p);
}

static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	return cpumask_first(srcp);
}
#else
unsigned int cpumask_local_spread(unsigned int i, int node);
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p);
unsigned int cpumask_any_distribute(const struct cpumask *srcp);
#endif

/*
 * cpumask_next_and - get the next cpu in *src1p & *src2p after @n.
 * Returns >= nr_cpu_ids if no further cpus are set in both.
 */
static inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
				 nr_cpumask_bits, n + 1);
}

/*
 * for_each_cpu - iterate over every cpu in a mask.
 * @cpu is the (optionally unsigned) integer iterator, @mask the cpumask
 * pointer.  After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu(cpu, mask)				\
	for ((cpu) = -1;				\
		(cpu) = cpumask_next((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

/*
 * for_each_cpu_not - iterate over every cpu in a complemented mask.
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_not(cpu, mask)				\
	for ((cpu) = -1;					\
		(cpu) = cpumask_next_zero((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

#if NR_CPUS == 1
static inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	cpumask_check(start);
	if (n != -1)
		cpumask_check(n);

	/*
	 * Return the first available CPU when wrapping, or when starting
	 * before cpu0, since there is only one valid option.
	 */
	if (wrap && n >= 0)
		return nr_cpumask_bits;

	return cpumask_first(mask);
}
#else
unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
#endif

/*
 * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a
 * specified location @start and wrapping around to the beginning.
 *
 * The implementation does not assume any bit in @mask is set (including
 * @start).  After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_wrap(cpu, mask, start)					\
	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
	     (cpu) < nr_cpumask_bits;						\
	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))

/*
 * for_each_cpu_and - iterate over every cpu in both masks.
 *
 * This saves a temporary CPU mask in many places.  It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_and(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_and(cpu, mask1, mask2)				\
	for ((cpu) = -1;						\
		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
		(cpu) < nr_cpu_ids;)
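
/*
 * Illustrative sketch (not part of the original header): counting the online
 * CPUs that also appear in a caller-supplied mask with for_each_cpu_and().
 * The helper name is hypothetical.
 *
 *	static unsigned int count_online_in(const struct cpumask *allowed)
 *	{
 *		unsigned int cpu, n = 0;
 *
 *		for_each_cpu_and(cpu, allowed, cpu_online_mask)
 *			n++;
 *		return n;
 *	}
 *
 * This avoids building a temporary cpumask_and() result just to iterate it.
 */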

/*
 * cpumask_any_but - return an arbitrary cpu from @mask, but not @cpu.
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no other cpus are set.
 */
static inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}

/* Static bitmap initializers: no CPUs set, and CPU 0 only. */
#define CPU_BITS_NONE						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
}

#define CPU_BITS_CPU0						\
{								\
	[0] = 1UL						\
}

/*
 * cpumask_set_cpu - set a cpu in a cpumask (atomically);
 * __cpumask_set_cpu is the non-atomic variant.
 */
static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/*
 * cpumask_clear_cpu - clear a cpu in a cpumask (atomically);
 * __cpumask_clear_cpu is the non-atomic variant.
 */
static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/*
 * cpumask_test_cpu - test for a cpu in a cpumask.
 * Returns true if @cpu is set in @cpumask, else false.
 */
static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
	return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}

/*
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask.
 * Returns true if @cpu was already set in @cpumask, else false.
 */
static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/*
 * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask.
 * Returns true if @cpu was set in @cpumask, else false.
 */
static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask. */
static inline void cpumask_setall(struct cpumask *dstp)
{
	bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}

/* cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask. */
static inline void cpumask_clear(struct cpumask *dstp)
{
	bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
}
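
/*
 * Illustrative sketch (not part of the original header): building a mask in a
 * statically allocated cpumask with the setters above.  The variable and
 * function names are hypothetical.
 *
 *	static struct cpumask tracked_cpus;
 *
 *	static void track_cpu(unsigned int cpu)
 *	{
 *		if (!cpumask_test_and_set_cpu(cpu, &tracked_cpus))
 *			pr_debug("cpu%u is now tracked\n", cpu);
 *	}
 *
 * cpumask_set_cpu()/cpumask_clear_cpu() are atomic; the __-prefixed variants
 * may be used when the caller already provides its own exclusion.
 */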

/*
 * cpumask_and - *dstp = *src1p & *src2p.
 * Returns false if *@dstp is empty, else returns true.
 */
static inline bool cpumask_and(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
			  cpumask_bits(src2p), nr_cpumask_bits);
}

/* cpumask_or - *dstp = *src1p | *src2p. */
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
		  cpumask_bits(src2p), nr_cpumask_bits);
}

/* cpumask_xor - *dstp = *src1p ^ *src2p. */
static inline void cpumask_xor(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
		   cpumask_bits(src2p), nr_cpumask_bits);
}

/*
 * cpumask_andnot - *dstp = *src1p & ~*src2p.
 * Returns false if *@dstp is empty, else returns true.
 */
static inline bool cpumask_andnot(struct cpumask *dstp,
				  const struct cpumask *src1p,
				  const struct cpumask *src2p)
{
	return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
			     cpumask_bits(src2p), nr_cpumask_bits);
}

/* cpumask_complement - *dstp = ~*srcp. */
static inline void cpumask_complement(struct cpumask *dstp,
				      const struct cpumask *srcp)
{
	bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
			  nr_cpumask_bits);
}

/* cpumask_equal - returns true iff *src1p == *src2p. */
static inline bool cpumask_equal(const struct cpumask *src1p,
				 const struct cpumask *src2p)
{
	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			    nr_cpumask_bits);
}

/* cpumask_or_equal - returns true iff (*src1p | *src2p) == *src3p. */
static inline bool cpumask_or_equal(const struct cpumask *src1p,
				    const struct cpumask *src2p,
				    const struct cpumask *src3p)
{
	return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			       cpumask_bits(src3p), nr_cpumask_bits);
}

/* cpumask_intersects - returns true iff (*src1p & *src2p) is non-empty. */
static inline bool cpumask_intersects(const struct cpumask *src1p,
				      const struct cpumask *src2p)
{
	return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
				 nr_cpumask_bits);
}

/* cpumask_subset - returns true iff *src1p is a subset of *src2p. */
static inline bool cpumask_subset(const struct cpumask *src1p,
				  const struct cpumask *src2p)
{
	return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
			     nr_cpumask_bits);
}

/* cpumask_empty - returns true iff no cpus are set in *srcp. */
static inline bool cpumask_empty(const struct cpumask *srcp)
{
	return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
}

/* cpumask_full - returns true iff all cpus (< nr_cpu_ids) are set in *srcp. */
static inline bool cpumask_full(const struct cpumask *srcp)
{
	return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}

/* cpumask_weight - returns the number of cpus set in *srcp. */
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
	return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
}

/* cpumask_shift_right - *dstp = *srcp >> n. */
static inline void cpumask_shift_right(struct cpumask *dstp,
				       const struct cpumask *srcp, int n)
{
	bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
			   nr_cpumask_bits);
}

/* cpumask_shift_left - *dstp = *srcp << n. */
static inline void cpumask_shift_left(struct cpumask *dstp,
				      const struct cpumask *srcp, int n)
{
	bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
			  nr_cpumask_bits);
}

/* cpumask_copy - *dstp = *srcp. */
static inline void cpumask_copy(struct cpumask *dstp,
				const struct cpumask *srcp)
{
	bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
}
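
/*
 * Illustrative sketch (not part of the original header): combining the
 * operations above to count how many CPUs two masks share.  The helper and
 * the on-stack mask are hypothetical; callers built with a large NR_CPUS
 * would typically use cpumask_var_t instead (see further below).
 *
 *	static unsigned int shared_cpu_count(const struct cpumask *a,
 *					     const struct cpumask *b)
 *	{
 *		struct cpumask tmp;
 *
 *		if (!cpumask_and(&tmp, a, b))
 *			return 0;
 *		return cpumask_weight(&tmp);
 *	}
 */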

/*
 * cpumask_any - pick an arbitrary cpu from *srcp.
 * Returns >= nr_cpu_ids if no cpus are set.
 */
#define cpumask_any(srcp) cpumask_first(srcp)

/*
 * cpumask_any_and - pick an arbitrary cpu from *mask1 & *mask2.
 * Returns >= nr_cpu_ids if no cpus are set in both masks.
 */
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))

/* cpumask_of - the cpumask containing just a given cpu. */
#define cpumask_of(cpu) (get_cpu_mask(cpu))

/*
 * cpumask_parse_user - extract a cpumask from a user string of hex words.
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parse_user(const char __user *buf, int len,
				     struct cpumask *dstp)
{
	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}

/*
 * cpumask_parselist_user - extract a cpumask from a user string of
 * ranges (e.g. "0-3,7").  Returns -errno, or 0 for success.
 */
static inline int cpumask_parselist_user(const char __user *buf, int len,
					 struct cpumask *dstp)
{
	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
				     nr_cpumask_bits);
}

/*
 * cpumask_parse - extract a cpumask from a kernel string of hex words.
 * Returns -errno, or 0 for success.
 */
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}

/*
 * cpulist_parse - extract a cpumask from a kernel string of ranges
 * (e.g. "0-3,7").  Returns -errno, or 0 for success.
 */
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
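
/*
 * Illustrative sketch (not part of the original header): parsing a CPU list
 * string such as "0-3,8" from, e.g., a module parameter.  The names are
 * hypothetical.
 *
 *	static struct cpumask isolated;
 *
 *	static int parse_isolated(const char *str)
 *	{
 *		int err = cpulist_parse(str, &isolated);
 *
 *		if (err)
 *			return err;
 *		if (!cpumask_subset(&isolated, cpu_possible_mask))
 *			return -EINVAL;
 *		return 0;
 *	}
 */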

/* cpumask_size - size to allocate for a 'struct cpumask' in bytes. */
static inline unsigned int cpumask_size(void)
{
	return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}

/*
 * cpumask_var_t: struct cpumask for stack usage.
 *
 * Depending on CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is either a
 * one-element array of struct cpumask (so the mask lives in the variable
 * itself) or a pointer to a dynamically allocated struct cpumask.  Either
 * way it must be set up with alloc_cpumask_var()/zalloc_cpumask_var() (or
 * the _node variants) and released with free_cpumask_var().  In the
 * on-stack configuration the allocation helpers are no-ops that always
 * succeed.
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;

#define this_cpu_cpumask_var_ptr(x)	this_cpu_read(x)
#define __cpumask_var_read_mostly	__read_mostly

bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);

static inline
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}

/*
 * alloc_cpumask_var - allocate a struct cpumask for @mask.
 * Returns true if the allocation succeeded, false if not.  In the
 * CONFIG_CPUMASK_OFFSTACK=n configuration this is a nop that always
 * returns true.
 */
static inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}

static inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}

void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);

static inline bool cpumask_available(cpumask_var_t mask)
{
	return mask != NULL;
}

#else
typedef struct cpumask cpumask_var_t[1];

#define this_cpu_cpumask_var_ptr(x)	this_cpu_ptr(x)
#define __cpumask_var_read_mostly

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return true;
}

static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	return true;
}

static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	cpumask_clear(*mask);
	return true;
}

static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					   int node)
{
	cpumask_clear(*mask);
	return true;
}

static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}

static inline bool cpumask_available(cpumask_var_t mask)
{
	return true;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
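
/*
 * Illustrative sketch (not part of the original header): the usual
 * allocate/use/free pattern for cpumask_var_t, which works whether or not
 * CONFIG_CPUMASK_OFFSTACK is enabled.  The function name is hypothetical.
 *
 *	static int do_something_on_online_cpus(void)
 *	{
 *		cpumask_var_t tmp;
 *		unsigned int cpu;
 *
 *		if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		cpumask_copy(tmp, cpu_online_mask);
 *		for_each_cpu(cpu, tmp)
 *			;	// act on each cpu that was online at copy time
 *		free_cpumask_var(tmp);
 *		return 0;
 *	}
 */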

/*
 * It's common to want to use cpu_all_mask in struct member initializers,
 * so it has to refer to an address rather than a pointer.
 */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define cpu_all_mask to_cpumask(cpu_all_bits)

/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])

#if NR_CPUS == 1
/* Uniprocessor: assume all masks are "1". */
#define for_each_possible_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_online_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_present_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#else
#define for_each_possible_cpu(cpu)	for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu)	for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu)	for_each_cpu((cpu), cpu_present_mask)
#endif

/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);

static inline void reset_cpu_possible_mask(void)
{
	bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
}

static inline void
set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, &__cpu_possible_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_possible_mask);
}

static inline void
set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, &__cpu_present_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_present_mask);
}

void set_cpu_online(unsigned int cpu, bool online);

static inline void
set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, &__cpu_active_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_active_mask);
}

static inline void
set_cpu_dying(unsigned int cpu, bool dying)
{
	if (dying)
		cpumask_set_cpu(cpu, &__cpu_dying_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_dying_mask);
}

/*
 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 */
#define to_cpumask(bitmap)						\
	((struct cpumask *)(1 ? (bitmap)				\
			    : (void *)sizeof(__check_is_bitmap(bitmap))))

static inline int __check_is_bitmap(const unsigned long *bitmap)
{
	return 1;
}

/*
 * Special-case data structure for "single bit set only": row 1 + n of
 * cpu_bit_bitmap has only bit n of its first word set (0 <= n <
 * BITS_PER_LONG).  get_cpu_mask() steps the row pointer back by
 * cpu / BITS_PER_LONG words so that bit 'cpu' of the resulting mask is
 * the set bit, which lets cpumask_of() return a constant mask without
 * allocating.
 */
extern const unsigned long
	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}

#if NR_CPUS > 1
/*
 * num_online_cpus() - Read the number of online CPUs.
 *
 * Despite the fact that __num_online_cpus is of type atomic_t, this
 * interface gives only a momentary snapshot and is not protected against
 * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
 * region.
 */
static inline unsigned int num_online_cpus(void)
{
	return atomic_read(&__num_online_cpus);
}
#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
#define num_present_cpus()	cpumask_weight(cpu_present_mask)
#define num_active_cpus()	cpumask_weight(cpu_active_mask)

static inline bool cpu_online(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_online_mask);
}

static inline bool cpu_possible(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_possible_mask);
}

static inline bool cpu_present(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_present_mask);
}

static inline bool cpu_active(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_active_mask);
}

static inline bool cpu_dying(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_dying_mask);
}

#else

#define num_online_cpus()	1U
#define num_possible_cpus()	1U
#define num_present_cpus()	1U
#define num_active_cpus()	1U

static inline bool cpu_online(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_possible(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_present(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_active(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_dying(unsigned int cpu)
{
	return false;
}

#endif /* NR_CPUS > 1 */

#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))

#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL							\
{									\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}

#else

#define CPU_BITS_ALL							\
{									\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}
#endif

/*
 * cpumap_print_to_pagebuf - copy the cpumask into @buf, either as a
 * comma-separated list of cpus (@list == true) or as hex words.
 * @buf is assumed to be at most one page.  Returns the length of the
 * (null-terminated) @buf string, or zero if nothing is copied.
 */
static inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
	return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
				       nr_cpu_ids);
}
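
/*
 * Illustrative sketch (not part of the original header): a typical sysfs
 * "show" callback that reports a cpumask as a CPU list.  The attribute and
 * mask names are hypothetical.
 *
 *	static ssize_t tracked_cpus_show(struct device *dev,
 *					 struct device_attribute *attr,
 *					 char *buf)
 *	{
 *		return cpumap_print_to_pagebuf(true, buf, &tracked_cpus);
 *	}
 *
 * Passing false instead of true prints the mask as hex words rather than a
 * range list.
 */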

/*
 * cpumap_print_bitmask_to_buf - copy the cpumask into @buf as hex words,
 * honouring an @off/@count window so that large masks can be read from a
 * sysfs binary attribute in several chunks.  Returns the number of
 * characters copied, excluding the terminating '\0'.
 */
static inline ssize_t
cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
			    loff_t off, size_t count)
{
	return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
					   nr_cpu_ids, off, count) - 1;
}

/*
 * cpumap_print_list_to_buf - like cpumap_print_bitmask_to_buf(), but
 * prints the mask as a CPU range list instead of hex words.
 */
static inline ssize_t
cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
			 loff_t off, size_t count)
{
	return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
					nr_cpu_ids, off, count) - 1;
}

#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#else
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#endif

#define CPU_MASK_NONE							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL				\
} }

#define CPU_MASK_CPU0							\
(cpumask_t) { {								\
	[0] = 1UL							\
} }

/*
 * Provide a valid theoretical max size for the cpumap and cpulist sysfs
 * files, so that userspace which sizes its read buffer from e.g. fstat()
 * is not broken.  For the hex cpumap format each 32-bit chunk costs up to
 * 9 characters ("%08x,"); the cpulist figure is a per-CPU worst-case
 * estimate.
 */
#define CPUMAP_FILE_MAX_BYTES  (((NR_CPUS * 9)/32 > PAGE_SIZE) \
					? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES  (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)

#endif /* __LINUX_CPUMASK_H */