// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address must
 * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}
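/*
 * Illustrative sketch, not part of the original file: the transformation
 * described in the comment above, written out for two addresses.  Both
 * function names are hypothetical and exist only for this example.
 */
static inline bool both_aligned_naive(uintptr_t a, uintptr_t b, uintptr_t mask)
{
	return !(a & mask) && !(b & mask);	/* two tests, possible extra branch */
}

static inline bool both_aligned_merged(uintptr_t a, uintptr_t b, uintptr_t mask)
{
	return ((a | b) & mask) == 0;		/* one test, same result */
}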
/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory. This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory. This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible. If they're not, emulating
 * one requires base+index+4 addressing which x86 has but most other
 * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 * x32 ABI). Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_r_func_t)0
#define SWAP_WORDS_32 (swap_r_func_t)1
#define SWAP_BYTES    (swap_r_func_t)2
#define SWAP_WRAPPER  (swap_r_func_t)3

struct wrapper {
	cmp_func_t cmp;
	swap_func_t swap;
};

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
{
	if (swap_func == SWAP_WRAPPER) {
		((const struct wrapper *)priv)->swap(a, b, (int)size);
		return;
	}

	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size, priv);
}

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
	if (cmp == _CMP_WRAPPER)
		return ((const struct wrapper *)priv)->cmp(a, b);
	return cmp(a, b, priv);
}

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought. Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2. But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit. That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
	i -= size;
	i -= size & -(i & lsbit);
	return i / 2;
}
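/*
 * Illustrative sketch, not part of the original file: a branchy reference
 * version of the branch-free arithmetic above, with the "if (i & lsbit)"
 * from the comment written out.  For example, with size == 12 (lsbit == 4),
 * the child at byte offset 48 (index 4) maps to offset 12 (index 1), and
 * the child at offset 36 (index 3) also maps to offset 12.  The function
 * name is hypothetical.
 */
static inline size_t parent_reference(size_t i, unsigned int lsbit, size_t size)
{
	i -= size;		/* byte offset of index j-1 */
	if (i & lsbit)		/* j-1 is odd, round the divide down */
		i -= size;
	return i / 2;
}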
/**
 * sort_r - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 * @priv: third argument passed to comparison function
 *
 * This function does a heapsort on the given array. You may provide
 * a swap_func function if you need to do something more than a memory
 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort_r(void *base, size_t num, size_t size,
	    cmp_r_func_t cmp_func,
	    swap_r_func_t swap_func,
	    const void *priv)
{
	/* pre-scale counters for performance */
	size_t n = num * size, a = (num/2) * size;
	const unsigned int lsbit = size & -size;  /* Used to find parent */

	if (!a)		/* num < 2 || size == 0 */
		return;

	/* called from 'sort' without swap function, let's pick the default */
	if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap)
		swap_func = NULL;

	if (!swap_func) {
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/*
	 * Loop invariants:
	 * 1. elements [a,n) satisfy the heap property (compare greater than
	 *    all of their children),
	 * 2. elements [n,num*size) are sorted, and
	 * 3. a <= b <= c <= d <= n (whenever they are valid).
	 */
	for (;;) {
		size_t b, c, d;

		if (a)			/* Building heap: sift down --a */
			a -= size;
		else if (n -= size)	/* Sorting: Extract root to --n */
			do_swap(base, base + n, size, swap_func, priv);
		else			/* Sort complete */
			break;

		/*
		 * Sift element at "a" down into heap. This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down. (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
		for (b = a; c = 2*b + size, (d = c + size) < n;)
			b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
		if (d == n)	/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
			b = parent(b, lsbit, size);
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = parent(b, lsbit, size);
			do_swap(base + b, base + c, size, swap_func, priv);
		}
	}
}
EXPORT_SYMBOL(sort_r);
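/*
 * Illustrative sketch, not part of the original file: sorting an array of
 * indices by a key table passed through @priv, with the built-in swap
 * selected by passing a NULL swap_func.  The struct and function names are
 * hypothetical.
 */
struct key_table {
	const u32 *keys;
};

static int cmp_by_key(const void *a, const void *b, const void *priv)
{
	const struct key_table *t = priv;
	u32 ka = t->keys[*(const unsigned int *)a];
	u32 kb = t->keys[*(const unsigned int *)b];

	if (ka < kb)
		return -1;
	return ka > kb;
}

/* ... sort_r(idx, nr, sizeof(*idx), cmp_by_key, NULL, &table); */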
void sort(void *base, size_t num, size_t size,
	  cmp_func_t cmp_func,
	  swap_func_t swap_func)
{
	struct wrapper w = {
		.cmp  = cmp_func,
		.swap = swap_func,
	};

	return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
}
EXPORT_SYMBOL(sort);
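/*
 * Illustrative sketch, not part of the original file: the common case of
 * sorting an array of ints with sort().  cmp_func_t takes only the two
 * elements (no priv argument); the comparison function name is hypothetical.
 */
static int cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	if (x < y)
		return -1;
	return x > y;
}

/*
 * int vals[] = { 3, 1, 2 };
 *
 * sort(vals, ARRAY_SIZE(vals), sizeof(vals[0]), cmp_int, NULL);
 */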