// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address
 * must be aligned as well unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 * is enabled.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}
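
/*
 * A worked example (illustrative values only): sorting 24-byte elements
 * from an 8-byte-aligned base gives lsbits == 24, and 24 & (8 - 1) == 0,
 * so the 64-bit swap is usable.  Sorting 12-byte elements gives
 * 12 & 7 == 4, so the caller falls back to is_aligned(base, size, 4)
 * and 32-bit swaps instead.
 */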

/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory.  This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory.  This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible.  If they're not available,
 * emulating one requires base+index+4 addressing which x86 has but most
 * other processors do not.  If CONFIG_64BIT, we definitely have 64-bit
 * loads, but it's possible to have 64-bit loads without 64-bit pointers
 * (e.g. x32 ABI).  Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_func_t)0
#define SWAP_WORDS_32 (swap_func_t)1
#define SWAP_BYTES    (swap_func_t)2

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
{
	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size);
}
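
/*
 * A caller-supplied swap_func only makes sense when a plain memory
 * exchange is not enough, e.g. when elements contain pointers into
 * themselves.  A minimal sketch (the struct and helper below are
 * hypothetical, not part of this file):
 *
 *	struct node {
 *		int key;
 *		struct node *self;
 *	};
 *
 *	static void swap_nodes(void *a, void *b, int size)
 *	{
 *		struct node *na = a, *nb = b;
 *		struct node tmp = *na;
 *
 *		*na = *nb;
 *		*nb = tmp;
 *		na->self = na;
 *		nb->self = nb;
 *	}
 *
 * Here the final two assignments repair the self-pointers that a raw
 * copy would otherwise leave pointing at the old locations.
 */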

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
	if (cmp == _CMP_WRAPPER)
		return ((cmp_func_t)(priv))(a, b);
	return cmp(a, b, priv);
}

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought.  Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2.  But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit.  That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
	i -= size;
	i -= size & -(i & lsbit);
	return i / 2;
}
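
/*
 * A worked example (illustrative numbers only): with size == 12 the mask
 * is lsbit == 4.  For the child at byte offset i == 48 (index 4), the
 * parent is index (4 - 1) / 2 == 1, i.e. byte offset 12.  The code gets
 * there as: i -= 12 gives 36; 36 & 4 is set, so subtract 12 more to get
 * 24; 24 / 2 == 12.  For i == 60 (index 5), the intermediate value 48
 * has the lsbit clear, so nothing extra is subtracted and 48 / 2 == 24,
 * the offset of index 2, as expected.
 */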

/**
 * sort_r - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 * @priv: third argument passed to comparison function
 *
 * This function does a heapsort on the given array.  You may provide
 * a swap_func function if you need to do something more than a memory
 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * Sorting time is O(n log n) both on average and worst-case.  While
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort_r(void *base, size_t num, size_t size,
	    cmp_r_func_t cmp_func,
	    swap_func_t swap_func,
	    const void *priv)
{
	/* pre-scale counters for performance */
	size_t n = num * size, a = (num/2) * size;
	const unsigned int lsbit = size & -size;  /* Used to find parent */

	if (!a)		/* num < 2 || size == 0 */
		return;

	if (!swap_func) {
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/*
	 * Loop invariants:
	 * 1. elements [a,n) satisfy the heap property (compare greater than
	 *    all of their children),
	 * 2. elements [n,num*size) are sorted, and
	 * 3. a <= b <= c <= d <= n (whenever they are valid).
	 */
	for (;;) {
		size_t b, c, d;

		if (a)			/* Building heap: sift down --a */
			a -= size;
		else if (n -= size)	/* Sorting: Extract root to --n */
			do_swap(base, base + n, size, swap_func);
		else			/* Sort complete */
			break;

		/*
		 * Sift element at "a" down into heap.  This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down.  (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
		for (b = a; c = 2*b + size, (d = c + size) < n;)
			b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
		if (d == n)	/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
			b = parent(b, lsbit, size);
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = parent(b, lsbit, size);
			do_swap(base + b, base + c, size, swap_func);
		}
	}
}
EXPORT_SYMBOL(sort_r);

void sort(void *base, size_t num, size_t size,
	  cmp_func_t cmp_func,
	  swap_func_t swap_func)
{
	return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func);
}
EXPORT_SYMBOL(sort);
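
/*
 * Typical usage (a minimal sketch; the comparison helper and array shown
 * here are hypothetical, not part of this file):
 *
 *	static int cmp_u32(const void *a, const void *b)
 *	{
 *		u32 x = *(const u32 *)a, y = *(const u32 *)b;
 *
 *		return x < y ? -1 : x > y;
 *	}
 *
 *	u32 vals[16];
 *	sort(vals, ARRAY_SIZE(vals), sizeof(vals[0]), cmp_u32, NULL);
 *
 * Passing NULL for swap_func selects the built-in word-wide swaps above;
 * sort_r() additionally threads a caller-supplied @priv pointer through
 * to the comparison function.
 */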