#ifndef MAGYAR_SORT_H
#define MAGYAR_SORT_H

/**
 * Single-header lib: in-place, fast, heavily modified and optimized radix sort.
 *
 * Only unsigned ints for now, but should be able to modify for int and float...
 * This is the counting variant with smart changes (not per-bit).
 *
 * LICENCE: CC3 - look it up, you need to mention me but that is all
 */

/*
 * Does not help much:
 * // #pragma GCC target ("avx2")
 * // #pragma GCC optimization ("unroll-loops")
 */

#include <cstdint>
#include <cstdio>
#include <cstring>  // memset, memcpy
#include <vector>   // TODO: Only for the regular radix I guess
#include <utility>  // std::swap

namespace MagyarSort {

/* CONFIG */

// Only change these if you know what you are doing.
// I use these because I want to see if nibbles are
// better or something...
//
// Bytes or nibbles only:
// - DIGIT_RANGE and BITS_PER_DIGIT should correspond
// - DIGITS should also correspond with the uint32_t
// - and DIGIT_RANGE should be a 2^n value (16 or 256)
#ifdef MAGYAR_SORT_NIBBLE
// Per-nibble digit sorting
static constexpr int DIGITS = 8;          // number of place values in a uint32_t
static constexpr int BITS_PER_DIGIT = 4;  // bits per place value
static constexpr int DIGIT_RANGE = 16;    // state space of one place value
#else
// Per-byte digit sorting
static constexpr int DIGITS = 4;          // number of place values in a uint32_t
static constexpr int BITS_PER_DIGIT = 8;  // bits per place value
static constexpr int DIGIT_RANGE = 256;   // state space of one place value
#endif

/* DEBUG */

/** Debug-print an array as hex values. `inline` so the header causes no ODR clash. */
inline void debugArr(uint32_t *arr, size_t size) {
	for(size_t i = 0; i < size; ++i) {
		printf("%x, ", arr[i]);
	}
	printf("\n");
}

/** Debug-print the per-digit counter/prefix table (DIGITS rows of DIGIT_RANGE). */
template<typename COUNTER_TYP>
inline void debugRadics(COUNTER_TYP *radics) {
	for(size_t j = 0; j < DIGITS; ++j) {
		printf("d%zu: ", j);
		for(int i = 0; i < DIGIT_RANGE; ++i) {
			// Cast: %zu is only correct for size_t, COUNTER_TYP may be narrower.
			printf("%zu,", (size_t)radics[i + DIGIT_RANGE * j]);
		}
		printf("\n\n");
	}
}

/* HELPERS */

/** Extracts digit number DIGIT_CHOICE (0 = least significant) from num. */
template<int DIGIT_CHOICE>
static inline __attribute__((always_inline)) uint32_t getDigit(uint32_t num) noexcept {
	static constexpr int SHIFT = DIGIT_CHOICE * BITS_PER_DIGIT;
	uint32_t shifted = num >> SHIFT;
	// DIGIT_RANGE is 2^n, so (DIGIT_RANGE - 1) is the digit mask.
	return shifted & (DIGIT_RANGE - 1);
}

/**
 * Recursive Functor: no object code should be generated I think (compiler should be smart).
 * Counts the digit of arr[i] into every digit plane of radicsOut in one template-unrolled pass.
 */
template<int DIGIT, typename COUNTER_TYP>
struct OccurenceMagic : public OccurenceMagic<DIGIT - 1, COUNTER_TYP> {
	inline __attribute__((always_inline))
	OccurenceMagic(uint32_t arr[], COUNTER_TYP i, COUNTER_TYP *radicsOut) noexcept
			: OccurenceMagic<DIGIT - 1, COUNTER_TYP>(arr, i, radicsOut) {
		// Parents run first so template recursion runs DIGIT=0 first...
		++radicsOut[getDigit<DIGIT>(arr[i]) + DIGIT_RANGE * DIGIT];
	}
};

/** Ends template recursion */
template<typename COUNTER_TYP>
struct OccurenceMagic<-1, COUNTER_TYP> {
	inline __attribute__((always_inline))
	OccurenceMagic(uint32_t[], COUNTER_TYP, COUNTER_TYP *) noexcept {}
};

/** Template-unrolled prefetch over arr[0..ARR_END) in STEP strides. ARR_END must be (STEP * k). */
template<int ARR_END, int STEP, typename ARR_T, int R_OR_W, int LOCALITY>
struct PrefetchMagic : public PrefetchMagic<(ARR_END - STEP), STEP, ARR_T, R_OR_W, LOCALITY> {
	inline __attribute__((always_inline)) PrefetchMagic(ARR_T *arr) noexcept
			: PrefetchMagic<(ARR_END - STEP), STEP, ARR_T, R_OR_W, LOCALITY>(arr) {
		__builtin_prefetch(&arr[ARR_END - STEP], R_OR_W, LOCALITY);
	}
};

/** Ends template recursion */
template<int STEP, typename ARR_T, int R_OR_W, int LOCALITY>
struct PrefetchMagic<0, STEP, ARR_T, R_OR_W, LOCALITY> {
	inline __attribute__((always_inline)) PrefetchMagic(ARR_T *) noexcept {}
};

/**
 * Fills radicsOut with occurrence counts of every digit of every element.
 *
 * Processes 64 elements per main-loop iteration (prefetching 16 elements ahead)
 * and mops up the remainder in a small tail loop.
 *
 * FIX: the original loop condition `i < size - 64` underflowed for unsigned
 * COUNTER_TYP when size < 64 and read out of bounds; `i + 64 <= size` is safe.
 */
template<typename COUNTER_TYP>
static inline void countOccurences(uint32_t arr[], COUNTER_TYP size, COUNTER_TYP *radicsOut) noexcept {
	COUNTER_TYP i = 0;
	for(; i + 64 <= size; i += 64) {
		for(COUNTER_TYP blk = 0; blk < 64; blk += 16) {
			// Prefetch the next 16-element chunk for read.
			__builtin_prefetch(&arr[i + blk + 16]);
			// Creates no object, struct is empty - one temporary per element
			// counts all DIGITS digit planes at once.
			#pragma GCC unroll 16
			for(COUNTER_TYP k = 0; k < 16; ++k) {
				OccurenceMagic<DIGITS - 1, COUNTER_TYP>(arr, i + blk + k, radicsOut);
			}
		}
	}
	#pragma GCC unroll 4
	for(; i < size; ++i) {
		OccurenceMagic<DIGITS - 1, COUNTER_TYP>(arr, i, radicsOut);
	}
}

/**
 * Recursive Functor: no object code should be generated I think (compiler should be smart).
 * Adds the running total prev[DIGIT] to column i of every digit plane (prefix-sum step).
 */
template<int DIGIT, typename COUNTER_TYP>
struct PrefixMagic : public PrefixMagic<DIGIT - 1, COUNTER_TYP> {
	inline __attribute__((always_inline))
	PrefixMagic(COUNTER_TYP *radics, COUNTER_TYP *prev, int i) noexcept
			: PrefixMagic<DIGIT - 1, COUNTER_TYP>(radics, prev, i) {
		static constexpr int DSTART = (DIGIT * DIGIT_RANGE);
		radics[DSTART + i] += prev[DIGIT];
		prev[DIGIT] = radics[DSTART + i];
	}
};

/** Ends template recursion */
template<typename COUNTER_TYP>
struct PrefixMagic<-1, COUNTER_TYP> {
	inline PrefixMagic(COUNTER_TYP *, COUNTER_TYP *, int) noexcept {}
};

/** Gets REFERENCE to slot i of digit plane DIGIT inside the multi-digit radix array. */
template<int DIGIT, typename COUNTER_TYP>
static inline __attribute__((always_inline)) COUNTER_TYP &rGet(COUNTER_TYP *radics, int i) noexcept {
	static constexpr int DSTART = (DIGIT * DIGIT_RANGE);
	return radics[DSTART + i];
}

/** Helper for calcPrefixSums: template-unrolled loop over column index I (0..DIGIT_RANGE-1). */
template<int I, typename COUNTER_TYP>
struct PMagic2 : public PMagic2<I - 1, COUNTER_TYP> {
	inline __attribute__((always_inline)) PMagic2(COUNTER_TYP *radics, COUNTER_TYP *prev)
			: PMagic2<I - 1, COUNTER_TYP>(radics, prev) {
		// Again first the 0th index because of parent constructors!
		// This is a template-unrolled loop too.
		PrefixMagic<DIGITS - 1, COUNTER_TYP>(radics, prev, I);
	}
};

/** Template recursion endpoint */
template<typename COUNTER_TYP>
struct PMagic2<-1, COUNTER_TYP> {
	inline __attribute__((always_inline)) PMagic2(COUNTER_TYP *, COUNTER_TYP *) {}
};

/** Turns the per-digit occurrence counts into inclusive prefix sums, all planes at once. */
template<typename COUNTER_TYP>
static inline void calcPrefixSums(COUNTER_TYP *radics) noexcept {
	static thread_local COUNTER_TYP prev[DIGITS];
	memset(prev, 0, sizeof(prev));
	// This is a template-unrolled loop too.
	PMagic2<DIGIT_RANGE - 1, COUNTER_TYP>(radics, prev);
}

/**
 * Recursive Functor: no object code should be generated I think (compiler should be smart).
 * One stable counting-sort pass per digit, DIGIT=0 (LSB) first via parent-first construction.
 */
template<int DIGIT, typename COUNTER_TYP>
struct RadixMagic : public RadixMagic<DIGIT - 1, COUNTER_TYP> {
	inline __attribute__((always_inline))
	// BEWARE: "*&" needed to swap pointers between passes!
	RadixMagic(COUNTER_TYP *radics, uint32_t *&from, uint32_t *&to, COUNTER_TYP size) noexcept
			: RadixMagic<DIGIT - 1, COUNTER_TYP>(radics, from, to, size) {
		#pragma GCC unroll 64
		for(COUNTER_TYP i = size; i > 0; --i) {
			// Right-to-left so the order established by earlier digits is kept (stability).
			// Get num and its new offset / location: the inclusive prefix sum,
			// pre-decremented, is the last free slot for this digit value.
			auto num = from[i - 1];
			auto digVal = getDigit<DIGIT>(num);
			auto offset = (--rGet<DIGIT>(radics, digVal));
			// Add to the proper target location.
			to[offset] = num;
		}
		// Only swaps pointers :-)
		std::swap(from, to);
	}
};

/** Ends template recursion */
template<typename COUNTER_TYP>
struct RadixMagic<-1, COUNTER_TYP> {
	inline RadixMagic(COUNTER_TYP *, uint32_t *&, uint32_t *&, COUNTER_TYP) noexcept {}
};

/* SORT */

/**
 * Sort the given array (in-place sorting) with the given size.
 *
 * Beware: GC needs to happen on all threads that use us!
 *
 * @param arr The array to sort. Result will be in the same array - as sorted.
 * @param size The length of the array - should fit in the COUNTER_TYP.
 * @param COUNTER_TYP OPTIONAL: When set this type will be the counter type.
 * @param REUSE OPTIONAL: When true, we reuse the scratch array instead of always
 *              getting and releasing it from the heap.
 * @param GC OPTIONAL: When true, we garbage collect memory from previous sorts if REUSE is true.
 * @param GC_WITHOUT_SORT OPTIONAL: When true, we "just GC" but do not sort in case GC is true.
 */
template<typename COUNTER_TYP = size_t, bool REUSE = false, bool GC = false, bool GC_WITHOUT_SORT = false>
inline void __attribute__((always_inline)) sort_impl(uint32_t arr[], COUNTER_TYP size) noexcept {
	// Most funny optimization is this multiply here :-)
	// The (size * REUSE) trick avoids any first allocation when REUSE is false,
	// and pre-sizes the reused buffer on the first REUSE call.
	static thread_local std::vector<uint32_t> arc(size * REUSE);
	// "Garbage-collection": we can only reach the function-local static from
	// inside this body, so GC is implemented as a template-flagged call.
	if(GC) {
		arc = std::vector<uint32_t>();
		if(GC_WITHOUT_SORT) {
			return;
		}
	}
	// Nothing to do - also keeps arc.data() from being taken on an empty vector.
	if(size == 0) {
		return;
	}
	// Holds "digit" occurrences, then prefix sums.
	// Plane 0 (first DIGIT_RANGE elems) is the LSB digit, the last plane is the MSB.
	static thread_local COUNTER_TYP radics[DIGITS * DIGIT_RANGE];
	// Write-prefetch the whole counter table.
	// NOTE(review): the original template arguments were lost in transit; this
	// reconstruction (stride 16, write, high locality) is a guess - TODO confirm.
	PrefetchMagic<DIGITS * DIGIT_RANGE, 16, COUNTER_TYP, 1 /*w*/, 3 /*L1*/> pm(radics);
	(void)pm;
	memset(radics, 0, sizeof(radics));
	// Calculate occurrences of digits.
	countOccurences(arr, size, radics);
	//debugRadics(radics);
	// Calculate prefix sums.
	calcPrefixSums(radics);
	//debugRadics(radics);
	/* Regular (old) radix sort with a small twist: better ILP in the counting
	 * and prefix-summing above. The sort itself needs a scratch copy, see:
	 * https://www.youtube.com/watch?v=ujb2CIWE8zY
	 * Rem.: The REUSE branch is optimized out at compile time! */
	if(REUSE) {
		arc.resize(size);
	} else {
		// Must not be .clear() - we must regain memory of the previous sort!
		arc = std::vector<uint32_t>(size);
	}
	uint32_t *from = arr;
	uint32_t *to = arc.data();
	RadixMagic<DIGITS - 1, COUNTER_TYP>(radics, from, to, size);
	// With another API we could spare this copy if we could delete the original
	// arr and return a ptr - fine as is. Logically we test "to != arr", but the
	// last pass already swapped the pointers, so "from != arr" is what we want.
	if(from != arr) {
		// FIX: memcpy counts BYTES - the original copied only `size` bytes
		// instead of `size` elements.
		memcpy(arr, from, size * sizeof(uint32_t));
	}
}

/**
 * Garbage collect reused data structures from last call.
 *
 * This is optimized and is a NO-OP if MAGYAR_SORT_DEFAULT_REUSE is not defined!
 * - unless you use the FORCE! May it be with you if you need it.
 *
 * @param FORCE OPTIONAL: When true, the gc happens even if MAGYAR_SORT_DEFAULT_REUSE is not defined!
 */
template<bool FORCE = false>
inline void gc() noexcept {
	if(FORCE) {
		// Only GC-ing.
		MagyarSort::sort_impl<size_t, true, true, true>(nullptr, 0);
	} else {
#ifdef MAGYAR_SORT_DEFAULT_REUSE
		// Only GC-ing.
		MagyarSort::sort_impl<size_t, true, true, true>(nullptr, 0);
#endif
	}
}

/**
 * Sort the given array (in-place sorting) with the given size.
 *
 * Rem.: Please remind yourself to gc() from time-to-time!
 * Rem.: Thread-safe to use!
 *
 * Beware: MagyarSort::gc<true>(); needs to happen on all threads that use this
 * variant, otherwise memory leaks away! Mind the "true" template parameter that
 * forces the GC even when sort by default does not reuse...
 *
 * @param arr The array to sort. Result will be in the same array - as sorted.
 * @param size The length of the array.
 * @param COUNTER_TYP OPTIONAL: Best kept as size_t, but for smaller arrays you can use uint32_t.
 * @param GC OPTIONAL: When true, we garbage collect before this sort - so cached memory size will be "size" elems.
 */
template<typename COUNTER_TYP = size_t, bool GC = false>
inline void __attribute__((always_inline)) sort_reuse(uint32_t arr[], COUNTER_TYP size) noexcept {
	// Reuse the temporary vector across runs: far fewer heap allocations,
	// much faster on gcc and a bit faster on clang too.
	MagyarSort::sort_impl<COUNTER_TYP, true, GC, false>(arr, size);
}

/**
 * Sort the given array (in-place sorting) with the given size.
 *
 * Rem.: Thread-safe to use!
 *
 * @param arr The array to sort. Result will be in the same array - as sorted.
 * @param size The length of the array.
 * @param COUNTER_TYP OPTIONAL: Best kept as size_t, but for smaller arrays you can use uint32_t.
 */
template<typename COUNTER_TYP = size_t>
inline void __attribute__((always_inline)) sort_no_reuse(uint32_t arr[], size_t size) noexcept {
	// We use the heap once per call... safer and no garbage collecting needed.
	MagyarSort::sort_impl<COUNTER_TYP, false, false, false>(arr, (COUNTER_TYP)size);
}

/**
 * Sort the given array (in-place sorting) with the given size.
 *
 * Beware: When MAGYAR_SORT_DEFAULT_REUSE is defined, MagyarSort::gc(); should be
 * called after "sort bursts" (consecutive fast sorts) on all threads that use
 * this, otherwise the biggest sorted array's scratch buffer stays in RAM!
 * Without that define every call allocates and frees its own scratch buffer.
 *
 * @param arr The array to sort. Result will be in the same array - as sorted.
 * @param size The length of the array.
 * @param COUNTER_TYP OPTIONAL: Best kept as size_t, but for smaller arrays you can use uint32_t.
 */
template<typename COUNTER_TYP = size_t>
inline void sort(uint32_t arr[], size_t size) noexcept {
#ifdef MAGYAR_SORT_DEFAULT_REUSE
	MagyarSort::sort_reuse<COUNTER_TYP>(arr, (COUNTER_TYP)size);
#else
	MagyarSort::sort_no_reuse<COUNTER_TYP>(arr, size);
#endif
}

} // namespace MagyarSort

#endif