#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

#include "magyarsort.h"

// ChatGPT and I wrote this space-partitioning bucket sort
void gpt_bucket_sort(uint32_t* array, int n) {
    if (n <= 1) return;

    // Calculate the number of buckets to use
    int num_buckets = static_cast<int>(std::sqrt(n));

    // Create a vector of buckets
    std::vector<std::vector<uint32_t>> buckets(num_buckets);

    // Calculate the range of values that each bucket can hold
    uint32_t min_value = *std::min_element(array, array + n);
    uint32_t max_value = *std::max_element(array, array + n);
    uint32_t range = max_value - min_value + 1;
    uint32_t bucket_size = range / num_buckets + 1;

    // Distribute the elements of the array into the buckets
    for (int i = 0; i < n; i++) {
        // Calculate the bucket index for this element
        // using the range of values and the bucket size as the divisor
        int bucket_index = (array[i] - min_value) / bucket_size;
        buckets[bucket_index].push_back(array[i]);
    }

    // Sort the elements in each bucket using std::sort
    for (int i = 0; i < num_buckets; i++) {
        std::sort(buckets[i].begin(), buckets[i].end());
    }

    // Concatenate the buckets to get the sorted array
    int k = 0;
    for (int i = 0; i < num_buckets; i++) {
        for (size_t j = 0; j < buckets[i].size(); j++) {
            array[k++] = buckets[i][j];
        }
    }
}

// Further optimizations (no ChatGPT)
void magyar_bucket_sort(uint32_t* array, int n) {
    if (n <= 1) return;

    // Calculate the number of buckets to use
    int num_buckets = static_cast<int>(std::sqrt(n));

    // Create a vector of buckets
    std::vector<std::vector<uint32_t>> buckets(num_buckets);

    // O(n)
    // Calculate the range of values that each bucket can hold
    auto mm = std::minmax_element(array, array + n);
    uint32_t min_value = *mm.first;
    uint32_t max_value = *mm.second;
    uint32_t range = max_value - min_value + 1;
    uint32_t bucket_size = range / num_buckets + 1;

    // Distribute the elements of the array into the buckets
    for (int i = 0; i < n; ++i) {
        // Calculate the bucket index for this element
        // using the range of values and the bucket size as the divisor
        int bucket_index = (array[i] - min_value) / bucket_size;
        buckets[bucket_index].push_back(array[i]);
    }

    // With sqrt(n) buckets of roughly sqrt(n) elements each, std::sort costs
    // sqrt(n) * (sqrt(n) * log(sqrt(n))) = n * log(sqrt(n)) in total; MagyarSort is
    // linear per bucket and uses less memory than sorting the whole array with it at once!
    // Sort the elements in each bucket, using MagyarSort for the large ones
    for (int i = 0; i < num_buckets; ++i) {
        if (buckets[i].size() >= 96) { // what to choose here is pretty arbitrary
            MagyarSort::sort(buckets[i].data(), buckets[i].size());
        } else {
            std::sort(buckets[i].begin(), buckets[i].end());
        }
    }

    // Concatenate the buckets to get the sorted array
    int k = 0;
    for (int i = 0; i < num_buckets; ++i) {
        for (size_t j = 0; j < buckets[i].size(); ++j) {
            array[k++] = buckets[i][j]; // post-increment: start writing at index 0
        }
    }
}

/** Simplified magyar_bucket_sort: fixed power-of-two bucket size instead of sqrt(n) buckets */
void magyar_bucket_sort2(uint32_t* array, int n) {
    if (n <= 1) return;

    // Use a power-of-two bucket size so the division below can become a shift
    uint32_t bucketSize = 65536;

    // O(n)
    // Calculate the range of values that each bucket can hold
    auto mm = std::minmax_element(array, array + n);
    uint32_t min = *mm.first;
    uint32_t max = *mm.second;
    uint32_t range = max - min + 1;

    // Calculate the number of buckets from the fixed bucket size:
    // inverting bucketSize ~= range / numBuckets gives numBuckets ~= range / bucketSize;
    // the +1 rounds up so the largest value still maps to a valid bucket
    uint32_t numBuckets = range / bucketSize + 1;

    // Create a vector of buckets
    std::vector<std::vector<uint32_t>> buckets(numBuckets);

    // Distribute the elements of the array into the buckets
    for (int i = 0; i < n; ++i) {
        // Calculate the bucket index for this element
        // using the range of values and the bucket size as the divisor
        int bucket_index = (array[i] - min) / bucketSize; // likely compiled to a bitshift
        buckets[bucket_index].push_back(array[i]);
    }

    // With b buckets of roughly n/b elements each, std::sort costs b * (n/b) * log(n/b) = n * log(n/b)
    // in total; MagyarSort is linear per bucket and uses less memory than sorting the whole array with it at once!
    // Sort the elements in each bucket, using MagyarSort for the large ones
    for (uint32_t i = 0; i < numBuckets; ++i) {
        if (buckets[i].size() >= 96) { // what to choose here is pretty arbitrary
            MagyarSort::sort(buckets[i].data(), buckets[i].size());
        } else {
            std::sort(buckets[i].begin(), buckets[i].end());
        }
    }

    // Concatenate the buckets to get the sorted array
    int k = 0;
    for (uint32_t i = 0; i < numBuckets; ++i) {
        for (size_t j = 0; j < buckets[i].size(); ++j) {
            array[k++] = buckets[i][j]; // post-increment: start writing at index 0
        }
    }
}
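
// --- Usage sketch (not part of the library) ---------------------------------
// A minimal example of how the three bucket sorts above might be driven from a
// test harness. It assumes only the functions defined in this file plus the
// standard library; the MAGYAR_BUCKET_DEMO guard is a made-up name so the demo
// can be compiled in or out as needed.
#ifdef MAGYAR_BUCKET_DEMO
#include <cassert>
#include <cstdio>
#include <random>

int main() {
    const int n = 1 << 20;
    std::vector<uint32_t> data(n);

    // Fill with pseudo-random 32-bit values
    std::mt19937 rng(42);
    std::uniform_int_distribution<uint32_t> dist;
    for (auto& v : data) v = dist(rng);

    // Sort one copy per variant and verify each result
    std::vector<uint32_t> a = data, b = data, c = data;
    gpt_bucket_sort(a.data(), n);
    magyar_bucket_sort(b.data(), n);
    magyar_bucket_sort2(c.data(), n);

    assert(std::is_sorted(a.begin(), a.end()));
    assert(std::is_sorted(b.begin(), b.end()));
    assert(std::is_sorted(c.begin(), c.end()));
    std::printf("all three bucket sorts produced sorted output\n");
    return 0;
}
#endif // MAGYAR_BUCKET_DEMO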