diff --git a/README.md b/README.md
index 0e38ddb..db1acf5 100644
--- a/README.md
+++ b/README.md
@@ -3,12 +3,45 @@ CUDA Stream Compaction
 
 **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**
 
-* (TODO) YOUR NAME HERE
-  * (TODO) [LinkedIn](), [personal website](), [twitter](), etc.
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* Hanting Xu
+* [GitHub](https://github.com/HantingXu), [LinkedIn](https://www.linkedin.com/in/hanting-xu-25615b28b)
+* Tested on: (Personal Computer) Windows 11, i7-12700H @ 2.70GHz, 32GB, GeForce RTX 3070 Ti Laptop GPU
 
-### (TODO: Your README)
+### Basic Introduction
 
-Include analysis, etc. (Remember, this is public, so don't put
-anything here that you don't want to share with the world.)
+This project implements stream compaction and radix sort on the GPU with CUDA.
+
+#### Stream Compaction
+
+Stream compaction takes an array of numbers as input, removes the zeros, and outputs the surviving elements. A work-efficient GPU implementation can be roughly divided into three parts: map, scan, and scatter.
+* map: map each element of the original array to 1 (non-zero) or 0 (zero).
+* scan: compute an exclusive prefix sum over the mapped array to obtain the index each surviving element should occupy after compaction.
+* scatter: write each non-zero element of the original array to its destination according to the index array produced by the scan.
+
+* Figure - Map, Scan, Scatter for Stream Compaction
+
+![](img/example.png)
+
+The main focus of this project is the scan function. Three scan implementations are provided (CPU scan, naive GPU scan, and work-efficient GPU scan), and their performance is compared against CUDA's Thrust scan. For more detail on how to implement scan, see [Parallel Prefix Sum (Scan) with CUDA](https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html); a simplified sketch of the work-efficient approach is shown below.
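+
+Both the compaction path and the GPU radix sort below reuse this work-efficient scan. The snippet below is a minimal sketch of its up-sweep/down-sweep (Blelloch) structure, modeled on the kernels in `stream_compaction/efficient.cu` but simplified for readability: the names `kernUpSweep`, `kernDownSweep`, and `exclusiveScan` are illustrative, the sketch scans a single padded buffer in place, and padding, error checking, and host/device copies are omitted (the actual implementation ping-pongs between `dev_idata` and `dev_odata`).
+
+```cuda
+#include <cuda_runtime.h>
+
+#define blockSize 128
+
+// Up-sweep (reduce): at each level, fold every left child into its right sibling.
+__global__ void kernUpSweep(int n, int stride, int* data) {
+    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
+    if (index >= n) return;
+    int i = (index + 1) * stride * 2 - 1;   // right child handled by this thread
+    data[i] += data[i - stride];
+}
+
+// Down-sweep: push partial sums back down the tree to form an exclusive scan.
+__global__ void kernDownSweep(int n, int stride, int* data) {
+    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
+    if (index >= n) return;
+    int i = (index + 1) * stride * 2 - 1;
+    int left = data[i - stride];
+    data[i - stride] = data[i];             // parent value moves to the left child
+    data[i] += left;                        // right child = parent + old left child
+}
+
+// dev_data holds `length` ints on the device; length must be a power of two.
+void exclusiveScan(int length, int* dev_data) {
+    for (int stride = 1; stride < length; stride *= 2) {
+        int threads = length / (2 * stride);            // one thread per node at this level
+        dim3 blocks((threads + blockSize - 1) / blockSize);
+        kernUpSweep<<<blocks, blockSize>>>(threads, stride, dev_data);
+    }
+    cudaMemset(dev_data + length - 1, 0, sizeof(int));  // zero the root before the down-sweep
+    for (int stride = length / 2; stride > 0; stride /= 2) {
+        int threads = length / (2 * stride);
+        dim3 blocks((threads + blockSize - 1) / blockSize);
+        kernDownSweep<<<blocks, blockSize>>>(threads, stride, dev_data);
+    }
+}
+```
+
+For compaction, this scan runs over the 0/1 array produced by the map step and its output serves as the scatter indices; the GPU radix sort runs the same scan once per bit to decide where the "bit = 0" and "bit = 1" groups land in each pass.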
+
+#### Radix Sort
+
+Radix sort orders an array by processing the keys one digit (here, one bit) at a time. Its time complexity on the CPU is normally O(kn), where k is the number of digit passes. This project implements both a CPU and a GPU version of radix sort. On the GPU, each pass can again be expressed as map, scan, and scatter, just in a slightly different way.
+
+* Figure - Map, Scan, Scatter for Radix Sort
+
+![](img/radix_sort.png)
+
+This project compares the efficiency of the CPU and GPU versions of radix sort. The scan used inside the GPU version is the work-efficient GPU scan described above.
+
+### Analysis
+
+#### Scan
+
+We recorded the execution time of each scan implementation. In my measurements the CPU scan is fastest when the array holds only 2^2 to 2^18 elements, but, as the graph below shows, it becomes slower than all of the GPU implementations once the array reaches 2^20 elements or more. That is mainly because the CPU scan is purely sequential, so its running time grows linearly with the input, while the GPU scans spread the work across many threads; as the input size grows, the CPU inevitably falls behind. One interesting observation is that the naive GPU scan hits a bottleneck at 2^30 elements and its speed drops sharply, which probably has to do with running short of GPU memory.
+
+Note: all GPU algorithms in this project use a block size of 128.
+
+![](img/scan_result.svg)
+
+#### Radix Sort
+
+The graph below shows the execution time of radix sort on the CPU and the GPU as the input size changes. Both versions slow down as the number of inputs grows, but the CPU implementation degrades faster than the GPU one. That is most likely because each CPU pass walks the whole array sequentially, while the GPU version parallelizes the per-pass map, scan, and scatter steps.
+
+![](img/radix_result.svg)
diff --git a/img/example.png b/img/example.png
new file mode 100644
index 0000000..02d0631
Binary files /dev/null and b/img/example.png differ
diff --git a/img/radix_result.svg b/img/radix_result.svg
new file mode 100644
index 0000000..0e22566
--- /dev/null
+++ b/img/radix_result.svg
@@ -0,0 +1 @@
+The execution time of radix sort on CPU and GPU withdifferent number of inputsCPUGPU16182022240200400600800Number of inputs(2^n))Execution time(ms))
\ No newline at end of file
diff --git a/img/radix_sort.png b/img/radix_sort.png
new file mode 100644
index 0000000..89cba01
Binary files /dev/null and b/img/radix_sort.png differ
diff --git a/img/scan_result.svg b/img/scan_result.svg
new file mode 100644
index 0000000..1a00deb
--- /dev/null
+++ b/img/scan_result.svg
@@ -0,0 +1 @@
+Execution time of different scan functions with 2^n inputsCPUNaiveEffiecientThrust20222426283001,0002,0003,000data size (2^n))execution time (ms)
\ No newline at end of file
diff --git a/src/main.cpp b/src/main.cpp
index 896ac2b..e0b273c 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -13,14 +13,16 @@
 #include
 #include "testing_helpers.hpp"
 
-const int SIZE = 1 << 8; // feel free to change the size of array
+const int SIZE = 1 << 18; // feel free to change the size of array
 const int NPOT = SIZE - 3; // Non-Power-Of-Two
-int *a = new int[SIZE];
-int *b = new int[SIZE];
-int *c = new int[SIZE];
+int* a = new int[SIZE];
+int* b = new int[SIZE];
+int* c = new int[SIZE];
+int* d = new int[SIZE];
+
 int main(int argc, char* argv[]) {
-    // Scan tests
+    // Radix Sort tests
 
     printf("\n");
     printf("****************\n");
@@ -31,6 +33,26 @@
     a[SIZE - 1] = 0;
     printArray(SIZE, a, true);
 
+    zeroArray(SIZE, b);
+    printDesc("cpu radix sort, power-of-two");
+    StreamCompaction::CPU::radixSort(SIZE, b, a);
+    printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)");
+    printArray(SIZE, b, true);
+
+    zeroArray(SIZE, c);
+    printDesc("work-efficient radix sort, power-of-two");
+    StreamCompaction::Efficient::radixSort(SIZE, c, a);
+    printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
+    printArray(SIZE, c, true);
+
+    // Scan tests
+
+    printf("\n");
+    printf("****************\n");
+    printf("** SCAN TESTS **\n");
+    printf("****************\n");
+
     // initialize b using StreamCompaction::CPU::scan you implement
     // We use b for further comparison. Make sure your StreamCompaction::CPU::scan is correct.
     // At first all cases passed because b && c are all zeroes.
@@ -59,7 +81,7 @@ int main(int argc, char* argv[]) { printDesc("1s array for finding bugs"); StreamCompaction::Naive::scan(SIZE, c, a); printArray(SIZE, c, true); */ - + zeroArray(SIZE, c); printDesc("naive scan, non-power-of-two"); StreamCompaction::Naive::scan(NPOT, c, a); @@ -95,60 +117,9 @@ int main(int argc, char* argv[]) { //printArray(NPOT, c, true); printCmpResult(NPOT, b, c); - printf("\n"); - printf("*****************************\n"); - printf("** STREAM COMPACTION TESTS **\n"); - printf("*****************************\n"); - - // Compaction tests - - genArray(SIZE - 1, a, 4); // Leave a 0 at the end to test that edge case - a[SIZE - 1] = 0; - printArray(SIZE, a, true); - - int count, expectedCount, expectedNPOT; - - // initialize b using StreamCompaction::CPU::compactWithoutScan you implement - // We use b for further comparison. Make sure your StreamCompaction::CPU::compactWithoutScan is correct. - zeroArray(SIZE, b); - printDesc("cpu compact without scan, power-of-two"); - count = StreamCompaction::CPU::compactWithoutScan(SIZE, b, a); - printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)"); - expectedCount = count; - printArray(count, b, true); - printCmpLenResult(count, expectedCount, b, b); - - zeroArray(SIZE, c); - printDesc("cpu compact without scan, non-power-of-two"); - count = StreamCompaction::CPU::compactWithoutScan(NPOT, c, a); - printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)"); - expectedNPOT = count; - printArray(count, c, true); - printCmpLenResult(count, expectedNPOT, b, c); - - zeroArray(SIZE, c); - printDesc("cpu compact with scan"); - count = StreamCompaction::CPU::compactWithScan(SIZE, c, a); - printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)"); - printArray(count, c, true); - printCmpLenResult(count, expectedCount, b, c); - - zeroArray(SIZE, c); - printDesc("work-efficient compact, power-of-two"); - count = StreamCompaction::Efficient::compact(SIZE, c, a); - printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); - //printArray(count, c, true); - printCmpLenResult(count, expectedCount, b, c); - - zeroArray(SIZE, c); - printDesc("work-efficient compact, non-power-of-two"); - count = StreamCompaction::Efficient::compact(NPOT, c, a); - printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); - //printArray(count, c, true); - printCmpLenResult(count, expectedNPOT, b, c); - system("pause"); // stop Win32 console from closing on exit delete[] a; delete[] b; delete[] c; -} + delete[] d; +} \ No newline at end of file diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu index 2ed6d63..1fcc8ff 100644 --- a/stream_compaction/common.cu +++ b/stream_compaction/common.cu @@ -24,6 +24,26 @@ namespace StreamCompaction { */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) + { + return; + } + /**don't check the last element, the last one is always true + no matter it is 0 or not*/ + if (index == n - 1) + { + bools[index] = 1; + return; + } + if (idata[index + 1] != idata[index]) + { + bools[index] = 1; + } + else + { + bools[index] = 0; + } } /** @@ -33,6 +53,15 @@ namespace StreamCompaction { __global__ void kernScatter(int n, int *odata, const int 
*idata, const int *bools, const int *indices) { // TODO + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) + { + return; + } + if (bools[index] == 1) + { + odata[indices[index]] = idata[index]; + } } } diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu index 719fa11..7c766fb 100644 --- a/stream_compaction/cpu.cu +++ b/stream_compaction/cpu.cu @@ -2,6 +2,7 @@ #include "cpu.h" #include "common.h" +#define passNumber 6 namespace StreamCompaction { namespace CPU { @@ -12,15 +13,67 @@ namespace StreamCompaction { return timer; } + void map(int n, int* odata) + { + for (int i = 0; i < n; i++) + { + if (odata[i] != 0) + { + odata[i] = 1; + } + } + } + /** * CPU scan (prefix sum). * For performance analysis, this is supposed to be a simple for loop. * (Optional) For better understanding before starting moving to GPU, you can simulate your GPU scan in this function first. */ void scan(int n, int *odata, const int *idata) { - timer().startCpuTimer(); // TODO + std::copy(idata, idata + n - 1, &(odata[1])); + timer().startCpuTimer(); + odata[0] = 0; + for (int i = 1; i < n; i++) + { + odata[i] = odata[i] + odata[i - 1]; + } + timer().endCpuTimer(); + } + + void radixSort(int n, int* odata, int* idata) + { + int* temp = (int*)malloc(n * sizeof(int)); + int* input = (int*)malloc(n * sizeof(int)); + memcpy(input, idata, n * sizeof(int)); + timer().startCpuTimer(); + for (int i = 0; i < passNumber; i++) + { + int zero = 0; + for (int j = 0; j < n; j++) + { + //if is 0 + if (1 - ((input[j] >> i) & 1)) + { + temp[zero] = input[j]; + ++zero; + } + } + for (int j = 0; j < n; j++) + { + //if is 1 + if ((input[j] >> i) & 1) + { + temp[zero] = input[j]; + ++zero; + } + } + std::swap(temp, input); + } timer().endCpuTimer(); + std::copy(input, input + n, odata); + free(temp); + free(input); } /** @@ -29,10 +82,20 @@ namespace StreamCompaction { * @returns the number of elements remaining after compaction. */ int compactWithoutScan(int n, int *odata, const int *idata) { + // TODO timer().startCpuTimer(); // TODO + int j = 0; + for (int i = 0; i < n; i++) + { + if (idata[i] != 0) + { + odata[j] = idata[i]; + ++j; + } + } timer().endCpuTimer(); - return -1; + return j; } /** @@ -43,8 +106,28 @@ namespace StreamCompaction { int compactWithScan(int n, int *odata, const int *idata) { timer().startCpuTimer(); // TODO + std::copy(idata, idata + n - 1, &(odata[1])); + odata[0] = 0; + map(n, odata); + for (int i = 1; i < n; i++) + { + odata[i] = odata[i] + odata[i - 1]; + } + int lastNumber = odata[n - 1]; + int* temp = (int*)malloc(n * sizeof(int)); + memcpy(temp, odata, n * sizeof(int)); + if (idata[n - 1] != 0) + { + ++lastNumber; + } + + for (int i = 0; i < n; i++) + { + odata[temp[i]] = idata[i]; + } + free(temp); timer().endCpuTimer(); - return -1; + return lastNumber; } } } diff --git a/stream_compaction/cpu.h b/stream_compaction/cpu.h index 873c047..cbcc8b7 100644 --- a/stream_compaction/cpu.h +++ b/stream_compaction/cpu.h @@ -11,5 +11,7 @@ namespace StreamCompaction { int compactWithoutScan(int n, int *odata, const int *idata); int compactWithScan(int n, int *odata, const int *idata); + + void radixSort(int n, int* odata, int* idata); } } diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu index 2db346e..40b4aa1 100644 --- a/stream_compaction/efficient.cu +++ b/stream_compaction/efficient.cu @@ -2,6 +2,9 @@ #include #include "common.h" #include "efficient.h" +/*! Block size used for CUDA kernel launch. 
*/ +#define blockSize 128 +#define passNumber 6 namespace StreamCompaction { namespace Efficient { @@ -12,13 +15,310 @@ namespace StreamCompaction { return timer; } + //number of data with padding included + int length; + //container to store the data + int* dev_idata; + int* dev_odata; + int* dev_indices; + int* dev_bools; + int* dev_nonZero; + + //for radix sort (0 - 5) + int* dev_input; + int* dev_pass; + + dim3 threadsPerBlock(blockSize); + + void initRadixSort(int N, int* odata, const int* idata) + { + //give padding the arrays with size not the power of 2 + length = int(pow(2, ilog2ceil(N))); + + cudaMalloc((void**)&dev_input, length * sizeof(int)); + cudaMemcpy(dev_input, idata, N * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMalloc dev_input failed!", __LINE__); + + cudaMalloc((void**)&dev_idata, length * sizeof(int)); + cudaMemcpy(dev_idata, idata, N * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMalloc dev_idata failed!", __LINE__); + + cudaMalloc((void**)&dev_odata, length * sizeof(int)); + cudaMemcpy(dev_odata, idata, N * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMalloc dev_odata failed!", __LINE__); + + cudaMalloc((void**)&dev_pass, length * sizeof(int)); + checkCUDAError("cudaMalloc dev_pass failed!", __LINE__); + + //to record non-zero numbers + cudaMalloc((void**)&dev_nonZero, 1 * sizeof(int)); + checkCUDAError("cudaMalloc dev_nonZero failed!", __LINE__); + + cudaDeviceSynchronize(); + } + + void initScan(int N, int* odata, const int* idata) + { + //give padding the arrays with size not the power of 2 + length = int(pow(2, ilog2ceil(N))); + cudaMalloc((void**)&dev_idata, length * sizeof(int)); + cudaMemcpy(dev_idata, idata, N * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMalloc dev_idata failed!", __LINE__); + cudaMalloc((void**)&dev_odata, length * sizeof(int)); + cudaMemcpy(dev_odata, idata, N * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMalloc dev_odata failed!", __LINE__); + cudaDeviceSynchronize(); + } + + void initCompact(int N, const int* idata, int* odata) + { + //give padding the arrays with size not the power of 2 + length = int(pow(2, ilog2ceil(N))); + cudaMalloc((void**)&dev_idata, length * sizeof(int)); + cudaMemcpy(dev_idata, idata, N * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMalloc dev_idata failed!", __LINE__); + cudaMalloc((void**)&dev_odata, length * sizeof(int)); + cudaMemcpy(dev_odata, idata, N * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMalloc dev_odata failed!", __LINE__); + //to record non-zero numbers + cudaMalloc((void**)&dev_nonZero, 1 * sizeof(int)); + checkCUDAError("cudaMalloc dev_nonZero failed!", __LINE__); + + cudaMalloc((void**)&dev_input, length * sizeof(int)); + cudaMemcpy(dev_input, idata, N * sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMalloc dev_input failed!", __LINE__); + cudaMalloc((void**)&dev_indices, length * sizeof(int)); + checkCUDAError("cudaMalloc dev_indices failed!", __LINE__); + cudaMalloc((void**)&dev_bools, length * sizeof(int)); + checkCUDAError("cudaMalloc dev_bool failed!", __LINE__); + + cudaDeviceSynchronize(); + } + + void endScan() + { + cudaFree(dev_idata); + cudaFree(dev_odata); + } + + void endCompact() + { + cudaFree(dev_bools); + cudaFree(dev_indices); + cudaFree(dev_idata); + cudaFree(dev_odata); + cudaFree(dev_input); + cudaFree(dev_nonZero); + } + + void endRadixSort() + { + cudaFree(dev_idata); + cudaFree(dev_odata); + cudaFree(dev_pass); + cudaFree(dev_input); + 
cudaFree(dev_nonZero); + } + + //n is number of elements in one pass + //pass is 0, 1, 2, 3, 4, 5 + __global__ void RadixSortMapKernel(int n, int realLen, int pass, int* idata, int* odata) + { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) + { + return; + } + //map 0 to 1 + odata[index] = 1 - ((idata[index] >> pass) & 1); + //if there are extra space mark them as 1 + if (index >= realLen) + { + odata[index] = 0; + } + } + + //here n is the number of the array without padding + __global__ void RadixTrueScanKernel(int n, int zeros, int* idata, int* pass, int* odata) + { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) + { + return; + } + if (pass[index] == 0) + { + odata[index] = index - idata[index] + zeros; + } + else + { + odata[index] = idata[index]; + } + } + + + __global__ void RadixScatterKernel(int n, int* indices, int* odata, int* input) + { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) + { + return; + } + odata[indices[index]] = input[index]; + } + + __global__ void EfficientMapKernel(int n, int realLen, int* idata) + { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) + { + return; + } + + if (index < realLen && idata[index] != 0) + { + idata[index] = 1; + } + else + { + idata[index] = 0; + } + //__syncthreads(); + } + + __global__ void InitDownSweepKernel(int n, int* idata, int* odata) + { + idata[n - 1] = 0; + odata[n - 1] = 0; + } + + __global__ void UpSweepScanKernel(int n, int interval, int* idata, int* odata) + { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) + { + return; + } + //index that needs to be changed + int validBase = interval * 2; + int trueIndex = validBase * (index + 1) - 1; + idata[trueIndex] = odata[trueIndex - interval] + odata[trueIndex]; + odata[trueIndex] = idata[trueIndex]; + } + + __global__ void DownSweepScanKernel(int n, int interval, int* idata, int* odata) + { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) + { + return; + } + //index that needs to be changed + int validBase = interval * 2; + int trueIndex = validBase * (index + 1) - 1; + idata[trueIndex] = odata[trueIndex - interval] + odata[trueIndex]; + idata[trueIndex - interval] = odata[trueIndex]; + } + + __global__ void CountKernel(int n, int* indices, int* idata, int* count) + { + int lastIndice = n - 1; + if (idata[lastIndice] == 0) + { + count[0] = indices[lastIndice]; + } + else + { + count[0] = indices[lastIndice] + 1; + } + } + /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scan(int n, int *odata, const int *idata) { - timer().startGpuTimer(); // TODO + initScan(n, odata, idata); + dim3 fullBlocksPerGrid((length + blockSize - 1) / blockSize); + timer().startGpuTimer(); + for (int i = 1; i < length; i = i * 2) + { + int totalSize = length / (2 * i); + dim3 BlocksPerGrid((totalSize + blockSize - 1) / blockSize); + UpSweepScanKernel << > > (totalSize, i, dev_idata, dev_odata); + cudaDeviceSynchronize(); + + std::swap(dev_idata, dev_odata); + } + std::swap(dev_idata, dev_odata); + InitDownSweepKernel << <1, 1 >> > (length, dev_idata, dev_odata); + + for (int i = length / 2; i > 0; i = i / 2) + { + int totalSize = length / (2 * i); + dim3 BlocksPerGrid((totalSize + blockSize - 1) / blockSize); + DownSweepScanKernel << > > (totalSize , i, dev_idata, dev_odata); + cudaDeviceSynchronize(); + std::swap(dev_idata, dev_odata); + } + timer().endGpuTimer(); + cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost); + + endScan(); + } + + + void radixSort(int n, int* odata, const int* idata) + { + initRadixSort(n, odata, idata); + dim3 fullBlocksPerGrid((length + blockSize - 1) / blockSize); + timer().startGpuTimer(); + for (int i = 0; i < passNumber; i++) + { + + RadixSortMapKernel << > > (length, n, i, dev_input, dev_odata); + cudaMemcpy(dev_pass, dev_odata, sizeof(int) * length, cudaMemcpyDeviceToDevice); + /* + * Scan for one pass begins + */ + for (int i = 1; i < length; i = i * 2) + { + int totalSize = length / (2 * i); + dim3 BlocksPerGrid((totalSize + blockSize - 1) / blockSize); + UpSweepScanKernel << > > (totalSize, i, dev_idata, dev_odata); + cudaDeviceSynchronize(); + + std::swap(dev_idata, dev_odata); + } + std::swap(dev_idata, dev_odata); + InitDownSweepKernel << <1, 1 >> > (length, dev_idata, dev_odata); + + for (int i = length / 2; i > 0; i = i / 2) + { + int totalSize = length / (2 * i); + dim3 BlocksPerGrid((totalSize + blockSize - 1) / blockSize); + DownSweepScanKernel << > > (totalSize, i, dev_idata, dev_odata); + cudaDeviceSynchronize(); + std::swap(dev_idata, dev_odata); + } + /* + * scan for one pass ends + */ + //get the total number of zero + int* p = (int*)malloc(1 * sizeof(int)); + CountKernel << <1, 1 >> > (n, dev_odata, dev_pass, dev_nonZero); + cudaMemcpy(p, dev_nonZero, sizeof(int) * 1, cudaMemcpyDeviceToHost); + int zeros = p[0]; + + dim3 fullBlocksPerGrid1((n + blockSize - 1) / blockSize); + RadixTrueScanKernel << > > (n, zeros, dev_odata, dev_pass, dev_idata); + RadixScatterKernel<< > >(n, dev_idata, dev_odata, dev_input); + std::swap(dev_odata, dev_input); + } timer().endGpuTimer(); + //dev_input as the final result + cudaMemcpy(odata, dev_input, sizeof(int) * n, cudaMemcpyDeviceToHost); + endRadixSort(); } /** @@ -31,10 +331,45 @@ namespace StreamCompaction { * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { + initCompact(n, idata, odata); + timer().startGpuTimer(); - // TODO + dim3 fullBlocksPerGrid((length + blockSize - 1) / blockSize); + EfficientMapKernel << > > (length, n, dev_odata); + for (int i = 1; i < length; i = i * 2) + { + int totalSize = length / (2 * i); + dim3 BlocksPerGrid((totalSize + blockSize - 1) / blockSize); + UpSweepScanKernel << > > (totalSize, i, dev_idata, dev_odata); + cudaDeviceSynchronize(); + + std::swap(dev_idata, dev_odata); + } + std::swap(dev_idata, dev_odata); + InitDownSweepKernel << <1, 1 >> > (length, dev_idata, dev_odata); + + for (int i = length / 2; i > 0; i = i / 2) + { + int totalSize = length / (2 * i); + dim3 BlocksPerGrid((totalSize + blockSize - 1) / blockSize); + DownSweepScanKernel << > > (totalSize, i, dev_idata, dev_odata); + cudaDeviceSynchronize(); + std::swap(dev_idata, dev_odata); + } + + std::swap(dev_odata, dev_indices); + StreamCompaction::Common::kernMapToBoolean << > > (length, dev_bools, dev_indices); + StreamCompaction::Common::kernScatter << > > (length, dev_odata, dev_input, dev_bools, dev_indices); timer().endGpuTimer(); - return -1; + + cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost); + int* p = (int*)malloc(1 * sizeof(int)); + CountKernel << <1, 1 >> > (n, dev_indices, dev_input, dev_nonZero); + cudaMemcpy(p, dev_nonZero, sizeof(int) * 1, cudaMemcpyDeviceToHost); + int ans = p[0]; + free(p); + endCompact(); + return ans; } } } diff --git a/stream_compaction/efficient.h b/stream_compaction/efficient.h index 803cb4f..b5948e5 100644 --- a/stream_compaction/efficient.h +++ b/stream_compaction/efficient.h @@ -8,6 +8,8 @@ namespace StreamCompaction { void scan(int n, int *odata, const int *idata); + void radixSort(int n, int* odata, const int* idata); + int compact(int n, int *odata, const int *idata); } } diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu index 4308876..389646e 100644 --- a/stream_compaction/naive.cu +++ b/stream_compaction/naive.cu @@ -2,24 +2,107 @@ #include #include "common.h" #include "naive.h" +/*! Block size used for CUDA kernel launch. 
*/ +#define blockSize 128 namespace StreamCompaction { + namespace Naive { using StreamCompaction::Common::PerformanceTimer; + + int* dev_idata; + int* dev_odata; + PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } + + void initScan(int N, int* odata, const int* idata) + { + int shift = 0; + cudaMalloc((void**)&dev_idata, N * sizeof(int)); + cudaMemcpy(dev_idata, &(shift), sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(&dev_idata[1], idata, sizeof(int) * (N - 1), cudaMemcpyHostToDevice); + checkCUDAError("cudaMalloc dev_idata failed!", __LINE__); + cudaMalloc((void**)&dev_odata, N * sizeof(int)); + cudaMemcpy(dev_odata, &(shift), sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(&dev_odata[1], idata, sizeof(int) * (N - 1), cudaMemcpyHostToDevice); + checkCUDAError("cudaMalloc dev_odata failed!", __LINE__); + cudaDeviceSynchronize(); + } + + void endScan() + { + cudaFree(dev_idata); + cudaFree(dev_odata); + } + + __global__ void NaiveMapKernel(int n, int* idata, int* odata) + { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) + { + return; + } + if (odata[index] != 0) + { + idata[index] = 1; + } + else + { + idata[index] = 0; + } + } + + __global__ void NaiveScanKernel(int n, int interval, int* odata, int* idata) + { + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + if (index >= n) + { + return; + } + + if (index >= interval) + { + odata[index] = idata[index] + idata[index - interval]; + } + else + { + odata[index] = idata[index]; + } + } + // TODO: __global__ /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ + void scan(int n, int *odata, const int *idata) { - timer().startGpuTimer(); + // TODO + initScan(n, odata, idata); + cudaDeviceSynchronize(); + timer().startGpuTimer(); + dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); + int blocks = 1; + int round = ilog2ceil(n); + + cudaDeviceSynchronize(); + for (int i = 0; i < round; i++) + { + int interval = int(powf(2, i)); + NaiveScanKernel << > > (n, interval, dev_odata, dev_idata); + cudaDeviceSynchronize(); + std::swap(dev_idata, dev_odata); + } + std::swap(dev_idata, dev_odata); timer().endGpuTimer(); + cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost); + endScan(); } } + } diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu index 1def45e..3f3bffc 100644 --- a/stream_compaction/thrust.cu +++ b/stream_compaction/thrust.cu @@ -9,6 +9,10 @@ namespace StreamCompaction { namespace Thrust { using StreamCompaction::Common::PerformanceTimer; + + int* dev_in; + int* dev_out; + PerformanceTimer& timer() { static PerformanceTimer timer; @@ -18,11 +22,17 @@ namespace StreamCompaction { * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { - timer().startGpuTimer(); + // TODO use `thrust::exclusive_scan` // example: for device_vectors dv_in and dv_out: // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); + + thrust::device_vector dev_in(idata, idata + n); + thrust::device_vector dev_out(odata, odata + n); + timer().startGpuTimer(); + thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin()); timer().endGpuTimer(); + thrust::copy(dev_out.begin(), dev_out.end(), odata); } } }