diff --git a/README.md b/README.md
index b71c458..87bd380 100644
--- a/README.md
+++ b/README.md
@@ -3,11 +3,89 @@ CUDA Stream Compaction
 
 **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**
 
-* (TODO) YOUR NAME HERE
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* Wenli Zhao
+* Tested on: Windows 7, i7-6700 CPU @ 3.40GHz, NVIDIA Quadro K620 (Moore 100C Lab)
 
-### (TODO: Your README)
+### README
+This project implements GPU stream compaction in CUDA. The implemented features include:
+1. CPU scan and stream compaction.
+   * Primarily used for performance comparison.
+2. Naive GPU scan algorithm.
+3. Work-efficient GPU scan and stream compaction algorithm.
+4. Calling Thrust's scan implementation.
 
-Include analysis, etc. (Remember, this is public, so don't put
-anything here that you don't want to share with the world.)
+Analysis
+========
+To analyze the performance of stream compaction, I first found the largest power-of-two array size for which my program ran correctly, and then tuned the block size. I chose a block size of 256, which appeared optimal for my GPU implementations on 2^17 elements. I then collected and analyzed the runtimes of the scan implementations.
+
+### Figure 1
+![](img/chart.png)
+
+### Figure 2
+#### Data corresponding to Figure 1
+![](img/image.png)
+
+Figure 1 plots array size against the runtime of each implementation in ms. Unfortunately, the results were not what we hoped for: my work-efficient implementation is slower than my naive implementation, which in turn is slower than my CPU implementation. This could be due to several factors. One is the amount of global memory access performed by the work-efficient scan (see the indexing sketch below); as the array size increases, that memory traffic becomes more and more costly.
+
+It is hard to say definitively, but the work-efficient runtime appears to be leveling off, whereas the naive and CPU runtimes trend upward. The work-efficient implementation might therefore win out at larger array sizes, but my implementation limits me to 2^17 elements.
+
+The Thrust implementation is comparatively efficient, but shows occasional spikes in runtime. I believe this is specific to Thrust's implementation: something underlying Thrust makes the first invocation of my Thrust scan slower. If I call the scan on the same array twice, the second call runs faster, so perhaps Thrust caches state and is quicker on later invocations (a warm-up sketch is given below).
+
+Although my GPU scan implementations are slower than the CPU scan, the work-efficient compact is faster than the CPU compact-with-scan.
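+
+To illustrate where that global-memory traffic comes from, here is a minimal sketch of a per-level up-sweep kernel in the spirit of the one in `efficient.cu` (the kernel name, guard, and launch loop are illustrative, not the exact code). Every active thread does two global reads and one global write with no shared-memory staging, so each of the log2(n) launches pays full global-memory latency.
+
+```
+// Illustrative sketch (assumes n is a power of two): one launch per tree level.
+__global__ void kernUpSweepSketch(int n, int stride, int *data) {
+    int index = threadIdx.x + blockIdx.x * blockDim.x;
+    int k = index * 2 * stride;                        // start of this thread's segment
+    if (k + 2 * stride - 1 >= n) {
+        return;                                        // guard threads past the end
+    }
+    data[k + 2 * stride - 1] += data[k + stride - 1];  // two global reads, one global write
+}
+
+// Host side: log2(n) launches, one per level of the balanced tree.
+// for (int stride = 1; stride < n; stride *= 2) {
+//     int threads = n / (2 * stride);
+//     kernUpSweepSketch<<<(threads + blockSize - 1) / blockSize, blockSize>>>(n, stride, dev_data);
+// }
+```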
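+
+If the first-invocation overhead is the concern, one option (not what the current code does) would be to run an untimed warm-up scan before the timed call, so that one-time CUDA/Thrust setup cost is not attributed to the scan itself. A sketch, assuming the same `timer()` helper used in `thrust.cu`:
+
+```
+thrust::device_vector<int> dv_in(idata, idata + n);
+thrust::device_vector<int> dv_out(n);
+// Untimed warm-up: absorbs first-call overhead (context and allocation setup).
+thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
+timer().startGpuTimer();
+thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
+timer().endGpuTimer();
+```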
+
+```
+
+****************
+** SCAN TESTS **
+****************
+    [ 28 17 26 2 41 12 6 34 18 12 12 33 23 ... 21 0 ]
+==== cpu scan, power-of-two ====
+   elapsed time: 0.012919ms    (std::chrono Measured)
+    [ 0 28 45 71 73 114 126 132 166 184 196 208 241 ... 200656 200677 ]
+==== cpu scan, non-power-of-two ====
+   elapsed time: 0.012919ms    (std::chrono Measured)
+    [ 0 28 45 71 73 114 126 132 166 184 196 208 241 ... 200635 200635 ]
+    passed
+==== naive scan, power-of-two ====
+   elapsed time: 0.180576ms    (CUDA Measured)
+    passed
+==== naive scan, non-power-of-two ====
+   elapsed time: 0.171488ms    (CUDA Measured)
+    passed
+==== work-efficient scan, power-of-two ====
+   elapsed time: 0.698912ms    (CUDA Measured)
+    passed
+==== work-efficient scan, non-power-of-two ====
+   elapsed time: 1.96842ms    (CUDA Measured)
+    passed
+==== thrust scan, power-of-two ====
+   elapsed time: 0.080352ms    (CUDA Measured)
+    passed
+==== thrust scan, non-power-of-two ====
+   elapsed time: 0.030464ms    (CUDA Measured)
+    passed
+
+*****************************
+** STREAM COMPACTION TESTS **
+*****************************
+==== cpu compact without scan, power-of-two ====
+   elapsed time: 0.034251ms    (std::chrono Measured)
+    [ 0 28 45 71 73 114 126 132 166 184 196 208 241 ... 151975 152019 ]
+    passed
+==== cpu compact without scan, non-power-of-two ====
+   elapsed time: 0.019829ms    (std::chrono Measured)
+    [ 1 1 3 1 3 2 3 1 2 2 3 2 1 ... 3 3 ]
+    passed
+==== cpu compact with scan ====
+   elapsed time: 0.055282ms    (std::chrono Measured)
+    [ 1 1 3 1 3 2 3 1 2 2 3 2 1 ... 3 2 ]
+    passed
+==== work-efficient compact, power-of-two ====
+   elapsed time: 0.047008ms    (CUDA Measured)
+    passed
+==== work-efficient compact, non-power-of-two ====
+   elapsed time: 0.048704ms    (CUDA Measured)
+    passed
+Press any key to continue . . .
+```
diff --git a/img/Capture.PNG b/img/Capture.PNG
new file mode 100644
index 0000000..f4f431d
Binary files /dev/null and b/img/Capture.PNG differ
diff --git a/img/chart.png b/img/chart.png
new file mode 100644
index 0000000..e057c16
Binary files /dev/null and b/img/chart.png differ
diff --git a/img/image.png b/img/image.png
new file mode 100644
index 0000000..259e11f
Binary files /dev/null and b/img/image.png differ
diff --git a/src/main.cpp b/src/main.cpp
index 7305641..c10fb21 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -13,8 +13,8 @@
 #include 
 #include "testing_helpers.hpp"
 
-const int SIZE = 1 << 8; // feel free to change the size of array
-const int NPOT = SIZE - 3; // Non-Power-Of-Two
+const long SIZE = 1 << 8; // feel free to change the size of array
+const long NPOT = SIZE - 3; // Non-Power-Of-Two
 int a[SIZE], b[SIZE], c[SIZE];
 
 int main(int argc, char* argv[]) {
@@ -38,6 +38,7 @@ int main(int argc, char* argv[]) {
     printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)");
     printArray(SIZE, b, true);
 
+    zeroArray(SIZE, c);
     printDesc("cpu scan, non-power-of-two");
     StreamCompaction::CPU::scan(NPOT, c, a);
@@ -49,20 +50,26 @@ int main(int argc, char* argv[]) {
     printDesc("naive scan, power-of-two");
     StreamCompaction::Naive::scan(SIZE, c, a);
     printElapsedTime(StreamCompaction::Naive::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
-    //printArray(SIZE, c, true);
+    //printArray(SIZE, a, true);
+    //printArray(SIZE, b, true);
+    //printArray(SIZE, c, true);
     printCmpResult(SIZE, b, c);
 
     zeroArray(SIZE, c);
     printDesc("naive scan, non-power-of-two");
     StreamCompaction::Naive::scan(NPOT, c, a);
     printElapsedTime(StreamCompaction::Naive::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
-    //printArray(SIZE, c, true);
+    //printArray(SIZE, a, true);
+    //printArray(SIZE, b, true);
+    //printArray(SIZE, c, true);
     printCmpResult(NPOT, b, c);
 
+    zeroArray(SIZE, c);
     printDesc("work-efficient scan, power-of-two");
     StreamCompaction::Efficient::scan(SIZE, c, a);
     printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA 
Measured)"); + //printArray(SIZE, b, true); //printArray(SIZE, c, true); printCmpResult(SIZE, b, c); @@ -70,9 +77,11 @@ int main(int argc, char* argv[]) { printDesc("work-efficient scan, non-power-of-two"); StreamCompaction::Efficient::scan(NPOT, c, a); printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); - //printArray(NPOT, c, true); + //printArray(SIZE, b, true); + //printArray(NPOT, c, true); printCmpResult(NPOT, b, c); + zeroArray(SIZE, c); printDesc("thrust scan, power-of-two"); StreamCompaction::Thrust::scan(SIZE, c, a); @@ -87,6 +96,7 @@ int main(int argc, char* argv[]) { //printArray(NPOT, c, true); printCmpResult(NPOT, b, c); + printf("\n"); printf("*****************************\n"); printf("** STREAM COMPACTION TESTS **\n"); @@ -96,7 +106,7 @@ int main(int argc, char* argv[]) { genArray(SIZE - 1, a, 4); // Leave a 0 at the end to test that edge case a[SIZE - 1] = 0; - printArray(SIZE, a, true); + //printArray(SIZE, a, true); int count, expectedCount, expectedNPOT; @@ -107,7 +117,7 @@ int main(int argc, char* argv[]) { count = StreamCompaction::CPU::compactWithoutScan(SIZE, b, a); printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)"); expectedCount = count; - printArray(count, b, true); + printArray(count, c, true); printCmpLenResult(count, expectedCount, b, b); zeroArray(SIZE, c); @@ -139,5 +149,13 @@ int main(int argc, char* argv[]) { //printArray(count, c, true); printCmpLenResult(count, expectedNPOT, b, c); + //zeroArray(6, c); + //int d[7] = { 0,1,2,0,2,0,1 }; + //int f[4] = { 1,2,2,1 }; + //printDesc("Work efficient compact, SMALL TEST CASE"); + //count = StreamCompaction::Efficient::compact(7, c, d); + //printArray(count, c, true); + //printCmpLenResult(count, 4, f, c); + system("pause"); // stop Win32 console from closing on exit } diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu index 8fc0211..3d6e484 100644 --- a/stream_compaction/common.cu +++ b/stream_compaction/common.cu @@ -24,6 +24,16 @@ namespace StreamCompaction { */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO + int index = threadIdx.x + (blockIdx.x * blockDim.x); + if (index >= n) { + return; + } + if (idata[index] != 0) { + bools[index] = 1; + } + else { + bools[index] = 0; + } } /** @@ -33,6 +43,13 @@ namespace StreamCompaction { __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO + int index = threadIdx.x + (blockIdx.x * blockDim.x); + if (index >= n) { + return; + } + if (bools[index] != 0) { + odata[indices[index]] = idata[index]; + } } } diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu index 05ce667..d592d43 100644 --- a/stream_compaction/cpu.cu +++ b/stream_compaction/cpu.cu @@ -1,15 +1,15 @@ #include #include "cpu.h" -#include "common.h" +#include "common.h" namespace StreamCompaction { namespace CPU { - using StreamCompaction::Common::PerformanceTimer; - PerformanceTimer& timer() - { - static PerformanceTimer timer; - return timer; + using StreamCompaction::Common::PerformanceTimer; + PerformanceTimer& timer() + { + static PerformanceTimer timer; + return timer; } /** @@ -18,11 +18,20 @@ namespace StreamCompaction { * (Optional) For better understanding before starting moving to GPU, you can simulate your GPU scan in this function first. 
*/ void scan(int n, int *odata, const int *idata) { - timer().startCpuTimer(); - // TODO - timer().endCpuTimer(); + timer().startCpuTimer(); + //TODO + scanImplementation(n, odata, idata); + timer().endCpuTimer(); + } + void scanImplementation(int n, int *odata, const int *idata) { + odata[0] = 0; + for (int i = 1; i < n; ++i) { + odata[i] = idata[i - 1] + odata[i - 1]; + } + } + /** * CPU stream compaction without using the scan function. * @@ -30,9 +39,15 @@ namespace StreamCompaction { */ int compactWithoutScan(int n, int *odata, const int *idata) { timer().startCpuTimer(); - // TODO + int count = 0; + for (int i = 0; i < n; i++) { + if (idata[i] != 0) { + odata[count] = idata[i]; + count++; + } + } timer().endCpuTimer(); - return -1; + return count; } /** @@ -41,10 +56,29 @@ namespace StreamCompaction { * @returns the number of elements remaining after compaction. */ int compactWithScan(int n, int *odata, const int *idata) { - timer().startCpuTimer(); - // TODO + timer().startCpuTimer(); + int *temp = new int[n]; + int *temp2 = new int[n]; + for (int i = 0; i < n; i++) { + if (idata[i] != 0) { + temp[i] = 1; + } + else { + temp[i] = 0; + } + temp2[i] = 0; + } + scanImplementation(n, temp2, temp); + for (int i = 0; i <= n; i++) { + if (temp[i] == 1) { + odata[temp2[i]] = idata[i]; + } + } + int count = temp2[n - 1]; + delete[] temp; + delete[] temp2; timer().endCpuTimer(); - return -1; + return count; } } } diff --git a/stream_compaction/cpu.h b/stream_compaction/cpu.h index 236ce11..b4dc3d8 100644 --- a/stream_compaction/cpu.h +++ b/stream_compaction/cpu.h @@ -8,6 +8,8 @@ namespace StreamCompaction { void scan(int n, int *odata, const int *idata); + void scanImplementation(int n, int *odata, const int *idata); + int compactWithoutScan(int n, int *odata, const int *idata); int compactWithScan(int n, int *odata, const int *idata); diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu index 36c5ef2..a5ec4f3 100644 --- a/stream_compaction/efficient.cu +++ b/stream_compaction/efficient.cu @@ -3,22 +3,84 @@ #include "common.h" #include "efficient.h" +#define blockSize 256 + namespace StreamCompaction { namespace Efficient { - using StreamCompaction::Common::PerformanceTimer; - PerformanceTimer& timer() - { - static PerformanceTimer timer; - return timer; + using StreamCompaction::Common::PerformanceTimer; + PerformanceTimer& timer() + { + static PerformanceTimer timer; + return timer; } + __global__ void kernUpSweep(int n, int d, int d1, int *idata) { + int index = threadIdx.x + (blockIdx.x * blockDim.x); + if (index > (n/d1)) { + return; + } + int k = d1 * index; + idata[k + d1 - 1] += idata[k + d - 1]; + } + + __global__ void kernDownSweep(int n, int d, int d1, int*idata) { + int index = threadIdx.x + (blockIdx.x * blockDim.x); + if (index > (n / d1)) { + return; + } + int k = d1*index; + int t = idata[k + d - 1]; + idata[k + d - 1] = idata[k + d1 - 1]; + idata[k + d1 - 1] += t; + } + /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scan(int n, int *odata, const int *idata) { - timer().startGpuTimer(); // TODO - timer().endGpuTimer(); + + int *dev_iData; + + // smallest power of 2 >= n + int pow2 = pow(2, ilog2ceil(n)); + int levels = ilog2ceil(n); + cudaMalloc((void**)&dev_iData, (pow2 + 1) * sizeof(int)); + checkCUDAError("cudaMalloc dev_iData failed"); + + cudaMemcpy(dev_iData, idata, sizeof(int)*n, cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy dev_iData failed"); + + timer().startGpuTimer(); + + for (int i = 0; i < levels; i++) { + int d = pow(2, i); + int d1 = pow(2, i + 1); + + int blocknum = ceil(pow2/ d1); + dim3 fullBlocks((blocknum + blockSize - 1) / blockSize); + + kernUpSweep << > > (n, d, d1, dev_iData); + cudaThreadSynchronize(); + } + + int a = 0; + cudaMemcpy(&dev_iData[pow2 - 1], &a, sizeof(int), cudaMemcpyHostToDevice); + for (int i = levels - 1; i >= 0; i--) { + int d = pow(2, i); + int d1 = pow(2, i + 1); + + int blocknum = ceil(pow2 / d1); + dim3 fullBlocks((blocknum + blockSize - 1) / blockSize); + + kernDownSweep << > > (n, d, d1, dev_iData); + cudaThreadSynchronize(); + } + + timer().endGpuTimer(); + cudaMemcpy(odata, dev_iData, sizeof(int)*(n), cudaMemcpyDeviceToHost); + cudaFree(dev_iData); + } /** @@ -31,10 +93,71 @@ namespace StreamCompaction { * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { - timer().startGpuTimer(); // TODO - timer().endGpuTimer(); - return -1; + int *dev_bools; + int *dev_indices; + int *dev_odata; + int *dev_idata; + + cudaMalloc((void**)&dev_bools, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_bools failed"); + + cudaMalloc((void**)&dev_indices, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_indices failed"); + + cudaMalloc((void**)&dev_odata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_odata failed"); + + cudaMalloc((void**)&dev_idata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_idata failed"); + + cudaMemcpy(dev_idata, idata, sizeof(int)*n, cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy dev_idata failed"); + + + timer().startGpuTimer(); + + dim3 otherName((n + blockSize - 1) / blockSize); + StreamCompaction::Common::kernMapToBoolean << > > (n, dev_bools, dev_idata); + + int *indices = new int[n]; + int *bools = new int[n]; + + cudaMemcpy(bools, dev_bools, sizeof(int)*n, cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy dev_bools failed"); + + timer().endGpuTimer(); + scan(n, indices, bools); + timer().startGpuTimer(); + + cudaMemcpy(dev_indices, indices, sizeof(int)*n, cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy dev_indices failed"); + + + StreamCompaction::Common::kernScatter << > > (n, dev_odata, dev_idata, dev_bools, dev_indices); + timer().endGpuTimer(); + + int count; + cudaMemcpy(&count, &dev_indices[n-1], sizeof(int), cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy dev_indices failed"); + + int lastBool; + cudaMemcpy(&lastBool, &dev_bools[n - 1], sizeof(int), cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy dev_bools failed"); + + + cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy dev_bools failed"); + + cudaFree(dev_bools); + cudaFree(dev_indices); + cudaFree(dev_idata); + cudaFree(dev_odata); + delete[] bools; + delete[] indices; + return count + lastBool; } + + } } diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu index 9218f8e..bb07cc2 100644 --- a/stream_compaction/naive.cu +++ b/stream_compaction/naive.cu @@ -3,23 +3,76 @@ #include "common.h" 
#include "naive.h" +#define blockSize 256 + namespace StreamCompaction { namespace Naive { - using StreamCompaction::Common::PerformanceTimer; - PerformanceTimer& timer() - { - static PerformanceTimer timer; - return timer; + using StreamCompaction::Common::PerformanceTimer; + PerformanceTimer& timer() + { + static PerformanceTimer timer; + return timer; } // TODO: __global__ + __global__ void kernScan(int n, int d, int *odata, int *idata) { + int k = threadIdx.x + (blockIdx.x * blockDim.x); + if (k >= n) { + return; + } + if (k >= d) { + int offset = k - d; + odata[k] = idata[k] + idata[offset]; + } + else { + odata[k] = idata[k]; + } + } + /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { - timer().startGpuTimer(); - // TODO - timer().endGpuTimer(); + + int *dev_idata; + int *dev_odata; + + // smallest power of 2 >= n + int pow2 = pow(2,ilog2ceil(n)); + cudaMalloc((void**)&dev_idata, (pow2 + 1) * sizeof(int)); + checkCUDAError("cudaMalloc error dev_idata"); + + cudaMalloc((void**)&dev_odata, (pow2 + 1) * sizeof(int)); + checkCUDAError("cudaMalloc error dev_odata"); + + // TIMER STARTS HERE + timer().startGpuTimer(); + + // shift the input array to the right and pad with a zero. + int a = 0; + cudaMemcpy(&dev_idata[0], &a, sizeof(int), cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy error 0 dev_idata naive"); + + cudaMemcpy(&dev_idata[1], idata, sizeof(int)*n, cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy error dev_idata naive"); + + int levels = ilog2ceil(n); + dim3 fullBlocks((pow2 + blockSize - 1) / blockSize); + + for (int i = 0; i < levels; i++) { + int d = pow(2, i); + kernScan << > > (n, d, dev_odata, dev_idata); + int *temp = dev_odata; + dev_odata = dev_idata; + dev_idata = temp; + } + + // TIMER ENDS HERE + timer().endGpuTimer(); + + cudaMemcpy(odata, dev_idata, sizeof(int)*(n), cudaMemcpyDeviceToHost); + cudaFree(dev_idata); + cudaFree(dev_odata); } } } diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu index 36b732d..1681d1c 100644 --- a/stream_compaction/thrust.cu +++ b/stream_compaction/thrust.cu @@ -8,21 +8,30 @@ namespace StreamCompaction { namespace Thrust { - using StreamCompaction::Common::PerformanceTimer; - PerformanceTimer& timer() - { - static PerformanceTimer timer; - return timer; + using StreamCompaction::Common::PerformanceTimer; + PerformanceTimer& timer() + { + static PerformanceTimer timer; + return timer; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { - timer().startGpuTimer(); - // TODO use `thrust::exclusive_scan` - // example: for device_vectors dv_in and dv_out: - // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); + + thrust::device_vector dev_idata(n); + thrust::device_vector dev_odata(n); + + // Copy host to device + thrust::copy(idata, idata + n,dev_idata.begin()); + + timer().startGpuTimer(); + // Scan + thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin()); timer().endGpuTimer(); + + thrust::copy(dev_odata.begin(), dev_odata.end(), odata); + } } }