diff --git a/README.md b/README.md index a82ea0f..d7dd710 100644 --- a/README.md +++ b/README.md @@ -1,213 +1,178 @@ CUDA Stream Compaction -====================== +================= **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2** -* (TODO) YOUR NAME HERE -* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab) +* Shuai Shao (Shrek) +* Tested on: Windows 7, i5-3210M @ 2.50GHz 4.00GB, GeForce GT 640M LE (Personal Laptop) + +Intro +--------------------- +This project implements parallel reduction, scan, and sort algorithm, which are building blocks for many algorithms, in cpu approach and gpu approach. The test program is able to generate random array of integers and test the correctness of these implementation, and make a comparison in terms of execution time. cpu time is currently measured by `clock_t`, while the gpu time is recorded via `cudaEvent` + +| | +| ------------- | ------------- | ----------------| +| cpu scan | +| naive scan| +| work-efficient scan| +| thrust scan| +| ------| ----------------|-------| +|cpu compact without scan| +|cpu compact with scan| +|work-efficient compact| + + +I also implemented a simple version of Radix. Due to time limitation, there's no shared memory usage. So no split and merge steps. Only global memory is used. +| | +| ------------- | ------------- | ----------------| +|cpu merge sort| +|radix sort| + + +Rough block size optimization +----------------------------------- + +Testing on a 2^16 array for block size `{64,128,192,256}`. When `blockSize=192` turns out every GPU function cost less time than other blockSizes. 
Here is some of the comparison (ms): +|-|naive scan| work-efficient scan|thrust scan|work-efficient compact +| ------------- | ------------- | ----------------| ----------------| ----------------| +|64|3.12|5.76|0.00128|6.32 +|128|2.38|3.94|0.00131|5.54 +|192|2.11|3.89|0.00128|4.97 +|256|2.90|4.09|0.00131|6.72 + + +Execution Time Analysis +--------------------------------- + +For this part, I keep `blockSize = 192` constant. I test different cpu and gpu approaches on different data sizes. I have included the time for GPU global memory operations such as `cudaMalloc` and `cudaMemcpy`. + ++ Scan: +![scan_table](images/scan.png) + ++ Compaction: +![scan_table](images/compact.png) + +(array size = 2^n (x axis)) +(execution time = y axis) + +The unexpected thing is that my GPU implementation costs much more time than the CPU serial approach. On the other hand, the thrust toolkit function is perfect. +One thing to notice is that the standard GPU parallel algorithms use shared memory, whereas my implementation uses global memory. Shared memory accessing speed is >1TB/s while global memory accessing speed is around 150GB/s. In the case of my machine, the 48KB shared memory space per block can store a 12k int array at maximum. Turns out the memory accessing speed is the bottleneck for my implementation. We can also spot this on the timeline. +Another thing is that when using global memory, as the data size booms, some blocks cannot run in parallel any more. + +Besides, it is also unexpected that work-efficient scan runs slower than the naive one. My implementation tries its best to reduce unnecessary memcpy and malloc, i.e. I use two arrays taking turns to be input data and output data by using two pointers `cur_in` and `cur_out` for the naive scan. I use only one array for work-efficient scan since there's no race on the same level. So basically the problem here is that although work-efficient scan avoids a lot of unnecessary sum operations, the work-efficient scan uses more memory access than the naive approach. 
The max memory access times per thread for naive scan is 3, while for work-efficient scan, the number is 3 for up-sweeping, and 5 for down-sweeping. Without cache, this is really time-consuming. + + +But when I check the timeline for thrust, I cannot find function calls but only blank. I fail to find the secret of thrust at present. + + + + +Output Sample +--------------------------- +``` +ArraySize:2^(16), 65536 +BlockSize:192 -### (TODO: Your README) +**************** +** SCAN TESTS ** +**************** + [ 38 19 38 37 5 47 15 35 0 12 3 0 42 ... 35 0 ] +==== cpu scan, power-of-two ==== +time:1.000000 + [ 0 38 57 95 132 137 184 199 234 234 246 249 249 ... 1604374 1604409 ] +==== cpu scan, non-power-of-two ==== +time:0.000000 + [ 0 38 57 95 132 137 184 199 234 234 246 249 249 ... 1604305 1604316 ] + passed +==== naive scan, power-of-two ==== +time:2.123680 + passed +==== naive scan, non-power-of-two ==== +time:2.108992 + passed +==== work-efficient scan, power-of-two ==== +time:3.803328 + passed +==== work-efficient scan, non-power-of-two ==== +time:3.889184 + passed +==== thrust scan, power-of-two ==== +time:0.001312 + passed +==== thrust scan, non-power-of-two ==== +time:0.001280 + passed -Include analysis, etc. (Remember, this is public, so don't put -anything here that you don't want to share with the world.) +***************************** +** STREAM COMPACTION TESTS ** +***************************** + [ 2 3 2 1 3 1 1 1 2 0 1 0 2 ... 1 0 ] +==== cpu compact without scan, power-of-two ==== +time:0.000000 + [ 2 3 2 1 3 1 1 1 2 1 2 1 1 ... 1 1 ] + passed +==== cpu compact without scan, non-power-of-two ==== +time:0.000000 + [ 2 3 2 1 3 1 1 1 2 1 2 1 1 ... 3 1 ] + passed +==== cpu compact with scan ==== +time:1.000000 + [ 2 3 2 1 3 1 1 1 2 1 2 1 1 ... 1 1 ] + passed +==== work-efficient compact, power-of-two ==== +time:4.982400 + [ 2 3 2 1 3 1 1 1 2 1 2 1 1 ... 
1 1 ] + passed +==== work-efficient compact, non-power-of-two ==== +time:4.974688 + passed -Instructions (delete me) -======================== +***************************** +** SIMPLE RADIX SORT TESTS ** +***************************** + [ 38 99 29 24 92 113 110 27 36 5 11 33 126 ... 99 0 ] +==== cpu sort, power-of-two ==== +time:20.000000 + [ 0 0 0 0 0 0 0 0 0 0 0 0 0 ... 126 126 ] +==== radix sort, power-of-two ==== +time:30.557344 + [ 0 0 0 0 0 0 0 0 0 0 0 0 0 ... 126 126 ] + passed -This is due Sunday, September 13 at midnight. +``` -**Summary:** In this project, you'll implement GPU stream compaction in CUDA, -from scratch. This algorithm is widely used, and will be important for -accelerating your path tracer project. -Your stream compaction implementations in this project will simply remove `0`s -from an array of `int`s. In the path tracer, you will remove terminated paths -from an array of rays. -In addition to being useful for your path tracer, this project is meant to -reorient your algorithmic thinking to the way of the GPU. On GPUs, many -algorithms can benefit from massive parallelism and, in particular, data -parallelism: executing the same code many times simultaneously with different -data. -You'll implement a few different versions of the *Scan* (*Prefix Sum*) -algorithm. First, you'll implement a CPU version of the algorithm to reinforce -your understanding. Then, you'll write a few GPU implementations: "naive" and -"work-efficient." Finally, you'll use some of these to implement GPU stream -compaction. -**Algorithm overview & details:** There are two primary references for details -on the implementation of scan and stream compaction. -* The [slides on Parallel Algorithms](https://github.com/CIS565-Fall-2015/cis565-fall-2015.github.io/raw/master/lectures/2-Parallel-Algorithms.pptx) - for Scan, Stream Compaction, and Work-Efficient Parallel Scan. 
-* GPU Gems 3, Chapter 39 - [Parallel Prefix Sum (Scan) with CUDA](http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html). -Your GPU stream compaction implementation will live inside of the -`stream_compaction` subproject. This way, you will be able to easily copy it -over for use in your GPU path tracer. +Extra: Radix Sort +-------------------------- +To enable Radix Sort, you need to uncomment this macro define +>//#define RADIX_SORT_TEST -## Part 0: The Usual +Due to time limitation, there's no shared memory usage. So no split and bitonic merge steps. Only global memory is used. I used a CPU Merge sort to make a comparison and do correctness checking. The range of the random number is linear to the size of the array. -This project (and all other CUDA projects in this course) requires an NVIDIA -graphics card with CUDA capability. Any card with Compute Capability 2.0 -(`sm_20`) or greater will work. Check your GPU on this -[compatibility table](https://developer.nvidia.com/cuda-gpus). -If you do not have a personal machine with these specs, you may use those -computers in the Moore 100B/C which have supported GPUs. +(time ms) +|n | cpu merge sort | gpu simple radix | +| ------------- | ------------- | ------- | +|15|11| 54.683 +|16 |21 |71.0747 +|17 |56 |95.9559 +|18 |102 |166.257 +|19 |1200 |281.074 +|20 |3793 |532.041 +|21 |10227 |1018.44 + + +![scan_table](images/radix.png) + +Turns out even with global memory access and take in account malloc and memcpy, the gpu approach still shows its power after n >=18. With split and shared memory, radix sort must be able to make an impact. -**HOWEVER**: If you need to use the lab computer for your development, you will -not presently be able to do GPU performance profiling. This will be very -important for debugging performance bottlenecks in your program. -### Useful existing code -* `stream_compaction/common.h` - * `checkCUDAError` macro: checks for CUDA errors and exits if there were any. 
- * `ilog2ceil(x)`: computes the ceiling of log2(x), as an integer. -* `main.cpp` - * Some testing code for your implementations. -## Part 1: CPU Scan & Stream Compaction -This stream compaction method will remove `0`s from an array of `int`s. - -In `stream_compaction/cpu.cu`, implement: - -* `StreamCompaction::CPU::scan`: compute an exclusive prefix sum. -* `StreamCompaction::CPU::compactWithoutScan`: stream compaction without using - the `scan` function. -* `StreamCompaction::CPU::compactWithScan`: stream compaction using the `scan` - function. Map the input array to an array of 0s and 1s, scan it, and use - scatter to produce the output. You will need a **CPU** scatter implementation - for this (see slides or GPU Gems chapter for an explanation). - -These implementations should only be a few lines long. - - -## Part 2: Naive GPU Scan Algorithm - -In `stream_compaction/naive.cu`, implement `StreamCompaction::Naive::scan` - -This uses the "Naive" algorithm from GPU Gems 3, Section 39.2.1. We haven't yet -taught shared memory, and you **shouldn't use it yet**. Example 39-1 uses -shared memory, but is limited to operating on very small arrays! Instead, write -this using global memory only. As a result of this, you will have to do -`ilog2ceil(n)` separate kernel invocations. - -Beware of errors in Example 39-1 in the book; both the pseudocode and the CUDA -code in the online version of Chapter 39 are known to have a few small errors -(in superscripting, missing braces, bad indentation, etc.) - -Since the parallel scan algorithm operates on a binary tree structure, it works -best with arrays with power-of-two length. Make sure your implementation works -on non-power-of-two sized arrays (see `ilog2ceil`). This requires extra memory -- your intermediate array sizes will need to be rounded to the next power of -two. - - -## Part 3: Work-Efficient GPU Scan & Stream Compaction - -### 3.1. 
Scan - -In `stream_compaction/efficient.cu`, implement -`StreamCompaction::Efficient::scan` - -All of the text in Part 2 applies. - -* This uses the "Work-Efficient" algorithm from GPU Gems 3, Section 39.2.2. -* Beware of errors in Example 39-2. -* Test non-power-of-two sized arrays. - -### 3.2. Stream Compaction - -This stream compaction method will remove `0`s from an array of `int`s. - -In `stream_compaction/efficient.cu`, implement -`StreamCompaction::Efficient::compact` - -For compaction, you will also need to implement the scatter algorithm presented -in the slides and the GPU Gems chapter. - -In `stream_compaction/common.cu`, implement these for use in `compact`: - -* `StreamCompaction::Common::kernMapToBoolean` -* `StreamCompaction::Common::kernScatter` - - -## Part 4: Using Thrust's Implementation - -In `stream_compaction/thrust.cu`, implement: - -* `StreamCompaction::Thrust::scan` - -This should be a very short function which wraps a call to the Thrust library -function `thrust::exclusive_scan(first, last, result)`. - -To measure timing, be sure to exclude memory operations by passing -`exclusive_scan` a `thrust::device_vector` (which is already allocated on the -GPU). You can create a `thrust::device_vector` by creating a -`thrust::host_vector` from the given pointer, then casting it. - - -## Part 5: Radix Sort (Extra Credit) (+10) - -Add an additional module to the `stream_compaction` subproject. Implement radix -sort using one of your scan implementations. Add tests to check its correctness. - - -## Write-up - -1. Update all of the TODOs at the top of this README. -2. Add a description of this project including a list of its features. -3. Add your performance analysis (see below). - -All extra credit features must be documented in your README, explaining its -value (with performance comparison, if applicable!) and showing an example how -it works. For radix sort, show how it is called and an example of its output. 
- -Always profile with Release mode builds and run without debugging. - -### Questions - -* Roughly optimize the block sizes of each of your implementations for minimal - run time on your GPU. - * (You shouldn't compare unoptimized implementations to each other!) - -* Compare all of these GPU Scan implementations (Naive, Work-Efficient, and - Thrust) to the serial CPU version of Scan. Plot a graph of the comparison - (with array size on the independent axis). - * You should use CUDA events for timing. Be sure **not** to include any - explicit memory operations in your performance measurements, for - comparability. - * To guess at what might be happening inside the Thrust implementation, take - a look at the Nsight timeline for its execution. - -* Write a brief explanation of the phenomena you see here. - * Can you find the performance bottlenecks? Is it memory I/O? Computation? Is - it different for each implementation? - -* Paste the output of the test program into a triple-backtick block in your - README. - * If you add your own tests (e.g. for radix sort or to test additional corner - cases), be sure to mention it explicitly. - -These questions should help guide you in performance analysis on future -assignments, as well. - -## Submit - -If you have modified any of the `CMakeLists.txt` files at all (aside from the -list of `SOURCE_FILES`), you must test that your project can build in Moore -100B/C. Beware of any build issues discussed on the Google Group. - -1. Open a GitHub pull request so that we can see that you have finished. - The title should be "Submission: YOUR NAME". -2. Send an email to the TA (gmail: kainino1+cis565@) with: - * **Subject**: in the form of `[CIS565] Project 2: PENNKEY` - * Direct link to your pull request on GitHub - * In the form of a grade (0-100+) with comments, evaluate your own - performance on the project. - * Feedback on the project itself, if any. 
diff --git a/images/compact.png b/images/compact.png new file mode 100644 index 0000000..54e12eb Binary files /dev/null and b/images/compact.png differ diff --git a/images/radix.png b/images/radix.png new file mode 100644 index 0000000..ff45d05 Binary files /dev/null and b/images/radix.png differ diff --git a/images/scan.png b/images/scan.png new file mode 100644 index 0000000..bef60b2 Binary files /dev/null and b/images/scan.png differ diff --git a/src/main.cpp b/src/main.cpp index 7308451..15370f3 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -5,19 +5,85 @@ * @date 2015 * @copyright University of Pennsylvania */ +#include +#include +#include + +#include +#include + +#include #include +#include #include #include #include #include #include "testing_helpers.hpp" + +cudaEvent_t beginEvent; +cudaEvent_t endEvent; + +std::ofstream of; + +void cudaRecordEndAndPrint() +{ + cudaEventRecord(endEvent,0); + cudaEventSynchronize( endEvent ); + float ms; + cudaEventElapsedTime(&ms,beginEvent,endEvent); + printf("time:%f\n",ms); + + of << "," <(end - start).count()); + //fclose(fp); + of<<'\n'; + of.close(); + + return 0; } diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu index fe872d4..a8f9e94 100644 --- a/stream_compaction/common.cu +++ b/stream_compaction/common.cu @@ -1,3 +1,5 @@ +#include +#include #include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { @@ -18,22 +20,62 @@ void checkCUDAErrorFn(const char *msg, const char *file, int line) { namespace StreamCompaction { namespace Common { -/** - * Maps an array to an array of 0s and 1s for stream compaction. Elements - * which map to 0 will be removed, and elements which map to 1 will be kept. - */ -__global__ void kernMapToBoolean(int n, int *bools, const int *idata) { - // TODO -} + __global__ void kernZeroArray(int n, int * data) + { + int k = threadIdx.x + blockDim.x * blockIdx.x; + if(k < n) + { + data[k] = 0; + } + } -/** - * Performs scatter on an array. 
That is, for each element in idata, - * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. - */ -__global__ void kernScatter(int n, int *odata, - const int *idata, const int *bools, const int *indices) { - // TODO -} + + + __global__ void kernInclusive2Exclusive(int n, int * exclusive, const int * inclusive) + { + int k = threadIdx.x + blockDim.x * blockIdx.x; + if( k < n) + { + if(k == 0) + { + exclusive[k] = IDENTITY; + } + else + { + exclusive[k] = inclusive[k-1]; + } + } + } + + + + /** + * Maps an array to an array of 0s and 1s for stream compaction. Elements + * which map to 0 will be removed, and elements which map to 1 will be kept. + */ + __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { + int k = threadIdx.x + blockDim.x * blockIdx.x; + if( k < n ) + { + bools[k] = idata[k] != 0 ? 1 : 0; + } + } + + /** + * Performs scatter on an array. That is, for each element in idata, + * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. + */ + __global__ void kernScatter(int n, int *odata, + const int *idata, const int *bools, const int *indices) { + int k = threadIdx.x + blockDim.x * blockIdx.x; + if( k < n ) + { + if(bools[k] == 1) + { + odata[ indices[k] ] = idata[k]; + } + } + } } } diff --git a/stream_compaction/common.h b/stream_compaction/common.h index 4f52663..bd7afe1 100644 --- a/stream_compaction/common.h +++ b/stream_compaction/common.h @@ -1,5 +1,8 @@ #pragma once +#include +#include + #include #include #include @@ -7,6 +10,13 @@ #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) +#define IDENTITY (0) + + + + +const int blockSize = 192; + /** * Check for CUDA errors; print and exit if there was a problem. 
*/ @@ -27,6 +37,10 @@ inline int ilog2ceil(int x) { namespace StreamCompaction { namespace Common { + __global__ void kernZeroArray(int n, int * data); + + __global__ void kernInclusive2Exclusive(int n, int * exclusive, const int * inclusive); + __global__ void kernMapToBoolean(int n, int *bools, const int *idata); __global__ void kernScatter(int n, int *odata, diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu index e600c29..9cbaebf 100644 --- a/stream_compaction/cpu.cu +++ b/stream_compaction/cpu.cu @@ -8,8 +8,14 @@ namespace CPU { * CPU scan (prefix sum). */ void scan(int n, int *odata, const int *idata) { - // TODO - printf("TODO\n"); + if(n > 0) + { + odata[0] = 0; + for(int i = 1 ; i < n; i++) + { + odata[i] = idata[i-1] + odata[i-1]; + } + } } /** @@ -18,8 +24,16 @@ void scan(int n, int *odata, const int *idata) { * @returns the number of elements remaining after compaction. */ int compactWithoutScan(int n, int *odata, const int *idata) { - // TODO - return -1; + int r = 0; + for(int i = 0; i < n; i++) + { + if(idata[i] != 0) + { + odata[r] = idata[i]; + r++; + } + } + return r; } /** @@ -28,9 +42,108 @@ int compactWithoutScan(int n, int *odata, const int *idata) { * @returns the number of elements remaining after compaction. */ int compactWithScan(int n, int *odata, const int *idata) { - // TODO - return -1; + int* mapped_ary = new int [n]; + int* scan_ary = new int [n]; + + //map input to 0s and 1s + for(int i = 0; i < n; i++) + { + mapped_ary[i] = (idata[i]!=0) ? 
1 : 0; + } + + scan(n,scan_ary,mapped_ary); + + //scatter + for(int i = 0; i < n; i++) + { + if(mapped_ary[i] != 0) + { + odata[ scan_ary[i] ] = idata[i]; + } + } + + int r = scan_ary[n-1] + mapped_ary[n-1]; + delete[] mapped_ary; + delete[] scan_ary; + return r; +} + + + +/** +* CPU simple merge sort +*/ +void merge(int left, int right, int mid,int * data) +{ + int i = left; + int j = mid+1; + int k = left; + + int * odata = new int[right+1]; + + while(i <= mid && j <= right) + { + if(data[i] <= data[j]) + { + odata[k] = data[i]; + i++; + k++; + } + else + { + odata[k] = data[j]; + j++; + k++; + } + } + + while( i <= mid ) + { + odata[k] = data[i]; + i++; + k++; + } + + while( j <= right ) + { + odata[k] = data[j]; + j++; + k++; + } + + for(int i = left; i<=right; i++) + { + data[i] = odata[i]; + } + + delete[] odata; +} + + +void mergeSort(int left, int right, int *odata) +{ + if(left < right) + { + int mid = (left+right)/2; + mergeSort(left,mid,odata); + mergeSort(mid+1,right,odata); + merge(left,right,mid,odata); + } +} + +void mergeLauncher(int left, int right, int *odata, const int *idata) +{ + for(int i=left; i<=right; i++) + { + odata[i] = idata[i]; + } + mergeSort(left,right,odata); } + + + + + } } diff --git a/stream_compaction/cpu.h b/stream_compaction/cpu.h index 6348bf3..c7c1d10 100644 --- a/stream_compaction/cpu.h +++ b/stream_compaction/cpu.h @@ -7,5 +7,10 @@ namespace CPU { int compactWithoutScan(int n, int *odata, const int *idata); int compactWithScan(int n, int *odata, const int *idata); + + //void merge(int left, int right, int mid,int * odata,const int * idata); + + //void mergeSort(int left, int right, int *odata); + void mergeLauncher(int left, int right, int *odata, const int *idata); } } diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu index b2f739b..b8721ed 100644 --- a/stream_compaction/efficient.cu +++ b/stream_compaction/efficient.cu @@ -4,31 +4,333 @@ #include "efficient.h" namespace StreamCompaction { 
-namespace Efficient { + namespace Efficient { + //const int blockSize = 128; -// TODO: __global__ + -/** - * Performs prefix-sum (aka scan) on idata, storing the result into odata. - */ -void scan(int n, int *odata, const int *idata) { - // TODO - printf("TODO\n"); -} + __global__ void kernUpSweep(int size, int step, int * data) + { + //step = 2^(d+1) + int k = threadIdx.x + blockDim.x * blockIdx.x; + + if(k < size) + { + if ( k % step == 0 ) + { + data[k + step - 1] += data[k + (step>>1) - 1]; + } + } + + } -/** - * Performs stream compaction on idata, storing the result into odata. - * All zeroes are discarded. - * - * @param n The number of elements in idata. - * @param odata The array into which to store elements. - * @param idata The array of elements to compact. - * @returns The number of elements remaining after compaction. - */ -int compact(int n, int *odata, const int *idata) { - // TODO - return -1; -} + __global__ void kernDownSweep(int size,int step, int * data) + { + //step = 2^(d+1) + int k = threadIdx.x + blockDim.x * blockIdx.x; -} + if(k < size) + { + if ( k % step == 0 ) + { + int left_child = data[k + (step>>1) - 1]; + data[k + (step>>1) - 1] = data[k + step - 1]; + data[k + step - 1] += left_child; + } + } + } + + + __global__ void kernSetRootZero(int rootId, int * data) + { + int k = threadIdx.x + blockDim.x * blockIdx.x; + if(k == rootId) + { + data[k] = 0; + } + } + + /** + * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
+ */ + void scan(int n, int *odata, const int *idata,bool is_dev_data) { + //if using device data directly + + + int * dev_data; + + int ceil_log2n = ilog2ceil(n); + int size = 1 << ceil_log2n; + + dim3 fullBlocksPerGrid((size + blockSize - 1) / blockSize); + + + cudaMalloc((void**)&dev_data, size * sizeof(int)); + checkCUDAError("cudaMalloc dev_data failed"); + Common::kernZeroArray<<< fullBlocksPerGrid, blockSize>>>(size, dev_data); + if(!is_dev_data) + { + //host data + cudaMemcpy(dev_data,idata, n * sizeof(int),cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy from data to dev_data failed"); + } + else + { + cudaMemcpy(dev_data,idata, n * sizeof(int),cudaMemcpyDeviceToDevice); + checkCUDAError("cudaMemcpy from data to dev_data failed"); + } + cudaDeviceSynchronize(); + + //UpSweep + for(int d = 0 ; d < ceil_log2n - 1 ; d++) + { + kernUpSweep<<< fullBlocksPerGrid, blockSize>>> (size, 1<<(d+1) , dev_data); + cudaDeviceSynchronize(); + } + + kernSetRootZero<<< fullBlocksPerGrid, blockSize>>> ( size - 1 , dev_data); + cudaDeviceSynchronize(); + + for(int d = ceil_log2n - 1 ; d >= 0 ; d--) + { + kernDownSweep<<< fullBlocksPerGrid, blockSize>>> (size, 1<<(d+1) , dev_data); + cudaDeviceSynchronize(); + } + + + if(!is_dev_data) + { + cudaMemcpy(odata,dev_data,n * sizeof(int),cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy from dev_data to odata failed"); + } + else + { + cudaMemcpy(odata,dev_data,n * sizeof(int),cudaMemcpyDeviceToDevice); + checkCUDAError("cudaMemcpy from dev_data to odata failed"); + } + cudaFree(dev_data); + } + + /** + * Performs stream compaction on idata, storing the result into odata. + * All zeroes are discarded. + * + * @param n The number of elements in idata. + * @param odata The array into which to store elements. + * @param idata The array of elements to compact. + * @returns The number of elements remaining after compaction. 
+ */ + int compact(int n, int *odata, const int *idata) { + int hos_scans; + int hos_bools; + int * dev_bools; + int * dev_scans; + int * dev_idata; + int * dev_odata; + dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); + + cudaMalloc((void**)&dev_bools, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_bools failed"); + cudaMalloc((void**)&dev_scans, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_scans failed"); + cudaMalloc((void**)&dev_idata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_idata failed"); + cudaMalloc((void**)&dev_odata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_odata failed"); + + cudaMemcpy(dev_idata,idata, n * sizeof(int),cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy from data to dev_data failed"); + cudaDeviceSynchronize(); + + Common::kernMapToBoolean<<< fullBlocksPerGrid, blockSize>>> ( n , dev_bools, dev_idata ); + cudaDeviceSynchronize(); + + //cudaMemcpy(hos_bools,dev_bools, n * sizeof(int),cudaMemcpyDeviceToHost); + //checkCUDAError("cudaMemcpy from data to dev_data failed"); + //cudaDeviceSynchronize(); + + scan(n,dev_scans,dev_bools,true); + + //cudaMemcpy(dev_scans,hos_scans, n * sizeof(int),cudaMemcpyHostToDevice); + //checkCUDAError("cudaMemcpy from hos_scans to dev_scans failed"); + //cudaDeviceSynchronize(); + + Common::kernScatter<<< fullBlocksPerGrid, blockSize>>>(n, dev_odata, + dev_idata, dev_bools, dev_scans); + cudaDeviceSynchronize(); + + cudaMemcpy(odata,dev_odata,n * sizeof(int),cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy from dev_odata to odata failed"); + //cudaDeviceSynchronize(); + + cudaMemcpy(&hos_scans,dev_scans+n-1,sizeof(int),cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy scans[n-1] failed"); + + cudaMemcpy(&hos_bools,dev_bools+n-1,sizeof(int),cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy bools[n-1] failed"); + + cudaDeviceSynchronize(); + + + + cudaFree(dev_idata); + cudaFree(dev_odata); + cudaFree(dev_bools); + cudaFree(dev_scans); + + //int num = 
hos_scans[n-1] + hos_bools[n-1]; + int num = hos_scans + hos_bools; + //delete[] hos_scans; + //delete[] hos_bools; + + return num; + } + + + + + + + + //Radix sort + + + __global__ void kernGetE(int n, int * odata, const int * idata,int cur_bit) + { + int index = threadIdx.x + blockDim.x * blockIdx.x; + if( index < n) + { + odata[index] = 1 - ( ( idata[index] & (1 << cur_bit ) ) >> cur_bit ); + } + } + + __global__ void kernGetK(int n, int* t, const int * f, const int totalFalses) + { + int index = threadIdx.x + blockDim.x * blockIdx.x; + if( index < n) + { + t[index] = index - f[index] + totalFalses; + } + } + + __global__ void kernRadixScatter(int n, int * odata,const int * idata, const int * e, const int * t, const int * f) + { + int index = threadIdx.x + blockDim.x * blockIdx.x; + if( index < n) + { + odata[ (e[index]==0) ? t[index] : f[index] ] = idata[index] ; + } + } + + + + int * dev_i; + int * dev_o; + int * dev_e; // dev_e[i] = 1 - dev_idata[i].cur_bit + int * dev_f; // exclusive scan of dev_e, id if false + int * dev_t; // i - f[i] + totalFalses, id if true + + + + + void radixSort(int n, int *dev_odata, const int *dev_idata, int cur_bit) + { + dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); + // get e + kernGetE<<< fullBlocksPerGrid, blockSize>>>(n,dev_e,dev_idata,cur_bit); + cudaDeviceSynchronize(); + + scan(n,dev_f,dev_e,true); + int totalFalses; + int last_e; + cudaMemcpy(&last_e,dev_e+n-1,sizeof(int),cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy dev_e[n-1] failed"); + cudaMemcpy(&totalFalses,dev_f+n-1,sizeof(int),cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy dev_f[n-1] failed"); + totalFalses += last_e; + + //get t + kernGetK<<< fullBlocksPerGrid, blockSize>>>(n,dev_t,dev_f,totalFalses); + + //scatter + kernRadixScatter<<< fullBlocksPerGrid, blockSize>>>(n,dev_odata,dev_idata,dev_e,dev_t,dev_f); + } + + + + //wrapper + void radixSortLauncher(int n, int *odata, const int *idata, int msb,int lsb) + { + //simple version + //no
split, no merge, no shared memory + + //split + + + //sort + //for each split + + + cudaMalloc((void**)&dev_i, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_i failed"); + cudaMalloc((void**)&dev_o, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_o failed"); + cudaMalloc((void**)&dev_e, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_e failed"); + cudaMalloc((void**)&dev_f, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_f failed"); + cudaMalloc((void**)&dev_t, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_t failed"); + + int * dev_cur_i = dev_i; + int * dev_cur_o = dev_o; + /* + if( (msb - lsb) % 2 == 0) + { + dev_cur_i = dev_i; + dev_cur_o = dev_o; + } + else + { + dev_cur_i = dev_o; + dev_cur_o = dev_i; + } + */ + + cudaMemcpy(dev_cur_i,idata,n*sizeof(int),cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy from idata to dev_cur_i failed"); + + + for(int i = lsb; i <= msb; i++) + { + radixSort(n,dev_cur_o,dev_cur_i,i); + + int * tmp = dev_cur_i; + dev_cur_i = dev_cur_o; + dev_cur_o = tmp; + } + + + //merge + + + //////// + + cudaMemcpy(odata,dev_cur_i,n*sizeof(int),cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy from dev_cur_o to odata failed"); + + + cudaFree(dev_i); + cudaFree(dev_o); + cudaFree(dev_e); + cudaFree(dev_f); + cudaFree(dev_t); + } + + + + + + + } } diff --git a/stream_compaction/efficient.h b/stream_compaction/efficient.h index 395ba10..ac5c1cd 100644 --- a/stream_compaction/efficient.h +++ b/stream_compaction/efficient.h @@ -2,8 +2,10 @@ namespace StreamCompaction { namespace Efficient { - void scan(int n, int *odata, const int *idata); + void scan(int n, int *odata, const int *idata,bool is_dev_data=false); int compact(int n, int *odata, const int *idata); + + void radixSortLauncher(int n, int *odata, const int *idata, int msb,int lsb); } } diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu index 3d86b60..e5940c3 100644 --- a/stream_compaction/naive.cu +++ b/stream_compaction/naive.cu @@ -5,16 +5,95 
@@ namespace StreamCompaction { namespace Naive { + //const int blockSize = 128; -// TODO: __global__ + int* dev_odata; + int* dev_idata; + //int* dev_tdata; //temp transfer one -/** - * Performs prefix-sum (aka scan) on idata, storing the result into odata. - */ -void scan(int n, int *odata, const int *idata) { - // TODO - printf("TODO\n"); -} + + __global__ void kernWriteOneSum(int n,int threshold, int* odata, const int* idata) + { + //threshold ... 2^(d-1) + int k = threadIdx.x + blockDim.x * blockIdx.x; + if( k < n ) + { + if( k >= threshold ) + { + odata[k] = idata[k - threshold] + idata[k]; + } + else + { + odata[k] = idata[k]; + } + } + } + + + + + /** + * Performs prefix-sum (aka scan) on idata, storing the result into odata. + */ + void scan(int n, int *odata, const int *idata) { + //naive parrellel scan + int ceil_log2n = ilog2ceil(n); + + + dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); + + cudaMalloc((void**)&dev_idata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_idata failed"); + + cudaMalloc((void**)&dev_odata, n * sizeof(int)); + checkCUDAError("cudaMalloc dev_odata failed"); + + + int* cur_out = dev_odata; + int* cur_in = dev_idata; + /* + //make sure the last write to idata (before inclusive 2 exclusive) + if(ceil_log2n % 2 == 0) + { + cur_out = dev_odata; + cur_in = dev_idata; + } + else + { + cur_out = dev_idata; + cur_in = dev_odata; + } + */ + + cudaMemcpy(cur_in,idata,n*sizeof(int),cudaMemcpyHostToDevice); + checkCUDAError("cudaMemcpy from idata to cur_in failed"); + + + cudaDeviceSynchronize(); + + + + for (int d = 1; d <= ceil_log2n ; d++) + { + kernWriteOneSum<<< fullBlocksPerGrid, blockSize>>> (n , 1<<(d-1) , cur_out, cur_in); + + int* tmp_p = cur_out; + cur_out = cur_in; + cur_in = tmp_p; + + + cudaDeviceSynchronize(); + } + + Common::kernInclusive2Exclusive<<< fullBlocksPerGrid, blockSize>>>(n,cur_out,cur_in); + + cudaMemcpy(odata,cur_out,n*sizeof(int),cudaMemcpyDeviceToHost); + checkCUDAError("cudaMemcpy from dev_odata 
to odata failed"); + + cudaFree(dev_idata); + cudaFree(dev_odata); + + } } } diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu index d8dbb32..856a428 100644 --- a/stream_compaction/thrust.cu +++ b/stream_compaction/thrust.cu @@ -15,7 +15,7 @@ namespace Thrust { void scan(int n, int *odata, const int *idata) { // TODO use `thrust::exclusive_scan` // example: for device_vectors dv_in and dv_out: - // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); + thrust::exclusive_scan(idata, idata + n, odata); } }