diff --git a/README.md b/README.md
index 0e38ddb..513938d 100644
--- a/README.md
+++ b/README.md
@@ -3,12 +3,109 @@ CUDA Stream Compaction
 
 **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**
 
-* (TODO) YOUR NAME HERE
-  * (TODO) [LinkedIn](), [personal website](), [twitter](), etc.
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* Yichao Wang
+  * [LinkedIn](https://www.linkedin.com/in/wangyic/)
+* Tested on: Windows 10 Home 64-bit (10.0, Build 18363)
+  * Intel(R) Core(TM) i7-7700HQ CPU @ 2.80GHz (8 CPUs)
+  * GeForce GTX 1060 6.1
 
-### (TODO: Your README)
+## Description
 
-Include analysis, etc. (Remember, this is public, so don't put
-anything here that you don't want to share with the world.)
+This project implements Scan and Stream Compaction in various ways (a small worked example follows the list):
+* Scan (all implementations support non-power-of-two input)
+  * cpu
+  * naive (gpu)
+  * work-efficient (gpu, optimized indexing)
+  * thrust (gpu)
+* Stream Compaction
+  * cpu without scan
+  * cpu with scan
+  * gpu with work-efficient scan
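+
+To make the two operations concrete, here is a small hand-worked example (illustrative values only, not output from this program):
+
+```
+input:                 [ 3  0  1  0  4  2 ]
+exclusive scan:        [ 0  3  3  4  4  8 ]
+
+stream compaction (remove zeros):
+bools (x != 0):        [ 1  0  1  0  1  1 ]
+scan of bools:         [ 0  1  1  2  2  3 ]   <- output index of each kept element
+compacted output:      [ 3  1  4  2 ]
+```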
+
+For more information, see [INSTRUCTION.md](INSTRUCTION.md).
+
+## Performance Analysis
+
+### Block Size Optimization
+
+![](img/blockSize.png)
+
+Note: `NPOT` stands for Non-Power-of-Two.
+
+From the plot above, we can see that there is not much difference between block sizes, so we choose a block size of 512 for the following analysis.
+
+### Implementation Comparison
+
+![](img/scan.png)
+
+![](img/compact.png)
+
+From the plots above, we can see that the elapsed time increases as the input size increases, while there is little difference in elapsed time when the input size is a non-power-of-two. Not surprisingly, the thrust implementation is the fastest.
+
+For stream compaction, the cpu version without scan runs faster than the cpu version with scan. This is because the version with scan requires three passes over the data (map, scan, scatter), as sketched below, while the version without scan needs only a single loop.
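+
+A minimal sketch of those three passes is shown below. It is a simplified illustration, not the exact `compactWithScan` code further down in this diff: the helper name is made up, and it uses a separate `indices` buffer, whereas the real implementation reuses `odata` to hold the scanned indices.
+
+```cpp
+#include <vector>
+
+int compactWithScanSketch(int n, int* odata, const int* idata) {
+    if (n <= 0) { return 0; }
+    std::vector<int> bools(n), indices(n);
+    for (int i = 0; i < n; i++) {          // pass 1: map each element to 0 or 1
+        bools[i] = (idata[i] != 0) ? 1 : 0;
+    }
+    indices[0] = 0;                        // pass 2: exclusive scan of the 0/1 array
+    for (int i = 1; i < n; i++) {
+        indices[i] = indices[i - 1] + bools[i - 1];
+    }
+    int count = 0;                         // pass 3: scatter kept elements to their slots
+    for (int i = 0; i < n; i++) {
+        if (bools[i]) {
+            odata[indices[i]] = idata[i];
+            count++;
+        }
+    }
+    return count;
+}
+```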
+
+### Work-Efficient Scan Optimization
+
+Before this optimization, the work-efficient scan actually ran slower than the naive and cpu implementations, because not all launched threads were doing useful work. For example, if the input size is 1024, the first depth level performs only 512 additions, so at most 512 threads are needed rather than 1024. To optimize the work-efficient scan, I therefore adjusted the number of blocks launched according to the depth level and changed the index calculation in the up-sweep and down-sweep kernels, as sketched below.
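+
+The key change is visible in the up-sweep kernel from `efficient.cu` (included later in this diff): launch only as many threads as there are additions at the current depth, and map each compact thread index onto the strided element it updates.
+
+```cuda
+// Up-sweep at depth d: pow2 == 2^d, and only about n / (2 * pow2) threads are launched.
+__global__ void kernScanUpSweep(int n, int *data, int pow2) {
+    size_t idx = (blockIdx.x * blockDim.x) + threadIdx.x;
+    idx = 2 * pow2 * (idx + 1) - 1;   // compact thread id -> strided element index
+    if (idx >= n) {
+        return;
+    }
+    data[idx] += data[idx - pow2];
+}
+```
+
+With this mapping, the grid launched at depth `d` needs only about `size / 2^(d+1)` threads, instead of launching `size` threads and letting most of them fail the old `idx % (2 * pow2) == 0` test.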
+
+### Output for Scan and Stream Compaction Test (512 block size, 2^24 input size)
+
+```
+
+****************
+** SCAN TESTS **
+****************
+    [ 47 5 43 43 27 45 0 29 1 26 3 8 27 ... 2 0 ]
+==== cpu scan, power-of-two ====
+   elapsed time: 29.0025ms    (std::chrono Measured)
+    [ 0 47 52 95 138 165 210 210 239 240 266 269 277 ... 410953421 410953423 ]
+==== cpu scan, non-power-of-two ====
+   elapsed time: 29.5704ms    (std::chrono Measured)
+    [ 0 47 52 95 138 165 210 210 239 240 266 269 277 ... 410953326 410953375 ]
+    passed
+==== naive scan, power-of-two ====
+   elapsed time: 24.2698ms    (CUDA Measured)
+    [ 0 47 52 95 138 165 210 210 239 240 266 269 277 ... 410953421 410953423 ]
+    passed
+==== naive scan, non-power-of-two ====
+   elapsed time: 23.7513ms    (CUDA Measured)
+    passed
+==== work-efficient scan, power-of-two ====
+   elapsed time: 9.78432ms    (CUDA Measured)
+    passed
+==== work-efficient scan, non-power-of-two ====
+   elapsed time: 9.76662ms    (CUDA Measured)
+    passed
+==== thrust scan, power-of-two ====
+   elapsed time: 1.13667ms    (CUDA Measured)
+    passed
+==== thrust scan, non-power-of-two ====
+   elapsed time: 1.34381ms    (CUDA Measured)
+    passed
+
+*****************************
+** STREAM COMPACTION TESTS **
+*****************************
+    [ 2 0 0 0 1 3 3 0 2 3 3 2 2 ... 3 0 ]
+==== cpu compact without scan, power-of-two ====
+   elapsed time: 50.8381ms    (std::chrono Measured)
+    [ 2 1 3 3 2 3 3 2 2 3 1 3 3 ... 3 3 ]
+    passed
+==== cpu compact without scan, non-power-of-two ====
+   elapsed time: 44.1896ms    (std::chrono Measured)
+    [ 2 1 3 3 2 3 3 2 2 3 1 3 3 ... 1 3 ]
+    passed
+==== cpu compact with scan ====
+   elapsed time: 73.4177ms    (std::chrono Measured)
+    [ 2 1 3 3 2 3 3 2 2 3 1 3 3 ... 3 3 ]
+    passed
+==== work-efficient compact, power-of-two ====
+   elapsed time: 13.1512ms    (CUDA Measured)
+    passed
+==== work-efficient compact, non-power-of-two ====
+   elapsed time: 13.1215ms    (CUDA Measured)
+    passed
+Press any key to continue . . .
+```
diff --git a/img/blockSize.png b/img/blockSize.png
new file mode 100644
index 0000000..c531be8
Binary files /dev/null and b/img/blockSize.png differ
diff --git a/img/compact.png b/img/compact.png
new file mode 100644
index 0000000..d3343d3
Binary files /dev/null and b/img/compact.png differ
diff --git a/img/scan.png b/img/scan.png
new file mode 100644
index 0000000..5cb280e
Binary files /dev/null and b/img/scan.png differ
diff --git a/src/main.cpp b/src/main.cpp
index 896ac2b..48337b9 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -13,7 +13,7 @@
 #include <stream_compaction/thrust.h>
 #include "testing_helpers.hpp"
 
-const int SIZE = 1 << 8; // feel free to change the size of array
+const int SIZE = 1 << 24; // feel free to change the size of array
 const int NPOT = SIZE - 3; // Non-Power-Of-Two
 int *a = new int[SIZE];
 int *b = new int[SIZE];
@@ -51,7 +51,7 @@ int main(int argc, char* argv[]) {
     printDesc("naive scan, power-of-two");
     StreamCompaction::Naive::scan(SIZE, c, a);
     printElapsedTime(StreamCompaction::Naive::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
-    //printArray(SIZE, c, true);
+    printArray(SIZE, c, true);
     printCmpResult(SIZE, b, c);
 
     /* For bug-finding only: Array of 1s to help find bugs in stream compaction or scan
diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu
index 2ed6d63..b2d99e9 100644
--- a/stream_compaction/common.cu
+++ b/stream_compaction/common.cu
@@ -23,7 +23,17 @@ namespace StreamCompaction {
          * which map to 0 will be removed, and elements which map to 1 will be kept.
          */
         __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
-            // TODO
+            int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (idx >= n) {
+                return;
+            }
+
+            if (idata[idx] != 0) {
+                bools[idx] = 1;
+            }
+            else {
+                bools[idx] = 0;
+            }
         }
 
         /**
@@ -32,7 +42,14 @@ namespace StreamCompaction {
          */
         __global__ void kernScatter(int n, int *odata, const int *idata,
                 const int *bools, const int *indices) {
-            // TODO
+            int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (idx >= n) {
+                return;
+            }
+
+            if (bools[idx] == 1) {
+                odata[indices[idx]] = idata[idx];
+            }
         }
 
     }
 }
diff --git a/stream_compaction/common.h b/stream_compaction/common.h
index d2c1fed..6ad4383 100644
--- a/stream_compaction/common.h
+++ b/stream_compaction/common.h
@@ -12,6 +12,7 @@
 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
 #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
+#define blockSize 512
 
 /**
  * Check for CUDA errors; print and exit if there was a problem.
diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu
index 719fa11..e64af5a 100644
--- a/stream_compaction/cpu.cu
+++ b/stream_compaction/cpu.cu
@@ -2,6 +2,8 @@
 #include "cpu.h"
 #include "common.h"
 
+#include
+
 namespace StreamCompaction {
     namespace CPU {
 
@@ -12,6 +14,14 @@ namespace StreamCompaction {
             return timer;
         }
 
+        void scanNoTimer(int n, int* odata, const int* idata) {
+            // exclusive scan: odata[k] holds the sum of idata[0 .. k-1]
+            odata[0] = 0;
+            for (int k = 1; k < n; k++) {
+                odata[k] = odata[k - 1] + idata[k - 1];
+            }
+        }
+
         /**
          * CPU scan (prefix sum).
          * For performance analysis, this is supposed to be a simple for loop.
         */
         void scan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
-            // TODO
+            scanNoTimer(n, odata, idata);
             timer().endCpuTimer();
         }
 
+
+
         /**
          * CPU stream compaction without using the scan function.
          *
@@ -30,9 +42,15 @@ namespace StreamCompaction {
          */
         int compactWithoutScan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
-            // TODO
+            int index = 0;
+            for (int i = 0; i < n; i++) {
+                if (idata[i] != 0) {
+                    odata[index] = idata[i];
+                    index++;
+                }
+            }
             timer().endCpuTimer();
-            return -1;
+            return index;
         }
 
         /**
@@ -42,9 +60,32 @@ namespace StreamCompaction {
          */
         int compactWithScan(int n, int *odata, const int *idata) {
             timer().startCpuTimer();
-            // TODO
+
+            // compute temp array of 0/1 flags
+            int* tempArr = new int[n];
+            for (int i = 0; i < n; i++) {
+                if (idata[i] != 0) {
+                    tempArr[i] = 1;
+                }
+                else {
+                    tempArr[i] = 0;
+                }
+            }
+
+            // exclusive scan
+            scanNoTimer(n, odata, tempArr);
+
+            // scatter; odata currently holds the scanned indices, and since the
+            // scanned index at position i is always <= i, scattering in place is safe
+            int count = 0;
+            for (int i = 0; i < n; i++) {
+                if (tempArr[i] != 0) {
+                    odata[odata[i]] = idata[i];
+                    count++;
+                }
+            }
+            delete[] tempArr;
+
             timer().endCpuTimer();
-            return -1;
+            return count;
         }
     }
 }
diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu
index 2db346e..579e5bb 100644
--- a/stream_compaction/efficient.cu
+++ b/stream_compaction/efficient.cu
@@ -3,6 +3,7 @@
 #include "common.h"
 #include "efficient.h"
 
+
 namespace StreamCompaction {
     namespace Efficient {
         using StreamCompaction::Common::PerformanceTimer;
@@ -12,13 +13,83 @@ namespace StreamCompaction {
             return timer;
         }
 
+
+        // only run with 1 thread; used to zero the last element before the down-sweep
+        __global__ void kernUpdateArr(int idx, int val, int *arr) {
+            arr[idx] = val;
+        }
+
+        __global__ void kernScanUpSweep(int n, int *data, int pow2) {
+            /*int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (idx >= n) {
+                return;
+            }
+
+            if (idx % (2 * pow2) == 0) {
+                data[idx + 2 * pow2 - 1] += data[idx + pow2 - 1];
+            }*/
+
+            // optimized solution: one thread per addition at this depth
+            size_t idx = (blockIdx.x * blockDim.x) + threadIdx.x;
+            idx = 2 * pow2 * (idx + 1) - 1;
+            if (idx >= n) {
+                return;
+            }
+            data[idx] += data[idx - pow2];
+        }
+
+        __global__ void kernScanDownSweep(int n, int *data, int pow2) {
+            /*int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (idx >= n) {
+                return;
+            }
+
+            if (idx % (2 * pow2) == 0) {
+                int temp = data[idx + pow2 - 1];
+                data[idx + pow2 - 1] = data[idx + 2 * pow2 - 1];
+                data[idx + 2 * pow2 - 1] += temp;
+            }*/
+
+            // optimized solution
+            size_t idx = (blockIdx.x * blockDim.x) + threadIdx.x;
+            idx = 2 * pow2 * (idx + 1) - 1;
+            if (idx >= n) {
+                return;
+            }
+            int temp = data[idx - pow2];
+            data[idx - pow2] = data[idx];
+            data[idx] += temp;
+        }
+
         /**
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            int* dev_arr;
+            int maxDepth = ilog2ceil(n);
+            int size = pow(2, maxDepth);
+
+            cudaMalloc((void**)&dev_arr, size * sizeof(int));
+            cudaMemcpy(dev_arr, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
+            dim3 blockNumPow((size + blockSize - 1) / blockSize);
+
             timer().startGpuTimer();
-            // TODO
+            // up-sweep
+            for (int d = 0; d < maxDepth; d++) {
+                blockNumPow = (size / pow(2, d + 1) + blockSize - 1) / blockSize;
+                kernScanUpSweep<<<blockNumPow, blockSize>>>(size, dev_arr, pow(2, d));
+            }
+
+            // zero the root, then down-sweep
+            kernUpdateArr<<<1, 1>>>(size - 1, 0, dev_arr);
+
+            for (int d = maxDepth - 1; d >= 0; d--) {
+                blockNumPow = (size / pow(2, d + 1) + blockSize - 1) / blockSize;
+                kernScanDownSweep<<<blockNumPow, blockSize>>>(size, dev_arr, pow(2, d));
+            }
             timer().endGpuTimer();
+            cudaMemcpy(odata, dev_arr, n * sizeof(int), cudaMemcpyDeviceToHost);
+            cudaFree(dev_arr);
         }
 
         /**
@@ -31,10 +102,55 @@ namespace StreamCompaction {
          * @returns The number of elements remaining after compaction.
          */
         int compact(int n, int *odata, const int *idata) {
+            int* dev_idata, *dev_odata, *dev_bools, *dev_indices;
+            int maxDepth = ilog2ceil(n);
+            int size = pow(2, maxDepth);
+
+            cudaMalloc((void**)&dev_idata, size * sizeof(int));
+            cudaMalloc((void**)&dev_odata, n * sizeof(int));
+            cudaMalloc((void**)&dev_bools, size * sizeof(int));
+            cudaMalloc((void**)&dev_indices, size * sizeof(int));
+            cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
+            dim3 blockNum((n + blockSize - 1) / blockSize);
+            dim3 blockNumPow((size + blockSize - 1) / blockSize);
+
             timer().startGpuTimer();
-            // TODO
+            Common::kernMapToBoolean<<<blockNumPow, blockSize>>>(size, dev_bools, dev_idata);
+            // device-to-device copy of the boolean flags into the scan buffer
+            cudaMemcpy(dev_indices, dev_bools, size * sizeof(int), cudaMemcpyDeviceToDevice);
+
+            // scan
+            for (int d = 0; d < maxDepth; d++) {
+                blockNumPow = (size / pow(2, d + 1) + blockSize - 1) / blockSize;
+                kernScanUpSweep<<<blockNumPow, blockSize>>>(size, dev_indices, pow(2, d));
+            }
+
+            kernUpdateArr<<<1, 1>>>(size - 1, 0, dev_indices);
+
+            for (int d = maxDepth - 1; d >= 0; d--) {
+                blockNumPow = (size / pow(2, d + 1) + blockSize - 1) / blockSize;
+                kernScanDownSweep<<<blockNumPow, blockSize>>>(size, dev_indices, pow(2, d));
+            }
+
+            // scatter
+            Common::kernScatter<<<blockNum, blockSize>>>(n, dev_odata, dev_idata, dev_bools, dev_indices);
             timer().endGpuTimer();
-            return -1;
+
+            cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
+            // count the kept elements on the host
+            int* bools = new int[n];
+            cudaMemcpy(bools, dev_bools, n * sizeof(int), cudaMemcpyDeviceToHost);
+            int count = 0;
+            for (int i = 0; i < n; i++) {
+                if (bools[i]) {
+                    count++;
+                }
+            }
+            delete[] bools;
+
+            cudaFree(dev_idata);
+            cudaFree(dev_odata);
+            cudaFree(dev_bools);
+            cudaFree(dev_indices);
+            return count;
         }
     }
 }
diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu
index 4308876..d8d34ed 100644
--- a/stream_compaction/naive.cu
+++ b/stream_compaction/naive.cu
@@ -3,6 +3,9 @@
 #include "common.h"
 #include "naive.h"
 
+
+
+
 namespace StreamCompaction {
     namespace Naive {
         using StreamCompaction::Common::PerformanceTimer;
@@ -13,13 +16,51 @@ namespace StreamCompaction {
         }
 
         // TODO: __global__
+        __global__ void kernNaiveScan(int n, int *odata, int *idata, int offset) {
+            int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
+            if (idx >= n) {
+                return;
+            }
+            if (idx >= offset) {
+                odata[idx] = idata[idx - offset] + idata[idx];
+            }
+            else {
+                odata[idx] = idata[idx];
+            }
+        }
+
         /**
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            int* dev_arr1, *dev_arr2;
+
+            cudaMalloc((void**)&dev_arr1, n * sizeof(int));
+            cudaMalloc((void**)&dev_arr2, n * sizeof(int));
+            cudaMemcpy(dev_arr1, idata, n * sizeof(int), cudaMemcpyHostToDevice);
+
+            dim3 blockNum((n + blockSize - 1) / blockSize);
+
+            int maxDepth = ilog2ceil(n);
             timer().startGpuTimer();
-            // TODO
+            for (int d = 1; d <= maxDepth; d++) {
+                kernNaiveScan<<<blockNum, blockSize>>>(n, dev_arr2, dev_arr1, pow(2.0, d - 1));
+
+                // ping pong
+                if (d < maxDepth) {
+                    int* temp = dev_arr1;
+                    dev_arr1 = dev_arr2;
+                    dev_arr2 = temp;
+                }
+            }
             timer().endGpuTimer();
+
+            // shift right by one to turn the inclusive scan into an exclusive scan
+            cudaMemcpy(odata + 1, dev_arr2, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost);
+            odata[0] = 0;
+
+            cudaFree(dev_arr1);
+            cudaFree(dev_arr2);
+
         }
     }
 }
diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu
index 1def45e..7cb6311 100644
--- a/stream_compaction/thrust.cu
+++ b/stream_compaction/thrust.cu
@@ -18,11 +18,15 @@ namespace StreamCompaction {
          * Performs prefix-sum (aka scan) on idata, storing the result into odata.
          */
         void scan(int n, int *odata, const int *idata) {
+            thrust::device_vector<int> dv_in(idata, idata + n);
+            thrust::device_vector<int> dv_out(n);
             timer().startGpuTimer();
             // TODO use `thrust::exclusive_scan`
             // example: for device_vectors dv_in and dv_out:
             // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
+            thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
             timer().endGpuTimer();
+            thrust::copy(dv_out.begin(), dv_out.end(), odata);
         }
     }
 }