diff --git a/Denoised.png b/Denoised.png new file mode 100644 index 0000000..9a55a7b Binary files /dev/null and b/Denoised.png differ
diff --git a/Denoised2.png b/Denoised2.png new file mode 100644 index 0000000..ddbd727 Binary files /dev/null and b/Denoised2.png differ
diff --git a/Noised.png b/Noised.png new file mode 100644 index 0000000..b1098bd Binary files /dev/null and b/Noised.png differ
diff --git a/Noised2.png b/Noised2.png new file mode 100644 index 0000000..e58a284 Binary files /dev/null and b/Noised2.png differ
diff --git a/README.md b/README.md index f044c82..c401704 100644 --- a/README.md +++ b/README.md @@ -3,11 +3,93 @@ CUDA Denoiser For CUDA Path Tracer
**University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 4**
-* (TODO) YOUR NAME HERE
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* (Charles) Zixin Zhang
+* Tested on: Windows 11, 11th Gen Intel Core i7, RTX 3060 Laptop GPU
-### (TODO: Your README)
+![pathTracer](images/pathTracer.gif)
-*DO NOT* leave the README to the last minute! It is a crucial part of the
-project, and we will not be able to grade you without a good README.
+
+# Results
+
+Figure 1: Zoomed-in "So many balls" scene:
+
+Denoised:
+
+![](Denoised.png)
+
+Noisy:
+
+![](Noised.png)
+
+---
+
+Figure 2: "So many balls" scene:
+
+Denoised:
+
+![](Denoised2.png)
+
+Noisy:
+
+![](Noised2.png)
+
+---
+
+Figure 3: A simple Cornell box:
+
+![](comp.png)
+
+The denoiser implements the paper "Edge-Avoiding A-Trous Wavelet Transform for Fast Global Illumination Filtering" by Dammertz, Sewtz, Hanika, and Lensch.
+
+# Performance Analysis
+
+In this implementation, denoising runs as a final pass: the path tracer performs 10 iterations, and an 11th iteration applies the denoiser to the accumulated image.
+
+## Denoising Performance
+
+| | Denoising time (ms) |
+| ----------- | ----------- |
+| Figure 1 | 52 |
+| Figure 2 | 43 |
+| Figure 3 | 44 |
+
+Because the filter runs only once, after the final path-tracing iteration, it removes most of the visible noise for roughly 40-50 ms of extra work at 1080p.
+
+## Comparison with Pure Ray Traced Images
+
+| | Iterations needed for a comparably smooth ray-traced result |
+| ----------- | ----------- |
+| Figure 1 | ~1000 |
+| Figure 2 | ~5000 |
+| Figure 3 | ~5000 |
+
+As the table shows, eliminating the noise purely by taking more samples per pixel would require on the order of 1,000-5,000 iterations, which is far more expensive than a single denoising pass.
+
+## Resolution
+
+Denoising time grows substantially when the resolution increases from 1080p to 4K. This is expected, since the filter's runtime scales linearly with the number of pixels.
+
+| | 720p | 1080p | 4K |
+| ----------- | ----------- | ----------- | ----------- |
+| Figure 1 | 10 ms | ~52 ms | 120 ms |
+| Figure 2 | ~16 ms | ~43 ms | ~215 ms |
+| Figure 3 | ~17 ms | ~44 ms | ~215 ms |
+
+# Changes
+
+To generate a large number of spheres programmatically (121 in Figures 1 and 2), the scene file is used only to specify the camera; all geometry and materials are created at run time in code.
+
+A fuzziness parameter is added to the material properties to support [fuzzy reflection](https://raytracing.github.io/books/RayTracingInOneWeekend.html#metal/fuzzyreflection).
+
+Figures 1 and 2 are generated by `scenes/manyBalls.txt`.
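+
+## Denoiser Driver Loop (Sketch)
+
+The filter is applied iteratively: every pass reuses the same 5x5 smoothing kernel but doubles the spacing between its taps (the "a-trous" holes), so a few passes cover a large effective footprint while each pass stays cheap. The snippet below is a condensed sketch of the driver loop in `denoisePathTracedImage` (`src/pathtrace.cu`); buffer, kernel, and UI names come from that file, while `numPasses` is a stand-in for the pass count (the current code reuses the path-trace iteration count).
+
+```cpp
+float colorWeight = ui_colorWeight;  // sigma for the color edge-stopping term
+int stepWidth = 1;                   // spacing between the 5x5 kernel taps
+for (int pass = 0; pass < numPasses; ++pass) {
+    performOneStepATrousFilter<<<blocksPerGrid2d, blockSize2d>>>(
+        cam, colorWeight, ui_normalWeight, ui_positionWeight,
+        stepWidth, dev_gBuffer, dev_image, dev_denoisedImage);
+    stepWidth *= 2;       // widen the footprint: 1, 2, 4, ...
+    colorWeight *= 0.5f;  // tighten the color weight on later passes
+    std::swap(dev_denoisedImage, dev_image);  // ping-pong so the next pass reads the filtered result
+}
+```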
+ +# References + +- [_Ray Tracing in One Weekend Series by Peter Shirley_](https://raytracing.github.io/books/RayTracingInOneWeekend.html) +- [_Edge-Avoiding A-Trous Wavelet Transform for fast Global Illumination Filtering by Dammertz, Sewtz, Hanika, and Lensch_](https://jo.dreggn.org/home/2010_atrous.pdf) diff --git a/comp.png b/comp.png new file mode 100644 index 0000000..2bb9d4d Binary files /dev/null and b/comp.png differ diff --git a/comp2.png b/comp2.png new file mode 100644 index 0000000..181fc9f Binary files /dev/null and b/comp2.png differ diff --git a/comp3.png b/comp3.png new file mode 100644 index 0000000..8a4b4fe Binary files /dev/null and b/comp3.png differ diff --git a/images/pathTracer.gif b/images/pathTracer.gif new file mode 100644 index 0000000..97c2d03 Binary files /dev/null and b/images/pathTracer.gif differ diff --git a/scenes/cornell_ceiling_light.txt b/scenes/cornell_ceiling_light.txt index 15af5f1..a1d519f 100644 --- a/scenes/cornell_ceiling_light.txt +++ b/scenes/cornell_ceiling_light.txt @@ -50,9 +50,9 @@ EMITTANCE 0 // Camera CAMERA -RES 800 800 +RES 1280 720 FOVY 45 -ITERATIONS 10 +ITERATIONS 1000 DEPTH 8 FILE cornell EYE 0.0 5 10.5 diff --git a/scenes/manyBalls.txt b/scenes/manyBalls.txt new file mode 100644 index 0000000..022f687 --- /dev/null +++ b/scenes/manyBalls.txt @@ -0,0 +1,10 @@ +// Camera +CAMERA +RES 1280 720 +FOVY 45 +ITERATIONS 10 +DEPTH 8 +FILE sphere +EYE 13 -2 3 +LOOKAT 0 0 0 +UP 0 1 0 \ No newline at end of file diff --git a/scenes/sphere.txt b/scenes/sphere2.txt similarity index 100% rename from scenes/sphere.txt rename to scenes/sphere2.txt diff --git a/src/interactions.h b/src/interactions.h index 144a9f5..4f6b45b 100644 --- a/src/interactions.h +++ b/src/interactions.h @@ -40,23 +40,122 @@ glm::vec3 calculateRandomDirectionInHemisphere( + sin(around) * over * perpendicularDirection2; } +__host__ __device__ +glm::vec3 calculateRandomDirectionInSphere(thrust::default_random_engine& rng) +{ + thrust::uniform_real_distribution u01(-1.f, 1.0f); + while (true) + { + glm::vec3 p = glm::vec3(u01(rng), u01(rng), u01(rng)); + float length = glm::length(p); + if (length >= 1.0f) { continue; } + return p; + } +} + +__host__ __device__ +float reflectance(float cosine, float refIdx) +{ + // Use Schlick's approximation for reflectance. + float r0 = (1.0f - refIdx) / (1.0f + refIdx); + r0 *= r0; + return r0 + (1.0f - r0) * glm::pow(1.0f - cosine, 5.0f); +} + /** - * Simple ray scattering with diffuse and perfect specular support. + * Scatter a ray with some probabilities according to the material properties. + * For example, a diffuse surface scatters in a cosine-weighted hemisphere. + * A perfect specular surface scatters in the reflected ray direction. + * In order to apply multiple effects to one surface, probabilistically choose + * between them. + * + * The visual effect you want is to straight-up add the diffuse and specular + * components. You can do this in a few ways. This logic also applies to + * combining other types of materias (such as refractive). + * + * - Always take an even (50/50) split between a each effect (a diffuse bounce + * and a specular bounce), but divide the resulting color of either branch + * by its probability (0.5), to counteract the chance (0.5) of the branch + * being taken. + * - This way is inefficient, but serves as a good starting point - it + * converges slowly, especially for pure-diffuse or pure-specular. 
+ * - Pick the split based on the intensity of each material color, and divide + * branch result by that branch's probability (whatever probability you use). + * + * This method applies its changes to the Ray parameter `ray` in place. + * It also modifies the color `color` of the ray in place. + * + * You may need to change the parameter list for your purposes! */ + __host__ __device__ void scatterRay( - PathSegment & pathSegment, - glm::vec3 intersect, - glm::vec3 normal, - const Material &m, - thrust::default_random_engine &rng) { - glm::vec3 newDirection; - if (m.hasReflective) { - newDirection = glm::reflect(pathSegment.ray.direction, normal); - } else { - newDirection = calculateRandomDirectionInHemisphere(normal, rng); + PathSegment& pathSegment, + glm::vec3 intersect, + glm::vec3 normal, + bool outside, + const Material& m, + thrust::default_random_engine& rng, + const ShadeableIntersection& sInter) { + + thrust::uniform_real_distribution u01(0, 1); + + if (m.hasReflective) + { + pathSegment.ray.origin = intersect + EPSILON * normal; + glm::vec3 reflectedDir = glm::reflect(glm::normalize(pathSegment.ray.direction), + normal); + // add fuzziness + reflectedDir += m.fuzziness * calculateRandomDirectionInSphere(rng); + + if (glm::dot(reflectedDir, normal) > 0.0f) + { + pathSegment.color *= m.specular.color; + pathSegment.ray.direction = glm::normalize(reflectedDir); + pathSegment.remainingBounces--; + } + // for big sphere or grazing rays, we may scatter below the + // surface. In this case, terminate this segment. + else { + // NOTE: this line is necessary to prevent the white boundary + // if we terminate the ray path because reflected ray goes below + // the surface, this path's contribution should be set to black (0.f) + pathSegment.color *= glm::vec3(0.f); + pathSegment.remainingBounces = 0; + } + } + else if (m.hasRefractive) + { + float refractionRatio = outside ? 
(1.f / m.indexOfRefraction) : m.indexOfRefraction;
+ // Refracted rays continue into the surface (along the negative normal), which is why
+ // the refracted-ray origin below is offset by subtraction. Since `intersect` falls
+ // slightly short of the surface being hit, a larger EPSILON is needed so the refracted
+ // ray starts from a point that is not occluded by that surface.
+ glm::vec3 unitRayDir = glm::normalize(pathSegment.ray.direction);
+ float cosTheta = fmin(glm::dot(-unitRayDir, normal), 1.0f);
+ float sinTheta = sqrt(1.0f - cosTheta * cosTheta);
+ // total internal reflection: Snell's law has no solution, so the ray cannot refract
+ bool cannotRefract = refractionRatio * sinTheta > 1.0f;
+ glm::vec3 newRayDir;
+ if (cannotRefract || reflectance(cosTheta, refractionRatio) > u01(rng))
+ {
+ pathSegment.ray.origin = intersect + EPSILON * normal;
+ newRayDir = glm::reflect(unitRayDir, normal);
+ }
+ else {
+ pathSegment.ray.origin = intersect - EPSILON * 100.0f * normal;
+ newRayDir = glm::refract(unitRayDir, normal, refractionRatio);
+ }
+ pathSegment.color *= m.color;
+ pathSegment.ray.direction = glm::normalize(newRayDir);
+ pathSegment.remainingBounces--;
+ }
+ else {
+ pathSegment.ray.origin = intersect + EPSILON * normal;
+ glm::vec3 diffuseDir = glm::normalize(calculateRandomDirectionInHemisphere(normal, rng));
+ pathSegment.color *= m.color;
+ pathSegment.ray.direction = diffuseDir;
+ // diffuse surfaces always scatter
+ pathSegment.remainingBounces--;
+ }
- pathSegment.ray.direction = newDirection;
- pathSegment.ray.origin = intersect + (newDirection * 0.0001f);
}
diff --git a/src/intersections.h b/src/intersections.h index c3e81f4..6826369 100644 --- a/src/intersections.h +++ b/src/intersections.h @@ -19,11 +19,13 @@ __host__ __device__ inline unsigned int utilhash(unsigned int a) { return a; }
+// CHECKITOUT
/**
* Compute a point at parameter value `t` on ray `r`.
* Falls slightly short so that it doesn't intersect the object it's hitting.
*/
__host__ __device__ glm::vec3 getPointOnRay(Ray r, float t) {
+ // return r.origin + (t - .0001f) * glm::normalize(r.direction);
return r.origin + (t - .0001f) * glm::normalize(r.direction);
}
@@ -34,6 +36,7 @@ __host__ __device__ glm::vec3 multiplyMV(glm::mat4 m, glm::vec4 v) { return glm::vec3(m * v); }
+// CHECKITOUT
/**
* Test intersection between a ray and a transformed cube. Untransformed,
* the cube ranges from -0.5 to 0.5 in each axis and is centered at the origin.
@@ -44,9 +47,9 @@ __host__ __device__ glm::vec3 multiplyMV(glm::mat4 m, glm::vec4 v) {
* @return Ray parameter `t` value. -1 if no intersection.
*/
__host__ __device__ float boxIntersectionTest(Geom box, Ray r,
- glm::vec3 &intersectionPoint, glm::vec3 &normal, bool &outside) {
+ glm::vec3& intersectionPoint, glm::vec3& normal, bool& outside) {
Ray q;
- q.origin = multiplyMV(box.inverseTransform, glm::vec4(r.origin , 1.0f));
+ q.origin = multiplyMV(box.inverseTransform, glm::vec4(r.origin, 1.0f));
q.direction = glm::normalize(multiplyMV(box.inverseTransform, glm::vec4(r.direction, 0.0f)));
float tmin = -1e38f;
@@ -87,6 +90,7 @@ __host__ __device__ float boxIntersectionTest(Geom box, Ray r, return -1; }
+// CHECKITOUT
/**
* Test intersection between a ray and a transformed sphere. Untransformed,
* the sphere always has radius 0.5 and is centered at the origin.
@@ -97,8 +101,8 @@ __host__ __device__ float boxIntersectionTest(Geom box, Ray r,
* @return Ray parameter `t` value. -1 if no intersection.
*/ __host__ __device__ float sphereIntersectionTest(Geom sphere, Ray r, - glm::vec3 &intersectionPoint, glm::vec3 &normal, bool &outside) { - float radius = .5; + glm::vec3& intersectionPoint, glm::vec3& normal, bool& outside) { + float radius = 0.5f; glm::vec3 ro = multiplyMV(sphere.inverseTransform, glm::vec4(r.origin, 1.0f)); glm::vec3 rd = glm::normalize(multiplyMV(sphere.inverseTransform, glm::vec4(r.direction, 0.0f))); @@ -121,21 +125,34 @@ __host__ __device__ float sphereIntersectionTest(Geom sphere, Ray r, float t = 0; if (t1 < 0 && t2 < 0) { return -1; - } else if (t1 > 0 && t2 > 0) { + } + else if (t1 > 0 && t2 > 0) { t = min(t1, t2); - outside = true; - } else { + // outside = true; + } + else { t = max(t1, t2); - outside = false; + // outside = false; } glm::vec3 objspaceIntersection = getPointOnRay(rt, t); intersectionPoint = multiplyMV(sphere.transform, glm::vec4(objspaceIntersection, 1.f)); normal = glm::normalize(multiplyMV(sphere.invTranspose, glm::vec4(objspaceIntersection, 0.f))); + + if (sphere.scale[0] < 1.0f) + { + normal = -normal; + } + + outside = glm::dot(r.direction, normal) < 0; + // make it so that normals always point against the incident ray + // If the ray is outside the geometry, the normal will point outward, + // but if the ray is inside the geometry, the normal will point inward. if (!outside) { normal = -normal; } + return glm::length(r.origin - intersectionPoint); } diff --git a/src/main.cpp b/src/main.cpp index 4092ae4..3297e50 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -25,7 +25,7 @@ int lastLoopIterations = 0; bool ui_showGbuffer = false; bool ui_denoise = false; int ui_filterSize = 80; -float ui_colorWeight = 0.45f; +float ui_colorWeight = 30.f; float ui_normalWeight = 0.35f; float ui_positionWeight = 0.2f; bool ui_saveAndExit = false; @@ -45,6 +45,12 @@ int iteration; int width; int height; +// keep track of previous state of denoiser parameters +float last_colorWeight = 0.f; +float last_normalWeight = 0.f; +float last_positionWeight = 0.f; +bool last_denoise = false; + //------------------------------- //-------------MAIN-------------- //------------------------------- @@ -122,10 +128,31 @@ void saveImage() { void runCuda() { if (lastLoopIterations != ui_iterations) { - lastLoopIterations = ui_iterations; - camchanged = true; + lastLoopIterations = ui_iterations; + camchanged = true; + } + else if (last_colorWeight != ui_colorWeight) + { + last_colorWeight = ui_colorWeight; + camchanged = true; + } + else if (last_normalWeight != ui_normalWeight) + { + last_normalWeight = ui_normalWeight; + camchanged = true; + } + else if (last_positionWeight != ui_positionWeight) + { + last_positionWeight = ui_positionWeight; + camchanged = true; + } + else if (last_denoise != ui_denoise) + { + last_denoise = ui_denoise; + camchanged = true; } + if (camchanged) { iteration = 0; Camera &cam = renderState->camera; @@ -144,7 +171,7 @@ void runCuda() { cameraPosition += cam.lookAt; cam.position = cameraPosition; camchanged = false; - } + } // Map OpenGL buffer object for writing from CUDA on a single GPU // No data is moved (Win & Linux). 
When mapped to CUDA, OpenGL should not use this buffer @@ -164,6 +191,27 @@ void runCuda() { int frame = 0; pathtrace(frame, iteration); } + // extra iteration for denoiser + else if (iteration == ui_iterations) + { + // last iteration is denoising + iteration++; + if (ui_denoise) + { + cudaEvent_t start, stop; + cudaEventCreate(&start); + cudaEventCreate(&stop); + cudaEventRecord(start); + + denoisePathTracedImage(); + + cudaEventRecord(stop); + cudaEventSynchronize(stop); + float milliseconds = 0; + cudaEventElapsedTime(&milliseconds, start, stop); + std::cout << "Denoise: " << milliseconds << std::endl; + } + } if (ui_showGbuffer) { showGBuffer(pbo_dptr); diff --git a/src/main.h b/src/main.h index 06d311a..8517239 100644 --- a/src/main.h +++ b/src/main.h @@ -42,7 +42,11 @@ extern float ui_normalWeight; extern float ui_positionWeight; extern bool ui_saveAndExit; +// New buttons +extern bool ui_showGbufferNormals; +extern bool ui_showGbufferPos; + void runCuda(); void keyCallback(GLFWwindow *window, int key, int scancode, int action, int mods); void mousePositionCallback(GLFWwindow* window, double xpos, double ypos); -void mouseButtonCallback(GLFWwindow* window, int button, int action, int mods); +void mouseButtonCallback(GLFWwindow* window, int button, int action, int mods); \ No newline at end of file diff --git a/src/pathtrace.cu b/src/pathtrace.cu index 23e5f90..96d040d 100644 --- a/src/pathtrace.cu +++ b/src/pathtrace.cu @@ -4,6 +4,7 @@ #include #include #include +#include #include "sceneStructs.h" #include "scene.h" @@ -13,6 +14,7 @@ #include "pathtrace.h" #include "intersections.h" #include "interactions.h" +#include "main.h" #define ERRORCHECK 1 @@ -67,18 +69,32 @@ __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, } } +#define V_N 1 +#define V_P 0 + __global__ void gbufferToPBO(uchar4* pbo, glm::ivec2 resolution, GBufferPixel* gBuffer) { + int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); float timeToIntersect = gBuffer[index].t * 256.0; - + glm::vec3 n = gBuffer[index].normal; + glm::vec3 p = gBuffer[index].pos; + glm::vec3 color(0.f); + +#if V_N + color = glm::clamp(glm::abs(n * 255.0f), 0.0f, 255.0f); +#elif V_P + color = glm::clamp(glm::abs(p * 20.0f), 0.0f, 255.0f); +#else + color = glm::vec3(timeToIntersect); +#endif pbo[index].w = 0; - pbo[index].x = timeToIntersect; - pbo[index].y = timeToIntersect; - pbo[index].z = timeToIntersect; + pbo[index].x = color[0]; + pbo[index].y = color[1]; + pbo[index].z = color[2]; } } @@ -92,6 +108,9 @@ static GBufferPixel* dev_gBuffer = NULL; // TODO: static variables for device memory, any extra info you need, etc // ... 
+// for denoiser +static glm::vec3* dev_denoisedImage = NULL; + void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; @@ -115,6 +134,9 @@ void pathtraceInit(Scene *scene) { // TODO: initialize any extra device memeory you need + cudaMalloc(&dev_denoisedImage, pixelcount * sizeof(glm::vec3)); + cudaMemset(dev_denoisedImage, 0, pixelcount * sizeof(glm::vec3)); + checkCUDAError("pathtraceInit"); } @@ -126,6 +148,7 @@ void pathtraceFree() { cudaFree(dev_intersections); cudaFree(dev_gBuffer); // TODO: clean up any extra device memory you created + cudaFree(dev_denoisedImage); checkCUDAError("pathtraceFree"); } @@ -140,149 +163,116 @@ void pathtraceFree() { */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { - int x = (blockIdx.x * blockDim.x) + threadIdx.x; - int y = (blockIdx.y * blockDim.y) + threadIdx.y; - - if (x < cam.resolution.x && y < cam.resolution.y) { - int index = x + (y * cam.resolution.x); - PathSegment & segment = pathSegments[index]; - - segment.ray.origin = cam.position; - segment.color = glm::vec3(1.0f, 1.0f, 1.0f); - - segment.ray.direction = glm::normalize(cam.view - - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) - - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) - ); + int x = (blockIdx.x * blockDim.x) + threadIdx.x; + int y = (blockIdx.y * blockDim.y) + threadIdx.y; - segment.pixelIndex = index; - segment.remainingBounces = traceDepth; - } + if (x < cam.resolution.x && y < cam.resolution.y) { + int index = x + (y * cam.resolution.x); + PathSegment& segment = pathSegments[index]; + + segment.ray.origin = cam.position; + segment.color = glm::vec3(1.0f, 1.0f, 1.0f); + + thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0); + thrust::uniform_real_distribution u01(0, 1); + float jitteredX = (float)x + u01(rng); + float jitteredY = (float)y + u01(rng); + // TODO: implement antialiasing by jittering the ray + segment.ray.direction = glm::normalize(cam.view + - cam.right * cam.pixelLength.x * (jitteredX - (float)cam.resolution.x * 0.5f) + - cam.up * cam.pixelLength.y * (jitteredY - (float)cam.resolution.y * 0.5f) + ); + + segment.pixelIndex = index; + segment.remainingBounces = traceDepth; + } } __global__ void computeIntersections( - int depth - , int num_paths - , PathSegment * pathSegments - , Geom * geoms - , int geoms_size - , ShadeableIntersection * intersections - ) + int depth + , int num_paths + , PathSegment* pathSegments + , Geom* geoms + , int geoms_size + , ShadeableIntersection* intersections +) { - int path_index = blockIdx.x * blockDim.x + threadIdx.x; - - if (path_index < num_paths) - { - PathSegment pathSegment = pathSegments[path_index]; - - float t; - glm::vec3 intersect_point; - glm::vec3 normal; - float t_min = FLT_MAX; - int hit_geom_index = -1; - bool outside = true; - - glm::vec3 tmp_intersect; - glm::vec3 tmp_normal; - - // naive parse through global geoms - - for (int i = 0; i < geoms_size; i++) - { - Geom & geom = geoms[i]; - - if (geom.type == CUBE) - { - t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); - } - else if (geom.type == SPHERE) - { - t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); - } - - // Compute the minimum t from the intersection tests to determine what - // scene geometry object was hit first. 
- if (t > 0.0f && t_min > t) - { - t_min = t; - hit_geom_index = i; - intersect_point = tmp_intersect; - normal = tmp_normal; - } - } - - if (hit_geom_index == -1) - { - intersections[path_index].t = -1.0f; - } - else - { - //The ray hits something - intersections[path_index].t = t_min; - intersections[path_index].materialId = geoms[hit_geom_index].materialid; - intersections[path_index].surfaceNormal = normal; - } - } + int path_index = blockIdx.x * blockDim.x + threadIdx.x; + + if (path_index < num_paths) + { + PathSegment pathSegment = pathSegments[path_index]; + + float t; + glm::vec3 intersect_point; + glm::vec3 normal; + float t_min = FLT_MAX; + int hit_geom_index = -1; + bool outside = true; + + glm::vec3 tmp_intersect; + glm::vec3 tmp_normal; + bool tmp_outside = true; + + // naive parse through global geoms + + for (int i = 0; i < geoms_size; i++) + { + Geom& geom = geoms[i]; + + if (geom.type == CUBE) + { + t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, tmp_outside); + } + else if (geom.type == SPHERE) + { + t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, tmp_outside); + } + // TODO: add more intersection tests here... triangle? metaball? CSG? + + // Compute the minimum t from the intersection tests to determine what + // scene geometry object was hit first. + if (t > 0.0f && t_min > t) + { + t_min = t; + hit_geom_index = i; + intersect_point = tmp_intersect; + normal = tmp_normal; + outside = tmp_outside; + } + } + + if (hit_geom_index == -1) + { + intersections[path_index].t = -1.0f; + } + else + { + //The ray hits something + intersections[path_index].t = t_min; + intersections[path_index].materialId = geoms[hit_geom_index].materialid; + intersections[path_index].surfaceNormal = normal; + intersections[path_index].intersectionPoint = intersect_point; + intersections[path_index].outside = outside; + } + } } -__global__ void shadeSimpleMaterials ( - int iter - , int num_paths - , ShadeableIntersection * shadeableIntersections - , PathSegment * pathSegments - , Material * materials - ) +__global__ void generateGBuffer (int num_paths, + ShadeableIntersection* shadeableIntersections, + PathSegment* pathSegments, + GBufferPixel* gBuffer) { - int idx = blockIdx.x * blockDim.x + threadIdx.x; - if (idx < num_paths) - { - ShadeableIntersection intersection = shadeableIntersections[idx]; - PathSegment segment = pathSegments[idx]; - if (segment.remainingBounces == 0) { - return; - } - - if (intersection.t > 0.0f) { // if the intersection exists... - segment.remainingBounces--; - // Set up the RNG - thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, segment.remainingBounces); - - Material material = materials[intersection.materialId]; - glm::vec3 materialColor = material.color; - - // If the material indicates that the object was a light, "light" the ray - if (material.emittance > 0.0f) { - segment.color *= (materialColor * material.emittance); - segment.remainingBounces = 0; - } - else { - segment.color *= materialColor; - glm::vec3 intersectPos = intersection.t * segment.ray.direction + segment.ray.origin; - scatterRay(segment, intersectPos, intersection.surfaceNormal, material, rng); - } - // If there was no intersection, color the ray black. - // Lots of renderers use 4 channel color, RGBA, where A = alpha, often - // used for opacity, in which case they can indicate "no opacity". - // This can be useful for post-processing and image compositing. 
- } else {
- segment.color = glm::vec3(0.0f);
- segment.remainingBounces = 0;
+ int idx = blockIdx.x * blockDim.x + threadIdx.x;
+ if (idx < num_paths)
+ {
+ ShadeableIntersection& inter = shadeableIntersections[idx];
+ PathSegment& pathSeg = pathSegments[idx];
+ Ray& r = pathSeg.ray;
+ gBuffer[idx].t = inter.t;
+ gBuffer[idx].normal = inter.surfaceNormal;
+ gBuffer[idx].pos = r.origin + inter.t * r.direction;
}
-
- pathSegments[idx] = segment;
- }
-}
-
-__global__ void generateGBuffer (
- int num_paths,
- ShadeableIntersection* shadeableIntersections,
- PathSegment* pathSegments,
- GBufferPixel* gBuffer) {
- int idx = blockIdx.x * blockDim.x + threadIdx.x;
- if (idx < num_paths)
- {
- gBuffer[idx].t = shadeableIntersections[idx].t;
- }
}
// Add the current iteration's output to the overall image
@@ -297,114 +287,217 @@ __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterati } }
+// NOTE: passing cam by const reference froze the app, so it is passed by value
+__global__ void performOneStepATrousFilter(Camera cam,
+ float colorWeight,
+ float normalWeight,
+ float positionWeight,
+ int currStepWidth,
+ GBufferPixel* gBuffer,
+ glm::vec3* image,
+ glm::vec3* denoisedImage)
+{
+ int x = (blockIdx.x * blockDim.x) + threadIdx.x;
+ int y = (blockIdx.y * blockDim.y) + threadIdx.y;
+
+ if (x < cam.resolution.x && y < cam.resolution.y) {
+ int index = x + (y * cam.resolution.x);
+
+ float h[25] = {
+ 1.f / 273.f, 4.f / 273.f, 7.f / 273.f, 4.f / 273.f, 1.f / 273.f,
+ 4.f / 273.f, 16.f / 273.f, 26.f / 273.f, 16.f / 273.f, 4.f / 273.f,
+ 7.f / 273.f, 26.f / 273.f, 41.f / 273.f, 26.f / 273.f, 7.f / 273.f,
+ 4.f / 273.f, 16.f / 273.f, 26.f / 273.f, 16.f / 273.f, 4.f / 273.f,
+ 1.f / 273.f, 4.f / 273.f, 7.f / 273.f, 4.f / 273.f, 1.f / 273.f,
+ };
+
+ float cum_w = 0.f;
+ glm::vec3 sum{ 0.f };
+
+ glm::vec3 cval = image[index];
+ glm::vec3 nval = gBuffer[index].normal;
+ glm::vec3 pval = gBuffer[index].pos;
+
+ for (int i = -2; i <= 2; i++)
+ {
+ for (int j = -2; j <= 2; j++)
+ {
+ int u = x + currStepWidth * i;
+ int v = y + currStepWidth * j;
+ if (u < cam.resolution.x && v < cam.resolution.y && u >= 0 && v >= 0)
+ {
+ int currIndex = u + (v * cam.resolution.x);
+
+ // color
+ glm::vec3 ctmp = image[currIndex];
+ glm::vec3 t = cval - ctmp;
+ float dist2 = glm::dot(t, t);
+ float c_w = glm::min(glm::exp(-(dist2) / colorWeight), 1.f);
+ // normal
+ glm::vec3 ntmp = gBuffer[currIndex].normal;
+ t = nval - ntmp;
+ dist2 = glm::max(glm::dot(t, t) / (currStepWidth * currStepWidth), 0.f);
+ float n_w = glm::min(glm::exp(-(dist2) / normalWeight), 1.f);
+ // position
+ glm::vec3 ptmp = gBuffer[currIndex].pos;
+ t = pval - ptmp;
+ dist2 = glm::dot(t, t);
+ float p_w = glm::min(glm::exp(-(dist2) / positionWeight), 1.f);
+
+ float weight = c_w * n_w * p_w;
+ int hIndex = i + 2 + (j + 2) * 5;
+ sum += ctmp * weight * h[hIndex];
+ cum_w += weight * h[hIndex];
+ }
+ }
+ }
+ denoisedImage[index] = sum / cum_w;
+ }
+}
+
+void denoisePathTracedImage()
+{
+ const Camera& cam = hst_scene->state.camera;
+ const int pixelcount = cam.resolution.x * cam.resolution.y;
+ const dim3 blockSize2d(8, 8);
+ const dim3 blocksPerGrid2d(
+ (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
+ (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
+
+ int stepWidth = 1;
+ float colorWeight = ui_colorWeight; // float so the halving below does not truncate
+ for (int i = 0; i < ui_iterations; i++) {
+ performOneStepATrousFilter<<<blocksPerGrid2d, blockSize2d>>>(
+ cam,
+ colorWeight,
+ ui_normalWeight,
+ ui_positionWeight,
+ stepWidth,
+ dev_gBuffer,
+ dev_image,
+ dev_denoisedImage);
+ stepWidth *= 2;
+ colorWeight *= 
0.5f; + std::swap(dev_denoisedImage, dev_image); + } + + cudaMemcpy(hst_scene->state.image.data(), dev_image, + pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); +} + +__global__ void shadeMaterial( + int iter + , int num_paths + , ShadeableIntersection* shadeableIntersections + , PathSegment* pathSegments + , Material* materials +) +{ + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx < num_paths) + { + ShadeableIntersection intersection = shadeableIntersections[idx]; + if (intersection.t > 0.0f) { // if the intersection exists... + // Set up the RNG + // LOOK: this is how you use thrust's RNG! Please look at + // makeSeededRandomEngine as well. + thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); + thrust::uniform_real_distribution u01(0, 1); + + Material material = materials[intersection.materialId]; + glm::vec3 materialColor = material.color; + + // If the material indicates that the object was a light, "light" the ray + if (material.emittance > 0.0f) { + pathSegments[idx].color *= (materialColor * material.emittance); + pathSegments[idx].remainingBounces = 0; + } + else { + scatterRay(pathSegments[idx], + intersection.intersectionPoint, + intersection.surfaceNormal, + intersection.outside, + material, + rng, + intersection); + } + // If there was no intersection, color the ray black. + // Lots of renderers use 4 channel color, RGBA, where A = alpha, often + // used for opacity, in which case they can indicate "no opacity". + // This can be useful for post-processing and image compositing. + } + else { + glm::vec3 unitDirection = glm::normalize(pathSegments[idx].ray.direction); + float t = 0.5f * (unitDirection[1] + 1.0f); + pathSegments[idx].color *= (1.0f - t) * glm::vec3(1.0f, 1.0f, 1.0f) + t * glm::vec3(0.5f, 0.7f, 1.0f); + pathSegments[idx].remainingBounces = 0; + } + } +} + /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; - const Camera &cam = hst_scene->state.camera; + const Camera& cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; - // 2D block for generating ray from camera + // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( - (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, - (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); - - // 1D block for path tracing - const int blockSize1d = 128; - - /////////////////////////////////////////////////////////////////////////// - - // Pathtracing Recap: - // * Initialize array of path rays (using rays that come out of the camera) - // * You can pass the Camera object to that kernel. - // * Each path ray must carry at minimum a (ray, color) pair, - // * where color starts as the multiplicative identity, white = (1, 1, 1). - // * This has already been done for you. - // * NEW: For the first depth, generate geometry buffers (gbuffers) - // * For each depth: - // * Compute an intersection in the scene for each path ray. - // A very naive version of this has been implemented for you, but feel - // free to add more primitives and/or a better algorithm. - // Currently, intersection distance is recorded as a parametric distance, - // t, or a "distance along the ray." t = -1.0 indicates no intersection. - // * Color is attenuated (multiplied) by reflections off of any object - // * Stream compact away all of the terminated paths. 
- // You may use either your implementation or `thrust::remove_if` or its - // cousins. - // * Note that you can't really use a 2D kernel launch any more - switch - // to 1D. - // * Shade the rays that intersected something or didn't bottom out. - // That is, color the ray by performing a color computation according - // to the shader, then generate a new ray to continue the ray path. - // We recommend just updating the ray's PathSegment in place. - // Note that this step may come before or after stream compaction, - // since some shaders you write may also cause a path to terminate. - // * Finally: - // * if not denoising, add this iteration's results to the image - // * TODO: if denoising, run kernels that take both the raw pathtraced result and the gbuffer, and put the result in the "pbo" from opengl - - generateRayFromCamera <<>>(cam, iter, traceDepth, dev_paths); - checkCUDAError("generate camera ray"); - - int depth = 0; - PathSegment* dev_path_end = dev_paths + pixelcount; - int num_paths = dev_path_end - dev_paths; - - // --- PathSegment Tracing Stage --- - // Shoot ray into scene, bounce between objects, push shading chunks - - // Empty gbuffer - cudaMemset(dev_gBuffer, 0, pixelcount * sizeof(GBufferPixel)); - - // clean shading chunks - cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); - - bool iterationComplete = false; - while (!iterationComplete) { - - // tracing - dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; - computeIntersections <<>> ( - depth - , num_paths - , dev_paths - , dev_geoms - , hst_scene->geoms.size() - , dev_intersections - ); - checkCUDAError("trace one bounce"); - cudaDeviceSynchronize(); - - if (depth == 0) { - generateGBuffer<<>>(num_paths, dev_intersections, dev_paths, dev_gBuffer); - } - - depth++; - - shadeSimpleMaterials<<>> ( - iter, - num_paths, - dev_intersections, - dev_paths, - dev_materials - ); - iterationComplete = depth == traceDepth; - } - - // Assemble this iteration and apply it to the image - dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; - finalGather<<>>(num_paths, dev_image, dev_paths); + (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, + (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); + + // 1D block for path tracing + const int blockSize1d = 128; + + generateRayFromCamera << > > (cam, iter, traceDepth, dev_paths); + checkCUDAError("generate camera ray"); + + int depth = 0; + PathSegment* dev_path_end = dev_paths + pixelcount; + int num_paths = dev_path_end - dev_paths; + + bool iterationComplete = false; + while (!iterationComplete) { + cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); + + dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; + computeIntersections << > > ( + depth + , num_paths + , dev_paths + , dev_geoms + , hst_scene->geoms.size() + , dev_intersections + ); + checkCUDAError("trace one bounce"); + cudaDeviceSynchronize(); + depth++; + + shadeMaterial << > > ( + iter, + num_paths, + dev_intersections, + dev_paths, + dev_materials + ); + + // Stream compact away all of the terminated paths. 
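+ // NOTE: despite its name, the isTerminated predicate (defined in sceneStructs.h) returns true
+ // for paths that still have remaining bounces, so stable_partition moves the still-active
+ // paths to the front of dev_paths and num_paths below counts only those active paths.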
+ PathSegment* newEnd = thrust::stable_partition(thrust::device, dev_paths, dev_paths + num_paths, isTerminated()); + num_paths = newEnd - dev_paths; + iterationComplete = num_paths <= 0; + } - /////////////////////////////////////////////////////////////////////////// + // Assemble this iteration and apply it to the image + dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; + finalGather << > > (pixelcount, dev_image, dev_paths); - // CHECKITOUT: use dev_image as reference if you want to implement saving denoised images. - // Otherwise, screenshots are also acceptable. // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, - pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); + pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); } diff --git a/src/pathtrace.h b/src/pathtrace.h index 9e12f44..54176c7 100644 --- a/src/pathtrace.h +++ b/src/pathtrace.h @@ -8,3 +8,4 @@ void pathtraceFree(); void pathtrace(int frame, int iteration); void showGBuffer(uchar4 *pbo); void showImage(uchar4 *pbo, int iter); +void denoisePathTracedImage(); \ No newline at end of file diff --git a/src/preview.cpp b/src/preview.cpp index 3ca2718..c01a9e7 100644 --- a/src/preview.cpp +++ b/src/preview.cpp @@ -215,7 +215,7 @@ void drawGui(int windowWidth, int windowHeight) { ImGui::Checkbox("Denoise", &ui_denoise); ImGui::SliderInt("Filter Size", &ui_filterSize, 0, 100); - ImGui::SliderFloat("Color Weight", &ui_colorWeight, 0.0f, 10.0f); + ImGui::SliderFloat("Color Weight", &ui_colorWeight, 0.0f, 100.0f); ImGui::SliderFloat("Normal Weight", &ui_normalWeight, 0.0f, 10.0f); ImGui::SliderFloat("Position Weight", &ui_positionWeight, 0.0f, 10.0f); diff --git a/src/scene.cpp b/src/scene.cpp index cbae043..a517178 100644 --- a/src/scene.cpp +++ b/src/scene.cpp @@ -3,6 +3,9 @@ #include #include #include +#include + +#define LOAD_GEOM_AND_MAT_FROM_FILE 0 Scene::Scene(string filename) { cout << "Reading scene from " << filename << " ..." << endl; @@ -19,25 +22,217 @@ Scene::Scene(string filename) { if (!line.empty()) { vector tokens = utilityCore::tokenizeString(line); if (strcmp(tokens[0].c_str(), "MATERIAL") == 0) { - loadMaterial(tokens[1]); + loadMaterialFromFile(tokens[1]); cout << " " << endl; - } else if (strcmp(tokens[0].c_str(), "OBJECT") == 0) { - loadGeom(tokens[1]); + } + else if (strcmp(tokens[0].c_str(), "OBJECT") == 0) { + loadGeomFromFile(tokens[1]); cout << " " << endl; - } else if (strcmp(tokens[0].c_str(), "CAMERA") == 0) { + } + else if (strcmp(tokens[0].c_str(), "CAMERA") == 0) { loadCamera(); cout << " " << endl; } } } +#if !LOAD_GEOM_AND_MAT_FROM_FILE + loadGeoAndMat(); +#endif +} + +inline static float randomFloat() { + static std::uniform_real_distribution distribution(0.0f, 1.0f); + static std::mt19937 generator; + return distribution(generator); +} + +// Returns a random real in [min,max). 
+inline static float randomFloat(float min, float max) { + return min + (max - min) * randomFloat(); } -int Scene::loadGeom(string objectid) { +// return a vector where each component is a random number from [0,1) +inline static glm::vec3 randomVec3() +{ + return glm::vec3(randomFloat(), randomFloat(), randomFloat()); +} + +// return a vector where each component is a random number from [min,max) +inline static glm::vec3 randomVec3(float min, float max) +{ + return glm::vec3(randomFloat(min, max), randomFloat(min, max), randomFloat(min, max)); +} + +void Scene::loadGeoAndMat() +{ + int materialCounter = 0; + int geoCounter = 0; + + Material groundMaterial; + groundMaterial.color = glm::vec3(0.5f); + groundMaterial.specular.exponent = 0; + groundMaterial.specular.color = glm::vec3(0.f); + groundMaterial.hasReflective = 0; + groundMaterial.hasRefractive = 0; + groundMaterial.indexOfRefraction = 0; + groundMaterial.emittance = 0; + groundMaterial.fuzziness = 0; + materials.push_back(groundMaterial); + + Geom groundSphere; + groundSphere.type = SPHERE; + groundSphere.materialid = materialCounter++; + groundSphere.translation = glm::vec3(0.f, -1000.f, 0.f); + groundSphere.rotation = glm::vec3(0.f); + groundSphere.scale = glm::vec3(2000.f); + groundSphere.transform = utilityCore::buildTransformationMatrix( + groundSphere.translation, groundSphere.rotation, groundSphere.scale); + groundSphere.inverseTransform = glm::inverse(groundSphere.transform); + groundSphere.invTranspose = glm::inverseTranspose(groundSphere.transform); + geoms.push_back(groundSphere); + + for (int a = -11; a < 11; a++) { + for (int b = -11; b < 11; b++) { + auto chooseMat = randomFloat(); + glm::vec3 center(a + 0.9f * randomFloat(), 0.2f, b + 0.9f * randomFloat()); + + if (glm::length(center - glm::vec3(4.0f, 0.2f, 0.0f)) > 0.9f) + { + Material sphereMaterial; + + if (chooseMat < 0.8f) { + // diffuse + auto albedo = randomVec3() * randomVec3(); + sphereMaterial.color = albedo; + sphereMaterial.specular.exponent = 0; + sphereMaterial.specular.color = glm::vec3(0.f); + sphereMaterial.hasReflective = 0; + sphereMaterial.hasRefractive = 0; + sphereMaterial.indexOfRefraction = 0; + sphereMaterial.emittance = 0; + sphereMaterial.fuzziness = 0; + } + else if (chooseMat < 0.95f) { + // metal + auto albedo = randomVec3(0.5f, 1.0f); + auto fuzz = randomFloat(0.0f, 0.5f); + sphereMaterial.color = albedo; + sphereMaterial.specular.exponent = 0; + sphereMaterial.specular.color = albedo; + sphereMaterial.hasReflective = 1; + sphereMaterial.hasRefractive = 0; + sphereMaterial.indexOfRefraction = 0; + sphereMaterial.emittance = 0; + sphereMaterial.fuzziness = fuzz; + } + else { + // glass + sphereMaterial.color = glm::vec3(1.0f); + sphereMaterial.specular.exponent = 0; + sphereMaterial.specular.color = glm::vec3(0.f); + sphereMaterial.hasReflective = 0; + sphereMaterial.hasRefractive = 1; + sphereMaterial.indexOfRefraction = 1.5; + sphereMaterial.emittance = 0; + sphereMaterial.fuzziness = 0; + } + + materials.push_back(sphereMaterial); + + Geom s1; + s1.type = SPHERE; + s1.materialid = materialCounter++; + s1.translation = center; + s1.rotation = glm::vec3(0.f); + s1.scale = glm::vec3(.4f); + s1.transform = utilityCore::buildTransformationMatrix( + s1.translation, s1.rotation, s1.scale); + s1.inverseTransform = glm::inverse(s1.transform); + s1.invTranspose = glm::inverseTranspose(s1.transform); + geoms.push_back(s1); + } + } + } + + + Material dielectric; + dielectric.color = glm::vec3(1.0f); + dielectric.specular.exponent = 0; + 
dielectric.specular.color = glm::vec3(0.f); + dielectric.hasReflective = 0; + dielectric.hasRefractive = 1; + dielectric.indexOfRefraction = 1.5; + dielectric.emittance = 0; + dielectric.fuzziness = 0; + materials.push_back(dielectric); + + Geom s1; + s1.type = SPHERE; + s1.materialid = materialCounter++; + s1.translation = glm::vec3(0.f, 1.f, 0.f); + s1.rotation = glm::vec3(0.f); + s1.scale = glm::vec3(2.f); + s1.transform = utilityCore::buildTransformationMatrix( + s1.translation, s1.rotation, s1.scale); + s1.inverseTransform = glm::inverse(s1.transform); + s1.invTranspose = glm::inverseTranspose(s1.transform); + geoms.push_back(s1); + + Material lambertian; + lambertian.color = glm::vec3(.4f, .2f, .1f); + lambertian.specular.exponent = 0; + lambertian.specular.color = glm::vec3(0.f); + lambertian.hasReflective = 0; + lambertian.hasRefractive = 0; + lambertian.indexOfRefraction = 0; + lambertian.emittance = 0; + lambertian.fuzziness = 0; + materials.push_back(lambertian); + + Geom s2; + s2.type = SPHERE; + s2.materialid = materialCounter++; + s2.translation = glm::vec3(-4.f, 1.f, 0.f); + s2.rotation = glm::vec3(0.f); + s2.scale = glm::vec3(2.f); + s2.transform = utilityCore::buildTransformationMatrix( + s2.translation, s2.rotation, s2.scale); + s2.inverseTransform = glm::inverse(s2.transform); + s2.invTranspose = glm::inverseTranspose(s2.transform); + geoms.push_back(s2); + + Material metal; + metal.color = glm::vec3(.7f, .6f, .5f); + metal.specular.exponent = 0; + metal.specular.color = glm::vec3(.7f, .6f, .5f); + metal.hasReflective = 1; + metal.hasRefractive = 0; + metal.indexOfRefraction = 0; + metal.emittance = 0; + metal.fuzziness = 0; + materials.push_back(metal); + + Geom s3; + s3.type = SPHERE; + s3.materialid = materialCounter++; + s3.translation = glm::vec3(4.f, 1.f, 0.f); + s3.rotation = glm::vec3(0.f); + s3.scale = glm::vec3(2.f); + s3.transform = utilityCore::buildTransformationMatrix( + s3.translation, s3.rotation, s3.scale); + s3.inverseTransform = glm::inverse(s3.transform); + s3.invTranspose = glm::inverseTranspose(s3.transform); + geoms.push_back(s3); +} + + +int Scene::loadGeomFromFile(string objectid) { int id = atoi(objectid.c_str()); if (id != geoms.size()) { cout << "ERROR: OBJECT ID does not match expected number of geoms" << endl; return -1; - } else { + } + else { cout << "Loading Geom " << id << "..." << endl; Geom newGeom; string line; @@ -48,7 +243,8 @@ int Scene::loadGeom(string objectid) { if (strcmp(line.c_str(), "sphere") == 0) { cout << "Creating new sphere..." << endl; newGeom.type = SPHERE; - } else if (strcmp(line.c_str(), "cube") == 0) { + } + else if (strcmp(line.c_str(), "cube") == 0) { cout << "Creating new cube..." 
<< endl; newGeom.type = CUBE; } @@ -70,17 +266,18 @@ int Scene::loadGeom(string objectid) { //load tranformations if (strcmp(tokens[0].c_str(), "TRANS") == 0) { newGeom.translation = glm::vec3(atof(tokens[1].c_str()), atof(tokens[2].c_str()), atof(tokens[3].c_str())); - } else if (strcmp(tokens[0].c_str(), "ROTAT") == 0) { + } + else if (strcmp(tokens[0].c_str(), "ROTAT") == 0) { newGeom.rotation = glm::vec3(atof(tokens[1].c_str()), atof(tokens[2].c_str()), atof(tokens[3].c_str())); - } else if (strcmp(tokens[0].c_str(), "SCALE") == 0) { + } + else if (strcmp(tokens[0].c_str(), "SCALE") == 0) { newGeom.scale = glm::vec3(atof(tokens[1].c_str()), atof(tokens[2].c_str()), atof(tokens[3].c_str())); } - utilityCore::safeGetline(fp_in, line); } newGeom.transform = utilityCore::buildTransformationMatrix( - newGeom.translation, newGeom.rotation, newGeom.scale); + newGeom.translation, newGeom.rotation, newGeom.scale); newGeom.inverseTransform = glm::inverse(newGeom.transform); newGeom.invTranspose = glm::inverseTranspose(newGeom.transform); @@ -91,8 +288,8 @@ int Scene::loadGeom(string objectid) { int Scene::loadCamera() { cout << "Loading Camera ..." << endl; - RenderState &state = this->state; - Camera &camera = state.camera; + RenderState& state = this->state; + Camera& camera = state.camera; float fovy; //load static properties @@ -103,13 +300,17 @@ int Scene::loadCamera() { if (strcmp(tokens[0].c_str(), "RES") == 0) { camera.resolution.x = atoi(tokens[1].c_str()); camera.resolution.y = atoi(tokens[2].c_str()); - } else if (strcmp(tokens[0].c_str(), "FOVY") == 0) { + } + else if (strcmp(tokens[0].c_str(), "FOVY") == 0) { fovy = atof(tokens[1].c_str()); - } else if (strcmp(tokens[0].c_str(), "ITERATIONS") == 0) { + } + else if (strcmp(tokens[0].c_str(), "ITERATIONS") == 0) { state.iterations = atoi(tokens[1].c_str()); - } else if (strcmp(tokens[0].c_str(), "DEPTH") == 0) { + } + else if (strcmp(tokens[0].c_str(), "DEPTH") == 0) { state.traceDepth = atoi(tokens[1].c_str()); - } else if (strcmp(tokens[0].c_str(), "FILE") == 0) { + } + else if (strcmp(tokens[0].c_str(), "FILE") == 0) { state.imageName = tokens[1]; } } @@ -120,9 +321,11 @@ int Scene::loadCamera() { vector tokens = utilityCore::tokenizeString(line); if (strcmp(tokens[0].c_str(), "EYE") == 0) { camera.position = glm::vec3(atof(tokens[1].c_str()), atof(tokens[2].c_str()), atof(tokens[3].c_str())); - } else if (strcmp(tokens[0].c_str(), "LOOKAT") == 0) { + } + else if (strcmp(tokens[0].c_str(), "LOOKAT") == 0) { camera.lookAt = glm::vec3(atof(tokens[1].c_str()), atof(tokens[2].c_str()), atof(tokens[3].c_str())); - } else if (strcmp(tokens[0].c_str(), "UP") == 0) { + } + else if (strcmp(tokens[0].c_str(), "UP") == 0) { camera.up = glm::vec3(atof(tokens[1].c_str()), atof(tokens[2].c_str()), atof(tokens[3].c_str())); } @@ -135,9 +338,9 @@ int Scene::loadCamera() { float fovx = (atan(xscaled) * 180) / PI; camera.fov = glm::vec2(fovx, fovy); - camera.right = glm::normalize(glm::cross(camera.view, camera.up)); - camera.pixelLength = glm::vec2(2 * xscaled / (float)camera.resolution.x - , 2 * yscaled / (float)camera.resolution.y); + camera.right = glm::normalize(glm::cross(camera.view, camera.up)); + camera.pixelLength = glm::vec2(2 * xscaled / (float)camera.resolution.x, + 2 * yscaled / (float)camera.resolution.y); camera.view = glm::normalize(camera.lookAt - camera.position); @@ -150,37 +353,52 @@ int Scene::loadCamera() { return 1; } -int Scene::loadMaterial(string materialid) { +int Scene::loadMaterialFromFile(string materialid) { int id = 
atoi(materialid.c_str()); if (id != materials.size()) { cout << "ERROR: MATERIAL ID does not match expected number of materials" << endl; return -1; - } else { + } + else { cout << "Loading Material " << id << "..." << endl; Material newMaterial; + // Adding a new material property needs two modifications: + // 1) i < (value + 1) + // 2) else if statement + //load static properties - for (int i = 0; i < 7; i++) { + for (int i = 0; i < 8; i++) { string line; utilityCore::safeGetline(fp_in, line); vector tokens = utilityCore::tokenizeString(line); if (strcmp(tokens[0].c_str(), "RGB") == 0) { - glm::vec3 color( atof(tokens[1].c_str()), atof(tokens[2].c_str()), atof(tokens[3].c_str()) ); + glm::vec3 color(atof(tokens[1].c_str()), atof(tokens[2].c_str()), atof(tokens[3].c_str())); newMaterial.color = color; - } else if (strcmp(tokens[0].c_str(), "SPECEX") == 0) { + } + else if (strcmp(tokens[0].c_str(), "SPECEX") == 0) { newMaterial.specular.exponent = atof(tokens[1].c_str()); - } else if (strcmp(tokens[0].c_str(), "SPECRGB") == 0) { + } + else if (strcmp(tokens[0].c_str(), "SPECRGB") == 0) { glm::vec3 specColor(atof(tokens[1].c_str()), atof(tokens[2].c_str()), atof(tokens[3].c_str())); newMaterial.specular.color = specColor; - } else if (strcmp(tokens[0].c_str(), "REFL") == 0) { + } + else if (strcmp(tokens[0].c_str(), "REFL") == 0) { newMaterial.hasReflective = atof(tokens[1].c_str()); - } else if (strcmp(tokens[0].c_str(), "REFR") == 0) { + } + else if (strcmp(tokens[0].c_str(), "REFR") == 0) { newMaterial.hasRefractive = atof(tokens[1].c_str()); - } else if (strcmp(tokens[0].c_str(), "REFRIOR") == 0) { + } + else if (strcmp(tokens[0].c_str(), "REFRIOR") == 0) { newMaterial.indexOfRefraction = atof(tokens[1].c_str()); - } else if (strcmp(tokens[0].c_str(), "EMITTANCE") == 0) { + } + else if (strcmp(tokens[0].c_str(), "EMITTANCE") == 0) { newMaterial.emittance = atof(tokens[1].c_str()); } + // Added fuzziness + else if (strcmp(tokens[0].c_str(), "FUZZ") == 0) { + newMaterial.fuzziness = atof(tokens[1].c_str()); + } } materials.push_back(newMaterial); return 1; diff --git a/src/scene.h b/src/scene.h index f29a917..6a2b7ac 100644 --- a/src/scene.h +++ b/src/scene.h @@ -13,9 +13,10 @@ using namespace std; class Scene { private: ifstream fp_in; - int loadMaterial(string materialid); - int loadGeom(string objectid); + int loadMaterialFromFile(string materialid); + int loadGeomFromFile(string objectid); int loadCamera(); + void loadGeoAndMat(); public: Scene(string filename); ~Scene(); diff --git a/src/sceneStructs.h b/src/sceneStructs.h index da7e558..b087175 100644 --- a/src/sceneStructs.h +++ b/src/sceneStructs.h @@ -38,6 +38,7 @@ struct Material { float hasRefractive; float indexOfRefraction; float emittance; + float fuzziness; }; struct Camera { @@ -70,13 +71,27 @@ struct PathSegment { // 1) color contribution computation // 2) BSDF evaluation: generate a new ray struct ShadeableIntersection { - float t; - glm::vec3 surfaceNormal; - int materialId; + float t; + glm::vec3 surfaceNormal; + int materialId; + glm::vec3 intersectionPoint; + bool outside; }; // CHECKITOUT - a simple struct for storing scene geometry information per-pixel. // What information might be helpful for guiding a denoising filter? struct GBufferPixel { - float t; + float t; + glm::vec3 normal; + glm::vec3 pos; }; + +struct isTerminated +{ + __host__ __device__ + bool operator()(const PathSegment& p) + { + return p.remainingBounces > 0; + } +}; +