diff --git a/.gitignore b/.gitignore
index 89942d9e..46e79284 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,7 +23,8 @@ build
 .LSOverride
 
 # Icon must end with two \r
-Icon
+Icon
+
 
 # Thumbnails
 ._*
@@ -558,3 +559,6 @@ xcuserdata
 *.xccheckout
 *.moved-aside
 *.xcuserstate
+
+
+*.m
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0e535f5d..6dac1c39 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -72,6 +72,7 @@ set(sources
     src/intersections.cu
     src/interactions.cu
     src/scene.cpp
+    src/sceneStructs.cpp
     src/preview.cpp
     src/utilities.cpp
 )
diff --git a/README.md b/README.md
index 110697ce..c91ea7b9 100644
--- a/README.md
+++ b/README.md
@@ -3,11 +3,188 @@ CUDA Path Tracer
 **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 3**
 
-* (TODO) YOUR NAME HERE
-* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
+* Mufeng Xu
+  * [LinkedIn](https://www.linkedin.com/in/mufeng-xu/)
+* Tested on: Windows 11, i9-13900H @ 2.6GHz 32GB, RTX 4080 Laptop 12282MB (Personal Computer)
 
-### (TODO: Your README)
+## Features
 
-*DO NOT* leave the README to the last minute! It is a crucial part of the
-project, and we will not be able to grade you without a good README.
+- Core
+  - Diffuse
+  - Perfectly Specular-Reflective Surfaces
+  - Imperfect Specular
+  - Stream Compaction
+  - Sort Path Segments by Materials
+  - Stochastic Sampled Anti-aliasing
+- Extra
+  - Refraction 2️⃣
+  - Dispersion 3️⃣
+  - Physics-based DoF 2️⃣
+  - Motion blur 3️⃣
+  - Re-startable path tracing 5️⃣
+
+### Refraction
+
+The implementation of refraction uses [Schlick's approximation](https://en.wikipedia.org/wiki/Schlick%27s_approximation)
+to produce the Fresnel effect.
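+A minimal sketch of this reflect-or-refract decision, assuming the ray arrives from air
+(incident IoR of 1) and ignoring surface roughness; `reflectOrRefract` is an illustrative
+helper, not the exact code in `scatterRay`:
+
+```cuda
+#include <glm/glm.hpp>
+#include <thrust/random.h>
+
+// Decide between specular reflection and transmission using Schlick's approximation.
+__host__ __device__ glm::vec3 reflectOrRefract(
+    const glm::vec3& dir,     // incident direction, normalized
+    const glm::vec3& normal,  // surface normal, normalized, facing the incident ray
+    float ior,                // index of refraction of the material (n_i = 1 for air)
+    thrust::default_random_engine& rng)
+{
+    float cosTheta = -glm::dot(normal, dir);                         // > 0 for a front-face hit
+    float R0 = ((1.0f - ior) / (1.0f + ior)) * ((1.0f - ior) / (1.0f + ior));
+    float R  = R0 + (1.0f - R0) * glm::pow(1.0f - cosTheta, 5.0f);   // Fresnel reflectance
+
+    thrust::uniform_real_distribution<float> u01(0.0f, 1.0f);
+    return (u01(rng) < R)
+        ? glm::reflect(dir, normal)               // reflect with probability R
+        : glm::refract(dir, normal, 1.0f / ior);  // otherwise transmit into the medium
+}
+```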
+
+| IoR = 1.2 | IoR = 1.52 |
+|:------------------------------------:|:------------------------------------:|
+| ![](img/cornell_refraction=1.2.png) | ![](img/cornell_refraction=1.52.png) |
+
+Imperfect refraction is also implemented with a BRDF and a BTDF:
+
+![](img/BSDF.png)
+
+The following demos are rendered with ***Roughness = 0.03***.
+
+| IoR = 1.2 | IoR = 1.52 |
+|:---------------------------------------------------:|:---------------------------------------------------:|
+| ![](img/cornell_roughness=0.03_refrection=1.2.png) | ![](img/cornell_roughness=0.03_refrection=1.52.png) |
+
+**Performance**: Refraction adds new branches to the `scatterRay` kernel,
+and more branching wastes GPU clock cycles on divergent warps.
+The measured performance impact is about 5%.
+
+**Compare w/. CPU**: N/A
+
+**Possible Improvement**: Sorting by materials might improve performance if the geometry/scene were more complicated;
+however, that is not the case in my simple scenes.
+
+### Dispersion
+
+Dispersion happens because, for some materials,
+the index of refraction (IoR) varies with the wavelength (color) of the light.
+In my implementation, the program samples a different color channel (R, G, or B) in each iteration,
+and each channel has its own IoR, creating a realistic dispersion effect.
+
+| Without Dispersion | With Dispersion |
+|:---------------------------------------:|:------------------------------------:|
+| ![](img/cornell_without_dispersion.png) | ![](img/cornell_with_dispersion.png) |
+
+**Performance**: Since dispersion simply samples a different color channel
+in each iteration, it is almost "free". There is no observable negative impact on
+the performance.
+
+**Compare w/. CPU**: N/A
+
+**Possible Improvement**: My implementation assumes that white light is composed of red, green, and blue,
+but natural light spans the whole spectrum: red, orange, yellow, green, cyan, blue,
+violet, and so on. To create a rainbow-like dispersion effect, the white light would probably need to be decomposed into
+many more colors that are ray traced separately.
+
+### Depth of Field
+
+For an ideal pinhole camera, the aperture is infinitesimally small and the Depth of Field (DoF) is infinite.
+To create a Depth of Field effect, we modify the ideal pinhole camera model
+so that the aperture has a size greater than 0.
+
+In the implementation, the ray origin in the `generateRayFromCamera` kernel is jittered:
+the new origin is selected randomly within the aperture.
+To update the ray direction, the `view` vector is computed as `glm::normalize(cam.lookAt - segment.ray.origin)`
+instead of `normalize(cam.lookAt - cam.position)`, so that rays through the `lookAt` point stay in focus.
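+A simplified sketch of this DoF ray setup (anti-aliasing jitter and the pinhole code path are omitted;
+`generateDofRay` is an illustrative helper that assumes the project's `Camera` and `PathSegment` structs):
+
+```cuda
+#include <glm/glm.hpp>
+#include <thrust/random.h>
+#include "sceneStructs.h"  // Camera, PathSegment
+
+// Pick a random point on the aperture, then aim the ray so that cam.lookAt stays in focus.
+__device__ void generateDofRay(
+    const Camera& cam, int x, int y,
+    PathSegment& segment, thrust::default_random_engine& rng)
+{
+    // Normally distributed offset on the aperture (same constants as the kernel),
+    // expressed in pixel-length units and scaled by the aperture size.
+    thrust::normal_distribution<float> n01(-0.5f, 0.5f);
+    float dx = n01(rng) * cam.aperture;
+    float dy = n01(rng) * cam.aperture;
+
+    segment.ray.origin = cam.position
+        + cam.right * cam.pixelLength.x * dx
+        + cam.up    * cam.pixelLength.y * dy;
+
+    // The lookAt point acts as the focal point: rays from anywhere on the
+    // aperture converge there, so geometry near that depth stays sharp.
+    segment.ray.direction = glm::normalize(
+        glm::normalize(cam.lookAt - segment.ray.origin)
+        - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
+        - cam.up    * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f));
+}
+```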
+| Aperture | Focus $z=-3.0$ | Focus $z=0.0$ | Focus $z=+3.0$ |
+|:--------:|:-------------------------------:|:------------------------------:|:-------------------------------:|
+| **20** | ![](img/cornell_A20_L-3.0.png) | ![](img/cornell_A20_L0.0.png) | ![](img/cornell_A20_L+3.0.png) |
+| **200** | ![](img/cornell_A200_L-3.0.png) | ![](img/cornell_A200_L0.0.png) | ![](img/cornell_A200_L+3.0.png) |
+
+From the demo we can conclude that the larger the aperture, the blurrier the out-of-focus objects become,
+which is exactly what real-world physics tells us. Notice that with an aperture of 20, the DoF is so large that the
+whole scene appears to be in focus; it looks just like a render without the DoF effect.
+
+**Performance**: Physics-based DoF is almost "free",
+since it only randomly chooses an origin for each ray.
+There is no observable impact introduced by the feature.
+
+**Compare w/. CPU**: N/A
+
+### Motion Blur
+
+To implement motion blur, a `motion` array and an `exposure` float are added to the scene file.
+The former specifies the velocity (magnitude and direction) of an object,
+while the latter is the exposure time.
+The renderer uniformly samples the moving objects within the exposure interval
+and updates their transform matrices at the beginning of every iteration.
+The renderer then ray traces with the new transform matrices;
+after a statistically large number of iterations, you can observe the object "moving".
+
+![](img/cornell_motion_blur.png)
+
+**Performance**: Motion blur is almost "free",
+because the new transform matrices are computed only once per iteration for each moving object.
+There is no observable impact introduced by the feature.
+
+**Compare w/. CPU**: A kernel is invoked to update the moving objects:
+each thread generates a random time within the exposure interval
+and then updates the transform matrices for its object.
+If the number of moving objects is small, the overhead of the GPU implementation might make it
+slower than a CPU implementation; if the number is large enough, the GPU will beat the CPU.
+
+**Possible Improvement**: If the whole scene moves in the same way, that is equivalent to the
+camera moving. Similarly, we could randomly place the camera along its motion path
+to create a motion-blurred photo.
+
+### Re-startable Path Tracing
+
+All of the path tracing state (except the scene) is stored in `renderState`.
+To enable re-startable rendering, a `serialize()` method is implemented for `class RenderState`,
+which writes the object into a binary file when dumping the rendering state.
+To resume rendering,
+the program loads the binary file and uses `deserialize()` to recover `renderState`.
+
+***Press `D` to save the current state into a binary file,
+and pass `-r NAME.ITERATION.bin` to the program to restart rendering.***
+
+![](img/restart.gif)
+
+**Performance**: No impact on rendering performance.
+
+**Compare w/. CPU**: N/A
+
+### Stream Compaction
+
+![](img/sc.png)
+
+Stream compaction terminates path segments early:
+it frees the threads occupied by "dead" rays and accelerates ray tracing.
+There are two conditions under which a ray is terminated before the depth limit is reached:
+when it hits a light source, and when it has no intersection
+with any object in the scene.
+
+The percentage of remaining path segments decreases with depth when stream compaction is applied.
+At depth 0, stream compaction does not reduce the path count,
+but as the depth increases, the reduction becomes more significant:
+only about 25% of the rays remain at the 8th bounce,
+and the FPS increase is significant.
+
+As a comparison, in a closed-box scene the number of remaining rays does not decrease significantly,
+so the FPS boost is minor. The reason is that without the opening, a ray only terminates
+when it hits a light source, and since the fraction of rays hitting a light source is very small,
+many rays are still traveling after a few bounces.
+One possible workaround is to terminate a ray once the color intensity it carries falls below some
+threshold, say 0.02, meaning its contribution to the pixel is negligible.
+
+***Blooper time***
+
+![](img/blooper.png)
+
+### Sorting by Materials
+
+Theoretically, sorting the path segments by material would reduce branching
+and boost performance. However, in my simple scenes, the overhead of sorting
+the path segments outweighs the savings from reduced branching: the FPS dropped with
+sorting by materials enabled.
+
+### Anti-aliasing
+
+***Notice the cyan sphere: with anti-aliasing, the edge appears much smoother.***
+
+| Without Anti-aliasing | With Anti-aliasing |
+|:---------------------------------:|:-----------------------------:|
+| ![](img/cornell_noAA.png) | ![](img/cornell_AA.png) |
+
+**Performance**: Anti-aliasing with stochastic sampling is almost "free":
+simply jitter the camera ray direction with a uniform random distribution within a pixel every iteration,
+and anti-aliasing is achieved automatically. There is no observable impact on the performance.
+
+**Compare w/.
CPU**: N/A diff --git a/img/BSDF.png b/img/BSDF.png new file mode 100644 index 00000000..326a01d9 Binary files /dev/null and b/img/BSDF.png differ diff --git a/img/blooper.png b/img/blooper.png new file mode 100644 index 00000000..3e3fb00c Binary files /dev/null and b/img/blooper.png differ diff --git a/img/cornell_A200_L+3.0.png b/img/cornell_A200_L+3.0.png new file mode 100644 index 00000000..eed9f9dd Binary files /dev/null and b/img/cornell_A200_L+3.0.png differ diff --git a/img/cornell_A200_L-3.0.png b/img/cornell_A200_L-3.0.png new file mode 100644 index 00000000..a97b475e Binary files /dev/null and b/img/cornell_A200_L-3.0.png differ diff --git a/img/cornell_A200_L0.0.png b/img/cornell_A200_L0.0.png new file mode 100644 index 00000000..94ef59af Binary files /dev/null and b/img/cornell_A200_L0.0.png differ diff --git a/img/cornell_A20_L+3.0.png b/img/cornell_A20_L+3.0.png new file mode 100644 index 00000000..0ce6bc50 Binary files /dev/null and b/img/cornell_A20_L+3.0.png differ diff --git a/img/cornell_A20_L-3.0.png b/img/cornell_A20_L-3.0.png new file mode 100644 index 00000000..9cb0a8eb Binary files /dev/null and b/img/cornell_A20_L-3.0.png differ diff --git a/img/cornell_A20_L0.0.png b/img/cornell_A20_L0.0.png new file mode 100644 index 00000000..b317c995 Binary files /dev/null and b/img/cornell_A20_L0.0.png differ diff --git a/img/cornell_AA.png b/img/cornell_AA.png new file mode 100644 index 00000000..f23e0809 Binary files /dev/null and b/img/cornell_AA.png differ diff --git a/img/cornell_motion_blur.png b/img/cornell_motion_blur.png new file mode 100644 index 00000000..73d6b0ce Binary files /dev/null and b/img/cornell_motion_blur.png differ diff --git a/img/cornell_noAA.png b/img/cornell_noAA.png new file mode 100644 index 00000000..24277f62 Binary files /dev/null and b/img/cornell_noAA.png differ diff --git a/img/cornell_refraction=1.2.png b/img/cornell_refraction=1.2.png new file mode 100644 index 00000000..951b453c Binary files /dev/null and b/img/cornell_refraction=1.2.png differ diff --git a/img/cornell_refraction=1.52.png b/img/cornell_refraction=1.52.png new file mode 100644 index 00000000..423e8373 Binary files /dev/null and b/img/cornell_refraction=1.52.png differ diff --git a/img/cornell_roughness=0.03_refrection=1.2.png b/img/cornell_roughness=0.03_refrection=1.2.png new file mode 100644 index 00000000..e5d1b5ff Binary files /dev/null and b/img/cornell_roughness=0.03_refrection=1.2.png differ diff --git a/img/cornell_roughness=0.03_refrection=1.52.png b/img/cornell_roughness=0.03_refrection=1.52.png new file mode 100644 index 00000000..af6004ca Binary files /dev/null and b/img/cornell_roughness=0.03_refrection=1.52.png differ diff --git a/img/cornell_with_dispersion.png b/img/cornell_with_dispersion.png new file mode 100644 index 00000000..65b2f73e Binary files /dev/null and b/img/cornell_with_dispersion.png differ diff --git a/img/cornell_without_dispersion.png b/img/cornell_without_dispersion.png new file mode 100644 index 00000000..d40dbda2 Binary files /dev/null and b/img/cornell_without_dispersion.png differ diff --git a/img/restart.gif b/img/restart.gif new file mode 100644 index 00000000..98e0344d Binary files /dev/null and b/img/restart.gif differ diff --git a/img/sc.png b/img/sc.png new file mode 100644 index 00000000..bfac4b07 Binary files /dev/null and b/img/sc.png differ diff --git a/scenes/cornell.json b/scenes/cornell.json index e7419885..05bc0f3c 100644 --- a/scenes/cornell.json +++ b/scenes/cornell.json @@ -22,12 +22,43 @@ "TYPE":"Diffuse", 
"RGB":[0.35, 0.85, 0.35] }, + "diffuse_cyan": + { + "TYPE":"Diffuse", + "RGB":[0.35, 0.85, 0.85] + }, "specular_white": { "TYPE":"Specular", "RGB":[0.98, 0.98, 0.98], + "ROUGHNESS":0.5 + }, + "metal_aluminum": + { + "TYPE":"Specular", + "RGB":[0.95, 0.95, 0.95], + "ROUGHNESS":0.5 + }, + "metal_gold": + { + "TYPE":"Specular", + "RGB":[0.98, 0.74, 0.02], + "ROUGHNESS":0.2 + }, + "glass_white": + { + "TYPE":"Transparent", + "RGB":[0.99, 0.99, 0.99], + "REFRACTION":1.52, + "ROUGHNESS":0.03 + }, + "glass_dispersion": + { + "TYPE":"Transparent", + "RGB":[0.99, 0.99, 0.99], + "REFRACTION":[1.7, 1.825, 1.88], "ROUGHNESS":0.0 - } + } }, "Camera": { @@ -38,16 +69,18 @@ "FILE":"cornell", "EYE":[0.0,5.0,10.5], "LOOKAT":[0.0,5.0,0.0], - "UP":[0.0,1.0,0.0] + "UP":[0.0,1.0,0.0], + "APERTURE":100.0, + "EXPOSURE":1 }, "Objects": [ { "TYPE":"cube", "MATERIAL":"light", - "TRANS":[0.0,10.0,0.0], + "TRANS":[0.0,10.05,0.0], "ROTAT":[0.0,0.0,0.0], - "SCALE":[3.0,0.3,3.0] + "SCALE":[3.0,0.15,3.0] }, { "TYPE":"cube", @@ -86,10 +119,31 @@ }, { "TYPE":"sphere", - "MATERIAL":"specular_white", + "MATERIAL":"diffuse_white", "TRANS":[-1.0,4.0,-1.0], "ROTAT":[0.0,0.0,0.0], "SCALE":[3.0,3.0,3.0] + }, + { + "TYPE":"sphere", + "MATERIAL":"glass_white", + "TRANS":[-1.2,3.0,3.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[3.0,3.0,3.0] + }, + { + "TYPE":"sphere", + "MATERIAL":"metal_gold", + "TRANS":[3.0,2.0,0.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[3.0,3.0,3.0] + }, + { + "TYPE":"sphere", + "MATERIAL":"diffuse_cyan", + "TRANS":[-0.1,2.6,0.6], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[2.0,2.0,2.0] } ] } \ No newline at end of file diff --git a/scenes/cornell_dispersion.json b/scenes/cornell_dispersion.json new file mode 100644 index 00000000..3fca7f9a --- /dev/null +++ b/scenes/cornell_dispersion.json @@ -0,0 +1,160 @@ +{ + "Materials": + { + "light": + { + "TYPE":"Emitting", + "RGB":[1.0, 1.0, 1.0], + "EMITTANCE":9.0 + }, + "light_strong": + { + "TYPE":"Emitting", + "RGB":[1.0, 1.0, 1.0], + "EMITTANCE":20.0 + }, + "diffuse_white": + { + "TYPE":"Diffuse", + "RGB":[0.99, 0.99, 0.99] + }, + "diffuse_red": + { + "TYPE":"Diffuse", + "RGB":[0.85, 0.35, 0.35] + }, + "diffuse_green": + { + "TYPE":"Diffuse", + "RGB":[0.35, 0.85, 0.35] + }, + "diffuse_cyan": + { + "TYPE":"Diffuse", + "RGB":[0.35, 0.85, 0.85] + }, + "diffuse_yellow": + { + "TYPE":"Diffuse", + "RGB":[0.85, 0.85, 0.35] + }, + "specular_white": + { + "TYPE":"Specular", + "RGB":[0.98, 0.98, 0.98], + "ROUGHNESS":0.5 + }, + "metal_aluminum": + { + "TYPE":"Specular", + "RGB":[0.95, 0.95, 0.95], + "ROUGHNESS":0.5 + }, + "metal_gold": + { + "TYPE":"Specular", + "RGB":[0.98, 0.74, 0.02], + "ROUGHNESS":0.2 + }, + "glass_white": + { + "TYPE":"Transparent", + "RGB":[0.99, 0.99, 0.99], + "REFRACTION":1.8, + "ROUGHNESS":0.00011 + }, + "glass_dispersion": + { + "TYPE":"Transparent", + "RGB":[0.99, 0.99, 0.99], + "REFRACTION":[1.5, 1.825, 1.90], + "ROUGHNESS":0.00011 + } + }, + "Camera": + { + "RES":[800,800], + "FOVY":45.0, + "ITERATIONS":5000, + "DEPTH":8, + "FILE":"cornell", + "EYE":[0.0,5.0,10.5], + "LOOKAT":[0.0,5.0,0.0], + "UP":[0.0,1.0,0.0], + "APERTURE":100.0, + "EXPOSURE":1 + }, + "Objects": + [ + { + "TYPE":"cube", + "MATERIAL":"light", + "TRANS":[0.0,10.05,0.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[3.0,0.15,3.0] + }, + { + "TYPE":"cube", + "MATERIAL":"diffuse_white", + "TRANS":[0.0,0.0,0.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[10.0,0.01,10.0] + }, + { + "TYPE":"cube", + "MATERIAL":"diffuse_white", + "TRANS":[0.0,10.0,0.0], + "ROTAT":[0.0,0.0,90.0], + "SCALE":[0.01,10.0,10.0] + }, + { + 
"TYPE":"cube", + "MATERIAL":"diffuse_white", + "TRANS":[0.0,5.0,-5.0], + "ROTAT":[0.0,90.0,0.0], + "SCALE":[0.01,10.0,10.0] + }, + { + "TYPE":"cube", + "MATERIAL":"diffuse_white", + "TRANS":[-5.0,5.0,0.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[0.01,10.0,10.0] + }, + { + "TYPE":"cube", + "MATERIAL":"diffuse_white", + "TRANS":[5.0,5.0,0.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[0.01,10.0,10.0] + }, + { + "TYPE":"sphere", + "MATERIAL":"diffuse_green", + "TRANS":[-2.0,8.0,-3.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[1.0,1.0,1.0] + }, + { + "TYPE":"sphere", + "MATERIAL":"diffuse_yellow", + "TRANS":[2.0,7.0,-3.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[1.0,1.0,1.0] + }, + { + "TYPE":"sphere", + "MATERIAL":"diffuse_red", + "TRANS":[3.5,1.2,0.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[1.0,1.0,1.0] + }, + { + "TYPE":"cube", + "MATERIAL":"glass_dispersion", + "TRANS":[0.0,4.0,0.0], + "ROTAT":[-45.0,30.0,0.0], + "SCALE":[3.0,3.0,3.0] + } + ] +} \ No newline at end of file diff --git a/scenes/cornell_dof.json b/scenes/cornell_dof.json new file mode 100644 index 00000000..83396db4 --- /dev/null +++ b/scenes/cornell_dof.json @@ -0,0 +1,143 @@ +{ + "Materials": + { + "light": + { + "TYPE":"Emitting", + "RGB":[1.0, 1.0, 1.0], + "EMITTANCE":5.0 + }, + "diffuse_white": + { + "TYPE":"Diffuse", + "RGB":[0.98, 0.98, 0.98] + }, + "diffuse_red": + { + "TYPE":"Diffuse", + "RGB":[0.85, 0.35, 0.35] + }, + "diffuse_green": + { + "TYPE":"Diffuse", + "RGB":[0.35, 0.85, 0.35] + }, + "diffuse_cyan": + { + "TYPE":"Diffuse", + "RGB":[0.35, 0.85, 0.85] + }, + "specular_white": + { + "TYPE":"Specular", + "RGB":[0.98, 0.98, 0.98], + "ROUGHNESS":0.5 + }, + "metal_aluminum": + { + "TYPE":"Specular", + "RGB":[0.95, 0.95, 0.95], + "ROUGHNESS":0.5 + }, + "metal_gold": + { + "TYPE":"Specular", + "RGB":[0.98, 0.74, 0.02], + "ROUGHNESS":0.2 + }, + "glass_white": + { + "TYPE":"Transparent", + "RGB":[0.99, 0.99, 0.99], + "REFRACTION":1.2, + "ROUGHNESS":0.0 + }, + "glass_dispersion": + { + "TYPE":"Transparent", + "RGB":[0.99, 0.99, 0.99], + "REFRACTION":[1.7, 1.825, 1.88], + "ROUGHNESS":0.0 + } + }, + "Camera": + { + "RES":[800,800], + "FOVY":45.0, + "ITERATIONS":5000, + "DEPTH":8, + "FILE":"cornell", + "EYE":[0.0,5.0,10.5], + "LOOKAT":[0.0,5.0,-3.0], + "UP":[0.0,1.0,0.0], + "APERTURE":20.0, + "EXPOSURE":1 + }, + "Objects": + [ + { + "TYPE":"cube", + "MATERIAL":"light", + "TRANS":[0.0,10.05,0.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[3.0,0.15,3.0] + }, + { + "TYPE":"cube", + "MATERIAL":"diffuse_white", + "TRANS":[0.0,0.0,0.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[10.0,0.01,10.0] + }, + { + "TYPE":"cube", + "MATERIAL":"diffuse_white", + "TRANS":[0.0,10.0,0.0], + "ROTAT":[0.0,0.0,90.0], + "SCALE":[0.01,10.0,10.0] + }, + { + "TYPE":"cube", + "MATERIAL":"diffuse_white", + "TRANS":[0.0,5.0,-5.0], + "ROTAT":[0.0,90.0,0.0], + "SCALE":[0.01,10.0,10.0] + }, + { + "TYPE":"cube", + "MATERIAL":"diffuse_red", + "TRANS":[-5.0,5.0,0.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[0.01,10.0,10.0] + }, + { + "TYPE":"cube", + "MATERIAL":"diffuse_green", + "TRANS":[5.0,5.0,0.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[0.01,10.0,10.0] + }, + { + "TYPE":"sphere", + "MATERIAL":"diffuse_white", + "TRANS":[0.0,4.0,-3.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[3.0,3.0,3.0], + "MOTION":[1.5,0.5,0.4] + }, + { + "TYPE":"sphere", + "MATERIAL":"glass_white", + "TRANS":[-1.2,3.0,3.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[3.0,3.0,3.0] + }, + { + "TYPE":"sphere", + "MATERIAL":"metal_gold", + "TRANS":[3.0,2.0,0.0], + "ROTAT":[0.0,0.0,0.0], + "SCALE":[3.0,3.0,3.0] + } + ] +} \ No newline at end of 
file diff --git a/src/interactions.cu b/src/interactions.cu index 1837e713..bbf3a296 100644 --- a/src/interactions.cu +++ b/src/interactions.cu @@ -41,13 +41,96 @@ __host__ __device__ glm::vec3 calculateRandomDirectionInHemisphere( } __host__ __device__ void scatterRay( - PathSegment & pathSegment, + PathSegment & pathSegment, glm::vec3 intersect, glm::vec3 normal, - const Material &m, + const float m_hasReflective, + const float m_hasRefractive, + const float m_indexOfRefraction, + const glm::vec3 m_color, + const float m_roughness, thrust::default_random_engine &rng) { - // TODO: implement this. - // A basic implementation of pure-diffuse shading will just call the - // calculateRandomDirectionInHemisphere defined above. + // Pre-fetch + const glm::vec3 direction = glm::normalize(pathSegment.ray.direction); + glm::vec3 norm = glm::normalize(normal); + + pathSegment.ray.origin = intersect; + const glm::vec3 delta = 0.001f * norm; + pathSegment.color *= m_color; + --pathSegment.remainingBounces; + + // Diffuse for any material + thrust::uniform_real_distribution u01(0, 1); + float r01; + + // Specular reflection + if (m_hasReflective > 0) + { + if (m_roughness <= EPSILON) + { + pathSegment.ray.direction = glm::reflect(direction, norm); + } + else + { + r01 = u01(rng); + glm::vec3 randomDirectionDelta{ glm::normalize(calculateRandomDirectionInHemisphere(norm, rng)) }; + pathSegment.ray.direction = (1 - m_roughness) * glm::normalize(glm::reflect(direction, norm)) + + m_roughness * randomDirectionDelta; + } + pathSegment.ray.origin += delta; + } + // Refractive + else if (m_hasRefractive > 0) + { + r01 = u01(rng); + + // Derive reflection coeff R_theta + const float cos_theta = -glm::dot(norm, direction); + constexpr float n_i = 1.0f; + const float n_o = m_indexOfRefraction; + const float R_0 = glm::pow((n_i - n_o) / (n_i + n_o), 2.0f); + const float R_theta = R_0 + (1.0f - R_0) * glm::pow(1.0f - cos_theta, 5.0f); + float dot_n_d = glm::dot(norm, direction); + + if (r01 > R_theta) + { + // Refract + float ratio; + if (dot_n_d > 0.f) // material -> air + { + norm = -norm; + ratio = m_indexOfRefraction; + } + else // air -> material + { + ratio = 1.0f / m_indexOfRefraction; + } + pathSegment.ray.direction = glm::refract(direction, norm, ratio); + + pathSegment.ray.origin -= delta; + } + else + { + // Reflect + pathSegment.ray.direction = glm::reflect(direction, norm); + pathSegment.ray.origin += delta; + } + + if (m_roughness > EPSILON) + { + if ((r01 > R_theta && dot_n_d < 0.f) || (r01 < R_theta && dot_n_d > 0.f)) // BRDF and BTDF + { + r01 = u01(rng); + glm::vec3 randomDirectionDelta{ glm::normalize(calculateRandomDirectionInHemisphere(norm, rng)) }; + pathSegment.ray.direction = (1 - m_roughness) * pathSegment.ray.direction + m_roughness * randomDirectionDelta; + } + } + } + // Diffuse material + else + { + pathSegment.ray.direction = calculateRandomDirectionInHemisphere(norm, rng); + pathSegment.ray.origin += delta; + } } diff --git a/src/interactions.h b/src/interactions.h index bfe4cbbb..477397e8 100644 --- a/src/interactions.h +++ b/src/interactions.h @@ -41,5 +41,9 @@ __host__ __device__ void scatterRay( PathSegment& pathSegment, glm::vec3 intersect, glm::vec3 normal, - const Material& m, + const float m_hasReflective, + const float m_hasRefractive, + const float m_indexOfRefraction, + const glm::vec3 m_color, + const float m_roughness, thrust::default_random_engine& rng); diff --git a/src/main.cpp b/src/main.cpp index 31bdaab4..194cba74 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ 
-24,6 +24,8 @@ GuiDataContainer* guiData; RenderState* renderState; int iteration; +bool resume = false; + int width; int height; @@ -37,12 +39,20 @@ int main(int argc, char** argv) if (argc < 2) { - printf("Usage: %s SCENEFILE.json\n", argv[0]); + printf("Usage: %s SCENEFILE.json [-r DUMPFILE.bin]\n", argv[0]); return 1; } const char* sceneFile = argv[1]; + if (argc >= 4) + { + if (strcmp(argv[2], "-r") == 0) + { + resume = true; + } + } + // Load scene file scene = new Scene(sceneFile); @@ -52,6 +62,14 @@ int main(int argc, char** argv) // Set up camera stuff from loaded path tracer settings iteration = 0; renderState = &scene->state; + + if (resume) + { + string dumpFile(argv[3]); + loadState(dumpFile); + camchanged = false; + } + Camera& cam = renderState->camera; width = cam.resolution.x; height = cam.resolution.y; @@ -141,6 +159,14 @@ void runCuda() { pathtraceFree(); pathtraceInit(scene); + resume = false; + } + + if (resume) + { + pathtraceFree(); + pathtraceResume(scene); + resume = false; } if (iteration < renderState->iterations) @@ -165,6 +191,38 @@ void runCuda() } } +void dumpState() +{ + std::ostringstream ss; + ss << renderState->imageName << "." << iteration << ".bin"; + std::string filename = ss.str(); + + std::ofstream ofs(filename, std::ios::binary); + if (!ofs.is_open()) + { + throw std::runtime_error("Could not open file for writing."); + } + + ofs.write(reinterpret_cast(&iteration), sizeof(iteration)); + renderState->serialize(ofs); + + ofs.close(); +} + +void loadState(const string& filename) +{ + std::ifstream ifs(filename, std::ios::binary); + if (!ifs.is_open()) + { + throw std::runtime_error("Could not open load the state."); + } + + ifs.read(reinterpret_cast(&iteration), sizeof(iteration)); + renderState->deserialize(ifs); + + ifs.close(); +} + void keyCallback(GLFWwindow* window, int key, int scancode, int action, int mods) { if (action == GLFW_PRESS) @@ -175,6 +233,10 @@ void keyCallback(GLFWwindow* window, int key, int scancode, int action, int mods saveImage(); glfwSetWindowShouldClose(window, GL_TRUE); break; + case GLFW_KEY_D: + dumpState(); + glfwSetWindowShouldClose(window, GL_TRUE); + break; case GLFW_KEY_S: saveImage(); break; diff --git a/src/main.h b/src/main.h index fdb7d5d1..4864cbe9 100644 --- a/src/main.h +++ b/src/main.h @@ -33,6 +33,8 @@ extern int width; extern int height; void runCuda(); +void dumpState(); +void loadState(const string& filename); void keyCallback(GLFWwindow *window, int key, int scancode, int action, int mods); void mousePositionCallback(GLFWwindow* window, double xpos, double ypos); void mouseButtonCallback(GLFWwindow* window, int button, int action, int mods); diff --git a/src/pathtrace.cu b/src/pathtrace.cu index 709c231b..cda5fa30 100644 --- a/src/pathtrace.cu +++ b/src/pathtrace.cu @@ -6,6 +6,9 @@ #include #include #include +#include +#include +#include #include "sceneStructs.h" #include "scene.h" @@ -15,7 +18,13 @@ #include "intersections.h" #include "interactions.h" +#include +#include + #define ERRORCHECK 1 +#define SORT_BY_MATERIAL false +#define DOF false +#define AA true #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) @@ -81,7 +90,11 @@ static Material* dev_materials = NULL; static PathSegment* dev_paths = NULL; static ShadeableIntersection* dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc -// ... 
+static bool* dev_hasIntersection = NULL; +#if SORT_BY_MATERIAL +static unsigned char* dev_materialIds_isec = NULL; +static unsigned char* dev_materialIds_path = NULL; +#endif void InitDataContainer(GuiDataContainer* imGuiData) { @@ -109,11 +122,51 @@ void pathtraceInit(Scene* scene) cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); - // TODO: initialize any extra device memeory you need - + cudaMalloc(&dev_hasIntersection, pixelcount * sizeof(bool)); + cudaMemset(dev_hasIntersection, false, pixelcount * sizeof(bool)); +#if SORT_BY_MATERIAL + cudaMalloc(&dev_materialIds_isec, pixelcount * sizeof(unsigned char)); + cudaMemset(dev_materialIds_isec, -1, pixelcount * sizeof(unsigned char)); + cudaMalloc(&dev_materialIds_path, pixelcount * sizeof(unsigned char)); + cudaMemset(dev_materialIds_path, -1, pixelcount * sizeof(unsigned char)); +#endif checkCUDAError("pathtraceInit"); } +void pathtraceResume(Scene* scene) +{ + hst_scene = scene; + + const Camera& cam = hst_scene->state.camera; + const int pixelcount = cam.resolution.x * cam.resolution.y; + + cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); + // Load image + cudaMemcpy(dev_image, hst_scene->state.image.data(), + pixelcount * sizeof(glm::vec3), cudaMemcpyHostToDevice); + + cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); + + cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); + cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); + + cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); + cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); + + cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); + cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); + + cudaMalloc(&dev_hasIntersection, pixelcount * sizeof(bool)); + cudaMemset(dev_hasIntersection, false, pixelcount * sizeof(bool)); +#if SORT_BY_MATERIAL + cudaMalloc(&dev_materialIds_isec, pixelcount * sizeof(unsigned char)); + cudaMemset(dev_materialIds_isec, -1, pixelcount * sizeof(unsigned char)); + cudaMalloc(&dev_materialIds_path, pixelcount * sizeof(unsigned char)); + cudaMemset(dev_materialIds_path, -1, pixelcount * sizeof(unsigned char)); +#endif + checkCUDAError("pathtraceResume"); +} + void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null @@ -122,7 +175,11 @@ void pathtraceFree() cudaFree(dev_materials); cudaFree(dev_intersections); // TODO: clean up any extra device memory you created - + cudaFree(dev_hasIntersection); +#if SORT_BY_MATERIAL + cudaFree(dev_materialIds_isec); + cudaFree(dev_materialIds_path); +#endif checkCUDAError("pathtraceFree"); } @@ -143,13 +200,40 @@ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, Path int index = x + (y * cam.resolution.x); PathSegment& segment = pathSegments[index]; +#if AA + thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, traceDepth); + thrust::uniform_real_distribution u01(-0.5f, 0.5f); + float dx = u01(rng); + float dy = u01(rng); +#endif + +#if DOF + thrust::default_random_engine rng_normal = makeSeededRandomEngine(-iter, index, traceDepth); + thrust::normal_distribution n01(-0.5, 0.5); + float dx_origin = n01(rng_normal) * cam.aperture; + float dy_origin = n01(rng_normal) * cam.aperture; +#endif + +#if DOF + segment.ray.origin = cam.position + cam.right * 
cam.pixelLength.x * dx_origin + cam.up * cam.pixelLength.y * dy_origin; +#else segment.ray.origin = cam.position; +#endif segment.color = glm::vec3(1.0f, 1.0f, 1.0f); - // TODO: implement antialiasing by jittering the ray +#if DOF + // Use the lookAt point as the focal point + segment.ray.direction = glm::normalize(glm::normalize(cam.lookAt - segment.ray.origin) +#else segment.ray.direction = glm::normalize(cam.view +#endif +#if AA + - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f + dx) + - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f + dy) +#else - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) +#endif ); segment.pixelIndex = index; @@ -167,7 +251,8 @@ __global__ void computeIntersections( PathSegment* pathSegments, Geom* geoms, int geoms_size, - ShadeableIntersection* intersections) + ShadeableIntersection* intersections, + bool* hasIntersection) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; @@ -215,6 +300,7 @@ __global__ void computeIntersections( if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; + hasIntersection[path_index] = false; } else { @@ -222,6 +308,7 @@ __global__ void computeIntersections( intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; + hasIntersection[path_index] = true; } } } @@ -245,38 +332,62 @@ __global__ void shadeFakeMaterial( int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { + if (pathSegments[idx].remainingBounces == 0) { return; } ShadeableIntersection intersection = shadeableIntersections[idx]; - if (intersection.t > 0.0f) // if the intersection exists... - { // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. - thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); - thrust::uniform_real_distribution u01(0, 1); + thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); + thrust::uniform_real_distribution u01(0, 1); - Material material = materials[intersection.materialId]; - glm::vec3 materialColor = material.color; + Material material = materials[intersection.materialId]; + glm::vec3 materialColor = material.color; - // If the material indicates that the object was a light, "light" the ray - if (material.emittance > 0.0f) { - pathSegments[idx].color *= (materialColor * material.emittance); - } - // Otherwise, do some pseudo-lighting computation. This is actually more - // like what you would expect from shading in a rasterizer like OpenGL. - // TODO: replace this! you should be able to start with basically a one-liner - else { - float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); - pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; - pathSegments[idx].color *= u01(rng); // apply some noise because why not - } - // If there was no intersection, color the ray black. - // Lots of renderers use 4 channel color, RGBA, where A = alpha, often - // used for opacity, in which case they can indicate "no opacity". - // This can be useful for post-processing and image compositing. 
+ // If the material indicates that the object was a light, "light" the ray + if (material.emittance > 0.0f) { + pathSegments[idx].color *= (materialColor * material.emittance); + pathSegments[idx].remainingBounces = 0; } + // Otherwise, do some pseudo-lighting computation. This is actually more + // like what you would expect from shading in a rasterizer like OpenGL. + // TODO: replace this! you should be able to start with basically a one-liner else { - pathSegments[idx].color = glm::vec3(0.0f); + // compute the point of intersection + glm::vec3 point{ pathSegments[idx].ray.origin }; + point += pathSegments[idx].ray.direction * intersection.t; + + float m_indexOfRefraction; + glm::vec3 m_color{0.f, 0.f, 0.f}; + if (material.dispersion.hasDispersion) + { + m_indexOfRefraction = material.dispersion.indexOfRefraction[iter % 3]; + m_color[iter % 3] = 1.4f * materialColor[iter % 3]; + } + else + { + m_indexOfRefraction = material.indexOfRefraction; + m_color = materialColor; + } + + // call the scatter ray function to handle interactions + scatterRay( + pathSegments[idx], + point, + intersection.surfaceNormal, + material.hasReflective, + material.hasRefractive, + m_indexOfRefraction, + m_color, + material.roughness, + rng); } + // If there was no intersection, color the ray black. + // Lots of renderers use 4 channel color, RGBA, where A = alpha, often + // used for opacity, in which case they can indicate "no opacity". + // This can be useful for post-processing and image compositing. + } + else { + pathSegments[idx].color = glm::vec3(0.0f); } } @@ -292,6 +403,35 @@ __global__ void finalGather(int nPaths, glm::vec3* image, PathSegment* iteration } } +__global__ void getMaterialId(int nPaths, unsigned char* materialIds_isec, unsigned char* materialIds_path, const ShadeableIntersection* isecs) +{ + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (index < nPaths) + { + unsigned char id = isecs->materialId; + materialIds_isec[index] = id; + materialIds_path[index] = id; + } +} + +__global__ void updateGeoms(int nGeoms, int iter, Geom* geoms, float exposure) +{ + int index = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (index < nGeoms) + { + // Generate random time + int h = utilhash((1 << 31) | iter) ^ utilhash(index); + thrust::default_random_engine rng(h); + thrust::uniform_real_distribution u01(0, 1); + float dT = u01(rng) * exposure; + + // Update geom + geoms[index].update(dT); + } +} + /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management @@ -358,6 +498,9 @@ void pathtrace(uchar4* pbo, int frame, int iter) // clean shading chunks cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); + dim3 numblocksUpdateGeoms = (hst_scene->geoms.size() + blockSize1d - 1) / blockSize1d; + updateGeoms<<>> (hst_scene->geoms.size(), iter, dev_geoms, cam.exposure); + // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; computeIntersections<<>> ( @@ -366,12 +509,43 @@ void pathtrace(uchar4* pbo, int frame, int iter) dev_paths, dev_geoms, hst_scene->geoms.size(), - dev_intersections + dev_intersections, + dev_hasIntersection ); checkCUDAError("trace one bounce"); cudaDeviceSynchronize(); depth++; + // Stream Compaction + thrust::device_ptr dev_thrust_hasIntersection{ dev_hasIntersection }; + thrust::device_ptr dev_thrust_intersections{ dev_intersections }; + thrust::device_ptr dev_thrust_paths{ dev_paths }; + auto dev_thrust_intersections_end = thrust::remove_if( + 
dev_thrust_intersections, + dev_thrust_intersections + num_paths, + dev_thrust_hasIntersection, + thrust::logical_not()); + + thrust::remove_if( + dev_thrust_paths, + dev_thrust_paths + num_paths, + dev_thrust_hasIntersection, + thrust::logical_not()); + + num_paths = static_cast(dev_thrust_intersections_end - dev_thrust_intersections); + +#if SORT_BY_MATERIAL // Slower! Why? + // Sort the paths by: materialId + thrust::device_ptr dev_thrust_materialIds_isec { dev_materialIds_isec }; + thrust::device_ptr dev_thrust_materialIds_path { dev_materialIds_path }; + getMaterialId<<>>(num_paths, dev_materialIds_isec, dev_materialIds_path, dev_intersections); + + checkCUDAError("get Material Ids"); + // first sort the keys and indices by the keys + thrust::sort_by_key(dev_thrust_materialIds_isec, dev_thrust_materialIds_isec + num_paths, dev_intersections); + thrust::sort_by_key(dev_thrust_materialIds_path, dev_thrust_materialIds_path + num_paths, dev_paths); +#endif + // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by @@ -388,7 +562,8 @@ void pathtrace(uchar4* pbo, int frame, int iter) dev_paths, dev_materials ); - iterationComplete = true; // TODO: should be based off stream compaction results. + + if (depth == traceDepth) { iterationComplete = true; } if (guiData != NULL) { diff --git a/src/pathtrace.h b/src/pathtrace.h index e767d0ef..39ba545b 100644 --- a/src/pathtrace.h +++ b/src/pathtrace.h @@ -5,5 +5,6 @@ void InitDataContainer(GuiDataContainer* guiData); void pathtraceInit(Scene *scene); +void pathtraceResume(Scene* scene); void pathtraceFree(); void pathtrace(uchar4 *pbo, int frame, int iteration); diff --git a/src/scene.cpp b/src/scene.cpp index 706bf85b..cddf0c6e 100644 --- a/src/scene.cpp +++ b/src/scene.cpp @@ -29,7 +29,7 @@ void Scene::loadFromJSON(const std::string& jsonName) std::ifstream f(jsonName); json data = json::parse(f); const auto& materialsData = data["Materials"]; - std::unordered_map MatNameToID; + std::unordered_map MatNameToID; for (const auto& item : materialsData.items()) { const auto& name = item.key(); @@ -51,6 +51,27 @@ void Scene::loadFromJSON(const std::string& jsonName) { const auto& col = p["RGB"]; newMaterial.color = glm::vec3(col[0], col[1], col[2]); + newMaterial.roughness = p["ROUGHNESS"]; + newMaterial.hasReflective = 1.0f; + } + else if (p["TYPE"] == "Transparent") + { + const auto& col = p["RGB"]; + newMaterial.color = glm::vec3(col[0], col[1], col[2]); + newMaterial.hasRefractive = 1.0; + newMaterial.roughness = p["ROUGHNESS"]; + if (p["REFRACTION"].is_number()) + { + newMaterial.indexOfRefraction = p["REFRACTION"]; + newMaterial.dispersion.hasDispersion = false; + } + else if (p["REFRACTION"].is_array()) + { + newMaterial.dispersion.hasDispersion = true; + newMaterial.dispersion.indexOfRefraction[0] = p["REFRACTION"][0]; + newMaterial.dispersion.indexOfRefraction[1] = p["REFRACTION"][1]; + newMaterial.dispersion.indexOfRefraction[2] = p["REFRACTION"][2]; + } } MatNameToID[name] = materials.size(); materials.emplace_back(newMaterial); @@ -80,6 +101,20 @@ void Scene::loadFromJSON(const std::string& jsonName) newGeom.inverseTransform = glm::inverse(newGeom.transform); newGeom.invTranspose = glm::inverseTranspose(newGeom.transform); + if (p.contains("MOTION")) + { + const auto& velocity = p["MOTION"]; + newGeom.velocity = glm::vec3(velocity[0], velocity[1], velocity[2]); + } + else + { + newGeom.velocity = glm::vec3(0.f, 0.f, 0.f); + } + + newGeom.has_motion = newGeom.velocity.x != 0.f || + 
newGeom.velocity.y != 0.f || + newGeom.velocity.z != 0.f; + geoms.push_back(newGeom); } const auto& cameraData = data["Camera"]; @@ -110,6 +145,9 @@ void Scene::loadFromJSON(const std::string& jsonName) camera.view = glm::normalize(camera.lookAt - camera.position); + camera.aperture = cameraData["APERTURE"]; + camera.exposure = cameraData["EXPOSURE"]; + //set up render camera stuff int arraylen = camera.resolution.x * camera.resolution.y; state.image.resize(arraylen); diff --git a/src/sceneStructs.cpp b/src/sceneStructs.cpp new file mode 100644 index 00000000..786c5571 --- /dev/null +++ b/src/sceneStructs.cpp @@ -0,0 +1,73 @@ +#include "sceneStructs.h" +#include + +void Camera::serialize(std::ofstream& ofs) +{ + ofs.write(reinterpret_cast(&resolution), sizeof(resolution)); + ofs.write(reinterpret_cast(&position), sizeof(position)); + ofs.write(reinterpret_cast(&lookAt), sizeof(lookAt)); + ofs.write(reinterpret_cast(&view), sizeof(view)); + ofs.write(reinterpret_cast(&up), sizeof(up)); + ofs.write(reinterpret_cast(&right), sizeof(right)); + ofs.write(reinterpret_cast(&fov), sizeof(fov)); + ofs.write(reinterpret_cast(&pixelLength), sizeof(pixelLength)); + ofs.write(reinterpret_cast(&aperture), sizeof(aperture)); + ofs.write(reinterpret_cast(&exposure), sizeof(exposure)); +} + +void Camera::deserialize(std::ifstream& ifs) +{ + ifs.read(reinterpret_cast(&resolution), sizeof(resolution)); + ifs.read(reinterpret_cast(&position), sizeof(position)); + ifs.read(reinterpret_cast(&lookAt), sizeof(lookAt)); + ifs.read(reinterpret_cast(&view), sizeof(view)); + ifs.read(reinterpret_cast(&up), sizeof(up)); + ifs.read(reinterpret_cast(&right), sizeof(right)); + ifs.read(reinterpret_cast(&fov), sizeof(fov)); + ifs.read(reinterpret_cast(&pixelLength), sizeof(pixelLength)); + ifs.read(reinterpret_cast(&aperture), sizeof(aperture)); + ifs.read(reinterpret_cast(&exposure), sizeof(exposure)); +} + + +void RenderState::serialize(std::ofstream& ofs) +{ + + // Serialize camera + camera.serialize(ofs); + + // Serialize render state properties + ofs.write(reinterpret_cast(&iterations), sizeof(iterations)); + ofs.write(reinterpret_cast(&traceDepth), sizeof(traceDepth)); + + // Serialize image + size_t imageSize = image.size(); + ofs.write(reinterpret_cast(&imageSize), sizeof(imageSize)); + ofs.write(reinterpret_cast(image.data()), imageSize * sizeof(glm::vec3)); + + // Serialize image name (store string size first) + size_t nameLength = imageName.size(); + ofs.write(reinterpret_cast(&nameLength), sizeof(nameLength)); + ofs.write(imageName.c_str(), nameLength); +} + +void RenderState::deserialize(std::ifstream& ifs) +{ + // Deserialize camera + camera.deserialize(ifs); + + // Deserialize render state properties + ifs.read(reinterpret_cast(&iterations), sizeof(iterations)); + ifs.read(reinterpret_cast(&traceDepth), sizeof(traceDepth)); + + // Deserialize image + size_t imageSize; + ifs.read(reinterpret_cast(&imageSize), sizeof(imageSize)); + ifs.read(reinterpret_cast(image.data()), imageSize * sizeof(glm::vec3)); + + // Deserialize image name + size_t nameLength; + ifs.read(reinterpret_cast(&nameLength), sizeof(nameLength)); + imageName.resize(nameLength); + ifs.read(&imageName[0], nameLength); +} diff --git a/src/sceneStructs.h b/src/sceneStructs.h index ab46f6f6..58046d9a 100644 --- a/src/sceneStructs.h +++ b/src/sceneStructs.h @@ -4,6 +4,10 @@ #include #include #include "glm/glm.hpp" +#include +#include + +#define PI 3.1415926535897932384626422832795028841971f #define BACKGROUND_COLOR (glm::vec3(0.0f)) @@ -29,6 
+33,30 @@ struct Geom glm::mat4 transform; glm::mat4 inverseTransform; glm::mat4 invTranspose; + bool has_motion; + glm::vec3 velocity; + + __device__ __host__ + static glm::mat4 buildTransformationMatrix(glm::vec3 translation, glm::vec3 rotation, glm::vec3 scale) + { + glm::mat4 translationMat = glm::translate(glm::mat4(), translation); + glm::mat4 rotationMat = glm::rotate(glm::mat4(), rotation.x * (float)PI / 180, glm::vec3(1, 0, 0)); + rotationMat = rotationMat * glm::rotate(glm::mat4(), rotation.y * (float)PI / 180, glm::vec3(0, 1, 0)); + rotationMat = rotationMat * glm::rotate(glm::mat4(), rotation.z * (float)PI / 180, glm::vec3(0, 0, 1)); + glm::mat4 scaleMat = glm::scale(glm::mat4(), scale); + return translationMat * rotationMat * scaleMat; + } + + __device__ __host__ + void update(float dT) + { + if (!this->has_motion) { return; } + glm::vec3 new_translation = translation + dT * velocity; + this->transform = this->buildTransformationMatrix( + new_translation, this->rotation, this->scale); + this->inverseTransform = glm::inverse(this->transform); + this->invTranspose = glm::inverseTranspose(this->transform); + } }; struct Material @@ -43,6 +71,12 @@ struct Material float hasRefractive; float indexOfRefraction; float emittance; + float roughness; + struct + { + bool hasDispersion; + float indexOfRefraction[3]; + } dispersion; }; struct Camera @@ -55,6 +89,11 @@ struct Camera glm::vec3 right; glm::vec2 fov; glm::vec2 pixelLength; + float aperture; + float exposure; + + void serialize(std::ofstream& ofs); + void deserialize(std::ifstream& ifs); }; struct RenderState @@ -64,6 +103,9 @@ struct RenderState int traceDepth; std::vector image; std::string imageName; + + void serialize(std::ofstream& ofs); + void deserialize(std::ifstream& ifs); }; struct PathSegment @@ -81,5 +123,5 @@ struct ShadeableIntersection { float t; glm::vec3 surfaceNormal; - int materialId; + unsigned char materialId; };