Josh Posted July 8, 2017 Share Posted July 8, 2017 This is just something I wanted to try. I added scattering to the SSR shader to make a screen-space GI effect. Replace the SSR fragment shader with this code:

/*---------------------------------------------------------------------------
-- SSLR (Screen-Space Local Reflections) by Igor Katrich and Shadmar 26/03/2015
-- Email: igorbgz@outlook.com
-- Modified: rays are jittered ("scattering") so the gather approximates
-- one-bounce screen-space GI instead of a mirror reflection.
//---------------------------------------------------------------------------*/
#version 400

uniform sampler2DMS texture0;   // depth buffer
uniform sampler2D   texture1;   // scene color
uniform sampler2DMS texture2;   // g-buffer normals (rgb) + material flags (a)
uniform sampler2DMS texture3;   // g-buffer specular (unused while specularity is hard-coded)
uniform bool isbackbuffer;
uniform vec2 buffersize;
uniform mat4 projectioncameramatrix;
uniform vec3 cameraposition;
uniform mat3 cameranormalmatrix;
uniform mat3 camerainversenormalmatrix;

//User variables
#define reflectionfalloff 10.0f
#define raylength 2.2f
#define maxstep 5
#define edgefadefactor 0.95f
#define hitThreshold 0.5
#define maxsamples 64
#define scattering 1.0

out vec4 fragData0;

// Reconstructs the view-space position of the pixel at texCoord by
// unprojecting the depth-buffer value. The raw depth is also returned
// through the out-parameter z so callers can reuse it.
vec4 getPosition(in vec2 texCoord, out float z)
{
	float x = texCoord.s * 2.0f - 1.0f;
	float y = texCoord.t * 2.0f - 1.0f;
	z = texelFetch(texture0, ivec2(texCoord * buffersize), 0).r;
	vec4 posProj = vec4(x, y, z, 1.0f);
	vec4 posView = inverse(projectioncameramatrix) * posProj;
	posView /= posView.w;
	return posView;
}

// Builds an orthonormal basis whose z axis is the given vector.
mat3 vec3tomat3(in vec3 z)
{
	mat3 mat;
	mat[2] = z;
	vec3 v = vec3(z.z, z.x, -z.y);   // a vector guaranteed not parallel to z
	mat[0] = cross(z, v);            // cross product is the x axis
	mat[1] = cross(mat[0], z);       // cross product is the y axis
	return mat;
}

// Cheap screen-space hash, returns a pseudo-random value in [0,1).
float rand(vec2 co)
{
	return fract(sin(dot(co.xy, vec2(12.9898, 78.233))) * 43758.5453);
}

// Minimum distance between the infinite line through v and w, and point p.
// (t is deliberately not clamped to [0,1], so this is a line, not a segment.)
float LinePointDistance(in vec3 v, in vec3 w, in vec3 p)
{
	vec3 d = w - v;
	float l2 = d.x * d.x + d.y * d.y + d.z * d.z;   // |w-v|^2 - avoid a sqrt
	if (l2 == 0.0) return distance(p, v);           // v == w case
	float t = dot(p - v, w - v) / l2;
	vec3 projection = v + t * (w - v);              // projection of p onto the line
	return distance(p, projection);
}

void main(void)
{
	vec2 icoord = vec2(gl_FragCoord.xy / buffersize);
	if (isbackbuffer) icoord.y = 1.0f - icoord.y;

	//Get screen color
	vec4 color = texture(texture1, icoord);

	//Get normal + alpha channel.
	vec4 n = texelFetch(texture2, ivec2(icoord * buffersize), 0);
	vec3 normalView = normalize(n.xyz * 2.0f - 1.0f);

	//Get roughness from gbuffer (normal.a)
	int materialflags = int(n.a * 255.0 + 0.5);
	int roughness = 1;
	//if ((32 & materialflags)!=0) roughness += 4;
	//if ((64 & materialflags)!=0) roughness += 2;

	//Get specmap from gbuffer (hard-coded to 1.0 for this GI experiment)
	float specularity = 1.0;//texelFetch(texture3, ivec2(icoord*buffersize),0).a;

	vec4 reflection = vec4(0.0f);
	int reflectionsamples = 0;
	float factor = 1.0;//normalView.x * normalView.y;

	//only compute if we have specularity
	if (specularity > 0.0f)
	{
		//Loop-invariant setup, hoisted out of the sample loop:
		//position reconstruction (which inverts the projection matrix),
		//the normal-space basis, and the view direction.
		float z;
		vec3 posView = getPosition(icoord, z).xyz;
		mat3 rotMat = vec3tomat3(normalView);
		vec3 viewDir = normalize(posView - cameraposition);

		//s (not n) to avoid shadowing the g-buffer fetch above
		for (int s = 0; s < maxsamples; ++s)
		{
			//Per-sample jitter, hashed from the sample index and fragment coords
			vec3 vRotation = vec3(
				rand(float(s) * factor + gl_FragCoord.xy * gl_FragCoord.zx),
				rand(float(s) * factor + gl_FragCoord.zy * -gl_FragCoord.xz),
				rand(float(s) * factor + gl_FragCoord.xz * gl_FragCoord.yx));
			vRotation = (vRotation - 0.5) * (scattering);
			//vRotation = vec3(0.0,0.0,1.0);

			//Reflect the view ray around the jittered normal
			vec4 reflectedColor = color;
			vec3 reflected = normalize(reflect(viewDir, normalize(rotMat * vRotation)));
			float rayLength = raylength;
			vec4 T = vec4(0.0f);
			vec3 newPos;

			//Raytrace: march along the reflected ray in view space,
			//projecting each step back into screen space.
			for (int i = 0; i < maxstep; i++)
			{
				newPos = posView + reflected * rayLength;
				T = projectioncameramatrix * vec4(newPos, 1.0f);
				T.xy = vec2(0.5f) + 0.5f * T.xy / T.w;
				T.z /= T.w;
				if (abs(z - T.z) < 1.0f && T.x <= 1.0f && T.x >= 0.0f && T.y <= 1.0f && T.y >= 0.0f)
				{
					float depth;
					newPos = getPosition(T.xy, depth).xyz;
					rayLength = length(posView - newPos);

					//Check distance of this pixel to the reflection ray.
					//If it's close enough we count it as a hit.
					if (LinePointDistance(posView, posView + reflected, newPos) < hitThreshold)
					{
						//Get the pixel at this normal
						vec4 n1 = texelFetch(texture2, ivec2(T.xy * buffersize), 0);
						vec3 normalView1 = normalize(n1.xyz * 2.0f - 1.0f);

						//Make sure the pixel faces the reflection vector
						//if (dot(reflected,normalView1)<0.0f)
						//{
						/*float m = max(1.0f-T.y,0.0f);
						m = max(1.0f-T.x,m);
						m += roughness * 0.1f; */
						float m = 0.5;
						vec4 rcol = texture(texture1, T.xy);
						reflectedColor = mix(rcol, color, clamp(m, 0.0f, 1.0f));
						//reflectedColor = rcol + color;
						//reflectedColor = max(rcol, color);

						//Fading to screen edges
						vec2 fadeToScreenEdge = vec2(1.0f);
						float edgedistance[2];
						edgedistance[1] = 0.20;
						edgedistance[0] = edgedistance[1] * (buffersize.y / buffersize.x);
						if (T.x < edgedistance[0])
						{
							fadeToScreenEdge.x = T.x / edgedistance[0];
						}
						else if (T.x > 1.0 - edgedistance[0])
						{
							fadeToScreenEdge.x = 1.0 - ((T.x - (1.0 - edgedistance[0])) / edgedistance[0]);
						}
						if (T.y < edgedistance[1])
						{
							fadeToScreenEdge.y = T.y / edgedistance[1];
						}
						else if (T.y > 1.0 - edgedistance[1])
						{
							fadeToScreenEdge.y = 1.0 - (T.y - (1.0 - edgedistance[1])) / edgedistance[1];
						}

						//NOTE: reflectionfalloff is 10.0, so fresnel clamps to 1.0;
						//the commented term below would make it view-dependent.
						float fresnel = reflectionfalloff;// * (1.0f-(pow(dot(normalize(posView-cameraposition), normalize(normalView)), 2.0f)));
						fresnel = clamp(fresnel, 0.0f, 1.0f);
						reflection += mix(color, reflectedColor, clamp(fresnel * fadeToScreenEdge.x * fadeToScreenEdge.y, 0.0f, 1.0f) * specularity);
						reflectionsamples++;
						//We hit the pixel, so we're done, right?
						break;
						//}
					}
				}
				else
				{
					break;//exit because we're out of the texture
				}
			}
		}
	}

	//Average all hit samples; fall back to plain scene color when nothing hit.
	if (reflectionsamples > 0)
	{
		fragData0 = clamp(reflection / float(reflectionsamples), 0.0, 1.0);
	}
	else
	{
		fragData0 = color;
	}
}

4 Quote My job is to make tools you love, with the features you want, and performance you can't live without. Link to comment Share on other sites More sharing options...
gamecreator Posted July 8, 2017 Share Posted July 8, 2017 I don't know about the change but the screenshot looks "noisy." Maybe that's the point but I prefer the clean look. Quote Link to comment Share on other sites More sharing options...
nick.ace Posted July 10, 2017 Share Posted July 10, 2017 @gamecreator It looks noisy because it uses a massive number of rays for the raycasts. People often use cone-tracing with a mipmap chain instead of scattering rays because of the cache thrashing and ridiculous amounts of memory lookups. "GPU Pro 5" has a nice chapter about this called "Hi-Z Screen-Space Cone-Traced Reflections." Josh's post is a pretty interesting demo though. Quote Link to comment Share on other sites More sharing options...
Recommended Posts
Join the conversation
You can post now and register later. If you have an account, sign in now to post with your account.
Note: Your post will require moderator approval before it will be visible.