beyzend wrote:
How are you computing the input into SSAO? Anyway, I remember when I integrated SSAO from Ogre, I had to write a manual geometry pass material to output the correct normals because I generate my normals in a shader. I'm about to move to a deferred shading setup, though.
I am saving the Depth and Normals to the G-Buffer (normals saved in view space, depth in screen space) and using this shader (note: some variables were removed from actual use although they are still declared — I've got to clean it up):
Code:
// ---------------------------------------------------------------------------
// SSAO post-process: effect parameters, G-Buffer inputs, and sampler states.
// ---------------------------------------------------------------------------

// View matrix — NOTE(review): declared but no longer used (leftover from the
// commented-out transform in GetNormal); candidate for removal.
float4x4 View;
// Inverse projection matrix: unprojects screen-space positions back to view space.
float4x4 InvertProjection;
// Tiling 64x64 random-normal texture used to rotate the SSAO sample kernel.
// (Previous comment here described a diffuse/specularIntensity target — copy/paste leftover.)
texture gRandomNormals;
// G-Buffer normals (stored in view space), specularPower in the alpha channel.
texture normalMap;
// G-Buffer depth (screen space).
texture depthMap;

// Noise sampler: WRAP addressing so the 64x64 texture tiles across the screen.
sampler RandomNormals = sampler_state
{
Texture = (gRandomNormals);
AddressU = WRAP;
AddressV = WRAP;
MagFilter = LINEAR;
MinFilter = POINT;
Mipfilter = POINT;
};
// Depth sampler: POINT filtering — depth values must never be interpolated.
sampler depthSampler = sampler_state
{
Texture = (depthMap);
AddressU = CLAMP;
AddressV = CLAMP;
MagFilter = POINT;
MinFilter = POINT;
Mipfilter = POINT;
};
// Normal sampler: POINT filtering — packed normals must not be blended.
sampler normalSampler = sampler_state
{
Texture = (normalMap);
AddressU = CLAMP;
AddressV = CLAMP;
MagFilter = POINT;
MinFilter = POINT;
Mipfilter = POINT;
};

// Render-target size in pixels (tiles the noise texture 1:1 per screen pixel).
float2 ScreenSize;
// SSAO tunables (defaults look hand-tuned — presumably found empirically).
// Base sampling radius; divided by pixel depth in ps_main.
float OcclusionSampleRadius = 0.66271;
// Scales the sample distance before the 1/(1+d) attenuation.
float OcclusionScale = 1.75338;
// Minimum facing term before a sample counts as occluding (reduces self-occlusion).
float OcclusionBias = 0.05464;
// Overall occlusion strength multiplier.
float OcclusionIntensity = 3.04334;
// Ambient light color, darkened by the computed occlusion.
float3 gAmbientLight;

// Pixel-shader input: UVs from the full-screen quad.
struct PS_INPUT
{
float2 TexCoords : TEXCOORD0;
};

// Base kernel directions. Only the first 4 entries are read in ps_main
// (the loop runs i < 4); the diagonals are derived by a 45-degree rotation.
const float2 offsets[8] = {float2(1,0), float2(-1,0), float2(0,1), float2(0,-1), float2(1,1), float2(1,-1), float2(-1,1), float2(-1,-1)};
//Get pixel's position from position buffer
// Reconstruct the view-space position of a pixel from the depth buffer.
// Note: despite older comments, InvertProjection takes us back to VIEW
// space, not world space (only the projection is being undone).
float3 GetPosition(in float2 coords)
{
    float4 clipPos;
    // map UV [0,1] -> NDC [-1,1], flipping Y (texture space is top-down)
    clipPos.x = coords.x * 2.0f - 1.0f;
    clipPos.y = -(coords.y * 2.0f - 1.0f);
    // stored screen-space depth becomes the NDC z
    clipPos.z = tex2D(depthSampler, coords).r;
    clipPos.w = 1.0f;
    // unproject and undo the perspective divide
    float4 viewPos = mul(clipPos, InvertProjection);
    viewPos /= viewPos.w;
    return viewPos.xyz;
}
//Get pixel's normal from the normal buffer
// Fetch the pixel's normal from the G-Buffer, unpacking from the [0,1]
// storage range back to [-1,1]. The normal is assumed to have been written
// in view space already, so no View-matrix transform is needed here.
// (Removed dead commented-out code that multiplied the normal by View with
// w = 1 — that would have translated a direction vector, which is wrong;
// a direction must use w = 0.)
// NOTE(review): the result is not re-normalized; confirm the G-Buffer pass
// stores unit-length normals.
float3 GetNormal(in float2 coords)
{
    return tex2D(normalSampler, coords).xyz * 2.0f - 1.0f;
}
// Get random number from the random normal map
// Sample the tiling 64x64 noise texture to get a per-pixel random 2D
// direction, used to decorrelate the SSAO kernel between neighboring pixels.
float2 GetRandom(in float2 coords)
{
    // scale UVs so one noise texel maps to one screen pixel (64x64 tile)
    float2 noiseUV = coords * ScreenSize / 64;
    float2 packed = tex2D(RandomNormals, noiseUV).xy;
    // unpack from [0,1] to [-1,1] and normalize
    return normalize(packed * 2.0f - 1.0f);
}
// Occlusion contribution of one sample point relative to the shaded pixel.
//  Coords   - UV of the pixel being shaded
//  UV       - offset from Coords to the sample point
//  Position - view-space position of the shaded pixel
//  Normal   - view-space normal of the shaded pixel
float ApplyOcclusion(in float2 Coords, in float2 UV, in float3 Position, in float3 Normal)
{
    // vector from the shaded pixel to the sampled neighbour's position
    float3 toSample = GetPosition(Coords + UV) - Position;
    const float3 dir = normalize(toSample);
    // scaled distance feeds the 1/(1+d) falloff below
    const float dist = length(toSample) * OcclusionScale;
    // samples in front of the surface (beyond the bias) occlude,
    // attenuated by distance and scaled by the global intensity
    float facing = dot(Normal, dir) - OcclusionBias;
    return max(0.0, facing) * (1.0 / (1.0 + dist)) * OcclusionIntensity;
}
// SSAO pixel shader. Accumulates 16 occlusion samples per pixel — 4 base
// directions, each evaluated at 4 radii, alternating between the randomly
// reflected direction and its 45-degree rotation — then darkens the ambient
// term by the averaged occlusion. (Earlier comment claimed 32 samples; the
// loop actually takes 4 iterations x 4 samples = 16, matching the /16 below.)
float4 ps_main(PS_INPUT Input) : COLOR0
{
    float3 pixelPos = GetPosition(Input.TexCoords);
    float3 pixelNorm = GetNormal(Input.TexCoords);
    float2 rand = GetRandom(Input.TexCoords);

    // shrink the screen-space radius with depth so distant geometry
    // is sampled over a proportionally smaller neighborhood
    float radius = OcclusionSampleRadius / pixelPos.z;

    float occlusion = 0.0;
    for (int i = 0; i < 4; ++i)
    {
        // randomize the base direction, then derive its 45-degree rotation
        float2 dir = reflect(offsets[i], rand) * radius;
        float2 dir45 = float2(dir.x * 0.707 - dir.y * 0.707,
                              dir.x * 0.707 + dir.y * 0.707);
        // sample at 4 increasing fractions of the radius
        occlusion += ApplyOcclusion(Input.TexCoords, dir * 0.25, pixelPos, pixelNorm);
        occlusion += ApplyOcclusion(Input.TexCoords, dir45 * 0.5, pixelPos, pixelNorm);
        occlusion += ApplyOcclusion(Input.TexCoords, dir * 0.75, pixelPos, pixelNorm);
        occlusion += ApplyOcclusion(Input.TexCoords, dir45, pixelPos, pixelNorm);
    }
    // average the 16 samples and modulate the ambient light
    occlusion /= 16;
    return float4((1 - occlusion) * gAmbientLight, 0.0f);
}
// Single-pass technique: runs the SSAO pixel shader over a full-screen quad.
technique Technique0
{
pass Pass0
{
// compiled for Shader Model 3.0
PixelShader = compile ps_3_0 ps_main();
}
}
This is a very simple post-processing effect, though I might still need to optimize the shader.