In the previous article, we implemented depth texture sampling under URP. However, because we sampled the depth texture with the model's own UVs, the result was wrong. In this article, we fix that by sampling the depth texture with screen coordinates instead.
First, recall that the homogeneous clip-space position goes through a perspective division.
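As a quick reminder, the hardware divides by w after the vertex stage, mapping clip space to normalized device coordinates (this is standard pipeline behavior, nothing URP-specific):

$$(x,\ y,\ z,\ w)\ \xrightarrow{\ \div\, w\ }\ \left(\frac{x}{w},\ \frac{y}{w},\ \frac{z}{w},\ 1\right),\qquad \frac{x}{w},\ \frac{y}{w} \in [-1,\ 1]$$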
Then, working through the steps below, we can simplify and obtain the result of this function (ComputeScreenPos, which we will use later). For the x component it effectively computes:
$$\frac{x_{screen}}{width} \cdot w$$

When $x_{screen} = 0$:

$$\frac{x_{screen}}{width} \cdot w = 0$$

When $x_{screen} = width$:

$$\frac{x_{screen}}{width} \cdot w = w$$

(The y component is analogous, with $height$ in place of $width$.)
This gives the final range of o:

$$o = ([0,w],\ [0,w],\ z,\ w)$$

and after the perspective division by w:

$$\left(\frac{[0,w]}{w},\ \frac{[0,w]}{w},\ \frac{z}{w},\ 1\right)$$

$$\left([0,1],\ [0,1],\ \frac{z}{w},\ 1\right)$$

so the xy components land exactly in the [0,1] range we need for a screen-space UV.
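For reference, URP's own ComputeScreenPos performs exactly this remapping. The sketch below is paraphrased from the URP ShaderLibrary (the exact file and details vary by package version), so treat it as illustrative rather than the literal source:

float4 ComputeScreenPos(float4 positionCS)
{
    //Remap xy from [-w, w] to [0, w]; the division by w is left to the caller.
    float4 o = positionCS * 0.5f;
    //_ProjectionParams.x is +1 or -1 and flips y on platforms that render upside down.
    o.xy = float2(o.x, o.y * _ProjectionParams.x) + o.w;
    o.zw = positionCS.zw;
    return o;
}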
//Method 1: ComputeScreenPos
//In Varyings:
float4 screenPos : TEXCOORD1;
//In the vertex shader:
o.screenPos = ComputeScreenPos(o.positionCS);
//In the fragment shader, divide by w to get the [0,1] screen UV:
float2 uv = i.screenPos.xy / i.screenPos.w;
float4 cameraDepthTex = SAMPLE_TEXTURE2D(_CameraDepthTexture,sampler_CameraDepthTexture,uv);
float depthTex = Linear01Depth(cameraDepthTex.r,_ZBufferParams);
return depthTex;
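Note that the division by w must happen in the fragment shader: screenPos is interpolated across the triangle, and dividing the interpolated xy by the interpolated w per pixel is what gives the correct screen position; dividing in the vertex shader and interpolating the result would distort the UV across the triangle. Alternatively, we can skip the extra interpolator entirely, because in the fragment stage SV_POSITION already holds pixel coordinates: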
//Method 2: SV_POSITION pixel coordinates
//In the fragment stage positionCS.xy lies in [0,width] x [0,height] (sampled at pixel centers),
//so dividing by the screen size in _ScreenParams.xy gives a [0,1] UV directly.
float2 uv = i.positionCS.xy / _ScreenParams.xy;
float4 cameraDepthTex = SAMPLE_TEXTURE2D(_CameraDepthTexture,sampler_CameraDepthTexture,uv);
float depthTex = Linear01Depth(cameraDepthTex.r,_ZBufferParams);
return depthTex;
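Putting everything together, here is the full shader. It uses method 2; method 1 is left commented out in the fragment shader so you can switch between the two: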
Shader "MyShader/URP/P4_1"
{
    Properties
    {
        _Color("Color",Color) = (0,0,0,0)
        _MainTex("MainTex",2D) = "white"{}
    }
    SubShader
    {
        Tags
        {
            //Tell the engine this shader is only for the URP render pipeline
            "RenderPipeline"="UniversalPipeline"
            //Render type
            "RenderType"="Transparent"
            //Render queue
            "Queue"="Transparent"
        }
        //Blend One One
        ZWrite Off
        Pass
        {
            Name "Unlit"
            HLSLPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            // Pragmas
            #pragma target 2.0
            // Includes
            #include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Color.hlsl"
            #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
            #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Input.hlsl"
            CBUFFER_START(UnityPerMaterial)
            half4 _Color;
            CBUFFER_END
            //Texture declaration: when compiled for GLES2.0 this is equivalent to sampler2D _MainTex; otherwise to Texture2D _MainTex;
            TEXTURE2D(_MainTex);SAMPLER(SamplerState_linear_mirrorU_ClampV); float4 _MainTex_ST;
            TEXTURE2D(_CameraDepthTexture);SAMPLER(sampler_CameraDepthTexture);
            //struct appdata
            //Vertex shader input
            struct Attributes
            {
                float3 positionOS : POSITION;
                float2 uv : TEXCOORD0;
            };
            //struct v2f
            //Fragment shader input
            struct Varyings
            {
                float4 positionCS : SV_POSITION;
                float2 uv : TEXCOORD0;
                float4 screenPos : TEXCOORD1;
            };
            //v2f vert(Attributes v)
            //Vertex shader
            Varyings vert(Attributes v)
            {
                Varyings o = (Varyings)0;
                float3 positionWS = TransformObjectToWorld(v.positionOS);
                o.positionCS = TransformWorldToHClip(positionWS);
                o.uv = TRANSFORM_TEX(v.uv,_MainTex);
                o.screenPos = ComputeScreenPos(o.positionCS);
                return o;
            }
            //fixed4 frag(v2f i) : SV_TARGET
            //Fragment shader
            half4 frag(Varyings i) : SV_TARGET
            {
                half4 c;
                float4 mainTex = SAMPLE_TEXTURE2D(_MainTex,SamplerState_linear_mirrorU_ClampV,i.uv);
                //c = _Color * mainTex;
                //Depth texture
                //Method 1: float2 uv = i.screenPos.xy / i.screenPos.w;
                //Method 2: SV_POSITION holds pixel coordinates in the fragment stage
                float2 uv = i.positionCS.xy / _ScreenParams.xy;
                float4 cameraDepthTex = SAMPLE_TEXTURE2D(_CameraDepthTexture,sampler_CameraDepthTexture,uv);
                float depthTex = Linear01Depth(cameraDepthTex.r,_ZBufferParams);
                return depthTex;
            }
            ENDHLSL
        }
    }
    FallBack "Hidden/Shader Graph/FallbackError"
}
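Two last notes. First, _CameraDepthTexture is only generated when Depth Texture is enabled on the URP Asset (or overridden on the camera); without it the sample will not contain valid depth. Second, instead of declaring _CameraDepthTexture and its sampler by hand, URP ships a helper include. A minimal sketch of that variant, assuming the DeclareDepthTexture.hlsl include available in recent URP versions:

//Alternative: let URP declare and sample the depth texture for us.
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareDepthTexture.hlsl"

half4 frag(Varyings i) : SV_TARGET
{
    //GetNormalizedScreenSpaceUV wraps the positionCS.xy / _ScreenParams.xy step.
    float2 uv = GetNormalizedScreenSpaceUV(i.positionCS);
    //SampleSceneDepth reads _CameraDepthTexture at that screen UV.
    float depthTex = Linear01Depth(SampleSceneDepth(uv), _ZBufferParams);
    return depthTex;
}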