In the previous article we implemented a basic decal effect, but several details still need work. In this article we refine those details of the decal shader.
First, fog. The decal pass uses additive blending (Blend One One), so the standard MixFog call, which blends toward the fog colour, would tint and brighten the decal inside fog:

col.rgb = MixFog(col.rgb, i.fogCoord);

Instead, the colour is premultiplied by alpha and scaled down as the fog factor grows (the finished shader below uses lerp(1, 0, i.fogCoord)):

col.rgb *= col.a;
col.rgb *= saturate(lerp(0.5, 0, i.fogCoord));
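For reference, URP's ShaderLibrary also provides MixFogColor, which mixes toward an arbitrary fog colour; fading an additive pass toward black with it is an alternative to the manual lerp above. A minimal sketch of that option (not what this article's shader does):

//Alternative: fog the additive contribution toward black using the same fog factor
col.rgb *= col.a;
col.rgb = MixFogColor(col.rgb, half3(0.0, 0.0, 0.0), i.fogCoord);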
Next, texture sampling. The main texture is declared with the DX11-style texture/sampler macros and sampled through an inline sampler state whose name encodes its filter and wrap modes, here linear filtering with clamp wrapping so the decal does not repeat when the projected UVs fall outside the 0-1 range:

TEXTURE2D(_MainTex);
#define smp _linear_clamp
SAMPLER(smp);
half4 mainTex = SAMPLE_TEXTURE2D(_MainTex, smp, uv);
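Unity parses the filter and wrap modes directly from the sampler's name, so other combinations follow the same pattern. A small sketch with a hypothetical point-filtered, repeating sampler (not used by this shader):

SAMPLER(sampler_point_repeat);
half4 pointSample = SAMPLE_TEXTURE2D(_MainTex, sampler_point_repeat, uv);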
Finally, the shader samples _CameraDepthTexture, so the camera has to generate a depth texture. In the Built-in Render Pipeline that is requested from a script:

Camera.main.depthTextureMode = DepthTextureMode.Depth;

In URP, enable the Depth Texture option on the Universal Render Pipeline Asset (or override it per camera) instead.
With those changes in place, the complete shader is shown below; the first SubShader is the URP version and the second is a fallback for the Built-in render pipeline.
//Depth decal
Shader "MyShader/URP/P4_4_2"
{
Properties
{
[Header(MainTex)]
_MainTex("MainTex",2D) = "white"{}
}
SubShader
{
Tags
{
//Tell the engine this SubShader targets the URP render pipeline
"RenderPipeline"="UniversalPipeline"
//Render type
"RenderType"="Transparent"
//Render queue
"Queue"="Transparent"
}
Pass
{
Blend One One
ZWrite Off
Name "Unlit"
HLSLPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fog
// Pragmas
#pragma target 2.0
// Includes
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Color.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Input.hlsl"
CBUFFER_START(UnityPerMaterial)
float4 _MainTex_ST;
CBUFFER_END
TEXTURE2D(_CameraDepthTexture);
SAMPLER(sampler_CameraDepthTexture);
TEXTURE2D(_MainTex);
//SAMPLER(sampler_MainTex);
#define smp _linear_clamp
SAMPLER(smp);
//struct appdata
//Vertex shader input
struct Attributes
{
float3 positionOS : POSITION;
float2 uv : TEXCOORD0;
};
//struct v2f
//Fragment shader input
struct Varyings
{
float4 positionCS : SV_POSITION;
float2 uv : TEXCOORD0;
float fogCoord : TEXCOORD1;
float3 positionOS : TEXCOORD2;
float3 positionVS : TEXCOORD3;
};
//v2f vert(Attributes v)
//Vertex shader
Varyings vert(Attributes v)
{
Varyings o;
o.positionCS = TransformObjectToHClip(v.positionOS);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
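//Per-vertex fog factor, used in the fragment shader to fade the additive decal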
o.fogCoord = ComputeFogFactor(o.positionCS.z);
o.positionOS = v.positionOS;
//2. Use the decal mesh itself to get the fragment's view-space position
o.positionVS = TransformWorldToView(TransformObjectToWorld(o.positionOS));
return o;
}
//fixed4 frag(v2f i) : SV_TARGET
//Fragment shader
half4 frag(Varyings i) : SV_TARGET
{
//Approach:
float4 depthVS = 1;
//1. Sample the depth texture to get the view-space Z of the scene pixel behind this fragment
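//(SV_POSITION arrives in the fragment stage in pixel coordinates, so dividing by _ScreenParams.xy gives 0-1 screen UVs)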
float2 screenUV = i.positionCS.xy / _ScreenParams.xy;
half4 depthTex = SAMPLE_TEXTURE2D(_CameraDepthTexture, sampler_CameraDepthTexture, screenUV);
half depthZ = LinearEyeDepth(depthTex.r, _ZBufferParams);
//2. Use the decal mesh itself to get the fragment's view-space position
//(done in the vertex shader)
//3. Combine the two to reconstruct the full view-space XYZ of the scene pixel
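//The interpolated positionVS lies on the view ray through this pixel: dividing by -positionVS.z
//normalizes the ray to a depth of 1, and scaling by depthZ lands on the surface the depth buffer
//recorded; z is kept positive to match how unity_CameraToWorld is used below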
depthVS.z = depthZ;
depthVS.xy = i.positionVS.xy * depthZ / -i.positionVS.z;
//4. Transform that position into the decal's object space and use its XZ as UVs for sampling
float4 depthWS = mul(unity_CameraToWorld, depthVS);
float4 depthOS = mul(unity_WorldToObject, depthWS);
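//Assuming the decal volume is the default Unity cube, object space spans -0.5 to 0.5,
//so adding 0.5 remaps XZ into the 0-1 UV range (the decal projects along the volume's local Y axis)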
float2 uv = depthOS.xz + 0.5;
half4 col = 0;
half4 mainTex = SAMPLE_TEXTURE2D(_MainTex, smp, uv);
col += mainTex;
//Fog blending for the Blend One One additive transparency
col.rgb *= col.a;
col.rgb *= saturate(lerp(1, 0, i.fogCoord));
//col.rgb = MixFog(col.rgb,i.fogCoord);
return col;
}
ENDHLSL
}
}
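//Fallback SubShader for the Built-in render pipeline; it mirrors the URP pass above using CG syntax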
SubShader
{
Tags
{
//Render type
"RenderType"="Transparent"
//Render queue
"Queue"="Transparent"
}
Pass
{
Blend One One
ZWrite Off
Name "Unlit"
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fog
// Pragmas
#pragma target 2.0
// Includes
#include "UnityCG.cginc"
sampler2D _MainTex;
float4 _MainTex_ST;
sampler2D _CameraDepthTexture;
struct appdata
{
float3 positionOS : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float4 positionCS : SV_POSITION;
float2 uv : TEXCOORD0;
float3 positionOS : TEXCOORD1;
float3 positionVS : TEXCOORD2;
};
v2f vert(appdata v)
{
v2f o;
o.positionCS = UnityObjectToClipPos(v.positionOS);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
o.positionOS = v.positionOS;
//2. Use the decal mesh itself to get the fragment's view-space position
o.positionVS = UnityWorldToViewPos(mul(unity_ObjectToWorld, o.positionOS));
return o;
}
half4 frag(v2f i) : SV_TARGET
{
//Approach:
float4 depthVS = 1;
//1. Sample the depth texture to get the view-space Z of the scene pixel behind this fragment
float2 screenUV = i.positionCS.xy / _ScreenParams.xy;
half4 depthTex = tex2D(_CameraDepthTexture, screenUV);
half depthZ = LinearEyeDepth(depthTex.r);
//2. Use the decal mesh itself to get the fragment's view-space position
//(done in the vertex shader)
//3. Combine the two to reconstruct the full view-space XYZ of the scene pixel
depthVS.z = depthZ;
depthVS.xy = i.positionVS.xy * depthZ / -i.positionVS.z;
//4. Transform that position into the decal's object space and use its XZ as UVs for sampling
float4 depthWS = mul(unity_CameraToWorld, depthVS);
float4 depthOS = mul(unity_WorldToObject, depthWS);
float2 uv = depthOS.xz + 0.5;
half4 col = 0;
half4 mainTex = tex2D(_MainTex, uv);
col += mainTex;
//Fog blending for the Blend One One additive transparency
col.rgb *= col.a;
return col;
}
ENDCG
}
}
}
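To recap the core of the technique, the reconstruction performed in steps 1-3 can be written as a standalone helper. This is only a reference sketch; the function name ReconstructScenePositionVS is mine and does not appear in the shader above:

//Reconstructs the view-space position of the scene pixel seen through the decal volume.
//positionVS : interpolated view-space position of the decal surface (from the vertex shader)
//sceneDepth : linear eye depth sampled from _CameraDepthTexture at this pixel
float3 ReconstructScenePositionVS(float3 positionVS, float sceneDepth)
{
    //View-space z is negative in front of the camera, so dividing by -positionVS.z
    //normalizes the view ray to a depth of 1; scaling by the scene depth then gives
    //the point the depth buffer actually recorded. z is returned positive, matching
    //how the shader feeds the result into unity_CameraToWorld.
    float2 rayXY = positionVS.xy / -positionVS.z;
    return float3(rayXY * sceneDepth, sceneDepth);
}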