记录一下最近优化场景的做法:视差渲染
原理:根据视口(相机)坐标的变化,切换显示对应角度的采样画面,画面的载体为低模平面
我早期工作,在小作坊全栈的时候,做过一段时间web开发,做了一个古董藏品的h5展示,做法是360度拍摄各个角度的照片,然后在js中通过用户滑动屏幕切换不同角度的古董照片,模拟3d渲染的功能。
三维引擎中照样也可以使用这招,照片列表可以用大图uv切分或texture3d
这里我用texture3d,原理以前讲过,这里不再重复
首先需要写一个camera路径采样objectRT并生成texture3d的工具
using System.Collections;
using System.Collections.Generic;
using UnityEditor;
using UnityEngine;
[ExecuteInEditMode]
public class OpticalParallaxPanoramaTextureCreator : MonoBehaviour
{
    // NOTE(review): this script references UnityEditor (AssetDatabase) unconditionally,
    // so it must live in an Editor folder (or be stripped from player builds) — confirm.
    public Camera capCamera;
    public Transform viewPoint;        // look-at target of every sample shot
    public Transform fromPos;          // first camera position on the capture path
    public Transform toPos;            // last camera position on the capture path
    public int MAX_SAMPLE_COUNT = 32;  // number of slices baked into the Texture3D
    public int sampleWidth = 512;
    public int sampleHeight = 512;
    private int sampleCount = 0;       // next free slot in sampleTexes
    private Texture2D[] sampleTexes;   // captured slices, in path order

#if UNITY_EDITOR
    private void Update()
    {
        // Visualize the capture arc (view angle range) in the scene view.
        Debug.DrawLine(viewPoint.position, fromPos.position, Color.red);
        Debug.DrawLine(viewPoint.position, toPos.position, Color.red);
    }
#endif

    /// <summary>
    /// Walks the capture camera along the fromPos -> toPos segment, renders one
    /// slice per step (always looking at viewPoint) and bakes the slices into a
    /// Texture3D asset.
    /// </summary>
    public void OnCreate()
    {
        sampleCount = 0;
        sampleTexes = new Texture2D[MAX_SAMPLE_COUNT];
        for (int i = 0; i < MAX_SAMPLE_COUNT; i++)
        {
            // Guard MAX_SAMPLE_COUNT == 1: the original 0/0 division yields NaN.
            float t = MAX_SAMPLE_COUNT > 1 ? (float)i / (MAX_SAMPLE_COUNT - 1) : 0f;
            Vector3 wpos = Vector3.Lerp(fromPos.position, toPos.position, t);
            capCamera.transform.position = wpos;
            capCamera.transform.LookAt(viewPoint);
            SampleTex();
        }
        CreateTex3D(sampleWidth, sampleHeight, sampleTexes);
    }

    /// <summary>
    /// Captures one slice from the current camera pose and stores it.
    /// </summary>
    public void SampleTex()
    {
        Texture2D tex2d = CaptureCamera(capCamera, sampleWidth, sampleHeight);
        sampleTexes[sampleCount] = tex2d;
        sampleCount++;
    }

    /// <summary>
    /// Packs the captured slices into a Texture3D and saves it as an asset.
    /// </summary>
    /// <param name="wid">Slice width in pixels.</param>
    /// <param name="hei">Slice height in pixels.</param>
    /// <param name="texarr">Slices in Z order; each must be wid x hei.</param>
    public void CreateTex3D(int wid, int hei, Texture2D[] texarr)
    {
        Texture3D tex3d = new Texture3D(wid, hei, texarr.Length, TextureFormat.RGBA32, false);
        // Pre-sized array instead of a growing List: one pixel block per Z slice.
        Color[] allPixels = new Color[wid * hei * texarr.Length];
        for (int i = 0; i < texarr.Length; i++)
        {
            texarr[i].GetPixels().CopyTo(allPixels, i * wid * hei);
        }
        tex3d.SetPixels(allPixels);
        tex3d.Apply();
        AssetDatabase.CreateAsset(tex3d, "Assets/OpticalParallax/parallax_tex3d.asset");
        AssetDatabase.Refresh();
        // Release the temporary slice textures; they are no longer needed once baked.
        // DestroyImmediate is required here: Object.Destroy errors in edit mode.
        for (int i = 0; i < texarr.Length; i++)
        {
            if (texarr[i] != null)
            {
                DestroyImmediate(texarr[i]);
                texarr[i] = null;
            }
        }
    }

    /// <summary>
    /// Renders the camera into a temporary RenderTexture and reads it back
    /// into a CPU-side Texture2D.
    /// </summary>
    /// <param name="cam">Camera to render.</param>
    /// <param name="wid">Capture width in pixels.</param>
    /// <param name="hei">Capture height in pixels.</param>
    /// <returns>A new readable Texture2D holding the rendered frame.</returns>
    private Texture2D CaptureCamera(Camera cam, int wid, int hei)
    {
        // 24-bit depth buffer: with depth 0 the scene render has no z-testing
        // and geometry sorts incorrectly.
        RenderTexture rt = new RenderTexture(wid, hei, 24);
        RenderTexture prevActive = RenderTexture.active;
        RenderTexture prevTarget = cam.targetTexture;
        cam.targetTexture = rt;
        cam.Render();
        RenderTexture.active = rt;
        // RGBA32 to match the Texture3D format assembled in CreateTex3D.
        Texture2D tex = new Texture2D(wid, hei, TextureFormat.RGBA32, false);
        tex.ReadPixels(new Rect(0, 0, wid, hei), 0, 0);
        tex.Apply();
        // Restore previous state instead of clobbering it with null.
        cam.targetTexture = prevTarget;
        RenderTexture.active = prevActive;
        rt.Release();
        // Edit-mode tool: Destroy() is not allowed outside play mode.
        DestroyImmediate(rt);
        return tex;
    }
}
效果如下:
这里两条红线代表可视范围张角
我采样了32张512*512 RGBA四通道的texture3d,纹理大小达到了32mb,算是比较大了;想要缩小容量,可以降低采样数量、降低分辨率,或改用RGB三通道
接下来实现视差渲染效果
// Unlit parallax-panorama shader: samples one Z slice of a baked Texture3D.
// _ZDimen selects the slice (normalized [0,1]) and is driven per-frame by
// OPPanoramaPanel based on the main camera position.
Shader "OpticalParallax/OP Panorama UnlitShader"
{
Properties
{
_MainTex ("Texture", 3D) = "white" {}
_ZDimen("Z Dimension",Range(0,1)) = 0
}
SubShader
{
Tags { "RenderType"="Transparent" "Queue"="Transparent" }
LOD 100
Pass
{
// Standard alpha blending: slice alpha comes from the captured RGBA frames.
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
// xy = 2D texture coords, z = slice coordinate into the Texture3D.
float3 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
sampler3D _MainTex;
float4 _MainTex_ST;
float _ZDimen;
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
// Append the slice coordinate as the third sampling dimension.
o.uv = float3(TRANSFORM_TEX(v.uv, _MainTex),_ZDimen);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
// Single 3D texture fetch: uv.xy picks the pixel, uv.z picks the slice.
fixed4 col = tex3D(_MainTex, i.uv);
return col;
}
ENDCG
}
}
}
着色器中简简单单一个texture3d z dimension采样
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class OPPanoramaPanel : MonoBehaviour
{
    public int MAX_SAMPLE_COUNT = 32;  // must match the slice count the Texture3D was baked with
    public Transform viewPoint;        // look-at target, local to this panel (must match bake setup)
    public Transform fromPos;          // bake-path start, local space
    public Transform toPos;            // bake-path end, local space
    private Transform mainCam;
    public Material mat;               // material using the OP Panorama shader
    private Vector3 vp2FromNDir;       // normalized direction viewPoint -> fromPos
    private Vector3 vp2ToNDir;         // normalized direction viewPoint -> toPos
    public float sampleOffset = 0.5f;  // half-texel offset so we sample slice centers

    // Cache the property id; avoids a string hash every frame.
    private static readonly int ZDimenId = Shader.PropertyToID("_ZDimen");

    void Start()
    {
        mainCam = Camera.main.transform;
        vp2FromNDir = fromPos.localPosition.normalized;
        vp2ToNDir = toPos.localPosition.normalized;
    }

    void Update()
    {
        // Project the camera's position onto the from->to capture arc and
        // turn it into a normalized slice coordinate.
        Vector3 vp2cam = mainCam.position - viewPoint.position;
        float vp2camlen = vp2cam.magnitude;
        Vector3 vp2fromext = vp2camlen * vp2FromNDir;
        Vector3 vp2toext = vp2camlen * vp2ToNDir;
        float zdimen = (vp2cam - vp2fromext).magnitude / (vp2toext - vp2fromext).magnitude;
        // Texture3D slice i is stored centered at (i + 0.5) / n, so snap to
        // slice centers (sampleOffset = 0.5) to avoid inter-slice blurring.
        if (zdimen < 0)
        {
            // NOTE(review): zdimen is a ratio of magnitudes and cannot go
            // negative; kept as a defensive lower clamp.
            zdimen = sampleOffset / (float)MAX_SAMPLE_COUNT;
        }
        else if (zdimen > 1)
        {
            // BUGFIX: was `MAX - offset / MAX` (precedence bug, ~31.98);
            // intended value is the center of the last slice, (MAX - offset) / MAX.
            zdimen = ((float)MAX_SAMPLE_COUNT - sampleOffset) / (float)MAX_SAMPLE_COUNT;
        }
        else
        {
            // Clamp the slice index: zdimen == 1 would floor to MAX_SAMPLE_COUNT
            // and push the coordinate above 1.
            int slice = Mathf.Min(Mathf.FloorToInt(zdimen * MAX_SAMPLE_COUNT), MAX_SAMPLE_COUNT - 1);
            zdimen = (slice + sampleOffset) / (float)MAX_SAMPLE_COUNT;
        }
        mat.SetFloat(ZDimenId, zdimen);
    }
}
控制器无非就是根据camerapos进行插值计算。需要注意视点和起始终止相对坐标和texture3d采样配参一致,同时注意因为texture3d的储存方式从0号纹理开始已经有0.5的插值预留,所以我们进行zdimension计算的时候,用0.5的偏移做精准采样,避免模糊。
最终效果如下:
这样就只用一个quad就模拟出从这辆车旁走来走去的感觉
当然这种“穷举”采样的方式优缺点明显
优点:视差范围可控,想小就小想大就大,采样方式可自主控制
缺点:采样纹理过大,所以想要达到正向效果,必须采样的对象是超精细场景或模型,不然得不偿失
适用范围:比较适合做不用交互但是又要看到的动态场景
ok,今天到这,后面有时间说一下另外一种视差渲染