BlendShape Morph Animation in Unity3D

1. Fixing the vertex-count change when importing a model into Unity3D

Meshes in Unity3D are triangle meshes, and when a model is imported directly, Unity can change the model's vertex count (vertices are duplicated along hard edges and UV seams during import). Workaround: select the imported model in the Project window, set Normals to None in the Import Settings and click Apply; then set Normals back to Import and click Apply again so the model is reimported.
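The same Normals toggle can also be scripted through the editor API. A minimal sketch, assuming the script is placed in an Editor folder (the menu path and class name are arbitrary):

using UnityEngine;
using UnityEditor;

public static class ReimportNormalsFix
{
    [MenuItem("Tools/Reimport Selected Model (Normals Fix)")]
    static void Reimport()
    {
        // Works on the model asset currently selected in the Project window.
        string path = AssetDatabase.GetAssetPath(Selection.activeObject);
        ModelImporter importer = AssetImporter.GetAtPath(path) as ModelImporter;
        if (importer == null)
        {
            Debug.Log("Selected asset is not a model.");
            return;
        }

        // Step 1: Normals = None, then Apply (reimport).
        importer.importNormals = ModelImporterNormals.None;
        importer.SaveAndReimport();

        // Step 2: Normals = Import, then Apply (reimport again).
        importer.importNormals = ModelImporterNormals.Import;
        importer.SaveAndReimport();
    }
}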


2. Programming BlendShape morph animation in Unity3D

Mainly based on http://forum.unity3d.com/threads/15424-Morph-Target-Script

In Unity3D, Mesh.vertices returns the vertex positions and Mesh.normals returns the vertex normals. Whenever a blend shape (BS) weight changes, Mesh.vertices and Mesh.normals have to be recomputed, and RecalculateBounds must then be called to update the mesh bounds.
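This is the standard morph-target formula: every vertex moves to base + weight * (target - base), summed over all active targets. A minimal single-target sketch of recomputing and writing the result back to a mesh (the class and field names here are placeholders, not part of the script developed below):

using UnityEngine;

public class MeshBlendSketch : MonoBehaviour
{
    public Mesh baseMesh;     // neutral expression
    public Mesh targetMesh;   // one blend shape target (same vertex count and order)
    [Range(0f, 1f)] public float weight;

    void Apply(Mesh workingMesh)
    {
        // Mesh.vertices returns a copy on every access, so cache the arrays once.
        Vector3[] baseVerts = baseMesh.vertices;
        Vector3[] targetVerts = targetMesh.vertices;
        Vector3[] blended = new Vector3[baseVerts.Length];

        for (int i = 0; i < baseVerts.Length; i++)
            blended[i] = baseVerts[i] + weight * (targetVerts[i] - baseVerts[i]);

        workingMesh.vertices = blended;   // write the new positions back
        workingMesh.RecalculateBounds();  // update bounds after moving vertices
    }
}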


Define the blend shape classes that record, for each blend shape target, the affected vertex positions and normals:


internal class BlendShapeVertex
{
    public int originalIndex; // index of this vertex in the base mesh
    public Vector3 position;  // position delta: target position - base position
    public Vector3 normal;    // normal delta: target normal - base normal
}

internal class BlendShape
{
    public BlendShapeVertex[] vertices; // only the vertices that differ from the base mesh
}


The originalIndex field records the vertex's index in the base mesh. In a blend shape target, many vertices are identical to the corresponding vertices of the base expression, so those vertices can be skipped when the blended expression is recomputed, which speeds up the calculation. Only the deltas are stored: when blending, each affected vertex becomes base[v] + Σ weight_i * (target_i[v] - base[v]), and every other vertex simply keeps its base value.


Building the blend shape data:

void BuildBlendShapes()
    {
        blendShapes = new BlendShape[attributeMeshes.Length];

        // Cache the arrays once: Mesh.vertices / Mesh.normals return a copy on every access.
        Vector3[] baseVertices = workingMesh.vertices;
        Vector3[] baseNormals = workingMesh.normals;

        for (int i = 0; i < attributeMeshes.Length; i++)
        {
            blendShapes[i] = new BlendShape();

            Vector3[] targetVertices = attributeMeshes[i].vertices;
            Vector3[] targetNormals = attributeMeshes[i].normals;

            // First pass: count the vertices that differ from the base mesh.
            int blendShapeCounter = 0;
            for (int j = 0; j < workingMesh.vertexCount; j++)
            {
                if (baseVertices[j] != targetVertices[j])
                {
                    blendShapeCounter++;
                }
            }

            // Second pass: store only the deltas (target - base) for those vertices.
            blendShapes[i].vertices = new BlendShapeVertex[blendShapeCounter];
            blendShapeCounter = 0;
            for (int j = 0; j < workingMesh.vertexCount; j++)
            {
                if (baseVertices[j] != targetVertices[j])
                {
                    BlendShapeVertex blendShapeVertex = new BlendShapeVertex();
                    blendShapeVertex.originalIndex = j;
                    blendShapeVertex.position = targetVertices[j] - baseVertices[j];
                    blendShapeVertex.normal = targetNormals[j] - baseNormals[j];

                    blendShapes[i].vertices[blendShapeCounter] = blendShapeVertex;
                    blendShapeCounter++;
                }
            }
        }
    }

Blending the blend shapes:


public void SetMorph()
    {
        // Start from a fresh copy of the base mesh every time (Mesh.vertices returns a copy).
        Vector3[] morphedVertices = sourceMesh.vertices;
        Vector3[] morphedNormals = sourceMesh.normals;

        for (int j = 0; j < attributeMeshes.Length; j++)
        {
            // Skip blend shapes whose weight is (approximately) zero.
            if (!Mathf.Approximately(attributeProgress[j], 0))
            {
                for (int i = 0; i < blendShapes[j].vertices.Length; i++)
                {
                    BlendShapeVertex bsv = blendShapes[j].vertices[i];
                    // Add the weighted delta onto the base vertex and normal.
                    morphedVertices[bsv.originalIndex] += bsv.position * attributeProgress[j];
                    morphedNormals[bsv.originalIndex] += bsv.normal * attributeProgress[j];
                }
            }
        }
        workingMesh.vertices = morphedVertices;
        workingMesh.normals = morphedNormals;
        workingMesh.RecalculateBounds();
    }

The complete BlendShape blending script in Unity3D:

using UnityEngine;
using System.Collections;

public class MorphTargets : MonoBehaviour
{
    internal class BlendShapeVertex
    {
        public int originalIndex; // index of this vertex in the base mesh
        public Vector3 position;  // position delta: target position - base position
        public Vector3 normal;    // normal delta: target normal - base normal
    }

    internal class BlendShape
    {
        public BlendShapeVertex[] vertices; // only the vertices that differ from the base mesh
    }

    public Mesh sourceMesh;           // the original (neutral) mesh
    public Mesh[] attributeMeshes;    // the target meshes, one per blend shape
    public float[] attributeProgress; // current weight of each blend shape
    private BlendShape[] blendShapes;
    private Mesh workingMesh;         // per-instance mesh that actually gets deformed

    void Awake()
    {
        attributeProgress = new float[attributeMeshes.Length];
        for (int i = 0; i < attributeMeshes.Length; i++)
        {
            if (attributeMeshes[i] == null)
            {
                Debug.Log("Attribute " + i + " has not been assigned.");
                return;
            }
        }

        // Assign the source mesh, then work on the instance copy returned by MeshFilter.mesh
        // so the shared mesh asset is never modified.
        MeshFilter filter = gameObject.GetComponent(typeof(MeshFilter)) as MeshFilter;
        filter.sharedMesh = sourceMesh;
        workingMesh = filter.mesh;

        // All target meshes must have the same vertex count (and order) as the source mesh.
        int vertexCount = sourceMesh.vertexCount;
        for (int i = 0; i < attributeMeshes.Length; i++)
        {
            if (attributeMeshes[i].vertexCount != vertexCount)
            {
                Debug.Log("Mesh " + i + " doesn't have the same number of vertices as the first mesh");
                return;
            }
        }

        BuildBlendShapes();
    }

    void BuildBlendShapes()
    {
        blendShapes = new BlendShape[attributeMeshes.Length];

        // Cache the arrays once: Mesh.vertices / Mesh.normals return a copy on every access.
        Vector3[] baseVertices = workingMesh.vertices;
        Vector3[] baseNormals = workingMesh.normals;

        for (int i = 0; i < attributeMeshes.Length; i++)
        {
            blendShapes[i] = new BlendShape();

            Vector3[] targetVertices = attributeMeshes[i].vertices;
            Vector3[] targetNormals = attributeMeshes[i].normals;

            // First pass: count the vertices that differ from the base mesh.
            int blendShapeCounter = 0;
            for (int j = 0; j < workingMesh.vertexCount; j++)
            {
                if (baseVertices[j] != targetVertices[j])
                {
                    blendShapeCounter++;
                }
            }

            // Second pass: store only the deltas (target - base) for those vertices.
            blendShapes[i].vertices = new BlendShapeVertex[blendShapeCounter];
            blendShapeCounter = 0;
            for (int j = 0; j < workingMesh.vertexCount; j++)
            {
                if (baseVertices[j] != targetVertices[j])
                {
                    BlendShapeVertex blendShapeVertex = new BlendShapeVertex();
                    blendShapeVertex.originalIndex = j;
                    blendShapeVertex.position = targetVertices[j] - baseVertices[j];
                    blendShapeVertex.normal = targetNormals[j] - baseNormals[j];

                    blendShapes[i].vertices[blendShapeCounter] = blendShapeVertex;
                    blendShapeCounter++;
                }
            }
        }
    }

    public void SetMorph()
    {
        // Start from a fresh copy of the base mesh every time (Mesh.vertices returns a copy).
        Vector3[] morphedVertices = sourceMesh.vertices;
        Vector3[] morphedNormals = sourceMesh.normals;

        for (int j = 0; j < attributeMeshes.Length; j++)
        {
            // Skip blend shapes whose weight is (approximately) zero.
            if (!Mathf.Approximately(attributeProgress[j], 0))
            {
                for (int i = 0; i < blendShapes[j].vertices.Length; i++)
                {
                    BlendShapeVertex bsv = blendShapes[j].vertices[i];
                    // Add the weighted delta onto the base vertex and normal.
                    morphedVertices[bsv.originalIndex] += bsv.position * attributeProgress[j];
                    morphedNormals[bsv.originalIndex] += bsv.normal * attributeProgress[j];
                }
            }
        }
        workingMesh.vertices = morphedVertices;
        workingMesh.normals = morphedNormals;
        workingMesh.RecalculateBounds();
    }
}
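A minimal usage sketch for driving the script above (the driver component below is hypothetical; it assumes a MorphTargets component on the same GameObject and simply oscillates the first blend shape weight):

using UnityEngine;

// Hypothetical driver: ping-pongs the weight of the first blend shape between 0 and 1.
public class MorphTargetsDriver : MonoBehaviour
{
    private MorphTargets morph;

    void Start()
    {
        morph = GetComponent<MorphTargets>();
    }

    void Update()
    {
        if (morph == null || morph.attributeProgress == null || morph.attributeProgress.Length == 0)
            return;

        // Animate the first blend shape weight, then rebuild the blended mesh.
        morph.attributeProgress[0] = Mathf.PingPong(Time.time, 1f);
        morph.SetMorph();
    }
}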



