Unity 3D的“自动”唇同步?

3

我是Unity 3D的新手,并正在开发一款应用程序。其中有“自动”口型同步的功能。

我正在按照以下教程进行操作。

http://answers.unity3d.com/questions/139323/any-way-of-quotautomaticquot-lip-syncing.html

看看下面我的代码

using UnityEngine;
using System.Collections;

public class lipmovement2 : MonoBehaviour
{
    /* Simple "automatic" lip sync: every frame, sample the spectrum of the
       AudioSource on this GameObject, sum the energy in a voice frequency
       band, smooth it with a moving average, and move the mouth transforms
       along the Y axis in proportion to that energy. */

    public AudioClip source_clip;   // optional explicit clip; used for length if set
    public float[] freqData;        // FFT spectrum buffer filled by GetSpectrumData
    int nSamples = 256;             // FFT size (must be a power of two for Unity)
    int fMax = 24000;               // assumed Nyquist frequency in Hz (48 kHz output)

    // Three upper-lip and three lower-lip transforms (middle / left / right).
    public Transform upmouth0_M, upmouth01_L, upmouth02_R, downmouth1_M, downmouth11_L, downmouth12_R;

    float volume = 1000;            // gain applied to the smoothed band energy
    float freqLow = 200;            // lower edge of the voice band, Hz
    float freqHigh = 1600;          // upper edge of the voice band, Hz

    int sizeFilter = 5;             // moving-average window length
    float[] filter;                 // circular buffer of recent samples
    float filterSum;                // running sum of the buffer contents
    int posFilter = 0;              // next write index into the buffer
    int qSample = 0;                // number of valid samples accumulated so far

    int video_Length, secCounter;   // clip length (s) and elapsed-seconds counter

    // Rest (closed-mouth) Y positions, one per transform.
    // BUG FIX: the original kept only two variables (y0/y1) and overwrote
    // them three times each, so every lip vertex was offset from the LAST
    // transform's rest position instead of its own.
    float yUpM, yUpL, yUpR, yDownM, yDownL, yDownR;

    AudioSource audioSource;        // cached; avoids GetComponent every frame

    void OnEnable ()
    {
        secCounter = 0;

        // Record each transform's own rest position.
        yUpM = upmouth0_M.localPosition.y;
        yUpL = upmouth01_L.localPosition.y;
        yUpR = upmouth02_R.localPosition.y;
        yDownM = downmouth1_M.localPosition.y;
        yDownL = downmouth11_L.localPosition.y;
        yDownR = downmouth12_R.localPosition.y;

        freqData = new float[nSamples];

        audioSource = GetComponent<AudioSource> ();
        // NOTE(review): assumes Rec_voice.instance and voiceFeed are non-null
        // by the time this component is enabled — confirm against scene setup.
        audioSource.clip = Rec_voice.instance.voiceFeed.clip;
        audioSource.Play ();

        // BUG FIX: original read source_clip.length unconditionally, which
        // throws if source_clip is never assigned; measure the clip that is
        // actually playing, falling back to source_clip when provided.
        AudioClip lengthSource = source_clip != null ? source_clip : audioSource.clip;
        if (lengthSource != null)
            video_Length = Mathf.CeilToInt (lengthSource.length);
    }

    // Returns the summed spectrum magnitude between fLow and fHigh (Hz)
    // for the currently playing audio.
    float BandVol (float fLow, float fHigh)
    {
        fLow = Mathf.Clamp (fLow, 20, fMax);
        fHigh = Mathf.Clamp (fHigh, fLow, fMax);

        audioSource.GetSpectrumData (freqData, 0, FFTWindow.BlackmanHarris);

        int n1 = Mathf.FloorToInt (fLow * nSamples / fMax);
        // BUG FIX: when fHigh == fMax the computed bin equals nSamples,
        // one past the end of freqData — clamp to the last valid index.
        int n2 = Mathf.Min (Mathf.FloorToInt (fHigh * nSamples / fMax), nSamples - 1);

        float sum = 0;
        // BUG FIX: original wrote 'sum = freqData[i]', discarding every bin
        // except the last; the band volume must ACCUMULATE the bins.
        for (int i = n1; i <= n2; i++) {
            sum += freqData [i];
        }

        return sum;
    }

    // Smooths successive samples with a sliding-window average over the
    // last sizeFilter values (shorter window until the buffer fills).
    float MovingAverage (float sample)
    {
        if (qSample == 0)
            filter = new float[sizeFilter];

        // Replace the oldest sample in the circular buffer, keeping the
        // running sum in step so the average is O(1) per call.
        filterSum += sample - filter [posFilter];
        filter [posFilter++] = sample;

        if (posFilter > qSample) {
            qSample = posFilter;
        }

        posFilter = posFilter % sizeFilter;
        return filterSum / qSample;
    }

    void Start ()
    {
        // Initialization lives in OnEnable so it reruns when the component
        // is toggled; nothing to do here.
    }

    float limValue;     // clamped lip displacement applied in LateUpdate

    void Update ()
    {
        // Energy in the voice band, smoothed and amplified, then clamped so
        // loud peaks cannot fling the lips off the face.
        float band_vol = BandVol (freqLow, freqHigh);
        float val = MovingAverage (band_vol) * volume;
        limValue = Mathf.Clamp (val, 0, 25f);

        if (Input.GetKeyDown (KeyCode.Escape)) {
            Application.Quit ();
        }
    }

    void LateUpdate ()
    {
        // Applied in LateUpdate so the offset wins over any animation that
        // positioned the mouth earlier in the frame. Upper lip moves down,
        // lower lip moves up — the mouth opens symmetrically.
        upmouth0_M.localPosition = new Vector3 (upmouth0_M.localPosition.x, yUpM - limValue, upmouth0_M.localPosition.z);
        upmouth01_L.localPosition = new Vector3 (upmouth01_L.localPosition.x, yUpL - limValue, upmouth01_L.localPosition.z);
        upmouth02_R.localPosition = new Vector3 (upmouth02_R.localPosition.x, yUpR - limValue, upmouth02_R.localPosition.z);
        downmouth1_M.localPosition = new Vector3 (downmouth1_M.localPosition.x, yDownM + limValue, downmouth1_M.localPosition.z);
        downmouth11_L.localPosition = new Vector3 (downmouth11_L.localPosition.x, yDownL + limValue, downmouth11_L.localPosition.z);
        downmouth12_R.localPosition = new Vector3 (downmouth12_R.localPosition.x, yDownR + limValue, downmouth12_R.localPosition.z);
    }
}

我遇到了以下问题:

1)如何识别人的声音?:因为如果有其他声音,比如音乐等,它也会被检测到,那么我们该如何停止呢?我希望嘴唇只与人的声音同步。

2)当我录制时,如果距离设备很近,那么它可以完美工作,但是如果距离稍远,则嘴唇不会同步。

请告诉我我做错了什么?如何解决以上问题?

1个回答

1

2) 麦克风录制的声音级别随距离减小而下降。因此,每个频率带上的能量都会减少(即GetSpectrumData给出的值较小)。如果增加“音量”参数的值,则val变得更大:

float val = MovingAverage (band_vol) * volume;

...唇部将沿y轴移动更多。

1)一个简单的算法只需查看频率数据,并在低频带(例如0-1000Hz)中有足够的噪声时将输入分类为语音,与整个频谱(例如0-16000Hz)相比。这可以防止算法在随机噪声中进行唇同步。对于更高级的需求,我会实现MFCC算法。然后,我会用常见音素训练算法,并在从录制的音频流计算出的MFCC接近训练数据时进行唇同步。


网页内容由 Stack Overflow 提供。点击上面的原文链接,可以查看英文原文。