Recording and playing audio simultaneously in iOS


I am trying to play back what is being recorded while the recording is still in progress. Currently I am using AVAudioRecorder for recording and AVAudioPlayer for playback.

When I try to play the content while it is still being recorded, I get no sound at all. Below is pseudocode for what I am doing.

If I do the same thing after stopping the recording, everything works fine.

AVAudioRecorder *recorder;  // assume the recorder is initialized properly
[recorder record];

NSError *error = nil;
NSURL *recordingPathUrl;    // contains the recording path
AVAudioPlayer *audioPlayer = [[AVAudioPlayer alloc] initWithContentsOfURL:recordingPathUrl
                                                                    error:&error];
[audioPlayer prepareToPlay];
[audioPlayer play];

Can anyone share their thoughts or suggestions?
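
Update: from what I have read, the audio session category also matters here; the default category does not allow simultaneous input and output. A minimal sketch of the session setup (to run before the code above):

NSError *sessionError = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];

// The default category (SoloAmbient) does not permit recording;
// PlayAndRecord enables microphone input and speaker output at once.
[session setCategory:AVAudioSessionCategoryPlayAndRecord error:&sessionError];
[session setActive:YES error:&sessionError];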


What type of file are you recording to? If you are recording an MP4/MOV file, the MOV atom is not written to the file until recording stops, so this will not be possible. I am not sure whether the same holds for MP3. - Steve McFarlin
Possible duplicate of Record and play audio simultaneously - Mohammed Azharuddin Shaikh
You can definitely do this with Core Audio. The setup takes some time, but it can certainly be done. - dubbeat
3 Answers

This is achievable. Download the project from the following link: https://code.google.com/p/ios-coreaudio-example/downloads/detail?name=Aruts.zip&can=2&q= That project plays sound through the speaker but does not record it. I have implemented the recording part; the complete code is described below.
In the .h file:
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

#ifndef max
#define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif

#ifndef min
#define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif


@interface IosAudioController : NSObject {
    AudioComponentInstance audioUnit;
    AudioBuffer tempBuffer; // this will hold the latest data from the microphone
    ExtAudioFileRef             mAudioFileRef;
}
@property (readonly) ExtAudioFileRef mAudioFileRef;
@property (readonly) AudioComponentInstance audioUnit;
@property (readonly) AudioBuffer tempBuffer;

- (void) start;
- (void) stop;
- (void) processAudio: (AudioBufferList*) bufferList;

@end

// setup a global iosAudio variable, accessible everywhere
extern IosAudioController* iosAudio;

In the .m file:

#import "IosAudioController.h"
#import <AudioToolbox/AudioToolbox.h>
#import <AVFoundation/AVFoundation.h>
#define kOutputBus 0
#define kInputBus 1

IosAudioController* iosAudio;

void checkStatus(int status){
    if (status) {
        printf("Status not 0! %d\n", status);
//      exit(1);
    }
}




static void printAudioUnitRenderActionFlags(AudioUnitRenderActionFlags * ioActionFlags)
{
    if (*ioActionFlags == 0) {
        printf("AudioUnitRenderActionFlags(%u) ", (unsigned int)*ioActionFlags);
        return;
    }
    printf("AudioUnitRenderActionFlags(%u): ", (unsigned int)*ioActionFlags);
    if (*ioActionFlags & kAudioUnitRenderAction_PreRender)              printf("kAudioUnitRenderAction_PreRender ");
    if (*ioActionFlags & kAudioUnitRenderAction_PostRender)             printf("kAudioUnitRenderAction_PostRender ");
    if (*ioActionFlags & kAudioUnitRenderAction_OutputIsSilence)        printf("kAudioUnitRenderAction_OutputIsSilence ");
    if (*ioActionFlags & kAudioOfflineUnitRenderAction_Preflight)       printf("kAudioOfflineUnitRenderAction_Preflight ");
    if (*ioActionFlags & kAudioOfflineUnitRenderAction_Render)          printf("kAudioOfflineUnitRenderAction_Render ");
    if (*ioActionFlags & kAudioOfflineUnitRenderAction_Complete)        printf("kAudioOfflineUnitRenderAction_Complete ");
    if (*ioActionFlags & kAudioUnitRenderAction_PostRenderError)        printf("kAudioUnitRenderAction_PostRenderError ");
    if (*ioActionFlags & kAudioUnitRenderAction_DoNotCheckRenderArgs)   printf("kAudioUnitRenderAction_DoNotCheckRenderArgs ");
}


/**
 This callback is called when new audio data from the microphone is
 available.
 */
static OSStatus recordingCallback(void *inRefCon, 
                                  AudioUnitRenderActionFlags *ioActionFlags, 
                                  const AudioTimeStamp *inTimeStamp, 
                                  UInt32 inBusNumber, 
                                  UInt32 inNumberFrames, 
                                  AudioBufferList *ioData) {

    double timeInSeconds = inTimeStamp->mSampleTime / 44100.0;

    printf("\n%fs inBusNumber: %u inNumberFrames: %u ", timeInSeconds, (unsigned int)inBusNumber, (unsigned int)inNumberFrames);

    printAudioUnitRenderActionFlags(ioActionFlags);

    // Because of the way our audio format (setup below) is chosen:
    // we only need 1 buffer, since it is mono
    // Samples are 16 bits = 2 bytes.
    // 1 frame includes only 1 sample

    AudioBuffer buffer;

    buffer.mNumberChannels = 1;
    buffer.mDataByteSize = inNumberFrames * 2;
    buffer.mData = malloc( inNumberFrames * 2 );

    // Put the buffer in an AudioBufferList
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0] = buffer;

    // Then:
    // Obtain recorded samples

    OSStatus status;

    status = AudioUnitRender([iosAudio audioUnit], 
                             ioActionFlags, 
                             inTimeStamp, 
                             inBusNumber, 
                             inNumberFrames, 
                             &bufferList);
    checkStatus(status);

    // Now the samples we just read are sitting in the buffers in bufferList.
    // Process the new data
    [iosAudio processAudio:&bufferList];

    // Also write the captured samples to the output file asynchronously
    ExtAudioFileWriteAsync([iosAudio mAudioFileRef], inNumberFrames, &bufferList);

    // release the malloc'ed data in the buffer we created earlier
    free(bufferList.mBuffers[0].mData);

    return noErr;
}




/**
 This callback is called when the audioUnit needs new data to play through the
 speakers. If you don't have any, just don't write anything in the buffers
 */
static OSStatus playbackCallback(void *inRefCon, 
                                 AudioUnitRenderActionFlags *ioActionFlags, 
                                 const AudioTimeStamp *inTimeStamp, 
                                 UInt32 inBusNumber, 
                                 UInt32 inNumberFrames, 
                                 AudioBufferList *ioData) {    
    // Notes: ioData contains buffers (may be more than one!)
    // Fill them up as much as you can. Remember to set the size value in each buffer to match how
    // much data is in the buffer.

    for (int i=0; i < ioData->mNumberBuffers; i++) { // in practice we will only ever have 1 buffer, since audio format is mono
        AudioBuffer buffer = ioData->mBuffers[i];

//      NSLog(@"  Buffer %d has %d channels and wants %d bytes of data.", i, buffer.mNumberChannels, buffer.mDataByteSize);

        // copy temporary buffer data to output buffer
        UInt32 size = min(buffer.mDataByteSize, [iosAudio tempBuffer].mDataByteSize); // don't copy more data than we have, or than fits in the output buffer
        memcpy(buffer.mData, [iosAudio tempBuffer].mData, size);
        buffer.mDataByteSize = size; // indicate how much data we wrote in the buffer

        // uncomment to hear random noise
        /*
        UInt16 *frameBuffer = buffer.mData;
        for (int j = 0; j < inNumberFrames; j++) {
            frameBuffer[j] = rand();
        }
        */

    }

    return noErr;
}

@implementation IosAudioController

@synthesize audioUnit, tempBuffer,mAudioFileRef;

/**
 Initialize the audioUnit and allocate our own temporary buffer.
 The temporary buffer will hold the latest data coming in from the microphone,
 and will be copied to the output when this is requested.
 */
- (id) init {
    self = [super init];

    OSStatus status;

    AVAudioSession *session = [AVAudioSession sharedInstance];
    NSLog(@"%f",session.preferredIOBufferDuration);


    // Describe audio component
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Get component
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

    // Get audio units
    status = AudioComponentInstanceNew(inputComponent, &audioUnit);
    checkStatus(status);

    // Enable IO for recording
    UInt32 flag = 1;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_EnableIO, 
                                  kAudioUnitScope_Input, 
                                  kInputBus,
                                  &flag, 
                                  sizeof(flag));
    checkStatus(status);

    // Enable IO for playback
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_EnableIO, 
                                  kAudioUnitScope_Output, 
                                  kOutputBus,
                                  &flag, 
                                  sizeof(flag));
    checkStatus(status);

    // Describe format
    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate         = 44100.00;
    audioFormat.mFormatID           = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags        = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket    = 1;
    audioFormat.mChannelsPerFrame   = 1;
    audioFormat.mBitsPerChannel     = 16;
    audioFormat.mBytesPerPacket     = 2;
    audioFormat.mBytesPerFrame      = 2;

    // Apply format
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Output, 
                                  kInputBus, 
                                  &audioFormat, 
                                  sizeof(audioFormat));
    checkStatus(status);
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Input, 
                                  kOutputBus, 
                                  &audioFormat, 
                                  sizeof(audioFormat));
    checkStatus(status);


    // Set input callback
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_SetInputCallback, 
                                  kAudioUnitScope_Global, 
                                  kInputBus, 
                                  &callbackStruct, 
                                  sizeof(callbackStruct));
    checkStatus(status);

    // Set output callback
    callbackStruct.inputProc = playbackCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_SetRenderCallback, 
                                  kAudioUnitScope_Global, 
                                  kOutputBus,
                                  &callbackStruct, 
                                  sizeof(callbackStruct));
    checkStatus(status);

    // Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
    flag = 0;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_ShouldAllocateBuffer,
                                  kAudioUnitScope_Output, 
                                  kInputBus,
                                  &flag, 
                                  sizeof(flag));
    checkStatus(status);

    // set preferred buffer size
    Float32 audioBufferSize = (0.023220);
    UInt32 size = sizeof(audioBufferSize);
    status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
                                     size, &audioBufferSize);

    // Allocate our own buffers (1 channel, 16 bits per sample, thus 16 bits per frame, thus 2 bytes per frame).
    // In practice the buffers contain 512 frames; if this changes,
    // processAudio resizes the temporary buffer accordingly.
    tempBuffer.mNumberChannels = 1;
    tempBuffer.mDataByteSize = 512 * 2;
    tempBuffer.mData = malloc( 512 * 2 );





     NSArray  *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
     NSString *documentsDirectory = [paths objectAtIndex:0];
    NSString *destinationFilePath = [[NSString alloc] initWithFormat: @"%@/output.caf", documentsDirectory];
    NSLog(@">>> %@\n", destinationFilePath);

     CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, ( CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);

    OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &mAudioFileRef);
    CFRelease(destinationURL);

    NSAssert(setupErr == noErr, @"Couldn't create file for writing");


    setupErr = ExtAudioFileSetProperty(mAudioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
    NSAssert(setupErr == noErr, @"Couldn't create file for format");


    setupErr =  ExtAudioFileWriteAsync(mAudioFileRef, 0, NULL);
    NSAssert(setupErr == noErr, @"Couldn't initialize write buffers for audio file");

    // Initialise
    status = AudioUnitInitialize(audioUnit);
    checkStatus(status);

  //   [NSTimer scheduledTimerWithTimeInterval:5 target:self selector:@selector(stopRecording:) userInfo:nil repeats:NO];

    return self;
}

/**
 Start the audioUnit. This means data will be provided from
 the microphone, and requested for feeding to the speakers, by
 use of the provided callbacks.
 */
- (void) start {
    OSStatus status = AudioOutputUnitStart(audioUnit);
    checkStatus(status);
}

/**
 Stop the audioUnit
 */
- (void) stop {
    OSStatus status = AudioOutputUnitStop(audioUnit);
    checkStatus(status);
    [self stopRecording:nil];
}

/**
 Change this function to decide what is done with incoming
 audio data from the microphone.
 Right now we copy it to our own temporary buffer.
 */
- (void) processAudio: (AudioBufferList*) bufferList{
    AudioBuffer sourceBuffer = bufferList->mBuffers[0];

    // fix tempBuffer size if it's the wrong size
    if (tempBuffer.mDataByteSize != sourceBuffer.mDataByteSize) {
        free(tempBuffer.mData);
        tempBuffer.mDataByteSize = sourceBuffer.mDataByteSize;
        tempBuffer.mData = malloc(sourceBuffer.mDataByteSize);
    }

    // copy incoming audio data to temporary buffer
    memcpy(tempBuffer.mData, bufferList->mBuffers[0].mData, bufferList->mBuffers[0].mDataByteSize);
}


- (void)stopRecording:(NSTimer*)theTimer
{
    printf("\nstopRecording\n");
    OSStatus status = ExtAudioFileDispose(mAudioFileRef);
    printf("OSStatus(ExtAudioFileDispose): %ld\n", status);
}

/**
 Clean up.
 */
- (void) dealloc {
    AudioUnitUninitialize(audioUnit);
    free(tempBuffer.mData);
    [super dealloc]; // under MRC, [super dealloc] must come last
}

This will definitely help you...
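
To wire the class up, a minimal usage sketch (using the global iosAudio declared in the header, which the render callbacks rely on):

// e.g. in application:didFinishLaunchingWithOptions:
iosAudio = [[IosAudioController alloc] init]; // assign the global before starting;
                                              // the callbacks read it
[iosAudio start];  // mic-to-speaker passthrough begins, and output.caf
                   // is written to the Documents directory

// later, when finished:
[iosAudio stop];   // stops the unit and closes the file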

Another good approach is to download Audio Touch from https://github.com/tkzic/audiograph and look at the app's Echo feature. It plays back whatever you say into the microphone, but it does not record the audio, so follow the steps below to add recording:

IN MixerHostAudio.h

@property (readwrite) ExtAudioFileRef   mRecordFile;
-(void)Record;
-(void)StopRecord;



IN MixerHostAudio.m

// Add these two functions to this class

-(void)Record{
    NSString *completeFileNameAndPath = [[NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject] stringByAppendingString:@"/Record.wav"];
    //create the url that the recording object needs to reference the file
    CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)[completeFileNameAndPath cStringUsingEncoding:[NSString defaultCStringEncoding]] , strlen([completeFileNameAndPath cStringUsingEncoding:[NSString defaultCStringEncoding]]), false);
   AudioStreamBasicDescription dstFormat, clientFormat;
    memset(&dstFormat, 0, sizeof(dstFormat));
    memset(&clientFormat, 0, sizeof(clientFormat));

    AudioFileTypeID fileTypeId = kAudioFileWAVEType;
        UInt32 size = sizeof(dstFormat);
    dstFormat.mFormatID = kAudioFormatLinearPCM;

    // setup the output file format
    dstFormat.mSampleRate = 44100.0; // set sample rate

    // create a 16-bit, 44.1 kHz stereo format
    dstFormat.mChannelsPerFrame = 2;
    dstFormat.mBitsPerChannel = 16;
    dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 4;
    dstFormat.mFramesPerPacket = 1;
    dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian

    // get the client format directly from the mixer unit
    UInt32 asbdSize = sizeof (AudioStreamBasicDescription);
    AudioUnitGetProperty(mixerUnit,
                         kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Input,
                         0, // input bus
                         &clientFormat,
                         &asbdSize);

     ExtAudioFileCreateWithURL(audioFileURL, fileTypeId, &dstFormat, NULL, kAudioFileFlags_EraseFile, &mRecordFile);
     CFRelease(audioFileURL); // balance the CFURLCreate above


        printf("recording\n");
        ExtAudioFileSetProperty(mRecordFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
        //call this once as this will alloc space on the first call
        ExtAudioFileWriteAsync(mRecordFile, 0, NULL);


}



-(void)StopRecord{
    ExtAudioFileDispose(mRecordFile);
}



// In the micLineInCallback function, add this line at the end, just before return noErr;:

  ExtAudioFileWriteAsync([THIS mRecordFile], inNumberFrames, ioData);

Call these functions from the -(IBAction)playOrStop:(id)sender method in MixerHostViewController.m.
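
For illustration, the hookup could look roughly like the sketch below. The audioObject and playing names follow Apple's MixerHost sample that audiograph is based on; treat them as assumptions and verify against your copy of MixerHostViewController.

- (IBAction)playOrStop:(id)sender {
    if (audioObject.playing) {
        [audioObject StopRecord];   // close the file before tearing down the graph
        [audioObject stopAUGraph];
    } else {
        [audioObject Record];       // create the file before audio starts flowing
        [audioObject startAUGraph];
    }
    // ... existing button/state handling ...
}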


A question for you: do you record the audio through the microphone, or do you write the playback data directly to the file to avoid capturing ambient noise through the mic? - Bagusflyer
It records the sound through the microphone. - h.kishan


The RemoteIO Audio Unit can be used to record and play simultaneously. There are plenty of examples of recording with RemoteIO (aurioTouch) and of playing with RemoteIO. Just enable both the unit input and the unit output, and handle both buffer callbacks. See an example here
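
Distilled to its core, the "enable both" step is two property calls on the RemoteIO unit (bus 1 is the input/mic side, bus 0 the output/speaker side); a minimal sketch, with remoteIOUnit standing in for an already-created AudioComponentInstance:

UInt32 one = 1;
// Enable input (microphone) on bus 1
AudioUnitSetProperty(remoteIOUnit, kAudioOutputUnitProperty_EnableIO,
                     kAudioUnitScope_Input, 1, &one, sizeof(one));
// Enable output (speaker) on bus 0 (on by default for RemoteIO,
// shown here for completeness)
AudioUnitSetProperty(remoteIOUnit, kAudioOutputUnitProperty_EnableIO,
                     kAudioUnitScope_Output, 0, &one, sizeof(one));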


