iOS – Core Audio offline rendering with GenericOutput
Has anyone successfully done offline rendering with Core Audio?

I have to mix two audio files and apply reverb (using two AudioFilePlayer units, a MultiChannelMixer, Reverb2, and RemoteIO). I got it working, and I can save the output while it is previewing (in the render callback of the RemoteIO unit).

I need to save it without playing it back (offline). Thanks in advance.

Solution

Offline rendering worked for me using the GenericOutput AudioUnit.
I'm sharing the working code here.
The Core Audio framework can seem a little intimidating, but small things like the ASBD and the parameters cause most of the problems. Keep at it and it will work; don't give up :-). Core Audio is very powerful and useful for low-level audio work. That's what I've learned over the last few weeks. Enjoy :-D ....
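Note: the snippets below call a checkError() helper that is not part of any framework. A minimal sketch (the name and the exit-on-failure behavior are my assumptions, modeled on the common Core Audio error-check idiom) is:

// Minimal error-check helper (assumed, not part of Core Audio):
// logs the failing operation and exits on a nonzero OSStatus.
static void checkError(OSStatus error, const char *operation)
{
    if (error == noErr) return;
    fprintf(stderr, "Error: %s (OSStatus %d)\n", operation, (int)error);
    exit(1);
}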

Declare these in the .h file:

//AUGraph
AUGraph mGraph;
//Audio unit references
AudioUnit mFilePlayer;
AudioUnit mFilePlayer2;
AudioUnit mReverb;
AudioUnit mTone;
AudioUnit mMixer;
AudioUnit mGIO;
//Audio file references
AudioFileID inputFile;
AudioFileID inputFile2;
//Audio file reference for saving
ExtAudioFileRef extAudioFile;
//Standard sample rate
Float64 graphSampleRate;
AudioStreamBasicDescription stereoStreamFormat864;

Float64 MaxSampleTime;

// in the .m file

- (id)init
{
    self = [super init];
    graphSampleRate = 44100.0;
    MaxSampleTime   = 0.0;
    UInt32 category = kAudioSessionCategory_MediaPlayback;
    checkError(AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                                       sizeof(category), &category),
               "Couldn't set category on audio session");
    [self initializeAUGraph];
    return self;
}
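One caveat (my addition, not part of the original answer): with the old C audio-session API, AudioSessionInitialize must have been called somewhere before AudioSessionSetProperty will succeed. If nothing else in the app does it, a minimal call is:

// Assumption: nothing else has initialized the audio session yet.
checkError(AudioSessionInitialize(NULL, NULL, NULL, NULL),
           "Couldn't initialize the audio session");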

// ASBD setup

- (void)setupStereoStream864 {
    // The AudioUnitSampleType data type is the recommended type for sample data in audio
    // units. This obtains the byte size of the type for use in filling in the ASBD.
    size_t bytesPerSample = sizeof(AudioUnitSampleType);
    // Fill the application audio format struct's fields to define a linear PCM,
    // stereo, noninterleaved stream at the hardware sample rate.
    stereoStreamFormat864.mFormatID          = kAudioFormatLinearPCM;
    stereoStreamFormat864.mFormatFlags       = kAudioFormatFlagsAudioUnitCanonical;
    stereoStreamFormat864.mBytesPerPacket    = bytesPerSample;
    stereoStreamFormat864.mFramesPerPacket   = 1;
    stereoStreamFormat864.mBytesPerFrame     = bytesPerSample;
    stereoStreamFormat864.mChannelsPerFrame  = 2; // 2 indicates stereo
    stereoStreamFormat864.mBitsPerChannel    = 8 * bytesPerSample;
    stereoStreamFormat864.mSampleRate        = graphSampleRate;
}
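For reference, AudioUnitSampleType on iOS is a 4-byte 8.24 fixed-point sample, and kAudioFormatFlagsAudioUnitCanonical marks the stream noninterleaved, so each AudioBuffer carries a single channel and mBytesPerFrame equals one sample. A quick sanity check (a sketch of my own, not from the original answer; needs <assert.h>) could be:

// Hypothetical sanity check for the canonical stereo ASBD above.
static void assertCanonicalStereo(const AudioStreamBasicDescription *asbd)
{
    assert(asbd->mFormatID == kAudioFormatLinearPCM);
    assert(asbd->mBitsPerChannel == 8 * sizeof(AudioUnitSampleType)); // 32-bit 8.24 fixed point
    assert(asbd->mBytesPerFrame == sizeof(AudioUnitSampleType));      // noninterleaved: one channel per buffer
    assert(asbd->mChannelsPerFrame == 2);                             // stereo
}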

// AUGraph setup

- (void)initializeAUGraph
{
    [self setupStereoStream864];

    // Set up the AUGraph, add AUNodes, and make connections
    // create a new AUGraph
    checkError(NewAUGraph(&mGraph), "Couldn't create new graph");

    // AUNodes represent AudioUnits on the AUGraph and provide an
    // easy means for connecting AudioUnits together.
    AUNode filePlayerNode;
    AUNode filePlayerNode2;
    AUNode mixerNode;
    AUNode reverbNode;
    AUNode toneNode;
    AUNode gOutputNode;

    // file player component
    AudioComponentDescription filePlayer_desc;
    filePlayer_desc.componentType = kAudioUnitType_Generator;
    filePlayer_desc.componentSubType = kAudioUnitSubType_AudioFilePlayer;
    filePlayer_desc.componentFlags = 0;
    filePlayer_desc.componentFlagsMask = 0;
    filePlayer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // file player component 2
    AudioComponentDescription filePlayer2_desc;
    filePlayer2_desc.componentType = kAudioUnitType_Generator;
    filePlayer2_desc.componentSubType = kAudioUnitSubType_AudioFilePlayer;
    filePlayer2_desc.componentFlags = 0;
    filePlayer2_desc.componentFlagsMask = 0;
    filePlayer2_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Create AudioComponentDescriptions for the AUs we want in the graph
    // mixer component
    AudioComponentDescription mixer_desc;
    mixer_desc.componentType = kAudioUnitType_Mixer;
    mixer_desc.componentSubType = kAudioUnitSubType_MultiChannelMixer;
    mixer_desc.componentFlags = 0;
    mixer_desc.componentFlagsMask = 0;
    mixer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // reverb component
    AudioComponentDescription reverb_desc;
    reverb_desc.componentType = kAudioUnitType_Effect;
    reverb_desc.componentSubType = kAudioUnitSubType_Reverb2;
    reverb_desc.componentFlags = 0;
    reverb_desc.componentFlagsMask = 0;
    reverb_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // tone component
    AudioComponentDescription tone_desc;
    tone_desc.componentType = kAudioUnitType_FormatConverter;
    //tone_desc.componentSubType = kAudioUnitSubType_NewTimePitch;
    tone_desc.componentSubType = kAudioUnitSubType_Varispeed;
    tone_desc.componentFlags = 0;
    tone_desc.componentFlagsMask = 0;
    tone_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // generic output component (the offline rendering target)
    AudioComponentDescription gOutput_desc;
    gOutput_desc.componentType = kAudioUnitType_Output;
    gOutput_desc.componentSubType = kAudioUnitSubType_GenericOutput;
    gOutput_desc.componentFlags = 0;
    gOutput_desc.componentFlagsMask = 0;
    gOutput_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Add nodes to the graph to hold our AudioUnits.
    // You pass in a reference to the AudioComponentDescription
    // and get back an AUNode.
    AUGraphAddNode(mGraph, &filePlayer_desc, &filePlayerNode);
    AUGraphAddNode(mGraph, &filePlayer2_desc, &filePlayerNode2);
    AUGraphAddNode(mGraph, &mixer_desc, &mixerNode);
    AUGraphAddNode(mGraph, &reverb_desc, &reverbNode);
    AUGraphAddNode(mGraph, &tone_desc, &toneNode);
    AUGraphAddNode(mGraph, &gOutput_desc, &gOutputNode);

    // Open the graph early, initialize late.
    // Once open, the AudioUnits exist but are not initialized
    // (no resource allocation occurs here).
    checkError(AUGraphOpen(mGraph), "Couldn't open the graph");

    // Get references to the AudioUnit objects for the graph nodes
    AUGraphNodeInfo(mGraph, filePlayerNode, NULL, &mFilePlayer);
    AUGraphNodeInfo(mGraph, filePlayerNode2, NULL, &mFilePlayer2);
    AUGraphNodeInfo(mGraph, reverbNode, NULL, &mReverb);
    AUGraphNodeInfo(mGraph, toneNode, NULL, &mTone);
    AUGraphNodeInfo(mGraph, mixerNode, NULL, &mMixer);
    AUGraphNodeInfo(mGraph, gOutputNode, NULL, &mGIO);

    // Wire the chain: both file players feed the mixer, which feeds
    // the reverb, then the tone unit, then the generic output.
    AUGraphConnectNodeInput(mGraph, filePlayerNode, 0, mixerNode, 0);
    AUGraphConnectNodeInput(mGraph, filePlayerNode2, 0, mixerNode, 1);
    AUGraphConnectNodeInput(mGraph, mixerNode, 0, reverbNode, 0);
    AUGraphConnectNodeInput(mGraph, reverbNode, 0, toneNode, 0);
    AUGraphConnectNodeInput(mGraph, toneNode, 0, gOutputNode, 0);


    UInt32 busCount = 2; // bus count for mixer unit input

    // Set the mixer unit's bus count
    checkError(AudioUnitSetProperty(mMixer,
                                    kAudioUnitProperty_ElementCount,
                                    kAudioUnitScope_Input, 0,
                                    &busCount, sizeof(busCount)),
               "Couldn't set mixer unit's bus count");

    // Enable metering mode to view the mixer's input and output levels
    UInt32 onValue = 1;
    checkError(AudioUnitSetProperty(mMixer,
                                    kAudioUnitProperty_MeteringMode,
                                    kAudioUnitScope_Global, 0,
                                    &onValue, sizeof(onValue)),
               "Couldn't enable mixer metering");

    // Increasing the maximum frames per slice allows the mixer unit to
    // accommodate the larger slice size used when the screen is locked.
    UInt32 maximumFramesPerSlice = 4096;
    checkError(AudioUnitSetProperty(mMixer,
                                    kAudioUnitProperty_MaximumFramesPerSlice,
                                    kAudioUnitScope_Global, 0,
                                    &maximumFramesPerSlice, sizeof(maximumFramesPerSlice)),
               "Couldn't set mixer unit's maximum frames per slice");

    // set the audio data format of the tone unit
    AudioUnitSetProperty(mTone, kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Output, 0,
                         &stereoStreamFormat864, sizeof(AudioStreamBasicDescription));
    // set the audio data format of the reverb unit
    AudioUnitSetProperty(mReverb, kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Input, 0,
                         &stereoStreamFormat864, sizeof(AudioStreamBasicDescription));

    // set the initial reverb decay time (parameters 4 and 5 are
    // kReverb2Param_DecayTimeAt0Hz and kReverb2Param_DecayTimeAtNyquist)
    AudioUnitParameterValue reverbTime = 2.5;
    AudioUnitSetParameter(mReverb, 4, kAudioUnitScope_Global, 0, reverbTime, 0);
    AudioUnitSetParameter(mReverb, 5, kAudioUnitScope_Global, 0, reverbTime, 0);

    AudioStreamBasicDescription auEffectStreamFormat;
    UInt32 asbdSize = sizeof(auEffectStreamFormat);
    memset(&auEffectStreamFormat, 0, sizeof(auEffectStreamFormat));

    // get the audio data format from the reverb unit
    checkError(AudioUnitGetProperty(mReverb, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Input, 0,
                                    &auEffectStreamFormat, &asbdSize),
               "Couldn't get AU effect unit ASBD");

    auEffectStreamFormat.mSampleRate = graphSampleRate;

    // set the audio data format of the mixer unit's output to match
    checkError(AudioUnitSetProperty(mMixer, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Output, 0,
                                    &auEffectStreamFormat, sizeof(auEffectStreamFormat)),
               "Couldn't set ASBD on mixer output");

    checkError(AUGraphInitialize(mGraph), "Couldn't initialize the graph");

    [self setUpAUFilePlayer];
    [self setUpAUFilePlayer2];
}
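With all of the connections made, the offline signal flow is (sketch):

    filePlayer  --> mixer (bus 0) --\
    filePlayer2 --> mixer (bus 1) ---+--> reverb --> tone --> GenericOutput --> pulled manually, written via ExtAudioFile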

// Audio file playback setup. Here I'm setting up the voice file.

- (OSStatus)setUpAUFilePlayer {
    NSString *songPath = [[NSBundle mainBundle] pathForResource:@"testVoice" ofType:@".m4a"];
    CFURLRef songURL = (CFURLRef)[NSURL fileURLWithPath:songPath];

    // open the input audio file
    checkError(AudioFileOpenURL(songURL, kAudioFileReadPermission, 0, &inputFile),
               "setUpAUFilePlayer AudioFileOpenURL Failed");

    AudioStreamBasicDescription fileASBD;
    // get the audio data format from the file
    UInt32 propSize = sizeof(fileASBD);
    checkError(AudioFileGetProperty(inputFile, kAudioFilePropertyDataFormat,
                                    &propSize, &fileASBD),
               "setUpAUFilePlayer Couldn't get file's data format");

    // tell the file player unit to load the file we want to play
    checkError(AudioUnitSetProperty(mFilePlayer, kAudioUnitProperty_ScheduledFileIDs,
                                    kAudioUnitScope_Global, 0,
                                    &inputFile, sizeof(inputFile)),
               "setUpAUFilePlayer AudioUnitSetProperty[kAudioUnitProperty_ScheduledFileIDs] Failed");

    UInt64 nPackets;
    UInt32 propsize = sizeof(nPackets);
    checkError(AudioFileGetProperty(inputFile, kAudioFilePropertyAudioDataPacketCount,
                                    &propsize, &nPackets),
               "setUpAUFilePlayer AudioFileGetProperty[kAudioFilePropertyAudioDataPacketCount] Failed");

    // tell the file player AU to play the entire file
    ScheduledAudioFileRegion rgn;
    memset(&rgn.mTimeStamp, 0, sizeof(rgn.mTimeStamp));
    rgn.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
    rgn.mTimeStamp.mSampleTime = 0;
    rgn.mCompletionProc = NULL;
    rgn.mCompletionProcUserData = NULL;
    rgn.mAudioFile = inputFile;
    rgn.mLoopCount = -1;
    rgn.mStartFrame = 0;
    rgn.mFramesToPlay = nPackets * fileASBD.mFramesPerPacket;

    if (MaxSampleTime < rgn.mFramesToPlay)
    {
        MaxSampleTime = rgn.mFramesToPlay;
    }

    checkError(AudioUnitSetProperty(mFilePlayer, kAudioUnitProperty_ScheduledFileRegion,
                                    kAudioUnitScope_Global, 0,
                                    &rgn, sizeof(rgn)),
               "setUpAUFilePlayer AudioUnitSetProperty[kAudioUnitProperty_ScheduledFileRegion] Failed");

    // prime the file player AU with default values
    UInt32 defaultVal = 0;
    checkError(AudioUnitSetProperty(mFilePlayer, kAudioUnitProperty_ScheduledFilePrime,
                                    kAudioUnitScope_Global, 0,
                                    &defaultVal, sizeof(defaultVal)),
               "setUpAUFilePlayer AudioUnitSetProperty[kAudioUnitProperty_ScheduledFilePrime] Failed");

    // tell the file player AU when to start playing
    // (-1 sample time means the next render cycle)
    AudioTimeStamp startTime;
    memset(&startTime, 0, sizeof(startTime));
    startTime.mFlags = kAudioTimeStampSampleTimeValid;
    startTime.mSampleTime = -1;
    checkError(AudioUnitSetProperty(mFilePlayer, kAudioUnitProperty_ScheduleStartTimeStamp,
                                    kAudioUnitScope_Global, 0,
                                    &startTime, sizeof(startTime)),
               "setUpAUFilePlayer AudioUnitSetProperty[kAudioUnitProperty_ScheduleStartTimeStamp] Failed");

    return noErr;
}
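As a worked example of the frame arithmetic above: a 3-second voice file at 44.1 kHz holds about 132,300 frames; with AAC's 1024 frames per packet that is roughly 130 packets, so nPackets * fileASBD.mFramesPerPacket recovers the length in frames. MaxSampleTime keeps the larger of the two files' lengths, so the offline loop later renders to the end of whichever file is longer, while mLoopCount = -1 keeps the shorter region looping underneath until rendering stops.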

// Audio file playback setup. Here I'm setting up the background-music file.

- (OSStatus)setUpAUFilePlayer2 {
    NSString *songPath = [[NSBundle mainBundle] pathForResource:@"BGmusic" ofType:@".mp3"];
    CFURLRef songURL = (CFURLRef)[NSURL fileURLWithPath:songPath];

    // open the input audio file
    checkError(AudioFileOpenURL(songURL, kAudioFileReadPermission, 0, &inputFile2),
               "setUpAUFilePlayer2 AudioFileOpenURL Failed");

    AudioStreamBasicDescription fileASBD;
    // get the audio data format from the file
    UInt32 propSize = sizeof(fileASBD);
    checkError(AudioFileGetProperty(inputFile2, kAudioFilePropertyDataFormat,
                                    &propSize, &fileASBD),
               "setUpAUFilePlayer2 Couldn't get file's data format");

    // tell the file player unit to load the file we want to play
    checkError(AudioUnitSetProperty(mFilePlayer2, kAudioUnitProperty_ScheduledFileIDs,
                                    kAudioUnitScope_Global, 0,
                                    &inputFile2, sizeof(inputFile2)),
               "setUpAUFilePlayer2 AudioUnitSetProperty[kAudioUnitProperty_ScheduledFileIDs] Failed");

    UInt64 nPackets;
    UInt32 propsize = sizeof(nPackets);
    checkError(AudioFileGetProperty(inputFile2, kAudioFilePropertyAudioDataPacketCount,
                                    &propsize, &nPackets),
               "setUpAUFilePlayer2 AudioFileGetProperty[kAudioFilePropertyAudioDataPacketCount] Failed");

    // tell the file player AU to play the entire file
    ScheduledAudioFileRegion rgn;
    memset(&rgn.mTimeStamp, 0, sizeof(rgn.mTimeStamp));
    rgn.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
    rgn.mTimeStamp.mSampleTime = 0;
    rgn.mCompletionProc = NULL;
    rgn.mCompletionProcUserData = NULL;
    rgn.mAudioFile = inputFile2;
    rgn.mLoopCount = -1;
    rgn.mStartFrame = 0;
    rgn.mFramesToPlay = nPackets * fileASBD.mFramesPerPacket;

    if (MaxSampleTime < rgn.mFramesToPlay)
    {
        MaxSampleTime = rgn.mFramesToPlay;
    }

    checkError(AudioUnitSetProperty(mFilePlayer2, kAudioUnitProperty_ScheduledFileRegion,
                                    kAudioUnitScope_Global, 0,
                                    &rgn, sizeof(rgn)),
               "setUpAUFilePlayer2 AudioUnitSetProperty[kAudioUnitProperty_ScheduledFileRegion] Failed");

    // prime the file player AU with default values
    UInt32 defaultVal = 0;
    checkError(AudioUnitSetProperty(mFilePlayer2, kAudioUnitProperty_ScheduledFilePrime,
                                    kAudioUnitScope_Global, 0,
                                    &defaultVal, sizeof(defaultVal)),
               "setUpAUFilePlayer2 AudioUnitSetProperty[kAudioUnitProperty_ScheduledFilePrime] Failed");

    // tell the file player AU when to start playing
    // (-1 sample time means the next render cycle)
    AudioTimeStamp startTime;
    memset(&startTime, 0, sizeof(startTime));
    startTime.mFlags = kAudioTimeStampSampleTimeValid;
    startTime.mSampleTime = -1;
    checkError(AudioUnitSetProperty(mFilePlayer2, kAudioUnitProperty_ScheduleStartTimeStamp,
                                    kAudioUnitScope_Global, 0,
                                    &startTime, sizeof(startTime)),
               "setUpAUFilePlayer2 AudioUnitSetProperty[kAudioUnitProperty_ScheduleStartTimeStamp] Failed");

    return noErr;
}

// Start saving the file

- (void)startRecordingAAC {
    AudioStreamBasicDescription destinationFormat;
    memset(&destinationFormat, 0, sizeof(destinationFormat));
    destinationFormat.mChannelsPerFrame = 2;
    destinationFormat.mFormatID = kAudioFormatMPEG4AAC;
    UInt32 size = sizeof(destinationFormat);
    OSStatus result = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                             0, NULL, &size, &destinationFormat);
    if (result) printf("AudioFormatGetProperty %ld \n", (long)result);

    NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *documentsDirectory = [paths objectAtIndex:0];

    NSString *destinationFilePath = [[NSString alloc] initWithFormat:@"%@/output.m4a", documentsDirectory];
    CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
                                                            (CFStringRef)destinationFilePath,
                                                            kCFURLPOSIXPathStyle, false);
    [destinationFilePath release];

    // create the output file; the output is saved in .m4a format
    result = ExtAudioFileCreateWithURL(destinationURL, kAudioFileM4AType,
                                       &destinationFormat, NULL,
                                       kAudioFileFlags_EraseFile, &extAudioFile);
    if (result) printf("ExtAudioFileCreateWithURL %ld \n", (long)result);
    CFRelease(destinationURL);

    // This is a very important part and the easiest way to set the file's
    // client ASBD with the correct format: ask the generic output unit.
    AudioStreamBasicDescription clientFormat;
    UInt32 fSize = sizeof(clientFormat);
    memset(&clientFormat, 0, sizeof(clientFormat));
    // get the audio data format from the generic output unit
    checkError(AudioUnitGetProperty(mGIO, kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Output, 0,
                                    &clientFormat, &fSize),
               "AudioUnitGetProperty on mGIO Failed");

    // set the client data format on the output file
    checkError(ExtAudioFileSetProperty(extAudioFile, kExtAudioFileProperty_ClientDataFormat,
                                       sizeof(clientFormat), &clientFormat),
               "ExtAudioFileSetProperty kExtAudioFileProperty_ClientDataFormat Failed");
    // specify the codec manufacturer
    UInt32 codec = kAppleHardwareAudioCodecManufacturer;
    checkError(ExtAudioFileSetProperty(extAudioFile, kExtAudioFileProperty_CodecManufacturer,
                                       sizeof(codec), &codec),
               "ExtAudioFileSetProperty on extAudioFile Failed");

    // an initial async write of zero frames primes the file for writing
    checkError(ExtAudioFileWriteAsync(extAudioFile, 0, NULL),
               "ExtAudioFileWriteAsync Failed");

    [self pullGenericOutput];
}
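One assumption worth flagging: the hardware AAC codec serves only one client at a time, so if encoding fails here, swapping in the software codec is a common fallback (a sketch, not from the original answer):

// Fallback sketch: use the software AAC encoder instead of the hardware one.
UInt32 softCodec = kAppleSoftwareAudioCodecManufacturer;
checkError(ExtAudioFileSetProperty(extAudioFile,
                                   kExtAudioFileProperty_CodecManufacturer,
                                   sizeof(softCodec), &softCodec),
           "ExtAudioFileSetProperty kExtAudioFileProperty_CodecManufacturer Failed");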

// Manually pull data/buffers from the GenericOutput node and write them out.

- (void)pullGenericOutput {
    AudioUnitRenderActionFlags flags = 0;
    AudioTimeStamp inTimeStamp;
    memset(&inTimeStamp, 0, sizeof(AudioTimeStamp));
    inTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
    UInt32 busNumber = 0;
    UInt32 numberFrames = 512;
    inTimeStamp.mSampleTime = 0;
    int channelCount = 2;

    NSLog(@"Final numberFrames :%u", (unsigned int)numberFrames);
    int totFrms = MaxSampleTime;
    while (totFrms > 0)
    {
        // clamp the last slice to the frames that remain
        if (totFrms < (int)numberFrames)
        {
            numberFrames = totFrms;
            NSLog(@"Final numberFrames :%u", (unsigned int)numberFrames);
        }

        // allocate one noninterleaved buffer per channel
        AudioBufferList *bufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * (channelCount - 1));
        bufferList->mNumberBuffers = channelCount;
        for (int j = 0; j < channelCount; j++)
        {
            AudioBuffer buffer = {0};
            buffer.mNumberChannels = 1;
            buffer.mDataByteSize = numberFrames * sizeof(AudioUnitSampleType);
            buffer.mData = calloc(numberFrames, sizeof(AudioUnitSampleType));

            bufferList->mBuffers[j] = buffer;
        }

        // pull one slice of rendered audio out of the generic output unit
        checkError(AudioUnitRender(mGIO, &flags, &inTimeStamp, busNumber, numberFrames, bufferList),
                   "AudioUnitRender mGIO");

        // write the slice to the output file
        checkError(ExtAudioFileWrite(extAudioFile, numberFrames, bufferList),
                   "ExtAudioFileWrite Failed");

        // advance the timeline and the remaining-frame counter
        // (the original only decremented on the full-slice branch,
        // which rendered the final slice twice)
        inTimeStamp.mSampleTime += numberFrames;
        totFrms -= numberFrames;

        // free the slice buffers (the original leaked them)
        for (int j = 0; j < channelCount; j++)
        {
            free(bufferList->mBuffers[j].mData);
        }
        free(bufferList);
    }

    [self FilesSavingCompleted];
}

// FilesSavingCompleted

- (void)FilesSavingCompleted {
    OSStatus status = ExtAudioFileDispose(extAudioFile);
    printf("OSStatus(ExtAudioFileDispose): %ld\n", (long)status);
}
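Putting it together, a caller would drive the whole offline bounce like this (the class name OfflineRenderer is my placeholder; use whatever class these methods live in):

// Hypothetical usage of the class sketched above.
OfflineRenderer *renderer = [[OfflineRenderer alloc] init]; // builds and initializes the AUGraph
[renderer startRecordingAAC]; // renders the graph offline and writes output.m4a to Documents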
