void audioSpec_init(VideoState *is, Float64 sample_rate) {
    // describe the default output unit (10.6+ AudioComponent API)
    AudioComponentDescription outputcd = {0};
    outputcd.componentType = kAudioUnitType_Output;
    outputcd.componentSubType = kAudioUnitSubType_DefaultOutput;
    outputcd.componentManufacturer = kAudioUnitManufacturer_Apple;

    AudioComponent comp = AudioComponentFindNext(NULL, &outputcd);
    if (comp == NULL) {
        printf("can't get output unit\n");
        exit(-1);
    }
    CheckError(AudioComponentInstanceNew(comp, &is->audio_wanted_spec.rioUnit),
               "Couldn't open component for outputUnit");

    // register the render callback that will pull decoded audio
    AURenderCallbackStruct input;
    input.inputProc = audio_unit_callback;
    input.inputProcRefCon = is;
    CheckError(AudioUnitSetProperty(is->audio_wanted_spec.rioUnit,
                                    kAudioUnitProperty_SetRenderCallback,
                                    kAudioUnitScope_Input,
                                    0,
                                    &input,
                                    sizeof(input)),
               "AudioUnitSetProperty failed");

    CheckError(AudioUnitInitialize(is->audio_wanted_spec.rioUnit),
               "Couldn't initialize output unit");
}
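The routine above only creates and initializes the output unit; it never uses the sample_rate parameter or starts rendering. A minimal sketch of the two missing pieces, assuming the decoder produces non-interleaved (planar) 32-bit float stereo, which is also why the render callback later fills two separate buffers. The stream format would typically be set before AudioUnitInitialize, and the start call issued once everything else is ready; the linked demo may configure this differently.

    /* Hedged sketch: tell the output unit we feed it non-interleaved 32-bit
     * float stereo (matching FFmpeg's AV_SAMPLE_FMT_FLTP). Assumed format,
     * not necessarily what the demo uses. */
    AudioStreamBasicDescription asbd = {0};
    asbd.mSampleRate       = sample_rate;          /* e.g. is->audio_ctx->sample_rate */
    asbd.mFormatID         = kAudioFormatLinearPCM;
    asbd.mFormatFlags      = kAudioFormatFlagIsFloat
                           | kAudioFormatFlagIsPacked
                           | kAudioFormatFlagIsNonInterleaved;
    asbd.mBitsPerChannel   = 32;
    asbd.mChannelsPerFrame = 2;                    /* left plane + right plane */
    asbd.mFramesPerPacket  = 1;
    asbd.mBytesPerFrame    = sizeof(Float32);      /* per channel, non-interleaved */
    asbd.mBytesPerPacket   = sizeof(Float32);

    CheckError(AudioUnitSetProperty(is->audio_wanted_spec.rioUnit,
                                    kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Input,
                                    0,
                                    &asbd,
                                    sizeof(asbd)),
               "Couldn't set stream format on output unit");

    /* after AudioUnitInitialize succeeds, rendering starts with: */
    CheckError(AudioOutputUnitStart(is->audio_wanted_spec.rioUnit),
               "Couldn't start output unit");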
is->audio_frame.data[0] holds the left-channel plane and is copied into audio_buf; is->audio_frame.data[1] holds the right-channel plane and is copied into audio_buf1 (see the planar-format sketch after the function below).
int audio_decode_frame(VideoState *is, uint8_t *audio_buf, uint8_t *audio_buf1,
                       int buf_size, double *pts_ptr) {
    int len1, data_size = 0;
    AVPacket *pkt = &is->audio_pkt;
    double pts;
    int n;

    for(;;) {
        while(is->audio_pkt_size > 0) {
            int got_frame = 0;
            len1 = avcodec_decode_audio4(is->audio_ctx, &is->audio_frame, &got_frame, pkt);
            if(len1 < 0) {
                /* if error, skip frame */
                is->audio_pkt_size = 0;
                break;
            }
            data_size = 0;
            if(got_frame) {
                /* bytes in one channel plane (planar output assumed, e.g. FLTP) */
                data_size = is->audio_frame.nb_samples *
                            av_get_bytes_per_sample((enum AVSampleFormat)is->audio_frame.format);
                assert(data_size <= buf_size);
                /* stereo planar: data[0] is the left plane, data[1] the right plane */
                memcpy(audio_buf,  is->audio_frame.data[0], data_size);
                memcpy(audio_buf1, is->audio_frame.data[1], data_size);
            }
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if(data_size <= 0) {
                /* No data yet, get more frames */
                continue;
            }
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* data_size covers a single plane, so dividing by the per-sample size
               of one channel advances the clock by nb_samples / sample_rate */
            n = av_get_bytes_per_sample((enum AVSampleFormat)is->audio_frame.format);
            is->audio_clock += (double)data_size /
                               (double)(n * is->audio_ctx->sample_rate);
            /* We have data, return it and come back for more later */
            return data_size;
        }
        if(pkt->data)
            av_free_packet(pkt);
        if(is->quit) {
            return -1;
        }
        /* next packet */
        if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
            return -1;
        }
        if(pkt->data == flush_pkt.data) {
            avcodec_flush_buffers(is->audio_ctx);
            continue;
        }
        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;
        /* if updated, update the audio clock w/ the packet pts */
        if(pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base) * pkt->pts;
        }
    }
}
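The separate copies of data[0] and data[1] above only make sense for a planar sample format, where each plane holds nb_samples * bytes_per_sample bytes for one channel. A small hedged sketch of how that per-plane size could be checked before the memcpy; the helper name per_plane_bytes is illustrative only and not part of the demo.

    #include <libavutil/frame.h>
    #include <libavutil/samplefmt.h>

    /* Hedged sketch: verify the decoded frame really is planar (e.g.
     * AV_SAMPLE_FMT_FLTP) before treating data[0]/data[1] as separate channels. */
    static int per_plane_bytes(const AVFrame *frame)
    {
        enum AVSampleFormat fmt = (enum AVSampleFormat)frame->format;

        if (!av_sample_fmt_is_planar(fmt))
            return -1;                 /* interleaved: only data[0] is valid */

        /* one channel's worth of bytes in each plane */
        return frame->nb_samples * av_get_bytes_per_sample(fmt);
    }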
The audio render callback:
OSStatus audio_unit_callback(void *inRefCon,
                             AudioUnitRenderActionFlags *ioActionFlags,
                             const AudioTimeStamp *inTimeStamp,
                             UInt32 inBusNumber,
                             UInt32 inNumberFrames,
                             AudioBufferList *ioData)
{
    /* start from silence so an early return never plays garbage */
    for (UInt32 iBuffer = 0; iBuffer < ioData->mNumberBuffers; ++iBuffer) {
        memset(ioData->mBuffers[iBuffer].mData, 0, ioData->mBuffers[iBuffer].mDataByteSize);
    }

    VideoState *is = (VideoState *)inRefCon;
    int len = ioData->mBuffers[0].mDataByteSize;
    int len1, audio_size = 0;
    double pts;

    if(is->audio_buf_index >= is->audio_buf_size) {
        /* current buffers are drained: decode the next audio frame */
        audio_size = audio_decode_frame(is, is->audio_buf, is->audio_buf1,
                                        sizeof(is->audio_buf), &pts);
        if(audio_size < 0) {
            /* If error, output silence */
            is->audio_buf_size = 1024;
            memset(is->audio_buf,  0, is->audio_buf_size);
            memset(is->audio_buf1, 0, is->audio_buf_size);
        } else {
            audio_size = synchronize_audio(is, (int16_t *)is->audio_buf,
                                           audio_size, pts);
            is->audio_buf_size = audio_size;
            audio_size = synchronize_audio(is, (int16_t *)is->audio_buf1,
                                           audio_size, pts);
            is->audio_buf_size1 = audio_size;
        }
        is->audio_buf_index = 0;
    }

    len1 = is->audio_buf_size - is->audio_buf_index;
    if(len1 > len)
        len1 = len;

    /* non-interleaved stereo: buffer 0 gets the left plane, buffer 1 the right plane */
    memcpy(ioData->mBuffers[0].mData, (uint8_t *)is->audio_buf  + is->audio_buf_index, len1);
    memcpy(ioData->mBuffers[1].mData, (uint8_t *)is->audio_buf1 + is->audio_buf_index, len1);
    ioData->mBuffers[0].mDataByteSize = len1;
    ioData->mBuffers[1].mDataByteSize = len1;

    len -= len1;
    is->audio_buf_index += len1;
    return noErr;
}
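As written, the callback hands Core Audio at most one decoded chunk per render cycle and shrinks mDataByteSize to match. A render callback is normally expected to deliver the full inNumberFrames it was asked for, so a common variation (a hedged sketch below, not the demo's code, mirroring the while(len > 0) loop used with SDL in the earlier tutorials) keeps pulling decoded data until the requested byte count is satisfied and leaves mDataByteSize untouched; the local offset variable is illustrative only.

    /* Hedged sketch: fill the entire requested buffer instead of a single chunk. */
    int offset = 0;
    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, is->audio_buf, is->audio_buf1,
                                            sizeof(is->audio_buf), &pts);
            if (audio_size < 0) {
                /* decode error: queue a short run of silence */
                is->audio_buf_size = 1024;
                memset(is->audio_buf,  0, is->audio_buf_size);
                memset(is->audio_buf1, 0, is->audio_buf_size);
            } else {
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy((uint8_t *)ioData->mBuffers[0].mData + offset,
               (uint8_t *)is->audio_buf  + is->audio_buf_index, len1);
        memcpy((uint8_t *)ioData->mBuffers[1].mData + offset,
               (uint8_t *)is->audio_buf1 + is->audio_buf_index, len1);
        len    -= len1;
        offset += len1;
        is->audio_buf_index += len1;
    }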
Full demo: https://github.com/765450034/ffplay-examples/blob/master/tutorial07%2Baudiounit.c