基于 AudioUnit 的录音与播放组件:支持配置单包采样点数、采样率、位深和声道数。
Recorder
import AudioUnit
import AVFoundation
/// Delegate protocol for receiving captured PCM audio from `WSAudioUnitRecorder`.
protocol WSAudioRecordDelegate: AnyObject {
/// Called with each fixed-size packet of recorded PCM data.
/// NOTE(review): "didRecieve" is a typo for "didReceive"; kept because renaming
/// would break every conformer.
func audioRecorder(recorder: WSAudioUnitRecorder, didRecieve data: Data)
}
/// Default no-op implementation so conforming types may omit the callback.
extension WSAudioRecordDelegate {
func audioRecorder(recorder: WSAudioUnitRecorder, didRecieve data: Data) {}
}
/// PCM audio capture built on a RemoteIO Audio Unit.
/// Supports configuring samples per packet, sample rate, bit depth and channel count.
class WSAudioUnitRecorder: NSObject {
    private(set) var isRecording = false
    weak var delegate: WSAudioRecordDelegate?
    /// Current stream description (sample rate / channels / bit depth).
    private var asbd = WSASBD()
    /// RemoteIO element 1 is the input (microphone) bus.
    private let InputBus: AudioUnitElement = 1
    /// Number of sample frames delivered to the delegate per packet.
    private var perPacketSamples: UInt32 = 512
    /// Optional microphone hardware gain (0...1); applied in setupAudioSession().
    private var inputGain: Float?
    private var ioUnit: AudioComponentInstance?
    private var bufferList: AudioBufferList?
    /// Accumulates rendered audio until one full packet is available.
    private var tempAudio = Data()

    override init() {
        super.init()
        setupAudioSession()
        initRecoder()
    }

    deinit {
        // Release the audio unit and the render buffer allocated by initBufferList()
        // (neither was freed in the original, leaking on dealloc).
        if let unit = ioUnit {
            AudioOutputUnitStop(unit)
            AudioUnitUninitialize(unit)
            AudioComponentInstanceDispose(unit)
        }
        free(bufferList?.mBuffers.mData)
    }

    /// Start capturing.
    func startRecord() {
        guard let unit = ioUnit else {
            print("startRecord failed: audio unit not created")
            return
        }
        var error = AudioUnitInitialize(unit)
        if error != noErr {
            print("AudioUnitInitialize error: \(error)")
        }
        error = AudioOutputUnitStart(unit)
        if error != noErr {
            print("AudioOutputUnitStart error")
        }
        isRecording = true
    }

    /// Stop capturing and drop any partially accumulated packet.
    func stopRecord() {
        if let unit = ioUnit {
            // Stop first, then uninitialize — the reverse of the start sequence.
            // (The original uninitialized a running unit before stopping it.)
            AudioOutputUnitStop(unit)
            AudioUnitUninitialize(unit)
        }
        isRecording = false
        tempAudio = Data()
    }

    /// Set the number of sample frames per delivered packet.
    /// - Parameter count: samples per packet.
    func setPerPacketSamples(count: Int) {
        perPacketSamples = UInt32(count)
        setupAudioSession()
        if isRecording {
            stopRecord()
            // Brief delay gives the session time to apply the new IO buffer duration.
            DispatchQueue.main.asyncAfter(deadline: .now() + 0.3, execute: {
                self.startRecord()
            })
        }
    }

    /// Set the sample rate.
    /// - Parameter sampleRate: sample rate in Hz.
    func setSampleRate(sampleRate: Double) {
        asbd.sampleRate = sampleRate
        reSetParameter()
    }

    /// Set the channel count.
    /// - Parameter channels: 1 = mono, 2 = stereo.
    func setChannel(channels: UInt32) {
        asbd.channels = channels
        reSetParameter()
    }

    /// Set the sample bit depth.
    /// - Parameter bits: bit depth; must be 8, 16, 24 or 32.
    func setBitsPerChannel(bits: UInt32) {
        asbd.mBitsPerChannel = bits
        reSetParameter()
    }

    /// Apply a full audio stream description at once.
    /// - Parameter asbd: audio description.
    func setASBD(asbd: WSASBD) {
        self.asbd = asbd
        reSetParameter()
    }

    /// Set the microphone hardware gain.
    /// - Parameter inputGain: gain in the range 0...1.
    func setInputGain(inputGain: Float) {
        self.inputGain = inputGain
        reSetParameter()
    }
}
extension WSAudioUnitRecorder {
    /// (Re)creates the render buffer and the RemoteIO unit for the current format.
    private func initRecoder() {
        initBufferList(inNumberFrames: perPacketSamples)
        let _ = setupIoUnit()
    }

    /// Allocates the AudioBufferList for `inNumberFrames` frames in the current
    /// format, freeing the previously allocated buffer first.
    /// NOTE(review): this is also invoked from the render callback, i.e. on the
    /// real-time audio thread; allocating there is risky but kept as-is.
    private func initBufferList(inNumberFrames: UInt32) {
        let mDataByteSize: UInt32 = inNumberFrames * asbd.mBitsPerChannel / 8 * asbd.channels
        free(bufferList?.mBuffers.mData)
        bufferList = AudioBufferList(
            mNumberBuffers: 1,
            mBuffers: AudioBuffer(
                mNumberChannels: asbd.channels,
                mDataByteSize: mDataByteSize,
                mData: UnsafeMutableRawPointer.allocate(byteCount: Int(mDataByteSize), alignment: 1)
            )
        )
    }

    /// Configures the shared AVAudioSession for the current format.
    func setupAudioSession() {
        let session = AVAudioSession.sharedInstance()
        do {
            try session.setPreferredSampleRate(asbd.sampleRate)
            // Request an IO buffer that matches one packet of samples.
            let time = Double(perPacketSamples) / asbd.sampleRate
            if asbd.sampleRate == 96000 {
                // NOTE(review): halved at 96 kHz in the original — presumably to
                // compensate for hardware buffer limits; confirm on device.
                // (The original's 48000 branch was identical to its else branch
                // and has been collapsed.)
                try session.setPreferredIOBufferDuration(time / 2)
            } else {
                try session.setPreferredIOBufferDuration(time)
            }
            if let inputGain = inputGain {
                try session.setInputGain(inputGain)
            }
            try session.setActive(true, options: .notifyOthersOnDeactivation)
        } catch {
            print(error.localizedDescription)
        }
    }

    /// Rebuilds session, buffer and unit; restarts capture if it was running.
    private func reSetParameter() {
        var isRestoration = false
        if isRecording {
            stopRecord()
            isRestoration = true
        }
        setupAudioSession()
        initRecoder()
        if isRestoration {
            DispatchQueue.main.asyncAfter(deadline: .now() + 0.3, execute: {
                self.startRecord()
            })
        }
    }

    /// Creates and configures the RemoteIO unit with input enabled and an
    /// input callback that renders microphone audio into `bufferList`.
    /// - Returns: true on success.
    private func setupIoUnit() -> Bool {
        // Dispose of any previously created unit so reconfiguration does not leak it.
        if let oldUnit = ioUnit {
            AudioComponentInstanceDispose(oldUnit)
            ioUnit = nil
        }
        var ioDes = AudioComponentDescription(
            componentType: kAudioUnitType_Output,
            componentSubType: kAudioUnitSubType_RemoteIO,
            componentManufacturer: kAudioUnitManufacturer_Apple,
            componentFlags: 0,
            componentFlagsMask: 0
        )
        guard let inputComp: AudioComponent = AudioComponentFindNext(nil, &ioDes) else {
            print("outputComp init error")
            return false
        }
        if AudioComponentInstanceNew(inputComp, &ioUnit) != noErr {
            print("io AudioComponentInstanceNew error")
            return false
        }
        // Format we want to pull from the output scope of the input bus.
        var ioFormat = getAudioStreamBasicDescription(des: asbd, isInterleaved: false)
        if AudioUnitSetProperty(ioUnit!, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, InputBus, &ioFormat, UInt32(MemoryLayout.size(ofValue: ioFormat))) != noErr {
            print("set StreamFormat error")
            return false
        }
        // Enable IO on the input (microphone) bus.
        var value: UInt32 = 1
        if AudioUnitSetProperty(ioUnit!, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, InputBus, &value, UInt32(MemoryLayout.size(ofValue: value))) != noErr {
            print("can't enable input io")
            return false
        }
        var recordCallback = AURenderCallbackStruct(inputProc: { inRefCon, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, ioData -> OSStatus in
            let bridgeSelf: WSAudioUnitRecorder = bridge(ptr: UnsafeRawPointer(inRefCon))
            // Resize the buffer to the frame count the hardware actually delivered.
            bridgeSelf.initBufferList(inNumberFrames: inNumberFrames)
            let error: OSStatus = AudioUnitRender(bridgeSelf.ioUnit!, ioActionFlags, inTimeStamp, bridgeSelf.InputBus, inNumberFrames, &bridgeSelf.bufferList!)
            if error == noErr {
                let bufferData: AudioBuffer = bridgeSelf.bufferList!.mBuffers
                if let mData = bufferData.mData {
                    let data = Data(bytes: mData, count: Int(bufferData.mDataByteSize))
                    bridgeSelf.addTempAudio(audio: data)
                }
            }
            return noErr
        }, inputProcRefCon: UnsafeMutableRawPointer(mutating: bridge(obj: self)))
        if AudioUnitSetProperty(ioUnit!, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, InputBus, &recordCallback, UInt32(MemoryLayout.size(ofValue: recordCallback))) != noErr {
            print("SetRenderCallback error")
            return false
        }
        return true
    }

    /// Buffers rendered audio and delivers it to the delegate in fixed-size packets.
    private func addTempAudio(audio: Data) {
        tempAudio.append(audio)
        let packetByteSize = Int(perPacketSamples * asbd.mBitsPerChannel / 8 * asbd.channels)
        guard packetByteSize > 0 else { return }
        // Drain every complete packet (the original emitted at most one per call,
        // letting latency accumulate when a render delivered more than one packet).
        while tempAudio.count >= packetByteSize {
            // Copy into a fresh Data so the delegate sees zero-based indices
            // (the original passed a subscripted slice, whose indices are not rebased).
            let packet = Data(tempAudio.prefix(packetByteSize))
            delegate?.audioRecorder(recorder: self, didRecieve: packet)
            let start = tempAudio.startIndex
            tempAudio.removeSubrange(start ..< tempAudio.index(start, offsetBy: packetByteSize))
        }
    }
}
Player
import AudioUnit
import AVFoundation
/// PCM audio playback built on a RemoteIO Audio Unit.
/// Feed PCM with `addAudioData(data:)`; the render callback pulls from a
/// thread-safe cache and zero-fills when it runs dry.
class WSAudioUnitPlayer: NSObject {
    private(set) var isPlaying = false
    /// Thread-safe FIFO buffering incoming PCM for the render callback.
    private var cacheBufferData = ZGAudioDataBuffer()
    private var ioUnit: AudioComponentInstance?
    private var asbd = WSASBD()
    /// RemoteIO element 0 is the output (speaker) bus.
    private let OutputBus: AudioUnitElement = 0
    /// Samples pulled per render; should match the recorder's packet size so
    /// producer and consumer rates line up.
    private var perPacketSamples: UInt32 = 512

    override init() {
        super.init()
        let _ = setupIoUnit()
    }

    deinit {
        // Dispose of the audio unit (the original leaked it on dealloc).
        if let unit = ioUnit {
            AudioOutputUnitStop(unit)
            AudioUnitUninitialize(unit)
            AudioComponentInstanceDispose(unit)
        }
    }

    /// Enqueue PCM data for playback. Ignored while stopped or when empty.
    /// - Parameter data: raw PCM in the configured format.
    func addAudioData(data: Data) {
        guard isPlaying, !data.isEmpty else { return }
        cacheBufferData.appendData(data: data)
    }

    /// Start playback.
    func startPlay() {
        guard !isPlaying else { return }
        guard let unit = ioUnit else {
            print("startPlay failed: audio unit not created")
            return
        }
        setupAudioSession()
        isPlaying = true
        cacheBufferData.clear()
        var error = AudioUnitInitialize(unit)
        if error != noErr {
            print("AudioUnitInitialize error: \(error)")
        }
        error = AudioOutputUnitStart(unit)
        if error != noErr {
            print("AudioOutputUnitStart error")
        }
    }

    /// Stop playback and discard buffered audio.
    func stopPlay() {
        guard isPlaying else { return }
        isPlaying = false
        if let unit = ioUnit {
            // Stop first, then uninitialize — the reverse of the start sequence.
            // (The original uninitialized a running unit before stopping it.)
            AudioOutputUnitStop(unit)
            AudioUnitUninitialize(unit)
        }
        cacheBufferData.clear()
    }

    /// Set the number of samples to pull from the cache per fill.
    /// To keep producer and consumer rates matched, the recorder's packet size
    /// should equal this value.
    /// - Parameter count: samples per packet.
    func setPerPacketSamples(count: Int) {
        perPacketSamples = UInt32(count)
        if isPlaying {
            stopPlay()
            startPlay()
        }
    }

    /// Set the sample rate.
    /// - Parameter sampleRate: sample rate in Hz.
    func setSampleRate(sampleRate: Double) {
        asbd.sampleRate = sampleRate
        reSetParameter()
    }

    /// Set the channel count.
    /// - Parameter channels: 1 = mono, 2 = stereo.
    func setChannel(channels: UInt32) {
        asbd.channels = channels
        reSetParameter()
    }

    /// Set the sample bit depth.
    /// - Parameter bits: bit depth; must be 8, 16, 24 or 32.
    func setBitsPerChannel(bits: UInt32) {
        asbd.mBitsPerChannel = bits
        reSetParameter()
    }

    /// Apply a full audio stream description at once.
    /// - Parameter asbd: audio description.
    func setASBD(asbd: WSASBD) {
        self.asbd = asbd
        reSetParameter()
    }
}
extension WSAudioUnitPlayer {
    /// Creates and configures the RemoteIO unit with output enabled and a
    /// render callback that pulls PCM from the cache.
    /// - Returns: true on success.
    private func setupIoUnit() -> Bool {
        // Dispose of any previously created unit so reconfiguration does not leak it.
        if let oldUnit = ioUnit {
            AudioComponentInstanceDispose(oldUnit)
            ioUnit = nil
        }
        var ioDes = AudioComponentDescription(
            componentType: kAudioUnitType_Output,
            componentSubType: kAudioUnitSubType_RemoteIO,
            componentManufacturer: kAudioUnitManufacturer_Apple,
            componentFlags: 0,
            componentFlagsMask: 0
        )
        guard let inputComp: AudioComponent = AudioComponentFindNext(nil, &ioDes) else {
            print("outputComp init error")
            return false
        }
        if AudioComponentInstanceNew(inputComp, &ioUnit) != noErr {
            print("io AudioComponentInstanceNew error")
            return false
        }
        // Enable IO on the output (speaker) bus.
        // Fix: the flag must be a UInt32 — the original declared `var value = 1`
        // (an 8-byte Int), so the size passed to AudioUnitSetProperty did not
        // match the property's expected 4-byte UInt32.
        var value: UInt32 = 1
        if AudioUnitSetProperty(ioUnit!, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, OutputBus, &value, UInt32(MemoryLayout.size(ofValue: value))) != noErr {
            print("can't enable output io")
            return false
        }
        // Format we supply on the input scope of the output bus.
        var ioFormat = getAudioStreamBasicDescription(des: asbd, isInterleaved: false)
        if AudioUnitSetProperty(ioUnit!, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, OutputBus, &ioFormat, UInt32(MemoryLayout.size(ofValue: ioFormat))) != noErr {
            print("set StreamFormat error")
            return false
        }
        var playCallback = getPlayCallBack()
        if AudioUnitSetProperty(ioUnit!, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, OutputBus, &playCallback, UInt32(MemoryLayout.size(ofValue: playCallback))) != noErr {
            print("SetRenderCallback error")
            return false
        }
        return true
    }

    /// Render callback: fills the hardware buffer from the PCM cache.
    /// `getData(len:)` always returns exactly `len` bytes (zero-padded), so the
    /// buffer is fully written on every render cycle.
    private func getPlayCallBack() -> AURenderCallbackStruct {
        return AURenderCallbackStruct(inputProc: { inRefCon, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, ioData -> OSStatus in
            let bridgeSelf: WSAudioUnitPlayer = bridge(ptr: UnsafeRawPointer(inRefCon))
            let bufferData: AudioBuffer = ioData!.pointee.mBuffers
            let len = Int(bufferData.mDataByteSize)
            let pcm = bridgeSelf.cacheBufferData.getData(len: len)
            // Copy directly into the render buffer instead of bouncing through NSData.
            pcm.copyBytes(to: bufferData.mData!.assumingMemoryBound(to: UInt8.self), count: len)
            return noErr
        }, inputProcRefCon: UnsafeMutableRawPointer(mutating: bridge(obj: self)))
    }

    /// Configures the shared AVAudioSession for the current format.
    func setupAudioSession() {
        let session = AVAudioSession.sharedInstance()
        do {
            try session.setPreferredSampleRate(asbd.sampleRate)
            // Request an IO buffer that matches one packet of samples.
            let time = Double(perPacketSamples) / asbd.sampleRate
            if asbd.sampleRate == 96000 {
                // NOTE(review): halved at 96 kHz in the original — presumably to
                // compensate for hardware buffer limits; confirm on device.
                // (The original's 48000 branch was identical to its else branch
                // and has been collapsed.)
                try session.setPreferredIOBufferDuration(time / 2)
            } else {
                try session.setPreferredIOBufferDuration(time)
            }
            try session.setActive(true, options: .notifyOthersOnDeactivation)
        } catch {
            print(error.localizedDescription)
        }
    }

    /// Rebuilds the IO unit; restarts playback if it was running.
    private func reSetParameter() {
        var isRestoration = false
        if isPlaying {
            stopPlay()
            isRestoration = true
        }
        let _ = setupIoUnit()
        if isRestoration {
            startPlay()
        }
    }
}
Other
import AudioUnit
/// Lightweight audio stream description used to build an `AudioStreamBasicDescription`.
struct WSASBD {
/// Number of channels (1 = mono, 2 = stereo)
var channels: UInt32 = 1
/// Sample rate in Hz
var sampleRate: Double = 48000
/// Bit depth (bits per channel); expected to be 8, 16, 24 or 32
var mBitsPerChannel: UInt32 = 16
}
/// Builds a Core Audio `AudioStreamBasicDescription` for packed, signed-integer
/// linear PCM from a `WSASBD`.
/// - Parameters:
///   - des: sample rate / channel count / bit depth to encode.
///   - isInterleaved: intended to select interleaved vs. non-interleaved layout.
/// - Returns: a fully populated ASBD.
/// NOTE(review): both branches below assign the same flag values (the
/// `kLinearPCMFormatFlag*` constants alias the `kAudioFormatFlag*` ones), and
/// `kAudioFormatFlagIsNonInterleaved` is never set — so the result is always an
/// interleaved format regardless of `isInterleaved`. Both call sites pass
/// `false` and treat the buffers as interleaved, so this appears intentional in
/// effect, but confirm before relying on the parameter.
func getAudioStreamBasicDescription(des: WSASBD, isInterleaved: Bool) -> AudioStreamBasicDescription {
var asbd = AudioStreamBasicDescription()
let bytesPerSample = des.mBitsPerChannel / 8
asbd.mChannelsPerFrame = des.channels
asbd.mBitsPerChannel = 8 * bytesPerSample
// Interleaved layout: one frame carries one sample per channel.
asbd.mBytesPerFrame = des.channels * bytesPerSample
asbd.mFramesPerPacket = 1
asbd.mBytesPerPacket = des.channels * bytesPerSample
if isInterleaved {
asbd.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
} else {
asbd.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked
}
asbd.mFormatID = kAudioFormatLinearPCM
asbd.mSampleRate = des.sampleRate
asbd.mReserved = 0
return asbd
}
/// Recovers an object reference from an opaque pointer previously produced by
/// `bridge(obj:)`. Ownership is not transferred (unretained).
func bridge<T: AnyObject>(ptr: UnsafeRawPointer) -> T {
    let unmanaged = Unmanaged<T>.fromOpaque(ptr)
    return unmanaged.takeUnretainedValue()
}
/// Converts an object reference into an opaque raw pointer without retaining it.
/// The caller must keep the object alive while the pointer is in use.
func bridge<T: AnyObject>(obj: T) -> UnsafeRawPointer {
    let opaque = Unmanaged.passUnretained(obj).toOpaque()
    return UnsafeRawPointer(opaque)
}
/// Thread-safe FIFO byte buffer for PCM playback data.
/// Writers append with `appendData(data:)`; the render callback drains with
/// `getData(len:)`, which zero-pads short reads so playback never starves.
class ZGAudioDataBuffer {
    private var buffer = Data()
    /// Read cursor into `buffer`; bytes before it have already been consumed.
    private var startIndex = 0
    private let lock = NSLock()
    /// Maximum bytes retained (5 MB); the oldest data is dropped first.
    private let maxCacheLength = 1024 * 1024 * 5

    /// Appends PCM bytes, dropping the oldest data once the cap is exceeded.
    func appendData(data: Data) {
        lock.lock()
        defer { lock.unlock() }
        buffer.append(data)
        if buffer.count > maxCacheLength {
            let excessLength = buffer.count - maxCacheLength
            // Fix: clamp to zero — the original `startIndex -= excessLength`
            // could drive the cursor negative, later crashing `subdata(in:)`.
            startIndex = max(0, startIndex - excessLength)
            buffer = buffer.subdata(in: excessLength..<buffer.count)
        }
    }

    /// Returns exactly `len` bytes, zero-padded when fewer are buffered.
    /// Non-positive lengths yield empty Data (the original built
    /// `Data(repeating:count:)` with `len`, which traps for negative values).
    func getData(len: Int) -> Data {
        lock.lock()
        defer { lock.unlock() }
        guard len > 0 else {
            return Data()
        }
        let availableLength = buffer.count - startIndex
        if availableLength <= 0 {
            return Data(repeating: 0, count: len)
        }
        let actualLength = min(len, availableLength)
        var data = buffer.subdata(in: startIndex..<startIndex + actualLength)
        startIndex += actualLength
        if data.count < len {
            data.append(Data(repeating: 0, count: len - data.count))
        }
        return data
    }

    /// Discards all buffered data and resets the read cursor.
    func clear() {
        lock.lock()
        defer { lock.unlock() }
        buffer.removeAll()
        startIndex = 0
    }
}