diff --git a/Sources/LiveKit/Audio/AudioDeviceModuleDelegateAdapter.swift b/Sources/LiveKit/Audio/AudioDeviceModuleDelegateAdapter.swift
index e8146f3d0..653777807 100644
--- a/Sources/LiveKit/Audio/AudioDeviceModuleDelegateAdapter.swift
+++ b/Sources/LiveKit/Audio/AudioDeviceModuleDelegateAdapter.swift
@@ -22,11 +22,45 @@ internal import LiveKitWebRTC
 @_implementationOnly import LiveKitWebRTC
 #endif
 
+public class AudioEngineState: CustomDebugStringConvertible {
+    private let rtcState: LKRTCAudioEngineState
+
+    public var isOutputEnabled: Bool { rtcState.isOutputEnabled }
+    public var isOutputRunning: Bool { rtcState.isOutputRunning }
+    public var isInputEnabled: Bool { rtcState.isInputEnabled }
+    public var isInputRunning: Bool { rtcState.isInputRunning }
+    public var isInputMuted: Bool { rtcState.isInputMuted }
+    public var isLegacyMuteMode: Bool { rtcState.muteMode == .restartEngine }
+
+    init(fromRTCType rtcState: LKRTCAudioEngineState) {
+        self.rtcState = rtcState
+    }
+
+    public var debugDescription: String {
+        "AudioEngineState(isOutputEnabled: \(isOutputEnabled), isOutputRunning: \(isOutputRunning), isInputEnabled: \(isInputEnabled), isInputRunning: \(isInputRunning), isInputMuted: \(isInputMuted), isLegacyMuteMode: \(isLegacyMuteMode))"
+    }
+}
+
+public class AudioEngineStateTransition: CustomDebugStringConvertible {
+    private let rtcStateTransition: LKRTCAudioEngineStateTransition
+
+    public var prev: AudioEngineState { AudioEngineState(fromRTCType: rtcStateTransition.prev) }
+    public var next: AudioEngineState { AudioEngineState(fromRTCType: rtcStateTransition.next) }
+
+    init(fromRTCType rtcStateTransition: LKRTCAudioEngineStateTransition) {
+        self.rtcStateTransition = rtcStateTransition
+    }
+
+    public var debugDescription: String {
+        "AudioEngineStateTransition(prev: \(prev), next: \(next))"
+    }
+}
+
 // Invoked on WebRTC's worker thread, do not block.
 class AudioDeviceModuleDelegateAdapter: NSObject, LKRTCAudioDeviceModuleDelegate {
     weak var audioManager: AudioManager?
 
-    func audioDeviceModule(_: LKRTCAudioDeviceModule, didReceiveSpeechActivityEvent speechActivityEvent: RTCSpeechActivityEvent) {
+    func audioDeviceModule(_: LKRTCAudioDeviceModule, didReceiveMutedSpeechActivityEvent speechActivityEvent: RTCSpeechActivityEvent) {
         guard let audioManager else { return }
         audioManager._state.onMutedSpeechActivity?(audioManager, speechActivityEvent.toLKType())
     }
@@ -38,51 +72,61 @@ class AudioDeviceModuleDelegateAdapter: NSObject, LKRTCAudioDeviceModuleDelegate
 
     // Engine events
 
-    func audioDeviceModule(_: LKRTCAudioDeviceModule, didCreateEngine engine: AVAudioEngine) -> Int {
+    func audioDeviceModule(_: LKRTCAudioDeviceModule, didCreateEngine engine: AVAudioEngine, stateTransition: LKRTCAudioEngineStateTransition) -> Int {
         guard let audioManager else { return 0 }
         let entryPoint = audioManager.buildEngineObserverChain()
-        return entryPoint?.engineDidCreate(engine) ?? 0
+        return entryPoint?.engineDidCreate(engine, state: AudioEngineStateTransition(fromRTCType: stateTransition)) ?? 0
     }
 
-    func audioDeviceModule(_: LKRTCAudioDeviceModule, willEnableEngine engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int {
+    func audioDeviceModule(_: LKRTCAudioDeviceModule, willEnableEngine engine: AVAudioEngine, stateTransition: LKRTCAudioEngineStateTransition) -> Int {
         guard let audioManager else { return 0 }
         let entryPoint = audioManager.buildEngineObserverChain()
-        return entryPoint?.engineWillEnable(engine, isPlayoutEnabled: isPlayoutEnabled, isRecordingEnabled: isRecordingEnabled) ?? 0
+        return entryPoint?.engineWillEnable(engine, state: AudioEngineStateTransition(fromRTCType: stateTransition)) ?? 0
     }
 
-    func audioDeviceModule(_: LKRTCAudioDeviceModule, willStartEngine engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int {
+    func audioDeviceModule(_: LKRTCAudioDeviceModule, willStartEngine engine: AVAudioEngine, stateTransition: LKRTCAudioEngineStateTransition) -> Int {
         guard let audioManager else { return 0 }
         let entryPoint = audioManager.buildEngineObserverChain()
-        return entryPoint?.engineWillStart(engine, isPlayoutEnabled: isPlayoutEnabled, isRecordingEnabled: isRecordingEnabled) ?? 0
+        return entryPoint?.engineWillStart(engine, state: AudioEngineStateTransition(fromRTCType: stateTransition)) ?? 0
     }
 
-    func audioDeviceModule(_: LKRTCAudioDeviceModule, didStopEngine engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int {
+    func audioDeviceModule(_: LKRTCAudioDeviceModule, didStopEngine engine: AVAudioEngine, stateTransition: LKRTCAudioEngineStateTransition) -> Int {
         guard let audioManager else { return 0 }
         let entryPoint = audioManager.buildEngineObserverChain()
-        return entryPoint?.engineDidStop(engine, isPlayoutEnabled: isPlayoutEnabled, isRecordingEnabled: isRecordingEnabled) ?? 0
+        return entryPoint?.engineDidStop(engine, state: AudioEngineStateTransition(fromRTCType: stateTransition)) ?? 0
     }
 
-    func audioDeviceModule(_: LKRTCAudioDeviceModule, didDisableEngine engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int {
+    func audioDeviceModule(_: LKRTCAudioDeviceModule, didDisableEngine engine: AVAudioEngine, stateTransition: LKRTCAudioEngineStateTransition) -> Int {
         guard let audioManager else { return 0 }
         let entryPoint = audioManager.buildEngineObserverChain()
-        return entryPoint?.engineDidDisable(engine, isPlayoutEnabled: isPlayoutEnabled, isRecordingEnabled: isRecordingEnabled) ?? 0
+        return entryPoint?.engineDidDisable(engine, state: AudioEngineStateTransition(fromRTCType: stateTransition)) ?? 0
     }
 
-    func audioDeviceModule(_: LKRTCAudioDeviceModule, willReleaseEngine engine: AVAudioEngine) -> Int {
+    func audioDeviceModule(_: LKRTCAudioDeviceModule, willReleaseEngine engine: AVAudioEngine, stateTransition: LKRTCAudioEngineStateTransition) -> Int {
         guard let audioManager else { return 0 }
         let entryPoint = audioManager.buildEngineObserverChain()
-        return entryPoint?.engineWillRelease(engine) ?? 0
+        return entryPoint?.engineWillRelease(engine, state: AudioEngineStateTransition(fromRTCType: stateTransition)) ?? 0
     }
 
-    func audioDeviceModule(_: LKRTCAudioDeviceModule, engine: AVAudioEngine, configureInputFromSource src: AVAudioNode?, toDestination dst: AVAudioNode, format: AVAudioFormat, context: [AnyHashable: Any]) -> Int {
+    func audioDeviceModule(_: LKRTCAudioDeviceModule, engine: AVAudioEngine, configureInputFromSource src: AVAudioNode?, toDestination dst: AVAudioNode, format: AVAudioFormat, stateTransition: LKRTCAudioEngineStateTransition, context: [AnyHashable: Any]) -> Int {
         guard let audioManager else { return 0 }
         let entryPoint = audioManager.buildEngineObserverChain()
-        return entryPoint?.engineWillConnectInput(engine, src: src, dst: dst, format: format, context: context) ?? 0
+        return entryPoint?.engineWillConnectInput(engine,
+                                                  src: src,
+                                                  dst: dst,
+                                                  format: format,
+                                                  state: AudioEngineStateTransition(fromRTCType: stateTransition),
+                                                  context: context) ?? 0
     }
 
-    func audioDeviceModule(_: LKRTCAudioDeviceModule, engine: AVAudioEngine, configureOutputFromSource src: AVAudioNode, toDestination dst: AVAudioNode?, format: AVAudioFormat, context: [AnyHashable: Any]) -> Int {
+    func audioDeviceModule(_: LKRTCAudioDeviceModule, engine: AVAudioEngine, configureOutputFromSource src: AVAudioNode, toDestination dst: AVAudioNode?, format: AVAudioFormat, stateTransition: LKRTCAudioEngineStateTransition, context: [AnyHashable: Any]) -> Int {
         guard let audioManager else { return 0 }
         let entryPoint = audioManager.buildEngineObserverChain()
-        return entryPoint?.engineWillConnectOutput(engine, src: src, dst: dst, format: format, context: context) ?? 0
+        return entryPoint?.engineWillConnectOutput(engine,
+                                                   src: src,
+                                                   dst: dst,
+                                                   format: format,
+                                                   state: AudioEngineStateTransition(fromRTCType: stateTransition),
+                                                   context: context) ?? 0
     }
 }
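The delegate callbacks above now hand observers a full AudioEngineStateTransition instead of loose isPlayoutEnabled/isRecordingEnabled flags. A minimal sketch of a downstream consumer, assuming only the public API introduced in this diff (TransitionLogger is a hypothetical name, not part of the SDK):

import AVFAudio
import LiveKit

// Hypothetical observer: logs each prev -> next transition, then forwards
// the call down the chain as AudioEngineObserver implementations should.
final class TransitionLogger: AudioEngineObserver, @unchecked Sendable {
    var next: (any AudioEngineObserver)?

    func engineWillStart(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
        // Both wrapper classes are CustomDebugStringConvertible, so string
        // interpolation prints the prev/next flag sets.
        print("engineWillStart: \(state)")
        return next?.engineWillStart(engine, state: state) ?? 0
    }
}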
diff --git a/Sources/LiveKit/Audio/AudioEngineObserver.swift b/Sources/LiveKit/Audio/AudioEngineObserver.swift
index 8b7d9eca4..1993a6cdc 100644
--- a/Sources/LiveKit/Audio/AudioEngineObserver.swift
+++ b/Sources/LiveKit/Audio/AudioEngineObserver.swift
@@ -29,54 +29,86 @@ public protocol AudioEngineObserver: NextInvokable, Sendable {
     associatedtype Next = any AudioEngineObserver
     var next: (any AudioEngineObserver)? { get set }
 
-    func engineDidCreate(_ engine: AVAudioEngine) -> Int
-    func engineWillEnable(_ engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int
-    func engineWillStart(_ engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int
-    func engineDidStop(_ engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int
-    func engineDidDisable(_ engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int
-    func engineWillRelease(_ engine: AVAudioEngine) -> Int
+    func engineDidCreate(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int
+    func engineWillEnable(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int
+    func engineWillStart(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int
+    func engineDidStop(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int
+    func engineDidDisable(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int
+    func engineWillRelease(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int
 
     /// Provide custom implementation for internal AVAudioEngine's output configuration.
     /// Buffers flow from `src` to `dst`. Preferred format to connect node is provided as `format`.
     /// Return true if custom implementation is provided, otherwise default implementation will be used.
-    func engineWillConnectOutput(_ engine: AVAudioEngine, src: AVAudioNode, dst: AVAudioNode?, format: AVAudioFormat, context: [AnyHashable: Any]) -> Int
+    func engineWillConnectOutput(_ engine: AVAudioEngine,
+                                 src: AVAudioNode,
+                                 dst: AVAudioNode?,
+                                 format: AVAudioFormat,
+                                 state: AudioEngineStateTransition,
+                                 context: [AnyHashable: Any]) -> Int
 
     /// Provide custom implementation for internal AVAudioEngine's input configuration.
     /// Buffers flow from `src` to `dst`. Preferred format to connect node is provided as `format`.
     /// Return true if custom implementation is provided, otherwise default implementation will be used.
-    func engineWillConnectInput(_ engine: AVAudioEngine, src: AVAudioNode?, dst: AVAudioNode, format: AVAudioFormat, context: [AnyHashable: Any]) -> Int
+    func engineWillConnectInput(_ engine: AVAudioEngine,
+                                src: AVAudioNode?,
+                                dst: AVAudioNode,
+                                format: AVAudioFormat,
+                                state: AudioEngineStateTransition,
+                                context: [AnyHashable: Any]) -> Int
 }
 
 /// Default implementation to make it optional.
 public extension AudioEngineObserver {
-    func engineDidCreate(_ engine: AVAudioEngine) -> Int {
-        next?.engineDidCreate(engine) ?? 0
+    func engineDidCreate(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
+        next?.engineDidCreate(engine, state: state) ?? 0
     }
 
-    func engineWillEnable(_ engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int {
-        next?.engineWillEnable(engine, isPlayoutEnabled: isPlayoutEnabled, isRecordingEnabled: isRecordingEnabled) ?? 0
+    func engineWillEnable(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
+        next?.engineWillEnable(engine, state: state) ?? 0
     }
 
-    func engineWillStart(_ engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int {
-        next?.engineWillStart(engine, isPlayoutEnabled: isPlayoutEnabled, isRecordingEnabled: isRecordingEnabled) ?? 0
+    func engineWillStart(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
+        next?.engineWillStart(engine, state: state) ?? 0
    }
 
-    func engineDidStop(_ engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int {
-        next?.engineDidStop(engine, isPlayoutEnabled: isPlayoutEnabled, isRecordingEnabled: isRecordingEnabled) ?? 0
+    func engineDidStop(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
+        next?.engineDidStop(engine, state: state) ?? 0
     }
 
-    func engineDidDisable(_ engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int {
-        next?.engineDidDisable(engine, isPlayoutEnabled: isPlayoutEnabled, isRecordingEnabled: isRecordingEnabled) ?? 0
+    func engineDidDisable(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
+        next?.engineDidDisable(engine, state: state) ?? 0
     }
 
-    func engineWillRelease(_ engine: AVAudioEngine) -> Int {
-        next?.engineWillRelease(engine) ?? 0
+    func engineWillRelease(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
+        next?.engineWillRelease(engine, state: state) ?? 0
     }
 
-    func engineWillConnectOutput(_ engine: AVAudioEngine, src: AVAudioNode, dst: AVAudioNode?, format: AVAudioFormat, context: [AnyHashable: Any]) -> Int {
-        next?.engineWillConnectOutput(engine, src: src, dst: dst, format: format, context: context) ?? 0
+    func engineWillConnectOutput(_ engine: AVAudioEngine,
+                                 src: AVAudioNode,
+                                 dst: AVAudioNode?,
+                                 format: AVAudioFormat,
+                                 state: AudioEngineStateTransition,
+                                 context: [AnyHashable: Any]) -> Int
+    {
+        next?.engineWillConnectOutput(engine,
+                                      src: src,
+                                      dst: dst,
+                                      format: format,
+                                      state: state,
+                                      context: context) ?? 0
     }
 
-    func engineWillConnectInput(_ engine: AVAudioEngine, src: AVAudioNode?, dst: AVAudioNode, format: AVAudioFormat, context: [AnyHashable: Any]) -> Int {
-        next?.engineWillConnectInput(engine, src: src, dst: dst, format: format, context: context) ?? 0
+    func engineWillConnectInput(_ engine: AVAudioEngine,
+                                src: AVAudioNode?,
+                                dst: AVAudioNode,
+                                format: AVAudioFormat,
+                                state: AudioEngineStateTransition,
+                                context: [AnyHashable: Any]) -> Int
+    {
+        next?.engineWillConnectInput(engine,
+                                     src: src,
+                                     dst: dst,
+                                     format: format,
+                                     state: state,
+                                     context: context) ?? 0
    }
 }
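Migration for existing AudioEngineObserver implementations is mechanical: the removed isPlayoutEnabled/isRecordingEnabled parameters correspond to state.next.isOutputEnabled/state.next.isInputEnabled, which is exactly the mapping the DefaultAudioSessionObserver diff below applies. A before/after sketch inside a custom observer:

// Before:
// func engineWillEnable(_ engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int
// After:
func engineWillEnable(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
    let isPlayoutEnabled = state.next.isOutputEnabled // replaces the old parameter
    let isRecordingEnabled = state.next.isInputEnabled // replaces the old parameter
    // ... existing logic unchanged ...
    return next?.engineWillEnable(engine, state: state) ?? 0
}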
diff --git a/Sources/LiveKit/Audio/DefaultAudioSessionObserver.swift b/Sources/LiveKit/Audio/DefaultAudioSessionObserver.swift
index d67d10ac9..100e1405e 100644
--- a/Sources/LiveKit/Audio/DefaultAudioSessionObserver.swift
+++ b/Sources/LiveKit/Audio/DefaultAudioSessionObserver.swift
@@ -28,7 +28,6 @@ internal import LiveKitWebRTC
 public class DefaultAudioSessionObserver: AudioEngineObserver, Loggable, @unchecked Sendable {
     struct State {
-        var isSessionActive = false
         var next: (any AudioEngineObserver)?
 
         // Used for backward compatibility with `customConfigureAudioSessionFunc`.
@@ -58,74 +57,85 @@ public class DefaultAudioSessionObserver: AudioEngineObserver, Loggable, @unchec
         }
     }
 
-    public func engineWillEnable(_ engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int {
+    public func engineWillEnable(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
         if AudioManager.shared._state.customConfigureFunc == nil {
-            log("Configuring audio session...")
             let session = LKRTCAudioSession.sharedInstance()
             session.lockForConfiguration()
             defer { session.unlockForConfiguration() }
 
-            if _state.isSessionActive {
+            let newConfig: AudioSessionConfiguration = state.next.isInputEnabled ? .playAndRecordSpeaker : .playback
+            if session.category != newConfig.category.rawValue || session.mode != newConfig.mode.rawValue {
                 do {
-                    log("AudioSession deactivating due to category switch")
-                    try session.setActive(false) // Deactivate first
-                    _state.mutate { $0.isSessionActive = false }
+                    log("AudioSession switching category: \(session.category) -> \(newConfig.category.rawValue), mode: \(session.mode) -> \(newConfig.mode.rawValue)")
+                    try session.setConfiguration(newConfig.toRTCType())
                 } catch {
-                    log("Failed to deactivate AudioSession with error: \(error)", .error)
+                    log("AudioSession failed to switch category with error: \(error)", .error)
+                    return kFailedToConfigureAudioSessionErrorCode
                 }
             }
 
-            let config: AudioSessionConfiguration = isRecordingEnabled ? .playAndRecordSpeaker : .playback
-            do {
-                log("AudioSession activating category to: \(config.category)")
-                try session.setConfiguration(config.toRTCType(), active: true)
-                _state.mutate { $0.isSessionActive = true }
-            } catch {
-                log("AudioSession failed to configure with error: \(error)", .error)
-                // Pass error code to audio engine
-                return kFailedToConfigureAudioSessionErrorCode
+            if !session.isActive {
+                do {
+                    log("AudioSession activating...")
+                    try session.setActive(true)
+                } catch {
+                    log("AudioSession failed to activate with error: \(error)", .error)
+                    return kFailedToConfigureAudioSessionErrorCode
+                }
             }
 
             log("AudioSession activationCount: \(session.activationCount), webRTCSessionCount: \(session.webRTCSessionCount)")
         }
 
         _state.mutate {
-            $0.isPlayoutEnabled = isPlayoutEnabled
-            $0.isRecordingEnabled = isRecordingEnabled
+            $0.isPlayoutEnabled = state.next.isOutputEnabled
+            $0.isRecordingEnabled = state.next.isInputEnabled
         }
 
         // Call next last
-        return _state.next?.engineWillEnable(engine, isPlayoutEnabled: isPlayoutEnabled, isRecordingEnabled: isRecordingEnabled) ?? 0
+        return _state.next?.engineWillEnable(engine, state: state) ?? 0
     }
 
-    public func engineDidDisable(_ engine: AVAudioEngine, isPlayoutEnabled: Bool, isRecordingEnabled: Bool) -> Int {
+    public func engineDidDisable(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
         // Call next first
-        let nextResult = _state.next?.engineDidDisable(engine, isPlayoutEnabled: isPlayoutEnabled, isRecordingEnabled: isRecordingEnabled)
+        let nextResult = _state.next?.engineDidDisable(engine, state: state)
 
         _state.mutate {
-            $0.isPlayoutEnabled = isPlayoutEnabled
-            $0.isRecordingEnabled = isRecordingEnabled
+            $0.isPlayoutEnabled = state.next.isOutputEnabled
+            $0.isRecordingEnabled = state.next.isInputEnabled
         }
 
         if AudioManager.shared._state.customConfigureFunc == nil {
-            log("Configuring audio session...")
             let session = LKRTCAudioSession.sharedInstance()
             session.lockForConfiguration()
             defer { session.unlockForConfiguration() }
 
-            do {
-                if isPlayoutEnabled, !isRecordingEnabled {
-                    let config: AudioSessionConfiguration = .playback
-                    log("AudioSession switching category to: \(config.category)")
-                    try session.setConfiguration(config.toRTCType())
+            var newConfig: AudioSessionConfiguration?
+
+            // Only when input was disabled
+            if state.prev.isOutputEnabled, state.next.isOutputEnabled, state.prev.isInputEnabled, !state.next.isInputEnabled {
+                let didLegacyMute = (!state.prev.isLegacyMuteMode && state.next.isLegacyMuteMode && state.next.isInputMuted) ||
+                    (!state.prev.isInputMuted && state.next.isInputMuted && state.next.isLegacyMuteMode)
+
+                newConfig = didLegacyMute ? .playAndRecordDefault : .playback
+            }
+
+            if let newConfig, session.category != newConfig.category.rawValue || session.mode != newConfig.mode.rawValue {
+                do {
+                    log("AudioSession switching category: \(session.category) -> \(newConfig.category.rawValue), mode: \(session.mode) -> \(newConfig.mode.rawValue)")
+                    try session.setConfiguration(newConfig.toRTCType())
+                } catch {
+                    log("AudioSession failed to switch category with error: \(error)", .error)
                 }
-                if !isPlayoutEnabled, !isRecordingEnabled, _state.isSessionActive {
-                    log("AudioSession deactivating")
+            }
+
+            if !state.next.isOutputEnabled, !state.next.isInputEnabled, session.isActive {
+                do {
+                    log("AudioSession deactivating...")
                     try session.setActive(false)
-                    _state.mutate { $0.isSessionActive = false }
+                } catch {
+                    log("AudioSession failed to deactivate with error: \(error)", .error)
                 }
-            } catch {
-                log("AudioSession failed to configure with error: \(error)", .error)
             }
 
             log("AudioSession activationCount: \(session.activationCount), webRTCSessionCount: \(session.webRTCSessionCount)")
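The session policy above reduces to two decisions. A hedged distillation for reference (these free functions are illustrative, not SDK API; the logic mirrors the diff):

// On enable: recording needs a record-capable category, otherwise playback only.
func configOnEnable(_ t: AudioEngineStateTransition) -> AudioSessionConfiguration {
    t.next.isInputEnabled ? .playAndRecordSpeaker : .playback
}

// On disable: only act when output stays on and input just turned off.
// A legacy (restartEngine) mute appears to keep a record-capable category,
// but in .default mode; a genuine input stop falls back to playback-only.
func configOnDisable(_ t: AudioEngineStateTransition) -> AudioSessionConfiguration? {
    guard t.prev.isOutputEnabled, t.next.isOutputEnabled,
          t.prev.isInputEnabled, !t.next.isInputEnabled else { return nil }
    let didLegacyMute = (!t.prev.isLegacyMuteMode && t.next.isLegacyMuteMode && t.next.isInputMuted) ||
        (!t.prev.isInputMuted && t.next.isInputMuted && t.next.isLegacyMuteMode)
    return didLegacyMute ? .playAndRecordDefault : .playback
}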
diff --git a/Sources/LiveKit/Audio/DefaultMixerAudioObserver.swift b/Sources/LiveKit/Audio/DefaultMixerAudioObserver.swift
index 820ee09e9..3e341c862 100644
--- a/Sources/LiveKit/Audio/DefaultMixerAudioObserver.swift
+++ b/Sources/LiveKit/Audio/DefaultMixerAudioObserver.swift
@@ -75,7 +75,7 @@ public final class DefaultMixerAudioObserver: AudioEngineObserver, Loggable {
         next = handler
     }
 
-    public func engineDidCreate(_ engine: AVAudioEngine) -> Int {
+    public func engineDidCreate(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
         let (appNode, appMixerNode, micNode, micMixerNode) = _state.read {
             ($0.appNode, $0.appMixerNode, $0.micNode, $0.micMixerNode)
         }
@@ -86,12 +86,12 @@ public final class DefaultMixerAudioObserver: AudioEngineObserver, Loggable {
         engine.attach(micMixerNode)
 
         // Invoke next
-        return next?.engineDidCreate(engine) ?? 0
+        return next?.engineDidCreate(engine, state: state) ?? 0
     }
 
-    public func engineWillRelease(_ engine: AVAudioEngine) -> Int {
+    public func engineWillRelease(_ engine: AVAudioEngine, state: AudioEngineStateTransition) -> Int {
         // Invoke next
-        let nextResult = next?.engineWillRelease(engine)
+        let nextResult = next?.engineWillRelease(engine, state: state)
 
         let (appNode, appMixerNode, micNode, micMixerNode) = _state.read {
             ($0.appNode, $0.appMixerNode, $0.micNode, $0.micMixerNode)
@@ -105,11 +105,14 @@ public final class DefaultMixerAudioObserver: AudioEngineObserver, Loggable {
         return nextResult ?? 0
     }
 
-    public func engineWillConnectInput(_ engine: AVAudioEngine, src: AVAudioNode?, dst: AVAudioNode, format: AVAudioFormat, context: [AnyHashable: Any]) -> Int {
+    public func engineWillConnectInput(_ engine: AVAudioEngine, src: AVAudioNode?, dst: AVAudioNode, format: AVAudioFormat,
+                                       state: AudioEngineStateTransition,
+                                       context: [AnyHashable: Any]) -> Int
+    {
         // Get the main mixer
         guard let mainMixerNode = context[kRTCAudioEngineInputMixerNodeKey] as? AVAudioMixerNode else {
             // If failed to get main mixer, call next and return.
-            return next?.engineWillConnectInput(engine, src: src, dst: dst, format: format, context: context) ?? 0
+            return next?.engineWillConnectInput(engine, src: src, dst: dst, format: format, state: state, context: context) ?? 0
         }
 
         // Read nodes from state lock.
@@ -141,7 +144,7 @@ public final class DefaultMixerAudioObserver: AudioEngineObserver, Loggable {
         }
 
         // Invoke next
-        return next?.engineWillConnectInput(engine, src: src, dst: dst, format: format, context: context) ?? 0
+        return next?.engineWillConnectInput(engine, src: src, dst: dst, format: format, state: state, context: context) ?? 0
     }
 }
diff --git a/Sources/LiveKit/Track/AudioManager.swift b/Sources/LiveKit/Track/AudioManager.swift
index fd30259b2..4ebd8db6f 100644
--- a/Sources/LiveKit/Track/AudioManager.swift
+++ b/Sources/LiveKit/Track/AudioManager.swift
@@ -303,15 +303,12 @@ public class AudioManager: Loggable {
     /// Normally, you do not need to set this manually since it will be handled automatically.
     public var isMicrophoneMuted: Bool {
         get { RTC.audioDeviceModule.isMicrophoneMuted }
-        set { RTC.audioDeviceModule.isMicrophoneMuted = newValue }
+        set { RTC.audioDeviceModule.setMicrophoneMuted(newValue) }
     }
 
     // MARK: - For testing
 
-    var engineState: RTCAudioEngineState {
-        get { RTC.audioDeviceModule.engineState }
-        set { RTC.audioDeviceModule.engineState = newValue }
-    }
+    var engineState: LKRTCAudioEngineState { RTC.audioDeviceModule.engineState }
 
     var isPlayoutInitialized: Bool {
         RTC.audioDeviceModule.isPlayoutInitialized
diff --git a/Sources/LiveKit/Types/AudioSessionConfiguration.swift b/Sources/LiveKit/Types/AudioSessionConfiguration.swift
index 89f855ea7..4905a717e 100644
--- a/Sources/LiveKit/Types/AudioSessionConfiguration.swift
+++ b/Sources/LiveKit/Types/AudioSessionConfiguration.swift
@@ -40,6 +40,10 @@ public extension AudioSessionConfiguration {
                                                               categoryOptions: [.mixWithOthers, .allowBluetooth, .allowBluetoothA2DP, .allowAirPlay],
                                                               mode: .videoChat)
 
+    static let playAndRecordDefault = AudioSessionConfiguration(category: .playAndRecord,
+                                                                categoryOptions: [.mixWithOthers, .allowBluetooth, .allowBluetoothA2DP, .allowAirPlay],
+                                                                mode: .default)
+
     static let playAndRecordReceiver = AudioSessionConfiguration(category: .playAndRecord,
                                                                  categoryOptions: [.mixWithOthers, .allowBluetooth, .allowBluetoothA2DP, .allowAirPlay],
                                                                  mode: .voiceChat)
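From the app's point of view the AudioManager surface is unchanged; only the call behind the setter moved to the ADM's method form, and the test-only engineState accessor became read-only:

// Usage stays the same as before this diff:
AudioManager.shared.isMicrophoneMuted = true

The new .playAndRecordDefault preset sits between .playAndRecordSpeaker and .playback: same .playAndRecord category and options, but .default mode, which is what engineDidDisable above selects after a legacy-mode mute.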
diff --git a/Tests/LiveKitTests/AudioEngineTests.swift b/Tests/LiveKitTests/AudioEngineTests.swift
index 795ed5b97..4bfbffcb1 100644
--- a/Tests/LiveKitTests/AudioEngineTests.swift
+++ b/Tests/LiveKitTests/AudioEngineTests.swift
@@ -353,6 +353,33 @@ class AudioEngineTests: LKTestCase {
             try await rooms[0].localParticipant.setMicrophone(enabled: true)
         })
     }
+
+    func testRandomCalls() async throws {
+        struct EngineCallTest {
+            let title: String
+            let call: () -> Int
+            let expected: (LKRTCAudioEngineState) -> [Int]
+        }
+
+        let adm = RTC.audioDeviceModule
+
+        let tests: [EngineCallTest] = [
+            EngineCallTest(title: "Init Playout", call: adm.initPlayout, expected: { _ in [0] }),
+            EngineCallTest(title: "Start Playout", call: adm.startPlayout, expected: { $0.isOutputEnabled ? [0] : [-1] }),
+            EngineCallTest(title: "Stop Playout", call: adm.stopPlayout, expected: { _ in [0] }),
+            EngineCallTest(title: "Init Recording", call: adm.initRecording, expected: { _ in [0] }),
+            EngineCallTest(title: "Start Recording", call: adm.startRecording, expected: { $0.isInputEnabled ? [0] : [-1] }),
+            EngineCallTest(title: "Stop Recording", call: adm.stopRecording, expected: { _ in [0] }),
+        ]
+
+        for i in 0 ..< 1000 {
+            let test = tests.randomElement()!
+            print("Calling test #\(i) \(test.title)")
+            let r = test.call()
+            let s = RTC.audioDeviceModule.engineState
+            XCTAssert(test.expected(s).contains(r), "Call #\(i) \(test.title) failed with result: \(r), state: \(s)")
+        }
+    }
 }
 
 final class FailingEngineObserver: AudioEngineObserver {