Main controller class.
#import <AEAudioController.h>
Inherits <NSObject>.
Instance Methods | |
(NSTimeInterval) | - AEAudioControllerInputLatency |
Input latency (in seconds) | |
(NSTimeInterval) | - AEAudioControllerOutputLatency |
Output latency (in seconds) | |
(AudioTimeStamp) | - AEAudioControllerCurrentAudioTimestamp |
Get the current audio system timestamp. | |
(void) | - setAudiobusSenderPort:forChannel: |
Set an Audiobus sender port to send audio from a particular channel. | |
(void) | - setAudiobusSenderPort:forChannelGroup: |
Set an Audiobus sender port to send audio from a particular channel group. | |
(ABFilterPort *) | audiobusFilterPort (deprecated: "No longer in use") |
Audiobus filter port. | |
(ABSenderPort *) | audiobusSenderPort (deprecated: "use ABSenderPort's audio unit initializer instead") |
Audiobus sender port. | |
Channel and channel group management | |
(void) | - addChannels: |
Add channels. | |
(void) | - addChannels:toChannelGroup: |
Add channels to a channel group. | |
(void) | - removeChannels: |
Remove channels. | |
(void) | - removeChannels:fromChannelGroup: |
Remove channels from a channel group. | |
(NSArray *) | - channels |
Obtain a list of all channels, across all channel groups. | |
(NSArray *) | - channelsInChannelGroup: |
Get a list of channels within a channel group. | |
(AEChannelGroupRef) | - createChannelGroup |
Create a channel group. | |
(AEChannelGroupRef) | - createChannelGroupWithinChannelGroup: |
Create a channel sub-group within an existing channel group. | |
(void) | - removeChannelGroup: |
Remove a channel group. | |
(NSArray *) | - topLevelChannelGroups |
Get a list of top-level channel groups. | |
(NSArray *) | - channelGroupsInChannelGroup: |
Get a list of sub-groups contained within a group. | |
(void) | - setVolume:forChannelGroup: |
Set the volume level of a channel group. | |
(float) | - volumeForChannelGroup: |
Get the volume level of a channel group. | |
(void) | - setPan:forChannelGroup: |
Set the pan of a channel group. | |
(float) | - panForChannelGroup: |
Get the pan of a channel group. | |
(void) | - setPlaying:forChannelGroup: |
Set the playing status of a channel group. | |
(BOOL) | - channelGroupIsPlaying: |
Get the playing status of a channel group. | |
(void) | - setMuted:forChannelGroup: |
Set the mute status of a channel group. | |
(BOOL) | - channelGroupIsMuted: |
Get the mute status of a channel group. | |
Filters | |
(void) | - addFilter: |
Add an audio filter to the system output. | |
(void) | - addFilter:toChannel: |
Add an audio filter to a channel. | |
(void) | - addFilter:toChannelGroup: |
Add an audio filter to a channel group. | |
(void) | - addInputFilter: |
Add an audio filter to the system input. | |
(void) | - addInputFilter:forChannels: |
Add an audio filter to the system input. | |
(void) | - removeFilter: |
Remove a filter from system output. | |
(void) | - removeFilter:fromChannel: |
Remove a filter from a channel. | |
(void) | - removeFilter:fromChannelGroup: |
Remove a filter from a channel group. | |
(void) | - removeInputFilter: |
Remove a filter from system input. | |
(NSArray *) | - filters |
Get a list of all top-level output filters. | |
(NSArray *) | - filtersForChannel: |
Get a list of all filters currently operating on the channel. | |
(NSArray *) | - filtersForChannelGroup: |
Get a list of all filters currently operating on the channel group. | |
(NSArray *) | - inputFilters |
Get a list of all input filters. | |
Output receivers | |
(void) | - addOutputReceiver: |
Add an output receiver. | |
(void) | - addOutputReceiver:forChannel: |
Add an output receiver. | |
(void) | - addOutputReceiver:forChannelGroup: |
Add an output receiver for a particular channel group. | |
(void) | - removeOutputReceiver: |
Remove an output receiver. | |
(void) | - removeOutputReceiver:fromChannel: |
Remove an output receiver from a channel. | |
(void) | - removeOutputReceiver:fromChannelGroup: |
Remove an output receiver from a particular channel group. | |
(NSArray *) | - outputReceivers |
Obtain a list of all top-level output receivers. | |
(NSArray *) | - outputReceiversForChannel: |
Obtain a list of all output receivers for the specified channel. | |
(NSArray *) | - outputReceiversForChannelGroup: |
Obtain a list of all output receivers for the specified group. | |
Input receivers | |
(void) | - addInputReceiver: |
Add an input receiver. | |
(void) | - addInputReceiver:forChannels: |
Add an input receiver, specifying a channel selection. | |
(void) | - removeInputReceiver: |
Remove an input receiver. | |
(void) | - removeInputReceiver:fromChannels: |
Remove an input receiver. | |
(NSArray *) | - inputReceivers |
Obtain a list of all input receivers. | |
Timing receivers | |
(void) | - addTimingReceiver: |
Add a timing receiver. | |
(void) | - removeTimingReceiver: |
Remove a timing receiver. | |
(NSArray *) | - timingReceivers |
Obtain a list of all timing receivers. | |
Metering | |
(void) | - outputAveragePowerLevel:peakHoldLevel: |
Get output power level information since this method was last called. | |
(void) | - outputAveragePowerLevels:peakHoldLevels:channelCount: |
Get output power level information for multiple channels since this method was last called. | |
(void) | - averagePowerLevel:peakHoldLevel:forGroup: |
Get output power level information for a particular group, since this method was last called. | |
(void) | - averagePowerLevels:peakHoldLevels:forGroup:channelCount: |
Get output power level information for a particular group, since this method was last called. | |
(void) | - inputAveragePowerLevel:peakHoldLevel: |
Get input power level information since this method was last called. | |
(void) | - inputAveragePowerLevels:peakHoldLevels:channelCount: |
Get input power level information for multiple channels since this method was last called. | |
Utilities | |
(AudioStreamBasicDescription *) | - AEAudioControllerAudioDescription |
Get access to the configured AudioStreamBasicDescription. | |
(AudioStreamBasicDescription *) | - AEAudioControllerInputAudioDescription |
Get access to the input AudioStreamBasicDescription. | |
(long) | - AEConvertSecondsToFrames |
Convert a time span in seconds into a number of frames at the current sample rate. | |
(NSTimeInterval) | - AEConvertFramesToSeconds |
Convert a number of frames into a time span in seconds. | |
(BOOL) | - AECurrentThreadIsAudioThread |
Determine if the current thread is the audio thread. | |
Properties | |
NSString * | audioSessionCategory |
Audio session category to use. | |
BOOL | allowMixingWithOtherApps |
Whether to allow mixing audio with other apps. | |
BOOL | useMeasurementMode |
Whether to use the "Measurement" Audio Session Mode for improved audio quality and bass response. | |
BOOL | avoidMeasurementModeForBuiltInSpeaker |
Whether to avoid using Measurement Mode with the built-in speaker. | |
BOOL | boostBuiltInMicGainInMeasurementMode |
Whether to boost the input volume while using Measurement Mode with the built-in mic. | |
BOOL | muteOutput |
Mute output. | |
float | masterOutputVolume |
Access the master output volume. | |
BOOL | enableBluetoothInput |
Enable audio input from Bluetooth devices. | |
BOOL | inputGainAvailable |
Determine whether input gain is available. | |
float | inputGain |
Set audio input gain (if input gain is available) | |
BOOL | voiceProcessingEnabled |
Whether to use the built-in voice processing system. | |
BOOL | voiceProcessingOnlyForSpeakerAndMicrophone |
Whether to only perform voice processing for the SpeakerAndMicrophone route. | |
AEInputMode | inputMode |
Input mode: How to handle incoming audio. | |
NSArray * | inputChannelSelection |
Input channel selection. | |
NSTimeInterval | preferredBufferDuration |
Preferred buffer duration (in seconds) | |
NSTimeInterval | currentBufferDuration |
Current buffer duration (in seconds) | |
NSTimeInterval | inputLatency |
Input latency (in seconds) | |
NSTimeInterval | outputLatency |
Output latency (in seconds) | |
BOOL | automaticLatencyManagement |
Whether to automatically account for input/output latency. | |
BOOL | running |
Determine whether the audio engine is running. | |
BOOL | playingThroughDeviceSpeaker |
Determine whether audio is currently being played through the device's speaker. | |
BOOL | recordingThroughDeviceMicrophone |
Determine whether audio is currently being recorded through the device's mic. | |
BOOL | audioInputAvailable |
Whether audio input is currently available. | |
BOOL | inputEnabled |
Whether audio input is currently enabled. | |
BOOL | outputEnabled |
Whether audio output is currently available. | |
int | numberOfInputChannels |
The number of audio channels that the current audio input device provides. | |
AudioStreamBasicDescription | inputAudioDescription |
The audio description defining the input audio format. | |
AudioStreamBasicDescription | audioDescription |
The audio description that the audio controller was setup with. | |
AudioUnit | audioUnit |
The Remote IO audio unit used for input and output. | |
AUGraph | audioGraph |
The audio graph handle. | |
ABReceiverPort * | audiobusReceiverPort |
Audiobus receiver port. | |
Setup and start/stop | |
(AudioStreamBasicDescription) | + interleaved16BitStereoAudioDescription |
16-bit stereo audio description, interleaved | |
(AudioStreamBasicDescription) | + nonInterleaved16BitStereoAudioDescription |
16-bit stereo audio description, non-interleaved | |
(AudioStreamBasicDescription) | + nonInterleavedFloatStereoAudioDescription |
Floating-point stereo audio description, non-interleaved. | |
(BOOL) | + voiceProcessingAvailable |
Determine whether voice processing is available on this device. | |
(id) | - initWithAudioDescription: |
Initialize the audio controller system, with the audio description you provide. | |
(id) | - initWithAudioDescription:inputEnabled: |
Initialize the audio controller system, with the audio description you provide. | |
(id) | - initWithAudioDescription:options: |
Initialize the audio controller system, with the audio description you provide. | |
(id) | - initWithAudioDescription:inputEnabled:useVoiceProcessing: |
Initialize the audio controller system, with the audio description you provide. | |
(id) | - initWithAudioDescription:inputEnabled:useVoiceProcessing:outputEnabled: |
Initialize the audio controller system, with the audio description you provide. | |
(BOOL) | - start: |
Start audio engine. | |
(void) | - stop |
Stop audio engine. | |
(BOOL) | - setAudioDescription:error: |
Set a new audio description. | |
(BOOL) | - setInputEnabled:error: |
Enable or disable input. | |
(BOOL) | - setOutputEnabled:error: |
Enable or disable output. | |
(BOOL) | - setAudioDescription:inputEnabled:outputEnabled:error: |
Composite update method. | |
Realtime/Main thread messaging system | |
AEMessageQueue * | messageQueue |
The asynchronous message queue used for safe communication between main and realtime thread. | |
(void) | - performAsynchronousMessageExchangeWithBlock:responseBlock: |
Send a message to the realtime thread asynchronously, if running, optionally receiving a response via a block. | |
(BOOL) | - performSynchronousMessageExchangeWithBlock: |
Send a message to the realtime thread synchronously, if running. | |
(void) | - AEAudioControllerSendAsynchronousMessageToMainThread |
Send a message to the main thread asynchronously. | |
(void) | - beginMessageExchangeBlock |
Begins a block of messages to be performed consecutively. | |
(void) | - endMessageExchangeBlock |
Ends a consecutive block of messages. | |
Main controller class.
Use: initialize the controller with an audio description, configure properties and add channels, filters and receivers as required, then call start: to begin processing audio.
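A minimal setup sketch using only calls documented on this page (the format choice and buffer duration are illustrative):

    AEAudioController *audioController = [[AEAudioController alloc]
        initWithAudioDescription:[AEAudioController nonInterleavedFloatStereoAudioDescription]
                    inputEnabled:YES];
    audioController.preferredBufferDuration = 0.005;

    NSError *error = nil;
    if ( ![audioController start:&error] ) {
        NSLog(@"Couldn't start audio engine: %@", error);
    }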
+ (AudioStreamBasicDescription) interleaved16BitStereoAudioDescription |
16-bit stereo audio description, interleaved
+ (AudioStreamBasicDescription) nonInterleaved16BitStereoAudioDescription |
16-bit stereo audio description, non-interleaved
+ (AudioStreamBasicDescription) nonInterleavedFloatStereoAudioDescription |
Floating-point stereo audio description, non-interleaved.
+ (BOOL) voiceProcessingAvailable |
Determine whether voice processing is available on this device.
Older devices are not able to perform voice processing - this determines whether it's available. See voiceProcessingEnabled for info.
- (id) initWithAudioDescription: | (AudioStreamBasicDescription) | audioDescription |
Initialize the audio controller system, with the audio description you provide.
Creates and configures the audio unit and initial mixer audio unit.
This initializes the audio system without input (from microphone, etc.) enabled. If you desire audio input, use initWithAudioDescription:inputEnabled: or initWithAudioDescription:options:.
audioDescription | Audio description to use for all audio |
- (id) initWithAudioDescription: | (AudioStreamBasicDescription) | audioDescription | |
inputEnabled: | (BOOL) | enableInput | |
Initialize the audio controller system, with the audio description you provide.
Creates and configures the input/output audio unit and initial mixer audio unit.
audioDescription | Audio description to use for all audio |
enableInput | Whether to enable audio input from the microphone or another input device |
- (id) initWithAudioDescription: | (AudioStreamBasicDescription) | audioDescription | |
options: | (AEAudioControllerOptions) | options | |
Initialize the audio controller system, with the audio description you provide.
Creates and configures the audio unit and initial mixer audio unit.
audioDescription | Audio description to use for all audio |
options | Options to enable input, voice processing, etc. (See AEAudioControllerOptions). |
- (id) initWithAudioDescription: | (AudioStreamBasicDescription) | audioDescription | |
inputEnabled: | (BOOL) | enableInput | |
useVoiceProcessing: | ("use initWithAudioDescription:options: instead") | __deprecated_msg | |
Initialize the audio controller system, with the audio description you provide.
Creates and configures the input/output audio unit and initial mixer audio unit.
audioDescription | Audio description to use for all audio |
enableInput | Whether to enable audio input from the microphone or another input device |
useVoiceProcessing | Whether to use the voice processing unit (see voiceProcessingEnabled and voiceProcessingAvailable). |
- (id) initWithAudioDescription: | (AudioStreamBasicDescription) | audioDescription | |
inputEnabled: | (BOOL) | enableInput | |
useVoiceProcessing: | (BOOL) | useVoiceProcessing | |
outputEnabled: | ("use initWithAudioDescription:options: instead") | __deprecated_msg | |
Initialize the audio controller system, with the audio description you provide.
Creates and configures the input/output audio unit and initial mixer audio unit.
audioDescription | Audio description to use for all audio |
enableInput | Whether to enable audio input from the microphone or another input device |
useVoiceProcessing | Whether to use the voice processing unit (see voiceProcessingEnabled and voiceProcessingAvailable). |
enableOutput | Whether to enable audio output. Sometimes when recording from external input-only devices at high sample rates (96k) you may need to disable output for the sample rate to be actually used. |
- (BOOL) start: | (NSError **) | error |
Start audio engine.
error | On output, if not NULL, the error |
- (void) stop |
Stop audio engine.
- (BOOL) setAudioDescription: | (AudioStreamBasicDescription) | audioDescription | |
error: | (NSError **) | error | |
Set a new audio description.
This will cause the audio controller to stop, tear down and recreate its rendering resources, then start again (if it was previously running).
audioDescription | The new audio description |
error | On output, the error, if one occurred |
- (BOOL) setInputEnabled: | (BOOL) | inputEnabled | |
error: | (NSError **) | error | |
Enable or disable input.
This will cause the audio controller to stop, tear down and recreate its rendering resources, then start again (if it was previously running).
inputEnabled | Whether to enable input |
error | On output, the error, if one occurred |
- (BOOL) setOutputEnabled: | (BOOL) | outputEnabled | |
error: | (NSError **) | error | |
Enable or disable output.
This will cause the audio controller to stop, tear down and recreate its rendering resources, then start again (if it was previously running).
outputEnabled | Whether to enable output |
error | On output, the error, if one occurred |
- (BOOL) setAudioDescription: | (AudioStreamBasicDescription) | audioDescription | |
inputEnabled: | (BOOL) | inputEnabled | |
outputEnabled: | (BOOL) | outputEnabled | |
error: | (NSError **) | error | |
Composite update method.
This convenience method updates the audio description, and the input and output enabled status.
audioDescription | The new audio description |
inputEnabled | Whether to enable input |
outputEnabled | Whether to enable output |
error | On output, the error, if one occurred |
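For example, to switch to the floating-point format and enable input and output in one pass (a sketch; assumes a previously created audioController):

    NSError *error = nil;
    BOOL updated = [audioController setAudioDescription:[AEAudioController nonInterleavedFloatStereoAudioDescription]
                                           inputEnabled:YES
                                          outputEnabled:YES
                                                  error:&error];
    if ( !updated ) {
        NSLog(@"Couldn't update audio controller: %@", error);
    }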
- (void) addChannels: | (NSArray *) | channels |
Add channels.
Takes an array of one or more objects that implement the AEAudioPlayable protocol.
channels | An array of id<AEAudioPlayable> objects |
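For example, using TAAE's AEBlockChannel convenience class (not documented on this page, so treat its exact block signature as an assumption):

    AEBlockChannel *channel = [AEBlockChannel channelWithBlock:^(const AudioTimeStamp *time,
                                                                 UInt32 frames,
                                                                 AudioBufferList *audio) {
        // Fill 'audio' with up to 'frames' frames in the controller's audioDescription format
    }];
    [audioController addChannels:@[channel]];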
- (void) addChannels: | (NSArray *) | channels | |
toChannelGroup: | (AEChannelGroupRef) | group | |
Add channels to a channel group.
channels | Array of id<AEAudioPlayable> objects |
group | Group identifier |
- (void) removeChannels: | (NSArray *) | channels |
Remove channels.
Takes an array of one or more objects that implement the AEAudioPlayable protocol.
channels | An array of id<AEAudioPlayable> objects |
- (void) removeChannels: | (NSArray *) | channels | |
fromChannelGroup: | (AEChannelGroupRef) | group | |
Remove channels from a channel group.
channels | Array of id<AEAudioPlayable> objects |
group | Group identifier |
- (NSArray*) channels |
Obtain a list of all channels, across all channel groups.
- (NSArray*) channelsInChannelGroup: | (AEChannelGroupRef) | group |
Get a list of channels within a channel group.
group | Group identifier |
- (AEChannelGroupRef) createChannelGroup |
Create a channel group.
Channel groups cause the channels within the group to be pre-mixed together, so that one filter can be applied to several channels without the performance impact of filtering each channel individually.
You can create trees of channel groups using addChannels:toChannelGroup:, with filtering at each branch, for complex filter chaining.
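A sketch of grouping two channels so that one volume setting and one filter apply to both ('player1', 'player2' and 'reverb' are placeholders for objects conforming to AEAudioPlayable and AEAudioFilter):

    AEChannelGroupRef group = [audioController createChannelGroup];
    [audioController addChannels:@[player1, player2] toChannelGroup:group];
    [audioController setVolume:0.8 forChannelGroup:group];
    [audioController addFilter:reverb toChannelGroup:group];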
- (AEChannelGroupRef) createChannelGroupWithinChannelGroup: | (AEChannelGroupRef) | group |
Create a channel sub-group within an existing channel group.
With this method, you can create trees of channel groups, with filtering steps at each branch of the tree.
group | Group identifier |
- (void) removeChannelGroup: | (AEChannelGroupRef) | group |
Remove a channel group.
Removes channels from the group and releases associated resources.
group | Group identifier |
- (NSArray*) topLevelChannelGroups |
Get a list of top-level channel groups.
- (NSArray*) channelGroupsInChannelGroup: | (AEChannelGroupRef) | group |
Get a list of sub-groups contained within a group.
group | Group identifier |
- (void) setVolume: | (float) | volume | |
forChannelGroup: | (AEChannelGroupRef) | group | |
Set the volume level of a channel group.
volume | Group volume (0 - 1) |
group | Group identifier |
- (float) volumeForChannelGroup: | (AEChannelGroupRef) | group |
Get the volume level of a channel group.
group | Group identifier |
- (void) setPan: | (float) | pan | |
forChannelGroup: | (AEChannelGroupRef) | group | |
Set the pan of a channel group.
pan | Group pan (-1.0, left to 1.0, right) |
group | Group identifier |
- (float) panForChannelGroup: | (AEChannelGroupRef) | group |
Get the pan of a channel group.
group | Group identifier |
- (void) setPlaying: | (BOOL) | playing | |
forChannelGroup: | (AEChannelGroupRef) | group | |
Set the playing status of a channel group.
If this is NO, then the group will be silenced and no further render callbacks will be performed on child channels until set to YES again.
playing | Whether group is playing |
group | Group identifier |
- (BOOL) channelGroupIsPlaying: | (AEChannelGroupRef) | group |
Get the playing status of a channel group.
group | Group identifier |
- (void) setMuted: | (BOOL) | muted | |
forChannelGroup: | (AEChannelGroupRef) | group | |
Set the mute status of a channel group.
If YES, group will be silenced, but render callbacks of child channels will continue to be performed.
muted | Whether group is muted |
group | Group identifier |
- (BOOL) channelGroupIsMuted: | (AEChannelGroupRef) | group |
Get the mute status of a channel group.
group | Group identifier |
- (void) addFilter: | (id< AEAudioFilter >) | filter |
Add an audio filter to the system output.
Audio filters are used to process live audio before playback.
filter | An object that implements the AEAudioFilter protocol |
- (void) addFilter: | (id< AEAudioFilter >) | filter | |
toChannel: | (id< AEAudioPlayable >) | channel | |
Add an audio filter to a channel.
Audio filters are used to process live audio before playback.
You can apply audio filters to one or more channels - use channel groups to do so without the extra performance overhead by pre-mixing channels together first. See createChannelGroup.
You can also apply more than one audio filter to a channel - each audio filter will be performed on the audio in the order in which the filters were added using this method.
filter | An object that implements the AEAudioFilter protocol |
channel | The channel on which to perform audio processing |
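For example, with 'filter' standing in for any object conforming to AEAudioFilter (such as one of TAAE's filter modules):

    [audioController addFilter:filter toChannel:channel];
    // ...later, to stop filtering that channel:
    [audioController removeFilter:filter fromChannel:channel];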
- (void) addFilter: | (id< AEAudioFilter >) | filter | |
toChannelGroup: | (AEChannelGroupRef) | group | |
Add an audio filter to a channel group.
Audio filters are used to process live audio before playback.
Create and add filters to a channel group to process multiple channels with one filter, without the performance hit of processing each channel individually.
filter | An object that implements the AEAudioFilter protocol |
group | The channel group on which to perform audio processing |
- (void) addInputFilter: | (id< AEAudioFilter >) | filter |
Add an audio filter to the system input.
Audio filters are used to process live audio.
filter | An object that implements the AEAudioFilter protocol |
- (void) addInputFilter: | (id< AEAudioFilter >) | filter | |
forChannels: | (NSArray *) | channels | |
Add an audio filter to the system input.
Audio filters are used to process live audio.
filter | An object that implements the AEAudioFilter protocol |
channels | An array of NSNumbers identifying by index the input channels to filter, or nil for default (the same as addInputFilter:) |
- (void) removeFilter: | (id< AEAudioFilter >) | filter |
Remove a filter from system output.
filter | The filter to remove |
- (void) removeFilter: | (id< AEAudioFilter >) | filter | |
fromChannel: | (id< AEAudioPlayable >) | channel | |
Remove a filter from a channel.
filter | The filter to remove |
channel | The channel to stop filtering |
- (void) removeFilter: | (id< AEAudioFilter >) | filter | |
fromChannelGroup: | (AEChannelGroupRef) | group | |
Remove a filter from a channel group.
filter | The filter to remove |
group | The group to stop filtering |
- (void) removeInputFilter: | (id< AEAudioFilter >) | filter |
Remove a filter from system input.
filter | The filter to remove |
- (NSArray*) filters |
Get a list of all top-level output filters.
- (NSArray*) filtersForChannel: | (id< AEAudioPlayable >) | channel |
Get a list of all filters currently operating on the channel.
channel | Channel to get filters for |
- (NSArray*) filtersForChannelGroup: | (AEChannelGroupRef) | group |
Get a list of all filters currently operating on the channel group.
group | Channel group to get filters for |
- (NSArray*) inputFilters |
Get a list of all input filters.
- (void) addOutputReceiver: | (id< AEAudioReceiver >) | receiver |
Add an output receiver.
Output receivers receive audio that is being played by the system. Use this method to add a receiver to receive audio that consists of all the playing channels mixed together.
receiver | An object that implements the AEAudioReceiver protocol |
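For example, using TAAE's AEBlockAudioReceiver convenience class (an assumption here, as it isn't documented on this page) to tap the mixed output:

    AEBlockAudioReceiver *tap = [AEBlockAudioReceiver audioReceiverWithBlock:^(void *source,
                                                                               const AudioTimeStamp *time,
                                                                               UInt32 frames,
                                                                               AudioBufferList *audio) {
        // Inspect or copy the mixed output here; this block runs on the realtime thread
    }];
    [audioController addOutputReceiver:tap];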
- (void) addOutputReceiver: | (id< AEAudioReceiver >) | receiver | |
forChannel: | (id< AEAudioPlayable >) | channel | |
Add an output receiver.
Output receivers receive audio that is being played by the system. Use this method to add a callback to receive audio from a particular channel.
receiver | An object that implements the AEAudioReceiver protocol |
channel | A channel |
- (void) addOutputReceiver: | (id< AEAudioReceiver >) | receiver | |
forChannelGroup: | (AEChannelGroupRef) | group | |
Add an output receiver for a particular channel group.
Output receivers receive audio that is being played by the system. By registering a callback for a particular channel group, you can receive the mixed audio of only that group.
receiver | An object that implements the AEAudioReceiver protocol |
group | A channel group identifier |
- (void) removeOutputReceiver: | (id< AEAudioReceiver >) | receiver |
Remove an output receiver.
receiver | The receiver to remove |
- (void) removeOutputReceiver: | (id< AEAudioReceiver >) | receiver | |
fromChannel: | (id< AEAudioPlayable >) | channel | |
Remove an output receiver from a channel.
receiver | The receiver to remove |
channel | Channel to remove receiver from |
- (void) removeOutputReceiver: | (id< AEAudioReceiver >) | receiver | |
fromChannelGroup: | (AEChannelGroupRef) | group | |
Remove an output receiver from a particular channel group.
receiver | The receiver to remove |
group | A channel group identifier |
- (NSArray*) outputReceivers |
Obtain a list of all top-level output receivers.
- (NSArray*) outputReceiversForChannel: | (id< AEAudioPlayable >) | channel |
Obtain a list of all output receivers for the specified channel.
channel | A channel |
- (NSArray*) outputReceiversForChannelGroup: | (AEChannelGroupRef) | group |
Obtain a list of all output receivers for the specified group.
group | A channel group identifier |
- (void) addInputReceiver: | (id< AEAudioReceiver >) | receiver |
Add an input receiver.
Input receivers receive audio that is being received by the microphone or another input device.
Note that the audio format provided to input receivers added via this method depends on the value of inputMode.
Check the audio buffer list parameters to determine the kind of audio you are receiving (for example, if you are using an interleaved format such as interleaved16BitStereoAudioDescription, then the audio->mBuffers[0].mNumberChannels field will be 1 for mono and 2 for stereo audio). If you are using a non-interleaved format such as nonInterleaved16BitStereoAudioDescription, then audio->mNumberBuffers will be 1 for mono, and 2 for stereo.
receiver | An object that implements the AEAudioReceiver protocol |
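A sketch of determining the incoming channel count inside a receiver callback, following the note above (AEBlockAudioReceiver is assumed; the checks use only standard AudioBufferList fields):

    [audioController addInputReceiver:
        [AEBlockAudioReceiver audioReceiverWithBlock:^(void *source,
                                                       const AudioTimeStamp *time,
                                                       UInt32 frames,
                                                       AudioBufferList *audio) {
            // Interleaved: one buffer holding all channels; non-interleaved: one buffer per channel
            UInt32 channelCount = audio->mNumberBuffers == 1 ? audio->mBuffers[0].mNumberChannels
                                                             : audio->mNumberBuffers;
            // channelCount will be 1 for mono and 2 for stereo input
        }]];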
- (void) addInputReceiver: | (id< AEAudioReceiver >) | receiver | |
forChannels: | (NSArray *) | channels | |
Add an input receiver, specifying a channel selection.
Input receivers receive audio that is being received by the microphone or another input device.
This method allows you to specify which input channels to receive by providing an array of NSNumbers with indexes identifying the selected channels.
Note that the audio format provided to input receivers added via this method depends on the value of inputMode.
Check the audio buffer list parameters to determine the kind of audio you are receiving (for example, if you are using an interleaved format such as interleaved16BitStereoAudioDescription, then the audio->mBuffers[0].mNumberChannels field will be 1 for mono and 2 for stereo audio). If you are using a non-interleaved format such as nonInterleaved16BitStereoAudioDescription, then audio->mNumberBuffers will be 1 for mono, and 2 for stereo.
receiver | An object that implements the AEAudioReceiver protocol |
channels | An array of NSNumbers identifying by index the input channels to receive, or nil for default (the same as addInputReceiver:) |
- (void) removeInputReceiver: | (id< AEAudioReceiver >) | receiver |
Remove an input receiver.
If receiver is registered for multiple channels, it will be removed for all of them.
receiver | Receiver to remove |
- (void) removeInputReceiver: | (id< AEAudioReceiver >) | receiver | |
fromChannels: | (NSArray *) | channels | |
Remove an input receiver.
receiver | Receiver to remove |
channels | Specific channels to remove receiver from |
- (NSArray*) inputReceivers |
Obtain a list of all input receivers.
- (void) addTimingReceiver: | (id< AEAudioTimingReceiver >) | receiver |
Add a timing receiver.
Timing receivers receive notifications for when time has advanced. When called from an input context, the call occurs before any input receiver calls are performed. When called from an output context, it occurs before any output receivers are performed.
This mechanism can be used to trigger time-dependent events.
receiver | An object that implements the AEAudioTimingReceiver protocol |
- (void) removeTimingReceiver: | (id< AEAudioTimingReceiver >) | receiver |
Remove a timing receiver.
receiver | An object that implements the AEAudioTimingReceiver protocol |
- (NSArray*) timingReceivers |
Obtain a list of all timing receivers.
- (void) performAsynchronousMessageExchangeWithBlock: |
Send a message to the realtime thread asynchronously, if running, optionally receiving a response via a block.
This is a synchronization mechanism that allows you to schedule actions to be performed on the realtime audio thread without any locking mechanism required. Pass in a block, and the block will be performed on the realtime thread at the next polling interval.
Important: Do not interact with any Objective-C objects inside your block, or hold locks, allocate memory or interact with the BSD subsystem, as all of these may result in audio glitches due to priority inversion.
If provided, the response block will be called on the main thread after the message has been sent. You may exchange information from the realtime thread to the main thread via a shared data structure (such as a struct, allocated on the heap in advance), or __block variables.
If running is NO, then message blocks will be performed on the main thread instead of the realtime thread.
block | A block to be performed on the realtime thread. |
responseBlock | A block to be performed on the main thread after the handler has been run, or nil. |
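A sketch, assuming a hypothetical _playing instance variable that the realtime render code reads (the realtime block only performs plain memory accesses):

    __block BOOL wasPlaying = NO;
    [audioController performAsynchronousMessageExchangeWithBlock:^{
        // Realtime thread: no Objective-C messaging, locks or allocation here
        wasPlaying = _playing;
        _playing = NO;
    } responseBlock:^{
        // Main thread, after the realtime thread has processed the block
        NSLog(@"Stopped (was previously %@)", wasPlaying ? @"playing" : @"stopped");
    }];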
- (BOOL) performSynchronousMessageExchangeWithBlock: |
Send a message to the realtime thread synchronously, if running.
This is a synchronization mechanism that allows you to schedule actions to be performed on the realtime audio thread without any locking mechanism required. Pass in a block, and the block will be performed on the realtime thread at the next polling interval.
Important: Do not interact with any Objective-C objects inside your block, or hold locks, allocate memory or interact with the BSD subsystem, as all of these may result in audio glitches due to priority inversion.
This method will block the current thread until the block has been performed on the realtime thread. You may pass information from the realtime thread to the calling thread via the use of __block variables.
If all you need is a checkpoint to make sure the Core Audio thread is not mid-render, etc, then you may pass nil for the block.
If running is NO, then message blocks will be performed on the main thread instead of the realtime thread.
If the block is not processed within a timeout interval, this method will return NO.
block | A block to be performed on the realtime thread. |
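Two sketched uses: reading a value owned by the realtime thread via a __block variable (here '_playheadFrames' is a hypothetical instance variable), and passing nil purely as a checkpoint:

    __block UInt64 playhead = 0;
    BOOL processed = [audioController performSynchronousMessageExchangeWithBlock:^{
        playhead = _playheadFrames;   // plain memory read on the realtime thread
    }];
    if ( !processed ) {
        // Timed out: the block was not performed
    }

    // Checkpoint only: returns once the realtime thread is not mid-render
    [audioController performSynchronousMessageExchangeWithBlock:nil];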
- (void) AEAudioControllerSendAsynchronousMessageToMainThread | (__unsafe_unretained AEAudioController *) | audioController | |
(AEMessageQueueMessageHandler) | handler | ||
(void *) | userInfo | ||
(int) | userInfoLength | ||
Send a message to the main thread asynchronously.
This is a synchronization mechanism that allows you to schedule actions to be performed on the main thread, without any locking or memory allocation. Pass in a function pointer and optionally a pointer to data to be copied and passed to the handler, and the function will be called on the main thread at the next polling interval.
Tip: To pass a pointer (including pointers to __unsafe_unretained Objective-C objects) through the userInfo parameter, be sure to pass the address of the pointer, using the "&" prefix:
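(A sketch; the handler and variable names are illustrative.)

    AEAudioControllerSendAsynchronousMessageToMainThread(audioController, myMainThreadHandler,
                                                         &object, sizeof(id));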
or
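(the same call, passing a plain C pointer:)

    AEAudioControllerSendAsynchronousMessageToMainThread(audioController, myMainThreadHandler,
                                                         &myPointer, sizeof(void *));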
You can then retrieve the pointer value via a void** dereference from your function:
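For instance, assuming the handler receives the copied user info and its length (per the userInfo and userInfoLength parameters below):

    void *myPointerValue = *(void **)userInfo;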
To access an Objective-C object pointer, you also need to bridge the pointer value:
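('MyObject' is a stand-in for your own class:)

    MyObject *object = (__bridge MyObject *)*(void **)userInfo;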
audioController | The audio controller. |
handler | A pointer to a function to call on the main thread. |
userInfo | Pointer to user info data to pass to handler - this will be copied. |
userInfoLength | Length of userInfo in bytes. |
- (void) beginMessageExchangeBlock |
Begins a block of messages to be performed consecutively.
Calling this method will cause message processing on the realtime thread to be suspended until endMessageExchangeBlock is called.
- (void) endMessageExchangeBlock |
Ends a consecutive block of messages.
- (void) outputAveragePowerLevel: | (Float32 *) | averagePower | |
peakHoldLevel: | (Float32 *) | peakLevel | |
Get output power level information since this method was last called.
averagePower | If not NULL, on output will be set to the average power level of the most recent output audio, in decibels |
peakLevel | If not NULL, on output will be set to the peak level of the most recent output audio, in decibels |
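For example, polled from a timer or display link to drive a level meter (a sketch; values are in decibels):

    Float32 average = 0.0, peak = 0.0;
    [audioController outputAveragePowerLevel:&average peakHoldLevel:&peak];
    // Update the meter UI with 'average' and 'peak' (dB, typically <= 0)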
- (void) outputAveragePowerLevels: | (Float32 *) | averagePowers | |
peakHoldLevels: | (Float32 *) | peakLevels | |
channelCount: | (UInt32) | count | |
Get output power level information for multiple channels since this method was last called.
averagePowers | If not NULL, each element of the array on output will be set to the average power level of the most recent output audio for each channel up to count, in decibels |
peakLevels | If not NULL, each element of the array on output will be set to the peak level of the most recent output audio for each channel up to count, in decibels |
channelCount | specifies the number of channels to fill in the averagePowers and peakLevels array parameters |
- (void) averagePowerLevel: | (Float32 *) | averagePower | |
peakHoldLevel: | (Float32 *) | peakLevel | |
forGroup: | (AEChannelGroupRef) | group | |
Get output power level information for a particular group, since this method was last called.
averagePower | If not NULL, on output will be set to the average power level of the most recent audio, in decibels |
peakLevel | If not NULL, on output will be set to the peak level of the most recent audio, in decibels |
group | The channel group |
- (void) averagePowerLevels: | (Float32 *) | averagePowers | |
peakHoldLevels: | (Float32 *) | peakLevels | |
forGroup: | (AEChannelGroupRef) | group | |
channelCount: | (UInt32) | count | |
Get output power level information for a particular group, since this method was last called.
averagePowers | If not NULL, each element of the array on output will be set to the average power level of the most recent audio for each channel, in decibels |
peakLevels | If not NULL, each element of the array on output will be set to the peak level of the most recent audio for each channel, in decibels |
group | The channel group |
channelCount | specifies the number of channels to fill in the averagePowers and peakLevels array parameters |
- (void) inputAveragePowerLevel: | (Float32 *) | averagePower | |
peakHoldLevel: | (Float32 *) | peakLevel | |
Get input power level information since this method was last called.
averagePower | If not NULL, on output will be set to the average power level of the most recent input audio, in decibels |
peakLevel | If not NULL, on output will be set to the peak level of the most recent input audio, in decibels |
- (void) inputAveragePowerLevels: | (Float32 *) | averagePowers | |
peakHoldLevels: | (Float32 *) | peakLevels | |
channelCount: | (UInt32) | count | |
Get input power level information for multiple channels since this method was last called.
averagePowers | If not NULL, each element of the array on output will be set to the average power level of the most recent input audio for each channel up to count, in decibels |
peakLevels | If not NULL, each element of the array on output will be set to the peak level of the most recent input audio for each channel up to count, in decibels |
channelCount | specifies the number of channels to fill in the averagePowers and peakLevels array parameters |
- (AudioStreamBasicDescription*) AEAudioControllerAudioDescription | (__unsafe_unretained AEAudioController *) | audioController |
Get access to the configured AudioStreamBasicDescription.
- (AudioStreamBasicDescription*) AEAudioControllerInputAudioDescription | (__unsafe_unretained AEAudioController *) | audioController |
Get access to the input AudioStreamBasicDescription.
- (long) AEConvertSecondsToFrames | (__unsafe_unretained AEAudioController *) | audioController | |
(NSTimeInterval) | seconds | ||
Convert a time span in seconds into a number of frames at the current sample rate.
- (NSTimeInterval) AEConvertFramesToSeconds | (__unsafe_unretained AEAudioController *) | audioController | |
(long) | frames | ||
Convert a number of frames into a time span in seconds.
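For example (a sketch combining the two conversion helpers):

    long frames = AEConvertSecondsToFrames(audioController, 0.5);                // frames in half a second
    NSTimeInterval duration = AEConvertFramesToSeconds(audioController, 1024);   // duration of 1024 frames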
- (BOOL) AECurrentThreadIsAudioThread | (void) |
Determine if the current thread is the audio thread.
- (NSTimeInterval) AEAudioControllerInputLatency | (__unsafe_unretained AEAudioController *) | controller |
Input latency (in seconds)
To account for hardware latency, if automaticLatencyManagement is NO, you can use this function to offset audio timestamps. Note that if automaticLatencyManagement is YES (the default), you should not use this method.
For example:
timestamp.mHostTime -= AEHostTicksFromSeconds(AEAudioControllerInputLatency(audioController));
Note that when connected to Audiobus input, this function returns 0.
controller | The audio controller |
- (NSTimeInterval) AEAudioControllerOutputLatency | (__unsafe_unretained AEAudioController *) | controller |
Output latency (in seconds)
To account for hardware latency, if automaticLatencyManagement is NO, you can use this function to offset audio timestamps. Note that if automaticLatencyManagement is YES (the default), you should not use this method.
For example:
timestamp.mHostTime += AEHostTicksFromSeconds(AEAudioControllerOutputLatency(audioController));
Note that when connected to Audiobus, this value will automatically account for any Audiobus latency.
controller | The audio controller |
- (AudioTimeStamp) AEAudioControllerCurrentAudioTimestamp | (__unsafe_unretained AEAudioController *) | controller |
Get the current audio system timestamp.
For use on the audio thread; returns the latest audio timestamp, either for the input or the output bus, depending on when this method is called.
controller | The audio controller |
- (void) setAudiobusSenderPort: | (ABSenderPort *) | senderPort | |
forChannel: | (id< AEAudioPlayable >) | channel | |
Set an Audiobus sender port to send audio from a particular channel.
When assigned to a channel and connected via Audiobus, audio for the given channel will be sent out the Audiobus sender port.
senderPort | The Audiobus sender port, or nil to remove the port |
channel | Channel for the sender port |
Provided by category AEAudioController(AudiobusAdditions).
- (void) setAudiobusSenderPort: | (ABSenderPort *) | senderPort | |
forChannelGroup: | (AEChannelGroupRef) | channelGroup | |
Set an Audiobus sender port to send audio from a particular channel group.
When assigned to a channel group and connected via Audiobus, audio for the given group will be sent out the Audiobus sender port.
senderPort | The Audiobus sender port, or nil to remove the port |
channelGroup | Channel group for the sender port |
Provided by category AEAudioController(AudiobusAdditions).
ABFilterPort * audiobusFilterPort (deprecated: "No longer in use")
Audiobus filter port.
Set this property to an Audiobus filter port to let TAAE correctly update the number of input channels when connected.
Provided by category AEAudioController(AudiobusAdditions).
ABSenderPort * audiobusSenderPort (deprecated: "use ABSenderPort's audio unit initializer instead")
Audiobus sender port.
Deprecated: use ABSenderPort's audio unit initializer (using AEAudioController's audioUnit property).
This property has been deprecated, as it doesn't support synchronization and latency compensation.
Provided by category AEAudioController(AudiobusAdditions).
AEMessageQueue * messageQueue [read, nonatomic, strong]
The asynchronous message queue used for safe communication between main and realtime thread.
If running is NO, then message blocks passed to this instance will be performed on the main thread instead of the realtime thread.
NSString * audioSessionCategory [read, write, nonatomic, assign]
Audio session category to use.
See discussion in the Audio Session Programming Guide. The default value is AVAudioSessionCategoryPlayAndRecord if audio input is enabled, or AVAudioSessionCategoryPlayback otherwise, with mixing with other apps enabled.
BOOL allowMixingWithOtherApps [read, write, nonatomic, assign]
Whether to allow mixing audio with other apps.
When this is YES, your app's audio will be mixed with the output of other applications. If NO, then any other apps playing audio will be stopped when the audio engine is started.
Note: If you are using remote controls with UIApplication's beginReceivingRemoteControlEvents, setting this to YES will stop the remote controls working. This is an iOS limitation.
Default: YES
BOOL useMeasurementMode [read, write, nonatomic, assign]
Whether to use the "Measurement" Audio Session Mode for improved audio quality and bass response.
Note that when the device's built-in mic is being used, TAAE can automatically boost the gain, as this is very low while Measurement Mode is enabled. See boostBuiltInMicGainInMeasurementMode.
Default: NO
BOOL avoidMeasurementModeForBuiltInSpeaker [read, write, nonatomic, assign]
Whether to avoid using Measurement Mode with the built-in speaker.
When used with the built-in speaker, Measurement Mode results in quite low audio output levels. Setting this property to YES causes TAAE to avoid using Measurement Mode with the built-in speaker, avoiding this problem.
Default is YES.
BOOL boostBuiltInMicGainInMeasurementMode [read, write, nonatomic, assign]
Whether to boost the input volume while using Measurement Mode with the built-in mic.
When the device's built-in mic is being used while Measurement Mode is enabled (see useMeasurementMode), TAAE can automatically boost the gain, as this is very low with Measurement Mode. This takes place independently of the inputGain setting.
Default is YES.
BOOL muteOutput [read, write, nonatomic, assign]
Mute output.
Set to YES to mute all system output. Note that even if this is YES, playback callbacks will still receive audio, as the silencing happens after output receiver callbacks are called.
float masterOutputVolume [read, write, nonatomic, assign]
Access the master output volume.
Note that this value affects the output of the audio engine; it doesn't modify the hardware volume setting.
BOOL enableBluetoothInput [read, write, nonatomic, assign]
Enable audio input from Bluetooth devices.
Note that setting this property to YES may have implications for input latency.
Default is NO.
BOOL inputGainAvailable [read, nonatomic, assign]
Determine whether input gain is available.
float inputGain [read, write, nonatomic, assign]
Set audio input gain (if input gain is available)
Value must be in the range 0-1
BOOL voiceProcessingEnabled [read, write, nonatomic, assign]
Whether to use the built-in voice processing system.
This can be useful for removing echo/feedback when playing through the speaker while simultaneously recording through the microphone. Not suitable for music, but works adequately well for speech.
Note that changing this value will cause the entire audio system to be shut down and restarted with the new setting, which will result in a break in audio playback.
Enabling voice processing in short buffer duration environments (< 0.01s) may cause stuttering.
Default is NO.
BOOL voiceProcessingOnlyForSpeakerAndMicrophone [read, write, nonatomic, assign]
Whether to only perform voice processing for the SpeakerAndMicrophone route.
This causes voice processing to only be enabled in the classic echo removal scenario, when audio is being played through the device speaker and recorded by the device microphone.
Default is YES.
AEInputMode inputMode [read, write, nonatomic, assign]
Input mode: How to handle incoming audio.
If you are using an audio format with more than one channel, this setting defines how the system receives incoming audio.
See AEInputMode for a description of the available options.
Default is AEInputModeFixedAudioFormat.
NSArray * inputChannelSelection [read, write, nonatomic, strong]
Input channel selection.
When there is more than one input channel, you may specify which of the available channels are actually used as input. This is an array of NSNumbers, each referring to a channel (starting with the number 0 for the first channel).
Specified input channels will be mapped to output channels in the order they appear in this array, so the first channel specified will be mapped to the first output channel (the only output channel if output is mono, or the left channel for stereo output), and the second input to the second output (the right channel).
By default, the first two inputs are used on devices with more than one input channel.
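For example, to use the third and fourth inputs of a multichannel interface as the stereo input pair (indexes are zero-based; this assumes the device exposes at least four inputs):

    audioController.inputChannelSelection = @[@2, @3];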
NSTimeInterval preferredBufferDuration [read, write, nonatomic, assign]
Preferred buffer duration (in seconds)
Set this to low values for better latency, but more processing overhead, or higher values for greater latency with lower processing overhead. This parameter affects the length of the audio buffers received by the various callbacks.
System default is ~23ms, or 1024 frames.
NSTimeInterval currentBufferDuration [read, nonatomic, assign]
Current buffer duration (in seconds)
This is the current hardware buffer duration, which may or may not be the same as the preferredBufferDuration property, depending on the set of active apps on the device and the order in which they were launched.
Observable.
NSTimeInterval inputLatency [read, nonatomic, assign]
Input latency (in seconds)
The currently-reported hardware input latency. See AEAudioControllerInputLatency.
NSTimeInterval outputLatency [read, nonatomic, assign]
Output latency (in seconds)
The currently-reported hardware output latency. See AEAudioControllerOutputLatency
BOOL automaticLatencyManagement [read, write, nonatomic, assign]
Whether to automatically account for input/output latency.
If this property is YES (the default), the timestamps you see in the various callbacks will automatically account for input and output latency. If you set this property to NO and you wish to account for latency, you will need to use the inputLatency and outputLatency properties, or their corresponding C functions AEAudioControllerInputLatency and AEAudioControllerOutputLatency, yourself.
Default is YES.
BOOL running [read, nonatomic, assign]
Determine whether the audio engine is running.
This is affected by calling start and stop on the audio controller.
BOOL playingThroughDeviceSpeaker [read, nonatomic, assign]
Determine whether audio is currently being played through the device's speaker.
This property is observable
BOOL recordingThroughDeviceMicrophone [read, nonatomic, assign]
Determine whether audio is currently being recorded through the device's mic.
This property is observable
BOOL audioInputAvailable [read, nonatomic, assign]
Whether audio input is currently available.
Note: This property is observable
BOOL inputEnabled [read, nonatomic, assign]
Whether audio input is currently enabled.
Note: This property is observable
BOOL outputEnabled [read, nonatomic, assign]
Whether audio output is currently available.
Note: This property is observable
int numberOfInputChannels [read, nonatomic, assign]
The number of audio channels that the current audio input device provides.
Note that this will not necessarily be the same as the number of audio channels your app will receive, depending on the inputMode and inputChannelSelection properties. Use inputAudioDescription to obtain an AudioStreamBasicDescription representing the actual incoming audio.
Note: This property is observable
AudioStreamBasicDescription inputAudioDescription [read, nonatomic, assign]
The audio description defining the input audio format.
Note: This property is observable
See also inputMode and inputChannelSelection
AudioStreamBasicDescription audioDescription [read, nonatomic, assign]
The audio description that the audio controller was setup with.
AudioUnit audioUnit [read, nonatomic, assign]
The Remote IO audio unit used for input and output.
AUGraph audioGraph [read, nonatomic, assign]
The audio graph handle.
ABReceiverPort * audiobusReceiverPort [read, write, nonatomic, retain]
Audiobus receiver port.
Set this property to an Audiobus receiver port to receive audio from this port instead of the system audio input.
Provided by category AEAudioController(AudiobusAdditions).