AudioRecord#
Initialization#
public AudioRecord(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes)
The only sample rate guaranteed to work on every device is 44100 Hz; any other rate may be unsupported. If you do not want to commit to a specific rate, pass SAMPLE_RATE_UNSPECIFIED and let the device choose one itself. channelConfig selects the capture channel layout: mono is guaranteed on all devices, while multi-channel capture may not be available. bufferSizeInBytes is the size of the buffer that holds the captured data; in practice the return value of getMinBufferSize is used for it directly.
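For illustration only (this snippet is not part of the sources quoted below), a typical construction flow looks like this, assuming the app already holds the RECORD_AUDIO permission:

// hypothetical usage sketch
int sampleRate = 44100; // the only rate guaranteed on all devices
int channelConfig = AudioFormat.CHANNEL_IN_MONO; // mono is always supported
int encoding = AudioFormat.ENCODING_PCM_16BIT;
int minBufSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, encoding);
if (minBufSize <= 0) { // negative values are error codes, not sizes
    throw new IllegalStateException("unsupported capture config: " + minBufSize);
}
AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC,
        sampleRate, channelConfig, encoding, minBufSize);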
getMinBufferSize#
//frameworks/base/media/java/android/media/AudioRecord.java
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
int channelCount = 0;
switch (channelConfig) {
case AudioFormat.CHANNEL_IN_DEFAULT: // AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
case AudioFormat.CHANNEL_IN_MONO:
case AudioFormat.CHANNEL_CONFIGURATION_MONO:
channelCount = 1;
break;
case AudioFormat.CHANNEL_IN_STEREO:
case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
case (AudioFormat.CHANNEL_IN_FRONT | AudioFormat.CHANNEL_IN_BACK):
channelCount = 2;
break;
case AudioFormat.CHANNEL_INVALID:
default:
loge("getMinBufferSize(): Invalid channel configuration.");
return ERROR_BAD_VALUE;
}
int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
if (size == 0) {
return ERROR_BAD_VALUE;
}
else if (size == -1) {
return ERROR;
}
else {
return size;
}
}
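The native helper below encodes failures as 0 or -1, which the Java layer maps back to ERROR_BAD_VALUE and ERROR respectively; both constants are negative, so a valid minimum buffer size is always strictly positive.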
//frameworks/base/core/jni/android_media_AudioRecord.cpp
static jint android_media_AudioRecord_get_min_buff_size(JNIEnv *env, jobject thiz,
jint sampleRateInHertz, jint channelCount, jint audioFormat) {
ALOGV(">> android_media_AudioRecord_get_min_buff_size(%d, %d, %d)",
sampleRateInHertz, channelCount, audioFormat);
size_t frameCount = 0;
audio_format_t format = audioFormatToNative(audioFormat); // convert the Java format constant to the native audio_format_t
status_t result = AudioRecord::getMinFrameCount(&frameCount, // query the minimum frame count
sampleRateInHertz,
format,
audio_channel_in_mask_from_count(channelCount));
if (result == BAD_VALUE) {
return 0;
}
if (result != NO_ERROR) {
return -1;
}
return frameCount * audio_bytes_per_frame(channelCount, format);
}
//frameworks/av/media/libaudioclient/AudioRecord.cpp
status_t AudioRecord::getMinFrameCount(
size_t* frameCount,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask)
{
if (frameCount == NULL) {
return BAD_VALUE;
}
size_t size;
status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);
if (status != NO_ERROR) {
ALOGE("%s(): AudioSystem could not query the input buffer size for"
" sampleRate %u, format %#x, channelMask %#x; status %d",
__func__, sampleRate, format, channelMask, status);
return status;
}
// We double the size of input buffer for ping pong use of record buffer.
const auto frameSize = audio_bytes_per_frame(
audio_channel_count_from_in_mask(channelMask), format);
if (frameSize == 0 || ((*frameCount = (size * 2) / frameSize) == 0)) {
ALOGE("%s(): Unsupported configuration: sampleRate %u, format %#x, channelMask %#x",
__func__, sampleRate, format, channelMask);
return BAD_VALUE;
}
return NO_ERROR;
}
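A quick worked example with assumed numbers: if AudioSystem::getInputBufferSize reports 1920 bytes for 48000 Hz mono PCM 16-bit (a 20 ms hardware buffer), then frameSize = 1 channel × 2 bytes = 2 bytes, and after doubling for ping-pong use *frameCount = (1920 × 2) / 2 = 1920 frames; the JNI layer above then returns 1920 × 2 = 3840 bytes as the minimum buffer size.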
//frameworks/av/media/libaudioclient/AudioSystem.cpp
status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
audio_channel_mask_t channelMask, size_t* buffSize) {
const sp<AudioFlingerClient> afc = getAudioFlingerClient();
if (afc == 0) {
return NO_INIT;
}
return afc->getInputBufferSize(sampleRate, format, channelMask, buffSize);
}
status_t AudioSystem::AudioFlingerClient::getInputBufferSize(
uint32_t sampleRate, audio_format_t format,
audio_channel_mask_t channelMask, size_t* buffSize) {
const sp<IAudioFlinger> af = get_audio_flinger();
if (af == 0) {
return PERMISSION_DENIED;
}
std::lock_guard _l(mMutex);
// Do we have a stale mInBuffSize or are we requesting the input buffer size for new values
if ((mInBuffSize == 0) || (sampleRate != mInSamplingRate) || (format != mInFormat)
|| (channelMask != mInChannelMask)) {
size_t inBuffSize = af->getInputBufferSize(sampleRate, format, channelMask);
if (inBuffSize == 0) {
ALOGE("AudioSystem::getInputBufferSize failed sampleRate %d format %#x channelMask %#x",
sampleRate, format, channelMask);
return BAD_VALUE;
}
// A benign race is possible here: we could overwrite a fresher cache entry
// save the request params
mInSamplingRate = sampleRate;
mInFormat = format;
mInChannelMask = channelMask;
mInBuffSize = inBuffSize;
}
*buffSize = mInBuffSize;
return NO_ERROR;
}
//frameworks/av/services/audioflinger/AudioFlinger.cpp
size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
audio_channel_mask_t channelMask) const
{
status_t ret = initCheck();
if (ret != NO_ERROR) {
return 0;
}
if ((sampleRate == 0) ||
!audio_is_valid_format(format) ||
!audio_is_input_channel(channelMask)) {
return 0;
}
audio_utils::lock_guard lock(hardwareMutex());
if (mPrimaryHardwareDev == nullptr) {
return 0;
}
if (mInputBufferSizeOrderedDevs.empty()) {
return 0;
}
mHardwareStatus = AUDIO_HW_GET_INPUT_BUFFER_SIZE;
std::vector<audio_channel_mask_t> channelMasks = {channelMask};
if (channelMask != AUDIO_CHANNEL_IN_MONO) {
channelMasks.push_back(AUDIO_CHANNEL_IN_MONO);
}
if (channelMask != AUDIO_CHANNEL_IN_STEREO) {
channelMasks.push_back(AUDIO_CHANNEL_IN_STEREO);
}
std::vector<audio_format_t> formats = {format};
if (format != AUDIO_FORMAT_PCM_16_BIT) {
// For compressed format, buffer size may be queried using PCM. Allow this for compatibility
// in cases the primary hw dev does not support the format.
// TODO: replace with a table of formats and nominal buffer sizes (based on nominal bitrate
// and codec frame size).
formats.push_back(AUDIO_FORMAT_PCM_16_BIT);
}
std::vector<uint32_t> sampleRates = {sampleRate};
static const uint32_t SR_44100 = 44100;
static const uint32_t SR_48000 = 48000;
if (sampleRate != SR_48000) {
sampleRates.push_back(SR_48000);
}
if (sampleRate != SR_44100) {
sampleRates.push_back(SR_44100);
}
mHardwareStatus = AUDIO_HW_IDLE;
auto getInputBufferSize = [](const sp<DeviceHalInterface>& dev, audio_config_t config,
size_t* bytes) -> status_t {
if (!dev) {
return BAD_VALUE;
}
status_t result = dev->getInputBufferSize(&config, bytes);
if (result == BAD_VALUE) {
// Retry with the config suggested by the HAL.
result = dev->getInputBufferSize(&config, bytes);
}
if (result != OK || *bytes == 0) {
return BAD_VALUE;
}
return result;
};
// Change parameters of the configuration each iteration until we find a
// configuration that the device will support, or HAL suggests what it supports.
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
for (auto testChannelMask : channelMasks) {
config.channel_mask = testChannelMask;
for (auto testFormat : formats) {
config.format = testFormat;
for (auto testSampleRate : sampleRates) {
config.sample_rate = testSampleRate;
size_t bytes = 0;
ret = BAD_VALUE;
for (const AudioHwDevice* dev : mInputBufferSizeOrderedDevs) {
ret = getInputBufferSize(dev->hwDevice(), config, &bytes);
if (ret == OK) {
break;
}
}
if (ret == BAD_VALUE) continue;
if (config.sample_rate != sampleRate || config.channel_mask != channelMask ||
config.format != format) {
uint32_t dstChannelCount = audio_channel_count_from_in_mask(channelMask);
uint32_t srcChannelCount =
audio_channel_count_from_in_mask(config.channel_mask);
size_t srcFrames =
bytes / audio_bytes_per_frame(srcChannelCount, config.format);
size_t dstFrames = destinationFramesPossible(
srcFrames, config.sample_rate, sampleRate);
bytes = dstFrames * audio_bytes_per_frame(dstChannelCount, format);
}
return bytes;
}
}
}
ALOGW("getInputBufferSize failed with minimum buffer size sampleRate %u, "
"format %#x, channelMask %#x",sampleRate, format, channelMask);
return 0;
}
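The nested loops try the caller's exact configuration first and then progressively safer fallbacks (mono/stereo channel masks, PCM 16-bit, 48000/44100 Hz). For example, if a HAL only answers for 48000 Hz PCM 16-bit but the caller asked for 32000 Hz, the reply is rescaled: the HAL's byte count is converted to source frames, destinationFramesPossible() converts that frame count to the requested 32000 Hz rate, and the result is multiplied by the caller's frame size before being returned.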
AudioRecord Initialization#
//frameworks/base/media/java/android/media/AudioRecord.java
private AudioRecord(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
int sessionId, @Nullable Context context,
int maxSharedAudioHistoryMs, int halInputFlags) throws IllegalArgumentException {
mRecordingState = RECORDSTATE_STOPPED;
mHalInputFlags = halInputFlags;
if (attributes == null) {
throw new IllegalArgumentException("Illegal null AudioAttributes");
}
if (format == null) {
throw new IllegalArgumentException("Illegal null AudioFormat");
}
// remember which looper is associated with the AudioRecord instantiation
if ((mInitializationLooper = Looper.myLooper()) == null) {
mInitializationLooper = Looper.getMainLooper();
}
// is this AudioRecord using REMOTE_SUBMIX at full volume?
if (attributes.getCapturePreset() == MediaRecorder.AudioSource.REMOTE_SUBMIX) {
final AudioAttributes.Builder ab =
new AudioAttributes.Builder(attributes);
HashSet<String> filteredTags = new HashSet<String>();
final Iterator<String> tagsIter = attributes.getTags().iterator();
while (tagsIter.hasNext()) {
final String tag = tagsIter.next();
if (tag.equalsIgnoreCase(SUBMIX_FIXED_VOLUME)) {
mIsSubmixFullVolume = true;
Log.v(TAG, "Will record from REMOTE_SUBMIX at full fixed volume");
} else { // SUBMIX_FIXED_VOLUME: is not to be propagated to the native layers
filteredTags.add(tag);
}
}
ab.replaceTags(filteredTags);
attributes = ab.build();
}
mAudioAttributes = attributes;
int rate = format.getSampleRate();
if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
rate = 0;
}
int encoding = AudioFormat.ENCODING_DEFAULT;
if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0)
{
encoding = format.getEncoding();
}
audioParamCheck(mAudioAttributes.getCapturePreset(), rate, encoding);
if ((format.getPropertySetMask()
& AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
mChannelIndexMask = format.getChannelIndexMask();
mChannelCount = format.getChannelCount();
}
if ((format.getPropertySetMask()
& AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
mChannelMask = getChannelMaskFromLegacyConfig(format.getChannelMask(), false);
mChannelCount = format.getChannelCount();
} else if (mChannelIndexMask == 0) {
mChannelMask = getChannelMaskFromLegacyConfig(AudioFormat.CHANNEL_IN_DEFAULT, false);
mChannelCount = AudioFormat.channelCountFromInChannelMask(mChannelMask);
}
audioBuffSizeCheck(bufferSizeInBytes);
AttributionSource attributionSource = (context != null)
? context.getAttributionSource() : AttributionSource.myAttributionSource();
if (attributionSource.getPackageName() == null) {
// Command line utility
attributionSource = attributionSource.withPackageName("uid:" + Binder.getCallingUid());
}
int[] sampleRate = new int[] {mSampleRate};
int[] session = new int[1];
session[0] = resolveSessionId(context, sessionId);
//TODO: update native initialization when information about hardware init failure
// due to capture device already open is available.
try (ScopedParcelState attributionSourceState = attributionSource.asScopedParcelState()) {
int initResult = native_setup(new WeakReference<AudioRecord>(this), mAudioAttributes,
sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
mNativeBufferSizeInBytes, session, attributionSourceState.getParcel(),
0 /*nativeRecordInJavaObj*/, maxSharedAudioHistoryMs, mHalInputFlags);
if (initResult != SUCCESS) {
loge("Error code " + initResult + " when initializing native AudioRecord object.");
return; // with mState == STATE_UNINITIALIZED
}
}
mSampleRate = sampleRate[0];
mSessionId = session[0];
mState = STATE_INITIALIZED;
}
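Note that a failed native_setup() does not throw: the constructor simply returns with mState still STATE_UNINITIALIZED. Callers are therefore expected to verify the state, e.g. (sketch, continuing the hypothetical construction from earlier):

if (record.getState() != AudioRecord.STATE_INITIALIZED) {
    record.release(); // free any native resources that were allocated
    throw new IllegalStateException("AudioRecord initialization failed");
}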
//frameworks/base/core/jni/android_media_AudioRecord.cpp
static jint android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
jobject jaa, jintArray jSampleRate, jint channelMask,
jint channelIndexMask, jint audioFormat,
jint buffSizeInBytes, jintArray jSession,
jobject jAttributionSource, jlong nativeRecordInJavaObj,
jint sharedAudioHistoryMs,
jint halFlags) {
//ALOGV(">> Entering android_media_AudioRecord_setup");
//ALOGV("sampleRate=%d, audioFormat=%d, channel mask=%x, buffSizeInBytes=%d "
// "nativeRecordInJavaObj=0x%llX",
// sampleRateInHertz, audioFormat, channelMask, buffSizeInBytes, nativeRecordInJavaObj);
audio_channel_mask_t localChanMask = inChannelMaskToNative(channelMask);
if (jSession == NULL) {
ALOGE("Error creating AudioRecord: invalid session ID pointer");
return (jint) AUDIO_JAVA_ERROR;
}
jint* nSession = env->GetIntArrayElements(jSession, nullptr /* isCopy */);
if (nSession == NULL) {
ALOGE("Error creating AudioRecord: Error retrieving session id pointer");
return (jint) AUDIO_JAVA_ERROR;
}
audio_session_t sessionId = (audio_session_t) nSession[0];
env->ReleaseIntArrayElements(jSession, nSession, 0 /* mode */);
nSession = NULL;
sp<AudioRecord> lpRecorder;
sp<AudioRecordJNIStorage> callbackData;
jclass clazz = env->GetObjectClass(thiz);
if (clazz == NULL) {
ALOGE("Can't find %s when setting up callback.", kClassPathName);
return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
}
// if we pass in an existing *Native* AudioRecord, we don't need to create/initialize one.
if (nativeRecordInJavaObj == 0) { // the native AudioRecord does not exist yet, so create one
if (jaa == 0) {
ALOGE("Error creating AudioRecord: invalid audio attributes");
return (jint) AUDIO_JAVA_ERROR;
}
if (jSampleRate == 0) {
ALOGE("Error creating AudioRecord: invalid sample rates");
return (jint) AUDIO_JAVA_ERROR;
}
jint elements[1];
env->GetIntArrayRegion(jSampleRate, 0, 1, elements);
int sampleRateInHertz = elements[0];
// channel index mask takes priority over channel position masks.
if (channelIndexMask) {
// Java channel index masks need the representation bits set.
localChanMask = audio_channel_mask_from_representation_and_bits(
AUDIO_CHANNEL_REPRESENTATION_INDEX,
channelIndexMask);
}
// Java channel position masks map directly to the native definition
if (!audio_is_input_channel(localChanMask)) {
ALOGE("Error creating AudioRecord: channel mask %#x is not valid.", localChanMask);
return (jint) AUDIORECORD_ERROR_SETUP_INVALIDCHANNELMASK;
}
uint32_t channelCount = audio_channel_count_from_in_mask(localChanMask);
// compare the format against the Java constants
audio_format_t format = audioFormatToNative(audioFormat);
if (format == AUDIO_FORMAT_INVALID) {
ALOGE("Error creating AudioRecord: unsupported audio format %d.", audioFormat);
return (jint) AUDIORECORD_ERROR_SETUP_INVALIDFORMAT;
}
if (buffSizeInBytes == 0) {
ALOGE("Error creating AudioRecord: frameCount is 0.");
return (jint) AUDIORECORD_ERROR_SETUP_ZEROFRAMECOUNT;
}
size_t frameCount = buffSizeInBytes / audio_bytes_per_frame(channelCount, format);
// create an uninitialized AudioRecord object
Parcel* parcel = parcelForJavaObject(env, jAttributionSource);
android::content::AttributionSourceState attributionSource;
attributionSource.readFromParcel(parcel);
lpRecorder = new AudioRecord(attributionSource);
// read the AudioAttributes values
auto paa = JNIAudioAttributeHelper::makeUnique();
jint jStatus = JNIAudioAttributeHelper::nativeFromJava(env, jaa, paa.get());
if (jStatus != (jint)AUDIO_JAVA_SUCCESS) {
return jStatus;
}
ALOGV("AudioRecord_setup for source=%d tags=%s flags=%08x", paa->source, paa->tags, paa->flags);
const auto flags = static_cast<audio_input_flags_t>(halFlags);
// create the callback information:
// this data will be passed with every AudioRecord callback
// we use a weak reference so the AudioRecord object can be garbage collected.
callbackData = sp<AudioRecordJNIStorage>::make(clazz, weak_this);
const status_t status =
lpRecorder->set(paa->source, sampleRateInHertz, // configure the native AudioRecord
format, // word length, PCM
localChanMask, frameCount,
callbackData, // callback
0, // notificationFrames,
true, // threadCanCallJava
sessionId, AudioRecord::TRANSFER_DEFAULT, flags, -1,
-1, // default uid, pid
paa.get(), AUDIO_PORT_HANDLE_NONE, MIC_DIRECTION_UNSPECIFIED,
MIC_FIELD_DIMENSION_DEFAULT, sharedAudioHistoryMs);
if (status != NO_ERROR) {
ALOGE("Error creating AudioRecord instance: initialization check failed with status %d.",
status);
goto native_init_failure;
}
// Set caller name so it can be logged in destructor.
// MediaMetricsConstants.h: AMEDIAMETRICS_PROP_CALLERNAME_VALUE_JAVA
lpRecorder->setCallerName("java");
} else { // end if (nativeRecordInJavaObj == 0); a native AudioRecord was already created
lpRecorder = (AudioRecord*)nativeRecordInJavaObj;
// TODO: We need to find out which members of the Java AudioRecord might need to be
// initialized from the Native AudioRecord
// these are directly returned from getters:
// mSampleRate
// mRecordSource
// mAudioFormat
// mChannelMask
// mChannelCount
// mState (?)
// mRecordingState (?)
// mPreferredDevice
// create the callback information:
// this data will be passed with every AudioRecord callback
// This next line makes little sense
// callbackData = sp<AudioRecordJNIStorage>::make(clazz, weak_this);
}
nSession = env->GetIntArrayElements(jSession, nullptr /* isCopy */);
if (nSession == NULL) {
ALOGE("Error creating AudioRecord: Error retrieving session id pointer");
goto native_init_failure;
}
// read the audio session ID back from AudioRecord in case a new session was created during set()
nSession[0] = lpRecorder->getSessionId();
env->ReleaseIntArrayElements(jSession, nSession, 0 /* mode */);
nSession = NULL;
{
const jint elements[1] = { (jint) lpRecorder->getSampleRate() };
env->SetIntArrayRegion(jSampleRate, 0, 1, elements);
}
// save our newly created C++ AudioRecord in the "nativeRecorderInJavaObj" field
// of the Java object
// i.e., stored on the Java AudioRecord object (thiz)
setFieldSp(env, thiz, lpRecorder, javaAudioRecordFields.nativeRecorderInJavaObj);
// save our newly created callback information in the "jniData" field
// of the Java object (in mNativeJNIDataHandle) so we can free the memory in finalize()
setFieldSp(env, thiz, callbackData, javaAudioRecordFields.jniData);
return (jint) AUDIO_JAVA_SUCCESS;
// failure:
native_init_failure:
setFieldSp(env, thiz, sp<AudioRecord>{}, javaAudioRecordFields.nativeRecorderInJavaObj);
setFieldSp(env, thiz, sp<AudioRecordJNIStorage>{}, javaAudioRecordFields.jniData);
// lpRecorder goes out of scope, so reference count drops to zero
return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
}
//frameworks/av/media/libaudioclient/AudioRecord.cpp
status_t AudioRecord::set(
audio_source_t inputSource,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
const wp<IAudioRecordCallback>& callback,
uint32_t notificationFrames,
bool threadCanCallJava,
audio_session_t sessionId,
transfer_type transferType,
audio_input_flags_t flags,
uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes,
audio_port_handle_t selectedDeviceId,
audio_microphone_direction_t selectedMicDirection,
float microphoneFieldDimension,
int32_t maxSharedAudioHistoryMs)
{
status_t status = NO_ERROR;
LOG_ALWAYS_FATAL_IF(mInitialized, "%s: should not be called twice", __func__);
mInitialized = true;
// Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
ALOGV("%s(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"notificationFrames %u, sessionId %d, transferType %d, flags %#x, attributionSource %s"
"uid %d, pid %d",
__func__,
inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
sessionId, transferType, flags, mClientAttributionSource.toString().c_str(), uid, pid);
// TODO b/182392553: refactor or remove
pid_t callingPid = IPCThreadState::self()->getCallingPid();
pid_t myPid = getpid();
pid_t adjPid = pid;
if (pid == -1 || (callingPid != myPid)) {
adjPid = callingPid;
}
auto clientAttributionSourcePid = legacy2aidl_pid_t_int32_t(adjPid);
if (!clientAttributionSourcePid.ok()) {
return logIfErrorAndReturnStatus(BAD_VALUE,
StringPrintf("%s: received invalid client attribution "
"source pid, pid: %d, sessionId: %d",
__func__, pid, sessionId),
__func__);
}
mClientAttributionSource.pid = clientAttributionSourcePid.value();
uid_t adjUid = uid;
if (uid == -1 || (callingPid != myPid)) {
adjUid = IPCThreadState::self()->getCallingUid();
}
auto clientAttributionSourceUid = legacy2aidl_uid_t_int32_t(adjUid);
if (!clientAttributionSourceUid.ok()) {
return logIfErrorAndReturnStatus(BAD_VALUE,
StringPrintf("%s: received invalid client attribution "
"source uid, pid: %d, session id: %d",
__func__, pid, sessionId),
__func__);
}
mClientAttributionSource.uid = clientAttributionSourceUid.value();
mTracker.reset(new RecordingActivityTracker());
mSelectedDeviceId = selectedDeviceId;
mSelectedMicDirection = selectedMicDirection;
mSelectedMicFieldDimension = microphoneFieldDimension;
mMaxSharedAudioHistoryMs = maxSharedAudioHistoryMs;
// Copy the state variables early so they are available for error reporting.
if (pAttributes == nullptr) {
mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
mAttributes.source = inputSource;
if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION
|| inputSource == AUDIO_SOURCE_CAMCORDER) {
mAttributes.flags = static_cast<audio_flags_mask_t>(
mAttributes.flags | AUDIO_FLAG_CAPTURE_PRIVATE);
}
} else {
// stream type shouldn't be looked at, this track has audio attributes
memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
ALOGV("%s: Building AudioRecord with attributes: source=%d flags=0x%x tags=[%s]",
__func__, mAttributes.source, mAttributes.flags, mAttributes.tags);
}
mSampleRate = sampleRate;
if (format == AUDIO_FORMAT_DEFAULT) {
format = AUDIO_FORMAT_PCM_16_BIT;
}
if (!audio_is_linear_pcm(format)) {
// Compressed capture requires direct
flags = (audio_input_flags_t) (flags | AUDIO_INPUT_FLAG_DIRECT);
ALOGI("%s(): Format %#x is not linear pcm. Setting DIRECT, using flags %#x", __func__,
format, flags);
}
mFormat = format;
mChannelMask = channelMask;
mSessionId = sessionId;
ALOGV("%s: mSessionId %d", __func__, mSessionId);
mOrigFlags = mFlags = flags;
mTransfer = transferType;
switch (mTransfer) { // the Java path passes TRANSFER_DEFAULT; the other cases can be ignored for now
case TRANSFER_DEFAULT:
if (callback == nullptr || threadCanCallJava) {
mTransfer = TRANSFER_SYNC;
} else {
mTransfer = TRANSFER_CALLBACK;
}
break;
case TRANSFER_CALLBACK:
if (callback == nullptr) {
return logIfErrorAndReturnStatus(
BAD_VALUE,
StringPrintf("%s: Transfer type TRANSFER_CALLBACK but callback == nullptr, "
"pid: %d, session id: %d",
__func__, pid, sessionId),
__func__);
}
break;
case TRANSFER_OBTAIN:
case TRANSFER_SYNC:
break;
default:
return logIfErrorAndReturnStatus(
BAD_VALUE,
StringPrintf("%s: Invalid transfer type %d, pid: %d, session id: %d", __func__,
mTransfer, pid, sessionId),
__func__);
}
// invariant that mAudioRecord != 0 is true only after set() returns successfully
if (mAudioRecord != 0) {
return logIfErrorAndReturnStatus(
INVALID_OPERATION,
StringPrintf("%s: Track already in use, pid: %d, session id: %d", __func__, pid,
sessionId),
__func__);
}
if (!audio_is_valid_format(mFormat)) {
return logIfErrorAndReturnStatus(
BAD_VALUE,
StringPrintf("%s: Format %#x is not valid, pid: %d, session id: %d", __func__,
mFormat, pid, sessionId),
__func__);
}
if (!audio_is_input_channel(mChannelMask)) {
return logIfErrorAndReturnStatus(
BAD_VALUE,
StringPrintf("%s: Invalid channel mask %#x, pid: %d, session id: %d", __func__,
mChannelMask, pid, sessionId),
__func__);
}
mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
mFrameSize = audio_bytes_per_frame(mChannelCount, mFormat);
// mFrameCount is initialized in createRecord_l
mReqFrameCount = frameCount;
mNotificationFramesReq = notificationFrames;
// mNotificationFramesAct is initialized in createRecord_l
mCallback = callback;
if (mCallback != nullptr) {
mAudioRecordThread = new AudioRecordThread(*this); // callback thread: once data has been captured it can proactively call back to the client
mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
// thread begins in paused state, and will not reference us until start()
}
// create the IAudioRecord
// this is the key step
{
AutoMutex lock(mLock);
status = createRecord_l(0 /*epoch*/);
}
ALOGV("%s(%d): status %d", __func__, mPortId, status);
if (status != NO_ERROR) {
if (mAudioRecordThread != 0) {
mAudioRecordThread->requestExit(); // see comment in AudioRecord.h
mAudioRecordThread->requestExitAndWait();
mAudioRecordThread.clear();
}
// bypass error message to avoid logging twice (createRecord_l logs the error).
mStatus = status;
return mStatus;
}
// TODO: add audio hardware input latency here
mLatency = (1000LL * mFrameCount) / mSampleRate;
mMarkerPosition = 0;
mMarkerReached = false;
mNewPosition = 0;
mUpdatePeriod = 0;
AudioSystem::acquireAudioSessionId(mSessionId, adjPid, adjUid);
mSequence = 1;
mObservedSequence = mSequence;
mInOverrun = false;
mFramesRead = 0;
mFramesReadServerOffset = 0;
return logIfErrorAndReturnStatus(status, "", __func__);
}
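Worth noting: the JNI path above always passes TRANSFER_DEFAULT with threadCanCallJava = true, so for the Java API mTransfer resolves to TRANSFER_SYNC and data is pulled with blocking read() calls; TRANSFER_CALLBACK (data pushed via AudioRecordThread) is only selected for native clients that register a callback without threadCanCallJava.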
status_t AudioRecord::createRecord_l(const Modulo<uint32_t> &epoch)
{
const int64_t beginNs = systemTime();
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
IAudioFlinger::CreateRecordInput input;
IAudioFlinger::CreateRecordOutput output;
[[maybe_unused]] audio_session_t originalSessionId;
void *iMemPointer;
audio_track_cblk_t* cblk;
status_t status;
static const int32_t kMaxCreateAttempts = 3;
int32_t remainingAttempts = kMaxCreateAttempts;
if (audioFlinger == 0) {
return logIfErrorAndReturnStatus(
NO_INIT, StringPrintf("%s(%d): Could not get audioflinger", __func__, mPortId), "");
}
// mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
// After fast request is denied, we will request again if IAudioRecord is re-created.
// Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
// we must release it ourselves if anything goes wrong.
// Client can only express a preference for FAST. Server will perform additional tests.
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
bool useCaseAllowed =
// any of these use cases:
// use case 1: callback transfer mode
(mTransfer == TRANSFER_CALLBACK) ||
// use case 2: blocking read mode
// The default buffer capacity at 48 kHz is 2048 frames, or ~42.6 ms.
// That's enough for double-buffering with our standard 20 ms rule of thumb for
// the minimum period of a non-SCHED_FIFO thread.
// This is needed so that AAudio apps can do a low latency non-blocking read from a
// callback running with SCHED_FIFO.
(mTransfer == TRANSFER_SYNC) ||
// use case 3: obtain/release mode
(mTransfer == TRANSFER_OBTAIN);
if (!useCaseAllowed) {
ALOGD("%s(%d): AUDIO_INPUT_FLAG_FAST denied, incompatible transfer = %s",
__func__, mPortId,
convertTransferToText(mTransfer));
mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
AUDIO_INPUT_FLAG_RAW));
}
}
input.attr = mAttributes;
input.config.sample_rate = mSampleRate;
input.config.channel_mask = mChannelMask;
input.config.format = mFormat;
input.clientInfo.attributionSource = mClientAttributionSource;
input.clientInfo.clientTid = -1;
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
if (mAudioRecordThread != 0) {
input.clientInfo.clientTid = mAudioRecordThread->getTid();
}
}
input.riid = mTracker->getRiid();
input.flags = mFlags;
// The notification frame count is the period between callbacks, as suggested by the client
// but moderated by the server. For record, the calculations are done entirely on server side.
input.frameCount = mReqFrameCount;
input.notificationFrameCount = mNotificationFramesReq;
input.selectedDeviceId = mSelectedDeviceId;
input.sessionId = mSessionId;
originalSessionId = mSessionId;
input.maxSharedAudioHistoryMs = mMaxSharedAudioHistoryMs;
do {
media::CreateRecordResponse response;
auto aidlInput = input.toAidl();
if (!aidlInput.ok()) {
return logIfErrorAndReturnStatus(
BAD_VALUE,
StringPrintf("%s(%d): Could not create record due to invalid input", __func__,
mPortId),
"");
}
status = audioFlinger->createRecord(aidlInput.value(), response); // create the record track in AudioFlinger
auto recordOutput = IAudioFlinger::CreateRecordOutput::fromAidl(response);
if (!recordOutput.ok()) {
return logIfErrorAndReturnStatus(
BAD_VALUE,
StringPrintf("%s(%d): Could not create record output due to invalid response",
__func__, mPortId),
"");
}
output = recordOutput.value();
if (status == NO_ERROR) {
break;
}
if (status != FAILED_TRANSACTION || --remainingAttempts <= 0) {
return logIfErrorAndReturnStatus(
status,
StringPrintf("%s(%d): AudioFlinger could not create record track, status: %d",
__func__, mPortId, status),
"");
}
// FAILED_TRANSACTION happens under very specific conditions causing a state mismatch
// between audio policy manager and audio flinger during the input stream open sequence
// and can be recovered by retrying.
// Leave time for race condition to clear before retrying and randomize delay
// to reduce the probability of concurrent retries in locked steps.
usleep((20 + rand() % 30) * 10000);
} while (1);
ALOG_ASSERT(output.audioRecord != 0);
// AudioFlinger now owns the reference to the I/O handle,
// so we are no longer responsible for releasing it.
mAwaitBoost = false;
if (output.flags & AUDIO_INPUT_FLAG_FAST) {
ALOGI("%s(%d): AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu",
__func__, mPortId,
mReqFrameCount, output.frameCount);
mAwaitBoost = true;
}
mFlags = output.flags;
mRoutedDeviceId = output.selectedDeviceId;
mSessionId = output.sessionId;
mSampleRate = output.sampleRate;
mServerConfig = output.serverConfig;
mServerFrameSize = audio_bytes_per_frame(
audio_channel_count_from_in_mask(mServerConfig.channel_mask), mServerConfig.format);
mServerSampleSize = audio_bytes_per_sample(mServerConfig.format);
mHalSampleRate = output.halConfig.sample_rate;
mHalChannelCount = audio_channel_count_from_in_mask(output.halConfig.channel_mask);
mHalFormat = output.halConfig.format;
if (output.cblk == 0) {
return logIfErrorAndReturnStatus(
NO_INIT, StringPrintf("%s(%d): Could not get control block", __func__, mPortId),
"");
}
// TODO: Using unsecurePointer() has some associated security pitfalls
// (see declaration for details).
// Either document why it is safe in this case or address the
// issue (e.g. by copying).
iMemPointer = output.cblk->unsecurePointer(); // map the shared memory control block
if (iMemPointer == NULL) {
return logIfErrorAndReturnStatus(
NO_INIT,
StringPrintf("%s(%d): Could not get control block pointer", __func__, mPortId), "");
}
cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
// Starting address of buffers in shared memory.
// The buffers are either immediately after the control block,
// or in a separate area at discretion of server.
void *buffers;
if (output.buffers == 0) {
buffers = cblk + 1;
} else {
// TODO: Using unsecurePointer() has some associated security pitfalls
// (see declaration for details).
// Either document why it is safe in this case or address the
// issue (e.g. by copying).
buffers = output.buffers->unsecurePointer();
if (buffers == NULL) {
return logIfErrorAndReturnStatus(
NO_INIT,
StringPrintf("%s(%d): Could not get buffer pointer", __func__, mPortId), "");
}
}
// invariant that mAudioRecord != 0 is true only after set() returns successfully
if (mAudioRecord != 0) {
IInterface::asBinder(mAudioRecord)->unlinkToDeath(mDeathNotifier, this);
mDeathNotifier.clear();
}
mAudioRecord = output.audioRecord;
mCblkMemory = output.cblk;
mBufferMemory = output.buffers;
IPCThreadState::self()->flushCommands();
mCblk = cblk;
// note that output.frameCount is the (possibly revised) value of mReqFrameCount
if (output.frameCount < mReqFrameCount || (mReqFrameCount == 0 && output.frameCount == 0)) {
ALOGW("%s(%d): Requested frameCount %zu but received frameCount %zu",
__func__, output.portId,
mReqFrameCount, output.frameCount);
}
// Make sure that application is notified with sufficient margin before overrun.
// The computation is done on server side.
if (mNotificationFramesReq > 0 && output.notificationFrameCount != mNotificationFramesReq) {
ALOGW("%s(%d): Server adjusted notificationFrames from %u to %zu for frameCount %zu",
__func__, output.portId,
mNotificationFramesReq, output.notificationFrameCount, output.frameCount);
}
mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
if (mServerConfig.format != mFormat && mCallback != nullptr) {
mFormatConversionBufRaw = std::make_unique<uint8_t[]>(mNotificationFramesAct * mFrameSize);
mFormatConversionBuffer.raw = mFormatConversionBufRaw.get();
}
//mInput != input includes the case where mInput == AUDIO_IO_HANDLE_NONE for first creation
if (mDeviceCallback != 0) {
if (mInput != AUDIO_IO_HANDLE_NONE) {
AudioSystem::removeAudioDeviceCallback(this, mInput, mPortId);
}
AudioSystem::addAudioDeviceCallback(this, output.inputId, output.portId);
}
if (!mSharedAudioPackageName.empty()) {
mAudioRecord->shareAudioHistory(mSharedAudioPackageName, mSharedAudioStartMs);
}
mPortId = output.portId;
// We retain a copy of the I/O handle, but don't own the reference
mInput = output.inputId;
mRefreshRemaining = true;
mFrameCount = output.frameCount;
// If IAudioRecord is re-created, don't let the requested frameCount
// decrease. This can confuse clients that cache frameCount().
if (mFrameCount > mReqFrameCount) {
mReqFrameCount = mFrameCount;
}
// update proxy
mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mServerFrameSize);
mProxy->setEpoch(epoch);
mProxy->setMinimum(mNotificationFramesAct);
mDeathNotifier = new DeathNotifier(this);
IInterface::asBinder(mAudioRecord)->linkToDeath(mDeathNotifier, this);
mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD) + std::to_string(mPortId);
mediametrics::LogItem(mMetricsId)
.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE)
.set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
// the following are immutable (at least until restore)
.set(AMEDIAMETRICS_PROP_FLAGS, toString(mFlags).c_str())
.set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
.set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
.set(AMEDIAMETRICS_PROP_TRACKID, mPortId)
.set(AMEDIAMETRICS_PROP_LOGSESSIONID, mLogSessionId)
.set(AMEDIAMETRICS_PROP_SOURCE, toString(mAttributes.source).c_str())
.set(AMEDIAMETRICS_PROP_THREADID, (int32_t)output.inputId)
.set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
.set(AMEDIAMETRICS_PROP_ROUTEDDEVICEID, (int32_t)mRoutedDeviceId)
.set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
.set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
.set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
.set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
// the following are NOT immutable
.set(AMEDIAMETRICS_PROP_STATE, stateToString(mActive))
.set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
.set(AMEDIAMETRICS_PROP_SELECTEDMICDIRECTION, (int32_t)mSelectedMicDirection)
.set(AMEDIAMETRICS_PROP_SELECTEDMICFIELDDIRECTION, (double)mSelectedMicFieldDimension)
.record();
// sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
return logIfErrorAndReturnStatus(status, "", "");
}
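The layout of the shared region is worth spelling out: when output.buffers == 0 the sample buffers start right after the control block in the same region, i.e. [ audio_track_cblk_t | data buffers ... ] with buffers = cblk + 1; otherwise the server has placed the data in a second, separate shared memory region. Either way the client wraps the mapped pointers in AudioRecordClientProxy rather than copying the memory.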
status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
{
const int64_t beginNs = systemTime();
ALOGV("%s(%d): sync event %d trigger session %d", __func__, mPortId, event, triggerSession);
AutoMutex lock(mLock);
status_t status = NO_ERROR;
mediametrics::Defer defer([&] {
mediametrics::LogItem(mMetricsId)
.set(AMEDIAMETRICS_PROP_CALLERNAME,
mCallerName.empty()
? AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN
: mCallerName.c_str())
.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_START)
.set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
.set(AMEDIAMETRICS_PROP_STATE, stateToString(mActive))
.set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
.record(); });
if (mActive) {
return status;
}
// discard data in buffer
const uint32_t framesFlushed = mProxy->flush();
mFramesReadServerOffset -= mFramesRead + framesFlushed;
mFramesRead = 0;
mProxy->clearTimestamp(); // timestamp is invalid until next server push
mPreviousTimestamp.clear();
mTimestampRetrogradePositionReported = false;
mTimestampRetrogradeTimeReported = false;
// reset current position as seen by client to 0
mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
// force refresh of remaining frames by processAudioBuffer() as last
// read before stop could be partial.
mRefreshRemaining = true;
mNewPosition = mProxy->getPosition() + mUpdatePeriod;
int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);
// we reactivate markers (mMarkerPosition != 0) as the position is reset to 0.
// This is legacy behavior. This is not done in stop() to avoid a race condition
// where the last marker event is issued twice.
mMarkerReached = false;
// mActive is checked by restoreRecord_l
mActive = true;
if (!(flags & CBLK_INVALID)) {
status = statusTFromBinderStatus(mAudioRecord->start(event, triggerSession));
if (status == DEAD_OBJECT) {
flags |= CBLK_INVALID;
}
}
if (flags & CBLK_INVALID) {
status = restoreRecord_l("start");
}
// Call these directly because we are already holding the lock.
mAudioRecord->setPreferredMicrophoneDirection(mSelectedMicDirection);
mAudioRecord->setPreferredMicrophoneFieldDimension(mSelectedMicFieldDimension);
if (status != NO_ERROR) {
mActive = false;
ALOGE("%s(%d): status %d", __func__, mPortId, status);
mMediaMetrics.markError(status, __FUNCTION__);
} else {
mTracker->recordingStarted(); // log the start event so that clients can observe an app has begun capturing
sp<AudioRecordThread> t = mAudioRecordThread; // wake the capture callback thread
if (t != 0) {
t->resume();
} else {
mPreviousPriority = getpriority(PRIO_PROCESS, 0);
get_sched_policy(0, &mPreviousSchedulingGroup);
androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
}
// we've successfully started, log that time
mMediaMetrics.logStart(systemTime());
}
return status;
}
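At the SDK level, the start() above is reached via AudioRecord.startRecording(), which is typically followed by a blocking read loop. A minimal sketch (isRecording and minBufSize are hypothetical app-side names):

record.startRecording();
byte[] buf = new byte[minBufSize];
while (isRecording) {
    int n = record.read(buf, 0, buf.length); // blocks; TRANSFER_SYNC underneath
    if (n > 0) {
        // hand n bytes of PCM to the consumer
    }
}
record.stop();
record.release();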
// frameworks/av/media/libaudioclient/AudioRecord.cpp
ssize_t AudioRecord::read(void* buffer, size_t userSize, bool blocking)
{
if (mTransfer != TRANSFER_SYNC) {
return INVALID_OPERATION;
}
if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
// Validation. user is most-likely passing an error code, and it would
// make the return value ambiguous (actualSize vs error).
ALOGE("%s(%d) (buffer=%p, size=%zu (%zu)",
__func__, mPortId, buffer, userSize, userSize);
return BAD_VALUE;
}
ssize_t read = 0;
Buffer audioBuffer;
while (userSize >= mFrameSize) {
audioBuffer.frameCount = userSize / mFrameSize;
status_t err = obtainBuffer(&audioBuffer,
blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking); // fetch captured data from the shared memory
if (err < 0) {
// out of buffers, return #bytes written
if (read > 0) {
break;
}
if (err == TIMED_OUT || err == -EINTR) {
err = WOULD_BLOCK;
}
return ssize_t(err);
}
size_t bytesRead = audioBuffer.frameCount * mFrameSize;
if (audio_is_linear_pcm(mFormat)) {
memcpy_by_audio_format(buffer, mFormat, audioBuffer.raw, mServerConfig.format,
audioBuffer.mSize / mServerSampleSize);
} else {
memcpy(buffer, audioBuffer.raw, audioBuffer.mSize);
}
buffer = ((char *) buffer) + bytesRead;
userSize -= bytesRead;
read += bytesRead;
releaseBuffer(&audioBuffer);
}
if (read > 0) {
mFramesRead += read / mFrameSize;
// mFramesReadTime = systemTime(SYSTEM_TIME_MONOTONIC); // not provided at this time.
}
return read;
}
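Two details of read(): the loop copies at most one contiguous chunk per obtainBuffer() call and accumulates the total into read, and if the server-side capture format differs from the client's requested PCM format, memcpy_by_audio_format() converts sample by sample during the copy (audioBuffer.mSize / mServerSampleSize is a sample count, not a byte count).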
status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
struct timespec *elapsed, size_t *nonContig)
{
// previous and new IAudioRecord sequence numbers are used to detect track re-creation
uint32_t oldSequence = 0;
Proxy::Buffer buffer;
status_t status = NO_ERROR;
static const int32_t kMaxTries = 5;
int32_t tryCounter = kMaxTries;
do {
// obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
// keep them from going away if another thread re-creates the track during obtainBuffer()
sp<AudioRecordClientProxy> proxy;
sp<IMemory> iMem;
sp<IMemory> bufferMem;
{
// start of lock scope
AutoMutex lock(mLock);
uint32_t newSequence = mSequence;
// did previous obtainBuffer() fail due to media server death or voluntary invalidation?
if (status == DEAD_OBJECT) {
// re-create track, unless someone else has already done so
if (newSequence == oldSequence) {
if (!audio_is_linear_pcm(mFormat)) {
// If compressed capture, don't attempt to restore the track.
// Return a DEAD_OBJECT error and let the caller recreate.
tryCounter = 0;
} else {
status = restoreRecord_l("obtainBuffer");
}
if (status != NO_ERROR) {
buffer.mFrameCount = 0;
buffer.mRaw = NULL;
buffer.mNonContig = 0;
break;
}
}
}
oldSequence = newSequence;
// Keep the extra references
proxy = mProxy;
iMem = mCblkMemory;
bufferMem = mBufferMemory;
// Non-blocking if track is stopped
if (!mActive) {
requested = &ClientProxy::kNonBlocking;
}
} // end of lock scope
buffer.mFrameCount = audioBuffer->frameCount;
// FIXME starts the requested timeout and elapsed over from scratch
status = proxy->obtainBuffer(&buffer, requested, elapsed);
} while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
audioBuffer->frameCount = buffer.mFrameCount;
audioBuffer->mSize = buffer.mFrameCount * mServerFrameSize;
audioBuffer->raw = buffer.mRaw;
audioBuffer->sequence = oldSequence;
if (nonContig != NULL) {
*nonContig = buffer.mNonContig;
}
return status;
}
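The do/while here is the recovery path for mediaserver death: when proxy->obtainBuffer() reports DEAD_OBJECT, restoreRecord_l() re-runs createRecord_l() (up to kMaxTries = 5 attempts), and the mSequence counter lets this thread detect that another thread has already re-created the track, in which case it simply retries with the new proxy.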
// frameworks/av/media/libaudioclient/AudioTrackShared.cpp
status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *requested,
struct timespec *elapsed)
{
LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0,
"%s: null or zero frame buffer, buffer:%p", __func__, buffer);
struct timespec total; // total elapsed time spent waiting
total.tv_sec = 0;
total.tv_nsec = 0;
bool measure = elapsed != NULL; // whether to measure total elapsed time spent waiting
status_t status;
enum {
TIMEOUT_ZERO, // requested == NULL || *requested == 0
TIMEOUT_INFINITE, // *requested == infinity
TIMEOUT_FINITE, // 0 < *requested < infinity
TIMEOUT_CONTINUE, // additional chances after TIMEOUT_FINITE
} timeout;
if (requested == NULL) {
timeout = TIMEOUT_ZERO;
} else if (requested->tv_sec == 0 && requested->tv_nsec == 0) {
timeout = TIMEOUT_ZERO;
} else if (requested->tv_sec == INT_MAX) {
timeout = TIMEOUT_INFINITE;
} else {
timeout = TIMEOUT_FINITE;
if (requested->tv_sec > 0 || requested->tv_nsec >= MEASURE_NS) {
measure = true;
}
}
struct timespec before;
bool beforeIsValid = false;
audio_track_cblk_t* cblk = mCblk;
bool ignoreInitialPendingInterrupt = true;
// check for shared memory corruption
if (mIsShutdown) {
status = NO_INIT;
goto end;
}
for (;;) {
int32_t flags = android_atomic_and(~CBLK_INTERRUPT, &cblk->mFlags);
// check for track invalidation by server, or server death detection
if (flags & CBLK_INVALID) {
ALOGV("Track invalidated");
status = DEAD_OBJECT;
goto end;
}
if (flags & CBLK_DISABLED) {
ALOGV("Track disabled");
status = NOT_ENOUGH_DATA;
goto end;
}
// check for obtainBuffer interrupted by client
if (!ignoreInitialPendingInterrupt && (flags & CBLK_INTERRUPT)) {
ALOGV("obtainBuffer() interrupted by client");
status = -EINTR;
goto end;
}
ignoreInitialPendingInterrupt = false;
// compute number of frames available to write (AudioTrack) or read (AudioRecord)
int32_t front; // head of the ring buffer
int32_t rear; // tail of the ring buffer
if (mIsOut) {
// The barrier following the read of mFront is probably redundant.
// We're about to perform a conditional branch based on 'filled',
// which will force the processor to observe the read of mFront
// prior to allowing data writes starting at mRaw.
// However, the processor may support speculative execution,
// and be unable to undo speculative writes into shared memory.
// The barrier will prevent such speculative execution.
front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
rear = cblk->u.mStreaming.mRear;
} else {
// On the other hand, this barrier is required.
rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
front = cblk->u.mStreaming.mFront;
}
// write to rear, read from front
ssize_t filled = audio_utils::safe_sub_overflow(rear, front);
// pipe should not be overfull
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
if (mIsOut) {
ALOGE("Shared memory control block is corrupt (filled=%zd, mFrameCount=%zu); "
"shutting down", filled, mFrameCount);
mIsShutdown = true;
status = NO_INIT;
goto end;
}
// for input, sync up on overrun
filled = 0;
cblk->u.mStreaming.mFront = rear;
(void) android_atomic_or(CBLK_OVERRUN, &cblk->mFlags);
}
// Don't allow filling pipe beyond the user settable size.
// The calculation for avail can go negative if the buffer size
// is suddenly dropped below the amount already in the buffer.
// So use a signed calculation to prevent a numeric overflow abort.
ssize_t adjustableSize = (ssize_t) getBufferSizeInFrames();
ssize_t avail = (mIsOut) ? adjustableSize - filled : filled;
if (avail < 0) {
avail = 0;
} else if (avail > 0) {
// 'avail' may be non-contiguous, so return only the first contiguous chunk
size_t part1;
if (mIsOut) {
rear &= mFrameCountP2 - 1;
part1 = mFrameCountP2 - rear;
} else {
front &= mFrameCountP2 - 1;
part1 = mFrameCountP2 - front;
}
if (part1 > (size_t)avail) {
part1 = avail;
}
if (part1 > buffer->mFrameCount) {
part1 = buffer->mFrameCount;
}
buffer->mFrameCount = part1;
buffer->mRaw = part1 > 0 ?
&((char *) mBuffers)[(mIsOut ? rear : front) * mFrameSize] : NULL; // return a pointer into the shared buffer, so no copy is needed
buffer->mNonContig = avail - part1;
mUnreleased = part1;
status = NO_ERROR;
break;
}
struct timespec remaining;
const struct timespec *ts;
switch (timeout) {
case TIMEOUT_ZERO:
status = WOULD_BLOCK;
goto end;
case TIMEOUT_INFINITE:
ts = NULL;
break;
case TIMEOUT_FINITE:
timeout = TIMEOUT_CONTINUE;
if (MAX_SEC == 0) {
ts = requested;
break;
}
FALLTHROUGH_INTENDED;
case TIMEOUT_CONTINUE:
// FIXME we do not retry if requested < 10ms? needs documentation on this state machine
if (!measure || requested->tv_sec < total.tv_sec ||
(requested->tv_sec == total.tv_sec && requested->tv_nsec <= total.tv_nsec)) {
status = TIMED_OUT;
goto end;
}
remaining.tv_sec = requested->tv_sec - total.tv_sec;
if ((remaining.tv_nsec = requested->tv_nsec - total.tv_nsec) < 0) {
remaining.tv_nsec += 1000000000;
remaining.tv_sec++;
}
if (0 < MAX_SEC && MAX_SEC < remaining.tv_sec) {
remaining.tv_sec = MAX_SEC;
remaining.tv_nsec = 0;
}
ts = &remaining;
break;
default:
LOG_ALWAYS_FATAL("obtainBuffer() timeout=%d", timeout);
ts = NULL;
break;
}
int32_t old = android_atomic_and(~CBLK_FUTEX_WAKE, &cblk->mFutex);
if (!(old & CBLK_FUTEX_WAKE)) {
if (measure && !beforeIsValid) {
clock_gettime(CLOCK_MONOTONIC, &before);
beforeIsValid = true;
}
errno = 0;
(void) syscall(__NR_futex, &cblk->mFutex,
mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old & ~CBLK_FUTEX_WAKE, ts);
status_t error = errno; // clock_gettime can affect errno
// update total elapsed time spent waiting
if (measure) {
struct timespec after;
clock_gettime(CLOCK_MONOTONIC, &after);
total.tv_sec += after.tv_sec - before.tv_sec;
// Use auto instead of long to avoid the google-runtime-int warning.
auto deltaNs = after.tv_nsec - before.tv_nsec;
if (deltaNs < 0) {
deltaNs += 1000000000;
total.tv_sec--;
}
if ((total.tv_nsec += deltaNs) >= 1000000000) {
total.tv_nsec -= 1000000000;
total.tv_sec++;
}
before = after;
beforeIsValid = true;
}
switch (error) {
case 0: // normal wakeup by server, or by binderDied()
case EWOULDBLOCK: // benign race condition with server
case EINTR: // wait was interrupted by signal or other spurious wakeup
case ETIMEDOUT: // time-out expired
// FIXME these error/non-0 status are being dropped
break;
default:
status = error;
ALOGE("%s unexpected error %s", __func__, strerror(status));
goto end;
}
}
}
end:
if (status != NO_ERROR) {
buffer->mFrameCount = 0;
buffer->mRaw = NULL;
buffer->mNonContig = 0;
mUnreleased = 0;
}
if (elapsed != NULL) {
*elapsed = total;
}
if (requested == NULL) {
requested = &kNonBlocking;
}
if (measure) {
ALOGV("requested %ld.%03ld elapsed %ld.%03ld",
requested->tv_sec, requested->tv_nsec / 1000000,
total.tv_sec, total.tv_nsec / 1000000);
}
return status;
}
In essence, a single block of memory is shared between the client and the server. It has two parts: one holds the bookkeeping (for example, the positions of the ring buffer's head and tail), and the other holds the actual sample data. Because the entire block is shared, the ring-buffer state is shared as well, so both sides can operate on the buffer directly without extra copies.
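To make the ring-buffer index arithmetic concrete, here is a minimal sketch in Java with hypothetical names; the real code above additionally uses atomics and memory barriers:

// front/rear grow monotonically; frameCountP2 is the capacity rounded up to a power of two
int filled = rear - front; // two's-complement wraparound keeps this correct
int avail = isOut ? bufferFrames - filled : filled; // writer sees free space, reader sees data
int index = (isOut ? rear : front) & (frameCountP2 - 1); // wrap the index into the buffer
int part1 = Math.min(avail, frameCountP2 - index); // first contiguous chunk, as in obtainBuffer()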