AudioRecord#

Initialization#

public AudioRecord(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes)

Regarding the sample rate: 44100 Hz is the only rate guaranteed to be supported on all devices; any other rate may or may not work. In that case you can pass SAMPLE_RATE_UNSPECIFIED and let the device decide which sample rate to use. channelConfig is the channel configuration: mono capture is guaranteed on all devices, while multi-channel capture may not be. bufferSizeInBytes is the size of the buffer that holds the captured data; in practice you usually pass the return value of getMinBufferSize directly.
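
For example, a minimal Java sketch of a typical setup (assuming the RECORD_AUDIO permission is already granted; 44100 Hz mono 16-bit PCM is chosen because it is the guaranteed combination):

// A minimal sketch, not production code.
AudioRecord createRecorder() {
    int sampleRate = 44100;
    int channelConfig = AudioFormat.CHANNEL_IN_MONO;
    int encoding = AudioFormat.ENCODING_PCM_16BIT;
    int bufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, encoding);
    if (bufferSize == AudioRecord.ERROR || bufferSize == AudioRecord.ERROR_BAD_VALUE) {
        throw new IllegalStateException("capture configuration not supported");
    }
    return new AudioRecord(MediaRecorder.AudioSource.MIC,
            sampleRate, channelConfig, encoding, bufferSize);
}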

getMinBufferSize#

//frameworks/base/media/java/android/media/AudioRecord.java    
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
        int channelCount = 0;
        switch (channelConfig) {
        case AudioFormat.CHANNEL_IN_DEFAULT: // AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_IN_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            channelCount = 1;
            break;
        case AudioFormat.CHANNEL_IN_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
        case (AudioFormat.CHANNEL_IN_FRONT | AudioFormat.CHANNEL_IN_BACK):
            channelCount = 2;
            break;
        case AudioFormat.CHANNEL_INVALID:
        default:
            loge("getMinBufferSize(): Invalid channel configuration.");
            return ERROR_BAD_VALUE;
        }

        int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
        if (size == 0) {
            return ERROR_BAD_VALUE;
        }
        else if (size == -1) {
            return ERROR;
        }
        else {
            return size;
        }
   }
//frameworks/base/core/jni/android_media_AudioRecord.cpp
static jint android_media_AudioRecord_get_min_buff_size(JNIEnv *env,  jobject thiz,
    jint sampleRateInHertz, jint channelCount, jint audioFormat) {

    ALOGV(">> android_media_AudioRecord_get_min_buff_size(%d, %d, %d)",
          sampleRateInHertz, channelCount, audioFormat);

    size_t frameCount = 0;
    audio_format_t format = audioFormatToNative(audioFormat); // convert the Java format constant to its native counterpart
    status_t result = AudioRecord::getMinFrameCount(&frameCount, // query the minimum frame count
            sampleRateInHertz,
            format,
            audio_channel_in_mask_from_count(channelCount));

    if (result == BAD_VALUE) {
        return 0;
    }
    if (result != NO_ERROR) {
        return -1;
    }
    return frameCount * audio_bytes_per_frame(channelCount, format);
}
//frameworks/av/media/libaudioclient/AudioRecord.cpp
status_t AudioRecord::getMinFrameCount(
        size_t* frameCount,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    size_t size;
    status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);
    if (status != NO_ERROR) {
        ALOGE("%s(): AudioSystem could not query the input buffer size for"
              " sampleRate %u, format %#x, channelMask %#x; status %d",
               __func__, sampleRate, format, channelMask, status);
        return status;
    }

    // We double the size of input buffer for ping pong use of record buffer.
    const auto frameSize = audio_bytes_per_frame(
            audio_channel_count_from_in_mask(channelMask), format);
    if (frameSize == 0 || ((*frameCount = (size * 2) / frameSize) == 0)) {
        ALOGE("%s(): Unsupported configuration: sampleRate %u, format %#x, channelMask %#x",
                __func__, sampleRate, format, channelMask);
        return BAD_VALUE;
    }

    return NO_ERROR;
}
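
To make the doubling concrete, here is a worked example with hypothetical numbers (actual HAL buffer sizes vary per device):

// Hypothetical: AudioSystem::getInputBufferSize reports a 1920-byte input
// buffer for 48000 Hz mono 16-bit PCM (a 20 ms HAL buffer).
int halBytes   = 1920;
int frameSize  = 1 /* channel */ * 2 /* bytes per sample */;  // 2 bytes per frame
int frameCount = (halBytes * 2) / frameSize;  // 1920 frames: size doubled for ping-pong
int minBytes   = frameCount * frameSize;      // 3840 bytes, what getMinBufferSize returns
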
//frameworks/av/media/libaudioclient/AudioSystem.cpp
status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
                                         audio_channel_mask_t channelMask, size_t* buffSize) {
    const sp<AudioFlingerClient> afc = getAudioFlingerClient();
    if (afc == 0) {
        return NO_INIT;
    }
    return afc->getInputBufferSize(sampleRate, format, channelMask, buffSize);
}

status_t AudioSystem::AudioFlingerClient::getInputBufferSize(
        uint32_t sampleRate, audio_format_t format,
        audio_channel_mask_t channelMask, size_t* buffSize) {
    const sp<IAudioFlinger> af = get_audio_flinger();
    if (af == 0) {
        return PERMISSION_DENIED;
    }
    std::lock_guard _l(mMutex);
    // Do we have a stale mInBuffSize or are we requesting the input buffer size for new values
    if ((mInBuffSize == 0) || (sampleRate != mInSamplingRate) || (format != mInFormat)
        || (channelMask != mInChannelMask)) {
        size_t inBuffSize = af->getInputBufferSize(sampleRate, format, channelMask);
        if (inBuffSize == 0) {
            ALOGE("AudioSystem::getInputBufferSize failed sampleRate %d format %#x channelMask %#x",
                  sampleRate, format, channelMask);
            return BAD_VALUE;
        }
        // A benign race is possible here: we could overwrite a fresher cache entry
        // save the request params
        mInSamplingRate = sampleRate;
        mInFormat = format;
        mInChannelMask = channelMask;

        mInBuffSize = inBuffSize;
    }

    *buffSize = mInBuffSize;

    return NO_ERROR;
}

//frameworks/av/services/audioflinger/AudioFlinger.cpp
size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
        audio_channel_mask_t channelMask) const
{
    status_t ret = initCheck();
    if (ret != NO_ERROR) {
        return 0;
    }
    if ((sampleRate == 0) ||
            !audio_is_valid_format(format) ||
            !audio_is_input_channel(channelMask)) {
        return 0;
    }

    audio_utils::lock_guard lock(hardwareMutex());
    if (mPrimaryHardwareDev == nullptr) {
        return 0;
    }
    if (mInputBufferSizeOrderedDevs.empty()) {
        return 0;
    }
    mHardwareStatus = AUDIO_HW_GET_INPUT_BUFFER_SIZE;

    std::vector<audio_channel_mask_t> channelMasks = {channelMask};
    if (channelMask != AUDIO_CHANNEL_IN_MONO) {
        channelMasks.push_back(AUDIO_CHANNEL_IN_MONO);
    }
    if (channelMask != AUDIO_CHANNEL_IN_STEREO) {
        channelMasks.push_back(AUDIO_CHANNEL_IN_STEREO);
    }

    std::vector<audio_format_t> formats = {format};
    if (format != AUDIO_FORMAT_PCM_16_BIT) {
        // For compressed format, buffer size may be queried using PCM. Allow this for compatibility
        // in cases the primary hw dev does not support the format.
        // TODO: replace with a table of formats and nominal buffer sizes (based on nominal bitrate
        // and codec frame size).
        formats.push_back(AUDIO_FORMAT_PCM_16_BIT);
    }

    std::vector<uint32_t> sampleRates = {sampleRate};
    static const uint32_t SR_44100 = 44100;
    static const uint32_t SR_48000 = 48000;
    if (sampleRate != SR_48000) {
        sampleRates.push_back(SR_48000);
    }
    if (sampleRate != SR_44100) {
        sampleRates.push_back(SR_44100);
    }

    mHardwareStatus = AUDIO_HW_IDLE;

    auto getInputBufferSize = [](const sp<DeviceHalInterface>& dev, audio_config_t config,
                                 size_t* bytes) -> status_t {
        if (!dev) {
            return BAD_VALUE;
        }
        status_t result = dev->getInputBufferSize(&config, bytes);
        if (result == BAD_VALUE) {
            // Retry with the config suggested by the HAL.
            result = dev->getInputBufferSize(&config, bytes);
        }
        if (result != OK || *bytes == 0) {
            return BAD_VALUE;
        }
        return result;
    };

    // Change parameters of the configuration each iteration until we find a
    // configuration that the device will support, or HAL suggests what it supports.
    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    for (auto testChannelMask : channelMasks) {
        config.channel_mask = testChannelMask;
        for (auto testFormat : formats) {
            config.format = testFormat;
            for (auto testSampleRate : sampleRates) {
                config.sample_rate = testSampleRate;

                size_t bytes = 0;
                ret = BAD_VALUE;
                for (const AudioHwDevice* dev : mInputBufferSizeOrderedDevs) {
                    ret = getInputBufferSize(dev->hwDevice(), config, &bytes);
                    if (ret == OK) {
                        break;
                    }
                }
                if (ret == BAD_VALUE) continue;

                if (config.sample_rate != sampleRate || config.channel_mask != channelMask ||
                    config.format != format) {
                    uint32_t dstChannelCount = audio_channel_count_from_in_mask(channelMask);
                    uint32_t srcChannelCount =
                        audio_channel_count_from_in_mask(config.channel_mask);
                    size_t srcFrames =
                        bytes / audio_bytes_per_frame(srcChannelCount, config.format);
                    size_t dstFrames = destinationFramesPossible(
                        srcFrames, config.sample_rate, sampleRate);
                    bytes = dstFrames * audio_bytes_per_frame(dstChannelCount, format);
                }
                return bytes;
            }
        }
    }

    ALOGW("getInputBufferSize failed with minimum buffer size sampleRate %u, "
              "format %#x, channelMask %#x",sampleRate, format, channelMask);
    return 0;
}
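
When only a fallback configuration succeeds, the final block above scales the HAL's answer back to the requested parameters. A sketch with hypothetical numbers, approximating destinationFramesPossible as a plain sample-rate ratio:

// Hypothetical: the HAL only answered for 48000 Hz stereo 16-bit (3840 bytes),
// while the caller asked for 44100 Hz mono 16-bit.
int srcBytes     = 3840;
int srcFrameSize = 2 /* channels */ * 2;                      // 4 bytes per frame
int srcFrames    = srcBytes / srcFrameSize;                   // 960 frames
int dstFrames    = (int) ((long) srcFrames * 44100 / 48000);  // ~882 frames
int dstBytes     = dstFrames * (1 /* channel */ * 2);         // ~1764 bytes reported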

AudioRecord Initialization#

//frameworks/base/media/java/android/media/AudioRecord.java 
private AudioRecord(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int sessionId, @Nullable Context context,
            int maxSharedAudioHistoryMs, int halInputFlags) throws IllegalArgumentException {
        mRecordingState = RECORDSTATE_STOPPED;
        mHalInputFlags = halInputFlags;
        if (attributes == null) {
            throw new IllegalArgumentException("Illegal null AudioAttributes");
        }
        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // remember which looper is associated with the AudioRecord instantiation
        if ((mInitializationLooper = Looper.myLooper()) == null) {
            mInitializationLooper = Looper.getMainLooper();
        }

        // is this AudioRecord using REMOTE_SUBMIX at full volume?
        if (attributes.getCapturePreset() == MediaRecorder.AudioSource.REMOTE_SUBMIX) {
            final AudioAttributes.Builder ab =
                    new AudioAttributes.Builder(attributes);
            HashSet<String> filteredTags = new HashSet<String>();
            final Iterator<String> tagsIter = attributes.getTags().iterator();
            while (tagsIter.hasNext()) {
                final String tag = tagsIter.next();
                if (tag.equalsIgnoreCase(SUBMIX_FIXED_VOLUME)) {
                    mIsSubmixFullVolume = true;
                    Log.v(TAG, "Will record from REMOTE_SUBMIX at full fixed volume");
                } else { // SUBMIX_FIXED_VOLUME: is not to be propagated to the native layers
                    filteredTags.add(tag);
                }
            }
            ab.replaceTags(filteredTags);
            attributes = ab.build();
        }

        mAudioAttributes = attributes;

        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0;
        }

        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0)
        {
            encoding = format.getEncoding();
        }

        audioParamCheck(mAudioAttributes.getCapturePreset(), rate, encoding);

        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            mChannelIndexMask = format.getChannelIndexMask();
            mChannelCount = format.getChannelCount();
        }
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            mChannelMask = getChannelMaskFromLegacyConfig(format.getChannelMask(), false);
            mChannelCount = format.getChannelCount();
        } else if (mChannelIndexMask == 0) {
            mChannelMask = getChannelMaskFromLegacyConfig(AudioFormat.CHANNEL_IN_DEFAULT, false);
            mChannelCount =  AudioFormat.channelCountFromInChannelMask(mChannelMask);
        }

        audioBuffSizeCheck(bufferSizeInBytes);

        AttributionSource attributionSource = (context != null)
                ? context.getAttributionSource() : AttributionSource.myAttributionSource();
        if (attributionSource.getPackageName() == null) {
            // Command line utility
            attributionSource = attributionSource.withPackageName("uid:" + Binder.getCallingUid());
        }

        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = resolveSessionId(context, sessionId);

        //TODO: update native initialization when information about hardware init failure
        //      due to capture device already open is available.
        try (ScopedParcelState attributionSourceState = attributionSource.asScopedParcelState()) {
            int initResult = native_setup(new WeakReference<AudioRecord>(this), mAudioAttributes,
                    sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                    mNativeBufferSizeInBytes, session, attributionSourceState.getParcel(),
                    0 /*nativeRecordInJavaObj*/, maxSharedAudioHistoryMs, mHalInputFlags);
            if (initResult != SUCCESS) {
                loge("Error code " + initResult + " when initializing native AudioRecord object.");
                return; // with mState == STATE_UNINITIALIZED
            }
        }

        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        mState = STATE_INITIALIZED;
    }
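
Note that a native_setup failure does not throw: the constructor simply returns with the object left in STATE_UNINITIALIZED, so callers are expected to verify the state before recording. Continuing the earlier sketch:

// Continuing the hypothetical createRecorder() sketch from above.
AudioRecord record = createRecorder();
if (record.getState() != AudioRecord.STATE_INITIALIZED) {
    record.release(); // native setup failed (e.g. device busy or config rejected)
}
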
//frameworks/base/core/jni/android_media_AudioRecord.cpp
static jint android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
                                            jobject jaa, jintArray jSampleRate, jint channelMask,
                                            jint channelIndexMask, jint audioFormat,
                                            jint buffSizeInBytes, jintArray jSession,
                                            jobject jAttributionSource, jlong nativeRecordInJavaObj,
                                            jint sharedAudioHistoryMs,
                                            jint halFlags) {
    //ALOGV(">> Entering android_media_AudioRecord_setup");
    //ALOGV("sampleRate=%d, audioFormat=%d, channel mask=%x, buffSizeInBytes=%d "
    //     "nativeRecordInJavaObj=0x%llX",
    //     sampleRateInHertz, audioFormat, channelMask, buffSizeInBytes, nativeRecordInJavaObj);
    audio_channel_mask_t localChanMask = inChannelMaskToNative(channelMask);

    if (jSession == NULL) {
        ALOGE("Error creating AudioRecord: invalid session ID pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }

    jint* nSession = env->GetIntArrayElements(jSession, nullptr /* isCopy */);
    if (nSession == NULL) {
        ALOGE("Error creating AudioRecord: Error retrieving session id pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }
    audio_session_t sessionId = (audio_session_t) nSession[0];
    env->ReleaseIntArrayElements(jSession, nSession, 0 /* mode */);
    nSession = NULL;

    sp<AudioRecord> lpRecorder;
    sp<AudioRecordJNIStorage> callbackData;
    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
    }

    // if we pass in an existing *Native* AudioRecord, we don't need to create/initialize one.
    if (nativeRecordInJavaObj == 0) { // the native AudioRecord has not been created yet, so create it here
        if (jaa == 0) {
            ALOGE("Error creating AudioRecord: invalid audio attributes");
            return (jint) AUDIO_JAVA_ERROR;
        }

        if (jSampleRate == 0) {
            ALOGE("Error creating AudioRecord: invalid sample rates");
            return (jint) AUDIO_JAVA_ERROR;
        }
        jint elements[1];
        env->GetIntArrayRegion(jSampleRate, 0, 1, elements);
        int sampleRateInHertz = elements[0];

        // channel index mask takes priority over channel position masks.
        if (channelIndexMask) {
            // Java channel index masks need the representation bits set.
            localChanMask = audio_channel_mask_from_representation_and_bits(
                    AUDIO_CHANNEL_REPRESENTATION_INDEX,
                    channelIndexMask);
        }
        // Java channel position masks map directly to the native definition

        if (!audio_is_input_channel(localChanMask)) {
            ALOGE("Error creating AudioRecord: channel mask %#x is not valid.", localChanMask);
            return (jint) AUDIORECORD_ERROR_SETUP_INVALIDCHANNELMASK;
        }
        uint32_t channelCount = audio_channel_count_from_in_mask(localChanMask);

        // compare the format against the Java constants
        audio_format_t format = audioFormatToNative(audioFormat);
        if (format == AUDIO_FORMAT_INVALID) {
            ALOGE("Error creating AudioRecord: unsupported audio format %d.", audioFormat);
            return (jint) AUDIORECORD_ERROR_SETUP_INVALIDFORMAT;
        }

        if (buffSizeInBytes == 0) {
            ALOGE("Error creating AudioRecord: frameCount is 0.");
            return (jint) AUDIORECORD_ERROR_SETUP_ZEROFRAMECOUNT;
        }
        size_t frameCount = buffSizeInBytes / audio_bytes_per_frame(channelCount, format);

        // create an uninitialized AudioRecord object
        Parcel* parcel = parcelForJavaObject(env, jAttributionSource);
        android::content::AttributionSourceState attributionSource;
        attributionSource.readFromParcel(parcel);

        lpRecorder = new AudioRecord(attributionSource);

        // read the AudioAttributes values
        auto paa = JNIAudioAttributeHelper::makeUnique();
        jint jStatus = JNIAudioAttributeHelper::nativeFromJava(env, jaa, paa.get());
        if (jStatus != (jint)AUDIO_JAVA_SUCCESS) {
            return jStatus;
        }
        ALOGV("AudioRecord_setup for source=%d tags=%s flags=%08x", paa->source, paa->tags, paa->flags);

        const auto flags = static_cast<audio_input_flags_t>(halFlags);
        // create the callback information:
        // this data will be passed with every AudioRecord callback
        // we use a weak reference so the AudioRecord object can be garbage collected.
        callbackData = sp<AudioRecordJNIStorage>::make(clazz, weak_this);

        const status_t status =
                lpRecorder->set(paa->source, sampleRateInHertz, // apply the configuration
                                format, // word length, PCM
                                localChanMask, frameCount,
                                callbackData,   // callback
                                0,                // notificationFrames,
                                true,             // threadCanCallJava
                                sessionId, AudioRecord::TRANSFER_DEFAULT, flags, -1,
                                -1, // default uid, pid
                                paa.get(), AUDIO_PORT_HANDLE_NONE, MIC_DIRECTION_UNSPECIFIED,
                                MIC_FIELD_DIMENSION_DEFAULT, sharedAudioHistoryMs);

        if (status != NO_ERROR) {
            ALOGE("Error creating AudioRecord instance: initialization check failed with status %d.",
                    status);
            goto native_init_failure;
        }
        // Set caller name so it can be logged in destructor.
        // MediaMetricsConstants.h: AMEDIAMETRICS_PROP_CALLERNAME_VALUE_JAVA
        lpRecorder->setCallerName("java");
    } else { // end if (nativeRecordInJavaObj == 0) // a native AudioRecord already exists
        lpRecorder = (AudioRecord*)nativeRecordInJavaObj;
        // TODO: We need to find out which members of the Java AudioRecord might need to be
        // initialized from the Native AudioRecord
        // these are directly returned from getters:
        //  mSampleRate
        //  mRecordSource
        //  mAudioFormat
        //  mChannelMask
        //  mChannelCount
        //  mState (?)
        //  mRecordingState (?)
        //  mPreferredDevice

        // create the callback information:
        // this data will be passed with every AudioRecord callback
        // This next line makes little sense
        // callbackData = sp<AudioRecordJNIStorage>::make(clazz, weak_this);
    }

    nSession = env->GetIntArrayElements(jSession, nullptr /* isCopy */);
    if (nSession == NULL) {
        ALOGE("Error creating AudioRecord: Error retrieving session id pointer");
        goto native_init_failure;
    }
    // read the audio session ID back from AudioRecord in case a new session was created during set()
    nSession[0] = lpRecorder->getSessionId();
    env->ReleaseIntArrayElements(jSession, nSession, 0 /* mode */);
    nSession = NULL;

    {
        const jint elements[1] = { (jint) lpRecorder->getSampleRate() };
        env->SetIntArrayRegion(jSampleRate, 0, 1, elements);
    }

    // save our newly created C++ AudioRecord in the "nativeRecorderInJavaObj" field
    // of the Java object
    // link the native recorder to the Java object
    setFieldSp(env, thiz, lpRecorder, javaAudioRecordFields.nativeRecorderInJavaObj);

    // save our newly created callback information in the "jniData" field
    // of the Java object (in mNativeJNIDataHandle) so we can free the memory in finalize()
    setFieldSp(env, thiz, callbackData, javaAudioRecordFields.jniData);

    return (jint) AUDIO_JAVA_SUCCESS;

    // failure:
native_init_failure:
    setFieldSp(env, thiz, sp<AudioRecord>{}, javaAudioRecordFields.nativeRecorderInJavaObj);
    setFieldSp(env, thiz, sp<AudioRecordJNIStorage>{}, javaAudioRecordFields.jniData);

    // lpRecorder goes out of scope, so reference count drops to zero
    return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
}
//frameworks/av/media/libaudioclient/AudioRecord.cpp
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        const wp<IAudioRecordCallback>& callback,
        uint32_t notificationFrames,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        audio_input_flags_t flags,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        audio_port_handle_t selectedDeviceId,
        audio_microphone_direction_t selectedMicDirection,
        float microphoneFieldDimension,
        int32_t maxSharedAudioHistoryMs)
{
    status_t status = NO_ERROR;
    LOG_ALWAYS_FATAL_IF(mInitialized, "%s: should not be called twice", __func__);
    mInitialized = true;
    // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
    ALOGV("%s(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "notificationFrames %u, sessionId %d, transferType %d, flags %#x, attributionSource %s"
          "uid %d, pid %d",
          __func__,
          inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
          sessionId, transferType, flags, mClientAttributionSource.toString().c_str(), uid, pid);

    // TODO b/182392553: refactor or remove
    pid_t callingPid = IPCThreadState::self()->getCallingPid();
    pid_t myPid = getpid();
    pid_t adjPid = pid;
    if (pid == -1 || (callingPid != myPid)) {
        adjPid = callingPid;
    }
    auto clientAttributionSourcePid = legacy2aidl_pid_t_int32_t(adjPid);
    if (!clientAttributionSourcePid.ok()) {
        return logIfErrorAndReturnStatus(BAD_VALUE,
                                         StringPrintf("%s: received invalid client attribution "
                                                      "source pid, pid: %d, sessionId: %d",
                                                      __func__, pid, sessionId),
                                         __func__);
    }
    mClientAttributionSource.pid = clientAttributionSourcePid.value();
    uid_t adjUid = uid;
    if (uid == -1 || (callingPid != myPid)) {
        adjUid = IPCThreadState::self()->getCallingUid();
    }
    auto clientAttributionSourceUid = legacy2aidl_uid_t_int32_t(adjUid);
    if (!clientAttributionSourceUid.ok()) {
        return logIfErrorAndReturnStatus(BAD_VALUE,
                                         StringPrintf("%s: received invalid client attribution "
                                                      "source uid, pid: %d, session id: %d",
                                                      __func__, pid, sessionId),
                                         __func__);
    }
    mClientAttributionSource.uid = clientAttributionSourceUid.value();

    mTracker.reset(new RecordingActivityTracker());

    mSelectedDeviceId = selectedDeviceId;
    mSelectedMicDirection = selectedMicDirection;
    mSelectedMicFieldDimension = microphoneFieldDimension;
    mMaxSharedAudioHistoryMs = maxSharedAudioHistoryMs;

    // Copy the state variables early so they are available for error reporting.
    if (pAttributes == nullptr) {
        mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
        mAttributes.source = inputSource;
        if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION
                || inputSource == AUDIO_SOURCE_CAMCORDER) {
            mAttributes.flags = static_cast<audio_flags_mask_t>(
                    mAttributes.flags | AUDIO_FLAG_CAPTURE_PRIVATE);
        }
    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("%s: Building AudioRecord with attributes: source=%d flags=0x%x tags=[%s]",
                __func__, mAttributes.source, mAttributes.flags, mAttributes.tags);
    }
    mSampleRate = sampleRate;
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }
    if (!audio_is_linear_pcm(format)) {
       // Compressed capture requires direct
       flags = (audio_input_flags_t) (flags | AUDIO_INPUT_FLAG_DIRECT);
       ALOGI("%s(): Format %#x is not linear pcm. Setting DIRECT, using flags %#x", __func__,
             format, flags);
    }
    mFormat = format;
    mChannelMask = channelMask;
    mSessionId = sessionId;
    ALOGV("%s: mSessionId %d", __func__, mSessionId);
    mOrigFlags = mFlags = flags;

    mTransfer = transferType;
    switch (mTransfer) { // the transfer type passed in is TRANSFER_DEFAULT; the other cases can be ignored for now
    case TRANSFER_DEFAULT: 
        if (callback == nullptr || threadCanCallJava) {
            mTransfer = TRANSFER_SYNC;
        } else {
            mTransfer = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (callback == nullptr) {
            return logIfErrorAndReturnStatus(
                    BAD_VALUE,
                    StringPrintf("%s: Transfer type TRANSFER_CALLBACK but callback == nullptr, "
                                 "pid: %d, session id: %d",
                                 __func__, pid, sessionId),
                    __func__);
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        return logIfErrorAndReturnStatus(
                BAD_VALUE,
                StringPrintf("%s: Invalid transfer type %d, pid: %d, session id: %d", __func__,
                             mTransfer, pid, sessionId),
                __func__);
    }

    // invariant that mAudioRecord != 0 is true only after set() returns successfully
    if (mAudioRecord != 0) {
        return logIfErrorAndReturnStatus(
                INVALID_OPERATION,
                StringPrintf("%s: Track already in use, pid: %d, session id: %d", __func__, pid,
                             sessionId),
                __func__);
    }

    if (!audio_is_valid_format(mFormat)) {
        return logIfErrorAndReturnStatus(
                BAD_VALUE,
                StringPrintf("%s: Format %#x is not valid, pid: %d, session id: %d", __func__,
                             mFormat, pid, sessionId),
                __func__);
    }

    if (!audio_is_input_channel(mChannelMask)) {
        return logIfErrorAndReturnStatus(
                BAD_VALUE,
                StringPrintf("%s: Invalid channel mask %#x, pid: %d, session id: %d", __func__,
                             mChannelMask, pid, sessionId),
                __func__);
    }

    mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
    mFrameSize = audio_bytes_per_frame(mChannelCount, mFormat);

    // mFrameCount is initialized in createRecord_l
    mReqFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    // mNotificationFramesAct is initialized in createRecord_l

    mCallback = callback;
    if (mCallback != nullptr) {
        mAudioRecordThread = new AudioRecordThread(*this);  // callback thread: once data is captured, it proactively calls back into the client
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioRecord
    // this is the key step
    {
        AutoMutex lock(mLock);
        status = createRecord_l(0 /*epoch*/);
    }

    ALOGV("%s(%d): status %d", __func__, mPortId, status);

    if (status != NO_ERROR) {
        if (mAudioRecordThread != 0) {
            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
            mAudioRecordThread->requestExitAndWait();
            mAudioRecordThread.clear();
        }
        // bypass error message to avoid logging twice (createRecord_l logs the error).
        mStatus = status;
        return mStatus;
    }

    // TODO: add audio hardware input latency here
    mLatency = (1000LL * mFrameCount) / mSampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, adjPid, adjUid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;
    mFramesRead = 0;
    mFramesReadServerOffset = 0;

    return logIfErrorAndReturnStatus(status, "", __func__);
}
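
One detail worth calling out: the JNI layer passes threadCanCallJava = true, so TRANSFER_DEFAULT resolves to TRANSFER_SYNC for the Java AudioRecord, i.e. the app pulls data with blocking read() calls. A minimal capture loop might look like this (the stop flag and method name are illustrative):

// Continuing the sketch: a blocking read loop, matching TRANSFER_SYNC semantics.
// Assumes java.util.concurrent.atomic.AtomicBoolean is imported.
void captureLoop(AudioRecord record, int bufferSize, AtomicBoolean stop) {
    byte[] pcm = new byte[bufferSize];
    record.startRecording();
    while (!stop.get()) {
        int n = record.read(pcm, 0, pcm.length); // blocks until data is available
        if (n > 0) {
            // hand n bytes of 16-bit PCM to the consumer
        }
    }
    record.stop();
}
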
status_t AudioRecord::createRecord_l(const Modulo<uint32_t> &epoch)
{
    const int64_t beginNs = systemTime();
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    IAudioFlinger::CreateRecordInput input;
    IAudioFlinger::CreateRecordOutput output;
    [[maybe_unused]] audio_session_t originalSessionId;
    void *iMemPointer;
    audio_track_cblk_t* cblk;
    status_t status;
    static const int32_t kMaxCreateAttempts = 3;
    int32_t remainingAttempts = kMaxCreateAttempts;

    if (audioFlinger == 0) {
        return logIfErrorAndReturnStatus(
                NO_INIT, StringPrintf("%s(%d): Could not get audioflinger", __func__, mPortId), "");
    }

    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
    // After fast request is denied, we will request again if IAudioRecord is re-created.

    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Client can only express a preference for FAST.  Server will perform additional tests.
    if (mFlags & AUDIO_INPUT_FLAG_FAST) {
        bool useCaseAllowed =
            // any of these use cases:
            // use case 1: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 2: blocking read mode
            // The default buffer capacity at 48 kHz is 2048 frames, or ~42.6 ms.
            // That's enough for double-buffering with our standard 20 ms rule of thumb for
            // the minimum period of a non-SCHED_FIFO thread.
            // This is needed so that AAudio apps can do a low latency non-blocking read from a
            // callback running with SCHED_FIFO.
            (mTransfer == TRANSFER_SYNC) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN);
        if (!useCaseAllowed) {
            ALOGD("%s(%d): AUDIO_INPUT_FLAG_FAST denied, incompatible transfer = %s",
                  __func__, mPortId,
                  convertTransferToText(mTransfer));
            mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
                    AUDIO_INPUT_FLAG_RAW));
        }
    }

    input.attr = mAttributes;
    input.config.sample_rate = mSampleRate;
    input.config.channel_mask = mChannelMask;
    input.config.format = mFormat;
    input.clientInfo.attributionSource = mClientAttributionSource;
    input.clientInfo.clientTid = -1;
    if (mFlags & AUDIO_INPUT_FLAG_FAST) {
        if (mAudioRecordThread != 0) {
            input.clientInfo.clientTid = mAudioRecordThread->getTid();
        }
    }
    input.riid = mTracker->getRiid();

    input.flags = mFlags;
    // The notification frame count is the period between callbacks, as suggested by the client
    // but moderated by the server.  For record, the calculations are done entirely on server side.
    input.frameCount = mReqFrameCount;
    input.notificationFrameCount = mNotificationFramesReq;
    input.selectedDeviceId = mSelectedDeviceId;
    input.sessionId = mSessionId;
    originalSessionId = mSessionId;
    input.maxSharedAudioHistoryMs = mMaxSharedAudioHistoryMs;

    do {
        media::CreateRecordResponse response;
        auto aidlInput = input.toAidl();
        if (!aidlInput.ok()) {
            return logIfErrorAndReturnStatus(
                    BAD_VALUE,
                    StringPrintf("%s(%d): Could not create record due to invalid input", __func__,
                                 mPortId),
                    "");
        }
        status = audioFlinger->createRecord(aidlInput.value(), response); // ask AudioFlinger to create the record track

        auto recordOutput = IAudioFlinger::CreateRecordOutput::fromAidl(response);
        if (!recordOutput.ok()) {
            return logIfErrorAndReturnStatus(
                    BAD_VALUE,
                    StringPrintf("%s(%d): Could not create record output due to invalid response",
                                 __func__, mPortId),
                    "");
        }
        output = recordOutput.value();
        if (status == NO_ERROR) {
            break;
        }
        if (status != FAILED_TRANSACTION || --remainingAttempts <= 0) {
            return logIfErrorAndReturnStatus(
                    status,
                    StringPrintf("%s(%d): AudioFlinger could not create record track, status: %d",
                                 __func__, mPortId, status),
                    "");
        }
        // FAILED_TRANSACTION happens under very specific conditions causing a state mismatch
        // between audio policy manager and audio flinger during the input stream open sequence
        // and can be recovered by retrying.
        // Leave time for race condition to clear before retrying and randomize delay
        // to reduce the probability of concurrent retries in locked steps.
        usleep((20 + rand() % 30) * 10000);
    } while (1);

    ALOG_ASSERT(output.audioRecord != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    mAwaitBoost = false;
    if (output.flags & AUDIO_INPUT_FLAG_FAST) {
        ALOGI("%s(%d): AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu",
              __func__, mPortId,
              mReqFrameCount, output.frameCount);
        mAwaitBoost = true;
    }
    mFlags = output.flags;
    mRoutedDeviceId = output.selectedDeviceId;
    mSessionId = output.sessionId;
    mSampleRate = output.sampleRate;
    mServerConfig = output.serverConfig;
    mServerFrameSize = audio_bytes_per_frame(
            audio_channel_count_from_in_mask(mServerConfig.channel_mask), mServerConfig.format);
    mServerSampleSize = audio_bytes_per_sample(mServerConfig.format);
    mHalSampleRate = output.halConfig.sample_rate;
    mHalChannelCount = audio_channel_count_from_in_mask(output.halConfig.channel_mask);
    mHalFormat = output.halConfig.format;

    if (output.cblk == 0) {
        return logIfErrorAndReturnStatus(
                NO_INIT, StringPrintf("%s(%d): Could not get control block", __func__, mPortId),
                "");
    }
    // TODO: Using unsecurePointer() has some associated security pitfalls
    //       (see declaration for details).
    //       Either document why it is safe in this case or address the
    //       issue (e.g. by copying).
    iMemPointer = output.cblk->unsecurePointer(); // obtain the shared-memory pointer
    if (iMemPointer == NULL) {
        return logIfErrorAndReturnStatus(
                NO_INIT,
                StringPrintf("%s(%d): Could not get control block pointer", __func__, mPortId), "");
    }
    cblk = static_cast<audio_track_cblk_t*>(iMemPointer);

    // Starting address of buffers in shared memory.
    // The buffers are either immediately after the control block,
    // or in a separate area at discretion of server.
    void *buffers;
    if (output.buffers == 0) {
        buffers = cblk + 1;
    } else {
        // TODO: Using unsecurePointer() has some associated security pitfalls
        //       (see declaration for details).
        //       Either document why it is safe in this case or address the
        //       issue (e.g. by copying).
        buffers = output.buffers->unsecurePointer();
        if (buffers == NULL) {
            return logIfErrorAndReturnStatus(
                    NO_INIT,
                    StringPrintf("%s(%d): Could not get buffer pointer", __func__, mPortId), "");
        }
    }

    // invariant that mAudioRecord != 0 is true only after set() returns successfully
    if (mAudioRecord != 0) {
        IInterface::asBinder(mAudioRecord)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioRecord = output.audioRecord;
    mCblkMemory = output.cblk;
    mBufferMemory = output.buffers;
    IPCThreadState::self()->flushCommands();

    mCblk = cblk;
    // note that output.frameCount is the (possibly revised) value of mReqFrameCount
    if (output.frameCount < mReqFrameCount || (mReqFrameCount == 0 && output.frameCount == 0)) {
        ALOGW("%s(%d): Requested frameCount %zu but received frameCount %zu",
              __func__, output.portId,
              mReqFrameCount,  output.frameCount);
    }

    // Make sure that application is notified with sufficient margin before overrun.
    // The computation is done on server side.
    if (mNotificationFramesReq > 0 && output.notificationFrameCount != mNotificationFramesReq) {
        ALOGW("%s(%d): Server adjusted notificationFrames from %u to %zu for frameCount %zu",
                __func__, output.portId,
                mNotificationFramesReq, output.notificationFrameCount, output.frameCount);
    }
    mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
    if (mServerConfig.format != mFormat && mCallback != nullptr) {
        mFormatConversionBufRaw = std::make_unique<uint8_t[]>(mNotificationFramesAct * mFrameSize);
        mFormatConversionBuffer.raw = mFormatConversionBufRaw.get();
    }

    //mInput != input includes the case where mInput == AUDIO_IO_HANDLE_NONE for first creation
    if (mDeviceCallback != 0) {
        if (mInput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(this, mInput, mPortId);
        }
        AudioSystem::addAudioDeviceCallback(this, output.inputId, output.portId);
    }

    if (!mSharedAudioPackageName.empty()) {
        mAudioRecord->shareAudioHistory(mSharedAudioPackageName, mSharedAudioStartMs);
    }

    mPortId = output.portId;
    // We retain a copy of the I/O handle, but don't own the reference
    mInput = output.inputId;
    mRefreshRemaining = true;

    mFrameCount = output.frameCount;
    // If IAudioRecord is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (mFrameCount > mReqFrameCount) {
        mReqFrameCount = mFrameCount;
    }

    // update proxy
    mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mServerFrameSize);
    mProxy->setEpoch(epoch);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioRecord)->linkToDeath(mDeathNotifier, this);

    mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD) + std::to_string(mPortId);
    mediametrics::LogItem(mMetricsId)
        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE)
        .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
        // the following are immutable (at least until restore)
        .set(AMEDIAMETRICS_PROP_FLAGS, toString(mFlags).c_str())
        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
        .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
        .set(AMEDIAMETRICS_PROP_TRACKID, mPortId)
        .set(AMEDIAMETRICS_PROP_LOGSESSIONID, mLogSessionId)
        .set(AMEDIAMETRICS_PROP_SOURCE, toString(mAttributes.source).c_str())
        .set(AMEDIAMETRICS_PROP_THREADID, (int32_t)output.inputId)
        .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
        .set(AMEDIAMETRICS_PROP_ROUTEDDEVICEID, (int32_t)mRoutedDeviceId)
        .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
        .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
        .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
        .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
        // the following are NOT immutable
        .set(AMEDIAMETRICS_PROP_STATE, stateToString(mActive))
        .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
        .set(AMEDIAMETRICS_PROP_SELECTEDMICDIRECTION, (int32_t)mSelectedMicDirection)
        .set(AMEDIAMETRICS_PROP_SELECTEDMICFIELDDIRECTION, (double)mSelectedMicFieldDimension)
        .record();

    // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
    return logIfErrorAndReturnStatus(status, "", "");
}
On the AudioFlinger side, createRecord does three things:
  • create a Record structure, along with memory that can be shared across processes (a toy model of that buffer follows below);
  • associate the Record with a thread, so that the thread can keep looping and calling the HAL capture interface;
  • wrap the Record as a binder object, so the caller can operate on it directly.
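
As a rough mental model of that shared buffer (deliberately simplified; the real audio_track_cblk_t uses lock-free, futex-assisted wrapping indices rather than this toy code):

// Toy single-producer/single-consumer ring buffer; illustration only, not AOSP code.
final class ToyRecordBuffer {
    private final byte[] data = new byte[4096]; // lives in ashmem in the real design
    private volatile int writeIndex;            // advanced by the server (capture) side
    private volatile int readIndex;             // advanced by the client (app) side

    // Client side, conceptually what AudioRecord.read() does via the proxy.
    int read(byte[] dst, int len) {
        int n = Math.min(len, writeIndex - readIndex);
        for (int i = 0; i < n; i++) {
            dst[i] = data[(readIndex + i) % data.length];
        }
        readIndex += n; // frees space for the producer to overwrite
        return n;
    }
}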
//frameworks/av/services/audioflinger/AudioFlinger.cpp
status_t AudioFlinger::createRecord(const media::CreateRecordRequest& _input,
                                    media::CreateRecordResponse& _output)
{
    CreateRecordInput input = VALUE_OR_RETURN_STATUS(CreateRecordInput::fromAidl(_input));
    CreateRecordOutput output;

    sp<IAfRecordTrack> recordTrack;
    sp<Client> client;
    status_t lStatus;
    audio_session_t sessionId = input.sessionId;
    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;

    output.cblk.clear();
    output.buffers.clear();
    output.inputId = AUDIO_IO_HANDLE_NONE;

    // TODO b/182392553: refactor or clean up
    AttributionSourceState adjAttributionSource = input.clientInfo.attributionSource;
    bool updatePid = (adjAttributionSource.pid == -1);
    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
    const uid_t currentUid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(
           adjAttributionSource.uid));
    if (!isAudioServerOrMediaServerOrSystemServerOrRootUid(callingUid)) {
        ALOGW_IF(currentUid != callingUid,
                "%s uid %d tried to pass itself off as %d",
                __FUNCTION__, callingUid, currentUid);
        adjAttributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
        updatePid = true;
    }
    const pid_t callingPid = IPCThreadState::self()->getCallingPid();
    const pid_t currentPid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(
            adjAttributionSource.pid));
    if (updatePid) {
        ALOGW_IF(currentPid != (pid_t)-1 && currentPid != callingPid,
                 "%s uid %d pid %d tried to pass itself off as pid %d",
                 __func__, callingUid, callingPid, currentPid);
        adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
    }
    adjAttributionSource = afutils::checkAttributionSourcePackage(
            adjAttributionSource);
    // further format checks are performed by createRecordTrack_l()
    if (!audio_is_valid_format(input.config.format)) {
        ALOGE("createRecord() invalid format %#x", input.config.format);
        lStatus = BAD_VALUE;
        goto Exit;
    }

    // further channel mask checks are performed by createRecordTrack_l()
    if (!audio_is_input_channel(input.config.channel_mask)) {
        ALOGE("createRecord() invalid channel mask %#x", input.config.channel_mask);
        lStatus = BAD_VALUE;
        goto Exit;
    }

    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
        lStatus = BAD_VALUE;
        goto Exit;
    }

    output.sessionId = sessionId;
    output.selectedDeviceId = input.selectedDeviceId;
    output.flags = input.flags;

    client = registerPid(VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(adjAttributionSource.pid))); // creates the Client and its shared-memory heap

    // Not a conventional loop, but a retry loop for at most two iterations total.
    // Try first maybe with FAST flag then try again without FAST flag if that fails.
    // Exits the loop via break on success, or via goto Exit on error.
    // The sp<> references will be dropped when re-entering scope.
    // The lack of indentation is deliberate, to reduce code churn and ease merges.
    for (;;) {
    // release previously opened input if retrying.
    if (output.inputId != AUDIO_IO_HANDLE_NONE) {
        recordTrack.clear();
        AudioSystem::releaseInput(portId);
        output.inputId = AUDIO_IO_HANDLE_NONE;
        output.selectedDeviceId = input.selectedDeviceId;
        portId = AUDIO_PORT_HANDLE_NONE;
    }
    lStatus = AudioSystem::getInputForAttr(&input.attr, &output.inputId, // ask audio policy to select and open an input for these attributes
                                      input.riid,
                                      sessionId,
                                    // FIXME compare to AudioTrack
                                      adjAttributionSource,
                                      &input.config,
                                      output.flags, &output.selectedDeviceId, &portId);
    if (lStatus != NO_ERROR) {
        ALOGE("createRecord() getInputForAttr return error %d", lStatus);
        goto Exit;
    }

    {
        audio_utils::lock_guard _l(mutex());
        IAfRecordThread* const thread = checkRecordThread_l(output.inputId); // look up the record thread that owns this input
        if (thread == NULL) {
            ALOGW("createRecord() checkRecordThread_l failed, input handle %d", output.inputId);
            lStatus = FAILED_TRANSACTION;
            goto Exit;
        }

        ALOGV("createRecord() lSessionId: %d input %d", sessionId, output.inputId);

        output.sampleRate = input.config.sample_rate;
        output.frameCount = input.frameCount;
        output.notificationFrameCount = input.notificationFrameCount;

        recordTrack = thread->createRecordTrack_l(client, input.attr, &output.sampleRate, // create the RecordTrack on that thread
                                                  input.config.format, input.config.channel_mask,
                                                  &output.frameCount, sessionId,
                                                  &output.notificationFrameCount,
                                                  callingPid, adjAttributionSource, &output.flags,
                                                  input.clientInfo.clientTid,
                                                  &lStatus, portId, input.maxSharedAudioHistoryMs);
        LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));

        // lStatus == BAD_TYPE means FAST flag was rejected: request a new input from
        // audio policy manager without FAST constraint
        if (lStatus == BAD_TYPE) {
            continue;
        }

        if (lStatus != NO_ERROR) {
            goto Exit;
        }

        if (recordTrack->isFastTrack()) {
            output.serverConfig = {
                    thread->sampleRate(),
                    thread->channelMask(),
                    thread->format()
            };
        } else {
            output.serverConfig = {
                    recordTrack->sampleRate(),
                    recordTrack->channelMask(),
                    recordTrack->format()
            };
        }

        output.halConfig = {
                thread->sampleRate(),
                thread->channelMask(),
                thread->format()
        };

        // Check if one effect chain was awaiting for an AudioRecord to be created on this
        // session and move it to this thread.
        sp<IAfEffectChain> chain = getOrphanEffectChain_l(sessionId);
        if (chain != 0) {
            audio_utils::lock_guard _l2(thread->mutex());
            thread->addEffectChain_l(chain);
        }
        break;
    }
    // End of retry loop.
    // The lack of indentation is deliberate, to reduce code churn and ease merges.
    }

    output.cblk = recordTrack->getCblk();
    output.buffers = recordTrack->getBuffers(); // hand back the anonymous shared-memory handles
    output.portId = portId;

    output.audioRecord = IAfRecordTrack::createIAudioRecordAdapter(recordTrack);
    _output = VALUE_OR_FATAL(output.toAidl()); // package the result for the binder reply

Exit:
    if (lStatus != NO_ERROR) {
        // remove local strong reference to Client before deleting the RecordTrack so that the
        // Client destructor is called by the TrackBase destructor with clientMutex() held
        // Don't hold clientMutex() when releasing the reference on the track as the
        // destructor will acquire it.
        {
            audio_utils::lock_guard _cl(clientMutex());
            client.clear();
        }
        recordTrack.clear();
        if (output.inputId != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::releaseInput(portId);
        }
    }

    return lStatus;
}
// frameworks/av/services/audioflinger/Threads.cpp
// RecordThread::createRecordTrack_l() must be called with AudioFlinger::mutex() held
sp<IAfRecordTrack> RecordThread::createRecordTrack_l(
        const sp<Client>& client,
        const audio_attributes_t& attr,
        uint32_t *pSampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t *pFrameCount,
        audio_session_t sessionId,
        size_t *pNotificationFrameCount,
        pid_t creatorPid,
        const AttributionSourceState& attributionSource,
        audio_input_flags_t *flags,
        pid_t tid,
        status_t *status,
        audio_port_handle_t portId,
        int32_t maxSharedAudioHistoryMs)
{
    size_t frameCount = *pFrameCount;
    size_t notificationFrameCount = *pNotificationFrameCount;
    sp<IAfRecordTrack> track;
    status_t lStatus;
    audio_input_flags_t inputFlags = mInput->flags;
    audio_input_flags_t requestedFlags = *flags;
    uint32_t sampleRate;

    lStatus = initCheck();
    if (lStatus != NO_ERROR) {
        ALOGE("createRecordTrack_l() audio driver not initialized");
        goto Exit;
    }

    if (!audio_is_linear_pcm(mFormat) && (*flags & AUDIO_INPUT_FLAG_DIRECT) == 0) {
        ALOGE("createRecordTrack_l() on an encoded stream requires AUDIO_INPUT_FLAG_DIRECT");
        lStatus = BAD_VALUE;
        goto Exit;
    }

    if (maxSharedAudioHistoryMs != 0) {
        if (!captureHotwordAllowed(attributionSource)) {
            lStatus = PERMISSION_DENIED;
            goto Exit;
        }
        if (maxSharedAudioHistoryMs < 0
                || maxSharedAudioHistoryMs > kMaxSharedAudioHistoryMs) {
            lStatus = BAD_VALUE;
            goto Exit;
        }
    }
    if (*pSampleRate == 0) {
        *pSampleRate = mSampleRate;
    }
    sampleRate = *pSampleRate;

    // special case for FAST flag considered OK if fast capture is present and access to
    // audio history is not required
    if (hasFastCapture() && mMaxSharedAudioHistoryMs == 0) {
        inputFlags = (audio_input_flags_t)(inputFlags | AUDIO_INPUT_FLAG_FAST);
    }

    // Check if requested flags are compatible with input stream flags
    if ((*flags & inputFlags) != *flags) {
        ALOGW("createRecordTrack_l(): mismatch between requested flags (%08x) and"
                " input flags (%08x)",
              *flags, inputFlags);
        *flags = (audio_input_flags_t)(*flags & inputFlags);
    }

    // client expresses a preference for FAST and no access to audio history,
    // but we get the final say
    if (*flags & AUDIO_INPUT_FLAG_FAST && maxSharedAudioHistoryMs == 0) {
      if (
            // we formerly checked for a callback handler (non-0 tid),
            // but that is no longer required for TRANSFER_OBTAIN mode
            // No need to match hardware format, format conversion will be done in client side.
            //
            // Frame count is not specified (0), or is less than or equal the pipe depth.
            // It is OK to provide a higher capacity than requested.
            // We will force it to mPipeFramesP2 below.
            (frameCount <= mPipeFramesP2) &&
            // PCM data
            audio_is_linear_pcm(format) &&
            // hardware channel mask
            (channelMask == mChannelMask) &&
            // hardware sample rate
            (sampleRate == mSampleRate) &&
            // record thread has an associated fast capture
            hasFastCapture() &&
            // there are sufficient fast track slots available
            mFastTrackAvail
        ) {
          // check compatibility with audio effects.
          audio_utils::lock_guard _l(mutex());
          // Do not accept FAST flag if the session has software effects
          sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
          if (chain != 0) {
              audio_input_flags_t old = *flags;
              chain->checkInputFlagCompatibility(flags);
              if (old != *flags) {
                  ALOGV("%p AUDIO_INPUT_FLAGS denied by effect old=%#x new=%#x",
                          this, (int)old, (int)*flags);
              }
          }
          ALOGV_IF((*flags & AUDIO_INPUT_FLAG_FAST) != 0,
                   "%p AUDIO_INPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
                   this, frameCount, mFrameCount);
      } else {
        ALOGV("%p AUDIO_INPUT_FLAG_FAST denied: frameCount=%zu mFrameCount=%zu mPipeFramesP2=%zu "
                "format=%#x isLinear=%d mFormat=%#x channelMask=%#x sampleRate=%u mSampleRate=%u "
                "hasFastCapture=%d tid=%d mFastTrackAvail=%d",
                this, frameCount, mFrameCount, mPipeFramesP2,
                format, audio_is_linear_pcm(format), mFormat, channelMask, sampleRate, mSampleRate,
                hasFastCapture(), tid, mFastTrackAvail);
        *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_FAST);
      }
    }

    // If FAST or RAW flags were corrected, ask caller to request new input from audio policy
    if ((*flags & AUDIO_INPUT_FLAG_FAST) !=
            (requestedFlags & AUDIO_INPUT_FLAG_FAST)) {
        *flags = (audio_input_flags_t) (*flags & ~(AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW));
        lStatus = BAD_TYPE;
        goto Exit;
    }

    // compute track buffer size in frames, and suggest the notification frame count
    if (*flags & AUDIO_INPUT_FLAG_FAST) {
        // fast track: frame count is exactly the pipe depth
        frameCount = mPipeFramesP2;
        // ignore requested notificationFrames, and always notify exactly once every HAL buffer
        notificationFrameCount = mFrameCount;
    } else {
        // not fast track: max notification period is resampled equivalent of one HAL buffer time
        //                 or 20 ms if there is a fast capture
        // TODO This could be a roundupRatio inline, and const
        size_t maxNotificationFrames = ((int64_t) (hasFastCapture() ? mSampleRate/50 : mFrameCount)
                * sampleRate + mSampleRate - 1) / mSampleRate;
        // minimum number of notification periods is at least kMinNotifications,
        // and at least kMinMs rounded up to a whole notification period (minNotificationsByMs)
        static const size_t kMinNotifications = 3;
        static const uint32_t kMinMs = 30;
        // TODO This could be a roundupRatio inline
        const size_t minFramesByMs = (sampleRate * kMinMs + 1000 - 1) / 1000;
        // TODO This could be a roundupRatio inline
        const size_t minNotificationsByMs = (minFramesByMs + maxNotificationFrames - 1) /
                maxNotificationFrames;
        const size_t minFrameCount = maxNotificationFrames *
                max(kMinNotifications, minNotificationsByMs);
        frameCount = max(frameCount, minFrameCount);
        if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
            notificationFrameCount = maxNotificationFrames;
        }
    }
    *pFrameCount = frameCount;
    *pNotificationFrameCount = notificationFrameCount;

    { // scope for mutex()
        audio_utils::lock_guard _l(mutex());
        int32_t startFrames = -1;
        if (!mSharedAudioPackageName.empty()
                && mSharedAudioPackageName == attributionSource.packageName
                && mSharedAudioSessionId == sessionId
                && captureHotwordAllowed(attributionSource)) {
            startFrames = mSharedAudioStartFrames;
        }

        track = IAfRecordTrack::create(this, client, attr, sampleRate, // create the RecordTrack
                      format, channelMask, frameCount,
                      nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, creatorPid,
                      attributionSource, *flags, IAfTrackBase::TYPE_DEFAULT, portId,
                      startFrames);

        lStatus = track->initCheck();
        if (lStatus != NO_ERROR) {
            ALOGE("createRecordTrack_l() initCheck failed %d; no control block?", lStatus);
            // track must be cleared from the caller as the caller has the AF lock
            goto Exit;
        }
        mTracks.add(track);  // add the RecordTrack to the track list

        if ((*flags & AUDIO_INPUT_FLAG_FAST) && (tid != -1)) {
            pid_t callingPid = IPCThreadState::self()->getCallingPid();
            // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
            // so ask activity manager to do this on our behalf
            sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*forApp*/);
        }

        if (maxSharedAudioHistoryMs != 0) {
            sendResizeBufferConfigEvent_l(maxSharedAudioHistoryMs);
        }
    }

    lStatus = NO_ERROR;

Exit:
    *status = lStatus;
    return track;
}
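
For a normal (non-FAST) track, the sizing logic above works in notification periods: the maximum notification period is one HAL buffer resampled to the client rate (or 20 ms when a fast capture exists), and the track buffer must cover at least kMinNotifications periods and at least kMinMs of audio rounded up to whole periods. A minimal worked sketch of that math, assuming a 48 kHz HAL with a 960-frame (20 ms) buffer and a client requesting 44100 Hz (illustrative numbers, not guaranteed device values):

// Sketch of the normal-track sizing in createRecordTrack_l(); all values assumed.
public class RecordBufferSizing {
    static long roundUpRatio(long num, long den) { return (num + den - 1) / den; }

    public static void main(String[] args) {
        long halFrames = 960;      // mFrameCount: one HAL buffer (assumed)
        long halRate = 48000;      // mSampleRate (assumed)
        long clientRate = 44100;   // requested sampleRate

        // max notification period: one HAL buffer resampled to the client rate
        long maxNotificationFrames = roundUpRatio(halFrames * clientRate, halRate); // 882

        // at least kMinNotifications (3) periods, and at least kMinMs (30 ms)
        // of audio rounded up to whole periods
        long minFramesByMs = roundUpRatio(clientRate * 30, 1000);                // 1323
        long minNotifications =
                Math.max(3, roundUpRatio(minFramesByMs, maxNotificationFrames)); // 3
        long minFrameCount = maxNotificationFrames * minNotifications;           // 2646

        // the requested frameCount is raised to at least this value
        System.out.println("track buffer >= " + minFrameCount + " frames");
    }
}

So on this hypothetical device a 44100 Hz client ends up with a track buffer of at least 2646 frames (about 60 ms) and one notification every 882 frames.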
// frameworks/av/services/audioflinger/Tracks.cpp
// RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
RecordTrack::RecordTrack(
            IAfRecordThread* thread,
            const sp<Client>& client,
            const audio_attributes_t& attr,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            void *buffer,
            size_t bufferSize,
            audio_session_t sessionId,
            pid_t creatorPid,
            const AttributionSourceState& attributionSource,
            audio_input_flags_t flags,
            track_type type,
            audio_port_handle_t portId,
            int32_t startFrames)
    :   TrackBase(thread, client, attr, sampleRate, format,
                  channelMask, frameCount, buffer, bufferSize, sessionId,
                  creatorPid,
                  VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
                  false /*isOut*/,
                  (type == TYPE_DEFAULT) ?
                          ((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
                          ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
                  type, portId,
                  std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD) + std::to_string(portId)),
        mOverflow(false),
        mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
        mRecordBufferConverter(NULL),
        mFlags(flags),
        mSilenced(false),
        mStartFrames(startFrames)
{
    if (mCblk == NULL) {
        return;
    }

    if (!isDirect()) {
        mRecordBufferConverter = new RecordBufferConverter(
                thread->channelMask(), thread->format(), thread->sampleRate(),
                channelMask, format, sampleRate);
        // Check if the RecordBufferConverter construction was successful.
        // If not, don't continue with construction.
        //
        // NOTE: It would be extremely rare that the record track cannot be created
        // for the current device, but a pending or future device change would make
        // the record track configuration valid.
        if (mRecordBufferConverter->initCheck() != NO_ERROR) {
            ALOGE("%s(%d): RecordTrack unable to create record buffer converter", __func__, mId);
            return;
        }
    }

    mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
            mFrameSize, !isExternalTrack());

    mResamplerBufferProvider = new ResamplerBufferProvider(this);

    if (flags & AUDIO_INPUT_FLAG_FAST) {
        ALOG_ASSERT(thread->fastTrackAvailable());
        thread->setFastTrackAvailable(false);
    } else {
        // TODO: only Normal Record has timestamps (Fast Record does not).
        mServerLatencySupported = checkServerLatencySupported(mFormat, flags);
    }
#ifdef TEE_SINK
    mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
            + "_" + std::to_string(mId)
            + "_R");
#endif

    // Once this item is logged by the server, the client can add properties.
    mTrackMetrics.logConstructor(creatorPid, uid(), id());
}
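
Note the RecordBufferConverter created above: it is what allows a client to request a channel mask, format, and sample rate different from what the HAL delivers, with the conversion done server-side for each track. A minimal client-side sketch (the HAL rate here is an assumption and is never visible to the app):

// The HAL might run at 48 kHz while the client asks for 16 kHz mono 16-bit PCM;
// RecordTrack's converter bridges the two transparently.
int rate = 16000;
int minBuf = AudioRecord.getMinBufferSize(rate,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
AudioRecord lowRateRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
        rate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBuf);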
//frameworks/av/services/audioflinger/Threads.cpp
RecordThread::RecordThread(const sp<IAfThreadCallback>& afThreadCallback,
                                         AudioStreamIn *input,
                                         audio_io_handle_t id,
                                         bool systemReady
                                         ) :
    ThreadBase(afThreadCallback, id, RECORD, systemReady, false /* isOut */),
    mInput(input),
    mSource(mInput),
    mActiveTracks(&this->mLocalLog),
    mRsmpInBuffer(NULL),
    // mRsmpInFrames, mRsmpInFramesP2, and mRsmpInFramesOA are set by readInputParameters_l()
    mRsmpInRear(0)
    , mReadOnlyHeap(new MemoryDealer(kRecordThreadReadOnlyHeapSize,
            "RecordThreadRO", MemoryHeapBase::READ_ONLY))
    // mFastCapture below
    , mFastCaptureFutex(0)
    // mInputSource
    // mPipeSink
    // mPipeSource
    , mPipeFramesP2(0)
    // mPipeMemory
    // mFastCaptureNBLogWriter
    , mFastTrackAvail(false)
    , mBtNrecSuspended(false)
{
    snprintf(mThreadName, kThreadNameLength, "AudioIn_%X", id);
    mNBLogWriter = afThreadCallback->newWriter_l(kLogSize, mThreadName);

    if (mInput->audioHwDev != nullptr) {
        mIsMsdDevice = strcmp(
                mInput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
    }

    readInputParameters_l();

    // TODO: We may also match on address as well as device type for
    // AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_IN_REMOTE_SUBMIX
    // TODO: This property should ensure that it contains only a single device type.
    mTimestampCorrectedDevice = (audio_devices_t)property_get_int64(
            "audio.timestamp.corrected_input_device",
            (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_IN_BUS // turn on by default for MSD
                                   : AUDIO_DEVICE_NONE));

    // create an NBAIO source for the HAL input stream, and negotiate
    mInputSource = new AudioStreamInSource(input->stream); // the source we capture from
    size_t numCounterOffers = 0;
    const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
#if !LOG_NDEBUG
    [[maybe_unused]] ssize_t index =
#else
    (void)
#endif
            mInputSource->negotiate(offers, 1, NULL, numCounterOffers);
    ALOG_ASSERT(index == 0);

    // initialize fast capture depending on configuration
    bool initFastCapture;
    switch (kUseFastCapture) {
    case FastCapture_Never:
        initFastCapture = false;
        ALOGV("%p kUseFastCapture = Never, initFastCapture = false", this);
        break;
    case FastCapture_Always:
        initFastCapture = true;
        ALOGV("%p kUseFastCapture = Always, initFastCapture = true", this);
        break;
    case FastCapture_Static:
        initFastCapture = !mIsMsdDevice // Disable fast capture for MSD BUS devices.
                && audio_is_linear_pcm(mFormat)
                && (mFrameCount * 1000) / mSampleRate < kMinNormalCaptureBufferSizeMs;
        ALOGV("%p kUseFastCapture = Static, format = 0x%x, (%lld * 1000) / %u vs %u, "
                "initFastCapture = %d, mIsMsdDevice = %d", this, mFormat, (long long)mFrameCount,
                mSampleRate, kMinNormalCaptureBufferSizeMs, initFastCapture, mIsMsdDevice);
        break;
    // case FastCapture_Dynamic:
    }

    if (initFastCapture) {
        // create a Pipe for FastCapture to write to, and for us and fast tracks to read from
        NBAIO_Format format = mInputSource->format();
        // quadruple-buffering of 20 ms each; this ensures we can sleep for 20ms in RecordThread
        size_t pipeFramesP2 = roundup(4 * FMS_20 * mSampleRate / 1000);
        size_t pipeSize = pipeFramesP2 * Format_frameSize(format);
        void *pipeBuffer = nullptr;
        const sp<MemoryDealer> roHeap(readOnlyHeap());
        sp<IMemory> pipeMemory;
        if ((roHeap == 0) ||
                (pipeMemory = roHeap->allocate(pipeSize)) == 0 ||
                (pipeBuffer = pipeMemory->unsecurePointer()) == nullptr) {
            ALOGE("not enough memory for pipe buffer size=%zu; "
                    "roHeap=%p, pipeMemory=%p, pipeBuffer=%p; roHeapSize: %lld",
                    pipeSize, roHeap.get(), pipeMemory.get(), pipeBuffer,
                    (long long)kRecordThreadReadOnlyHeapSize);
            goto failed;
        }
        // pipe will be shared directly with fast clients, so clear to avoid leaking old information
        memset(pipeBuffer, 0, pipeSize);
        Pipe *pipe = new Pipe(pipeFramesP2, format, pipeBuffer);
        const NBAIO_Format offersFast[1] = {format};
        size_t numCounterOffersFast = 0;
        [[maybe_unused]] ssize_t index2 = pipe->negotiate(offersFast, std::size(offersFast),
                nullptr /* counterOffers */, numCounterOffersFast);
        ALOG_ASSERT(index2 == 0);
        mPipeSink = pipe;
        PipeReader *pipeReader = new PipeReader(*pipe);
        numCounterOffersFast = 0;
        index2 = pipeReader->negotiate(offersFast, std::size(offersFast),
                nullptr /* counterOffers */, numCounterOffersFast);
        ALOG_ASSERT(index2 == 0);
        mPipeSource = pipeReader;
        mPipeFramesP2 = pipeFramesP2;
        mPipeMemory = pipeMemory;

        // create fast capture
        mFastCapture = new FastCapture();
        FastCaptureStateQueue *sq = mFastCapture->sq();
#ifdef STATE_QUEUE_DUMP
        // FIXME
#endif
        FastCaptureState *state = sq->begin();
        state->mCblk = NULL;
        state->mInputSource = mInputSource.get();
        state->mInputSourceGen++;
        state->mPipeSink = pipe;
        state->mPipeSinkGen++;
        state->mFrameCount = mFrameCount;
        state->mCommand = FastCaptureState::COLD_IDLE;
        // already done in constructor initialization list
        //mFastCaptureFutex = 0;
        state->mColdFutexAddr = &mFastCaptureFutex;
        state->mColdGen++;
        state->mDumpState = &mFastCaptureDumpState;
#ifdef TEE_SINK
        // FIXME
#endif
        mFastCaptureNBLogWriter =
                afThreadCallback->newWriter_l(kFastCaptureLogSize, "FastCapture");
        state->mNBLogWriter = mFastCaptureNBLogWriter.get();
        sq->end();
        sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED);

        // start the fast capture
        mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
        pid_t tid = mFastCapture->getTid();
        sendPrioConfigEvent(getpid(), tid, kPriorityFastCapture, false /*forApp*/);
        stream()->setHalThreadPriority(kPriorityFastCapture);
#ifdef AUDIO_WATCHDOG
        // FIXME
#endif

        mFastTrackAvail = true;
    }
#ifdef TEE_SINK
    mTee.set(mInputSource->format(), NBAIO_Tee::TEE_FLAG_INPUT_THREAD);
    mTee.setId(std::string("_") + std::to_string(mId) + "_C");
#endif
failed: ;

    // FIXME mNormalSource
}
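
The fast-capture pipe created above is sized as four 20 ms HAL buffers (FMS_20 is 20 ms), rounded up to a power of two so that index arithmetic can use simple masking. A minimal sketch of that sizing, assuming a 48 kHz HAL:

// Sketch of the pipe sizing in the RecordThread constructor; 48 kHz assumed.
// AOSP's roundup() rounds to the next power of two; Integer.highestOneBit is
// used here as a stand-in.
public class PipeSizing {
    static int roundUpPow2(int v) {
        int h = Integer.highestOneBit(v);
        return (h == v) ? v : h << 1;
    }

    public static void main(String[] args) {
        int sampleRate = 48000;
        int pipeFrames = 4 * 20 * sampleRate / 1000; // quadruple-buffered 20 ms = 3840
        int pipeFramesP2 = roundUpPow2(pipeFrames);  // 4096 frames -> mPipeFramesP2
        System.out.println(pipeFramesP2);
    }
}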
//frameworks/av/services/audioflinger/Threads.cpp
bool RecordThread::threadLoop()
{
    nsecs_t lastWarning = 0;

    inputStandBy();

reacquire_wakelock:
    {
        audio_utils::lock_guard _l(mutex());
        acquireWakeLock_l();
    }

    // used to request a deferred sleep, to be executed later while mutex is unlocked
    uint32_t sleepUs = 0;

    // timestamp correction enable is determined under lock, used in processing step.
    bool timestampCorrectionEnabled = false;

    int64_t lastLoopCountRead = -2;  // never matches "previous" loop, when loopCount = 0.

    // loop while there is work to do
    for (int64_t loopCount = 0;; ++loopCount) {  // loopCount used for statistics tracking
        // Note: these sp<> are released at the end of the for loop outside of the mutex() lock.
        sp<IAfRecordTrack> activeTrack;
        std::vector<sp<IAfRecordTrack>> oldActiveTracks;
        Vector<sp<IAfEffectChain>> effectChains;

        // activeTracks accumulates a copy of a subset of mActiveTracks
        Vector<sp<IAfRecordTrack>> activeTracks;

        // reference to the (first and only) active fast track
        sp<IAfRecordTrack> fastTrack;

        // reference to a fast track which is about to be removed
        sp<IAfRecordTrack> fastTrackToRemove;

        bool silenceFastCapture = false;

        { // scope for mutex()
            audio_utils::unique_lock _l(mutex());

            processConfigEvents_l();

            // check exitPending here because checkForNewParameters_l()
            // can temporarily release mutex()
            if (exitPending()) {
                break;
            }

            // sleep with mutex unlocked
            if (sleepUs > 0) {
                ATRACE_BEGIN("sleepC");
                (void)mWaitWorkCV.wait_for(_l, std::chrono::microseconds(sleepUs));
                ATRACE_END();
                sleepUs = 0;
                continue;
            }

            // if no active track(s), then standby and release wakelock
            size_t size = mActiveTracks.size();
            if (size == 0) {
                standbyIfNotAlreadyInStandby();
                // exitPending() can't become true here
                releaseWakeLock_l();
                ALOGV("RecordThread: loop stopping");
                // go to sleep
                mWaitWorkCV.wait(_l);
                ALOGV("RecordThread: loop starting");
                goto reacquire_wakelock;
            }

            bool doBroadcast = false;
            bool allStopped = true;
            for (size_t i = 0; i < size; ) {
                if (activeTrack) {  // ensure track release is outside lock.
                    oldActiveTracks.emplace_back(std::move(activeTrack));
                }
                activeTrack = mActiveTracks[i];
                if (activeTrack->isTerminated()) {
                    if (activeTrack->isFastTrack()) {
                        ALOG_ASSERT(fastTrackToRemove == 0);
                        fastTrackToRemove = activeTrack;
                    }
                    removeTrack_l(activeTrack);
                    mActiveTracks.remove(activeTrack);
                    size--;
                    continue;
                }

                IAfTrackBase::track_state activeTrackState = activeTrack->state();
                switch (activeTrackState) {

                case IAfTrackBase::PAUSING:
                    mActiveTracks.remove(activeTrack);
                    activeTrack->setState(IAfTrackBase::PAUSED);
                    if (activeTrack->isFastTrack()) {
                        ALOGV("%s fast track is paused, thus removed from active list", __func__);
                        // Keep a ref on fast track to wait for FastCapture thread to get updated
                        // state before potential track removal
                        fastTrackToRemove = activeTrack;
                    }
                    doBroadcast = true;
                    size--;
                    continue;

                case IAfTrackBase::STARTING_1:
                    sleepUs = 10000;
                    i++;
                    allStopped = false;
                    continue;

                case IAfTrackBase::STARTING_2:
                    doBroadcast = true;
                    if (mStandby) {
                        mThreadMetrics.logBeginInterval();
                        mThreadSnapshot.onBegin();
                        mStandby = false;
                    }
                    activeTrack->setState(IAfTrackBase::ACTIVE);
                    allStopped = false;
                    break;

                case IAfTrackBase::ACTIVE:
                    allStopped = false;
                    break;

                case IAfTrackBase::IDLE:    // cannot be on ActiveTracks if idle
                case IAfTrackBase::PAUSED:  // cannot be on ActiveTracks if paused
                case IAfTrackBase::STOPPED: // cannot be on ActiveTracks if destroyed/terminated
                default:
                    LOG_ALWAYS_FATAL("%s: Unexpected active track state:%d, id:%d, tracks:%zu",
                            __func__, activeTrackState, activeTrack->id(), size);
                }

                if (activeTrack->isFastTrack()) {
                    ALOG_ASSERT(!mFastTrackAvail);
                    ALOG_ASSERT(fastTrack == 0);
                    // if the active fast track is silenced either:
                    // 1) silence the whole capture from fast capture buffer if this is
                    //    the only active track
                    // 2) invalidate this track: this will cause the client to reconnect and possibly
                    //    be invalidated again until unsilenced
                    bool invalidate = false;
                    if (activeTrack->isSilenced()) {
                        if (size > 1) {
                            invalidate = true;
                        } else {
                            silenceFastCapture = true;
                        }
                    }
                    // Invalidate fast tracks if access to audio history is required as this is not
                    // possible with fast tracks. Once the fast track has been invalidated, no new
                    // fast track will be created until mMaxSharedAudioHistoryMs is cleared.
                    if (mMaxSharedAudioHistoryMs != 0) {
                        invalidate = true;
                    }
                    if (invalidate) {
                        activeTrack->invalidate();
                        ALOG_ASSERT(fastTrackToRemove == 0);
                        fastTrackToRemove = activeTrack;
                        removeTrack_l(activeTrack);
                        mActiveTracks.remove(activeTrack);
                        size--;
                        continue;
                    }
                    fastTrack = activeTrack;
                }

                activeTracks.add(activeTrack);
                i++;

            }

            mActiveTracks.updatePowerState_l(this);

            updateMetadata_l();

            if (allStopped) {
                standbyIfNotAlreadyInStandby();
            }
            if (doBroadcast) {
                mStartStopCV.notify_all();
            }

            // sleep if there are no active tracks to process
            if (activeTracks.isEmpty()) {
                if (sleepUs == 0) {
                    sleepUs = kRecordThreadSleepUs;
                }
                continue;
            }
            sleepUs = 0;

            timestampCorrectionEnabled = isTimestampCorrectionEnabled_l();
            lockEffectChains_l(effectChains);
        }

        // thread mutex is now unlocked, mActiveTracks unknown, activeTracks.size() > 0

        size_t size = effectChains.size();
        for (size_t i = 0; i < size; i++) {
            // thread mutex is not locked, but effect chain is locked
            effectChains[i]->process_l();
        }

        // Push a new fast capture state if fast capture is not already running, or cblk change
        if (mFastCapture != 0) {
            FastCaptureStateQueue *sq = mFastCapture->sq();
            FastCaptureState *state = sq->begin();
            bool didModify = false;
            FastCaptureStateQueue::block_t block = FastCaptureStateQueue::BLOCK_UNTIL_PUSHED;
            if (state->mCommand != FastCaptureState::READ_WRITE /* FIXME &&
                    (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)*/) {
                if (state->mCommand == FastCaptureState::COLD_IDLE) {
                    int32_t old = android_atomic_inc(&mFastCaptureFutex);
                    if (old == -1) {
                        (void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1);
                    }
                }
                state->mCommand = FastCaptureState::READ_WRITE;
#if 0   // FIXME
                mFastCaptureDumpState.increaseSamplingN(mAfThreadCallback->isLowRamDevice() ?
                        FastThreadDumpState::kSamplingNforLowRamDevice :
                        FastThreadDumpState::kSamplingN);
#endif
                didModify = true;
            }
            audio_track_cblk_t *cblkOld = state->mCblk;
            audio_track_cblk_t *cblkNew = fastTrack != 0 ? fastTrack->cblk() : NULL;
            if (cblkNew != cblkOld) {
                state->mCblk = cblkNew;
                // block until acked if removing a fast track
                if (cblkOld != NULL) {
                    block = FastCaptureStateQueue::BLOCK_UNTIL_ACKED;
                }
                didModify = true;
            }
            AudioBufferProvider* abp = (fastTrack != 0 && fastTrack->isPatchTrack()) ?
                    reinterpret_cast<AudioBufferProvider*>(fastTrack.get()) : nullptr;
            if (state->mFastPatchRecordBufferProvider != abp) {
                state->mFastPatchRecordBufferProvider = abp;
                state->mFastPatchRecordFormat = fastTrack == 0 ?
                        AUDIO_FORMAT_INVALID : fastTrack->format();
                didModify = true;
            }
            if (state->mSilenceCapture != silenceFastCapture) {
                state->mSilenceCapture = silenceFastCapture;
                didModify = true;
            }
            sq->end(didModify);
            if (didModify) {
                sq->push(block);
#if 0
                if (kUseFastCapture == FastCapture_Dynamic) {
                    mNormalSource = mPipeSource;
                }
#endif
            }
        }

        // now run the fast track destructor with thread mutex unlocked
        fastTrackToRemove.clear();

        // Read from HAL to keep up with fastest client if multiple active tracks, not slowest one.
        // Only the client(s) that are too slow will overrun. But if even the fastest client is too
        // slow, then this RecordThread will overrun by not calling HAL read often enough.
        // If destination is non-contiguous, first read past the nominal end of buffer, then
        // copy to the right place.  Permitted because mRsmpInBuffer was over-allocated.

        int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1);
        ssize_t framesRead = 0; // not needed, remove clang-tidy warning.
        const int64_t lastIoBeginNs = systemTime(); // start IO timing

        // If an NBAIO source is present, use it to read the normal capture's data
        // from here on, capture actually begins
        if (mPipeSource != 0) {
            size_t framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);

            // The audio fifo read() returns OVERRUN on overflow, and advances the read pointer
            // to the full buffer point (clearing the overflow condition).  Upon OVERRUN error,
            // we immediately retry the read() to get data and prevent another overflow.
            for (int retries = 0; retries <= 2; ++retries) {
                ALOGW_IF(retries > 0, "overrun on read from pipe, retry #%d", retries);
                framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize, // capture via the HAL
                        framesToRead);
                if (framesRead != OVERRUN) break;
            }

            const ssize_t availableToRead = mPipeSource->availableToRead();
            if (availableToRead >= 0) {
                mMonopipePipeDepthStats.add(availableToRead);
                // PipeSource is the primary clock.  It is up to the AudioRecord client to keep up.
                LOG_ALWAYS_FATAL_IF((size_t)availableToRead > mPipeFramesP2,
                        "more frames to read than fifo size, %zd > %zu",
                        availableToRead, mPipeFramesP2);
                const size_t pipeFramesFree = mPipeFramesP2 - availableToRead;
                const size_t sleepFrames = min(pipeFramesFree, mRsmpInFramesP2) / 2;
                ALOGVV("mPipeFramesP2:%zu mRsmpInFramesP2:%zu sleepFrames:%zu availableToRead:%zd",
                        mPipeFramesP2, mRsmpInFramesP2, sleepFrames, availableToRead);
                sleepUs = (sleepFrames * 1000000LL) / mSampleRate;
            }
            if (framesRead < 0) {
                status_t status = (status_t) framesRead;
                switch (status) {
                case OVERRUN:
                    ALOGW("overrun on read from pipe");
                    framesRead = 0;
                    break;
                case NEGOTIATE:
                    ALOGE("re-negotiation is needed");
                    framesRead = -1;  // Will cause an attempt to recover.
                    break;
                default:
                    ALOGE("unknown error %d on read from pipe", status);
                    break;
                }
            }
        // otherwise use the HAL / AudioStreamIn directly
        } else {
            ATRACE_BEGIN("read");
            size_t bytesRead;
            status_t result = mSource->read(
                    (uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize, &bytesRead); // capture via the HAL
            ATRACE_END();
            if (result < 0) {
                framesRead = result;
            } else {
                framesRead = bytesRead / mFrameSize;
            }
        }

        const int64_t lastIoEndNs = systemTime(); // end IO timing

        // Update server timestamp with server stats
        // systemTime() is optional if the hardware supports timestamps.
        if (framesRead >= 0) {
            mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += framesRead;
            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = lastIoEndNs;
        }

        // Update server timestamp with kernel stats
        if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
            int64_t position, time;
            if (mStandby) {
                mTimestampVerifier.discontinuity(audio_is_linear_pcm(mFormat) ?
                    mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS :
                    mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
            } else if (mSource->getCapturePosition(&position, &time) == NO_ERROR
                    && time > mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]) {

                mTimestampVerifier.add(position, time, mSampleRate);
                if (timestampCorrectionEnabled) {
                    ALOGVV("TS_BEFORE: %d %lld %lld",
                            id(), (long long)time, (long long)position);
                    auto correctedTimestamp = mTimestampVerifier.getLastCorrectedTimestamp();
                    position = correctedTimestamp.mFrames;
                    time = correctedTimestamp.mTimeNs;
                    ALOGVV("TS_AFTER: %d %lld %lld",
                            id(), (long long)time, (long long)position);
                }

                mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position;
                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = time;
                // Note: In general record buffers should tend to be empty in
                // a properly running pipeline.
                //
                // Also, it is not advantageous to call get_presentation_position during the read
                // as the read obtains a lock, preventing the timestamp call from executing.
            } else {
                mTimestampVerifier.error();
            }
        }

        // From the timestamp, input read latency is negative output write latency.
        const audio_input_flags_t flags = mInput != NULL ? mInput->flags : AUDIO_INPUT_FLAG_NONE;
        const double latencyMs = IAfRecordTrack::checkServerLatencySupported(mFormat, flags)
                ? - mTimestamp.getOutputServerLatencyMs(mSampleRate) : 0.;
        if (latencyMs != 0.) { // note 0. means timestamp is empty.
            mLatencyMs.add(latencyMs);
        }

        // Use this to track timestamp information
        // ALOGD("%s", mTimestamp.toString().c_str());

        if (framesRead < 0 || (framesRead == 0 && mPipeSource == 0)) {
            ALOGE("read failed: framesRead=%zd", framesRead);
            // Force input into standby so that it tries to recover at next read attempt
            inputStandBy();
            sleepUs = kRecordThreadSleepUs;
        }
        if (framesRead <= 0) {
            goto unlock;
        }
        ALOG_ASSERT(framesRead > 0);
        mFramesRead += framesRead;

#ifdef TEE_SINK
        (void)mTee.write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
#endif
        // If destination is non-contiguous, we now correct for reading past end of buffer.
        {
            size_t part1 = mRsmpInFramesP2 - rear;
            if ((size_t) framesRead > part1) {
                memcpy(mRsmpInBuffer, (uint8_t*)mRsmpInBuffer + mRsmpInFramesP2 * mFrameSize,
                        (framesRead - part1) * mFrameSize);
            }
        }
        mRsmpInRear = audio_utils::safe_add_overflow(mRsmpInRear, (int32_t)framesRead);

        size = activeTracks.size();

        // loop over each active track
        // next, copy the captured audio to each track in mActiveTracks
        for (size_t i = 0; i < size; i++) {
            if (activeTrack) {  // ensure track release is outside lock.
                oldActiveTracks.emplace_back(std::move(activeTrack));
            }
            activeTrack = activeTracks[i];

            // skip fast tracks, as those are handled directly by FastCapture
            if (activeTrack->isFastTrack()) {
                continue;
            }

            // TODO: This code probably should be moved to RecordTrack.
            // TODO: Update the activeTrack buffer converter in case of reconfigure.

            enum {
                OVERRUN_UNKNOWN,
                OVERRUN_TRUE,
                OVERRUN_FALSE
            } overrun = OVERRUN_UNKNOWN;

            // loop over getNextBuffer to handle circular sink
            for (;;) {

                activeTrack->sinkBuffer().frameCount = ~0;
                status_t status = activeTrack->getNextBuffer(&activeTrack->sinkBuffer());
                size_t framesOut = activeTrack->sinkBuffer().frameCount;
                LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0));

                // check available frames and handle overrun conditions
                // if the record track isn't draining fast enough.
                bool hasOverrun;
                size_t framesIn;
                activeTrack->resamplerBufferProvider()->sync(&framesIn, &hasOverrun);
                if (hasOverrun) {
                    overrun = OVERRUN_TRUE;
                }
                if (framesOut == 0 || framesIn == 0) {
                    break;
                }

                // Don't allow framesOut to be larger than what is possible with resampling
                // from framesIn.
                // This isn't strictly necessary but helps limit buffer resizing in
                // RecordBufferConverter.  TODO: remove when no longer needed.
                if (audio_is_linear_pcm(activeTrack->format())) {
                    framesOut = min(framesOut,
                            destinationFramesPossible(
                                    framesIn, mSampleRate, activeTrack->sampleRate()));
                }

                if (activeTrack->isDirect()) {
                    // No RecordBufferConverter used for direct streams. Pass
                    // straight from RecordThread buffer to RecordTrack buffer.
                    AudioBufferProvider::Buffer buffer;
                    buffer.frameCount = framesOut;
                    const status_t getNextBufferStatus =
                            activeTrack->resamplerBufferProvider()->getNextBuffer(&buffer);
                    if (getNextBufferStatus == OK && buffer.frameCount != 0) {
                        ALOGV_IF(buffer.frameCount != framesOut,
                                "%s() read less than expected (%zu vs %zu)",
                                __func__, buffer.frameCount, framesOut);
                        framesOut = buffer.frameCount;
                        memcpy(activeTrack->sinkBuffer().raw,
                                buffer.raw, buffer.frameCount * mFrameSize);
                        activeTrack->resamplerBufferProvider()->releaseBuffer(&buffer);
                    } else {
                        framesOut = 0;
                        ALOGE("%s() cannot fill request, status: %d, frameCount: %zu",
                            __func__, getNextBufferStatus, buffer.frameCount);
                    }
                } else {
                    // process frames from the RecordThread buffer provider to the RecordTrack
                    // buffer
                    framesOut = activeTrack->recordBufferConverter()->convert(
                            activeTrack->sinkBuffer().raw,
                            activeTrack->resamplerBufferProvider(),
                            framesOut);
                }

                if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
                    overrun = OVERRUN_FALSE;
                }

                // MediaSyncEvent handling: Synchronize AudioRecord to AudioTrack completion.
                const ssize_t framesToDrop =
                        activeTrack->synchronizedRecordState().updateRecordFrames(framesOut);
                if (framesToDrop == 0) {
                    // no sync event, process normally, otherwise ignore.
                    if (framesOut > 0) {
                        activeTrack->sinkBuffer().frameCount = framesOut;
                        // Sanitize before releasing if the track has no access to the source data
                        // An idle UID receives silence from non virtual devices until active
                        if (activeTrack->isSilenced()) {
                            memset(activeTrack->sinkBuffer().raw,
                                    0, framesOut * activeTrack->frameSize());
                        }
                        activeTrack->releaseBuffer(&activeTrack->sinkBuffer());
                    }
                }
                if (framesOut == 0) {
                    break;
                }
            }

            switch (overrun) {
            case OVERRUN_TRUE:
                // client isn't retrieving buffers fast enough
                if (!activeTrack->setOverflow()) {
                    nsecs_t now = systemTime();
                    // FIXME should lastWarning per track?
                    if ((now - lastWarning) > kWarningThrottleNs) {
                        ALOGW("RecordThread: buffer overflow");
                        lastWarning = now;
                    }
                }
                break;
            case OVERRUN_FALSE:
                activeTrack->clearOverflow();
                break;
            case OVERRUN_UNKNOWN:
                break;
            }

            // update frame information and push timestamp out
            activeTrack->updateTrackFrameInfo(
                    activeTrack->serverProxy()->framesReleased(),
                    mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER],
                    mSampleRate, mTimestamp);
        }

unlock:
        // enable changes in effect chain
        unlockEffectChains(effectChains);
        // effectChains doesn't need to be cleared, since it is cleared by destructor at scope end
        if (audio_has_proportional_frames(mFormat)
            && loopCount == lastLoopCountRead + 1) {
            const int64_t readPeriodNs = lastIoEndNs - mLastIoEndNs;
            const double jitterMs =
                TimestampVerifier<int64_t, int64_t>::computeJitterMs(
                    {framesRead, readPeriodNs},
                    {0, 0} /* lastTimestamp */, mSampleRate);
            const double processMs = (lastIoBeginNs - mLastIoEndNs) * 1e-6;

            audio_utils::lock_guard _l(mutex());
            mIoJitterMs.add(jitterMs);
            mProcessTimeMs.add(processMs);
        }
        mThreadloopExecutor.process();
        // update timing info.
        mLastIoBeginNs = lastIoBeginNs;
        mLastIoEndNs = lastIoEndNs;
        lastLoopCountRead = loopCount;
    }
    mThreadloopExecutor.process(); // process any remaining deferred actions.
    // deferred actions after this point are ignored.

    standbyIfNotAlreadyInStandby();

    {
        audio_utils::lock_guard _l(mutex());
        for (size_t i = 0; i < mTracks.size(); i++) {
            sp<IAfRecordTrack> track = mTracks[i];
            track->invalidate();
        }
        mActiveTracks.clear();
        mStartStopCV.notify_all();
    }

    releaseWakeLock();

    ALOGV("RecordThread %p exiting", this);
    return false;
}
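
One detail worth calling out in threadLoop(): mRsmpInBuffer is an over-allocated power-of-two ring buffer. Reads land contiguously, possibly past the nominal end, and the overshoot is then folded back to the front (the memcpy after the TEE_SINK write above). A simplified sketch of the scheme (hypothetical helper, not AOSP code; assumes mono 16-bit so one short equals one frame, and that a single read never exceeds the slack):

// Over-allocated power-of-two ring buffer: write contiguously past the nominal
// end, then copy the overshoot back to the start, as threadLoop() does.
public class OverAllocatedRing {
    final short[] buf;    // over-allocated: framesP2 frames plus slack
    final int framesP2;   // must be a power of two
    int rear = 0;         // monotonically increasing frame counter

    OverAllocatedRing(int framesP2, int slack) {
        this.framesP2 = framesP2;
        this.buf = new short[framesP2 + slack];
    }

    void write(short[] src, int frames) {
        int r = rear & (framesP2 - 1);            // wrap by masking
        System.arraycopy(src, 0, buf, r, frames); // may run past framesP2
        int part1 = framesP2 - r;
        if (frames > part1) {                     // fold the overshoot back
            System.arraycopy(buf, framesP2, buf, 0, frames - part1);
        }
        rear += frames;
    }
}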

startRecording#
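
Before reading the implementation, recall that on the SDK side this entire path is kicked off by AudioRecord.startRecording(). A minimal client sketch (assumes the RECORD_AUDIO permission is granted and minBufSize came from getMinBufferSize(); error handling omitted):

AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC,
        44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT,
        minBufSize);
if (record.getState() == AudioRecord.STATE_INITIALIZED) {
    record.startRecording(); // lands in the native AudioRecord::start() below
}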

//frameworks/av/media/libaudioclient/AudioRecord.cpp
status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
{
    const int64_t beginNs = systemTime();
    ALOGV("%s(%d): sync event %d trigger session %d", __func__, mPortId, event, triggerSession);
    AutoMutex lock(mLock);

    status_t status = NO_ERROR;
    mediametrics::Defer defer([&] {
        mediametrics::LogItem(mMetricsId)
            .set(AMEDIAMETRICS_PROP_CALLERNAME,
                    mCallerName.empty()
                    ? AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN
                    : mCallerName.c_str())
            .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_START)
            .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
            .set(AMEDIAMETRICS_PROP_STATE, stateToString(mActive))
            .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
            .record(); });

    if (mActive) {
        return status;
    }

    // discard data in buffer
    const uint32_t framesFlushed = mProxy->flush();
    mFramesReadServerOffset -= mFramesRead + framesFlushed;
    mFramesRead = 0;
    mProxy->clearTimestamp();  // timestamp is invalid until next server push
    mPreviousTimestamp.clear();
    mTimestampRetrogradePositionReported = false;
    mTimestampRetrogradeTimeReported = false;

    // reset current position as seen by client to 0
    mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
    // force refresh of remaining frames by processAudioBuffer() as last
    // read before stop could be partial.
    mRefreshRemaining = true;

    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);

    // we reactivate markers (mMarkerPosition != 0) as the position is reset to 0.
    // This is legacy behavior.  This is not done in stop() to avoid a race condition
    // where the last marker event is issued twice.
    mMarkerReached = false;
    // mActive is checked by restoreRecord_l
    mActive = true;

    if (!(flags & CBLK_INVALID)) {
        status = statusTFromBinderStatus(mAudioRecord->start(event, triggerSession));
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreRecord_l("start");
    }

    // Call these directly because we are already holding the lock.
    mAudioRecord->setPreferredMicrophoneDirection(mSelectedMicDirection);
    mAudioRecord->setPreferredMicrophoneFieldDimension(mSelectedMicFieldDimension);

    if (status != NO_ERROR) {
        mActive = false;
        ALOGE("%s(%d): status %d", __func__, mPortId, status);
        mMediaMetrics.markError(status, __FUNCTION__);
    } else {
        mTracker->recordingStarted(); // log the start event so clients can learn an app has begun capturing
        sp<AudioRecordThread> t = mAudioRecordThread; // the thread that drives the capture callbacks
        if (t != 0) {
            t->resume();
        } else {
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }

        // we've successfully started, log that time
        mMediaMetrics.logStart(systemTime());
    }
    return status;
}
//frameworks/av/media/libaudioclient/AudioRecord.cpp
bool AudioRecord::AudioRecordThread::threadLoop()
{
    {
        AutoMutex _l(mMyLock);
        if (mPaused) {
            // TODO check return value and handle or log
            mMyCond.wait(mMyLock);
            // caller will check for exitPending()
            return true;
        }
        if (mIgnoreNextPausedInt) {
            mIgnoreNextPausedInt = false;
            mPausedInt = false;
        }
        if (mPausedInt) {
            if (mPausedNs > 0) {
                // TODO check return value and handle or log
                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
            } else {
                // TODO check return value and handle or log
                mMyCond.wait(mMyLock);
            }
            mPausedInt = false;
            return true;
        }
    }
    if (exitPending()) {
        return false;
    }
    nsecs_t ns =  mReceiver.processAudioBuffer();
    switch (ns) {
    case 0:
        return true;
    case NS_INACTIVE:
        pauseInternal();
        return true;
    case NS_NEVER:
        return false;
    case NS_WHENEVER:
        // Event driven: call wake() when callback notifications conditions change.
        ns = INT64_MAX;
        FALLTHROUGH_INTENDED;
    default:
        LOG_ALWAYS_FATAL_IF(ns < 0, "%s() returned %lld", __func__, (long long)ns);
        pauseInternal(ns);
        return true;
    }
}
nsecs_t AudioRecord::processAudioBuffer()
{
    mLock.lock();
    const sp<IAudioRecordCallback> callback = mCallback.promote();
    if (!callback) {
        mCallback = nullptr;
        mLock.unlock();
        return NS_NEVER;
    }
    if (mAwaitBoost) {
        mAwaitBoost = false;
        mLock.unlock();
        static const int32_t kMaxTries = 5;
        int32_t tryCounter = kMaxTries;
        uint32_t pollUs = 10000;
        do {
            int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
            if (policy == SCHED_FIFO || policy == SCHED_RR) {
                break;
            }
            usleep(pollUs);
            pollUs <<= 1;
        } while (tryCounter-- > 0);
        if (tryCounter < 0) {
            ALOGE("%s(%d): did not receive expected priority boost on time", __func__, mPortId);
        }
        // Run again immediately
        return 0;
    }

    // Can only reference mCblk while locked
    int32_t flags = android_atomic_and(~CBLK_OVERRUN, &mCblk->mFlags);

    // Check for track invalidation
    if (flags & CBLK_INVALID) {
        (void) restoreRecord_l("processAudioBuffer");
        mLock.unlock();
        // Run again immediately, but with a new IAudioRecord
        return 0;
    }

    bool active = mActive;

    // Manage overrun callback, must be done under lock to avoid race with releaseBuffer()
    bool newOverrun = false;
    if (flags & CBLK_OVERRUN) {
        if (!mInOverrun) {
            mInOverrun = true;
            newOverrun = true;
        }
    }

    // Get current position of server
    Modulo<uint32_t> position(mProxy->getPosition());

    // Manage marker callback
    bool markerReached = false;
    Modulo<uint32_t> markerPosition(mMarkerPosition);
    // FIXME fails for wraparound, need 64 bits
    if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
        mMarkerReached = markerReached = true;
    }

    // Determine the number of new position callback(s) that will be needed, while locked
    size_t newPosCount = 0;
    Modulo<uint32_t> newPosition(mNewPosition);
    uint32_t updatePeriod = mUpdatePeriod;
    // FIXME fails for wraparound, need 64 bits
    if (updatePeriod > 0 && position >= newPosition) {
        newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
        mNewPosition += updatePeriod * newPosCount;
    }

    // Cache other fields that will be needed soon
    uint32_t notificationFrames = mNotificationFramesAct;
    if (mRefreshRemaining) {
        mRefreshRemaining = false;
        mRemainingFrames = notificationFrames;
        mRetryOnPartialBuffer = false;
    }
    size_t misalignment = mProxy->getMisalignment();
    uint32_t sequence = mSequence;

    // These fields don't need to be cached, because they are assigned only by set():
    //      mTransfer, mCallback, mUserData, mSampleRate, mFrameSize

    mLock.unlock();

    // perform callbacks while unlocked
    if (newOverrun) {
        callback->onOverrun();

    }
    if (markerReached) {
        callback->onMarker(markerPosition.value());
    }
    while (newPosCount > 0) {
        callback->onNewPos(newPosition.value());
        newPosition += updatePeriod;
        newPosCount--;
    }
    if (mObservedSequence != sequence) {
        mObservedSequence = sequence;
        callback->onNewIAudioRecord();
    }

    // if inactive, then don't run me again until re-started
    if (!active) {
        return NS_INACTIVE;
    }

    // Compute the estimated time until the next timed event (position, markers)
    uint32_t minFrames = ~0;
    if (!markerReached && position < markerPosition) {
        minFrames = (markerPosition - position).value();
    }
    if (updatePeriod > 0) {
        uint32_t remaining = (newPosition - position).value();
        if (remaining < minFrames) {
            minFrames = remaining;
        }
    }

    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
    static const uint32_t kPoll = 0;
    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
        minFrames = kPoll * notificationFrames;
    }

    // Convert frame units to time units
    nsecs_t ns = NS_WHENEVER;
    if (minFrames != (uint32_t) ~0) {
        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
        ns = ((minFrames * 1000000000LL) / mSampleRate) + kFudgeNs;
    }

    // If not supplying data by EVENT_MORE_DATA, then we're done
    if (mTransfer != TRANSFER_CALLBACK) {
        return ns;
    }

    struct timespec timeout;
    const struct timespec *requested = &ClientProxy::kForever;
    if (ns != NS_WHENEVER) {
        timeout.tv_sec = ns / 1000000000LL;
        timeout.tv_nsec = ns % 1000000000LL;
        ALOGV("%s(%d): timeout %ld.%03d",
                __func__, mPortId, timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
        requested = &timeout;
    }

    size_t readFrames = 0;
    while (mRemainingFrames > 0) {

        Buffer audioBuffer;
        audioBuffer.frameCount = mRemainingFrames;
        size_t nonContig;
        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig); // fetch captured data
        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
                "%s(%d): obtainBuffer() err=%d frameCount=%zu",
                __func__, mPortId, err, audioBuffer.frameCount);
        requested = &ClientProxy::kNonBlocking;
        size_t avail = audioBuffer.frameCount + nonContig;
        ALOGV("%s(%d): obtainBuffer(%u) returned %zu = %zu + %zu err %d",
                __func__, mPortId, mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
        if (err != NO_ERROR) {
            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) {
                break;
            }
            ALOGE("%s(%d): Error %d obtaining an audio buffer, giving up.",
                    __func__, mPortId, err);
            return NS_NEVER;
        }

        if (mRetryOnPartialBuffer) {
            mRetryOnPartialBuffer = false;
            if (avail < mRemainingFrames) {
                int64_t myns = ((mRemainingFrames - avail) *
                        1100000000LL) / mSampleRate;
                if (ns < 0 || myns < ns) {
                    ns = myns;
                }
                return ns;
            }
        }

        Buffer* buffer = &audioBuffer;
        if (mServerConfig.format != mFormat) {
            buffer = &mFormatConversionBuffer;
            buffer->frameCount = audioBuffer.frameCount;
            buffer->mSize = buffer->frameCount * mFrameSize;
            buffer->sequence = audioBuffer.sequence;
            memcpy_by_audio_format(buffer->raw, mFormat, audioBuffer.raw,
                                   mServerConfig.format, audioBuffer.size() / mServerSampleSize);
        }

        const size_t reqSize = buffer->size();
        const size_t readSize = callback->onMoreData(*buffer);
        buffer->mSize = readSize;

        // Validate on returned size
        if (ssize_t(readSize) < 0 || readSize > reqSize) {
            ALOGE("%s(%d):  EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
                    __func__, mPortId, reqSize, ssize_t(readSize));
            return NS_NEVER;
        }

        if (readSize == 0) {
            // The callback is done consuming buffers
            // Keep this thread going to handle timed events and
            // still try to provide more data in intervals of WAIT_PERIOD_MS
            // but don't just loop and block the CPU, so wait
            return WAIT_PERIOD_MS * 1000000LL;
        }

        size_t releasedFrames = readSize / mFrameSize;
        audioBuffer.frameCount = releasedFrames;
        mRemainingFrames -= releasedFrames;
        if (misalignment >= releasedFrames) {
            misalignment -= releasedFrames;
        } else {
            misalignment = 0;
        }

        releaseBuffer(&audioBuffer);
        readFrames += releasedFrames;

        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
        // if callback doesn't like to accept the full chunk
        if (readSize < reqSize) {
            continue;
        }

        // There could be enough non-contiguous frames available to satisfy the remaining request
        if (mRemainingFrames <= nonContig) {
            continue;
        }

#if 0
        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
        // that total to a sum == notificationFrames.
        if (0 < misalignment && misalignment <= mRemainingFrames) {
            mRemainingFrames = misalignment;
            return (mRemainingFrames * 1100000000LL) / mSampleRate;
        }
#endif

    }
    if (readFrames > 0) {
        AutoMutex lock(mLock);
        mFramesRead += readFrames;
        // mFramesReadTime = systemTime(SYSTEM_TIME_MONOTONIC); // not provided at this time.
    }
    mRemainingFrames = notificationFrames;
    mRetryOnPartialBuffer = true;

    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
    return 0;
}
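
The onMarker()/onNewPos() callbacks driven by processAudioBuffer() surface in the SDK as OnRecordPositionUpdateListener. A minimal sketch (public SDK API; the optional Handler argument is omitted):

record.setNotificationMarkerPosition(44100); // onMarker() fires once at frame 44100
record.setPositionNotificationPeriod(1024);  // onNewPos() fires every 1024 frames
record.setRecordPositionUpdateListener(new AudioRecord.OnRecordPositionUpdateListener() {
    @Override public void onMarkerReached(AudioRecord r) { /* marker reached */ }
    @Override public void onPeriodicNotification(AudioRecord r) { /* period elapsed */ }
});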
// frameworks/av/services/audioflinger/Threads.cpp
status_t RecordThread::start(IAfRecordTrack* recordTrack,
                                           AudioSystem::sync_event_t event,
                                           audio_session_t triggerSession)
{
    ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession);
    sp<ThreadBase> strongMe = this;
    status_t status = NO_ERROR;

    if (event == AudioSystem::SYNC_EVENT_NONE) {
        recordTrack->clearSyncStartEvent();
    } else if (event != AudioSystem::SYNC_EVENT_SAME) {
        recordTrack->synchronizedRecordState().startRecording(
                mAfThreadCallback->createSyncEvent(
                        event, triggerSession,
                        recordTrack->sessionId(), syncStartEventCallback, recordTrack));
    }

    {
        // This section is a rendezvous between binder thread executing start() and RecordThread
        audio_utils::lock_guard lock(mutex());
        if (recordTrack->isInvalid()) {
            recordTrack->clearSyncStartEvent();
            ALOGW("%s track %d: invalidated before startInput", __func__, recordTrack->portId());
            return DEAD_OBJECT;
        }
        // update the recordTrack state
        if (mActiveTracks.indexOf(recordTrack) >= 0) {
            if (recordTrack->state() == IAfTrackBase::PAUSING) {
                // We haven't stopped yet (moved to PAUSED and not in mActiveTracks)
                // so no need to startInput().
                ALOGV("active record track PAUSING -> ACTIVE");
                recordTrack->setState(IAfTrackBase::ACTIVE);
            } else {
                ALOGV("active record track state %d", (int)recordTrack->state());
            }
            return status;
        }

        // TODO consider other ways of handling this, such as changing the state to :STARTING and
        //      adding the track to mActiveTracks after returning from AudioSystem::startInput(),
        //      or using a separate command thread
        recordTrack->setState(IAfTrackBase::STARTING_1);
        mActiveTracks.add(recordTrack); // add to mActiveTracks so data captured in threadLoop is copied to this track
        if (recordTrack->isExternalTrack()) {
            mutex().unlock();
            status = AudioSystem::startInput(recordTrack->portId());
            mutex().lock();
            if (recordTrack->isInvalid()) {
                recordTrack->clearSyncStartEvent();
                if (status == NO_ERROR && recordTrack->state() == IAfTrackBase::STARTING_1) {
                    recordTrack->setState(IAfTrackBase::STARTING_2);
                    // STARTING_2 forces destroy to call stopInput.
                }
                ALOGW("%s track %d: invalidated after startInput", __func__, recordTrack->portId());
                return DEAD_OBJECT;
            }
            if (recordTrack->state() != IAfTrackBase::STARTING_1) {
                ALOGW("%s(%d): unsynchronized mState:%d change",
                    __func__, recordTrack->id(), (int)recordTrack->state());
                // Someone else has changed state, let them take over,
                // leave mState in the new state.
                recordTrack->clearSyncStartEvent();
                return INVALID_OPERATION;
            }
            // we're ok, but perhaps startInput has failed
            if (status != NO_ERROR) {
                ALOGW("%s(%d): startInput failed, status %d",
                    __func__, recordTrack->id(), status);
                // We are in ActiveTracks if STARTING_1 and valid, so remove from ActiveTracks,
                // leave in STARTING_1, so destroy() will not call stopInput.
                mActiveTracks.remove(recordTrack);
                recordTrack->clearSyncStartEvent();
                return status;
            }
            sendIoConfigEvent_l(
                AUDIO_CLIENT_STARTED, recordTrack->creatorPid(), recordTrack->portId());
        }

        recordTrack->logBeginInterval(patchSourcesToString(&mPatch)); // log to MediaMetrics

        // Catch up with current buffer indices if thread is already running.
        // This is what makes a new client discard all buffered data.  If the track's mRsmpInFront
        // was initialized to some value closer to the thread's mRsmpInFront, then the track could
        // see previously buffered data before it called start(), but with greater risk of overrun.

        recordTrack->resamplerBufferProvider()->reset();
        if (!recordTrack->isDirect()) {
            // clear any converter state as new data will be discontinuous
            recordTrack->recordBufferConverter()->reset();
        }
        recordTrack->setState(IAfTrackBase::STARTING_2);
        // signal thread to start
        mWaitWorkCV.notify_all();
        return status;
    }
}
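
For reference, the client-side sequence that eventually reaches the RecordThread::start() path above is the standard SDK pattern. A minimal Java sketch follows — the audio source, format, buffer size, and loop bound are illustrative assumptions, and the RECORD_AUDIO permission must already be granted:

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;

// Minimal capture loop; parameter values are illustrative, not taken from the traces above.
void captureOnce() {
    final int sampleRate = 44100; // the one rate guaranteed on all devices
    final int minBuf = AudioRecord.getMinBufferSize(sampleRate,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);

    AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC,
            sampleRate, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, minBuf);

    record.startRecording(); // drives the server-side start() shown above

    byte[] buf = new byte[minBuf];
    for (int i = 0; i < 100; i++) {              // bounded loop, just for the sketch
        int n = record.read(buf, 0, buf.length); // blocking read; see read() below
        if (n < 0) break;                        // errors are negative constants
        // consume n bytes of PCM here ...
    }

    record.stop();
    record.release();
}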

read#

// frameworks/av/media/libaudioclient/AudioRecord.cpp
ssize_t AudioRecord::read(void* buffer, size_t userSize, bool blocking)
{
    if (mTransfer != TRANSFER_SYNC) {
        return INVALID_OPERATION;
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Validation. user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("%s(%d) (buffer=%p, size=%zu (%zu)",
                __func__, mPortId, buffer, userSize, userSize);
        return BAD_VALUE;
    }

    ssize_t read = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        status_t err = obtainBuffer(&audioBuffer,
                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking); // fetch a chunk from the shared-memory ring buffer
        if (err < 0) {
            if (read > 0) {
                break;
            }
            if (err == TIMED_OUT || err == -EINTR) {
                err = WOULD_BLOCK;
            }
            return ssize_t(err);
        }

        size_t bytesRead = audioBuffer.frameCount * mFrameSize;
        if (audio_is_linear_pcm(mFormat)) {
            memcpy_by_audio_format(buffer, mFormat, audioBuffer.raw, mServerConfig.format,
                                audioBuffer.mSize / mServerSampleSize);
        } else {
            memcpy(buffer, audioBuffer.raw, audioBuffer.mSize);
        }
        buffer = ((char *) buffer) + bytesRead;
        userSize -= bytesRead;
        read += bytesRead;

        releaseBuffer(&audioBuffer);
    }
    if (read > 0) {
        mFramesRead += read / mFrameSize;
        // mFramesReadTime = systemTime(SYSTEM_TIME_MONOTONIC); // not provided at this time.
    }
    return read;
}
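
The blocking flag above selects whether obtainBuffer() is called with kForever or kNonBlocking. The same choice is exposed at the SDK level (API 23+) through the four-argument read(); a small hedged sketch, reusing the record object from the earlier example:

// Non-blocking variant: returns immediately with whatever is buffered (possibly 0 bytes)
// instead of sleeping on the futex until data arrives.
byte[] buf = new byte[4096]; // size is illustrative
int n = record.read(buf, 0, buf.length, AudioRecord.READ_NON_BLOCKING);
if (n == 0) {
    // nothing buffered yet -- the Java-side mapping of WOULD_BLOCK
} else if (n > 0) {
    // consume n bytes
} else {
    // negative value: error, e.g. AudioRecord.ERROR_INVALID_OPERATION
}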
status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioRecord sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioRecordClientProxy> proxy;
        sp<IMemory> iMem;
        sp<IMemory> bufferMem;
        {
            // start of lock scope
            AutoMutex lock(mLock);

            uint32_t newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    if (!audio_is_linear_pcm(mFormat)) {
                        // If compressed capture, don't attempt to restore the track.
                        // Return a DEAD_OBJECT error and let the caller recreate.
                        tryCounter = 0;
                    } else {
                        status = restoreRecord_l("obtainBuffer");
                    }
                    if (status != NO_ERROR) {
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;
            bufferMem = mBufferMemory;

            // Non-blocking if track is stopped
            if (!mActive) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);

    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));

    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->mSize = buffer.mFrameCount * mServerFrameSize;
    audioBuffer->raw = buffer.mRaw;
    audioBuffer->sequence = oldSequence;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}
// frameworks/av/media/libaudioclient/AudioTrackShared.cpp
status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *requested,
        struct timespec *elapsed)
{
    LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0,
            "%s: null or zero frame buffer, buffer:%p", __func__, buffer);
    struct timespec total;          // total elapsed time spent waiting
    total.tv_sec = 0;
    total.tv_nsec = 0;
    bool measure = elapsed != NULL; // whether to measure total elapsed time spent waiting

    status_t status;
    enum {
        TIMEOUT_ZERO,       // requested == NULL || *requested == 0
        TIMEOUT_INFINITE,   // *requested == infinity
        TIMEOUT_FINITE,     // 0 < *requested < infinity
        TIMEOUT_CONTINUE,   // additional chances after TIMEOUT_FINITE
    } timeout;
    if (requested == NULL) {
        timeout = TIMEOUT_ZERO;
    } else if (requested->tv_sec == 0 && requested->tv_nsec == 0) {
        timeout = TIMEOUT_ZERO;
    } else if (requested->tv_sec == INT_MAX) {
        timeout = TIMEOUT_INFINITE;
    } else {
        timeout = TIMEOUT_FINITE;
        if (requested->tv_sec > 0 || requested->tv_nsec >= MEASURE_NS) {
            measure = true;
        }
    }
    struct timespec before;
    bool beforeIsValid = false;
    audio_track_cblk_t* cblk = mCblk;
    bool ignoreInitialPendingInterrupt = true;
    // check for shared memory corruption
    if (mIsShutdown) {
        status = NO_INIT;
        goto end;
    }
    for (;;) {
        int32_t flags = android_atomic_and(~CBLK_INTERRUPT, &cblk->mFlags);
        // check for track invalidation by server, or server death detection
        if (flags & CBLK_INVALID) {
            ALOGV("Track invalidated");
            status = DEAD_OBJECT;
            goto end;
        }
        if (flags & CBLK_DISABLED) {
            ALOGV("Track disabled");
            status = NOT_ENOUGH_DATA;
            goto end;
        }
        // check for obtainBuffer interrupted by client
        if (!ignoreInitialPendingInterrupt && (flags & CBLK_INTERRUPT)) {
            ALOGV("obtainBuffer() interrupted by client");
            status = -EINTR;
            goto end;
        }
        ignoreInitialPendingInterrupt = false;
        // compute number of frames available to write (AudioTrack) or read (AudioRecord)
        int32_t front; // ring buffer head (read position)
        int32_t rear; // ring buffer tail (write position)
        if (mIsOut) {
            // The barrier following the read of mFront is probably redundant.
            // We're about to perform a conditional branch based on 'filled',
            // which will force the processor to observe the read of mFront
            // prior to allowing data writes starting at mRaw.
            // However, the processor may support speculative execution,
            // and be unable to undo speculative writes into shared memory.
            // The barrier will prevent such speculative execution.
            front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
            rear = cblk->u.mStreaming.mRear;
        } else {
            // On the other hand, this barrier is required.
            rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
            front = cblk->u.mStreaming.mFront;
        }
        // write to rear, read from front
        ssize_t filled = audio_utils::safe_sub_overflow(rear, front);
        // pipe should not be overfull
        if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
            if (mIsOut) {
                ALOGE("Shared memory control block is corrupt (filled=%zd, mFrameCount=%zu); "
                        "shutting down", filled, mFrameCount);
                mIsShutdown = true;
                status = NO_INIT;
                goto end;
            }
            // for input, sync up on overrun
            filled = 0;
            cblk->u.mStreaming.mFront = rear;
            (void) android_atomic_or(CBLK_OVERRUN, &cblk->mFlags);
        }
        // Don't allow filling pipe beyond the user settable size.
        // The calculation for avail can go negative if the buffer size
        // is suddenly dropped below the amount already in the buffer.
        // So use a signed calculation to prevent a numeric overflow abort.
        ssize_t adjustableSize = (ssize_t) getBufferSizeInFrames();
        ssize_t avail =  (mIsOut) ? adjustableSize - filled : filled;
        if (avail < 0) {
            avail = 0;
        } else if (avail > 0) {
            // 'avail' may be non-contiguous, so return only the first contiguous chunk
            size_t part1;
            if (mIsOut) {
                rear &= mFrameCountP2 - 1;
                part1 = mFrameCountP2 - rear;
            } else {
                front &= mFrameCountP2 - 1;
                part1 = mFrameCountP2 - front;
            }
            if (part1 > (size_t)avail) {
                part1 = avail;
            }
            if (part1 > buffer->mFrameCount) {
                part1 = buffer->mFrameCount;
            }
            buffer->mFrameCount = part1;
            buffer->mRaw = part1 > 0 ?
                    &((char *) mBuffers)[(mIsOut ? rear : front) * mFrameSize] : NULL; // point directly into shared memory, so no copy is needed here
            buffer->mNonContig = avail - part1;
            mUnreleased = part1;
            status = NO_ERROR;
            break;
        }
        struct timespec remaining;
        const struct timespec *ts;
        switch (timeout) {
        case TIMEOUT_ZERO:
            status = WOULD_BLOCK;
            goto end;
        case TIMEOUT_INFINITE:
            ts = NULL;
            break;
        case TIMEOUT_FINITE:
            timeout = TIMEOUT_CONTINUE;
            if (MAX_SEC == 0) {
                ts = requested;
                break;
            }
            FALLTHROUGH_INTENDED;
        case TIMEOUT_CONTINUE:
            // FIXME we do not retry if requested < 10ms? needs documentation on this state machine
            if (!measure || requested->tv_sec < total.tv_sec ||
                    (requested->tv_sec == total.tv_sec && requested->tv_nsec <= total.tv_nsec)) {
                status = TIMED_OUT;
                goto end;
            }
            remaining.tv_sec = requested->tv_sec - total.tv_sec;
            if ((remaining.tv_nsec = requested->tv_nsec - total.tv_nsec) < 0) {
                remaining.tv_nsec += 1000000000;
                remaining.tv_sec++;
            }
            if (0 < MAX_SEC && MAX_SEC < remaining.tv_sec) {
                remaining.tv_sec = MAX_SEC;
                remaining.tv_nsec = 0;
            }
            ts = &remaining;
            break;
        default:
            LOG_ALWAYS_FATAL("obtainBuffer() timeout=%d", timeout);
            ts = NULL;
            break;
        }
        int32_t old = android_atomic_and(~CBLK_FUTEX_WAKE, &cblk->mFutex);
        if (!(old & CBLK_FUTEX_WAKE)) {
            if (measure && !beforeIsValid) {
                clock_gettime(CLOCK_MONOTONIC, &before);
                beforeIsValid = true;
            }
            errno = 0;
            (void) syscall(__NR_futex, &cblk->mFutex,
                    mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old & ~CBLK_FUTEX_WAKE, ts);
            status_t error = errno; // clock_gettime can affect errno
            // update total elapsed time spent waiting
            if (measure) {
                struct timespec after;
                clock_gettime(CLOCK_MONOTONIC, &after);
                total.tv_sec += after.tv_sec - before.tv_sec;
                // Use auto instead of long to avoid the google-runtime-int warning.
                auto deltaNs = after.tv_nsec - before.tv_nsec;
                if (deltaNs < 0) {
                    deltaNs += 1000000000;
                    total.tv_sec--;
                }
                if ((total.tv_nsec += deltaNs) >= 1000000000) {
                    total.tv_nsec -= 1000000000;
                    total.tv_sec++;
                }
                before = after;
                beforeIsValid = true;
            }
            switch (error) {
            case 0:            // normal wakeup by server, or by binderDied()
            case EWOULDBLOCK:  // benign race condition with server
            case EINTR:        // wait was interrupted by signal or other spurious wakeup
            case ETIMEDOUT:    // time-out expired
                // FIXME these error/non-0 status are being dropped
                break;
            default:
                status = error;
                ALOGE("%s unexpected error %s", __func__, strerror(status));
                goto end;
            }
        }
    }

end:
    if (status != NO_ERROR) {
        buffer->mFrameCount = 0;
        buffer->mRaw = NULL;
        buffer->mNonContig = 0;
        mUnreleased = 0;
    }
    if (elapsed != NULL) {
        *elapsed = total;
    }
    if (requested == NULL) {
        requested = &kNonBlocking;
    }
    if (measure) {
        ALOGV("requested %ld.%03ld elapsed %ld.%03ld",
              requested->tv_sec, requested->tv_nsec / 1000000,
              total.tv_sec, total.tv_nsec / 1000000);
    }
    return status;
}

In essence, client and server share one block of memory with two parts: a control block that records usage information such as the ring buffer's front and rear positions, and a data area that holds the actual samples. Because the control block itself lives in shared memory, both sides see the same ring buffer bookkeeping and can operate on the buffer directly, with no extra copy or IPC round trip per read. The index arithmetic is sketched below.
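The front/rear bookkeeping in ClientProxy::obtainBuffer() is easiest to see in isolation. Below is a stripped-down Java sketch of the same index math; the names mirror the C++ fields above, but the concrete numbers are illustrative, not framework values:

// Illustrative sketch of the index arithmetic in ClientProxy::obtainBuffer().
public class RingBufferMath {
    public static void main(String[] args) {
        int frameCountP2 = 2048;      // mFrameCountP2: capacity rounded up to a power of two

        // Indices grow monotonically and are only masked when used as offsets,
        // so rear - front is the filled frame count even across wrap-around.
        int front = 2000;             // frames consumed so far (mFront, reader side)
        int rear  = 2700;             // frames produced so far (mRear, writer side)

        int filled = rear - front;    // 700 frames ready to read
        int avail  = filled;          // capture path (mIsOut == false): avail == filled

        // Only the first contiguous chunk is returned:
        int maskedFront = front & (frameCountP2 - 1); // 2000: physical read offset
        int part1 = frameCountP2 - maskedFront;       // 48 frames until the physical end
        if (part1 > avail) part1 = avail;

        int nonContig = avail - part1;                // 652 frames wrapped to the start
        System.out.println("part1=" + part1 + " nonContig=" + nonContig);
        // prints part1=48 nonContig=652: the caller reads part1 frames at offset
        // maskedFront * frameSize, then picks up the wrapped part on the next call.
    }
}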
