Abstract: This article is part nine of the ACodec series in the Android 13 Media framework. It looks at ACodec's role in media encoding and decoding, including how it is used in audio and video processing. Understanding and optimizing this path improves a device's media-processing capability and the user experience.
In the previous section we looked at how the Output Format Changed event is propagated upward and handled. In this section we continue with how an output buffer is propagated upward and handled.

1. CodecObserver::onMessages
```cpp
virtual void onMessages(const std::list<omx_message> &messages) {
    // notify (a dup of mNotify) and msgList are created earlier in the full
    // function; those lines are omitted in this excerpt
    for (std::list<omx_message>::const_iterator it = messages.cbegin();
            it != messages.cend(); ++it) {
        const omx_message &omx_msg = *it;

        sp<AMessage> msg = new AMessage;
        msg->setInt32("type", omx_msg.type);
        switch (omx_msg.type) {
            case omx_message::FILL_BUFFER_DONE:
            {
                msg->setInt32(
                        "buffer", omx_msg.u.extended_buffer_data.buffer);
                msg->setInt32(
                        "range_offset",
                        omx_msg.u.extended_buffer_data.range_offset);
                msg->setInt32(
                        "range_length",
                        omx_msg.u.extended_buffer_data.range_length);
                msg->setInt32(
                        "flags",
                        omx_msg.u.extended_buffer_data.flags);
                msg->setInt64(
                        "timestamp",
                        omx_msg.u.extended_buffer_data.timestamp);
                msg->setInt32(
                        "fence_fd", omx_msg.fenceFd);
                break;
            }
            // other message types elided
        }

        msgList->getList().push_back(msg);
    }
    notify->setObject("messages", msgList);
    notify->post();
}
```
OMXNodeInstance packages events and messages into omx_message structures via CodecObserver and posts them up to ACodec. In this section we focus on how an output buffer is handled.
Because the output buffer has just been filled by the OMX component, the message posted upward is FILL_BUFFER_DONE, and the information packed into the omx_message includes the following (a sketch of the structure follows the list):

- buffer id: the index of the buffer;
- buffer offset: the offset of the valid data within the buffer;
- buffer length: the length of the valid data within the buffer;
- flags: the flags carried with the frame;
- timestamp: the timestamp of the frame;
- fence fd: the synchronization fence handle.
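The fields above are carried in the `extended_buffer_data` part of `omx_message`. Below is a simplified sketch of that structure, reconstructed from the fields referenced in `CodecObserver::onMessages`; the actual definition lives in the IOMX headers and contains additional message types and payloads.

```cpp
// Simplified sketch of omx_message, reconstructed from the fields used in
// CodecObserver::onMessages above; not the verbatim AOSP definition.
// IOMX::buffer_id, OMX_U32 and OMX_TICKS come from media/IOMX.h / OMX_Core.h.
struct omx_message {
    enum {
        EVENT,
        EMPTY_BUFFER_DONE,
        FILL_BUFFER_DONE,
        FRAME_RENDERED,
    } type;

    int fenceFd;                    // fence fd carried alongside the buffer

    union {
        // Payload for FILL_BUFFER_DONE
        struct {
            IOMX::buffer_id buffer; // buffer id (used to look up ACodec's BufferInfo)
            OMX_U32 range_offset;   // offset of the valid data within the buffer
            OMX_U32 range_length;   // length of the valid data within the buffer
            OMX_U32 flags;          // e.g. OMX_BUFFERFLAG_EOS
            OMX_TICKS timestamp;    // presentation timestamp of the frame
        } extended_buffer_data;
        // other payloads (event_data, buffer_data, render_data) omitted
    } u;
};
```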
2. BaseState::onOMXFillBufferDone
The message is delivered to the ACodec looper thread and is ultimately handled in ACodec::BaseState::onOMXFillBufferDone:
```cpp
bool ACodec::BaseState::onOMXFillBufferDone(
        IOMX::buffer_id bufferID,
        size_t rangeOffset, size_t rangeLength,
        OMX_U32 flags,
        int64_t timeUs,
        int fenceFd) {
    ALOGV("[%s] onOMXFillBufferDone %u time %" PRId64 " us, flags = 0x%08x",
         mCodec->mComponentName.c_str(), bufferID, timeUs, flags);

    ssize_t index;
    status_t err = OK;

    // Pre-built logging: output time of a given PTS minus its input time
    // gives the decoding latency of that frame
#if TRACK_BUFFER_TIMING
    index = mCodec->mBufferStats.indexOfKey(timeUs);
    if (index >= 0) {
        ACodec::BufferStats *stats = &mCodec->mBufferStats.editValueAt(index);
        stats->mFillBufferDoneTimeUs = ALooper::GetNowUs();

        ALOGI("frame PTS %lld: %lld",
                timeUs,
                stats->mFillBufferDoneTimeUs - stats->mEmptyBufferTimeUs);

        mCodec->mBufferStats.removeItemsAt(index);
        stats = NULL;
    }
#endif

    // Walk the BufferInfo array and find the entry matching the buffer id
    BufferInfo *info =
        mCodec->findBufferByID(kPortIndexOutput, bufferID, &index);
    // Check the current ownership of the BufferInfo
    BufferInfo::Status status = BufferInfo::getSafeStatus(info);
    if (status != BufferInfo::OWNED_BY_COMPONENT) {
        ALOGE("Wrong ownership in FBD: %s(%d) buffer #%u", _asString(status), status, bufferID);
        mCodec->dumpBuffers(kPortIndexOutput);
        mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
        if (fenceFd >= 0) {
            ::close(fenceFd);
        }
        return true;
    }

    // Record at which output frame this BufferInfo was used
    info->mDequeuedAt = ++mCodec->mDequeueCounter;
    // Update the ownership of the BufferInfo
    info->mStatus = BufferInfo::OWNED_BY_US;

    if (info->mRenderInfo != NULL) {
        // The fence for an emptied buffer must have signaled, but there still could be queued
        // or out-of-order dequeued buffers in the render queue prior to this buffer. Drop these,
        // as we will soon requeue this buffer to the surface. While in theory we could still keep
        // track of buffers that are requeued to the surface, it is better to add support to the
        // buffer-queue to notify us of released buffers and their fences (in the future).
        mCodec->notifyOfRenderedFrames(true /* dropIncomplete */);
    }

    // byte buffers cannot take fences, so wait for any fence now
    if (mCodec->mNativeWindow == NULL) {
        (void)mCodec->waitForFence(fenceFd, "onOMXFillBufferDone");
        fenceFd = -1;
    }
    info->setReadFence(fenceFd, "onOMXFillBufferDone");

    // Get the current port mode
    PortMode mode = getPortMode(kPortIndexOutput);

    switch (mode) {
        // If the port mode is KEEP_BUFFERS, ACodec holds the buffer and does nothing
        case KEEP_BUFFERS:
            break;

        case RESUBMIT_BUFFERS:
        {
            // If the data length is 0 and this is not an EOS frame (or the output
            // port has already seen EOS), this is an invalid output frame
            if (rangeLength == 0 && (!(flags & OMX_BUFFERFLAG_EOS)
                    || mCodec->mPortEOS[kPortIndexOutput])) {
                ALOGV("[%s] calling fillBuffer %u",
                     mCodec->mComponentName.c_str(), info->mBufferID);

                // Resubmit it to the OMX component to be filled again
                err = mCodec->fillBuffer(info);
                if (err != OK) {
                    mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
                    return true;
                }
                break;
            }

            sp<MediaCodecBuffer> buffer = info->mData;

            // Set the format on the buffer
            if (mCodec->mOutputFormat != mCodec->mLastOutputFormat && rangeLength > 0) {
                // pretend that output format has changed on the first frame (we used to do this)
                if (mCodec->mBaseOutputFormat == mCodec->mOutputFormat) {
                    mCodec->onOutputFormatChanged(mCodec->mOutputFormat);
                }
                mCodec->sendFormatChange();
            }
            buffer->setFormat(mCodec->mOutputFormat);

            if (mCodec->usingSecureBufferOnEncoderOutput()) {
                native_handle_t *handle = NULL;
                sp<SecureBuffer> secureBuffer = static_cast<SecureBuffer *>(buffer.get());
                if (secureBuffer != NULL) {
#ifdef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
                    // handle is only valid on 32-bit/mediaserver process
                    handle = NULL;
#else
                    handle = (native_handle_t *)secureBuffer->getDestinationPointer();
#endif
                }
                buffer->meta()->setPointer("handle", handle);
                buffer->meta()->setInt32("rangeOffset", rangeOffset);
                buffer->meta()->setInt32("rangeLength", rangeLength);
            } else if (buffer->base() == info->mCodecData->base()) {
                // No converter is needed: mCodecData and mData point to the same
                // backing buffer, so just set the readable range on the buffer
                buffer->setRange(rangeOffset, rangeLength);
            } else {
                // A converter is needed: convert the data in mCodecData into mData
                info->mCodecData->setRange(rangeOffset, rangeLength);
                // in this case we know that mConverter is not null
                status_t err = mCodec->mConverter[kPortIndexOutput]->convert(
                        info->mCodecData, buffer);
                if (err != OK) {
                    mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
                    return true;
                }
            }
#if 0
            // Without a native window, if the data in mData is accessible we can
            // check whether this is an IDR frame
            if (mCodec->mNativeWindow == NULL) {
                if (IsIDR(info->mData->data(), info->mData->size())) {
                    ALOGI("IDR frame");
                }
            }
#endif

            if (mCodec->mSkipCutBuffer != NULL) {
                mCodec->mSkipCutBuffer->submit(buffer);
            }
            // Set the timestamp on the buffer
            buffer->meta()->setInt64("timeUs", timeUs);

            // Clear the BufferInfo's reference to mData
            info->mData.clear();

            // Hand the buffer id to the BufferChannel so it can pass the buffer up to MediaCodec
            mCodec->mBufferChannel->drainThisBuffer(info->mBufferID, flags);

            // Mark the BufferInfo as OWNED_BY_DOWNSTREAM
            info->mStatus = BufferInfo::OWNED_BY_DOWNSTREAM;

            // If this is EOS, mark that the output port has seen EOS
            if (flags & OMX_BUFFERFLAG_EOS) {
                ALOGV("[%s] saw output EOS", mCodec->mComponentName.c_str());
                mCodec->mCallback->onEos(mCodec->mInputEOSResult);
                mCodec->mPortEOS[kPortIndexOutput] = true;
            }
            break;
        }

        case FREE_BUFFERS:
            // If the port mode is FREE_BUFFERS, release this output buffer
            err = mCodec->freeBuffer(kPortIndexOutput, index);
            if (err != OK) {
                mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
                return true;
            }
            break;

        default:
            ALOGE("Invalid port mode: %d", mode);
            return false;
    }

    return true;
}
```
This code is a little long, but the idea is simple; the main thing is to understand what getPortMode means:
- The OMX callback hands back only a buffer id; we first use this id to find the BufferInfo stored by ACodec, along with its index in the array;
- Check the ownership of the BufferInfo: it must currently belong to the OMX component, otherwise the state is abnormal;
- Increment the output frame counter and record at which output frame this BufferInfo was used; how this is used was covered in an earlier section;
- Change the ownership of the BufferInfo to ACodec;
- Get the current port mode. This step is crucial: the port mode reflects the current player state. In the ordinary state the port mode is KEEP_BUFFERS, meaning ACodec holds the buffer and takes no action; in the running state it is RESUBMIT_BUFFERS, meaning ACodec keeps submitting buffers to their target so they stay in circulation; in OutputPortSettingsChangedState the output buffer is destroyed directly, which we covered in the previous section. KEEP_BUFFERS is trivial, so we only look at RESUBMIT_BUFFERS here (a minimal sketch of the port-mode dispatch follows this list);
- Check the buffer's data length: if it is 0 and this is not an EOS frame (or the output port has already seen EOS), it is an invalid output frame and is simply resubmitted to the OMX component to be filled again;
- Get the MediaCodecBuffer (mData) in the BufferInfo that is exchanged with ACodecBufferChannel, set the output format on it, and set the buffer's range, offset, and timestamp;
- Once that is done, clear the BufferInfo's reference to the MediaCodecBuffer;
- Call ACodecBufferChannel's drainThisBuffer method to pass the buffer up to MediaCodec;
- Change the ownership of the BufferInfo to the upper layer (OWNED_BY_DOWNSTREAM);
- If the flags contain EOS, mark that the output port has received EOS.
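To make the port-mode dispatch concrete, here is a minimal, self-contained sketch of the pattern. It is a simplification of the real state classes in ACodec.cpp, not verbatim AOSP code: BaseState declares the PortMode enum and a virtual getPortMode, each state overrides it to return the mode appropriate for that state, and the buffer callbacks switch on the returned mode as shown above.

```cpp
#include <cstdint>

using OMX_U32 = uint32_t;                 // stand-in for the OMX typedef
constexpr OMX_U32 kPortIndexOutput = 1;   // output port index, as in ACodec

// Minimal sketch of the port-mode dispatch pattern described above.
struct BaseState {
    enum PortMode {
        KEEP_BUFFERS,     // hold on to the buffer, do nothing
        RESUBMIT_BUFFERS, // keep buffers circulating between ACodec and OMX
        FREE_BUFFERS,     // release the buffer immediately
    };

    // Default: just hold buffers
    virtual PortMode getPortMode(OMX_U32 /* portIndex */) { return KEEP_BUFFERS; }
    virtual ~BaseState() = default;
};

struct ExecutingState : BaseState {
    // Running state: keep feeding output buffers back to the OMX component
    PortMode getPortMode(OMX_U32 /* portIndex */) override { return RESUBMIT_BUFFERS; }
};

struct OutputPortSettingsChangedState : BaseState {
    // Output port is being reconfigured: free output buffers as they return
    PortMode getPortMode(OMX_U32 portIndex) override {
        return portIndex == kPortIndexOutput ? FREE_BUFFERS : KEEP_BUFFERS;
    }
};
```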
That completes the handling of the FILL_BUFFER_DONE message.
3. BaseState::onOutputBufferDrained
After the upper layer finishes A/V sync it calls a MediaCodec method to either render or drop the output buffer. The resulting message eventually arrives in BaseState::onMessageReceived:
```cpp
bool ACodec::BaseState::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOutputBufferDrained:
        {
            onOutputBufferDrained(msg);
            break;
        }
        // other cases omitted
    }

    return true;
}
```
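For context, the MediaCodec call mentioned above is the application releasing an output buffer with a render flag or a render timestamp after A/V sync. The following is a minimal client-side sketch using the public NDK API (the `codec` handle and the `drainOneOutputBuffer` helper are illustrative, not part of ACodec); releasing the buffer this way is what ultimately drives the kWhatOutputBufferDrained message handled here.

```cpp
#include <media/NdkMediaCodec.h>

// Illustrative client-side sketch: `codec` is assumed to be a started
// AMediaCodec* decoder configured with a Surface. Releasing the output buffer
// (with render=true or with a render timestamp) eventually results in the
// kWhatOutputBufferDrained message handled by ACodec above.
void drainOneOutputBuffer(AMediaCodec *codec, int64_t renderTimeNs, bool render) {
    AMediaCodecBufferInfo info;
    ssize_t index = AMediaCodec_dequeueOutputBuffer(codec, &info, 10000 /* timeoutUs */);
    if (index < 0) {
        return; // no output available yet, or format/buffers changed
    }
    if (render) {
        // Render at a specific system time (ns), e.g. computed by A/V sync
        AMediaCodec_releaseOutputBufferAtTime(codec, index, renderTimeNs);
    } else {
        // Drop the frame: release without rendering
        AMediaCodec_releaseOutputBuffer(codec, index, false /* render */);
    }
}
```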
Searching the code, we find that onOutputBufferDrained has several overrides:
```cpp
void ACodec::ExecutingToIdleState::onOutputBufferDrained(
        const sp<AMessage> &msg) {
    BaseState::onOutputBufferDrained(msg);

    changeStateIfWeOwnAllBuffers();
}

void ACodec::FlushingState::onOutputBufferDrained(const sp<AMessage> &msg) {
    BaseState::onOutputBufferDrained(msg);

    changeStateIfWeOwnAllBuffers();
}
```
In ExecutingToIdleState and FlushingState the overrides also call BaseState::onOutputBufferDrained; they simply call changeStateIfWeOwnAllBuffers afterwards.
Let us first look at BaseState::onOutputBufferDrained:
```cpp
void ACodec::BaseState::onOutputBufferDrained(const sp<AMessage> &msg) {
    IOMX::buffer_id bufferID;
    // Get the output buffer id to process, along with the corresponding mData
    CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
    sp<RefBase> obj;
    CHECK(msg->findObject("buffer", &obj));
    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
    int32_t discarded = 0;
    // Render or drop?
    msg->findInt32("discarded", &discarded);

    // Find the BufferInfo matching the id
    ssize_t index;
    BufferInfo *info = mCodec->findBufferByID(kPortIndexOutput, bufferID, &index);
    // Check the ownership of the BufferInfo
    BufferInfo::Status status = BufferInfo::getSafeStatus(info);
    if (status != BufferInfo::OWNED_BY_DOWNSTREAM) {
        ALOGE("Wrong ownership in OBD: %s(%d) buffer #%u", _asString(status), status, bufferID);
        mCodec->dumpBuffers(kPortIndexOutput);
        mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
        return;
    }
    // Point the BufferInfo's mData back at the MediaCodecBuffer
    info->mData = buffer;
    int32_t render;
    // Enter this branch if the buffer needs to be rendered
    if (mCodec->mNativeWindow != NULL
            && msg->findInt32("render", &render) && render != 0
            && !discarded && buffer->size() != 0) {
        ATRACE_NAME("render");
        // The client wants this buffer to be rendered.

        // Apply the crop rectangle if one is needed
        android_native_rect_t crop;
        if (buffer->format()->findRect("crop", &crop.left, &crop.top, &crop.right, &crop.bottom)) {
            // NOTE: native window uses extended right-bottom coordinate
            ++crop.right;
            ++crop.bottom;
            if (memcmp(&crop, &mCodec->mLastNativeWindowCrop, sizeof(crop)) != 0) {
                mCodec->mLastNativeWindowCrop = crop;
                status_t err = native_window_set_crop(mCodec->mNativeWindow.get(), &crop);
                ALOGW_IF(err != NO_ERROR, "failed to set crop: %d", err);
            }
        }

        // Check whether the upper layer specified a dataspace
        int32_t dataSpace;
        if (buffer->format()->findInt32("android._dataspace", &dataSpace)
                && dataSpace != mCodec->mLastNativeWindowDataSpace) {
            status_t err = native_window_set_buffers_data_space(
                    mCodec->mNativeWindow.get(), (android_dataspace)dataSpace);
            mCodec->mLastNativeWindowDataSpace = dataSpace;
            ALOGW_IF(err != NO_ERROR, "failed to set dataspace: %d", err);
        }
        // Check whether the upper layer specified HDR static info
        if (buffer->format()->contains("hdr-static-info")) {
            HDRStaticInfo info;
            if (ColorUtils::getHDRStaticInfoFromFormat(buffer->format(), &info)
                && memcmp(&mCodec->mLastHDRStaticInfo, &info, sizeof(info))) {
                setNativeWindowHdrMetadata(mCodec->mNativeWindow.get(), &info);
                mCodec->mLastHDRStaticInfo = info;
            }
        }

        sp<ABuffer> hdr10PlusInfo;
        if (buffer->format()->findBuffer("hdr10-plus-info", &hdr10PlusInfo)
                && hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0
                && hdr10PlusInfo != mCodec->mLastHdr10PlusBuffer) {
            native_window_set_buffers_hdr10_plus_metadata(mCodec->mNativeWindow.get(),
                    hdr10PlusInfo->size(), hdr10PlusInfo->data());
            mCodec->mLastHdr10PlusBuffer = hdr10PlusInfo;
        }

        // save buffers sent to the surface so we can get render time when they return
        int64_t mediaTimeUs = -1;
        buffer->meta()->findInt64("timeUs", &mediaTimeUs);
        if (mediaTimeUs >= 0) {
            mCodec->mRenderTracker.onFrameQueued(
                    mediaTimeUs, info->mGraphicBuffer, new Fence(::dup(info->mFenceFd)));
        }

        // Check whether the upper layer specified a render (system) time;
        // if not, use pts * 1000 as the render time
        int64_t timestampNs = 0;
        if (!msg->findInt64("timestampNs", &timestampNs)) {
            // use media timestamp if client did not request a specific render timestamp
            if (buffer->meta()->findInt64("timeUs", &timestampNs)) {
                ALOGV("using buffer PTS of %lld", (long long)timestampNs);
                timestampNs *= 1000;
            }
        }

        // Set the render timestamp on the output buffer and queue it to the surface
        status_t err;
        err = native_window_set_buffers_timestamp(mCodec->mNativeWindow.get(), timestampNs);
        ALOGW_IF(err != NO_ERROR, "failed to set buffer timestamp: %d", err);

        info->checkReadFence("onOutputBufferDrained before queueBuffer");
        err = mCodec->mNativeWindow->queueBuffer(
                    mCodec->mNativeWindow.get(), info->mGraphicBuffer.get(), info->mFenceFd);
        info->mFenceFd = -1;
        if (err == OK) {
            // Change the BufferInfo's ownership
            info->mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
        } else {
            ALOGE("queueBuffer failed in onOutputBufferDrained: %d", err);
            mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
            info->mStatus = BufferInfo::OWNED_BY_US;
            // keeping read fence as write fence to avoid clobbering
            info->mIsReadFence = false;
        }
    } else {
        if (mCodec->mNativeWindow != NULL && (discarded || buffer->size() != 0)) {
            // move read fence into write fence to avoid clobbering
            info->mIsReadFence = false;
            ATRACE_NAME("frame-drop");
        }
        // If the frame is dropped, simply change the ownership back to ACodec
        info->mStatus = BufferInfo::OWNED_BY_US;
    }

    // Get the current port mode
    PortMode mode = getPortMode(kPortIndexOutput);

    switch (mode) {
        case KEEP_BUFFERS:
        {
            // XXX fishy, revisit!!! What about the FREE_BUFFERS case below?
            // For KEEP_BUFFERS, dequeue a buffer from the native window and do nothing else
            if (info->mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
                // We cannot resubmit the buffer we just rendered, dequeue
                // the spare instead.
                info = mCodec->dequeueBufferFromNativeWindow();
            }
            break;
        }

        case RESUBMIT_BUFFERS:
        {
            if (!mCodec->mPortEOS[kPortIndexOutput]) {
                if (info->mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
                    // We cannot resubmit the buffer we just rendered, dequeue
                    // the spare instead.
                    // Get a fresh output buffer
                    info = mCodec->dequeueBufferFromNativeWindow();
                }

                // Submit the output buffer back to the OMX component
                if (info != NULL) {
                    ALOGV("[%s] calling fillBuffer %u",
                         mCodec->mComponentName.c_str(), info->mBufferID);
                    info->checkWriteFence("onOutputBufferDrained::RESUBMIT_BUFFERS");
                    status_t err = mCodec->fillBuffer(info);
                    if (err != OK) {
                        mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
                    }
                }
            }
            break;
        }

        case FREE_BUFFERS:
        {
            // For FREE_BUFFERS, release the buffer directly
            status_t err = mCodec->freeBuffer(kPortIndexOutput, index);
            if (err != OK) {
                mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
            }
            break;
        }

        default:
            ALOGE("Invalid port mode: %d", mode);
            return;
    }
}
```
I have annotated BaseState::onOutputBufferDrained in the code above; readers who have followed the earlier articles should have no trouble seeing what each part is doing.
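As a recap, the ownership transitions of an output BufferInfo across the two callbacks can be summarized as follows. The state names are taken from the snippets in this article; the real enum in ACodec.h contains a few more values.

```cpp
// Sketch of the BufferInfo ownership states seen in the code above; the names
// come from the snippets in this article, the real enum in ACodec.h has
// additional values (e.g. for the input/upstream side).
enum Status {
    OWNED_BY_US,             // held by ACodec, e.g. right after FILL_BUFFER_DONE
    OWNED_BY_COMPONENT,      // held by the OMX component while it fills the buffer
    OWNED_BY_DOWNSTREAM,     // handed to MediaCodec via drainThisBuffer
    OWNED_BY_NATIVE_WINDOW,  // queued to the surface in onOutputBufferDrained
};
// Typical output-buffer cycle:
//   OWNED_BY_COMPONENT --(FILL_BUFFER_DONE)--> OWNED_BY_US
//   --(drainThisBuffer)--> OWNED_BY_DOWNSTREAM
//   --(onOutputBufferDrained + queueBuffer)--> OWNED_BY_NATIVE_WINDOW
//   or --(fillBuffer)--> OWNED_BY_COMPONENT again
```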
As for what changeStateIfWeOwnAllBuffers does, we will study that in a later article!
That wraps up this section.