Invoke a padding round in certain use-cases.

* In some use-cases, it is possible that there are
no AppBuffer layers on the external/virtual layer-list,
in which case all the pipes will be allocated to the
primary. When layers do come up on the external/virtual
layer-list, subsequent overlay sets fail.

* This change ensures that in such cases, we invoke a
padding round on all the displays to free up the
HW resources, which can then be used in subsequent cycles.

Change-Id: Ifac0b8f51a8719eb55b11010d05b8d11352db054
This commit is contained in:
Raj Kamal
2014-02-07 16:15:17 +05:30
parent 25154792a5
commit 9ed3d6b35e
4 changed files with 29 additions and 8 deletions

View File

@@ -105,7 +105,10 @@ static void hwc_registerProcs(struct hwc_composer_device_1* dev,
//Helper
static void reset(hwc_context_t *ctx, int numDisplays,
hwc_display_contents_1_t** displays) {
ctx->numActiveDisplays = 0;
ctx->isPaddingRound = false;
for(int i = 0; i < numDisplays; i++) {
hwc_display_contents_1_t *list = displays[i];
// XXX:SurfaceFlinger no longer guarantees that this
@@ -125,6 +128,17 @@ static void reset(hwc_context_t *ctx, int numDisplays,
* the display device to be active.
*/
ctx->numActiveDisplays += 1;
if((ctx->mPrevHwLayerCount[i] == 1) and (list->numHwLayers > 1)) {
/* If the previous cycle for dpy 'i' has 0 AppLayers and the
* current cycle has atleast 1 AppLayer, padding round needs
* to be invoked on current cycle to free up the resources.
*/
ctx->isPaddingRound = true;
}
ctx->mPrevHwLayerCount[i] = list->numHwLayers;
} else {
ctx->mPrevHwLayerCount[i] = 0;
}
if(ctx->mFBUpdate[i])
@@ -133,7 +147,6 @@ static void reset(hwc_context_t *ctx, int numDisplays,
ctx->mCopyBit[i]->reset();
if(ctx->mLayerRotMap[i])
ctx->mLayerRotMap[i]->reset();
}
ctx->mAD->reset();

View File

@@ -419,8 +419,8 @@ bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
__FUNCTION__);
ret = false;
} else if(ctx->isPaddingRound) {
ctx->isPaddingRound = false;
ALOGD_IF(isDebug(), "%s: padding round",__FUNCTION__);
ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
__FUNCTION__,mDpy);
ret = false;
}
return ret;
@@ -1410,10 +1410,9 @@ int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
const int numLayers = ctx->listStats[mDpy].numAppLayers;
MDPVersion& mdpVersion = qdutils::MDPVersion::getInstance();
//number of app layers exceeds MAX_NUM_APP_LAYERS fall back to GPU
//do not cache the information for next draw cycle.
if(numLayers > MAX_NUM_APP_LAYERS) {
ALOGI("%s: Number of App layers exceeded the limit ",
//Do not cache the information for next draw cycle.
if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) {
ALOGI("%s: Unsupported layer count for mdp composition",
__FUNCTION__);
mCachedFrame.reset();
return -1;

View File

@@ -213,6 +213,10 @@ void initContext(hwc_context_t *ctx)
ctx->dpyAttr[i].mAsHeightRatio = 0;
}
for (uint32_t i = 0; i < HWC_NUM_DISPLAY_TYPES; i++) {
ctx->mPrevHwLayerCount[i] = 0;
}
MDPComp::init(ctx);
ctx->mAD = new AssertiveDisplay(ctx);
@@ -897,6 +901,7 @@ void setListStats(hwc_context_t *ctx,
if(prevYuvCount != ctx->listStats[dpy].yuvCount) {
ctx->mVideoTransFlag = true;
}
if(dpy == HWC_DISPLAY_PRIMARY) {
ctx->mAD->markDoable(ctx, list);
}

View File

@@ -483,6 +483,10 @@ struct hwc_context_t {
eAnimationState mAnimationState[HWC_NUM_DISPLAY_TYPES];
qhwc::HWCVirtualBase *mHWCVirtual;
// stores the #numHwLayers of the previous frame
// for each display device
int mPrevHwLayerCount[HWC_NUM_DISPLAY_TYPES];
// stores the primary device orientation
int deviceOrientation;
//Securing in progress indicator