libhwcomposer: Enhance mixed mode composition.

While marking layers for cache, neglect the z-order when
possible, to improve the scope of mixed mode usage.
It is safe to neglect the z-order if a static layer has no
overlapping region with the updating layers that lie between
it and the contiguous batch of static layers.
- If the layer above has no overlap with the updating layers
  in the middle, push it into the batch.
- If the above case fails, but the layers below (the existing batch)
  have no overlapping region with the updating layers, bring the
  batch to the top and modify the fb z-order accordingly.

Change-Id: I4882a750d0a9a2e11272fcbb146202160673d04f
This commit is contained in:
Prabhanjan Kandula
2013-09-25 17:00:36 +05:30
parent 11888cb1bd
commit 9bd5f64b2b
4 changed files with 150 additions and 37 deletions

View File

@@ -600,7 +600,7 @@ bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
} }
updateYUV(ctx, list, false /*secure only*/); updateYUV(ctx, list, false /*secure only*/);
bool ret = batchLayers(ctx, list); //sets up fbZ also bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
if(!ret) { if(!ret) {
ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy); ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
return false; return false;
@@ -702,14 +702,126 @@ bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
return true; return true;
} }
bool MDPComp::batchLayers(hwc_context_t *ctx, hwc_display_contents_1_t* list) { /* starts at fromIndex and check for each layer to find
/* Idea is to keep as many contiguous non-updating(cached) layers in FB and * if it it has overlapping with any Updating layer above it in zorder
* send rest of them through MDP. NEVER mark an updating layer for caching. * till the end of the batch. returns true if it finds any intersection */
* But cached ones can be marked for MDP*/ bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list,
int fromIndex, int toIndex) {
for(int i = fromIndex; i < toIndex; i++) {
if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
if(intersectingUpdatingLayers(list, i+1, toIndex, i)) {
return false;
}
}
}
return true;
}
/* Checks if given layer at targetLayerIndex has any
* intersection with all the updating layers in beween
* fromIndex and toIndex. Returns true if it finds intersectiion */
bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
int fromIndex, int toIndex, int targetLayerIndex) {
for(int i = fromIndex; i <= toIndex; i++) {
if(!mCurrentFrame.isFBComposed[i]) {
if(areLayersIntersecting(&list->hwLayers[i],
&list->hwLayers[targetLayerIndex])) {
return true;
}
}
}
return false;
}
/* Scans the frame for the largest batch of contiguous cached
 * (FB composed) layers, allowing updating layers in between when the
 * batch can safely be moved above them in z-order. Outputs the batch
 * bounds/size through the reference params and returns the fb z-order
 * for that batch, or -1 if no batch was found. */
int MDPComp::getBatch(hwc_display_contents_1_t* list,
int& maxBatchStart, int& maxBatchEnd,
int& maxBatchCount) {
int i = 0;
int updatingLayersAbove = 0;//Updating layer count in middle of batch
int fbZOrder =-1;
// NOTE(review): updatingLayersAbove is not reset when the outer loop
// starts a new batch, so a stale count can survive the "both failed"
// break below into the next batch scan -- confirm this is intended.
while (i < mCurrentFrame.layerCount) {
int batchCount = 0;
int batchStart = i;
int batchEnd = i;
int fbZ = batchStart; //candidate fb z-order; raised as the batch moves up
int firstZReverseIndex = -1; //first layer that forced a z-order push-up
while(i < mCurrentFrame.layerCount) {
if(!mCurrentFrame.isFBComposed[i]) {
//Updating layer: ends the scan unless a batch is already in progress.
if(!batchCount) {
i++;
break;
}
updatingLayersAbove++;
i++;
continue;
} else {
if(mCurrentFrame.drop[i]) {
//Dropped layers neither extend nor break the batch.
i++;
continue;
} else if(updatingLayersAbove <= 0) {
//No updating layer seen since the batch started; plain extend.
batchCount++;
batchEnd = i;
i++;
continue;
} else { //Layer is FBComposed, not a drop & updatingLayer > 0
// We already passed updating layer(s). If layer i does not
// overlap any updating layer between batch-start and i,
// then layer i can still be added to the batch.
if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) {
batchCount++;
batchEnd = i;
i++;
continue;
} else if(canPushBatchToTop(list, batchStart, i)) {
//If none of the non-updating layers within this batch
//intersect the updating layers above them in z-order,
//the whole batch can safely move to a higher z-order.
//Increment fbZ as it is moving up.
if( firstZReverseIndex < 0) {
firstZReverseIndex = i;
}
batchCount++;
batchEnd = i;
fbZ += updatingLayersAbove;
i++;
updatingLayersAbove = 0;
continue;
} else {
//Both checks failed: restart the scan from the first layer
//that caused a z-order reversal, so it can seed a new batch.
if(firstZReverseIndex >= 0) {
i = firstZReverseIndex;
}
break;
}
}
}
}
//Keep only the biggest batch seen so far.
if(batchCount > maxBatchCount) {
maxBatchCount = batchCount;
maxBatchStart = batchStart;
maxBatchEnd = batchEnd;
fbZOrder = fbZ;
}
}
return fbZOrder;
}
bool MDPComp::markLayersForCaching(hwc_context_t* ctx,
hwc_display_contents_1_t* list) {
/* Idea is to keep as many non-updating(cached) layers in FB and
* send rest of them through MDP. This is done in 2 steps.
* 1. Find the maximum contiguous batch of non-updating layers.
* 2. See if we can improve this batch size for caching by adding
* opaque layers around the batch, if they don't have
* any overlapping with the updating layers in between.
* NEVER mark an updating layer for caching.
* But cached ones can be marked for MDP */
int maxBatchStart = -1; int maxBatchStart = -1;
int maxBatchEnd = -1; int maxBatchEnd = -1;
int maxBatchCount = 0; int maxBatchCount = 0;
int fbZ = -1;
/* All or Nothing is cached. No batching needed */ /* All or Nothing is cached. No batching needed */
if(!mCurrentFrame.fbCount) { if(!mCurrentFrame.fbCount) {
@@ -721,33 +833,13 @@ bool MDPComp::batchLayers(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
return true; return true;
} }
/* Search for max number of contiguous (cached) layers excluding dropped fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount);
* layers */
int i = 0;
while (i < mCurrentFrame.layerCount) {
int count = 0;
int start = i;
while(mCurrentFrame.isFBComposed[i] && i < mCurrentFrame.layerCount) {
if(!mCurrentFrame.drop[i])
count++;
i++;
}
if(count > maxBatchCount) {
maxBatchCount = count;
maxBatchStart = start;
maxBatchEnd = i - 1;
mCurrentFrame.fbZ = maxBatchStart;
}
if(i < mCurrentFrame.layerCount) i++;
}
mCurrentFrame.fbCount = maxBatchCount; /* reset rest of the layers lying inside ROI for MDP comp */
/* reset rest of the layers lying inside ROI for MDP comp */
for(int i = 0; i < mCurrentFrame.layerCount; i++) { for(int i = 0; i < mCurrentFrame.layerCount; i++) {
hwc_layer_1_t* layer = &list->hwLayers[i]; hwc_layer_1_t* layer = &list->hwLayers[i];
if((i < maxBatchStart || i > maxBatchEnd) && if((i < maxBatchStart || i > maxBatchEnd) &&
mCurrentFrame.isFBComposed[i]){ mCurrentFrame.isFBComposed[i]){
if(!mCurrentFrame.drop[i]){ if(!mCurrentFrame.drop[i]){
//If an unsupported layer is being attempted to //If an unsupported layer is being attempted to
//be pulled out we should fail //be pulled out we should fail
@@ -759,11 +851,14 @@ bool MDPComp::batchLayers(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
} }
} }
// update the frame data
mCurrentFrame.fbZ = fbZ;
mCurrentFrame.fbCount = maxBatchCount;
mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
mCurrentFrame.fbCount - mCurrentFrame.dropCount; mCurrentFrame.fbCount - mCurrentFrame.dropCount;
ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
mCurrentFrame.fbCount); mCurrentFrame.fbCount);
return true; return true;
} }
@@ -838,24 +933,25 @@ bool MDPComp::programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
return false; return false;
} }
bool fbBatch = false;
for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount; for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
index++) { index++) {
if(!mCurrentFrame.isFBComposed[index]) { if(!mCurrentFrame.isFBComposed[index]) {
int mdpIndex = mCurrentFrame.layerToMDP[index]; int mdpIndex = mCurrentFrame.layerToMDP[index];
hwc_layer_1_t* layer = &list->hwLayers[index]; hwc_layer_1_t* layer = &list->hwLayers[index];
//Leave fbZ for framebuffer. CACHE/GLES layers go here.
if(mdpNextZOrder == mCurrentFrame.fbZ) {
mdpNextZOrder++;
}
MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
cur_pipe->zOrder = mdpNextZOrder++; cur_pipe->zOrder = mdpNextZOrder++;
if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){ if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \ ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
layer %d",__FUNCTION__, index); layer %d",__FUNCTION__, index);
return false; return false;
} }
} else if(fbBatch == false && !mCurrentFrame.drop[index]) {
mdpNextZOrder++;
fbBatch = true;
} }
} }

View File

@@ -168,8 +168,17 @@ protected:
/* tracks non updating layers*/ /* tracks non updating layers*/
void updateLayerCache(hwc_context_t* ctx, hwc_display_contents_1_t* list); void updateLayerCache(hwc_context_t* ctx, hwc_display_contents_1_t* list);
/* optimize layers for mdp comp*/ /* optimize layers for mdp comp*/
bool batchLayers(hwc_context_t *ctx, hwc_display_contents_1_t* list); bool markLayersForCaching(hwc_context_t* ctx,
/* updates cache map with YUV info */ hwc_display_contents_1_t* list);
int getBatch(hwc_display_contents_1_t* list,
int& maxBatchStart, int& maxBatchEnd,
int& maxBatchCount);
bool canPushBatchToTop(const hwc_display_contents_1_t* list,
int fromIndex, int toIndex);
bool intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
int fromIndex, int toIndex, int targetLayerIndex);
/* updates cache map with YUV info */
void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list, void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
bool secureOnly); bool secureOnly);
bool programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list); bool programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list);

View File

@@ -923,6 +923,13 @@ void calculate_crop_rects(hwc_rect_t& crop, hwc_rect_t& dst,
crop_b -= crop_h * bottomCutRatio; crop_b -= crop_h * bottomCutRatio;
} }
bool areLayersIntersecting(const hwc_layer_1_t* layer1,
const hwc_layer_1_t* layer2) {
hwc_rect_t irect = getIntersection(layer1->displayFrame,
layer2->displayFrame);
return isValidRect(irect);
}
bool isValidRect(const hwc_rect& rect) bool isValidRect(const hwc_rect& rect)
{ {
return ((rect.bottom > rect.top) && (rect.right > rect.left)) ; return ((rect.bottom > rect.top) && (rect.right > rect.left)) ;

View File

@@ -186,13 +186,14 @@ int getBlending(int blending);
void dumpsys_log(android::String8& buf, const char* fmt, ...); void dumpsys_log(android::String8& buf, const char* fmt, ...);
int getExtOrientation(hwc_context_t* ctx); int getExtOrientation(hwc_context_t* ctx);
bool isValidRect(const hwc_rect_t& rect); bool isValidRect(const hwc_rect_t& rect);
void deductRect(const hwc_layer_1_t* layer, hwc_rect_t& irect); void deductRect(const hwc_layer_1_t* layer, hwc_rect_t& irect);
hwc_rect_t getIntersection(const hwc_rect_t& rect1, const hwc_rect_t& rect2); hwc_rect_t getIntersection(const hwc_rect_t& rect1, const hwc_rect_t& rect2);
hwc_rect_t getUnion(const hwc_rect_t& rect1, const hwc_rect_t& rect2); hwc_rect_t getUnion(const hwc_rect_t& rect1, const hwc_rect_t& rect2);
void optimizeLayerRects(hwc_context_t *ctx, void optimizeLayerRects(hwc_context_t *ctx,
const hwc_display_contents_1_t *list, const int& dpy); const hwc_display_contents_1_t *list, const int& dpy);
bool areLayersIntersecting(const hwc_layer_1_t* layer1,
const hwc_layer_1_t* layer2);
/* Calculates the destination position based on the action safe rectangle */ /* Calculates the destination position based on the action safe rectangle */
void getActionSafePosition(hwc_context_t *ctx, int dpy, hwc_rect_t& dst); void getActionSafePosition(hwc_context_t *ctx, int dpy, hwc_rect_t& dst);