Merge "hwc: Add support for secure RGB layer"
commit f4045c63bb
committed by Gerrit - the friendly Code Review server
@@ -678,7 +678,9 @@ bool MDPComp::tryFullFrame(hwc_context_t *ctx,
     const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
     int priDispW = ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres;

-    if(sIdleFallBack && !ctx->listStats[mDpy].secureUI) {
+    // No Idle fall back, if secure display or secure RGB layers are present
+    if(sIdleFallBack && (!ctx->listStats[mDpy].secureUI &&
+                         !ctx->listStats[mDpy].secureRGBCount)) {
         ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
         return false;
     }
@@ -1048,6 +1050,8 @@ bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
     }

     updateYUV(ctx, list, false /*secure only*/);
+    /* mark secure RGB layers for MDP comp */
+    updateSecureRGB(ctx, list);
     bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
     if(!ret) {
         ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
@@ -1238,6 +1242,64 @@ bool MDPComp::videoOnlyComp(hwc_context_t *ctx,
     return true;
 }

+/* if tryFullFrame fails, try to push all video and secure RGB layers to MDP */
+bool MDPComp::tryMDPOnlyLayers(hwc_context_t *ctx,
+        hwc_display_contents_1_t* list) {
+    const bool secureOnly = true;
+    return mdpOnlyLayersComp(ctx, list, not secureOnly) or
+            mdpOnlyLayersComp(ctx, list, secureOnly);
+
+}
+
+bool MDPComp::mdpOnlyLayersComp(hwc_context_t *ctx,
+        hwc_display_contents_1_t* list, bool secureOnly) {
+
+    if(sSimulationFlags & MDPCOMP_AVOID_MDP_ONLY_LAYERS)
+        return false;
+
+    /* Bail out if we are processing only secured video layers
+     * and we dont have any */
+    if(!isSecurePresent(ctx, mDpy) && secureOnly){
+        reset(ctx);
+        return false;
+    }
+
+    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+    mCurrentFrame.reset(numAppLayers);
+    mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
+
+    updateYUV(ctx, list, secureOnly);
+    /* mark secure RGB layers for MDP comp */
+    updateSecureRGB(ctx, list);
+
+    if(mCurrentFrame.mdpCount == 0) {
+        reset(ctx);
+        return false;
+    }
+
+    /* find the maximum batch of layers to be marked for framebuffer */
+    bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
+    if(!ret) {
+        ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
+        reset(ctx);
+        return false;
+    }
+
+    if(sEnableYUVsplit){
+        adjustForSourceSplit(ctx, list);
+    }
+
+    if(!postHeuristicsHandling(ctx, list)) {
+        ALOGD_IF(isDebug(), "post heuristic handling failed");
+        reset(ctx);
+        return false;
+    }
+
+    ALOGD_IF(sSimulationFlags,"%s: MDP_ONLY_LAYERS_COMP SUCCEEDED",
+             __FUNCTION__);
+    return true;
+}
+
 /* Checks for conditions where YUV layers cannot be bypassed */
 bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
     if(isSkipLayer(layer)) {
@@ -1271,6 +1333,27 @@ bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
     return true;
 }

+/* Checks for conditions where Secure RGB layers cannot be bypassed */
+bool MDPComp::isSecureRGBDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
+    if(isSkipLayer(layer)) {
+        ALOGD_IF(isDebug(), "%s: Secure RGB layer marked SKIP dpy %d",
+                 __FUNCTION__, mDpy);
+        return false;
+    }
+
+    if(isSecuring(ctx, layer)) {
+        ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
+        return false;
+    }
+
+    if(not isSupportedForMDPComp(ctx, layer)) {
+        ALOGD_IF(isDebug(), "%s: Unsupported secure RGB layer",
+                 __FUNCTION__);
+        return false;
+    }
+    return true;
+}
+
 /* starts at fromIndex and check for each layer to find
  * if it it has overlapping with any Updating layer above it in zorder
  * till the end of the batch. returns true if it finds any intersection */
@@ -1488,6 +1571,32 @@ void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
             mCurrentFrame.fbCount);
 }

+void MDPComp::updateSecureRGB(hwc_context_t* ctx,
+        hwc_display_contents_1_t* list) {
+    int nSecureRGBCount = ctx->listStats[mDpy].secureRGBCount;
+    for(int index = 0;index < nSecureRGBCount; index++){
+        int nSecureRGBIndex = ctx->listStats[mDpy].secureRGBIndices[index];
+        hwc_layer_1_t* layer = &list->hwLayers[nSecureRGBIndex];
+
+        if(!isSecureRGBDoable(ctx, layer)) {
+            if(!mCurrentFrame.isFBComposed[nSecureRGBIndex]) {
+                mCurrentFrame.isFBComposed[nSecureRGBIndex] = true;
+                mCurrentFrame.fbCount++;
+            }
+        } else {
+            if(mCurrentFrame.isFBComposed[nSecureRGBIndex]) {
+                mCurrentFrame.isFBComposed[nSecureRGBIndex] = false;
+                mCurrentFrame.fbCount--;
+            }
+        }
+    }
+
+    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
+            mCurrentFrame.fbCount - mCurrentFrame.dropCount;
+    ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__,
+             mCurrentFrame.fbCount);
+}
+
 hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx,
         hwc_display_contents_1_t* list){
     hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0};
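As an aside, the following is a minimal standalone sketch (not part of this commit) of the cache-map update that updateSecureRGB performs above. The Frame struct, the doable callback, and the sample indices are hypothetical stand-ins for mCurrentFrame, isSecureRGBDoable(), and the cached secureRGBIndices; only the re-marking and re-counting logic is mirrored.

// Sketch of the FB/MDP re-marking done for secure RGB layers; illustrative only.
#include <cstdio>
#include <vector>

struct Frame {                       // hypothetical slimmed-down mCurrentFrame
    std::vector<bool> isFBComposed;  // true = layer stays on GPU/FB composition
    int layerCount = 0, fbCount = 0, dropCount = 0, mdpCount = 0;
};

// Re-mark the given layer indices for MDP when doable, FB otherwise.
static void updateSecureRGB(Frame& f, const std::vector<int>& secureRGBIndices,
                            bool (*doable)(int)) {
    for (int idx : secureRGBIndices) {
        if (!doable(idx)) {
            if (!f.isFBComposed[idx]) { f.isFBComposed[idx] = true;  f.fbCount++; }
        } else {
            if (f.isFBComposed[idx])  { f.isFBComposed[idx] = false; f.fbCount--; }
        }
    }
    // mdpCount is recomputed from the totals, as in the hunk above.
    f.mdpCount = f.layerCount - f.fbCount - f.dropCount;
}

int main() {
    Frame f;
    f.layerCount = 4;
    f.isFBComposed = {true, true, true, true};       // start with everything on FB
    f.fbCount = 4;
    auto doable = [](int idx) { return idx != 3; };  // pretend layer 3 must stay on FB
    updateSecureRGB(f, {1, 3}, doable);
    std::printf("fb count: %d, mdp count: %d\n", f.fbCount, f.mdpCount);
    return 0;
}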
@@ -1709,7 +1818,10 @@ int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
     if(isFrameDoable(ctx)) {
         generateROI(ctx, list);

-        mModeOn = tryFullFrame(ctx, list) || tryVideoOnly(ctx, list);
+        // if tryFullFrame fails, try to push all video and secure RGB layers
+        // to MDP for composition.
+        mModeOn = tryFullFrame(ctx, list) || tryMDPOnlyLayers(ctx, list) ||
+                  tryVideoOnly(ctx, list);
         if(mModeOn) {
             setMDPCompLayerFlags(ctx, list);
         } else {
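For context, a tiny standalone sketch (not part of this change) of the short-circuit fallback that the new prepare() path relies on: tryFullFrame is attempted first, then tryMDPOnlyLayers, and only then tryVideoOnly. The three functions here are hypothetical stubs, not the real MDPComp strategies.

// sketch.cpp -- illustrative only; the real strategies live in MDPComp.
#include <cstdio>

// Hypothetical stand-ins for the MDPComp strategy methods.
static bool tryFullFrame()     { std::puts("full frame comp failed");    return false; }
static bool tryMDPOnlyLayers() { std::puts("video + secure RGB on MDP"); return true;  }
static bool tryVideoOnly()     { std::puts("video only comp");           return true;  }

int main() {
    // Mirrors the new ordering in MDPComp::prepare(): each strategy runs
    // only if the previous one returned false, because || short-circuits.
    bool modeOn = tryFullFrame() || tryMDPOnlyLayers() || tryVideoOnly();
    std::printf("MDP comp mode on: %s\n", modeOn ? "yes" : "no");
    return 0;
}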
@@ -74,6 +74,7 @@ protected:
         MDPCOMP_AVOID_CACHE_MDP = 0x002,
         MDPCOMP_AVOID_LOAD_MDP = 0x004,
         MDPCOMP_AVOID_VIDEO_ONLY = 0x008,
+        MDPCOMP_AVOID_MDP_ONLY_LAYERS = 0x010,
     };

     /* mdp pipe data */
@@ -190,8 +191,14 @@ protected:
     bool tryVideoOnly(hwc_context_t *ctx, hwc_display_contents_1_t* list);
     bool videoOnlyComp(hwc_context_t *ctx, hwc_display_contents_1_t* list,
            bool secureOnly);
+    /* checks for conditions where only secure RGB and video can be bypassed */
+    bool tryMDPOnlyLayers(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+    bool mdpOnlyLayersComp(hwc_context_t *ctx, hwc_display_contents_1_t* list,
+           bool secureOnly);
     /* checks for conditions where YUV layers cannot be bypassed */
     bool isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
+    /* checks for conditions where Secure RGB layers cannot be bypassed */
+    bool isSecureRGBDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
     /* checks if MDP/MDSS can process current list w.r.to HW limitations
      * All peculiar HW limitations should go here */
     bool hwLimitationsCheck(hwc_context_t* ctx, hwc_display_contents_1_t* list);
@@ -217,6 +224,9 @@ protected:
     /* updates cache map with YUV info */
     void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
             bool secureOnly);
+    /* updates cache map with secure RGB info */
+    void updateSecureRGB(hwc_context_t* ctx,
+            hwc_display_contents_1_t* list);
     /* Validates if the GPU/MDP layer split chosen by a strategy is supported
      * by MDP.
      * Sets up MDP comp data structures to reflect covnversion from layers to
@@ -840,6 +840,7 @@ void setListStats(hwc_context_t *ctx,
     ctx->listStats[dpy].yuv4k2kCount = 0;
     ctx->dpyAttr[dpy].mActionSafePresent = isActionSafePresent(ctx, dpy);
     ctx->listStats[dpy].renderBufIndexforABC = -1;
+    ctx->listStats[dpy].secureRGBCount = 0;

     resetROI(ctx, dpy);

@@ -867,6 +868,12 @@ void setListStats(hwc_context_t *ctx,

         if (isSecureBuffer(hnd)) {
             ctx->listStats[dpy].isSecurePresent = true;
+            if(not isYuvBuffer(hnd)) {
+                // cache secureRGB layer parameters like we cache for YUV layers
+                int& secureRGBCount = ctx->listStats[dpy].secureRGBCount;
+                ctx->listStats[dpy].secureRGBIndices[secureRGBCount] = (int)i;
+                secureRGBCount++;
+            }
         }

         if (isSkipLayer(&list->hwLayers[i])) {
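A self-contained sketch of the per-display bookkeeping the hunk above performs: secure non-YUV (secure RGB) layers are counted and their indices cached, mirroring the existing YUV caching. The Layer struct and its secure/yuv flags are hypothetical stand-ins for a hwc layer's private_handle_t plus isSecureBuffer()/isYuvBuffer(); only the counting logic follows the diff.

// Illustrative sketch of the secure-RGB stats gathering; not the actual hwc code.
#include <cstdio>
#include <vector>

static const int MAX_NUM_APP_LAYERS = 32;

struct Layer {          // hypothetical stand-in for a hwc layer + its gralloc handle
    bool secure;        // would come from isSecureBuffer(hnd)
    bool yuv;           // would come from isYuvBuffer(hnd)
};

struct ListStats {      // trimmed-down version of the struct extended by this commit
    bool isSecurePresent = false;
    int  secureRGBCount = 0;
    int  secureRGBIndices[MAX_NUM_APP_LAYERS] = {};
};

static void setListStats(ListStats& stats, const std::vector<Layer>& layers) {
    stats = ListStats{};                       // reset per frame, like the real code
    for (int i = 0; i < (int)layers.size(); i++) {
        if (layers[i].secure) {
            stats.isSecurePresent = true;
            if (!layers[i].yuv) {              // secure RGB: cache its index like YUV layers
                stats.secureRGBIndices[stats.secureRGBCount++] = i;
            }
        }
    }
}

int main() {
    std::vector<Layer> layers = {{false, false}, {true, true}, {true, false}, {true, false}};
    ListStats stats;
    setListStats(stats, layers);
    std::printf("secure RGB layers: %d (first index %d)\n",
                stats.secureRGBCount, stats.secureRGBIndices[0]);
    return 0;
}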
@@ -1472,22 +1479,20 @@ void setMdpFlags(hwc_context_t *ctx, hwc_layer_1_t *layer,
                 ovutils::OV_MDP_BLEND_FG_PREMULT);
     }

     if(isYuvBuffer(hnd)) {
-        if(isSecureBuffer(hnd)) {
-            ovutils::setMdpFlags(mdpFlags,
-                    ovutils::OV_MDP_SECURE_OVERLAY_SESSION);
-        }
         if(metadata && (metadata->operation & PP_PARAM_INTERLACED) &&
                 metadata->interlaced) {
             ovutils::setMdpFlags(mdpFlags,
                     ovutils::OV_MDP_DEINTERLACE);
         }
     }

+    // Mark MDP flags with SECURE_OVERLAY_SESSION for driver
+    if(isSecureBuffer(hnd)) {
+        ovutils::setMdpFlags(mdpFlags,
+                ovutils::OV_MDP_SECURE_OVERLAY_SESSION);
+    }
+
     if(isSecureDisplayBuffer(hnd)) {
-        // Secure display needs both SECURE_OVERLAY and SECURE_DISPLAY_OV
-        ovutils::setMdpFlags(mdpFlags,
-                ovutils::OV_MDP_SECURE_OVERLAY_SESSION);
         // Mark MDP flags with SECURE_DISPLAY_OVERLAY_SESSION for driver
         ovutils::setMdpFlags(mdpFlags,
                 ovutils::OV_MDP_SECURE_DISPLAY_OVERLAY_SESSION);
     }
@@ -132,6 +132,9 @@ struct ListStats {
     hwc_rect_t rRoi; //right ROI. Unused in single DSI panels.
     //App Buffer Composition index
     int renderBufIndexforABC;
+    // Secure RGB specific
+    int secureRGBCount;
+    int secureRGBIndices[MAX_NUM_APP_LAYERS];
 };

 //PTOR Comp info