gralloc: Remove opaque types
Remove opaque types such as size_t, uintptr_t, and intptr_t so that 32-bit and 64-bit processes can work together. When a 64-bit process creates a handle and a 32-bit process validates the incoming ints against the expected ints, opaque types lead to different, mismatching values. Always use uint64_t for the base address so 32-bit and 64-bit SurfaceFlinger (SF) agree, and use unsigned int for offset and size, since that is what ION uses.

Change-Id: I7db5544556a8924f98010b965f837592e9f0b4ca
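As a standalone illustration of the mismatch described above (not part of the patch), the snippet below only prints type widths; building it once as a 32-bit binary and once as a 64-bit binary (for example with -m32/-m64, assuming a GCC or Clang toolchain) shows which types the commit removes and which fixed-width types it standardizes on:

```cpp
// Minimal sketch: which of these types change width between a 32-bit and a
// 64-bit process. The first group is ABI-dependent, the second is not.
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main() {
    // Opaque types: 4 bytes in a 32-bit process, 8 in a 64-bit one, so the
    // ints a 64-bit allocator writes into a handle do not line up with what
    // a 32-bit consumer expects when it validates the handle.
    printf("size_t=%zu uintptr_t=%zu intptr_t=%zu\n",
           sizeof(size_t), sizeof(uintptr_t), sizeof(intptr_t));
    // Fixed-width replacements used by this change: identical on every ABI.
    printf("unsigned int=%zu uint64_t=%zu\n",
           sizeof(unsigned int), sizeof(uint64_t));
    return 0;
}
```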
@@ -394,9 +394,9 @@ bool isMacroTileEnabled(int format, int usage)
 }
 
 // helper function
-size_t getSize(int format, int width, int height, const int alignedw,
+unsigned int getSize(int format, int width, int height, const int alignedw,
         const int alignedh) {
-    size_t size = 0;
+    unsigned int size = 0;
 
     switch (format) {
         case HAL_PIXEL_FORMAT_RGBA_8888:
@@ -433,7 +433,7 @@ size_t getSize(int format, int width, int height, const int alignedw,
             }
             size = alignedw*alignedh +
                 (ALIGN(alignedw/2, 16) * (alignedh/2))*2;
-            size = ALIGN(size, (size_t)4096);
+            size = ALIGN(size, (unsigned int)4096);
             break;
         case HAL_PIXEL_FORMAT_YCbCr_420_SP:
         case HAL_PIXEL_FORMAT_YCrCb_420_SP:
@@ -501,10 +501,10 @@ size_t getSize(int format, int width, int height, const int alignedw,
     return size;
 }
 
-size_t getBufferSizeAndDimensions(int width, int height, int format,
+unsigned int getBufferSizeAndDimensions(int width, int height, int format,
         int& alignedw, int &alignedh)
 {
-    size_t size;
+    unsigned int size;
 
     AdrenoMemInfo::getInstance().getAlignedWidthAndHeight(width,
             height,
@@ -519,10 +519,10 @@ size_t getBufferSizeAndDimensions(int width, int height, int format,
 }
 
 
-size_t getBufferSizeAndDimensions(int width, int height, int format, int usage,
-        int& alignedw, int &alignedh)
+unsigned int getBufferSizeAndDimensions(int width, int height, int format,
+        int usage, int& alignedw, int &alignedh)
 {
-    size_t size;
+    unsigned int size;
     int tileEnabled = isMacroTileEnabled(format, usage);
 
     AdrenoMemInfo::getInstance().getAlignedWidthAndHeight(width,
@@ -539,7 +539,7 @@ size_t getBufferSizeAndDimensions(int width, int height, int format, int usage,
 
 
 void getBufferAttributes(int width, int height, int format, int usage,
-        int& alignedw, int &alignedh, int& tileEnabled, size_t& size)
+        int& alignedw, int &alignedh, int& tileEnabled, unsigned int& size)
 {
     tileEnabled = isMacroTileEnabled(format, usage);
 
@@ -555,7 +555,7 @@ void getBufferAttributes(int width, int height, int format, int usage,
 int getYUVPlaneInfo(private_handle_t* hnd, struct android_ycbcr* ycbcr)
 {
     int err = 0;
-    size_t ystride, cstride;
+    unsigned int ystride, cstride;
     memset(ycbcr->reserved, 0, sizeof(ycbcr->reserved));
 
     // Get the chroma offsets from the handle width/height. We take advantage
@@ -647,7 +647,7 @@ int alloc_buffer(private_handle_t **pHnd, int w, int h, int format, int usage)
     private_handle_t* hnd = new private_handle_t(data.fd, data.size,
                                                  data.allocType, 0, format,
                                                  alignedw, alignedh);
-    hnd->base = (uintptr_t) data.base;
+    hnd->base = (uint64_t) data.base;
    hnd->offset = data.offset;
    hnd->gpuaddr = 0;
    *pHnd = hnd;
@@ -41,7 +41,7 @@ struct private_module_t {
     uint32_t fbFormat;
     uint32_t flags;
     uint32_t numBuffers;
-    size_t bufferMask;
+    uint32_t bufferMask;
     pthread_mutex_t lock;
     private_handle_t *currentBuffer;
     struct fb_var_screeninfo info;
@@ -87,7 +87,8 @@ static int fb_post(struct framebuffer_device_t* dev, buffer_handle_t buffer)
             reinterpret_cast<private_module_t*>(dev->common.module);
     private_handle_t *hnd = static_cast<private_handle_t*>
             (const_cast<native_handle_t*>(buffer));
-    const size_t offset = hnd->base - m->framebuffer->base;
+    const unsigned int offset = (unsigned int) (hnd->base -
+        m->framebuffer->base);
     m->info.activate = FB_ACTIVATE_VBL;
     m->info.yoffset = (int)(offset / m->finfo.line_length);
     if (ioctl(m->framebuffer->fd, FBIOPUT_VSCREENINFO, &m->info) == -1) {
@@ -204,7 +205,7 @@ int mapFrameBufferLocked(struct private_module_t* module)
     }
 
     //adreno needs 4k aligned offsets. Max hole size is 4096-1
-    size_t size = roundUpToPageSize(info.yres * info.xres *
+    unsigned int size = roundUpToPageSize(info.yres * info.xres *
                                     (info.bits_per_pixel/8));
 
     /*
@@ -326,7 +327,7 @@ int mapFrameBufferLocked(struct private_module_t* module)
     module->numBuffers = info.yres_virtual / info.yres;
     module->bufferMask = 0;
     //adreno needs page aligned offsets. Align the fbsize to pagesize.
-    size_t fbSize = roundUpToPageSize(finfo.line_length * info.yres)*
+    unsigned int fbSize = roundUpToPageSize(finfo.line_length * info.yres)*
                     module->numBuffers;
     module->framebuffer = new private_handle_t(fd, fbSize,
                                         private_handle_t::PRIV_FLAGS_USES_ION,
@@ -338,7 +339,7 @@ int mapFrameBufferLocked(struct private_module_t* module)
         close(fd);
         return -errno;
     }
-    module->framebuffer->base = uintptr_t(vaddr);
+    module->framebuffer->base = uint64_t(vaddr);
     memset(vaddr, 0, fbSize);
     //Enable vsync
     int enable = 1;
@@ -51,7 +51,7 @@ gpu_context_t::gpu_context_t(const private_module_t* module,
 
 }
 
-int gpu_context_t::gralloc_alloc_buffer(size_t size, int usage,
+int gpu_context_t::gralloc_alloc_buffer(unsigned int size, int usage,
                                         buffer_handle_t* pHandle, int bufferType,
                                         int format, int width, int height)
 {
@@ -152,13 +152,13 @@ int gpu_context_t::gralloc_alloc_buffer(size_t size, int usage,
     }
 
     flags |= data.allocType;
-    uintptr_t eBaseAddr = (uintptr_t)(eData.base) + eData.offset;
+    uint64_t eBaseAddr = (uint64_t)(eData.base) + eData.offset;
     private_handle_t *hnd = new private_handle_t(data.fd, size, flags,
             bufferType, format, width, height, eData.fd, eData.offset,
             eBaseAddr);
 
     hnd->offset = data.offset;
-    hnd->base = (uintptr_t)(data.base) + data.offset;
+    hnd->base = (uint64_t)(data.base) + data.offset;
     hnd->gpuaddr = 0;
     setMetaData(hnd, UPDATE_COLOR_SPACE, (void*) &colorSpace);
 
@@ -199,9 +199,9 @@ int gpu_context_t::gralloc_alloc_framebuffer_locked(int usage,
         return -EINVAL;
     }
 
-    const size_t bufferMask = m->bufferMask;
+    const unsigned int bufferMask = m->bufferMask;
     const uint32_t numBuffers = m->numBuffers;
-    size_t bufferSize = m->finfo.line_length * m->info.yres;
+    unsigned int bufferSize = m->finfo.line_length * m->info.yres;
 
     //adreno needs FB size to be page aligned
     bufferSize = roundUpToPageSize(bufferSize);
@@ -221,7 +221,7 @@ int gpu_context_t::gralloc_alloc_framebuffer_locked(int usage,
     }
 
     // create a "fake" handle for it
-    uintptr_t vaddr = uintptr_t(m->framebuffer->base);
+    uint64_t vaddr = uint64_t(m->framebuffer->base);
     private_handle_t* hnd = new private_handle_t(
         dup(m->framebuffer->fd), bufferSize,
         private_handle_t::PRIV_FLAGS_USES_PMEM |
@@ -238,7 +238,7 @@ int gpu_context_t::gralloc_alloc_framebuffer_locked(int usage,
         vaddr += bufferSize;
     }
     hnd->base = vaddr;
-    hnd->offset = vaddr - uintptr_t(m->framebuffer->base);
+    hnd->offset = (unsigned int)(vaddr - m->framebuffer->base);
     *pHandle = hnd;
     return 0;
 }
@@ -256,11 +256,11 @@ int gpu_context_t::gralloc_alloc_framebuffer(int usage,
 
 int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
                               buffer_handle_t* pHandle, int* pStride,
-                              size_t bufferSize) {
+                              unsigned int bufferSize) {
     if (!pHandle || !pStride)
         return -EINVAL;
 
-    size_t size;
+    unsigned int size;
     int alignedw, alignedh;
     int grallocFormat = format;
     int bufferType;
@@ -287,7 +287,7 @@ int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
     size = getBufferSizeAndDimensions(w, h, grallocFormat, usage, alignedw,
                                       alignedh);
 
-    if ((ssize_t)size <= 0)
+    if ((unsigned int)size <= 0)
         return -EINVAL;
     size = (bufferSize >= size)? bufferSize : size;
 
@@ -319,19 +319,20 @@ int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
 int gpu_context_t::free_impl(private_handle_t const* hnd) {
     private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
     if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
-        const size_t bufferSize = m->finfo.line_length * m->info.yres;
-        size_t index = (hnd->base - m->framebuffer->base) / bufferSize;
+        const unsigned int bufferSize = m->finfo.line_length * m->info.yres;
+        unsigned int index = (unsigned int) ((hnd->base - m->framebuffer->base)
+                / bufferSize);
         m->bufferMask &= ~(1LU<<index);
     } else {
 
         terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
         IMemAlloc* memalloc = mAllocCtrl->getAllocator(hnd->flags);
-        int err = memalloc->free_buffer((void*)hnd->base, (size_t) hnd->size,
+        int err = memalloc->free_buffer((void*)hnd->base, hnd->size,
                                         hnd->offset, hnd->fd);
        if(err)
            return err;
        // free the metadata space
-        size_t size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
+        unsigned int size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
        err = memalloc->free_buffer((void*)hnd->base_metadata,
                                    size, hnd->offset_metadata,
                                    hnd->fd_metadata);
@@ -35,7 +35,7 @@ class gpu_context_t : public alloc_device_t {
     gpu_context_t(const private_module_t* module,
                   IAllocController* alloc_ctrl);
 
-    int gralloc_alloc_buffer(size_t size, int usage,
+    int gralloc_alloc_buffer(unsigned int size, int usage,
                              buffer_handle_t* pHandle,
                              int bufferType, int format,
                              int width, int height);
@@ -44,7 +44,7 @@ class gpu_context_t : public alloc_device_t {
 
     int alloc_impl(int w, int h, int format, int usage,
                    buffer_handle_t* pHandle, int* pStride,
-                   size_t bufferSize = 0);
+                   unsigned int bufferSize = 0);
 
     static int gralloc_alloc(alloc_device_t* dev, int w, int h,
                              int format, int usage,
@@ -33,7 +33,7 @@
 struct private_module_t;
 struct private_handle_t;
 
-inline size_t roundUpToPageSize(size_t x) {
+inline unsigned int roundUpToPageSize(unsigned int x) {
     return (x + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);
 }
 
@@ -47,16 +47,16 @@ inline Type ALIGN(Type x, Type align) {
 
 int mapFrameBufferLocked(struct private_module_t* module);
 int terminateBuffer(gralloc_module_t const* module, private_handle_t* hnd);
-size_t getBufferSizeAndDimensions(int width, int height, int format, int usage,
-        int& alignedw, int &alignedh);
-size_t getBufferSizeAndDimensions(int width, int height, int format,
+unsigned int getBufferSizeAndDimensions(int width, int height, int format,
+        int usage, int& alignedw, int &alignedh);
+unsigned int getBufferSizeAndDimensions(int width, int height, int format,
         int& alignedw, int &alignedh);
 
 
 // Attributes include aligned width, aligned height, tileEnabled and size of the buffer
 void getBufferAttributes(int width, int height, int format, int usage,
                          int& alignedw, int &alignedh,
-                         int& tileEnabled, size_t &size);
+                         int& tileEnabled, unsigned int &size);
 
 
 bool isMacroTileEnabled(int format, int usage);
@@ -211,28 +211,29 @@ struct private_handle_t : public native_handle {
         // ints
         int magic;
         int flags;
-        size_t size;
-        size_t offset;
+        unsigned int size;
+        unsigned int offset;
         int bufferType;
-        uintptr_t base;
-        size_t offset_metadata;
+        uint64_t base __attribute__((aligned(8)));
+        unsigned int offset_metadata;
         // The gpu address mapped into the mmu.
-        uintptr_t gpuaddr;
+        uint64_t gpuaddr __attribute__((aligned(8)));
         int format;
         int width;
         int height;
-        uintptr_t base_metadata;
+        uint64_t base_metadata __attribute__((aligned(8)));
 
 #ifdef __cplusplus
-        //TODO64: Revisit this on 64-bit
-        static const int sNumInts = (6 + (3 * (sizeof(size_t)/sizeof(int))) +
-                                     (3 * (sizeof(uintptr_t)/sizeof(int))));
         static const int sNumFds = 2;
+        static inline int sNumInts() {
+            return ((sizeof(private_handle_t) - sizeof(native_handle_t)) /
+                    sizeof(int)) - sNumFds;
+        }
         static const int sMagic = 'gmsm';
 
-        private_handle_t(int fd, size_t size, int flags, int bufferType,
+        private_handle_t(int fd, unsigned int size, int flags, int bufferType,
                          int format, int width, int height, int eFd = -1,
-                         size_t eOffset = 0, uintptr_t eBase = 0) :
+                         unsigned int eOffset = 0, uint64_t eBase = 0) :
             fd(fd), fd_metadata(eFd), magic(sMagic),
             flags(flags), size(size), offset(0), bufferType(bufferType),
             base(0), offset_metadata(eOffset), gpuaddr(0),
@@ -240,7 +241,7 @@ struct private_handle_t : public native_handle {
             base_metadata(eBase)
         {
             version = (int) sizeof(native_handle);
-            numInts = sNumInts;
+            numInts = sNumInts();
             numFds = sNumFds;
         }
         ~private_handle_t() {
@@ -254,15 +255,15 @@ struct private_handle_t : public native_handle {
         static int validate(const native_handle* h) {
             const private_handle_t* hnd = (const private_handle_t*)h;
             if (!h || h->version != sizeof(native_handle) ||
-                h->numInts != sNumInts || h->numFds != sNumFds ||
+                h->numInts != sNumInts() || h->numFds != sNumFds ||
                 hnd->magic != sMagic)
             {
                 ALOGD("Invalid gralloc handle (at %p): "
-                      "ver(%d/%zu) ints(%d/%d) fds(%d/%d)"
+                      "ver(%d/%u) ints(%d/%d) fds(%d/%d)"
                       "magic(%c%c%c%c/%c%c%c%c)",
                       h,
                       h ? h->version : -1, sizeof(native_handle),
-                      h ? h->numInts : -1, sNumInts,
+                      h ? h->numInts : -1, sNumInts(),
                       h ? h->numFds : -1, sNumFds,
                       hnd ? (((hnd->magic >> 24) & 0xFF)?
                              ((hnd->magic >> 24) & 0xFF) : '-') : '?',
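The private_handle_t hunks above carry the core of the fix: the old compile-time sNumInts constant was derived from sizeof(size_t) and sizeof(uintptr_t), which differ between 32-bit and 64-bit builds, while the new sNumInts() derives the count from the struct itself, whose members are now all fixed-width. A minimal sketch of the same idea on a toy handle (the toy_* names are illustrative stand-ins, not the real gralloc types):

```cpp
// Toy illustration of the sNumInts() approach: derive the int count from a
// struct whose members all have fixed widths, so 32-bit and 64-bit builds
// of this program compute the same value.
#include <stdint.h>
#include <stdio.h>

struct toy_native_handle {        // stand-in for native_handle_t
    int version;
    int numFds;
    int numInts;
};

struct toy_private_handle : toy_native_handle {  // stand-in for private_handle_t
    int fd;                       // counted as an fd, not an int
    int fd_metadata;              // counted as an fd, not an int
    int flags;
    unsigned int size;
    unsigned int offset;
    uint64_t base __attribute__((aligned(8)));          // GCC/Clang extension,
    uint64_t base_metadata __attribute__((aligned(8))); // as in the patch

    static const int sNumFds = 2;
    static int sNumInts() {
        // Everything after the native_handle header, minus the fd slots.
        return (int)(((sizeof(toy_private_handle) - sizeof(toy_native_handle)) /
                      sizeof(int)) - sNumFds);
    }
};

int main() {
    // Same value on every ABI, because no member's width depends on the ABI.
    printf("numInts = %d\n", toy_private_handle::sNumInts());
    return 0;
}
```

Because the layout no longer depends on the process's bitness, a 32-bit consumer and a 64-bit allocator agree on numInts, and validate() stops rejecting handles created on the other side.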
@@ -118,16 +118,17 @@ int IonAlloc::alloc_buffer(alloc_data& data)
     data.base = base;
     data.fd = fd_data.fd;
     ioctl(mIonFd, ION_IOC_FREE, &handle_data);
-    ALOGD_IF(DEBUG, "ion: Allocated buffer base:%p size:%zu fd:%d",
+    ALOGD_IF(DEBUG, "ion: Allocated buffer base:%p size:%u fd:%d",
          data.base, ionAllocData.len, data.fd);
     return 0;
 }
 
 
-int IonAlloc::free_buffer(void* base, size_t size, size_t offset, int fd)
+int IonAlloc::free_buffer(void* base, unsigned int size, unsigned int offset,
+        int fd)
 {
     Locker::Autolock _l(mLock);
-    ALOGD_IF(DEBUG, "ion: Freeing buffer base:%p size:%zu fd:%d",
+    ALOGD_IF(DEBUG, "ion: Freeing buffer base:%p size:%u fd:%d",
          base, size, fd);
     int err = 0;
     err = open_device();
@@ -140,7 +141,8 @@ int IonAlloc::free_buffer(void* base, size_t size, size_t offset, int fd)
     return err;
 }
 
-int IonAlloc::map_buffer(void **pBase, size_t size, size_t offset, int fd)
+int IonAlloc::map_buffer(void **pBase, unsigned int size, unsigned int offset,
+        int fd)
 {
     int err = 0;
     void *base = 0;
@@ -158,15 +160,16 @@ int IonAlloc::map_buffer(void **pBase, size_t size, size_t offset, int fd)
         ALOGE("ion: Failed to map memory in the client: %s",
               strerror(errno));
     } else {
-        ALOGD_IF(DEBUG, "ion: Mapped buffer base:%p size:%zu offset:%d fd:%d",
+        ALOGD_IF(DEBUG, "ion: Mapped buffer base:%p size:%u offset:%u fd:%d",
                  base, size, offset, fd);
     }
     return err;
 }
 
-int IonAlloc::unmap_buffer(void *base, size_t size, size_t /*offset*/)
+int IonAlloc::unmap_buffer(void *base, unsigned int size,
+        unsigned int /*offset*/)
 {
-    ALOGD_IF(DEBUG, "ion: Unmapping buffer base:%p size:%zu", base, size);
+    ALOGD_IF(DEBUG, "ion: Unmapping buffer base:%p size:%u", base, size);
     int err = 0;
     if(munmap(base, size)) {
         err = -errno;
@@ -176,7 +179,8 @@ int IonAlloc::unmap_buffer(void *base, size_t size, size_t /*offset*/)
     return err;
 
 }
-int IonAlloc::clean_buffer(void *base, size_t size, size_t offset, int fd, int op)
+int IonAlloc::clean_buffer(void *base, unsigned int size, unsigned int offset,
+        int fd, int op)
 {
     struct ion_flush_data flush_data;
     struct ion_fd_data fd_data;
@@ -198,9 +202,9 @@ int IonAlloc::clean_buffer(void *base, size_t size, size_t offset, int fd, int o
     handle_data.handle = fd_data.handle;
     flush_data.handle = fd_data.handle;
     flush_data.vaddr = base;
-    // offset and length are uint32_t
-    flush_data.offset = (uint32_t) offset;
-    flush_data.length = (uint32_t) size;
+    // offset and length are unsigned int
+    flush_data.offset = offset;
+    flush_data.length = size;
 
     struct ion_custom_data d;
     switch(op) {
@@ -41,17 +41,17 @@ class IonAlloc : public IMemAlloc {
     public:
     virtual int alloc_buffer(alloc_data& data);
 
-    virtual int free_buffer(void *base, size_t size,
-                            size_t offset, int fd);
+    virtual int free_buffer(void *base, unsigned int size,
+                            unsigned int offset, int fd);
 
-    virtual int map_buffer(void **pBase, size_t size,
-                           size_t offset, int fd);
+    virtual int map_buffer(void **pBase, unsigned int size,
+                           unsigned int offset, int fd);
 
-    virtual int unmap_buffer(void *base, size_t size,
-                             size_t offset);
+    virtual int unmap_buffer(void *base, unsigned int size,
+                             unsigned int offset);
 
-    virtual int clean_buffer(void*base, size_t size,
-                             size_t offset, int fd, int op);
+    virtual int clean_buffer(void*base, unsigned int size,
+                             unsigned int offset, int fd, int op);
 
     IonAlloc() { mIonFd = FD_INIT; }
 
@@ -62,7 +62,7 @@ static int gralloc_map(gralloc_module_t const* module,
     void *mappedAddress;
     if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) &&
         !(hnd->flags & private_handle_t::PRIV_FLAGS_SECURE_BUFFER)) {
-        size_t size = hnd->size;
+        unsigned int size = hnd->size;
         IMemAlloc* memalloc = getAllocator(hnd->flags) ;
         int err = memalloc->map_buffer(&mappedAddress, size,
                                        hnd->offset, hnd->fd);
@@ -73,7 +73,7 @@ static int gralloc_map(gralloc_module_t const* module,
             return -errno;
         }
 
-        hnd->base = intptr_t(mappedAddress) + hnd->offset;
+        hnd->base = uint64_t(mappedAddress) + hnd->offset;
         mappedAddress = MAP_FAILED;
         size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
         err = memalloc->map_buffer(&mappedAddress, size,
@@ -84,7 +84,7 @@ static int gralloc_map(gralloc_module_t const* module,
             hnd->base_metadata = 0;
             return -errno;
         }
-        hnd->base_metadata = intptr_t(mappedAddress) + hnd->offset_metadata;
+        hnd->base_metadata = uint64_t(mappedAddress) + hnd->offset_metadata;
     }
     return 0;
 }
@@ -99,7 +99,7 @@ static int gralloc_unmap(gralloc_module_t const* module,
     if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
         int err = -EINVAL;
         void* base = (void*)hnd->base;
-        size_t size = hnd->size;
+        unsigned int size = hnd->size;
         IMemAlloc* memalloc = getAllocator(hnd->flags) ;
         if(memalloc != NULL) {
             err = memalloc->unmap_buffer(base, size, hnd->offset);
@@ -307,8 +307,8 @@ int gralloc_perform(struct gralloc_module_t const* module,
         case GRALLOC_MODULE_PERFORM_CREATE_HANDLE_FROM_BUFFER:
             {
                 int fd = va_arg(args, int);
-                size_t size = va_arg(args, size_t);
-                size_t offset = va_arg(args, size_t);
+                unsigned int size = va_arg(args, unsigned int);
+                unsigned int offset = va_arg(args, unsigned int);
                 void* base = va_arg(args, void*);
                 int width = va_arg(args, int);
                 int height = va_arg(args, int);
@@ -316,13 +316,13 @@ int gralloc_perform(struct gralloc_module_t const* module,
 
                 native_handle_t** handle = va_arg(args, native_handle_t**);
                 private_handle_t* hnd = (private_handle_t*)native_handle_create(
-                    private_handle_t::sNumFds, private_handle_t::sNumInts);
+                    private_handle_t::sNumFds, private_handle_t::sNumInts());
                 hnd->magic = private_handle_t::sMagic;
                 hnd->fd = fd;
                 hnd->flags = private_handle_t::PRIV_FLAGS_USES_ION;
                 hnd->size = size;
                 hnd->offset = offset;
-                hnd->base = intptr_t(base) + offset;
+                hnd->base = uint64_t(base) + offset;
                 hnd->gpuaddr = 0;
                 hnd->width = width;
                 hnd->height = height;
@@ -43,9 +43,9 @@ enum {
 struct alloc_data {
     void *base;
     int fd;
-    size_t offset;
-    size_t size;
-    size_t align;
+    unsigned int offset;
+    unsigned int size;
+    unsigned int align;
     uintptr_t pHandle;
     bool uncached;
     unsigned int flags;
@@ -61,20 +61,20 @@ class IMemAlloc {
     virtual int alloc_buffer(alloc_data& data) = 0;
 
     // Free buffer
-    virtual int free_buffer(void *base, size_t size,
-                            size_t offset, int fd) = 0;
+    virtual int free_buffer(void *base, unsigned int size,
+                            unsigned int offset, int fd) = 0;
 
     // Map buffer
-    virtual int map_buffer(void **pBase, size_t size,
-                           size_t offset, int fd) = 0;
+    virtual int map_buffer(void **pBase, unsigned int size,
+                           unsigned int offset, int fd) = 0;
 
     // Unmap buffer
-    virtual int unmap_buffer(void *base, size_t size,
-                             size_t offset) = 0;
+    virtual int unmap_buffer(void *base, unsigned int size,
+                             unsigned int offset) = 0;
 
     // Clean and invalidate
-    virtual int clean_buffer(void *base, size_t size,
-                             size_t offset, int fd, int op) = 0;
+    virtual int clean_buffer(void *base, unsigned int size,
+                             unsigned int offset, int fd, int op) = 0;
 
     // Destructor
     virtual ~IMemAlloc() {};
@@ -47,7 +47,7 @@ IFBUpdate* IFBUpdate::getObject(hwc_context_t *ctx, const int& dpy) {
 }
 
 IFBUpdate::IFBUpdate(hwc_context_t *ctx, const int& dpy) : mDpy(dpy) {
-    size_t size = 0;
+    unsigned int size = 0;
     uint32_t xres = ctx->dpyAttr[mDpy].xres;
     uint32_t yres = ctx->dpyAttr[mDpy].yres;
     if (ctx->dpyAttr[dpy].customFBSize) {