[gbinder] Add support for FMQ (Fast Message Queue). JB#54946

Matti Lehtimäki
2021-10-28 14:27:57 +03:00
parent 7f12f1a476
commit a1616163e7
10 changed files with 1130 additions and 0 deletions
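As an illustration of the API this commit adds (declared in include/gbinder_fmq.h below), a minimal round trip through a synchronized queue could look roughly like this. The sketch is not part of the commit and omits error handling:

#include <gbinder_fmq.h>

/* Sketch only: a synchronized queue of 64 guint32 items with an event flag,
 * backed by shared memory allocated internally (fd = -1, buffer_size unused) */
static void
fmq_round_trip(void)
{
    GBinderFmq* fmq = gbinder_fmq_new(sizeof(guint32), 64,
        GBINDER_FMQ_TYPE_SYNC_READ_WRITE,
        GBINDER_FMQ_FLAG_CONFIGURE_EVENT_FLAG, -1, 0);

    if (fmq) {
        guint32 out = 42, in = 0;

        if (gbinder_fmq_write(fmq, &out, 1)) {
            /* Tell the reader that data has arrived */
            gbinder_fmq_wake(fmq, 0x01);
        }
        if (gbinder_fmq_read(fmq, &in, 1)) {
            /* in == 42 */
        }
        gbinder_fmq_unref(fmq);
    }
}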

View File

@@ -82,6 +82,7 @@ SRC = \
gbinder_config.c \
gbinder_driver.c \
gbinder_eventloop.c \
gbinder_fmq.c \
gbinder_io_32.c \
gbinder_io_64.c \
gbinder_ipc.c \

View File

@@ -38,6 +38,7 @@
#include "gbinder_bridge.h"
#include "gbinder_buffer.h"
#include "gbinder_client.h"
#include "gbinder_fmq.h"
#include "gbinder_local_object.h"
#include "gbinder_local_reply.h"
#include "gbinder_local_request.h"

include/gbinder_fmq.h (new file, 149 lines)
View File

@@ -0,0 +1,149 @@
/*
* Copyright (C) 2021 Jolla Ltd.
*
* You may use this file under the terms of BSD license as follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GBINDER_FMQ_H
#define GBINDER_FMQ_H
#include <gbinder_types.h>
G_BEGIN_DECLS
/* Since 1.1.14 */
typedef enum gbinder_fmq_type {
GBINDER_FMQ_TYPE_SYNC_READ_WRITE = 1,
GBINDER_FMQ_TYPE_UNSYNC_WRITE
} GBINDER_FMQ_TYPE;
typedef enum gbinder_fmq_flags {
GBINDER_FMQ_FLAG_CONFIGURE_EVENT_FLAG = 0x1,
GBINDER_FMQ_FLAG_NO_RESET_POINTERS = 0x2
} GBINDER_FMQ_FLAGS;
GBinderFmq*
gbinder_fmq_new(
gsize item_size,
gsize max_num_items,
GBINDER_FMQ_TYPE type,
GBINDER_FMQ_FLAGS flags,
gint fd,
gsize buffer_size);
GBinderFmq*
gbinder_fmq_ref(
GBinderFmq* self);
void
gbinder_fmq_unref(
GBinderFmq* self);
/* Functions for checking how many items are available in the queue */
gsize
gbinder_fmq_available_to_read(
GBinderFmq* self);
gsize
gbinder_fmq_available_to_write(
GBinderFmq* self);
gsize
gbinder_fmq_available_to_read_contiguous(
GBinderFmq* self);
gsize
gbinder_fmq_available_to_write_contiguous(
GBinderFmq* self);
/* Functions for obtaining a data pointer for zero-copy read/write */
const void*
gbinder_fmq_begin_read(
GBinderFmq* self,
gsize items);
void*
gbinder_fmq_begin_write(
GBinderFmq* self,
gsize items);
/* Functions for ending a zero-copy read/write.
* The number of items must match the value provided to gbinder_fmq_begin_read
* or gbinder_fmq_begin_write. */
void
gbinder_fmq_end_read(
GBinderFmq* self,
gsize items);
void
gbinder_fmq_end_write(
GBinderFmq* self,
gsize items);
/* Regular read/write functions (non-zero-copy) */
gboolean
gbinder_fmq_read(
GBinderFmq* self,
void* data,
gsize items);
gboolean
gbinder_fmq_write(
GBinderFmq* self,
const void* data,
gsize items);
/* Functions for waiting on and waking up the message queue.
* These require an event flag configured in the message queue */
int
gbinder_fmq_wait_timeout(
GBinderFmq* self,
guint32 bit_mask,
guint32* state,
guint timeout_ms);
#define gbinder_fmq_wait(fmq, mask, state) \
gbinder_fmq_wait_timeout(fmq, mask, state, 0)
int
gbinder_fmq_wake(
GBinderFmq* self,
guint32 bit_mask);
G_END_DECLS
#endif /* GBINDER_FMQ_H */
/*
* Local Variables:
* mode: C
* c-basic-offset: 4
* indent-tabs-mode: nil
* End:
*/
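To make the zero-copy declarations above concrete, here is a minimal sketch (not part of the commit) assuming a GBinderFmq* that carries guint32 items. The pointers returned by gbinder_fmq_begin_read() and gbinder_fmq_begin_write() do not wrap around the ring buffer, hence the contiguous-availability checks:

#include <gbinder_fmq.h>
#include <string.h>

/* Sketch only. Writer side: copy 'count' items straight into the ring. */
static gboolean
fmq_write_zero_copy(GBinderFmq* fmq, const guint32* src, gsize count)
{
    if (gbinder_fmq_available_to_write_contiguous(fmq) >= count) {
        guint32* slot = gbinder_fmq_begin_write(fmq, count);

        if (slot) {
            memcpy(slot, src, count * sizeof(guint32));
            gbinder_fmq_end_write(fmq, count);
            return TRUE;
        }
    }
    return FALSE;
}

/* Reader side: consume 'count' items in place. */
static gboolean
fmq_read_zero_copy(GBinderFmq* fmq, guint32* dst, gsize count)
{
    if (gbinder_fmq_available_to_read_contiguous(fmq) >= count) {
        const guint32* items = gbinder_fmq_begin_read(fmq, count);

        if (items) {
            memcpy(dst, items, count * sizeof(guint32));
            gbinder_fmq_end_read(fmq, count);
            return TRUE;
        }
    }
    return FALSE;
}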

View File

@@ -62,6 +62,7 @@ G_BEGIN_DECLS
typedef struct gbinder_bridge GBinderBridge; /* Since 1.1.5 */
typedef struct gbinder_buffer GBinderBuffer;
typedef struct gbinder_client GBinderClient;
typedef struct gbinder_fmq GBinderFmq; /* Since 1.1.14 */
typedef struct gbinder_ipc GBinderIpc;
typedef struct gbinder_local_object GBinderLocalObject;
typedef struct gbinder_local_reply GBinderLocalReply;

View File

@@ -119,6 +119,12 @@ gbinder_writer_append_fd(
GBinderWriter* writer,
int fd); /* Since 1.0.18 */
void
gbinder_writer_append_fds(
GBinderWriter* writer,
const GBinderFds *fds,
const GBinderParent* parent); /* Since 1.1.14 */
gsize
gbinder_writer_bytes_written(
GBinderWriter* writer); /* Since 1.0.21 */
@@ -181,6 +187,11 @@ gbinder_writer_append_byte_array(
const void* byte_array,
gint32 len); /* Since 1.0.12 */
void
gbinder_writer_append_fmq_descriptor(
GBinderWriter* writer,
const GBinderFmq* queue); /* Since 1.1.14 */
/* Note: memory allocated by GBinderWriter is owned by GBinderWriter */
void*
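The new gbinder_writer_append_fmq_descriptor() call is meant to hand the queue descriptor to a remote HIDL service. A hedged sketch of how that might look with an existing GBinderClient; CONFIGURE_FMQ is a hypothetical transaction code and depends on the interface being called:

#include <gbinder.h>

#define CONFIGURE_FMQ 1 /* hypothetical transaction code */

/* Sketch only: send the FMQ descriptor and wait for the reply status */
static int
fmq_send_descriptor(GBinderClient* client, GBinderFmq* fmq)
{
    GBinderLocalRequest* req = gbinder_client_new_request(client);
    GBinderWriter writer;
    int status = -1;

    gbinder_local_request_init_writer(req, &writer);
    gbinder_writer_append_fmq_descriptor(&writer, fmq);
    gbinder_remote_reply_unref(gbinder_client_transact_sync_reply(client,
        CONFIGURE_FMQ, req, &status));
    gbinder_local_request_unref(req);
    return status;
}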

src/gbinder_fmq.c (new file, 763 lines)
View File

@@ -0,0 +1,763 @@
/*
* Copyright (C) 2021 Jolla Ltd.
*
* You may use this file under the terms of BSD license as follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "gbinder_fmq_p.h"
#include "gbinder_log.h"
#include <gutil_macros.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/futex.h>
#include <linux/memfd.h>
#include <stdatomic.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
/* Private API */
/* Grantor data positions */
enum {
READ_PTR_POS = 0,
WRITE_PTR_POS,
DATA_PTR_POS,
EVENT_FLAG_PTR_POS
};
typedef struct gbinder_fmq {
GBinderMQDescriptor* desc;
guint8* ring;
_Atomic guint64* read_ptr;
_Atomic guint64* write_ptr;
_Atomic guint32* event_flag_ptr;
guint32 refcount;
} GBinderFmq;
GBinderMQDescriptor*
gbinder_fmq_get_descriptor(
const GBinderFmq* self)
{
return self->desc;
}
GBINDER_INLINE_FUNC
GBinderFmqGrantorDescriptor*
gbinder_fmq_get_grantor_descriptor(
GBinderFmq* self,
gint index)
{
return &((GBinderFmqGrantorDescriptor *)(
self->desc->grantors.data.ptr))[index];
}
static
gsize
gbinder_fmq_available_to_read_bytes(
GBinderFmq* self,
gboolean contiguous)
{
gsize available;
guint64 read_ptr =
atomic_load_explicit(self->read_ptr, memory_order_acquire);
gsize available_to_read =
atomic_load_explicit(self->write_ptr, memory_order_acquire) - read_ptr;
if (contiguous) {
gsize size =
gbinder_fmq_get_grantor_descriptor(self, DATA_PTR_POS)->extent;
gsize read_offset = read_ptr % size;
/* The number of bytes that can be read contiguously from
* read_offset without wrapping around the ring buffer */
gsize available_to_read_contiguous = size - read_offset;
available = available_to_read_contiguous < available_to_read ?
available_to_read_contiguous : available_to_read;
} else {
available = available_to_read;
}
return available;
}
static
gsize
gbinder_fmq_available_to_write_bytes(
GBinderFmq* self,
gboolean contiguous)
{
gsize available;
gsize available_to_write =
gbinder_fmq_get_grantor_descriptor(self, DATA_PTR_POS)->extent -
gbinder_fmq_available_to_read_bytes(self, FALSE);
if (contiguous) {
guint64 write_ptr = atomic_load_explicit(self->write_ptr,
memory_order_relaxed);
gsize size =
gbinder_fmq_get_grantor_descriptor(self, DATA_PTR_POS)->extent;
gsize write_offset = write_ptr % size;
/* The number of bytes that can be written contiguously starting from
* write_offset without wrapping around the ring buffer */
gsize available_to_write_contiguous = size - write_offset;
available = available_to_write_contiguous < available_to_write ?
available_to_write_contiguous : available_to_write;
} else {
available = available_to_write;
}
return available;
}
static
GBinderFmqGrantorDescriptor*
gbinder_fmq_create_grantors(
gsize queue_size_bytes,
gsize num_fds,
gboolean configure_event_flag)
{
gsize num_grantors = configure_event_flag ? EVENT_FLAG_PTR_POS + 1
: DATA_PTR_POS + 1;
GBinderFmqGrantorDescriptor* grantors =
g_new0(GBinderFmqGrantorDescriptor, num_grantors);
gsize mem_sizes[] = {
sizeof(guint64), /* read pointer counter */
sizeof(guint64), /* write pointer counter */
queue_size_bytes, /* data buffer */
sizeof(guint32) /* event flag pointer */
};
gsize grantor_pos, offset;
for (grantor_pos = 0, offset = 0; grantor_pos < num_grantors;
grantor_pos++) {
GBinderFmqGrantorDescriptor *grantor = &grantors[grantor_pos];
guint32 grantor_fd_index;
gsize grantor_offset;
if (grantor_pos == DATA_PTR_POS && num_fds == 2) {
grantor_fd_index = 1;
grantor_offset = 0;
} else {
grantor_fd_index = 0;
grantor_offset = offset;
offset += mem_sizes[grantor_pos];
}
grantor->fd_index = grantor_fd_index;
grantor->offset = (guint32)(G_ALIGN8(grantor_offset));
grantor->extent = mem_sizes[grantor_pos];
}
return grantors;
}
static
void*
gbinder_fmq_map_grantor_descriptor(
GBinderFmq* self,
guint32 index)
{
const GBinderFds* fds = self->desc->data.fds;
gint fd_index;
gint map_offset;
gint map_length;
void* address;
void* ptr = NULL;
if (index >= self->desc->grantors.count) {
GWARN("grantor index must be less than %d", self->desc->grantors.count);
} else {
fd_index = gbinder_fmq_get_grantor_descriptor(self, index)->fd_index;
/* Offset for mmap must be a multiple of PAGE_SIZE */
map_offset = (gbinder_fmq_get_grantor_descriptor(self, index)->offset /
getpagesize()) * getpagesize();
map_length = gbinder_fmq_get_grantor_descriptor(self, index)->offset -
map_offset +
gbinder_fmq_get_grantor_descriptor(self, index)->extent;
address = mmap(0, map_length, PROT_READ | PROT_WRITE, MAP_SHARED,
gbinder_fds_get_fd(fds, fd_index), map_offset);
if (address == MAP_FAILED) {
GWARN("mmap failed: %d", errno);
} else {
ptr = (guint8*)(address) +
(gbinder_fmq_get_grantor_descriptor(self, index)->offset -
map_offset);
}
}
return ptr;
}
static
void
gbinder_fmq_unmap_grantor_descriptor(
GBinderFmq* self,
void* address,
guint32 index)
{
gint map_offset;
gint map_length;
void* base_address;
if (!address) {
/* Nothing was mapped for this grantor (e.g. no event flag) */
} else if (index >= self->desc->grantors.count) {
GWARN("grantor index must be less than %d", self->desc->grantors.count);
} else {
map_offset = (gbinder_fmq_get_grantor_descriptor(self, index)->offset /
getpagesize()) * getpagesize();
map_length =
gbinder_fmq_get_grantor_descriptor(self, index)->offset - map_offset
+ gbinder_fmq_get_grantor_descriptor(self, index)->extent;
base_address = (guint8*)(address) -
(gbinder_fmq_get_grantor_descriptor(self, index)->offset -
map_offset);
if (base_address) {
munmap(base_address, map_length);
}
}
}
static
void
gbinder_fmq_free(
GBinderFmq* self)
{
if (self->desc) {
if (self->desc->flags == GBINDER_FMQ_TYPE_UNSYNC_WRITE) {
g_free(self->read_ptr);
} else {
gbinder_fmq_unmap_grantor_descriptor(self, self->read_ptr,
READ_PTR_POS);
}
gbinder_fmq_unmap_grantor_descriptor(self, self->write_ptr,
WRITE_PTR_POS);
gbinder_fmq_unmap_grantor_descriptor(self, self->ring,
DATA_PTR_POS);
gbinder_fmq_unmap_grantor_descriptor(self, self->event_flag_ptr,
EVENT_FLAG_PTR_POS);
g_free((GBinderFmqGrantorDescriptor*)self->desc->grantors.data.ptr);
g_free((GBinderFds*)self->desc->data.fds);
g_free(self->desc);
}
g_slice_free(GBinderFmq, self);
}
/* Public API */
GBinderFmq*
gbinder_fmq_new(
gsize item_size,
gsize num_items,
GBINDER_FMQ_TYPE type,
GBINDER_FMQ_FLAGS flags,
gint fd,
gsize buffer_size)
{
GBinderFmq* self = NULL;
if (item_size <= 0) {
GWARN("Incorrect item size");
} else if (num_items <= 0) {
GWARN("Empty queue requested");
} else if (num_items > SIZE_MAX / item_size) {
GWARN("Requested message queue size too large");
} else if (fd != -1 && num_items * item_size > buffer_size) {
GWARN("The size needed for items (%"G_GSIZE_FORMAT") is larger\
than the supplied buffer size (%"G_GSIZE_FORMAT")",
num_items * item_size, buffer_size);
} else {
gboolean configure_event_flag =
(flags & GBINDER_FMQ_FLAG_CONFIGURE_EVENT_FLAG) != 0;
gsize queue_size_bytes = num_items * item_size;
gsize meta_data_size;
gsize shmem_size;
int shmem_fd;
self = g_slice_new0(GBinderFmq);
meta_data_size = 2 * sizeof(guint64);
if (configure_event_flag) {
meta_data_size += sizeof(guint32);
}
/* Allocate shared memory */
if (fd != -1) {
/* The ring buffer memory was supplied by the caller, so
* allocate shared memory only for the meta data */
shmem_size = (meta_data_size + getpagesize() - 1) &
~(getpagesize() - 1);
} else {
/* Allocate ringbuffer, read counter and write counter */
shmem_size = (G_ALIGN8(queue_size_bytes) +
meta_data_size + getpagesize() - 1) & ~(getpagesize() - 1);
}
shmem_fd = syscall(__NR_memfd_create, "MessageQueue", MFD_CLOEXEC);
if (shmem_fd < 0 || ftruncate(shmem_fd, shmem_size) < 0) {
GWARN("Failed to create shared memory file");
gbinder_fmq_free(self);
self = NULL;
} else {
GBinderFmqGrantorDescriptor* grantors;
gsize num_fds = (fd != -1) ? 2 : 1;
gsize fds_size = sizeof(GBinderFds) + sizeof(int) * num_fds;
GBinderFds* fds = (GBinderFds*)g_malloc0(fds_size);
fds->version = fds_size;
fds->num_fds = num_fds;
(((int*)((fds) + 1))[0]) = shmem_fd;
if (fd != -1) {
/* Use user-supplied file descriptor for fd_index 1 */
(((int*)((fds) + 1))[1]) = fd;
}
grantors = gbinder_fmq_create_grantors(
queue_size_bytes, num_fds, configure_event_flag);
/* Fill FMQ descriptor */
self->desc = g_new0(GBinderMQDescriptor, 1);
self->desc->data.fds = fds;
self->desc->quantum = item_size;
self->desc->flags = type;
self->desc->grantors.data.ptr = grantors;
self->desc->grantors.count =
configure_event_flag ? EVENT_FLAG_PTR_POS + 1
: DATA_PTR_POS + 1;
self->desc->grantors.owns_buffer = TRUE;
/* Initialize memory pointers */
if (type == GBINDER_FMQ_TYPE_SYNC_READ_WRITE) {
self->read_ptr = (_Atomic guint64*)(
gbinder_fmq_map_grantor_descriptor(self, READ_PTR_POS));
} else {
/* Unsynchronized write FMQs may have multiple readers and
* each reader would have their own read pointer counter */
self->read_ptr = g_new0(_Atomic guint64, 1);
}
if (self->read_ptr == NULL) {
GWARN("Read pointer is null");
}
self->write_ptr = (_Atomic guint64*)(
gbinder_fmq_map_grantor_descriptor(self, WRITE_PTR_POS));
if (self->write_ptr == NULL) {
GWARN("Write pointer is null");
}
if ((flags & GBINDER_FMQ_FLAG_NO_RESET_POINTERS) == 0) {
atomic_store_explicit(self->read_ptr, 0, memory_order_release);
atomic_store_explicit(self->write_ptr, 0, memory_order_release);
} else if (type != GBINDER_FMQ_TYPE_SYNC_READ_WRITE) {
/* Always reset the read pointer */
atomic_store_explicit(self->read_ptr, 0, memory_order_release);
}
self->ring = (guint8 *)(gbinder_fmq_map_grantor_descriptor(self,
DATA_PTR_POS));
if (self->ring == NULL) {
GWARN("Ring buffer pointer is null");
}
if (self->desc->grantors.count > EVENT_FLAG_PTR_POS) {
self->event_flag_ptr = (_Atomic guint32*)(
gbinder_fmq_map_grantor_descriptor(self,
EVENT_FLAG_PTR_POS));
if (self->event_flag_ptr == NULL) {
GWARN("Event flag pointer is null");
}
}
g_atomic_int_set(&self->refcount, 1);
}
}
return self;
}
GBinderFmq*
gbinder_fmq_ref(
GBinderFmq* self)
{
if (G_LIKELY(self)) {
GASSERT(self->refcount > 0);
g_atomic_int_inc(&self->refcount);
}
return self;
}
void
gbinder_fmq_unref(
GBinderFmq* self)
{
if (G_LIKELY(self)) {
GASSERT(self->refcount > 0);
if (g_atomic_int_dec_and_test(&self->refcount)) {
gbinder_fmq_free(self);
}
}
}
gsize
gbinder_fmq_available_to_read(
GBinderFmq* self)
{
gsize ret = 0;
if (G_LIKELY(self)) {
ret = gbinder_fmq_available_to_read_bytes(self, FALSE) /
self->desc->quantum;
}
return ret;
}
gsize
gbinder_fmq_available_to_write(
GBinderFmq* self)
{
gsize ret = 0;
if (G_LIKELY(self)) {
ret = gbinder_fmq_available_to_write_bytes(self, FALSE) /
self->desc->quantum;
}
return ret;
}
gsize
gbinder_fmq_available_to_read_contiguous(
GBinderFmq* self)
{
gsize ret = 0;
if (G_LIKELY(self)) {
ret = gbinder_fmq_available_to_read_bytes(self, TRUE) /
self->desc->quantum;
}
return ret;
}
gsize
gbinder_fmq_available_to_write_contiguous(
GBinderFmq* self)
{
gsize ret = 0;
if (G_LIKELY(self)) {
ret = gbinder_fmq_available_to_write_bytes(self, TRUE) /
self->desc->quantum;
}
return ret;
}
const void*
gbinder_fmq_begin_read(
GBinderFmq* self,
gsize items)
{
void* ptr = NULL;
if (G_LIKELY(self) && G_LIKELY(items > 0)) {
gsize size = gbinder_fmq_get_grantor_descriptor(self,
DATA_PTR_POS)->extent;
gsize item_size = self->desc->quantum;
gsize bytes_desired = items * item_size;
gsize read_offset;
guint64 write_ptr = atomic_load_explicit(self->write_ptr,
memory_order_acquire);
guint64 read_ptr = atomic_load_explicit(self->read_ptr,
memory_order_relaxed);
if (write_ptr % item_size != 0 || read_ptr % item_size != 0) {
GWARN("Unable to write data because of misaligned pointer");
} else if (write_ptr - read_ptr > size) {
atomic_store_explicit(self->read_ptr, write_ptr,
memory_order_release);
} else if (write_ptr - read_ptr < bytes_desired) {
/* Not enough data to read in FMQ. */
} else {
read_offset = read_ptr % size;
ptr = self->ring + read_offset;
}
}
return ptr;
}
void*
gbinder_fmq_begin_write(
GBinderFmq* self,
gsize items)
{
void* ptr = NULL;
if (G_LIKELY(self) && G_LIKELY(items > 0)) {
gsize size = gbinder_fmq_get_grantor_descriptor(self,
DATA_PTR_POS)->extent;
gsize item_size = self->desc->quantum;
if ((self->desc->flags == GBINDER_FMQ_TYPE_SYNC_READ_WRITE &&
(gbinder_fmq_available_to_write(self) < items)) ||
items > gbinder_fmq_get_grantor_descriptor(self,
DATA_PTR_POS)->extent / item_size) {
/* Incorrect parameters */
} else {
guint64 write_ptr = atomic_load_explicit(self->write_ptr,
memory_order_relaxed);
if (write_ptr % item_size != 0) {
GWARN("The write pointer has become misaligned.");
} else {
ptr = self->ring + (write_ptr % size);
}
}
}
return ptr;
}
void
gbinder_fmq_end_read(
GBinderFmq* self,
gsize items)
{
if (G_LIKELY(self) && G_LIKELY(items > 0)) {
gsize size = gbinder_fmq_get_grantor_descriptor(self,
DATA_PTR_POS)->extent;
guint64 read_ptr = atomic_load_explicit(self->read_ptr,
memory_order_relaxed);
guint64 write_ptr = atomic_load_explicit(self->write_ptr,
memory_order_acquire);
/* If queue type is unsynchronized, it is possible that a write overflow
* may have occurred */
if (write_ptr - read_ptr > size) {
atomic_store_explicit(self->read_ptr, write_ptr,
memory_order_release);
} else {
read_ptr += items * self->desc->quantum;
atomic_store_explicit(self->read_ptr, read_ptr,
memory_order_release);
}
}
}
void
gbinder_fmq_end_write(
GBinderFmq* self,
gsize items)
{
if (G_LIKELY(self) && G_LIKELY(items > 0)) {
guint64 write_ptr = atomic_load_explicit(self->write_ptr,
memory_order_relaxed);
write_ptr += items * self->desc->quantum;
atomic_store_explicit(self->write_ptr, write_ptr, memory_order_release);
}
}
gboolean
gbinder_fmq_read(
GBinderFmq* self,
void* data,
gsize items)
{
gboolean ret = FALSE;
if (G_LIKELY(self) && G_LIKELY(data) && G_LIKELY(items > 0)) {
const void *in_data = gbinder_fmq_begin_read(self, items);
if (in_data) {
gsize item_size = self->desc->quantum;
/* The number of messages that can be read contiguously without
* wrapping around the ring buffer */
gsize contiguous_messages =
gbinder_fmq_available_to_read_contiguous(self);
if (contiguous_messages < items) {
/* A wrap around is required */
memcpy(data, in_data, contiguous_messages * item_size);
memcpy((char *)data + contiguous_messages * item_size /
sizeof(char), self->ring,
(items - contiguous_messages) * item_size);
} else {
/* A wrap around is not required */
memcpy(data, in_data, items * item_size);
}
gbinder_fmq_end_read(self, items);
ret = TRUE;
}
}
return ret;
}
gboolean
gbinder_fmq_write(
GBinderFmq* self,
const void* data,
gsize items)
{
gboolean ret = FALSE;
if (G_LIKELY(self) && G_LIKELY(data) && G_LIKELY(items > 0)) {
void *out_data = gbinder_fmq_begin_write(self, items);
if (out_data) {
gsize item_size = self->desc->quantum;
/* The number of messages that can be written contiguously without
* wrapping around the ring buffer */
gsize contiguous_messages =
gbinder_fmq_available_to_write_contiguous(self);
if (contiguous_messages < items) {
/* A wrap around is required. */
memcpy(out_data, data, contiguous_messages * item_size);
memcpy(self->ring, (char *)data + contiguous_messages *
item_size / sizeof(char),
(items - contiguous_messages) * item_size);
} else {
/* A wrap around is not required to write items */
memcpy(out_data, data, items * item_size);
}
gbinder_fmq_end_write(self, items);
ret = TRUE;
}
}
return ret;
}
int
gbinder_fmq_wait_timeout(
GBinderFmq* self,
guint32 bit_mask,
guint32* state,
guint timeout_ms)
{
int ret = 0;
if (G_LIKELY(self) && G_LIKELY(state)) {
/* Event flag is not configured */
if (self->event_flag_ptr == NULL) {
ret = -ENOSYS;
} else if (bit_mask == 0 || state == NULL) {
ret = -EINVAL;
} else {
guint32 old_value = atomic_fetch_and(self->event_flag_ptr,
~bit_mask);
guint32 set_bits = old_value & bit_mask;
/* Check if any of the bits was already set */
if (set_bits != 0) {
*state = set_bits;
} else {
if (timeout_ms > 0) {
struct timespec wait_time;
clock_gettime(CLOCK_MONOTONIC, &wait_time);
guint64 ns_in_sec = 1000000000;
wait_time.tv_sec += timeout_ms / 1000;
wait_time.tv_nsec += (timeout_ms % 1000) * 1000000;
if (wait_time.tv_nsec >= ns_in_sec) {
wait_time.tv_sec++;
wait_time.tv_nsec -= ns_in_sec;
}
ret = syscall(__NR_futex, self->event_flag_ptr,
FUTEX_WAIT_BITSET, old_value, &wait_time, NULL,
bit_mask);
} else {
ret = syscall(__NR_futex, self->event_flag_ptr,
FUTEX_WAIT_BITSET, old_value, NULL, NULL, bit_mask);
}
if (ret != -1) {
old_value = atomic_fetch_and(self->event_flag_ptr,
~bit_mask);
*state = old_value & bit_mask;
if (*state == 0) {
/* Woken up without any of the requested bits set */
ret = -EINTR;
} else {
ret = 0;
}
} else {
/* Report error code */
*state = 0;
ret = -errno;
}
}
}
} else {
ret = -EINVAL;
}
return ret;
}
int
gbinder_fmq_wake(
GBinderFmq* self,
guint32 bit_mask)
{
int ret = 0;
if (G_LIKELY(self)) {
if (self->event_flag_ptr == NULL) {
/* Event flag is not configured */
ret = -ENOSYS;
} else if (bit_mask == 0) {
/* Ignore zero bit mask */
} else {
/* Set bit mask only if needed */
guint32 old_value = atomic_fetch_or(self->event_flag_ptr, bit_mask);
if ((~old_value & bit_mask) != 0) {
ret = syscall(__NR_futex, self->event_flag_ptr,
FUTEX_WAKE_BITSET, G_MAXUINT32, NULL, NULL, bit_mask);
}
if (ret == -1) {
/* Report error code */
ret = -errno;
}
}
} else {
ret = -EINVAL;
}
return ret;
}
/*
* Local Variables:
* mode: C
* c-basic-offset: 4
* indent-tabs-mode: nil
* End:
*/
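To illustrate the futex-based signalling implemented above: a reader can block on the event flag and then drain the queue roughly as follows. This is a sketch, assuming the queue was created with GBINDER_FMQ_FLAG_CONFIGURE_EVENT_FLAG and that the writer signals bit 0x01 with gbinder_fmq_wake():

#include <gbinder_fmq.h>
#include <glib.h>

/* Sketch only. Block for up to 100 ms for the writer's wake-up bit, then
 * drain whatever is available; gbinder_fmq_wait_timeout() returns 0 when at
 * least one of the requested bits was set, a negative errno otherwise. */
static void
fmq_drain(GBinderFmq* fmq)
{
    guint32 state = 0;

    if (gbinder_fmq_wait_timeout(fmq, 0x01, &state, 100) == 0) {
        guint32 item;

        while (gbinder_fmq_read(fmq, &item, 1)) {
            g_print("got %u\n", (guint) item);
        }
    }
}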

src/gbinder_fmq_p.h (new file, 81 lines)
View File

@@ -0,0 +1,81 @@
/*
* Copyright (C) 2021 Jolla Ltd.
*
* You may use this file under the terms of BSD license as follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GBINDER_FMQ_PRIVATE_H
#define GBINDER_FMQ_PRIVATE_H
#include <gbinder_fmq.h>
#include "gbinder_types_p.h"
typedef struct gbinder_fmq_grantor_descriptor {
guint32 flags GBINDER_ALIGNED(4);
guint32 fd_index GBINDER_ALIGNED(4);
guint32 offset GBINDER_ALIGNED(4);
guint64 extent GBINDER_ALIGNED(8);
} GBinderFmqGrantorDescriptor;
G_STATIC_ASSERT(G_STRUCT_OFFSET(GBinderFmqGrantorDescriptor, flags) == 0);
G_STATIC_ASSERT(G_STRUCT_OFFSET(GBinderFmqGrantorDescriptor, fd_index) == 4);
G_STATIC_ASSERT(G_STRUCT_OFFSET(GBinderFmqGrantorDescriptor, offset) == 8);
G_STATIC_ASSERT(G_STRUCT_OFFSET(GBinderFmqGrantorDescriptor, extent) == 16);
G_STATIC_ASSERT(sizeof(GBinderFmqGrantorDescriptor) == 24);
typedef struct gbinder_mq_descriptor {
GBinderHidlVec grantors;
union {
guint64 value;
const GBinderFds* fds;
} data;
guint32 quantum;
guint32 flags;
} GBinderMQDescriptor;
#define GBINDER_MQ_DESCRIPTOR_GRANTORS_OFFSET (0)
#define GBINDER_MQ_DESCRIPTOR_FDS_OFFSET (16)
G_STATIC_ASSERT(G_STRUCT_OFFSET(GBinderMQDescriptor, grantors) == GBINDER_MQ_DESCRIPTOR_GRANTORS_OFFSET);
G_STATIC_ASSERT(G_STRUCT_OFFSET(GBinderMQDescriptor, data) == GBINDER_MQ_DESCRIPTOR_FDS_OFFSET);
G_STATIC_ASSERT(sizeof(GBinderMQDescriptor) == 32);
GBinderMQDescriptor*
gbinder_fmq_get_descriptor(
const GBinderFmq* self)
GBINDER_INTERNAL;
#endif /* GBINDER_FMQ_PRIVATE_H */
/*
* Local Variables:
* mode: C
* c-basic-offset: 4
* indent-tabs-mode: nil
* End:
*/
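For orientation, the grantor table built by gbinder_fmq_create_grantors() maps onto the descriptor structures above as follows. This is an illustrative summary derived from the code, for the default case of an internally allocated queue of N data bytes with an event flag configured:

/*
 * All four grantors reference the same memfd (fd_index 0):
 *
 *   grantor[READ_PTR_POS]        offset 0                 extent 8  read counter
 *   grantor[WRITE_PTR_POS]       offset 8                 extent 8  write counter
 *   grantor[DATA_PTR_POS]        offset 16                extent N  ring buffer
 *   grantor[EVENT_FLAG_PTR_POS]  offset G_ALIGN8(16 + N)  extent 4  event flag
 *
 * When the caller supplies its own ring buffer fd, the data grantor instead
 * refers to fd_index 1 at offset 0.
 */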

View File

@@ -215,6 +215,23 @@ GBINDER_IO_FN(encode_fd_object)(
return sizeof(*dest);
}
static
guint
GBINDER_IO_FN(encode_fda_object)(
void* out,
const GBinderFds *fds,
const GBinderParent* parent)
{
struct binder_fd_array_object* dest = out;
memset(dest, 0, sizeof(*dest));
dest->hdr.type = BINDER_TYPE_FDA;
dest->num_fds = fds->num_fds;
dest->parent = parent->index;
dest->parent_offset = parent->offset;
return sizeof(*dest);
}
/* Encodes binder_buffer_object */
static
guint
@@ -637,6 +654,7 @@ const GBinderIo GBINDER_IO_PREFIX = {
.encode_local_object = GBINDER_IO_FN(encode_local_object),
.encode_remote_object = GBINDER_IO_FN(encode_remote_object),
.encode_fd_object = GBINDER_IO_FN(encode_fd_object),
.encode_fda_object = GBINDER_IO_FN(encode_fda_object),
.encode_buffer_object = GBINDER_IO_FN(encode_buffer_object),
.encode_handle_cookie = GBINDER_IO_FN(encode_handle_cookie),
.encode_ptr_cookie = GBINDER_IO_FN(encode_ptr_cookie),

View File

@@ -146,6 +146,8 @@ struct gbinder_io {
guint (*encode_local_object)(void* out, GBinderLocalObject* obj);
guint (*encode_remote_object)(void* out, GBinderRemoteObject* obj);
guint (*encode_fd_object)(void* out, int fd);
guint (*encode_fda_object)(void* out, const GBinderFds *fds,
const GBinderParent* parent);
/* Encode binder_buffer_object */
#define GBINDER_MAX_BUFFER_OBJECT_SIZE (40)

View File

@@ -32,6 +32,7 @@
#include "gbinder_writer_p.h"
#include "gbinder_buffer_p.h"
#include "gbinder_fmq_p.h"
#include "gbinder_local_object.h"
#include "gbinder_object_converter.h"
#include "gbinder_io.h"
@@ -610,6 +611,53 @@ gbinder_writer_append_fd(
}
}
void
gbinder_writer_data_append_fda_object(
GBinderWriterData* data,
const GBinderFds *fds,
const GBinderParent* parent)
{
GByteArray* buf = data->bytes;
const guint offset = buf->len;
guint written;
/* Preallocate enough space */
g_byte_array_set_size(buf, offset + GBINDER_MAX_BINDER_OBJECT_SIZE);
written = data->io->encode_fda_object(buf->data + offset, fds, parent);
/* Fix the data size */
g_byte_array_set_size(buf, offset + written);
/* Record the offset */
gbinder_writer_data_record_offset(data, offset);
}
void
gbinder_writer_data_append_fds(
GBinderWriterData* data,
const GBinderFds *fds,
const GBinderParent* parent)
{
/* If the pointer is NULL, only write a zero size */
if (!fds) {
gbinder_writer_data_append_int64(data, 0);
return;
}
/* Write the fds information: size, fds data buffer and fd_array_object */
const gsize fds_total = sizeof(GBinderFds) +
sizeof(int) * (fds->num_fds + fds->num_ints);
GBinderParent fds_parent;
gbinder_writer_data_append_int64(data, fds_total);
fds_parent.index = gbinder_writer_data_append_buffer_object(data,
fds, fds_total, parent);
fds_parent.offset = sizeof(GBinderFds);
gbinder_writer_data_append_fda_object(data, fds, &fds_parent);
}
guint
gbinder_writer_append_buffer_object_with_parent(
GBinderWriter* self,
@@ -926,6 +974,61 @@ gbinder_writer_append_byte_array(
}
}
void
gbinder_writer_data_append_fmq_descriptor(
GBinderWriterData* data,
const GBinderFmq* queue)
{
GBinderParent parent;
GBinderMQDescriptor* desc = gbinder_fmq_get_descriptor(queue);
GBinderMQDescriptor* mqdesc = gutil_memdup(desc,
sizeof(GBinderMQDescriptor));
const gsize vec_total =
desc->grantors.count * sizeof(GBinderFmqGrantorDescriptor);
void* vec_buf = gutil_memdup(desc->grantors.data.ptr, vec_total);
const gsize fds_total = sizeof(GBinderFds) +
sizeof(int) * (desc->data.fds->num_fds + desc->data.fds->num_ints);
GBinderFds* fds = gutil_memdup(desc->data.fds, fds_total);
mqdesc->data.fds = fds;
data->cleanup = gbinder_cleanup_add(data->cleanup, g_free, fds);
/* Fill in the grantor vector descriptor */
if (vec_buf) {
mqdesc->grantors.count = desc->grantors.count;
mqdesc->grantors.data.ptr = vec_buf;
mqdesc->grantors.owns_buffer = TRUE;
data->cleanup = gbinder_cleanup_add(data->cleanup, g_free, vec_buf);
}
data->cleanup = gbinder_cleanup_add(data->cleanup, g_free, mqdesc);
/* Write the FMQ descriptor object */
parent.index = gbinder_writer_data_append_buffer_object(data,
mqdesc, sizeof(*mqdesc), NULL);
/* Write the vector data buffer */
parent.offset = GBINDER_MQ_DESCRIPTOR_GRANTORS_OFFSET;
gbinder_writer_data_append_buffer_object(data, vec_buf, vec_total,
&parent);
/* Write the fds */
parent.offset = GBINDER_MQ_DESCRIPTOR_FDS_OFFSET;
gbinder_writer_data_append_fds(data, mqdesc->data.fds, &parent);
}
void
gbinder_writer_append_fmq_descriptor(
GBinderWriter* self,
const GBinderFmq* queue) /* since 1.1.14 */
{
GBinderWriterData* data = gbinder_writer_data(self);
if (G_LIKELY(data) && G_LIKELY(queue)) {
gbinder_writer_data_append_fmq_descriptor(data, queue);
}
}
void
gbinder_writer_data_append_remote_object(
GBinderWriterData* data,