Commit 62605e11 authored by Nicolas Dufresne

v4l2: Fixup USERPTR/DMABUF capture support

parent 7f8eff78
......@@ -489,9 +489,110 @@ gst_v4l2_allocator_probe (GstV4l2Allocator * allocator, guint32 memory,
return flags;
}
/* Ask the driver for one additional buffer via VIDIOC_CREATE_BUFS and wrap
 * it in a new memory group. Returns the group, or NULL when the allocator
 * is inactive, CREATE_BUFS is unusable, or the ioctl failed. Takes the
 * allocator object lock for the duration of the call. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_create_buf (GstV4l2Allocator * allocator)
{
  struct v4l2_create_buffers bcreate = { 0 };
  GstV4l2MemoryGroup *group = NULL;

  GST_OBJECT_LOCK (allocator);

  /* Only proceed while started and while the driver supports CREATE_BUFS */
  if (allocator->active && allocator->can_allocate) {
    bcreate.memory = allocator->memory;
    bcreate.format = allocator->format;
    bcreate.count = 1;

    if (v4l2_ioctl (allocator->video_fd, VIDIOC_CREATE_BUFS, &bcreate) < 0) {
      GST_WARNING_OBJECT (allocator, "error creating a new buffer: %s",
          g_strerror (errno));
    } else {
      /* The driver chose the index; remember the group under it */
      group = gst_v4l2_memory_group_new (allocator, bcreate.index);

      if (group != NULL) {
        allocator->groups[bcreate.index] = group;
        allocator->count++;
      }
    }
  }

  GST_OBJECT_UNLOCK (allocator);

  return group;
}
/* Pop a free memory group, creating a fresh one from the driver when the
 * free queue is empty and CREATE_BUFS is still believed to work.
 * Returns NULL if the allocator is stopped or no group is available. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc (GstV4l2Allocator * allocator)
{
  GstV4l2MemoryGroup *group = NULL;

  if (g_atomic_int_get (&allocator->active)) {
    group = gst_atomic_queue_pop (allocator->free_queue);

    if (group == NULL && allocator->can_allocate) {
      group = gst_v4l2_allocator_create_buf (allocator);

      /* Don't hammer on CREATE_BUFS */
      if (group == NULL)
        allocator->can_allocate = FALSE;
    }
  }

  return group;
}
/* Restore every memory in @group to the full image size advertised by the
 * negotiated format, undoing any gst_memory_resize() done while the buffer
 * was in use. */
static void
gst_v4l2_allocator_reset_size (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  if (V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    gint plane;

    /* One memory per plane, each with its own sizeimage */
    for (plane = 0; plane < group->n_mem; plane++)
      gst_memory_resize (group->mem[plane], 0,
          allocator->format.fmt.pix_mp.plane_fmt[plane].sizeimage);
  } else {
    gst_memory_resize (group->mem[0], 0, allocator->format.fmt.pix.sizeimage);
  }
}
/* Undo a partially-completed allocation so the group is recoverable. */
static void
_cleanup_failed_alloc (GstV4l2Allocator * allocator, GstV4l2MemoryGroup * group)
{
  gint i;

  if (group->mems_allocated == 0) {
    /* No memory was created: the group must sit on the free queue
     * for _stop() to find it. */
    gst_atomic_queue_push (allocator->free_queue, group);
    return;
  }

  /* One or more mmaps worked: release those refs, otherwise they keep a
   * ref on the allocator and leak it. Dropping the last ref puts the
   * group back onto the free_queue. */
  for (i = 0; i < group->n_mem; i++)
    gst_memory_unref (group->mem[i]);
}
GstV4l2Allocator *
gst_v4l2_allocator_new (GstObject * parent, gint video_fd,
struct v4l2_format * format)
struct v4l2_format *format)
{
GstV4l2Allocator *allocator;
guint32 flags = 0;
......@@ -658,85 +759,6 @@ reqbufs_failed:
}
}
/* Allocate one extra buffer from the driver with VIDIOC_CREATE_BUFS and
 * register it as a new memory group.
 * Returns the new group, or NULL when the allocator is inactive, the
 * driver lacks CREATE_BUFS support, or the ioctl failed.
 * Takes the allocator object lock itself; call unlocked. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_create_buf (GstV4l2Allocator * allocator)
{
  struct v4l2_create_buffers bcreate = { 0 };
  GstV4l2MemoryGroup *group = NULL;

  GST_OBJECT_LOCK (allocator);

  /* Nothing to create once the allocator has been stopped */
  if (!allocator->active)
    goto done;

  bcreate.memory = allocator->memory;
  bcreate.format = allocator->format;
  bcreate.count = 1;

  /* CREATE_BUFS previously failed or is unsupported; bail out quietly */
  if (!allocator->can_allocate)
    goto done;

  if (v4l2_ioctl (allocator->video_fd, VIDIOC_CREATE_BUFS, &bcreate) < 0)
    goto create_bufs_failed;

  /* The driver picked the buffer index; track the group under it */
  group = gst_v4l2_memory_group_new (allocator, bcreate.index);

  if (group) {
    allocator->groups[bcreate.index] = group;
    allocator->count++;
  }

done:
  GST_OBJECT_UNLOCK (allocator);
  return group;

create_bufs_failed:
  {
    GST_WARNING_OBJECT (allocator, "error creating a new buffer: %s",
        g_strerror (errno));
    goto done;
  }
}
/* Hand out a free memory group. When the free queue is empty and the
 * driver supports CREATE_BUFS, a new group is allocated on demand.
 * Returns NULL if the allocator is inactive or no group is available. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc (GstV4l2Allocator * allocator)
{
  GstV4l2MemoryGroup *group;

  if (!g_atomic_int_get (&allocator->active))
    return NULL;

  group = gst_atomic_queue_pop (allocator->free_queue);

  if (group == NULL) {
    if (allocator->can_allocate) {
      group = gst_v4l2_allocator_create_buf (allocator);

      /* Don't hammer on CREATE_BUFS */
      if (group == NULL)
        allocator->can_allocate = FALSE;
    }
  }

  return group;
}
/* Recover from a partially-failed allocation: either release the memories
 * that did get created, or return the untouched group to the free queue. */
static void
_cleanup_failed_alloc (GstV4l2Allocator * allocator, GstV4l2MemoryGroup * group)
{
  if (group->mems_allocated > 0) {
    gint i;

    /* If one or more mmap worked, we need to unref the memory, otherwise
     * they will keep a ref on the allocator and leak it. This will put back
     * the group into the free_queue */
    for (i = 0; i < group->n_mem; i++)
      gst_memory_unref (group->mem[i]);
  } else {
    /* Otherwise, group has to be on free queue for _stop() to work */
    gst_atomic_queue_push (allocator->free_queue, group);
  }
}
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * allocator)
{
......@@ -869,6 +891,45 @@ cleanup:
}
}
/* Drop a DMABUF import from @group: close each plane's file descriptor and
 * zero out both the GstMemory bookkeeping and the matching v4l2 plane
 * description, leaving the group ready for the next import. */
static void
gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  gint i;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_DMABUF);

  for (i = 0; i < group->n_mem; i++) {
    GstV4l2Memory *vmem = (GstV4l2Memory *) group->mem[i];

    GST_LOG_OBJECT (allocator, "clearing DMABUF import, fd %i plane %d",
        vmem->dmafd, i);

    if (vmem->dmafd >= 0)
      close (vmem->dmafd);

    /* Reset the GstMemory side */
    vmem->mem.maxsize = 0;
    vmem->mem.offset = 0;
    vmem->mem.size = 0;
    vmem->dmafd = -1;

    /* Reset the v4l2 plane description */
    group->planes[i].length = 0;
    group->planes[i].bytesused = 0;
    group->planes[i].m.fd = -1;
    group->planes[i].data_offset = 0;
  }

  /* Single-planar buffers carry the fd in the buffer struct itself */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.fd = -1;
  }
}
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator)
{
......@@ -901,6 +962,37 @@ gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator)
return group;
}
/* Drop a USERPTR import from @group: forget each plane's user pointer and
 * zero the GstMemory sizes and v4l2 plane description so the group can be
 * re-imported. The user memory itself is owned by the caller. */
static void
gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  gint i;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_USERPTR);

  for (i = 0; i < group->n_mem; i++) {
    GstV4l2Memory *vmem = (GstV4l2Memory *) group->mem[i];

    GST_LOG_OBJECT (allocator, "clearing USERPTR %p plane %d size %"
        G_GSIZE_FORMAT, vmem->data, i, vmem->mem.size);

    /* Reset the GstMemory side */
    vmem->mem.maxsize = 0;
    vmem->mem.size = 0;
    vmem->data = NULL;

    /* Reset the v4l2 plane description */
    group->planes[i].length = 0;
    group->planes[i].bytesused = 0;
    group->planes[i].m.userptr = 0;
  }

  /* Single-planar buffers carry the pointer in the buffer struct itself */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.userptr = 0;
  }
}
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_userptr (GstV4l2Allocator * allocator)
{
......@@ -1005,46 +1097,6 @@ dup_failed:
}
}
/* Release a DMABUF import held by @group: close the dmabuf fd of every
 * plane and reset the GstMemory bookkeeping plus the v4l2 plane/buffer
 * descriptions so the group can import new fds later.
 * Only valid on a V4L2_MEMORY_DMABUF allocator. */
void
gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  GstV4l2Memory *mem;
  gint i;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_DMABUF);

  for (i = 0; i < group->n_mem; i++) {
    mem = (GstV4l2Memory *) group->mem[i];

    GST_LOG_OBJECT (allocator, "clearing DMABUF import, fd %i plane %d",
        mem->dmafd, i);

    /* We own the duplicated fd; close it before forgetting it */
    if (mem->dmafd >= 0)
      close (mem->dmafd);

    /* Update memory */
    mem->mem.maxsize = 0;
    mem->mem.offset = 0;
    mem->mem.size = 0;
    mem->dmafd = -1;

    /* Update v4l2 structure */
    group->planes[i].length = 0;
    group->planes[i].bytesused = 0;
    group->planes[i].m.fd = -1;
    group->planes[i].data_offset = 0;
  }

  /* Single-planar API keeps the fd in the buffer struct itself */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.fd = -1;
  }
}
gboolean
gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup * group, gsize img_size, int n_planes,
......@@ -1111,37 +1163,6 @@ n_mem_missmatch:
}
}
/* Release a USERPTR import held by @group: forget the user pointers and
 * reset the GstMemory sizes plus the v4l2 plane/buffer descriptions so the
 * group can import new pointers later. The user memory itself belongs to
 * the caller and is not freed here.
 * Only valid on a V4L2_MEMORY_USERPTR allocator. */
void
gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  GstV4l2Memory *mem;
  gint i;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_USERPTR);

  for (i = 0; i < group->n_mem; i++) {
    mem = (GstV4l2Memory *) group->mem[i];

    GST_LOG_OBJECT (allocator, "clearing USERPTR %p plane %d size %"
        G_GSIZE_FORMAT, mem->data, i, mem->mem.size);

    /* Reset the memory bookkeeping */
    mem->mem.maxsize = 0;
    mem->mem.size = 0;
    mem->data = NULL;

    /* Reset the v4l2 plane description */
    group->planes[i].length = 0;
    group->planes[i].bytesused = 0;
    group->planes[i].m.userptr = 0;
  }

  /* Single-planar API keeps the pointer in the buffer struct itself */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.userptr = 0;
  }
}
void
gst_v4l2_allocator_flush (GstV4l2Allocator * allocator)
{
......@@ -1322,21 +1343,21 @@ error:
}
void
gst_v4l2_allocator_reset_size (GstV4l2Allocator * allocator,
gst_v4l2_allocator_reset_group (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup * group)
{
gsize size;
if (V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
gint i;
for (i = 0; i < group->n_mem; i++) {
size = allocator->format.fmt.pix_mp.plane_fmt[i].sizeimage;
gst_memory_resize (group->mem[i], 0, size);
}
} else {
size = allocator->format.fmt.pix.sizeimage;
gst_memory_resize (group->mem[0], 0, size);
switch (allocator->memory) {
case V4L2_MEMORY_USERPTR:
gst_v4l2_allocator_clear_userptr (allocator, group);
break;
case V4L2_MEMORY_DMABUF:
gst_v4l2_allocator_clear_dmabufin (allocator, group);
break;
case V4L2_MEMORY_MMAP:
gst_v4l2_allocator_reset_size (allocator, group);
break;
default:
g_assert_not_reached ();
break;
}
}
......@@ -134,18 +134,11 @@ gboolean gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * alloc
GstV4l2MemoryGroup *group,
gint n_mem, GstMemory ** dma_mem);
void gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group);
gboolean gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group,
gsize img_size, int n_planes,
gpointer * data, gsize * offset);
void gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group);
void gst_v4l2_allocator_flush (GstV4l2Allocator * allocator);
gboolean gst_v4l2_allocator_qbuf (GstV4l2Allocator * allocator,
......@@ -153,7 +146,7 @@ gboolean gst_v4l2_allocator_qbuf (GstV4l2Allocator * alloc
GstV4l2MemoryGroup* gst_v4l2_allocator_dqbuf (GstV4l2Allocator * allocator);
void gst_v4l2_allocator_reset_size (GstV4l2Allocator * allocator,
void gst_v4l2_allocator_reset_group (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup * group);
G_END_DECLS
......
......@@ -1334,8 +1334,12 @@ gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool, GstBuffer * buffer)
GST_LOG_OBJECT (pool, "buffer %u not queued, putting on free list",
index);
/* Remove qdata, this will unmap any map data in userptr */
gst_mini_object_set_qdata (GST_MINI_OBJECT (buffer),
GST_V4L2_IMPORT_QUARK, NULL, NULL);
/* reset to default size */
gst_v4l2_allocator_reset_size (pool->vallocator, group);
gst_v4l2_allocator_reset_group (pool->vallocator, group);
/* playback, put the buffer back in the queue to refill later. */
GST_BUFFER_POOL_CLASS (parent_class)->release_buffer (bpool,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment