Commit 92bdd596 authored by Nicolas Dufresne

v4l2: Add DMABUF and USERPTR importation

parent a114a32d
......@@ -339,6 +339,18 @@ gst_v4l2_allocator_release (GstV4l2Allocator * allocator, GstV4l2Memory * mem)
GST_LOG_OBJECT (allocator, "plane %i of buffer %u released",
mem->plane, group->buffer.index);
switch (allocator->memory) {
case V4L2_MEMORY_DMABUF:
close (mem->dmafd);
mem->dmafd = -1;
break;
case V4L2_MEMORY_USERPTR:
mem->data = NULL;
break;
default:
break;
}
/* When all memory are back, put the group back in the free queue */
if (g_atomic_int_dec_and_test (&group->mems_allocated)) {
GST_LOG_OBJECT (allocator, "buffer %u released", group->buffer.index);
......@@ -709,6 +721,22 @@ gst_v4l2_allocator_alloc (GstV4l2Allocator * allocator)
return group;
}
/* Undo a partially-completed group allocation.
 * If some plane memories were successfully set up, unref them so they do
 * not keep a reference on the allocator and leak it (releasing the last one
 * re-queues the group). If none were, push the group straight back on the
 * free queue so _stop() can still drain it. */
static void
_cleanup_failed_alloc (GstV4l2Allocator * allocator, GstV4l2MemoryGroup * group)
{
  gint n;

  if (group->mems_allocated == 0) {
    /* Nothing was set up: group must sit on the free queue for _stop() */
    gst_atomic_queue_push (allocator->free_queue, group);
    return;
  }

  for (n = 0; n < group->n_mem; n++)
    gst_memory_unref (group->mem[n]);
}
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * allocator)
{
......@@ -758,17 +786,7 @@ mmap_failed:
{
GST_ERROR_OBJECT (allocator, "Failed to mmap buffer: %s",
g_strerror (errno));
if (group->mems_allocated > 0) {
/* If one or more mmap worked, we need to unref the memory, otherwise
* they will keep a ref on the allocator and leak it. This will put back
* the group into the free_queue */
for (i = 0; i < group->n_mem; i++)
gst_memory_unref (group->mem[i]);
} else {
/* Otherwise, group has to be on free queue for _stop() to work */
gst_atomic_queue_push (allocator->free_queue, group);
}
_cleanup_failed_alloc (allocator, group);
return NULL;
}
}
......@@ -846,34 +864,283 @@ dup_failed:
}
cleanup:
{
if (group->mems_allocated > 0) {
for (i = 0; i < group->n_mem; i++)
gst_memory_unref (group->mem[i]);
_cleanup_failed_alloc (allocator, group);
return NULL;
}
}
/* gst_v4l2_allocator_alloc_dmabufin:
 * Allocate a buffer group prepared for DMABUF importation. Each plane gets
 * an empty GstV4l2Memory placeholder (no data, fd = -1) that a later
 * gst_v4l2_allocator_import_dmabuf() call fills in.
 *
 * Returns: the new group, or NULL if group allocation failed.
 */
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator)
{
  GstV4l2MemoryGroup *group;
  gint i;

  g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, NULL);

  group = gst_v4l2_allocator_alloc (allocator);
  if (group == NULL)
    return NULL;

  for (i = 0; i < group->n_mem; i++) {
    GST_LOG_OBJECT (allocator, "allocation empty DMABUF import group");

    if (group->mem[i] == NULL) {
      group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
          NULL, 0, 0, 0, 0, i, NULL, -1, group);
    } else {
      /* Reusing a previously created memory: take back the allocator ref.
       * FIX: removed a stray gst_atomic_queue_push() and a mid-loop
       * "return NULL;" left over from old code, which made the function
       * always fail after the first plane and corrupt the free queue;
       * now mirrors the sibling gst_v4l2_allocator_alloc_userptr(). */
      gst_object_ref (allocator);
    }

    group->mems_allocated++;
  }

  /* Start from a clean, empty import state */
  gst_v4l2_allocator_clear_dmabufin (allocator, group);

  return group;
}
#if 0
/* Allocate a buffer group prepared for USERPTR importation: every plane
 * receives a placeholder GstV4l2Memory that carries no data pointer yet.
 *
 * Returns: the new group, or NULL if group allocation failed. */
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_userptr (GstV4l2Allocator * allocator)
{
  GstV4l2MemoryGroup *group;
  gint plane;

  g_return_val_if_fail (allocator->memory == V4L2_MEMORY_USERPTR, NULL);

  group = gst_v4l2_allocator_alloc (allocator);
  if (!group)
    return NULL;

  for (plane = 0; plane < group->n_mem; plane++) {
    GST_LOG_OBJECT (allocator, "allocating empty USERPTR group");

    if (group->mem[plane] != NULL) {
      /* Reusing a previously created memory: take back the allocator ref */
      gst_object_ref (allocator);
    } else {
      group->mem[plane] = (GstMemory *) _v4l2mem_new (0,
          GST_ALLOCATOR (allocator), NULL, 0, 0, 0, 0, plane, NULL, -1, group);
    }

    group->mems_allocated++;
  }

  /* Start from a clean, empty import state */
  gst_v4l2_allocator_clear_userptr (allocator, group);

  return group;
}
gboolean
gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * allocator,
gint dmabuf_fd[VIDEO_MAX_PLANES])
GstV4l2MemoryGroup * group, gint n_mem, GstMemory ** dma_mem)
{
/* TODO */
return NULL;
GstV4l2Memory *mem;
gint i;
g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, FALSE);
if (group->n_mem != n_mem)
goto n_mem_missmatch;
for (i = 0; i < group->n_mem; i++) {
gint dmafd;
gsize size, offset, maxsize;
if (!gst_is_dmabuf_memory (dma_mem[i]))
goto not_dmabuf;
size = gst_memory_get_sizes (dma_mem[i], &offset, &maxsize);
if ((dmafd = dup (gst_dmabuf_memory_get_fd (dma_mem[i]))) < 0)
goto dup_failed;
GST_LOG_OBJECT (allocator, "imported DMABUF as fd %i plane %d", dmafd, i);
mem = (GstV4l2Memory *) group->mem[i];
/* Update memory */
mem->mem.maxsize = maxsize;
mem->mem.offset = offset;
mem->mem.size = size;
mem->dmafd = dmafd;
/* Update v4l2 structure */
group->planes[i].length = maxsize;
group->planes[i].bytesused = size;
group->planes[i].m.fd = dmafd;
group->planes[i].data_offset = offset;
}
/* Copy into buffer structure if not using planes */
if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
group->buffer.bytesused = group->planes[0].bytesused;
group->buffer.length = group->planes[0].length;
group->buffer.m.fd = group->planes[0].m.userptr;
} else {
group->buffer.length = group->n_mem;
}
return TRUE;
n_mem_missmatch:
{
GST_ERROR_OBJECT (allocator, "Got %i dmabuf but needed %i", n_mem,
group->n_mem);
return FALSE;
}
not_dmabuf:
{
GST_ERROR_OBJECT (allocator, "Memory %i is not of DMABUF", i);
return FALSE;
}
dup_failed:
{
GST_ERROR_OBJECT (allocator, "Failed to dup DMABUF descriptor: %s",
g_strerror (errno));
return FALSE;
}
}
GstV4l2MemoryGroup *
void
gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup * group)
{
GstV4l2Memory *mem;
gint i;
g_return_if_fail (allocator->memory == V4L2_MEMORY_DMABUF);
for (i = 0; i < group->n_mem; i++) {
mem = (GstV4l2Memory *) group->mem[i];
GST_LOG_OBJECT (allocator, "clearing DMABUF import, fd %i plane %d",
mem->dmafd, i);
if (mem->dmafd >= 0)
close (mem->dmafd);
/* Update memory */
mem->mem.maxsize = 0;
mem->mem.offset = 0;
mem->mem.size = 0;
mem->dmafd = -1;
/* Update v4l2 structure */
group->planes[i].length = 0;
group->planes[i].bytesused = 0;
group->planes[i].m.fd = -1;
group->planes[i].data_offset = 0;
}
if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
group->buffer.bytesused = 0;
group->buffer.length = 0;
group->buffer.m.fd = -1;
}
}
gboolean
gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
gpointer data[VIDEO_MAX_PLANES], gint stride[VIDEO_MAX_PLANES],
gint offset[VIDEO_MAX_PLANES])
GstV4l2MemoryGroup * group, gsize img_size, int n_planes,
gpointer * data, gsize * offset)
{
/* TODO */
return NULL;
GstV4l2Memory *mem;
gint i;
g_return_val_if_fail (allocator->memory == V4L2_MEMORY_USERPTR, FALSE);
/* TODO Support passing N plane from 1 memory to MPLANE v4l2 format */
if (n_planes != group->n_mem)
goto n_mem_missmatch;
for (i = 0; i < group->n_mem; i++) {
gsize size, maxsize;
if (V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
struct v4l2_pix_format_mplane *pix = &allocator->format.fmt.pix_mp;
maxsize = pix->plane_fmt[i].sizeimage;
} else {
maxsize = allocator->format.fmt.pix.sizeimage;
}
if ((i + 1) == n_planes) {
size = img_size - offset[i];
} else {
size = offset[i + 1] - offset[i];
}
g_assert (size <= img_size);
GST_LOG_OBJECT (allocator, "imported USERPTR %p plane %d size %"
G_GSIZE_FORMAT, data[i], i, size);
mem = (GstV4l2Memory *) group->mem[i];
mem->mem.maxsize = maxsize;
mem->mem.size = size;
mem->data = data[i];
group->planes[i].length = maxsize;
group->planes[i].bytesused = size;
group->planes[i].m.userptr = (unsigned long) data[i];
group->planes[i].data_offset = 0;
}
/* Copy into buffer structure if not using planes */
if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
group->buffer.bytesused = group->planes[0].bytesused;
group->buffer.length = group->planes[0].length;
group->buffer.m.userptr = group->planes[0].m.userptr;
} else {
group->buffer.length = group->n_mem;
}
return TRUE;
n_mem_missmatch:
{
GST_ERROR_OBJECT (allocator, "Got %i userptr plane while driver need %i",
n_planes, group->n_mem);
return FALSE;
}
}
/* Reset the USERPTR importation state of @group: detach the user data
 * pointers and zero the sizes in both the GstMemory wrappers and the v4l2
 * plane/buffer descriptors. */
void
gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  gint plane;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_USERPTR);

  for (plane = 0; plane < group->n_mem; plane++) {
    GstV4l2Memory *vmem = (GstV4l2Memory *) group->mem[plane];

    GST_LOG_OBJECT (allocator, "clearing USERPTR %p plane %d size %"
        G_GSIZE_FORMAT, vmem->data, plane, vmem->mem.size);

    vmem->mem.maxsize = 0;
    vmem->mem.size = 0;
    vmem->data = NULL;

    group->planes[plane].length = 0;
    group->planes[plane].bytesused = 0;
    group->planes[plane].m.userptr = 0;
  }

  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.userptr = 0;
  }
}
#endif
void
gst_v4l2_allocator_flush (GstV4l2Allocator * allocator)
......
......@@ -126,6 +126,26 @@ GstV4l2MemoryGroup* gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * alloc
GstV4l2MemoryGroup* gst_v4l2_allocator_alloc_dmabuf (GstV4l2Allocator * allocator,
GstAllocator * dmabuf_allocator);
GstV4l2MemoryGroup * gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator);
GstV4l2MemoryGroup * gst_v4l2_allocator_alloc_userptr (GstV4l2Allocator * allocator);
gboolean gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group,
gint n_mem, GstMemory ** dma_mem);
void gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group);
gboolean gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group,
gsize img_size, int n_planes,
gpointer * data, gsize * offset);
void gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group);
void gst_v4l2_allocator_flush (GstV4l2Allocator * allocator);
gboolean gst_v4l2_allocator_qbuf (GstV4l2Allocator * allocator,
......
This diff is collapsed.
......@@ -56,6 +56,7 @@ struct _GstV4l2BufferPool
GstV4l2Allocator *vallocator;
GstAllocator *allocator;
GstAllocationParams params;
GstBufferPool *other_pool;
guint size;
gboolean add_videometa;
......@@ -85,7 +86,10 @@ GstBufferPool * gst_v4l2_buffer_pool_new (GstV4l2Object *obj, GstCaps *c
GstFlowReturn gst_v4l2_buffer_pool_process (GstV4l2BufferPool * bpool, GstBuffer * buf);
gboolean gst_v4l2_buffer_pool_flush (GstV4l2BufferPool * pool);
gboolean gst_v4l2_buffer_pool_flush (GstV4l2BufferPool * pool);
void gst_v4l2_buffer_pool_set_other_pool (GstV4l2BufferPool * pool,
GstBufferPool * other_pool);
G_END_DECLS
......
......@@ -45,7 +45,6 @@
#include <gst/video/video.h>
GST_DEBUG_CATEGORY_EXTERN (v4l2_debug);
GST_DEBUG_CATEGORY_EXTERN (GST_CAT_PERFORMANCE);
#define GST_CAT_DEFAULT v4l2_debug
#define DEFAULT_PROP_DEVICE_NAME NULL
......@@ -2937,57 +2936,6 @@ done:
return TRUE;
}
/* gst_v4l2_object_copy:
 * Slow-path copy of @src into @dest. For raw video with a known format a
 * GstVideoFrame copy is used so strides are honoured; otherwise a plain
 * byte copy is performed.
 *
 * Returns: TRUE on success, FALSE if a buffer could not be mapped.
 */
gboolean
gst_v4l2_object_copy (GstV4l2Object * v4l2object, GstBuffer * dest,
    GstBuffer * src)
{
  const GstVideoFormatInfo *finfo = v4l2object->info.finfo;

  if (finfo && (finfo->format != GST_VIDEO_FORMAT_UNKNOWN &&
          finfo->format != GST_VIDEO_FORMAT_ENCODED)) {
    GstVideoFrame src_frame, dest_frame;

    GST_DEBUG_OBJECT (v4l2object->element, "copy video frame");

    /* FIXME This won't work if cropping apply */

    /* we have raw video, use videoframe copy to get strides right */
    if (!gst_video_frame_map (&src_frame, &v4l2object->info, src, GST_MAP_READ))
      goto invalid_buffer;

    if (!gst_video_frame_map (&dest_frame, &v4l2object->info, dest,
            GST_MAP_WRITE)) {
      gst_video_frame_unmap (&src_frame);
      goto invalid_buffer;
    }

    gst_video_frame_copy (&dest_frame, &src_frame);

    gst_video_frame_unmap (&src_frame);
    gst_video_frame_unmap (&dest_frame);
  } else {
    GstMapInfo map;
    /* hoisted: size was queried twice before */
    gsize size = gst_buffer_get_size (src);

    GST_DEBUG_OBJECT (v4l2object->element, "copy raw bytes");

    /* FIX: gst_buffer_map() return value was ignored; on failure map.data
     * would have been used uninitialized. */
    if (!gst_buffer_map (src, &map, GST_MAP_READ))
      goto invalid_buffer;

    gst_buffer_fill (dest, 0, map.data, size);
    gst_buffer_unmap (src, &map);
    gst_buffer_resize (dest, 0, size);
  }

  GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, v4l2object->element,
      "slow copy into buffer %p", dest);

  return TRUE;

  /* ERRORS */
invalid_buffer:
  {
    /* No Window available to put our image into */
    GST_WARNING_OBJECT (v4l2object->element, "could not map image");
    return FALSE;
  }
}
GstCaps *
gst_v4l2_object_get_caps (GstV4l2Object * v4l2object, GstCaps * filter)
{
......@@ -3043,12 +2991,12 @@ gboolean
gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
{
GstCaps *caps;
GstBufferPool *pool;
GstBufferPool *pool, *other_pool = NULL;
GstStructure *config;
guint size, min, max, extra = 0;
gboolean update;
gboolean has_video_meta, has_crop_meta;
gboolean can_use_own_pool;
gboolean can_share_own_pool, pushing_from_our_pool = FALSE;
struct v4l2_control ctl = { 0, };
GST_DEBUG_OBJECT (obj->element, "decide allocation");
......@@ -3078,7 +3026,7 @@ gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
if (min != 0) {
/* if there is a min-buffers suggestion, use it. We add 1 because we need 1
* buffer extra to capture while the other two buffers are downstream */
* buffer extra to capture while the other buffers are downstream */
min += 1;
} else {
min = 2;
......@@ -3101,7 +3049,7 @@ gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
gst_query_find_allocation_meta (query, GST_VIDEO_CROP_META_API_TYPE,
NULL);
can_use_own_pool = ((has_crop_meta || !obj->need_crop_meta) &&
can_share_own_pool = ((has_crop_meta || !obj->need_crop_meta) &&
(has_video_meta || !obj->need_video_meta));
/* select a pool */
......@@ -3116,28 +3064,42 @@ gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
* other size than what the hardware gives us but for downstream pools
* we can try */
size = MAX (size, obj->sizeimage);
} else if (can_use_own_pool) {
} else if (can_share_own_pool) {
/* no downstream pool, use our own then */
GST_DEBUG_OBJECT (obj->element,
"read/write mode: no downstream pool, using our own");
pool = gst_object_ref (obj->pool);
size = obj->sizeimage;
pushing_from_our_pool = TRUE;
}
break;
case GST_V4L2_IO_MMAP:
case GST_V4L2_IO_DMABUF:
/* FIXME in these case we actually prefer/need a downstream pool */
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
/* in importing mode, prefer our own pool, and pass the other pool to
* our own, so it can serve itself */
if (pool == NULL)
goto no_downstream_pool;
gst_v4l2_buffer_pool_set_other_pool (GST_V4L2_BUFFER_POOL (obj->pool),
pool);
other_pool = pool;
gst_object_unref (pool);
pool = gst_object_ref (obj->pool);
size = obj->sizeimage;
break;
case GST_V4L2_IO_MMAP:
case GST_V4L2_IO_DMABUF:
/* in streaming mode, prefer our own pool */
/* Check if we can use it ... */
if (can_use_own_pool) {
if (can_share_own_pool) {
if (pool)
gst_object_unref (pool);
pool = gst_object_ref (obj->pool);
size = obj->sizeimage;
GST_DEBUG_OBJECT (obj->element,
"streaming mode: using our own pool %" GST_PTR_FORMAT, pool);
pushing_from_our_pool = TRUE;
} else if (pool) {
GST_DEBUG_OBJECT (obj->element,
"streaming mode: copying to downstream pool %" GST_PTR_FORMAT,
......@@ -3182,7 +3144,14 @@ gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
GST_V4L2_BUFFER_POOL_OPTION_CROP_META);
}
gst_buffer_pool_config_set_params (config, caps, size, min + extra, 0);
/* If pushing from our own pool, configure it with queried minimum,
* otherwise use the minimum required */
if (pushing_from_our_pool)
extra += min;
else
extra += GST_V4L2_MIN_BUFFERS;
gst_buffer_pool_config_set_params (config, caps, size, extra, 0);
GST_DEBUG_OBJECT (pool, "setting config %" GST_PTR_FORMAT, config);
......@@ -3199,8 +3168,12 @@ gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
}
setup_other_pool:
/* Now configure the other pool if different */
if (pool && obj->pool != pool) {
if (obj->pool != pool)
other_pool = pool;
if (other_pool) {
if (gst_buffer_pool_is_active (obj->pool))
goto done;
......@@ -3252,6 +3225,13 @@ cleanup:
gst_object_unref (pool);
return FALSE;
}
no_downstream_pool:
{
GST_ELEMENT_ERROR (obj->element, RESOURCE, SETTINGS,
(_("No downstream pool to import from.")),
("When importing DMABUF or USERPTR, we need a pool to import from"));
return FALSE;
}
}
gboolean
......
......@@ -253,10 +253,6 @@ gboolean gst_v4l2_object_unlock_stop (GstV4l2Object * v4l2object);
gboolean gst_v4l2_object_stop (GstV4l2Object * v4l2object);
gboolean gst_v4l2_object_copy (GstV4l2Object * v4l2object,
GstBuffer * dest, GstBuffer * src);
GstCaps * gst_v4l2_object_get_caps (GstV4l2Object * v4l2object,
GstCaps * filter);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment