Commit 0ac9bb7d authored by Wim Taymans's avatar Wim Taymans
Browse files

Merge branch 'master' into 0.11

parents db999572 efdd3258
......@@ -13,9 +13,9 @@ Description:
_ a GstTypeFindElement connected to the single sink pad
_ optionnaly a demuxer/parser
_ optionally a demuxer/parser
_ optionnaly one or more DecodeGroup
_ optionally one or more DecodeGroup
* Autoplugging
......@@ -203,3 +203,87 @@ differences:
controlled by the element. This means that a buffer cannot be pushed to a
non-linked pad any sooner than buffers in any other stream which were received
before it.
Parsers, decoders and auto-plugging
This section has DRAFT status.
Some media formats come in different "flavours" or "stream formats". These
formats differ in the way the setup data and media data is signalled and/or
packaged. An example for this is H.264 video, where there is a bytestream
format (with codec setup data signalled inline and units prefixed by a sync
code and packet length information) and a "raw" format where codec setup
data is signalled out of band (via the caps) and the chunking is implicit
in the way the buffers were muxed into a container, to mention just two of
the possible variants.
Especially on embedded platforms it is common that decoders can only
handle one particular stream format, and not all of them.
Where there are multiple stream formats, parsers are usually expected
to be able to convert between the different formats. This will, if
implemented correctly, work as expected in a static pipeline such as
... ! parser ! decoder ! sink
where the parser can query the decoder's capabilities even before
processing the first piece of data, and configure itself to convert
accordingly, if conversion is needed at all.
In an auto-plugging context this is not so straightforward though,
because elements are plugged incrementally and not before the previous
element has processed some data and decided what it will output exactly
(unless the template caps are completely fixed, then it can continue
right away, this is not always the case here though, see below). A
parser will thus have to decide on *some* output format so auto-plugging
can continue. It doesn't know anything about the available decoders and
their capabilities though, so it's possible that it will choose a format
that is not supported by any of the available decoders, or by the preferred
decoder.
If the parser had sufficiently concise but fixed source pad template caps,
decodebin could continue to plug a decoder right away, allowing the
parser to configure itself in the same way as it would with a static
pipeline. This is not an option, unfortunately, because often the
parser needs to process some data to determine e.g. the format's profile or
other stream properties (resolution, sample rate, channel configuration, etc.),
and there may be different decoders for different profiles (e.g. DSP codec
for baseline profile, and software fallback for main/high profile; or a DSP
codec only supporting certain resolutions, with a software fallback for
unusual resolutions). So if decodebin just plugged the highest-ranking
decoder, that decoder might not be able to handle the actual stream later
on, which would result in an error (this is a data flow error then which would
be hard to intercept and avoid in decodebin). In other words, we can't solve
this issue by plugging a decoder right away with the parser.
So decodebin needs to communicate to the parser the set of available decoder
caps (which would contain the relevant capabilities/restrictions such as
supported profiles, resolutions, etc.), after the usual "autoplug-*" signal
filtering/sorting of course.
This could be done in multiple ways, e.g.
- plug a capsfilter element right after the parser, and construct
a set of filter caps from the list of available decoders (one
could append at the end just the name(s) of the caps structures
from the parser pad template caps to function as an 'ANY other'
caps equivalent). This would let the parser negotiate to a
supported stream format in the same way as with the static
pipeline mentioned above, but of course incur some overhead
through the additional capsfilter element.
- one could add a filter-caps equivalent property to the parsers
(and/or GstBaseParse class) (e.g. "preferred-caps" or so).
- one could add some kind of "fixate-caps" or "fixate-format"
signal to such parsers
Alternatively, one could simply make all decoders incorporate parsers, so
that always all formats are supported. This is problematic for other reasons
though (e.g. we would not be able to detect the profile in all cases then
before plugging a decoder, which would make it hard to just play the audio
part of a stream and not the video if a suitable decoder was missing, for
......@@ -21,7 +21,6 @@ libgstpango_la_CFLAGS = \
libgstpango_la_LIBADD = \
$(top_builddir)/gst-libs/gst/video/libgstvideo-$(GST_MAJORMINOR).la \
......@@ -225,7 +225,7 @@ static const GstTagEntryMatch tag_matches[] = {
* Returns: The corresponding GStreamer tag or NULL if none exists.
const gchar *
gst_tag_from_id3_tag (const gchar * id3_tag)
int i = 0;
......@@ -285,7 +285,7 @@ static const GstTagEntryMatch user_tag_matches[] = {
* Returns: The corresponding GStreamer tag or NULL if none exists.
const gchar *
gst_tag_from_id3_user_tag (const gchar * type, const gchar * id3_user_tag)
int i = 0;
......@@ -317,7 +317,7 @@ gst_tag_from_id3_user_tag (const gchar * type, const gchar * id3_user_tag)
* Returns: The corresponding ID3v2 tag or NULL if none exists.
const gchar *
gst_tag_to_id3_tag (const gchar * gst_tag)
int i = 0;
......@@ -423,7 +423,7 @@ gst_tag_id3_genre_count (void)
* Returns: the genre or NULL if no genre is associated with that ID.
const gchar *
gst_tag_id3_genre_get (const guint id)
if (id >= G_N_ELEMENTS (genres))
......@@ -109,7 +109,7 @@ static const GstTagEntryMatch tag_matches[] = {
* Returns: The corresponding GStreamer tag or NULL if none exists.
const gchar *
gst_tag_from_vorbis_tag (const gchar * vorbis_tag)
int i = 0;
......@@ -138,7 +138,7 @@ gst_tag_from_vorbis_tag (const gchar * vorbis_tag)
* Returns: The corresponding vorbiscomment tag or NULL if none exists.
const gchar *
gst_tag_to_vorbis_tag (const gchar * gst_tag)
int i = 0;
......@@ -115,7 +115,8 @@ xmp_serialization_data_use_schema (XmpSerializationData * serdata,
typedef enum
GstXmpTagTypeSimple = 0,
GstXmpTagTypeNone = 0,
......@@ -136,6 +137,13 @@ struct _XmpTag
const gchar *tag_name;
GstXmpTagType type;
/* some tags must be inside a Bag even
* if they are a single entry. Set it here so we know */
GstXmpTagType supertype;
/* For tags that need a rdf:parseType attribute */
const gchar *parse_type;
/* Used for struct and compound types */
GSList *children;
......@@ -157,9 +165,9 @@ xmp_tag_get_merge_mode (XmpTag * xmptag)
static const gchar *
xmp_tag_get_type_name (XmpTag * xmptag)
xmp_tag_type_get_name (GstXmpTagType tagtype)
switch (xmptag->type) {
switch (tagtype) {
case GstXmpTagTypeSeq:
return "rdf:Seq";
case GstXmpTagTypeBag:
......@@ -251,6 +259,8 @@ gst_xmp_tag_create (const gchar * gst_tag, const gchar * xmp_tag,
xmpinfo->gst_tag = gst_tag;
xmpinfo->tag_name = xmp_tag;
xmpinfo->type = xmp_type;
xmpinfo->supertype = GstXmpTagTypeNone;
xmpinfo->parse_type = NULL;
xmpinfo->serialize = serialization_func;
xmpinfo->deserialize = deserialization_func;
xmpinfo->children = NULL;
......@@ -1012,6 +1022,8 @@ _init_xmp_tag_map (gpointer user_data)
schema = gst_xmp_schema_new ();
xmpinfo = gst_xmp_tag_create (NULL, "Iptc4xmpExt:LocationShown",
GstXmpTagTypeStruct, NULL, NULL);
xmpinfo->supertype = GstXmpTagTypeBag;
xmpinfo->parse_type = "Resource";
xmpinfo->children = g_slist_prepend (xmpinfo->children,
"LocationDetails:Sublocation", GstXmpTagTypeSimple, NULL, NULL));
......@@ -1041,17 +1053,26 @@ struct _GstXmpNamespaceMatch
const gchar *ns_prefix;
const gchar *ns_uri;
* Stores extra namespaces for array tags
* The namespaces should be written in the form:
* xmlns:XpTo=" (next ones)"
const gchar *extra_ns;
static const GstXmpNamespaceMatch ns_match[] = {
{"dc", ""},
{"exif", ""},
{"tiff", ""},
{"xap", ""},
{"photoshop", ""},
{"Iptc4xmpCore", ""},
{"Iptc4xmpExt", ""},
{"dc", "", NULL},
{"exif", "", NULL},
{"tiff", "", NULL},
{"xap", "", NULL},
{"photoshop", "", NULL},
{"Iptc4xmpCore", "", NULL},
{"Iptc4xmpExt", "",
typedef struct _GstXmpNamespaceMap GstXmpNamespaceMap;
......@@ -1667,10 +1688,29 @@ write_one_tag (const GstTagList * list, XmpTag * xmp_tag, gpointer user_data)
if (use_it) {
if (xmp_tag->tag_name)
string_open_tag (data, xmp_tag->tag_name);
if (xmp_tag->supertype) {
string_open_tag (data, xmp_tag_type_get_name (xmp_tag->supertype));
if (xmp_tag->parse_type) {
g_string_append (data, "<rdf:li rdf:parseType=\"");
g_string_append (data, xmp_tag->parse_type);
g_string_append_c (data, '"');
g_string_append_c (data, '>');
} else {
string_open_tag (data, "rdf:li");
/* now write it */
for (iter = xmp_tag->children; iter; iter = g_slist_next (iter)) {
write_one_tag (list, iter->data, user_data);
if (xmp_tag->supertype) {
string_close_tag (data, "rdf:li");
string_close_tag (data, xmp_tag_type_get_name (xmp_tag->supertype));
if (xmp_tag->tag_name)
string_close_tag (data, xmp_tag->tag_name);
......@@ -1703,7 +1743,7 @@ write_one_tag (const GstTagList * list, XmpTag * xmp_tag, gpointer user_data)
} else {
const gchar *typename;
typename = xmp_tag_get_type_name (xmp_tag);
typename = xmp_tag_type_get_name (xmp_tag->type);
string_open_tag (data, typename);
for (i = 0; i < ct; i++) {
......@@ -1773,9 +1813,13 @@ gst_tag_list_to_xmp_buffer_full (const GstTagList * list, gboolean read_only,
i = 0;
while (ns_match[i].ns_prefix) {
if (xmp_serialization_data_use_schema (&serialization_data,
ns_match[i].ns_prefix)) {
g_string_append_printf (data, " xmlns:%s=\"%s\"",
ns_match[i].ns_prefix, ns_match[i].ns_uri);
if (ns_match[i].extra_ns) {
g_string_append_printf (data, " %s", ns_match[i].extra_ns);
g_string_append (data, ">\n");
......@@ -444,8 +444,8 @@ GType gst_tag_image_type_get_type (void);
/* functions for vorbis comment manipulation */
G_CONST_RETURN gchar * gst_tag_from_vorbis_tag (const gchar * vorbis_tag);
G_CONST_RETURN gchar * gst_tag_to_vorbis_tag (const gchar * gst_tag);
const gchar * gst_tag_from_vorbis_tag (const gchar * vorbis_tag);
const gchar * gst_tag_to_vorbis_tag (const gchar * gst_tag);
void gst_vorbis_tag_add (GstTagList * list,
const gchar * tag,
const gchar * value);
......@@ -471,13 +471,13 @@ GstBuffer * gst_tag_list_to_vorbiscomment_buffer (const GstTagLis
/* functions for ID3 tag manipulation */
guint gst_tag_id3_genre_count (void);
G_CONST_RETURN gchar * gst_tag_id3_genre_get (const guint id);
const gchar * gst_tag_id3_genre_get (const guint id);
GstTagList * gst_tag_list_new_from_id3v1 (const guint8 * data);
G_CONST_RETURN gchar * gst_tag_from_id3_tag (const gchar * id3_tag);
G_CONST_RETURN gchar * gst_tag_from_id3_user_tag (const gchar * type,
const gchar * gst_tag_from_id3_tag (const gchar * id3_tag);
const gchar * gst_tag_from_id3_user_tag (const gchar * type,
const gchar * id3_user_tag);
G_CONST_RETURN gchar * gst_tag_to_id3_tag (const gchar * gst_tag);
const gchar * gst_tag_to_id3_tag (const gchar * gst_tag);
gboolean gst_tag_list_add_id3_image (GstTagList * tag_list,
const guint8 * image_data,
......@@ -20,7 +20,6 @@ libgstaudioresample_la_CFLAGS = \
libgstaudioresample_la_LIBADD = \
......@@ -409,7 +409,6 @@ elements_textoverlay_CFLAGS = $(GST_BASE_CFLAGS) $(AM_CFLAGS)
elements_volume_LDADD = \
$(top_builddir)/gst-libs/gst/interfaces/ \
......@@ -2,6 +2,5 @@
noinst_PROGRAMS = testchannels
testchannels_SOURCES = testchannels.c
testchannels_LDADD = $(top_builddir)/gst-libs/gst/audio/ \
testchannels_LDADD = $(top_builddir)/gst-libs/gst/audio/libgstaudio-$(GST_MAJORMINOR).la \
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment