Commit 099ac9fa authored by Thibault Saunier's avatar Thibault Saunier

docs: Convert gtkdoc comments to markdown

Modernizing the documentation, making it simpler to read and
modify and allowing us to possibly switch to hotdoc in the
future.
parent a1221351
......@@ -20,6 +20,7 @@
*/
/**
* SECTION:element-alsamidisrc
* @title: alsamidisrc
* @see_also: #GstPushSrc
*
* The alsamidisrc element is an element that fetches ALSA MIDI sequencer
......@@ -28,13 +29,13 @@
*
* It can be used to generate notes from a MIDI input device.
*
* <refsect2>
* <title>Example launch line</title>
* ## Example launch line
* |[
* gst-launch -v alsamidisrc ports=129:0 ! fluiddec ! audioconvert ! autoaudiosink
* ]| This pipeline will listen for events from the sequencer device at port 129:0,
* ]|
* This pipeline will listen for events from the sequencer device at port 129:0,
* and generate notes using the fluiddec element.
* </refsect2>
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -22,16 +22,18 @@
/**
* SECTION:element-alsasink
* @title: alsasink
* @see_also: alsasrc
*
* This element renders audio samples using the ALSA audio API.
*
* <refsect2>
* <title>Example pipelines</title>
* ## Example pipelines
* |[
* gst-launch-1.0 -v uridecodebin uri=file:///path/to/audio.ogg ! audioconvert ! audioresample ! autoaudiosink
* ]| Play an Ogg/Vorbis file and output audio via ALSA.
* </refsect2>
* ]|
*
* Play an Ogg/Vorbis file and output audio via ALSA.
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -21,16 +21,17 @@
/**
* SECTION:element-alsasrc
* @title: alsasrc
* @see_also: alsasink
*
* This element reads data from an audio card using the ALSA API.
*
* <refsect2>
* <title>Example pipelines</title>
* ## Example pipelines
* |[
* gst-launch-1.0 -v alsasrc ! queue ! audioconvert ! vorbisenc ! oggmux ! filesink location=alsasrc.ogg
* ]| Record from a sound card using ALSA and encode to Ogg/Vorbis.
* </refsect2>
* ]|
* Record from a sound card using ALSA and encode to Ogg/Vorbis.
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -21,16 +21,17 @@
/**
* SECTION:element-oggdemux
* @title: oggdemux
* @see_also: <link linkend="gst-plugins-base-plugins-oggmux">oggmux</link>
*
* This element demuxes ogg files into their encoded audio and video components.
*
* <refsect2>
* <title>Example pipelines</title>
* ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=test.ogg ! oggdemux ! vorbisdec ! audioconvert ! audioresample ! autoaudiosink
* ]| Decodes a vorbis audio stream stored inside an ogg container and plays it.
* </refsect2>
* ]|
* Decodes a vorbis audio stream stored inside an ogg container and plays it.
*
*/
......
......@@ -20,17 +20,18 @@
/**
* SECTION:element-oggmux
* @title: oggmux
* @see_also: <link linkend="gst-plugins-base-plugins-oggdemux">oggdemux</link>
*
* This element merges streams (audio and video) into ogg files.
*
* <refsect2>
* <title>Example pipelines</title>
* ## Example pipelines
* |[
* gst-launch-1.0 v4l2src num-buffers=500 ! video/x-raw,width=320,height=240 ! videoconvert ! videorate ! theoraenc ! oggmux ! filesink location=video.ogg
* ]| Encodes a video stream captured from a v4l2-compatible camera to Ogg/Theora
* ]|
* Encodes a video stream captured from a v4l2-compatible camera to Ogg/Theora
* (the encoding will stop automatically after 500 frames)
* </refsect2>
*
*/
#ifdef HAVE_CONFIG_H
......@@ -968,14 +969,14 @@ no_granule:
/* make sure at least one buffer is queued on all pads, two if possible
*
*
* if pad->buffer == NULL, pad->next_buffer != NULL, then
* we do not know if the buffer is the last or not
* if pad->buffer != NULL, pad->next_buffer != NULL, then
* pad->buffer is not the last buffer for the pad
* if pad->buffer != NULL, pad->next_buffer == NULL, then
* pad->buffer if the last buffer for the pad
*
*
* returns a pointer to an oggpad that holds the best buffer, or
* NULL when no pad was usable. "best" means the buffer marked
* with the lowest timestamp. If best->buffer == NULL then either
......@@ -1409,7 +1410,7 @@ gst_ogg_mux_make_fistail (GstOggMux * mux, ogg_stream_state * os)
* page that allows decoders to identify the type of the stream.
* After that we need to write out all extra info for the decoders.
* In the case of a codec that also needs data as configuration, we can
* find that info in the streamcaps.
* find that info in the streamcaps.
* After writing the headers we must start a new page for the data.
*/
static GstFlowReturn
......@@ -2034,11 +2035,11 @@ gst_ogg_mux_send_start_events (GstOggMux * ogg_mux, GstCollectPads * pads)
}
/* This function is called when there is data on all pads.
*
*
* It finds a pad to pull on, this is done by looking at the buffers
* to decide which one to use, and using the 'oldest' one first. It then calls
* gst_ogg_mux_process_best_pad() to process as much data as possible.
*
*
* If all the pads have received EOS, it flushes out all data by continually
* getting the best pad and calling gst_ogg_mux_process_best_pad() until they
* are all empty, and then sends EOS.
......
......@@ -26,16 +26,17 @@
/**
* SECTION:element-opusdec
* @title: opusdec
* @see_also: opusenc, oggdemux
*
 * This element decodes an OPUS stream to raw integer audio.
*
* <refsect2>
* <title>Example pipelines</title>
* ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=opus.ogg ! oggdemux ! opusdec ! audioconvert ! audioresample ! alsasink
* ]| Decode an Ogg/Opus file. To create an Ogg/Opus file refer to the documentation of opusenc.
* </refsect2>
* ]|
* Decode an Ogg/Opus file. To create an Ogg/Opus file refer to the documentation of opusenc.
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -25,16 +25,17 @@
/**
* SECTION:element-opusenc
* @title: opusenc
* @see_also: opusdec, oggmux
*
* This element encodes raw audio to OPUS.
*
* <refsect2>
* <title>Example pipelines</title>
* ## Example pipelines
* |[
* gst-launch-1.0 -v audiotestsrc wave=sine num-buffers=100 ! audioconvert ! opusenc ! oggmux ! filesink location=sine.ogg
* ]| Encode a test sine signal to Ogg/OPUS.
* </refsect2>
* ]|
* Encode a test sine signal to Ogg/OPUS.
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -20,6 +20,7 @@
/**
* SECTION:element-clockoverlay
* @title: clockoverlay
* @see_also: #GstBaseTextOverlay, #GstTimeOverlay
*
* This element overlays the current clock time on top of a video
......@@ -28,18 +29,19 @@
* time is displayed in the top left corner of the picture, with some
* padding to the left and to the top.
*
* <refsect2>
* <title>Example launch lines</title>
* ## Example launch lines
* |[
* gst-launch-1.0 -v videotestsrc ! clockoverlay ! autovideosink
* ]| Display the current wall clock time in the top left corner of the video picture
* ]|
* Display the current wall clock time in the top left corner of the video picture
* |[
* gst-launch-1.0 -v videotestsrc ! clockoverlay halignment=right valignment=bottom text="Edge City" shaded-background=true font-desc="Sans, 36" ! videoconvert ! autovideosink
* ]| Another pipeline that displays the current time with some leading
* ]|
* Another pipeline that displays the current time with some leading
* text in the bottom right corner of the video picture, with the background
* of the text being shaded in order to make it more legible on top of a
* bright video background.
* </refsect2>
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -25,6 +25,7 @@
/**
* SECTION:element-textoverlay
* @title: textoverlay
* @see_also: #GstTextRender, #GstTextOverlay, #GstTimeOverlay, #GstSubParse
*
* This plugin renders text on top of a video stream. This can be either
......@@ -37,18 +38,19 @@
* The text can contain newline characters and text wrapping is enabled by
* default.
*
* <refsect2>
* <title>Example launch lines</title>
* ## Example launch lines
* |[
* gst-launch-1.0 -v gst-launch-1.0 videotestsrc ! textoverlay text="Room A" valignment=top halignment=left font-desc="Sans, 72" ! autovideosink
* ]| Here is a simple pipeline that displays a static text in the top left
* ]|
* Here is a simple pipeline that displays a static text in the top left
* corner of the video picture
* |[
* gst-launch-1.0 -v filesrc location=subtitles.srt ! subparse ! txt. videotestsrc ! timeoverlay ! textoverlay name=txt shaded-background=yes ! autovideosink
* ]| Here is another pipeline that displays subtitles from an .srt subtitle
* ]|
* Here is another pipeline that displays subtitles from an .srt subtitle
* file, centered at the bottom of the picture and with a rectangular shading
* around the text in the background:
* <para>
*
* If you do not have such a subtitle file, create one looking like this
* in a text editor:
* |[
......@@ -66,8 +68,7 @@
* Uh? What are you talking about?
* I don&apos;t understand (18-62s)
* ]|
* </para>
* </refsect2>
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -22,21 +22,21 @@
/**
* SECTION:element-textrender
* @title: textrender
* @see_also: #GstTextOverlay
*
* This plugin renders text received on the text sink pad to a video
* buffer (retaining the alpha channel), so it can later be overlayed
* on top of video streams using other elements.
*
* The text can contain newline characters. (FIXME: What about text
* The text can contain newline characters. (FIXME: What about text
* wrapping? It does not make sense in this context)
*
* <refsect2>
* <title>Example launch lines</title>
* ## Example launch lines
* |[
* gst-launch-1.0 -v filesrc location=subtitles.srt ! subparse ! textrender ! videoconvert ! autovideosink
* ]|
* </refsect2>
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -20,6 +20,7 @@
/**
* SECTION:element-timeoverlay
* @title: timeoverlay
* @see_also: #GstBaseTextOverlay, #GstClockOverlay
*
* This element overlays the buffer time stamps of a video stream on
......@@ -28,17 +29,18 @@
* time stamp is displayed in the top left corner of the picture, with some
* padding to the left and to the top.
*
* <refsect2>
* |[
* gst-launch-1.0 -v videotestsrc ! timeoverlay ! autovideosink
* ]| Display the time stamps in the top left corner of the video picture.
* ]|
* Display the time stamps in the top left corner of the video picture.
* |[
* gst-launch-1.0 -v videotestsrc ! timeoverlay halignment=right valignment=bottom text="Stream time:" shaded-background=true font-desc="Sans, 24" ! autovideosink
* ]| Another pipeline that displays the time stamps with some leading
* ]|
* Another pipeline that displays the time stamps with some leading
* text in the bottom right corner of the video picture, with the background
* of the text being shaded in order to make it more legible on top of a
* bright video background.
* </refsect2>
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -22,6 +22,7 @@
/**
* SECTION:element-theoradec
* @title: theoradec
* @see_also: theoraenc, oggdemux
*
* This element decodes theora streams into raw video
......@@ -29,13 +30,13 @@
* video codec maintained by the <ulink url="http://www.xiph.org/">Xiph.org
* Foundation</ulink>, based on the VP3 codec.
*
* <refsect2>
* <title>Example pipeline</title>
* ## Example pipeline
* |[
* gst-launch-1.0 -v filesrc location=videotestsrc.ogg ! oggdemux ! theoradec ! videoconvert ! videoscale ! autovideosink
* ]| This example pipeline will decode an ogg stream and decodes the theora video in it.
* ]|
 * This example pipeline will decode an ogg stream and decode the theora video in it.
* Refer to the theoraenc example to create the ogg file.
* </refsect2>
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -22,6 +22,7 @@
/**
* SECTION:element-theoraenc
* @title: theoraenc
* @see_also: theoradec, oggmux
*
* This element encodes raw video into a Theora stream.
......@@ -45,14 +46,14 @@
* A videorate element is often required in front of theoraenc, especially
* when transcoding and when putting Theora into the Ogg container.
*
* <refsect2>
* <title>Example pipeline</title>
* ## Example pipeline
* |[
* gst-launch-1.0 -v videotestsrc num-buffers=500 ! video/x-raw,width=1280,height=720 ! queue ! progressreport ! theoraenc ! oggmux ! filesink location=videotestsrc.ogg
* ]| This example pipeline will encode a test video source to theora muxed in an
* ]|
* This example pipeline will encode a test video source to theora muxed in an
 * ogg container. Refer to the theoradec documentation to decode the created
* stream.
* </refsect2>
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -20,6 +20,7 @@
/**
* SECTION:element-theoraparse
* @title: theoraparse
* @see_also: theoradec, oggdemux, vorbisparse
*
* The theoraparse element will parse the header packets of the Theora
......@@ -40,18 +41,19 @@
* offsetting all buffers that it outputs by a specified amount, and updating
* that offset from the value array whenever a keyframe is processed.
*
* <refsect2>
* <title>Example pipelines</title>
* ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=video.ogg ! oggdemux ! theoraparse ! fakesink
* ]| This pipeline shows that the streamheader is set in the caps, and that each
* ]|
* This pipeline shows that the streamheader is set in the caps, and that each
* buffer has the timestamp, duration, offset, and offset_end set.
* |[
* gst-launch-1.0 filesrc location=video.ogg ! oggdemux ! theoraparse \
* ! oggmux ! filesink location=video-remuxed.ogg
* ]| This pipeline shows remuxing. video-remuxed.ogg might not be exactly the same
* ]|
* This pipeline shows remuxing. video-remuxed.ogg might not be exactly the same
* as video.ogg, but they should produce exactly the same decoded data.
* </refsect2>
*
*/
/* FIXME 0.11: suppress warnings for deprecated API such as GValueArray
......
......@@ -19,6 +19,7 @@
/**
* SECTION:element-vorbisdec
* @title: vorbisdec
* @see_also: vorbisenc, oggdemux
*
* This element decodes a Vorbis stream to raw float audio.
......@@ -27,13 +28,12 @@
* Foundation</ulink>. As it outputs raw float audio you will often need to
* put an audioconvert element after it.
*
*
* <refsect2>
* <title>Example pipelines</title>
* ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=sine.ogg ! oggdemux ! vorbisdec ! audioconvert ! audioresample ! autoaudiosink
* ]| Decode an Ogg/Vorbis. To create an Ogg/Vorbis file refer to the documentation of vorbisenc.
* </refsect2>
* ]|
 * Decode an Ogg/Vorbis file. To create an Ogg/Vorbis file refer to the documentation of vorbisenc.
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -19,6 +19,7 @@
/**
* SECTION:element-vorbisenc
* @title: vorbisenc
* @see_also: vorbisdec, oggmux
*
* This element encodes raw float audio into a Vorbis stream.
......@@ -26,16 +27,17 @@
* audio codec maintained by the <ulink url="http://www.xiph.org/">Xiph.org
* Foundation</ulink>.
*
* <refsect2>
* <title>Example pipelines</title>
* ## Example pipelines
* |[
* gst-launch-1.0 -v audiotestsrc wave=sine num-buffers=100 ! audioconvert ! vorbisenc ! oggmux ! filesink location=sine.ogg
* ]| Encode a test sine signal to Ogg/Vorbis. Note that the resulting file
* ]|
* Encode a test sine signal to Ogg/Vorbis. Note that the resulting file
* will be really small because a sine signal compresses very well.
* |[
* gst-launch-1.0 -v autoaudiosrc ! audioconvert ! vorbisenc ! oggmux ! filesink location=alsasrc.ogg
* ]| Record from a sound card and encode to Ogg/Vorbis.
* </refsect2>
* ]|
* Record from a sound card and encode to Ogg/Vorbis.
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
......
......@@ -20,6 +20,7 @@
/**
* SECTION:element-vorbisparse
* @title: vorbisparse
* @see_also: vorbisdec, oggdemux, theoraparse
*
* The vorbisparse element will parse the header packets of the Vorbis
......@@ -33,18 +34,19 @@
* vorbisparse outputs have all of the metadata that oggmux expects to receive,
* which allows you to (for example) remux an ogg/vorbis file.
*
* <refsect2>
* <title>Example pipelines</title>
* ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=sine.ogg ! oggdemux ! vorbisparse ! fakesink
* ]| This pipeline shows that the streamheader is set in the caps, and that each
* ]|
* This pipeline shows that the streamheader is set in the caps, and that each
* buffer has the timestamp, duration, offset, and offset_end set.
* |[
* gst-launch-1.0 filesrc location=sine.ogg ! oggdemux ! vorbisparse \
* ! oggmux ! filesink location=sine-remuxed.ogg
* ]| This pipeline shows remuxing. sine-remuxed.ogg might not be exactly the same
* ]|
* This pipeline shows remuxing. sine-remuxed.ogg might not be exactly the same
* as sine.ogg, but they should produce exactly the same decoded data.
* </refsect2>
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -19,6 +19,7 @@
/**
* SECTION:element-vorbistag
* @title: vorbistag
* @see_also: #oggdemux, #oggmux, #vorbisparse, #GstTagSetter
*
* The vorbistags element can change the tag contained within a raw
......@@ -34,14 +35,14 @@
* automatically (and merged according to the merge mode set via the tag
* setter interface).
*
* <refsect2>
* <title>Example pipelines</title>
* ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=foo.ogg ! oggdemux ! vorbistag ! oggmux ! filesink location=bar.ogg
* ]| This element is not useful with gst-launch-1.0, because it does not support
* ]|
* This element is not useful with gst-launch-1.0, because it does not support
* setting the tags on a #GstTagSetter interface. Conceptually, the element
* will usually be used in this order though.
* </refsect2>
*
*/
#ifdef HAVE_CONFIG_H
......
......@@ -27,6 +27,7 @@
/**
* SECTION:gstdmabuf
* @title: GstDmaBufAllocator
* @short_description: Memory wrapper for Linux dmabuf memory
* @see_also: #GstMemory
*
......
......@@ -20,6 +20,7 @@
/**
* SECTION:gstfdmemory
* @title: GstFdAllocator
* @short_description: Memory wrapper for fd backed memory
* @see_also: #GstMemory
*
......
......@@ -19,6 +19,7 @@
*/
/**
* SECTION:gstappsink
* @title: GstAppSink
* @short_description: Easy way for applications to extract samples from a
* pipeline
* @see_also: #GstSample, #GstBaseSink, appsrc
......
......@@ -19,6 +19,7 @@
*/
/**
* SECTION:gstappsrc
* @title: GstAppSrc
* @short_description: Easy way for applications to inject buffers into a
* pipeline
* @see_also: #GstBaseSrc, appsink
......
......@@ -18,6 +18,7 @@
*/
/**
* SECTION:gstaudiochannels
* @title: Audio-channels
* @short_description: Support library for audio channel handling
*
* This library contains some helper functions for multichannel audio.
......
......@@ -32,24 +32,18 @@
/**
* SECTION:audioconverter
* @title: GstAudioConverter
* @short_description: Generic audio conversion
*
* <refsect2>
* <para>
* This object is used to convert audio samples from one format to another.
* The object can perform conversion of:
* <itemizedlist>
* <listitem><para>
* audio format with optional dithering and noise shaping
* </para></listitem>
* <listitem><para>
* audio samplerate
* </para></listitem>
* <listitem><para>
* audio channels and channel layout
* </para></listitem>
* </para>
* </refsect2>
*
* * audio format with optional dithering and noise shaping
*
* * audio samplerate
*
* * audio channels and channel layout
*
*/
#ifndef GST_DISABLE_GST_DEBUG
......@@ -1336,7 +1330,7 @@ gst_audio_converter_samples (GstAudioConverter * convert,
}
/**
* gst_audio_converter_supports_inplace
* gst_audio_converter_supports_inplace:
* @convert: a #GstAudioConverter
*
* Returns whether the audio converter can perform the conversion in-place.
......
......@@ -42,6 +42,7 @@ GST_DEBUG_CATEGORY_STATIC (audio_resampler_debug);
/**
* SECTION:gstaudioresampler
* @title: GstAudioResampler
* @short_description: Utility structure for resampler information
*
* #GstAudioResampler is a structure which holds the information
......
......@@ -28,20 +28,20 @@ G_BEGIN_DECLS
typedef struct _GstAudioResampler GstAudioResampler;
/**
* GST_AUDIO_RESAMPLER_OPT_CUTOFF
* GST_AUDIO_RESAMPLER_OPT_CUTOFF:
*
* G_TYPE_DOUBLE, Cutoff parameter for the filter. 0.940 is the default.
*/
#define GST_AUDIO_RESAMPLER_OPT_CUTOFF "GstAudioResampler.cutoff"
/**
* GST_AUDIO_RESAMPLER_OPT_STOP_ATTENUTATION
* GST_AUDIO_RESAMPLER_OPT_STOP_ATTENUTATION:
*
 * G_TYPE_DOUBLE, stopband attenuation in decibels. The attenuation
* after the stopband for the kaiser window. 85 dB is the default.
*/
#define GST_AUDIO_RESAMPLER_OPT_STOP_ATTENUATION "GstAudioResampler.stop-attenutation"
/**
* GST_AUDIO_RESAMPLER_OPT_TRANSITION_BANDWIDTH
* GST_AUDIO_RESAMPLER_OPT_TRANSITION_BANDWIDTH:
*
* G_TYPE_DOUBLE, transition bandwidth. The width of the
* transition band for the kaiser window. 0.087 is the default.
......@@ -137,7 +137,7 @@ typedef enum {
*/
#define GST_AUDIO_RESAMPLER_OPT_FILTER_INTERPOLATION "GstAudioResampler.filter-interpolation"
/**
* GST_AUDIO_RESAMPLER_OPT_FILTER_OVERSAMPLE
* GST_AUDIO_RESAMPLER_OPT_FILTER_OVERSAMPLE:
*
* G_TYPE_UINT, oversampling to use when interpolating filters
* 8 is the default.
......
......@@ -18,6 +18,7 @@
*/
/**
* SECTION:gstaudio
* @title: GstAudio
* @short_description: Support library for audio elements
*
* This library contains some helper functions for audio elements.
......@@ -60,7 +61,7 @@ ensure_debug_category (void)
* @segment: Segment in %GST_FORMAT_TIME or %GST_FORMAT_DEFAULT to which
* the buffer should be clipped.
* @rate: sample rate.
* @bpf: size of one audio frame in bytes. This is the size of one sample *
* @bpf: size of one audio frame in bytes. This is the size of one sample *
* number of channels.
*
* Clip the buffer to the given %GstSegment.
......
......@@ -22,6 +22,7 @@
/**
* SECTION:gstaudiobasesink
* @title: GstAudioBaseSink
* @short_description: Base class for audio sinks
* @see_also: #GstAudioSink, #GstAudioRingBuffer.
*
......
......@@ -22,6 +22,7 @@
/**
* SECTION:gstaudiobasesrc
* @title: GstAudioBaseSrc
* @short_description: Base class for audio sources
* @see_also: #GstAudioSrc, #GstAudioRingBuffer.
*
......
......@@ -36,62 +36,53 @@
/**
* SECTION:gstaudiocdsrc
* @title: GstAudioCdSrc
* @short_description: Base class for Audio CD sources
*
* <para>
* Provides a base class for CD digital audio (CDDA) sources, which handles
* things like seeking, querying, discid calculation, tags, and buffer
* timestamping.
* </para>
* <refsect2>
* <title>Using GstAudioCdSrc-based elements in applications</title>
* <para>
*
* ## Using GstAudioCdSrc-based elements in applications
*
<