diff --git a/CMakeLists.txt b/CMakeLists.txt index e41f87f..3038cab 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,6 +27,8 @@ add_library(gstprojectm SHARED src/projectm.c src/gstglbaseaudiovisualizer.h src/gstglbaseaudiovisualizer.c + src/gstpmaudiovisualizer.h + src/gstpmaudiovisualizer.c ) target_include_directories(gstprojectm diff --git a/README.md b/README.md index 1b43b93..c5afe57 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ The documentation has been organized into distinct files, each dedicated to a sp Once the plugin has been installed, you can use it something like this: ```shell -gst-launch pipewiresrc ! queue ! audioconvert ! projectm preset=/usr/local/share/projectM/presets preset-duration=5 ! video/x-raw,width=2048,height=1440,framerate=60/1 ! videoconvert ! xvimagesink sync=false +gst-launch pipewiresrc ! queue ! audioconvert ! projectm preset=/usr/local/share/projectM/presets preset-duration=5 ! video/x-raw,width=2048,height=1440,framerate=60/1 ! videoconvert ! xvimagesink sync=true ``` Or to convert an audio file to video: diff --git a/build.sh b/build.sh index 74a1148..3fd9d8c 100755 --- a/build.sh +++ b/build.sh @@ -99,7 +99,7 @@ prompt_install() { # Print example command echo echo "Done! Here's an example command:" - echo "gst-launch-1.0 audiotestsrc ! queue ! audioconvert ! projectm ! "video/x-raw,width=512,height=512,framerate=60/1" ! videoconvert ! $VIDEO_SINK sync=false" + echo "gst-launch-1.0 audiotestsrc ! queue ! audioconvert ! projectm ! "video/x-raw,width=512,height=512,framerate=60/1" ! videoconvert ! $VIDEO_SINK sync=true" else echo echo "Done!" 
diff --git a/src/caps.c b/src/caps.c index 09b5add..c7a1941 100644 --- a/src/caps.c +++ b/src/caps.c @@ -39,7 +39,7 @@ const gchar *get_video_src_cap(unsigned int type) { switch (type) { case 0: - format = GST_VIDEO_CAPS_MAKE("video/x-raw, format = (string) { ABGR }, " + format = GST_VIDEO_CAPS_MAKE("video/x-raw, format = (string) { RGBA }, " "framerate=(fraction)[0/1,MAX]"); break; default: diff --git a/src/config.h b/src/config.h index bc83e3b..ce2cc92 100644 --- a/src/config.h +++ b/src/config.h @@ -33,6 +33,7 @@ G_BEGIN_DECLS #define DEFAULT_PRESET_LOCKED FALSE #define DEFAULT_ENABLE_PLAYLIST TRUE #define DEFAULT_SHUFFLE_PRESETS TRUE // depends on ENABLE_PLAYLIST +#define DEFAULT_PTS_SYNC TRUE G_END_DECLS diff --git a/src/enums.h b/src/enums.h index 863d677..c3b5969 100644 --- a/src/enums.h +++ b/src/enums.h @@ -24,7 +24,8 @@ enum { PROP_EASTER_EGG, PROP_PRESET_LOCKED, PROP_SHUFFLE_PRESETS, - PROP_ENABLE_PLAYLIST + PROP_ENABLE_PLAYLIST, + PROP_PTS_SYNC }; G_END_DECLS diff --git a/src/gstglbaseaudiovisualizer.c b/src/gstglbaseaudiovisualizer.c index e1f786a..d48dc1c 100644 --- a/src/gstglbaseaudiovisualizer.c +++ b/src/gstglbaseaudiovisualizer.c @@ -35,31 +35,48 @@ #endif #include "gstglbaseaudiovisualizer.h" +#include "gstpmaudiovisualizer.h" #include /** * SECTION:GstGLBaseAudioVisualizer - * @short_description: #GstAudioVisualizer subclass for injecting OpenGL + * @short_description: #GstPMAudioVisualizer subclass for injecting OpenGL * resources in a pipeline * @title: GstGLBaseAudioVisualizer - * @see_also: #GstAudioVisualizer + * @see_also: #GstPMAudioVisualizer * - * Wrapper for GstAudioVisualizer for handling OpenGL contexts. + * Wrapper for GstPMAudioVisualizer for handling OpenGL contexts. * * #GstGLBaseAudioVisualizer handles the nitty gritty details of retrieving an * OpenGL context. 
It also provides `gl_start()` and `gl_stop()` virtual methods * that ensure an OpenGL context is available and current in the calling thread - * for initializing and cleaning up OpenGL dependent resources. The `gl_render` - * virtual method is used to perform OpenGL rendering. + * for initializing and cleaning up OpenGL resources. The `render` + * virtual method of the GstPMAudioVisualizer is implemented to perform OpenGL + * rendering. The implementer provides an implementation for fill_gl_memory to + * render directly to gl memory. + * + * Typical plug-in call order for implementer-provided functions: + * - setup (once) + * - gl_start (once) + * - fill_gl_memory (once for each frame) + * - gl_stop (once) */ #define GST_CAT_DEFAULT gst_gl_base_audio_visualizer_debug GST_DEBUG_CATEGORY_STATIC(GST_CAT_DEFAULT); +#define DEFAULT_TIMESTAMP_OFFSET 0 + struct _GstGLBaseAudioVisualizerPrivate { GstGLContext *other_context; + GstGLMemory *out_tex; + GstBuffer *in_audio; + + gint64 timestamp_offset; /* base offset */ + gint64 n_frames; /* total frames sent */ + GstClockTime buf_running_time; /* determined by no. of frames rendered. clock + for buffer position. 
*/ - gint64 n_frames; /* total frames sent */ gboolean gl_result; gboolean gl_started; @@ -67,12 +84,12 @@ struct _GstGLBaseAudioVisualizerPrivate { }; /* Properties */ -enum { PROP_0 }; +enum { PROP_0, PROP_TIMESTAMP_OFFSET }; #define gst_gl_base_audio_visualizer_parent_class parent_class G_DEFINE_ABSTRACT_TYPE_WITH_CODE( GstGLBaseAudioVisualizer, gst_gl_base_audio_visualizer, - GST_TYPE_AUDIO_VISUALIZER, + GST_TYPE_PM_AUDIO_VISUALIZER, G_ADD_PRIVATE(GstGLBaseAudioVisualizer) GST_DEBUG_CATEGORY_INIT(gst_gl_base_audio_visualizer_debug, "glbaseaudiovisualizer", 0, @@ -88,39 +105,76 @@ static void gst_gl_base_audio_visualizer_get_property(GObject *object, GValue *value, GParamSpec *pspec); +/* discover gl context / display from gst */ static void gst_gl_base_audio_visualizer_set_context(GstElement *element, GstContext *context); +/* handle pipeline state changes */ static GstStateChangeReturn gst_gl_base_audio_visualizer_change_state(GstElement *element, GstStateChange transition); -static gboolean gst_gl_base_audio_visualizer_render(GstAudioVisualizer *bscope, - GstBuffer *audio, - GstVideoFrame *video); +/* renders a video frame using gl, impl for parent class + * GstPMAudioVisualizerClass. 
*/ +static gboolean gst_gl_base_audio_visualizer_parent_render( + GstPMAudioVisualizer *bscope, GstBuffer *audio, GstVideoFrame *video); + +/* internal utility for resetting state on start */ static void gst_gl_base_audio_visualizer_start(GstGLBaseAudioVisualizer *glav); + +/* internal utility for cleaning up gl context on stop */ static void gst_gl_base_audio_visualizer_stop(GstGLBaseAudioVisualizer *glav); -static gboolean -gst_gl_base_audio_visualizer_decide_allocation(GstAudioVisualizer *gstav, - GstQuery *query); +/* gl memory pool allocation impl for parent class GstPMAudioVisualizerClass */ +static gboolean gst_gl_base_audio_visualizer_parent_decide_allocation( + GstPMAudioVisualizer *gstav, GstQuery *query); + +/* called when format changes, default empty v-impl for this class. can be + * overwritten by implementer. */ static gboolean gst_gl_base_audio_visualizer_default_setup(GstGLBaseAudioVisualizer *glav); + +/* gl context is started and usable. called from gl thread. default empty v-impl + * for this class, can be overwritten by implementer. */ static gboolean gst_gl_base_audio_visualizer_default_gl_start(GstGLBaseAudioVisualizer *glav); + +/* gl context is shutting down. called from gl thread. default empty v-impl for + * this class. can be overwritten by implementer. */ static void gst_gl_base_audio_visualizer_default_gl_stop(GstGLBaseAudioVisualizer *glav); -static gboolean gst_gl_base_audio_visualizer_default_gl_render( - GstGLBaseAudioVisualizer *glav, GstBuffer *audio, GstVideoFrame *video); +/* default empty v-impl for rendering a frame. called from gl thread. can be + * overwritten by implementer. */ +static gboolean gst_gl_base_audio_visualizer_default_fill_gl_memory( + GstGLBaseAudioVisualizer *glav, GstBuffer *in_audio, GstGLMemory *mem); + +/* find a valid gl context. lock must have already been acquired. 
*/ static gboolean gst_gl_base_audio_visualizer_find_gl_context_unlocked( GstGLBaseAudioVisualizer *glav); -static gboolean gst_gl_base_audio_visualizer_setup(GstAudioVisualizer *gstav); +/* called whenever the format changes, impl for parent class + * GstPMAudioVisualizerClass */ +static gboolean +gst_gl_base_audio_visualizer_parent_setup(GstPMAudioVisualizer *gstav); + +/* output buffer allocation default v-impl for this class. can be overwritten by + * implementer. */ +static GstFlowReturn gst_gl_base_audio_visualizer_default_prepare_output_buffer( + GstGLBaseAudioVisualizer *scope, GstBuffer **outbuf); + +/* output buffer allocation impl for parent class GstPMAudioVisualizerClass */ +static GstFlowReturn gst_gl_base_audio_visualizer_parent_prepare_output_buffer( + GstPMAudioVisualizer *scope, GstBuffer **outbuf); + +/* map output video frame to buffer outbuf with gl flags, impl for parent class + * GstPMAudioVisualizerClass */ +static void gst_gl_base_audio_visualizer_parent_map_output_buffer( + GstPMAudioVisualizer *scope, GstVideoFrame *outframe, GstBuffer *outbuf); static void gst_gl_base_audio_visualizer_class_init(GstGLBaseAudioVisualizerClass *klass) { GObjectClass *gobject_class = G_OBJECT_CLASS(klass); - GstAudioVisualizerClass *gstav_class = GST_AUDIO_VISUALIZER_CLASS(klass); + GstPMAudioVisualizerClass *gstav_class = GST_PM_AUDIO_VISUALIZER_CLASS(klass); GstElementClass *element_class = GST_ELEMENT_CLASS(klass); gobject_class->finalize = gst_gl_base_audio_visualizer_finalize; @@ -134,26 +188,52 @@ gst_gl_base_audio_visualizer_class_init(GstGLBaseAudioVisualizerClass *klass) { GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_change_state); gstav_class->decide_allocation = - GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_decide_allocation); - gstav_class->setup = GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_setup); + GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_parent_decide_allocation); - gstav_class->render = 
GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_render); + gstav_class->setup = + GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_parent_setup); + + gstav_class->render = + GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_parent_render); + + gstav_class->prepare_output_buffer = GST_DEBUG_FUNCPTR( + gst_gl_base_audio_visualizer_parent_prepare_output_buffer); + + gstav_class->map_output_buffer = + GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_parent_map_output_buffer); klass->supported_gl_api = GST_GL_API_ANY; + klass->gl_start = GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_default_gl_start); + klass->gl_stop = GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_default_gl_stop); - klass->gl_render = - GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_default_gl_render); + klass->setup = GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_default_setup); + + klass->fill_gl_memory = + GST_DEBUG_FUNCPTR(gst_gl_base_audio_visualizer_default_fill_gl_memory); + + klass->prepare_output_buffer = GST_DEBUG_FUNCPTR( + gst_gl_base_audio_visualizer_default_prepare_output_buffer); + + g_object_class_install_property( + gobject_class, PROP_TIMESTAMP_OFFSET, + g_param_spec_int64("timestamp-offset", "Timestamp Offset", + "Specifies initial offset for the stream timestamp.", + 0, G_MAXINT64, DEFAULT_TIMESTAMP_OFFSET, + G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); } static void gst_gl_base_audio_visualizer_init(GstGLBaseAudioVisualizer *glav) { glav->priv = gst_gl_base_audio_visualizer_get_instance_private(glav); glav->priv->gl_started = FALSE; glav->priv->gl_result = TRUE; + glav->priv->in_audio = NULL; + glav->priv->out_tex = NULL; glav->context = NULL; + glav->pts = 0; g_rec_mutex_init(&glav->priv->context_lock); gst_gl_base_audio_visualizer_start(glav); } @@ -174,6 +254,11 @@ static void gst_gl_base_audio_visualizer_set_property(GObject *object, GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(object); switch (prop_id) { + + case PROP_TIMESTAMP_OFFSET: + 
glav->priv->timestamp_offset = g_value_get_int64(value); + break; + default: G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec); break; @@ -187,6 +272,11 @@ static void gst_gl_base_audio_visualizer_get_property(GObject *object, GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(object); switch (prop_id) { + + case PROP_TIMESTAMP_OFFSET: + g_value_set_int64(value, glav->priv->timestamp_offset); + break; + default: G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec); break; @@ -212,8 +302,7 @@ static void gst_gl_base_audio_visualizer_set_context(GstElement *element, if (old_display != new_display) { gst_clear_object(&glav->context); if (gst_gl_base_audio_visualizer_find_gl_context_unlocked(glav)) { - // TODO does this need to be handled ? - // gst_pad_mark_reconfigure (GST_BASE_SRC_PAD (glav)); + gst_pad_mark_reconfigure(GST_BASE_SRC_PAD(glav)); } } } @@ -266,81 +355,149 @@ static void gst_gl_base_audio_visualizer_gl_stop(GstGLContext *context, glav->priv->gl_started = FALSE; } -static gboolean gst_gl_base_audio_visualizer_setup(GstAudioVisualizer *gstav) { - GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(gstav); - GstGLBaseAudioVisualizerClass *glav_class = - GST_GL_BASE_AUDIO_VISUALIZER_GET_CLASS(gstav); +static GstFlowReturn gst_gl_base_audio_visualizer_default_prepare_output_buffer( + GstGLBaseAudioVisualizer *scope, GstBuffer **outbuf) { + GstPMAudioVisualizer *pmav = GST_PM_AUDIO_VISUALIZER(scope); + return gst_pm_audio_visualizer_default_prepare_output_buffer(pmav, outbuf); +} - // cascade setup to the derived plugin after gl initialization has been - // completed - return glav_class->setup(glav); +static GstFlowReturn gst_gl_base_audio_visualizer_parent_prepare_output_buffer( + GstPMAudioVisualizer *scope, GstBuffer **outbuf) { + GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(scope); + GstGLBaseAudioVisualizerClass *klass = + GST_GL_BASE_AUDIO_VISUALIZER_GET_CLASS(glav); + return 
klass->prepare_output_buffer(glav, outbuf); } -static gboolean gst_gl_base_audio_visualizer_default_gl_render( - GstGLBaseAudioVisualizer *glav, GstBuffer *audio, GstVideoFrame *video) { - return TRUE; +static void gst_gl_base_audio_visualizer_parent_map_output_buffer( + GstPMAudioVisualizer *scope, GstVideoFrame *outframe, GstBuffer *outbuf) { + /* map video to gl memory */ + gst_video_frame_map(outframe, &scope->vinfo, outbuf, + GST_MAP_WRITE | GST_MAP_GL | + GST_VIDEO_FRAME_MAP_FLAG_NO_REF); } -typedef struct { - GstGLBaseAudioVisualizer *glav; - GstBuffer *in_audio; - GstVideoFrame *out_video; -} GstGLRenderCallbackParams; +static gboolean gst_gl_base_audio_visualizer_default_fill_gl_memory( + GstGLBaseAudioVisualizer *glav, GstBuffer *in_audio, GstGLMemory *mem) { + return TRUE; +} -static void -gst_gl_base_audio_visualizer_gl_thread_render_callback(gpointer params) { - GstGLRenderCallbackParams *cb_params = (GstGLRenderCallbackParams *)params; +static void _fill_gl(GstGLContext *context, GstGLBaseAudioVisualizer *glav) { GstGLBaseAudioVisualizerClass *klass = - GST_GL_BASE_AUDIO_VISUALIZER_GET_CLASS(cb_params->glav); + GST_GL_BASE_AUDIO_VISUALIZER_GET_CLASS(glav); + GST_TRACE_OBJECT(glav, "filling gl memory %p", glav->priv->out_tex); // inside gl thread: call virtual render function with audio and video - cb_params->glav->priv->gl_result = klass->gl_render( - cb_params->glav, cb_params->in_audio, cb_params->out_video); + glav->priv->gl_result = + klass->fill_gl_memory(glav, glav->priv->in_audio, glav->priv->out_tex); } -static gboolean gst_gl_base_audio_visualizer_render(GstAudioVisualizer *bscope, - GstBuffer *audio, - GstVideoFrame *video) { - GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(bscope); - GstGLRenderCallbackParams cb_params; - GstGLWindow *window; +static GstFlowReturn +gst_gl_base_audio_visualizer_fill(GstPMAudioVisualizer *bscope, + GstGLBaseAudioVisualizer *glav, + GstBuffer *audio, GstVideoFrame *video) { + GstClockTime 
next_time; + GstGLSyncMeta *sync_meta; g_rec_mutex_lock(&glav->priv->context_lock); + if (G_UNLIKELY(!glav->context)) + goto not_negotiated; + + /* 0 framerate and we are at the second frame, eos */ + if (G_UNLIKELY(GST_VIDEO_INFO_FPS_N(&bscope->vinfo) == 0 && + glav->priv->n_frames == 1)) + goto eos; - // wrap params into cb_params struct to pass them to the GL window/thread via - // userdata pointer - cb_params.glav = glav; - cb_params.in_audio = audio; - cb_params.out_video = video; + GstBuffer *buffer = video->buffer; - window = gst_gl_context_get_window(glav->context); + // the following vars are params for passing values to _fill_gl() + // video is mapped to gl memory + glav->priv->out_tex = (GstGLMemory *)video->map[0].memory; + glav->priv->in_audio = audio; - // dispatch render call through the gl thread - // call is blocking, accessing audio and video params from gl thread *should* - // be safe - gst_gl_window_send_message( - window, - GST_GL_WINDOW_CB(gst_gl_base_audio_visualizer_gl_thread_render_callback), - &cb_params); + // make current presentation timestamp accessible before rendering + glav->pts = GST_BUFFER_PTS(buffer); - gst_object_unref(window); + // dispatch _fill_gl to the gl thread, blocking call + gst_gl_context_thread_add(glav->context, (GstGLContextThreadFunc)_fill_gl, + glav); + + // clear param refs, these pointers never owned the data + glav->priv->out_tex = NULL; + glav->priv->in_audio = NULL; + + if (!glav->priv->gl_result) + goto gl_error; + + sync_meta = gst_buffer_get_gl_sync_meta(buffer); + if (sync_meta) + gst_gl_sync_meta_set_sync_point(sync_meta, glav->context); g_rec_mutex_unlock(&glav->priv->context_lock); - if (glav->priv->gl_result) { - glav->priv->n_frames++; + GST_BUFFER_TIMESTAMP(buffer) = + glav->priv->timestamp_offset + glav->priv->buf_running_time; + GST_BUFFER_OFFSET(buffer) = glav->priv->n_frames; + glav->priv->n_frames++; + GST_BUFFER_OFFSET_END(buffer) = glav->priv->n_frames; + if (bscope->vinfo.fps_n) { + 
next_time = + gst_util_uint64_scale_int(glav->priv->n_frames * GST_SECOND, + bscope->vinfo.fps_d, bscope->vinfo.fps_n); + GST_BUFFER_DURATION(buffer) = next_time - glav->priv->buf_running_time; } else { - // gl error - GST_ELEMENT_ERROR(glav, RESOURCE, NOT_FOUND, - (("failed to render audio visualizer")), - (("A GL error occurred"))); + next_time = glav->priv->timestamp_offset; + /* NONE means forever */ + GST_BUFFER_DURATION(buffer) = GST_CLOCK_TIME_NONE; } + glav->priv->buf_running_time = next_time; + + return GST_FLOW_OK; + +gl_error: { + g_rec_mutex_unlock(&glav->priv->context_lock); + GST_ELEMENT_ERROR(glav, RESOURCE, NOT_FOUND, (("failed to draw pattern")), + (("A GL error occurred"))); + return GST_FLOW_NOT_NEGOTIATED; +} +not_negotiated: { + g_rec_mutex_unlock(&glav->priv->context_lock); + GST_ELEMENT_ERROR(glav, CORE, NEGOTIATION, (NULL), + (("format wasn't negotiated before get function"))); + return GST_FLOW_NOT_NEGOTIATED; +} +eos: { + g_rec_mutex_unlock(&glav->priv->context_lock); + GST_DEBUG_OBJECT(glav, "eos: 0 framerate, frame %d", + (gint)glav->priv->n_frames); + return GST_FLOW_EOS; +} +} + +static gboolean +gst_gl_base_audio_visualizer_parent_setup(GstPMAudioVisualizer *gstav) { + GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(gstav); + GstGLBaseAudioVisualizerClass *glav_class = + GST_GL_BASE_AUDIO_VISUALIZER_GET_CLASS(gstav); + + // cascade setup to the derived plugin after gl initialization has been + // completed + return glav_class->setup(glav); +} + +static gboolean gst_gl_base_audio_visualizer_parent_render( + GstPMAudioVisualizer *bscope, GstBuffer *audio, GstVideoFrame *video) { + GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(bscope); + + gst_gl_base_audio_visualizer_fill(bscope, glav, audio, video); + return glav->priv->gl_result; } static void gst_gl_base_audio_visualizer_start(GstGLBaseAudioVisualizer *glav) { glav->priv->n_frames = 0; + glav->priv->buf_running_time = 0; } static void 
gst_gl_base_audio_visualizer_stop(GstGLBaseAudioVisualizer *glav) { @@ -493,9 +650,8 @@ error: { } } -static gboolean -gst_gl_base_audio_visualizer_decide_allocation(GstAudioVisualizer *gstav, - GstQuery *query) { +static gboolean gst_gl_base_audio_visualizer_parent_decide_allocation( + GstPMAudioVisualizer *gstav, GstQuery *query) { GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(gstav); GstGLContext *context; GstBufferPool *pool = NULL; diff --git a/src/gstglbaseaudiovisualizer.h b/src/gstglbaseaudiovisualizer.h index a48781b..1ecf611 100644 --- a/src/gstglbaseaudiovisualizer.h +++ b/src/gstglbaseaudiovisualizer.h @@ -32,9 +32,8 @@ #ifndef __GST_GL_BASE_AUDIO_VISUALIZER_H__ #define __GST_GL_BASE_AUDIO_VISUALIZER_H__ +#include "gstpmaudiovisualizer.h" #include -#include -#include #include typedef struct _GstGLBaseAudioVisualizer GstGLBaseAudioVisualizer; @@ -72,12 +71,15 @@ GType gst_gl_base_audio_visualizer_get_type(void); * The parent instance type of a base GL Audio Visualizer. */ struct _GstGLBaseAudioVisualizer { - GstAudioVisualizer parent; + GstPMAudioVisualizer parent; /*< public >*/ GstGLDisplay *display; GstGLContext *context; + /* current buffer presentation timestamp */ + guint64 pts; + /*< private >*/ gpointer _padding[GST_PADDING]; @@ -91,21 +93,31 @@ struct _GstGLBaseAudioVisualizer { * @gl_stop: called in the GL thread to clean up the element GL state. * @gl_render: called in the GL thread to fill the current video texture. * @setup: called when the format changes (delegate from - * GstAudioVisualizer.setup) + * GstPMAudioVisualizer.setup) * * The base class for OpenGL based audio visualizers. - * + * Extends GstPMAudioVisualizer to add GL rendering callbacks. + * Handles GL context and render buffers. 
*/ struct _GstGLBaseAudioVisualizerClass { - GstAudioVisualizerClass parent_class; + GstPMAudioVisualizerClass parent_class; /*< public >*/ GstGLAPI supported_gl_api; + /* called from gl thread once the gl context can be used for initializing gl + * resources */ gboolean (*gl_start)(GstGLBaseAudioVisualizer *glav); + /* called from gl thread when gl context is being closed for gl resource cleanup */ void (*gl_stop)(GstGLBaseAudioVisualizer *glav); - gboolean (*gl_render)(GstGLBaseAudioVisualizer *glav, GstBuffer *audio, - GstVideoFrame *video); + /* called when caps have been set for the pipeline */ gboolean (*setup)(GstGLBaseAudioVisualizer *glav); + /* called to render each frame */ + gboolean (*fill_gl_memory)(GstGLBaseAudioVisualizer *glav, + GstBuffer *in_audio, GstGLMemory *mem); + /* allocate buffer for frame rendering */ + GstFlowReturn (*prepare_output_buffer)(GstGLBaseAudioVisualizer *glav, + GstBuffer **outbuf); + /*< private >*/ gpointer _padding[GST_PADDING]; }; diff --git a/src/gstpmaudiovisualizer.c b/src/gstpmaudiovisualizer.c new file mode 100644 index 0000000..f781c9e --- /dev/null +++ b/src/gstpmaudiovisualizer.c @@ -0,0 +1,1013 @@ +/* GStreamer + * Copyright (C) <2011> Stefan Kost + * Copyright (C) <2015> Luis de Bethencourt + * + * gstaudiovisualizer.h: base class for audio visualisation elements + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. 
+ * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, + * Boston, MA 02110-1301, USA. + */ +/** + * SECTION:gstaudiovisualizer + * @title: GstPMAudioVisualizer + * @short_description: Base class for visualizers. + * + * A baseclass for scopes (visualizers). It takes care of re-fitting the + * audio-rate to video-rate and handles renegotiation (downstream video size + * changes). + * + * It also provides several background shading effects. These effects are + * applied to a previous picture before the `render()` implementation can draw a + * new frame. + */ + +/* + * The code in this file is based on + * GStreamer / gst-plugins-base, latest version as of 2025/05/29. + * gst-libs/gst/pbutils/gstaudiovisualizer.c Git Repository: + * https://gitlab.freedesktop.org/gstreamer/gstreamer/-/blob/main/subprojects/gst-plugins-base/gst-libs/gst/pbutils/gstaudiovisualizer.c + * Original copyright notice has been retained at the top of this file. + * + * The code has been modified to improve compatibility with projectM and OpenGL. + * + * - New apis for implementer-provided memory allocation and video frame + * buffer mapping. Used by gl plugins for mapping video frames directly to gl + * memory. + * + * - Expose the stream time (dts) state. + * + * - Main memory based video frame buffers have been removed. + * + * - Cpu based transition shaders have been removed. + * + * - Bugfix for the amount of bytes being flushed for a single video frame from + * the audio input buffer. + * + * - Bugfix for long qos frame drops while real-time rendering. 
+ * + * Typical plug-in call order for implementer-provided functions: + * - decide_allocation (once) + * - setup (once) + * - prepare_output_buffer (once for each frame) + * - map_output_buffer (once for each frame) + * - render (once for each frame) + */ +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include + +#include + +#include "gstpmaudiovisualizer.h" +#include + +GST_DEBUG_CATEGORY_STATIC(pm_audio_visualizer_debug); +#define GST_CAT_DEFAULT (pm_audio_visualizer_debug) + +enum { PROP_0 }; + +static GstBaseTransformClass *parent_class = NULL; +static gint private_offset = 0; + +static void +gst_pm_audio_visualizer_class_init(GstPMAudioVisualizerClass *klass); +static void gst_pm_audio_visualizer_init(GstPMAudioVisualizer *scope, + GstPMAudioVisualizerClass *g_class); +static void gst_pm_audio_visualizer_set_property(GObject *object, guint prop_id, + const GValue *value, + GParamSpec *pspec); +static void gst_pm_audio_visualizer_get_property(GObject *object, guint prop_id, + GValue *value, + GParamSpec *pspec); +static void gst_pm_audio_visualizer_dispose(GObject *object); + +static gboolean +gst_pm_audio_visualizer_src_negotiate(GstPMAudioVisualizer *scope); +static gboolean gst_pm_audio_visualizer_src_setcaps(GstPMAudioVisualizer *scope, + GstCaps *caps); +static gboolean +gst_pm_audio_visualizer_sink_setcaps(GstPMAudioVisualizer *scope, + GstCaps *caps); + +static GstFlowReturn gst_pm_audio_visualizer_chain(GstPad *pad, + GstObject *parent, + GstBuffer *buffer); + +static gboolean gst_pm_audio_visualizer_src_event(GstPad *pad, + GstObject *parent, + GstEvent *event); +static gboolean gst_pm_audio_visualizer_sink_event(GstPad *pad, + GstObject *parent, + GstEvent *event); + +static gboolean gst_pm_audio_visualizer_src_query(GstPad *pad, + GstObject *parent, + GstQuery *query); + +static GstStateChangeReturn +gst_pm_audio_visualizer_change_state(GstElement *element, + GstStateChange transition); + +static gboolean 
+gst_pm_audio_visualizer_do_bufferpool(GstPMAudioVisualizer *scope, + GstCaps *outcaps); + +static gboolean +gst_pm_audio_visualizer_default_decide_allocation(GstPMAudioVisualizer *scope, + GstQuery *query); + +static void gst_pm_audio_visualizer_default_map_output_buffer( + GstPMAudioVisualizer *scope, GstVideoFrame *outframe, GstBuffer *outbuf); + +struct _GstPMAudioVisualizerPrivate { + gboolean negotiated; + + GstBufferPool *pool; + gboolean pool_active; + GstAllocator *allocator; + GstAllocationParams params; + GstQuery *query; + + /* pads */ + GstPad *srcpad, *sinkpad; + + GstAdapter *adapter; + + GstBuffer *inbuf; + + guint spf; /* samples per video frame */ + guint64 frame_duration; + + /* QoS stuff */ /* with LOCK */ + gdouble proportion; + /* qos: earliest time to render the next frame, the render loop will skip + * frames until this time */ + GstClockTime earliest_time; + + guint dropped; /* frames dropped / not dropped */ + guint processed; + + /* configuration mutex */ + GMutex config_lock; + + GstSegment segment; +}; + +/* base class */ + +GType gst_pm_audio_visualizer_get_type(void) { + static gsize audio_visualizer_type = 0; + + if (g_once_init_enter(&audio_visualizer_type)) { + static const GTypeInfo audio_visualizer_info = { + sizeof(GstPMAudioVisualizerClass), + NULL, + NULL, + (GClassInitFunc)gst_pm_audio_visualizer_class_init, + NULL, + NULL, + sizeof(GstPMAudioVisualizer), + 0, + (GInstanceInitFunc)gst_pm_audio_visualizer_init, + }; + GType _type; + + /* TODO: rename when exporting it as a library */ + _type = + g_type_register_static(GST_TYPE_ELEMENT, "GstPMAudioVisualizer", + &audio_visualizer_info, G_TYPE_FLAG_ABSTRACT); + + private_offset = + g_type_add_instance_private(_type, sizeof(GstPMAudioVisualizerPrivate)); + + g_once_init_leave(&audio_visualizer_type, _type); + } + return (GType)audio_visualizer_type; +} + +static inline GstPMAudioVisualizerPrivate * +gst_pm_audio_visualizer_get_instance_private(GstPMAudioVisualizer *self) { + 
return (G_STRUCT_MEMBER_P(self, private_offset)); +} + +static void +gst_pm_audio_visualizer_class_init(GstPMAudioVisualizerClass *klass) { + GObjectClass *gobject_class = (GObjectClass *)klass; + GstElementClass *element_class = (GstElementClass *)klass; + + if (private_offset != 0) + g_type_class_adjust_private_offset(klass, &private_offset); + + parent_class = g_type_class_peek_parent(klass); + + GST_DEBUG_CATEGORY_INIT(pm_audio_visualizer_debug, "pmaudiovisualizer", 0, + "projectm audio visualisation base class"); + + gobject_class->set_property = gst_pm_audio_visualizer_set_property; + gobject_class->get_property = gst_pm_audio_visualizer_get_property; + gobject_class->dispose = gst_pm_audio_visualizer_dispose; + + element_class->change_state = + GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_change_state); + + klass->decide_allocation = + GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_default_decide_allocation); + klass->prepare_output_buffer = + GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_default_prepare_output_buffer); + klass->map_output_buffer = + GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_default_map_output_buffer); +} + +static void gst_pm_audio_visualizer_init(GstPMAudioVisualizer *scope, + GstPMAudioVisualizerClass *g_class) { + GstPadTemplate *pad_template; + + scope->priv = gst_pm_audio_visualizer_get_instance_private(scope); + + /* create the sink and src pads */ + pad_template = + gst_element_class_get_pad_template(GST_ELEMENT_CLASS(g_class), "sink"); + g_return_if_fail(pad_template != NULL); + scope->priv->sinkpad = gst_pad_new_from_template(pad_template, "sink"); + gst_pad_set_chain_function(scope->priv->sinkpad, + GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_chain)); + gst_pad_set_event_function( + scope->priv->sinkpad, + GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_sink_event)); + gst_element_add_pad(GST_ELEMENT(scope), scope->priv->sinkpad); + + pad_template = + gst_element_class_get_pad_template(GST_ELEMENT_CLASS(g_class), "src"); + 
g_return_if_fail(pad_template != NULL); + scope->priv->srcpad = gst_pad_new_from_template(pad_template, "src"); + gst_pad_set_event_function( + scope->priv->srcpad, + GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_src_event)); + gst_pad_set_query_function( + scope->priv->srcpad, + GST_DEBUG_FUNCPTR(gst_pm_audio_visualizer_src_query)); + gst_element_add_pad(GST_ELEMENT(scope), scope->priv->srcpad); + + scope->priv->adapter = gst_adapter_new(); + scope->priv->inbuf = gst_buffer_new(); + scope->stream_time = 0; + + /* properties */ + + /* reset the initial video state */ + gst_video_info_init(&scope->vinfo); + scope->priv->frame_duration = GST_CLOCK_TIME_NONE; + + /* reset the initial state */ + gst_audio_info_init(&scope->ainfo); + gst_video_info_init(&scope->vinfo); + + g_mutex_init(&scope->priv->config_lock); +} + +static void gst_pm_audio_visualizer_set_property(GObject *object, guint prop_id, + const GValue *value, + GParamSpec *pspec) { + GstPMAudioVisualizer *scope = GST_PM_AUDIO_VISUALIZER(object); + + switch (prop_id) { + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec); + break; + } +} + +static void gst_pm_audio_visualizer_get_property(GObject *object, guint prop_id, + GValue *value, + GParamSpec *pspec) { + GstPMAudioVisualizer *scope = GST_PM_AUDIO_VISUALIZER(object); + + switch (prop_id) { + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec); + break; + } +} + +static void gst_pm_audio_visualizer_dispose(GObject *object) { + GstPMAudioVisualizer *scope = GST_PM_AUDIO_VISUALIZER(object); + + if (scope->priv->adapter) { + g_object_unref(scope->priv->adapter); + scope->priv->adapter = NULL; + } + if (scope->priv->inbuf) { + gst_buffer_unref(scope->priv->inbuf); + scope->priv->inbuf = NULL; + } + if (scope->priv->config_lock.p) { + g_mutex_clear(&scope->priv->config_lock); + scope->priv->config_lock.p = NULL; + } + G_OBJECT_CLASS(parent_class)->dispose(object); +} + +static void 
gst_pm_audio_visualizer_reset(GstPMAudioVisualizer *scope) { + gst_adapter_clear(scope->priv->adapter); + gst_segment_init(&scope->priv->segment, GST_FORMAT_UNDEFINED); + + GST_OBJECT_LOCK(scope); + scope->priv->proportion = 1.0; + scope->priv->earliest_time = -1; + scope->priv->dropped = 0; + scope->priv->processed = 0; + GST_OBJECT_UNLOCK(scope); +} + +static gboolean +gst_pm_audio_visualizer_sink_setcaps(GstPMAudioVisualizer *scope, + GstCaps *caps) { + GstAudioInfo info; + + if (!gst_audio_info_from_caps(&info, caps)) + goto wrong_caps; + + g_mutex_lock(&scope->priv->config_lock); + scope->ainfo = info; + g_mutex_unlock(&scope->priv->config_lock); + + GST_DEBUG_OBJECT(scope, "audio: channels %d, rate %d", + GST_AUDIO_INFO_CHANNELS(&info), GST_AUDIO_INFO_RATE(&info)); + + if (!gst_pm_audio_visualizer_src_negotiate(scope)) { + goto not_negotiated; + } + + return TRUE; + + /* Errors */ +wrong_caps: { + GST_WARNING_OBJECT(scope, "could not parse caps"); + return FALSE; +} +not_negotiated: { + GST_WARNING_OBJECT(scope, "failed to negotiate"); + return FALSE; +} +} + +static gboolean gst_pm_audio_visualizer_src_setcaps(GstPMAudioVisualizer *scope, + GstCaps *caps) { + GstVideoInfo info; + GstPMAudioVisualizerClass *klass; + gboolean res; + + if (!gst_video_info_from_caps(&info, caps)) + goto wrong_caps; + + klass = GST_PM_AUDIO_VISUALIZER_CLASS(G_OBJECT_GET_CLASS(scope)); + + g_mutex_lock(&scope->priv->config_lock); + + scope->vinfo = info; + + scope->priv->frame_duration = gst_util_uint64_scale_int( + GST_SECOND, GST_VIDEO_INFO_FPS_D(&info), GST_VIDEO_INFO_FPS_N(&info)); + scope->priv->spf = gst_util_uint64_scale_int( + GST_AUDIO_INFO_RATE(&scope->ainfo), GST_VIDEO_INFO_FPS_D(&info), + GST_VIDEO_INFO_FPS_N(&info)); + scope->req_spf = scope->priv->spf; + + g_mutex_unlock(&scope->priv->config_lock); + + if (klass->setup && !klass->setup(scope)) + goto setup_failed; + + GST_INFO_OBJECT(scope, "video: dimension %dx%d, framerate %d/%d", + GST_VIDEO_INFO_WIDTH(&info), 
GST_VIDEO_INFO_HEIGHT(&info), + GST_VIDEO_INFO_FPS_N(&info), GST_VIDEO_INFO_FPS_D(&info)); + GST_INFO_OBJECT(scope, "audio: rate %d, channels: %d, bpf: %d", + GST_AUDIO_INFO_RATE(&scope->ainfo), + GST_AUDIO_INFO_CHANNELS(&scope->ainfo), + GST_AUDIO_INFO_BPF(&scope->ainfo)); + GST_INFO_OBJECT(scope, "blocks: spf %u, req_spf %u", scope->priv->spf, + scope->req_spf); + + gst_pad_set_caps(scope->priv->srcpad, caps); + + /* find a pool for the negotiated caps now */ + res = gst_pm_audio_visualizer_do_bufferpool(scope, caps); + gst_caps_unref(caps); + + return res; + + /* ERRORS */ +wrong_caps: { + gst_caps_unref(caps); + GST_DEBUG_OBJECT(scope, "error parsing caps"); + return FALSE; +} + +setup_failed: { + GST_WARNING_OBJECT(scope, "failed to set up"); + return FALSE; +} +} + +static gboolean +gst_pm_audio_visualizer_src_negotiate(GstPMAudioVisualizer *scope) { + GstCaps *othercaps, *target; + GstStructure *structure; + GstCaps *templ; + gboolean ret; + + templ = gst_pad_get_pad_template_caps(scope->priv->srcpad); + + GST_DEBUG_OBJECT(scope, "performing negotiation"); + + /* see what the peer can do */ + othercaps = gst_pad_peer_query_caps(scope->priv->srcpad, NULL); + if (othercaps) { + target = gst_caps_intersect(othercaps, templ); + gst_caps_unref(othercaps); + gst_caps_unref(templ); + + if (gst_caps_is_empty(target)) + goto no_format; + + target = gst_caps_truncate(target); + } else { + target = templ; + } + + target = gst_caps_make_writable(target); + structure = gst_caps_get_structure(target, 0); + gst_structure_fixate_field_nearest_int(structure, "width", 320); + gst_structure_fixate_field_nearest_int(structure, "height", 200); + gst_structure_fixate_field_nearest_fraction(structure, "framerate", 25, 1); + if (gst_structure_has_field(structure, "pixel-aspect-ratio")) + gst_structure_fixate_field_nearest_fraction(structure, "pixel-aspect-ratio", + 1, 1); + + target = gst_caps_fixate(target); + + GST_DEBUG_OBJECT(scope, "final caps are %" GST_PTR_FORMAT, target); + 
+ ret = gst_pm_audio_visualizer_src_setcaps(scope, target); + + return ret; + +no_format: { + gst_caps_unref(target); + return FALSE; +} +} + +void gst_pm_audio_visualizer_on_pad_added(GstElement *src, GstPad *new_pad, + gpointer data) { + GstElement *sink = GST_ELEMENT(data); + GstPad *sink_pad = gst_element_get_static_pad(sink, "sink"); + + if (!gst_pad_is_linked(sink_pad)) { + if (gst_pad_link(new_pad, sink_pad) != GST_PAD_LINK_OK) { + g_warning("Failed to link pads"); + } + } + + gst_object_unref(sink_pad); +} + +/* takes ownership of the pool, allocator and query */ +static gboolean gst_pm_audio_visualizer_set_allocation( + GstPMAudioVisualizer *scope, GstBufferPool *pool, GstAllocator *allocator, + const GstAllocationParams *params, GstQuery *query) { + GstAllocator *oldalloc; + GstBufferPool *oldpool; + GstQuery *oldquery; + GstPMAudioVisualizerPrivate *priv = scope->priv; + + GST_OBJECT_LOCK(scope); + oldpool = priv->pool; + priv->pool = pool; + priv->pool_active = FALSE; + + oldalloc = priv->allocator; + priv->allocator = allocator; + + oldquery = priv->query; + priv->query = query; + + if (params) + priv->params = *params; + else + gst_allocation_params_init(&priv->params); + GST_OBJECT_UNLOCK(scope); + + if (oldpool) { + GST_DEBUG_OBJECT(scope, "deactivating old pool %p", oldpool); + gst_buffer_pool_set_active(oldpool, FALSE); + gst_object_unref(oldpool); + } + if (oldalloc) { + gst_object_unref(oldalloc); + } + if (oldquery) { + gst_query_unref(oldquery); + } + return TRUE; +} + +static gboolean +gst_pm_audio_visualizer_do_bufferpool(GstPMAudioVisualizer *scope, + GstCaps *outcaps) { + GstQuery *query; + gboolean result = TRUE; + GstBufferPool *pool = NULL; + GstPMAudioVisualizerClass *klass; + GstAllocator *allocator; + GstAllocationParams params; + + /* not passthrough, we need to allocate */ + /* find a pool for the negotiated caps now */ + GST_DEBUG_OBJECT(scope, "doing allocation query"); + query = gst_query_new_allocation(outcaps, TRUE); + + if 
(!gst_pad_peer_query(scope->priv->srcpad, query)) { + /* not a problem, we use the query defaults */ + GST_DEBUG_OBJECT(scope, "allocation query failed"); + } + + klass = GST_PM_AUDIO_VISUALIZER_GET_CLASS(scope); + + GST_DEBUG_OBJECT(scope, "calling decide_allocation"); + g_assert(klass->decide_allocation != NULL); + result = klass->decide_allocation(scope, query); + + GST_DEBUG_OBJECT(scope, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, result, + query); + + if (!result) + goto no_decide_allocation; + + /* we got configuration from our peer or the decide_allocation method, + * parse them */ + if (gst_query_get_n_allocation_params(query) > 0) { + gst_query_parse_nth_allocation_param(query, 0, &allocator, ¶ms); + } else { + allocator = NULL; + gst_allocation_params_init(¶ms); + } + + if (gst_query_get_n_allocation_pools(query) > 0) + gst_query_parse_nth_allocation_pool(query, 0, &pool, NULL, NULL, NULL); + + /* now store */ + result = gst_pm_audio_visualizer_set_allocation(scope, pool, allocator, + ¶ms, query); + + return result; + + /* Errors */ +no_decide_allocation: { + GST_WARNING_OBJECT(scope, "Subclass failed to decide allocation"); + gst_query_unref(query); + + return result; +} +} + +static gboolean +gst_pm_audio_visualizer_default_decide_allocation(GstPMAudioVisualizer *scope, + GstQuery *query) { + /* removed main memory pool implementation. This vmethod is overridden for + * using gl memory by gstglbaseaudiovisualizer. */ + g_error("vmethod gst_pm_audio_visualizer_default_decide_allocation is not " + "implemented"); +} + +static void gst_pm_audio_visualizer_default_map_output_buffer( + GstPMAudioVisualizer *scope, GstVideoFrame *outframe, GstBuffer *outbuf) { + /* removed main memory buffer implementation. This vmethod is overridden for + * using gl memory by gstglbaseaudiovisualizer. 
*/ + g_error("vmethod gst_pm_audio_visualizer_default_map_output_buffer is not " + "implemented"); +} + +GstFlowReturn gst_pm_audio_visualizer_default_prepare_output_buffer( + GstPMAudioVisualizer *scope, GstBuffer **outbuf) { + GstPMAudioVisualizerPrivate *priv; + + priv = scope->priv; + + g_assert(priv->pool != NULL); + + /* we can't reuse the input buffer */ + if (!priv->pool_active) { + GST_DEBUG_OBJECT(scope, "setting pool %p active", priv->pool); + if (!gst_buffer_pool_set_active(priv->pool, TRUE)) + goto activate_failed; + priv->pool_active = TRUE; + } + GST_DEBUG_OBJECT(scope, "using pool alloc"); + + return gst_buffer_pool_acquire_buffer(priv->pool, outbuf, NULL); + + /* ERRORS */ +activate_failed: { + GST_ELEMENT_ERROR(scope, RESOURCE, SETTINGS, + ("failed to activate bufferpool"), + ("failed to activate bufferpool")); + return GST_FLOW_ERROR; +} +} + +static GstFlowReturn gst_pm_audio_visualizer_chain(GstPad *pad, + GstObject *parent, + GstBuffer *buffer) { + GstFlowReturn ret = GST_FLOW_OK; + GstPMAudioVisualizer *scope; + GstPMAudioVisualizerClass *klass; + guint64 dist, ts; + guint avail, sbpf; + // databuf is a buffer holding to one video frame worth of audio data used as + // temp buffer for copying from the adapter only + // inbuf is a plugin-scoped buffer holding a copy of the one video frame worth + // of audio data from the adapter to process + GstBuffer *databuf, *inbuf; + gint bpf, rate; + + scope = GST_PM_AUDIO_VISUALIZER(parent); + klass = GST_PM_AUDIO_VISUALIZER_CLASS(G_OBJECT_GET_CLASS(scope)); + + GST_LOG_OBJECT(scope, "chainfunc called"); + + /* resync on DISCONT */ + if (GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_DISCONT)) { + gst_adapter_clear(scope->priv->adapter); + } + + /* Make sure have an output format */ + if (gst_pad_check_reconfigure(scope->priv->srcpad)) { + if (!gst_pm_audio_visualizer_src_negotiate(scope)) { + gst_pad_mark_reconfigure(scope->priv->srcpad); + goto not_negotiated; + } + } + + rate = 
GST_AUDIO_INFO_RATE(&scope->ainfo); + bpf = GST_AUDIO_INFO_BPF(&scope->ainfo); + + if (bpf == 0) { + ret = GST_FLOW_NOT_NEGOTIATED; + goto beach; + } + + gst_adapter_push(scope->priv->adapter, buffer); + + g_mutex_lock(&scope->priv->config_lock); + + /* this is what we want */ + /* number of audio bytes to process for one video frame */ + /* samples per video frame * audio bytes per frame for both channels */ + sbpf = scope->req_spf * bpf; + + inbuf = scope->priv->inbuf; + /* FIXME: the timestamp in the adapter would be different */ + gst_buffer_copy_into(inbuf, buffer, GST_BUFFER_COPY_METADATA, 0, -1); + + /* this is what we have */ + avail = gst_adapter_available(scope->priv->adapter); + GST_LOG_OBJECT(scope, "avail: %u, bpf: %u", avail, sbpf); + while (avail >= sbpf) { + GstBuffer *outbuf; + GstVideoFrame outframe; + + /* get timestamp of the current adapter content (audio input) */ + ts = gst_adapter_prev_pts(scope->priv->adapter, &dist); + if (GST_CLOCK_TIME_IS_VALID(ts)) { + /* convert bytes to time */ + ts += gst_util_uint64_scale_int(dist, GST_SECOND, rate * bpf); + } + + /* check for QoS, don't compute buffers that are known to be late */ + if (GST_CLOCK_TIME_IS_VALID(ts)) { + GstClockTime earliest_time; + gdouble proportion; + gint64 qostime; + + qostime = gst_segment_to_running_time(&scope->priv->segment, + GST_FORMAT_TIME, ts) + + scope->priv->frame_duration; + + GST_OBJECT_LOCK(scope); + earliest_time = scope->priv->earliest_time; + proportion = scope->priv->proportion; + GST_OBJECT_UNLOCK(scope); + + if (GST_CLOCK_TIME_IS_VALID(earliest_time) && qostime <= earliest_time) { + GstClockTime stream_time, jitter; + GstMessage *qos_msg; + + GST_DEBUG_OBJECT(scope, + "QoS: skip ts: %" GST_TIME_FORMAT + ", earliest: %" GST_TIME_FORMAT, + GST_TIME_ARGS(qostime), GST_TIME_ARGS(earliest_time)); + + ++scope->priv->dropped; + stream_time = gst_segment_to_stream_time(&scope->priv->segment, + GST_FORMAT_TIME, ts); + jitter = GST_CLOCK_DIFF(qostime, earliest_time); + 
qos_msg = + gst_message_new_qos(GST_OBJECT(scope), FALSE, qostime, stream_time, + ts, GST_BUFFER_DURATION(buffer)); + gst_message_set_qos_values(qos_msg, jitter, proportion, 1000000); + gst_message_set_qos_stats(qos_msg, GST_FORMAT_BUFFERS, + scope->priv->processed, scope->priv->dropped); + gst_element_post_message(GST_ELEMENT(scope), qos_msg); + + goto skip; + } + } + + // get stream time for others interested in timing information + scope->stream_time = + gst_segment_to_stream_time(&scope->priv->segment, GST_FORMAT_TIME, ts); + + ++scope->priv->processed; + + g_mutex_unlock(&scope->priv->config_lock); + ret = klass->prepare_output_buffer(scope, &outbuf); + g_mutex_lock(&scope->priv->config_lock); + /* recheck as the value could have changed */ + sbpf = scope->req_spf * bpf; + + /* no buffer allocated, we don't care why. */ + if (ret != GST_FLOW_OK) + break; + + /* sync controlled properties */ + if (GST_CLOCK_TIME_IS_VALID(ts)) + gst_object_sync_values(GST_OBJECT(scope), ts); + + GST_BUFFER_PTS(outbuf) = ts; + GST_BUFFER_DURATION(outbuf) = scope->priv->frame_duration; + + /* this can fail as the data size we need could have changed */ + if (!(databuf = gst_adapter_get_buffer(scope->priv->adapter, sbpf))) + break; + + /* allow customized memory to video frame mapping */ + g_mutex_unlock(&scope->priv->config_lock); + klass->map_output_buffer(scope, &outframe, outbuf); + g_mutex_lock(&scope->priv->config_lock); + + /* place sbpf number of bytes of audio data into inbuf */ + gst_buffer_remove_all_memory(inbuf); + gst_buffer_copy_into(inbuf, databuf, GST_BUFFER_COPY_MEMORY, 0, sbpf); + gst_buffer_unref(databuf); + + /* call class->render() vmethod */ + if (klass->render) { + g_mutex_unlock(&scope->priv->config_lock); + if (!klass->render(scope, inbuf, &outframe)) { + g_mutex_lock(&scope->priv->config_lock); + ret = GST_FLOW_ERROR; + gst_video_frame_unmap(&outframe); + goto beach; + } + g_mutex_lock(&scope->priv->config_lock); + } + gst_video_frame_unmap(&outframe); + 
+ g_mutex_unlock(&scope->priv->config_lock); + ret = gst_pad_push(scope->priv->srcpad, outbuf); + outbuf = NULL; + g_mutex_lock(&scope->priv->config_lock); + + skip: + /* recheck as the value could have changed */ + sbpf = scope->req_spf * bpf; + GST_LOG_OBJECT(scope, "avail: %u, bpf: %u", avail, sbpf); + /* we want to take less or more, depending on spf : req_spf */ + if (avail - sbpf >= sbpf) { + // enough audio data for more frames is available + gst_adapter_unmap(scope->priv->adapter); + gst_adapter_flush(scope->priv->adapter, sbpf); + } else if (avail >= sbpf) { + // was just enough audio data for one frame + /* just flush a bit and stop */ + // todo: this messes with the length and timing when using offline + // rendering. seems like a bug in the original code + // gst_adapter_flush(scope->priv->adapter, (avail - sbpf)); + + // instead just flush one video frame worth of audio data from the buffer + // and stop + gst_adapter_unmap(scope->priv->adapter); + gst_adapter_flush(scope->priv->adapter, sbpf); + break; + } + avail = gst_adapter_available(scope->priv->adapter); + + if (ret != GST_FLOW_OK) + break; + } + + g_mutex_unlock(&scope->priv->config_lock); + +beach: + return ret; + + /* ERRORS */ +not_negotiated: { + GST_DEBUG_OBJECT(scope, "Failed to renegotiate"); + return GST_FLOW_NOT_NEGOTIATED; +} +} + +static gboolean gst_pm_audio_visualizer_src_event(GstPad *pad, + GstObject *parent, + GstEvent *event) { + gboolean res; + GstPMAudioVisualizer *scope; + + scope = GST_PM_AUDIO_VISUALIZER(parent); + + switch (GST_EVENT_TYPE(event)) { + case GST_EVENT_QOS: { + gdouble proportion; + GstClockTimeDiff diff; + GstClockTime timestamp; + + gst_event_parse_qos(event, NULL, &proportion, &diff, ×tamp); + + /* save stuff for the _chain() function */ + GST_OBJECT_LOCK(scope); + scope->priv->proportion = proportion; + if (diff > 0) + /* we're late, this is a good estimate for next displayable + * frame (see part-qos.txt) */ + // bugfix, original calc seems like a lot: + 
// timestamp + diff * 2 + scope->priv->frame_duration; + // a bugfix has been added since to limit drops to second: + // scope->priv->earliest_time = timestamp + MIN (2 * diff, GST_SECOND) + + // scope->priv->frame_duration; + // the proposed one second is still way too much for us + // just allow dropping a few frames + scope->priv->earliest_time = + timestamp + MIN(2 * diff, scope->priv->frame_duration * 2) + + scope->priv->frame_duration; + else + scope->priv->earliest_time = timestamp + diff; + GST_OBJECT_UNLOCK(scope); + + res = gst_pad_push_event(scope->priv->sinkpad, event); + break; + } + case GST_EVENT_RECONFIGURE: + /* don't forward */ + gst_event_unref(event); + res = TRUE; + break; + default: + res = gst_pad_event_default(pad, parent, event); + break; + } + + return res; +} + +static gboolean gst_pm_audio_visualizer_sink_event(GstPad *pad, + GstObject *parent, + GstEvent *event) { + gboolean res; + GstPMAudioVisualizer *scope; + + scope = GST_PM_AUDIO_VISUALIZER(parent); + + switch (GST_EVENT_TYPE(event)) { + case GST_EVENT_CAPS: { + GstCaps *caps; + + gst_event_parse_caps(event, &caps); + res = gst_pm_audio_visualizer_sink_setcaps(scope, caps); + gst_event_unref(event); + break; + } + case GST_EVENT_FLUSH_STOP: + gst_pm_audio_visualizer_reset(scope); + res = gst_pad_push_event(scope->priv->srcpad, event); + break; + case GST_EVENT_SEGMENT: { + /* the newsegment values are used to clip the input samples + * and to convert the incoming timestamps to running time so + * we can do QoS */ + gst_event_copy_segment(event, &scope->priv->segment); + + res = gst_pad_push_event(scope->priv->srcpad, event); + break; + } + default: + res = gst_pad_event_default(pad, parent, event); + break; + } + + return res; +} + +static gboolean gst_pm_audio_visualizer_src_query(GstPad *pad, + GstObject *parent, + GstQuery *query) { + gboolean res = FALSE; + GstPMAudioVisualizer *scope; + + scope = GST_PM_AUDIO_VISUALIZER(parent); + + switch (GST_QUERY_TYPE(query)) { + case 
GST_QUERY_LATENCY: { + /* We need to send the query upstream and add the returned latency to our + * own */ + GstClockTime min_latency, max_latency; + gboolean us_live; + GstClockTime our_latency; + guint max_samples; + gint rate = GST_AUDIO_INFO_RATE(&scope->ainfo); + + if (rate == 0) + break; + + if ((res = gst_pad_peer_query(scope->priv->sinkpad, query))) { + gst_query_parse_latency(query, &us_live, &min_latency, &max_latency); + + GST_DEBUG_OBJECT( + scope, "Peer latency: min %" GST_TIME_FORMAT " max %" GST_TIME_FORMAT, + GST_TIME_ARGS(min_latency), GST_TIME_ARGS(max_latency)); + + /* the max samples we must buffer buffer */ + max_samples = MAX(scope->req_spf, scope->priv->spf); + our_latency = gst_util_uint64_scale_int(max_samples, GST_SECOND, rate); + + GST_DEBUG_OBJECT(scope, "Our latency: %" GST_TIME_FORMAT, + GST_TIME_ARGS(our_latency)); + + /* we add some latency but only if we need to buffer more than what + * upstream gives us */ + min_latency += our_latency; + if (max_latency != -1) + max_latency += our_latency; + + GST_DEBUG_OBJECT(scope, + "Calculated total latency : min %" GST_TIME_FORMAT + " max %" GST_TIME_FORMAT, + GST_TIME_ARGS(min_latency), GST_TIME_ARGS(max_latency)); + + gst_query_set_latency(query, TRUE, min_latency, max_latency); + } + break; + } + default: + res = gst_pad_query_default(pad, parent, query); + break; + } + + return res; +} + +static GstStateChangeReturn +gst_pm_audio_visualizer_change_state(GstElement *element, + GstStateChange transition) { + GstStateChangeReturn ret; + GstPMAudioVisualizer *scope; + + scope = GST_PM_AUDIO_VISUALIZER(element); + + switch (transition) { + case GST_STATE_CHANGE_READY_TO_PAUSED: + gst_pm_audio_visualizer_reset(scope); + break; + default: + break; + } + + ret = GST_ELEMENT_CLASS(parent_class)->change_state(element, transition); + + switch (transition) { + case GST_STATE_CHANGE_PAUSED_TO_READY: + gst_pm_audio_visualizer_set_allocation(scope, NULL, NULL, NULL, NULL); + break; + case 
GST_STATE_CHANGE_READY_TO_NULL: + break; + default: + break; + } + + return ret; +} diff --git a/src/gstpmaudiovisualizer.h b/src/gstpmaudiovisualizer.h new file mode 100644 index 0000000..e88e11f --- /dev/null +++ b/src/gstpmaudiovisualizer.h @@ -0,0 +1,124 @@ +/* GStreamer + * Copyright (C) <2011> Stefan Kost + * Copyright (C) <2015> Luis de Bethencourt + * + * gstaudiovisualizer.c: base class for audio visualisation elements + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, + * Boston, MA 02110-1301, USA. + */ + +/* + * The code in this file is based on + * GStreamer / gst-plugins-base, latest version as of 2025/05/29. + * gst-libs/gst/pbutils/gstaudiovisualizer.h Git Repository: + * https://gitlab.freedesktop.org/gstreamer/gstreamer/-/blob/main/subprojects/gst-plugins-base/gst-libs/gst/pbutils/gstaudiovisualizer.h + * + * Original copyright notice has been retained at the top of this file. + * The code has been modified to improve compatibility with projectM and OpenGL. + * See impl for details. 
+ */ + +#ifndef __GST_PM_AUDIO_VISUALIZER_H__ +#define __GST_PM_AUDIO_VISUALIZER_H__ + +#include + +#include +#include + +G_BEGIN_DECLS +#define GST_TYPE_PM_AUDIO_VISUALIZER (gst_pm_audio_visualizer_get_type()) +#define GST_PM_AUDIO_VISUALIZER(obj) \ + (G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_PM_AUDIO_VISUALIZER, \ + GstPMAudioVisualizer)) +#define GST_PM_AUDIO_VISUALIZER_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_PM_AUDIO_VISUALIZER, \ + GstPMAudioVisualizerClass)) +#define GST_PM_AUDIO_VISUALIZER_GET_CLASS(obj) \ + (G_TYPE_INSTANCE_GET_CLASS((obj), GST_TYPE_PM_AUDIO_VISUALIZER, \ + GstPMAudioVisualizerClass)) +#define GST_PM_IS_SYNAESTHESIA(obj) \ + (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_PM_AUDIO_VISUALIZER)) +#define GST_PM_IS_SYNAESTHESIA_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_PM_AUDIO_VISUALIZER)) +typedef struct _GstPMAudioVisualizer GstPMAudioVisualizer; +typedef struct _GstPMAudioVisualizerClass GstPMAudioVisualizerClass; +typedef struct _GstPMAudioVisualizerPrivate GstPMAudioVisualizerPrivate; + +struct _GstPMAudioVisualizer { + GstElement parent; + + /* min samples per frame wanted by the subclass (one channel) */ + guint req_spf; + + /* video state */ + GstVideoInfo vinfo; + + /* audio state */ + GstAudioInfo ainfo; + + /* current time (ns) position within the input stream */ + guint64 stream_time; + + /*< private >*/ + GstPMAudioVisualizerPrivate *priv; +}; + +/** + * GstPMAudioVisualizerClass: + * @decide_allocation: buffer pool allocation + * @prepare_output_buffer: allocate a buffer for rendering a frame. + * @map_output_buffer: map video frame to memory buffer. + * @render: render a frame from an audio buffer. + * @setup: called whenever the format changes. + * + * Base class for audio visualizers, derived from gstreamer + * GstAudioVisualizerClass. This plugin handles rendering video frames with a + * fixed framerate from audio input samples. 
+ */ +struct _GstPMAudioVisualizerClass { + /*< private >*/ + GstElementClass parent_class; + + /*< public >*/ + /* virtual function, called whenever the format changes */ + gboolean (*setup)(GstPMAudioVisualizer *scope); + + /* virtual function for rendering a frame */ + gboolean (*render)(GstPMAudioVisualizer *scope, GstBuffer *audio, + GstVideoFrame *video); + + /* virtual function for buffer pool allocation */ + gboolean (*decide_allocation)(GstPMAudioVisualizer *scope, GstQuery *query); + + /* virtual function for output buffer allocation */ + GstFlowReturn (*prepare_output_buffer)(GstPMAudioVisualizer *scope, + GstBuffer **outbuf); + + /* virtual function for mapping the output buffer to video frame */ + void (*map_output_buffer)(GstPMAudioVisualizer *scope, + GstVideoFrame *outframe, GstBuffer *outbuf); +}; + +GType gst_pm_audio_visualizer_get_type(void); + +GstFlowReturn gst_pm_audio_visualizer_default_prepare_output_buffer( + GstPMAudioVisualizer *scope, GstBuffer **outbuf); + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(GstPMAudioVisualizer, gst_object_unref) + +G_END_DECLS +#endif /* __GST_PM_AUDIO_VISUALIZER_H__ */ diff --git a/src/plugin.c b/src/plugin.c index 125b2ed..0cd0500 100644 --- a/src/plugin.c +++ b/src/plugin.c @@ -8,7 +8,6 @@ #endif #include #include -#include #include @@ -22,13 +21,23 @@ GST_DEBUG_CATEGORY_STATIC(gst_projectm_debug); #define GST_CAT_DEFAULT gst_projectm_debug +#define GST_PROJECTM_LOCK(plugin) (g_mutex_lock(&plugin->priv->projectm_lock)) +#define GST_PROJECTM_UNLOCK(plugin) \ + (g_mutex_unlock(&plugin->priv->projectm_lock)) struct _GstProjectMPrivate { - GLenum gl_format; projectm_handle handle; + projectm_playlist_handle playlist; + GMutex projectm_lock; GstClockTime first_frame_time; gboolean first_frame_received; + + GstGLFramebuffer *fbo; + GLuint texture_id; + GstBuffer *in_audio; + GstGLMemory *mem; + GstGLVideoAllocationParams *allocation_params; }; G_DEFINE_TYPE_WITH_CODE(GstProjectM, gst_projectm, @@ -38,6 +47,50 @@ 
G_DEFINE_TYPE_WITH_CODE(GstProjectM, gst_projectm, "gstprojectm", 0, "Plugin Root")); +static GstBuffer *wrap_gl_texture(GstGLBaseAudioVisualizer *glav, + GstProjectM *plugin) { + GstGLMemoryAllocator *allocator; + gpointer glTextures[1]; + GstGLFormat glFormats[1]; + GstBuffer *glBuffer; + gboolean ret; + + allocator = gst_gl_memory_allocator_get_default(glav->context); + + glBuffer = gst_buffer_new(); + if (!glBuffer) { + g_error("Failed to create new buffer\n"); + return NULL; + } + + glTextures[0] = (gpointer)plugin->priv->texture_id; + glFormats[0] = GST_GL_RGBA8; + + // create gl mem buffer for texture + ret = gst_gl_memory_setup_buffer(allocator, glBuffer, + plugin->priv->allocation_params, glFormats, + glTextures, 1); + if (!ret) { + g_error("Failed to setup gl memory\n"); + return NULL; + } + + gst_object_unref(allocator); + + return glBuffer; +} + +static GstFlowReturn +gst_projectm_prepare_output_buffer(GstGLBaseAudioVisualizer *scope, + GstBuffer **outbuf) { + GstProjectM *plugin = GST_PROJECTM(scope); + GST_PROJECTM_LOCK(plugin); + *outbuf = wrap_gl_texture(scope, plugin); + GST_DEBUG_OBJECT(plugin, "Wrapped RT texture buffer"); + GST_PROJECTM_UNLOCK(plugin); + return GST_FLOW_OK; +} + void gst_projectm_set_property(GObject *object, guint property_id, const GValue *value, GParamSpec *pspec) { GstProjectM *plugin = GST_PROJECTM(object); @@ -101,6 +154,8 @@ void gst_projectm_set_property(GObject *object, guint property_id, case PROP_SHUFFLE_PRESETS: plugin->shuffle_presets = g_value_get_boolean(value); break; + case PROP_PTS_SYNC: + plugin->pts_sync = g_value_get_boolean(value); default: G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec); break; @@ -161,6 +216,9 @@ void gst_projectm_get_property(GObject *object, guint property_id, case PROP_SHUFFLE_PRESETS: g_value_set_boolean(value, plugin->shuffle_presets); break; + case PROP_PTS_SYNC: + g_value_set_boolean(value, plugin->pts_sync); + break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID(object, 
property_id, pspec); break; @@ -181,6 +239,9 @@ static void gst_projectm_init(GstProjectM *plugin) { plugin->preset_duration = DEFAULT_PRESET_DURATION; plugin->enable_playlist = DEFAULT_ENABLE_PLAYLIST; plugin->shuffle_presets = DEFAULT_SHUFFLE_PRESETS; + plugin->pts_sync = true; + plugin->priv->first_frame_time = 0; + plugin->priv->first_frame_received = FALSE; const gchar *meshSizeStr = DEFAULT_MESH_SIZE; gint width, height; @@ -201,27 +262,51 @@ static void gst_projectm_init(GstProjectM *plugin) { plugin->easter_egg = DEFAULT_EASTER_EGG; plugin->preset_locked = DEFAULT_PRESET_LOCKED; plugin->priv->handle = NULL; + plugin->priv->fbo = NULL; + plugin->priv->texture_id = 0; + plugin->priv->in_audio = NULL; + plugin->priv->mem = NULL; + plugin->priv->allocation_params = NULL; + g_mutex_init(&plugin->priv->projectm_lock); } static void gst_projectm_finalize(GObject *object) { GstProjectM *plugin = GST_PROJECTM(object); g_free(plugin->preset_path); g_free(plugin->texture_dir_path); + g_mutex_clear(&plugin->priv->projectm_lock); G_OBJECT_CLASS(gst_projectm_parent_class)->finalize(object); } static void gst_projectm_gl_stop(GstGLBaseAudioVisualizer *src) { GstProjectM *plugin = GST_PROJECTM(src); + GST_PROJECTM_LOCK(plugin); if (plugin->priv->handle) { GST_DEBUG_OBJECT(plugin, "Destroying ProjectM instance"); projectm_destroy(plugin->priv->handle); plugin->priv->handle = NULL; } + if (plugin->priv->fbo) { + gst_object_unref(plugin->priv->fbo); + plugin->priv->fbo = NULL; + } + + if (plugin->priv->texture_id) { + glDeleteTextures(1, &plugin->priv->texture_id); + plugin->priv->texture_id = 0; + } + + if (plugin->priv->allocation_params) { + gst_gl_video_allocation_params_free_data(plugin->priv->allocation_params); + plugin->priv->allocation_params = NULL; + } + GST_PROJECTM_UNLOCK(plugin); } static gboolean gst_projectm_gl_start(GstGLBaseAudioVisualizer *glav) { // Cast the audio visualizer to the ProjectM plugin GstProjectM *plugin = GST_PROJECTM(glav); + 
GstPMAudioVisualizer *gstav = GST_PM_AUDIO_VISUALIZER(glav); #ifdef USE_GLEW GST_DEBUG_OBJECT(plugin, "Initializing GLEW"); @@ -232,81 +317,95 @@ static gboolean gst_projectm_gl_start(GstGLBaseAudioVisualizer *glav) { } #endif + GST_PROJECTM_LOCK(plugin); + // initialize render texture + // todo: let gst create the texture + const GstGLFuncs *glFunctions = glav->context->gl_vtable; + + glFunctions->GenTextures(1, &plugin->priv->texture_id); + glFunctions->BindTexture(GL_TEXTURE_2D, plugin->priv->texture_id); + + // allocate texture + glFunctions->TexImage2D( + GL_TEXTURE_2D, 0, GL_RGBA, GST_VIDEO_INFO_WIDTH(&gstav->vinfo), + GST_VIDEO_INFO_HEIGHT(&gstav->vinfo), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); + + glFunctions->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); + glFunctions->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); + glFunctions->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, + GL_CLAMP_TO_EDGE); + glFunctions->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, + GL_CLAMP_TO_EDGE); + // glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE); + glFunctions->BindTexture(GL_TEXTURE_2D, 0); + + plugin->priv->allocation_params = + gst_gl_video_allocation_params_new_wrapped_texture( + glav->context, NULL, &gstav->vinfo, 0, NULL, GST_GL_TEXTURE_TARGET_2D, + GST_GL_RGBA, plugin->priv->texture_id, NULL, 0); + // Check if ProjectM instance exists, and create if not if (!plugin->priv->handle) { // Create ProjectM instance - plugin->priv->handle = projectm_init(plugin); - if (!plugin->priv->handle) { + plugin->priv->first_frame_received = FALSE; + if (!projectm_init(plugin, &plugin->priv->handle, + &plugin->priv->playlist)) { GST_ERROR_OBJECT(plugin, "ProjectM could not be initialized"); return FALSE; } gl_error_handler(glav->context, plugin); } + plugin->priv->fbo = gst_gl_framebuffer_new_with_default_depth( + glav->context, GST_VIDEO_INFO_WIDTH(&gstav->vinfo), + GST_VIDEO_INFO_HEIGHT(&gstav->vinfo)); + + 
GST_PROJECTM_UNLOCK(plugin); + GST_INFO_OBJECT(plugin, "GL start complete"); return TRUE; } static gboolean gst_projectm_setup(GstGLBaseAudioVisualizer *glav) { - GstAudioVisualizer *bscope = GST_AUDIO_VISUALIZER(glav); - GstProjectM *plugin = GST_PROJECTM(glav); - - // Calculate depth based on pixel stride and bits - gint depth = bscope->vinfo.finfo->pixel_stride[0] * - ((bscope->vinfo.finfo->bits >= 8) ? 8 : 1); - - // Calculate required samples per frame - bscope->req_spf = - (bscope->ainfo.channels * bscope->ainfo.rate * 2) / bscope->vinfo.fps_n; - - // get GStreamer video format and map it to the corresponding OpenGL pixel - // format - const GstVideoFormat video_format = GST_VIDEO_INFO_FORMAT(&bscope->vinfo); - // TODO: why is the reversed byte order needed when copying pixel data from - // OpenGL ? - switch (video_format) { - case GST_VIDEO_FORMAT_ABGR: - plugin->priv->gl_format = GL_RGBA; - break; - - case GST_VIDEO_FORMAT_RGBA: - // GL_ABGR_EXT does not seem to be well-supported, does not work on Windows - plugin->priv->gl_format = GL_ABGR_EXT; - break; - - default: - GST_ERROR_OBJECT(plugin, "Unsupported video format: %d", video_format); - return FALSE; - } + GstPMAudioVisualizer *gstav = GST_PM_AUDIO_VISUALIZER(glav); // Log audio info GST_DEBUG_OBJECT( glav, "Audio Information ", - bscope->ainfo.channels, bscope->ainfo.rate, - bscope->ainfo.finfo->description); + gstav->ainfo.channels, gstav->ainfo.rate, + gstav->ainfo.finfo->description); // Log video info - GST_DEBUG_OBJECT(glav, - "Video Information ", - GST_VIDEO_INFO_WIDTH(&bscope->vinfo), - GST_VIDEO_INFO_HEIGHT(&bscope->vinfo), bscope->vinfo.fps_n, - bscope->vinfo.fps_d, depth, bscope->req_spf); + GST_DEBUG_OBJECT( + glav, + "Video Information ", + GST_VIDEO_INFO_WIDTH(&gstav->vinfo), GST_VIDEO_INFO_HEIGHT(&gstav->vinfo), + gstav->vinfo.fps_n, gstav->vinfo.fps_d, gstav->req_spf); return TRUE; } -static double get_seconds_since_first_frame(GstProjectM *plugin, - GstVideoFrame *frame) { +static 
gdouble get_seconds_since_first_frame(GstProjectM *plugin, + GstGLBaseAudioVisualizer *glav) { + // pick timestamp to sync to + GstClockTime current_time; + if (plugin->pts_sync) { + // sync to pts + current_time = glav->pts; + } else { + // sync to dts + GstPMAudioVisualizer *pmav = GST_PM_AUDIO_VISUALIZER(plugin); + current_time = pmav->stream_time; + } + if (!plugin->priv->first_frame_received) { // Store the timestamp of the first frame - plugin->priv->first_frame_time = GST_BUFFER_PTS(frame->buffer); + plugin->priv->first_frame_time = current_time; plugin->priv->first_frame_received = TRUE; return 0.0; } // Calculate elapsed time - GstClockTime current_time = GST_BUFFER_PTS(frame->buffer); GstClockTime elapsed_time = current_time - plugin->priv->first_frame_time; // Convert to fractional seconds @@ -315,21 +414,21 @@ static double get_seconds_since_first_frame(GstProjectM *plugin, return elapsed_seconds; } -// TODO: CLEANUP & ADD DEBUGGING -static gboolean gst_projectm_render(GstGLBaseAudioVisualizer *glav, - GstBuffer *audio, GstVideoFrame *video) { - GstProjectM *plugin = GST_PROJECTM(glav); +static gboolean gst_projectm_fill_gl_memory_callback(gpointer stuff) { + GstProjectM *plugin = GST_PROJECTM(stuff); + GstGLBaseAudioVisualizer *glav = GST_GL_BASE_AUDIO_VISUALIZER(stuff); GstMapInfo audioMap; gboolean result = TRUE; - // get current gst (PTS) time and set projectM time - double seconds_since_first_frame = - get_seconds_since_first_frame(plugin, video); + // get current gst sync time (pts or stream time/dts) and set projectM time + gdouble seconds_since_first_frame = + get_seconds_since_first_frame(plugin, glav); + projectm_set_frame_time(plugin->priv->handle, seconds_since_first_frame); // AUDIO - gst_buffer_map(audio, &audioMap, GST_MAP_READ); + gst_buffer_map(plugin->priv->in_audio, &audioMap, GST_MAP_READ); // GST_DEBUG_OBJECT(plugin, "Audio Samples: %u, Offset: %lu, Offset End: %lu, // Sample Rate: %d, FPS: %d, Required Samples Per Frame: %d", @@ 
-344,20 +443,14 @@ static gboolean gst_projectm_render(GstGLBaseAudioVisualizer *glav, // *)audioMap.data)[102], ((gint16 *)audioMap.data)[103]); // VIDEO - const GstGLFuncs *glFunctions = glav->context->gl_vtable; - - size_t windowWidth, windowHeight; - - projectm_get_window_size(plugin->priv->handle, &windowWidth, &windowHeight); + GST_TRACE_OBJECT(plugin, "rendering projectM to fbo %d", + plugin->priv->fbo->fbo_id); + projectm_opengl_render_frame_fbo(plugin->priv->handle, + plugin->priv->fbo->fbo_id); - projectm_opengl_render_frame(plugin->priv->handle); gl_error_handler(glav->context, plugin); - glFunctions->ReadPixels(0, 0, windowWidth, windowHeight, - plugin->priv->gl_format, GL_UNSIGNED_INT_8_8_8_8, - (guint8 *)GST_VIDEO_FRAME_PLANE_DATA(video, 0)); - - gst_buffer_unmap(audio, &audioMap); + gst_buffer_unmap(plugin->priv->in_audio, &audioMap); // GST_DEBUG_OBJECT(plugin, "Video Data: %d %d\n", // GST_VIDEO_FRAME_N_PLANES(video), ((uint8_t @@ -368,6 +461,28 @@ static gboolean gst_projectm_render(GstGLBaseAudioVisualizer *glav, return result; } +static gboolean gst_projectm_fill_gl_memory(GstGLBaseAudioVisualizer *glav, + GstBuffer *in_audio, + GstGLMemory *mem) { + + GstProjectM *plugin = GST_PROJECTM(glav); + + GST_PROJECTM_LOCK(plugin); + + plugin->priv->in_audio = in_audio; + plugin->priv->mem = mem; + + gboolean result = gst_gl_framebuffer_draw_to_texture( + plugin->priv->fbo, mem, gst_projectm_fill_gl_memory_callback, plugin); + + plugin->priv->in_audio = NULL; + plugin->priv->mem = NULL; + + GST_PROJECTM_UNLOCK(plugin); + + return result; +} + static void gst_projectm_class_init(GstProjectMClass *klass) { GObjectClass *gobject_class = (GObjectClass *)klass; GstElementClass *element_class = (GstElementClass *)klass; @@ -516,13 +631,23 @@ static void gst_projectm_class_init(GstProjectMClass *klass) { "and not locked. 
Playlist must be enabled for this to take effect.", DEFAULT_SHUFFLE_PRESETS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + g_object_class_install_property( + gobject_class, PROP_PTS_SYNC, + g_param_spec_boolean( + "pts-sync", "Presentation Timestamp Sync", + "If true, projectM will be synced to the gst presentation timestamp. " + "In case of false, the stream time (dts) will be used.", + DEFAULT_PTS_SYNC, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + gobject_class->finalize = gst_projectm_finalize; scope_class->supported_gl_api = GST_GL_API_OPENGL3 | GST_GL_API_GLES2; scope_class->gl_start = GST_DEBUG_FUNCPTR(gst_projectm_gl_start); scope_class->gl_stop = GST_DEBUG_FUNCPTR(gst_projectm_gl_stop); - scope_class->gl_render = GST_DEBUG_FUNCPTR(gst_projectm_render); + scope_class->fill_gl_memory = GST_DEBUG_FUNCPTR(gst_projectm_fill_gl_memory); scope_class->setup = GST_DEBUG_FUNCPTR(gst_projectm_setup); + scope_class->prepare_output_buffer = + GST_DEBUG_FUNCPTR(gst_projectm_prepare_output_buffer); } static gboolean plugin_init(GstPlugin *plugin) { diff --git a/src/plugin.h b/src/plugin.h index de1acff..b46977c 100644 --- a/src/plugin.h +++ b/src/plugin.h @@ -12,6 +12,13 @@ G_BEGIN_DECLS G_DECLARE_FINAL_TYPE(GstProjectM, gst_projectm, GST, PROJECTM, GstGLBaseAudioVisualizer) +/* + * Main plug-in. Handles interactions with projectM. + * Uses GstPMAudioVisualizer for handling audio-visualization (audio input, + * timing, video frame data). GstGLBaseAudioVisualizer extends + * GstPMAudioVisualizer to add gl context handling and is used by this plugin + * directly. GstProjectM -> GstGLBaseAudioVisualizer -> GstPMAudioVisualizer. 
+ */ struct _GstProjectM { GstGLBaseAudioVisualizer element; @@ -31,12 +38,13 @@ struct _GstProjectM { gboolean preset_locked; gboolean enable_playlist; gboolean shuffle_presets; + gboolean pts_sync; GstProjectMPrivate *priv; }; struct _GstProjectMClass { - GstAudioVisualizerClass parent_class; + GstGLBaseAudioVisualizerClass parent_class; }; static void gst_projectm_set_property(GObject *object, guint prop_id, @@ -53,8 +61,9 @@ static gboolean gst_projectm_gl_start(GstGLBaseAudioVisualizer *glav); static void gst_projectm_gl_stop(GstGLBaseAudioVisualizer *glav); -static gboolean gst_projectm_render(GstGLBaseAudioVisualizer *glav, - GstBuffer *audio, GstVideoFrame *video); +static gboolean gst_projectm_fill_gl_memory(GstGLBaseAudioVisualizer *glav, + GstBuffer *in_audio, + GstGLMemory *mem); static void gst_projectm_class_init(GstProjectMClass *klass); diff --git a/src/projectm.c b/src/projectm.c index 1bac137..16d89c3 100644 --- a/src/projectm.c +++ b/src/projectm.c @@ -13,13 +13,13 @@ GST_DEBUG_CATEGORY_STATIC(projectm_debug); #define GST_CAT_DEFAULT projectm_debug -projectm_handle projectm_init(GstProjectM *plugin) { +bool projectm_init(GstProjectM *plugin, projectm_handle *ret_handle, + projectm_playlist_handle *ret_playlist) { projectm_handle handle = NULL; projectm_playlist_handle playlist = NULL; - GST_DEBUG_CATEGORY_INIT(projectm_debug, "projectm", 0, "ProjectM"); - GstAudioVisualizer *bscope = GST_AUDIO_VISUALIZER(plugin); + GstPMAudioVisualizer *bscope = GST_PM_AUDIO_VISUALIZER(plugin); // Create ProjectM instance GST_DEBUG_OBJECT(plugin, "Creating projectM instance.."); @@ -29,51 +29,53 @@ projectm_handle projectm_init(GstProjectM *plugin) { GST_DEBUG_OBJECT( plugin, "project_create() returned NULL, projectM instance was not created!"); - return NULL; + return FALSE; } else { GST_DEBUG_OBJECT(plugin, "Created projectM instance!"); } + *ret_handle = handle; if (plugin->enable_playlist) { GST_DEBUG_OBJECT(plugin, "Playlist enabled"); // initialize preset 
playlist playlist = projectm_playlist_create(handle); + *ret_playlist = playlist; projectm_playlist_set_shuffle(playlist, plugin->shuffle_presets); // projectm_playlist_set_preset_switched_event_callback(_playlist, // &ProjectMWrapper::PresetSwitchedEvent, static_cast(this)); } else { GST_DEBUG_OBJECT(plugin, "Playlist disabled"); } - // Log properties - GST_INFO_OBJECT( - plugin, - "Using Properties: " - "preset=%s, " - "texture-dir=%s, " - "beat-sensitivity=%f, " - "hard-cut-duration=%f, " - "hard-cut-enabled=%d, " - "hard-cut-sensitivity=%f, " - "soft-cut-duration=%f, " - "preset-duration=%f, " - "mesh-size=(%lu, %lu)" - "aspect-correction=%d, " - "easter-egg=%f, " - "preset-locked=%d, " - "enable-playlist=%d, " - "shuffle-presets=%d", - plugin->preset_path, plugin->texture_dir_path, plugin->beat_sensitivity, - plugin->hard_cut_duration, plugin->hard_cut_enabled, - plugin->hard_cut_sensitivity, plugin->soft_cut_duration, - plugin->preset_duration, plugin->mesh_width, plugin->mesh_height, - plugin->aspect_correction, plugin->easter_egg, plugin->preset_locked, - plugin->enable_playlist, plugin->shuffle_presets); + GST_INFO_OBJECT(plugin, + "Using Properties: " + "preset=%s, " + "texture-dir=%s, " + "beat-sensitivity=%f, " + "hard-cut-duration=%f, " + "hard-cut-enabled=%d, " + "hard-cut-sensitivity=%f, " + "soft-cut-duration=%f, " + "preset-duration=%f, " + "mesh-size=(%lu, %lu), " + "aspect-correction=%d, " + "easter-egg=%f, " + "preset-locked=%d, " + "enable-playlist=%d, " + "shuffle-presets=%d, " + "pts-sync=%d", plugin->preset_path, plugin->texture_dir_path, + plugin->beat_sensitivity, plugin->hard_cut_duration, + plugin->hard_cut_enabled, plugin->hard_cut_sensitivity, + plugin->soft_cut_duration, plugin->preset_duration, + plugin->mesh_width, plugin->mesh_height, + plugin->aspect_correction, plugin->easter_egg, + plugin->preset_locked, plugin->enable_playlist, + plugin->shuffle_presets, plugin->pts_sync); // Load preset file if path is provided if 
(plugin->preset_path != NULL) { - int added_count = + unsigned int added_count = projectm_playlist_add_path(playlist, plugin->preset_path, true, false); GST_INFO("Loaded preset path: %s, presets found: %d", plugin->preset_path, added_count); @@ -95,7 +97,6 @@ projectm_handle projectm_init(GstProjectM *plugin) { // Set preset duration, or set to in infinite duration if zero if (plugin->preset_duration > 0.0) { projectm_set_preset_duration(handle, plugin->preset_duration); - // kick off the first preset if (projectm_playlist_size(playlist) > 1 && !plugin->preset_locked) { projectm_playlist_play_next(playlist, true); @@ -109,11 +110,15 @@ projectm_handle projectm_init(GstProjectM *plugin) { projectm_set_easter_egg(handle, plugin->easter_egg); projectm_set_preset_locked(handle, plugin->preset_locked); - projectm_set_fps(handle, GST_VIDEO_INFO_FPS_N(&bscope->vinfo)); + gdouble fps; + gst_util_fraction_to_double(GST_VIDEO_INFO_FPS_N(&bscope->vinfo), + GST_VIDEO_INFO_FPS_D(&bscope->vinfo), &fps); + + projectm_set_fps(handle, gst_util_gdouble_to_guint64(fps)); projectm_set_window_size(handle, GST_VIDEO_INFO_WIDTH(&bscope->vinfo), GST_VIDEO_INFO_HEIGHT(&bscope->vinfo)); - return handle; + return TRUE; } // void projectm_render(GstProjectM *plugin, gint16 *samples, gint sample_count) diff --git a/src/projectm.h b/src/projectm.h index 1ba6a37..246b633 100644 --- a/src/projectm.h +++ b/src/projectm.h @@ -1,17 +1,16 @@ #ifndef __PROJECTM_H__ #define __PROJECTM_H__ -#include - #include "plugin.h" -#include +#include G_BEGIN_DECLS /** * @brief Initialize ProjectM */ -projectm_handle projectm_init(GstProjectM *plugin); +bool projectm_init(GstProjectM *plugin, projectm_handle *handle, + projectm_playlist_handle *playlist); /** * @brief Render ProjectM