gstreamer: split audio stream into files
I have an audio stream that I would like to save as individual playable files, split by time.
If I understand correctly, splitmuxsink does this for video files, but not for audio-only files.
There are some hints on how this can be done here:
http://gstreamer-devel.966125.n4.nabble.com/Dynamically-updating-filesink-location-at-run-time-on-the-fly-td4660569.html
I am struggling to reproduce it, and the approach described there targets gstreamer-0.1.
Judging from the description of splitmuxsink, audio seems to be included:
Pad Templates:
SINK template: 'video'
Availability: On request
Capabilities:
ANY
SINK template: 'audio_%u'
Availability: On request
Capabilities:
ANY
SINK template: 'subtitle_%u'
Availability: On request
Capabilities:
ANY
So I do not understand why it should not work for audio-only files, especially since such files are often just treated as video files that happen to contain only audio.
Edit: The command below should produce AC3/MP4 files, each one minute long:
gst-launch-1.0 -e audiotestsrc ! avenc_ac3 ! ac3parse ! mux.audio_0 splitmuxsink name=mux max-size-time=60000000000 location=out_%d.mp4
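For reference, the same splitmuxsink pipeline can also be built programmatically. This is only a rough C sketch based on the command above (same element names, hard-coded properties, almost no error handling), not a tested program:
#include <gst/gst.h>

int main (int argc, char *argv[]) {
  gst_init (&argc, &argv);

  GstElement *pipeline = gst_pipeline_new ("splitmux-audio");
  GstElement *src   = gst_element_factory_make ("audiotestsrc", NULL);
  GstElement *enc   = gst_element_factory_make ("avenc_ac3", NULL);
  GstElement *parse = gst_element_factory_make ("ac3parse", NULL);
  GstElement *mux   = gst_element_factory_make ("splitmuxsink", NULL);
  if (!pipeline || !src || !enc || !parse || !mux)
    return -1;

  /* split every 60 seconds; files are named out_0.mp4, out_1.mp4, ... */
  g_object_set (mux, "max-size-time", (guint64) (60 * GST_SECOND),
      "location", "out_%d.mp4", NULL);

  gst_bin_add_many (GST_BIN (pipeline), src, enc, parse, mux, NULL);
  gst_element_link_many (src, enc, parse, NULL);

  /* request an audio pad on splitmuxsink and link the parser to it */
  GstPad *sinkpad = gst_element_get_request_pad (mux, "audio_%u");
  GstPad *srcpad  = gst_element_get_static_pad (parse, "src");
  gst_pad_link (srcpad, sinkpad);
  gst_object_unref (srcpad);
  gst_object_unref (sinkpad);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  /* run until interrupted; a real program would watch the bus for EOS/errors */
  g_main_loop_run (g_main_loop_new (NULL, FALSE));
  return 0;
}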
I got splitting of Ogg-encoded audio files working with a dynamic pipeline in C.
Florian Zwoch showed in his earlier answer how splitmuxsink can split an audio stream.
A workaround that splits the files with a dynamic pipeline:
#include <stdio.h>
#define GLIB_DISABLE_DEPRECATION_WARNINGS
#include <gst/gst.h>

static GstElement *pipeline;
static GstPad *queue_src_pad;
/* two identical encode/mux/write bins; while one records, the other is idle,
 * and they are swapped each time the output file is rotated */
static GstElement *bins[2];
static GstPad *bin_pads[2];
static GstElement *filesink[2];
static size_t current_bin = 0;
static size_t current_file = 0;
static GstPadProbeReturn
pad_probe_cb (GstPad * pad, GstPadProbeInfo * info, gpointer user_data)
{
  /* the pad is now blocked; this probe only needs to run once */
  gst_pad_remove_probe (pad, GST_PAD_PROBE_INFO_ID (info));

  /* detach the active bin and send it EOS so oggmux finalizes the file */
  gst_pad_unlink (queue_src_pad, bin_pads[current_bin]);
  gst_pad_send_event (bin_pads[current_bin], gst_event_new_eos ());
  gst_bin_remove (GST_BIN (pipeline), bins[current_bin]);
  gst_element_set_state (bins[current_bin], GST_STATE_NULL);

  /* switch to the other bin and point its filesink at the next file */
  current_file++;
  current_bin = (current_file % 2);
  {
    char file_location[32];
    snprintf (file_location, sizeof (file_location),
        "recording_%zu.ogg", current_file);
    g_object_set (G_OBJECT (filesink[current_bin]),
        "location", file_location, NULL);
    printf ("now writing to %s\n", file_location);
  }

  /* plug the fresh bin into the running pipeline and resume data flow */
  gst_bin_add (GST_BIN (pipeline), bins[current_bin]);
  gst_pad_link (queue_src_pad, bin_pads[current_bin]);
  gst_element_sync_state_with_parent (bins[current_bin]);

  return GST_PAD_PROBE_OK;
}
static gboolean
timeout_cb (gpointer user_data)
{
  /* block the queue's src pad; the actual swap happens in pad_probe_cb */
  gst_pad_add_probe (queue_src_pad, GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM,
      pad_probe_cb, NULL, NULL);
  return TRUE; /* keep the timer running so the file is rotated periodically */
}
int
main (int argc, char *argv[])
{
  GstElement *audiosrc, *queue;
  GstElement *vorbisenc[2], *oggmux[2];
  GstBus *bus;
  GMainLoop *loop;

  gst_init (&argc, &argv);

  //audiosrc = gst_element_factory_make("audiotestsrc", "audiosrc");
  //g_object_set (G_OBJECT (audiosrc), "is-live", TRUE, NULL);
  audiosrc = gst_element_factory_make ("pulsesrc", "audiosrc");
  queue = gst_element_factory_make ("queue", "queue");

  bins[0] = gst_bin_new ("bin0");
  bins[1] = gst_bin_new ("bin1");
  vorbisenc[0] = gst_element_factory_make ("vorbisenc", "vorbisenc0");
  vorbisenc[1] = gst_element_factory_make ("vorbisenc", "vorbisenc1");
  oggmux[0] = gst_element_factory_make ("oggmux", "oggmux0");
  oggmux[1] = gst_element_factory_make ("oggmux", "oggmux1");
  filesink[0] = gst_element_factory_make ("filesink", "filesink0");
  filesink[1] = gst_element_factory_make ("filesink", "filesink1");
  pipeline = gst_pipeline_new ("test-pipeline");

  if (!pipeline || !audiosrc || !queue
      || !vorbisenc[0] || !oggmux[0] || !filesink[0]
      || !vorbisenc[1] || !oggmux[1] || !filesink[1]) {
    g_printerr ("not all elements could be created\n");
    return -1;
  }

  /* each bin is vorbisenc ! oggmux ! filesink; only bin 0 starts in the pipeline */
  gst_bin_add_many (GST_BIN (bins[0]), vorbisenc[0], oggmux[0], filesink[0], NULL);
  gst_bin_add_many (GST_BIN (bins[1]), vorbisenc[1], oggmux[1], filesink[1], NULL);
  gst_bin_add_many (GST_BIN (pipeline), audiosrc, queue, bins[0], NULL);

  g_assert (gst_element_link (audiosrc, queue));
  g_assert (gst_element_link_many (vorbisenc[0], oggmux[0], filesink[0], NULL));
  g_assert (gst_element_link_many (vorbisenc[1], oggmux[1], filesink[1], NULL));

  /* expose each encoder's sink pad as a ghost pad on its bin */
  {
    GstPad *pad = gst_element_get_static_pad (vorbisenc[0], "sink");
    gst_element_add_pad (bins[0], gst_ghost_pad_new ("sink", pad));
    gst_object_unref (pad);
  }
  {
    GstPad *pad = gst_element_get_static_pad (vorbisenc[1], "sink");
    gst_element_add_pad (bins[1], gst_ghost_pad_new ("sink", pad));
    gst_object_unref (pad);
  }
  bin_pads[0] = gst_element_get_static_pad (bins[0], "sink");
  bin_pads[1] = gst_element_get_static_pad (bins[1], "sink");

  current_bin = 0;
  gst_element_link (queue, bins[current_bin]);
  g_object_set (filesink[current_bin], "location", "recording_0.ogg", NULL);

  queue_src_pad = gst_element_get_static_pad (queue, "src");

  bus = gst_element_get_bus (pipeline);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  loop = g_main_loop_new (NULL, FALSE);
  /* rotate the output file every 3 seconds */
  g_timeout_add_seconds (3, timeout_cb, NULL);
  g_main_loop_run (loop);

  gst_object_unref (bus);
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}
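To try it out, something along these lines should build the example (the file name split_audio.c is just a placeholder, and pkg-config needs to know about gstreamer-1.0 on your system):
gcc split_audio.c -o split_audio $(pkg-config --cflags --libs gstreamer-1.0)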