From: David Testé
Date: Thu, 10 Mar 2016 22:24:46 +0000 (+0100)
Subject: Testing pipeline completed.
X-Git-Url: https://vcs.fsf.org/?p=libre-streamer.git;a=commitdiff_plain;h=dfee4fc3b731f0544bbef4f813278936cc2ea3c7

Testing pipeline completed.

Enhancing the camera choice buttons' behavior.
---

diff --git a/stream_2016/gstconf.py b/stream_2016/gstconf.py
index fa9a63f..5d016d1 100755
--- a/stream_2016/gstconf.py
+++ b/stream_2016/gstconf.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python3.4
+# -*- coding: utf-8 -*-
 
 # This file is part of ABYSS.
+# ABYSS Broadcast Your Streaming Successfully
 #
 # ABYSS is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -41,19 +43,22 @@ INFO = '[INFO] '
 WARN = '[WARN] '
 
 AUDIO_INPUT = 'alsa_input.usb-Burr-Brown_from_TI_USB_Audio_CODEC-00-CODEC.analog-stereo'
+AUDIO_OUTPUT = 'alsa_output.pci-0000_00_1b.0.analog-stereo'
 
 
 class New_user_pipeline():
 
-    def __init__(self, feed='main'):
+    def __init__(self, rtsp_address, feed='main'):
+        self.rtsp_address = 'rtsp://' + rtsp_address
         self.feed = feed
         self.user_pipeline = self.create_gstreamer_pipeline()
 
 
     def create_video_sources(self):
         """Create video inputs from various sources."""
         self.videosrc = Gst.ElementFactory.make('rtspsrc', 'videosrc')
-        self.videosrc.set_property('location', 'rtsp://192.168.48.2:554')
+        self.videosrc.set_property('location', self.rtsp_address)
+##        self.videosrc.set_property('location', 'rtsp://192.168.48.2:554')
         self.videosrc.set_property('latency', 0)
 ##        self.videosrc.set_property('debug', True)
         if self.feed == 'backup':
@@ -120,6 +125,10 @@ class New_user_pipeline():
         # To local screen:
         self.screensink = Gst.ElementFactory.make('xvimagesink', 'screensink')
         self.screensink.set_property('sync', False)
+        # To local audio output (headphones):
+        self.audiosink = Gst.ElementFactory.make('pulsesink', 'audiosink')
+        self.audiosink.set_property('device', AUDIO_OUTPUT)
+        self.audiosink.set_property('sync', False)
         # To icecast server:
         self.icecastsink_audio = Gst.ElementFactory.make('shout2send', 'icecastsink_audio')
         self.icecastsink_audio.set_property('sync', False)
@@ -245,6 +254,8 @@ class New_user_pipeline():
         # Output elements:
         self.create_filesink()
         self.create_streamsink()
+        if self.feed == 'test':
+            print('TEST OK...', end='')
         print('created')
         if self.feed == 'backup':
             print (INFO,
@@ -255,38 +266,45 @@ class New_user_pipeline():
 
     def add_elements_to_pipeline(self):
         print(INFO, gettime(), 'Pipeline creation state: adding elements... ', end='')
+        cond = self.feed != 'test'
+
         # Inputs elements:
         self.streampipe.add(self.audiosrc)
         # Middle elements:
         self.streampipe.add(self.audiolevel)
-        self.streampipe.add(self.vorbisenc)
-        self.streampipe.add(self.vp8enc)
-        self.streampipe.add(self.mkvmux)
-        self.streampipe.add(self.oggmux)
-        self.streampipe.add(self.webmmux)
-        self.streampipe.add(self.tee_rawaudio)
-        self.streampipe.add(self.tee_rawvideo)
-        self.streampipe.add(self.tee_streamaudio)
-        self.streampipe.add(self.tee_streamfull)
-        self.streampipe.add(self.queuev_2)
-        self.streampipe.add(self.queuev_3)
-        self.streampipe.add(self.queuev_4)
-        self.streampipe.add(self.queuev_5)
         self.streampipe.add(self.queuea_1)
-        self.streampipe.add(self.queuea_2)
-        self.streampipe.add(self.queuea_3)
-        self.streampipe.add(self.queuea_4)
-        self.streampipe.add(self.queuea_5)
-        self.streampipe.add(self.queuem_1)
-        self.streampipe.add(self.queuem_2)
+        self.streampipe.add(self.queuev_3)
+        if cond:
+            self.streampipe.add(self.vorbisenc)
+            self.streampipe.add(self.oggmux)
+            self.streampipe.add(self.queuea_2)
+            self.streampipe.add(self.queuea_3)
+            self.streampipe.add(self.vp8enc)
+            self.streampipe.add(self.mkvmux)
+            self.streampipe.add(self.webmmux)
+            self.streampipe.add(self.tee_rawaudio)
+            self.streampipe.add(self.tee_rawvideo)
+            self.streampipe.add(self.tee_streamaudio)
+            self.streampipe.add(self.tee_streamfull)
+            self.streampipe.add(self.queuev_2)
+            self.streampipe.add(self.queuev_4)
+            self.streampipe.add(self.queuev_5)
+            self.streampipe.add(self.queuea_4)
+            self.streampipe.add(self.queuea_5)
+            self.streampipe.add(self.queuem_1)
+            self.streampipe.add(self.queuem_2)
         # Outputs elements:
         self.streampipe.add(self.screensink)
-        self.streampipe.add(self.disksink_rawvideo)
-        self.streampipe.add(self.disksink_audio)
-        self.streampipe.add(self.disksink_stream)
-        self.streampipe.add(self.icecastsink_audio)
-        self.streampipe.add(self.icecastsink_stream)
-        if self.feed == 'main':
+        if cond:
+            self.streampipe.add(self.disksink_rawvideo)
+            self.streampipe.add(self.disksink_audio)
+            self.streampipe.add(self.disksink_stream)
+            self.streampipe.add(self.icecastsink_audio)
+            self.streampipe.add(self.icecastsink_stream)
+        else:
+            self.streampipe.add(self.audiosink)
+
+        if self.feed == 'main' or self.feed == 'test':
             # Inputs elements:
             self.streampipe.add(self.videosrc)
             # Middle elements:
@@ -296,6 +314,8 @@ class New_user_pipeline():
             self.streampipe.add(self.capsfilter)
             self.streampipe.add(self.tee_videodecoded)
             self.streampipe.add(self.queuev_1)
+            if self.feed == 'test':
+                print ('TEST OK...', end='')
         elif self.feed == 'backup':
             # Inputs elements:
             self.streampipe.add(self.videosrc_backup)
@@ -307,62 +327,75 @@ class New_user_pipeline():
     def link_pipeline_elements(self):
         """Link all elements with static pads."""
         print(INFO, gettime(), 'Pipeline creation state: linking elements... ', end='')
+        cond = self.feed != 'test'
+
         # Audio feed:
         self.audiosrc.link(self.audiolevel)
         self.audiolevel.link(self.queuea_1)
-        self.queuea_1.link(self.vorbisenc)
-        self.connect_tee(self.tee_rawaudio,
-                         self.vorbisenc,
-                         self.queuea_2,
-                         self.queuea_5,)
-        self.queuea_2.link(self.oggmux)
-        self.connect_tee(self.tee_streamaudio,
-                         self.oggmux,
-                         self.queuea_3,
-                         self.queuea_4,)
-        self.queuea_3.link(self.disksink_audio)
-        self.queuea_4.link(self.icecastsink_audio)
-        self.queuea_5.link(self.webmmux)
+        if cond:
+            self.queuea_1.link(self.vorbisenc)
+            self.connect_tee(self.tee_rawaudio,
+                             self.vorbisenc,
+                             self.queuea_2,
+                             self.queuea_5,)
+            self.queuea_2.link(self.oggmux)
+            self.connect_tee(self.tee_streamaudio,
+                             self.oggmux,
+                             self.queuea_3,
+                             self.queuea_4,)
+            self.queuea_3.link(self.disksink_audio)
+            self.queuea_4.link(self.icecastsink_audio)
+            self.queuea_5.link(self.webmmux)
+        else:
+            self.queuea_1.link(self.audiosink)
+
         # Video feed:
-        self.queuev_2.link(self.mkvmux)
-        self.mkvmux.link(self.queuev_4)
-        self.queuev_4.link(self.disksink_rawvideo)
+        if cond:
+            self.queuev_2.link(self.mkvmux)
+            self.mkvmux.link(self.queuev_4)
+            self.queuev_4.link(self.disksink_rawvideo)
+        else:
+            self.queuev_1.link(self.rtpjpegdepay)
+            self.rtpjpegdepay.link(self.jpegdec)
+            self.jpegdec.link(self.queuev_3)
         self.queuev_3.link(self.screensink)
+
         # Stream (audio+video) feed:
-        self.vp8enc.link(self.queuev_5)
-        self.queuev_5.link(self.webmmux)
-        self.connect_tee(self.tee_streamfull,
-                         self.webmmux,
-                         self.queuem_1,
-                         self.queuem_2,)
-        self.queuem_1.link(self.disksink_stream)
-        self.queuem_2.link(self.icecastsink_stream)
-        if self.feed == 'main':
-            # linking here RTSP feed
-            self.queuev_1.link(self.rtpjpegdepay)
-            self.connect_tee(self.tee_rawvideo,
-                             self.rtpjpegdepay,
-                             self.queuev_2,
-                             self.jpegdec,)
-            self.connect_tee(self.tee_videodecoded,
-                             self.jpegdec,
-                             self.queuev_3,
-                             self.scaling,)
-            # Stream (video) feed:
-            self.scaling.link(self.capsfilter)
-            self.capsfilter.link(self.vp8enc)
-
-        elif self.feed == 'backup':
-            # linking here backup feed (WEBCAM)
-            self.videosrc_backup.link(self.capsfilter_backup)
-            self.connect_tee(self.tee_rawvideo,
-                             self.capsfilter_backup,
-                             self.queuev_2,
-                             self.queuev_3,
-                             output_element_3=self.vp8enc)
+        if cond:
+            self.vp8enc.link(self.queuev_5)
+            self.queuev_5.link(self.webmmux)
+            self.connect_tee(self.tee_streamfull,
+                             self.webmmux,
+                             self.queuem_1,
+                             self.queuem_2,)
+            self.queuem_1.link(self.disksink_stream)
+            self.queuem_2.link(self.icecastsink_stream)
+            if self.feed == 'main':
+                # linking here RTSP feed
+                self.queuev_1.link(self.rtpjpegdepay)
+                self.connect_tee(self.tee_rawvideo,
+                                 self.rtpjpegdepay,
+                                 self.queuev_2,
+                                 self.jpegdec,)
+                self.connect_tee(self.tee_videodecoded,
+                                 self.jpegdec,
+                                 self.queuev_3,
+                                 self.scaling,)
+                # Stream (video) feed:
+                self.scaling.link(self.capsfilter)
+                self.capsfilter.link(self.vp8enc)
+            elif self.feed == 'backup':
+                # linking here backup feed (WEBCAM)
+                self.videosrc_backup.link(self.capsfilter_backup)
+                self.connect_tee(self.tee_rawvideo,
+                                 self.capsfilter_backup,
+                                 self.queuev_2,
+                                 self.queuev_3,
+                                 output_element_3=self.vp8enc)
 ##                self.capsfilter_backup.link(self.queuev_3)
-            # Stream (video) feed:
-            print('BACKUP OK...', end='')
+                print('BACKUP OK...', end='')
+        if not cond:
+            print('TEST OK...', end='')
         print('linked')
 
     def create_gstreamer_pipeline(self):
@@ -372,7 +405,7 @@ class New_user_pipeline():
         # Setting-up:
         self.add_elements_to_pipeline()
         self.link_pipeline_elements()
-        if self.feed == 'main':
+        if self.feed == 'main' or self.feed == 'test':
             self.create_pipeline_callbacks()
 
         global bus
@@ -386,16 +419,12 @@ class New_user_pipeline():
         return self.streampipe
 
     def on_message(self, bus, message):
-
-##        print("[MESSAGE]", message.get_structure().get_name()) # [DEBUG]
-        #
         t = message.type
         if t == Gst.MessageType.EOS:
             self.streampipe.set_state(Gst.State.NULL)
         elif t == Gst.MessageType.ERROR:
             err, debug = message.parse_error()
             print (ERROR, '%s' % err, debug)
-#            self.streampipe.set_state(Gst.State.NULL)
 
     def stream_play(self):
         self.streampipe.set_state(Gst.State.PLAYING)
diff --git a/stream_2016/libre-streamer.py b/stream_2016/libre-streamer.py
index f48e230..6fae3db 100755
--- a/stream_2016/libre-streamer.py
+++ b/stream_2016/libre-streamer.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python3.4
+# -*- coding: utf-8 -*-
 
 # This file is part of ABYSS.
+# ABYSS Broadcast Your Streaming Successfully
 #
 # ABYSS is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -66,14 +68,17 @@ from gi.repository import GObject
 import gstconf
 
 # Based on 2016 FSF's ELPHEL camera configuration
+PORT = ':554'
 IP_1 = '192.168.48.2'
 IP_2 = '192.168.48.3'
 IP_3 = '192.168.48.4'
 CAM1_IP1 = 'CAM_1: ' + IP_1
-CAM2_IP2 = 'CAM_1: ' + IP_2
-CAM3_IP3 = 'CAM_1: ' + IP_3
-ENTRYFIELD_TEXT = 'Please fill both entry\nfield to stop streaming.'
-
+CAM2_IP2 = 'CAM_2: ' + IP_2
+CAM3_IP3 = 'CAM_3: ' + IP_3
+rtsp_address = None
+ENTRYFIELD_TEXT = 'Please fill both entry field to stop streaming.'
+CAMCHOICE_TEXT = 'Please choose a camera address.'
+TESTMODE_TEXT = 'Quit testing mode to switch to streaming mode.'
 formatted_date = strftime('%Y_%m_%d', localtime())
 metadata = {'speaker_name':'NC',
             'session_title':'NC',
@@ -86,6 +91,9 @@ class Streamgui(object):
 
 
     def __init__(self):
+        # Initialize a pipeline
+        self.pipel = None
+
         # Create the GUI
         self.win = Gtk.Window()
         self.win.set_title("ABYSS")
@@ -95,15 +103,15 @@ class Streamgui(object):
         vbox = Gtk.VBox(False, 0)
         vbox_labels = Gtk.VBox(False, 0)
         vbox_entries = Gtk.VBox(False, 0)
-        vbox_streaminfo = Gtk.VBox(False, 0)
-        vbox_cpuinfo = Gtk.VBox(False, 0)
-        vbox_rbuttongrp = Gtk.VBox(False, 0)
+        vbox_streaminfo = Gtk.VBox(True, 0)
+        vbox_tbuttongrp = Gtk.VBox(False, 0)
         hbox = Gtk.HBox(False, 30)
         hbox_videoaudio = Gtk.HBox(False, 0)
         hbox_time = Gtk.HBox(False, 0)
+        hbox_cpu = Gtk.HBox(False, 0)
 
         self.videowidget = Gtk.DrawingArea()
-        self.videowidget.set_size_request(600, 400)
+        self.videowidget.set_size_request(800, 600)
 
 
         # True stereo feed has to be implemented:
@@ -128,22 +136,33 @@ class Streamgui(object):
         self.sessioninfo_label = Gtk.Label('Session name: ')
         self.sessioninfo_entry = Gtk.Entry()
 
-        self.stream_button = Gtk.Button("Stream")
-        self.stream_button.connect("clicked", self.on_stream_clicked)
+        self.stream_button = Gtk.Button('Stream')
+        self.stream_button.connect('clicked', self.on_stream_clicked)
         self.streamtime_label = Gtk.Label('Time elapsed ')
         self.streamtime_value = Gtk.Label('00:00:00')
+        self.test_button = Gtk.Button('Set-up test')
+        self.test_button.connect('clicked', self.on_test_clicked)
 
         self.cpuload_label = Gtk.Label('CPU load: ')
         self.cpuload_value = Gtk.Label('NC')
 
-        self.cam1_rbutton = Gtk.RadioButton(None, label=CAM1_IP1)
-        self.cam2_rbutton = Gtk.RadioButton(self.cam1_rbutton, label=CAM2_IP2)
-        self.cam3_rbutton = Gtk.RadioButton(self.cam1_rbutton, label=CAM3_IP3)
+        self.cam1_tbutton = Gtk.ToggleButton(None, label=CAM1_IP1)
+        self.cam1_tbutton.connect('toggled', self.on_tbutton_toggled, 'cam1')
+        self.cam2_tbutton = Gtk.ToggleButton(self.cam1_tbutton)
+        self.cam2_tbutton.set_label(CAM2_IP2)
+        self.cam2_tbutton.connect('toggled', self.on_tbutton_toggled, 'cam2')
+        self.cam3_tbutton = Gtk.ToggleButton(self.cam1_tbutton)
+        self.cam3_tbutton.set_label(CAM3_IP3)
+        self.cam3_tbutton.connect('toggled', self.on_tbutton_toggled, 'cam3')
 
         self.entryfield_info = Gtk.MessageDialog(buttons=Gtk.ButtonsType.CLOSE,
                                                  text=ENTRYFIELD_TEXT,)
                                                  ##messagetype=Gtk.MessageType.WARNING,
                                                  ##Gtk.MessageType.INFO,)
+        self.camchoice_info = Gtk.MessageDialog(buttons=Gtk.ButtonsType.CLOSE,
+                                                text=CAMCHOICE_TEXT,)
+        self.testmode_info = Gtk.MessageDialog(buttons=Gtk.ButtonsType.CLOSE,
+                                               text=TESTMODE_TEXT,)
 
         hbox_videoaudio.pack_start(self.videowidget, True, True, 0)
         hbox_videoaudio.pack_start(self.vumeter_l, False, False, 3)
@@ -154,19 +173,21 @@ class Streamgui(object):
         vbox_entries.pack_start(self.baseinfo_entry_label, True, True, 0)
         vbox_entries.pack_start(self.speakerinfo_entry, True, True, 0)
         vbox_entries.pack_start(self.sessioninfo_entry, True, True, 0)
-        vbox_streaminfo.pack_start(self.stream_button, False, True, 15)
         hbox_time.pack_start(self.streamtime_label, False, False, 0)
         hbox_time.pack_start(self.streamtime_value, False, False, 0)
+        hbox_cpu.pack_start(self.cpuload_label, False, False, 0)
+        hbox_cpu.pack_start(self.cpuload_value, False, False, 0)
         vbox_streaminfo.pack_start(hbox_time, False, True, 0)
-        vbox_rbuttongrp.pack_start(self.cam1_rbutton, False, False, 0)
-        vbox_rbuttongrp.pack_start(self.cam2_rbutton, False, False, 0)
-        vbox_rbuttongrp.pack_start(self.cam3_rbutton, False, False, 0)
+        vbox_streaminfo.pack_start(hbox_cpu, False, True, 0)
+        vbox_tbuttongrp.pack_start(self.cam1_tbutton, False, False, 0)
+        vbox_tbuttongrp.pack_start(self.cam2_tbutton, False, False, 0)
+        vbox_tbuttongrp.pack_start(self.cam3_tbutton, False, False, 0)
         hbox.pack_start(vbox_labels, False, False, 0)
         hbox.pack_start(vbox_entries, False, False, 0)
+        hbox.pack_start(vbox_tbuttongrp, False, False, 0)
+        hbox.pack_start(self.test_button, False, False, 0)
+        hbox.pack_start(self.stream_button, False , False, 0)
         hbox.pack_start(vbox_streaminfo, False, False, 0)
-        hbox.pack_start(self.cpuload_label, False, False, 0)
-        hbox.pack_start(self.cpuload_value, False, False, 0)
-        hbox.pack_start(vbox_rbuttongrp, False, False, 0)
 
         vbox.pack_start(hbox_videoaudio, True, True, 0)
         vbox.pack_start(hbox, False, True, 0)
@@ -176,15 +197,18 @@ class Streamgui(object):
 
 
         self.xid = self.videowidget.get_property('window').get_xid()
-        self.create_pipeline_instance()
-
     def create_pipeline_instance(self, feed='main'):
         """Creates pipeline instance and attaches it to GUI."""
-        self.pipel = gstconf.New_user_pipeline(feed)
-        bus = gstconf.get_gstreamer_bus()
-        bus.connect('sync-message::element', self.on_sync_message)
-        bus.connect('message', self.on_message)
-        # Try to use 'sync-message::element' instead of 'message'
+        if rtsp_address:
+            self.pipel = gstconf.New_user_pipeline(rtsp_address, feed,)
+            bus = gstconf.get_gstreamer_bus()
+            bus.connect('sync-message::element', self.on_sync_message)
+            bus.connect('message', self.on_message)
+            return True
+        else:
+            self.camchoice_info.run()
+            self.camchoice_info.hide()
+            return False
 
     def create_backup_pipeline(self):
         labelname = self.stream_button.get_label()
@@ -219,20 +243,57 @@ class Streamgui(object):
             self.create_backup_pipeline()
 
     def on_stream_clicked(self, widget):
-        labelname = self.stream_button.get_label()
-        if labelname == 'Stream':
-            if self.pipel.feed == 'backup':
-                # Get back to main feed:
-                self.create_pipeline_instance()
-            self.clean_entry_fields()
-            self.pipel.stream_play()
-            self.stream_button.set_label('ON AIR')
-            start_time = time()
-        elif labelname == 'ON AIR':
+        labelname1 = self.stream_button.get_label()
+        labelname2 = self.test_button.get_label()
+        if labelname1 == 'Stream':
+            if labelname2 != 'Testing ...':
+                if self.create_pipeline_instance():
+                    self.clean_entry_fields()
+                    self.pipel.stream_play()
+                    self.stream_button.set_label('ON AIR')
+                    start_time = time()
+            else:
+                self.testmode_info.run()
+                self.testmode_info.hide()
+        elif labelname1 == 'ON AIR':
             if self.build_filename():
                 self.pipel.stream_stop()
                 self.stream_button.set_label('Stream')
 
+    def on_test_clicked(self, widget):
+        labelname = self.test_button.get_label()
+        if labelname == 'Set-up test':
+            if self.create_pipeline_instance(feed='test'):
+                self.pipel.stream_play()
+                self.test_button.set_label('Testing ...')
+        elif labelname == 'Testing ...':
+            self.pipel.stream_stop()
+            self.test_button.set_label('Set-up test')
+
+    def on_tbutton_toggled(self, tbutton, name):
+        global rtsp_address
+        running_cond = (self.stream_button.get_label() == 'ON AIR' or
+                        self.test_button.get_label() == 'Testing ...')
+        if running_cond:
+            tbutton.set_active(False)
+##            if tbutton.active():
+##                tbutton.set_active(True)
+            return
+
+        if tbutton.get_active():
+            if name == 'cam1':
+                self.cam2_tbutton.set_active(False)
+                self.cam3_tbutton.set_active(False)
+                rtsp_address = IP_1 + PORT
+            elif name == 'cam2':
+                self.cam1_tbutton.set_active(False)
+                self.cam3_tbutton.set_active(False)
+                rtsp_address = IP_2 + PORT
+            elif name == 'cam3':
+                self.cam1_tbutton.set_active(False)
+                self.cam2_tbutton.set_active(False)
+                rtsp_address = IP_3 + PORT
+
     def build_filename(self, streamfailed=False):
         """Get text in entries, check if empty and apply formatting if needed."""
         sep = '_'
@@ -251,7 +312,7 @@ class Streamgui(object):
                 self.pipel.set_filenames(raw_filename, streamfailed=True)
             else:
                 self.pipel.set_filenames(raw_filename,)
-                print('RAWFILENAM: ', raw_filename, ' <--')
+##                print('RAWFILENAME: ', raw_filename, ' <--') # [DEBUG]
         elif streamfailed:
             self.pipel.set_filenames(raw_filename, streamfailed=True)
         return True
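
Usage note: the 'test' feed introduced above gives a way to exercise the new pipeline path (RTSP video straight to xvimagesink, microphone audio to the pulsesink selected by AUDIO_OUTPUT) without touching the encoders, muxers or Icecast sinks. The snippet below is only an illustrative sketch, not part of the commit: it assumes gstconf.py from this tree is importable, that importing it initialises GStreamer, and that a camera answers RTSP on IP_1; the hard-coded address and the ten-second preview are assumptions for the example.

#!/usr/bin/env python3.4
# Illustrative sketch only -- not part of this commit.
# Mirrors what on_tbutton_toggled() and on_test_clicked() do in libre-streamer.py.
from time import sleep

import gstconf

PORT = ':554'               # same constants as libre-streamer.py
IP_1 = '192.168.48.2'       # assumed ELPHEL camera address

rtsp_address = IP_1 + PORT  # what a camera toggle button stores globally
# feed='test': audio -> pulsesink (AUDIO_OUTPUT), video -> xvimagesink;
# without the GUI xid, xvimagesink opens its own preview window.
pipel = gstconf.New_user_pipeline(rtsp_address, feed='test')
pipel.stream_play()         # what the 'Set-up test' button triggers
sleep(10)                   # preview for a while (arbitrary duration)
pipel.stream_stop()         # what the 'Testing ...' button triggers

In the GUI itself, create_pipeline_instance(feed='test') performs the same construction and additionally hooks the GStreamer bus to the window before the 'Set-up test' button switches the pipeline between PLAYING and stopped.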