From 969dd83740a7f1ed31f164a6762ac8f82ebf8f71 Mon Sep 17 00:00:00 2001
From: =?utf8?q?David=20Test=C3=A9?= <soonum@gnu.org>
Date: Fri, 11 Mar 2016 23:53:44 +0100
Subject: [PATCH] Add fixed configuration (parsed from .abyss). Remove input
 selection buttons.

---
 stream_2016/abyss.py   | 353 +++++++++++++++++++++++++++++++++++++++++
 stream_2016/gstconf.py |  72 ++++++---
 2 files changed, 401 insertions(+), 24 deletions(-)
 create mode 100755 stream_2016/abyss.py

diff --git a/stream_2016/abyss.py b/stream_2016/abyss.py
new file mode 100755
index 0000000..bade6d6
--- /dev/null
+++ b/stream_2016/abyss.py
@@ -0,0 +1,353 @@
+#!/usr/bin/env python3.4
+# -*- coding: utf-8 -*-
+
+# This file is part of ABYSS.
+# ABYSS: Broadcast Your Streaming Successfully
+#
+# ABYSS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ABYSS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ABYSS.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Copyright (c) 2016 David Testé
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# TODO list:
+# ----------
+# - Implement a method to switch to the webcam feed if the Elphel camera feed is lost
+#   --> Use a ping by opening a Telnet connection every 2 seconds (if it fails, switch to the webcam)
+#   --> Has to be threaded
+# - Add a checkbox to enable/disable options (storing+streaming, storing only, streaming only, etc.)
+# - Add a function to get the IP address of the camera automatically (see github.com/paulmilliken)
+# - Create a module for the network configuration (fan/CPU, ifconfig, stream server, etc.)
+#   --> Taken care of in the FAI build
+# - Generate a log file during runtime (e.g. to know whether the network configuration
+#   and the pipeline construction went well or not)
+# - Add an input source choice for the user (camera over IP or webcam)
+# - Add a time counter
+#   --> Has to be threaded
+# - Add a 'CPU load' widget
+# - Add the FSF logo (need to do some pixel art) as an application icon
+# - Add the FSF logo inside the stream using the 'textoverlay' element in ElementFactory.make()
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# INFO: run the following command in a terminal to launch ABYSS with an error log:
+# GST_DEBUG=4,python:5,gnl*:5 ./abyss.py | tee -a log 2>&1
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
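+
+# ----------------------------------------------------------------------------------------
+# Illustrative sketch for the first TODO item above (switching away from a lost Elphel
+# feed).  It is not wired into the GUI below; the helper name, the default RTSP port and
+# the callback are assumptions made for this example only.  The idea: poll the camera's
+# TCP port from a daemon thread every 2 seconds and fire a callback once it stops
+# answering, so the caller can rebuild the pipeline on the webcam source.
+import socket
+import threading
+
+
+def watch_camera(host, port=554, interval=2.0, on_lost=None):
+    """Poll host:port every `interval` seconds; call on_lost() when the camera is gone."""
+    idle = threading.Event()
+
+    def poll():
+        while True:
+            try:
+                socket.create_connection((host, port), timeout=interval).close()
+            except OSError:
+                if on_lost is not None:
+                    on_lost()  # e.g. switch the running pipeline over to the webcam feed
+                return
+            idle.wait(interval)
+
+    thread = threading.Thread(target=poll, daemon=True)
+    thread.start()
+    return thread
+# ----------------------------------------------------------------------------------------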
+ +__author__ = 'David Testé' +__licence__ = 'GPLv3' +__version__ = 0.1 +__maintainer__ = 'David Testé' +__email__ = 'soonum@gnu.org' +__status__ = 'Prototype' + + +import sys +from time import time, localtime, strftime + +import gi +gi.require_version('Gtk', '3.0') +from gi.repository import Gtk +from gi.repository import Gdk +gi.require_version('Gst', '1.0') +from gi.repository import Gst +from gi.repository import GdkX11 +from gi.repository import GstVideo +from gi.repository import GObject + +import gstconf + +# Based on 2016 FSF's ELPHEL camera configuration +##PORT = ':554' +##IP_1 = '192.168.48.2' +##IP_2 = '192.168.48.3' +##IP_3 = '192.168.48.4' +##CAM1_IP1 = 'CAM_1: ' + IP_1 +##CAM2_IP2 = 'CAM_2: ' + IP_2 +##CAM3_IP3 = 'CAM_3: ' + IP_3 +##rtsp_address = None +ENTRYFIELD_TEXT = 'Please fill both entry field to stop streaming.' +CAMCHOICE_TEXT = 'Please choose a camera address.' +TESTMODE_TEXT = 'Quit testing mode to switch to streaming mode.' +formatted_date = strftime('%Y_%m_%d', localtime()) +metadata = {'speaker_name':'NC', + 'session_title':'NC', + 'organisation':'NC',} +start_time = 0 + + +class Streamgui(object): + + + def __init__(self): + + # Initialize a pipeline + self.pipel = None + + # Create the GUI + self.win = Gtk.Window() + self.win.set_title("ABYSS") + self.win.connect("delete_event", + lambda w,e: Gtk.main_quit()) +## self.win.fullscreen() + vbox = Gtk.VBox(False, 0) + vbox_labels = Gtk.VBox(False, 0) + vbox_entries = Gtk.VBox(False, 0) + vbox_streaminfo = Gtk.VBox(True, 0) + vbox_tbuttongrp = Gtk.VBox(False, 0) + hbox = Gtk.HBox(False, 30) + hbox_videoaudio = Gtk.HBox(False, 0) + hbox_time = Gtk.HBox(False, 0) + hbox_cpu = Gtk.HBox(False, 0) + + self.videowidget = Gtk.DrawingArea() + self.videowidget.set_size_request(800, 600) + + + # True stereo feed has to be implemented: + self.vumeter_l = Gtk.ProgressBar() + self.vumeter_l.set_orientation(Gtk.Orientation.VERTICAL) + self.vumeter_l.set_inverted(True) + self.vumeter_r = Gtk.ProgressBar() + self.vumeter_r.set_orientation(Gtk.Orientation.VERTICAL) + self.vumeter_r.set_inverted(True) +## Use CSS to modify the color of ProgressBar +## color = Gdk.RGBA() +## Gdk.RGBA.parse(color, 'rgb(240,0,150)') +## print ("Color: ", color) +## self.vumeter.override_background_color(Gtk.StateFlags.NORMAL, color) +## self.vumeter.override_symbolic_color('bg_color', color) +## self.vumeter.override_symbolic_color('theme_bg_color', color) + + self.baseinfo_label = Gtk.Label('Base info: ') + self.baseinfo_entry_label = Gtk.Label('LP_' + formatted_date) + self.speakerinfo_label = Gtk.Label('Speaker name: ') + self.speakerinfo_entry = Gtk.Entry() + self.sessioninfo_label = Gtk.Label('Session name: ') + self.sessioninfo_entry = Gtk.Entry() + + self.stream_button = Gtk.Button('Stream') + self.stream_button.connect('clicked', self.on_stream_clicked) + self.streamtime_label = Gtk.Label('Time elapsed ') + self.streamtime_value = Gtk.Label('00:00:00') + self.test_button = Gtk.Button('Set-up test') + self.test_button.connect('clicked', self.on_test_clicked) + + self.cpuload_label = Gtk.Label('CPU load: ') + self.cpuload_value = Gtk.Label('NC') + +## self.cam1_tbutton = Gtk.ToggleButton(None, label=CAM1_IP1) +## self.cam1_tbutton.connect('toggled', self.on_tbutton_toggled, 'cam1') +## self.cam2_tbutton = Gtk.ToggleButton(self.cam1_tbutton) +## self.cam2_tbutton.set_label(CAM2_IP2) +## self.cam2_tbutton.connect('toggled', self.on_tbutton_toggled, 'cam2') +## self.cam3_tbutton = Gtk.ToggleButton(self.cam1_tbutton) +## 
self.cam3_tbutton.set_label(CAM3_IP3) +## self.cam3_tbutton.connect('toggled', self.on_tbutton_toggled, 'cam3') + + self.entryfield_info = Gtk.MessageDialog(buttons=Gtk.ButtonsType.CLOSE, + text=ENTRYFIELD_TEXT,) + ##messagetype=Gtk.MessageType.WARNING, + ##Gtk.MessageType.INFO,) +## self.camchoice_info = Gtk.MessageDialog(buttons=Gtk.ButtonsType.CLOSE, +## text=CAMCHOICE_TEXT,) + self.testmode_info = Gtk.MessageDialog(buttons=Gtk.ButtonsType.CLOSE, + text=TESTMODE_TEXT,) + + hbox_videoaudio.pack_start(self.videowidget, True, True, 0) + hbox_videoaudio.pack_start(self.vumeter_l, False, False, 3) + hbox_videoaudio.pack_start(self.vumeter_r, False, False, 3) + vbox_labels.pack_start(self.baseinfo_label, True, True, 0) + vbox_labels.pack_start(self.speakerinfo_label, True, True, 0) + vbox_labels.pack_start(self.sessioninfo_label, True, True, 0) + vbox_entries.pack_start(self.baseinfo_entry_label, True, True, 0) + vbox_entries.pack_start(self.speakerinfo_entry, True, True, 0) + vbox_entries.pack_start(self.sessioninfo_entry, True, True, 0) + hbox_time.pack_start(self.streamtime_label, False, False, 0) + hbox_time.pack_start(self.streamtime_value, False, False, 0) + hbox_cpu.pack_start(self.cpuload_label, False, False, 0) + hbox_cpu.pack_start(self.cpuload_value, False, False, 0) + vbox_streaminfo.pack_start(hbox_time, False, True, 0) + vbox_streaminfo.pack_start(hbox_cpu, False, True, 0) +## vbox_tbuttongrp.pack_start(self.cam1_tbutton, False, False, 0) +## vbox_tbuttongrp.pack_start(self.cam2_tbutton, False, False, 0) +## vbox_tbuttongrp.pack_start(self.cam3_tbutton, False, False, 0) + hbox.pack_start(vbox_labels, False, False, 0) + hbox.pack_start(vbox_entries, False, False, 0) +## hbox.pack_start(vbox_tbuttongrp, False, False, 0) + hbox.pack_start(self.test_button, False, False, 0) + hbox.pack_start(self.stream_button, False , False, 0) + hbox.pack_start(vbox_streaminfo, False, False, 0) + vbox.pack_start(hbox_videoaudio, True, True, 0) + vbox.pack_start(hbox, False, True, 0) + + self.win.add(vbox) + self.win.set_position(Gtk.WindowPosition.CENTER) + self.win.show_all() + + self.xid = self.videowidget.get_property('window').get_xid() + + def create_pipeline_instance(self, feed='main'): + """Creates pipeline instance and attaches it to GUI.""" + self.pipel = gstconf.New_user_pipeline(feed,) + bus = gstconf.get_gstreamer_bus() + bus.connect('sync-message::element', self.on_sync_message) + bus.connect('message', self.on_message) + return True + + def create_backup_pipeline(self): + labelname = self.stream_button.get_label() + if labelname == 'ON AIR': + self.create_pipeline_instance(feed='backup') + self.pipel.stream_play() + + def on_sync_message(self, bus, message): + + if message.get_structure().get_name() == 'prepare-window-handle': + imagesink = message.src + imagesink.set_property('force-aspect-ratio', True) + imagesink.set_window_handle(self.videowidget.get_property('window').get_xid()) + + def on_message(self, bus, message): + # Getting the RMS audio level value: + s = Gst.Message.get_structure(message) + if message.type == Gst.MessageType.ELEMENT: + if str(Gst.Structure.get_name(s)) == 'level': + pct = self.iec_scale(s.get_value('rms')[0]) + ##print('Level value: ', pct, '%') # [DEBUG] + self.vumeter_l.set_fraction(pct) + self.vumeter_r.set_fraction(pct) + # Watching for feed loss during streaming: + t = message.type + if t == Gst.MessageType.ERROR: + err, debug = message.parse_error() + if '(651)' not in debug: + # The error is not a socket error. 
+                self.pipel.stream_stop()
+                self.build_filename(streamfailed=True)
+                self.create_backup_pipeline()
+
+    def on_stream_clicked(self, widget):
+        global start_time
+        labelname1 = self.stream_button.get_label()
+        labelname2 = self.test_button.get_label()
+        if labelname1 == 'Stream':
+            if labelname2 != 'Testing ...':
+                if self.create_pipeline_instance():
+                    self.clean_entry_fields()
+                    self.pipel.stream_play()
+                    self.stream_button.set_label('ON AIR')
+                    start_time = time()
+            else:
+                self.testmode_info.run()
+                self.testmode_info.hide()
+        elif labelname1 == 'ON AIR':
+            if self.build_filename():
+                self.pipel.stream_stop()
+                self.stream_button.set_label('Stream')
+
+    def on_test_clicked(self, widget):
+        labelname = self.test_button.get_label()
+        if labelname == 'Set-up test':
+            if self.create_pipeline_instance(feed='test'):
+                self.pipel.stream_play()
+                self.test_button.set_label('Testing ...')
+        elif labelname == 'Testing ...':
+            self.pipel.stream_stop()
+            self.test_button.set_label('Set-up test')
+
+##    def on_tbutton_toggled(self, tbutton, name):
+##        global rtsp_address
+##        running_cond = (self.stream_button.get_label() == 'ON AIR' or
+##                        self.test_button.get_label() == 'Testing ...')
+##        if running_cond:
+##            tbutton.set_active(False)
+##            return
+##
+##        if tbutton.get_active():
+##            if name == 'cam1':
+##                self.cam2_tbutton.set_active(False)
+##                self.cam3_tbutton.set_active(False)
+##                rtsp_address = IP_1 + PORT
+##            elif name == 'cam2':
+##                self.cam1_tbutton.set_active(False)
+##                self.cam3_tbutton.set_active(False)
+##                rtsp_address = IP_2 + PORT
+##            elif name == 'cam3':
+##                self.cam1_tbutton.set_active(False)
+##                self.cam2_tbutton.set_active(False)
+##                rtsp_address = IP_3 + PORT
+
+    def build_filename(self, streamfailed=False):
+        """Get text in entries, check if empty and apply formatting if needed."""
+        sep = '_'
+        base = self.baseinfo_entry_label.get_text()
+        speaker = self.speakerinfo_entry.get_text()
+        speaker = sep.join(speaker.split())
+        session = self.sessioninfo_entry.get_text()
+        session = sep.join(session.split())
+        raw_filename = base + sep + speaker + sep + session
+        maxlen = 70
+        if speaker and session:
+            if len(raw_filename) >= maxlen:
+                offset = len(raw_filename) - maxlen
+                raw_filename = raw_filename[:-offset]
+            if streamfailed:
+                self.pipel.set_filenames(raw_filename, streamfailed=True)
+            else:
+                self.pipel.set_filenames(raw_filename,)
+##            print('RAWFILENAME: ', raw_filename, ' <--')  # [DEBUG]
+            return True
+        elif streamfailed:
+            self.pipel.set_filenames(raw_filename, streamfailed=True)
+            return True
+        elif not streamfailed:
+            self.entryfield_info.run()
+            self.entryfield_info.hide()
+            return False
+
+
+    def clean_entry_fields(self):
+        self.speakerinfo_entry.set_text('')
+        self.sessioninfo_entry.set_text('')
+
+    def iec_scale(self, db):
+        """Returns the meter deflection percentage given a dB value."""
+        pct = 0.0
+
+        if db < -70.0:
+            pct = 0.0
+        elif db < -60.0:
+            pct = (db + 70.0) * 0.25
+        elif db < -50.0:
+            pct = (db + 60.0) * 0.5 + 2.5
+        elif db < -40.0:
+            pct = (db + 50.0) * 0.75 + 7.5
+        elif db < -30.0:
+            pct = (db + 40.0) * 1.5 + 15.0
+        elif db < -20.0:
+            pct = (db + 30.0) * 2.0 + 30.0
+        elif db < 0.0:
+            pct = (db + 20.0) * 2.5 + 50.0
+        else:
+            pct = 100.0
+        return pct / 100
+
+    ## Use threading module to refresh the time elapsed since the beginning of the stream??
+ def time_elapsed(self, widget): + if self.pipel.stream_get_state() == 'PLAYING': + pass + + +if __name__ == "__main__": + Gst.init() + Streamgui() + Gtk.main() diff --git a/stream_2016/gstconf.py b/stream_2016/gstconf.py index 5d016d1..785d0af 100755 --- a/stream_2016/gstconf.py +++ b/stream_2016/gstconf.py @@ -21,17 +21,18 @@ from os import rename from os import listdir +from os import path from time import localtime, strftime +import configparser import gi from gi.repository import Gst from gi.repository import GstVideo # Pathname has to be defined -PATHNAME = '' -AUDIO_DEFAULT = PATHNAME + 'AUDIO_DEFAULT' -RAWVIDEO_DEFAULT = PATHNAME + 'RAWVIDEO_DEFAULT' -STREAM_DEFAULT = PATHNAME + 'STREAM_DEFAULT' +AUDIO_DEFAULT = 'AUDIO_DEFAULT' +RAWVIDEO_DEFAULT = 'RAWVIDEO_DEFAULT' +STREAM_DEFAULT = 'STREAM_DEFAULT' BACKUP_SUFFIX = '_BACKUP' FAILED_SUFFIX = '_FAILED_' fail_counter = 1 @@ -41,16 +42,38 @@ STREAM_BACKUP = STREAM_DEFAULT + BACKUP_SUFFIX ERROR = '[ERROR] ' INFO = '[INFO] ' WARN = '[WARN] ' - -AUDIO_INPUT = 'alsa_input.usb-Burr-Brown_from_TI_USB_Audio_CODEC-00-CODEC.analog-stereo' -AUDIO_OUTPUT = 'alsa_output.pci-0000_00_1b.0.analog-stereo' +CONFIG = '.abyss' + +sources = {'RTSP_IP' : None, + 'AUDIO_INPUT' : None,} +sinks = {'AUDIO_OUTPUT' : None, + 'DIR': None, + 'STREAM_SERVER_IP' : None, + 'SERVER_PORT' : None, + 'PASSWORD' : None, + 'AUDIO_MOUNT' : None, + 'VIDEO_MOUNT' : None,} + +##AUDIO_INPUT = 'alsa_input.usb-Burr-Brown_from_TI_USB_Audio_CODEC-00-CODEC.analog-stereo' +##AUDIO_OUTPUT = 'alsa_output.pci-0000_00_1b.0.analog-stereo' + +config = configparser.RawConfigParser() +if path.exists(CONFIG): + config.read(CONFIG) + try: + sources = {key : config.get('sources', key) for key in sources} + sinks = {key : config.get('sinks', key) for key in sinks} + except: + print(ERROR, gettime(), 'Failed to parse config file.') +else: + print(ERROR, gettime(), '".abyss" config file doesn\'t exist.') class New_user_pipeline(): - def __init__(self, rtsp_address, feed='main'): - self.rtsp_address = 'rtsp://' + rtsp_address + def __init__(self, feed='main'): + self.rtsp_address = 'rtsp://' + sources['RTSP_IP'] self.feed = feed self.user_pipeline = self.create_gstreamer_pipeline() @@ -58,7 +81,6 @@ class New_user_pipeline(): """Create video inputs from various sources.""" self.videosrc = Gst.ElementFactory.make('rtspsrc', 'videosrc') self.videosrc.set_property('location', self.rtsp_address) -## self.videosrc.set_property('location', 'rtsp://192.168.48.2:554') self.videosrc.set_property('latency', 0) ## self.videosrc.set_property('debug', True) if self.feed == 'backup': @@ -99,7 +121,7 @@ class New_user_pipeline(): def create_audio_sources(self): """Create audio inputs from various sources.""" self.audiosrc = Gst.ElementFactory.make('pulsesrc', 'audiosrc') - self.audiosrc.set_property('device', AUDIO_INPUT) + self.audiosrc.set_property('device', sources['AUDIO_INPUT']) def create_audiolevel_plugin(self): """Create audio level plugin to feed a vu-meter.""" @@ -127,22 +149,21 @@ class New_user_pipeline(): self.screensink.set_property('sync', False) # To local audio output (headphones): self.audiosink = Gst.ElementFactory.make('pulsesink', 'audiosink') - self.audiosink.set_property('device', AUDIO_OUTPUT) + self.audiosink.set_property('device', sinks['AUDIO_OUTPUT']) self.audiosink.set_property('sync', False) # To icecast server: self.icecastsink_audio = Gst.ElementFactory.make('shout2send', 'icecastsink_audio') self.icecastsink_audio.set_property('sync', False) -## Configuration should be written 
 on a file locally to keep safe private addresses
-        self.icecastsink_audio.set_property('ip', 'live2.fsf.org')
-        self.icecastsink_audio.set_property('port', 80)
-        self.icecastsink_audio.set_property('mount', 'testaudio.ogg')
-        self.icecastsink_audio.set_property('password', 'thahw3Wiez')
+        self.icecastsink_audio.set_property('ip', sinks['STREAM_SERVER_IP'])  # was 'live2.fsf.org'
+        self.icecastsink_audio.set_property('port', int(sinks['SERVER_PORT']))
+        self.icecastsink_audio.set_property('mount', sinks['AUDIO_MOUNT'])  # was 'testaudio.ogg'
+        self.icecastsink_audio.set_property('password', sinks['PASSWORD'])
         self.icecastsink_stream = Gst.ElementFactory.make('shout2send', 'icecastsink_stream')
         self.icecastsink_stream.set_property('sync', False)
-        self.icecastsink_stream.set_property('ip', 'live2.fsf.org')
-        self.icecastsink_stream.set_property('port', 80)
-        self.icecastsink_stream.set_property('mount', 'teststream.webm')
-        self.icecastsink_stream.set_property('password', 'thahw3Wiez')
+        self.icecastsink_stream.set_property('ip', sinks['STREAM_SERVER_IP'])  # was 'live2.fsf.org'
+        self.icecastsink_stream.set_property('port', int(sinks['SERVER_PORT']))  # was 80
+        self.icecastsink_stream.set_property('mount', sinks['VIDEO_MOUNT'])  # was 'teststream.webm'
+        self.icecastsink_stream.set_property('password', sinks['PASSWORD'])
 
     def create_payloader_elements(self):
         pass
@@ -289,6 +310,7 @@ class New_user_pipeline():
         self.streampipe.add(self.queuev_2)
         self.streampipe.add(self.queuev_4)
         self.streampipe.add(self.queuev_5)
+##        self.streampipe.add(self.queuev_6)
         self.streampipe.add(self.queuea_4)
         self.streampipe.add(self.queuea_5)
         self.streampipe.add(self.queuem_1)
@@ -377,6 +399,8 @@ class New_user_pipeline():
                          self.rtpjpegdepay,
                          self.queuev_2,
                          self.jpegdec,)
+##                         self.queuev_6,)
+##        self.queuev_6.link(self.jpegdec)
         self.connect_tee(self.tee_videodecoded,
                          self.jpegdec,
                          self.queuev_3,
@@ -440,9 +464,9 @@ class New_user_pipeline():
         """Sets filename and location for each sink."""
         global fail_counter
         filename = string
-        audio = PATHNAME + filename + '_AUDIO'
-        rawvideo = PATHNAME + filename + '_RAWVIDEO'
-        stream = PATHNAME + filename + '_STREAM'
+        audio = sinks['DIR'] + filename + '_AUDIO'
+        rawvideo = sinks['DIR'] + filename + '_RAWVIDEO'
+        stream = sinks['DIR'] + filename + '_STREAM'
         print('FEED STATE: ', self.feed)
         if self.feed == 'main':
            if streamfailed and filename:
-- 
2.25.1
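
Note on the new configuration file: gstconf.py now reads a '.abyss' file with configparser. CONFIG = '.abyss' is a relative path, so the file is looked up in the current working directory (launch abyss.py from the directory that holds it). The file needs a [sources] section (RTSP_IP, AUDIO_INPUT) and a [sinks] section (AUDIO_OUTPUT, DIR, STREAM_SERVER_IP, SERVER_PORT, PASSWORD, AUDIO_MOUNT, VIDEO_MOUNT). The sketch below generates a template: the section and key names come from this patch, while every value is a placeholder to replace with real settings.

    # write_abyss_template.py -- generate a skeleton '.abyss' file for gstconf.py.
    # All values below are placeholders, not project settings.
    import configparser

    config = configparser.RawConfigParser()

    config.add_section('sources')
    config.set('sources', 'RTSP_IP', '0.0.0.0:554')            # camera RTSP address
    config.set('sources', 'AUDIO_INPUT', 'pulse-source-name')  # PulseAudio source

    config.add_section('sinks')
    config.set('sinks', 'AUDIO_OUTPUT', 'pulse-sink-name')     # PulseAudio sink (headphones)
    config.set('sinks', 'DIR', '/path/to/recordings/')         # local recording directory
    config.set('sinks', 'STREAM_SERVER_IP', 'icecast.example.org')
    config.set('sinks', 'SERVER_PORT', '80')
    config.set('sinks', 'PASSWORD', 'changeme')
    config.set('sinks', 'AUDIO_MOUNT', 'audio.ogg')
    config.set('sinks', 'VIDEO_MOUNT', 'stream.webm')

    with open('.abyss', 'w') as f:
        config.write(f)

Since set_filenames() concatenates DIR directly with the file name, DIR should end with a trailing slash. SERVER_PORT is kept as a string in the file; gstconf.py converts it with int() when configuring the shout2send elements.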