Raspberry Pi 3: disable the red and green lights

Disabling the lights on the Raspberry Pi has been documented a number of times on the web.
It differs from model to model; however, the following method just sets the LED brightness to zero and should work on all models.

Modify your /etc/rc.local to look like the following:

#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.

# Print the IP address
_IP=$(hostname -I) || true
if [ "$_IP" ]; then
  printf "My IP address is %s\n" "$_IP"
fi

# led0 is the green ACT (activity) LED, led1 is the red PWR (power) LED
echo 0 > /sys/class/leds/led1/brightness
echo 0 > /sys/class/leds/led0/brightness

exit 0
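If the writes have no effect, check what your kernel actually exposes: the LED names vary between models and OS releases (on some newer kernels they are ACT and PWR rather than led0 and led1). A quick sketch of what to check from a shell:

# List the LEDs the kernel exposes
ls /sys/class/leds/

# Disabling the trigger as well stops the kernel from driving the LED
echo none > /sys/class/leds/led0/trigger
echo 0 > /sys/class/leds/led0/brightness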

Seedbox auto-upload to Google Drive with rclone

If you are an avid seedbox user, here is another option for automatic uploads to Google Drive.
There are many ways of achieving this, including a plain cronjob.
I like this option because it only runs rclone when a torrent completes.

Using inotifywait to watch for file system events, we look out primarily for the 'move' event.
When a torrent completes, the client moves it into the completed folder.
When that event fires, we then execute rcopy.sh.

Set up rclone and the scripts

mkdir ~/bin
cd ~/bin
wget https://downloads.rclone.org/rclone-v1.38-linux-amd64.zip
unzip rclone-v1.38-linux-amd64.zip
mv rclone*/* .
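The scripts below assume a remote named "google" already exists in ~/.config/rclone/rclone.conf. If you have not created one yet, rclone's interactive config walks you through the Google Drive OAuth flow; just make sure the remote name matches the one rcopy.sh uses:

# Create the remote interactively; choose Google Drive and name it "google"
rclone config

# Sanity check: list the top-level directories of the new remote
rclone lsd google: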

rscreen.sh

#!/bin/bash -x

# If no screen session is running, clean up any stale rclone
# processes and lock files, then restart the watcher in a
# detached screen session.
ScSess=$(screen -ls)

if [[ $ScSess == *"No Sockets found"* ]]; then
  killall -9 rclone
  rm -f ~/bin/rcopy.sh.lock
  screen -dmS rwait ~/bin/rwait.sh
else
  exit
fi

rwait.sh

#!/bin/bash -x

COMPLETED_TORRENTS=~/private/deluge/completed/

# Block until something changes under the completed folder,
# then kick off an upload run.
while true; do
  inotifywait -r -e modify,attrib,move,close_write,create,delete,delete_self "$COMPLETED_TORRENTS"
  ~/bin/rcopy.sh
done
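inotifywait is not installed by default on most distributions; it ships with the inotify-tools package. On a Debian/Ubuntu seedbox:

sudo apt-get install inotify-tools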

rcopy.sh

#!/bin/bash -x

COMPLETED_TORRENTS=~/private/deluge/completed/
LOCKFILE="$HOME/bin/rcopy.sh.lock"

# A simple lock file stops overlapping uploads when events fire
# while a copy is still in progress. Note that tilde does not
# expand inside quotes, hence $HOME above.
if [ -f "$LOCKFILE" ]; then
  exit
else
  touch "$LOCKFILE"
  ~/bin/rclone --config="$HOME/.config/rclone/rclone.conf" --verbose --transfers=8 copy "$COMPLETED_TORRENTS" "google:/unsorted/"
  rm -f "$LOCKFILE"
fi
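The lock file does the job, but it is not crash-safe: if rcopy.sh dies mid-transfer, the stale lock blocks future uploads until rscreen.sh cleans it up. flock(1) avoids that, since the kernel releases the lock when the process exits. A sketch of the same script using it:

#!/bin/bash -x

COMPLETED_TORRENTS=~/private/deluge/completed/

# flock -n exits immediately if another copy already holds the lock;
# the lock is released automatically when rclone finishes or crashes.
flock -n /tmp/rcopy.lock \
  ~/bin/rclone --config="$HOME/.config/rclone/rclone.conf" --verbose --transfers=8 \
  copy "$COMPLETED_TORRENTS" "google:/unsorted/"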

Permissions and PATH

Once you have setup the 3 scripts, make them executable

Permissions

chmod +x ~/bin/*

If ~/bin is not in your PATH, add it and reload your shell.

Edit Path

echo "PATH=~/bin:\$PATH" >> ~/.bashrc
source ~/.bashrc

Finally, run the main script from a cronjob so the watcher is restarted if it ever dies.

Finalize and run in the background

Screen session

*/1 * * * * /media/sdf1/feeditout/bin/rscreen.sh >/dev/null 2>&1
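The path above is from my own seedbox; point it at wherever your rscreen.sh lives. To append the job to your crontab without opening an editor:

( crontab -l 2>/dev/null; echo '*/1 * * * * $HOME/bin/rscreen.sh >/dev/null 2>&1' ) | crontab -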

You can read more about inotifywait and rclone here:
https://linux.die.net/man/1/inotifywait
https://rclone.org/


Bord Gáis Energy Charts

Work in progress. Script available soon, including gas bills.


Distributed FFmpeg using Google App Engine

I've developed a distributed FFmpeg solution on Google App Engine.

The solution uses a combination of publish/subscribe and Redis queues for distributed communication.
https://github.com/dmzoneill/appengine-ffmpeg

It's composed of two services which scale horizontally: default (the coordinator) and worker.

Coordinator (default service and human interface)

#!/usr/bin/python
from flask import Flask
import os
import sys
import glob
import string
import random
import redis
from gcloud import storage, pubsub
from google.cloud import logging

PROJECT_ID = 'transcode-159215'
TOPIC = 'projects/{}/topics/message'.format(PROJECT_ID)

logclient = logging.Client()
logger = logclient.logger( "ffmpeg-pool" )

app = Flask(__name__)
app.config[ "SECRET_KEY" ] = "test"
app.debug = True


def publish( msg ):
    pubsub_client = pubsub.Client( PROJECT_ID )
    topic = pubsub_client.topic( "ffmpeg-pool" )

    if not topic.exists():
        topic.create()

    topic.publish( msg )


@app.route( "/readlog" )
def readLog():
    msg = ""

    try:
        for entry in logger.list_entries():
            msg = msg + entry.payload + "\n"
        logger.delete()
    except:
        msg = ""

    return msg


@app.route( "/cleantopic" )
def cleanTopics():
    client = pubsub.Client( PROJECT_ID )
    topic = client.topic( "ffmpeg-pool" )
    topic.delete()
    topic.create()
    return "Cleaned topic"


@app.route( "/split" )
def split():
    publish( "split" )
    return "File queued for splitting"


@app.route( "/transcode" )
def transcode():
    publish( "transcode" )
    return "Job queued for transcoding"


@app.route( "/combine" )
def combine():
    publish( "combine" )
    return "Job queued for combining"


@app.route( "/" )
def home():
    return "/split | /transcode | /combine | /cleantopic | /readlog"


if __name__ == '__main__':
    app.run( host='127.0.0.1', port=8080, debug=True )

Worker

import os
import sys
import socket
import time
import redis
import glob
from gcloud import storage, pubsub
from google.cloud import logging


logclient = logging.Client()
logger = logclient.logger( "ffmpeg-pool" )

PROJECT_ID = 'transcode-159215'
TOPIC = 'projects/{}/topics/message'.format(PROJECT_ID)
psclient = None
pstopic = None
pssub = None

# Minimal FIFO queue backed by a Redis list
class RedisQueue(object):
    def __init__( self, name, namespace = 'queue' ):
        self.__db = redis.Redis( host = "redis-11670.c10.us-east-1-4.ec2.cloud.redislabs.com", port=11670 )
        self.key = '%s:%s' % (namespace, name)

    def qsize( self ):
        return self.__db.llen( self.key )

    def empty( self ):
        return self.qsize() == 0

    def put( self, item ):
        self.__db.rpush( self.key, item )

    def get( self, block=True, timeout=None ):
        if block:
            item = self.__db.blpop( self.key, timeout=timeout )
        else:
            item = self.__db.lpop( self.key )

        if item:
            item = item[1]
        return item

    def get_nowait( self ):
        return self.get( False )


def download( rfile ):
    client = storage.Client( PROJECT_ID )
    bucket = client.bucket( PROJECT_ID + ".appspot.com" )
    blob = bucket.blob( rfile )

    # Binary mode: these are video files
    with open( "/tmp/" + rfile, 'wb' ) as f:
        blob.download_to_file( f )
        logger.log_text( "Worker: Downloaded: /tmp/" + rfile )


def upload( rfile ):
    client = storage.Client( PROJECT_ID )
    bucket = client.bucket( PROJECT_ID + ".appspot.com" )
    blob = bucket.blob( rfile )

    # Binary mode: these are video files
    blob.upload_from_file( open( "/tmp/" + rfile, 'rb' ) )

    logger.log_text( "Worker: Uploaded /tmp/" + rfile )


def transcode( rfile ):
    download( rfile )
    
    os.system( "rm /tmp/output*" )
    ret = os.system( "ffmpeg -i /tmp/" + rfile + " -c:v libx265 -preset medium -crf 28 -c:a aac -b:a 128k -strict -2 /tmp/output-" + rfile + ".mkv" )    
    
    if ret:
        logger.log_text( "Worker: convert failed : " + rfile + " - " + str( ret ).encode( 'utf-8' ) )
        return

    upload( "output-" + rfile + ".mkv" ) 


def split():
    rqueue = RedisQueue( "test" )
    download( "sample.mp4" )

    os.system( "rm -f /tmp/chunk*" )
    ret = os.system( "ffmpeg -i /tmp/sample.mp4 -map 0:a -map 0:v -codec copy -f segment -segment_time 10 -segment_format matroska -v error '/tmp/chunk-%03d.orig'" )

    if ret:
        return "Failed"

    for rfile in glob.glob( "/tmp/chunk*" ):
        basename = os.path.basename( rfile )
        upload( basename )
        rqueue.put( basename )


def combine():
    client = storage.Client( PROJECT_ID )
    bucket = client.bucket( PROJECT_ID + ".appspot.com" )
    blobs = bucket.list_blobs()

    os.system( "rm /tmp/*" )
    
    names = []
    
    for blob in blobs:
        if "output" in blob.name:
            names.append( blob.name.encode( 'utf-8' ) )

    names.sort()

    with open( '/tmp/combine.lst', 'w' ) as f1:
        for name in names:
            f1.write( "file '/tmp/" + name + "'\n" )
            download( name )

    logger.log_text( "Worker: created combine list: /tmp/combine.lst" )

    ret = os.system( "ffmpeg -f concat -safe 0 -i  /tmp/combine.lst -c copy /tmp/combined.mkv" )    
    
    if ret:
        logger.log_text( "Worker: combine failed: /tmp/combine.mkv - " + str(ret).encode( 'utf-8' ) )
        return

    upload( "combined.mkv" )


def subscribe():
    global psclient, pstopic, pssub

    psclient = pubsub.Client( PROJECT_ID )
    pstopic = psclient.topic( "ffmpeg-pool" )

    if not pstopic.exists():
        pstopic.create()
    
    pssub = pstopic.subscription( "ffmpeg-worker-" + socket.gethostname() )
    
    if not pssub.exists():
        pssub.create()
    

def handlemessages():
    global psclient, pstopic, pssub
    
    rqueue = RedisQueue( 'test' )
    subscribe()

    while True:
        messages = pssub.pull( return_immediately=False, max_messages=110 )

        for ack_id, message in messages:
            payload = message.data.encode( 'utf-8' ).replace( u"\u2018", "'" ).replace( u"\u2019", "'" )
            logger.log_text( "Worker: Received message: " + payload )
 
            try:
                pssub.acknowledge( [ack_id] )
                if payload == "combine":
                    combine()
                elif payload  == "split":
                    split()
                else:
                    rfile = rqueue.get()
                    basename = os.path.basename( rfile )
                    logger.log_text( "Worker: Redis popped: " + basename )

                    while basename != "None":
                        transcode( basename )
                        rfile = rqueue.get()
                        basename = os.path.basename( rfile )
                        logger.log_text( "Worker: Redis popped: " + rfile )

            except Exception as e:
                logger.log_text( "Worker: Error: " + e.message )
                sys.stderr.write( e.message )

        subscribe()
        time.sleep( 1 )


if __name__ == '__main__':
    handlemessages()
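With both services deployed, a full job looks like this: drop the source file into the app's default bucket (split() expects it to be named sample.mp4), then drive the pipeline through the coordinator. The hostname below assumes the project ID from the code; in practice you would wait for each stage to finish (watch /readlog) before firing the next.

# split() reads sample.mp4 from the default appspot bucket
gsutil cp sample.mp4 gs://transcode-159215.appspot.com/

# Split into chunks, fan out the transcodes, stitch the results
curl https://transcode-159215.appspot.com/split
curl https://transcode-159215.appspot.com/transcode
curl https://transcode-159215.appspot.com/combine

# Fetch the finished file
gsutil cp gs://transcode-159215.appspot.com/combined.mkv .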

Android Digiweb App

For the first time in five years I banged out an Android app, and here it is: a Digiweb broadband usage indicator.
It fetches your broadband usage and displays it as a widget on the home screen. Fairly basic, but it does the job nicely.

https://play.google.com/store/apps/details?id=ie.fio.dave.digiwebusage&hl=en
