From b5567c289f8791f0416cf5db248270b6c278b85a Mon Sep 17 00:00:00 2001
From: Fabio Manganiello
Date: Sat, 24 Mar 2018 03:05:46 +0100
Subject: [PATCH] - Added Pi camera stream over TCP backend
 - More consistent event handling for the pushtotalk assistant
 - Added general GPIO module

---
 .../backend/assistant/google/pushtotalk.py    | 47 ++++++++++------
 platypush/backend/camera/__init__.py          |  0
 platypush/backend/camera/pi.py                | 56 +++++++++++++++++++
 platypush/plugins/gpio/__init__.py            | 36 ++++++++++++
 .../plugins/gpio/zeroborg/lib/__init__.py     |  3 +-
 requirements.txt                              |  3 +
 6 files changed, 127 insertions(+), 18 deletions(-)
 create mode 100644 platypush/backend/camera/__init__.py
 create mode 100644 platypush/backend/camera/pi.py

diff --git a/platypush/backend/assistant/google/pushtotalk.py b/platypush/backend/assistant/google/pushtotalk.py
index c10f0a12ce..750d4fd9b6 100644
--- a/platypush/backend/assistant/google/pushtotalk.py
+++ b/platypush/backend/assistant/google/pushtotalk.py
@@ -122,18 +122,6 @@ class AssistantGooglePushtotalkBackend(Backend):
 
         self.device_handler = device_helpers.DeviceRequestHandler(self.device_id)
 
-    def _process_event(self, event):
-        logging.info('Received assistant event: {}'.format(event))
-
-        if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
-            self.bus.post(ConversationStartEvent())
-        elif event.type == EventType.ON_CONVERSATION_TURN_FINISHED:
-            self.bus.post(ConversationEndEvent())
-        elif event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
-            phrase = event.args['text'].lower().strip()
-            logging.info('Speech recognized: {}'.format(phrase))
-            self.bus.post(SpeechRecognizedEvent(phrase=phrase))
-
     def start_conversation(self):
         if self.assistant:
             with open(self.conversation_start_fifo, 'w') as f:
@@ -142,30 +130,42 @@ class AssistantGooglePushtotalkBackend(Backend):
     def stop_conversation(self):
         if self.assistant:
             self.conversation_stream.stop_playback()
+            self.bus.post(ConversationEndEvent())
 
     def send_message(self, msg):
         pass
 
+    def on_conversation_start(self):
+        self.bus.post(ConversationStartEvent())
+
+    def on_conversation_end(self):
+        self.bus.post(ConversationEndEvent())
+
+    def on_speech_recognized(self, speech):
+        self.bus.post(SpeechRecognizedEvent(phrase=speech))
+
     def run(self):
         super().run()
 
         with SampleAssistant(self.lang, self.device_model_id, self.device_id,
                              self.conversation_stream,
                              self.grpc_channel, self.grpc_deadline,
-                             self.device_handler) as self.assistant:
+                             self.device_handler,
+                             on_conversation_start=self.on_conversation_start,
+                             on_conversation_end=self.on_conversation_end,
+                             on_speech_recognized=self.on_speech_recognized) as self.assistant:
             while not self.should_stop():
                 with open(self.conversation_start_fifo, 'r') as f:
                     for line in f: pass
 
-                logging.info('Assistant conversation triggered')
+                logging.info('Received conversation start event')
                 continue_conversation = True
                 user_request = None
 
                 while continue_conversation:
                     (user_request, continue_conversation) = self.assistant.assist()
-                    if user_request:
-                        self.bus.post(SpeechRecognizedEvent(phrase=user_request))
+
+                self.on_conversation_end()
 
 
 class SampleAssistant(object):
@@ -188,12 +188,19 @@ class SampleAssistant(object):
 
     def __init__(self, language_code, device_model_id, device_id,
                  conversation_stream,
-                 channel, deadline_sec, device_handler):
+                 channel, deadline_sec, device_handler,
+                 on_conversation_start=None,
+                 on_conversation_end=None,
+                 on_speech_recognized=None):
         self.language_code = language_code
         self.device_model_id = device_model_id
         self.device_id = device_id
         self.conversation_stream = conversation_stream
 
+        self.on_conversation_start = on_conversation_start
+        self.on_conversation_end = on_conversation_end
+        self.on_speech_recognized = on_speech_recognized
+
         # Opaque blob provided in AssistResponse that,
         # when provided in a follow-up AssistRequest,
         # gives the Assistant a context marker within the current state
@@ -238,6 +245,9 @@ class SampleAssistant(object):
         self.conversation_stream.start_recording()
         logging.info('Recording audio request.')
 
+        if self.on_conversation_start:
+            self.on_conversation_start()
+
         def iter_assist_requests():
             for c in self.gen_assist_requests():
                 assistant_helpers.log_assist_request_without_audio(c)
@@ -291,6 +301,9 @@ class SampleAssistant(object):
         if user_request:
             self.conversation_stream.stop_playback()
 
+        if self.on_speech_recognized:
+            self.on_speech_recognized(user_request)
+
         return (user_request, continue_conversation)
 
     def gen_assist_requests(self):
diff --git a/platypush/backend/camera/__init__.py b/platypush/backend/camera/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/platypush/backend/camera/pi.py b/platypush/backend/camera/pi.py
new file mode 100644
index 0000000000..b2a5849f55
--- /dev/null
+++ b/platypush/backend/camera/pi.py
@@ -0,0 +1,56 @@
+import logging
+import socket
+import time
+
+import picamera
+
+from platypush.backend import Backend
+
+
+class CameraPiBackend(Backend):
+    def __init__(self, listen_port, x_resolution=640, y_resolution=480,
+                 framerate=24, hflip=False, vflip=False, **kwargs):
+        super().__init__(**kwargs)
+
+        self.listen_port = listen_port
+        self.x_resolution = x_resolution
+        self.y_resolution = y_resolution
+        self.framerate = framerate
+        self.hflip = hflip
+        self.vflip = vflip
+
+        self.server_socket = socket.socket()
+        self.server_socket.bind(('0.0.0.0', self.listen_port))
+        self.server_socket.listen(0)
+
+        self.camera = picamera.PiCamera()
+        self.camera.resolution = (self.x_resolution, self.y_resolution)
+        self.camera.framerate = framerate
+        self.camera.hflip = self.hflip
+        self.camera.vflip = self.vflip
+
+        logging.info('Initialized Pi camera backend')
+
+    def send_message(self, msg):
+        pass
+
+    def run(self):
+        super().run()
+
+        while True:
+            # Blocks until a client connects; the raw H.264 stream is then
+            # written straight to the connection until the client goes away.
+            connection = self.server_socket.accept()[0].makefile('wb')
+
+            try:
+                self.camera.start_recording(connection, format='h264')
+                while True:
+                    self.camera.wait_recording(60)
+            except ConnectionError:
+                pass
+            finally:
+                try:
+                    self.camera.stop_recording()
+                    connection.close()
+                except Exception:
+                    pass
+
+
+# vim:sw=4:ts=4:et:
+
diff --git a/platypush/plugins/gpio/__init__.py b/platypush/plugins/gpio/__init__.py
index e69de29bb2..ca184cfcbd 100644
--- a/platypush/plugins/gpio/__init__.py
+++ b/platypush/plugins/gpio/__init__.py
@@ -0,0 +1,36 @@
+import logging
+import threading
+import time
+
+import RPi.GPIO as gpio
+
+from platypush.message.response import Response
+from platypush.plugins import Plugin
+
+
+class GpioPlugin(Plugin):
+    def write(self, pin, val):
+        gpio.setmode(gpio.BCM)
+        gpio.setup(pin, gpio.OUT)
+        gpio.output(pin, val)
+
+        return Response(output={
+            'pin': pin,
+            'val': val,
+            'method': 'write',
+        })
+
+    def read(self, pin):
+        gpio.setmode(gpio.BCM)
+        gpio.setup(pin, gpio.IN)
+        val = gpio.input(pin)
+
+        return Response(output={
+            'pin': pin,
+            'val': val,
+            'method': 'read',
+        })
+
+
+# vim:sw=4:ts=4:et:
+
diff --git a/platypush/plugins/gpio/zeroborg/lib/__init__.py b/platypush/plugins/gpio/zeroborg/lib/__init__.py
index 87a15aa07a..0d85faee63 100644
--- a/platypush/plugins/gpio/zeroborg/lib/__init__.py
+++ b/platypush/plugins/gpio/zeroborg/lib/__init__.py
@@ -646,8 +646,9 @@ Sets the current state of the LED, False for off, True for on
             self.RawWrite(COMMAND_SET_LED, [level])
         except KeyboardInterrupt:
             raise
-        except:
+        except Exception as e:
             self.Print('Failed sending LED state!')
+            self.Print(e)
 
     def GetLed(self):
diff --git a/requirements.txt b/requirements.txt
index 3fc6066da5..f6386af605 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -60,3 +60,6 @@ pylast
 
 # Custom hotword detection: Snowboy
 snowboy
+
+# Support for the RaspberryPi camera module
+# apt install python3-picamera
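
The pushtotalk changes replace the library-driven _process_event dispatch
with three optional callbacks injected into SampleAssistant, so the class no
longer needs to know about the platypush bus. A minimal sketch of driving the
refactored class standalone — lang, device_model_id, device_id,
conversation_stream, grpc_channel, grpc_deadline and device_handler are
assumed to be built the same way AssistantGooglePushtotalkBackend.run()
builds them, and all three callbacks may be omitted:

    def on_start():
        print('Conversation started')

    def on_end():
        print('Conversation ended')

    def on_speech(phrase):
        print('Speech recognized: {}'.format(phrase))

    # SampleAssistant is a context manager (the backend uses it in a
    # `with` block), and assist() returns a
    # (user_request, continue_conversation) tuple.
    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, grpc_channel, grpc_deadline,
                         device_handler,
                         on_conversation_start=on_start,
                         on_conversation_end=on_end,
                         on_speech_recognized=on_speech) as assistant:
        continue_conversation = True
        while continue_conversation:
            (user_request, continue_conversation) = assistant.assist()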
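
The camera backend writes a raw H.264 stream to whichever client connects to
listen_port. A minimal client sketch to eyeball the stream — the host, the
port and the choice of mplayer are assumptions for illustration, not part of
the patch; any player that accepts raw H.264 on stdin will do:

    import socket
    import subprocess

    HOST, PORT = 'raspberrypi.local', 3333  # hypothetical host and port

    # Pipe the raw H.264 bytes into a local player; -fps should match the
    # framerate configured on the backend (24 by default).
    player = subprocess.Popen(
        ['mplayer', '-fps', '24', '-cache', '1024', '-'],
        stdin=subprocess.PIPE)

    with socket.create_connection((HOST, PORT)) as sock:
        try:
            while True:
                chunk = sock.recv(4096)
                if not chunk:
                    break
                player.stdin.write(chunk)
        finally:
            player.stdin.close()
            player.wait()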
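
And a quick sanity check for the new GPIO plugin, assuming a Raspberry Pi
with RPi.GPIO available — pin 18 is an arbitrary example, and the plugin
addresses pins in BCM numbering (it calls gpio.setmode(gpio.BCM)):

    from platypush.plugins.gpio import GpioPlugin

    plugin = GpioPlugin()

    # Drive the pin high; the returned platypush Response echoes the request.
    print(plugin.write(pin=18, val=1).output)
    # -> {'pin': 18, 'val': 1, 'method': 'write'}

    # Note that read() reconfigures the pin as an input before sampling it,
    # so the value read back is whatever the pin now floats or is driven to.
    print(plugin.read(pin=18).output)
    # -> {'pin': 18, 'val': <0 or 1>, 'method': 'read'}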