Merge pull request 'New streaming endpoints' (#265) from 264-streaming-endpoints into master
Reviewed-on: platypush/platypush#265
commit b01bf43552
108 changed files with 4136 additions and 1826 deletions

@@ -16,12 +16,11 @@ from tornado.web import Application, FallbackHandler
 
 from platypush.backend import Backend
 from platypush.backend.http.app import application
-from platypush.backend.http.app.utils import get_ws_routes
+from platypush.backend.http.app.utils import get_streaming_routes, get_ws_routes
-from platypush.backend.http.app.ws.events import events_redis_topic
+from platypush.backend.http.app.ws.events import WSEventProxy
 
 from platypush.bus.redis import RedisBus
 from platypush.config import Config
-from platypush.utils import get_redis
 
 
 class HttpBackend(Backend):

@@ -286,7 +285,7 @@ class HttpBackend(Backend):
 
     def notify_web_clients(self, event):
         """Notify all the connected web clients (over websocket) of a new event"""
-        get_redis().publish(events_redis_topic, str(event))
+        WSEventProxy.publish(event)  # noqa: E1120
 
     def _get_secret_key(self, _create=False):
         if _create:

@@ -331,7 +330,10 @@
         container = WSGIContainer(application)
         tornado_app = Application(
             [
-                *[(route.path(), route) for route in get_ws_routes()],
+                *[
+                    (route.path(), route)
+                    for route in [*get_ws_routes(), *get_streaming_routes()]
+                ],
                 (r'.*', FallbackHandler, {'fallback': container}),
             ]
         )

@@ -352,7 +354,7 @@
             )
 
         if self.use_werkzeug_server:
-            application.config['redis_queue'] = self.bus.redis_queue
+            application.config['redis_queue'] = self.bus.redis_queue  # type: ignore
             application.run(
                 host=self.bind_address,
                 port=self.port,

platypush/backend/http/app/mixins/__init__.py (new file, 158 lines)
@@ -0,0 +1,158 @@
from contextlib import contextmanager
from dataclasses import dataclass
import json
import logging
from multiprocessing import RLock
from typing import Generator, Iterable, Optional, Set, Union

from redis import ConnectionError as RedisConnectionError
from redis.client import PubSub

from platypush.config import Config
from platypush.message import Message as AppMessage
from platypush.utils import get_redis

logger = logging.getLogger(__name__)

MessageType = Union[AppMessage, bytes, str, dict, list, set, tuple]
"""Types of supported messages on Redis/websocket channels."""


@dataclass
class Message:
    """
    A wrapper for a message received on a Redis subscription.
    """

    data: bytes
    """The data received in the message."""
    channel: str
    """The channel the message was received on."""


class PubSubMixin:
    """
    A mixin for Tornado route handlers that support pub/sub mechanisms.
    """

    def __init__(self, *_, subscriptions: Optional[Iterable[str]] = None, **__):
        self._pubsub: Optional[PubSub] = None
        """Pub/sub proxy."""
        self._subscriptions: Set[str] = set(subscriptions or [])
        """Set of current channel subscriptions."""
        self._pubsub_lock = RLock()
        """
        Subscriptions lock. It ensures that the list of subscriptions is
        manipulated by one thread or process at a time.
        """

        self.subscribe(*self._subscriptions)

    @property
    @contextmanager
    def pubsub(self):
        """
        Pub/sub proxy lazy property with context manager.
        """
        with self._pubsub_lock:
            # Lazy initialization for the pub/sub object.
            if self._pubsub is None:
                self._pubsub = get_redis().pubsub()

        # Yield the pub/sub object (context manager pattern).
        yield self._pubsub

        with self._pubsub_lock:
            # Close and free the pub/sub object if it has no active subscriptions.
            if self._pubsub is not None and len(self._subscriptions) == 0:
                self._pubsub.close()
                self._pubsub = None

    @staticmethod
    def _serialize(data: MessageType) -> bytes:
        """
        Serialize a message as bytes before delivering it to either a Redis or websocket channel.
        """
        if isinstance(data, AppMessage):
            data = str(data)
        if isinstance(data, (list, tuple, set)):
            data = list(data)
        if isinstance(data, (list, dict)):
            data = json.dumps(data, cls=AppMessage.Encoder)
        if isinstance(data, str):
            data = data.encode('utf-8')

        return data

    @classmethod
    def publish(cls, data: MessageType, *channels: str) -> None:
        """
        Publish data on one or more Redis channels.
        """
        for channel in channels:
            get_redis().publish(channel, cls._serialize(data))

    def subscribe(self, *channels: str) -> None:
        """
        Subscribe to a set of Redis channels.
        """
        with self.pubsub as pubsub:
            for channel in channels:
                pubsub.subscribe(channel)
                self._subscriptions.add(channel)

    def unsubscribe(self, *channels: str) -> None:
        """
        Unsubscribe from a set of Redis channels.
        """
        with self.pubsub as pubsub:
            for channel in channels:
                if channel in self._subscriptions:
                    pubsub.unsubscribe(channel)
                    self._subscriptions.remove(channel)

    def listen(self) -> Generator[Message, None, None]:
        """
        Listens for pub/sub messages and yields them.
        """
        try:
            with self.pubsub as pubsub:
                for msg in pubsub.listen():
                    channel = msg.get('channel', b'').decode()
                    if msg.get('type') != 'message' or not (
                        channel and channel in self._subscriptions
                    ):
                        continue

                    yield Message(data=msg.get('data', b''), channel=channel)
        except (AttributeError, RedisConnectionError):
            return

    def _pubsub_close(self):
        """
        Closes the pub/sub object.
        """
        with self._pubsub_lock:
            if self._pubsub is not None:
                try:
                    self._pubsub.close()
                except Exception as e:
                    logger.debug('Error on pubsub close: %s', e)
                finally:
                    self._pubsub = None

    def on_close(self):
        """
        Extensible close handler that closes the pub/sub object.
        """
        self._pubsub_close()

    @staticmethod
    def get_channel(channel: str) -> str:
        """
        Utility method that returns the prefixed Redis channel for a certain subscription name.
        """
        return f'_platypush/{Config.get("device_id")}/{channel}'  # type: ignore


# vim:sw=4:ts=4:et:
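
The mixin above is the piece that both the websocket handlers and the new HTTP streaming handlers share for Redis fan-out: publishers serialize anything message-like onto a device-prefixed channel, and consumers subscribe and iterate over listen(). A minimal usage sketch (not part of this diff; the Broadcaster class and the 'demo' channel name are hypothetical):

from platypush.backend.http.app.mixins import PubSubMixin


class Broadcaster(PubSubMixin):
    """Hypothetical consumer that prints whatever lands on its channel."""

    def run(self):
        channel = self.get_channel('demo')  # -> '_platypush/<device_id>/demo'
        self.subscribe(channel)
        try:
            for msg in self.listen():  # yields Message(data=..., channel=...)
                print(msg.channel, msg.data)
        finally:
            self.unsubscribe(channel)
            self.on_close()


# Any other thread or process can then push frames to the same channel:
#     PubSubMixin.publish(b'frame-bytes', Broadcaster.get_channel('demo'))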

@@ -1,116 +0,0 @@
import json
from typing import Optional

from flask import Blueprint, request
from flask.wrappers import Response

from platypush.backend.http.app import template_folder
from platypush.backend.http.app.utils import authenticate
from platypush.context import get_plugin
from platypush.plugins.camera import CameraPlugin, Camera, StreamWriter

camera = Blueprint('camera', __name__, template_folder=template_folder)

# Declare routes list
__routes__ = [
    camera,
]


def get_camera(plugin: str) -> CameraPlugin:
    plugin_name = f'camera.{plugin}'
    p = get_plugin(plugin_name)
    assert p, f'No such plugin: {plugin_name}'
    return p


def get_frame(session: Camera, timeout: Optional[float] = None) -> Optional[bytes]:
    if session.stream:
        with session.stream.ready:
            session.stream.ready.wait(timeout=timeout)
            return session.stream.frame


def feed(camera: CameraPlugin, **kwargs):
    with camera.open(**kwargs) as session:
        camera.start_camera(session)
        while True:
            frame = get_frame(session, timeout=5.0)
            if frame:
                yield frame


def get_args(kwargs):
    kwargs = kwargs.copy()
    if 't' in kwargs:
        del kwargs['t']

    for k, v in kwargs.items():
        if k == 'resolution':
            v = json.loads('[{}]'.format(v))
        else:
            try:
                v = int(v)
            except (ValueError, TypeError):
                try:
                    v = float(v)
                except (ValueError, TypeError):
                    pass

        kwargs[k] = v

    return kwargs


@camera.route('/camera/<plugin>/photo.<extension>', methods=['GET'])
@authenticate()
def get_photo(plugin, extension):
    plugin = get_camera(plugin)
    extension = 'jpeg' if extension in ('jpg', 'jpeg') else extension

    with plugin.open(stream=True, stream_format=extension, frames_dir=None, **get_args(request.args)) as session:
        plugin.start_camera(session)
        frame = None
        for _ in range(session.info.warmup_frames):
            frame = get_frame(session)

        return Response(frame, mimetype=session.stream.mimetype)


@camera.route('/camera/<plugin>/video.<extension>', methods=['GET'])
@authenticate()
def get_video(plugin, extension):
    stream_class = StreamWriter.get_class_by_name(extension)
    camera = get_camera(plugin)
    return Response(
        feed(camera, stream=True, stream_format=extension, frames_dir=None,
             **get_args(request.args)
        ), mimetype=stream_class.mimetype
    )


@camera.route('/camera/<plugin>/photo', methods=['GET'])
@authenticate()
def get_photo_default(plugin):
    return get_photo(plugin, 'jpeg')


@camera.route('/camera/<plugin>/video', methods=['GET'])
@authenticate()
def get_video_default(plugin):
    return get_video(plugin, 'mjpeg')


@camera.route('/camera/<plugin>/frame', methods=['GET'])
@authenticate()
def get_photo_deprecated(plugin):
    return get_photo_default(plugin)


@camera.route('/camera/<plugin>/feed', methods=['GET'])
@authenticate()
def get_video_deprecated(plugin):
    return get_video_default(plugin)


# vim:sw=4:ts=4:et:

@@ -1,51 +0,0 @@
from flask import Blueprint

from platypush.backend.http.app import template_folder
from platypush.backend.http.app.routes.plugins.camera import get_photo, get_video
from platypush.backend.http.app.utils import authenticate

camera_ir_mlx90640 = Blueprint('camera-ir-mlx90640', __name__, template_folder=template_folder)

# Declare routes list
__routes__ = [
    camera_ir_mlx90640,
]


@camera_ir_mlx90640.route('/camera/ir/mlx90640/photo.<extension>', methods=['GET'])
@authenticate()
def get_photo_route(extension):
    return get_photo('ir.mlx90640', extension)


@camera_ir_mlx90640.route('/camera/ir/mlx90640/video.<extension>', methods=['GET'])
@authenticate()
def get_video_route(extension):
    return get_video('ir.mlx90640', extension)


@camera_ir_mlx90640.route('/camera/ir/mlx90640/photo', methods=['GET'])
@authenticate()
def get_photo_route_default():
    return get_photo_route('jpeg')


@camera_ir_mlx90640.route('/camera/ir/mlx90640/video', methods=['GET'])
@authenticate()
def get_video_route_default():
    return get_video_route('mjpeg')


@camera_ir_mlx90640.route('/camera/ir/mlx90640/frame', methods=['GET'])
@authenticate()
def get_photo_route_deprecated():
    return get_photo_route_default()


@camera_ir_mlx90640.route('/camera/ir/mlx90640/feed', methods=['GET'])
@authenticate()
def get_video_route_deprecated():
    return get_video_route_default()


# vim:sw=4:ts=4:et:

@@ -1,74 +0,0 @@
import os
import tempfile

from flask import Response, Blueprint, request

from platypush.backend.http.app import template_folder
from platypush.backend.http.app.utils import authenticate, send_request

sound = Blueprint('sound', __name__, template_folder=template_folder)

# Declare routes list
__routes__ = [
    sound,
]


# Generates the .wav file header for a given set of samples and specs
# noinspection PyRedundantParentheses
def gen_header(sample_rate, sample_width, channels):
    datasize = int(2000 * 1e6)  # Arbitrary data size for streaming
    o = bytes("RIFF", 'ascii')  # (4byte) Marks file as RIFF
    o += (datasize + 36).to_bytes(4, 'little')  # (4byte) File size in bytes
    o += bytes("WAVE", 'ascii')  # (4byte) File type
    o += bytes("fmt ", 'ascii')  # (4byte) Format Chunk Marker
    o += (16).to_bytes(4, 'little')  # (4byte) Length of above format data
    o += (1).to_bytes(2, 'little')  # (2byte) Format type (1 - PCM)
    o += channels.to_bytes(2, 'little')  # (2byte)
    o += sample_rate.to_bytes(4, 'little')  # (4byte)
    o += (sample_rate * channels * sample_width // 8).to_bytes(4, 'little')  # (4byte)
    o += (channels * sample_width // 8).to_bytes(2, 'little')  # (2byte)
    o += sample_width.to_bytes(2, 'little')  # (2byte)
    o += bytes("data", 'ascii')  # (4byte) Data Chunk Marker
    o += datasize.to_bytes(4, 'little')  # (4byte) Data size in bytes
    return o


def audio_feed(device, fifo, sample_rate, blocksize, latency, channels):
    send_request(action='sound.stream_recording', device=device, sample_rate=sample_rate,
                 dtype='int16', fifo=fifo, blocksize=blocksize, latency=latency,
                 channels=channels)

    try:
        with open(fifo, 'rb') as f:  # lgtm [py/path-injection]
            send_header = True

            while True:
                audio = f.read(blocksize)

                if audio:
                    if send_header:
                        audio = gen_header(sample_rate=sample_rate, sample_width=16, channels=channels) + audio
                        send_header = False

                    yield audio
    finally:
        send_request(action='sound.stop_recording')


@sound.route('/sound/stream', methods=['GET'])
@authenticate()
def get_sound_feed():
    device = request.args.get('device')
    sample_rate = request.args.get('sample_rate', 44100)
    blocksize = request.args.get('blocksize', 512)
    latency = request.args.get('latency', 0)
    channels = request.args.get('channels', 1)
    fifo = request.args.get('fifo', os.path.join(tempfile.gettempdir(), 'inputstream'))

    return Response(audio_feed(device=device, fifo=fifo, sample_rate=sample_rate,
                               blocksize=blocksize, latency=latency, channels=channels),
                    mimetype='audio/x-wav;codec=pcm')


# vim:sw=4:ts=4:et:

platypush/backend/http/app/streaming/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
from ._base import StreamingRoute

__all__ = ['StreamingRoute']

platypush/backend/http/app/streaming/_base.py (new file, 126 lines)
@@ -0,0 +1,126 @@
from abc import ABC, abstractmethod
from http.client import responses
import json
from logging import getLogger
from typing import Optional
from typing_extensions import override

from tornado.web import RequestHandler, stream_request_body

from platypush.backend.http.app.utils.auth import AuthStatus, get_auth_status

from ..mixins import PubSubMixin


@stream_request_body
class StreamingRoute(RequestHandler, PubSubMixin, ABC):
    """
    Base class for Tornado streaming routes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.logger = getLogger(__name__)

    @override
    def prepare(self):
        """
        Request preparation logic. It performs user authentication if
        ``auth_required`` returns True, and it can be extended/overridden.
        """
        if self.auth_required:
            auth_status = get_auth_status(self.request)
            if auth_status != AuthStatus.OK:
                self.send_error(auth_status.value.code, error=auth_status.value.message)
                return

        self.logger.info(
            'Client %s connected to %s', self.request.remote_ip, self.request.path
        )

    @override
    def write_error(self, status_code: int, error: Optional[str] = None, **_):
        """
        Make sure that errors are always returned in JSON format.
        """
        self.set_header("Content-Type", "application/json")
        self.finish(
            json.dumps(
                {"status": status_code, "error": error or responses.get(status_code)}
            )
        )

    @classmethod
    @abstractmethod
    def path(cls) -> str:
        """
        Path/URL pattern for this route.
        """
        raise NotImplementedError()

    @property
    def auth_required(self) -> bool:
        """
        If set to True (default) then this route will require user
        authentication and return 401 if authentication fails.
        """
        return True

    @classmethod
    def _get_redis_queue(cls, *_, **__) -> Optional[str]:
        """
        Returns the Redis channel associated with a given set of arguments.

        This is None by default, and it should be implemented by subclasses if
        required.
        """
        return None

    def forward_stream(self, *args, **kwargs):
        """
        Utility method that does the following:

        1. It listens for new messages on the subscribed Redis channels;
        2. It applies a filter on the channel if :meth:`._get_redis_queue`
           returns a non-null result given ``args`` and ``kwargs``;
        3. It forwards the frames read from the Redis channel(s) to the HTTP client;
        4. It periodically invokes :meth:`._should_stop` to cleanly
           terminate when the HTTP client socket is closed.

        """
        redis_queue = self._get_redis_queue(  # pylint: disable=assignment-from-none
            *args, **kwargs
        )

        if redis_queue:
            self.subscribe(redis_queue)

        try:
            for msg in self.listen():
                if self._should_stop():
                    break

                if redis_queue and msg.channel != redis_queue:
                    continue

                frame = msg.data
                if frame:
                    self.write(frame)
                    self.flush()
        finally:
            if redis_queue:
                self.unsubscribe(redis_queue)

    def _should_stop(self):
        """
        Utility method used by :meth:`.forward_stream` to automatically
        terminate when the client connection is closed (it can be overridden
        by subclasses).
        """
        if self._finished:
            return True

        if self.request.connection and getattr(self.request.connection, 'stream', None):
            return self.request.connection.stream.closed()  # type: ignore

        return True

platypush/backend/http/app/streaming/plugins/camera.py (new file, 138 lines)
@@ -0,0 +1,138 @@
from enum import Enum
import json
from typing import Optional
from typing_extensions import override

from tornado.web import stream_request_body
from platypush.context import get_plugin

from platypush.config import Config
from platypush.plugins.camera import Camera, CameraPlugin, StreamWriter
from platypush.utils import get_plugin_name_by_class

from .. import StreamingRoute


class RequestType(Enum):
    """
    Models the camera route request type (video or photo)
    """

    UNKNOWN = ''
    PHOTO = 'photo'
    VIDEO = 'video'


@stream_request_body
class CameraRoute(StreamingRoute):
    """
    Route for camera streams.
    """

    _redis_queue_prefix = f'_platypush/{Config.get("device_id") or ""}/camera'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._camera: Optional[Camera] = None
        self._request_type = RequestType.UNKNOWN
        self._extension: str = ''

    @override
    @classmethod
    def path(cls) -> str:
        return r"/camera/([a-zA-Z0-9_./]+)/([a-zA-Z0-9_]+)\.?([a-zA-Z0-9_]+)?"

    def _get_camera(self, plugin: str) -> CameraPlugin:
        plugin_name = f'camera.{plugin.replace("/", ".")}'
        p = get_plugin(plugin_name)
        assert p, f'No such plugin: {plugin_name}'
        return p

    def _get_frame(
        self, camera: Camera, timeout: Optional[float] = None
    ) -> Optional[bytes]:
        if camera.stream:
            with camera.stream.ready:
                camera.stream.ready.wait(timeout=timeout)
                return camera.stream.frame

        return None

    def send_frame(self, camera: Camera):
        frame = None
        for _ in range(camera.info.warmup_frames):
            frame = self._get_frame(camera)

        if frame:
            self.write(frame)
            self.flush()

    def _set_request_type_and_extension(self, route: str, extension: str):
        if route in {'photo', 'frame'}:
            self._request_type = RequestType.PHOTO
            if extension == 'jpg':
                extension = 'jpeg'
            self._extension = extension or 'jpeg'
        elif route in {'video', 'feed'}:
            self._request_type = RequestType.VIDEO
            self._extension = extension or 'mjpeg'

    def _get_args(self, kwargs: dict):
        kwargs = {k: v[0].decode() for k, v in kwargs.items() if k != 't'}
        for k, v in kwargs.items():
            if k == 'resolution':
                v = json.loads(f'[{v}]')
            else:
                try:
                    v = int(v)
                except (ValueError, TypeError):
                    try:
                        v = float(v)
                    except (ValueError, TypeError):
                        pass

            kwargs[k] = v

        return kwargs

    @override
    @classmethod
    def _get_redis_queue(cls, camera: CameraPlugin, *_, **__) -> str:
        plugin_name = get_plugin_name_by_class(camera.__class__)
        assert plugin_name, f'No such plugin: {plugin_name}'
        return '/'.join(
            [
                cls._redis_queue_prefix,
                plugin_name,
                *map(
                    str,
                    [camera.camera_info.device] if camera.camera_info.device else [],
                ),
            ]
        )

    def get(self, plugin: str, route: str, extension: str = '') -> None:
        self._set_request_type_and_extension(route, extension)
        if not (self._request_type and self._extension):
            self.write_error(404, 'Not Found')
            return

        stream_class = StreamWriter.get_class_by_name(self._extension)
        camera = self._get_camera(plugin)
        redis_queue = self._get_redis_queue(camera)
        self.set_header('Content-Type', stream_class.mimetype)

        with camera.open(
            stream=True,
            stream_format=self._extension,
            frames_dir=None,
            redis_queue=redis_queue,
            **self._get_args(self.request.arguments),
        ) as session:
            camera.start_camera(session)
            if self._request_type == RequestType.PHOTO:
                self.send_frame(session)
            elif self._request_type == RequestType.VIDEO:
                self.forward_stream(camera)

        self.finish()
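
The single path() pattern above replaces the four old Flask camera routes (photo/frame and video/feed, with or without an extension): the capture groups map to the plugin, route and extension arguments of get(), and a slash in the plugin segment (e.g. ir/mlx90640) is later turned into a dotted plugin name by _get_camera(). A quick, illustrative check of how the pattern splits a few representative URLs:

import re

PATTERN = r"/camera/([a-zA-Z0-9_./]+)/([a-zA-Z0-9_]+)\.?([a-zA-Z0-9_]+)?"

for url in ('/camera/ffmpeg/video.mjpeg', '/camera/ir/mlx90640/photo.jpg', '/camera/pi/feed'):
    match = re.fullmatch(PATTERN, url)
    print(url, '->', match.groups() if match else None)

# /camera/ffmpeg/video.mjpeg    -> ('ffmpeg', 'video', 'mjpeg')
# /camera/ir/mlx90640/photo.jpg -> ('ir/mlx90640', 'photo', 'jpg')
# /camera/pi/feed               -> ('pi', 'feed', None)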

platypush/backend/http/app/streaming/plugins/sound.py (new file, 97 lines)
@@ -0,0 +1,97 @@
from contextlib import contextmanager
import json
from typing import Generator, Optional
from typing_extensions import override

from tornado.web import stream_request_body

from platypush.backend.http.app.utils import send_request
from platypush.config import Config

from .. import StreamingRoute


@stream_request_body
class SoundRoute(StreamingRoute):
    """
    Route for audio streams.
    """

    _redis_queue_prefix = f'_platypush/{Config.get("device_id") or ""}/sound'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._audio_headers_written: bool = False
        """Send the audio file headers before we send the first audio frame."""

    @override
    @classmethod
    def path(cls) -> str:
        return r"/sound/stream\.?([a-zA-Z0-9_]+)?"

    @contextmanager
    def _audio_stream(self, **kwargs) -> Generator[None, None, None]:
        response = send_request(
            'sound.record',
            dtype='int16',
            **kwargs,
        )

        assert response and not response.is_error(), (
            'Streaming error: ' + str(response.errors) if response else '(unknown)'
        )

        yield
        send_request('sound.stop_recording')

    @override
    @classmethod
    def _get_redis_queue(cls, *_, device: Optional[str] = None, **__) -> str:
        return '/'.join([cls._redis_queue_prefix, *([device] if device else [])])

    def _get_args(self, **kwargs):
        kwargs.update({k: v[0].decode() for k, v in self.request.arguments.items()})
        device = kwargs.get('device')
        return {
            'device': device,
            'sample_rate': int(kwargs.get('sample_rate', 44100)),
            'blocksize': int(kwargs.get('blocksize', 512)),
            'latency': float(kwargs.get('latency', 0)),
            'channels': int(kwargs.get('channels', 1)),
            'format': kwargs.get('format', 'wav'),
            'redis_queue': kwargs.get('redis_queue', self._get_redis_queue(device)),
        }

    @staticmethod
    def _content_type_by_extension(extension: str) -> str:
        if extension == 'mp3':
            return 'audio/mpeg'
        if extension == 'ogg':
            return 'audio/ogg'
        if extension == 'wav':
            return 'audio/wav;codec=pcm'
        if extension == 'flac':
            return 'audio/flac'
        if extension == 'aac':
            return 'audio/aac'
        return 'application/octet-stream'

    def get(self, extension: Optional[str] = None) -> None:
        ext = extension or 'wav'
        args = self._get_args(format=ext)

        try:
            with self._audio_stream(**args):
                self.set_header('Content-Type', self._content_type_by_extension(ext))
                self.forward_stream(**args)

            self.finish()
        except AssertionError as e:
            self.set_header("Content-Type", "application/json")
            self.set_status(400, str(e))
            self.finish(json.dumps({"error": str(e)}))
        except Exception as e:
            self.set_header("Content-Type", "application/json")
            self.logger.exception(e)
            self.set_status(500, str(e))
            self.finish(json.dumps({"error": str(e)}))
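
On the client side the new endpoint replaces the old Flask /sound/stream route, with the container format now selectable through the extension (/sound/stream.aac, /sound/stream.mp3, ...), which is what the updated web panel in this commit uses. A hedged sketch of consuming it with the requests library (host, port, device and output file name are illustrative; authentication is omitted here, the route validates it via get_auth_status()):

import requests

# The query string is parsed by SoundRoute._get_args()
url = 'http://localhost:8008/sound/stream.mp3'
params = {'device': 'default', 'sample_rate': 44100, 'channels': 1}

with requests.get(url, params=params, stream=True, timeout=10) as response:
    response.raise_for_status()
    print(response.headers['Content-Type'])  # audio/mpeg, per _content_type_by_extension()
    with open('capture.mp3', 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)  # raw frames forwarded from the Redis queue by forward_stream()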

@@ -13,6 +13,7 @@ from .routes import (
     get_remote_base_url,
     get_routes,
 )
+from .streaming import get_streaming_routes
 from .ws import get_ws_routes
 
 __all__ = [

@@ -27,6 +28,7 @@ __all__ = [
     'get_message_response',
     'get_remote_base_url',
     'get_routes',
+    'get_streaming_routes',
     'get_ws_routes',
     'logger',
     'send_message',

platypush/backend/http/app/utils/streaming.py (new file, 42 lines)
@@ -0,0 +1,42 @@
import logging
import os
import importlib
import inspect
from typing import List, Type

import pkgutil

from ..streaming import StreamingRoute

logger = logging.getLogger(__name__)


def get_streaming_routes() -> List[Type[StreamingRoute]]:
    """
    Scans for streaming routes.
    """
    from platypush.backend.http import HttpBackend

    base_pkg = '.'.join([HttpBackend.__module__, 'app', 'streaming'])
    base_dir = os.path.join(
        os.path.dirname(inspect.getfile(HttpBackend)), 'app', 'streaming'
    )
    routes = []

    for _, mod_name, _ in pkgutil.walk_packages([base_dir], prefix=base_pkg + '.'):
        try:
            module = importlib.import_module(mod_name)
        except Exception as e:
            logger.warning('Could not import module %s', mod_name)
            logger.exception(e)
            continue

        for _, obj in inspect.getmembers(module):
            if (
                inspect.isclass(obj)
                and not inspect.isabstract(obj)
                and issubclass(obj, StreamingRoute)
            ):
                routes.append(obj)

    return routes
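
The scanner above is what the HttpBackend hunk at the top of this diff consumes: every discovered class contributes a (path, handler) pair next to the websocket routes. A small introspection sketch, assuming a configured application:

from platypush.backend.http.app.utils import get_streaming_routes

# Each discovered class exposes its Tornado URL pattern through path(), which
# HttpBackend pairs with the handler class when building the Tornado Application.
for route in get_streaming_routes():
    print(f'{route.__name__}: {route.path()}')

# Expected output with the handlers added in this commit (order may vary):
#   CameraRoute: /camera/([a-zA-Z0-9_./]+)/([a-zA-Z0-9_]+)\.?([a-zA-Z0-9_]+)?
#   SoundRoute: /sound/stream\.?([a-zA-Z0-9_]+)?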

@@ -1,3 +1,3 @@
-from ._base import WSRoute, logger, pubsub_redis_topic
+from ._base import WSRoute, logger
 
-__all__ = ['WSRoute', 'logger', 'pubsub_redis_topic']
+__all__ = ['WSRoute', 'logger']

@@ -1,37 +1,28 @@
-from abc import ABC, abstractclassmethod
+from abc import ABC, abstractmethod
-import json
 from logging import getLogger
-from threading import RLock, Thread
+from threading import Thread
-from typing import Any, Generator, Iterable, Optional, Union
 from typing_extensions import override
 
-from redis import ConnectionError as RedisConnectionError
 from tornado.ioloop import IOLoop
 from tornado.websocket import WebSocketHandler
 
 from platypush.backend.http.app.utils.auth import AuthStatus, get_auth_status
-from platypush.config import Config
-from platypush.message import Message
+from ..mixins import MessageType, PubSubMixin
-from platypush.utils import get_redis
 
 logger = getLogger(__name__)
 
 
-def pubsub_redis_topic(topic: str) -> str:
-    return f'_platypush/{Config.get("device_id")}/{topic}'  # type: ignore
-
-
-class WSRoute(WebSocketHandler, Thread, ABC):
+class WSRoute(WebSocketHandler, Thread, PubSubMixin, ABC):
     """
     Base class for Tornado websocket endpoints.
     """
 
-    def __init__(self, *args, redis_topics: Optional[Iterable[str]] = None, **kwargs):
-        super().__init__(*args, **kwargs)
-        self._redis_topics = set(redis_topics or [])
-        self._sub = get_redis().pubsub()
+    def __init__(self, *args, **kwargs):
+        WebSocketHandler.__init__(self, *args)
+        PubSubMixin.__init__(self, **kwargs)
+        Thread.__init__(self)
         self._io_loop = IOLoop.current()
-        self._sub_lock = RLock()
 
     @override
     def open(self, *_, **__):

@@ -51,10 +42,11 @@ class WSRoute(WebSocketHandler, Thread, ABC):
         pass
 
     @override
-    def on_message(self, message):  # type: ignore
-        pass
+    def on_message(self, message):
+        return message
 
-    @abstractclassmethod
+    @classmethod
+    @abstractmethod
     def app_name(cls) -> str:
         raise NotImplementedError()
 

@@ -66,55 +58,25 @@ class WSRoute(WebSocketHandler, Thread, ABC):
     def auth_required(self):
         return True
 
-    def subscribe(self, *topics: str) -> None:
-        with self._sub_lock:
-            for topic in topics:
-                self._sub.subscribe(topic)
-                self._redis_topics.add(topic)
-
-    def unsubscribe(self, *topics: str) -> None:
-        with self._sub_lock:
-            for topic in topics:
-                if topic in self._redis_topics:
-                    self._sub.unsubscribe(topic)
-                    self._redis_topics.remove(topic)
-
-    def listen(self) -> Generator[Any, None, None]:
-        try:
-            for msg in self._sub.listen():
-                if (
-                    msg.get('type') != 'message'
-                    and msg.get('channel').decode() not in self._redis_topics
-                ):
-                    continue
-
-                yield msg.get('data')
-        except (AttributeError, RedisConnectionError):
-            return
-
-    def send(self, msg: Union[str, bytes, dict, list, tuple, set]) -> None:
-        if isinstance(msg, (list, tuple, set)):
-            msg = list(msg)
-        if isinstance(msg, (list, dict)):
-            msg = json.dumps(msg, cls=Message.Encoder)
-
+    def send(self, msg: MessageType) -> None:
         self._io_loop.asyncio_loop.call_soon_threadsafe(  # type: ignore
-            self.write_message, msg
+            self.write_message, self._serialize(msg)
        )
 
     @override
     def run(self) -> None:
         super().run()
-        for topic in self._redis_topics:
-            self._sub.subscribe(topic)
+        self.subscribe(*self._subscriptions)
 
     @override
     def on_close(self):
-        topics = self._redis_topics.copy()
-        for topic in topics:
-            self.unsubscribe(topic)
+        super().on_close()
+        for channel in self._subscriptions.copy():
+            self.unsubscribe(channel)
+
+        if self._pubsub:
+            self._pubsub.close()
 
-        self._sub.close()
         logger.info(
             'Client %s disconnected from %s, reason=%s, message=%s',
             self.request.remote_ip,

@@ -1,12 +1,11 @@
 from typing_extensions import override
 
+from platypush.backend.http.app.mixins import MessageType
 from platypush.message.event import Event
 
-from . import WSRoute, logger, pubsub_redis_topic
+from . import WSRoute, logger
 from ..utils import send_message
 
-events_redis_topic = pubsub_redis_topic('events')
-
-
 class WSEventProxy(WSRoute):
     """

@@ -14,14 +13,23 @@ class WSEventProxy(WSRoute):
     """
 
     def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.subscribe(events_redis_topic)
+        super().__init__(*args, subscriptions=[self.events_channel], **kwargs)
 
     @classmethod
     @override
     def app_name(cls) -> str:
         return 'events'
 
+    @classmethod
+    @property
+    def events_channel(cls) -> str:
+        return cls.get_channel('events')
+
+    @override
+    @classmethod
+    def publish(cls, data: MessageType, *_) -> None:
+        super().publish(data, cls.events_channel)
+
     @override
     def on_message(self, message):
         try:

@@ -38,9 +46,9 @@ class WSEventProxy(WSRoute):
     def run(self) -> None:
         for msg in self.listen():
             try:
-                evt = Event.build(msg)
+                evt = Event.build(msg.data)
             except Exception as e:
                 logger.warning('Error parsing event: %s: %s', msg, e)
                 continue
 
-            self.send(str(evt))
+            self.send(evt)

@@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"><meta http-equiv="X-UA-Compatible" content="IE=edge"><meta name="viewport" content="width=device-width,initial-scale=1"><!--[if IE]><link rel="icon" href="/favicon.ico"><![endif]--><link rel="stylesheet" href="/fonts/poppins.css"><title>platypush</title><script defer="defer" type="module" src="/static/js/chunk-vendors.0f6060b6.js"></script><script defer="defer" type="module" src="/static/js/app.8e3d4fb1.js"></script><link href="/static/css/chunk-vendors.0fcd36f0.css" rel="stylesheet"><link href="/static/css/app.0a781c41.css" rel="stylesheet"><link rel="icon" type="image/svg+xml" href="/img/icons/favicon.svg"><link rel="icon" type="image/png" sizes="32x32" href="/img/icons/favicon-32x32.png"><link rel="icon" type="image/png" sizes="16x16" href="/img/icons/favicon-16x16.png"><link rel="manifest" href="/manifest.json"><meta name="theme-color" content="#ffffff"><meta name="apple-mobile-web-app-capable" content="no"><meta name="apple-mobile-web-app-status-bar-style" content="default"><meta name="apple-mobile-web-app-title" content="Platypush"><link rel="apple-touch-icon" href="/img/icons/apple-touch-icon-152x152.png"><link rel="mask-icon" href="/img/icons/safari-pinned-tab.svg" color="#ffffff"><meta name="msapplication-TileImage" content="/img/icons/msapplication-icon-144x144.png"><meta name="msapplication-TileColor" content="#000000"><script defer="defer" src="/static/js/chunk-vendors-legacy.037e71b7.js" nomodule></script><script defer="defer" src="/static/js/app-legacy.523328cf.js" nomodule></script></head><body><noscript><strong>We're sorry but platypush doesn't work properly without JavaScript enabled. Please enable it to continue.</strong></noscript><div id="app"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"><meta http-equiv="X-UA-Compatible" content="IE=edge"><meta name="viewport" content="width=device-width,initial-scale=1"><!--[if IE]><link rel="icon" href="/favicon.ico"><![endif]--><link rel="stylesheet" href="/fonts/poppins.css"><title>platypush</title><script defer="defer" type="module" src="/static/js/chunk-vendors.0f6060b6.js"></script><script defer="defer" type="module" src="/static/js/app.a0889d9d.js"></script><link href="/static/css/chunk-vendors.0fcd36f0.css" rel="stylesheet"><link href="/static/css/app.0a781c41.css" rel="stylesheet"><link rel="icon" type="image/svg+xml" href="/img/icons/favicon.svg"><link rel="icon" type="image/png" sizes="32x32" href="/img/icons/favicon-32x32.png"><link rel="icon" type="image/png" sizes="16x16" href="/img/icons/favicon-16x16.png"><link rel="manifest" href="/manifest.json"><meta name="theme-color" content="#ffffff"><meta name="apple-mobile-web-app-capable" content="no"><meta name="apple-mobile-web-app-status-bar-style" content="default"><meta name="apple-mobile-web-app-title" content="Platypush"><link rel="apple-touch-icon" href="/img/icons/apple-touch-icon-152x152.png"><link rel="mask-icon" href="/img/icons/safari-pinned-tab.svg" color="#ffffff"><meta name="msapplication-TileImage" content="/img/icons/msapplication-icon-144x144.png"><meta name="msapplication-TileColor" content="#000000"><script defer="defer" src="/static/js/chunk-vendors-legacy.037e71b7.js" nomodule></script><script defer="defer" src="/static/js/app-legacy.83532d44.js" nomodule></script></head><body><noscript><strong>We're sorry but platypush doesn't work properly without JavaScript enabled. Please enable it to continue.</strong></noscript><div id="app"></div></body></html>
File diff suppressed because one or more lines are too long
platypush/backend/http/webapp/dist/static/css/4118.25e7d5ff.css (new vendored file, 1 line)
File diff suppressed because one or more lines are too long
platypush/backend/http/webapp/dist/static/js/4118-legacy.fdfd71bc.js (new vendored file, 2 lines)
@@ -0,0 +1,2 @@
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[4118],{4118:function(n,t,r){r.r(t),r.d(t,{default:function(){return b}});var e=r(6252),o=function(n){return(0,e.dD)("data-v-911495ca"),n=n(),(0,e.Cn)(),n},a={class:"sound"},u={class:"sound-container"},i={key:0,autoplay:"",preload:"none",ref:"player"},s=["src"],c=(0,e.Uk)(" Your browser does not support audio elements "),d={class:"controls"},p=o((function(){return(0,e._)("i",{class:"fa fa-play"},null,-1)})),l=(0,e.Uk)(" Start streaming audio "),f=[p,l],g=o((function(){return(0,e._)("i",{class:"fa fa-stop"},null,-1)})),k=(0,e.Uk)(" Stop streaming audio "),y=[g,k];function m(n,t,r,o,p,l){return(0,e.wg)(),(0,e.iD)("div",a,[(0,e._)("div",u,[p.recording?((0,e.wg)(),(0,e.iD)("audio",i,[(0,e._)("source",{src:"/sound/stream.aac?t=".concat((new Date).getTime())},null,8,s),c],512)):(0,e.kq)("",!0)]),(0,e._)("div",d,[p.recording?((0,e.wg)(),(0,e.iD)("button",{key:1,type:"button",onClick:t[1]||(t[1]=function(){return l.stopRecording&&l.stopRecording.apply(l,arguments)})},y)):((0,e.wg)(),(0,e.iD)("button",{key:0,type:"button",onClick:t[0]||(t[0]=function(){return l.startRecording&&l.startRecording.apply(l,arguments)})},f))])])}var w=r(8534),h=(r(5666),r(6813)),v={name:"Sound",mixins:[h.Z],data:function(){return{recording:!1}},methods:{startRecording:function(){this.recording=!0},stopRecording:function(){var n=this;return(0,w.Z)(regeneratorRuntime.mark((function t(){return regeneratorRuntime.wrap((function(t){while(1)switch(t.prev=t.next){case 0:return n.recording=!1,t.next=3,n.request("sound.stop_recording");case 3:case"end":return t.stop()}}),t)})))()}}},R=r(3744);const _=(0,R.Z)(v,[["render",m],["__scopeId","data-v-911495ca"]]);var b=_}}]);
//# sourceMappingURL=4118-legacy.fdfd71bc.js.map
platypush/backend/http/webapp/dist/static/js/4118-legacy.fdfd71bc.js.map (new vendored file, 1 line)
@@ -0,0 +1 @@
{"version":3,"file":"static/js/4118-legacy.fdfd71bc.js","mappings":"oPACOA,MAAM,S,GACJA,MAAM,mB,SACFC,SAAA,GAASC,QAAQ,OAAOC,IAAI,U,qBAC8B,kD,GAK9DH,MAAM,Y,uBAEP,OAA0B,KAAvBA,MAAM,cAAY,Q,eAAK,4B,GAA1B,K,uBAIA,OAA0B,KAAvBA,MAAM,cAAY,Q,eAAK,2B,GAA1B,K,0CAdN,QAiBM,MAjBN,EAiBM,EAhBJ,OAKM,MALN,EAKM,CAJ8C,EAAAI,YAAA,WAAlD,QAGQ,QAHR,EAGQ,EAFN,OAA+D,UAAtDC,IAAG,mCAA8BC,MAAQC,YAAlD,UAEM,GAHR,yBAMF,OAQM,MARN,EAQM,CAPiD,EAAAH,YAArD,WAIA,QAES,U,MAFDI,KAAK,SAAU,QAAK,8BAAE,EAAAC,eAAA,EAAAA,cAAA,kBAAF,IAA5B,MAJqD,WAArD,QAES,U,MAFDD,KAAK,SAAU,QAAK,8BAAE,EAAAE,gBAAA,EAAAA,eAAA,kBAAF,IAA5B,O,mCAcN,GACEC,KAAM,QACNC,OAAQ,CAACC,EAAA,GAETC,KAJa,WAKX,MAAO,CACLV,WAAW,EAEd,EAEDW,QAAS,CACPL,eADO,WAELM,KAAKZ,WAAY,CAClB,EAEKK,cALC,WAKe,uJACpB,EAAKL,WAAY,EADG,SAEd,EAAKa,QAAQ,wBAFC,4CAGrB,I,UCnCL,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,GAAQ,CAAC,YAAY,qBAEzF,O","sources":["webpack://platypush/./src/components/panels/Sound/Index.vue","webpack://platypush/./src/components/panels/Sound/Index.vue?0677"],"sourcesContent":["<template>\n <div class=\"sound\">\n <div class=\"sound-container\">\n <audio autoplay preload=\"none\" ref=\"player\" v-if=\"recording\">\n <source :src=\"`/sound/stream.aac?t=${(new Date()).getTime()}`\">\n Your browser does not support audio elements\n </audio>\n </div>\n\n <div class=\"controls\">\n <button type=\"button\" @click=\"startRecording\" v-if=\"!recording\">\n <i class=\"fa fa-play\"></i> Start streaming audio\n </button>\n\n <button type=\"button\" @click=\"stopRecording\" v-else>\n <i class=\"fa fa-stop\"></i> Stop streaming audio\n </button>\n </div>\n </div>\n</template>\n\n<script>\nimport Utils from \"@/Utils\";\n\nexport default {\n name: \"Sound\",\n mixins: [Utils],\n\n data() {\n return {\n recording: false,\n };\n },\n\n methods: {\n startRecording() {\n this.recording = true\n },\n\n async stopRecording() {\n this.recording = false\n await this.request('sound.stop_recording')\n },\n },\n}\n</script>\n\n<style lang=\"scss\" scoped>\n.sound {\n width: 100%;\n height: 90%;\n margin-top: 7%;\n overflow: hidden;\n display: flex;\n flex-direction: column;\n align-items: center;\n\n .sound-container {\n margin-bottom: 1em;\n }\n}\n</style>\n","import { render } from \"./Index.vue?vue&type=template&id=911495ca&scoped=true\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport \"./Index.vue?vue&type=style&index=0&id=911495ca&lang=scss&scoped=true\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render],['__scopeId',\"data-v-911495ca\"]])\n\nexport default __exports__"],"names":["class","autoplay","preload","ref","recording","src","Date","getTime","type","stopRecording","startRecording","name","mixins","Utils","data","methods","this","request","__exports__","render"],"sourceRoot":""}
platypush/backend/http/webapp/dist/static/js/4118.eb9d25ca.js (new vendored file, 2 lines)
@@ -0,0 +1,2 @@
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[4118],{4118:function(t,n,o){o.r(n),o.d(n,{default:function(){return b}});var e=o(6252);const r=t=>((0,e.dD)("data-v-911495ca"),t=t(),(0,e.Cn)(),t),s={class:"sound"},a={class:"sound-container"},i={key:0,autoplay:"",preload:"none",ref:"player"},c=["src"],d=(0,e.Uk)(" Your browser does not support audio elements "),u={class:"controls"},l=r((()=>(0,e._)("i",{class:"fa fa-play"},null,-1))),p=(0,e.Uk)(" Start streaming audio "),g=[l,p],k=r((()=>(0,e._)("i",{class:"fa fa-stop"},null,-1))),f=(0,e.Uk)(" Stop streaming audio "),y=[k,f];function h(t,n,o,r,l,p){return(0,e.wg)(),(0,e.iD)("div",s,[(0,e._)("div",a,[l.recording?((0,e.wg)(),(0,e.iD)("audio",i,[(0,e._)("source",{src:`/sound/stream.aac?t=${(new Date).getTime()}`},null,8,c),d],512)):(0,e.kq)("",!0)]),(0,e._)("div",u,[l.recording?((0,e.wg)(),(0,e.iD)("button",{key:1,type:"button",onClick:n[1]||(n[1]=(...t)=>p.stopRecording&&p.stopRecording(...t))},y)):((0,e.wg)(),(0,e.iD)("button",{key:0,type:"button",onClick:n[0]||(n[0]=(...t)=>p.startRecording&&p.startRecording(...t))},g))])])}var w=o(6813),m={name:"Sound",mixins:[w.Z],data(){return{recording:!1}},methods:{startRecording(){this.recording=!0},async stopRecording(){this.recording=!1,await this.request("sound.stop_recording")}}},v=o(3744);const _=(0,v.Z)(m,[["render",h],["__scopeId","data-v-911495ca"]]);var b=_}}]);
//# sourceMappingURL=4118.eb9d25ca.js.map
platypush/backend/http/webapp/dist/static/js/4118.eb9d25ca.js.map (new vendored file, 1 line)
@@ -0,0 +1 @@
{"version":3,"file":"static/js/4118.eb9d25ca.js","mappings":"4OACOA,MAAM,S,GACJA,MAAM,mB,SACFC,SAAA,GAASC,QAAQ,OAAOC,IAAI,U,qBAC8B,kD,GAK9DH,MAAM,Y,UAEP,OAA0B,KAAvBA,MAAM,cAAY,W,WAAK,4B,GAA1B,K,UAIA,OAA0B,KAAvBA,MAAM,cAAY,W,WAAK,2B,GAA1B,K,0CAdN,QAiBM,MAjBN,EAiBM,EAhBJ,OAKM,MALN,EAKM,CAJ8C,EAAAI,YAAA,WAAlD,QAGQ,QAHR,EAGQ,EAFN,OAA+D,UAAtDC,IAAG,4BAA8BC,MAAQC,aAAlD,UAEM,GAHR,yBAMF,OAQM,MARN,EAQM,CAPiD,EAAAH,YAArD,WAIA,QAES,U,MAFDI,KAAK,SAAU,QAAK,oBAAE,EAAAC,eAAA,EAAAA,iBAAA,KAA9B,MAJqD,WAArD,QAES,U,MAFDD,KAAK,SAAU,QAAK,oBAAE,EAAAE,gBAAA,EAAAA,kBAAA,KAA9B,O,eAcN,GACEC,KAAM,QACNC,OAAQ,CAACC,EAAA,GAETC,OACE,MAAO,CACLV,WAAW,EAEd,EAEDW,QAAS,CACPL,iBACEM,KAAKZ,WAAY,CAClB,EAEDa,sBACED,KAAKZ,WAAY,QACXY,KAAKE,QAAQ,uBACpB,I,UCnCL,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,GAAQ,CAAC,YAAY,qBAEzF,O","sources":["webpack://platypush/./src/components/panels/Sound/Index.vue","webpack://platypush/./src/components/panels/Sound/Index.vue?0677"],"sourcesContent":["<template>\n <div class=\"sound\">\n <div class=\"sound-container\">\n <audio autoplay preload=\"none\" ref=\"player\" v-if=\"recording\">\n <source :src=\"`/sound/stream.aac?t=${(new Date()).getTime()}`\">\n Your browser does not support audio elements\n </audio>\n </div>\n\n <div class=\"controls\">\n <button type=\"button\" @click=\"startRecording\" v-if=\"!recording\">\n <i class=\"fa fa-play\"></i> Start streaming audio\n </button>\n\n <button type=\"button\" @click=\"stopRecording\" v-else>\n <i class=\"fa fa-stop\"></i> Stop streaming audio\n </button>\n </div>\n </div>\n</template>\n\n<script>\nimport Utils from \"@/Utils\";\n\nexport default {\n name: \"Sound\",\n mixins: [Utils],\n\n data() {\n return {\n recording: false,\n };\n },\n\n methods: {\n startRecording() {\n this.recording = true\n },\n\n async stopRecording() {\n this.recording = false\n await this.request('sound.stop_recording')\n },\n },\n}\n</script>\n\n<style lang=\"scss\" scoped>\n.sound {\n width: 100%;\n height: 90%;\n margin-top: 7%;\n overflow: hidden;\n display: flex;\n flex-direction: column;\n align-items: center;\n\n .sound-container {\n margin-bottom: 1em;\n }\n}\n</style>\n","import { render } from \"./Index.vue?vue&type=template&id=911495ca&scoped=true\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport \"./Index.vue?vue&type=style&index=0&id=911495ca&lang=scss&scoped=true\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render],['__scopeId',\"data-v-911495ca\"]])\n\nexport default __exports__"],"names":["class","autoplay","preload","ref","recording","src","Date","getTime","type","stopRecording","startRecording","name","mixins","Utils","data","methods","this","async","request","__exports__","render"],"sourceRoot":""}
@@ -1,2 +1,2 @@
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[4548],{4548:function(a,e,n){n.r(e),n.d(e,{default:function(){return f}});var r=n(6252);function u(a,e,n,u,t,p){var c=(0,r.up)("Camera");return(0,r.wg)(),(0,r.j4)(c,{"camera-plugin":"pi"})}var t=n(5528),p={name:"CameraPi",components:{Camera:t["default"]}},c=n(3744);const s=(0,c.Z)(p,[["render",u]]);var f=s}}]);
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[4548],{4548:function(a,e,n){n.r(e),n.d(e,{default:function(){return f}});var r=n(6252);function u(a,e,n,u,t,p){var c=(0,r.up)("Camera");return(0,r.wg)(),(0,r.j4)(c,{"camera-plugin":"pi"})}var t=n(9021),p={name:"CameraPi",components:{Camera:t["default"]}},c=n(3744);const s=(0,c.Z)(p,[["render",u]]);var f=s}}]);
//# sourceMappingURL=4548-legacy.e2883bdd.js.map
//# sourceMappingURL=4548-legacy.7f4c9c3f.js.map
@@ -1 +1 @@
{"version":3,"file":"static/js/4548-legacy.e2883bdd.js","mappings":"gPACE,QAA6B,GAArB,gBAAc,M,eAMxB,GACEA,KAAM,WACNC,WAAY,CAACC,OAAA,e,UCJf,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,KAEpE,O","sources":["webpack://platypush/./src/components/panels/CameraPi/Index.vue","webpack://platypush/./src/components/panels/CameraPi/Index.vue?7074"],"sourcesContent":["<template>\n <Camera camera-plugin=\"pi\" />\n</template>\n\n<script>\nimport Camera from \"@/components/panels/Camera/Index\";\n\nexport default {\n name: \"CameraPi\",\n components: {Camera},\n}\n</script>\n","import { render } from \"./Index.vue?vue&type=template&id=6f4a0590\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render]])\n\nexport default __exports__"],"names":["name","components","Camera","__exports__","render"],"sourceRoot":""}
|
{"version":3,"file":"static/js/4548-legacy.7f4c9c3f.js","mappings":"gPACE,QAA6B,GAArB,gBAAc,M,eAMxB,GACEA,KAAM,WACNC,WAAY,CAACC,OAAA,e,UCJf,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,KAEpE,O","sources":["webpack://platypush/./src/components/panels/CameraPi/Index.vue","webpack://platypush/./src/components/panels/CameraPi/Index.vue?7074"],"sourcesContent":["<template>\n <Camera camera-plugin=\"pi\" />\n</template>\n\n<script>\nimport Camera from \"@/components/panels/Camera/Index\";\n\nexport default {\n name: \"CameraPi\",\n components: {Camera},\n}\n</script>\n","import { render } from \"./Index.vue?vue&type=template&id=6f4a0590\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render]])\n\nexport default __exports__"],"names":["name","components","Camera","__exports__","render"],"sourceRoot":""}
|
platypush/backend/http/webapp/dist/static/js/4548.75a2e6f8.js (vendored, normal file, 2 lines): diff suppressed because one or more lines are too long
platypush/backend/http/webapp/dist/static/js/4548.75a2e6f8.js.map (vendored, normal file, 1 line): diff suppressed because one or more lines are too long
(2 more file diffs suppressed because one or more lines are too long)
|
@ -1,2 +1,2 @@
|
||||||
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[5111],{5111:function(a,e,n){n.r(e),n.d(e,{default:function(){return s}});var r=n(6252);function u(a,e,n,u,t,p){var f=(0,r.up)("Camera");return(0,r.wg)(),(0,r.j4)(f,{"camera-plugin":"ffmpeg"})}var t=n(5528),p={name:"CameraFfmpeg",components:{Camera:t["default"]}},f=n(3744);const c=(0,f.Z)(p,[["render",u]]);var s=c}}]);
|
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[5111],{5111:function(a,e,n){n.r(e),n.d(e,{default:function(){return s}});var r=n(6252);function u(a,e,n,u,t,p){var f=(0,r.up)("Camera");return(0,r.wg)(),(0,r.j4)(f,{"camera-plugin":"ffmpeg"})}var t=n(9021),p={name:"CameraFfmpeg",components:{Camera:t["default"]}},f=n(3744);const c=(0,f.Z)(p,[["render",u]]);var s=c}}]);
|
||||||
//# sourceMappingURL=5111-legacy.262ea3c5.js.map
|
//# sourceMappingURL=5111-legacy.d4568c17.js.map
|
|
@ -1 +1 @@
|
||||||
{"version":3,"file":"static/js/5111-legacy.262ea3c5.js","mappings":"gPACE,QAAiC,GAAzB,gBAAc,U,eAMxB,GACEA,KAAM,eACNC,WAAY,CAACC,OAAA,e,UCJf,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,KAEpE,O","sources":["webpack://platypush/./src/components/panels/CameraFfmpeg/Index.vue","webpack://platypush/./src/components/panels/CameraFfmpeg/Index.vue?3548"],"sourcesContent":["<template>\n <Camera camera-plugin=\"ffmpeg\" />\n</template>\n\n<script>\nimport Camera from \"@/components/panels/Camera/Index\";\n\nexport default {\n name: \"CameraFfmpeg\",\n components: {Camera},\n}\n</script>\n","import { render } from \"./Index.vue?vue&type=template&id=dd632828\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render]])\n\nexport default __exports__"],"names":["name","components","Camera","__exports__","render"],"sourceRoot":""}
|
{"version":3,"file":"static/js/5111-legacy.d4568c17.js","mappings":"gPACE,QAAiC,GAAzB,gBAAc,U,eAMxB,GACEA,KAAM,eACNC,WAAY,CAACC,OAAA,e,UCJf,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,KAEpE,O","sources":["webpack://platypush/./src/components/panels/CameraFfmpeg/Index.vue","webpack://platypush/./src/components/panels/CameraFfmpeg/Index.vue?3548"],"sourcesContent":["<template>\n <Camera camera-plugin=\"ffmpeg\" />\n</template>\n\n<script>\nimport Camera from \"@/components/panels/Camera/Index\";\n\nexport default {\n name: \"CameraFfmpeg\",\n components: {Camera},\n}\n</script>\n","import { render } from \"./Index.vue?vue&type=template&id=dd632828\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render]])\n\nexport default __exports__"],"names":["name","components","Camera","__exports__","render"],"sourceRoot":""}
|
(2 more file diffs suppressed because one or more lines are too long)
platypush/backend/http/webapp/dist/static/js/5111.fbd25a85.js (vendored, normal file, 2 lines): diff suppressed because one or more lines are too long
platypush/backend/http/webapp/dist/static/js/5111.fbd25a85.js.map (vendored, normal file, 1 line): diff suppressed because one or more lines are too long
|
@ -1,2 +0,0 @@
|
||||||
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[5193],{5193:function(n,t,r){r.r(t),r.d(t,{default:function(){return b}});var e=r(6252),o=function(n){return(0,e.dD)("data-v-30d09191"),n=n(),(0,e.Cn)(),n},u={class:"sound"},a={class:"sound-container"},i={key:0,autoplay:"",preload:"none",ref:"player"},s=["src"],c=(0,e.Uk)(" Your browser does not support audio elements "),d={class:"controls"},p=o((function(){return(0,e._)("i",{class:"fa fa-play"},null,-1)})),l=(0,e.Uk)(" Start streaming audio "),f=[p,l],g=o((function(){return(0,e._)("i",{class:"fa fa-stop"},null,-1)})),k=(0,e.Uk)(" Stop streaming audio "),y=[g,k];function m(n,t,r,o,p,l){return(0,e.wg)(),(0,e.iD)("div",u,[(0,e._)("div",a,[p.recording?((0,e.wg)(),(0,e.iD)("audio",i,[(0,e._)("source",{src:"/sound/stream?t=".concat((new Date).getTime()),type:"audio/x-wav;codec=pcm"},null,8,s),c],512)):(0,e.kq)("",!0)]),(0,e._)("div",d,[p.recording?((0,e.wg)(),(0,e.iD)("button",{key:1,type:"button",onClick:t[1]||(t[1]=function(){return l.stopRecording&&l.stopRecording.apply(l,arguments)})},y)):((0,e.wg)(),(0,e.iD)("button",{key:0,type:"button",onClick:t[0]||(t[0]=function(){return l.startRecording&&l.startRecording.apply(l,arguments)})},f))])])}var w=r(8534),v=(r(5666),r(6813)),h={name:"Sound",mixins:[v.Z],data:function(){return{recording:!1}},methods:{startRecording:function(){this.recording=!0},stopRecording:function(){var n=this;return(0,w.Z)(regeneratorRuntime.mark((function t(){return regeneratorRuntime.wrap((function(t){while(1)switch(t.prev=t.next){case 0:return n.recording=!1,t.next=3,n.request("sound.stop_recording");case 3:case"end":return t.stop()}}),t)})))()}}},R=r(3744);const _=(0,R.Z)(h,[["render",m],["__scopeId","data-v-30d09191"]]);var b=_}}]);
|
|
||||||
//# sourceMappingURL=5193-legacy.d8c2e027.js.map
|
|
|
@ -1 +0,0 @@
|
||||||
{"version":3,"file":"static/js/5193-legacy.d8c2e027.js","mappings":"oPACOA,MAAM,S,GACJA,MAAM,mB,SACFC,SAAA,GAASC,QAAQ,OAAOC,IAAI,U,qBAEuD,kD,GAKvFH,MAAM,Y,uBAEP,OAA0B,KAAvBA,MAAM,cAAY,Q,eAAK,4B,GAA1B,K,uBAIA,OAA0B,KAAvBA,MAAM,cAAY,Q,eAAK,2B,GAA1B,K,0CAfN,QAkBM,MAlBN,EAkBM,EAjBJ,OAMM,MANN,EAMM,CAL8C,EAAAI,YAAA,WAAlD,QAIQ,QAJR,EAIQ,EAFN,OAAwF,UAA/EC,IAAG,+BAA0BC,MAAQC,WAAaC,KAAK,yBAAhE,UAEM,GAJR,yBAOF,OAQM,MARN,EAQM,CAPiD,EAAAJ,YAArD,WAIA,QAES,U,MAFDI,KAAK,SAAU,QAAK,8BAAE,EAAAC,eAAA,EAAAA,cAAA,kBAAF,IAA5B,MAJqD,WAArD,QAES,U,MAFDD,KAAK,SAAU,QAAK,8BAAE,EAAAE,gBAAA,EAAAA,eAAA,kBAAF,IAA5B,O,mCAcN,GACEC,KAAM,QACNC,OAAQ,CAACC,EAAA,GAETC,KAJa,WAKX,MAAO,CACLV,WAAW,EAEd,EAEDW,QAAS,CACPL,eADO,WAELM,KAAKZ,WAAY,CAClB,EAEKK,cALC,WAKe,uJACpB,EAAKL,WAAY,EADG,SAEd,EAAKa,QAAQ,wBAFC,4CAGrB,I,UCpCL,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,GAAQ,CAAC,YAAY,qBAEzF,O","sources":["webpack://platypush/./src/components/panels/Sound/Index.vue","webpack://platypush/./src/components/panels/Sound/Index.vue?0677"],"sourcesContent":["<template>\n <div class=\"sound\">\n <div class=\"sound-container\">\n <audio autoplay preload=\"none\" ref=\"player\" v-if=\"recording\">\n <!--suppress HtmlUnknownTarget -->\n <source :src=\"`/sound/stream?t=${(new Date()).getTime()}`\" type=\"audio/x-wav;codec=pcm\">\n Your browser does not support audio elements\n </audio>\n </div>\n\n <div class=\"controls\">\n <button type=\"button\" @click=\"startRecording\" v-if=\"!recording\">\n <i class=\"fa fa-play\"></i> Start streaming audio\n </button>\n\n <button type=\"button\" @click=\"stopRecording\" v-else>\n <i class=\"fa fa-stop\"></i> Stop streaming audio\n </button>\n </div>\n </div>\n</template>\n\n<script>\nimport Utils from \"@/Utils\";\n\nexport default {\n name: \"Sound\",\n mixins: [Utils],\n\n data() {\n return {\n recording: false,\n };\n },\n\n methods: {\n startRecording() {\n this.recording = true\n },\n\n async stopRecording() {\n this.recording = false\n await this.request('sound.stop_recording')\n },\n },\n}\n</script>\n\n<style lang=\"scss\" scoped>\n.sound {\n width: 100%;\n height: 90%;\n margin-top: 7%;\n overflow: hidden;\n display: flex;\n flex-direction: column;\n align-items: center;\n\n .sound-container {\n margin-bottom: 1em;\n }\n}\n</style>\n","import { render } from \"./Index.vue?vue&type=template&id=30d09191&scoped=true\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport \"./Index.vue?vue&type=style&index=0&id=30d09191&lang=scss&scoped=true\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render],['__scopeId',\"data-v-30d09191\"]])\n\nexport default __exports__"],"names":["class","autoplay","preload","ref","recording","src","Date","getTime","type","stopRecording","startRecording","name","mixins","Utils","data","methods","this","request","__exports__","render"],"sourceRoot":""}
|
|
|
@ -1,2 +0,0 @@
|
||||||
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[5193],{5193:function(t,n,o){o.r(n),o.d(n,{default:function(){return b}});var e=o(6252);const r=t=>((0,e.dD)("data-v-30d09191"),t=t(),(0,e.Cn)(),t),s={class:"sound"},a={class:"sound-container"},i={key:0,autoplay:"",preload:"none",ref:"player"},d=["src"],c=(0,e.Uk)(" Your browser does not support audio elements "),u={class:"controls"},l=r((()=>(0,e._)("i",{class:"fa fa-play"},null,-1))),p=(0,e.Uk)(" Start streaming audio "),g=[l,p],k=r((()=>(0,e._)("i",{class:"fa fa-stop"},null,-1))),y=(0,e.Uk)(" Stop streaming audio "),f=[k,y];function w(t,n,o,r,l,p){return(0,e.wg)(),(0,e.iD)("div",s,[(0,e._)("div",a,[l.recording?((0,e.wg)(),(0,e.iD)("audio",i,[(0,e._)("source",{src:`/sound/stream?t=${(new Date).getTime()}`,type:"audio/x-wav;codec=pcm"},null,8,d),c],512)):(0,e.kq)("",!0)]),(0,e._)("div",u,[l.recording?((0,e.wg)(),(0,e.iD)("button",{key:1,type:"button",onClick:n[1]||(n[1]=(...t)=>p.stopRecording&&p.stopRecording(...t))},f)):((0,e.wg)(),(0,e.iD)("button",{key:0,type:"button",onClick:n[0]||(n[0]=(...t)=>p.startRecording&&p.startRecording(...t))},g))])])}var h=o(6813),m={name:"Sound",mixins:[h.Z],data(){return{recording:!1}},methods:{startRecording(){this.recording=!0},async stopRecording(){this.recording=!1,await this.request("sound.stop_recording")}}},v=o(3744);const _=(0,v.Z)(m,[["render",w],["__scopeId","data-v-30d09191"]]);var b=_}}]);
|
|
||||||
//# sourceMappingURL=5193.1de6bb98.js.map
|
|
|
@ -1 +0,0 @@
|
||||||
{"version":3,"file":"static/js/5193.1de6bb98.js","mappings":"4OACOA,MAAM,S,GACJA,MAAM,mB,SACFC,SAAA,GAASC,QAAQ,OAAOC,IAAI,U,qBAEuD,kD,GAKvFH,MAAM,Y,UAEP,OAA0B,KAAvBA,MAAM,cAAY,W,WAAK,4B,GAA1B,K,UAIA,OAA0B,KAAvBA,MAAM,cAAY,W,WAAK,2B,GAA1B,K,0CAfN,QAkBM,MAlBN,EAkBM,EAjBJ,OAMM,MANN,EAMM,CAL8C,EAAAI,YAAA,WAAlD,QAIQ,QAJR,EAIQ,EAFN,OAAwF,UAA/EC,IAAG,wBAA0BC,MAAQC,YAAaC,KAAK,yBAAhE,UAEM,GAJR,yBAOF,OAQM,MARN,EAQM,CAPiD,EAAAJ,YAArD,WAIA,QAES,U,MAFDI,KAAK,SAAU,QAAK,oBAAE,EAAAC,eAAA,EAAAA,iBAAA,KAA9B,MAJqD,WAArD,QAES,U,MAFDD,KAAK,SAAU,QAAK,oBAAE,EAAAE,gBAAA,EAAAA,kBAAA,KAA9B,O,eAcN,GACEC,KAAM,QACNC,OAAQ,CAACC,EAAA,GAETC,OACE,MAAO,CACLV,WAAW,EAEd,EAEDW,QAAS,CACPL,iBACEM,KAAKZ,WAAY,CAClB,EAEDa,sBACED,KAAKZ,WAAY,QACXY,KAAKE,QAAQ,uBACpB,I,UCpCL,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,GAAQ,CAAC,YAAY,qBAEzF,O","sources":["webpack://platypush/./src/components/panels/Sound/Index.vue","webpack://platypush/./src/components/panels/Sound/Index.vue?0677"],"sourcesContent":["<template>\n <div class=\"sound\">\n <div class=\"sound-container\">\n <audio autoplay preload=\"none\" ref=\"player\" v-if=\"recording\">\n <!--suppress HtmlUnknownTarget -->\n <source :src=\"`/sound/stream?t=${(new Date()).getTime()}`\" type=\"audio/x-wav;codec=pcm\">\n Your browser does not support audio elements\n </audio>\n </div>\n\n <div class=\"controls\">\n <button type=\"button\" @click=\"startRecording\" v-if=\"!recording\">\n <i class=\"fa fa-play\"></i> Start streaming audio\n </button>\n\n <button type=\"button\" @click=\"stopRecording\" v-else>\n <i class=\"fa fa-stop\"></i> Stop streaming audio\n </button>\n </div>\n </div>\n</template>\n\n<script>\nimport Utils from \"@/Utils\";\n\nexport default {\n name: \"Sound\",\n mixins: [Utils],\n\n data() {\n return {\n recording: false,\n };\n },\n\n methods: {\n startRecording() {\n this.recording = true\n },\n\n async stopRecording() {\n this.recording = false\n await this.request('sound.stop_recording')\n },\n },\n}\n</script>\n\n<style lang=\"scss\" scoped>\n.sound {\n width: 100%;\n height: 90%;\n margin-top: 7%;\n overflow: hidden;\n display: flex;\n flex-direction: column;\n align-items: center;\n\n .sound-container {\n margin-bottom: 1em;\n }\n}\n</style>\n","import { render } from \"./Index.vue?vue&type=template&id=30d09191&scoped=true\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport \"./Index.vue?vue&type=style&index=0&id=30d09191&lang=scss&scoped=true\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render],['__scopeId',\"data-v-30d09191\"]])\n\nexport default __exports__"],"names":["class","autoplay","preload","ref","recording","src","Date","getTime","type","stopRecording","startRecording","name","mixins","Utils","data","methods","this","async","request","__exports__","render"],"sourceRoot":""}
|
|
platypush/backend/http/webapp/dist/static/js/5465-legacy.f819fef2.js (vendored, normal file, 2 lines): diff suppressed because one or more lines are too long
platypush/backend/http/webapp/dist/static/js/5465-legacy.f819fef2.js.map (vendored, normal file, 1 line): diff suppressed because one or more lines are too long
platypush/backend/http/webapp/dist/static/js/5465.e48f0738.js (vendored, normal file, 2 lines): diff suppressed because one or more lines are too long
platypush/backend/http/webapp/dist/static/js/5465.e48f0738.js.map (vendored, normal file, 1 line): diff suppressed because one or more lines are too long
(4 more file diffs suppressed because one or more lines are too long)
|
@ -1,2 +1,2 @@
|
||||||
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[699],{699:function(a,e,r){r.r(e),r.d(e,{default:function(){return m}});var n=r(6252);function t(a,e,r,t,u,s){var p=(0,n.up)("Camera");return(0,n.wg)(),(0,n.j4)(p,{"camera-plugin":"gstreamer"})}var u=r(5528),s={name:"CameraGstreamer",components:{Camera:u["default"]}},p=r(3744);const c=(0,p.Z)(s,[["render",t]]);var m=c}}]);
|
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[699],{699:function(a,e,r){r.r(e),r.d(e,{default:function(){return m}});var n=r(6252);function t(a,e,r,t,u,s){var p=(0,n.up)("Camera");return(0,n.wg)(),(0,n.j4)(p,{"camera-plugin":"gstreamer"})}var u=r(9021),s={name:"CameraGstreamer",components:{Camera:u["default"]}},p=r(3744);const c=(0,p.Z)(s,[["render",t]]);var m=c}}]);
|
||||||
//# sourceMappingURL=699-legacy.cb1ccfbb.js.map
|
//# sourceMappingURL=699-legacy.e258b653.js.map
|
|
@ -1 +1 @@
|
||||||
{"version":3,"file":"static/js/699-legacy.cb1ccfbb.js","mappings":"8OACE,QAAoC,GAA5B,gBAAc,a,eAMxB,GACEA,KAAM,kBACNC,WAAY,CAACC,OAAA,e,UCJf,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,KAEpE,O","sources":["webpack://platypush/./src/components/panels/CameraGstreamer/Index.vue","webpack://platypush/./src/components/panels/CameraGstreamer/Index.vue?5a11"],"sourcesContent":["<template>\n <Camera camera-plugin=\"gstreamer\" />\n</template>\n\n<script>\nimport Camera from \"@/components/panels/Camera/Index\";\n\nexport default {\n name: \"CameraGstreamer\",\n components: {Camera},\n}\n</script>\n","import { render } from \"./Index.vue?vue&type=template&id=6c669f2b\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render]])\n\nexport default __exports__"],"names":["name","components","Camera","__exports__","render"],"sourceRoot":""}
|
{"version":3,"file":"static/js/699-legacy.e258b653.js","mappings":"8OACE,QAAoC,GAA5B,gBAAc,a,eAMxB,GACEA,KAAM,kBACNC,WAAY,CAACC,OAAA,e,UCJf,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,KAEpE,O","sources":["webpack://platypush/./src/components/panels/CameraGstreamer/Index.vue","webpack://platypush/./src/components/panels/CameraGstreamer/Index.vue?5a11"],"sourcesContent":["<template>\n <Camera camera-plugin=\"gstreamer\" />\n</template>\n\n<script>\nimport Camera from \"@/components/panels/Camera/Index\";\n\nexport default {\n name: \"CameraGstreamer\",\n components: {Camera},\n}\n</script>\n","import { render } from \"./Index.vue?vue&type=template&id=6c669f2b\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render]])\n\nexport default __exports__"],"names":["name","components","Camera","__exports__","render"],"sourceRoot":""}
|
platypush/backend/http/webapp/dist/static/js/699.85a689b1.js (vendored, normal file, 2 lines): diff suppressed because one or more lines are too long
platypush/backend/http/webapp/dist/static/js/699.85a689b1.js.map (vendored, normal file, 1 line): diff suppressed because one or more lines are too long
(2 more file diffs suppressed because one or more lines are too long)
|
@ -1,2 +1,2 @@
|
||||||
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[8184],{8184:function(a,e,n){n.r(e),n.d(e,{default:function(){return f}});var r=n(6252);function u(a,e,n,u,t,c){var p=(0,r.up)("Camera");return(0,r.wg)(),(0,r.j4)(p,{"camera-plugin":"cv"})}var t=n(5528),c={name:"CameraCv",components:{Camera:t["default"]}},p=n(3744);const s=(0,p.Z)(c,[["render",u]]);var f=s}}]);
|
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[8184],{8184:function(a,e,n){n.r(e),n.d(e,{default:function(){return f}});var r=n(6252);function u(a,e,n,u,t,c){var p=(0,r.up)("Camera");return(0,r.wg)(),(0,r.j4)(p,{"camera-plugin":"cv"})}var t=n(9021),c={name:"CameraCv",components:{Camera:t["default"]}},p=n(3744);const s=(0,p.Z)(c,[["render",u]]);var f=s}}]);
|
||||||
//# sourceMappingURL=8184-legacy.702db0b7.js.map
|
//# sourceMappingURL=8184-legacy.73f24c6e.js.map
|
|
@ -1 +1 @@
|
||||||
{"version":3,"file":"static/js/8184-legacy.702db0b7.js","mappings":"gPACE,QAA6B,GAArB,gBAAc,M,eAMxB,GACEA,KAAM,WACNC,WAAY,CAACC,OAAA,e,UCJf,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,KAEpE,O","sources":["webpack://platypush/./src/components/panels/CameraCv/Index.vue","webpack://platypush/./src/components/panels/CameraCv/Index.vue?6f97"],"sourcesContent":["<template>\n <Camera camera-plugin=\"cv\" />\n</template>\n\n<script>\nimport Camera from \"@/components/panels/Camera/Index\";\n\nexport default {\n name: \"CameraCv\",\n components: {Camera},\n}\n</script>\n","import { render } from \"./Index.vue?vue&type=template&id=351194be\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render]])\n\nexport default __exports__"],"names":["name","components","Camera","__exports__","render"],"sourceRoot":""}
|
{"version":3,"file":"static/js/8184-legacy.73f24c6e.js","mappings":"gPACE,QAA6B,GAArB,gBAAc,M,eAMxB,GACEA,KAAM,WACNC,WAAY,CAACC,OAAA,e,UCJf,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,KAEpE,O","sources":["webpack://platypush/./src/components/panels/CameraCv/Index.vue","webpack://platypush/./src/components/panels/CameraCv/Index.vue?6f97"],"sourcesContent":["<template>\n <Camera camera-plugin=\"cv\" />\n</template>\n\n<script>\nimport Camera from \"@/components/panels/Camera/Index\";\n\nexport default {\n name: \"CameraCv\",\n components: {Camera},\n}\n</script>\n","import { render } from \"./Index.vue?vue&type=template&id=351194be\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render]])\n\nexport default __exports__"],"names":["name","components","Camera","__exports__","render"],"sourceRoot":""}
|
platypush/backend/http/webapp/dist/static/js/8184.3768abaf.js (vendored, normal file, 2 lines): diff suppressed because one or more lines are too long
platypush/backend/http/webapp/dist/static/js/8184.3768abaf.js.map (vendored, normal file, 1 line): diff suppressed because one or more lines are too long
(2 more file diffs suppressed because one or more lines are too long)
|
@ -1,2 +1,2 @@
|
||||||
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[9895],{9895:function(a,r,e){e.r(r),e.d(r,{default:function(){return i}});var t=e(6252);function s(a,r,e,s,n,c){var u=(0,t.up)("Camera");return(0,t.wg)(),(0,t.j4)(u,{"camera-plugin":"ir.mlx90640",ref:"camera"},null,512)}var n=e(5528),c={name:"CameraIrMlx90640",components:{Camera:n["default"]},mounted:function(){var a=this.$root.config["camera.".concat(this.cameraPlugin)]||{};a.resolution||(this.$refs.camera.attrs.resolution=[32,24]),a.scale_x||(this.$refs.camera.attrs.scale_x=15),a.scale_y||(this.$refs.camera.attrs.scale_y=15)}},u=e(3744);const l=(0,u.Z)(c,[["render",s]]);var i=l}}]);
|
"use strict";(self["webpackChunkplatypush"]=self["webpackChunkplatypush"]||[]).push([[9895],{9895:function(a,r,e){e.r(r),e.d(r,{default:function(){return i}});var t=e(6252);function s(a,r,e,s,n,c){var u=(0,t.up)("Camera");return(0,t.wg)(),(0,t.j4)(u,{"camera-plugin":"ir.mlx90640",ref:"camera"},null,512)}var n=e(9021),c={name:"CameraIrMlx90640",components:{Camera:n["default"]},mounted:function(){var a=this.$root.config["camera.".concat(this.cameraPlugin)]||{};a.resolution||(this.$refs.camera.attrs.resolution=[32,24]),a.scale_x||(this.$refs.camera.attrs.scale_x=15),a.scale_y||(this.$refs.camera.attrs.scale_y=15)}},u=e(3744);const l=(0,u.Z)(c,[["render",s]]);var i=l}}]);
|
||||||
//# sourceMappingURL=9895-legacy.acee9428.js.map
|
//# sourceMappingURL=9895-legacy.1fd296a4.js.map
|
|
@ -1 +1 @@
|
||||||
{"version":3,"file":"static/js/9895-legacy.acee9428.js","mappings":"gPACE,QAAmD,GAA3C,gBAAc,cAAcA,IAAI,UAAxC,S,eAMF,GACEC,KAAM,mBACNC,WAAY,CAACC,OAAA,cAEbC,QAJa,WAKX,IAAMC,EAASC,KAAKC,MAAMF,OAAX,iBAA4BC,KAAKE,gBAAmB,CAAC,EAC/DH,EAAOI,aACVH,KAAKI,MAAMC,OAAOC,MAAMH,WAAa,CAAC,GAAI,KACvCJ,EAAOQ,UACVP,KAAKI,MAAMC,OAAOC,MAAMC,QAAU,IAC/BR,EAAOS,UACVR,KAAKI,MAAMC,OAAOC,MAAME,QAAU,GACrC,G,UCdH,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,KAEpE,O","sources":["webpack://platypush/./src/components/panels/CameraIrMlx90640/Index.vue","webpack://platypush/./src/components/panels/CameraIrMlx90640/Index.vue?0a62"],"sourcesContent":["<template>\n <Camera camera-plugin=\"ir.mlx90640\" ref=\"camera\" />\n</template>\n\n<script>\nimport Camera from \"@/components/panels/Camera/Index\";\n\nexport default {\n name: \"CameraIrMlx90640\",\n components: {Camera},\n\n mounted() {\n const config = this.$root.config[`camera.${this.cameraPlugin}`] || {}\n if (!config.resolution)\n this.$refs.camera.attrs.resolution = [32, 24]\n if (!config.scale_x)\n this.$refs.camera.attrs.scale_x = 15\n if (!config.scale_y)\n this.$refs.camera.attrs.scale_y = 15\n },\n}\n</script>\n","import { render } from \"./Index.vue?vue&type=template&id=5585d4f1\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render]])\n\nexport default __exports__"],"names":["ref","name","components","Camera","mounted","config","this","$root","cameraPlugin","resolution","$refs","camera","attrs","scale_x","scale_y","__exports__","render"],"sourceRoot":""}
|
{"version":3,"file":"static/js/9895-legacy.1fd296a4.js","mappings":"gPACE,QAAmD,GAA3C,gBAAc,cAAcA,IAAI,UAAxC,S,eAMF,GACEC,KAAM,mBACNC,WAAY,CAACC,OAAA,cAEbC,QAJa,WAKX,IAAMC,EAASC,KAAKC,MAAMF,OAAX,iBAA4BC,KAAKE,gBAAmB,CAAC,EAC/DH,EAAOI,aACVH,KAAKI,MAAMC,OAAOC,MAAMH,WAAa,CAAC,GAAI,KACvCJ,EAAOQ,UACVP,KAAKI,MAAMC,OAAOC,MAAMC,QAAU,IAC/BR,EAAOS,UACVR,KAAKI,MAAMC,OAAOC,MAAME,QAAU,GACrC,G,UCdH,MAAMC,GAA2B,OAAgB,EAAQ,CAAC,CAAC,SAASC,KAEpE,O","sources":["webpack://platypush/./src/components/panels/CameraIrMlx90640/Index.vue","webpack://platypush/./src/components/panels/CameraIrMlx90640/Index.vue?0a62"],"sourcesContent":["<template>\n <Camera camera-plugin=\"ir.mlx90640\" ref=\"camera\" />\n</template>\n\n<script>\nimport Camera from \"@/components/panels/Camera/Index\";\n\nexport default {\n name: \"CameraIrMlx90640\",\n components: {Camera},\n\n mounted() {\n const config = this.$root.config[`camera.${this.cameraPlugin}`] || {}\n if (!config.resolution)\n this.$refs.camera.attrs.resolution = [32, 24]\n if (!config.scale_x)\n this.$refs.camera.attrs.scale_x = 15\n if (!config.scale_y)\n this.$refs.camera.attrs.scale_y = 15\n },\n}\n</script>\n","import { render } from \"./Index.vue?vue&type=template&id=5585d4f1\"\nimport script from \"./Index.vue?vue&type=script&lang=js\"\nexport * from \"./Index.vue?vue&type=script&lang=js\"\n\nimport exportComponent from \"/home/blacklight/git_tree/platypush/platypush/backend/http/webapp/node_modules/vue-loader/dist/exportHelper.js\"\nconst __exports__ = /*#__PURE__*/exportComponent(script, [['render',render]])\n\nexport default __exports__"],"names":["ref","name","components","Camera","mounted","config","this","$root","cameraPlugin","resolution","$refs","camera","attrs","scale_x","scale_y","__exports__","render"],"sourceRoot":""}
|
(2 more file diffs suppressed because one or more lines are too long)
platypush/backend/http/webapp/dist/static/js/9895.a39079d5.js (vendored, normal file, 2 lines): diff suppressed because one or more lines are too long
platypush/backend/http/webapp/dist/static/js/9895.a39079d5.js.map (vendored, normal file, 1 line): diff suppressed because one or more lines are too long
(4 more file diffs suppressed because one or more lines are too long)
|
@ -40,8 +40,7 @@
|
||||||
|
|
||||||
<div class="audio-container">
|
<div class="audio-container">
|
||||||
<audio autoplay preload="none" ref="player" v-if="audioOn">
|
<audio autoplay preload="none" ref="player" v-if="audioOn">
|
||||||
<!--suppress HtmlUnknownTarget -->
|
<source :src="`/sound/stream.aac?t=${(new Date()).getTime()}`">
|
||||||
<source :src="`/sound/stream?t=${(new Date()).getTime()}`" type="audio/x-wav;codec=pcm">
|
|
||||||
Your browser does not support audio elements
|
Your browser does not support audio elements
|
||||||
</audio>
|
</audio>
|
||||||
</div>
|
</div>
|
||||||
|
|
|
@ -2,8 +2,7 @@
|
||||||
<div class="sound">
|
<div class="sound">
|
||||||
<div class="sound-container">
|
<div class="sound-container">
|
||||||
<audio autoplay preload="none" ref="player" v-if="recording">
|
<audio autoplay preload="none" ref="player" v-if="recording">
|
||||||
<!--suppress HtmlUnknownTarget -->
|
<source :src="`/sound/stream.aac?t=${(new Date()).getTime()}`">
|
||||||
<source :src="`/sound/stream?t=${(new Date()).getTime()}`" type="audio/x-wav;codec=pcm">
|
|
||||||
Your browser does not support audio elements
|
Your browser does not support audio elements
|
||||||
</audio>
|
</audio>
|
||||||
</div>
|
</div>
|
||||||
|
|
|
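Note: the Sound panel above now points its <audio> element at /sound/stream.aac instead of the old raw PCM endpoint. Below is a minimal, hedged sketch of how the same streaming endpoint could be consumed outside the browser; the path comes from the template in this diff, while the host, port (8008 is only the usual default) and absence of authentication are assumptions that may not match a given setup.

# Hedged sketch: stream the new AAC endpoint to a local file.
import requests

BASE_URL = 'http://localhost:8008'  # hypothetical local platypush HTTP backend

with requests.get(f'{BASE_URL}/sound/stream.aac', stream=True, timeout=30) as response:
    response.raise_for_status()
    with open('capture.aac', 'wb') as out:
        # Read the AAC stream in small chunks until the server closes it.
        for chunk in response.iter_content(chunk_size=4096):
            if chunk:
                out.write(chunk)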
@ -1,65 +1,70 @@
|
||||||
|
from abc import ABC
|
||||||
|
from typing import Optional, Tuple, Union
|
||||||
from platypush.message.event import Event
|
from platypush.message.event import Event
|
||||||
|
|
||||||
|
|
||||||
class SoundEvent(Event):
|
class SoundEvent(Event, ABC):
|
||||||
""" Base class for sound events """
|
"""Base class for sound events"""
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(
|
||||||
super().__init__(*args, **kwargs)
|
self, *args, device: Optional[Union[str, Tuple[str, str]]] = None, **kwargs
|
||||||
|
):
|
||||||
|
super().__init__(*args, device=device, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
class SoundPlaybackStartedEvent(SoundEvent):
|
class SoundEventWithResource(SoundEvent, ABC):
|
||||||
|
"""Base class for sound events with resource names attached"""
|
||||||
|
|
||||||
|
def __init__(self, *args, resource: Optional[str] = None, **kwargs):
|
||||||
|
super().__init__(*args, resource=resource, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class SoundPlaybackStartedEvent(SoundEventWithResource):
|
||||||
"""
|
"""
|
||||||
Event triggered when a new sound playback starts
|
Event triggered when a new sound playback starts
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, filename=None, *args, **kwargs):
|
|
||||||
super().__init__(*args, filename=filename, **kwargs)
|
|
||||||
|
|
||||||
|
class SoundPlaybackStoppedEvent(SoundEventWithResource):
|
||||||
class SoundPlaybackStoppedEvent(SoundEvent):
|
|
||||||
"""
|
"""
|
||||||
Event triggered when the sound playback stops
|
Event triggered when the sound playback stops
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, filename=None, *args, **kwargs):
|
|
||||||
super().__init__(*args, filename=filename, **kwargs)
|
|
||||||
|
|
||||||
|
class SoundPlaybackPausedEvent(SoundEventWithResource):
|
||||||
class SoundPlaybackPausedEvent(SoundEvent):
|
|
||||||
"""
|
"""
|
||||||
Event triggered when the sound playback pauses
|
Event triggered when the sound playback pauses
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
super().__init__(*args, **kwargs)
|
class SoundPlaybackResumedEvent(SoundEventWithResource):
|
||||||
|
"""
|
||||||
|
Event triggered when the sound playback resumes from a paused state
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
class SoundRecordingStartedEvent(SoundEvent):
|
class SoundRecordingStartedEvent(SoundEventWithResource):
|
||||||
"""
|
"""
|
||||||
Event triggered when a new recording starts
|
Event triggered when a new recording starts
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, filename=None, *args, **kwargs):
|
|
||||||
super().__init__(*args, filename=filename, **kwargs)
|
|
||||||
|
|
||||||
|
class SoundRecordingStoppedEvent(SoundEventWithResource):
|
||||||
class SoundRecordingStoppedEvent(SoundEvent):
|
|
||||||
"""
|
"""
|
||||||
Event triggered when a sound recording stops
|
Event triggered when a sound recording stops
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, filename=None, *args, **kwargs):
|
|
||||||
super().__init__(*args, filename=filename, **kwargs)
|
|
||||||
|
|
||||||
|
class SoundRecordingPausedEvent(SoundEventWithResource):
|
||||||
class SoundRecordingPausedEvent(SoundEvent):
|
|
||||||
"""
|
"""
|
||||||
Event triggered when a sound recording pauses
|
Event triggered when a sound recording pauses
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
super().__init__(*args, **kwargs)
|
class SoundRecordingResumedEvent(SoundEvent):
|
||||||
|
"""
|
||||||
|
Event triggered when a sound recording resumes from a paused state
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
# vim:sw=4:ts=4:et:
|
# vim:sw=4:ts=4:et:
|
||||||
|
|
|
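Note: the hunk above turns SoundEvent into an abstract base and introduces SoundEventWithResource so that playback/recording events carry a generic resource name instead of a filename. The sketch below shows how the new base class is meant to be extended and how the payload travels; the transcoding event subclass is purely hypothetical, and reading the payload through evt.args assumes the usual behaviour of platypush Event objects.

# Hedged sketch of the refactored event hierarchy above.
from typing import Optional

from platypush.message.event.sound import (
    SoundEventWithResource,
    SoundPlaybackStartedEvent,
)


class SoundTranscodingStartedEvent(SoundEventWithResource):
    """Hypothetical subclass: it only needs to forward the resource name."""

    def __init__(self, *args, resource: Optional[str] = None, **kwargs):
        super().__init__(*args, resource=resource, **kwargs)


# Events built on the new base class expose the resource in their args payload.
evt = SoundPlaybackStartedEvent(resource='/tmp/recording.wav')
print(evt.args.get('resource'))  # expected: /tmp/recording.wav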
@ -7,24 +7,43 @@ import time
|
||||||
|
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
|
from dataclasses import asdict
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from multiprocessing import Process
|
from multiprocessing import Process
|
||||||
from queue import Queue
|
from queue import Queue
|
||||||
from typing import Optional, Union, Dict, Tuple, IO
|
from typing import Generator, Optional, Union, Dict, Tuple, IO
|
||||||
|
|
||||||
from platypush.config import Config
|
from platypush.config import Config
|
||||||
from platypush.message.event.camera import CameraRecordingStartedEvent, CameraPictureTakenEvent, \
|
from platypush.message.event.camera import (
|
||||||
CameraRecordingStoppedEvent, CameraVideoRenderedEvent
|
CameraRecordingStartedEvent,
|
||||||
|
CameraPictureTakenEvent,
|
||||||
|
CameraRecordingStoppedEvent,
|
||||||
|
CameraVideoRenderedEvent,
|
||||||
|
)
|
||||||
from platypush.plugins import Plugin, action
|
from platypush.plugins import Plugin, action
|
||||||
from platypush.plugins.camera.model.camera import CameraInfo, Camera
|
from platypush.plugins.camera.model.camera import CameraInfo, Camera
|
||||||
from platypush.plugins.camera.model.exceptions import CameraException, CaptureAlreadyRunningException
|
from platypush.plugins.camera.model.exceptions import (
|
||||||
|
CameraException,
|
||||||
|
CaptureAlreadyRunningException,
|
||||||
|
)
|
||||||
from platypush.plugins.camera.model.writer import VideoWriter, StreamWriter
|
from platypush.plugins.camera.model.writer import VideoWriter, StreamWriter
|
||||||
from platypush.plugins.camera.model.writer.ffmpeg import FFmpegFileWriter
|
from platypush.plugins.camera.model.writer.ffmpeg import FFmpegFileWriter
|
||||||
from platypush.plugins.camera.model.writer.preview import PreviewWriter, PreviewWriterFactory
|
from platypush.plugins.camera.model.writer.preview import (
|
||||||
|
PreviewWriter,
|
||||||
|
PreviewWriterFactory,
|
||||||
|
)
|
||||||
from platypush.utils import get_plugin_name_by_class
|
from platypush.utils import get_plugin_name_by_class
|
||||||
|
|
||||||
__all__ = ['Camera', 'CameraInfo', 'CameraException', 'CameraPlugin', 'CaptureAlreadyRunningException',
|
__all__ = [
|
||||||
'VideoWriter', 'StreamWriter', 'PreviewWriter']
|
'Camera',
|
||||||
|
'CameraInfo',
|
||||||
|
'CameraException',
|
||||||
|
'CameraPlugin',
|
||||||
|
'CaptureAlreadyRunningException',
|
||||||
|
'VideoWriter',
|
||||||
|
'StreamWriter',
|
||||||
|
'PreviewWriter',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
class CameraPlugin(Plugin, ABC):
|
class CameraPlugin(Plugin, ABC):
|
||||||
|
@ -66,15 +85,32 @@ class CameraPlugin(Plugin, ABC):
|
||||||
_camera_info_class = CameraInfo
|
_camera_info_class = CameraInfo
|
||||||
_video_writer_class = FFmpegFileWriter
|
_video_writer_class = FFmpegFileWriter
|
||||||
|
|
||||||
def __init__(self, device: Optional[Union[int, str]] = None, resolution: Tuple[int, int] = (640, 480),
|
def __init__(
|
||||||
frames_dir: Optional[str] = None, warmup_frames: int = 5, warmup_seconds: Optional[float] = 0.,
|
self,
|
||||||
capture_timeout: Optional[float] = 20.0, scale_x: Optional[float] = None,
|
device: Optional[Union[int, str]] = None,
|
||||||
scale_y: Optional[float] = None, rotate: Optional[float] = None, grayscale: Optional[bool] = None,
|
resolution: Tuple[int, int] = (640, 480),
|
||||||
color_transform: Optional[Union[int, str]] = None, fps: float = 16, horizontal_flip: bool = False,
|
frames_dir: Optional[str] = None,
|
||||||
vertical_flip: bool = False, input_format: Optional[str] = None, output_format: Optional[str] = None,
|
warmup_frames: int = 5,
|
||||||
stream_format: str = 'mjpeg', listen_port: Optional[int] = 5000, bind_address: str = '0.0.0.0',
|
warmup_seconds: Optional[float] = 0.0,
|
||||||
ffmpeg_bin: str = 'ffmpeg', input_codec: Optional[str] = None, output_codec: Optional[str] = None,
|
capture_timeout: Optional[float] = 20.0,
|
||||||
**kwargs):
|
scale_x: Optional[float] = None,
|
||||||
|
scale_y: Optional[float] = None,
|
||||||
|
rotate: Optional[float] = None,
|
||||||
|
grayscale: Optional[bool] = None,
|
||||||
|
color_transform: Optional[Union[int, str]] = None,
|
||||||
|
fps: float = 16,
|
||||||
|
horizontal_flip: bool = False,
|
||||||
|
vertical_flip: bool = False,
|
||||||
|
input_format: Optional[str] = None,
|
||||||
|
output_format: Optional[str] = None,
|
||||||
|
stream_format: str = 'mjpeg',
|
||||||
|
listen_port: Optional[int] = 5000,
|
||||||
|
bind_address: str = '0.0.0.0',
|
||||||
|
ffmpeg_bin: str = 'ffmpeg',
|
||||||
|
input_codec: Optional[str] = None,
|
||||||
|
output_codec: Optional[str] = None,
|
||||||
|
**kwargs,
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
:param device: Identifier of the default capturing device.
|
:param device: Identifier of the default capturing device.
|
||||||
:param resolution: Default resolution, as a tuple of two integers.
|
:param resolution: Default resolution, as a tuple of two integers.
|
||||||
|
@ -117,22 +153,38 @@ class CameraPlugin(Plugin, ABC):
|
||||||
"""
|
"""
|
||||||
super().__init__(**kwargs)
|
super().__init__(**kwargs)
|
||||||
|
|
||||||
self.workdir = os.path.join(Config.get('workdir'), get_plugin_name_by_class(self))
|
workdir = Config.get('workdir')
|
||||||
|
plugin_name = get_plugin_name_by_class(self)
|
||||||
|
assert isinstance(workdir, str) and plugin_name
|
||||||
|
self.workdir = os.path.join(workdir, plugin_name)
|
||||||
pathlib.Path(self.workdir).mkdir(mode=0o755, exist_ok=True, parents=True)
|
pathlib.Path(self.workdir).mkdir(mode=0o755, exist_ok=True, parents=True)
|
||||||
|
|
||||||
# noinspection PyArgumentList
|
self.camera_info = self._camera_info_class(
|
||||||
self.camera_info = self._camera_info_class(device, color_transform=color_transform, warmup_frames=warmup_frames,
|
device,
|
||||||
warmup_seconds=warmup_seconds, rotate=rotate, scale_x=scale_x,
|
color_transform=color_transform,
|
||||||
scale_y=scale_y, capture_timeout=capture_timeout, fps=fps,
|
warmup_frames=warmup_frames,
|
||||||
input_format=input_format, output_format=output_format,
|
warmup_seconds=warmup_seconds or 0,
|
||||||
stream_format=stream_format, resolution=resolution,
|
rotate=rotate,
|
||||||
grayscale=grayscale, listen_port=listen_port,
|
scale_x=scale_x,
|
||||||
horizontal_flip=horizontal_flip, vertical_flip=vertical_flip,
|
scale_y=scale_y,
|
||||||
ffmpeg_bin=ffmpeg_bin, input_codec=input_codec,
|
capture_timeout=capture_timeout or 20,
|
||||||
output_codec=output_codec, bind_address=bind_address,
|
fps=fps,
|
||||||
frames_dir=os.path.abspath(
|
input_format=input_format,
|
||||||
os.path.expanduser(frames_dir or
|
output_format=output_format,
|
||||||
os.path.join(self.workdir, 'frames'))))
|
stream_format=stream_format,
|
||||||
|
resolution=resolution,
|
||||||
|
grayscale=grayscale,
|
||||||
|
listen_port=listen_port,
|
||||||
|
horizontal_flip=horizontal_flip,
|
||||||
|
vertical_flip=vertical_flip,
|
||||||
|
ffmpeg_bin=ffmpeg_bin,
|
||||||
|
input_codec=input_codec,
|
||||||
|
output_codec=output_codec,
|
||||||
|
bind_address=bind_address,
|
||||||
|
frames_dir=os.path.abspath(
|
||||||
|
os.path.expanduser(frames_dir or os.path.join(self.workdir, 'frames'))
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
self._devices: Dict[Union[int, str], Camera] = {}
|
self._devices: Dict[Union[int, str], Camera] = {}
|
||||||
self._streams: Dict[Union[int, str], Camera] = {}
|
self._streams: Dict[Union[int, str], Camera] = {}
|
||||||
|
@ -142,7 +194,13 @@ class CameraPlugin(Plugin, ABC):
|
||||||
merged_info.set(**info)
|
merged_info.set(**info)
|
||||||
return merged_info
|
return merged_info
|
||||||
|
|
||||||
def open_device(self, device: Optional[Union[int, str]] = None, stream: bool = False, **params) -> Camera:
|
def open_device(
|
||||||
|
self,
|
||||||
|
device: Optional[Union[int, str]],
|
||||||
|
stream: bool = False,
|
||||||
|
redis_queue: Optional[str] = None,
|
||||||
|
**params,
|
||||||
|
) -> Camera:
|
||||||
"""
|
"""
|
||||||
Initialize and open a device.
|
Initialize and open a device.
|
||||||
|
|
||||||
|
@ -160,25 +218,31 @@ class CameraPlugin(Plugin, ABC):
|
||||||
assert device is not None, 'No device specified/configured'
|
assert device is not None, 'No device specified/configured'
|
||||||
if device in self._devices:
|
if device in self._devices:
|
||||||
camera = self._devices[device]
|
camera = self._devices[device]
|
||||||
if camera.capture_thread and camera.capture_thread.is_alive() and camera.start_event.is_set():
|
if (
|
||||||
|
camera.capture_thread
|
||||||
|
and camera.capture_thread.is_alive()
|
||||||
|
and camera.start_event.is_set()
|
||||||
|
):
|
||||||
raise CaptureAlreadyRunningException(device)
|
raise CaptureAlreadyRunningException(device)
|
||||||
|
|
||||||
camera.start_event.clear()
|
camera.start_event.clear()
|
||||||
camera.capture_thread = None
|
camera.capture_thread = None
|
||||||
else:
|
else:
|
||||||
# noinspection PyArgumentList
|
|
||||||
camera = self._camera_class(info=info)
|
camera = self._camera_class(info=info)
|
||||||
|
|
||||||
camera.info.set(**params)
|
camera.info.set(**params)
|
||||||
camera.object = self.prepare_device(camera)
|
camera.object = self.prepare_device(camera)
|
||||||
|
|
||||||
if stream:
|
if stream and camera.info.stream_format:
|
||||||
writer_class = StreamWriter.get_class_by_name(camera.info.stream_format)
|
writer_class = StreamWriter.get_class_by_name(camera.info.stream_format)
|
||||||
camera.stream = writer_class(camera=camera, plugin=self)
|
camera.stream = writer_class(
|
||||||
|
camera=camera, plugin=self, redis_queue=redis_queue
|
||||||
|
)
|
||||||
|
|
||||||
if camera.info.frames_dir:
|
if camera.info.frames_dir:
|
||||||
pathlib.Path(os.path.abspath(os.path.expanduser(camera.info.frames_dir))).mkdir(
|
pathlib.Path(
|
||||||
mode=0o755, exist_ok=True, parents=True)
|
os.path.abspath(os.path.expanduser(camera.info.frames_dir))
|
||||||
|
).mkdir(mode=0o755, exist_ok=True, parents=True)
|
||||||
|
|
||||||
self._devices[device] = camera
|
self._devices[device] = camera
|
||||||
return camera
|
return camera
|
||||||
|
@ -205,29 +269,43 @@ class CameraPlugin(Plugin, ABC):
|
||||||
:param camera: Camera object. ``camera.info.capture_timeout`` is used as a capture thread termination timeout
|
:param camera: Camera object. ``camera.info.capture_timeout`` is used as a capture thread termination timeout
|
||||||
if set.
|
if set.
|
||||||
"""
|
"""
|
||||||
if camera.capture_thread and camera.capture_thread.is_alive() and \
|
if (
|
||||||
threading.get_ident() != camera.capture_thread.ident:
|
camera.capture_thread
|
||||||
|
and camera.capture_thread.is_alive()
|
||||||
|
and threading.get_ident() != camera.capture_thread.ident
|
||||||
|
):
|
||||||
try:
|
try:
|
||||||
camera.capture_thread.join(timeout=camera.info.capture_timeout)
|
camera.capture_thread.join(timeout=camera.info.capture_timeout)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.logger.warning('Error on FFmpeg capture wait: {}'.format(str(e)))
|
self.logger.warning('Error on FFmpeg capture wait: %s', e)
|
||||||
|
|
||||||
@contextmanager
|
@contextmanager
|
||||||
def open(self, device: Optional[Union[int, str]] = None, stream: bool = None, **info) -> Camera:
|
def open(
|
||||||
|
self,
|
||||||
|
device: Optional[Union[int, str]] = None,
|
||||||
|
stream: bool = False,
|
||||||
|
redis_queue: Optional[str] = None,
|
||||||
|
**info,
|
||||||
|
) -> Generator[Camera, None, None]:
|
||||||
"""
|
"""
|
||||||
Initialize and open a device using a context manager pattern.
|
Initialize and open a device using a context manager pattern.
|
||||||
|
|
||||||
:param device: Capture device by name, path or ID.
|
:param device: Capture device by name, path or ID.
|
||||||
:param stream: If set, the frames will be streamed to ``camera.stream``.
|
:param stream: If set, the frames will be streamed to ``camera.stream``.
|
||||||
|
:param redis_queue: If set, the frames will be streamed to
|
||||||
|
``redis_queue``.
|
||||||
:param info: Camera parameters override - see constructor parameters.
|
:param info: Camera parameters override - see constructor parameters.
|
||||||
:return: The initialized :class:`platypush.plugins.camera.Camera` object.
|
:return: The initialized :class:`platypush.plugins.camera.Camera` object.
|
||||||
"""
|
"""
|
||||||
camera = None
|
camera = None
|
||||||
try:
|
try:
|
||||||
camera = self.open_device(device, stream=stream, **info)
|
camera = self.open_device(
|
||||||
|
device, stream=stream, redis_queue=redis_queue, **info
|
||||||
|
)
|
||||||
yield camera
|
yield camera
|
||||||
finally:
|
finally:
|
||||||
self.close_device(camera)
|
if camera:
|
||||||
|
self.close_device(camera)
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def prepare_device(self, device: Camera):
|
def prepare_device(self, device: Camera):
|
||||||
|
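Note: the hunk above reworks open() into a generator-typed context manager that accepts a redis_queue for the new streaming endpoints and only closes the device if it was actually opened. A hedged usage sketch follows, assuming a running platypush instance with a configured camera.ffmpeg plugin; the device value and the use of the internal _store_frame helper are illustrative only.

# Hedged sketch: using the reworked context manager from application code.
from platypush.context import get_plugin

camera = get_plugin('camera.ffmpeg')  # any concrete CameraPlugin subclass

# `open` yields the Camera object and, on exit, closes the device only if it
# was actually opened; with stream=True it can also fan frames out to a Redis
# queue chosen by the caller (e.g. an HTTP streaming route).
with camera.open(device=None, stream=False) as cam:
    frame = camera.capture_frame(cam)
    saved_path = camera._store_frame(frame, frames_dir=cam.info.frames_dir)
    print(saved_path)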
@ -256,7 +334,6 @@ class CameraPlugin(Plugin, ABC):
|
||||||
"""
|
"""
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
# noinspection PyShadowingBuiltins
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def store_frame(frame, filepath: str, format: Optional[str] = None):
|
def store_frame(frame, filepath: str, format: Optional[str] = None):
|
||||||
"""
|
"""
|
||||||
|
@ -267,9 +344,10 @@ class CameraPlugin(Plugin, ABC):
|
||||||
:param format: Output format.
|
:param format: Output format.
|
||||||
"""
|
"""
|
||||||
from PIL import Image
|
from PIL import Image
|
||||||
|
|
||||||
if isinstance(frame, bytes):
|
if isinstance(frame, bytes):
|
||||||
frame = list(frame)
|
frame = list(frame)
|
||||||
elif not isinstance(frame, Image.Image):
|
if not isinstance(frame, Image.Image):
|
||||||
frame = Image.fromarray(frame)
|
frame = Image.fromarray(frame)
|
||||||
|
|
||||||
save_args = {}
|
save_args = {}
|
||||||
|
@ -278,16 +356,28 @@ class CameraPlugin(Plugin, ABC):
|
||||||
|
|
||||||
frame.save(filepath, **save_args)
|
frame.save(filepath, **save_args)
|
||||||
|
|
||||||
def _store_frame(self, frame, frames_dir: Optional[str] = None, image_file: Optional[str] = None,
|
def _store_frame(
|
||||||
*args, **kwargs) -> str:
|
self,
|
||||||
|
frame,
|
||||||
|
frames_dir: Optional[str] = None,
|
||||||
|
image_file: Optional[str] = None,
|
||||||
|
*args,
|
||||||
|
**kwargs,
|
||||||
|
) -> str:
|
||||||
"""
|
"""
|
||||||
:meth:`.store_frame` wrapper.
|
:meth:`.store_frame` wrapper.
|
||||||
"""
|
"""
|
||||||
if image_file:
|
if image_file:
|
||||||
filepath = os.path.abspath(os.path.expanduser(image_file))
|
filepath = os.path.abspath(os.path.expanduser(image_file))
|
||||||
else:
|
else:
|
||||||
filepath = os.path.abspath(os.path.expanduser(
|
filepath = os.path.abspath(
|
||||||
os.path.join(frames_dir or '', datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f.jpg'))))
|
os.path.expanduser(
|
||||||
|
os.path.join(
|
||||||
|
frames_dir or '',
|
||||||
|
datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f.jpg'),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
pathlib.Path(filepath).parent.mkdir(mode=0o755, exist_ok=True, parents=True)
|
pathlib.Path(filepath).parent.mkdir(mode=0o755, exist_ok=True, parents=True)
|
||||||
self.store_frame(frame, filepath, *args, **kwargs)
|
self.store_frame(frame, filepath, *args, **kwargs)
|
||||||
|
@ -295,7 +385,9 @@ class CameraPlugin(Plugin, ABC):
|
||||||
|
|
||||||
def start_preview(self, camera: Camera):
|
def start_preview(self, camera: Camera):
|
||||||
if camera.preview and not camera.preview.closed:
|
if camera.preview and not camera.preview.closed:
|
||||||
self.logger.info('A preview window is already active on device {}'.format(camera.info.device))
|
self.logger.info(
|
||||||
|
'A preview window is already active on device %s', camera.info.device
|
||||||
|
)
|
||||||
return
|
return
|
||||||
|
|
||||||
camera.preview = PreviewWriterFactory.get(camera, self)
|
camera.preview = PreviewWriterFactory.get(camera, self)
|
||||||
|
@ -315,7 +407,9 @@ class CameraPlugin(Plugin, ABC):
|
||||||
|
|
||||||
camera.preview = None
|
camera.preview = None
|
||||||
|
|
||||||
def frame_processor(self, frame_queue: Queue, camera: Camera, image_file: Optional[str] = None):
|
def frame_processor(
|
||||||
|
self, frame_queue: Queue, camera: Camera, image_file: Optional[str] = None
|
||||||
|
):
|
||||||
while True:
|
while True:
|
||||||
frame = frame_queue.get()
|
frame = frame_queue.get()
|
||||||
if frame is None:
|
if frame is None:
|
||||||
|
@ -326,18 +420,31 @@ class CameraPlugin(Plugin, ABC):
|
||||||
frame = self.to_grayscale(frame)
|
frame = self.to_grayscale(frame)
|
||||||
|
|
||||||
frame = self.rotate_frame(frame, camera.info.rotate)
|
frame = self.rotate_frame(frame, camera.info.rotate)
|
||||||
frame = self.flip_frame(frame, camera.info.horizontal_flip, camera.info.vertical_flip)
|
frame = self.flip_frame(
|
||||||
|
frame, camera.info.horizontal_flip, camera.info.vertical_flip
|
||||||
|
)
|
||||||
frame = self.scale_frame(frame, camera.info.scale_x, camera.info.scale_y)
|
frame = self.scale_frame(frame, camera.info.scale_x, camera.info.scale_y)
|
||||||
|
|
||||||
for output in camera.get_outputs():
|
for output in camera.get_outputs():
|
||||||
output.write(frame)
|
output.write(frame)
|
||||||
|
|
||||||
if camera.info.frames_dir or image_file:
|
if camera.info.frames_dir or image_file:
|
||||||
self._store_frame(frame=frame, frames_dir=camera.info.frames_dir, image_file=image_file)
|
self._store_frame(
|
||||||
|
frame=frame,
|
||||||
|
frames_dir=camera.info.frames_dir,
|
||||||
|
image_file=image_file,
|
||||||
|
)
|
||||||
|
|
||||||
def capturing_thread(self, camera: Camera, duration: Optional[float] = None, video_file: Optional[str] = None,
|
def capturing_thread(
|
||||||
image_file: Optional[str] = None, n_frames: Optional[int] = None, preview: bool = False,
|
self,
|
||||||
**kwargs):
|
camera: Camera,
|
||||||
|
duration: Optional[float] = None,
|
||||||
|
video_file: Optional[str] = None,
|
||||||
|
image_file: Optional[str] = None,
|
||||||
|
n_frames: Optional[int] = None,
|
||||||
|
preview: bool = False,
|
||||||
|
**kwargs,
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
Camera capturing thread.
|
Camera capturing thread.
|
||||||
|
|
||||||
|
@ -366,25 +473,36 @@ class CameraPlugin(Plugin, ABC):
|
||||||
if duration and camera.info.warmup_seconds:
|
if duration and camera.info.warmup_seconds:
|
||||||
duration = duration + camera.info.warmup_seconds
|
duration = duration + camera.info.warmup_seconds
|
||||||
if video_file:
|
        if video_file:
            camera.file_writer = self._video_writer_class(
                camera=camera, plugin=self, output_file=video_file
            )

        frame_queue = Queue()
        frame_processor = threading.Thread(
            target=self.frame_processor,
            kwargs={
                'frame_queue': frame_queue,
                'camera': camera,
                'image_file': image_file,
            },
        )
        frame_processor.start()
        self.fire_event(CameraRecordingStartedEvent(**evt_args))

        try:
            while camera.start_event.is_set():
                if (duration and time.time() - recording_started_time >= duration) or (
                    n_frames and captured_frames >= n_frames
                ):
                    break

                frame_capture_start = time.time()
                try:
                    frame = self.capture_frame(camera, **kwargs)
                    if not frame:
                        self.logger.warning(
                            'Invalid frame received, terminating the capture session'
                        )
                        break

                    frame_queue.put(frame)

@@ -392,12 +510,20 @@ class CameraPlugin(Plugin, ABC):
                    self.logger.warning(str(e))
                    continue

                if (
                    not n_frames
                    or not camera.info.warmup_seconds
                    or (
                        time.time() - recording_started_time
                        >= camera.info.warmup_seconds
                    )
                ):
                    captured_frames += 1

                if camera.info.fps:
                    wait_time = (1.0 / camera.info.fps) - (
                        time.time() - frame_capture_start
                    )
                    if wait_time > 0:
                        time.sleep(wait_time)
        finally:

@@ -407,7 +533,7 @@ class CameraPlugin(Plugin, ABC):
                try:
                    output.close()
                except Exception as e:
                    self.logger.warning('Could not close camera output: %s', e)

            self.close_device(camera, wait_capture=False)
            frame_processor.join(timeout=5.0)

@@ -426,17 +552,26 @@ class CameraPlugin(Plugin, ABC):
        :param camera: An initialized :class:`platypush.plugins.camera.Camera` object.
        :param preview: Show a preview of the camera frames.
        """
        assert not (
            camera.capture_thread and camera.capture_thread.is_alive()
        ), 'A capture session is already in progress'

        camera.capture_thread = threading.Thread(
            target=self.capturing_thread,
            args=(camera, *args),
            kwargs={'preview': preview, **kwargs},
        )
        camera.capture_thread.start()
        camera.start_event.set()

    @action
    def capture_video(
        self,
        duration: Optional[float] = None,
        video_file: Optional[str] = None,
        preview: bool = False,
        **camera,
    ) -> Optional[Union[str, dict]]:
        """
        Capture a video.

@@ -448,14 +583,20 @@ class CameraPlugin(Plugin, ABC):
            to the recorded resource. Otherwise, it will return the status of the camera device after starting it.
        """
        camera = self.open_device(**camera)
        self.start_camera(
            camera,
            duration=duration,
            video_file=video_file,
            frames_dir=None,
            image_file=None,
            preview=preview,
        )

        if duration:
            self.wait_capture(camera)
            return video_file

        return self.status(camera.info.device).output

    @action
    def stop_capture(self, device: Optional[Union[int, str]] = None):

@@ -465,12 +606,12 @@ class CameraPlugin(Plugin, ABC):
        :param device: Name/path/ID of the device to stop (default: all the active devices).
        """
        devices = self._devices.copy()
        stop_devices = list(devices.values())
        if device:
            stop_devices = [self._devices[device]] if device in self._devices else []

        for dev in stop_devices:
            self.close_device(dev)

    @action
    def capture_image(self, image_file: str, preview: bool = False, **camera) -> str:

@@ -484,15 +625,18 @@ class CameraPlugin(Plugin, ABC):
        """

        with self.open(**camera) as camera:
            warmup_frames = (
                camera.info.warmup_frames if camera.info.warmup_frames else 1
            )
            self.start_camera(
                camera, image_file=image_file, n_frames=warmup_frames, preview=preview
            )
            self.wait_capture(camera)

        return image_file

    @action
    def take_picture(self, image_file: str, **camera) -> str:
        """
        Alias for :meth:`.capture_image`.

@@ -501,11 +645,16 @@ class CameraPlugin(Plugin, ABC):
        :param preview: Show a preview of the camera frames.
        :return: The local path to the saved image.
        """
        return str(self.capture_image(image_file, **camera).output)

    @action
    def capture_sequence(
        self,
        duration: Optional[float] = None,
        n_frames: Optional[int] = None,
        preview: bool = False,
        **camera,
    ) -> Optional[str]:
        """
        Capture a sequence of frames from a camera and store them to a directory.

@@ -517,12 +666,16 @@ class CameraPlugin(Plugin, ABC):
        :return: The directory where the image files have been stored.
        """
        with self.open(**camera) as camera:
            self.start_camera(
                camera, duration=duration, n_frames=n_frames, preview=preview
            )
            self.wait_capture(camera)
            return camera.info.frames_dir

    @action
    def capture_preview(
        self, duration: Optional[float] = None, n_frames: Optional[int] = None, **camera
    ) -> dict:
        """
        Start a camera preview session.

@@ -533,16 +686,18 @@ class CameraPlugin(Plugin, ABC):
        """
        camera = self.open_device(frames_dir=None, **camera)
        self.start_camera(camera, duration=duration, n_frames=n_frames, preview=True)
        return self.status(camera.info.device)  # type: ignore

    @staticmethod
    def _prepare_server_socket(camera: Camera) -> socket.socket:
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_socket.bind(
            (  # lgtm [py/bind-socket-all-network-interfaces]
                camera.info.bind_address or '0.0.0.0',
                camera.info.listen_port,
            )
        )
        server_socket.listen(1)
        server_socket.settimeout(1)
        return server_socket

@@ -550,16 +705,18 @@ class CameraPlugin(Plugin, ABC):
    def _accept_client(self, server_socket: socket.socket) -> Optional[IO]:
        try:
            sock = server_socket.accept()[0]
            self.logger.info('Accepted client connection from %s', sock.getpeername())
            return sock.makefile('wb')
        except socket.timeout:
            return

    def streaming_thread(
        self, camera: Camera, stream_format: str, duration: Optional[float] = None
    ):
        streaming_started_time = time.time()
        server_socket = self._prepare_server_socket(camera)
        sock = None
        self.logger.info('Starting streaming on port %s', camera.info.listen_port)

        try:
            while camera.stream_event.is_set():

@@ -571,36 +728,43 @@ class CameraPlugin(Plugin, ABC):
                    continue

                if camera.info.device not in self._devices:
                    info = asdict(camera.info)
                    info['stream_format'] = stream_format
                    camera = self.open_device(stream=True, **info)

                assert camera.stream, 'No camera stream available'
                camera.stream.sock = sock
                self.start_camera(
                    camera, duration=duration, frames_dir=None, image_file=None
                )
        finally:
            self._cleanup_stream(camera, server_socket, sock)
            self.logger.info('Stopped camera stream')

    def _cleanup_stream(
        self, camera: Camera, server_socket: socket.socket, client: Optional[IO]
    ):
        if client:
            try:
                client.close()
            except Exception as e:
                self.logger.warning('Error on client socket close: %s', e)

        try:
            server_socket.close()
        except Exception as e:
            self.logger.warning('Error on server socket close: %s', e)

        if camera.stream:
            try:
                camera.stream.close()
            except Exception as e:
                self.logger.warning('Error while closing the encoding stream: %s', e)

    @action
    def start_streaming(
        self, duration: Optional[float] = None, stream_format: str = 'mkv', **camera
    ) -> dict:
        """
        Expose the video stream of a camera over a TCP connection.

@@ -610,18 +774,28 @@ class CameraPlugin(Plugin, ABC):
        :return: The status of the device.
        """
        camera = self.open_device(stream=True, stream_format=stream_format, **camera)
        return self._start_streaming(camera, duration, stream_format)  # type: ignore

    def _start_streaming(
        self, camera: Camera, duration: Optional[float], stream_format: str
    ):
        assert camera.info.listen_port, 'No listen_port specified/configured'
        assert (
            not camera.stream_event.is_set() and camera.info.device not in self._streams
        ), f'A streaming session is already running for device {camera.info.device}'
        assert camera.info.device, 'No device name available'

        self._streams[camera.info.device] = camera
        camera.stream_event.set()

        camera.stream_thread = threading.Thread(
            target=self.streaming_thread,
            kwargs={
                'camera': camera,
                'duration': duration,
                'stream_format': stream_format,
            },
        )
        camera.stream_thread.start()
        return self.status(camera.info.device)

@@ -633,12 +807,12 @@ class CameraPlugin(Plugin, ABC):
        :param device: Name/path/ID of the device to stop (default: all the active devices).
        """
        streams = self._streams.copy()
        stop_devices = list(streams.values())
        if device:
            stop_devices = [self._streams[device]] if device in self._streams else []

        for dev in stop_devices:
            self._stop_streaming(dev)

    def _stop_streaming(self, camera: Camera):
        camera.stream_event.clear()

@@ -654,11 +828,18 @@ class CameraPlugin(Plugin, ABC):
            return {}

        return {
            **asdict(camera.info),
            'active': bool(camera.capture_thread and camera.capture_thread.is_alive()),
            'capturing': bool(
                camera.capture_thread
                and camera.capture_thread.is_alive()
                and camera.start_event.is_set()
            ),
            'streaming': (
                camera.stream_thread
                and camera.stream_thread.is_alive()
                and camera.stream_event.is_set()
            ),
        }

    @action

@@ -670,10 +851,7 @@ class CameraPlugin(Plugin, ABC):
        if device:
            return self._status(device)

        return {id: self._status(id) for id in self._devices}

    @staticmethod
    def transform_frame(frame, color_transform):

@@ -690,6 +868,7 @@ class CameraPlugin(Plugin, ABC):
        :param frame: Image frame (default: a ``PIL.Image`` object).
        """
        from PIL import ImageOps

        return ImageOps.grayscale(frame)

    @staticmethod

@@ -724,7 +903,9 @@ class CameraPlugin(Plugin, ABC):
        return frame

    @staticmethod
    def scale_frame(
        frame, scale_x: Optional[float] = None, scale_y: Optional[float] = None
    ):
        """
        Frame scaling logic. The default implementation assumes that frame is a ``PIL.Image`` object.

@@ -733,6 +914,7 @@ class CameraPlugin(Plugin, ABC):
        :param scale_y: Y-scale factor.
        """
        from PIL import Image

        if not (scale_x and scale_y) or (scale_x == 1 and scale_y == 1):
            return frame

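For context on how the refactored actions above fit together, here is a minimal usage sketch. It is not part of the changeset: it assumes a concrete backend such as camera.ffmpeg is installed and configured (any CameraPlugin subclass exposes the same actions), that a listen_port is configured for streaming, and that the output paths are writable.

# Usage sketch only; plugin name and output paths are illustrative assumptions.
from platypush.context import get_plugin

cam = get_plugin('camera.ffmpeg')

# Record a 10-second clip; with an explicit duration the action waits for the
# capture to finish and the response output is the path of the recorded file.
resp = cam.capture_video(duration=10, video_file='/tmp/clip.mp4')
print(resp.output)

# Grab a single still image.
cam.capture_image(image_file='/tmp/snapshot.jpg')

# Expose the stream over TCP on the configured listen_port, then stop it.
cam.start_streaming(stream_format='mkv')
cam.stop_streaming()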
platypush/plugins/camera/model/camera.py

@@ -1,11 +1,15 @@
import math
import threading
from dataclasses import asdict, dataclass
from typing import Optional, Union, Tuple, Set

import numpy as np

from platypush.plugins.camera.model.writer import (
    StreamWriter,
    VideoWriter,
    FileVideoWriter,
)
from platypush.plugins.camera.model.writer.preview import PreviewWriter


@@ -13,8 +17,8 @@ from platypush.plugins.camera.model.writer.preview import PreviewWriter
class CameraInfo:
    device: Optional[Union[int, str]]
    bind_address: Optional[str] = None
    capture_timeout: float = 0
    color_transform: Optional[Union[int, str]] = None
    ffmpeg_bin: Optional[str] = None
    fps: Optional[float] = None
    frames_dir: Optional[str] = None

@@ -32,42 +36,15 @@ class CameraInfo:
    stream_format: Optional[str] = None
    vertical_flip: bool = False
    warmup_frames: int = 0
    warmup_seconds: float = 0

    def set(self, **kwargs):
        for k, v in kwargs.items():
            if hasattr(self, k):
                setattr(self, k, v)

    def clone(self):
        return self.__class__(**asdict(self))


@dataclass

@@ -97,10 +74,15 @@ class Camera:
        return writers

    def effective_resolution(self) -> Tuple[int, int]:
        """
        Calculates the effective resolution of the camera in pixels, taking
        into account the base resolution, the scale and the rotation.
        """
        assert self.info.resolution, 'No base resolution specified'
        rot = (self.info.rotate or 0) * math.pi / 180
        sin = math.sin(rot)
        cos = math.cos(rot)
        scale = np.array([[self.info.scale_x or 1.0, self.info.scale_y or 1.0]])
        resolution = np.array([[self.info.resolution[0], self.info.resolution[1]]])
        rot_matrix = np.array([[sin, cos], [cos, sin]])
        resolution = (scale * abs(np.cross(rot_matrix, resolution)))[0]

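The sketch below re-derives the rotation/scale computation used by effective_resolution() as a standalone function, so the behaviour can be checked outside of the plugin. It is illustrative only and mirrors the lines above; the input values are assumptions.

# Standalone re-derivation of the effective_resolution() math, for illustration.
import math
import numpy as np

def effective_resolution(resolution, rotate=0.0, scale_x=1.0, scale_y=1.0):
    rot = rotate * math.pi / 180
    sin, cos = math.sin(rot), math.cos(rot)
    scale = np.array([[scale_x, scale_y]])
    res = np.array([[resolution[0], resolution[1]]])
    rot_matrix = np.array([[sin, cos], [cos, sin]])
    return (scale * abs(np.cross(rot_matrix, res)))[0]

# 640x480 rotated by 90 degrees and doubled on the X axis yields
# approximately (960, 640).
print(effective_resolution((640, 480), rotate=90, scale_x=2.0))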
platypush/plugins/camera/model/writer/__init__.py

@@ -1,7 +1,7 @@
import io
import logging
import multiprocessing
import os
import time

from abc import ABC, abstractmethod

@@ -9,6 +9,8 @@ from typing import Optional, IO

from PIL.Image import Image

from platypush.utils import get_redis


class VideoWriter(ABC):
    """

@@ -26,11 +28,11 @@ class VideoWriter(ABC):
        self.closed = False

    @abstractmethod
    def write(self, image: Image):
        """
        Write an image to the channel.

        :param image: PIL Image instance.
        """
        raise NotImplementedError()

@@ -49,7 +51,7 @@ class VideoWriter(ABC):
        """
        return self

    def __exit__(self, *_, **__):
        """
        Context manager-based interface.
        """

@@ -60,8 +62,9 @@ class FileVideoWriter(VideoWriter, ABC):
    """
    Abstract class to handle frames-to-video file operations.
    """

    def __init__(self, *args, output_file: str, **kwargs):
        super().__init__(self, *args, **kwargs)
        self.output_file = os.path.abspath(os.path.expanduser(output_file))


@@ -69,12 +72,20 @@ class StreamWriter(VideoWriter, ABC):
    """
    Abstract class for camera streaming operations.
    """

    def __init__(
        self,
        *args,
        sock: Optional[IO] = None,
        redis_queue: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.frame: Optional[bytes] = None
        self.frame_time: Optional[float] = None
        self.buffer = io.BytesIO()
        self.ready = multiprocessing.Condition()
        self.redis_queue = redis_queue
        self.sock = sock

    def write(self, image: Image):

@@ -101,6 +112,9 @@ class StreamWriter(VideoWriter, ABC):
                self.logger.info('Client connection closed')
                self.close()

        if self.redis_queue:
            get_redis().publish(self.redis_queue, data)

    @abstractmethod
    def encode(self, image: Image) -> bytes:
        """

@@ -117,16 +131,20 @@ class StreamWriter(VideoWriter, ABC):
            try:
                self.sock.close()
            except Exception as e:
                self.logger.warning('Could not close camera resource: %s', e)

        super().close()

    @staticmethod
    def get_class_by_name(name: str):
        from platypush.plugins.camera.model.writer.index import StreamHandlers

        name = name.upper()
        assert hasattr(
            StreamHandlers, name
        ), f'No such stream handler: {name}. Supported types: ' + (
            ', '.join([hndl.name for hndl in list(StreamHandlers)])
        )

        return getattr(StreamHandlers, name).value

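The new redis_queue parameter above lets a StreamWriter publish every encoded frame to a Redis channel in addition to the client socket, which is what the new streaming endpoints consume. A minimal consumer sketch follows; it is not part of the diff, and the channel name is a hypothetical example.

# Minimal consumer sketch: read frames that a StreamWriter publishes when it
# was constructed with redis_queue='camera/stream' (hypothetical channel name).
from platypush.utils import get_redis

sub = get_redis().pubsub()
sub.subscribe('camera/stream')

for msg in sub.listen():
    if msg.get('type') != 'message':
        continue
    frame = msg['data']  # encoded frame bytes, as published by StreamWriter.write()
    # ...feed the frame to a muxer, an HTTP response, a websocket, etc.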
platypush/plugins/camera/model/writer/ffmpeg.py

@@ -8,7 +8,11 @@ from typing import Optional, Tuple
from PIL.Image import Image

from platypush.plugins.camera.model.camera import Camera
from platypush.plugins.camera.model.writer import (
    VideoWriter,
    FileVideoWriter,
    StreamWriter,
)


class FFmpegWriter(VideoWriter, ABC):

@@ -16,9 +20,17 @@ class FFmpegWriter(VideoWriter, ABC):
    Generic FFmpeg encoder for camera frames.
    """

    def __init__(
        self,
        *args,
        input_file: str = '-',
        input_format: str = 'rawvideo',
        output_file: str = '-',
        output_format: Optional[str] = None,
        pix_fmt: Optional[str] = None,
        output_opts: Optional[Tuple] = None,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)

        self.input_file = input_file

@@ -29,21 +41,34 @@ class FFmpegWriter(VideoWriter, ABC):
        self.pix_fmt = pix_fmt
        self.output_opts = output_opts or ()

        self.logger.info('Starting FFmpeg. Command: ' + ' '.join(self.ffmpeg_args))
        self.ffmpeg = subprocess.Popen(
            self.ffmpeg_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE
        )

    @property
    def ffmpeg_args(self):
        return [
            self.camera.info.ffmpeg_bin,
            '-y',
            '-f',
            self.input_format,
            *(('-pix_fmt', self.pix_fmt) if self.pix_fmt else ()),
            '-s',
            f'{self.width}x{self.height}',
            '-r',
            str(self.camera.info.fps),
            '-i',
            self.input_file,
            *(('-f', self.output_format) if self.output_format else ()),
            *self.output_opts,
            *(
                ('-vcodec', self.camera.info.output_codec)
                if self.camera.info.output_codec
                else ()
            ),
            self.output_file,
        ]

    def is_closed(self):
        return self.closed or not self.ffmpeg or self.ffmpeg.poll() is not None

@@ -55,7 +80,7 @@ class FFmpegWriter(VideoWriter, ABC):
        try:
            self.ffmpeg.stdin.write(image.convert('RGB').tobytes())
        except Exception as e:
            self.logger.warning('FFmpeg send error: %s', e)
            self.close()

    def close(self):

@@ -63,7 +88,7 @@ class FFmpegWriter(VideoWriter, ABC):
        if self.ffmpeg and self.ffmpeg.stdin:
            try:
                self.ffmpeg.stdin.close()
            except OSError:
                pass

        if self.ffmpeg:

@@ -77,7 +102,7 @@ class FFmpegWriter(VideoWriter, ABC):
        if self.ffmpeg and self.ffmpeg.stdout:
            try:
                self.ffmpeg.stdout.close()
            except OSError:
                pass

        self.ffmpeg = None

@@ -98,10 +123,26 @@ class FFmpegStreamWriter(StreamWriter, FFmpegWriter, ABC):
    Stream camera frames using FFmpeg.
    """

    def __init__(
        self, *args, output_format: str, output_opts: Optional[Tuple] = None, **kwargs
    ):
        super().__init__(
            *args,
            pix_fmt='rgb24',
            output_format=output_format,
            output_opts=output_opts
            or (
                '-tune',
                'zerolatency',
                '-preset',
                'superfast',
                '-trellis',
                '0',
                '-fflags',
                'nobuffer',
            ),
            **kwargs,
        )
        self._reader = threading.Thread(target=self._reader_thread)
        self._reader.start()

@@ -115,7 +156,7 @@ class FFmpegStreamWriter(StreamWriter, FFmpegWriter, ABC):
            try:
                data = self.ffmpeg.stdout.read(1 << 15)
            except Exception as e:
                self.logger.warning('FFmpeg reader error: %s', e)
                break

            if not data:

@@ -123,7 +164,7 @@ class FFmpegStreamWriter(StreamWriter, FFmpegWriter, ABC):

            if self.frame is None:
                latency = time.time() - start_time
                self.logger.info('FFmpeg stream latency: %d secs', latency)

            with self.ready:
                self.frame = data

@@ -140,12 +181,16 @@ class FFmpegStreamWriter(StreamWriter, FFmpegWriter, ABC):
        try:
            self.ffmpeg.stdin.write(data)
        except Exception as e:
            self.logger.warning('FFmpeg send error: %s', e)
            self.close()

    def close(self):
        super().close()
        if (
            self._reader
            and self._reader.is_alive()
            and threading.get_ident() != self._reader.ident
        ):
            self._reader.join(timeout=5.0)
            self._reader = None

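To make the ffmpeg_args property above more concrete, the snippet below shows roughly what it expands to for an FFmpegStreamWriter. It is an illustration, not part of the diff; the binary path, frame rate, frame size and container format are assumed values.

# Illustration: FFmpegStreamWriter.ffmpeg_args for a hypothetical camera with
# ffmpeg_bin='ffmpeg', fps=16 and 640x480 frames, streaming in 'matroska'
# format with the default output_opts.
expected_args = [
    'ffmpeg', '-y',
    '-f', 'rawvideo',
    '-pix_fmt', 'rgb24',
    '-s', '640x480',
    '-r', '16',
    '-i', '-',
    '-f', 'matroska',
    '-tune', 'zerolatency', '-preset', 'superfast', '-trellis', '0',
    '-fflags', 'nobuffer',
    '-',
]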
(File diff suppressed because it is too large.)

platypush/plugins/sound/_converters/__init__.py (new file, 10 lines)

@@ -0,0 +1,10 @@
from ._base import AudioConverter
from ._from_raw import RawInputAudioConverter
from ._to_raw import RawOutputAudioConverter, RawOutputAudioFromFileConverter

__all__ = [
    'AudioConverter',
    'RawInputAudioConverter',
    'RawOutputAudioConverter',
    'RawOutputAudioFromFileConverter',
]
platypush/plugins/sound/_converters/_base.py (new file, 331 lines)

@@ -0,0 +1,331 @@
from abc import ABC, abstractmethod
import asyncio
from asyncio.subprocess import PIPE
from logging import getLogger
from queue import Empty, Queue
from threading import Event, RLock, Thread
from typing import Any, Callable, Coroutine, Iterable, Optional, Self

from platypush.context import get_or_create_event_loop

_dtype_to_ffmpeg_format = {
    'int8': 's8',
    'uint8': 'u8',
    'int16': 's16le',
    'uint16': 'u16le',
    'int32': 's32le',
    'uint32': 'u32le',
    'float32': 'f32le',
    'float64': 'f64le',
}
"""
Supported raw types:
'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'float32', 'float64'
"""


class AudioConverter(Thread, ABC):
    """
    Base class for an ffmpeg audio converter instance.
    """

    _format_to_ffmpeg_args = {
        'wav': ('-f', 'wav'),
        'ogg': ('-f', 'ogg'),
        'mp3': ('-f', 'mp3'),
        'aac': ('-f', 'adts'),
        'flac': ('-f', 'flac'),
    }

    def __init__(
        self,
        *args,
        ffmpeg_bin: str,
        sample_rate: int,
        channels: int,
        volume: float,
        dtype: str,
        chunk_size: int,
        format: Optional[str] = None,  # pylint: disable=redefined-builtin
        on_exit: Optional[Callable[[], Any]] = None,
        **kwargs,
    ):
        """
        :param ffmpeg_bin: Path to the ffmpeg binary.
        :param sample_rate: The sample rate of the input/output audio.
        :param channels: The number of channels of the input/output audio.
        :param volume: Audio volume, as a percentage between 0 and 100.
        :param dtype: The (numpy) data type of the raw input/output audio.
        :param chunk_size: Number of bytes that will be read at once from the
            ffmpeg process.
        :param format: Input/output audio format.
        :param on_exit: Function to call when the ffmpeg process exits.
        """
        super().__init__(*args, **kwargs)

        ffmpeg_format = _dtype_to_ffmpeg_format.get(dtype)
        assert ffmpeg_format, (
            f'Unsupported data type: {dtype}. Supported data types: '
            f'{list(_dtype_to_ffmpeg_format.keys())}'
        )

        self._ffmpeg_bin = ffmpeg_bin
        self._ffmpeg_format = ffmpeg_format
        self._ffmpeg_task: Optional[Coroutine] = None
        self._sample_rate = sample_rate
        self._channels = channels
        self._chunk_size = chunk_size
        self._format = format
        self._closed = False
        self._out_queue = Queue()
        self.ffmpeg = None
        self.volume = volume
        self.logger = getLogger(__name__)
        self._loop = None
        self._should_stop = Event()
        self._stop_lock = RLock()
        self._on_exit = on_exit
        self._ffmpeg_terminated = Event()

    def __enter__(self) -> Self:
        """
        Audio converter context manager.

        It starts and registers the ffmpeg converter process.
        """
        self.start()
        return self

    def __exit__(self, *_, **__):
        """
        Audio converter context manager.

        It stops and unregisters the ffmpeg converter process.
        """
        self.stop()

    def _check_ffmpeg(self):
        assert not self.terminated, 'The ffmpeg process has already terminated'

    @property
    def gain(self) -> float:
        return self.volume / 100

    @property
    def terminated(self) -> bool:
        return self._ffmpeg_terminated.is_set()

    @property
    def _default_args(self) -> Iterable[str]:
        """
        Set of arguments common to all ffmpeg converter instances.
        """
        return ('-hide_banner', '-loglevel', 'warning', '-y')

    @property
    @abstractmethod
    def _input_format_args(self) -> Iterable[str]:
        """
        Ffmpeg audio input arguments.
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def _output_format_args(self):
        """
        Ffmpeg audio output arguments.
        """
        raise NotImplementedError()

    @property
    def _channel_layout_args(self) -> Iterable[str]:
        """
        Set of extra ffmpeg arguments for the channel layout.
        """
        args = ('-ac', str(self._channels))
        if self._channels == 1:
            return args + ('-channel_layout', 'mono')
        if self._channels == 2:
            return args + ('-channel_layout', 'stereo')
        return args

    @property
    def _raw_ffmpeg_args(self) -> Iterable[str]:
        """
        Ffmpeg arguments for raw audio input/output given the current
        configuration.
        """
        return (
            '-f',
            self._ffmpeg_format,
            '-ar',
            str(self._sample_rate),
            *self._channel_layout_args,
        )

    @property
    def _audio_volume_args(self) -> Iterable[str]:
        """
        Ffmpeg audio volume arguments.
        """
        return ('-filter:a', f'volume={self.gain}')

    @property
    def _input_source_args(self) -> Iterable[str]:
        """
        Default arguments for the ffmpeg input source (default: ``-i pipe:``,
        ffmpeg will read from a pipe filled by the application).
        """
        return ('-i', 'pipe:')

    @property
    def _output_target_args(self) -> Iterable[str]:
        """
        Default arguments for the ffmpeg output target (default: ``pipe:``,
        ffmpeg will write the output to a pipe read by the application).
        """
        return ('pipe:',)

    @property
    def _converter_stdin(self) -> Optional[int]:
        """
        Default stdin file descriptor to be used by the ffmpeg converter.

        Default: ``PIPE``, as the ffmpeg process by default reads audio frames
        from the stdin.
        """
        return PIPE

    @property
    def _compressed_ffmpeg_args(self) -> Iterable[str]:
        """
        Ffmpeg arguments for the compressed audio given the current
        configuration.
        """
        if not self._format:
            return ()

        ffmpeg_args = self._format_to_ffmpeg_args.get(self._format)
        assert ffmpeg_args, (
            f'Unsupported output format: {self._format}. Supported formats: '
            f'{list(self._format_to_ffmpeg_args.keys())}'
        )

        return ffmpeg_args

    async def _audio_proxy(self, timeout: Optional[float] = None):
        """
        Proxy the converted audio stream to the output queue for downstream
        consumption.
        """
        ffmpeg_args = (
            self._ffmpeg_bin,
            *self._default_args,
            *self._input_format_args,
            *self._input_source_args,
            *self._output_format_args,
            *self._output_target_args,
        )

        self.ffmpeg = await asyncio.create_subprocess_exec(
            *ffmpeg_args,
            stdin=self._converter_stdin,
            stdout=PIPE,
        )

        self.logger.info('Running ffmpeg: %s', ' '.join(ffmpeg_args))

        try:
            await asyncio.wait_for(self.ffmpeg.wait(), 0.1)
        except asyncio.TimeoutError:
            pass

        while (
            self._loop
            and self.ffmpeg
            and self.ffmpeg.returncode is None
            and not self.should_stop
        ):
            self._check_ffmpeg()
            assert (
                self.ffmpeg and self.ffmpeg.stdout
            ), 'The stdout is closed for the ffmpeg process'

            self._ffmpeg_terminated.clear()
            try:
                data = await asyncio.wait_for(
                    self.ffmpeg.stdout.read(self._chunk_size), timeout
                )
                self._out_queue.put(data)
            except asyncio.TimeoutError:
                self._out_queue.put(b'')

    def write(self, data: bytes):
        """
        Write raw data to the ffmpeg process.
        """
        self._check_ffmpeg()
        assert (
            self.ffmpeg and self._loop and self.ffmpeg.stdin
        ), 'The stdin is closed for the ffmpeg process'

        self._loop.call_soon_threadsafe(self.ffmpeg.stdin.write, data)

    def read(self, timeout: Optional[float] = None) -> Optional[bytes]:
        """
        Read the next chunk of converted audio bytes from the converter queue.
        """
        try:
            return self._out_queue.get(timeout=timeout)
        except Empty:
            return None

    def run(self):
        """
        Main runner. It runs the audio proxy in a loop and cleans up everything
        in case of stop/failure.
        """
        super().run()
        self._loop = get_or_create_event_loop()
        try:
            self._ffmpeg_task = self._audio_proxy(timeout=1)
            self._loop.run_until_complete(self._ffmpeg_task)
        except RuntimeError as e:
            self.logger.warning(e)
        finally:
            self.stop()

    def stop(self):
        """
        Sets the stop event, kills the ffmpeg process and resets the context.
        """
        with self._stop_lock:
            self._should_stop.set()
            if self._ffmpeg_task:
                self._ffmpeg_task.close()
                self._ffmpeg_task = None

            try:
                if self.ffmpeg and self.ffmpeg.returncode is None:
                    self.ffmpeg.kill()
            except ProcessLookupError:
                pass

            self.ffmpeg = None
            self._loop = None

            self._ffmpeg_terminated.set()

            if self._on_exit:
                self._on_exit()

    @property
    def should_stop(self) -> bool:
        """
        Proxy property for the ``_should_stop`` event.
        """
        return self._should_stop.is_set()


# vim:sw=4:ts=4:et:
platypush/plugins/sound/_converters/_from_raw.py (new file, 23 lines)

@@ -0,0 +1,23 @@
from typing import Iterable
from typing_extensions import override

from ._base import AudioConverter


class RawInputAudioConverter(AudioConverter):
    """
    Converts raw audio input to a compressed media format.
    """

    @property
    @override
    def _input_format_args(self) -> Iterable[str]:
        return self._raw_ffmpeg_args

    @property
    @override
    def _output_format_args(self) -> Iterable[str]:
        return self._compressed_ffmpeg_args


# vim:sw=4:ts=4:et:
platypush/plugins/sound/_converters/_to_raw.py (new file, 38 lines)

@@ -0,0 +1,38 @@
from typing import Iterable
from typing_extensions import override

from ._base import AudioConverter


class RawOutputAudioConverter(AudioConverter):
    """
    Converts input audio to raw audio output.
    """

    @property
    @override
    def _input_format_args(self) -> Iterable[str]:
        return self._compressed_ffmpeg_args

    @property
    @override
    def _output_format_args(self) -> Iterable[str]:
        return self._raw_ffmpeg_args


class RawOutputAudioFromFileConverter(RawOutputAudioConverter):
    """
    Converts an input file to raw audio output.
    """

    def __init__(self, *args, infile: str, **kwargs):
        super().__init__(*args, **kwargs)
        self.infile = infile

    @property
    @override
    def _input_source_args(self) -> Iterable[str]:
        return ('-i', self.infile)


# vim:sw=4:ts=4:et:
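To show how the converter classes above are meant to be driven, here is a usage sketch that compresses raw PCM into MP3 chunks through a RawInputAudioConverter. It is not part of the diff; the ffmpeg path, sample rate, chunk size and the dummy input bytes are illustrative assumptions, and in practice the sound plugin feeds the converter from its own audio threads.

# Usage sketch, assuming ffmpeg is available on the PATH.
from platypush.plugins.sound._converters import RawInputAudioConverter

converter = RawInputAudioConverter(
    ffmpeg_bin='ffmpeg',
    sample_rate=44100,
    channels=2,
    volume=100,
    dtype='int16',
    chunk_size=1 << 15,
    format='mp3',
)

with converter:                        # starts the ffmpeg converter thread
    converter.write(b'\x00' * 4096)    # feed raw int16 PCM frames
    chunk = converter.read(timeout=1)  # read back converted (mp3) bytes
    if chunk:
        print(f'got {len(chunk)} converted bytes')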
platypush/plugins/sound/_manager/__init__.py (new file, 3 lines)

@@ -0,0 +1,3 @@
from ._main import AudioManager

__all__ = ["AudioManager"]
platypush/plugins/sound/_manager/_device.py (new file, 91 lines)

@@ -0,0 +1,91 @@
from typing import List, Optional

import sounddevice as sd

from .._model import AudioDevice, DeviceType, StreamType


class DeviceManager:
    """
    The device manager is responsible for managing the virtual audio device
    abstractions exposed by the OS.

    For example, on a pure ALSA system virtual devices are usually mapped to
    the physical audio devices available on the system.

    On a system that runs through PulseAudio or Jack, there may be a
    ``default`` virtual device whose sound card mappings may be managed by the
    audio server.
    """

    def __init__(
        self,
        input_device: Optional[DeviceType] = None,
        output_device: Optional[DeviceType] = None,
    ):
        """
        :param input_device: The default input device to use (by index or name).
        :param output_device: The default output device to use (by index or name).
        """
        self.input_device = (
            self.get_device(input_device, StreamType.INPUT)
            if input_device is not None
            else None
        )

        self.output_device = (
            self.get_device(output_device, StreamType.OUTPUT)
            if output_device is not None
            else None
        )

    def get_devices(
        self, type: Optional[StreamType] = None  # pylint: disable=redefined-builtin
    ) -> List[AudioDevice]:
        """
        Get available audio devices.

        :param type: The type of devices to filter (default: return all).
        """
        devices: List[dict] = sd.query_devices()  # type: ignore
        if type:
            devices = [dev for dev in devices if dev.get(f'max_{type.value}_channels')]

        return [AudioDevice(**info) for info in devices]

    def get_device(
        self,
        device: Optional[DeviceType] = None,
        type: Optional[StreamType] = None,  # pylint: disable=redefined-builtin
    ) -> AudioDevice:
        """
        Search for a device.

        Either ``device`` or ``type`` have to be specified.

        :param device: The device to search for, either by index or name. If
            not specified, then the default device for the given type is
            returned.
        :param type: The type of the device to search.
        """
        assert device or type, 'Please specify either device or type'
        if device is None:
            if type == StreamType.INPUT and self.input_device is not None:
                return self.input_device
            if type == StreamType.OUTPUT and self.output_device is not None:
                return self.output_device

        try:
            info: dict = sd.query_devices(
                kind=type.value if type else None, device=device  # type: ignore
            )
        except sd.PortAudioError as e:
            raise AssertionError(
                f'Could not get device for type={type} and device={device}: {e}',
                type,
                device,
                e,
            ) from e

        assert info, f'No such device: {device}'
        return AudioDevice(**info)
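A quick sketch of how the DeviceManager above can be queried; it is not part of the diff and only exercises the methods shown above (device attributes used here, index and default_samplerate, are the ones the manager code itself relies on).

# Sketch: enumerate output devices and resolve the default input device.
from platypush.plugins.sound._manager._device import DeviceManager
from platypush.plugins.sound._model import StreamType

manager = DeviceManager()

for dev in manager.get_devices(type=StreamType.OUTPUT):
    print(dev.index, dev.default_samplerate)

default_input = manager.get_device(type=StreamType.INPUT)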
291
platypush/plugins/sound/_manager/_main.py
Normal file
291
platypush/plugins/sound/_manager/_main.py
Normal file
|
@ -0,0 +1,291 @@
|
||||||
|
from logging import getLogger
|
||||||
|
import os
|
||||||
|
import stat
|
||||||
|
from threading import Event
|
||||||
|
from time import time
|
||||||
|
from typing import Iterable, List, Optional, Union
|
||||||
|
|
||||||
|
from .._model import AudioDevice, DeviceType, StreamType
|
||||||
|
from .._streams import AudioPlayer, AudioRecorder, AudioThread
|
||||||
|
from ._device import DeviceManager
|
||||||
|
from ._stream import StreamManager
|
||||||
|
|
||||||
|
|
||||||
|
class AudioManager:
|
||||||
|
"""
|
||||||
|
The audio manager is responsible for managing multiple audio controllers and
|
||||||
|
their access to audio resources.
|
||||||
|
|
||||||
|
It main purpose is to act as a proxy/facade between the high-level audio
|
||||||
|
plugin and the audio functionalities (allocating streams, managing the state
|
||||||
|
of the player and recorder processes, etc.).
|
||||||
|
"""

    _default_signal_timeout = 2

    def __init__(
        self,
        should_stop: Event,
        input_blocksize: int,
        output_blocksize: int,
        input_device: Optional[DeviceType] = None,
        output_device: Optional[DeviceType] = None,
        queue_size: Optional[int] = None,
    ):
        """
        :param should_stop: Event to synchronize the audio manager stop.
        :param input_blocksize: Block size for the input stream.
        :param output_blocksize: Block size for the output stream.
        :param input_device: Default device to use for the input stream.
        :param output_device: Default device to use for the output stream.
        :param queue_size: Maximum size of the audio queues.
        """
        self._should_stop = should_stop
        self._device_manager = DeviceManager(
            input_device=input_device, output_device=output_device
        )

        self._stream_manager = StreamManager(device_manager=self._device_manager)
        self.logger = getLogger(__name__)
        self.input_blocksize = input_blocksize
        self.output_blocksize = output_blocksize
        self.queue_size = queue_size

    def create_player(
        self,
        device: DeviceType,
        channels: int,
        volume: float,
        infile: Optional[str] = None,
        sound: Optional[Union[dict, Iterable[dict]]] = None,
        duration: Optional[float] = None,
        sample_rate: Optional[int] = None,
        dtype: str = 'int16',
        blocksize: Optional[int] = None,
        latency: Union[float, str] = 'high',
        stream_name: Optional[str] = None,
    ) -> AudioPlayer:
        """
        Create an audio player thread.

        :param device: Audio device to use.
        :param channels: Number of output channels.
        :param volume: Output volume, between 0 and 100.
        :param infile: File or URL to play.
        :param sound: As an alternative to a file/URL, you can play synthetic
            sounds.
        :param duration: Duration of the stream in seconds.
        :param sample_rate: Sample rate of the stream.
        :param dtype: Data type of the stream.
        :param blocksize: Block size of the stream.
        :param latency: Latency of the stream.
        :param stream_name: Name of the stream.
        """
        dev = self._device_manager.get_device(device, type=StreamType.OUTPUT)
        player = AudioPlayer.build(
            device=device,
            infile=infile,
            sound=sound,
            duration=duration,
            volume=volume,
            sample_rate=sample_rate or dev.default_samplerate,
            dtype=dtype,
            blocksize=blocksize or self.output_blocksize,
            latency=latency,
            channels=channels,
            queue_size=self.queue_size,
            should_stop=self._should_stop,
        )

        self._stream_manager.register(
            player, dev, StreamType.OUTPUT, stream_name=stream_name
        )
        return player

    def create_recorder(
        self,
        device: DeviceType,
        output_device: Optional[DeviceType] = None,
        fifo: Optional[str] = None,
        outfile: Optional[str] = None,
        duration: Optional[float] = None,
        sample_rate: Optional[int] = None,
        dtype: str = 'int16',
        blocksize: Optional[int] = None,
        latency: Union[float, str] = 'high',
        channels: int = 1,
        volume: float = 100,
        redis_queue: Optional[str] = None,
        format: str = 'wav',  # pylint: disable=redefined-builtin
        stream: bool = True,
        stream_name: Optional[str] = None,
        play_audio: bool = False,
    ) -> AudioRecorder:
        """
        Create an audio recorder thread.

        :param device: Audio device to use.
        :param output_device: Output device to use.
        :param fifo: Path to an output FIFO file to use to synchronize the audio
            to other processes.
        :param outfile: Optional output file for the recorded audio.
        :param duration: Duration of the recording in seconds.
        :param sample_rate: Sample rate of the stream.
        :param dtype: Data type of the stream.
        :param blocksize: Block size of the stream.
        :param latency: Latency of the stream.
        :param channels: Number of input channels.
        :param volume: Input volume, between 0 and 100.
        :param redis_queue: Name of the Redis queue to use.
        :param format: Format of the recorded audio.
        :param stream: Whether to stream the recorded audio.
        :param play_audio: Whether to play the recorded audio in real-time.
        :param stream_name: Name of the stream.
        """
        blocksize = blocksize or self.input_blocksize
        dev = self._device_manager.get_device(device, type=StreamType.INPUT)

        if fifo:
            fifo = os.path.expanduser(fifo)
            if os.path.exists(fifo) and stat.S_ISFIFO(os.stat(fifo).st_mode):
                self.logger.info('Removing previous input stream FIFO %s', fifo)
                os.unlink(fifo)

            os.mkfifo(fifo, 0o644)
            outfile = fifo
        elif outfile:
            outfile = os.path.expanduser(outfile)

        outfile = outfile or fifo or os.devnull
        recorder = AudioRecorder(
            device=(
                (
                    dev.index,
                    self._device_manager.get_device(
                        type=StreamType.OUTPUT, device=output_device
                    ).index,
                )
                if play_audio
                else dev.index
            ),
            outfile=outfile,
            duration=duration,
            sample_rate=sample_rate or dev.default_samplerate,
            dtype=dtype,
            blocksize=blocksize,
            latency=latency,
            output_format=format,
            channels=channels,
            volume=volume,
            redis_queue=redis_queue,
            stream=stream,
            audio_pass_through=play_audio,
            queue_size=self.queue_size,
            should_stop=self._should_stop,
        )

        self._stream_manager.register(
            recorder, dev, StreamType.INPUT, stream_name=stream_name
        )
        return recorder

    def get_device(
        self,
        device: Optional[DeviceType] = None,
        type: Optional[StreamType] = None,  # pylint: disable=redefined-builtin
    ) -> AudioDevice:
        """
        Proxy to ``self._device_manager.get_device``.
        """
        return self._device_manager.get_device(device=device, type=type)

    def get_devices(
        self,
        type: Optional[StreamType] = None,  # pylint: disable=redefined-builtin
    ) -> List[AudioDevice]:
        """
        Proxy to ``self._device_manager.get_devices``.
        """
        return self._device_manager.get_devices(type=type)

    def get_streams(
        self,
        device: Optional[DeviceType] = None,
        type: Optional[StreamType] = None,  # pylint: disable=redefined-builtin
        streams: Optional[Iterable[Union[str, int]]] = None,
    ) -> List[AudioThread]:
        """
        Proxy to ``self._stream_manager.get``.
        """
        return self._stream_manager.get(device=device, type=type, streams=streams)

    def stop_audio(
        self,
        device: Optional[DeviceType] = None,
        type: Optional[StreamType] = None,  # pylint: disable=redefined-builtin
        streams: Optional[Iterable[Union[str, int]]] = None,
        timeout: Optional[float] = 2,
    ):
        """
        Stops audio sessions.

        :param device: Filter by host audio device.
        :param type: Filter by stream type (input or output).
        :param streams: Filter by stream indices/names.
        :param timeout: Wait timeout in seconds.
        """
        streams_to_stop = self._stream_manager.get(device, type, streams=streams)

        # Send the stop signals
        for audio_thread in streams_to_stop:
            audio_thread.notify_stop()

        # Wait for termination (with timeout)
        wait_start = time()
        for audio_thread in streams_to_stop:
            audio_thread.join(
                timeout=max(0, timeout - (time() - wait_start))
                if timeout is not None
                else None
            )

        # Remove references
        for audio_thread in streams_to_stop:
            self._stream_manager.unregister(audio_thread)

    def pause_audio(
        self,
        device: Optional[DeviceType] = None,
        type: Optional[StreamType] = None,  # pylint: disable=redefined-builtin
        streams: Optional[Iterable[Union[str, int]]] = None,
    ):
        """
        Pauses/resumes audio sessions.

        :param device: Filter by host audio device.
        :param type: Filter by stream type (input or output).
        :param streams: Filter by stream indices/names.
        """
        streams_to_pause = self._stream_manager.get(device, type, streams=streams)

        # Send the pause toggle signals
        for audio_thread in streams_to_pause:
            audio_thread.notify_pause()

    def set_volume(
        self,
        volume: float,
        device: Optional[DeviceType] = None,
        streams: Optional[Iterable[Union[str, int]]] = None,
    ):
        """
        :param volume: New volume, between 0 and 100.
        :param device: Set the volume only on the specified device (default:
            all).
        :param streams: Set the volume only on the specified list of stream
            indices/names (default: all).
        """
        stream_objs = self._stream_manager.get(device=device, streams=streams)

        for stream in stream_objs:
            stream.volume = volume
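The methods above cover the whole life cycle of an audio session: create a player or recorder thread, adjust it while it runs, then stop it. A minimal usage sketch follows; it assumes an already constructed manager instance (here called audio_manager), and the device name, file path and stream handle are illustrative values, not part of this diff.

# Hypothetical usage of the manager facade defined above.
player = audio_manager.create_player(
    device='pulse',                 # illustrative output device name
    channels=2,
    volume=80,
    infile='/path/to/audio.mp3',    # illustrative resource
)
player.start()                      # AudioPlayer is a Thread subclass

# Adjust the volume of that one stream while it plays.
audio_manager.set_volume(volume=50, streams=[player.stream_index])

# Stop every output stream, waiting up to two seconds for the threads to join.
audio_manager.stop_audio(type=StreamType.OUTPUT, timeout=2)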
207 platypush/plugins/sound/_manager/_stream.py (new file)
@@ -0,0 +1,207 @@
from collections import defaultdict
from logging import getLogger
from threading import RLock
from typing import Dict, Iterable, List, Optional, Union

from .._model import AudioDevice, DeviceType, StreamType
from .._streams import AudioThread
from ._device import DeviceManager


class StreamManager:
    """
    The stream manager is responsible for storing the current state of the
    playing/recording audio streams and allowing fast flexible lookups (by
    stream index, name, type, device, and any combination of those).
    """

    def __init__(self, device_manager: DeviceManager):
        """
        :param device_manager: Reference to the device manager.
        """
        self._next_stream_index = 1
        self._device_manager = device_manager
        self._state_lock = RLock()
        self._stream_index_by_name: Dict[str, int] = {}
        self._stream_name_by_index: Dict[int, str] = {}
        self._stream_index_to_device: Dict[int, AudioDevice] = {}
        self._stream_index_to_type: Dict[int, StreamType] = {}
        self.logger = getLogger(__name__)

        self._streams: Dict[
            int, Dict[StreamType, Dict[int, AudioThread]]
        ] = defaultdict(lambda: {stream_type: {} for stream_type in StreamType})
        """ {device_index: {stream_type: {stream_index: audio_thread}}} """

        self._streams_by_index: Dict[StreamType, Dict[int, AudioThread]] = {
            stream_type: {} for stream_type in StreamType
        }
        """ {stream_type: {stream_index: audio_thread}} """

        self._stream_locks: Dict[int, Dict[StreamType, RLock]] = defaultdict(
            lambda: {stream_type: RLock() for stream_type in StreamType}
        )
        """ {device_index: {stream_type: RLock}} """

    @classmethod
    def _generate_stream_name(
        cls,
        type: StreamType,  # pylint: disable=redefined-builtin
        stream_index: int,
    ) -> str:
        return f'platypush:audio:{type.value}:{stream_index}'

    def _gen_next_stream_index(
        self,
        type: StreamType,  # pylint: disable=redefined-builtin
        stream_name: Optional[str] = None,
    ) -> int:
        """
        :param type: The type of the stream to allocate (input or output).
        :param stream_name: The name of the stream to allocate.
        :return: The index of the new stream.
        """
        with self._state_lock:
            stream_index = self._next_stream_index

            if not stream_name:
                stream_name = self._generate_stream_name(type, stream_index)

            self._stream_name_by_index[stream_index] = stream_name
            self._stream_index_by_name[stream_name] = stream_index
            self._next_stream_index += 1

        return stream_index

    def register(
        self,
        audio_thread: AudioThread,
        device: AudioDevice,
        type: StreamType,  # pylint: disable=redefined-builtin
        stream_name: Optional[str] = None,
    ):
        """
        Registers an audio stream to a device.

        :param audio_thread: Stream to register.
        :param device: Device to register the stream to.
        :param type: The type of the stream to allocate (input or output).
        :param stream_name: The name of the stream to allocate.
        """
        with self._state_lock:
            stream_index = audio_thread.stream_index
            if stream_index is None:
                stream_index = audio_thread.stream_index = self._gen_next_stream_index(
                    type, stream_name=stream_name
                )

            self._streams[device.index][type][stream_index] = audio_thread
            self._stream_index_to_device[stream_index] = device
            self._stream_index_to_type[stream_index] = type
            self._streams_by_index[type][stream_index] = audio_thread

    def unregister(
        self,
        audio_thread: AudioThread,
        device: Optional[AudioDevice] = None,
        type: Optional[StreamType] = None,  # pylint: disable=redefined-builtin
    ):
        """
        Unregisters an audio stream from a device.

        :param audio_thread: Stream to unregister.
        :param device: Device to unregister the stream from.
        :param type: The type of the stream to unregister (input or output).
        """
        with self._state_lock:
            stream_index = audio_thread.stream_index
            if stream_index is None:
                return

            if device is None:
                device = self._stream_index_to_device.get(stream_index)

            if not type:
                type = self._stream_index_to_type.get(stream_index)

            if device is None or type is None:
                return

            self._streams[device.index][type].pop(stream_index, None)
            self._stream_index_to_device.pop(stream_index, None)
            self._stream_index_to_type.pop(stream_index, None)
            self._streams_by_index[type].pop(stream_index, None)
            stream_name = self._stream_name_by_index.pop(stream_index, None)
            if stream_name:
                self._stream_index_by_name.pop(stream_name, None)

    def _get_by_device_and_type(
        self,
        device: Optional[DeviceType] = None,
        type: Optional[StreamType] = None,  # pylint: disable=redefined-builtin
    ) -> List[AudioThread]:
        """
        Filter streams by device and/or type.
        """
        devs = (
            [self._device_manager.get_device(device, type)]
            if device is not None
            else self._device_manager.get_devices(type)
        )

        return [
            audio_thread
            for dev in devs
            for stream_info in (
                [self._streams[dev.index].get(type, {})]
                if type
                else list(self._streams[dev.index].values())
            )
            for audio_thread in stream_info.values()
            if audio_thread and audio_thread.is_alive()
        ]

    def _get_by_stream_index_or_name(
        self, streams: Iterable[Union[str, int]]
    ) -> List[AudioThread]:
        """
        Filter streams by index or name.
        """
        threads = []

        for stream in streams:
            try:
                stream_index = int(stream)
            except (TypeError, ValueError):
                stream_index = self._stream_index_by_name.get(stream)  # type: ignore
                if stream_index is None:
                    self.logger.warning('No such audio stream: %s', stream)
                    continue

            stream_type = self._stream_index_to_type.get(stream_index)
            if not stream_type:
                self.logger.warning(
                    'No type available for this audio stream: %s', stream
                )
                continue

            thread = self._streams_by_index.get(stream_type, {}).get(stream_index)
            if thread:
                threads.append(thread)

        return threads

    def get(
        self,
        device: Optional[DeviceType] = None,
        type: Optional[StreamType] = None,  # pylint: disable=redefined-builtin
        streams: Optional[Iterable[Union[str, int]]] = None,
    ) -> List[AudioThread]:
        """
        Searches streams, either by device and/or type, or by stream index/name.
        """
        return (
            self._get_by_stream_index_or_name(streams)
            if streams
            else self._get_by_device_and_type(device, type)
        )
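A quick sketch of how the StreamManager above is meant to be used. In the real flow the audio manager performs registration and lookups; here recorder_thread, mic and device_manager are illustrative stand-ins for an AudioThread, an AudioDevice and a DeviceManager instance.

# Hypothetical registration and lookup round-trip.
streams = StreamManager(device_manager=device_manager)
streams.register(recorder_thread, mic, StreamType.INPUT, stream_name='front-mic')

# Lookups work by stream name or index (the registered thread must be alive
# for device/type lookups, which filter on is_alive()).
by_name = streams.get(streams=['front-mic'])
by_device = streams.get(device=mic.index, type=StreamType.INPUT)

# Once the thread terminates, unregister() drops every index entry for it.
streams.unregister(recorder_thread)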
42 platypush/plugins/sound/_model.py (new file)
@@ -0,0 +1,42 @@
from dataclasses import dataclass
from enum import Enum
from typing import Union

DeviceType = Union[int, str]


@dataclass
class AudioDevice:
    """
    Maps the properties of an audio device.
    """

    index: int
    name: str
    hostapi: int
    max_input_channels: int
    max_output_channels: int
    default_samplerate: int
    default_low_input_latency: float = 0
    default_low_output_latency: float = 0
    default_high_input_latency: float = 0
    default_high_output_latency: float = 0


class AudioState(Enum):
    """
    Audio states.
    """

    STOPPED = 'STOPPED'
    RUNNING = 'RUNNING'
    PAUSED = 'PAUSED'


class StreamType(Enum):
    """
    Stream types.
    """

    INPUT = 'input'
    OUTPUT = 'output'
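For reference, a small sketch of how these model types are consumed. The concrete values are made up for illustration; in the plugin the AudioDevice fields come from the sound backend's device list.

mic: DeviceType = 'USB Audio'        # devices are addressed by index or name
speakers: DeviceType = 1

dev = AudioDevice(
    index=1,
    name='USB Audio',
    hostapi=0,
    max_input_channels=2,
    max_output_channels=2,
    default_samplerate=44100,
)

assert StreamType('input') is StreamType.INPUT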
6 platypush/plugins/sound/_streams/__init__.py (new file)
@@ -0,0 +1,6 @@
from ._base import AudioThread
from ._player import AudioPlayer
from ._recorder import AudioRecorder


__all__ = ['AudioPlayer', 'AudioRecorder', 'AudioThread']
502 platypush/plugins/sound/_streams/_base.py (new file)
@@ -0,0 +1,502 @@
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from contextlib import contextmanager
|
||||||
|
from datetime import datetime
|
||||||
|
from logging import getLogger
|
||||||
|
import os
|
||||||
|
import queue
|
||||||
|
from threading import Event, RLock, Thread
|
||||||
|
import time
|
||||||
|
from typing import IO, Callable, Final, Generator, Optional, Tuple, Type, Union
|
||||||
|
from typing_extensions import override
|
||||||
|
|
||||||
|
import sounddevice as sd
|
||||||
|
|
||||||
|
from platypush.context import get_bus
|
||||||
|
from platypush.message.event.sound import SoundEvent
|
||||||
|
from platypush.utils import get_redis
|
||||||
|
|
||||||
|
from .._converters import AudioConverter
|
||||||
|
from .._model import AudioState, StreamType
|
||||||
|
|
||||||
|
_StreamType = Union[sd.Stream, sd.OutputStream]
|
||||||
|
|
||||||
|
|
||||||
|
class AudioThread(Thread, ABC):
|
||||||
|
"""
|
||||||
|
Base class for audio play/record stream threads.
|
||||||
|
"""
|
||||||
|
|
||||||
|
_DEFAULT_FILE: Final[str] = os.devnull
|
||||||
|
"""Unless otherwise specified, the audio streams will be sent to /dev/null"""
|
||||||
|
_DEFAULT_CONVERTER_TIMEOUT: Final[float] = 1
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
device: Union[str, Tuple[str, str]],
|
||||||
|
channels: int,
|
||||||
|
volume: float,
|
||||||
|
sample_rate: int,
|
||||||
|
dtype: str,
|
||||||
|
blocksize: int,
|
||||||
|
ffmpeg_bin: str = 'ffmpeg',
|
||||||
|
stream: bool = False,
|
||||||
|
audio_pass_through: bool = False,
|
||||||
|
infile: Optional[str] = None,
|
||||||
|
outfile: Optional[str] = None,
|
||||||
|
duration: Optional[float] = None,
|
||||||
|
latency: Union[float, str] = 'high',
|
||||||
|
redis_queue: Optional[str] = None,
|
||||||
|
should_stop: Optional[Event] = None,
|
||||||
|
converter_timeout: Optional[float] = None,
|
||||||
|
stream_name: Optional[str] = None,
|
||||||
|
queue_size: Optional[int] = None,
|
||||||
|
**kwargs,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
:param device: Audio device to use.
|
||||||
|
:param channels: Number of channels to use.
|
||||||
|
:param volume: Input/output volume, between 0 and 100.
|
||||||
|
:param sample_rate: Sample rate to use.
|
||||||
|
:param dtype: Data type to use.
|
||||||
|
:param blocksize: Block size to use.
|
||||||
|
:param ffmpeg_bin: Path to the ffmpeg binary.
|
||||||
|
:param stream: Whether to stream the audio to Redis consumers.
|
||||||
|
:param audio_pass_through: Whether to pass the audio through to the
|
||||||
|
application's output stream.
|
||||||
|
:param infile: Path to the input file or URL, if this is an output
|
||||||
|
stream.
|
||||||
|
:param outfile: Path to the output file.
|
||||||
|
:param duration: Duration of the audio stream.
|
||||||
|
:param latency: Latency to use.
|
||||||
|
:param redis_queue: Redis queue to use.
|
||||||
|
:param should_stop: Synchronize with upstream stop events.
|
||||||
|
:param converter_timeout: How long to wait for the converter to finish.
|
||||||
|
:param stream_name: Name of the stream.
|
||||||
|
:param queue_size: Maximum size of the audio queue.
|
||||||
|
"""
|
||||||
|
super().__init__(**kwargs)
|
||||||
|
|
||||||
|
self.device = device
|
||||||
|
self.outfile = os.path.expanduser(outfile or self._DEFAULT_FILE)
|
||||||
|
self.infile = os.path.expanduser(infile or self._DEFAULT_FILE)
|
||||||
|
self.ffmpeg_bin = ffmpeg_bin
|
||||||
|
self.channels = channels
|
||||||
|
self.volume = volume
|
||||||
|
self.sample_rate = sample_rate
|
||||||
|
self.dtype = dtype
|
||||||
|
self.stream = stream
|
||||||
|
self.duration = duration
|
||||||
|
self.blocksize = blocksize * channels
|
||||||
|
self.latency = latency
|
||||||
|
self._redis_queue = redis_queue
|
||||||
|
self.audio_pass_through = audio_pass_through
|
||||||
|
self.queue_size = queue_size
|
||||||
|
self._stream_name = stream_name
|
||||||
|
self.logger = getLogger(__name__)
|
||||||
|
|
||||||
|
self._state = AudioState.STOPPED
|
||||||
|
self._state_lock = RLock()
|
||||||
|
self._started_time: Optional[float] = None
|
||||||
|
self._converter: Optional[AudioConverter] = None
|
||||||
|
self._should_stop = should_stop or Event()
|
||||||
|
self._converter_timeout = converter_timeout or self._DEFAULT_CONVERTER_TIMEOUT
|
||||||
|
self.audio_stream: Optional[_StreamType] = None
|
||||||
|
self.stream_index: Optional[int] = None
|
||||||
|
self.paused_changed = Event()
|
||||||
|
self._converter_terminated = Event()
|
||||||
|
|
||||||
|
@property
|
||||||
|
def should_stop(self) -> bool:
|
||||||
|
"""
|
||||||
|
Proxy for `._should_stop.is_set()`.
|
||||||
|
"""
|
||||||
|
return self._should_stop.is_set() or bool(
|
||||||
|
self.state == AudioState.STOPPED and self._started_time
|
||||||
|
)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def gain(self) -> float:
|
||||||
|
return self.volume / 100
|
||||||
|
|
||||||
|
def wait_stop(self, timeout: Optional[float] = None):
|
||||||
|
"""
|
||||||
|
Wait for the stop signal to be received.
|
||||||
|
"""
|
||||||
|
return self._should_stop.wait(timeout=timeout)
|
||||||
|
|
||||||
|
def _audio_callback(self) -> Callable:
|
||||||
|
"""
|
||||||
|
Returns a callback to handle the raw frames captures from the audio device.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def empty_callback(*_, **__):
|
||||||
|
pass
|
||||||
|
|
||||||
|
return empty_callback
|
||||||
|
|
||||||
|
@property
|
||||||
|
def stream_name(self) -> str:
|
||||||
|
if self._stream_name:
|
||||||
|
return self._stream_name
|
||||||
|
|
||||||
|
ret = f'platypush:audio:{self.direction.value}'
|
||||||
|
if self.stream_index is not None:
|
||||||
|
ret += f':{self.stream_index}'
|
||||||
|
return ret
|
||||||
|
|
||||||
|
@stream_name.setter
|
||||||
|
def stream_name(self, value: Optional[str]):
|
||||||
|
self._stream_name = value
|
||||||
|
|
||||||
|
@property
|
||||||
|
@abstractmethod
|
||||||
|
def direction(self) -> StreamType:
|
||||||
|
"""
|
||||||
|
The default direction for this stream - input or output.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@property
|
||||||
|
@abstractmethod
|
||||||
|
def _audio_converter_type(self) -> Optional[Type[AudioConverter]]:
|
||||||
|
"""
|
||||||
|
This property indicates the type that should be used for the audio
|
||||||
|
converter.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@property
|
||||||
|
@abstractmethod
|
||||||
|
def _started_event_type(self) -> Type[SoundEvent]:
|
||||||
|
"""
|
||||||
|
Event type that will be emitted when the audio starts.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@property
|
||||||
|
@abstractmethod
|
||||||
|
def _stopped_event_type(self) -> Type[SoundEvent]:
|
||||||
|
"""
|
||||||
|
Event type that will be emitted when the audio stops.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@property
|
||||||
|
@abstractmethod
|
||||||
|
def _paused_event_type(self) -> Type[SoundEvent]:
|
||||||
|
"""
|
||||||
|
Event type that will be emitted when the audio is paused.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@property
|
||||||
|
@abstractmethod
|
||||||
|
def _resumed_event_type(self) -> Type[SoundEvent]:
|
||||||
|
"""
|
||||||
|
Event type that will be emitted when the audio is resumed.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@property
|
||||||
|
def _stream_type(self) -> Union[Type[sd.Stream], Type[sd.OutputStream]]:
|
||||||
|
"""
|
||||||
|
The type of stream this thread is mapped to.
|
||||||
|
"""
|
||||||
|
return sd.Stream
|
||||||
|
|
||||||
|
@property
|
||||||
|
def _converter_args(self) -> dict:
|
||||||
|
"""
|
||||||
|
Extra arguments to pass to the audio converter.
|
||||||
|
"""
|
||||||
|
return {}
|
||||||
|
|
||||||
|
@property
|
||||||
|
def _stream_args(self) -> dict:
|
||||||
|
"""
|
||||||
|
Extra arguments to pass to the stream constructor.
|
||||||
|
"""
|
||||||
|
return {}
|
||||||
|
|
||||||
|
@property
|
||||||
|
def redis_queue(self) -> str:
|
||||||
|
"""
|
||||||
|
Redis queue for audio streaming.
|
||||||
|
"""
|
||||||
|
if self._redis_queue:
|
||||||
|
return self._redis_queue
|
||||||
|
|
||||||
|
dev = (
|
||||||
|
self.device
|
||||||
|
if isinstance(self.device, (str, int))
|
||||||
|
else '-'.join(map(str, self.device))
|
||||||
|
)
|
||||||
|
|
||||||
|
name = f'platypush-audio-stream-{self.__class__.__name__}-{dev}'
|
||||||
|
if self.stream_index is not None:
|
||||||
|
name = f'{name}-{self.stream_index}'
|
||||||
|
|
||||||
|
return name
|
||||||
|
|
||||||
|
def _on_audio_converted(self, data: bytes, out_f: Optional[IO] = None):
|
||||||
|
"""
|
||||||
|
This callback will be called when the audio data has been converted.
|
||||||
|
"""
|
||||||
|
if out_f:
|
||||||
|
out_f.write(data)
|
||||||
|
|
||||||
|
if self.stream:
|
||||||
|
get_redis().publish(self.redis_queue, data)
|
||||||
|
|
||||||
|
def _wait_running(self):
|
||||||
|
"""
|
||||||
|
If the stream is in paused state, wait for the state to change.
|
||||||
|
"""
|
||||||
|
while self.state == AudioState.PAUSED:
|
||||||
|
self.paused_changed.wait()
|
||||||
|
|
||||||
|
def main(
|
||||||
|
self,
|
||||||
|
converter: Optional[AudioConverter] = None,
|
||||||
|
out_f: Optional[IO] = None,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Main loop.
|
||||||
|
"""
|
||||||
|
self.notify_start()
|
||||||
|
|
||||||
|
self.logger.info(
|
||||||
|
'Started %s on device [%s]', self.__class__.__name__, self.device
|
||||||
|
)
|
||||||
|
self._started_time = time.time()
|
||||||
|
|
||||||
|
while not self.should_stop and (
|
||||||
|
self.duration is None or time.time() - self._started_time < self.duration
|
||||||
|
):
|
||||||
|
self._wait_running()
|
||||||
|
if not converter:
|
||||||
|
self.wait_stop(0.1)
|
||||||
|
continue
|
||||||
|
|
||||||
|
if self.should_stop:
|
||||||
|
break
|
||||||
|
|
||||||
|
timeout = (
|
||||||
|
max(
|
||||||
|
0,
|
||||||
|
min(
|
||||||
|
self.duration - (time.time() - self._started_time),
|
||||||
|
self._converter_timeout,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
if self.duration is not None
|
||||||
|
else self._converter_timeout
|
||||||
|
)
|
||||||
|
|
||||||
|
should_continue = self._process_converted_audio(
|
||||||
|
converter, timeout=timeout, out_f=out_f
|
||||||
|
)
|
||||||
|
|
||||||
|
if not should_continue:
|
||||||
|
break
|
||||||
|
|
||||||
|
def _process_converted_audio(
|
||||||
|
self, converter: AudioConverter, timeout: float, out_f: Optional[IO]
|
||||||
|
) -> bool:
|
||||||
|
"""
|
||||||
|
It reads the converted audio from the converter and passes it downstream.
|
||||||
|
|
||||||
|
:return: True if the process should continue, False if it should terminate.
|
||||||
|
"""
|
||||||
|
data = converter.read(timeout=timeout)
|
||||||
|
if not data:
|
||||||
|
return self._on_converter_timeout(converter)
|
||||||
|
|
||||||
|
self._on_audio_converted(data, out_f)
|
||||||
|
return True
|
||||||
|
|
||||||
|
def _on_converter_timeout(self, converter: AudioConverter) -> bool:
|
||||||
|
"""
|
||||||
|
Callback logic invoked if the converter times out.
|
||||||
|
|
||||||
|
:return: ``True`` (default) if the thread is supposed to continue,
|
||||||
|
``False`` if it should terminate.
|
||||||
|
"""
|
||||||
|
self.logger.debug('Timeout on converter %s', converter.__class__.__name__)
|
||||||
|
# Continue only if the converter hasn't terminated
|
||||||
|
return self._converter_terminated.is_set()
|
||||||
|
|
||||||
|
@override
|
||||||
|
def run(self):
|
||||||
|
"""
|
||||||
|
Wrapper for the main loop that initializes the converter and the stream.
|
||||||
|
"""
|
||||||
|
super().run()
|
||||||
|
self.paused_changed.clear()
|
||||||
|
|
||||||
|
try:
|
||||||
|
with self.open_converter() as converter, self._stream_type(
|
||||||
|
samplerate=self.sample_rate,
|
||||||
|
device=self.device,
|
||||||
|
channels=self.channels,
|
||||||
|
dtype=self.dtype,
|
||||||
|
latency=self.latency,
|
||||||
|
blocksize=self.blocksize,
|
||||||
|
**self._stream_args,
|
||||||
|
) as self.audio_stream, open(
|
||||||
|
self.outfile, 'wb'
|
||||||
|
) as out_f, self._audio_generator():
|
||||||
|
self.main(converter=converter, out_f=out_f)
|
||||||
|
except queue.Empty:
|
||||||
|
self.logger.warning(
|
||||||
|
'Audio callback timeout for %s', self.__class__.__name__
|
||||||
|
)
|
||||||
|
finally:
|
||||||
|
self.notify_stop()
|
||||||
|
|
||||||
|
@contextmanager
|
||||||
|
def _audio_generator(self) -> Generator[Optional[Thread], None, None]:
|
||||||
|
"""
|
||||||
|
:yield: A <Thread, Queue> pair where the thread generates raw audio
|
||||||
|
frames (as numpy arrays) that are sent to the specified queue.
|
||||||
|
"""
|
||||||
|
yield None
|
||||||
|
|
||||||
|
@contextmanager
|
||||||
|
def open_converter(self) -> Generator[Optional[AudioConverter], None, None]:
|
||||||
|
"""
|
||||||
|
Context manager for the converter process.
|
||||||
|
"""
|
||||||
|
if self._audio_converter_type is None:
|
||||||
|
yield None
|
||||||
|
return
|
||||||
|
|
||||||
|
assert not self._converter, 'A converter process is already running'
|
||||||
|
self._converter = self._audio_converter_type(
|
||||||
|
ffmpeg_bin=self.ffmpeg_bin,
|
||||||
|
sample_rate=self.sample_rate,
|
||||||
|
channels=self.channels,
|
||||||
|
volume=self.volume,
|
||||||
|
dtype=self.dtype,
|
||||||
|
chunk_size=self.blocksize,
|
||||||
|
on_exit=self._converter_terminated.set,
|
||||||
|
**self._converter_args,
|
||||||
|
)
|
||||||
|
|
||||||
|
self._converter.start()
|
||||||
|
yield self._converter
|
||||||
|
|
||||||
|
self._converter.stop()
|
||||||
|
self._converter.join(timeout=2)
|
||||||
|
self._converter = None
|
||||||
|
|
||||||
|
@contextmanager
|
||||||
|
def _change_state(self, state: AudioState, event_type: Type[SoundEvent]):
|
||||||
|
"""
|
||||||
|
Changes the state and it emits the specified event if the state has
|
||||||
|
actually changed.
|
||||||
|
|
||||||
|
It uses a context manager pattern, and everything in between will be
|
||||||
|
executed before the events are dispatched.
|
||||||
|
"""
|
||||||
|
with self._state_lock:
|
||||||
|
prev_state = self.state
|
||||||
|
self.state = state
|
||||||
|
|
||||||
|
yield
|
||||||
|
if prev_state != state:
|
||||||
|
self._notify(event_type)
|
||||||
|
|
||||||
|
def _notify(self, event_type: Type[SoundEvent], **kwargs):
|
||||||
|
"""
|
||||||
|
Notifies the specified event.
|
||||||
|
"""
|
||||||
|
get_bus().post(event_type(device=self.device, **kwargs))
|
||||||
|
|
||||||
|
def notify_start(self):
|
||||||
|
"""
|
||||||
|
Notifies the start event.
|
||||||
|
"""
|
||||||
|
with self._change_state(AudioState.RUNNING, self._started_event_type):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def notify_stop(self):
|
||||||
|
"""
|
||||||
|
Notifies the stop event.
|
||||||
|
"""
|
||||||
|
with self._change_state(AudioState.STOPPED, self._stopped_event_type):
|
||||||
|
if self._converter:
|
||||||
|
self._converter.stop()
|
||||||
|
self.paused_changed.set()
|
||||||
|
self.paused_changed.clear()
|
||||||
|
|
||||||
|
def notify_pause(self):
|
||||||
|
"""
|
||||||
|
Notifies a pause toggle event.
|
||||||
|
"""
|
||||||
|
states = {
|
||||||
|
AudioState.PAUSED: AudioState.RUNNING,
|
||||||
|
AudioState.RUNNING: AudioState.PAUSED,
|
||||||
|
}
|
||||||
|
|
||||||
|
with self._state_lock:
|
||||||
|
new_state = states.get(self.state)
|
||||||
|
if not new_state:
|
||||||
|
return
|
||||||
|
|
||||||
|
event_type = (
|
||||||
|
self._paused_event_type
|
||||||
|
if new_state == AudioState.PAUSED
|
||||||
|
else self._resumed_event_type
|
||||||
|
)
|
||||||
|
|
||||||
|
with self._change_state(new_state, event_type):
|
||||||
|
self.paused_changed.set()
|
||||||
|
self.paused_changed.clear()
|
||||||
|
|
||||||
|
@property
|
||||||
|
def state(self):
|
||||||
|
"""
|
||||||
|
Thread-safe wrapper for the stream state.
|
||||||
|
"""
|
||||||
|
with self._state_lock:
|
||||||
|
return self._state
|
||||||
|
|
||||||
|
@state.setter
|
||||||
|
def state(self, value: AudioState):
|
||||||
|
"""
|
||||||
|
Thread-safe setter for the stream state.
|
||||||
|
"""
|
||||||
|
with self._state_lock:
|
||||||
|
self._state = value
|
||||||
|
|
||||||
|
def asdict(self) -> dict:
|
||||||
|
"""
|
||||||
|
Serialize the thread information.
|
||||||
|
"""
|
||||||
|
return {
|
||||||
|
'device': self.device,
|
||||||
|
'outfile': self.outfile,
|
||||||
|
'infile': self.infile,
|
||||||
|
'direction': self.direction,
|
||||||
|
'ffmpeg_bin': self.ffmpeg_bin,
|
||||||
|
'channels': self.channels,
|
||||||
|
'sample_rate': self.sample_rate,
|
||||||
|
'dtype': self.dtype,
|
||||||
|
'streaming': self.stream,
|
||||||
|
'duration': self.duration,
|
||||||
|
'blocksize': self.blocksize,
|
||||||
|
'latency': self.latency,
|
||||||
|
'redis_queue': self.redis_queue,
|
||||||
|
'audio_pass_through': self.audio_pass_through,
|
||||||
|
'state': self._state.value,
|
||||||
|
'volume': self.volume,
|
||||||
|
'started_time': datetime.fromtimestamp(self._started_time)
|
||||||
|
if self._started_time
|
||||||
|
else None,
|
||||||
|
'stream_index': self.stream_index,
|
||||||
|
'stream_name': self.stream_name,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# vim:sw=4:ts=4:et:
|
3 platypush/plugins/sound/_streams/_player/__init__.py (new file)
@@ -0,0 +1,3 @@
from ._base import AudioPlayer

__all__ = ['AudioPlayer']
110 platypush/plugins/sound/_streams/_player/_base.py (new file)
@@ -0,0 +1,110 @@
from abc import ABC
from typing import IO, Iterable, List, Optional, Self, Type, Union
from typing_extensions import override

import numpy as np
import sounddevice as sd

from platypush.message.event.sound import (
    SoundPlaybackPausedEvent,
    SoundPlaybackResumedEvent,
    SoundPlaybackStartedEvent,
    SoundPlaybackStoppedEvent,
)

from ..._converters import RawOutputAudioConverter
from ..._model import StreamType
from .._base import AudioThread


class AudioPlayer(AudioThread, ABC):
    """
    Base ``AudioPlayer`` class.

    An ``AudioPlayer`` thread is responsible for playing audio (either from a
    file/URL or from a synthetic source) to an output device, writing it to the
    converter process and dispatching the converted audio to the registered
    consumers.
    """

    def __init__(
        self, *args, sound: Optional[Union[dict, Iterable[dict]]] = None, **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.sound = sound

    @classmethod
    def build(
        cls,
        infile: Optional[str] = None,
        sound: Optional[Union[dict, Iterable[dict]]] = None,
        **kwargs,
    ) -> Self:
        from ._resource import AudioResourcePlayer
        from ._synth import AudioSynthPlayer, Sound

        if infile:
            return AudioResourcePlayer(infile=infile, **kwargs)
        if sound:
            sounds: List[dict] = (  # type: ignore
                [sound] if isinstance(sound, dict) else sound
            )

            return AudioSynthPlayer(sounds=[Sound.build(**s) for s in sounds], **kwargs)

        raise AssertionError('Either infile or sound must be specified')

    @property
    @override
    def direction(self) -> StreamType:
        return StreamType.OUTPUT

    @override
    def _on_converter_timeout(self, *_, **__) -> bool:
        return False  # break

    @property
    @override
    def _stream_type(self) -> Type[sd.RawOutputStream]:
        return sd.RawOutputStream

    @property
    @override
    def _audio_converter_type(self) -> Type[RawOutputAudioConverter]:
        return RawOutputAudioConverter

    @override
    def _on_audio_converted(self, data: bytes, out_f: Optional[IO] = None):
        if self.audio_stream:
            self.audio_stream.write(
                np.asarray(
                    self.gain
                    * np.frombuffer(data, dtype=self.dtype).reshape(-1, self.channels),
                    dtype=self.dtype,
                )
            )

        super()._on_audio_converted(data, out_f)

    @property
    @override
    def _started_event_type(self) -> Type[SoundPlaybackStartedEvent]:
        return SoundPlaybackStartedEvent

    @property
    @override
    def _stopped_event_type(self) -> Type[SoundPlaybackStoppedEvent]:
        return SoundPlaybackStoppedEvent

    @property
    @override
    def _paused_event_type(self) -> Type[SoundPlaybackPausedEvent]:
        return SoundPlaybackPausedEvent

    @property
    @override
    def _resumed_event_type(self) -> Type[SoundPlaybackResumedEvent]:
        return SoundPlaybackResumedEvent


# vim:sw=4:ts=4:et:
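The _on_audio_converted override above turns each converted byte block into a (frames, channels) matrix and scales it by gain = volume / 100 before writing it to the raw output stream. A standalone sketch of that same transformation, with made-up block parameters:

import numpy as np

dtype = 'int16'
channels = 2
volume = 80              # plugin-level volume, 0..100
gain = volume / 100      # same conversion as AudioThread.gain

# Pretend this is one converted block coming out of the converter process.
data = np.random.randint(-32768, 32767, size=512 * channels, dtype=np.int16).tobytes()

# Interleaved bytes -> (frames, channels) matrix, scaled and cast back to the
# stream dtype, as in _on_audio_converted() above.
frames = np.asarray(
    gain * np.frombuffer(data, dtype=dtype).reshape(-1, channels),
    dtype=dtype,
)
assert frames.shape == (512, 2)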
39 platypush/plugins/sound/_streams/_player/_resource.py (new file)
@@ -0,0 +1,39 @@
from typing import Optional, Type
from typing_extensions import override

from platypush.message.event.sound import SoundEvent

from ..._converters import RawOutputAudioFromFileConverter
from ._base import AudioPlayer


class AudioResourcePlayer(AudioPlayer):
    """
    An ``AudioResourcePlayer`` thread is responsible for playing an audio
    resource - either a file or a URL.
    """

    @property
    @override
    def _audio_converter_type(self) -> Type[RawOutputAudioFromFileConverter]:
        return RawOutputAudioFromFileConverter

    @property
    @override
    def _converter_args(self) -> dict:
        return {
            'infile': self.infile,
            **super()._converter_args,
        }

    @property
    @override
    def _converter_stdin(self) -> Optional[int]:
        return None

    @override
    def _notify(self, event_type: Type[SoundEvent], **kwargs):
        return super()._notify(event_type, resource=self.infile, **kwargs)


# vim:sw=4:ts=4:et:
4 platypush/plugins/sound/_streams/_player/_synth/__init__.py (new file)
@@ -0,0 +1,4 @@
from ._player import AudioSynthPlayer
from ._sound import Sound

__all__ = ['AudioSynthPlayer', 'Sound']
79 platypush/plugins/sound/_streams/_player/_synth/_base.py (new file)
@@ -0,0 +1,79 @@
from abc import ABC, abstractmethod
from typing import Optional, Tuple

import numpy as np
from numpy.typing import NDArray

from ._parser import SoundParser


class SoundBase(SoundParser, ABC):
    """
    Base class for synthetic sounds and mixes.
    """

    def __init__(self, *args, volume: float = 100, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.volume = volume

    @property
    def gain(self) -> float:
        return self.volume / 100

    @gain.setter
    def gain(self, value: float):
        self.volume = value * 100

    @abstractmethod
    def get_wave(
        self,
        sample_rate: float,
        t_start: float = 0,
        t_end: float = 0,
        **_,
    ) -> NDArray[np.floating]:
        """
        Get the raw wave data associated with this sound.

        :param t_start: Start offset for the wave in seconds. Default: 0
        :param t_end: End offset for the wave in seconds. Default: 0
        :param sample_rate: Audio sample rate. Default: 44100 Hz
        :returns: A ``numpy.ndarray[(t_end-t_start)*sample_rate, 1]``
            with the raw float values
        """
        raise NotImplementedError()

    def fft(
        self,
        sample_rate: float,
        t_start: float = 0.0,
        t_end: float = 0.0,
        freq_range: Optional[Tuple[float, float]] = None,
        freq_buckets: Optional[int] = None,
    ) -> NDArray[np.floating]:
        """
        Get the real part of the Fourier transform associated with a
        time-bounded sample of this sound.

        :param t_start: Start offset for the wave in seconds. Default: 0
        :param t_end: End offset for the wave in seconds. Default: 0
        :param sample_rate: Audio sample rate. Default: 44100 Hz
        :param freq_range: FFT frequency range. Default: ``(0, sample_rate/2)``
            (see the `Nyquist-Shannon sampling theorem
            <https://en.wikipedia.org/wiki/Nyquist%E2%80%93Shannon_sampling_theorem>`_)
        :param freq_buckets: Number of buckets to subdivide the frequency range.
            Default: None
        :returns: A numpy.ndarray[freq_range,1] with the raw float values
        """
        if not freq_range:
            freq_range = (0, int(sample_rate / 2))

        wave = self.get_wave(t_start=t_start, t_end=t_end, sample_rate=sample_rate)
        fft = np.fft.fft(wave.reshape(len(wave)))
        fft = fft.real[freq_range[0] : freq_range[1]]

        if freq_buckets is not None:
            fft = np.histogram(fft, bins=freq_buckets)[0]

        return fft
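get_wave() is abstract: concrete sounds only have to turn a (t_start, t_end, sample_rate) window into a column of float samples. A minimal hypothetical subclass (not part of the diff) that renders a constant tone, just to show the contract and how fft() builds on it:

import numpy as np
from numpy.typing import NDArray


class SineWave(SoundBase):
    # Illustrative subclass: a fixed-frequency sine tone.
    def __init__(self, *args, frequency: float = 440.0, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.frequency = frequency

    def get_wave(
        self, sample_rate: float, t_start: float = 0, t_end: float = 0, **_
    ) -> NDArray[np.floating]:
        t = np.linspace(
            t_start, t_end, int((t_end - t_start) * sample_rate), endpoint=False
        )
        # One column, per the documented (n_samples, 1) shape.
        return (self.gain * np.sin(2 * np.pi * self.frequency * t)).reshape(-1, 1)


tone = SineWave(frequency=440, volume=50)
block = tone.get_wave(sample_rate=44100, t_start=0, t_end=0.1)       # 4410 samples
spectrum = tone.fft(sample_rate=44100, t_start=0, t_end=0.1, freq_range=(0, 1000))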
101 platypush/plugins/sound/_streams/_player/_synth/_generator.py (new file)
@@ -0,0 +1,101 @@
from logging import getLogger
from queue import Full, Queue
from threading import Thread
from time import time
from typing import Any, Callable, Optional

import numpy as np
from numpy.typing import NDArray

from ._mix import Mix


class AudioGenerator(Thread):
    """
    The ``AudioGenerator`` class is a thread that generates synthetic raw audio
    waves and dispatches them to a queue that can be consumed by other players,
    streamers and converters.
    """

    def __init__(
        self,
        *args,
        audio_queue: Queue[NDArray[np.number]],
        mix: Mix,
        blocksize: int,
        sample_rate: int,
        queue_timeout: Optional[float] = None,
        should_stop: Callable[[], bool] = lambda: False,
        wait_running: Callable[[], Any] = lambda: None,
        on_stop: Callable[[], Any] = lambda: None,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self._audio_queue = audio_queue
        self._t_start: float = 0
        self._blocksize: int = blocksize
        self._sample_rate: int = sample_rate
        self._blocktime = self._blocksize / self._sample_rate
        self._should_stop = should_stop
        self._queue_timeout = queue_timeout
        self._wait_running = wait_running
        self._on_stop = on_stop
        self.mix = mix
        self.logger = getLogger(__name__)

    def _next_t(self, t: float) -> float:
        """
        Calculates the next starting time for the wave function.
        """
        return (
            min(t + self._blocktime, self._duration)
            if self._duration is not None
            else t + self._blocktime
        )

    def should_stop(self) -> bool:
        """
        Stops if the upstream dependencies have signalled to stop or if the
        duration is set and we have reached it.
        """
        return self._should_stop() or (
            self._duration is not None and time() - self._t_start >= self._duration
        )

    @property
    def _duration(self) -> Optional[float]:
        """
        Proxy to the mix object's duration.
        """
        return self.mix.duration()

    def run(self):
        super().run()
        self._t_start = time()
        t = 0

        while not self.should_stop():
            self._wait_running()
            if self.should_stop():
                break

            next_t = self._next_t(t)

            try:
                data = self.mix.get_wave(
                    t_start=t, t_end=next_t, sample_rate=self._sample_rate
                )
            except Exception as e:
                self.logger.warning('Could not generate the audio wave: %s', e)
                break

            try:
                self._audio_queue.put(data, timeout=self._queue_timeout)
                t = next_t
            except Full:
                self.logger.warning(
                    'The processing queue is full: either the audio consumer is stuck, '
                    'or you may want to increase queue_size'
                )

        self._on_stop()
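The generator slices the mix into blocks of blocksize / sample_rate seconds and clamps the last window to the mix duration, exactly as _next_t() does above. A quick worked example of the windowing with illustrative numbers:

blocksize = 1024
sample_rate = 44100
blocktime = blocksize / sample_rate          # ~0.0232 s of audio per block
duration = 0.05                              # assume the mix reports 50 ms

# Successive (t_start, t_end) windows requested from Mix.get_wave():
t, windows = 0.0, []
while t < duration:
    next_t = min(t + blocktime, duration)
    windows.append((t, next_t))
    t = next_t

# -> roughly [(0.0, 0.0232), (0.0232, 0.0464), (0.0464, 0.05)]
print(windows)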
115 platypush/plugins/sound/_streams/_player/_synth/_mix.py (new file)
@@ -0,0 +1,115 @@
import json
import logging
from typing import List, Tuple, Union
from typing_extensions import override

import numpy as np
from numpy.typing import DTypeLike, NDArray

from ...._utils import convert_nd_array
from ._base import SoundBase
from ._sound import Sound


class Mix(SoundBase):
    """
    This class models a set of mixed :class:`._sound.Sound` instances that can
    be played through an audio stream to an audio device.
    """

    def __init__(self, *sounds, channels: int, dtype: DTypeLike, **kwargs):
        super().__init__(**kwargs)
        self._sounds: List[Sound] = []
        self.logger = logging.getLogger(__name__)
        self.channels = channels
        self.dtype = np.dtype(dtype)

        for sound in sounds:
            self.add(sound)

    def __iter__(self):
        """
        Iterate over the mixed sounds and yield their key-value representations.
        """
        for sound in self._sounds:
            yield dict(sound)

    def __str__(self):
        """
        Return a JSON string representation of the object.
        """
        return json.dumps(list(self))

    def add(self, *sounds: Union[Sound, dict]):
        """
        Add one or more sounds to the mix.
        """
        self._sounds += [Sound.build(sound) for sound in sounds]

    def remove(self, *sound_indices: int):
        """
        Remove one or more sounds from the mix.
        """
        assert self._sounds and all(
            0 <= sound_index < len(self._sounds) for sound_index in sound_indices
        ), f'Sound indices must be between 0 and {len(self._sounds) - 1}'

        for sound_index in sorted(sound_indices, reverse=True):
            self._sounds.pop(sound_index)

    @override
    def get_wave(
        self,
        sample_rate: float,
        t_start: float = 0,
        t_end: float = 0,
        normalize_range: Tuple[float, float] = (-1.0, 1.0),
        on_clip: str = 'scale',
        **_,
    ) -> NDArray[np.number]:
        wave = None

        for sound in self._sounds:
            sound_wave = sound.get_wave(
                t_start=t_start, t_end=t_end, sample_rate=sample_rate
            )

            if wave is None:
                wave = sound_wave
            else:
                wave += sound_wave

        if wave is not None and len(wave):
            scale_factor = (normalize_range[1] - normalize_range[0]) / (
                wave.max() - wave.min()
            )

            if scale_factor < 1.0:  # Wave clipping
                if on_clip == 'scale':
                    wave = scale_factor * wave
                elif on_clip == 'clip':
                    wave[wave < normalize_range[0]] = normalize_range[0]
                    wave[wave > normalize_range[1]] = normalize_range[1]
                else:
                    raise RuntimeError(
                        'Supported values for "on_clip": "scale" or "clip"'
                    )

        assert wave is not None
        return convert_nd_array(self.gain * wave, dtype=self.dtype)

    def duration(self):
        """
        :returns: The duration of the mix in seconds as the duration of its
            longest sample, or None if any mixed sample has no duration set
        """
        # If any sound has no duration specified, then the resulting mix will
        # have no duration as well.
        if any(sound.duration is None for sound in self._sounds):
            return None

        return max((sound.duration or 0) + sound.delay for sound in self._sounds)


# vim:sw=4:ts=4:et:
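When the summed sounds exceed the normalization range, get_wave() either rescales the whole block or hard-clips the peaks, depending on on_clip. A standalone numpy illustration of the two branches, with a made-up block of samples:

import numpy as np

lo, hi = -1.0, 1.0
wave = np.array([-1.6, -0.4, 0.0, 0.8, 1.6])          # two summed sounds, clipping

scale_factor = (hi - lo) / (wave.max() - wave.min())   # 2 / 3.2 = 0.625

scaled = scale_factor * wave                           # 'scale': keep the shape
clipped = np.clip(wave, lo, hi)                        # 'clip': saturate the peaks

print(scaled)   # [-1.   -0.25  0.    0.5   1.  ]
print(clipped)  # [-1.   -0.4   0.    0.8   1.  ]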
79 platypush/plugins/sound/_streams/_player/_synth/_output.py (new file)
@@ -0,0 +1,79 @@
from logging import getLogger
from queue import Empty, Queue
from typing import Callable, Optional

import sounddevice as sd

import numpy as np
from numpy.typing import NDArray


# pylint: disable=too-few-public-methods
class AudioOutputCallback:
    """
    ``AudioOutputCallback`` is a functor that wraps the ``sounddevice.Stream``
    callback and writes raw audio data to the audio device.
    """

    def __init__(
        self,
        *args,
        audio_queue: Queue[NDArray[np.number]],
        channels: int,
        blocksize: int,
        should_stop: Callable[[], bool] = lambda: False,
        is_paused: Callable[[], bool] = lambda: False,
        queue_timeout: Optional[float] = None,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self._audio_queue = audio_queue
        self._channels = channels
        self._blocksize = blocksize
        self._should_stop = should_stop
        self._is_paused = is_paused
        self._queue_timeout = queue_timeout
        self.logger = getLogger(__name__)

    def _check_status(self, frames: int, status):
        """
        Checks the current status of the audio callback and raises errors if
        the processing shouldn't continue.
        """
        if self._should_stop():
            raise sd.CallbackStop

        assert frames == self._blocksize, (
            f'Received {frames} frames, expected blocksize is {self._blocksize}'
        )

        assert not status.output_underflow, 'Output underflow: increase blocksize?'
        assert not status, f'Audio callback failed: {status}'

    def _audio_callback(self, outdata: NDArray[np.number], frames: int, status):
        if self._is_paused():
            return

        self._check_status(frames, status)

        try:
            data = self._audio_queue.get_nowait()
        except Empty as e:
            raise (
                sd.CallbackStop
                if self._should_stop()
                else AssertionError('Buffer is empty: increase buffersize?')
            ) from e

        if data.shape[0] == 0:
            raise sd.CallbackStop

        audio_length = min(len(data), len(outdata))
        outdata[:audio_length] = data[:audio_length]

    # The third positional argument (the stream time info) is unused
    def __call__(self, outdata: NDArray[np.number], frames: int, _, status):
        try:
            self._audio_callback(outdata, frames, status)
        except AssertionError as e:
            self.logger.warning(str(e))
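A sketch of how such a functor can be wired into sounddevice. This is illustrative only: in the plugin the queue is fed by the AudioGenerator thread, while here it is pre-filled with silence, and the stream parameters are made-up values.

from queue import Queue

import numpy as np
import sounddevice as sd

blocksize, channels, sample_rate = 1024, 1, 44100
audio_queue: Queue = Queue(maxsize=20)

# Pre-fill the queue with a few blocks of silence just for the sketch.
for _ in range(20):
    audio_queue.put(np.zeros((blocksize, channels), dtype='float32'))

callback = AudioOutputCallback(
    audio_queue=audio_queue,
    channels=channels,
    blocksize=blocksize,
)

# sounddevice invokes the functor as (outdata, frames, time, status) for every
# output block; an empty queue only logs a warning, while an empty block or a
# should_stop() signal raises CallbackStop and ends the stream.
with sd.OutputStream(
    samplerate=sample_rate,
    blocksize=blocksize,
    channels=channels,
    dtype='float32',
    callback=callback,
):
    sd.sleep(500)  # play ~0.5 s of the queued audio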
111 platypush/plugins/sound/_streams/_player/_synth/_parser.py (new file)
@@ -0,0 +1,111 @@
import math
import re
from typing import Optional, Union


class SoundParser:
    """
    A utility mixin with some methods to parse and convert sound information -
    e.g. MIDI notes from strings, MIDI notes to frequencies, and the other way
    around.
    """

    _DEFAULT_A4_FREQUENCY = 440.0
    _MIDI_NOTE_REGEX = re.compile(r'^([A-G])([#b]?)(-?[0-9]+)$')
    _MID_A_MIDI_NOTE = 69
    _NOTE_OFFSETS = {
        'C': 0,
        'C#': 1,
        'Db': 1,
        'D': 2,
        'D#': 3,
        'Eb': 3,
        'E': 4,
        'F': 5,
        'F#': 6,
        'Gb': 6,
        'G': 7,
        'G#': 8,
        'Ab': 8,
        'A': 9,
        'A#': 10,
        'Bb': 10,
        'B': 11,
    }

    _ALTERATION_OFFSETS = {
        'b': -1,
        '': 0,
        '#': 1,
    }

    def __init__(self, *_, ref_frequency: float = _DEFAULT_A4_FREQUENCY, **__) -> None:
        self._ref_frequency = ref_frequency

    @staticmethod
    def _get_alteration_offset(alt: str) -> int:
        """
        Calculate the MIDI note offset given by its reported sharp/flat alteration.
        """
        if alt == '#':
            return 1
        if alt == 'b':
            return -1
        return 0

    @classmethod
    def get_midi_note(cls, note: Union[str, int]) -> int:
        """
        Convert a MIDI note given as input (either an integer or a string like
        'C4') to a MIDI note number.

        :raise: ValueError
        """
        if isinstance(note, str):
            note = note[:1].upper() + note[1:]
            m = cls._MIDI_NOTE_REGEX.match(note)
            if not m:
                raise ValueError(f'Invalid MIDI note: {note}')

            base_note, alteration, octave = m.groups()
            octave = int(octave)
            note_offset = cls._NOTE_OFFSETS[base_note] + cls._get_alteration_offset(
                alteration
            )

            octave_offset = (octave + 1) * 12
            note = octave_offset + note_offset

        if isinstance(note, int):
            if not 0 <= note <= 127:
                raise ValueError(f'MIDI note out of range: {note}')
            return note

        raise ValueError(f'Invalid MIDI note: {note}')

    def note_to_freq(
        self, midi_note: Union[int, str], ref_frequency: Optional[float] = None
    ):
        """
        Converts a MIDI note to its frequency in Hz

        :param midi_note: MIDI note to convert
        :param ref_frequency: Reference A4 frequency override (default: 440 Hz).
        """
        note = self.get_midi_note(midi_note)
        return (2.0 ** ((note - self._MID_A_MIDI_NOTE) / 12.0)) * (
            ref_frequency or self._ref_frequency
        )

    def freq_to_note(self, frequency: float, ref_frequency: Optional[float] = None):
        """
        Converts a frequency in Hz to its closest MIDI note

        :param frequency: Frequency in Hz
        :param ref_frequency: Reference A4 frequency override (default: 440 Hz).
        """
        std_freq = ref_frequency or self._ref_frequency
        return int(12.0 * math.log(frequency / std_freq, 2) + self._MID_A_MIDI_NOTE)
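A short worked example of the conversions above, using the default A4 = 440 Hz reference: the frequency of a note n is 440 * 2 ** ((n - 69) / 12), and freq_to_note inverts that with a base-2 logarithm.

parser = SoundParser()               # A4 = 440 Hz reference

assert parser.get_midi_note('A4') == 69
assert parser.get_midi_note('C4') == 60

print(parser.note_to_freq('A4'))     # 440.0
print(parser.note_to_freq('C4'))     # ~261.63 Hz (440 * 2 ** (-9 / 12))
print(parser.freq_to_note(261.63))   # 60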