Merge pull request 'New `picamera` integration' (#367) from 363/new-picamera-integration into master
continuous-integration/drone/push Build is passing Details

Reviewed-on: #367
This commit is contained in:
Fabio Manganiello 2024-02-25 21:41:42 +01:00
commit 5bf286d07c
23 changed files with 908 additions and 496 deletions

View File

@ -7,7 +7,6 @@ Backends
:caption: Backends:
platypush/backend/button.flic.rst
platypush/backend/camera.pi.rst
platypush/backend/chat.telegram.rst
platypush/backend/http.rst
platypush/backend/midi.rst

View File

@ -1,6 +0,0 @@
``camera.pi``
===============================
.. automodule:: platypush.backend.camera.pi
:members:

View File

@ -0,0 +1,5 @@
``camera.pi.legacy``
====================
.. automodule:: platypush.plugins.camera.pi.legacy
:members:

View File

@ -21,6 +21,7 @@ Plugins
platypush/plugins/camera.gstreamer.rst
platypush/plugins/camera.ir.mlx90640.rst
platypush/plugins/camera.pi.rst
platypush/plugins/camera.pi.legacy.rst
platypush/plugins/chat.irc.rst
platypush/plugins/chat.telegram.rst
platypush/plugins/clipboard.rst

View File

@ -1,214 +0,0 @@
import json
import socket
from enum import Enum
from threading import Thread
from platypush.backend import Backend
from platypush.context import get_backend
class CameraPiBackend(Backend):
    """
    Backend to interact with a Raspberry Pi camera. It can start and stop
    recordings and take pictures. It can be programmatically controlled through
    the :class:`platypush.plugins.camera.pi` plugin. Note that the Redis backend
    must be configured and running to enable camera control.

    This backend is **DEPRECATED**. Use the plugin :class:`platypush.plugins.camera.pi.CameraPiPlugin` instead to run
    Pi camera actions. If you want to start streaming the camera on application start then simply create an event hook
    on :class:`platypush.message.event.application.ApplicationStartedEvent` that runs ``camera.pi.start_streaming``.
    """

    class CameraAction(Enum):
        # Actions accepted on the Redis control queue.
        START_RECORDING = 'START_RECORDING'
        STOP_RECORDING = 'STOP_RECORDING'
        TAKE_PICTURE = 'TAKE_PICTURE'

        def __eq__(self, other):
            # Allow direct comparison against the plain string values carried
            # by the JSON messages read from the Redis queue.
            # NOTE(review): overriding __eq__ without __hash__ makes the enum
            # members unhashable — confirm nothing uses them as dict/set keys.
            return self.value == other

    # noinspection PyUnresolvedReferences,PyPackageRequirements
    def __init__(
        self,
        listen_port,
        bind_address='0.0.0.0',
        x_resolution=640,
        y_resolution=480,
        redis_queue='platypush/camera/pi',
        start_recording_on_startup=True,
        framerate=24,
        hflip=False,
        vflip=False,
        sharpness=0,
        contrast=0,
        brightness=50,
        video_stabilization=False,
        iso=0,
        exposure_compensation=0,
        exposure_mode='auto',
        meter_mode='average',
        awb_mode='auto',
        image_effect='none',
        color_effects=None,
        rotation=0,
        crop=(0.0, 0.0, 1.0, 1.0),
        **kwargs
    ):
        """
        See https://www.raspberrypi.org/documentation/usage/camera/python/README.md
        for a detailed reference about the Pi camera options.

        :param listen_port: Port where the camera process will provide the video output while recording
        :type listen_port: int

        :param bind_address: Bind address (default: 0.0.0.0).
        :type bind_address: str

        :param redis_queue: Name of the Redis queue used to receive camera
            control messages (default: ``platypush/camera/pi``).
        :param start_recording_on_startup: If set, a ``START_RECORDING``
            action is enqueued as soon as the backend starts.

        The remaining keyword arguments map one-to-one onto ``picamera``
        camera attributes (see the link above for their semantics).
        """
        super().__init__(**kwargs)

        self.bind_address = bind_address
        self.listen_port = listen_port
        # Plain TCP server socket: clients connect here to read the raw video
        # stream while a recording without an output file is running.
        self.server_socket = socket.socket()
        self.server_socket.bind(
            (self.bind_address, self.listen_port)
        )  # lgtm [py/bind-socket-all-network-interfaces]
        self.server_socket.listen(0)

        # Imported lazily so the backend can be declared on systems where the
        # (deprecated) picamera package isn't installed.
        import picamera

        self.camera = picamera.PiCamera()
        self.camera.resolution = (x_resolution, y_resolution)
        self.camera.framerate = framerate
        self.camera.hflip = hflip
        self.camera.vflip = vflip
        self.camera.sharpness = sharpness
        self.camera.contrast = contrast
        self.camera.brightness = brightness
        self.camera.video_stabilization = video_stabilization
        self.camera.ISO = iso
        self.camera.exposure_compensation = exposure_compensation
        self.camera.exposure_mode = exposure_mode
        self.camera.meter_mode = meter_mode
        self.camera.awb_mode = awb_mode
        self.camera.image_effect = image_effect
        self.camera.color_effects = color_effects
        self.camera.rotation = rotation
        self.camera.crop = crop
        self.start_recording_on_startup = start_recording_on_startup
        # The Redis backend is resolved lazily in run(), since it may not be
        # initialized yet at construction time.
        self.redis = None
        self.redis_queue = redis_queue
        self._recording_thread = None

    def send_camera_action(self, action, **kwargs):
        # Serialize the action (plus any extra fields, e.g. image_file) as
        # JSON and push it on the Redis control queue consumed by run().
        action = {'action': action.value, **kwargs}
        self.redis.send_message(msg=json.dumps(action), queue_name=self.redis_queue)

    def take_picture(self, image_file):
        """
        Take a picture.

        :param image_file: Output image file
        :type image_file: str
        """
        self.logger.info('Capturing camera snapshot to {}'.format(image_file))
        self.camera.capture(image_file)
        self.logger.info('Captured camera snapshot to {}'.format(image_file))

    # noinspection PyShadowingBuiltins
    def start_recording(self, video_file=None, format='h264'):
        """
        Start a recording.

        :param video_file: Output video file. If specified, the video will be recorded to file, otherwise it will be
            served via TCP/IP on the listen_port. Use ``stop_recording`` to stop the recording.
        :type video_file: str

        :param format: Video format (default: h264)
        :type format: str
        """

        # noinspection PyBroadException
        def recording_thread():
            if video_file:
                # File recording: record until stop_recording() is invoked
                # (wait_recording also surfaces any encoder errors).
                self.camera.start_recording(video_file, format=format)
                while True:
                    self.camera.wait_recording(2)
            else:
                # Network streaming: serve one client at a time over the TCP
                # server socket until the backend is asked to stop.
                while not self.should_stop():
                    connection = self.server_socket.accept()[0].makefile('wb')
                    self.logger.info(
                        'Accepted client connection on port {}'.format(self.listen_port)
                    )

                    try:
                        self.camera.start_recording(connection, format=format)
                        while True:
                            self.camera.wait_recording(2)
                    except ConnectionError:
                        self.logger.info('Client closed connection')
                        try:
                            self.stop_recording()
                        except Exception as e:
                            self.logger.warning(
                                'Could not stop recording: {}'.format(str(e))
                            )

                        try:
                            connection.close()
                        except Exception as e:
                            self.logger.warning(
                                'Could not close connection: {}'.format(str(e))
                            )

                    # NOTE(review): re-enqueues a START_RECORDING action after
                    # a client disconnects, presumably to restart the stream
                    # for the next client — confirm intended flow with run().
                    self.send_camera_action(self.CameraAction.START_RECORDING)

        if self._recording_thread:
            # Only one recording thread at a time.
            self.logger.info('Recording already running')
            return

        self.logger.info('Starting camera recording')
        self._recording_thread = Thread(
            target=recording_thread, name='PiCameraRecorder'
        )
        self._recording_thread.start()

    def stop_recording(self):
        """Stops recording"""
        self.logger.info('Stopping camera recording')

        try:
            self.camera.stop_recording()
        except Exception as e:
            # Best-effort: stopping when no recording is active raises.
            self.logger.warning('Failed to stop recording')
            self.logger.exception(e)

    def run(self):
        super().run()

        if not self.redis:
            # Resolve the Redis backend lazily; it must be configured for
            # this backend to receive control messages.
            self.redis = get_backend('redis')

        if self.start_recording_on_startup:
            self.send_camera_action(self.CameraAction.START_RECORDING)

        self.logger.info('Initialized Pi camera backend')

        # Main control loop: block on the Redis queue and dispatch actions.
        # String payloads compare equal to CameraAction members through the
        # custom __eq__ defined above.
        while not self.should_stop():
            try:
                msg = self.redis.get_message(self.redis_queue)

                if msg.get('action') == self.CameraAction.START_RECORDING:
                    self.start_recording()
                elif msg.get('action') == self.CameraAction.STOP_RECORDING:
                    self.stop_recording()
                elif msg.get('action') == self.CameraAction.TAKE_PICTURE:
                    self.take_picture(image_file=msg.get('image_file'))
            except Exception as e:
                # Keep the control loop alive on any handler failure.
                self.logger.exception(e)


# vim:sw=4:ts=4:et:

View File

@ -594,6 +594,29 @@ backend.http:
# horizontal_flip: false
# # Whether to flip the image along the horizontal axis (default: False)
# vertical_flip: false
#
# # -- Streaming options
# # If `stream_on_start` is set to true, then camera streaming will start as
# # soon as the application/plugin is started. Otherwise, only when the
# # `camera.<plugin>.start_streaming` action is run. The camera will be
# # streamed on the specified `bind_address` and `listen_port` in the
# # specified `stream_format`. If `stream_format` is a video format (e.g.
# # h264 or mkv) then you can play the raw camera stream through e.g.
# # `vlc tcp://<address>:<listen_port>`.
# # Alternatively, you can access the camera stream over HTTP at
# # `http(s)://<address>:<http-port>/camera/<plugin>/video.<format>`.
# # For example, for an MJPEG stream (usually the fastest option over HTTP):
# # `http://localhost:8008/camera/ffmpeg/video.mjpeg`.
# # An HTTP stream is the safest option, as it has to go through the standard
# # HTTP authentication process, while direct TCP access may expose your
# # camera to unauthenticated access. If you decide to directly stream over
# # TCP, make sure to carefully select the `bind_address`, add a firewall
# # rule for the streaming port, and/or ensure that the device's port is only
# # accessible from a safe network.
# # stream_on_start: false
# # bind_address: 0.0.0.0
# # listen_port: 5000
# # stream_format: h264
###
### -----------------

View File

@ -19,7 +19,7 @@ from platypush.message.event.camera import (
CameraRecordingStoppedEvent,
CameraVideoRenderedEvent,
)
from platypush.plugins import Plugin, action
from platypush.plugins import RunnablePlugin, action
from platypush.plugins.camera.model.camera import CameraInfo, Camera
from platypush.plugins.camera.model.exceptions import (
CameraException,
@ -31,7 +31,7 @@ from platypush.plugins.camera.model.writer.preview import (
PreviewWriter,
PreviewWriterFactory,
)
from platypush.utils import get_plugin_name_by_class
from platypush.utils import get_plugin_name_by_class, wait_for_either
__all__ = [
'Camera',
@ -45,7 +45,7 @@ __all__ = [
]
class CameraPlugin(Plugin, ABC):
class CameraPlugin(RunnablePlugin, ABC):
"""
Abstract plugin to control camera devices.
@ -86,6 +86,7 @@ class CameraPlugin(Plugin, ABC):
stream_format: str = 'mjpeg',
listen_port: Optional[int] = 5000,
bind_address: str = '0.0.0.0',
stream_on_start: bool = False,
ffmpeg_bin: str = 'ffmpeg',
input_codec: Optional[str] = None,
output_codec: Optional[str] = None,
@ -94,41 +95,57 @@ class CameraPlugin(Plugin, ABC):
"""
:param device: Identifier of the default capturing device.
:param resolution: Default resolution, as a tuple of two integers.
:param frames_dir: Directory where the camera frames will be stored (default:
``~/.local/share/platypush/<plugin.name>/frames``)
:param frames_dir: Directory where the camera frames will be stored
(default: ``~/.local/share/platypush/<plugin.name>/frames``)
:param warmup_frames: Cameras usually take a while to adapt their
luminosity and focus to the environment when taking a picture.
This parameter allows you to specify the number of "warmup" frames
to capture upon picture command before actually capturing a frame
(default: 5 but you may want to calibrate this parameter for your
camera)
:param warmup_seconds: Number of seconds to wait before a picture is taken or the first frame of a
video/sequence is captured (default: 0).
:param capture_timeout: Maximum number of seconds to wait between the programmed termination of a capture
session and the moment the device is released.
:param scale_x: If set, the images will be scaled along the x-axis by the specified factor
:param scale_y: If set, the images will be scaled along the y-axis by the specified factor
:param warmup_seconds: Number of seconds to wait before a picture is
taken or the first frame of a video/sequence is captured (default:
0).
:param capture_timeout: Maximum number of seconds to wait between the
programmed termination of a capture session and the moment the
device is released.
:param scale_x: If set, the images will be scaled along the x-axis by
the specified factor
:param scale_y: If set, the images will be scaled along the y-axis by
the specified factor
:param color_transform: Color transformation to apply to the images.
:param grayscale: Whether the output should be converted to grayscale.
:param rotate: If set, the images will be rotated by the specified number of degrees
:param rotate: If set, the images will be rotated by the specified
number of degrees
:param fps: Frames per second (default: 25).
:param horizontal_flip: If set, the images will be flipped on the horizontal axis.
:param vertical_flip: If set, the images will be flipped on the vertical axis.
:param listen_port: Default port to be used for streaming over TCP (default: 5000).
:param bind_address: Default bind address for TCP streaming (default: 0.0.0.0, accept any connections).
:param input_codec: Specify the ffmpeg video codec (``-vcodec``) used for the input.
:param output_codec: Specify the ffmpeg video codec (``-vcodec``) to be used for encoding the output. For some
ffmpeg output formats (e.g. ``h264`` and ``rtp``) this may default to ``libxvid``.
:param horizontal_flip: If set, the images will be flipped on the
horizontal axis.
:param vertical_flip: If set, the images will be flipped on the vertical
axis.
:param listen_port: Default port to be used for streaming over TCP
(default: 5000).
:param bind_address: Default bind address for TCP streaming (default:
0.0.0.0, accept connections on any network interface).
:param stream_on_start: If set, the camera will start streaming on the
specified ``bind_address`` and ``listen_port`` as soon as the plugin
is started. Otherwise, the stream will be started only when the
:meth:`.start_streaming` method is called. Default: False.
:param input_codec: Specify the ffmpeg video codec (``-vcodec``) used
for the input.
:param output_codec: Specify the ffmpeg video codec (``-vcodec``) to be
used for encoding the output. For some ffmpeg output formats (e.g.
``h264`` and ``rtp``) this may default to ``libxvid``.
:param input_format: Plugin-specific format/type for the input stream.
:param output_format: Plugin-specific format/type for the output videos.
:param ffmpeg_bin: Path to the ffmpeg binary (default: ``ffmpeg``).
:param stream_format: Default format for the output when streamed to a network device. Available:
:param stream_format: Default format for the output when streamed to a
network device. Available:
- ``MJPEG`` (default)
- ``H264`` (over ``ffmpeg``)
- ``H265`` (over ``ffmpeg``)
- ``MKV`` (over ``ffmpeg``)
- ``MP4`` (over ``ffmpeg``)
- ``mjpeg`` (default)
- ``h264`` (over ``ffmpeg``)
- ``h265`` (over ``ffmpeg``)
- ``mkv`` (over ``ffmpeg``)
- ``mp4`` (over ``ffmpeg``)
"""
super().__init__(**kwargs)
@ -137,6 +154,7 @@ class CameraPlugin(Plugin, ABC):
plugin_name = get_plugin_name_by_class(self)
assert isinstance(workdir, str) and plugin_name
self.workdir = os.path.join(workdir, plugin_name)
self._stream_on_start = stream_on_start
pathlib.Path(self.workdir).mkdir(mode=0o755, exist_ok=True, parents=True)
self.camera_info = self._camera_info_class(
@ -176,9 +194,10 @@ class CameraPlugin(Plugin, ABC):
def open_device(
self,
device: Optional[Union[int, str]],
device: Optional[Union[int, str]] = None,
stream: bool = False,
redis_queue: Optional[str] = None,
ctx: Optional[dict] = None,
**params,
) -> Camera:
"""
@ -210,14 +229,13 @@ class CameraPlugin(Plugin, ABC):
else:
camera = self._camera_class(info=info)
ctx = ctx or {}
ctx['stream'] = stream
camera.info.set(**params)
camera.object = self.prepare_device(camera)
camera.object = self.prepare_device(camera, **ctx)
if stream and camera.info.stream_format:
writer_class = StreamWriter.get_class_by_name(camera.info.stream_format)
camera.stream = writer_class(
camera=camera, plugin=self, redis_queue=redis_queue
)
self._prepare_stream_writer(camera, redis_queue=redis_queue)
if camera.info.frames_dir:
pathlib.Path(
@ -227,6 +245,13 @@ class CameraPlugin(Plugin, ABC):
self._devices[device] = camera
return camera
def _prepare_stream_writer(self, camera: Camera, redis_queue: Optional[str] = None):
assert camera.info.stream_format, 'No stream format specified'
writer_class = StreamWriter.get_class_by_name(camera.info.stream_format)
camera.stream = writer_class(
camera=camera, plugin=self, redis_queue=redis_queue
)
def close_device(self, camera: Camera, wait_capture: bool = True) -> None:
"""
Close and release a device.
@ -288,7 +313,7 @@ class CameraPlugin(Plugin, ABC):
self.close_device(camera)
@abstractmethod
def prepare_device(self, device: Camera):
def prepare_device(self, device: Camera, **_):
"""
Prepare a device using the plugin-specific logic - to be implemented by the derived classes.
@ -315,7 +340,9 @@ class CameraPlugin(Plugin, ABC):
raise NotImplementedError()
@staticmethod
def store_frame(frame, filepath: str, format: Optional[str] = None):
def store_frame( # pylint: disable=redefined-builtin
frame, filepath: str, format: Optional[str] = None
):
"""
Capture a frame to the filesystem using the ``PIL`` library - it can be overridden by derived classes.
@ -339,9 +366,9 @@ class CameraPlugin(Plugin, ABC):
def _store_frame(
self,
frame,
*args,
frames_dir: Optional[str] = None,
image_file: Optional[str] = None,
*args,
**kwargs,
) -> str:
"""
@ -687,61 +714,84 @@ class CameraPlugin(Plugin, ABC):
return self.status(camera.info.device) # type: ignore
@staticmethod
def _prepare_server_socket(camera: Camera) -> socket.socket:
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(
( # lgtm [py/bind-socket-all-network-interfaces]
camera.info.bind_address or '0.0.0.0',
camera.info.listen_port,
@contextmanager
def _prepare_server_socket(camera: Camera) -> Generator[socket.socket, None, None]:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv_sock:
srv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv_sock.bind(
( # lgtm [py/bind-socket-all-network-interfaces]
camera.info.bind_address or '0.0.0.0',
camera.info.listen_port,
)
)
)
server_socket.listen(1)
server_socket.settimeout(1)
return server_socket
srv_sock.listen(1)
srv_sock.settimeout(1)
yield srv_sock
def _accept_client(self, server_socket: socket.socket) -> Optional[IO]:
def _accept_client(
self, server_socket: socket.socket
) -> Tuple[Optional[socket.socket], Optional[IO]]:
try:
sock = server_socket.accept()[0]
self.logger.info('Accepted client connection from %s', sock.getpeername())
return sock.makefile('wb')
return sock, sock.makefile('wb')
except socket.timeout:
return
return None, None
def streaming_thread(
self, camera: Camera, stream_format: str, duration: Optional[float] = None
):
streaming_started_time = time.time()
server_socket = self._prepare_server_socket(camera)
sock = None
self.logger.info('Starting streaming on port %s', camera.info.listen_port)
with self._prepare_server_socket(camera) as srv_sock:
streaming_started_time = time.time()
sock, fp = None, None
self.logger.info('Starting streaming on port %s', camera.info.listen_port)
try:
while camera.stream_event.is_set():
if duration and time.time() - streaming_started_time >= duration:
break
try:
while (
camera.stream_event.is_set()
and not camera.stop_stream_event.is_set()
and not self.should_stop()
):
if duration and time.time() - streaming_started_time >= duration:
break
sock = self._accept_client(server_socket)
if not sock:
continue
sock, fp = self._accept_client(srv_sock)
if not (sock and fp):
continue
if camera.info.device not in self._devices:
info = asdict(camera.info)
info['stream_format'] = stream_format
camera = self.open_device(stream=True, **info)
if duration and time.time() - streaming_started_time >= duration:
break
assert camera.stream, 'No camera stream available'
camera.stream.sock = sock
self.start_camera(
camera, duration=duration, frames_dir=None, image_file=None
)
finally:
self._cleanup_stream(camera, server_socket, sock)
self.logger.info('Stopped camera stream')
self._streaming_loop(
camera, stream_format, sock=fp, duration=duration
)
finally:
self._cleanup_stream(camera, srv_sock, fp)
self.logger.info('Stopped camera stream')
def _streaming_loop(
self,
camera: Camera,
stream_format: str,
sock: IO,
duration: Optional[float] = None,
):
if camera.info.device not in self._devices:
info = asdict(camera.info)
info['stream_format'] = stream_format
camera = self.open_device(stream=True, **info)
assert camera.stream, 'No camera stream available'
camera.stream.sock = sock
self.start_camera(camera, duration=duration, frames_dir=None, image_file=None)
def _cleanup_stream(
self, camera: Camera, server_socket: socket.socket, client: Optional[IO]
):
camera.stream_event.clear()
camera.stop_stream_event.set()
if client:
try:
client.close()
@ -764,22 +814,40 @@ class CameraPlugin(Plugin, ABC):
self,
device: Optional[Union[int, str]] = None,
duration: Optional[float] = None,
stream_format: str = 'mkv',
stream_format: Optional[str] = None,
**camera,
) -> dict:
"""
Expose the video stream of a camera over a TCP connection.
:param device: Name/path/ID of the device to capture from (default: None, use the default device).
:param duration: Streaming thread duration (default: until :meth:`.stop_streaming` is called).
:param stream_format: Format of the output stream - e.g. ``h264``, ``mjpeg``, ``mkv`` etc. (default: ``mkv``).
When the streaming is started, the plugin will listen on the specified
``bind_address`` and ``listen_port`` and stream camera frames to
connected clients. If ``stream_format`` is a video format (H264, H265,
MKV, MP4 etc.) then the camera stream can be viewed using a video
player - for example, using ``vlc``:
.. code-block:: bash
vlc tcp://<host>:<port>
:param device: Name/path/ID of the device to capture from (default:
None, use the default device).
:param duration: Streaming thread duration (default: until
:meth:`.stop_streaming` is called).
:param stream_format: Format of the output stream - e.g. ``h264``,
``mjpeg``, ``mkv`` etc. If not specified, the ``stream_format``
configured on the plugin will be used.
:param camera: Camera object properties - see constructor parameters.
:return: The status of the device.
"""
camera = self.open_device(
device=device, stream=True, stream_format=stream_format, **camera
device=device,
stream=True,
stream_format=stream_format or self.camera_info.stream_format,
**camera,
)
return self._start_streaming(camera, duration, stream_format) # type: ignore
return self._start_streaming(camera, duration, camera.info.stream_format) # type: ignore
def _start_streaming(
self, camera: Camera, duration: Optional[float], stream_format: str
@ -788,10 +856,11 @@ class CameraPlugin(Plugin, ABC):
assert (
not camera.stream_event.is_set() and camera.info.device not in self._streams
), f'A streaming session is already running for device {camera.info.device}'
assert camera.info.device, 'No device name available'
assert camera.info.device is not None, 'No device name available'
self._streams[camera.info.device] = camera
camera.stream_event.set()
camera.stop_stream_event.clear()
camera.stream_thread = threading.Thread(
target=self.streaming_thread,
@ -821,6 +890,8 @@ class CameraPlugin(Plugin, ABC):
def _stop_streaming(self, camera: Camera):
camera.stream_event.clear()
camera.stop_stream_event.set()
if camera.stream_thread and camera.stream_thread.is_alive():
camera.stream_thread.join(timeout=5.0)
@ -949,5 +1020,30 @@ class CameraPlugin(Plugin, ABC):
return camera.info.warmup_frames / camera.info.fps
return 0
    def main(self):
        # Plugin main loop. If streaming on start is disabled there is
        # nothing to supervise: just block until the stop signal arrives.
        if not self._stream_on_start:
            self.wait_stop()
            return

        while not self.should_stop():
            if self._stream_on_start:
                self.start_streaming()

            cameras = list(self._streams.values())
            if not cameras:
                self.logger.warning('No camera devices could be streamed')
                self.wait_stop()
                break

            # NOTE(review): only the first streamed camera is monitored here —
            # confirm that multi-camera setups are supervised elsewhere.
            camera = cameras[0]

            try:
                # Block until either the application is stopping or this
                # camera's stream has been terminated, then tear it down.
                wait_for_either(self._should_stop, camera.stop_stream_event)
                self.stop_streaming()
            except Exception as e:
                self.logger.warning('Error while stopping the camera stream: %s', e)
            finally:
                # Grace period before the loop (possibly) restarts the stream.
                self.wait_stop(timeout=2)
# vim:sw=4:ts=4:et:

View File

@ -43,7 +43,7 @@ class CameraCvPlugin(CameraPlugin):
if video_writer == 'cv':
self._video_writer_class = CvFileWriter
def prepare_device(self, device: Camera):
def prepare_device(self, device: Camera, **_):
import cv2
cam = cv2.VideoCapture(device.info.device)

View File

@ -34,7 +34,7 @@ class CameraFfmpegPlugin(CameraPlugin):
super().__init__(device=device, input_format=input_format, **opts)
self.camera_info.ffmpeg_args = ffmpeg_args or () # type: ignore
def prepare_device(self, device: Camera) -> subprocess.Popen:
def prepare_device(self, device: Camera, **_) -> subprocess.Popen:
assert isinstance(device, FFmpegCamera)
warmup_seconds = self._get_warmup_seconds(device)
ffmpeg = [

View File

@ -22,7 +22,7 @@ class CameraGstreamerPlugin(CameraPlugin):
"""
super().__init__(device=device, **opts)
def prepare_device(self, camera: GStreamerCamera) -> Pipeline:
def prepare_device(self, camera: GStreamerCamera, **_) -> Pipeline:
pipeline = Pipeline()
src = pipeline.add_source('v4l2src', device=camera.info.device)
convert = pipeline.add('videoconvert')

View File

@ -65,7 +65,7 @@ class CameraIrMlx90640Plugin(CameraPlugin):
def _is_capture_running(self):
return self._capture_proc is not None and self._capture_proc.poll() is None
def prepare_device(self, device: Camera):
def prepare_device(self, device: Camera, **_):
if not self._is_capture_running():
self._capture_proc = subprocess.Popen(
[self.rawrgb_path, '{}'.format(device.info.fps)],

View File

@ -50,6 +50,7 @@ class Camera:
info: CameraInfo
start_event: threading.Event = threading.Event()
stream_event: threading.Event = threading.Event()
stop_stream_event: threading.Event = threading.Event()
capture_thread: Optional[threading.Thread] = None
stream_thread: Optional[threading.Thread] = None
object = None

View File

@ -17,11 +17,9 @@ class VideoWriter(ABC):
mimetype: Optional[str] = None
def __init__(self, *_, **kwargs):
from platypush.plugins.camera import Camera, CameraPlugin
self.logger = logging.getLogger(self.__class__.__name__)
self.camera: Camera = kwargs.pop('camera')
self.plugin: CameraPlugin = kwargs.pop('plugin')
self.camera = kwargs.get('camera', getattr(self, 'camera', None))
self.plugin = kwargs.get('plugin', getattr(self, 'plugin', None))
self.closed = False
@abstractmethod

View File

@ -203,10 +203,10 @@ class MKVStreamWriter(FFmpegStreamWriter):
class H264StreamWriter(FFmpegStreamWriter):
mimetype = 'video/h264'
def __init__(self, camera: Camera, *args, **kwargs):
def __init__(self, *args, camera: Camera, **kwargs):
if not camera.info.output_codec:
camera.info.output_codec = 'libxvid'
super().__init__(camera, *args, output_format='h264', **kwargs)
super().__init__(*args, camera=camera, output_format='h264', **kwargs)
class H265StreamWriter(FFmpegStreamWriter):

View File

@ -1,208 +1,303 @@
import threading
import os
import time
from typing import Optional, List, Tuple, Union
from typing import IO, Optional, Union
from platypush.plugins import action
from platypush.plugins.camera import CameraPlugin, Camera
from platypush.plugins.camera.pi.model import PiCameraInfo, PiCamera
from .model import PiCameraInfo, PiCamera
class CameraPiPlugin(CameraPlugin):
"""
Plugin to control a Pi camera.
Plugin to interact with a `Pi Camera
<https://www.raspberrypi.com/documentation/accessories/camera.html>`_.
.. warning::
This plugin is **DEPRECATED**, as it relies on the old ``picamera`` module.
On recent systems, it should be possible to access the Pi Camera through
the ffmpeg or gstreamer integrations.
This integration is intended to work with the `picamera2
<https://github.com/raspberrypi/picamera2>`_ module.
If you are running a very old OS that only provides the deprecated
`picamera <https://github.com/waveform80/picamera>`_ module, or you rely on
features that are currently only supported by the old module, you should
use :class:`platypush.plugins.camera.pi.legacy.CameraPiLegacyPlugin`
instead.
"""
_camera_class = PiCamera
_camera_info_class = PiCameraInfo
_awb_modes = [
"Auto",
"Incandescent",
"Tungsten",
"Fluorescent",
"Indoor",
"Daylight",
"Cloudy",
]
def __init__(
self,
device: int = 0,
fps: float = 30.0,
warmup_seconds: float = 2.0,
sharpness: int = 0,
contrast: int = 0,
brightness: int = 50,
video_stabilization: bool = False,
sharpness: float = 1.0,
contrast: float = 1.0,
brightness: float = 0.0,
iso: int = 0,
exposure_compensation: int = 0,
exposure_mode: str = 'auto',
meter_mode: str = 'average',
awb_mode: str = 'auto',
image_effect: str = 'none',
led_pin: Optional[int] = None,
color_effects: Optional[Union[str, List[str]]] = None,
zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0),
**camera
exposure_compensation: float = 0.0,
awb_mode: str = 'Auto',
stream_format: str = 'h264',
**camera,
):
"""
See https://www.raspberrypi.org/documentation/usage/camera/python/README.md
for a detailed reference about the Pi camera options.
:param device: Camera device number (default: 0). Only supported on
devices with multiple camera slots.
:param fps: Frames per second (default: 30.0).
:param warmup_seconds: Seconds to wait for the camera to warm up
before taking a photo (default: 2.0).
:param sharpness: Sharpness level, as a float between 0.0 and 16.0,
where 1.0 is the default value, and higher values are mapped to
higher sharpness levels.
:param contrast: Contrast level, as a float between 0.0 and 32.0, where
1.0 is the default value, and higher values are mapped to higher
contrast levels.
:param brightness: Brightness level, as a float between -1.0 and 1.0.
:param iso: ISO level (default: 0).
:param exposure_compensation: Exposure compensation level, as a float
between -8.0 and 8.0.
:param awb_mode: Auto white balance mode. Allowed values:
:param camera: Options for the base camera plugin (see :class:`platypush.plugins.camera.CameraPlugin`).
- ``Auto`` (default)
- ``Daylight``
- ``Cloudy``
- ``Indoor``
- ``Fluorescent``
:param stream_format: Default format for the output when streamed to a
network device. Available:
- ``h264`` (default)
- ``mjpeg``
:param camera: Options for the base camera plugin (see
:class:`platypush.plugins.camera.CameraPlugin`).
"""
super().__init__(
device=device, fps=fps, warmup_seconds=warmup_seconds, **camera
device=device,
fps=fps,
warmup_seconds=warmup_seconds,
stream_format=stream_format,
**camera,
)
self.camera_info.sharpness = sharpness
self.camera_info.contrast = contrast
self.camera_info.brightness = brightness
self.camera_info.video_stabilization = video_stabilization
self.camera_info.iso = iso
self.camera_info.exposure_compensation = exposure_compensation
self.camera_info.meter_mode = meter_mode
self.camera_info.exposure_mode = exposure_mode
self.camera_info.awb_mode = awb_mode
self.camera_info.image_effect = image_effect
self.camera_info.color_effects = color_effects
self.camera_info.zoom = zoom
self.camera_info.led_pin = led_pin
self.camera_info.sharpness = sharpness # type: ignore
self.camera_info.contrast = contrast # type: ignore
self.camera_info.brightness = brightness # type: ignore
self.camera_info.iso = iso # type: ignore
self.camera_info.exposure_compensation = exposure_compensation # type: ignore
self.camera_info.awb_mode = awb_mode # type: ignore
# noinspection DuplicatedCode
def prepare_device(self, device: PiCamera):
# noinspection PyUnresolvedReferences
import picamera
def _get_transform(self, device: Camera):
from libcamera import Orientation, Transform # type: ignore
from picamera2.utils import orientation_to_transform # type: ignore
camera = picamera.PiCamera(
camera_num=device.info.device,
resolution=device.info.resolution,
framerate=device.info.fps,
led_pin=device.info.led_pin,
)
rot = device.info.rotate
if not rot:
return Transform(
# It may seem counterintuitive, but the picamera2 library's flip
# definition is the opposite of ours
hflip=device.info.vertical_flip,
vflip=device.info.horizontal_flip,
)
camera.hflip = device.info.horizontal_flip
camera.vflip = device.info.vertical_flip
camera.sharpness = device.info.sharpness
camera.contrast = device.info.contrast
camera.brightness = device.info.brightness
camera.video_stabilization = device.info.video_stabilization
camera.iso = device.info.iso
camera.exposure_compensation = device.info.exposure_compensation
camera.exposure_mode = device.info.exposure_mode
camera.meter_mode = device.info.meter_mode
camera.awb_mode = device.info.awb_mode
camera.image_effect = device.info.image_effect
camera.color_effects = device.info.color_effects
camera.rotation = device.info.rotate or 0
camera.zoom = device.info.zoom
if rot == 90:
orient = (
Orientation.Rotate90Mirror
if device.info.vertical_flip
else Orientation.Rotate90
)
elif rot == 180:
orient = (
Orientation.Rotate180Mirror
if device.info.horizontal_flip
else Orientation.Rotate180
)
elif rot == 270:
orient = (
Orientation.Rotate270Mirror
if device.info.vertical_flip
else Orientation.Rotate270
)
else:
raise AssertionError(
f'Invalid rotation: {rot}. Supported values: 0, 90, 180, 270'
)
return orientation_to_transform(orient)
def prepare_device(
self,
device: Camera,
start: bool = True,
video: bool = False,
stream: bool = False,
**_,
):
from picamera2 import Picamera2 # type: ignore
assert isinstance(device, PiCamera), f'Invalid device type: {type(device)}'
camera = Picamera2(camera_num=device.info.device)
still = not (video or stream)
cfg_params = {
'main': {
'format': 'XBGR8888' if not still else 'BGR888',
**(
{'size': tuple(map(int, device.info.resolution))}
if device.info.resolution
else {}
),
},
**(
{'transform': self._get_transform(device)}
if not still
# We don't need to flip the image for individual frames, the base camera
# class methods will take care of that
else {}
),
'controls': {
'Brightness': float(device.info.brightness),
'Contrast': float(device.info.contrast),
'Sharpness': float(device.info.sharpness),
'AwbMode': self._awb_modes.index(device.info.awb_mode),
},
}
cfg = (
camera.create_video_configuration
if not still
else camera.create_still_configuration
)(**cfg_params)
camera.configure(cfg)
if start:
camera.start()
time.sleep(max(1, device.info.warmup_seconds))
return camera
def release_device(self, device: PiCamera):
# noinspection PyUnresolvedReferences
import picamera
def release_device(self, device: Camera):
if device.object:
try:
device.object.stop_recording()
except (ConnectionError, picamera.PiCameraNotRecording):
pass
device.object.stop()
device.object.close()
if device.object and not device.object.closed:
try:
device.object.close()
except (ConnectionError, picamera.PiCameraClosed):
pass
    def capture_frame(self, device: Camera, *_, **__):
        """
        Capture a single frame from an already opened camera.

        :param device: The camera device to capture from. ``device.object``
            must hold the underlying picamera2 handle (i.e. the device has
            been opened through ``open_device``).
        :return: The frame captured from the ``main`` stream, as returned by
            picamera2's ``capture_image`` (presumably a PIL image — confirm
            against the picamera2 docs).
        """
        assert device.object, 'Camera not open'
        return device.object.capture_image('main')
def capture_frame(self, camera: Camera, *args, **kwargs):
import numpy as np
from PIL import Image
@property
def _video_encoders_by_format(self) -> dict:
from picamera2.encoders import H264Encoder, MJPEGEncoder # type: ignore
shape = (
camera.info.resolution[1] + (camera.info.resolution[1] % 16),
camera.info.resolution[0] + (camera.info.resolution[0] % 32),
3,
)
frame = np.empty(shape, dtype=np.uint8)
camera.object.capture(frame, 'rgb')
return Image.fromarray(frame)
    def start_preview(self, camera: Camera):
        """
        Start camera preview.

        NOTE(review): unlike the other methods in this plugin, this one does
        not assert that ``camera.object`` is set — calling it on a camera that
        hasn't been opened yet will raise an ``AttributeError``.
        """
        camera.object.start_preview()
    def stop_preview(self, camera: Camera):
        """
        Stop camera preview.

        Any error raised while stopping the preview (e.g. when no preview is
        currently running) is logged as a warning and swallowed, so this
        method is safe to call unconditionally during cleanup.
        """
        try:
            camera.object.stop_preview()
        except Exception as e:
            self.logger.warning(str(e))
return {
'h264': H264Encoder,
'mjpeg': MJPEGEncoder,
}
@action
def capture_preview(
self, duration: Optional[float] = None, n_frames: Optional[int] = None, **camera
) -> dict:
camera = self.open_device(**camera)
self.start_preview(camera)
def capture_video(
self,
device: Optional[int] = None,
duration: Optional[float] = None,
video_file: Optional[str] = None,
preview: bool = False,
**camera,
) -> Optional[Union[str, dict]]:
"""
Capture a video.
:param device: 0-based index of the camera to capture from, if the
device supports multiple cameras. Default: use the configured
camera index or the first available camera.
:param duration: Record duration in seconds (default: None, record
until :meth:`.stop_capture``).
:param video_file: If set, the stream will be recorded to the specified
video file (default: None).
:param camera: Camera parameters override - see constructors parameters.
:param preview: Show a preview of the camera frames.
:return: If duration is specified, the method will wait until the
recording is done and return the local path to the recorded
resource. Otherwise, it will return the status of the camera device
after starting it.
"""
from picamera2 import Picamera2 # type: ignore
from picamera2.encoders import H264Encoder # type: ignore
assert video_file, 'Video file is required'
camera = self.open_device(
device=device, ctx={'start': False, 'video': True}, **camera
)
encoder = H264Encoder()
assert camera.object, 'Camera not open'
assert isinstance(
camera.object, Picamera2
), f'Invalid camera object type: {type(camera.object)}'
if preview:
camera.object.start_preview()
# Only H264 is supported for now
camera.object.start_recording(encoder, os.path.expanduser(video_file))
if n_frames:
duration = n_frames * (camera.info.fps or 0)
if duration:
threading.Timer(duration, lambda: self.stop_preview(camera))
self.wait_stop(duration)
try:
if preview:
camera.object.stop_preview()
finally:
if camera.object:
camera.object.stop_recording()
camera.object.close()
return self.status()
return video_file
def streaming_thread(
self, camera: PiCamera, stream_format: str, duration: Optional[float] = None
):
server_socket = self._prepare_server_socket(camera)
sock = None
streaming_started_time = time.time()
self.logger.info(
'Starting streaming on port {}'.format(camera.info.listen_port)
)
return self.status(camera.info.device).output
try:
while camera.stream_event.is_set():
if duration and time.time() - streaming_started_time >= duration:
break
def _streaming_loop(self, camera: Camera, stream_format: str, sock: IO, *_, **__):
from picamera2 import Picamera2 # type: ignore
from picamera2.outputs import FileOutput # type: ignore
sock = self._accept_client(server_socket)
if not sock:
continue
encoder_cls = self._video_encoders_by_format.get(stream_format.lower())
assert (
encoder_cls
), f'Invalid stream format: {stream_format}. Supported formats: {", ".join(self._video_encoders_by_format)}'
assert isinstance(camera, PiCamera), f'Invalid camera type: {type(camera)}'
assert camera.object and isinstance(
camera.object, Picamera2
), f'Invalid camera object type: {type(camera.object)}'
if camera.object is None or camera.object.closed:
camera = self.open_device(**camera.info.to_dict())
cam = camera.object
encoder = encoder_cls()
cam.encoders = encoder
encoder.output = FileOutput(sock)
cam.start_encoder(encoder)
cam.start()
try:
camera.object.start_recording(sock, format=stream_format)
while camera.stream_event.is_set():
camera.object.wait_recording(1)
except ConnectionError:
self.logger.info('Client closed connection')
finally:
if sock:
try:
sock.close()
except Exception as e:
self.logger.warning(
'Error while closing client socket: {}'.format(str(e))
)
    def _prepare_stream_writer(self, *_, **__):
        """
        Overrides the base method to do nothing - the stream writer is handled
        by the picamera2 library.

        The base class normally sets up its own writer for the video stream;
        here the picamera2 encoder writes directly to the client socket, so
        no extra writer is needed.
        """
self.close_device(camera)
finally:
self._cleanup_stream(camera, server_socket, sock)
self.logger.info('Stopped camera stream')
def _cleanup_stream(self, camera: Camera, *_, **__):
cam = camera.object
if not cam:
return
    @action
    def start_streaming(
        self, duration: Optional[float] = None, stream_format: str = 'h264', **camera
    ) -> dict:
        """
        Start streaming the camera to a network socket.

        :param duration: Streaming duration, in seconds (default: None, i.e.
            stream until explicitly stopped).
        :param stream_format: Format of the output stream (default: ``h264``).
        :param camera: Camera parameters override - see constructor parameters.
        :return: The value returned by the base ``_start_streaming`` —
            presumably the status of the camera device; confirm against
            the base :class:`CameraPlugin` implementation.
        """
        camera = self.open_device(stream_format=stream_format, **camera)
        return self._start_streaming(camera, duration, stream_format)
cam.stop()
cam.stop_encoder()
cam.close()
# vim:sw=4:ts=4:et:

View File

@ -0,0 +1,301 @@
import threading
from typing import IO, Optional, List, Tuple, Union
from platypush.plugins import action
from platypush.plugins.camera import CameraPlugin, Camera
from platypush.utils import wait_for_either
from .model import PiCameraInfo, PiCamera
class CameraPiLegacyPlugin(CameraPlugin):
    """
    Plugin to interact with a `Pi Camera
    <https://www.raspberrypi.com/documentation/accessories/camera.html>`_.

    .. warning::

        This plugin is **DEPRECATED**, as it relies on the old ``picamera``
        module.

    The ``picamera`` module used in this plugin is deprecated and no longer
    maintained. The `picamera2 <https://github.com/raspberrypi/picamera2>`_
    module is advised instead, which is used by
    :class:`platypush.plugins.camera.pi.CameraPiPlugin`.

    You may want to use this plugin if you are running an old OS that does not
    support the new ``picamera2`` module. Even in that case, you may probably
    consider using :class:`platypush.plugins.camera.ffmpeg.FfmpegCameraPlugin`
    or :class:`platypush.plugins.camera.gstreamer.GStreamerCameraPlugin`, as
    ``picamera`` is not maintained anymore and may not work properly.
    """

    _camera_class = PiCamera
    _camera_info_class = PiCameraInfo
    # Stream formats supported by the legacy picamera recorder
    _supported_encoders = ('h264', 'mjpeg')

    def __init__(
        self,
        device: int = 0,
        fps: float = 30.0,
        warmup_seconds: float = 2.0,
        sharpness: int = 0,
        contrast: int = 0,
        brightness: int = 50,
        video_stabilization: bool = False,
        iso: int = 0,
        exposure_compensation: int = 0,
        exposure_mode: str = 'auto',
        meter_mode: str = 'average',
        awb_mode: str = 'auto',
        image_effect: str = 'none',
        led_pin: Optional[int] = None,
        color_effects: Optional[Union[str, List[str]]] = None,
        zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0),
        stream_format: str = 'h264',
        **camera,
    ):
        """
        :param device: Camera device number (default: 0). Only supported on
            devices with multiple camera slots.
        :param fps: Frames per second (default: 30.0).
        :param warmup_seconds: Seconds to wait for the camera to warm up
            before taking a photo (default: 2.0).
        :param sharpness: Sharpness level, as an integer between -100 and 100.
        :param contrast: Contrast level, as an integer between -100 and 100.
        :param brightness: Brightness level, as an integer between 0 and 100.
        :param video_stabilization: Enable video stabilization (default: False).
        :param iso: ISO level (default: 0).
        :param exposure_compensation: Exposure compensation level, as an
            integer between -25 and 25.
        :param exposure_mode: Exposure mode. Allowed values:

            - ``off``
            - ``auto`` (default)
            - ``night``
            - ``nightpreview``
            - ``backlight``
            - ``spotlight``
            - ``sports``
            - ``snow``
            - ``beach``
            - ``verylong``
            - ``fixedfps``
            - ``antishake``
            - ``fireworks``

        :param meter_mode: Metering mode used for the exposure. Allowed values:

            - ``average`` (default)
            - ``spot``
            - ``backlit``
            - ``matrix``

        :param awb_mode: Auto white balance mode. Allowed values:

            - ``off``
            - ``auto`` (default)
            - ``sunlight``
            - ``cloudy``
            - ``shade``
            - ``tungsten``
            - ``fluorescent``
            - ``incandescent``
            - ``flash``
            - ``horizon``

        :param image_effect: Image effect applied to the camera. Allowed values:

            - ``none`` (default)
            - ``negative``
            - ``solarize``
            - ``sketch``
            - ``denoise``
            - ``emboss``
            - ``oilpaint``
            - ``hatch``
            - ``gpen``
            - ``pastel``
            - ``watercolor``
            - ``film``
            - ``blur``
            - ``saturation``
            - ``colorswap``
            - ``washedout``
            - ``posterise``
            - ``colorpoint``
            - ``colorbalance``
            - ``cartoon``
            - ``deinterlace1``
            - ``deinterlace2``

        :param led_pin: LED PIN number, if the camera LED is wired to a GPIO
            PIN and you want to control it.
        :param color_effects: Color effects to be applied to the camera
            (default: None).
        :param zoom: Camera zoom, in the format ``(x, y, width, height)``
            (default: ``(0.0, 0.0, 1.0, 1.0)``).
        :param stream_format: Default format for the output when streamed to a
            network device. Available:

            - ``h264`` (default)
            - ``mjpeg``

        :param camera: Options for the base camera plugin (see
            :class:`platypush.plugins.camera.CameraPlugin`).
        """
        super().__init__(
            device=device,
            fps=fps,
            warmup_seconds=warmup_seconds,
            stream_format=stream_format,
            **camera,
        )

        self.camera_info.sharpness = sharpness  # type: ignore
        self.camera_info.contrast = contrast  # type: ignore
        self.camera_info.brightness = brightness  # type: ignore
        self.camera_info.video_stabilization = video_stabilization  # type: ignore
        self.camera_info.iso = iso  # type: ignore
        self.camera_info.exposure_compensation = exposure_compensation  # type: ignore
        self.camera_info.meter_mode = meter_mode  # type: ignore
        self.camera_info.exposure_mode = exposure_mode  # type: ignore
        self.camera_info.awb_mode = awb_mode  # type: ignore
        self.camera_info.image_effect = image_effect  # type: ignore
        self.camera_info.color_effects = color_effects  # type: ignore
        self.camera_info.zoom = zoom  # type: ignore
        self.camera_info.led_pin = led_pin  # type: ignore

    def prepare_device(self, device: Camera, **_):
        """
        Create and configure a ``picamera.PiCamera`` instance from the
        device's stored settings.

        :param device: The camera model holding the configuration.
        :return: The configured ``picamera.PiCamera`` object.
        """
        import picamera  # type: ignore

        assert isinstance(device, PiCamera), f'Invalid camera type: {type(device)}'
        camera = picamera.PiCamera(
            camera_num=device.info.device,
            resolution=device.info.resolution,
            framerate=device.info.fps,
            led_pin=device.info.led_pin,
        )

        camera.hflip = device.info.horizontal_flip
        camera.vflip = device.info.vertical_flip
        camera.sharpness = device.info.sharpness
        camera.contrast = device.info.contrast
        camera.brightness = device.info.brightness
        camera.video_stabilization = device.info.video_stabilization
        camera.iso = device.info.iso
        camera.exposure_compensation = device.info.exposure_compensation
        camera.exposure_mode = device.info.exposure_mode
        camera.meter_mode = device.info.meter_mode
        camera.awb_mode = device.info.awb_mode
        camera.image_effect = device.info.image_effect
        camera.color_effects = device.info.color_effects
        camera.rotation = device.info.rotate or 0
        camera.zoom = device.info.zoom
        return camera

    def release_device(self, device: Camera):
        """
        Stop any active recording and close the underlying picamera object.
        Errors caused by the camera already being stopped/closed are ignored.
        """
        import picamera  # type: ignore

        assert isinstance(device, PiCamera), f'Invalid camera type: {type(device)}'
        if device.object:
            try:
                device.object.stop_recording()
            except (ConnectionError, picamera.PiCameraNotRecording):
                pass

        if device.object and not device.object.closed:
            try:
                device.object.close()
            except (ConnectionError, picamera.PiCameraClosed):
                pass

    def capture_frame(self, device: Camera, *_, **__):
        """
        Capture a single RGB frame from the camera and return it as a PIL
        image.
        """
        import numpy as np
        from PIL import Image

        assert device.info.resolution, 'Invalid resolution'
        assert device.object, 'Camera not opened'

        # picamera pads unencoded capture buffers up to the nearest multiple
        # of 16 (height) and 32 (width). ``x + (-x % n)`` rounds x up to a
        # multiple of n - the previous ``x + (x % n)`` form only worked when
        # the remainder was 0 or n/2.
        width, height = device.info.resolution
        shape = (
            height + (-height % 16),
            width + (-width % 32),
            3,
        )

        frame = np.empty(shape, dtype=np.uint8)
        device.object.capture(frame, 'rgb')
        return Image.fromarray(frame)

    def start_preview(self, camera: Camera):
        """
        Start camera preview.
        """
        assert camera.object, 'Camera not opened'
        camera.object.start_preview()

    def stop_preview(self, camera: Camera):
        """
        Stop camera preview. Errors raised while stopping (e.g. no preview
        running) are logged as warnings and swallowed.
        """
        if not camera.object:
            return

        try:
            camera.object.stop_preview()
        except Exception as e:
            self.logger.warning(str(e))

    @action
    def capture_preview(
        self,
        device: Optional[Union[str, int]] = None,
        duration: Optional[float] = None,
        n_frames: Optional[int] = None,
        **camera,
    ) -> dict:
        """
        Start the camera preview.

        :param device: Camera device to preview from (default: the configured
            camera).
        :param duration: Preview duration, in seconds (default: run until
            explicitly stopped).
        :param n_frames: Preview for this many frames instead of a duration;
            converted to seconds through the camera's configured fps.
        :param camera: Camera parameters override - see constructor parameters.
        :return: The status of the camera.
        """
        camera = self.open_device(device=device, **camera)
        self.start_preview(camera)

        if n_frames and camera.info.fps:
            # n_frames at fps frames/second lasts n_frames / fps seconds
            # (the previous n_frames * fps computed a nonsensical duration)
            duration = n_frames / camera.info.fps
        if duration:
            # The timer must be started, otherwise the preview never stops
            threading.Timer(duration, lambda: self.stop_preview(camera)).start()

        return self.status()  # type: ignore

    def _streaming_loop(self, camera: Camera, stream_format: str, sock: IO, *_, **__):
        """
        Record the camera stream to the given client socket until either the
        stream stop event or the plugin stop event is set.
        """
        from picamera import PiCamera as PiCamera_  # type: ignore

        stream_format = stream_format.lower()
        assert (
            stream_format in self._supported_encoders
        ), f'Invalid stream format: {stream_format}. Supported formats: {", ".join(self._supported_encoders)}'
        assert isinstance(camera, PiCamera), f'Invalid camera type: {type(camera)}'
        assert camera.object and isinstance(
            camera.object, PiCamera_
        ), f'Invalid camera object type: {type(camera.object)}'

        cam = camera.object

        try:
            cam.start_recording(sock, format=stream_format)
            while not wait_for_either(
                camera.stop_stream_event, self._should_stop, timeout=1
            ):
                cam.wait_recording(1)
        except ConnectionError:
            self.logger.info('Client closed connection')
        finally:
            # Clean up the recorder and the streaming state independently, so
            # that a failure in one step doesn't prevent the other
            try:
                cam.stop_recording()
            except Exception as e:
                self.logger.warning('Could not stop recording: %s', e)

            try:
                self.stop_streaming()
            except Exception as e:
                self.logger.warning('Could not stop streaming: %s', e)

    def _prepare_stream_writer(self, *_, **__):
        """
        Overrides the base method to do nothing - the stream writer is handled
        by the picamera library.
        """
# vim:sw=4:ts=4:et:

View File

@ -2,20 +2,24 @@ manifest:
events: {}
install:
apk:
- ffmpeg
- py3-numpy
- py3-pillow
apt:
- ffmpeg
- python3-numpy
- python3-pillow
dnf:
- ffmpeg
- python-numpy
- python-pillow
pacman:
- ffmpeg
- python-numpy
- python-pillow
apt:
- python3-numpy
- python3-pillow
pip:
- picamera
- numpy
- Pillow
package: platypush.backend.camera.pi
type: backend
package: platypush.plugins.camera.pi.legacy
type: plugin

View File

@ -0,0 +1,46 @@
from dataclasses import asdict, dataclass
from typing import Optional, Union, List, Tuple
from platypush.plugins.camera import CameraInfo, Camera
@dataclass
class PiCameraInfo(CameraInfo):
    """
    Information and capture settings for a Pi Camera device.
    """

    sharpness: int = 0
    contrast: int = 0
    brightness: int = 50
    video_stabilization: bool = False
    iso: int = 0
    exposure_compensation: int = 0
    exposure_mode: str = 'auto'
    meter_mode: str = 'average'
    awb_mode: str = 'auto'
    image_effect: str = 'none'
    color_effects: Optional[Union[str, List[str]]] = None
    zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0)
    led_pin: Optional[int] = None

    def to_dict(self) -> dict:
        """
        :return: A dictionary representation of the camera info, merging the
            fields inherited from the base :class:`CameraInfo`.
        """
        return {
            'sharpness': self.sharpness,
            'contrast': self.contrast,
            'brightness': self.brightness,
            'video_stabilization': self.video_stabilization,
            'iso': self.iso,
            'exposure_compensation': self.exposure_compensation,
            'exposure_mode': self.exposure_mode,
            'meter_mode': self.meter_mode,
            'awb_mode': self.awb_mode,
            'image_effect': self.image_effect,
            'color_effects': self.color_effects,
            'zoom': self.zoom,
            'led_pin': self.led_pin,
            # Bug fix: ``asdict(super())`` raises TypeError because the
            # super() proxy is not a dataclass instance - delegate to the
            # parent's to_dict() to merge the base fields instead.
            **super().to_dict(),
        }
class PiCamera(Camera):
    """
    Camera model for the legacy picamera integration.
    """

    # Narrows the base ``info`` attribute to the Pi-specific info class
    info: PiCameraInfo  # type: ignore
# vim:sw=4:ts=4:et:

View File

@ -2,19 +2,23 @@ manifest:
events: {}
install:
apk:
- ffmpeg
- py3-numpy
- py3-pillow
apt:
- ffmpeg
- python3-numpy
- python3-pillow
dnf:
- ffmpeg
- python-numpy
- python-pillow
pacman:
- ffmpeg
- python-numpy
- python-pillow
pip:
- picamera
- picamera2
- numpy
- Pillow
package: platypush.plugins.camera.pi

View File

@ -1,46 +1,34 @@
from dataclasses import dataclass
from typing import Optional, Union, List, Tuple
from dataclasses import asdict, dataclass
from platypush.plugins.camera import CameraInfo, Camera
@dataclass
class PiCameraInfo(CameraInfo):
"""
PiCamera info dataclass.
"""
sharpness: int = 0
contrast: int = 0
brightness: int = 50
video_stabilization: bool = False
iso: int = 0
exposure_compensation: int = 0
exposure_mode: str = 'auto'
hdr_mode: str = 'auto'
meter_mode: str = 'average'
awb_mode: str = 'auto'
image_effect: str = 'none'
color_effects: Optional[Union[str, List[str]]] = None
zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0)
led_pin: Optional[int] = None
def to_dict(self) -> dict:
return {
'sharpness': self.sharpness,
'contrast': self.contrast,
'brightness': self.brightness,
'video_stabilization': self.video_stabilization,
'iso': self.iso,
'exposure_compensation': self.exposure_compensation,
'exposure_mode': self.exposure_mode,
'meter_mode': self.meter_mode,
'awb_mode': self.awb_mode,
'image_effect': self.image_effect,
'color_effects': self.color_effects,
'zoom': self.zoom,
'led_pin': self.led_pin,
**super().to_dict()
}
return asdict(self)
class PiCamera(Camera):
info: PiCameraInfo
"""
PiCamera model.
"""
info: PiCameraInfo # type: ignore
# vim:sw=4:ts=4:et:

View File

@ -18,8 +18,8 @@ from importlib.machinery import SourceFileLoader
from importlib.util import spec_from_loader, module_from_spec
from multiprocessing import Lock as PLock
from tempfile import gettempdir
from threading import Lock as TLock
from typing import Generator, Optional, Tuple, Type, Union
from threading import Event, Lock as TLock
from typing import Callable, Generator, Optional, Tuple, Type, Union
from dateutil import parser, tz
from redis import Redis
@ -780,4 +780,17 @@ def get_default_downloads_dir() -> str:
return os.path.join(os.path.expanduser('~'), 'Downloads')
def wait_for_either(*events, timeout: Optional[float] = None, cls: Type = Event):
    """
    Block until at least one of the given events is set.

    :param events: The events to be checked.
    :param timeout: The maximum time to wait for the event to be set. Default: None.
    :param cls: The class to be used for the event. Default: threading.Event.
    """
    from .threads import OrEvent

    combined = OrEvent(*events, cls=cls)
    return combined.wait(timeout=timeout)
# vim:sw=4:ts=4:et:

View File

@ -0,0 +1,58 @@
import threading
from typing import Callable, Optional, Type
def OrEvent(*events, cls: Type = threading.Event):
    """
    Wrapper for threading.Event that allows to create an event that is
    set if any of the given events are set.

    The input events are monkey-patched so that every ``set()``/``clear()``
    call on them also refreshes the combined event's state.

    Adapted from
    https://stackoverflow.com/questions/12317940/python-threading-can-i-sleep-on-two-threading-events-simultaneously#12320352.

    :param events: The events to be checked.
    :param cls: The class to be used for the event. Default: threading.Event.
    """
    combined = cls()

    def refresh():
        # The combined event mirrors the logical OR of the wrapped events
        if any(evt.is_set() for evt in events):
            combined.set()
        else:
            combined.clear()

    def wrapped_set(evt):
        evt._set()
        evt.changed()

    def wrapped_clear(evt):
        evt._clear()
        evt.changed()

    for evt in events:
        # Keep the original set/clear around, then replace them with
        # versions that also notify the combined event
        evt._set = evt.set
        evt._clear = evt.clear
        evt.changed = refresh
        evt.set = lambda e=evt: wrapped_set(e)
        evt.clear = lambda e=evt: wrapped_clear(e)

    refresh()
    return combined
def wait_for_either(
    *events, timeout: Optional[float] = None, cls: Type = threading.Event
):
    """
    Block until at least one of the given events is set.

    :param events: The events to be checked.
    :param timeout: The maximum time to wait for the event to be set. Default: None.
    :param cls: The class to be used for the event. Default: threading.Event.
    """
    combined = OrEvent(*events, cls=cls)
    return combined.wait(timeout=timeout)