Merge pull request 'New `picamera` integration' (#367) from 363/new-picamera-integration into master

Reviewed-on: #367
This commit is contained in:
Fabio Manganiello 2024-02-25 21:41:42 +01:00
commit 5bf286d07c
23 changed files with 908 additions and 496 deletions

View File

@@ -7,7 +7,6 @@ Backends
     :caption: Backends:

     platypush/backend/button.flic.rst
-    platypush/backend/camera.pi.rst
     platypush/backend/chat.telegram.rst
     platypush/backend/http.rst
     platypush/backend/midi.rst

View File

@@ -1,6 +0,0 @@
``camera.pi``
===============================

.. automodule:: platypush.backend.camera.pi
    :members:

View File

@@ -0,0 +1,5 @@
``camera.pi.legacy``
====================

.. automodule:: platypush.plugins.camera.pi.legacy
    :members:

View File

@@ -21,6 +21,7 @@ Plugins
     platypush/plugins/camera.gstreamer.rst
     platypush/plugins/camera.ir.mlx90640.rst
     platypush/plugins/camera.pi.rst
+    platypush/plugins/camera.pi.legacy.rst
     platypush/plugins/chat.irc.rst
     platypush/plugins/chat.telegram.rst
     platypush/plugins/clipboard.rst

View File

@@ -1,214 +0,0 @@
import json
import socket
from enum import Enum
from threading import Thread
from platypush.backend import Backend
from platypush.context import get_backend
class CameraPiBackend(Backend):
"""
Backend to interact with a Raspberry Pi camera. It can start and stop
recordings and take pictures. It can be programmatically controlled through
the :class:`platypush.plugins.camera.pi` plugin. Note that the Redis backend
must be configured and running to enable camera control.
This backend is **DEPRECATED**. Use the plugin :class:`platypush.plugins.camera.pi.CameraPiPlugin` instead to run
Pi camera actions. If you want to start streaming the camera on application start then simply create an event hook
on :class:`platypush.message.event.application.ApplicationStartedEvent` that runs ``camera.pi.start_streaming``.
"""
class CameraAction(Enum):
START_RECORDING = 'START_RECORDING'
STOP_RECORDING = 'STOP_RECORDING'
TAKE_PICTURE = 'TAKE_PICTURE'
def __eq__(self, other):
return self.value == other
# noinspection PyUnresolvedReferences,PyPackageRequirements
def __init__(
self,
listen_port,
bind_address='0.0.0.0',
x_resolution=640,
y_resolution=480,
redis_queue='platypush/camera/pi',
start_recording_on_startup=True,
framerate=24,
hflip=False,
vflip=False,
sharpness=0,
contrast=0,
brightness=50,
video_stabilization=False,
iso=0,
exposure_compensation=0,
exposure_mode='auto',
meter_mode='average',
awb_mode='auto',
image_effect='none',
color_effects=None,
rotation=0,
crop=(0.0, 0.0, 1.0, 1.0),
**kwargs
):
"""
See https://www.raspberrypi.org/documentation/usage/camera/python/README.md
for a detailed reference about the Pi camera options.
:param listen_port: Port where the camera process will provide the video output while recording
:type listen_port: int
:param bind_address: Bind address (default: 0.0.0.0).
:type bind_address: str
"""
super().__init__(**kwargs)
self.bind_address = bind_address
self.listen_port = listen_port
self.server_socket = socket.socket()
self.server_socket.bind(
(self.bind_address, self.listen_port)
) # lgtm [py/bind-socket-all-network-interfaces]
self.server_socket.listen(0)
import picamera
self.camera = picamera.PiCamera()
self.camera.resolution = (x_resolution, y_resolution)
self.camera.framerate = framerate
self.camera.hflip = hflip
self.camera.vflip = vflip
self.camera.sharpness = sharpness
self.camera.contrast = contrast
self.camera.brightness = brightness
self.camera.video_stabilization = video_stabilization
self.camera.ISO = iso
self.camera.exposure_compensation = exposure_compensation
self.camera.exposure_mode = exposure_mode
self.camera.meter_mode = meter_mode
self.camera.awb_mode = awb_mode
self.camera.image_effect = image_effect
self.camera.color_effects = color_effects
self.camera.rotation = rotation
self.camera.crop = crop
self.start_recording_on_startup = start_recording_on_startup
self.redis = None
self.redis_queue = redis_queue
self._recording_thread = None
def send_camera_action(self, action, **kwargs):
action = {'action': action.value, **kwargs}
self.redis.send_message(msg=json.dumps(action), queue_name=self.redis_queue)
def take_picture(self, image_file):
"""
Take a picture.
:param image_file: Output image file
:type image_file: str
"""
self.logger.info('Capturing camera snapshot to {}'.format(image_file))
self.camera.capture(image_file)
self.logger.info('Captured camera snapshot to {}'.format(image_file))
# noinspection PyShadowingBuiltins
def start_recording(self, video_file=None, format='h264'):
"""
Start a recording.
:param video_file: Output video file. If specified, the video will be recorded to file, otherwise it will be
served via TCP/IP on the listen_port. Use ``stop_recording`` to stop the recording.
:type video_file: str
:param format: Video format (default: h264)
:type format: str
"""
# noinspection PyBroadException
def recording_thread():
if video_file:
self.camera.start_recording(video_file, format=format)
while True:
self.camera.wait_recording(2)
else:
while not self.should_stop():
connection = self.server_socket.accept()[0].makefile('wb')
self.logger.info(
'Accepted client connection on port {}'.format(self.listen_port)
)
try:
self.camera.start_recording(connection, format=format)
while True:
self.camera.wait_recording(2)
except ConnectionError:
self.logger.info('Client closed connection')
try:
self.stop_recording()
except Exception as e:
self.logger.warning(
'Could not stop recording: {}'.format(str(e))
)
try:
connection.close()
except Exception as e:
self.logger.warning(
'Could not close connection: {}'.format(str(e))
)
self.send_camera_action(self.CameraAction.START_RECORDING)
if self._recording_thread:
self.logger.info('Recording already running')
return
self.logger.info('Starting camera recording')
self._recording_thread = Thread(
target=recording_thread, name='PiCameraRecorder'
)
self._recording_thread.start()
def stop_recording(self):
"""Stops recording"""
self.logger.info('Stopping camera recording')
try:
self.camera.stop_recording()
except Exception as e:
self.logger.warning('Failed to stop recording')
self.logger.exception(e)
def run(self):
super().run()
if not self.redis:
self.redis = get_backend('redis')
if self.start_recording_on_startup:
self.send_camera_action(self.CameraAction.START_RECORDING)
self.logger.info('Initialized Pi camera backend')
while not self.should_stop():
try:
msg = self.redis.get_message(self.redis_queue)
if msg.get('action') == self.CameraAction.START_RECORDING:
self.start_recording()
elif msg.get('action') == self.CameraAction.STOP_RECORDING:
self.stop_recording()
elif msg.get('action') == self.CameraAction.TAKE_PICTURE:
self.take_picture(image_file=msg.get('image_file'))
except Exception as e:
self.logger.exception(e)
# vim:sw=4:ts=4:et:
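The deprecation note above suggests replacing the removed backend with an event hook on ``ApplicationStartedEvent``. A minimal sketch of such a hook as a user script, assuming the current ``when``/``run`` user-script helpers (the file name and the exact imports are assumptions; adjust them to the conventions of your Platypush version):

# Hypothetical user script, e.g. ~/.config/platypush/scripts/camera_autostart.py
from platypush import run, when
from platypush.message.event.application import ApplicationStartedEvent


@when(ApplicationStartedEvent)
def start_camera_stream(event, **context):
    # Same effect as the removed backend's start_recording_on_startup flag
    run('camera.pi.start_streaming')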

View File

@@ -594,6 +594,29 @@ backend.http:
 # horizontal_flip: false
 # # Whether to flip the image along the horizontal axis (default: False)
 # vertical_flip: false
+#
+# # -- Streaming options
+# # If `stream_on_start` is set to true, then camera streaming will start as
+# # soon as the application/plugin is started. Otherwise, only when the
+# # `camera.<plugin>.start_streaming` action is run. The camera will be
+# # streamed on the specified `bind_address` and `listen_port` in the
+# # specified `stream_format`. If `stream_format` is a video format (e.g.
+# # h264 or mkv) then you can play the raw camera stream through e.g.
+# # `vlc tcp://<address>:<listen_port>`.
+# # Alternatively, you can access the camera stream over HTTP at
+# # `http(s)://<address>:<http-port>/camera/<plugin>/video.<format>`.
+# # For example, for MJPEG stream (usually the fastest option over HTTP):
+# # `http://localhost:8008/camera/ffmpeg/video.mjpeg`.
+# # An HTTP stream is the safest option, as it has to go through the standard
+# # HTTP authentication process, while direct TCP access may expose your
+# # camera to unauthenticated access. If you decide to directly stream over
+# # TCP, make sure to carefully select the `bind_address`, add a firewall
+# # rule for the streaming port, and/or ensure that the device's port is only
+# # accessible from a safe network.
+# # stream_on_start: false
+# # bind_address: 0.0.0.0
+# # listen_port: 5000
+# # stream_format: h264
 ###
 ### -----------------
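As a rough illustration of the raw TCP option described above, a client only needs to read bytes from the configured address/port and hand them to a video player. A minimal sketch, assuming a camera plugin configured with ``stream_on_start: true``, ``stream_format: h264`` and ``listen_port: 5000`` on the local host:

import socket

# Dump a chunk of the raw stream to a file that vlc/ffplay can play back.
with socket.create_connection(('localhost', 5000)) as sock, open(
    'capture.h264', 'wb'
) as out:
    for _ in range(2000):
        chunk = sock.recv(4096)
        if not chunk:
            break
        out.write(chunk)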

View File

@@ -19,7 +19,7 @@ from platypush.message.event.camera import (
     CameraRecordingStoppedEvent,
     CameraVideoRenderedEvent,
 )
-from platypush.plugins import Plugin, action
+from platypush.plugins import RunnablePlugin, action
 from platypush.plugins.camera.model.camera import CameraInfo, Camera
 from platypush.plugins.camera.model.exceptions import (
     CameraException,
@@ -31,7 +31,7 @@ from platypush.plugins.camera.model.writer.preview import (
     PreviewWriter,
     PreviewWriterFactory,
 )
-from platypush.utils import get_plugin_name_by_class
+from platypush.utils import get_plugin_name_by_class, wait_for_either

 __all__ = [
     'Camera',
@@ -45,7 +45,7 @@ __all__ = [
 ]


-class CameraPlugin(Plugin, ABC):
+class CameraPlugin(RunnablePlugin, ABC):
     """
     Abstract plugin to control camera devices.
@@ -86,6 +86,7 @@ class CameraPlugin(Plugin, ABC):
         stream_format: str = 'mjpeg',
         listen_port: Optional[int] = 5000,
         bind_address: str = '0.0.0.0',
+        stream_on_start: bool = False,
         ffmpeg_bin: str = 'ffmpeg',
         input_codec: Optional[str] = None,
         output_codec: Optional[str] = None,
@@ -94,41 +95,57 @@
         """
         :param device: Identifier of the default capturing device.
         :param resolution: Default resolution, as a tuple of two integers.
-        :param frames_dir: Directory where the camera frames will be stored (default:
-            ``~/.local/share/platypush/<plugin.name>/frames``)
+        :param frames_dir: Directory where the camera frames will be stored
+            (default: ``~/.local/share/platypush/<plugin.name>/frames``)
         :param warmup_frames: Cameras usually take a while to adapt their
             luminosity and focus to the environment when taking a picture.
             This parameter allows you to specify the number of "warmup" frames
             to capture upon picture command before actually capturing a frame
             (default: 5 but you may want to calibrate this parameter for your
             camera)
-        :param warmup_seconds: Number of seconds to wait before a picture is taken or the first frame of a
-            video/sequence is captured (default: 0).
-        :param capture_timeout: Maximum number of seconds to wait between the programmed termination of a capture
-            session and the moment the device is released.
-        :param scale_x: If set, the images will be scaled along the x-axis by the specified factor
-        :param scale_y: If set, the images will be scaled along the y-axis by the specified factor
+        :param warmup_seconds: Number of seconds to wait before a picture is
+            taken or the first frame of a video/sequence is captured (default:
+            0).
+        :param capture_timeout: Maximum number of seconds to wait between the
+            programmed termination of a capture session and the moment the
+            device is released.
+        :param scale_x: If set, the images will be scaled along the x-axis by
+            the specified factor
+        :param scale_y: If set, the images will be scaled along the y-axis by
+            the specified factor
         :param color_transform: Color transformation to apply to the images.
         :param grayscale: Whether the output should be converted to grayscale.
-        :param rotate: If set, the images will be rotated by the specified number of degrees
+        :param rotate: If set, the images will be rotated by the specified
+            number of degrees
         :param fps: Frames per second (default: 25).
-        :param horizontal_flip: If set, the images will be flipped on the horizontal axis.
-        :param vertical_flip: If set, the images will be flipped on the vertical axis.
-        :param listen_port: Default port to be used for streaming over TCP (default: 5000).
-        :param bind_address: Default bind address for TCP streaming (default: 0.0.0.0, accept any connections).
-        :param input_codec: Specify the ffmpeg video codec (``-vcodec``) used for the input.
-        :param output_codec: Specify the ffmpeg video codec (``-vcodec``) to be used for encoding the output. For some
-            ffmpeg output formats (e.g. ``h264`` and ``rtp``) this may default to ``libxvid``.
+        :param horizontal_flip: If set, the images will be flipped on the
+            horizontal axis.
+        :param vertical_flip: If set, the images will be flipped on the vertical
+            axis.
+        :param listen_port: Default port to be used for streaming over TCP
+            (default: 5000).
+        :param bind_address: Default bind address for TCP streaming (default:
+            0.0.0.0, accept connections on any network interface).
+        :param stream_on_start: If set, the camera will start streaming on the
+            specified ``bind_address`` and ``listen_port`` as soon as the plugin
+            is started. Otherwise, the stream will be started only when the
+            :meth:`.start_streaming` method is called. Default: False.
+        :param input_codec: Specify the ffmpeg video codec (``-vcodec``) used
+            for the input.
+        :param output_codec: Specify the ffmpeg video codec (``-vcodec``) to be
+            used for encoding the output. For some ffmpeg output formats (e.g.
+            ``h264`` and ``rtp``) this may default to ``libxvid``.
         :param input_format: Plugin-specific format/type for the input stream.
         :param output_format: Plugin-specific format/type for the output videos.
         :param ffmpeg_bin: Path to the ffmpeg binary (default: ``ffmpeg``).
-        :param stream_format: Default format for the output when streamed to a network device. Available:
-
-            - ``MJPEG`` (default)
-            - ``H264`` (over ``ffmpeg``)
-            - ``H265`` (over ``ffmpeg``)
-            - ``MKV`` (over ``ffmpeg``)
-            - ``MP4`` (over ``ffmpeg``)
+        :param stream_format: Default format for the output when streamed to a
+            network device. Available:
+
+            - ``mjpeg`` (default)
+            - ``h264`` (over ``ffmpeg``)
+            - ``h265`` (over ``ffmpeg``)
+            - ``mkv`` (over ``ffmpeg``)
+            - ``mp4`` (over ``ffmpeg``)
         """
         super().__init__(**kwargs)
@@ -137,6 +154,7 @@ class CameraPlugin(Plugin, ABC):
         plugin_name = get_plugin_name_by_class(self)
         assert isinstance(workdir, str) and plugin_name
         self.workdir = os.path.join(workdir, plugin_name)
+        self._stream_on_start = stream_on_start
         pathlib.Path(self.workdir).mkdir(mode=0o755, exist_ok=True, parents=True)

         self.camera_info = self._camera_info_class(
@@ -176,9 +194,10 @@ class CameraPlugin(Plugin, ABC):
     def open_device(
         self,
-        device: Optional[Union[int, str]],
+        device: Optional[Union[int, str]] = None,
         stream: bool = False,
         redis_queue: Optional[str] = None,
+        ctx: Optional[dict] = None,
         **params,
     ) -> Camera:
         """
@@ -210,14 +229,13 @@
         else:
             camera = self._camera_class(info=info)

+        ctx = ctx or {}
+        ctx['stream'] = stream
         camera.info.set(**params)
-        camera.object = self.prepare_device(camera)
+        camera.object = self.prepare_device(camera, **ctx)

         if stream and camera.info.stream_format:
-            writer_class = StreamWriter.get_class_by_name(camera.info.stream_format)
-            camera.stream = writer_class(
-                camera=camera, plugin=self, redis_queue=redis_queue
-            )
+            self._prepare_stream_writer(camera, redis_queue=redis_queue)

         if camera.info.frames_dir:
             pathlib.Path(
@@ -227,6 +245,13 @@
         self._devices[device] = camera
         return camera

+    def _prepare_stream_writer(self, camera: Camera, redis_queue: Optional[str] = None):
+        assert camera.info.stream_format, 'No stream format specified'
+        writer_class = StreamWriter.get_class_by_name(camera.info.stream_format)
+        camera.stream = writer_class(
+            camera=camera, plugin=self, redis_queue=redis_queue
+        )
+
     def close_device(self, camera: Camera, wait_capture: bool = True) -> None:
         """
         Close and release a device.
@@ -288,7 +313,7 @@
         self.close_device(camera)

     @abstractmethod
-    def prepare_device(self, device: Camera):
+    def prepare_device(self, device: Camera, **_):
         """
         Prepare a device using the plugin-specific logic - to be implemented by the derived classes.
@@ -315,7 +340,9 @@
         raise NotImplementedError()

     @staticmethod
-    def store_frame(frame, filepath: str, format: Optional[str] = None):
+    def store_frame(  # pylint: disable=redefined-builtin
+        frame, filepath: str, format: Optional[str] = None
+    ):
         """
         Capture a frame to the filesystem using the ``PIL`` library - it can be overridden by derived classes.
@@ -339,9 +366,9 @@
     def _store_frame(
         self,
         frame,
+        *args,
         frames_dir: Optional[str] = None,
         image_file: Optional[str] = None,
-        *args,
         **kwargs,
     ) -> str:
         """
@@ -687,61 +714,84 @@
         return self.status(camera.info.device)  # type: ignore

     @staticmethod
-    def _prepare_server_socket(camera: Camera) -> socket.socket:
-        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        server_socket.bind(
-            (  # lgtm [py/bind-socket-all-network-interfaces]
-                camera.info.bind_address or '0.0.0.0',
-                camera.info.listen_port,
-            )
-        )
-        server_socket.listen(1)
-        server_socket.settimeout(1)
-        return server_socket
+    @contextmanager
+    def _prepare_server_socket(camera: Camera) -> Generator[socket.socket, None, None]:
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv_sock:
+            srv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+            srv_sock.bind(
+                (  # lgtm [py/bind-socket-all-network-interfaces]
+                    camera.info.bind_address or '0.0.0.0',
+                    camera.info.listen_port,
+                )
+            )
+            srv_sock.listen(1)
+            srv_sock.settimeout(1)
+            yield srv_sock

-    def _accept_client(self, server_socket: socket.socket) -> Optional[IO]:
+    def _accept_client(
+        self, server_socket: socket.socket
+    ) -> Tuple[Optional[socket.socket], Optional[IO]]:
         try:
             sock = server_socket.accept()[0]
             self.logger.info('Accepted client connection from %s', sock.getpeername())
-            return sock.makefile('wb')
+            return sock, sock.makefile('wb')
         except socket.timeout:
-            return
+            return None, None

     def streaming_thread(
         self, camera: Camera, stream_format: str, duration: Optional[float] = None
     ):
-        streaming_started_time = time.time()
-        server_socket = self._prepare_server_socket(camera)
-        sock = None
-        self.logger.info('Starting streaming on port %s', camera.info.listen_port)
-
-        try:
-            while camera.stream_event.is_set():
-                if duration and time.time() - streaming_started_time >= duration:
-                    break
-
-                sock = self._accept_client(server_socket)
-                if not sock:
-                    continue
-
-                if camera.info.device not in self._devices:
-                    info = asdict(camera.info)
-                    info['stream_format'] = stream_format
-                    camera = self.open_device(stream=True, **info)
-
-                assert camera.stream, 'No camera stream available'
-                camera.stream.sock = sock
-                self.start_camera(
-                    camera, duration=duration, frames_dir=None, image_file=None
-                )
-        finally:
-            self._cleanup_stream(camera, server_socket, sock)
-            self.logger.info('Stopped camera stream')
+        with self._prepare_server_socket(camera) as srv_sock:
+            streaming_started_time = time.time()
+            sock, fp = None, None
+            self.logger.info('Starting streaming on port %s', camera.info.listen_port)
+
+            try:
+                while (
+                    camera.stream_event.is_set()
+                    and not camera.stop_stream_event.is_set()
+                    and not self.should_stop()
+                ):
+                    if duration and time.time() - streaming_started_time >= duration:
+                        break
+
+                    sock, fp = self._accept_client(srv_sock)
+                    if not (sock and fp):
+                        continue
+
+                    if duration and time.time() - streaming_started_time >= duration:
+                        break
+
+                    self._streaming_loop(
+                        camera, stream_format, sock=fp, duration=duration
+                    )
+            finally:
+                self._cleanup_stream(camera, srv_sock, fp)
+
+            self.logger.info('Stopped camera stream')
+
+    def _streaming_loop(
+        self,
+        camera: Camera,
+        stream_format: str,
+        sock: IO,
+        duration: Optional[float] = None,
+    ):
+        if camera.info.device not in self._devices:
+            info = asdict(camera.info)
+            info['stream_format'] = stream_format
+            camera = self.open_device(stream=True, **info)
+
+        assert camera.stream, 'No camera stream available'
+        camera.stream.sock = sock
+        self.start_camera(camera, duration=duration, frames_dir=None, image_file=None)

     def _cleanup_stream(
         self, camera: Camera, server_socket: socket.socket, client: Optional[IO]
     ):
+        camera.stream_event.clear()
+        camera.stop_stream_event.set()
         if client:
             try:
                 client.close()
@@ -764,22 +814,40 @@
         self,
         device: Optional[Union[int, str]] = None,
         duration: Optional[float] = None,
-        stream_format: str = 'mkv',
+        stream_format: Optional[str] = None,
         **camera,
     ) -> dict:
         """
         Expose the video stream of a camera over a TCP connection.

-        :param device: Name/path/ID of the device to capture from (default: None, use the default device).
-        :param duration: Streaming thread duration (default: until :meth:`.stop_streaming` is called).
-        :param stream_format: Format of the output stream - e.g. ``h264``, ``mjpeg``, ``mkv`` etc. (default: ``mkv``).
+        When the streaming is started, the plugin will listen on the specified
+        ``bind_address`` and ``listen_port`` and stream camera frames to
+        connected clients. If ``stream_format`` is a video format (H264, H265,
+        MKV, MP4 etc.) then the camera stream can be viewed using a video
+        player - for example, using ``vlc``:
+
+        .. code-block:: bash
+
+            vlc tcp://<host>:<port>
+
+        :param device: Name/path/ID of the device to capture from (default:
+            None, use the default device).
+        :param duration: Streaming thread duration (default: until
+            :meth:`.stop_streaming` is called).
+        :param stream_format: Format of the output stream - e.g. ``h264``,
+            ``mjpeg``, ``mkv`` etc. If not specified, the ``stream_format``
+            configured on the plugin will be used.
         :param camera: Camera object properties - see constructor parameters.
         :return: The status of the device.
         """
         camera = self.open_device(
-            device=device, stream=True, stream_format=stream_format, **camera
+            device=device,
+            stream=True,
+            stream_format=stream_format or self.camera_info.stream_format,
+            **camera,
         )
-        return self._start_streaming(camera, duration, stream_format)  # type: ignore
+
+        return self._start_streaming(camera, duration, camera.info.stream_format)  # type: ignore

     def _start_streaming(
         self, camera: Camera, duration: Optional[float], stream_format: str
@@ -788,10 +856,11 @@
         assert (
             not camera.stream_event.is_set() and camera.info.device not in self._streams
         ), f'A streaming session is already running for device {camera.info.device}'
-        assert camera.info.device, 'No device name available'
+        assert camera.info.device is not None, 'No device name available'

         self._streams[camera.info.device] = camera
         camera.stream_event.set()
+        camera.stop_stream_event.clear()

         camera.stream_thread = threading.Thread(
             target=self.streaming_thread,
@@ -821,6 +890,8 @@
     def _stop_streaming(self, camera: Camera):
         camera.stream_event.clear()
+        camera.stop_stream_event.set()
+
         if camera.stream_thread and camera.stream_thread.is_alive():
             camera.stream_thread.join(timeout=5.0)
@@ -949,5 +1020,30 @@
             return camera.info.warmup_frames / camera.info.fps
         return 0

+    def main(self):
+        if not self._stream_on_start:
+            self.wait_stop()
+            return
+
+        while not self.should_stop():
+            if self._stream_on_start:
+                self.start_streaming()
+
+            cameras = list(self._streams.values())
+            if not cameras:
+                self.logger.warning('No camera devices could be streamed')
+                self.wait_stop()
+                break
+
+            camera = cameras[0]
+
+            try:
+                wait_for_either(self._should_stop, camera.stop_stream_event)
+                self.stop_streaming()
+            except Exception as e:
+                self.logger.warning('Error while stopping the camera stream: %s', e)
+            finally:
+                self.wait_stop(timeout=2)
+

 # vim:sw=4:ts=4:et:
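For reference, the new streaming flow can also be driven from a user script or procedure running inside the application; ``camera.ffmpeg`` below is only a placeholder for any plugin derived from ``CameraPlugin``:

from platypush.context import get_plugin

camera = get_plugin('camera.ffmpeg')  # any CameraPlugin subclass exposes the same actions

# Start a TCP stream on the configured bind_address/listen_port; with this change
# the stream format falls back to the plugin's configured stream_format.
camera.start_streaming()

# ... and stop it. This sets stop_stream_event, which also wakes up the
# main() loop above when stream_on_start is enabled.
camera.stop_streaming()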

View File

@@ -43,7 +43,7 @@ class CameraCvPlugin(CameraPlugin):
         if video_writer == 'cv':
             self._video_writer_class = CvFileWriter

-    def prepare_device(self, device: Camera):
+    def prepare_device(self, device: Camera, **_):
         import cv2

         cam = cv2.VideoCapture(device.info.device)

View File

@@ -34,7 +34,7 @@ class CameraFfmpegPlugin(CameraPlugin):
         super().__init__(device=device, input_format=input_format, **opts)
         self.camera_info.ffmpeg_args = ffmpeg_args or ()  # type: ignore

-    def prepare_device(self, device: Camera) -> subprocess.Popen:
+    def prepare_device(self, device: Camera, **_) -> subprocess.Popen:
         assert isinstance(device, FFmpegCamera)
         warmup_seconds = self._get_warmup_seconds(device)
         ffmpeg = [

View File

@@ -22,7 +22,7 @@ class CameraGstreamerPlugin(CameraPlugin):
         """
         super().__init__(device=device, **opts)

-    def prepare_device(self, camera: GStreamerCamera) -> Pipeline:
+    def prepare_device(self, camera: GStreamerCamera, **_) -> Pipeline:
         pipeline = Pipeline()
         src = pipeline.add_source('v4l2src', device=camera.info.device)
         convert = pipeline.add('videoconvert')

View File

@@ -65,7 +65,7 @@ class CameraIrMlx90640Plugin(CameraPlugin):
     def _is_capture_running(self):
         return self._capture_proc is not None and self._capture_proc.poll() is None

-    def prepare_device(self, device: Camera):
+    def prepare_device(self, device: Camera, **_):
         if not self._is_capture_running():
             self._capture_proc = subprocess.Popen(
                 [self.rawrgb_path, '{}'.format(device.info.fps)],

View File

@@ -50,6 +50,7 @@ class Camera:
     info: CameraInfo
     start_event: threading.Event = threading.Event()
     stream_event: threading.Event = threading.Event()
+    stop_stream_event: threading.Event = threading.Event()
     capture_thread: Optional[threading.Thread] = None
     stream_thread: Optional[threading.Thread] = None
     object = None

View File

@@ -17,11 +17,9 @@ class VideoWriter(ABC):
     mimetype: Optional[str] = None

     def __init__(self, *_, **kwargs):
-        from platypush.plugins.camera import Camera, CameraPlugin
-
         self.logger = logging.getLogger(self.__class__.__name__)
-        self.camera: Camera = kwargs.pop('camera')
-        self.plugin: CameraPlugin = kwargs.pop('plugin')
+        self.camera = kwargs.get('camera', getattr(self, 'camera', None))
+        self.plugin = kwargs.get('plugin', getattr(self, 'plugin', None))
         self.closed = False

     @abstractmethod

View File

@@ -203,10 +203,10 @@ class MKVStreamWriter(FFmpegStreamWriter):
 class H264StreamWriter(FFmpegStreamWriter):
     mimetype = 'video/h264'

-    def __init__(self, camera: Camera, *args, **kwargs):
+    def __init__(self, *args, camera: Camera, **kwargs):
         if not camera.info.output_codec:
             camera.info.output_codec = 'libxvid'
-        super().__init__(camera, *args, output_format='h264', **kwargs)
+        super().__init__(*args, camera=camera, output_format='h264', **kwargs)


 class H265StreamWriter(FFmpegStreamWriter):

View File

@@ -1,208 +1,303 @@
import threading import os
import time import time
from typing import IO, Optional, Union
from typing import Optional, List, Tuple, Union
from platypush.plugins import action from platypush.plugins import action
from platypush.plugins.camera import CameraPlugin, Camera from platypush.plugins.camera import CameraPlugin, Camera
from platypush.plugins.camera.pi.model import PiCameraInfo, PiCamera
from .model import PiCameraInfo, PiCamera
class CameraPiPlugin(CameraPlugin): class CameraPiPlugin(CameraPlugin):
""" """
Plugin to control a Pi camera. Plugin to interact with a `Pi Camera
<https://www.raspberrypi.com/documentation/accessories/camera.html>`_.
.. warning:: This integration is intended to work with the `picamera2
This plugin is **DEPRECATED**, as it relies on the old ``picamera`` module. <https://github.com/raspberrypi/picamera2>`_ module.
On recent systems, it should be possible to access the Pi Camera through
the ffmpeg or gstreamer integrations.
If you are running a very old OS that only provides the deprecated
`picamera <https://github.com/waveform80/picamera>`_ module, or you rely on
features that are currently only supported by the old module, you should
use :class:`platypush.plugins.camera.pi.legacy.CameraPiLegacyPlugin`
instead.
""" """
_camera_class = PiCamera _camera_class = PiCamera
_camera_info_class = PiCameraInfo _camera_info_class = PiCameraInfo
_awb_modes = [
"Auto",
"Incandescent",
"Tungsten",
"Fluorescent",
"Indoor",
"Daylight",
"Cloudy",
]
def __init__( def __init__(
self, self,
device: int = 0, device: int = 0,
fps: float = 30.0, fps: float = 30.0,
warmup_seconds: float = 2.0, warmup_seconds: float = 2.0,
sharpness: int = 0, sharpness: float = 1.0,
contrast: int = 0, contrast: float = 1.0,
brightness: int = 50, brightness: float = 0.0,
video_stabilization: bool = False,
iso: int = 0, iso: int = 0,
exposure_compensation: int = 0, exposure_compensation: float = 0.0,
exposure_mode: str = 'auto', awb_mode: str = 'Auto',
meter_mode: str = 'average', stream_format: str = 'h264',
awb_mode: str = 'auto', **camera,
image_effect: str = 'none',
led_pin: Optional[int] = None,
color_effects: Optional[Union[str, List[str]]] = None,
zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0),
**camera
): ):
""" """
See https://www.raspberrypi.org/documentation/usage/camera/python/README.md :param device: Camera device number (default: 0). Only supported on
for a detailed reference about the Pi camera options. devices with multiple camera slots.
:param fps: Frames per second (default: 30.0).
:param warmup_seconds: Seconds to wait for the camera to warm up
before taking a photo (default: 2.0).
:param sharpness: Sharpness level, as a float between 0.0 and 16.0,
where 1.0 is the default value, and higher values are mapped to
higher sharpness levels.
:param contrast: Contrast level, as a float between 0.0 and 32.0, where
1.0 is the default value, and higher values are mapped to higher
contrast levels.
:param brightness: Brightness level, as a float between -1.0 and 1.0.
:param iso: ISO level (default: 0).
:param exposure_compensation: Exposure compensation level, as a float
between -8.0 and 8.0.
:param awb_mode: Auto white balance mode. Allowed values:
:param camera: Options for the base camera plugin (see :class:`platypush.plugins.camera.CameraPlugin`). - ``Auto`` (default)
- ``Daylight``
- ``Cloudy``
- ``Indoor``
- ``Fluorescent``
:param stream_format: Default format for the output when streamed to a
network device. Available:
- ``h264`` (default)
- ``mjpeg``
:param camera: Options for the base camera plugin (see
:class:`platypush.plugins.camera.CameraPlugin`).
""" """
super().__init__( super().__init__(
device=device, fps=fps, warmup_seconds=warmup_seconds, **camera device=device,
fps=fps,
warmup_seconds=warmup_seconds,
stream_format=stream_format,
**camera,
) )
self.camera_info.sharpness = sharpness self.camera_info.sharpness = sharpness # type: ignore
self.camera_info.contrast = contrast self.camera_info.contrast = contrast # type: ignore
self.camera_info.brightness = brightness self.camera_info.brightness = brightness # type: ignore
self.camera_info.video_stabilization = video_stabilization self.camera_info.iso = iso # type: ignore
self.camera_info.iso = iso self.camera_info.exposure_compensation = exposure_compensation # type: ignore
self.camera_info.exposure_compensation = exposure_compensation self.camera_info.awb_mode = awb_mode # type: ignore
self.camera_info.meter_mode = meter_mode
self.camera_info.exposure_mode = exposure_mode
self.camera_info.awb_mode = awb_mode
self.camera_info.image_effect = image_effect
self.camera_info.color_effects = color_effects
self.camera_info.zoom = zoom
self.camera_info.led_pin = led_pin
# noinspection DuplicatedCode def _get_transform(self, device: Camera):
def prepare_device(self, device: PiCamera): from libcamera import Orientation, Transform # type: ignore
# noinspection PyUnresolvedReferences from picamera2.utils import orientation_to_transform # type: ignore
import picamera
camera = picamera.PiCamera( rot = device.info.rotate
camera_num=device.info.device, if not rot:
resolution=device.info.resolution, return Transform(
framerate=device.info.fps, # It may seem counterintuitive, but the picamera2 library's flip
led_pin=device.info.led_pin, # definition is the opposite of ours
) hflip=device.info.vertical_flip,
vflip=device.info.horizontal_flip,
)
camera.hflip = device.info.horizontal_flip if rot == 90:
camera.vflip = device.info.vertical_flip orient = (
camera.sharpness = device.info.sharpness Orientation.Rotate90Mirror
camera.contrast = device.info.contrast if device.info.vertical_flip
camera.brightness = device.info.brightness else Orientation.Rotate90
camera.video_stabilization = device.info.video_stabilization )
camera.iso = device.info.iso elif rot == 180:
camera.exposure_compensation = device.info.exposure_compensation orient = (
camera.exposure_mode = device.info.exposure_mode Orientation.Rotate180Mirror
camera.meter_mode = device.info.meter_mode if device.info.horizontal_flip
camera.awb_mode = device.info.awb_mode else Orientation.Rotate180
camera.image_effect = device.info.image_effect )
camera.color_effects = device.info.color_effects elif rot == 270:
camera.rotation = device.info.rotate or 0 orient = (
camera.zoom = device.info.zoom Orientation.Rotate270Mirror
if device.info.vertical_flip
else Orientation.Rotate270
)
else:
raise AssertionError(
f'Invalid rotation: {rot}. Supported values: 0, 90, 180, 270'
)
return orientation_to_transform(orient)
def prepare_device(
self,
device: Camera,
start: bool = True,
video: bool = False,
stream: bool = False,
**_,
):
from picamera2 import Picamera2 # type: ignore
assert isinstance(device, PiCamera), f'Invalid device type: {type(device)}'
camera = Picamera2(camera_num=device.info.device)
still = not (video or stream)
cfg_params = {
'main': {
'format': 'XBGR8888' if not still else 'BGR888',
**(
{'size': tuple(map(int, device.info.resolution))}
if device.info.resolution
else {}
),
},
**(
{'transform': self._get_transform(device)}
if not still
# We don't need to flip the image for individual frames, the base camera
# class methods will take care of that
else {}
),
'controls': {
'Brightness': float(device.info.brightness),
'Contrast': float(device.info.contrast),
'Sharpness': float(device.info.sharpness),
'AwbMode': self._awb_modes.index(device.info.awb_mode),
},
}
cfg = (
camera.create_video_configuration
if not still
else camera.create_still_configuration
)(**cfg_params)
camera.configure(cfg)
if start:
camera.start()
time.sleep(max(1, device.info.warmup_seconds))
return camera return camera
def release_device(self, device: PiCamera): def release_device(self, device: Camera):
# noinspection PyUnresolvedReferences
import picamera
if device.object: if device.object:
try: device.object.stop()
device.object.stop_recording() device.object.close()
except (ConnectionError, picamera.PiCameraNotRecording):
pass
if device.object and not device.object.closed: def capture_frame(self, device: Camera, *_, **__):
try: assert device.object, 'Camera not open'
device.object.close() return device.object.capture_image('main')
except (ConnectionError, picamera.PiCameraClosed):
pass
def capture_frame(self, camera: Camera, *args, **kwargs): @property
import numpy as np def _video_encoders_by_format(self) -> dict:
from PIL import Image from picamera2.encoders import H264Encoder, MJPEGEncoder # type: ignore
shape = ( return {
camera.info.resolution[1] + (camera.info.resolution[1] % 16), 'h264': H264Encoder,
camera.info.resolution[0] + (camera.info.resolution[0] % 32), 'mjpeg': MJPEGEncoder,
3, }
)
frame = np.empty(shape, dtype=np.uint8)
camera.object.capture(frame, 'rgb')
return Image.fromarray(frame)
def start_preview(self, camera: Camera):
"""
Start camera preview.
"""
camera.object.start_preview()
def stop_preview(self, camera: Camera):
"""
Stop camera preview.
"""
try:
camera.object.stop_preview()
except Exception as e:
self.logger.warning(str(e))
@action @action
def capture_preview( def capture_video(
self, duration: Optional[float] = None, n_frames: Optional[int] = None, **camera self,
) -> dict: device: Optional[int] = None,
camera = self.open_device(**camera) duration: Optional[float] = None,
self.start_preview(camera) video_file: Optional[str] = None,
preview: bool = False,
**camera,
) -> Optional[Union[str, dict]]:
"""
Capture a video.
:param device: 0-based index of the camera to capture from, if the
device supports multiple cameras. Default: use the configured
camera index or the first available camera.
:param duration: Record duration in seconds (default: None, record
until :meth:`.stop_capture``).
:param video_file: If set, the stream will be recorded to the specified
video file (default: None).
:param camera: Camera parameters override - see constructors parameters.
:param preview: Show a preview of the camera frames.
:return: If duration is specified, the method will wait until the
recording is done and return the local path to the recorded
resource. Otherwise, it will return the status of the camera device
after starting it.
"""
from picamera2 import Picamera2 # type: ignore
from picamera2.encoders import H264Encoder # type: ignore
assert video_file, 'Video file is required'
camera = self.open_device(
device=device, ctx={'start': False, 'video': True}, **camera
)
encoder = H264Encoder()
assert camera.object, 'Camera not open'
assert isinstance(
camera.object, Picamera2
), f'Invalid camera object type: {type(camera.object)}'
if preview:
camera.object.start_preview()
# Only H264 is supported for now
camera.object.start_recording(encoder, os.path.expanduser(video_file))
if n_frames:
duration = n_frames * (camera.info.fps or 0)
if duration: if duration:
threading.Timer(duration, lambda: self.stop_preview(camera)) self.wait_stop(duration)
try:
if preview:
camera.object.stop_preview()
finally:
if camera.object:
camera.object.stop_recording()
camera.object.close()
return self.status() return video_file
def streaming_thread( return self.status(camera.info.device).output
self, camera: PiCamera, stream_format: str, duration: Optional[float] = None
):
server_socket = self._prepare_server_socket(camera)
sock = None
streaming_started_time = time.time()
self.logger.info(
'Starting streaming on port {}'.format(camera.info.listen_port)
)
try: def _streaming_loop(self, camera: Camera, stream_format: str, sock: IO, *_, **__):
while camera.stream_event.is_set(): from picamera2 import Picamera2 # type: ignore
if duration and time.time() - streaming_started_time >= duration: from picamera2.outputs import FileOutput # type: ignore
break
sock = self._accept_client(server_socket) encoder_cls = self._video_encoders_by_format.get(stream_format.lower())
if not sock: assert (
continue encoder_cls
), f'Invalid stream format: {stream_format}. Supported formats: {", ".join(self._video_encoders_by_format)}'
assert isinstance(camera, PiCamera), f'Invalid camera type: {type(camera)}'
assert camera.object and isinstance(
camera.object, Picamera2
), f'Invalid camera object type: {type(camera.object)}'
if camera.object is None or camera.object.closed: cam = camera.object
camera = self.open_device(**camera.info.to_dict()) encoder = encoder_cls()
cam.encoders = encoder
encoder.output = FileOutput(sock)
cam.start_encoder(encoder)
cam.start()
try: def _prepare_stream_writer(self, *_, **__):
camera.object.start_recording(sock, format=stream_format) """
while camera.stream_event.is_set(): Overrides the base method to do nothing - the stream writer is handled
camera.object.wait_recording(1) by the picamera2 library.
except ConnectionError: """
self.logger.info('Client closed connection')
finally:
if sock:
try:
sock.close()
except Exception as e:
self.logger.warning(
'Error while closing client socket: {}'.format(str(e))
)
self.close_device(camera) def _cleanup_stream(self, camera: Camera, *_, **__):
finally: cam = camera.object
self._cleanup_stream(camera, server_socket, sock) if not cam:
self.logger.info('Stopped camera stream') return
@action cam.stop()
def start_streaming( cam.stop_encoder()
self, duration: Optional[float] = None, stream_format: str = 'h264', **camera cam.close()
) -> dict:
camera = self.open_device(stream_format=stream_format, **camera)
return self._start_streaming(camera, duration, stream_format)
# vim:sw=4:ts=4:et: # vim:sw=4:ts=4:et:
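A standalone sketch of what the new plugin's prepare_device()/capture_frame() boil down to for a still capture, using only the picamera2 calls referenced in this diff (requires a Raspberry Pi with the picamera2 stack installed; the parameter values below are illustrative):

import time

from picamera2 import Picamera2

camera = Picamera2(camera_num=0)
cfg = camera.create_still_configuration(
    main={'format': 'BGR888', 'size': (1280, 720)},
    controls={'Brightness': 0.0, 'Contrast': 1.0, 'Sharpness': 1.0},
)
camera.configure(cfg)
camera.start()
time.sleep(2)  # equivalent of warmup_seconds

image = camera.capture_image('main')  # returns a PIL image
image.save('snapshot.jpg')
camera.stop()
camera.close()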

View File

@@ -0,0 +1,301 @@
import threading
from typing import IO, Optional, List, Tuple, Union
from platypush.plugins import action
from platypush.plugins.camera import CameraPlugin, Camera
from platypush.utils import wait_for_either
from .model import PiCameraInfo, PiCamera
class CameraPiLegacyPlugin(CameraPlugin):
"""
Plugin to interact with a `Pi Camera
<https://www.raspberrypi.com/documentation/accessories/camera.html>`_.
.. warning::
This plugin is **DEPRECATED**, as it relies on the old ``picamera``
module.
The ``picamera`` module used in this plugin is deprecated and no longer
maintained. The `picamera2 <https://github.com/raspberrypi/picamera2>`_
module is advised instead, which is used by
:class:`platypush.plugins.camera.pi.CameraPiPlugin`.
You may want to use this plugin if you are running an old OS that does not
support the new ``picamera2`` module. Even in that case, you may probably
consider using :class:`platypush.plugins.camera.ffmpeg.FfmpegCameraPlugin`
or :class:`platypush.plugins.camera.gstreamer.GStreamerCameraPlugin`, as
``picamera`` is not maintained anymore and may not work properly.
"""
_camera_class = PiCamera
_camera_info_class = PiCameraInfo
_supported_encoders = ('h264', 'mjpeg')
def __init__(
self,
device: int = 0,
fps: float = 30.0,
warmup_seconds: float = 2.0,
sharpness: int = 0,
contrast: int = 0,
brightness: int = 50,
video_stabilization: bool = False,
iso: int = 0,
exposure_compensation: int = 0,
exposure_mode: str = 'auto',
meter_mode: str = 'average',
awb_mode: str = 'auto',
image_effect: str = 'none',
led_pin: Optional[int] = None,
color_effects: Optional[Union[str, List[str]]] = None,
zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0),
stream_format: str = 'h264',
**camera,
):
"""
:param device: Camera device number (default: 0). Only supported on
devices with multiple camera slots.
:param fps: Frames per second (default: 30.0).
:param warmup_seconds: Seconds to wait for the camera to warm up
before taking a photo (default: 2.0).
:param sharpness: Sharpness level, as an integer between -100 and 100.
:param contrast: Contrast level, as an integer between -100 and 100.
:param brightness: Brightness level, as an integer between 0 and 100.
:param video_stabilization: Enable video stabilization (default: False).
:param iso: ISO level (default: 0).
:param exposure_compensation: Exposure compensation level, as an
integer between -25 and 25.
:param exposure_mode: Exposure mode. Allowed values:
- ``off``
- ``auto`` (default)
- ``night``
- ``nightpreview``
- ``backlight``
- ``spotlight``
- ``sports``
- ``snow``
- ``beach``
- ``verylong``
- ``fixedfps``
- ``antishake``
- ``fireworks``
:param meter_mode: Metering mode used for the exposure. Allowed values:
- ``average`` (default)
- ``spot``
- ``backlit``
- ``matrix``
:param awb_mode: Auto white balance mode. Allowed values:
- ``off``
- ``auto`` (default)
- ``sunlight``
- ``cloudy``
- ``shade``
- ``tungsten``
- ``fluorescent``
- ``incandescent``
- ``flash``
- ``horizon``
:param image_effect: Image effect applied to the camera. Allowed values:
- ``none`` (default)
- ``negative``
- ``solarize``
- ``sketch``
- ``denoise``
- ``emboss``
- ``oilpaint``
- ``hatch``
- ``gpen``
- ``pastel``
- ``watercolor``
- ``film``
- ``blur``
- ``saturation``
- ``colorswap``
- ``washedout``
- ``posterise``
- ``colorpoint``
- ``colorbalance``
- ``cartoon``
- ``deinterlace1``
- ``deinterlace2``
:param led_pin: LED PIN number, if the camera LED is wired to a GPIO
PIN and you want to control it.
:param zoom: Camera zoom, in the format ``(x, y, width, height)``
(default: ``(0.0, 0.0, 1.0, 1.0)``).
:param stream_format: Default format for the output when streamed to a
network device. Available:
- ``h264`` (default)
- ``mjpeg``
:param camera: Options for the base camera plugin (see
:class:`platypush.plugins.camera.CameraPlugin`).
"""
super().__init__(
device=device,
fps=fps,
warmup_seconds=warmup_seconds,
stream_format=stream_format,
**camera,
)
self.camera_info.sharpness = sharpness # type: ignore
self.camera_info.contrast = contrast # type: ignore
self.camera_info.brightness = brightness # type: ignore
self.camera_info.video_stabilization = video_stabilization # type: ignore
self.camera_info.iso = iso # type: ignore
self.camera_info.exposure_compensation = exposure_compensation # type: ignore
self.camera_info.meter_mode = meter_mode # type: ignore
self.camera_info.exposure_mode = exposure_mode # type: ignore
self.camera_info.awb_mode = awb_mode # type: ignore
self.camera_info.image_effect = image_effect # type: ignore
self.camera_info.color_effects = color_effects # type: ignore
self.camera_info.zoom = zoom # type: ignore
self.camera_info.led_pin = led_pin # type: ignore
def prepare_device(self, device: Camera, **_):
import picamera # type: ignore
assert isinstance(device, PiCamera), f'Invalid camera type: {type(device)}'
camera = picamera.PiCamera(
camera_num=device.info.device,
resolution=device.info.resolution,
framerate=device.info.fps,
led_pin=device.info.led_pin,
)
camera.hflip = device.info.horizontal_flip
camera.vflip = device.info.vertical_flip
camera.sharpness = device.info.sharpness
camera.contrast = device.info.contrast
camera.brightness = device.info.brightness
camera.video_stabilization = device.info.video_stabilization
camera.iso = device.info.iso
camera.exposure_compensation = device.info.exposure_compensation
camera.exposure_mode = device.info.exposure_mode
camera.meter_mode = device.info.meter_mode
camera.awb_mode = device.info.awb_mode
camera.image_effect = device.info.image_effect
camera.color_effects = device.info.color_effects
camera.rotation = device.info.rotate or 0
camera.zoom = device.info.zoom
return camera
def release_device(self, device: Camera):
import picamera # type: ignore
assert isinstance(device, PiCamera), f'Invalid camera type: {type(device)}'
if device.object:
try:
device.object.stop_recording()
except (ConnectionError, picamera.PiCameraNotRecording):
pass
if device.object and not device.object.closed:
try:
device.object.close()
except (ConnectionError, picamera.PiCameraClosed):
pass
def capture_frame(self, device: Camera, *_, **__):
import numpy as np
from PIL import Image
assert device.info.resolution, 'Invalid resolution'
assert device.object, 'Camera not opened'
shape = (
device.info.resolution[1] + (device.info.resolution[1] % 16),
device.info.resolution[0] + (device.info.resolution[0] % 32),
3,
)
frame = np.empty(shape, dtype=np.uint8)
device.object.capture(frame, 'rgb')
return Image.fromarray(frame)
def start_preview(self, camera: Camera):
"""
Start camera preview.
"""
assert camera.object, 'Camera not opened'
camera.object.start_preview()
def stop_preview(self, camera: Camera):
"""
Stop camera preview.
"""
if not camera.object:
return
try:
camera.object.stop_preview()
except Exception as e:
self.logger.warning(str(e))
@action
def capture_preview(
self,
device: Optional[Union[str, int]] = None,
duration: Optional[float] = None,
n_frames: Optional[int] = None,
**camera,
) -> dict:
camera = self.open_device(device=device, **camera)
self.start_preview(camera)
if n_frames:
duration = n_frames * (camera.info.fps or 0)
if duration:
threading.Timer(duration, lambda: self.stop_preview(camera))
return self.status() # type: ignore
def _streaming_loop(self, camera: Camera, stream_format: str, sock: IO, *_, **__):
from picamera import PiCamera as PiCamera_ # type: ignore
stream_format = stream_format.lower()
assert (
stream_format in self._supported_encoders
), f'Invalid stream format: {stream_format}. Supported formats: {", ".join(self._supported_encoders)}'
assert isinstance(camera, PiCamera), f'Invalid camera type: {type(camera)}'
assert camera.object and isinstance(
camera.object, PiCamera_
), f'Invalid camera object type: {type(camera.object)}'
cam = camera.object
try:
cam.start_recording(sock, format=stream_format)
while not wait_for_either(
camera.stop_stream_event, self._should_stop, timeout=1
):
cam.wait_recording(1)
except ConnectionError:
self.logger.info('Client closed connection')
finally:
try:
cam.stop_recording()
self.stop_streaming()
except Exception as e:
self.logger.warning('Could not stop streaming: %s', e)
def _prepare_stream_writer(self, *_, **__):
"""
Overrides the base method to do nothing - the stream writer is handled
by the picamera library.
"""
# vim:sw=4:ts=4:et:
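As a rough standalone equivalent of what this legacy plugin does with the old picamera module (record a short H264 clip to a file), only meaningful on an OS that still ships a working picamera package:

import picamera

with picamera.PiCamera(resolution=(640, 480), framerate=24) as camera:
    camera.start_recording('video.h264', format='h264')
    camera.wait_recording(10)  # record for ~10 seconds
    camera.stop_recording()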

View File

@@ -2,20 +2,24 @@ manifest:
   events: {}
   install:
     apk:
+      - ffmpeg
       - py3-numpy
       - py3-pillow
+    apt:
+      - ffmpeg
+      - python3-numpy
+      - python3-pillow
     dnf:
+      - ffmpeg
       - python-numpy
       - python-pillow
     pacman:
+      - ffmpeg
       - python-numpy
       - python-pillow
-    apt:
-      - python3-numpy
-      - python3-pillow
     pip:
       - picamera
       - numpy
       - Pillow
-  package: platypush.backend.camera.pi
-  type: backend
+  package: platypush.plugins.camera.pi.legacy
+  type: plugin

View File

@@ -0,0 +1,46 @@
from dataclasses import asdict, dataclass
from typing import Optional, Union, List, Tuple

from platypush.plugins.camera import CameraInfo, Camera


@dataclass
class PiCameraInfo(CameraInfo):
    sharpness: int = 0
    contrast: int = 0
    brightness: int = 50
    video_stabilization: bool = False
    iso: int = 0
    exposure_compensation: int = 0
    exposure_mode: str = 'auto'
    meter_mode: str = 'average'
    awb_mode: str = 'auto'
    image_effect: str = 'none'
    color_effects: Optional[Union[str, List[str]]] = None
    zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0)
    led_pin: Optional[int] = None

    def to_dict(self) -> dict:
        return {
            'sharpness': self.sharpness,
            'contrast': self.contrast,
            'brightness': self.brightness,
            'video_stabilization': self.video_stabilization,
            'iso': self.iso,
            'exposure_compensation': self.exposure_compensation,
            'exposure_mode': self.exposure_mode,
            'meter_mode': self.meter_mode,
            'awb_mode': self.awb_mode,
            'image_effect': self.image_effect,
            'color_effects': self.color_effects,
            'zoom': self.zoom,
            'led_pin': self.led_pin,
            **asdict(super()),
        }


class PiCamera(Camera):
    info: PiCameraInfo  # type: ignore


# vim:sw=4:ts=4:et:

View File

@@ -2,19 +2,23 @@ manifest:
   events: {}
   install:
     apk:
+      - ffmpeg
       - py3-numpy
       - py3-pillow
     apt:
+      - ffmpeg
       - python3-numpy
       - python3-pillow
     dnf:
+      - ffmpeg
       - python-numpy
       - python-pillow
     pacman:
+      - ffmpeg
       - python-numpy
       - python-pillow
     pip:
-      - picamera
+      - picamera2
       - numpy
       - Pillow
   package: platypush.plugins.camera.pi

View File

@@ -1,46 +1,34 @@
-from dataclasses import dataclass
-from typing import Optional, Union, List, Tuple
+from dataclasses import asdict, dataclass

 from platypush.plugins.camera import CameraInfo, Camera


 @dataclass
 class PiCameraInfo(CameraInfo):
+    """
+    PiCamera info dataclass.
+    """
+
     sharpness: int = 0
     contrast: int = 0
     brightness: int = 50
     video_stabilization: bool = False
     iso: int = 0
     exposure_compensation: int = 0
-    exposure_mode: str = 'auto'
+    hdr_mode: str = 'auto'
     meter_mode: str = 'average'
     awb_mode: str = 'auto'
-    image_effect: str = 'none'
-    color_effects: Optional[Union[str, List[str]]] = None
-    zoom: Tuple[float, float, float, float] = (0.0, 0.0, 1.0, 1.0)
-    led_pin: Optional[int] = None

     def to_dict(self) -> dict:
-        return {
-            'sharpness': self.sharpness,
-            'contrast': self.contrast,
-            'brightness': self.brightness,
-            'video_stabilization': self.video_stabilization,
-            'iso': self.iso,
-            'exposure_compensation': self.exposure_compensation,
-            'exposure_mode': self.exposure_mode,
-            'meter_mode': self.meter_mode,
-            'awb_mode': self.awb_mode,
-            'image_effect': self.image_effect,
-            'color_effects': self.color_effects,
-            'zoom': self.zoom,
-            'led_pin': self.led_pin,
-            **super().to_dict()
-        }
+        return asdict(self)


 class PiCamera(Camera):
-    info: PiCameraInfo
+    """
+    PiCamera model.
+    """
+
+    info: PiCameraInfo  # type: ignore


 # vim:sw=4:ts=4:et:

View File

@@ -18,8 +18,8 @@ from importlib.machinery import SourceFileLoader
 from importlib.util import spec_from_loader, module_from_spec
 from multiprocessing import Lock as PLock
 from tempfile import gettempdir
-from threading import Lock as TLock
-from typing import Generator, Optional, Tuple, Type, Union
+from threading import Event, Lock as TLock
+from typing import Callable, Generator, Optional, Tuple, Type, Union

 from dateutil import parser, tz
 from redis import Redis
@@ -780,4 +780,17 @@
     return os.path.join(os.path.expanduser('~'), 'Downloads')


+def wait_for_either(*events, timeout: Optional[float] = None, cls: Type = Event):
+    """
+    Wait for any of the given events to be set.
+
+    :param events: The events to be checked.
+    :param timeout: The maximum time to wait for the event to be set. Default: None.
+    :param cls: The class to be used for the event. Default: threading.Event.
+    """
+    from .threads import OrEvent
+
+    return OrEvent(*events, cls=cls).wait(timeout=timeout)
+
+
 # vim:sw=4:ts=4:et:
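Example usage of the new helper - block until either of two events fires, which is essentially how CameraPlugin.main() waits on stop_stream_event:

import threading

from platypush.utils import wait_for_either

stop_event = threading.Event()
stream_stopped = threading.Event()

# Simulate the stream being stopped from another thread after one second.
threading.Timer(1.0, stream_stopped.set).start()

# Returns as soon as either event is set (or when the optional timeout expires).
wait_for_either(stop_event, stream_stopped, timeout=5)
print('stream stopped:', stream_stopped.is_set())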

View File

@@ -0,0 +1,58 @@
import threading
from typing import Callable, Optional, Type


def OrEvent(*events, cls: Type = threading.Event):
    """
    Wrapper for threading.Event that allows to create an event that is
    set if any of the given events are set.

    Adapted from
    https://stackoverflow.com/questions/12317940/python-threading-can-i-sleep-on-two-threading-events-simultaneously#12320352.

    :param events: The events to be checked.
    :param cls: The class to be used for the event. Default: threading.Event.
    """
    or_event = cls()

    def changed():
        bools = [e.is_set() for e in events]
        if any(bools):
            or_event.set()
        else:
            or_event.clear()

    def _to_or(e, changed_callback: Callable[[], None]):
        e._set = e.set
        e._clear = e.clear
        e.changed = changed_callback
        e.set = lambda: _or_set(e)
        e.clear = lambda: _clear_or(e)

    def _clear_or(e):
        e._clear()
        e.changed()

    def _or_set(e):
        e._set()
        e.changed()

    for e in events:
        _to_or(e, changed)

    changed()
    return or_event


def wait_for_either(
    *events, timeout: Optional[float] = None, cls: Type = threading.Event
):
    """
    Wait for any of the given events to be set.

    :param events: The events to be checked.
    :param timeout: The maximum time to wait for the event to be set. Default: None.
    :param cls: The class to be used for the event. Default: threading.Event.
    """
    return OrEvent(*events, cls=cls).wait(timeout=timeout)
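OrEvent returns a single event that mirrors the logical OR of its inputs: setting any of the wrapped events also sets the composite event, and clearing them clears it again. A short usage sketch:

import threading

from platypush.utils.threads import OrEvent

a, b = threading.Event(), threading.Event()
composite = OrEvent(a, b)

b.set()
assert composite.is_set()

b.clear()
assert not composite.is_set()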